Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--   src/core/hle/kernel/svc.cpp          2
-rw-r--r--   src/core/hle/kernel/thread.cpp      14
-rw-r--r--   src/core/hle/kernel/vm_manager.cpp  36
-rw-r--r--   src/core/hle/kernel/vm_manager.h     8
4 files changed, 29 insertions, 31 deletions
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index d1cbbc1f2..5db2db687 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -267,7 +267,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
     LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
               info_sub_id, handle);
 
-    auto& vm_manager = Core::CurrentProcess()->vm_manager;
+    const auto& vm_manager = Core::CurrentProcess()->vm_manager;
 
     switch (static_cast<GetInfoType>(info_id)) {
     case GetInfoType::AllowedCpuIdBitmask:
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index db1e2ac7a..cdb8120f2 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -252,13 +252,14 @@ void Thread::ResumeFromWait() {
  * slot: The index of the first free slot in the indicated page.
  * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
  */
-std::tuple<u32, u32, bool> GetFreeThreadLocalSlot(std::vector<std::bitset<8>>& tls_slots) {
+static std::tuple<std::size_t, std::size_t, bool> GetFreeThreadLocalSlot(
+    const std::vector<std::bitset<8>>& tls_slots) {
     // Iterate over all the allocated pages, and try to find one where not all slots are used.
-    for (unsigned page = 0; page < tls_slots.size(); ++page) {
+    for (std::size_t page = 0; page < tls_slots.size(); ++page) {
         const auto& page_tls_slots = tls_slots[page];
         if (!page_tls_slots.all()) {
             // We found a page with at least one free slot, find which slot it is
-            for (unsigned slot = 0; slot < page_tls_slots.size(); ++slot) {
+            for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
                 if (!page_tls_slots.test(slot)) {
                     return std::make_tuple(page, slot, false);
                 }
@@ -333,11 +334,8 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
 
     // Find the next available TLS index, and mark it as used
     auto& tls_slots = owner_process->tls_slots;
-    bool needs_allocation = true;
-    u32 available_page; // Which allocated page has free space
-    u32 available_slot; // Which slot within the page is free
 
-    std::tie(available_page, available_slot, needs_allocation) = GetFreeThreadLocalSlot(tls_slots);
+    auto [available_page, available_slot, needs_allocation] = GetFreeThreadLocalSlot(tls_slots);
 
     if (needs_allocation) {
         // There are no already-allocated pages with free slots, lets allocate a new one.
@@ -359,7 +357,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
         owner_process->linear_heap_used += Memory::PAGE_SIZE;
 
         tls_slots.emplace_back(0); // The page is completely available at the start
-        available_page = static_cast<u32>(tls_slots.size() - 1);
+        available_page = tls_slots.size() - 1;
         available_slot = 0; // Use the first slot in the new page
 
         auto& vm_manager = owner_process->vm_manager;
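The thread.cpp hunks replace std::tie plus three separate declarations with a single C++17 structured binding, and widen the index types from u32/unsigned to std::size_t so the static_cast on tls_slots.size() is no longer needed. A minimal self-contained sketch of the before/after, where FindFreeSlot is a hypothetical stand-in for GetFreeThreadLocalSlot rather than the commit's code:

    #include <bitset>
    #include <cstddef>
    #include <tuple>
    #include <vector>

    // Hypothetical stand-in: returns which page and slot are free, and
    // whether a brand-new page must be allocated instead.
    static std::tuple<std::size_t, std::size_t, bool> FindFreeSlot(
        const std::vector<std::bitset<8>>& pages) {
        for (std::size_t page = 0; page < pages.size(); ++page) {
            for (std::size_t slot = 0; slot < pages[page].size(); ++slot) {
                if (!pages[page].test(slot)) {
                    return {page, slot, false};
                }
            }
        }
        return {0, 0, true}; // all pages full; caller must allocate a new one
    }

    int main() {
        std::vector<std::bitset<8>> pages(1);
        pages[0].set(0); // mark slot 0 of page 0 as taken

        // Before: bool/u32 variables declared uninitialized up front, then
        // filled via std::tie. After: one structured binding declares and
        // initializes all three names directly from the returned tuple.
        auto [page, slot, needs_allocation] = FindFreeSlot(pages);
        return needs_allocation ? 1 : static_cast<int>(page * 8 + slot);
    }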
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 9d26fd781..479cacb62 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <algorithm>
 #include <iterator>
 #include <utility>
 #include "common/assert.h"
@@ -175,9 +176,9 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
 
 ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
 
-    VMAIter end = vma_map.end();
+    const VMAIter end = vma_map.end();
     // The comparison against the end of the range must be done using addresses since VMAs can be
     // merged during this process, causing invalidation of the iterators.
     while (vma != end && vma->second.base < target_end) {
@@ -207,9 +208,9 @@ VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
 
 ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
 
-    VMAIter end = vma_map.end();
+    const VMAIter end = vma_map.end();
     // The comparison against the end of the range must be done using addresses since VMAs can be
     // merged during this process, causing invalidation of the iterators.
     while (vma != end && vma->second.base < target_end) {
@@ -258,14 +259,14 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) {
         return ERR_INVALID_ADDRESS;
     }
 
-    VirtualMemoryArea& vma = vma_handle->second;
+    const VirtualMemoryArea& vma = vma_handle->second;
     if (vma.type != VMAType::Free) {
         // Region is already allocated
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    u64 start_in_vma = base - vma.base;
-    u64 end_in_vma = start_in_vma + size;
+    const VAddr start_in_vma = base - vma.base;
+    const VAddr end_in_vma = start_in_vma + size;
 
     if (end_in_vma > vma.size) {
         // Requested allocation doesn't fit inside VMA
@@ -288,17 +289,16 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
     ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
     ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", target);
 
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
     ASSERT(target_end >= target);
     ASSERT(target_end <= MAX_ADDRESS);
     ASSERT(size > 0);
 
     VMAIter begin_vma = StripIterConstness(FindVMA(target));
-    VMAIter i_end = vma_map.lower_bound(target_end);
-    for (auto i = begin_vma; i != i_end; ++i) {
-        if (i->second.type == VMAType::Free) {
-            return ERR_INVALID_ADDRESS_STATE;
-        }
+    const VMAIter i_end = vma_map.lower_bound(target_end);
+    if (std::any_of(begin_vma, i_end,
+                    [](const auto& entry) { return entry.second.type == VMAType::Free; })) {
+        return ERR_INVALID_ADDRESS_STATE;
     }
 
     if (target != begin_vma->second.base) {
@@ -346,7 +346,7 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
 }
 
 VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
-    VMAIter next_vma = std::next(iter);
+    const VMAIter next_vma = std::next(iter);
     if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
         iter->second.size += next_vma->second.size;
         vma_map.erase(next_vma);
@@ -382,22 +382,22 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     }
 }
 
-u64 VMManager::GetTotalMemoryUsage() {
+u64 VMManager::GetTotalMemoryUsage() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0xF8000000;
 }
 
-u64 VMManager::GetTotalHeapUsage() {
+u64 VMManager::GetTotalHeapUsage() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0x0;
 }
 
-VAddr VMManager::GetAddressSpaceBaseAddr() {
+VAddr VMManager::GetAddressSpaceBaseAddr() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0x8000000;
 }
 
-u64 VMManager::GetAddressSpaceSize() {
+u64 VMManager::GetAddressSpaceSize() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return MAX_ADDRESS;
 }
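The CarveVMARange hunk is why <algorithm> is now included: a hand-rolled loop scanning [begin_vma, i_end) for a free VMA becomes one std::any_of call, which short-circuits on the first match exactly as the early return did. A minimal sketch of the same pattern over a hypothetical address-to-VMA map (the names here are illustrative, not yuzu's):

    #include <algorithm>
    #include <cstdint>
    #include <map>

    enum class VMAType { Free, Allocated };
    struct VirtualMemoryArea {
        VMAType type;
    };

    int main() {
        // Hypothetical stand-in for vma_map: base address -> region descriptor.
        std::map<std::uint64_t, VirtualMemoryArea> vmas{
            {0x0000, {VMAType::Allocated}},
            {0x1000, {VMAType::Free}},
        };

        const auto begin = vmas.begin();
        const auto end = vmas.lower_bound(0x2000); // first VMA at/after range end

        // Same check as the removed for/if/return: reject the range if any
        // VMA inside it is still free.
        const bool any_free = std::any_of(
            begin, end, [](const auto& entry) { return entry.second.type == VMAType::Free; });
        return any_free ? 1 : 0;
    }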
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 38e4ebcd3..98bd04bea 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -190,16 +190,16 @@ public:
     void LogLayout() const;
 
     /// Gets the total memory usage, used by svcGetInfo
-    u64 GetTotalMemoryUsage();
+    u64 GetTotalMemoryUsage() const;
 
     /// Gets the total heap usage, used by svcGetInfo
-    u64 GetTotalHeapUsage();
+    u64 GetTotalHeapUsage() const;
 
     /// Gets the total address space base address, used by svcGetInfo
-    VAddr GetAddressSpaceBaseAddr();
+    VAddr GetAddressSpaceBaseAddr() const;
 
     /// Gets the total address space address size, used by svcGetInfo
-    u64 GetAddressSpaceSize();
+    u64 GetAddressSpaceSize() const;
 
     /// Each VMManager has its own page table, which is set as the main one when the owning process
     /// is scheduled.
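The header change is what makes the svc.cpp hunk at the top legal: once the process's VMManager is taken by const auto&, only const-qualified member functions can be called through it. A minimal sketch of that rule, with hypothetical names:

    #include <cstdint>

    class Manager {
    public:
        // const-qualified: callable through a const reference, and promises
        // not to modify the object.
        std::uint64_t GetUsage() const { return usage_; }

        // non-const: only callable on non-const objects/references.
        void SetUsage(std::uint64_t usage) { usage_ = usage; }

    private:
        std::uint64_t usage_ = 0;
    };

    int main() {
        Manager manager;
        const Manager& view = manager; // like `const auto& vm_manager = ...`
        // view.SetUsage(1);           // error: SetUsage() is not const
        return static_cast<int>(view.GetUsage()); // OK: GetUsage() is const
    }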
