From be1954e04cb5a0c3a526f78ed5490a5e65310280 Mon Sep 17 00:00:00 2001
From: Lioncash
Date: Thu, 15 Oct 2020 14:49:45 -0400
Subject: core: Fix clang build

Recent changes to the build system that made more warnings be flagged as
errors caused building via clang to break.

Fixes #4795
---
 src/core/hle/kernel/memory/address_space_info.cpp |  2 ++
 src/core/hle/kernel/memory/memory_manager.cpp     |  4 ++--
 src/core/hle/kernel/memory/page_heap.cpp          | 17 +++++++++--------
 src/core/hle/kernel/memory/page_heap.h            | 18 ++++++++++--------
 src/core/hle/kernel/memory/page_table.cpp         |  6 ++++--
 5 files changed, 27 insertions(+), 20 deletions(-)

(limited to 'src/core/hle/kernel/memory')

diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index e4288cab4..6cf43ba24 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
     }
     UNREACHABLE();
+    return 0;
 }
 
 std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
     }
     UNREACHABLE();
+    return 0;
 }
 
 } // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
index acf13585c..a96157c37 100644
--- a/src/core/hle/kernel/memory/memory_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -71,7 +71,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
     }
 
     // If we allocated more than we need, free some
-    const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
+    const auto allocated_pages{PageHeap::GetBlockNumPages(static_cast<std::size_t>(heap_index))};
     if (allocated_pages > num_pages) {
         chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
     }
@@ -112,7 +112,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
 
     // Keep allocating until we've allocated all our pages
     for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
-        const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
+        const auto pages_per_alloc{PageHeap::GetBlockNumPages(static_cast<std::size_t>(index))};
 
         while (num_pages >= pages_per_alloc) {
             // Allocate a block
diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp
index 0ab1f7205..7890b8c1a 100644
--- a/src/core/hle/kernel/memory/page_heap.cpp
+++ b/src/core/hle/kernel/memory/page_heap.cpp
@@ -33,11 +33,12 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_
 }
 
 VAddr PageHeap::AllocateBlock(s32 index) {
-    const std::size_t needed_size{blocks[index].GetSize()};
+    const auto u_index = static_cast<std::size_t>(index);
+    const auto needed_size{blocks[u_index].GetSize()};
 
-    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
-        if (const VAddr addr{blocks[i].PopBlock()}; addr) {
-            if (const std::size_t allocated_size{blocks[i].GetSize()};
+    for (auto i = u_index; i < MemoryBlockPageShifts.size(); i++) {
+        if (const VAddr addr = blocks[i].PopBlock(); addr != 0) {
+            if (const std::size_t allocated_size = blocks[i].GetSize();
                 allocated_size > needed_size) {
                 Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
             }
@@ -50,7 +51,7 @@ VAddr PageHeap::AllocateBlock(s32 index) {
 
 void PageHeap::FreeBlock(VAddr block, s32 index) {
     do {
-        block = blocks[index++].PushBlock(block);
+        block = blocks[static_cast<std::size_t>(index++)].PushBlock(block);
     } while (block != 0);
 }
 
@@ -69,7 +70,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
     VAddr after_start{end};
     VAddr after_end{end};
     while (big_index >= 0) {
-        const std::size_t block_size{blocks[big_index].GetSize()};
+        const std::size_t block_size{blocks[static_cast<std::size_t>(big_index)].GetSize()};
         const VAddr big_start{Common::AlignUp((start), block_size)};
         const VAddr big_end{Common::AlignDown((end), block_size)};
         if (big_start < big_end) {
@@ -87,7 +88,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space before the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
+        const std::size_t block_size{blocks[static_cast<std::size_t>(i)].GetSize()};
         while (before_start + block_size <= before_end) {
             before_end -= block_size;
             FreeBlock(before_end, i);
@@ -96,7 +97,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space after the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
+        const std::size_t block_size{blocks[static_cast<std::size_t>(i)].GetSize()};
         while (after_start + block_size <= after_end) {
             FreeBlock(after_start, i);
             after_start += block_size;
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
index 22b0de860..92a2bce04 100644
--- a/src/core/hle/kernel/memory/page_heap.h
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -34,7 +34,9 @@ public:
 
     static constexpr s32 GetBlockIndex(std::size_t num_pages) {
         for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
-            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+            const auto shift_index = static_cast<std::size_t>(i);
+            if (num_pages >=
+                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[shift_index]) / PageSize) {
                 return i;
             }
         }
@@ -86,7 +88,7 @@ private:
 
         // Set the bitmap pointers
         for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
-            bit_storages[depth] = storage;
+            bit_storages[static_cast<std::size_t>(depth)] = storage;
             size = Common::AlignUp(size, 64) / 64;
             storage += size;
         }
@@ -99,7 +101,7 @@ private:
 
         s32 depth{};
         do {
-            const u64 v{bit_storages[depth][offset]};
+            const u64 v{bit_storages[static_cast<std::size_t>(depth)][offset]};
             if (v == 0) {
                 // Non-zero depth indicates that a previous level had a free block
                 ASSERT(depth == 0);
@@ -125,7 +127,7 @@ private:
     constexpr bool ClearRange(std::size_t offset, std::size_t count) {
         const s32 depth{GetHighestDepthIndex()};
         const auto bit_ind{offset / 64};
-        u64* bits{bit_storages[depth]};
+        u64* bits{bit_storages[static_cast<std::size_t>(depth)]};
         if (count < 64) {
             const auto shift{offset % 64};
             ASSERT(shift + count <= 64);
@@ -177,11 +179,11 @@ private:
             const auto which{offset % 64};
             const u64 mask{1ULL << which};
 
-            u64* bit{std::addressof(bit_storages[depth][ind])};
+            u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
             const u64 v{*bit};
             ASSERT((v & mask) == 0);
             *bit = v | mask;
-            if (v) {
+            if (v != 0) {
                 break;
             }
             offset = ind;
@@ -195,12 +197,12 @@ private:
             const auto which{offset % 64};
             const u64 mask{1ULL << which};
 
-            u64* bit{std::addressof(bit_storages[depth][ind])};
+            u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
             u64 v{*bit};
             ASSERT((v & mask) != 0);
             v &= ~mask;
             *bit = v;
-            if (v) {
+            if (v != 0) {
                 break;
             }
             offset = ind;
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index a3fadb533..4f759d078 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -414,7 +414,8 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
     const std::size_t remaining_pages{remaining_size / PageSize};
 
     if (process->GetResourceLimit() &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
+                                              static_cast<s64>(remaining_size))) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 
@@ -778,7 +779,8 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
     auto process{system.Kernel().CurrentProcess()};
 
     if (process->GetResourceLimit() && delta != 0 &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
+                                              static_cast<s64>(delta))) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 
--
cgit v1.2.3


From 3d592972dc3fd61cc88771b889eff237e4e03e0f Mon Sep 17 00:00:00 2001
From: bunnei
Date: Tue, 20 Oct 2020 19:07:39 -0700
Subject: Revert "core: Fix clang build"

---
 src/core/hle/kernel/memory/address_space_info.cpp |  2 --
 src/core/hle/kernel/memory/memory_manager.cpp     |  4 ++--
 src/core/hle/kernel/memory/page_heap.cpp          | 17 ++++++++---------
 src/core/hle/kernel/memory/page_heap.h            | 18 ++++++++----------
 src/core/hle/kernel/memory/page_table.cpp         |  6 ++----
 5 files changed, 20 insertions(+), 27 deletions(-)

(limited to 'src/core/hle/kernel/memory')

diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index 6cf43ba24..e4288cab4 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -96,7 +96,6 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
     }
     UNREACHABLE();
-    return 0;
 }
 
 std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -113,7 +112,6 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
     }
     UNREACHABLE();
-    return 0;
 }
 
 } // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
index a96157c37..acf13585c 100644
--- a/src/core/hle/kernel/memory/memory_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -71,7 +71,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
     }
 
     // If we allocated more than we need, free some
-    const auto allocated_pages{PageHeap::GetBlockNumPages(static_cast<std::size_t>(heap_index))};
+    const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
     if (allocated_pages > num_pages) {
         chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
     }
@@ -112,7 +112,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
 
     // Keep allocating until we've allocated all our pages
     for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
-        const auto pages_per_alloc{PageHeap::GetBlockNumPages(static_cast<std::size_t>(index))};
+        const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
 
         while (num_pages >= pages_per_alloc) {
             // Allocate a block
diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp
index 7890b8c1a..0ab1f7205 100644
--- a/src/core/hle/kernel/memory/page_heap.cpp
+++ b/src/core/hle/kernel/memory/page_heap.cpp
@@ -33,12 +33,11 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_
 }
 
 VAddr PageHeap::AllocateBlock(s32 index) {
-    const auto u_index = static_cast<std::size_t>(index);
-    const auto needed_size{blocks[u_index].GetSize()};
+    const std::size_t needed_size{blocks[index].GetSize()};
 
-    for (auto i = u_index; i < MemoryBlockPageShifts.size(); i++) {
-        if (const VAddr addr = blocks[i].PopBlock(); addr != 0) {
-            if (const std::size_t allocated_size = blocks[i].GetSize();
+    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
+        if (const VAddr addr{blocks[i].PopBlock()}; addr) {
+            if (const std::size_t allocated_size{blocks[i].GetSize()};
                 allocated_size > needed_size) {
                 Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
             }
@@ -51,7 +50,7 @@ VAddr PageHeap::AllocateBlock(s32 index) {
 
 void PageHeap::FreeBlock(VAddr block, s32 index) {
     do {
-        block = blocks[static_cast<std::size_t>(index++)].PushBlock(block);
+        block = blocks[index++].PushBlock(block);
     } while (block != 0);
 }
 
@@ -70,7 +69,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
     VAddr after_start{end};
     VAddr after_end{end};
     while (big_index >= 0) {
-        const std::size_t block_size{blocks[static_cast<std::size_t>(big_index)].GetSize()};
+        const std::size_t block_size{blocks[big_index].GetSize()};
         const VAddr big_start{Common::AlignUp((start), block_size)};
         const VAddr big_end{Common::AlignDown((end), block_size)};
         if (big_start < big_end) {
@@ -88,7 +87,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space before the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[static_cast<std::size_t>(i)].GetSize()};
+        const std::size_t block_size{blocks[i].GetSize()};
         while (before_start + block_size <= before_end) {
             before_end -= block_size;
             FreeBlock(before_end, i);
@@ -97,7 +96,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space after the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[static_cast<std::size_t>(i)].GetSize()};
+        const std::size_t block_size{blocks[i].GetSize()};
         while (after_start + block_size <= after_end) {
             FreeBlock(after_start, i);
             after_start += block_size;
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
index 92a2bce04..22b0de860 100644
--- a/src/core/hle/kernel/memory/page_heap.h
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -34,9 +34,7 @@ public:
 
     static constexpr s32 GetBlockIndex(std::size_t num_pages) {
         for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
-            const auto shift_index = static_cast<std::size_t>(i);
-            if (num_pages >=
-                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[shift_index]) / PageSize) {
+            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                 return i;
             }
         }
@@ -88,7 +86,7 @@ private:
 
         // Set the bitmap pointers
        for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
-            bit_storages[static_cast<std::size_t>(depth)] = storage;
+            bit_storages[depth] = storage;
             size = Common::AlignUp(size, 64) / 64;
             storage += size;
         }
@@ -101,7 +99,7 @@ private:
 
         s32 depth{};
        do {
-            const u64 v{bit_storages[static_cast<std::size_t>(depth)][offset]};
+            const u64 v{bit_storages[depth][offset]};
             if (v == 0) {
                 // Non-zero depth indicates that a previous level had a free block
                 ASSERT(depth == 0);
@@ -127,7 +125,7 @@ private:
     constexpr bool ClearRange(std::size_t offset, std::size_t count) {
         const s32 depth{GetHighestDepthIndex()};
         const auto bit_ind{offset / 64};
-        u64* bits{bit_storages[static_cast<std::size_t>(depth)]};
+        u64* bits{bit_storages[depth]};
         if (count < 64) {
             const auto shift{offset % 64};
             ASSERT(shift + count <= 64);
@@ -179,11 +177,11 @@ private:
             const auto which{offset % 64};
             const u64 mask{1ULL << which};
 
-            u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
+            u64* bit{std::addressof(bit_storages[depth][ind])};
             const u64 v{*bit};
             ASSERT((v & mask) == 0);
             *bit = v | mask;
-            if (v != 0) {
+            if (v) {
                 break;
             }
             offset = ind;
@@ -197,12 +195,12 @@ private:
             const auto which{offset % 64};
             const u64 mask{1ULL << which};
 
-            u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
+            u64* bit{std::addressof(bit_storages[depth][ind])};
             u64 v{*bit};
             ASSERT((v & mask) != 0);
             v &= ~mask;
             *bit = v;
-            if (v != 0) {
+            if (v) {
                 break;
             }
             offset = ind;
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index 4f759d078..a3fadb533 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -414,8 +414,7 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
     const std::size_t remaining_pages{remaining_size / PageSize};
 
     if (process->GetResourceLimit() &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
-                                              static_cast<s64>(remaining_size))) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 
@@ -779,8 +778,7 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
     auto process{system.Kernel().CurrentProcess()};
 
     if (process->GetResourceLimit() && delta != 0 &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
-                                              static_cast<s64>(delta))) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 
--
cgit v1.2.3


From 63fd1bb50302867b233325f253b1e2abbc379875 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 13 Nov 2020 23:20:32 -0800
Subject: core: arm: Implement InvalidateCacheRange for CPU cache invalidation.

---
 src/core/hle/kernel/memory/page_table.cpp | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'src/core/hle/kernel/memory')

diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index a3fadb533..f53a7be82 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -670,6 +670,11 @@ ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, Memo
         return RESULT_SUCCESS;
     }
 
+    if ((prev_perm & MemoryPermission::Execute) != (perm & MemoryPermission::Execute)) {
+        // Memory execution state is changing, invalidate CPU cache range
+        system.InvalidateCpuInstructionCacheRange(addr, size);
+    }
+
     const std::size_t num_pages{size / PageSize};
     const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None
                                       ? OperationType::ChangePermissionsAndRefresh
--
cgit v1.2.3


From f95602f15207851b849c57e2a2dd313a087b2493 Mon Sep 17 00:00:00 2001
From: Lioncash
Date: Sat, 5 Dec 2020 11:40:14 -0500
Subject: video_core: Resolve more variable shadowing scenarios pt.3

Cleans out the rest of the occurrences of variable shadowing and makes
any further occurrences of shadowing compiler errors.
---
 src/core/hle/kernel/memory/memory_block.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'src/core/hle/kernel/memory')

diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h
index 9d7839d08..37fe19916 100644
--- a/src/core/hle/kernel/memory/memory_block.h
+++ b/src/core/hle/kernel/memory/memory_block.h
@@ -222,9 +222,9 @@ public:
 
 public:
     constexpr MemoryBlock() = default;
-    constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state,
-                          MemoryPermission perm, MemoryAttribute attribute)
-        : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {}
+    constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_,
+                          MemoryPermission perm_, MemoryAttribute attribute_)
+        : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
 
     constexpr VAddr GetAddress() const {
         return addr;
--
cgit v1.2.3


From feac654ba04951e77e8642cfe188277a688ca779 Mon Sep 17 00:00:00 2001
From: comex
Date: Sat, 14 Nov 2020 17:57:51 -0500
Subject: core: Mark unused fields as [[maybe_unused]]

---
 src/core/hle/kernel/memory/memory_block_manager.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'src/core/hle/kernel/memory')

diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h
index 6e1d41075..f57d1bbcc 100644
--- a/src/core/hle/kernel/memory/memory_block_manager.h
+++ b/src/core/hle/kernel/memory/memory_block_manager.h
@@ -57,8 +57,8 @@ public:
 private:
     void MergeAdjacent(iterator it, iterator& next_it);
 
-    const VAddr start_addr;
-    const VAddr end_addr;
+    [[maybe_unused]] const VAddr start_addr;
+    [[maybe_unused]] const VAddr end_addr;
 
     MemoryBlockTree memory_block_tree;
 };
--
cgit v1.2.3


From b3587102d160fb74a12935a79f06ee8a12712f12 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Tue, 29 Dec 2020 21:16:57 -0300
Subject: core/memory: Read and write page table atomically

Squash attributes into the pointer's integer, making them an uintptr_t
pair containing 2 bits at the bottom and then the pointer. These bits
are currently unused thanks to alignment requirements.

Configure Dynarmic to mask out these bits on pointer reads.

While we are at it, remove some unused attributes carried over from
Citra.

Read/Write and other hot functions use a two step unpacking process that
is less readable to stop MSVC from emitting an extra AND instruction in
the hot path:

  mov         rdi,rcx
  shr         rdx,0Ch
  mov         r8,qword ptr [rax+8]
  mov         rax,qword ptr [r8+rdx*8]
  mov         rdx,rax
 -and         al,3
  and         rdx,0FFFFFFFFFFFFFFFCh
  je          Core::Memory::Memory::Impl::Read
  mov         rax,qword ptr [vaddr]
  movzx       eax,byte ptr [rdx+rax]
---
 src/core/hle/kernel/memory/page_table.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/core/hle/kernel/memory')

diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index f53a7be82..f3e8bc333 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -265,7 +265,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t
     physical_memory_usage = 0;
     memory_pool = pool;
 
-    page_table_impl.Resize(address_space_width, PageBits, true);
+    page_table_impl.Resize(address_space_width, PageBits);
 
     return InitializeMemoryLayout(start, end);
 }
--
cgit v1.2.3


From a745d87971b2c9795e1b2c587bfe30b849b522fa Mon Sep 17 00:00:00 2001
From: Morph <39850852+Morph1984@users.noreply.github.com>
Date: Sat, 2 Jan 2021 09:00:05 -0500
Subject: general: Fix various spelling errors

---
 src/core/hle/kernel/memory/memory_block.h | 14 +++++++-------
 src/core/hle/kernel/memory/page_table.cpp | 12 ++++++------
 2 files changed, 13 insertions(+), 13 deletions(-)

(limited to 'src/core/hle/kernel/memory')

diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h
index 37fe19916..83acece1e 100644
--- a/src/core/hle/kernel/memory/memory_block.h
+++ b/src/core/hle/kernel/memory/memory_block.h
@@ -73,12 +73,12 @@ enum class MemoryState : u32 {
     ThreadLocal =
         static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
 
-    Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
-                 FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
-                 FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+    Transferred = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
+                  FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
+                  FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
 
-    SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
-                       FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+    SharedTransferred = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
+                        FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
 
     SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
                  FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -111,8 +111,8 @@ static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
 static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
 static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
 static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
-static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D);
-static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E);
+static_assert(static_cast<u32>(MemoryState::Transferred) == 0x015C3C0D);
+static_assert(static_cast<u32>(MemoryState::SharedTransferred) == 0x005C380E);
 static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
 static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
 static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index f3e8bc333..080886554 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -1007,8 +1007,8 @@ constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const {
     case MemoryState::Shared:
     case MemoryState::AliasCode:
    case MemoryState::AliasCodeData:
-    case MemoryState::Transfered:
-    case MemoryState::SharedTransfered:
+    case MemoryState::Transferred:
+    case MemoryState::SharedTransferred:
     case MemoryState::SharedCode:
     case MemoryState::GeneratedCode:
     case MemoryState::CodeOut:
@@ -1042,8 +1042,8 @@ constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const {
     case MemoryState::Shared:
     case MemoryState::AliasCode:
     case MemoryState::AliasCodeData:
-    case MemoryState::Transfered:
-    case MemoryState::SharedTransfered:
+    case MemoryState::Transferred:
+    case MemoryState::SharedTransferred:
     case MemoryState::SharedCode:
     case MemoryState::GeneratedCode:
     case MemoryState::CodeOut:
@@ -1080,8 +1080,8 @@ constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState s
     case MemoryState::AliasCodeData:
     case MemoryState::Stack:
     case MemoryState::ThreadLocal:
-    case MemoryState::Transfered:
-    case MemoryState::SharedTransfered:
+    case MemoryState::Transferred:
+    case MemoryState::SharedTransferred:
     case MemoryState::SharedCode:
     case MemoryState::GeneratedCode:
     case MemoryState::CodeOut:
--
cgit v1.2.3


From 4f13e270c8c63f86a135426e381eef7d4837e5a4 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Tue, 5 Jan 2021 04:18:16 -0300
Subject: core: Silence warnings when compiling without asserts

---
 src/core/hle/kernel/memory/address_space_info.cpp | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'src/core/hle/kernel/memory')

diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index e4288cab4..6cf43ba24 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
     }
     UNREACHABLE();
+    return 0;
 }
 
 std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
     }
     UNREACHABLE();
+    return 0;
 }
 
 } // namespace Kernel::Memory
--
cgit v1.2.3
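
Background on the pointer-tagging scheme referenced in commit b3587102 ("core/memory:
Read and write page table atomically"): the attribute bits are packed into the low bits
of the host pointer, which are guaranteed to be zero by alignment, so the pointer and its
attributes can be published and read as a single atomic word. The sketch below is an
illustrative reconstruction of that idea only; the names (TaggedPageEntry, Attribute,
AttributeMask) are hypothetical and do not mirror yuzu's actual types.

    #include <atomic>
    #include <cstdint>

    // Hypothetical attribute values kept in the low 2 bits of the packed word.
    enum class Attribute : std::uintptr_t {
        Unmapped = 0,
        Memory = 1,
        RasterizerCached = 2,
    };

    inline constexpr std::uintptr_t AttributeMask = 3;

    class TaggedPageEntry {
    public:
        // Pack the host pointer and attribute into one word and publish it atomically.
        // Assumes the pointer is at least 4-byte aligned so the low 2 bits are free.
        void Store(std::uint8_t* pointer, Attribute attribute) {
            value.store(reinterpret_cast<std::uintptr_t>(pointer) |
                            static_cast<std::uintptr_t>(attribute),
                        std::memory_order_relaxed);
        }

        // Recover the host pointer by masking off the attribute bits.
        std::uint8_t* Pointer() const {
            return reinterpret_cast<std::uint8_t*>(value.load(std::memory_order_relaxed) &
                                                   ~AttributeMask);
        }

        // Recover the attribute bits alone.
        Attribute Type() const {
            return static_cast<Attribute>(value.load(std::memory_order_relaxed) & AttributeMask);
        }

    private:
        std::atomic<std::uintptr_t> value{0};
    };

Because both fields live in the same word, a reader never observes a pointer from one
update paired with attributes from another, which is the property the commit message
describes; the MSVC listing quoted above shows the packed word being loaded once and the
pointer recovered with a single mask.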
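
Relatedly, the `return 0;` lines added after UNREACHABLE() in the first and last commits
address a concrete warning class: when asserts are compiled out, the macro can expand to
nothing, leaving a non-void function without a return statement on that path
(clang -Wreturn-type, MSVC C4715). A minimal illustration of the pattern, independent of
yuzu's actual UNREACHABLE definition and using a stand-in macro:

    #include <cstdlib>

    // Stand-in for an assertion macro that disappears in release builds.
    #ifdef NDEBUG
    #define UNREACHABLE() ((void)0)
    #else
    #define UNREACHABLE() std::abort()
    #endif

    // Hypothetical example, not yuzu code: callers only ever pass 'a' or 'b',
    // but the compiler cannot prove the switch is exhaustive.
    int CategoryOf(char c) {
        switch (c) {
        case 'a':
            return 1;
        case 'b':
            return 2;
        }
        UNREACHABLE();
        return 0; // Silences the missing-return warning when UNREACHABLE() compiles to nothing.
    }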