Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--   src/core/hle/kernel/k_code_memory.cpp |   6
-rw-r--r--   src/core/hle/kernel/k_page_table.cpp  | 560
-rw-r--r--   src/core/hle/kernel/svc.cpp           | 172
-rw-r--r--   src/core/hle/kernel/svc_wrap.h        |  22
4 files changed, 550 insertions(+), 210 deletions(-)
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index d69f7ffb7..0b225e8e0 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include "common/alignment.h"
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -28,8 +29,7 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    KMemoryInfo kBlockInfo = page_table.QueryInfo(addr);
-    m_page_group = KPageLinkedList(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
+    m_page_group = KPageLinkedList(addr, Common::DivideUp(size, PageSize));
 
     // Lock the memory.
     R_TRY(page_table.LockForCodeMemory(addr, size))
@@ -143,4 +143,4 @@ ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     return ResultSuccess;
 }
 
-} // namespace Kernel
\ No newline at end of file
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 912853e5c..88aa2a152 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -41,24 +41,6 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
     }
 }
 
-constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) {
-    if (info.GetAddress() < addr) {
-        return addr;
-    }
-    return info.GetAddress();
-}
-
-constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) {
-    std::size_t size{info.GetSize()};
-    if (info.GetAddress() < start) {
-        size -= start - info.GetAddress();
-    }
-    if (info.GetEndAddress() > end) {
-        size -= info.GetEndAddress() - end;
-    }
-    return size;
-}
-
 } // namespace
 
 KPageTable::KPageTable(Core::System& system_)
@@ -400,148 +382,471 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
     return ResultSuccess;
 }
 
-ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
+ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
-    // Lock the table.
-    KScopedLightLock lk(general_lock);
-
-    std::size_t mapped_size{};
-    const VAddr end_addr{addr + size};
+    // Calculate the last address for convenience.
+    const VAddr last_address = address + size - 1;
 
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state != KMemoryState::Free) {
-            mapped_size += GetSizeInRange(info, addr, end_addr);
-        }
-    });
+    // Define iteration variables.
+    VAddr cur_address;
+    std::size_t mapped_size;
 
-    if (mapped_size == size) {
-        return ResultSuccess;
-    }
+    // The entire mapping process can be retried.
+    while (true) {
+        // Check if the memory is already mapped.
+        {
+            // Lock the table.
+            KScopedLightLock lk(general_lock);
+
+            // Iterate over the memory.
+            cur_address = address;
+            mapped_size = 0;
+
+            auto it = block_manager->FindIterator(cur_address);
+            while (true) {
+                // Check that the iterator is valid.
+                ASSERT(it != block_manager->end());
+
+                // Get the memory info.
+                const KMemoryInfo info = it->GetMemoryInfo();
+
+                // Check if we're done.
+                if (last_address <= info.GetLastAddress()) {
+                    if (info.GetState() != KMemoryState::Free) {
+                        mapped_size += (last_address + 1 - cur_address);
+                    }
+                    break;
+                }
+
+                // Track the memory if it's mapped.
+                if (info.GetState() != KMemoryState::Free) {
+                    mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+                }
+
+                // Advance.
+                cur_address = info.GetEndAddress();
+                ++it;
+            }
 
-    const std::size_t remaining_size{size - mapped_size};
-    const std::size_t remaining_pages{remaining_size / PageSize};
+            // If the size mapped is the size requested, we've nothing to do.
+            R_SUCCEED_IF(size == mapped_size);
+        }
 
-    // Reserve the memory from the process resource limit.
-    KScopedResourceReservation memory_reservation(
-        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
-        remaining_size);
-    if (!memory_reservation.Succeeded()) {
-        LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
-        return ResultLimitReached;
+        // Allocate and map the memory.
+        {
+            // Reserve the memory from the process resource limit.
+            KScopedResourceReservation memory_reservation(
+                system.Kernel().CurrentProcess()->GetResourceLimit(),
+                LimitableResource::PhysicalMemory, size - mapped_size);
+            R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+            // Allocate pages for the new memory.
+            KPageLinkedList page_linked_list;
+            R_TRY(system.Kernel().MemoryManager().Allocate(
+                page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option));
+
+            // Map the memory.
+            {
+                // Lock the table.
+                KScopedLightLock lk(general_lock);
+
+                size_t num_allocator_blocks = 0;
+
+                // Verify that nobody has mapped memory since we first checked.
+                {
+                    // Iterate over the memory.
+                    size_t checked_mapped_size = 0;
+                    cur_address = address;
+
+                    auto it = block_manager->FindIterator(cur_address);
+                    while (true) {
+                        // Check that the iterator is valid.
+                        ASSERT(it != block_manager->end());
+
+                        // Get the memory info.
+                        const KMemoryInfo info = it->GetMemoryInfo();
+
+                        const bool is_free = info.GetState() == KMemoryState::Free;
+                        if (is_free) {
+                            if (info.GetAddress() < address) {
+                                ++num_allocator_blocks;
+                            }
+                            if (last_address < info.GetLastAddress()) {
+                                ++num_allocator_blocks;
+                            }
+                        }
+
+                        // Check if we're done.
+                        if (last_address <= info.GetLastAddress()) {
+                            if (!is_free) {
+                                checked_mapped_size += (last_address + 1 - cur_address);
+                            }
+                            break;
+                        }
+
+                        // Track the memory if it's mapped.
+                        if (!is_free) {
+                            checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+                        }
+
+                        // Advance.
+                        cur_address = info.GetEndAddress();
+                        ++it;
+                    }
+
+                    // If the size now isn't what it was before, somebody mapped or unmapped
+                    // concurrently. If this happened, retry.
+                    if (mapped_size != checked_mapped_size) {
+                        continue;
+                    }
+                }
+
+                // Reset the current tracking address, and make sure we clean up on failure.
+                cur_address = address;
+                auto unmap_guard = detail::ScopeExit([&] {
+                    if (cur_address > address) {
+                        const VAddr last_unmap_address = cur_address - 1;
+
+                        // Iterate, unmapping the pages.
+                        cur_address = address;
+
+                        auto it = block_manager->FindIterator(cur_address);
+                        while (true) {
+                            // Check that the iterator is valid.
+                            ASSERT(it != block_manager->end());
+
+                            // Get the memory info.
+                            const KMemoryInfo info = it->GetMemoryInfo();
+
+                            // If the memory state is free, we mapped it and need to unmap it.
+                            if (info.GetState() == KMemoryState::Free) {
+                                // Determine the range to unmap.
+                                const size_t cur_pages =
+                                    std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                             last_unmap_address + 1 - cur_address) /
+                                    PageSize;
+
+                                // Unmap.
+                                ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
+                                               OperationType::Unmap)
+                                           .IsSuccess());
+                            }
+
+                            // Check if we're done.
+                            if (last_unmap_address <= info.GetLastAddress()) {
+                                break;
+                            }
+
+                            // Advance.
+                            cur_address = info.GetEndAddress();
+                            ++it;
+                        }
+                    }
+                });
+
+                // Iterate over the memory.
+                auto pg_it = page_linked_list.Nodes().begin();
+                PAddr pg_phys_addr = pg_it->GetAddress();
+                size_t pg_pages = pg_it->GetNumPages();
+
+                auto it = block_manager->FindIterator(cur_address);
+                while (true) {
+                    // Check that the iterator is valid.
+                    ASSERT(it != block_manager->end());
+
+                    // Get the memory info.
+                    const KMemoryInfo info = it->GetMemoryInfo();
+
+                    // If it's unmapped, we need to map it.
+                    if (info.GetState() == KMemoryState::Free) {
+                        // Determine the range to map.
+                        size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                                    last_address + 1 - cur_address) /
+                                           PageSize;
+
+                        // While we have pages to map, map them.
+                        while (map_pages > 0) {
+                            // Check if we're at the end of the physical block.
+                            if (pg_pages == 0) {
+                                // Ensure there are more pages to map.
+                                ASSERT(pg_it != page_linked_list.Nodes().end());
+
+                                // Advance our physical block.
+                                ++pg_it;
+                                pg_phys_addr = pg_it->GetAddress();
+                                pg_pages = pg_it->GetNumPages();
+                            }
+
+                            // Map whatever we can.
+                            const size_t cur_pages = std::min(pg_pages, map_pages);
+                            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
+                                          OperationType::Map, pg_phys_addr));
+
+                            // Advance.
+                            cur_address += cur_pages * PageSize;
+                            map_pages -= cur_pages;
+
+                            pg_phys_addr += cur_pages * PageSize;
+                            pg_pages -= cur_pages;
+                        }
+                    }
+
+                    // Check if we're done.
+                    if (last_address <= info.GetLastAddress()) {
+                        break;
+                    }
+
+                    // Advance.
+                    cur_address = info.GetEndAddress();
+                    ++it;
+                }
+
+                // We succeeded, so commit the memory reservation.
+                memory_reservation.Commit();
+
+                // Increase our tracked mapped size.
+                mapped_physical_memory_size += (size - mapped_size);
+
+                // Update the relevant memory blocks.
+                block_manager->Update(address, size / PageSize, KMemoryState::Free,
+                                      KMemoryPermission::None, KMemoryAttribute::None,
+                                      KMemoryState::Normal, KMemoryPermission::UserReadWrite,
+                                      KMemoryAttribute::None);
+
+                // Cancel our guard.
+                unmap_guard.Cancel();
+
+                return ResultSuccess;
+            }
+        }
     }
+}
 
-    KPageLinkedList page_linked_list;
+ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+    // Lock the physical memory lock.
+    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
-    CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
-                                                          memory_pool, allocation_option));
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
 
-    // We succeeded, so commit the memory reservation.
-    memory_reservation.Commit();
+    // Calculate the last address for convenience.
+    const VAddr last_address = address + size - 1;
 
-    // Map the memory.
-    auto node{page_linked_list.Nodes().begin()};
-    PAddr map_addr{node->GetAddress()};
-    std::size_t src_num_pages{node->GetNumPages()};
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state != KMemoryState::Free) {
-            return;
-        }
+    // Define iteration variables.
+    VAddr cur_address = 0;
+    std::size_t mapped_size = 0;
+    std::size_t num_allocator_blocks = 0;
 
-        std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
-        VAddr dst_addr{GetAddressInRange(info, addr)};
+    // Check if the memory is mapped.
+    {
+        // Iterate over the memory.
+        cur_address = address;
+        mapped_size = 0;
+
+        auto it = block_manager->FindIterator(cur_address);
+        while (true) {
+            // Check that the iterator is valid.
+            ASSERT(it != block_manager->end());
+
+            // Get the memory info.
+            const KMemoryInfo info = it->GetMemoryInfo();
+
+            // Verify the memory's state.
+            const bool is_normal = info.GetState() == KMemoryState::Normal &&
+                                   info.GetAttribute() == KMemoryAttribute::None;
+            const bool is_free = info.GetState() == KMemoryState::Free;
+            R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+            if (is_normal) {
+                R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+
+                if (info.GetAddress() < address) {
+                    ++num_allocator_blocks;
+                }
+                if (last_address < info.GetLastAddress()) {
+                    ++num_allocator_blocks;
+                }
+            }
 
-        while (dst_num_pages) {
-            if (!src_num_pages) {
-                node = std::next(node);
-                map_addr = node->GetAddress();
-                src_num_pages = node->GetNumPages();
+            // Check if we're done.
+            if (last_address <= info.GetLastAddress()) {
+                if (is_normal) {
+                    mapped_size += (last_address + 1 - cur_address);
+                }
+                break;
             }
 
-            const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
-            Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
-                    map_addr);
+            // Track the memory if it's mapped.
+            if (is_normal) {
+                mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+            }
 
-            dst_addr += num_pages * PageSize;
-            map_addr += num_pages * PageSize;
-            src_num_pages -= num_pages;
-            dst_num_pages -= num_pages;
+            // Advance.
+            cur_address = info.GetEndAddress();
+            ++it;
         }
-    });
-
-    mapped_physical_memory_size += remaining_size;
-
-    const std::size_t num_pages{size / PageSize};
-    block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None, KMemoryState::Normal,
-                          KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
 
-    return ResultSuccess;
-}
+        // If there's nothing mapped, we've nothing to do.
+        R_SUCCEED_IF(mapped_size == 0);
+    }
 
-ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
-    // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    // Make a page group for the unmap region.
+    KPageLinkedList pg;
+    {
+        auto& impl = this->PageTableImpl();
+
+        // Begin traversal.
+        Common::PageTable::TraversalContext context;
+        Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+        bool cur_valid = false;
+        Common::PageTable::TraversalEntry next_entry;
+        bool next_valid = false;
+        size_t tot_size = 0;
+
+        cur_address = address;
+        next_valid = impl.BeginTraversal(next_entry, context, cur_address);
+        next_entry.block_size =
+            (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
+
+        // Iterate, building the group.
+        while (true) {
+            if ((!next_valid && !cur_valid) ||
+                (next_valid && cur_valid &&
+                 next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+                cur_entry.block_size += next_entry.block_size;
+            } else {
+                if (cur_valid) {
+                    // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+                    R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
+                }
+
+                // Update tracking variables.
+                tot_size += cur_entry.block_size;
+                cur_entry = next_entry;
+                cur_valid = next_valid;
+            }
 
-    // Lock the table.
-    KScopedLightLock lk(general_lock);
+            if (cur_entry.block_size + tot_size >= size) {
+                break;
+            }
 
-    const VAddr end_addr{addr + size};
-    ResultCode result{ResultSuccess};
-    std::size_t mapped_size{};
+            next_valid = impl.ContinueTraversal(next_entry, context);
+        }
 
-    // Verify that the region can be unmapped
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state == KMemoryState::Normal) {
-            if (info.attribute != KMemoryAttribute::None) {
-                result = ResultInvalidCurrentMemory;
-                return;
+        // Add the last block.
+        if (cur_valid) {
+            // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+            R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
+        }
+    }
+    ASSERT(pg.GetNumPages() == mapped_size / PageSize);
+
+    // Reset the current tracking address, and make sure we clean up on failure.
+    cur_address = address;
+    auto remap_guard = detail::ScopeExit([&] {
+        if (cur_address > address) {
+            const VAddr last_map_address = cur_address - 1;
+            cur_address = address;
+
+            // Iterate over the memory we unmapped.
+            auto it = block_manager->FindIterator(cur_address);
+            auto pg_it = pg.Nodes().begin();
+            PAddr pg_phys_addr = pg_it->GetAddress();
+            size_t pg_pages = pg_it->GetNumPages();
+
+            while (true) {
+                // Get the memory info for the pages we unmapped, convert to property.
+                const KMemoryInfo info = it->GetMemoryInfo();
+
+                // If the memory is normal, we unmapped it and need to re-map it.
+                if (info.GetState() == KMemoryState::Normal) {
+                    // Determine the range to map.
+                    size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                                last_map_address + 1 - cur_address) /
+                                       PageSize;
+
+                    // While we have pages to map, map them.
+                    while (map_pages > 0) {
+                        // Check if we're at the end of the physical block.
+                        if (pg_pages == 0) {
+                            // Ensure there are more pages to map.
+                            ASSERT(pg_it != pg.Nodes().end());
+
+                            // Advance our physical block.
+                            ++pg_it;
+                            pg_phys_addr = pg_it->GetAddress();
+                            pg_pages = pg_it->GetNumPages();
+                        }
+
+                        // Map whatever we can.
+                        const size_t cur_pages = std::min(pg_pages, map_pages);
+                        ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
+                                             OperationType::Map, pg_phys_addr) == ResultSuccess);
+
+                        // Advance.
+                        cur_address += cur_pages * PageSize;
+                        map_pages -= cur_pages;
+
+                        pg_phys_addr += cur_pages * PageSize;
+                        pg_pages -= cur_pages;
+                    }
+                }
+
+                // Check if we're done.
+                if (last_map_address <= info.GetLastAddress()) {
+                    break;
+                }
+
+                // Advance.
+                ++it;
             }
-            mapped_size += GetSizeInRange(info, addr, end_addr);
-        } else if (info.state != KMemoryState::Free) {
-            result = ResultInvalidCurrentMemory;
         }
     });
 
-    if (result.IsError()) {
-        return result;
-    }
+    // Iterate over the memory, unmapping as we go.
+    auto it = block_manager->FindIterator(cur_address);
+    while (true) {
+        // Check that the iterator is valid.
+        ASSERT(it != block_manager->end());
 
-    if (!mapped_size) {
-        return ResultSuccess;
-    }
+        // Get the memory info.
+        const KMemoryInfo info = it->GetMemoryInfo();
 
-    // Unmap each region within the range
-    KPageLinkedList page_linked_list;
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state == KMemoryState::Normal) {
-            const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
-            const std::size_t block_num_pages{block_size / PageSize};
-            const VAddr block_addr{GetAddressInRange(info, addr)};
-
-            AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
-
-            if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None,
-                                 OperationType::Unmap);
-                result.IsError()) {
-                return;
-            }
+        // If the memory state is normal, we need to unmap it.
+        if (info.GetState() == KMemoryState::Normal) {
+            // Determine the range to unmap.
+            const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                              last_address + 1 - cur_address) /
+                                     PageSize;
+
+            // Unmap.
+            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
         }
-    });
-    if (result.IsError()) {
-        return result;
-    }
 
-    const std::size_t num_pages{size / PageSize};
-    system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
-                                         allocation_option);
+        // Check if we're done.
+        if (last_address <= info.GetLastAddress()) {
+            break;
+        }
 
-    block_manager->Update(addr, num_pages, KMemoryState::Free);
+        // Advance.
+        cur_address = info.GetEndAddress();
+        ++it;
+    }
 
+    // Release the memory resource.
+    mapped_physical_memory_size -= mapped_size;
     auto process{system.Kernel().CurrentProcess()};
     process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
-    mapped_physical_memory_size -= mapped_size;
+
+    // Update memory blocks.
+    system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option);
+    block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
+                          KMemoryAttribute::None);
+
+    // We succeeded.
+    remap_guard.Cancel();
 
     return ResultSuccess;
 }
@@ -681,9 +986,8 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
     VAddr cur_addr{addr};
 
     for (const auto& node : page_linked_list.Nodes()) {
-        const std::size_t num_pages{(addr - cur_addr) / PageSize};
-        if (const auto result{
-                Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)};
+        if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
+                                      OperationType::Unmap)};
             result.IsError()) {
             return result;
         }
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 9387373c1..9836809f2 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -645,6 +645,10 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
     LOG_DEBUG(Debug_Emulated, "{}", str);
 }
 
+static void OutputDebugString32(Core::System& system, u32 address, u32 len) {
+    OutputDebugString(system, address, len);
+}
+
 /// Gets system/memory information for the current process
 static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
                           u64 info_sub_id) {
@@ -1404,7 +1408,7 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
 }
 
 static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
-    LOG_TRACE(Kernel_SVC, "called, handle_out=0x{:X}, address=0x{:X}, size=0x{:X}",
+    LOG_TRACE(Kernel_SVC, "called, handle_out={}, address=0x{:X}, size=0x{:X}",
               static_cast<void*>(out), address, size);
     // Get kernel instance.
     auto& kernel = system.Kernel();
@@ -1438,6 +1442,10 @@ static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr addr
     return ResultSuccess;
 }
 
+static ResultCode CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
+    return CreateCodeMemory(system, out, address, size);
+}
+
 static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle,
                                     u32 operation, VAddr address, size_t size,
                                     Svc::MemoryPermission perm) {
@@ -1517,6 +1525,12 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han
     return ResultSuccess;
 }
 
+static ResultCode ControlCodeMemory32(Core::System& system, Handle code_memory_handle,
+                                      u32 operation, u64 address, u64 size,
+                                      Svc::MemoryPermission perm) {
+    return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
+}
+
 static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
                                      VAddr page_info_address, Handle process_handle,
                                      VAddr address) {
@@ -2559,9 +2573,9 @@ struct FunctionDef {
 } // namespace
 
 static const FunctionDef SVC_Table_32[] = {
-    {0x00, nullptr, "Unknown"},
+    {0x00, nullptr, "Unknown0"},
     {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"},
-    {0x02, nullptr, "Unknown"},
+    {0x02, nullptr, "SetMemoryPermission32"},
     {0x03, SvcWrap32<SetMemoryAttribute32>, "SetMemoryAttribute32"},
     {0x04, SvcWrap32<MapMemory32>, "MapMemory32"},
     {0x05, SvcWrap32<UnmapMemory32>, "UnmapMemory32"},
@@ -2591,97 +2605,97 @@ static const FunctionDef SVC_Table_32[] = {
     {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"},
     {0x1e, SvcWrap32<GetSystemTick32>, "GetSystemTick32"},
     {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"},
-    {0x20, nullptr, "Unknown"},
+    {0x20, nullptr, "SendSyncRequestLight32"},
     {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"},
     {0x22, nullptr, "SendSyncRequestWithUserBuffer32"},
-    {0x23, nullptr, "Unknown"},
+    {0x23, nullptr, "SendAsyncRequestWithUserBuffer32"},
     {0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"},
     {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"},
     {0x26, SvcWrap32<Break32>, "Break32"},
-    {0x27, nullptr, "OutputDebugString32"},
-    {0x28, nullptr, "Unknown"},
+    {0x27, SvcWrap32<OutputDebugString32>, "OutputDebugString32"},
+    {0x28, nullptr, "ReturnFromException32"},
     {0x29, SvcWrap32<GetInfo32>, "GetInfo32"},
-    {0x2a, nullptr, "Unknown"},
-    {0x2b, nullptr, "Unknown"},
+    {0x2a, nullptr, "FlushEntireDataCache32"},
+    {0x2b, nullptr, "FlushDataCache32"},
     {0x2c, SvcWrap32<MapPhysicalMemory32>, "MapPhysicalMemory32"},
     {0x2d, SvcWrap32<UnmapPhysicalMemory32>, "UnmapPhysicalMemory32"},
-    {0x2e, nullptr, "Unknown"},
-    {0x2f, nullptr, "Unknown"},
-    {0x30, nullptr, "Unknown"},
-    {0x31, nullptr, "Unknown"},
+    {0x2e, nullptr, "GetDebugFutureThreadInfo32"},
+    {0x2f, nullptr, "GetLastThreadInfo32"},
+    {0x30, nullptr, "GetResourceLimitLimitValue32"},
+    {0x31, nullptr, "GetResourceLimitCurrentValue32"},
     {0x32, SvcWrap32<SetThreadActivity32>, "SetThreadActivity32"},
     {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"},
     {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"},
     {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"},
     {0x36, SvcWrap32<SynchronizePreemptionState>, "SynchronizePreemptionState32"},
-    {0x37, nullptr, "Unknown"},
-    {0x38, nullptr, "Unknown"},
-    {0x39, nullptr, "Unknown"},
-    {0x3a, nullptr, "Unknown"},
-    {0x3b, nullptr, "Unknown"},
-    {0x3c, nullptr, "Unknown"},
-    {0x3d, nullptr, "Unknown"},
-    {0x3e, nullptr, "Unknown"},
-    {0x3f, nullptr, "Unknown"},
+    {0x37, nullptr, "GetResourceLimitPeakValue32"},
+    {0x38, nullptr, "Unknown38"},
+    {0x39, nullptr, "CreateIoPool32"},
+    {0x3a, nullptr, "CreateIoRegion32"},
+    {0x3b, nullptr, "Unknown3b"},
+    {0x3c, nullptr, "KernelDebug32"},
+    {0x3d, nullptr, "ChangeKernelTraceState32"},
+    {0x3e, nullptr, "Unknown3e"},
+    {0x3f, nullptr, "Unknown3f"},
     {0x40, nullptr, "CreateSession32"},
     {0x41, nullptr, "AcceptSession32"},
-    {0x42, nullptr, "Unknown"},
+    {0x42, nullptr, "ReplyAndReceiveLight32"},
     {0x43, nullptr, "ReplyAndReceive32"},
-    {0x44, nullptr, "Unknown"},
+    {0x44, nullptr, "ReplyAndReceiveWithUserBuffer32"},
     {0x45, SvcWrap32<CreateEvent32>, "CreateEvent32"},
-    {0x46, nullptr, "Unknown"},
-    {0x47, nullptr, "Unknown"},
-    {0x48, nullptr, "Unknown"},
-    {0x49, nullptr, "Unknown"},
-    {0x4a, nullptr, "Unknown"},
-    {0x4b, nullptr, "Unknown"},
-    {0x4c, nullptr, "Unknown"},
-    {0x4d, nullptr, "Unknown"},
-    {0x4e, nullptr, "Unknown"},
-    {0x4f, nullptr, "Unknown"},
-    {0x50, nullptr, "Unknown"},
-    {0x51, nullptr, "Unknown"},
-    {0x52, nullptr, "Unknown"},
-    {0x53, nullptr, "Unknown"},
-    {0x54, nullptr, "Unknown"},
-    {0x55, nullptr, "Unknown"},
-    {0x56, nullptr, "Unknown"},
-    {0x57, nullptr, "Unknown"},
-    {0x58, nullptr, "Unknown"},
-    {0x59, nullptr, "Unknown"},
-    {0x5a, nullptr, "Unknown"},
-    {0x5b, nullptr, "Unknown"},
-    {0x5c, nullptr, "Unknown"},
-    {0x5d, nullptr, "Unknown"},
-    {0x5e, nullptr, "Unknown"},
+    {0x46, nullptr, "MapIoRegion32"},
+    {0x47, nullptr, "UnmapIoRegion32"},
+    {0x48, nullptr, "MapPhysicalMemoryUnsafe32"},
+    {0x49, nullptr, "UnmapPhysicalMemoryUnsafe32"},
+    {0x4a, nullptr, "SetUnsafeLimit32"},
+    {0x4b, SvcWrap32<CreateCodeMemory32>, "CreateCodeMemory32"},
+    {0x4c, SvcWrap32<ControlCodeMemory32>, "ControlCodeMemory32"},
+    {0x4d, nullptr, "SleepSystem32"},
+    {0x4e, nullptr, "ReadWriteRegister32"},
+    {0x4f, nullptr, "SetProcessActivity32"},
+    {0x50, nullptr, "CreateSharedMemory32"},
+    {0x51, nullptr, "MapTransferMemory32"},
+    {0x52, nullptr, "UnmapTransferMemory32"},
+    {0x53, nullptr, "CreateInterruptEvent32"},
+    {0x54, nullptr, "QueryPhysicalAddress32"},
+    {0x55, nullptr, "QueryIoMapping32"},
+    {0x56, nullptr, "CreateDeviceAddressSpace32"},
+    {0x57, nullptr, "AttachDeviceAddressSpace32"},
+    {0x58, nullptr, "DetachDeviceAddressSpace32"},
+    {0x59, nullptr, "MapDeviceAddressSpaceByForce32"},
+    {0x5a, nullptr, "MapDeviceAddressSpaceAligned32"},
+    {0x5b, nullptr, "MapDeviceAddressSpace32"},
+    {0x5c, nullptr, "UnmapDeviceAddressSpace32"},
+    {0x5d, nullptr, "InvalidateProcessDataCache32"},
+    {0x5e, nullptr, "StoreProcessDataCache32"},
     {0x5F, SvcWrap32<FlushProcessDataCache32>, "FlushProcessDataCache32"},
-    {0x60, nullptr, "Unknown"},
-    {0x61, nullptr, "Unknown"},
-    {0x62, nullptr, "Unknown"},
-    {0x63, nullptr, "Unknown"},
-    {0x64, nullptr, "Unknown"},
+    {0x60, nullptr, "StoreProcessDataCache32"},
+    {0x61, nullptr, "BreakDebugProcess32"},
+    {0x62, nullptr, "TerminateDebugProcess32"},
+    {0x63, nullptr, "GetDebugEvent32"},
+    {0x64, nullptr, "ContinueDebugEvent32"},
     {0x65, nullptr, "GetProcessList32"},
-    {0x66, nullptr, "Unknown"},
-    {0x67, nullptr, "Unknown"},
-    {0x68, nullptr, "Unknown"},
-    {0x69, nullptr, "Unknown"},
-    {0x6A, nullptr, "Unknown"},
-    {0x6B, nullptr, "Unknown"},
-    {0x6C, nullptr, "Unknown"},
-    {0x6D, nullptr, "Unknown"},
-    {0x6E, nullptr, "Unknown"},
+    {0x66, nullptr, "GetThreadList"},
+    {0x67, nullptr, "GetDebugThreadContext32"},
+    {0x68, nullptr, "SetDebugThreadContext32"},
+    {0x69, nullptr, "QueryDebugProcessMemory32"},
+    {0x6A, nullptr, "ReadDebugProcessMemory32"},
+    {0x6B, nullptr, "WriteDebugProcessMemory32"},
+    {0x6C, nullptr, "SetHardwareBreakPoint32"},
+    {0x6D, nullptr, "GetDebugThreadParam32"},
+    {0x6E, nullptr, "Unknown6E"},
     {0x6f, nullptr, "GetSystemInfo32"},
     {0x70, nullptr, "CreatePort32"},
     {0x71, nullptr, "ManageNamedPort32"},
     {0x72, nullptr, "ConnectToPort32"},
     {0x73, nullptr, "SetProcessMemoryPermission32"},
-    {0x74, nullptr, "Unknown"},
-    {0x75, nullptr, "Unknown"},
-    {0x76, nullptr, "Unknown"},
+    {0x74, nullptr, "MapProcessMemory32"},
+    {0x75, nullptr, "UnmapProcessMemory32"},
+    {0x76, nullptr, "QueryProcessMemory32"},
     {0x77, nullptr, "MapProcessCodeMemory32"},
     {0x78, nullptr, "UnmapProcessCodeMemory32"},
-    {0x79, nullptr, "Unknown"},
-    {0x7A, nullptr, "Unknown"},
+    {0x79, nullptr, "CreateProcess32"},
+    {0x7A, nullptr, "StartProcess32"},
     {0x7B, nullptr, "TerminateProcess32"},
     {0x7C, nullptr, "GetProcessInfo32"},
     {0x7D, nullptr, "CreateResourceLimit32"},
@@ -2754,7 +2768,7 @@ static const FunctionDef SVC_Table_32[] = {
 };
 
 static const FunctionDef SVC_Table_64[] = {
-    {0x00, nullptr, "Unknown"},
+    {0x00, nullptr, "Unknown0"},
     {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"},
     {0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"},
     {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"},
@@ -2809,23 +2823,23 @@ static const FunctionDef SVC_Table_64[] = {
     {0x34, SvcWrap64<WaitForAddress>, "WaitForAddress"},
     {0x35, SvcWrap64<SignalToAddress>, "SignalToAddress"},
     {0x36, SvcWrap64<SynchronizePreemptionState>, "SynchronizePreemptionState"},
-    {0x37, nullptr, "Unknown"},
-    {0x38, nullptr, "Unknown"},
-    {0x39, nullptr, "Unknown"},
-    {0x3A, nullptr, "Unknown"},
-    {0x3B, nullptr, "Unknown"},
+    {0x37, nullptr, "GetResourceLimitPeakValue"},
+    {0x38, nullptr, "Unknown38"},
+    {0x39, nullptr, "CreateIoPool"},
+    {0x3A, nullptr, "CreateIoRegion"},
+    {0x3B, nullptr, "Unknown3B"},
     {0x3C, SvcWrap64<KernelDebug>, "KernelDebug"},
     {0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"},
-    {0x3E, nullptr, "Unknown"},
-    {0x3F, nullptr, "Unknown"},
+    {0x3E, nullptr, "Unknown3e"},
+    {0x3F, nullptr, "Unknown3f"},
     {0x40, nullptr, "CreateSession"},
     {0x41, nullptr, "AcceptSession"},
     {0x42, nullptr, "ReplyAndReceiveLight"},
     {0x43, nullptr, "ReplyAndReceive"},
     {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"},
     {0x45, SvcWrap64<CreateEvent>, "CreateEvent"},
-    {0x46, nullptr, "Unknown"},
-    {0x47, nullptr, "Unknown"},
+    {0x46, nullptr, "MapIoRegion"},
+    {0x47, nullptr, "UnmapIoRegion"},
     {0x48, nullptr, "MapPhysicalMemoryUnsafe"},
     {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
     {0x4A, nullptr, "SetUnsafeLimit"},
@@ -2864,7 +2878,7 @@ static const FunctionDef SVC_Table_64[] = {
     {0x6B, nullptr, "WriteDebugProcessMemory"},
     {0x6C, nullptr, "SetHardwareBreakPoint"},
     {0x6D, nullptr, "GetDebugThreadParam"},
-    {0x6E, nullptr, "Unknown"},
+    {0x6E, nullptr, "Unknown6E"},
     {0x6F, nullptr, "GetSystemInfo"},
     {0x70, nullptr, "CreatePort"},
     {0x71, nullptr, "ManageNamedPort"},
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index a60adfcab..d309f166c 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -669,4 +669,26 @@ void SvcWrap32(Core::System& system) {
     FuncReturn(system, retval);
 }
 
+// Used by CreateCodeMemory32
+template <ResultCode func(Core::System&, Handle*, u32, u32)>
+void SvcWrap32(Core::System& system) {
+    Handle handle = 0;
+
+    const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2)).raw;
+
+    system.CurrentArmInterface().SetReg(1, handle);
+    FuncReturn(system, retval);
+}
+
+// Used by ControlCodeMemory32
+template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
+void SvcWrap32(Core::System& system) {
+    const u32 retval =
+        func(system, Param32(system, 0), Param32(system, 1), Param(system, 2), Param(system, 4),
+             static_cast<Svc::MemoryPermission>(Param32(system, 6)))
+            .raw;
+
+    FuncReturn(system, retval);
+}
+
 } // namespace Kernel
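
The rewritten MapPhysicalMemory and UnmapPhysicalMemory above share one shape: scan the block manager, allocate or build a page group, re-check under the table lock, perform the operation, and arm a detail::ScopeExit guard that rolls back partially completed work unless the success path calls Cancel(). Below is a minimal, self-contained sketch of that cancelable-guard idiom; it assumes nothing about yuzu's actual common/scope_exit.h beyond the usage visible in the diff, and MapSomething is a hypothetical caller invented for illustration.

// Cancelable scope guard: runs its cleanup on every exit path unless Cancel()
// was called first. Sketch only; not yuzu's real detail::ScopeExit.
#include <cstdio>
#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : func{std::forward<F>(f)} {}
    ~ScopeExit() {
        if (active) {
            func(); // failure or early return: undo the partial work
        }
    }
    void Cancel() {
        active = false; // success: keep the work, skip the rollback
    }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F func;
    bool active = true;
};

// Hypothetical map-then-maybe-unmap flow mirroring MapPhysicalMemory.
bool MapSomething(bool fail_midway) {
    int pages_mapped = 0;
    ScopeExit unmap_guard{[&] { std::printf("rolling back %d pages\n", pages_mapped); }};

    pages_mapped = 4; // pretend we mapped some pages
    if (fail_midway) {
        return false; // guard fires, partial mapping is undone
    }

    unmap_guard.Cancel(); // commit: suppress the rollback
    return true;
}

int main() {
    MapSomething(true);
    MapSomething(false);
}

In the diff itself, unmap_guard unmaps whatever MapPhysicalMemory had mapped before a failed Operate() call, and remap_guard re-maps pages UnmapPhysicalMemory had already unmapped; both are cancelled once the whole operation succeeds.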

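The new ControlCodeMemory32 wrapper reads its two 64-bit arguments with Param(system, 2) and Param(system, 4), while the 32-bit arguments use Param32 at indices 0, 1, and 6. The sketch below illustrates the register-pair reassembly that spacing implies; Regs, Param32, and Param64 are invented stand-ins (yuzu's real helpers live in svc_wrap.h), and the low-word-in-lower-register layout is an assumption about the 32-bit ABI rather than something the diff states.

#include <array>
#include <cstdint>
#include <cstdio>

using Regs = std::array<std::uint32_t, 8>; // stand-in for AArch32 r0..r7

// One 32-bit argument: read a single guest register.
std::uint32_t Param32(const Regs& r, int i) {
    return r[i];
}

// One 64-bit argument in 32-bit mode: assumed to span the pair (r[i], r[i+1]),
// low word in the lower-numbered register.
std::uint64_t Param64(const Regs& r, int i) {
    return std::uint64_t{r[i]} | (std::uint64_t{r[i + 1]} << 32);
}

int main() {
    // ControlCodeMemory32: handle=r0, operation=r1, address=r2:r3, size=r4:r5, perm=r6.
    const Regs r{0x42, 1, 0x5000, 0x12, 0x2000, 0, 3, 0};
    std::printf("handle=%#x op=%u addr=%#llx size=%#llx perm=%u\n", Param32(r, 0), Param32(r, 1),
                static_cast<unsigned long long>(Param64(r, 2)),
                static_cast<unsigned long long>(Param64(r, 4)), Param32(r, 6));
}

This also explains why the wrapper skips index 3 and index 5: each u64 consumes two consecutive 32-bit registers.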