Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp (renamed from src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp) | 12
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h | 4
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp | 36
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.h | 4
-rw-r--r--  src/core/hle/kernel/code_set.h | 6
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp | 33
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h | 29
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 521
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 412
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.cpp | 33
-rw-r--r--  src/core/hle/kernel/initial_process.h | 2
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp | 205
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h | 45
-rw-r--r--  src/core/hle/kernel/k_address_space_info.cpp | 84
-rw-r--r--  src/core/hle/kernel/k_address_space_info.h | 2
-rw-r--r--  src/core/hle/kernel/k_affinity_mask.h | 20
-rw-r--r--  src/core/hle/kernel/k_auto_object.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_auto_object.h | 41
-rw-r--r--  src/core/hle/kernel/k_capabilities.cpp | 358
-rw-r--r--  src/core/hle/kernel/k_capabilities.h | 295
-rw-r--r--  src/core/hle/kernel/k_client_port.cpp | 85
-rw-r--r--  src/core/hle/kernel/k_client_port.h | 24
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp | 15
-rw-r--r--  src/core/hle/kernel/k_client_session.h | 13
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp | 41
-rw-r--r--  src/core/hle/kernel/k_code_memory.h | 20
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 177
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h | 26
-rw-r--r--  src/core/hle/kernel/k_debug.h | 4
-rw-r--r--  src/core/hle/kernel/k_device_address_space.cpp | 150
-rw-r--r--  src/core/hle/kernel/k_device_address_space.h | 61
-rw-r--r--  src/core/hle/kernel/k_dynamic_page_manager.h | 19
-rw-r--r--  src/core/hle/kernel/k_dynamic_slab_heap.h | 6
-rw-r--r--  src/core/hle/kernel/k_event.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_event.h | 2
-rw-r--r--  src/core/hle/kernel/k_event_info.h | 5
-rw-r--r--  src/core/hle/kernel/k_handle_table.h | 3
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.cpp | 18
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.h | 6
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp | 37
-rw-r--r--  src/core/hle/kernel/k_light_lock.h | 8
-rw-r--r--  src/core/hle/kernel/k_linked_list.h | 238
-rw-r--r--  src/core/hle/kernel/k_memory_block.h | 52
-rw-r--r--  src/core/hle/kernel/k_memory_block_manager.cpp | 68
-rw-r--r--  src/core/hle/kernel/k_memory_block_manager.h | 40
-rw-r--r--  src/core/hle/kernel/k_memory_layout.cpp | 28
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h | 112
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp | 51
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h | 48
-rw-r--r--  src/core/hle/kernel/k_memory_region.h | 78
-rw-r--r--  src/core/hle/kernel/k_object_name.cpp | 102
-rw-r--r--  src/core/hle/kernel/k_object_name.h | 88
-rw-r--r--  src/core/hle/kernel/k_page_buffer.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_page_buffer.h | 4
-rw-r--r--  src/core/hle/kernel/k_page_group.h | 2
-rw-r--r--  src/core/hle/kernel/k_page_heap.cpp | 40
-rw-r--r--  src/core/hle/kernel/k_page_heap.h | 48
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 944
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 331
-rw-r--r--  src/core/hle/kernel/k_page_table_manager.h | 14
-rw-r--r--  src/core/hle/kernel/k_page_table_slab_heap.h | 15
-rw-r--r--  src/core/hle/kernel/k_port.cpp | 48
-rw-r--r--  src/core/hle/kernel/k_port.h | 28
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h | 168
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 419
-rw-r--r--  src/core/hle/kernel/k_process.h | 242
-rw-r--r--  src/core/hle/kernel/k_readable_event.cpp | 20
-rw-r--r--  src/core/hle/kernel/k_readable_event.h | 2
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp | 105
-rw-r--r--  src/core/hle/kernel/k_resource_limit.h | 22
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 93
-rw-r--r--  src/core/hle/kernel/k_scheduler.h | 14
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h | 64
-rw-r--r--  src/core/hle/kernel/k_scoped_lock.h | 24
-rw-r--r--  src/core/hle/kernel/k_scoped_resource_reservation.h | 36
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 29
-rw-r--r--  src/core/hle/kernel/k_server_port.cpp | 35
-rw-r--r--  src/core/hle/kernel/k_server_port.h | 16
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp | 86
-rw-r--r--  src/core/hle/kernel/k_server_session.h | 31
-rw-r--r--  src/core/hle/kernel/k_session.cpp | 57
-rw-r--r--  src/core/hle/kernel/k_session.h | 41
-rw-r--r--  src/core/hle/kernel/k_session_request.cpp | 31
-rw-r--r--  src/core/hle/kernel/k_session_request.h | 75
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp | 74
-rw-r--r--  src/core/hle/kernel/k_shared_memory.h | 36
-rw-r--r--  src/core/hle/kernel/k_shared_memory_info.h | 21
-rw-r--r--  src/core/hle/kernel/k_slab_heap.h | 3
-rw-r--r--  src/core/hle/kernel/k_spin_lock.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_spin_lock.h | 14
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp | 40
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h | 27
-rw-r--r--  src/core/hle/kernel/k_system_resource.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_system_resource.h | 10
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 953
-rw-r--r--  src/core/hle/kernel/k_thread.h | 720
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.h | 31
-rw-r--r--  src/core/hle/kernel/k_thread_queue.cpp | 20
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h | 14
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp | 27
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.h | 26
-rw-r--r--  src/core/hle/kernel/k_typed_address.h | 12
-rw-r--r--  src/core/hle/kernel/k_worker_task.h | 2
-rw-r--r--  src/core/hle/kernel/k_worker_task_manager.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_worker_task_manager.h | 2
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 593
-rw-r--r--  src/core/hle/kernel/kernel.h | 170
-rw-r--r--  src/core/hle/kernel/memory_types.h | 4
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 41
-rw-r--r--  src/core/hle/kernel/physical_core.h | 37
-rw-r--r--  src/core/hle/kernel/service_thread.cpp | 206
-rw-r--r--  src/core/hle/kernel/service_thread.h | 29
-rw-r--r--  src/core/hle/kernel/slab_helpers.h | 22
-rw-r--r--  src/core/hle/kernel/svc.cpp | 6406
-rw-r--r--  src/core/hle/kernel/svc.h | 528
-rw-r--r--  src/core/hle/kernel/svc/svc_activity.cpp | 66
-rw-r--r--  src/core/hle/kernel/svc/svc_address_arbiter.cpp | 105
-rw-r--r--  src/core/hle/kernel/svc/svc_address_translation.cpp | 50
-rw-r--r--  src/core/hle/kernel/svc/svc_cache.cpp | 98
-rw-r--r--  src/core/hle/kernel/svc/svc_code_memory.cpp | 171
-rw-r--r--  src/core/hle/kernel/svc/svc_condition_variable.cpp | 72
-rw-r--r--  src/core/hle/kernel/svc/svc_debug.cpp | 194
-rw-r--r--  src/core/hle/kernel/svc/svc_debug_string.cpp | 30
-rw-r--r--  src/core/hle/kernel/svc/svc_device_address_space.cpp | 258
-rw-r--r--  src/core/hle/kernel/svc/svc_event.cpp | 120
-rw-r--r--  src/core/hle/kernel/svc/svc_exception.cpp | 137
-rw-r--r--  src/core/hle/kernel/svc/svc_info.cpp | 277
-rw-r--r--  src/core/hle/kernel/svc/svc_insecure_memory.cpp | 35
-rw-r--r--  src/core/hle/kernel/svc/svc_interrupt_event.cpp | 25
-rw-r--r--  src/core/hle/kernel/svc/svc_io_pool.cpp | 71
-rw-r--r--  src/core/hle/kernel/svc/svc_ipc.cpp | 173
-rw-r--r--  src/core/hle/kernel/svc/svc_kernel_debug.cpp | 35
-rw-r--r--  src/core/hle/kernel/svc/svc_light_ipc.cpp | 73
-rw-r--r--  src/core/hle/kernel/svc/svc_lock.cpp | 51
-rw-r--r--  src/core/hle/kernel/svc/svc_memory.cpp | 216
-rw-r--r--  src/core/hle/kernel/svc/svc_physical_memory.cpp | 183
-rw-r--r--  src/core/hle/kernel/svc/svc_port.cpp | 159
-rw-r--r--  src/core/hle/kernel/svc/svc_power_management.cpp | 21
-rw-r--r--  src/core/hle/kernel/svc/svc_process.cpp | 194
-rw-r--r--  src/core/hle/kernel/svc/svc_process_memory.cpp | 320
-rw-r--r--  src/core/hle/kernel/svc/svc_processor.cpp | 25
-rw-r--r--  src/core/hle/kernel/svc/svc_query_memory.cpp | 65
-rw-r--r--  src/core/hle/kernel/svc/svc_register.cpp | 27
-rw-r--r--  src/core/hle/kernel/svc/svc_resource_limit.cpp | 145
-rw-r--r--  src/core/hle/kernel/svc/svc_secure_monitor_call.cpp | 53
-rw-r--r--  src/core/hle/kernel/svc/svc_session.cpp | 127
-rw-r--r--  src/core/hle/kernel/svc/svc_shared_memory.cpp | 130
-rw-r--r--  src/core/hle/kernel/svc/svc_synchronization.cpp | 175
-rw-r--r--  src/core/hle/kernel/svc/svc_thread.cpp | 412
-rw-r--r--  src/core/hle/kernel/svc/svc_thread_profiler.cpp | 60
-rw-r--r--  src/core/hle/kernel/svc/svc_tick.cpp | 27
-rw-r--r--  src/core/hle/kernel/svc/svc_transfer_memory.cpp | 115
-rw-r--r--  src/core/hle/kernel/svc_generator.py | 716
-rw-r--r--  src/core/hle/kernel/svc_results.h | 1
-rw-r--r--  src/core/hle/kernel/svc_types.h | 35
-rw-r--r--  src/core/hle/kernel/svc_version.h | 58
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 733
159 files changed, 14842 insertions, 8158 deletions
diff --git a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
index 098ba6eac..24eb3f886 100644
--- a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
@@ -76,22 +76,24 @@ void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) {
void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
- const PAddr physical_memory_base_address =
+ const KPhysicalAddress physical_memory_base_address =
KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
// Insert blocks into the tree.
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
- physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram));
+ GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
- physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
+ GetInteger(physical_memory_base_address), ReservedEarlyDramSize,
+ KMemoryRegionType_DramReservedEarly));
// Insert the KTrace block at the end of Dram, if KTrace is enabled.
static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
if constexpr (IsKTraceEnabled) {
- const PAddr ktrace_buffer_phys_addr =
+ const KPhysicalAddress ktrace_buffer_phys_addr =
physical_memory_base_address + intended_memory_size - KTraceBufferSize;
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
- ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
+ GetInteger(ktrace_buffer_phys_addr), KTraceBufferSize,
+ KMemoryRegionType_KernelTraceBuffer));
}
}
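The hunk above is representative of the commit-wide migration from the raw PAddr/VAddr integer aliases to the strongly typed KPhysicalAddress/KVirtualAddress/KProcessAddress types: call sites that genuinely need a plain integer (such as the region tree's Insert) must now unwrap explicitly via GetInteger(). A minimal sketch of what such a typed-address wrapper can look like, assuming only what the hunks show; this is an illustrative reduction, not yuzu's actual k_typed_address.h:

    #include <cstdint>

    // Illustrative reduction of a typed address: a distinct type per address
    // space, so physical, kernel-virtual, and process addresses can no longer
    // be mixed silently.
    class KPhysicalAddress {
    public:
        // Implicit construction from an integer keeps declarations like
        // `constexpr KPhysicalAddress MainMemoryAddress = 0x80000000;` working.
        constexpr KPhysicalAddress(uint64_t address = 0) : m_address{address} {}

        // Arithmetic stays in the strong type.
        constexpr KPhysicalAddress operator+(uint64_t offset) const {
            return KPhysicalAddress{m_address + offset};
        }
        constexpr KPhysicalAddress operator-(uint64_t offset) const {
            return KPhysicalAddress{m_address - offset};
        }
        constexpr bool operator==(const KPhysicalAddress&) const = default;

        // The only way back to a raw integer is an explicit, greppable call:
        //   tree.Insert(GetInteger(base), size, KMemoryRegionType_Dram);
        friend constexpr uint64_t GetInteger(KPhysicalAddress addr) {
            return addr.m_address;
        }

    private:
        uint64_t m_address;
    };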
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
index d02ee61c3..f8fee4f5b 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
@@ -3,10 +3,10 @@
#pragma once
-#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
-constexpr inline PAddr MainMemoryAddress = 0x80000000;
+constexpr inline KPhysicalAddress MainMemoryAddress = 0x80000000;
} // namespace Kernel
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index c10b7bf30..49bdc671e 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -14,9 +14,12 @@ namespace Kernel::Board::Nintendo::Nx {
namespace impl {
-constexpr const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2238 * 4 * 1024;
-constexpr const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x710 * 4 * 1024;
-constexpr const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4 * 1024;
+using namespace Common::Literals;
+
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2280 * 4_KiB;
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeViFatal = 0x200 * 4_KiB;
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x704 * 4_KiB;
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4_KiB;
} // namespace impl
@@ -24,17 +27,21 @@ constexpr const std::size_t RequiredNonSecureSystemMemorySize =
impl::RequiredNonSecureSystemMemorySizeVi + impl::RequiredNonSecureSystemMemorySizeNvservices +
impl::RequiredNonSecureSystemMemorySizeMisc;
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal =
+ RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal;
+
namespace {
using namespace Common::Literals;
u32 GetMemorySizeForInit() {
- return Settings::values.use_extended_memory_layout ? Smc::MemorySize_6GB : Smc::MemorySize_4GB;
+ return Settings::values.use_unsafe_extended_memory_layout ? Smc::MemorySize_8GB
+ : Smc::MemorySize_4GB;
}
Smc::MemoryArrangement GetMemoryArrangeForInit() {
- return Settings::values.use_extended_memory_layout ? Smc::MemoryArrangement_6GB
- : Smc::MemoryArrangement_4GB;
+ return Settings::values.use_unsafe_extended_memory_layout ? Smc::MemoryArrangement_8GB
+ : Smc::MemoryArrangement_4GB;
}
} // namespace
@@ -55,7 +62,7 @@ size_t KSystemControl::Init::GetIntendedMemorySize() {
}
}
-PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) {
+KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
if (intended_dram_size * 2 < real_dram_size) {
@@ -85,7 +92,8 @@ std::size_t KSystemControl::Init::GetApplicationPoolSize() {
case Smc::MemoryArrangement_6GBForAppletDev:
return 3285_MiB;
case Smc::MemoryArrangement_8GB:
- return 4916_MiB;
+ // Real kernel sets this to 4916_MiB. We are not debugging applets.
+ return 6547_MiB;
}
}();
@@ -109,7 +117,8 @@ size_t KSystemControl::Init::GetAppletPoolSize() {
case Smc::MemoryArrangement_6GBForAppletDev:
return 2193_MiB;
case Smc::MemoryArrangement_8GB:
- return 2193_MiB;
+ // Real kernel sets this to 2193_MiB. We are not debugging applets.
+ return 562_MiB;
}
}();
@@ -120,10 +129,13 @@ size_t KSystemControl::Init::GetAppletPoolSize() {
size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() {
// Verify that our minimum is at least as large as Nintendo's.
- constexpr size_t MinimumSize = RequiredNonSecureSystemMemorySize;
- static_assert(MinimumSize >= 0x29C8000);
+ constexpr size_t MinimumSizeWithFatal = RequiredNonSecureSystemMemorySizeWithFatal;
+ static_assert(MinimumSizeWithFatal >= 0x2C04000);
+
+ constexpr size_t MinimumSizeWithoutFatal = RequiredNonSecureSystemMemorySize;
+ static_assert(MinimumSizeWithoutFatal >= 0x2A00000);
- return MinimumSize;
+ return MinimumSizeWithFatal;
}
namespace {
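The rewritten constants rely on the user-defined literals from Common::Literals (4_KiB, 2193_MiB, and so on), pulled in by the new `using namespace Common::Literals;`. A hedged sketch of how such literals are conventionally defined; yuzu's actual common/literals.h may differ in detail:

    #include <cstddef>

    namespace Common::Literals {

    constexpr std::size_t operator""_KiB(unsigned long long n) {
        return static_cast<std::size_t>(n) * 1024;
    }

    constexpr std::size_t operator""_MiB(unsigned long long n) {
        return static_cast<std::size_t>(n) * 1024 * 1024;
    }

    } // namespace Common::Literals

    // With `using namespace Common::Literals;` in scope, a constant such as
    // 0x80 * 4_KiB reads as "0x80 pages of 4 KiB each" (0x80000 bytes).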
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index 4b717d091..b477e8193 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -3,7 +3,7 @@
#pragma once
-#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel::Board::Nintendo::Nx {
@@ -18,7 +18,7 @@ public:
// Initialization.
static std::size_t GetRealMemorySize();
static std::size_t GetIntendedMemorySize();
- static PAddr GetKernelPhysicalBaseAddress(u64 base_address);
+ static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
static bool ShouldIncreaseThreadResourceLimit();
static std::size_t GetApplicationPoolSize();
static std::size_t GetAppletPoolSize();
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
index 5220dbcb6..af1af2b78 100644
--- a/src/core/hle/kernel/code_set.h
+++ b/src/core/hle/kernel/code_set.h
@@ -5,7 +5,7 @@
#include <cstddef>
-#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/physical_memory.h"
namespace Kernel {
@@ -36,7 +36,7 @@ struct CodeSet final {
std::size_t offset = 0;
/// The address to map this segment to.
- VAddr addr = 0;
+ KProcessAddress addr = 0;
/// The size of this segment in bytes.
u32 size = 0;
@@ -82,7 +82,7 @@ struct CodeSet final {
std::array<Segment, 3> segments;
/// The entry point address for this code set.
- VAddr entrypoint = 0;
+ KProcessAddress entrypoint = 0;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index fd911a3a5..7b090ccb5 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -12,20 +12,19 @@
namespace Kernel {
-GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel_)
- : kernel{kernel_}, scheduler_lock{kernel_} {}
+GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
+ : m_kernel{kernel}, m_scheduler_lock{kernel} {}
GlobalSchedulerContext::~GlobalSchedulerContext() = default;
void GlobalSchedulerContext::AddThread(KThread* thread) {
- std::scoped_lock lock{global_list_guard};
- thread_list.push_back(thread);
+ std::scoped_lock lock{m_global_list_guard};
+ m_thread_list.push_back(thread);
}
void GlobalSchedulerContext::RemoveThread(KThread* thread) {
- std::scoped_lock lock{global_list_guard};
- thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
- thread_list.end());
+ std::scoped_lock lock{m_global_list_guard};
+ std::erase(m_thread_list, thread);
}
void GlobalSchedulerContext::PreemptThreads() {
@@ -38,37 +37,37 @@ void GlobalSchedulerContext::PreemptThreads() {
63,
};
- ASSERT(IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
const u32 priority = preemption_priorities[core_id];
- KScheduler::RotateScheduledQueue(kernel, core_id, priority);
+ KScheduler::RotateScheduledQueue(m_kernel, core_id, priority);
}
}
bool GlobalSchedulerContext::IsLocked() const {
- return scheduler_lock.IsLockedByCurrentThread();
+ return m_scheduler_lock.IsLockedByCurrentThread();
}
void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
- ASSERT(IsLocked());
+ ASSERT(this->IsLocked());
- woken_dummy_threads.insert(thread);
+ m_woken_dummy_threads.insert(thread);
}
void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
- ASSERT(IsLocked());
+ ASSERT(this->IsLocked());
- woken_dummy_threads.erase(thread);
+ m_woken_dummy_threads.erase(thread);
}
void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
- ASSERT(IsLocked());
+ ASSERT(this->IsLocked());
- for (auto* thread : woken_dummy_threads) {
+ for (auto* thread : m_woken_dummy_threads) {
thread->DummyThreadEndWait();
}
- woken_dummy_threads.clear();
+ m_woken_dummy_threads.clear();
}
} // namespace Kernel
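RemoveThread above replaces the classic erase-remove idiom with C++20's std::erase, which is equivalent but harder to misuse. A self-contained comparison of the two forms:

    #include <algorithm>
    #include <vector>

    // Pre-C++20 erase-remove idiom: std::remove compacts the kept elements to
    // the front and returns the new logical end; erase() then drops the tail.
    void RemoveOld(std::vector<int>& v, int value) {
        v.erase(std::remove(v.begin(), v.end(), value), v.end());
    }

    // C++20: one call with the same effect and no iterator bookkeeping.
    void RemoveNew(std::vector<int>& v, int value) {
        std::erase(v, value);
    }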
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 220ed6192..c48e8cd12 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -33,7 +33,7 @@ class GlobalSchedulerContext final {
public:
using LockType = KAbstractSchedulerLock<KScheduler>;
- explicit GlobalSchedulerContext(KernelCore& kernel_);
+ explicit GlobalSchedulerContext(KernelCore& kernel);
~GlobalSchedulerContext();
/// Adds a new thread to the scheduler
@@ -43,8 +43,9 @@ public:
void RemoveThread(KThread* thread);
/// Returns a list of all threads managed by the scheduler
- [[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
- return thread_list;
+ /// This is only safe to iterate while holding the scheduler lock
+ const std::vector<KThread*>& GetThreadList() const {
+ return m_thread_list;
}
/**
@@ -63,30 +64,26 @@ public:
void RegisterDummyThreadForWakeup(KThread* thread);
void WakeupWaitingDummyThreads();
- [[nodiscard]] LockType& SchedulerLock() {
- return scheduler_lock;
- }
-
- [[nodiscard]] const LockType& SchedulerLock() const {
- return scheduler_lock;
+ LockType& SchedulerLock() {
+ return m_scheduler_lock;
}
private:
friend class KScopedSchedulerLock;
friend class KScopedSchedulerLockAndSleep;
- KernelCore& kernel;
+ KernelCore& m_kernel;
- std::atomic_bool scheduler_update_needed{};
- KSchedulerPriorityQueue priority_queue;
- LockType scheduler_lock;
+ std::atomic_bool m_scheduler_update_needed{};
+ KSchedulerPriorityQueue m_priority_queue;
+ LockType m_scheduler_lock;
/// Lists dummy threads pending wakeup on lock release
- std::set<KThread*> woken_dummy_threads;
+ std::set<KThread*> m_woken_dummy_threads;
/// Lists all thread ids that aren't deleted/etc.
- std::vector<KThread*> thread_list;
- std::mutex global_list_guard;
+ std::vector<KThread*> m_thread_list;
+ std::mutex m_global_list_guard;
};
} // namespace Kernel
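The new comment on GetThreadList makes the locking contract explicit: the list may only be iterated while the scheduler lock is held, which callers take through the RAII KScopedSchedulerLock named in the friend declarations. A generic sketch of that RAII pattern follows; it is not yuzu's actual class, whose lock is the KAbstractSchedulerLock shown above:

    // Generic scoped-lock sketch: construction locks, destruction unlocks,
    // so the lock is released on every exit path, including early returns.
    template <typename LockType>
    class ScopedLockSketch {
    public:
        explicit ScopedLockSketch(LockType& lock) : m_lock{lock} {
            m_lock.Lock();
        }
        ~ScopedLockSketch() {
            m_lock.Unlock();
        }

        ScopedLockSketch(const ScopedLockSketch&) = delete;
        ScopedLockSketch& operator=(const ScopedLockSketch&) = delete;

    private:
        LockType& m_lock;
    };

    // Hypothetical call site for the context above:
    //   ScopedLockSketch lk{context.SchedulerLock()};
    //   for (KThread* t : context.GetThreadList()) { /* safe while held */ }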
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
deleted file mode 100644
index 738b6d0f1..000000000
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ /dev/null
@@ -1,521 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <algorithm>
-#include <array>
-#include <sstream>
-
-#include <boost/range/algorithm_ext/erase.hpp>
-
-#include "common/assert.h"
-#include "common/common_funcs.h"
-#include "common/common_types.h"
-#include "common/logging/log.h"
-#include "core/hle/ipc_helpers.h"
-#include "core/hle/kernel/hle_ipc.h"
-#include "core/hle/kernel/k_auto_object.h"
-#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_server_port.h"
-#include "core/hle/kernel/k_server_session.h"
-#include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/service_thread.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
- ServiceThreadType thread_type)
- : kernel{kernel_}, service_thread{thread_type == ServiceThreadType::CreateNew
- ? kernel.CreateServiceThread(service_name_)
- : kernel.GetDefaultServiceThread()} {}
-
-SessionRequestHandler::~SessionRequestHandler() {
- kernel.ReleaseServiceThread(service_thread);
-}
-
-void SessionRequestHandler::AcceptSession(KServerPort* server_port) {
- auto* server_session = server_port->AcceptSession();
- ASSERT(server_session != nullptr);
-
- RegisterSession(server_session, std::make_shared<SessionRequestManager>(kernel));
-}
-
-void SessionRequestHandler::RegisterSession(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager) {
- manager->SetSessionHandler(shared_from_this());
- service_thread.RegisterServerSession(server_session, manager);
- server_session->Close();
-}
-
-SessionRequestManager::SessionRequestManager(KernelCore& kernel_) : kernel{kernel_} {}
-
-SessionRequestManager::~SessionRequestManager() = default;
-
-bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& context) const {
- if (IsDomain() && context.HasDomainMessageHeader()) {
- const auto& message_header = context.GetDomainMessageHeader();
- const auto object_id = message_header.object_id;
-
- if (object_id > DomainHandlerCount()) {
- LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
- return false;
- }
- return !DomainHandler(object_id - 1).expired();
- } else {
- return session_handler != nullptr;
- }
-}
-
-Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session,
- HLERequestContext& context) {
- Result result = ResultSuccess;
-
- // If the session has been converted to a domain, handle the domain request
- if (this->HasSessionRequestHandler(context)) {
- if (IsDomain() && context.HasDomainMessageHeader()) {
- result = HandleDomainSyncRequest(server_session, context);
- // If there is no domain header, the regular session handler is used
- } else if (this->HasSessionHandler()) {
- // If this manager has an associated HLE handler, forward the request to it.
- result = this->SessionHandler().HandleSyncRequest(*server_session, context);
- }
- } else {
- ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
- IPC::ResponseBuilder rb(context, 2);
- rb.Push(ResultSuccess);
- }
-
- if (convert_to_domain) {
- ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
- this->ConvertToDomain();
- convert_to_domain = false;
- }
-
- return result;
-}
-
-Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session,
- HLERequestContext& context) {
- if (!context.HasDomainMessageHeader()) {
- return ResultSuccess;
- }
-
- // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
- ASSERT(context.GetManager().get() == this);
-
- // If there is a DomainMessageHeader, then this is CommandType "Request"
- const auto& domain_message_header = context.GetDomainMessageHeader();
- const u32 object_id{domain_message_header.object_id};
- switch (domain_message_header.command) {
- case IPC::DomainMessageHeader::CommandType::SendMessage:
- if (object_id > this->DomainHandlerCount()) {
- LOG_CRITICAL(IPC,
- "object_id {} is too big! This probably means a recent service call "
- "needed to return a new interface!",
- object_id);
- ASSERT(false);
- return ResultSuccess; // Ignore error if asserts are off
- }
- if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) {
- return strong_ptr->HandleSyncRequest(*server_session, context);
- } else {
- ASSERT(false);
- return ResultSuccess;
- }
-
- case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
- LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
-
- this->CloseDomainHandler(object_id - 1);
-
- IPC::ResponseBuilder rb{context, 2};
- rb.Push(ResultSuccess);
- return ResultSuccess;
- }
- }
-
- LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
- ASSERT(false);
- return ResultSuccess;
-}
-
-HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
- KServerSession* server_session_, KThread* thread_)
- : server_session(server_session_), thread(thread_), kernel{kernel_}, memory{memory_} {
- cmd_buf[0] = 0;
-}
-
-HLERequestContext::~HLERequestContext() = default;
-
-void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf,
- bool incoming) {
- IPC::RequestParser rp(src_cmdbuf);
- command_header = rp.PopRaw<IPC::CommandHeader>();
-
- if (command_header->IsCloseCommand()) {
- // Close does not populate the rest of the IPC header
- return;
- }
-
- // If handle descriptor is present, add size of it
- if (command_header->enable_handle_descriptor) {
- handle_descriptor_header = rp.PopRaw<IPC::HandleDescriptorHeader>();
- if (handle_descriptor_header->send_current_pid) {
- pid = rp.Pop<u64>();
- }
- if (incoming) {
- // Populate the object lists with the data in the IPC request.
- incoming_copy_handles.reserve(handle_descriptor_header->num_handles_to_copy);
- incoming_move_handles.reserve(handle_descriptor_header->num_handles_to_move);
-
- for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) {
- incoming_copy_handles.push_back(rp.Pop<Handle>());
- }
- for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) {
- incoming_move_handles.push_back(rp.Pop<Handle>());
- }
- } else {
- // For responses we just ignore the handles, they're empty and will be populated when
- // translating the response.
- rp.Skip(handle_descriptor_header->num_handles_to_copy, false);
- rp.Skip(handle_descriptor_header->num_handles_to_move, false);
- }
- }
-
- buffer_x_desciptors.reserve(command_header->num_buf_x_descriptors);
- buffer_a_desciptors.reserve(command_header->num_buf_a_descriptors);
- buffer_b_desciptors.reserve(command_header->num_buf_b_descriptors);
- buffer_w_desciptors.reserve(command_header->num_buf_w_descriptors);
-
- for (u32 i = 0; i < command_header->num_buf_x_descriptors; ++i) {
- buffer_x_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorX>());
- }
- for (u32 i = 0; i < command_header->num_buf_a_descriptors; ++i) {
- buffer_a_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorABW>());
- }
- for (u32 i = 0; i < command_header->num_buf_b_descriptors; ++i) {
- buffer_b_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorABW>());
- }
- for (u32 i = 0; i < command_header->num_buf_w_descriptors; ++i) {
- buffer_w_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorABW>());
- }
-
- const auto buffer_c_offset = rp.GetCurrentOffset() + command_header->data_size;
-
- if (!command_header->IsTipc()) {
- // Padding to align to 16 bytes
- rp.AlignWithPadding();
-
- if (GetManager()->IsDomain() &&
- ((command_header->type == IPC::CommandType::Request ||
- command_header->type == IPC::CommandType::RequestWithContext) ||
- !incoming)) {
- // If this is an incoming message, only CommandType "Request" has a domain header
- // All outgoing domain messages have the domain header, if only incoming has it
- if (incoming || domain_message_header) {
- domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
- } else {
- if (GetManager()->IsDomain()) {
- LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
- }
- }
- }
-
- data_payload_header = rp.PopRaw<IPC::DataPayloadHeader>();
-
- data_payload_offset = rp.GetCurrentOffset();
-
- if (domain_message_header &&
- domain_message_header->command ==
- IPC::DomainMessageHeader::CommandType::CloseVirtualHandle) {
- // CloseVirtualHandle command does not have SFC* or any data
- return;
- }
-
- if (incoming) {
- ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'I'));
- } else {
- ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'O'));
- }
- }
-
- rp.SetCurrentOffset(buffer_c_offset);
-
- // For Inline buffers, the response data is written directly to buffer_c_offset
- // and in this case we don't have any BufferDescriptorC on the request.
- if (command_header->buf_c_descriptor_flags >
- IPC::CommandHeader::BufferDescriptorCFlag::InlineDescriptor) {
- if (command_header->buf_c_descriptor_flags ==
- IPC::CommandHeader::BufferDescriptorCFlag::OneDescriptor) {
- buffer_c_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorC>());
- } else {
- u32 num_buf_c_descriptors =
- static_cast<u32>(command_header->buf_c_descriptor_flags.Value()) - 2;
-
- // This is used to detect possible underflows, in case something is broken
- // with the two ifs above and the flags value is == 0 || == 1.
- ASSERT(num_buf_c_descriptors < 14);
-
- for (u32 i = 0; i < num_buf_c_descriptors; ++i) {
- buffer_c_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorC>());
- }
- }
- }
-
- rp.SetCurrentOffset(data_payload_offset);
-
- command = rp.Pop<u32_le>();
- rp.Skip(1, false); // The command is actually an u64, but we don't use the high part.
-}
-
-Result HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
- u32_le* src_cmdbuf) {
- ParseCommandBuffer(handle_table, src_cmdbuf, true);
-
- if (command_header->IsCloseCommand()) {
- // Close does not populate the rest of the IPC header
- return ResultSuccess;
- }
-
- std::copy_n(src_cmdbuf, IPC::COMMAND_BUFFER_LENGTH, cmd_buf.begin());
-
- return ResultSuccess;
-}
-
-Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
- auto current_offset = handles_offset;
- auto& owner_process = *requesting_thread.GetOwnerProcess();
- auto& handle_table = owner_process.GetHandleTable();
-
- for (auto& object : outgoing_copy_objects) {
- Handle handle{};
- if (object) {
- R_TRY(handle_table.Add(&handle, object));
- }
- cmd_buf[current_offset++] = handle;
- }
- for (auto& object : outgoing_move_objects) {
- Handle handle{};
- if (object) {
- R_TRY(handle_table.Add(&handle, object));
-
- // Close our reference to the object, as it is being moved to the caller.
- object->Close();
- }
- cmd_buf[current_offset++] = handle;
- }
-
- // Write the domain objects to the command buffer, these go after the raw untranslated data.
- // TODO(Subv): This completely ignores C buffers.
-
- if (GetManager()->IsDomain()) {
- current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
- for (auto& object : outgoing_domain_objects) {
- GetManager()->AppendDomainHandler(std::move(object));
- cmd_buf[current_offset++] = static_cast<u32_le>(GetManager()->DomainHandlerCount());
- }
- }
-
- // Copy the translated command buffer back into the thread's command buffer area.
- memory.WriteBlock(owner_process, requesting_thread.GetTLSAddress(), cmd_buf.data(),
- write_size * sizeof(u32));
-
- return ResultSuccess;
-}
-
-std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
- const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
- BufferDescriptorA()[buffer_index].Size()};
- if (is_buffer_a) {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorA().size() > buffer_index, { return {}; },
- "BufferDescriptorA invalid buffer_index {}", buffer_index);
- std::vector<u8> buffer(BufferDescriptorA()[buffer_index].Size());
- memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size());
- return buffer;
- } else {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorX().size() > buffer_index, { return {}; },
- "BufferDescriptorX invalid buffer_index {}", buffer_index);
- std::vector<u8> buffer(BufferDescriptorX()[buffer_index].Size());
- memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size());
- return buffer;
- }
-}
-
-std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
- std::size_t buffer_index) const {
- if (size == 0) {
- LOG_WARNING(Core, "skip empty buffer write");
- return 0;
- }
-
- const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
- BufferDescriptorB()[buffer_index].Size()};
- const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
- if (size > buffer_size) {
- LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
- buffer_size);
- size = buffer_size; // TODO(bunnei): This needs to be HW tested
- }
-
- if (is_buffer_b) {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorB().size() > buffer_index &&
- BufferDescriptorB()[buffer_index].Size() >= size,
- { return 0; }, "BufferDescriptorB is invalid, index={}, size={}", buffer_index, size);
- WriteBufferB(buffer, size, buffer_index);
- } else {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorC().size() > buffer_index &&
- BufferDescriptorC()[buffer_index].Size() >= size,
- { return 0; }, "BufferDescriptorC is invalid, index={}, size={}", buffer_index, size);
- WriteBufferC(buffer, size, buffer_index);
- }
-
- return size;
-}
-
-std::size_t HLERequestContext::WriteBufferB(const void* buffer, std::size_t size,
- std::size_t buffer_index) const {
- if (buffer_index >= BufferDescriptorB().size() || size == 0) {
- return 0;
- }
-
- const auto buffer_size{BufferDescriptorB()[buffer_index].Size()};
- if (size > buffer_size) {
- LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
- buffer_size);
- size = buffer_size; // TODO(bunnei): This needs to be HW tested
- }
-
- memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
- return size;
-}
-
-std::size_t HLERequestContext::WriteBufferC(const void* buffer, std::size_t size,
- std::size_t buffer_index) const {
- if (buffer_index >= BufferDescriptorC().size() || size == 0) {
- return 0;
- }
-
- const auto buffer_size{BufferDescriptorC()[buffer_index].Size()};
- if (size > buffer_size) {
- LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
- buffer_size);
- size = buffer_size; // TODO(bunnei): This needs to be HW tested
- }
-
- memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
- return size;
-}
-
-std::size_t HLERequestContext::GetReadBufferSize(std::size_t buffer_index) const {
- const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
- BufferDescriptorA()[buffer_index].Size()};
- if (is_buffer_a) {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorA().size() > buffer_index, { return 0; },
- "BufferDescriptorA invalid buffer_index {}", buffer_index);
- return BufferDescriptorA()[buffer_index].Size();
- } else {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorX().size() > buffer_index, { return 0; },
- "BufferDescriptorX invalid buffer_index {}", buffer_index);
- return BufferDescriptorX()[buffer_index].Size();
- }
-}
-
-std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) const {
- const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
- BufferDescriptorB()[buffer_index].Size()};
- if (is_buffer_b) {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorB().size() > buffer_index, { return 0; },
- "BufferDescriptorB invalid buffer_index {}", buffer_index);
- return BufferDescriptorB()[buffer_index].Size();
- } else {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorC().size() > buffer_index, { return 0; },
- "BufferDescriptorC invalid buffer_index {}", buffer_index);
- return BufferDescriptorC()[buffer_index].Size();
- }
- return 0;
-}
-
-bool HLERequestContext::CanReadBuffer(std::size_t buffer_index) const {
- const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
- BufferDescriptorA()[buffer_index].Size()};
-
- if (is_buffer_a) {
- return BufferDescriptorA().size() > buffer_index;
- } else {
- return BufferDescriptorX().size() > buffer_index;
- }
-}
-
-bool HLERequestContext::CanWriteBuffer(std::size_t buffer_index) const {
- const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
- BufferDescriptorB()[buffer_index].Size()};
-
- if (is_buffer_b) {
- return BufferDescriptorB().size() > buffer_index;
- } else {
- return BufferDescriptorC().size() > buffer_index;
- }
-}
-
-std::string HLERequestContext::Description() const {
- if (!command_header) {
- return "No command header available";
- }
- std::ostringstream s;
- s << "IPC::CommandHeader: Type:" << static_cast<u32>(command_header->type.Value());
- s << ", X(Pointer):" << command_header->num_buf_x_descriptors;
- if (command_header->num_buf_x_descriptors) {
- s << '[';
- for (u64 i = 0; i < command_header->num_buf_x_descriptors; ++i) {
- s << "0x" << std::hex << BufferDescriptorX()[i].Size();
- if (i < command_header->num_buf_x_descriptors - 1)
- s << ", ";
- }
- s << ']';
- }
- s << ", A(Send):" << command_header->num_buf_a_descriptors;
- if (command_header->num_buf_a_descriptors) {
- s << '[';
- for (u64 i = 0; i < command_header->num_buf_a_descriptors; ++i) {
- s << "0x" << std::hex << BufferDescriptorA()[i].Size();
- if (i < command_header->num_buf_a_descriptors - 1)
- s << ", ";
- }
- s << ']';
- }
- s << ", B(Receive):" << command_header->num_buf_b_descriptors;
- if (command_header->num_buf_b_descriptors) {
- s << '[';
- for (u64 i = 0; i < command_header->num_buf_b_descriptors; ++i) {
- s << "0x" << std::hex << BufferDescriptorB()[i].Size();
- if (i < command_header->num_buf_b_descriptors - 1)
- s << ", ";
- }
- s << ']';
- }
- s << ", C(ReceiveList):" << BufferDescriptorC().size();
- if (!BufferDescriptorC().empty()) {
- s << '[';
- for (u64 i = 0; i < BufferDescriptorC().size(); ++i) {
- s << "0x" << std::hex << BufferDescriptorC()[i].Size();
- if (i < BufferDescriptorC().size() - 1)
- s << ", ";
- }
- s << ']';
- }
- s << ", data_size:" << command_header->data_size.Value();
-
- return s.str();
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
deleted file mode 100644
index e252b5f4b..000000000
--- a/src/core/hle/kernel/hle_ipc.h
+++ /dev/null
@@ -1,412 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <array>
-#include <functional>
-#include <memory>
-#include <optional>
-#include <string>
-#include <type_traits>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "common/concepts.h"
-#include "common/swap.h"
-#include "core/hle/ipc.h"
-#include "core/hle/kernel/svc_common.h"
-
-union Result;
-
-namespace Core::Memory {
-class Memory;
-}
-
-namespace IPC {
-class ResponseBuilder;
-}
-
-namespace Service {
-class ServiceFrameworkBase;
-}
-
-enum class ServiceThreadType {
- Default,
- CreateNew,
-};
-
-namespace Kernel {
-
-class Domain;
-class HLERequestContext;
-class KAutoObject;
-class KernelCore;
-class KEvent;
-class KHandleTable;
-class KServerPort;
-class KProcess;
-class KServerSession;
-class KThread;
-class KReadableEvent;
-class KSession;
-class SessionRequestManager;
-class ServiceThread;
-
-enum class ThreadWakeupReason;
-
-/**
- * Interface implemented by HLE Session handlers.
- * This can be provided to a ServerSession in order to hook into several relevant events
- * (such as a new connection or a SyncRequest) so they can be implemented in the emulator.
- */
-class SessionRequestHandler : public std::enable_shared_from_this<SessionRequestHandler> {
-public:
- SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
- ServiceThreadType thread_type);
- virtual ~SessionRequestHandler();
-
- /**
- * Handles a sync request from the emulated application.
- * @param server_session The ServerSession that was triggered for this sync request,
- * it should be used to differentiate which client (As in ClientSession) we're answering to.
- * TODO(Subv): Use a wrapper structure to hold all the information relevant to
- * this request (ServerSession, Originator thread, Translated command buffer, etc).
- * @returns Result the result code of the translate operation.
- */
- virtual Result HandleSyncRequest(Kernel::KServerSession& session,
- Kernel::HLERequestContext& context) = 0;
-
- void AcceptSession(KServerPort* server_port);
- void RegisterSession(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager);
-
- ServiceThread& GetServiceThread() const {
- return service_thread;
- }
-
-protected:
- KernelCore& kernel;
- ServiceThread& service_thread;
-};
-
-using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
-using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>;
-
-/**
- * Manages the underlying HLE requests for a session, and whether (or not) the session should be
- * treated as a domain. This is managed separately from server sessions, as this state is shared
- * when objects are cloned.
- */
-class SessionRequestManager final {
-public:
- explicit SessionRequestManager(KernelCore& kernel);
- ~SessionRequestManager();
-
- bool IsDomain() const {
- return is_domain;
- }
-
- void ConvertToDomain() {
- domain_handlers = {session_handler};
- is_domain = true;
- }
-
- void ConvertToDomainOnRequestEnd() {
- convert_to_domain = true;
- }
-
- std::size_t DomainHandlerCount() const {
- return domain_handlers.size();
- }
-
- bool HasSessionHandler() const {
- return session_handler != nullptr;
- }
-
- SessionRequestHandler& SessionHandler() {
- return *session_handler;
- }
-
- const SessionRequestHandler& SessionHandler() const {
- return *session_handler;
- }
-
- void CloseDomainHandler(std::size_t index) {
- if (index < DomainHandlerCount()) {
- domain_handlers[index] = nullptr;
- } else {
- ASSERT_MSG(false, "Unexpected handler index {}", index);
- }
- }
-
- SessionRequestHandlerWeakPtr DomainHandler(std::size_t index) const {
- ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index);
- return domain_handlers.at(index);
- }
-
- void AppendDomainHandler(SessionRequestHandlerPtr&& handler) {
- domain_handlers.emplace_back(std::move(handler));
- }
-
- void SetSessionHandler(SessionRequestHandlerPtr&& handler) {
- session_handler = std::move(handler);
- }
-
- ServiceThread& GetServiceThread() const {
- return session_handler->GetServiceThread();
- }
-
- bool HasSessionRequestHandler(const HLERequestContext& context) const;
-
- Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context);
- Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context);
-
-private:
- bool convert_to_domain{};
- bool is_domain{};
- SessionRequestHandlerPtr session_handler;
- std::vector<SessionRequestHandlerPtr> domain_handlers;
-
-private:
- KernelCore& kernel;
-};
-
-/**
- * Class containing information about an in-flight IPC request being handled by an HLE service
- * implementation. Services should avoid using old global APIs (e.g. Kernel::GetCommandBuffer()) and
- * when possible use the APIs in this class to service the request.
- *
- * HLE handle protocol
- * ===================
- *
- * To avoid needing HLE services to keep a separate handle table, or having to directly modify the
- * requester's table, a tweaked protocol is used to receive and send handles in requests. The kernel
- * will decode the incoming handles into object pointers and insert a id in the buffer where the
- * handle would normally be. The service then calls GetIncomingHandle() with that id to get the
- * pointer to the object. Similarly, instead of inserting a handle into the command buffer, the
- * service calls AddOutgoingHandle() and stores the returned id where the handle would normally go.
- *
- * The end result is similar to just giving services their own real handle tables, but since these
- * ids are local to a specific context, it avoids requiring services to manage handles for objects
- * across multiple calls and ensuring that unneeded handles are cleaned up.
- */
-class HLERequestContext {
-public:
- explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
- KServerSession* session, KThread* thread);
- ~HLERequestContext();
-
- /// Returns a pointer to the IPC command buffer for this request.
- [[nodiscard]] u32* CommandBuffer() {
- return cmd_buf.data();
- }
-
- /**
- * Returns the session through which this request was made. This can be used as a map key to
- * access per-client data on services.
- */
- [[nodiscard]] Kernel::KServerSession* Session() {
- return server_session;
- }
-
- /// Populates this context with data from the requesting process/thread.
- Result PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf);
-
- /// Writes data from this context back to the requesting process/thread.
- Result WriteToOutgoingCommandBuffer(KThread& requesting_thread);
-
- [[nodiscard]] u32_le GetHipcCommand() const {
- return command;
- }
-
- [[nodiscard]] u32_le GetTipcCommand() const {
- return static_cast<u32_le>(command_header->type.Value()) -
- static_cast<u32_le>(IPC::CommandType::TIPC_CommandRegion);
- }
-
- [[nodiscard]] u32_le GetCommand() const {
- return command_header->IsTipc() ? GetTipcCommand() : GetHipcCommand();
- }
-
- [[nodiscard]] bool IsTipc() const {
- return command_header->IsTipc();
- }
-
- [[nodiscard]] IPC::CommandType GetCommandType() const {
- return command_header->type;
- }
-
- [[nodiscard]] u64 GetPID() const {
- return pid;
- }
-
- [[nodiscard]] u32 GetDataPayloadOffset() const {
- return data_payload_offset;
- }
-
- [[nodiscard]] const std::vector<IPC::BufferDescriptorX>& BufferDescriptorX() const {
- return buffer_x_desciptors;
- }
-
- [[nodiscard]] const std::vector<IPC::BufferDescriptorABW>& BufferDescriptorA() const {
- return buffer_a_desciptors;
- }
-
- [[nodiscard]] const std::vector<IPC::BufferDescriptorABW>& BufferDescriptorB() const {
- return buffer_b_desciptors;
- }
-
- [[nodiscard]] const std::vector<IPC::BufferDescriptorC>& BufferDescriptorC() const {
- return buffer_c_desciptors;
- }
-
- [[nodiscard]] const IPC::DomainMessageHeader& GetDomainMessageHeader() const {
- return domain_message_header.value();
- }
-
- [[nodiscard]] bool HasDomainMessageHeader() const {
- return domain_message_header.has_value();
- }
-
- /// Helper function to read a buffer using the appropriate buffer descriptor
- [[nodiscard]] std::vector<u8> ReadBuffer(std::size_t buffer_index = 0) const;
-
- /// Helper function to write a buffer using the appropriate buffer descriptor
- std::size_t WriteBuffer(const void* buffer, std::size_t size,
- std::size_t buffer_index = 0) const;
-
- /// Helper function to write buffer B
- std::size_t WriteBufferB(const void* buffer, std::size_t size,
- std::size_t buffer_index = 0) const;
-
- /// Helper function to write buffer C
- std::size_t WriteBufferC(const void* buffer, std::size_t size,
- std::size_t buffer_index = 0) const;
-
- /* Helper function to write a buffer using the appropriate buffer descriptor
- *
- * @tparam T an arbitrary container that satisfies the
- * ContiguousContainer concept in the C++ standard library or a trivially copyable type.
- *
- * @param data The container/data to write into a buffer.
- * @param buffer_index The buffer in particular to write to.
- */
- template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>>
- std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const {
- if constexpr (Common::IsContiguousContainer<T>) {
- using ContiguousType = typename T::value_type;
- static_assert(std::is_trivially_copyable_v<ContiguousType>,
- "Container to WriteBuffer must contain trivially copyable objects");
- return WriteBuffer(std::data(data), std::size(data) * sizeof(ContiguousType),
- buffer_index);
- } else {
- static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
- return WriteBuffer(&data, sizeof(T), buffer_index);
- }
- }
-
- /// Helper function to get the size of the input buffer
- [[nodiscard]] std::size_t GetReadBufferSize(std::size_t buffer_index = 0) const;
-
- /// Helper function to get the size of the output buffer
- [[nodiscard]] std::size_t GetWriteBufferSize(std::size_t buffer_index = 0) const;
-
- /// Helper function to derive the number of elements able to be contained in the read buffer
- template <typename T>
- [[nodiscard]] std::size_t GetReadBufferNumElements(std::size_t buffer_index = 0) const {
- return GetReadBufferSize(buffer_index) / sizeof(T);
- }
-
- /// Helper function to derive the number of elements able to be contained in the write buffer
- template <typename T>
- [[nodiscard]] std::size_t GetWriteBufferNumElements(std::size_t buffer_index = 0) const {
- return GetWriteBufferSize(buffer_index) / sizeof(T);
- }
-
- /// Helper function to test whether the input buffer at buffer_index can be read
- [[nodiscard]] bool CanReadBuffer(std::size_t buffer_index = 0) const;
-
- /// Helper function to test whether the output buffer at buffer_index can be written
- [[nodiscard]] bool CanWriteBuffer(std::size_t buffer_index = 0) const;
-
- [[nodiscard]] Handle GetCopyHandle(std::size_t index) const {
- return incoming_copy_handles.at(index);
- }
-
- [[nodiscard]] Handle GetMoveHandle(std::size_t index) const {
- return incoming_move_handles.at(index);
- }
-
- void AddMoveObject(KAutoObject* object) {
- outgoing_move_objects.emplace_back(object);
- }
-
- void AddCopyObject(KAutoObject* object) {
- outgoing_copy_objects.emplace_back(object);
- }
-
- void AddDomainObject(SessionRequestHandlerPtr object) {
- outgoing_domain_objects.emplace_back(std::move(object));
- }
-
- template <typename T>
- std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
- return std::static_pointer_cast<T>(GetManager()->DomainHandler(index).lock());
- }
-
- void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) {
- manager = manager_;
- }
-
- [[nodiscard]] std::string Description() const;
-
- [[nodiscard]] KThread& GetThread() {
- return *thread;
- }
-
- [[nodiscard]] std::shared_ptr<SessionRequestManager> GetManager() const {
- return manager.lock();
- }
-
-private:
- friend class IPC::ResponseBuilder;
-
- void ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
-
- std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
- Kernel::KServerSession* server_session{};
- KThread* thread;
-
- std::vector<Handle> incoming_move_handles;
- std::vector<Handle> incoming_copy_handles;
-
- std::vector<KAutoObject*> outgoing_move_objects;
- std::vector<KAutoObject*> outgoing_copy_objects;
- std::vector<SessionRequestHandlerPtr> outgoing_domain_objects;
-
- std::optional<IPC::CommandHeader> command_header;
- std::optional<IPC::HandleDescriptorHeader> handle_descriptor_header;
- std::optional<IPC::DataPayloadHeader> data_payload_header;
- std::optional<IPC::DomainMessageHeader> domain_message_header;
- std::vector<IPC::BufferDescriptorX> buffer_x_desciptors;
- std::vector<IPC::BufferDescriptorABW> buffer_a_desciptors;
- std::vector<IPC::BufferDescriptorABW> buffer_b_desciptors;
- std::vector<IPC::BufferDescriptorABW> buffer_w_desciptors;
- std::vector<IPC::BufferDescriptorC> buffer_c_desciptors;
-
- u32_le command{};
- u64 pid{};
- u32 write_size{};
- u32 data_payload_offset{};
- u32 handles_offset{};
- u32 domain_offset{};
-
- std::weak_ptr<SessionRequestManager> manager{};
-
- KernelCore& kernel;
- Core::Memory::Memory& memory;
-};
-
-} // namespace Kernel
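The doc comment in the deleted header describes the HLE handle protocol: the kernel decodes incoming handles into object pointers, and services hand outgoing objects to the context instead of writing handles into the command buffer themselves. A hypothetical handler body written against the removed API, only to illustrate the flow that comment describes; the function and its `event` parameter are invented, while the ctx methods are those declared above:

    // Hypothetical handler built on the removed HLERequestContext helpers.
    Result HandleExampleRequest(Kernel::HLERequestContext& ctx,
                                Kernel::KAutoObject* event) {
        // Input arrives through the A/X buffer descriptors; ReadBuffer picks
        // whichever descriptor is populated for the given index.
        if (ctx.CanReadBuffer()) {
            const std::vector<u8> input = ctx.ReadBuffer();
            // ... decode the request payload ...
        }

        // Output goes through the B/C descriptors; WriteBuffer selects B or C
        // and clamps the write to the descriptor's size.
        const u32 reply_value = 0;
        ctx.WriteBuffer(reply_value);

        // Handles are never written into cmd_buf by the service. The context
        // collects the objects and translates them into the caller's handle
        // table when the response is written back.
        ctx.AddCopyObject(event);
        return ResultSuccess;
    }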
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 7b363eb1e..1f2db673c 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -4,17 +4,18 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_debug.h"
+#include "core/hle/kernel/k_device_address_space.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_event_info.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_object_name.h"
#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
@@ -28,9 +29,13 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel::Init {
+// For macro convenience.
+using KThreadLockInfo = KThread::LockWithPriorityInheritanceInfo;
+
#define SLAB_COUNT(CLASS) kernel.SlabResourceCounts().num_##CLASS
#define FOREACH_SLAB_TYPE(HANDLER, ...) \
@@ -43,14 +48,17 @@ namespace Kernel::Init {
HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \
+ HANDLER(KDeviceAddressSpace, (SLAB_COUNT(KDeviceAddressSpace)), ##__VA_ARGS__) \
HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
HANDLER(KThreadLocalPage, \
(SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
##__VA_ARGS__) \
+ HANDLER(KObjectName, (SLAB_COUNT(KObjectName)), ##__VA_ARGS__) \
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \
HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
- HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)
+ HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) \
+ HANDLER(KThreadLockInfo, (SLAB_COUNT(KThread)), ##__VA_ARGS__)
namespace {
@@ -96,17 +104,18 @@ static_assert(KernelPageBufferAdditionalSize ==
/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
-static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
- slab_addr -= memory_layout.GetSlabRegionAddress();
- return slab_addr + Core::DramMemoryMap::SlabHeapBase;
+static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
+ KVirtualAddress slab_addr) {
+ slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
+ return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
}
template <typename T>
-VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
- size_t num_objects) {
+KVirtualAddress InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout,
+ KVirtualAddress address, size_t num_objects) {
const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
- VAddr start = Common::AlignUp(address, alignof(T));
+ KVirtualAddress start = Common::AlignUp(GetInteger(address), alignof(T));
// This should use the virtual memory address passed in, but currently, we do not setup the
// kernel virtual memory layout. Instead, we simply map these at a region of physical memory
@@ -127,7 +136,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
}
size_t CalculateSlabHeapGapSize() {
- constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB;
+ constexpr size_t KernelSlabHeapGapSize = 2_MiB - 356_KiB;
static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
return KernelSlabHeapGapSize;
}
@@ -187,7 +196,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
// Get the start of the slab region, since that's where we'll be working.
- VAddr address = memory_layout.GetSlabRegionAddress();
+ KVirtualAddress address = memory_layout.GetSlabRegionAddress();
// Initialize slab type array to be in sorted order.
std::array<KSlabType, KSlabType_Count> slab_types;
@@ -220,7 +229,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
// Track the gaps, so that we can free them to the unused slab tree.
- VAddr gap_start = address;
+ KVirtualAddress gap_start = address;
size_t gap_size = 0;
for (size_t i = 0; i < slab_gaps.size(); i++) {
@@ -272,7 +281,7 @@ void KPageBufferSlabHeap::Initialize(Core::System& system) {
// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
- const PAddr slab_address =
+ const KPhysicalAddress slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);
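The slab-setup hunks above swap raw VAddr/PAddr integers for KVirtualAddress/KPhysicalAddress wrappers that only yield integers through an explicit GetInteger() call. A minimal sketch of the idea, assuming a simplified stand-in for yuzu's k_typed_address.h (the real header is more elaborate):

#include <cstdint>

// Simplified stand-in for a typed address: arithmetic stays tag-checked,
// and a raw integer is only extracted explicitly via GetInteger().
template <typename Tag>
class TypedAddress {
public:
    constexpr TypedAddress() = default;
    constexpr TypedAddress(uint64_t value) : m_value(value) {}

    constexpr TypedAddress& operator-=(uint64_t offset) {
        m_value -= offset;
        return *this;
    }

    friend constexpr uint64_t GetInteger(TypedAddress addr) {
        return addr.m_value;
    }

private:
    uint64_t m_value{};
};

struct VirtualTag {};
struct PhysicalTag {};
using KVirtualAddress = TypedAddress<VirtualTag>;
using KPhysicalAddress = TypedAddress<PhysicalTag>;

// Mirrors TranslateSlabAddrToPhysical above: rebase a slab virtual address
// onto the physical slab heap base.
constexpr KPhysicalAddress Translate(KVirtualAddress slab_addr, KVirtualAddress region_base,
                                     uint64_t phys_heap_base) {
    slab_addr -= GetInteger(region_base);
    return KPhysicalAddress(GetInteger(slab_addr) + phys_heap_base);
}

Mixing a KVirtualAddress into a KPhysicalAddress expression without GetInteger() now fails to compile, which is exactly the class of bug the migration targets.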
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
index af0fb23b6..82195f4f7 100644
--- a/src/core/hle/kernel/initial_process.h
+++ b/src/core/hle/kernel/initial_process.h
@@ -14,7 +14,7 @@ using namespace Common::Literals;
constexpr std::size_t InitialProcessBinarySizeMax = 12_MiB;
-static inline PAddr GetInitialProcessBinaryPhysicalAddress() {
+static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
return Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetKernelPhysicalBaseAddress(
MainMemoryAddress);
}
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index a442a3b98..78d43d729 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -8,46 +8,57 @@
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
namespace Kernel {
-KAddressArbiter::KAddressArbiter(Core::System& system_)
- : system{system_}, kernel{system.Kernel()} {}
+KAddressArbiter::KAddressArbiter(Core::System& system)
+ : m_system{system}, m_kernel{system.Kernel()} {}
KAddressArbiter::~KAddressArbiter() = default;
namespace {
-bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
- *out = system.Memory().Read32(address);
+bool ReadFromUser(KernelCore& kernel, s32* out, KProcessAddress address) {
+ *out = GetCurrentMemory(kernel).Read32(GetInteger(address));
return true;
}
-bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
+bool DecrementIfLessThan(Core::System& system, s32* out, KProcessAddress address, s32 value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
- // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+    // NOTE: If the scheduler lock is not held here, interrupts must be disabled.
+ // KScopedInterruptDisable di;
+
// TODO(bunnei): We should call CanAccessAtomic(..) here.
- // Load the value from the address.
- const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+ s32 current_value{};
+
+ while (true) {
+ // Load the value from the address.
+ current_value =
+ static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));
+
+ // Compare it to the desired one.
+ if (current_value < value) {
+ // If less than, we want to try to decrement.
+ const s32 decrement_value = current_value - 1;
- // Compare it to the desired one.
- if (current_value < value) {
- // If less than, we want to try to decrement.
- const s32 decrement_value = current_value - 1;
+ // Decrement and try to store.
+ if (monitor.ExclusiveWrite32(current_core, GetInteger(address),
+ static_cast<u32>(decrement_value))) {
+ break;
+ }
- // Decrement and try to store.
- if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
// If we failed to store, try again.
- DecrementIfLessThan(system, out, address, value);
+ } else {
+            // Otherwise, clear our exclusive hold and finish.
+ monitor.ClearExclusive(current_core);
+ break;
}
- } else {
- // Otherwise, clear our exclusive hold and finish
- monitor.ClearExclusive(current_core);
}
// We're done.
@@ -55,28 +66,39 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
return true;
}
-bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
+bool UpdateIfEqual(Core::System& system, s32* out, KProcessAddress address, s32 value,
+ s32 new_value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
- // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+    // NOTE: If the scheduler lock is not held here, interrupts must be disabled.
+ // KScopedInterruptDisable di;
+
// TODO(bunnei): We should call CanAccessAtomic(..) here.
- // Load the value from the address.
- const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+ s32 current_value{};
- // Compare it to the desired one.
- if (current_value == value) {
- // If equal, we want to try to write the new value.
+ // Load the value from the address.
+ while (true) {
+ current_value =
+ static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));
+
+ // Compare it to the desired one.
+ if (current_value == value) {
+ // If equal, we want to try to write the new value.
+
+ // Try to store.
+ if (monitor.ExclusiveWrite32(current_core, GetInteger(address),
+ static_cast<u32>(new_value))) {
+ break;
+ }
- // Try to store.
- if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
// If we failed to store, try again.
- UpdateIfEqual(system, out, address, value, new_value);
+ } else {
+ // Otherwise, clear our exclusive hold and finish.
+ monitor.ClearExclusive(current_core);
+ break;
}
- } else {
- // Otherwise, clear our exclusive hold and finish.
- monitor.ClearExclusive(current_core);
}
// We're done.
@@ -86,8 +108,8 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
public:
- explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
- : KThreadQueue(kernel_), m_tree(t) {}
+ explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel, KAddressArbiter::ThreadTree* t)
+ : KThreadQueue(kernel), m_tree(t) {}
void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// If the thread is waiting on an address arbiter, remove it from the tree.
@@ -101,19 +123,19 @@ public:
}
private:
- KAddressArbiter::ThreadTree* m_tree;
+ KAddressArbiter::ThreadTree* m_tree{};
};
} // namespace
-Result KAddressArbiter::Signal(VAddr addr, s32 count) {
+Result KAddressArbiter::Signal(uint64_t addr, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
- auto it = thread_tree.nfind_key({addr, -1});
- while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ auto it = m_tree.nfind_key({addr, -1});
+ while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
KThread* target_thread = std::addressof(*it);
@@ -122,31 +144,27 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();
- it = thread_tree.erase(it);
+ it = m_tree.erase(it);
++num_waiters;
}
}
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// Check the userspace value.
s32 user_value{};
- if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) {
- LOG_ERROR(Kernel, "Invalid current memory!");
- return ResultInvalidCurrentMemory;
- }
- if (user_value != value) {
- return ResultInvalidState;
- }
+ R_UNLESS(UpdateIfEqual(m_system, std::addressof(user_value), addr, value, value + 1),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(user_value == value, ResultInvalidState);
- auto it = thread_tree.nfind_key({addr, -1});
- while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ auto it = m_tree.nfind_key({addr, -1});
+ while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
KThread* target_thread = std::addressof(*it);
@@ -155,33 +173,33 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();
- it = thread_tree.erase(it);
+ it = m_tree.erase(it);
++num_waiters;
}
}
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
- [[maybe_unused]] const KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
- auto it = thread_tree.nfind_key({addr, -1});
+ auto it = m_tree.nfind_key({addr, -1});
// Determine the updated value.
s32 new_value{};
if (count <= 0) {
- if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+ if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
new_value = value - 2;
} else {
new_value = value + 1;
}
} else {
- if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+ if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
auto tmp_it = it;
s32 tmp_num_waiters{};
- while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
+ while (++tmp_it != m_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
if (tmp_num_waiters++ >= count) {
break;
}
@@ -201,20 +219,15 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
s32 user_value{};
bool succeeded{};
if (value != new_value) {
- succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value);
+ succeeded = UpdateIfEqual(m_system, std::addressof(user_value), addr, value, new_value);
} else {
- succeeded = ReadFromUser(system, &user_value, addr);
+ succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
}
- if (!succeeded) {
- LOG_ERROR(Kernel, "Invalid current memory!");
- return ResultInvalidCurrentMemory;
- }
- if (user_value != value) {
- return ResultInvalidState;
- }
+ R_UNLESS(succeeded, ResultInvalidCurrentMemory);
+ R_UNLESS(user_value == value, ResultInvalidState);
- while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
KThread* target_thread = std::addressof(*it);
@@ -223,58 +236,60 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();
- it = thread_tree.erase(it);
+ it = m_tree.erase(it);
++num_waiters;
}
}
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
+Result KAddressArbiter::WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = GetCurrentThreadPointer(kernel);
- ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+ KHardwareTimer* timer{};
+ ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));
{
- KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
+ KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};
// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
- return ResultTerminationRequested;
+ R_THROW(ResultTerminationRequested);
}
// Read the value from userspace.
s32 user_value{};
bool succeeded{};
if (decrement) {
- succeeded = DecrementIfLessThan(system, &user_value, addr, value);
+ succeeded = DecrementIfLessThan(m_system, std::addressof(user_value), addr, value);
} else {
- succeeded = ReadFromUser(system, &user_value, addr);
+ succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
}
if (!succeeded) {
slp.CancelSleep();
- return ResultInvalidCurrentMemory;
+ R_THROW(ResultInvalidCurrentMemory);
}
// Check that the value is less than the specified one.
if (user_value >= value) {
slp.CancelSleep();
- return ResultInvalidState;
+ R_THROW(ResultInvalidState);
}
// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
- return ResultTimedOut;
+ R_THROW(ResultTimedOut);
}
// Set the arbiter.
- cur_thread->SetAddressArbiter(&thread_tree, addr);
- thread_tree.insert(*cur_thread);
+ cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
+ m_tree.insert(*cur_thread);
// Wait for the thread to finish.
+ wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
}
@@ -283,44 +298,46 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6
return cur_thread->GetWaitResult();
}
-Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
+Result KAddressArbiter::WaitIfEqual(uint64_t addr, s32 value, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = GetCurrentThreadPointer(kernel);
- ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+ KHardwareTimer* timer{};
+ ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));
{
- KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
+ KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};
// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
- return ResultTerminationRequested;
+ R_THROW(ResultTerminationRequested);
}
// Read the value from userspace.
s32 user_value{};
- if (!ReadFromUser(system, &user_value, addr)) {
+ if (!ReadFromUser(m_kernel, std::addressof(user_value), addr)) {
slp.CancelSleep();
- return ResultInvalidCurrentMemory;
+ R_THROW(ResultInvalidCurrentMemory);
}
// Check that the value is equal.
if (value != user_value) {
slp.CancelSleep();
- return ResultInvalidState;
+ R_THROW(ResultInvalidState);
}
// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
- return ResultTimedOut;
+ R_THROW(ResultTimedOut);
}
// Set the arbiter.
- cur_thread->SetAddressArbiter(&thread_tree, addr);
- thread_tree.insert(*cur_thread);
+ cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
+ m_tree.insert(*cur_thread);
// Wait for the thread to finish.
+ wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
}
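The rewritten DecrementIfLessThan and UpdateIfEqual replace tail recursion on exclusive-store failure with an explicit retry loop around the exclusive monitor, bounding stack use under contention. A portable sketch of the same decrement-if-less-than semantics, using std::atomic compare-exchange as a stand-in for the emulated ExclusiveRead32/ExclusiveWrite32 pair (an analogy, not the monitor itself):

#include <atomic>
#include <cstdint>

// Decrement *word only while it is less than `value`; always reports the
// last value observed before any store, matching the loop in the diff.
bool DecrementIfLessThan(std::atomic<int32_t>& word, int32_t* out, int32_t value) {
    int32_t current = word.load(std::memory_order_relaxed);
    while (current < value) {
        // On failure, compare_exchange_weak refreshes `current` with the
        // latest value, playing the role of re-running ExclusiveRead32.
        if (word.compare_exchange_weak(current, current - 1, std::memory_order_relaxed)) {
            break;
        }
    }
    *out = current;
    return true;
}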
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index e4085ae22..3b70e1ab2 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -22,47 +22,46 @@ class KAddressArbiter {
public:
using ThreadTree = KConditionVariable::ThreadTree;
- explicit KAddressArbiter(Core::System& system_);
+ explicit KAddressArbiter(Core::System& system);
~KAddressArbiter();
- [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
+ Result SignalToAddress(uint64_t addr, Svc::SignalType type, s32 value, s32 count) {
switch (type) {
case Svc::SignalType::Signal:
- return Signal(addr, count);
+ R_RETURN(this->Signal(addr, count));
case Svc::SignalType::SignalAndIncrementIfEqual:
- return SignalAndIncrementIfEqual(addr, value, count);
+ R_RETURN(this->SignalAndIncrementIfEqual(addr, value, count));
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
- return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
+ R_RETURN(this->SignalAndModifyByWaitingCountIfEqual(addr, value, count));
+ default:
+ UNREACHABLE();
}
- ASSERT(false);
- return ResultUnknown;
}
- [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
- s64 timeout) {
+ Result WaitForAddress(uint64_t addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
- return WaitIfLessThan(addr, value, false, timeout);
+ R_RETURN(WaitIfLessThan(addr, value, false, timeout));
case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
- return WaitIfLessThan(addr, value, true, timeout);
+ R_RETURN(WaitIfLessThan(addr, value, true, timeout));
case Svc::ArbitrationType::WaitIfEqual:
- return WaitIfEqual(addr, value, timeout);
+ R_RETURN(WaitIfEqual(addr, value, timeout));
+ default:
+ UNREACHABLE();
}
- ASSERT(false);
- return ResultUnknown;
}
private:
- [[nodiscard]] Result Signal(VAddr addr, s32 count);
- [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
- [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
- [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
- [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+ Result Signal(uint64_t addr, s32 count);
+ Result SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count);
+ Result SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count);
+ Result WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout);
+ Result WaitIfEqual(uint64_t addr, s32 value, s64 timeout);
- ThreadTree thread_tree;
-
- Core::System& system;
- KernelCore& kernel;
+private:
+ ThreadTree m_tree;
+ Core::System& m_system;
+ KernelCore& m_kernel;
};
} // namespace Kernel
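The header now exposes the futex-like surface of this object: waiters key themselves on a userspace address in the shared thread tree, and signallers wake up to `count` of them. A hedged usage sketch of the post-diff API (the arbiter reference, address, and the wait-forever timeout convention are illustrative):

// One thread parks itself while the 32-bit word at `addr` equals 0;
// another increments the word from 0 to 1 and wakes a single waiter.
void Waiter(Kernel::KAddressArbiter& arbiter, uint64_t addr) {
    // Assumes a negative timeout is treated as wait-forever.
    (void)arbiter.WaitForAddress(addr, Kernel::Svc::ArbitrationType::WaitIfEqual, 0, -1);
}

void Signaller(Kernel::KAddressArbiter& arbiter, uint64_t addr) {
    (void)arbiter.SignalToAddress(addr, Kernel::Svc::SignalType::SignalAndIncrementIfEqual,
                                  0, 1);
}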
diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp
index 3e612a207..32173e52b 100644
--- a/src/core/hle/kernel/k_address_space_info.cpp
+++ b/src/core/hle/kernel/k_address_space_info.cpp
@@ -23,86 +23,38 @@ constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{
{ .bit_width = 32, .address = Size_Invalid, .size = 1_GiB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 36, .address = 128_MiB , .size = 2_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::MapSmall, },
{ .bit_width = 36, .address = 2_GiB , .size = 64_GiB - 2_GiB , .type = KAddressSpaceInfo::Type::MapLarge, },
- { .bit_width = 36, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Heap, },
+ { .bit_width = 36, .address = Size_Invalid, .size = 8_GiB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 36, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Alias, },
+#ifdef ANDROID
+ // On Android, we use a 38-bit address space (a 256 GiB map region) due to memory limitations. This safely truncates the ASLR region.
+ { .bit_width = 39, .address = 128_MiB , .size = 256_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::Map39Bit, },
+#else
{ .bit_width = 39, .address = 128_MiB , .size = 512_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::Map39Bit, },
+#endif
{ .bit_width = 39, .address = Size_Invalid, .size = 64_GiB , .type = KAddressSpaceInfo::Type::MapSmall },
- { .bit_width = 39, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Heap, },
+ { .bit_width = 39, .address = Size_Invalid, .size = 8_GiB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 39, .address = Size_Invalid, .size = 64_GiB , .type = KAddressSpaceInfo::Type::Alias, },
{ .bit_width = 39, .address = Size_Invalid, .size = 2_GiB , .type = KAddressSpaceInfo::Type::Stack, },
}};
// clang-format on
-constexpr bool IsAllowedIndexForAddress(std::size_t index) {
- return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Size_Invalid;
-}
-
-using IndexArray =
- std::array<std::size_t, static_cast<std::size_t>(KAddressSpaceInfo::Type::Count)>;
-
-constexpr IndexArray AddressSpaceIndices32Bit{
- 0, 1, 0, 2, 0, 3,
-};
-
-constexpr IndexArray AddressSpaceIndices36Bit{
- 4, 5, 4, 6, 4, 7,
-};
-
-constexpr IndexArray AddressSpaceIndices39Bit{
- 9, 8, 8, 10, 12, 11,
-};
-
-constexpr bool IsAllowed32BitType(KAddressSpaceInfo::Type type) {
- return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit &&
- type != KAddressSpaceInfo::Type::Stack;
-}
-
-constexpr bool IsAllowed36BitType(KAddressSpaceInfo::Type type) {
- return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit &&
- type != KAddressSpaceInfo::Type::Stack;
-}
-
-constexpr bool IsAllowed39BitType(KAddressSpaceInfo::Type type) {
- return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::MapLarge;
+const KAddressSpaceInfo& GetAddressSpaceInfo(size_t width, KAddressSpaceInfo::Type type) {
+ for (auto& info : AddressSpaceInfos) {
+ if (info.bit_width == width && info.type == type) {
+ return info;
+ }
+ }
+ UNREACHABLE_MSG("Could not find AddressSpaceInfo");
}
} // namespace
-u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
- const std::size_t index{static_cast<std::size_t>(type)};
- switch (width) {
- case 32:
- ASSERT(IsAllowed32BitType(type));
- ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index]));
- return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].address;
- case 36:
- ASSERT(IsAllowed36BitType(type));
- ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index]));
- return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].address;
- case 39:
- ASSERT(IsAllowed39BitType(type));
- ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
- return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
- }
- ASSERT(false);
- return 0;
+std::size_t KAddressSpaceInfo::GetAddressSpaceStart(size_t width, KAddressSpaceInfo::Type type) {
+ return GetAddressSpaceInfo(width, type).address;
}
-std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
- const std::size_t index{static_cast<std::size_t>(type)};
- switch (width) {
- case 32:
- ASSERT(IsAllowed32BitType(type));
- return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].size;
- case 36:
- ASSERT(IsAllowed36BitType(type));
- return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].size;
- case 39:
- ASSERT(IsAllowed39BitType(type));
- return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
- }
- ASSERT(false);
- return 0;
+std::size_t KAddressSpaceInfo::GetAddressSpaceSize(size_t width, KAddressSpaceInfo::Type type) {
+ return GetAddressSpaceInfo(width, type).size;
}
} // namespace Kernel
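The rewrite collapses the three per-width index tables and their IsAllowed*Type checks into one linear scan keyed on (bit_width, type). A short usage sketch against the post-diff API, with values read off the table above:

#include "core/hle/kernel/k_address_space_info.h"

void QueryAddressSpace() {
    using Kernel::KAddressSpaceInfo;

    // 39-bit processes: the alias region is 64 GiB with no fixed base...
    const std::size_t alias_size =
        KAddressSpaceInfo::GetAddressSpaceSize(39, KAddressSpaceInfo::Type::Alias);

    // ...while the Map39Bit region starts at 128 MiB.
    const std::size_t map_start =
        KAddressSpaceInfo::GetAddressSpaceStart(39, KAddressSpaceInfo::Type::Map39Bit);

    (void)alias_size;
    (void)map_start;

    // A (width, type) pair absent from the table now halts at
    // UNREACHABLE_MSG instead of falling through and returning 0.
}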
diff --git a/src/core/hle/kernel/k_address_space_info.h b/src/core/hle/kernel/k_address_space_info.h
index 69e9d77f2..9a26f6b90 100644
--- a/src/core/hle/kernel/k_address_space_info.h
+++ b/src/core/hle/kernel/k_address_space_info.h
@@ -18,7 +18,7 @@ struct KAddressSpaceInfo final {
Count,
};
- static u64 GetAddressSpaceStart(std::size_t width, Type type);
+ static std::size_t GetAddressSpaceStart(std::size_t width, Type type);
static std::size_t GetAddressSpaceSize(std::size_t width, Type type);
const std::size_t bit_width{};
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
index b58716e90..07a5a822c 100644
--- a/src/core/hle/kernel/k_affinity_mask.h
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -13,40 +13,40 @@ class KAffinityMask {
public:
constexpr KAffinityMask() = default;
- [[nodiscard]] constexpr u64 GetAffinityMask() const {
- return this->mask;
+ constexpr u64 GetAffinityMask() const {
+ return m_mask;
}
constexpr void SetAffinityMask(u64 new_mask) {
ASSERT((new_mask & ~AllowedAffinityMask) == 0);
- this->mask = new_mask;
+ m_mask = new_mask;
}
- [[nodiscard]] constexpr bool GetAffinity(s32 core) const {
- return (this->mask & GetCoreBit(core)) != 0;
+ constexpr bool GetAffinity(s32 core) const {
+ return (m_mask & GetCoreBit(core)) != 0;
}
constexpr void SetAffinity(s32 core, bool set) {
if (set) {
- this->mask |= GetCoreBit(core);
+ m_mask |= GetCoreBit(core);
} else {
- this->mask &= ~GetCoreBit(core);
+ m_mask &= ~GetCoreBit(core);
}
}
constexpr void SetAll() {
- this->mask = AllowedAffinityMask;
+ m_mask = AllowedAffinityMask;
}
private:
- [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) {
+ static constexpr u64 GetCoreBit(s32 core) {
ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
return (1ULL << core);
}
static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;
- u64 mask{};
+ u64 m_mask{};
};
} // namespace Kernel
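KAffinityMask stays a plain bitmask over the CPU cores; the diff only renames the storage to m_mask and drops the [[nodiscard]] markers. A brief usage sketch:

#include "core/hle/kernel/k_affinity_mask.h"

void PinToCores() {
    Kernel::KAffinityMask mask;

    // Allow scheduling on cores 0 and 2 only.
    mask.SetAffinity(0, true);
    mask.SetAffinity(2, true);

    const u64 raw = mask.GetAffinityMask(); // 0b0101
    const bool on_core1 = mask.GetAffinity(1); // false

    (void)raw;
    (void)on_core1;
}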
diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp
index 691af8ccb..0ae42c95c 100644
--- a/src/core/hle/kernel/k_auto_object.cpp
+++ b/src/core/hle/kernel/k_auto_object.cpp
@@ -12,11 +12,11 @@ KAutoObject* KAutoObject::Create(KAutoObject* obj) {
}
void KAutoObject::RegisterWithKernel() {
- kernel.RegisterKernelObject(this);
+ m_kernel.RegisterKernelObject(this);
}
void KAutoObject::UnregisterWithKernel() {
- kernel.UnregisterKernelObject(this);
+ m_kernel.UnregisterKernelObject(this);
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 2827763d5..f384b1568 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -24,9 +24,7 @@ private:
friend class ::Kernel::KClassTokenGenerator; \
static constexpr inline auto ObjectType = ::Kernel::KClassTokenGenerator::ObjectType::CLASS; \
static constexpr inline const char* const TypeName = #CLASS; \
- static constexpr inline ClassTokenType ClassToken() { \
- return ::Kernel::ClassToken<CLASS>; \
- } \
+ static constexpr inline ClassTokenType ClassToken() { return ::Kernel::ClassToken<CLASS>; } \
\
public: \
YUZU_NON_COPYABLE(CLASS); \
@@ -37,15 +35,9 @@ public:
constexpr ClassTokenType Token = ClassToken(); \
return TypeObj(TypeName, Token); \
} \
- static constexpr const char* GetStaticTypeName() { \
- return TypeName; \
- } \
- virtual TypeObj GetTypeObj() ATTRIBUTE { \
- return GetStaticTypeObj(); \
- } \
- virtual const char* GetTypeName() ATTRIBUTE { \
- return GetStaticTypeName(); \
- } \
+ static constexpr const char* GetStaticTypeName() { return TypeName; } \
+ virtual TypeObj GetTypeObj() ATTRIBUTE { return GetStaticTypeObj(); } \
+ virtual const char* GetTypeName() ATTRIBUTE { return GetStaticTypeName(); } \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
@@ -88,7 +80,7 @@ private:
KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
public:
- explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
+ explicit KAutoObject(KernelCore& kernel) : m_kernel(kernel) {
RegisterWithKernel();
}
virtual ~KAutoObject() = default;
@@ -172,17 +164,12 @@ public:
}
}
- const std::string& GetName() const {
- return name;
- }
-
private:
void RegisterWithKernel();
void UnregisterWithKernel();
protected:
- KernelCore& kernel;
- std::string name;
+ KernelCore& m_kernel;
private:
std::atomic<u32> m_ref_count{};
@@ -192,11 +179,11 @@ class KAutoObjectWithListContainer;
class KAutoObjectWithList : public KAutoObject, public boost::intrusive::set_base_hook<> {
public:
- explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_) {}
+ explicit KAutoObjectWithList(KernelCore& kernel) : KAutoObject(kernel) {}
static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
- const u64 lid = lhs.GetId();
- const u64 rid = rhs.GetId();
+ const uintptr_t lid = reinterpret_cast<uintptr_t>(std::addressof(lhs));
+ const uintptr_t rid = reinterpret_cast<uintptr_t>(std::addressof(rhs));
if (lid < rid) {
return -1;
@@ -208,7 +195,7 @@ public:
}
friend bool operator<(const KAutoObjectWithList& left, const KAutoObjectWithList& right) {
- return &left < &right;
+ return KAutoObjectWithList::Compare(left, right) < 0;
}
public:
@@ -216,10 +203,6 @@ public:
return reinterpret_cast<u64>(this);
}
- virtual const std::string& GetName() const {
- return name;
- }
-
private:
friend class KAutoObjectWithListContainer;
};
@@ -245,8 +228,8 @@ public:
}
template <typename U>
- requires(std::derived_from<T, U> ||
- std::derived_from<U, T>) constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
+ requires(std::derived_from<T, U> || std::derived_from<U, T>)
+ constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
if constexpr (std::derived_from<U, T>) {
// Upcast.
m_obj = rhs.m_obj;
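The Compare change keys the intrusive set on object addresses (which GetId already returned) and re-expresses operator< through Compare, so the two orderings can no longer drift apart. A reduced sketch of the pattern, using a hypothetical Node type:

#include <cstdint>
#include <memory>

struct Node {
    // Address identity is a cheap, stable, unique key for intrusive
    // containers; no separate ID allocation is needed.
    static int Compare(const Node& lhs, const Node& rhs) {
        const uintptr_t lid = reinterpret_cast<uintptr_t>(std::addressof(lhs));
        const uintptr_t rid = reinterpret_cast<uintptr_t>(std::addressof(rhs));
        if (lid < rid) {
            return -1;
        } else if (lid > rid) {
            return 1;
        } else {
            return 0;
        }
    }

    // Defining operator< in terms of Compare guarantees the orderings
    // agree, which boost::intrusive::set relies on for correctness.
    friend bool operator<(const Node& lhs, const Node& rhs) {
        return Compare(lhs, rhs) < 0;
    }
};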
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
new file mode 100644
index 000000000..90e4e8fb0
--- /dev/null
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -0,0 +1,358 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_capabilities.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/svc_version.h"
+
+namespace Kernel {
+
+Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
+ // We're initializing an initial process.
+ m_svc_access_flags.reset();
+ m_irq_access_flags.reset();
+ m_debug_capabilities = 0;
+ m_handle_table_size = 0;
+ m_intended_kernel_version = 0;
+ m_program_type = 0;
+
+ // Initial processes may run on all cores.
+ constexpr u64 VirtMask = Core::Hardware::VirtualCoreMask;
+ constexpr u64 PhysMask = Core::Hardware::ConvertVirtualCoreMaskToPhysical(VirtMask);
+
+ m_core_mask = VirtMask;
+ m_phys_core_mask = PhysMask;
+
+ // Initial processes may use any user priority they like.
+ m_priority_mask = ~0xFULL;
+
+ // Here, Nintendo sets the kernel version to the current kernel version.
+ // We will follow suit and set the version to the highest supported kernel version.
+ KernelVersion intended_kernel_version{};
+ intended_kernel_version.major_version.Assign(Svc::SupportedKernelMajorVersion);
+ intended_kernel_version.minor_version.Assign(Svc::SupportedKernelMinorVersion);
+ m_intended_kernel_version = intended_kernel_version.raw;
+
+ // Parse the capabilities array.
+ R_RETURN(this->SetCapabilities(kern_caps, page_table));
+}
+
+Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
+ // We're initializing a user process.
+ m_svc_access_flags.reset();
+ m_irq_access_flags.reset();
+ m_debug_capabilities = 0;
+ m_handle_table_size = 0;
+ m_intended_kernel_version = 0;
+ m_program_type = 0;
+
+ // User processes must specify what cores/priorities they can use.
+ m_core_mask = 0;
+ m_priority_mask = 0;
+
+ // Parse the user capabilities array.
+ R_RETURN(this->SetCapabilities(user_caps, page_table));
+}
+
+Result KCapabilities::SetCorePriorityCapability(const u32 cap) {
+ // We can't set core/priority if we've already set them.
+ R_UNLESS(m_core_mask == 0, ResultInvalidArgument);
+ R_UNLESS(m_priority_mask == 0, ResultInvalidArgument);
+
+ // Validate the core/priority.
+ CorePriority pack{cap};
+ const u32 min_core = pack.minimum_core_id;
+ const u32 max_core = pack.maximum_core_id;
+ const u32 max_prio = pack.lowest_thread_priority;
+ const u32 min_prio = pack.highest_thread_priority;
+
+ R_UNLESS(min_core <= max_core, ResultInvalidCombination);
+ R_UNLESS(min_prio <= max_prio, ResultInvalidCombination);
+ R_UNLESS(max_core < Core::Hardware::NumVirtualCores, ResultInvalidCoreId);
+
+ ASSERT(max_prio < Common::BitSize<u64>());
+
+ // Set core mask.
+ for (auto core_id = min_core; core_id <= max_core; core_id++) {
+ m_core_mask |= (1ULL << core_id);
+ }
+ ASSERT((m_core_mask & Core::Hardware::VirtualCoreMask) == m_core_mask);
+
+ // Set physical core mask.
+ m_phys_core_mask = Core::Hardware::ConvertVirtualCoreMaskToPhysical(m_core_mask);
+
+ // Set priority mask.
+ for (auto prio = min_prio; prio <= max_prio; prio++) {
+ m_priority_mask |= (1ULL << prio);
+ }
+
+ // We must have some core/priority we can use.
+ R_UNLESS(m_core_mask != 0, ResultInvalidArgument);
+ R_UNLESS(m_priority_mask != 0, ResultInvalidArgument);
+
+ // Processes must not have access to kernel thread priorities.
+ R_UNLESS((m_priority_mask & 0xF) == 0, ResultInvalidArgument);
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
+ // Validate the index.
+ SyscallMask pack{cap};
+ const u32 mask = pack.mask;
+ const u32 index = pack.index;
+
+ const u32 index_flag = (1U << index);
+ R_UNLESS((set_svc & index_flag) == 0, ResultInvalidCombination);
+ set_svc |= index_flag;
+
+ // Set SVCs.
+ for (size_t i = 0; i < decltype(SyscallMask::mask)::bits; i++) {
+ const u32 svc_id = static_cast<u32>(decltype(SyscallMask::mask)::bits * index + i);
+ if (mask & (1U << i)) {
+ R_UNLESS(this->SetSvcAllowed(svc_id), ResultOutOfRange);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
+ const auto range_pack = MapRange{cap};
+ const auto size_pack = MapRangeSize{size_cap};
+
+ // Get/validate address/size
+ const u64 phys_addr = range_pack.address.Value() * PageSize;
+
+ // Validate reserved bits are unused.
+ R_UNLESS(size_pack.reserved.Value() == 0, ResultOutOfRange);
+
+ const size_t num_pages = size_pack.pages;
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(num_pages != 0, ResultInvalidSize);
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
+
+ // Do the mapping.
+ [[maybe_unused]] const KMemoryPermission perm = range_pack.read_only.Value()
+ ? KMemoryPermission::UserRead
+ : KMemoryPermission::UserReadWrite;
+    if (size_pack.normal) {
+ // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
+ } else {
+ // R_RETURN(page_table->MapIo(phys_addr, size, perm));
+ }
+
+ UNIMPLEMENTED();
+ R_SUCCEED();
+}
+
+Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
+ // Get/validate address/size
+ const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
+ const size_t num_pages = 1;
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(num_pages != 0, ResultInvalidSize);
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
+
+ // Do the mapping.
+ // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
+
+ UNIMPLEMENTED();
+ R_SUCCEED();
+}
+
+template <typename F>
+Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
+ // Define the allowed memory regions.
+ constexpr std::array<KMemoryRegionType, 4> MemoryRegions{
+ KMemoryRegionType_None,
+ KMemoryRegionType_KernelTraceBuffer,
+ KMemoryRegionType_OnMemoryBootImage,
+ KMemoryRegionType_DTB,
+ };
+
+ // Extract regions/read only.
+ const MapRegion pack{cap};
+ const std::array<RegionType, 3> types{pack.region0, pack.region1, pack.region2};
+ const std::array<u32, 3> ro{pack.read_only0, pack.read_only1, pack.read_only2};
+
+ for (size_t i = 0; i < types.size(); i++) {
+ const auto type = types[i];
+ const auto perm = ro[i] ? KMemoryPermission::UserRead : KMemoryPermission::UserReadWrite;
+ switch (type) {
+ case RegionType::NoMapping:
+ break;
+ case RegionType::KernelTraceBuffer:
+ case RegionType::OnMemoryBootImage:
+ case RegionType::DTB:
+ R_TRY(f(MemoryRegions[static_cast<u32>(type)], perm));
+ break;
+ default:
+ R_THROW(ResultNotFound);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
+ // Map each region into the process's page table.
+ return ProcessMapRegionCapability(
+ cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+ // R_RETURN(page_table->MapRegion(region_type, perm));
+ UNIMPLEMENTED();
+ R_SUCCEED();
+ });
+}
+
+Result KCapabilities::CheckMapRegion(KernelCore& kernel, const u32 cap) {
+ // Check that each region has a physical backing store.
+ return ProcessMapRegionCapability(
+ cap, [&](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+ R_UNLESS(kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(
+ region_type) != nullptr,
+ ResultOutOfRange);
+ R_SUCCEED();
+ });
+}
+
+Result KCapabilities::SetInterruptPairCapability(const u32 cap) {
+ // Extract interrupts.
+ const InterruptPair pack{cap};
+ const std::array<u32, 2> ids{pack.interrupt_id0, pack.interrupt_id1};
+
+ for (size_t i = 0; i < ids.size(); i++) {
+ if (ids[i] != PaddingInterruptId) {
+ UNIMPLEMENTED();
+ // R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(ids[i]), ResultOutOfRange);
+ // R_UNLESS(this->SetInterruptPermitted(ids[i]), ResultOutOfRange);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetProgramTypeCapability(const u32 cap) {
+ // Validate.
+ const ProgramType pack{cap};
+ R_UNLESS(pack.reserved == 0, ResultReservedUsed);
+
+ m_program_type = pack.type;
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetKernelVersionCapability(const u32 cap) {
+ // Ensure we haven't set our version before.
+ R_UNLESS(KernelVersion{m_intended_kernel_version}.major_version == 0, ResultInvalidArgument);
+
+ // Set, ensure that we set a valid version.
+ m_intended_kernel_version = cap;
+ R_UNLESS(KernelVersion{m_intended_kernel_version}.major_version != 0, ResultInvalidArgument);
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetHandleTableCapability(const u32 cap) {
+ // Validate.
+ const HandleTable pack{cap};
+ R_UNLESS(pack.reserved == 0, ResultReservedUsed);
+
+ m_handle_table_size = pack.size;
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
+ // Validate.
+ const DebugFlags pack{cap};
+ R_UNLESS(pack.reserved == 0, ResultReservedUsed);
+
+ DebugFlags debug_capabilities{m_debug_capabilities};
+ debug_capabilities.allow_debug.Assign(pack.allow_debug);
+ debug_capabilities.force_debug.Assign(pack.force_debug);
+ m_debug_capabilities = debug_capabilities.raw;
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
+ KPageTable* page_table) {
+ // Validate this is a capability we can act on.
+ const auto type = GetCapabilityType(cap);
+ R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
+
+ // If the type is padding, we have no work to do.
+ R_SUCCEED_IF(type == CapabilityType::Padding);
+
+ // Check that we haven't already processed this capability.
+ const auto flag = GetCapabilityFlag(type);
+ R_UNLESS(((set_flags & InitializeOnceFlags) & flag) == 0, ResultInvalidCombination);
+ set_flags |= flag;
+
+ // Process the capability.
+ switch (type) {
+ case CapabilityType::CorePriority:
+ R_RETURN(this->SetCorePriorityCapability(cap));
+ case CapabilityType::SyscallMask:
+ R_RETURN(this->SetSyscallMaskCapability(cap, set_svc));
+ case CapabilityType::MapIoPage:
+ R_RETURN(this->MapIoPage_(cap, page_table));
+ case CapabilityType::MapRegion:
+ R_RETURN(this->MapRegion_(cap, page_table));
+ case CapabilityType::InterruptPair:
+ R_RETURN(this->SetInterruptPairCapability(cap));
+ case CapabilityType::ProgramType:
+ R_RETURN(this->SetProgramTypeCapability(cap));
+ case CapabilityType::KernelVersion:
+ R_RETURN(this->SetKernelVersionCapability(cap));
+ case CapabilityType::HandleTable:
+ R_RETURN(this->SetHandleTableCapability(cap));
+ case CapabilityType::DebugFlags:
+ R_RETURN(this->SetDebugFlagsCapability(cap));
+ default:
+ R_THROW(ResultInvalidArgument);
+ }
+}
+
+Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
+ u32 set_flags = 0, set_svc = 0;
+
+ for (size_t i = 0; i < caps.size(); i++) {
+ const u32 cap{caps[i]};
+
+ if (GetCapabilityType(cap) == CapabilityType::MapRange) {
+ // Check that the pair cap exists.
+ R_UNLESS((++i) < caps.size(), ResultInvalidCombination);
+
+ // Check the pair cap is a map range cap.
+ const u32 size_cap{caps[i]};
+ R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange,
+ ResultInvalidCombination);
+
+ // Map the range.
+ R_TRY(this->MapRange_(cap, size_cap, page_table));
+ } else {
+ R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::CheckCapabilities(KernelCore& kernel, std::span<const u32> caps) {
+ for (auto cap : caps) {
+ // Check the capability refers to a valid region.
+ if (GetCapabilityType(cap) == CapabilityType::MapRegion) {
+ R_TRY(CheckMapRegion(kernel, cap));
+ }
+ }
+
+ R_SUCCEED();
+}
+
+} // namespace Kernel
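Capability descriptors encode their type as a run of trailing one-bits: type ID n occupies the low n bits as ones, bit n is clear, and the payload sits above. GetCapabilityType's expression (~value & (value + 1)) - 1 isolates that run. A standalone worked example of the trick (re-implemented here for illustration):

#include <cstdint>

// (~v & (v + 1)) isolates the lowest clear bit of v; subtracting one
// turns it back into the trailing-ones pattern that names the type.
constexpr uint32_t DecodeCapabilityType(uint32_t value) {
    return (~value & (value + 1)) - 1;
}

// CorePriority: id = (1 << 3) - 1 = 0b0111, payload above bit 3.
constexpr uint32_t core_priority = 0b0111;
constexpr uint32_t cap = (42u << 4) | core_priority;
static_assert(DecodeCapabilityType(cap) == core_priority);

// An all-ones word decodes to ~0u, i.e. CapabilityType::Padding.
static_assert(DecodeCapabilityType(~0u) == ~0u);

// Any word with bit 0 clear decodes to 0, i.e. CapabilityType::Invalid.
static_assert(DecodeCapabilityType(0b1010u) == 0);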
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
new file mode 100644
index 000000000..de766c811
--- /dev/null
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -0,0 +1,295 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <bitset>
+#include <span>
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KPageTable;
+class KernelCore;
+
+class KCapabilities {
+public:
+ constexpr explicit KCapabilities() = default;
+
+ Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
+ Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
+
+ static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
+
+ constexpr u64 GetCoreMask() const {
+ return m_core_mask;
+ }
+
+ constexpr u64 GetPhysicalCoreMask() const {
+ return m_phys_core_mask;
+ }
+
+ constexpr u64 GetPriorityMask() const {
+ return m_priority_mask;
+ }
+
+ constexpr s32 GetHandleTableSize() const {
+ return m_handle_table_size;
+ }
+
+ constexpr const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
+ return m_svc_access_flags;
+ }
+
+ constexpr bool IsPermittedSvc(u32 id) const {
+ return (id < m_svc_access_flags.size()) && m_svc_access_flags[id];
+ }
+
+ constexpr bool IsPermittedInterrupt(u32 id) const {
+ return (id < m_irq_access_flags.size()) && m_irq_access_flags[id];
+ }
+
+ constexpr bool IsPermittedDebug() const {
+ return DebugFlags{m_debug_capabilities}.allow_debug.Value() != 0;
+ }
+
+ constexpr bool CanForceDebug() const {
+ return DebugFlags{m_debug_capabilities}.force_debug.Value() != 0;
+ }
+
+ constexpr u32 GetIntendedKernelMajorVersion() const {
+ return KernelVersion{m_intended_kernel_version}.major_version;
+ }
+
+ constexpr u32 GetIntendedKernelMinorVersion() const {
+ return KernelVersion{m_intended_kernel_version}.minor_version;
+ }
+
+private:
+ static constexpr size_t InterruptIdCount = 0x400;
+ using InterruptFlagSet = std::bitset<InterruptIdCount>;
+
+ enum class CapabilityType : u32 {
+ CorePriority = (1U << 3) - 1,
+ SyscallMask = (1U << 4) - 1,
+ MapRange = (1U << 6) - 1,
+ MapIoPage = (1U << 7) - 1,
+ MapRegion = (1U << 10) - 1,
+ InterruptPair = (1U << 11) - 1,
+ ProgramType = (1U << 13) - 1,
+ KernelVersion = (1U << 14) - 1,
+ HandleTable = (1U << 15) - 1,
+ DebugFlags = (1U << 16) - 1,
+
+ Invalid = 0U,
+ Padding = ~0U,
+ };
+
+ using RawCapabilityValue = u32;
+
+ static constexpr CapabilityType GetCapabilityType(const RawCapabilityValue value) {
+ return static_cast<CapabilityType>((~value & (value + 1)) - 1);
+ }
+
+ static constexpr u32 GetCapabilityFlag(CapabilityType type) {
+ return static_cast<u32>(type) + 1;
+ }
+
+ template <CapabilityType Type>
+ static constexpr inline u32 CapabilityFlag = static_cast<u32>(Type) + 1;
+
+ template <CapabilityType Type>
+ static constexpr inline u32 CapabilityId = std::countr_zero(CapabilityFlag<Type>);
+
+ union CorePriority {
+ static_assert(CapabilityId<CapabilityType::CorePriority> + 1 == 4);
+
+ RawCapabilityValue raw;
+ BitField<0, 4, CapabilityType> id;
+ BitField<4, 6, u32> lowest_thread_priority;
+ BitField<10, 6, u32> highest_thread_priority;
+ BitField<16, 8, u32> minimum_core_id;
+ BitField<24, 8, u32> maximum_core_id;
+ };
+
+ union SyscallMask {
+ static_assert(CapabilityId<CapabilityType::SyscallMask> + 1 == 5);
+
+ RawCapabilityValue raw;
+ BitField<0, 5, CapabilityType> id;
+ BitField<5, 24, u32> mask;
+ BitField<29, 3, u32> index;
+ };
+
+ // #undef MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES
+ static constexpr u64 PhysicalMapAllowedMask = (1ULL << 36) - 1;
+
+ union MapRange {
+ static_assert(CapabilityId<CapabilityType::MapRange> + 1 == 7);
+
+ RawCapabilityValue raw;
+ BitField<0, 7, CapabilityType> id;
+ BitField<7, 24, u32> address;
+ BitField<31, 1, u32> read_only;
+ };
+
+ union MapRangeSize {
+ static_assert(CapabilityId<CapabilityType::MapRange> + 1 == 7);
+
+ RawCapabilityValue raw;
+ BitField<0, 7, CapabilityType> id;
+ BitField<7, 20, u32> pages;
+ BitField<27, 4, u32> reserved;
+ BitField<31, 1, u32> normal;
+ };
+
+ union MapIoPage {
+ static_assert(CapabilityId<CapabilityType::MapIoPage> + 1 == 8);
+
+ RawCapabilityValue raw;
+ BitField<0, 8, CapabilityType> id;
+ BitField<8, 24, u32> address;
+ };
+
+ enum class RegionType : u32 {
+ NoMapping = 0,
+ KernelTraceBuffer = 1,
+ OnMemoryBootImage = 2,
+ DTB = 3,
+ };
+
+ union MapRegion {
+ static_assert(CapabilityId<CapabilityType::MapRegion> + 1 == 11);
+
+ RawCapabilityValue raw;
+ BitField<0, 11, CapabilityType> id;
+ BitField<11, 6, RegionType> region0;
+ BitField<17, 1, u32> read_only0;
+ BitField<18, 6, RegionType> region1;
+ BitField<24, 1, u32> read_only1;
+ BitField<25, 6, RegionType> region2;
+ BitField<31, 1, u32> read_only2;
+ };
+
+ union InterruptPair {
+ static_assert(CapabilityId<CapabilityType::InterruptPair> + 1 == 12);
+
+ RawCapabilityValue raw;
+ BitField<0, 12, CapabilityType> id;
+ BitField<12, 10, u32> interrupt_id0;
+ BitField<22, 10, u32> interrupt_id1;
+ };
+
+ union ProgramType {
+ static_assert(CapabilityId<CapabilityType::ProgramType> + 1 == 14);
+
+ RawCapabilityValue raw;
+ BitField<0, 14, CapabilityType> id;
+ BitField<14, 3, u32> type;
+ BitField<17, 15, u32> reserved;
+ };
+
+ union KernelVersion {
+ static_assert(CapabilityId<CapabilityType::KernelVersion> + 1 == 15);
+
+ RawCapabilityValue raw;
+ BitField<0, 15, CapabilityType> id;
+ BitField<15, 4, u32> major_version;
+ BitField<19, 13, u32> minor_version;
+ };
+
+ union HandleTable {
+ static_assert(CapabilityId<CapabilityType::HandleTable> + 1 == 16);
+
+ RawCapabilityValue raw;
+ BitField<0, 16, CapabilityType> id;
+ BitField<16, 10, u32> size;
+ BitField<26, 6, u32> reserved;
+ };
+
+ union DebugFlags {
+ static_assert(CapabilityId<CapabilityType::DebugFlags> + 1 == 17);
+
+ RawCapabilityValue raw;
+ BitField<0, 17, CapabilityType> id;
+ BitField<17, 1, u32> allow_debug;
+ BitField<18, 1, u32> force_debug;
+ BitField<19, 13, u32> reserved;
+ };
+
+ static_assert(sizeof(CorePriority) == 4);
+ static_assert(sizeof(SyscallMask) == 4);
+ static_assert(sizeof(MapRange) == 4);
+ static_assert(sizeof(MapRangeSize) == 4);
+ static_assert(sizeof(MapIoPage) == 4);
+ static_assert(sizeof(MapRegion) == 4);
+ static_assert(sizeof(InterruptPair) == 4);
+ static_assert(sizeof(ProgramType) == 4);
+ static_assert(sizeof(KernelVersion) == 4);
+ static_assert(sizeof(HandleTable) == 4);
+ static_assert(sizeof(DebugFlags) == 4);
+
+ static constexpr u32 InitializeOnceFlags =
+ CapabilityFlag<CapabilityType::CorePriority> | CapabilityFlag<CapabilityType::ProgramType> |
+ CapabilityFlag<CapabilityType::KernelVersion> |
+ CapabilityFlag<CapabilityType::HandleTable> | CapabilityFlag<CapabilityType::DebugFlags>;
+
+ static const u32 PaddingInterruptId = 0x3FF;
+ static_assert(PaddingInterruptId < InterruptIdCount);
+
+private:
+ constexpr bool SetSvcAllowed(u32 id) {
+ if (id < m_svc_access_flags.size()) [[likely]] {
+ m_svc_access_flags[id] = true;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ constexpr bool SetInterruptPermitted(u32 id) {
+ if (id < m_irq_access_flags.size()) [[likely]] {
+ m_irq_access_flags[id] = true;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ Result SetCorePriorityCapability(const u32 cap);
+ Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
+ Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
+ Result MapIoPage_(const u32 cap, KPageTable* page_table);
+ Result MapRegion_(const u32 cap, KPageTable* page_table);
+ Result SetInterruptPairCapability(const u32 cap);
+ Result SetProgramTypeCapability(const u32 cap);
+ Result SetKernelVersionCapability(const u32 cap);
+ Result SetHandleTableCapability(const u32 cap);
+ Result SetDebugFlagsCapability(const u32 cap);
+
+ template <typename F>
+ static Result ProcessMapRegionCapability(const u32 cap, F f);
+ static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
+
+ Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
+ Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
+
+private:
+ Svc::SvcAccessFlagSet m_svc_access_flags{};
+ InterruptFlagSet m_irq_access_flags{};
+ u64 m_core_mask{};
+ u64 m_phys_core_mask{};
+ u64 m_priority_mask{};
+ u32 m_debug_capabilities{};
+ s32 m_handle_table_size{};
+ u32 m_intended_kernel_version{};
+ u32 m_program_type{};
+};
+
+} // namespace Kernel
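Going the other way, building a descriptor means writing the trailing-ones ID into the low bits and packing payload fields above it. A hedged sketch assembling a CorePriority word with plain shifts (field offsets taken from the BitField declarations above; the priority and core values are illustrative):

#include <cstdint>

// Field layout of CorePriority:
//   [0, 4)   id (0b0111: three trailing ones plus the mandatory clear bit)
//   [4, 10)  lowest_thread_priority
//   [10, 16) highest_thread_priority
//   [16, 24) minimum_core_id
//   [24, 32) maximum_core_id
constexpr uint32_t MakeCorePriorityCap(uint32_t lowest_prio, uint32_t highest_prio,
                                       uint32_t min_core, uint32_t max_core) {
    constexpr uint32_t Id = (1u << 3) - 1; // CapabilityType::CorePriority
    return Id | (lowest_prio << 4) | (highest_prio << 10) | (min_core << 16) |
           (max_core << 24);
}

// Illustrative values: priorities 24..44 on cores 0..3.
constexpr uint32_t app_cap = MakeCorePriorityCap(44, 24, 0, 3);
static_assert((app_cap & 0xF) == 0b0111);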
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index 2ec623a58..40e09e532 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_scheduler.h"
@@ -12,26 +11,21 @@
namespace Kernel {
-KClientPort::KClientPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
KClientPort::~KClientPort() = default;
-void KClientPort::Initialize(KPort* parent_port_, s32 max_sessions_, std::string&& name_) {
+void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
// Set member variables.
- num_sessions = 0;
- peak_sessions = 0;
- parent = parent_port_;
- max_sessions = max_sessions_;
- name = std::move(name_);
+ m_num_sessions = 0;
+ m_peak_sessions = 0;
+ m_parent = parent;
+ m_max_sessions = max_sessions;
}
void KClientPort::OnSessionFinalized() {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
- // This might happen if a session was improperly used with this port.
- ASSERT_MSG(num_sessions > 0, "num_sessions is invalid");
-
- const auto prev = num_sessions--;
- if (prev == max_sessions) {
+ if (const auto prev = m_num_sessions--; prev == m_max_sessions) {
this->NotifyAvailable();
}
}
@@ -48,80 +42,81 @@ bool KClientPort::IsServerClosed() const {
void KClientPort::Destroy() {
// Note with our parent that we're closed.
- parent->OnClientClosed();
+ m_parent->OnClientClosed();
// Close our reference to our parent.
- parent->Close();
+ m_parent->Close();
}
bool KClientPort::IsSignaled() const {
- return num_sessions < max_sessions;
+ return m_num_sessions.load() < m_max_sessions;
}
Result KClientPort::CreateSession(KClientSession** out) {
+ // Declare the session we're going to allocate.
+ KSession* session{};
+
// Reserve a new session from the resource limit.
- KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
- LimitableResource::SessionCountMax);
+ //! FIXME: we are reserving this from the wrong resource limit!
+ KScopedResourceReservation session_reservation(
+ m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax);
R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);
+ // Allocate a session normally.
+ session = KSession::Create(m_kernel);
+
+ // Check that we successfully created a session.
+ R_UNLESS(session != nullptr, ResultOutOfResource);
+
// Update the session counts.
{
+ ON_RESULT_FAILURE {
+ session->Close();
+ };
+
// Atomically increment the number of sessions.
s32 new_sessions{};
{
- const auto max = max_sessions;
- auto cur_sessions = num_sessions.load(std::memory_order_acquire);
+ const auto max = m_max_sessions;
+ auto cur_sessions = m_num_sessions.load(std::memory_order_acquire);
do {
R_UNLESS(cur_sessions < max, ResultOutOfSessions);
new_sessions = cur_sessions + 1;
- } while (!num_sessions.compare_exchange_weak(cur_sessions, new_sessions,
- std::memory_order_relaxed));
+ } while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions,
+ std::memory_order_relaxed));
}
// Atomically update the peak session tracking.
{
- auto peak = peak_sessions.load(std::memory_order_acquire);
+ auto peak = m_peak_sessions.load(std::memory_order_acquire);
do {
if (peak >= new_sessions) {
break;
}
- } while (!peak_sessions.compare_exchange_weak(peak, new_sessions,
- std::memory_order_relaxed));
+ } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions,
+ std::memory_order_relaxed));
}
}
- // Create a new session.
- KSession* session = KSession::Create(kernel);
- if (session == nullptr) {
- // Decrement the session count.
- const auto prev = num_sessions--;
- if (prev == max_sessions) {
- this->NotifyAvailable();
- }
-
- return ResultOutOfResource;
- }
-
// Initialize the session.
- session->Initialize(this, parent->GetName());
+ session->Initialize(this, m_parent->GetName());
// Commit the session reservation.
session_reservation.Commit();
// Register the session.
- KSession::Register(kernel, session);
- auto session_guard = SCOPE_GUARD({
+ KSession::Register(m_kernel, session);
+ ON_RESULT_FAILURE {
session->GetClientSession().Close();
session->GetServerSession().Close();
- });
+ };
// Enqueue the session with our parent.
- R_TRY(parent->EnqueueSession(std::addressof(session->GetServerSession())));
+ R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));
// We succeeded, so set the output.
- session_guard.Cancel();
*out = std::addressof(session->GetClientSession());
- return ResultSuccess;
+ R_SUCCEED();
}
} // namespace Kernel
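The session bookkeeping in CreateSession is a bounded fetch-increment plus a monotonic peak update, both lock-free CAS loops. A standalone sketch of the two loops:

#include <atomic>
#include <cstdint>

// Refuse to exceed `max`; otherwise increment and report the new count,
// mirroring the ResultOutOfSessions path above.
bool TryOpenSession(std::atomic<int32_t>& num_sessions, int32_t max, int32_t* out_new) {
    int32_t cur = num_sessions.load(std::memory_order_acquire);
    int32_t next{};
    do {
        if (cur >= max) {
            return false;
        }
        next = cur + 1;
    } while (!num_sessions.compare_exchange_weak(cur, next, std::memory_order_relaxed));

    *out_new = next;
    return true;
}

// Raise the recorded peak if `new_sessions` exceeds it; racing writers
// all converge on the maximum.
void UpdatePeak(std::atomic<int32_t>& peak_sessions, int32_t new_sessions) {
    int32_t peak = peak_sessions.load(std::memory_order_acquire);
    while (peak < new_sessions &&
           !peak_sessions.compare_exchange_weak(peak, new_sessions,
                                                std::memory_order_relaxed)) {
    }
}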
diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h
index 81046fb86..23db06ddf 100644
--- a/src/core/hle/kernel/k_client_port.h
+++ b/src/core/hle/kernel/k_client_port.h
@@ -4,7 +4,6 @@
#pragma once
#include <memory>
-#include <string>
#include "common/common_types.h"
#include "core/hle/kernel/k_synchronization_object.h"
@@ -15,34 +14,33 @@ namespace Kernel {
class KClientSession;
class KernelCore;
class KPort;
-class SessionRequestManager;
class KClientPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);
public:
- explicit KClientPort(KernelCore& kernel_);
+ explicit KClientPort(KernelCore& kernel);
~KClientPort() override;
- void Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_);
+ void Initialize(KPort* parent, s32 max_sessions);
void OnSessionFinalized();
void OnServerClosed();
const KPort* GetParent() const {
- return parent;
+ return m_parent;
}
KPort* GetParent() {
- return parent;
+ return m_parent;
}
s32 GetNumSessions() const {
- return num_sessions;
+ return m_num_sessions;
}
s32 GetPeakSessions() const {
- return peak_sessions;
+ return m_peak_sessions;
}
s32 GetMaxSessions() const {
- return max_sessions;
+ return m_max_sessions;
}
bool IsLight() const;
@@ -55,10 +53,10 @@ public:
Result CreateSession(KClientSession** out);
private:
- std::atomic<s32> num_sessions{};
- std::atomic<s32> peak_sessions{};
- s32 max_sessions{};
- KPort* parent{};
+ std::atomic<s32> m_num_sessions{};
+ std::atomic<s32> m_peak_sessions{};
+ s32 m_max_sessions{};
+ KPort* m_parent{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index b4197a8d5..72b66270d 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
@@ -13,28 +12,28 @@ namespace Kernel {
static constexpr u32 MessageBufferSize = 0x100;
-KClientSession::KClientSession(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KClientSession::~KClientSession() = default;
void KClientSession::Destroy() {
- parent->OnClientClosed();
- parent->Close();
+ m_parent->OnClientClosed();
+ m_parent->Close();
}
void KClientSession::OnServerClosed() {}
Result KClientSession::SendSyncRequest() {
// Create a session request.
- KSessionRequest* request = KSessionRequest::Create(kernel);
+ KSessionRequest* request = KSessionRequest::Create(m_kernel);
R_UNLESS(request != nullptr, ResultOutOfResource);
SCOPE_EXIT({ request->Close(); });
// Initialize the request.
- request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
+ request->Initialize(nullptr, GetInteger(GetCurrentThread(m_kernel).GetTlsAddress()),
+ MessageBufferSize);
// Send the request.
- return parent->GetServerSession().OnRequest(request);
+ R_RETURN(m_parent->GetServerSession().OnRequest(request));
}
} // namespace Kernel
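
SendSyncRequest leans on SCOPE_EXIT to guarantee request->Close() runs on every path, much as the client-port diff above swaps SCOPE_GUARD/Cancel for ON_RESULT_FAILURE. A minimal sketch of the underlying idiom, with an illustrative ScopeExit type rather than yuzu's actual macro machinery:

    #include <cstdio>
    #include <utility>

    // Runs the callback when the enclosing scope unwinds, so cleanup such
    // as Close() cannot be skipped by an early return.
    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F&& f) : m_f(std::forward<F>(f)) {}
        ~ScopeExit() {
            m_f();
        }
        ScopeExit(const ScopeExit&) = delete;
        ScopeExit& operator=(const ScopeExit&) = delete;

    private:
        F m_f;
    };

    int main() {
        std::puts("create request");
        {
            ScopeExit close_request([] { std::puts("request->Close()"); });
            std::puts("send request");
            // An early return here would still run the cleanup.
        }
        std::puts("done");
    }
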
diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h
index b4a19c546..9b62e55e4 100644
--- a/src/core/hle/kernel/k_client_session.h
+++ b/src/core/hle/kernel/k_client_session.h
@@ -30,20 +30,19 @@ class KClientSession final
KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);
public:
- explicit KClientSession(KernelCore& kernel_);
+ explicit KClientSession(KernelCore& kernel);
~KClientSession() override;
- void Initialize(KSession* parent_session_, std::string&& name_) {
+ void Initialize(KSession* parent) {
// Set member variables.
- parent = parent_session_;
- name = std::move(name_);
+ m_parent = parent;
}
void Destroy() override;
- static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+ static void PostDestroy(uintptr_t arg) {}
KSession* GetParent() const {
- return parent;
+ return m_parent;
}
Result SendSyncRequest();
@@ -51,7 +50,7 @@ public:
void OnServerClosed();
private:
- KSession* parent{};
+ KSession* m_parent{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index d9da1e600..3583bee44 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -16,18 +16,19 @@
namespace Kernel {
-KCodeMemory::KCodeMemory(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
+KCodeMemory::KCodeMemory(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}
-Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
+Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, KProcessAddress addr,
+ size_t size) {
// Set members.
- m_owner = kernel.CurrentProcess();
+ m_owner = GetCurrentProcessPointer(m_kernel);
// Get the owner page table.
auto& page_table = m_owner->PageTable();
// Construct the page group.
- m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
+ m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());
// Lock the memory.
R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
@@ -45,7 +46,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
m_is_mapped = false;
// We succeeded.
- return ResultSuccess;
+ R_SUCCEED();
}
void KCodeMemory::Finalize() {
@@ -63,7 +64,7 @@ void KCodeMemory::Finalize() {
m_owner->Close();
}
-Result KCodeMemory::Map(VAddr address, size_t size) {
+Result KCodeMemory::Map(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -74,16 +75,16 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
R_UNLESS(!m_is_mapped, ResultInvalidState);
// Map the memory.
- R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
+ R_TRY(GetCurrentProcess(m_kernel).PageTable().MapPageGroup(
address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
// Mark ourselves as mapped.
m_is_mapped = true;
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KCodeMemory::Unmap(VAddr address, size_t size) {
+Result KCodeMemory::Unmap(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -91,16 +92,16 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
KScopedLightLock lk(m_lock);
// Unmap the memory.
- R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
- KMemoryState::CodeOut));
+ R_TRY(GetCurrentProcess(m_kernel).PageTable().UnmapPageGroup(address, *m_page_group,
+ KMemoryState::CodeOut));
// Mark ourselves as unmapped.
m_is_mapped = false;
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
+Result KCodeMemory::MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -125,16 +126,16 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
}
// Map the memory.
- R_TRY(
- m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
+ R_TRY(m_owner->PageTable().MapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode,
+ k_perm));
// Mark ourselves as mapped.
m_is_owner_mapped = true;
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
+Result KCodeMemory::UnmapFromOwner(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -142,12 +143,12 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
KScopedLightLock lk(m_lock);
// Unmap the memory.
- R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
+ R_TRY(m_owner->PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode));
// Mark ourselves as unmapped.
m_is_owner_mapped = false;
- return ResultSuccess;
+ R_SUCCEED();
}
} // namespace Kernel
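
Every map and unmap entry point above gates on the same invariant: the caller's size, rounded up to whole pages, must equal the page group's page count. A small sketch of that check, assuming the usual ceiling-division definition behind Common::DivideUp:

    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t PageSize = 0x1000;

    // Ceiling division: the idiom behind helpers like Common::DivideUp.
    constexpr std::size_t DivideUp(std::size_t value, std::size_t divisor) {
        return (value + divisor - 1) / divisor;
    }

    // Mirrors the R_UNLESS(..., ResultInvalidSize) checks: a request is
    // valid only if it spans exactly the pages held by the page group.
    constexpr bool SizeMatchesPageGroup(std::size_t group_num_pages, std::size_t size) {
        return group_num_pages == DivideUp(size, PageSize);
    }

    static_assert(SizeMatchesPageGroup(4, 4 * PageSize));
    static_assert(!SizeMatchesPageGroup(4, 4 * PageSize + 1));

    int main() {
        std::printf("checks passed\n");
    }
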
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 5b260b385..26fe6b3dc 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -5,12 +5,12 @@
#include <optional>
-#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
@@ -29,25 +29,25 @@ class KCodeMemory final
KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);
public:
- explicit KCodeMemory(KernelCore& kernel_);
+ explicit KCodeMemory(KernelCore& kernel);
- Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
+ Result Initialize(Core::DeviceMemory& device_memory, KProcessAddress address, size_t size);
void Finalize() override;
- Result Map(VAddr address, size_t size);
- Result Unmap(VAddr address, size_t size);
- Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
- Result UnmapFromOwner(VAddr address, size_t size);
+ Result Map(KProcessAddress address, size_t size);
+ Result Unmap(KProcessAddress address, size_t size);
+ Result MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm);
+ Result UnmapFromOwner(KProcessAddress address, size_t size);
bool IsInitialized() const override {
return m_is_initialized;
}
- static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+ static void PostDestroy(uintptr_t arg) {}
KProcess* GetOwner() const override {
return m_owner;
}
- VAddr GetSourceAddress() const {
+ KProcessAddress GetSourceAddress() const {
return m_address;
}
size_t GetSize() const {
@@ -57,7 +57,7 @@ public:
private:
std::optional<KPageGroup> m_page_group{};
KProcess* m_owner{};
- VAddr m_address{};
+ KProcessAddress m_address{};
KLightLock m_lock;
bool m_is_initialized{};
bool m_is_owner_mapped{};
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 124149697..efbac0e6a 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -4,7 +4,6 @@
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/k_condition_variable.h"
-#include "core/hle/kernel/k_linked_list.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
@@ -19,36 +18,41 @@ namespace Kernel {
namespace {
-bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
- *out = system.Memory().Read32(address);
+bool ReadFromUser(KernelCore& kernel, u32* out, KProcessAddress address) {
+ *out = GetCurrentMemory(kernel).Read32(GetInteger(address));
return true;
}
-bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
- system.Memory().Write32(address, *p);
+bool WriteToUser(KernelCore& kernel, KProcessAddress address, const u32* p) {
+ GetCurrentMemory(kernel).Write32(GetInteger(address), *p);
return true;
}
-bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
+bool UpdateLockAtomic(Core::System& system, u32* out, KProcessAddress address, u32 if_zero,
u32 new_orr_mask) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
- // Load the value from the address.
- const auto expected = monitor.ExclusiveRead32(current_core, address);
+ u32 expected{};
- // Orr in the new mask.
- u32 value = expected | new_orr_mask;
+ while (true) {
+ // Load the value from the address.
+ expected = monitor.ExclusiveRead32(current_core, GetInteger(address));
- // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
- if (!expected) {
- value = if_zero;
- }
+ // Orr in the new mask.
+ u32 value = expected | new_orr_mask;
+
+ // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
+ if (!expected) {
+ value = if_zero;
+ }
+
+ // Try to store.
+ if (monitor.ExclusiveWrite32(current_core, GetInteger(address), value)) {
+ break;
+ }
- // Try to store.
- if (!monitor.ExclusiveWrite32(current_core, address, value)) {
// If we failed to store, try again.
- return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
}
// We're done.
@@ -58,8 +62,8 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
public:
- explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
- : KThreadQueue(kernel_) {}
+ explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel)
+ : KThreadQueue(kernel) {}
void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
@@ -76,8 +80,8 @@ private:
public:
explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
- KernelCore& kernel_, KConditionVariable::ThreadTree* t)
- : KThreadQueue(kernel_), m_tree(t) {}
+ KernelCore& kernel, KConditionVariable::ThreadTree* t)
+ : KThreadQueue(kernel), m_tree(t) {}
void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
@@ -98,111 +102,113 @@ public:
} // namespace
-KConditionVariable::KConditionVariable(Core::System& system_)
- : system{system_}, kernel{system.Kernel()} {}
+KConditionVariable::KConditionVariable(Core::System& system)
+ : m_system{system}, m_kernel{system.Kernel()} {}
KConditionVariable::~KConditionVariable() = default;
-Result KConditionVariable::SignalToAddress(VAddr addr) {
- KThread* owner_thread = GetCurrentThreadPointer(kernel);
+Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
+ KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
// Signal the address.
{
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// Remove waiter thread.
- s32 num_waiters{};
- KThread* next_owner_thread =
- owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+ bool has_waiters{};
+ KThread* const next_owner_thread =
+ owner_thread->RemoveUserWaiterByKey(std::addressof(has_waiters), addr);
// Determine the next tag.
u32 next_value{};
if (next_owner_thread != nullptr) {
next_value = next_owner_thread->GetAddressKeyValue();
- if (num_waiters > 1) {
+ if (has_waiters) {
next_value |= Svc::HandleWaitMask;
}
+ }
- // Write the value to userspace.
- Result result{ResultSuccess};
- if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
- result = ResultSuccess;
- } else {
- result = ResultInvalidCurrentMemory;
- }
+ // Synchronize memory before proceeding.
+ std::atomic_thread_fence(std::memory_order_seq_cst);
- // Signal the next owner thread.
- next_owner_thread->EndWait(result);
- return result;
+ // Write the value to userspace.
+ Result result{ResultSuccess};
+ if (WriteToUser(m_kernel, addr, std::addressof(next_value))) [[likely]] {
+ result = ResultSuccess;
} else {
- // Just write the value to userspace.
- R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
- ResultInvalidCurrentMemory);
+ result = ResultInvalidCurrentMemory;
+ }
- return ResultSuccess;
+ // If necessary, signal the next owner thread.
+ if (next_owner_thread != nullptr) {
+ next_owner_thread->EndWait(result);
}
+
+ R_RETURN(result);
}
}
-Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
- KThread* cur_thread = GetCurrentThreadPointer(kernel);
- ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
+Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+ ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
// Wait for the address.
KThread* owner_thread{};
{
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// Check if the thread should terminate.
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
// Read the tag from userspace.
u32 test_tag{};
- R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
+ R_UNLESS(ReadFromUser(m_kernel, std::addressof(test_tag), addr),
+ ResultInvalidCurrentMemory);
// If the tag isn't the handle (with wait mask), we're done.
R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
// Get the lock owner thread.
- owner_thread = kernel.CurrentProcess()
- ->GetHandleTable()
+ owner_thread = GetCurrentProcess(m_kernel)
+ .GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(handle)
.ReleasePointerUnsafe();
R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
// Update the lock.
- cur_thread->SetAddressKey(addr, value);
+ cur_thread->SetUserAddressKey(addr, value);
owner_thread->AddWaiter(cur_thread);
// Begin waiting.
cur_thread->BeginWait(std::addressof(wait_queue));
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
- cur_thread->SetMutexWaitAddressForDebugging(addr);
}
// Close our reference to the owner thread, now that the wait is over.
owner_thread->Close();
// Get the wait result.
- return cur_thread->GetWaitResult();
+ R_RETURN(cur_thread->GetWaitResult());
}
void KConditionVariable::SignalImpl(KThread* thread) {
// Check pre-conditions.
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Update the tag.
- VAddr address = thread->GetAddressKey();
+ KProcessAddress address = thread->GetAddressKey();
u32 own_tag = thread->GetAddressKeyValue();
u32 prev_tag{};
bool can_access{};
{
- // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // NOTE: If the scheduler lock is not held here, disabling interrupts is required.
+ // KScopedInterruptDisable di;
+
// TODO(bunnei): We should call CanAccessAtomic(..) here.
can_access = true;
if (can_access) [[likely]] {
- UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
+ UpdateLockAtomic(m_system, std::addressof(prev_tag), address, own_tag,
Svc::HandleWaitMask);
}
}
@@ -213,8 +219,8 @@ void KConditionVariable::SignalImpl(KThread* thread) {
thread->EndWait(ResultSuccess);
} else {
// Get the previous owner.
- KThread* owner_thread = kernel.CurrentProcess()
- ->GetHandleTable()
+ KThread* owner_thread = GetCurrentProcess(m_kernel)
+ .GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(
static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
.ReleasePointerUnsafe();
@@ -238,55 +244,58 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
- auto it = thread_tree.nfind_key({cv_key, -1});
- while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ auto it = m_tree.nfind_key({cv_key, -1});
+ while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetConditionVariableKey() == cv_key)) {
KThread* target_thread = std::addressof(*it);
- this->SignalImpl(target_thread);
- it = thread_tree.erase(it);
+ it = m_tree.erase(it);
target_thread->ClearConditionVariable();
+
+ this->SignalImpl(target_thread);
+
++num_waiters;
}
// If we have no waiters, clear the has waiter flag.
- if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
+ if (it == m_tree.end() || it->GetConditionVariableKey() != cv_key) {
const u32 has_waiter_flag{};
- WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
+ WriteToUser(m_kernel, cv_key, std::addressof(has_waiter_flag));
}
}
}
-Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
+Result KConditionVariable::Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = GetCurrentThreadPointer(kernel);
- ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
- kernel, std::addressof(thread_tree));
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+ KHardwareTimer* timer{};
+ ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(m_kernel,
+ std::addressof(m_tree));
{
- KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);
+ KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), cur_thread, timeout);
// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
- return ResultTerminationRequested;
+ R_THROW(ResultTerminationRequested);
}
// Update the value and process for the next owner.
{
// Remove waiter thread.
- s32 num_waiters{};
+ bool has_waiters{};
KThread* next_owner_thread =
- cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+ cur_thread->RemoveUserWaiterByKey(std::addressof(has_waiters), addr);
// Update for the next owner thread.
u32 next_value{};
if (next_owner_thread != nullptr) {
// Get the next tag value.
next_value = next_owner_thread->GetAddressKeyValue();
- if (num_waiters > 1) {
+ if (has_waiters) {
next_value |= Svc::HandleWaitMask;
}
@@ -297,14 +306,14 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Write to the cv key.
{
const u32 has_waiter_flag = 1;
- WriteToUser(system, key, std::addressof(has_waiter_flag));
- // TODO(bunnei): We should call DataMemoryBarrier(..) here.
+ WriteToUser(m_kernel, key, std::addressof(has_waiter_flag));
+ std::atomic_thread_fence(std::memory_order_seq_cst);
}
// Write the value to userspace.
- if (!WriteToUser(system, addr, std::addressof(next_value))) {
+ if (!WriteToUser(m_kernel, addr, std::addressof(next_value))) {
slp.CancelSleep();
- return ResultInvalidCurrentMemory;
+ R_THROW(ResultInvalidCurrentMemory);
}
}
@@ -312,17 +321,17 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
R_UNLESS(timeout != 0, ResultTimedOut);
// Update condition variable tracking.
- cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
- thread_tree.insert(*cur_thread);
+ cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value);
+ m_tree.insert(*cur_thread);
// Begin waiting.
+ wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
- cur_thread->SetMutexWaitAddressForDebugging(addr);
}
// Get the wait result.
- return cur_thread->GetWaitResult();
+ R_RETURN(cur_thread->GetWaitResult());
}
} // namespace Kernel
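
The interesting change in UpdateLockAtomic is structural: the exclusive-monitor retry that used to recurse on a failed store is now a plain loop. The portable analogue below uses compare_exchange_weak in place of ExclusiveRead32/ExclusiveWrite32; the signature is simplified, and the atomic is an assumption standing in for guest memory.

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // OR a mask into the tag, or store if_zero when the tag was zero,
    // retrying in a loop instead of by tail recursion. Returns the
    // previous tag, like *out in the kernel helper.
    std::uint32_t UpdateLockAtomic(std::atomic<std::uint32_t>& tag, std::uint32_t if_zero,
                                   std::uint32_t new_orr_mask) {
        std::uint32_t expected = tag.load(std::memory_order_relaxed);
        while (true) {
            const std::uint32_t value = (expected == 0) ? if_zero : (expected | new_orr_mask);
            if (tag.compare_exchange_weak(expected, value, std::memory_order_seq_cst)) {
                return expected;
            }
            // A failed CAS reloaded `expected`; try again with the fresh value.
        }
    }

    int main() {
        std::atomic<std::uint32_t> tag{0};
        const auto prev = UpdateLockAtomic(tag, 0x42, 0x40000000);
        std::printf("prev=%u now=0x%x\n", prev, tag.load());
    }

The same hunk also tightens SignalToAddress: the next owner's tag is computed and written back before EndWait, and has_waiters (from RemoveUserWaiterByKey) decides whether Svc::HandleWaitMask is set, replacing the old num_waiters > 1 comparison.
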
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index fad4ed011..8c2f3ae51 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -4,10 +4,10 @@
#pragma once
#include "common/assert.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/result.h"
@@ -21,36 +21,36 @@ class KConditionVariable {
public:
using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
- explicit KConditionVariable(Core::System& system_);
+ explicit KConditionVariable(Core::System& system);
~KConditionVariable();
// Arbitration
- [[nodiscard]] Result SignalToAddress(VAddr addr);
- [[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value);
+ Result SignalToAddress(KProcessAddress addr);
+ Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);
// Condition variable
void Signal(u64 cv_key, s32 count);
- [[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
+ Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);
private:
void SignalImpl(KThread* thread);
- ThreadTree thread_tree;
-
- Core::System& system;
- KernelCore& kernel;
+private:
+ Core::System& m_system;
+ KernelCore& m_kernel;
+ ThreadTree m_tree{};
};
-inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
+inline void BeforeUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
KThread* thread) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
tree->erase(tree->iterator_to(*thread));
}
-inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
+inline void AfterUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
KThread* thread) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
tree->insert(*thread);
}
diff --git a/src/core/hle/kernel/k_debug.h b/src/core/hle/kernel/k_debug.h
index e3a0689c8..2290e3bca 100644
--- a/src/core/hle/kernel/k_debug.h
+++ b/src/core/hle/kernel/k_debug.h
@@ -12,9 +12,9 @@ class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObj
KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);
public:
- explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+ explicit KDebug(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
- static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+ static void PostDestroy(uintptr_t arg) {}
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
new file mode 100644
index 000000000..f48896715
--- /dev/null
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -0,0 +1,150 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/assert.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_device_address_space.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer(kernel), m_lock(kernel), m_is_initialized(false) {}
+KDeviceAddressSpace::~KDeviceAddressSpace() = default;
+
+void KDeviceAddressSpace::Initialize() {
+ // This just forwards to the device page table manager.
+ // KDevicePageTable::Initialize();
+}
+
+// Member functions.
+Result KDeviceAddressSpace::Initialize(u64 address, u64 size) {
+ // Initialize the device page table.
+ // R_TRY(m_table.Initialize(address, size));
+
+ // Set member variables.
+ m_space_address = address;
+ m_space_size = size;
+ m_is_initialized = true;
+
+ R_SUCCEED();
+}
+
+void KDeviceAddressSpace::Finalize() {
+ // Finalize the table.
+ // m_table.Finalize();
+}
+
+Result KDeviceAddressSpace::Attach(Svc::DeviceName device_name) {
+ // Lock the address space.
+ KScopedLightLock lk(m_lock);
+
+ // Attach.
+ // R_RETURN(m_table.Attach(device_name, m_space_address, m_space_size));
+ R_SUCCEED();
+}
+
+Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
+ // Lock the address space.
+ KScopedLightLock lk(m_lock);
+
+ // Detach.
+ // R_RETURN(m_table.Detach(device_name));
+ R_SUCCEED();
+}
+
+Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+ size_t size, u64 device_address, u32 option, bool is_aligned) {
+ // Check that the address falls within the space.
+ R_UNLESS((m_space_address <= device_address &&
+ device_address + size - 1 <= m_space_address + m_space_size - 1),
+ ResultInvalidCurrentMemory);
+
+ // Decode the option.
+ const Svc::MapDeviceAddressSpaceOption option_pack{option};
+ const auto device_perm = option_pack.permission.Value();
+ const auto flags = option_pack.flags.Value();
+ const auto reserved = option_pack.reserved.Value();
+
+ // Validate the option.
+ // TODO: This check for flags == none is likely specific to the NX board.
+ R_UNLESS(flags == Svc::MapDeviceAddressSpaceFlag::None, ResultInvalidEnumValue);
+ R_UNLESS(reserved == 0, ResultInvalidEnumValue);
+
+ // Lock the address space.
+ KScopedLightLock lk(m_lock);
+
+ // Lock the page table to prevent concurrent device mapping operations.
+ // KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
+
+ // Lock the pages.
+ bool is_io{};
+ R_TRY(page_table->LockForMapDeviceAddressSpace(std::addressof(is_io), process_address, size,
+ ConvertToKMemoryPermission(device_perm),
+ is_aligned, true));
+
+ // Ensure that if we fail, we don't keep unmapped pages locked.
+ ON_RESULT_FAILURE {
+ ASSERT(page_table->UnlockForDeviceAddressSpace(process_address, size) == ResultSuccess);
+ };
+
+ // Check that the io status is allowable.
+ if (is_io) {
+ R_UNLESS(static_cast<u32>(flags & Svc::MapDeviceAddressSpaceFlag::NotIoRegister) == 0,
+ ResultInvalidCombination);
+ }
+
+ // Map the pages.
+ {
+ // Perform the mapping.
+ // R_TRY(m_table.Map(page_table, process_address, size, device_address, device_perm,
+ // is_aligned, is_io));
+
+ // Ensure that we unmap the pages if we fail to update the protections.
+ // NOTE: Nintendo does not check the result of this unmap call.
+ // ON_RESULT_FAILURE { m_table.Unmap(device_address, size); };
+
+ // Update the protections in accordance with how much we mapped.
+ // R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size));
+ }
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+ size_t size, u64 device_address) {
+ // Check that the address falls within the space.
+ R_UNLESS((m_space_address <= device_address &&
+ device_address + size - 1 <= m_space_address + m_space_size - 1),
+ ResultInvalidCurrentMemory);
+
+ // Lock the address space.
+ KScopedLightLock lk(m_lock);
+
+ // Lock the page table to prevent concurrent device mapping operations.
+ // KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
+
+ // Lock the pages.
+ R_TRY(page_table->LockForUnmapDeviceAddressSpace(process_address, size, true));
+
+ // Unmap the pages.
+ {
+ // If we fail to unmap, we want to do a partial unlock.
+ // ON_RESULT_FAILURE {
+ // ASSERT(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size) ==
+ // ResultSuccess);
+ // };
+
+ // Perform the unmap.
+ // R_TRY(m_table.Unmap(page_table, process_address, size, device_address));
+ }
+
+ // Unlock the pages.
+ ASSERT(page_table->UnlockForDeviceAddressSpace(process_address, size) == ResultSuccess);
+
+ R_SUCCEED();
+}
+
+} // namespace Kernel
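
Both Map and Unmap begin with the same containment test, written in last-address form so it stays correct at the very top of the address space, where an exclusive end bound would wrap to zero. A standalone sketch (assuming a non-zero size, which the inclusive form requires):

    #include <cstdint>
    #include <cstdio>

    // True when [address, address + size) lies inside the device space.
    // Comparing last addresses avoids computing address + size, which
    // can overflow for a range ending at the top of the 64-bit space.
    bool SpaceContains(std::uint64_t space_address, std::uint64_t space_size,
                       std::uint64_t address, std::uint64_t size) {
        return space_address <= address &&
               address + size - 1 <= space_address + space_size - 1;
    }

    int main() {
        std::printf("%d\n", SpaceContains(0x1000, 0x1000, 0x1800, 0x800)); // 1: ends at the last byte
        std::printf("%d\n", SpaceContains(0x1000, 0x1000, 0x1800, 0x801)); // 0: one byte past the end
    }
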
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
new file mode 100644
index 000000000..18556e3cc
--- /dev/null
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -0,0 +1,61 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <string>
+
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KDeviceAddressSpace final
+ : public KAutoObjectWithSlabHeapAndContainer<KDeviceAddressSpace, KAutoObjectWithList> {
+ KERNEL_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject);
+
+public:
+ explicit KDeviceAddressSpace(KernelCore& kernel);
+ ~KDeviceAddressSpace();
+
+ Result Initialize(u64 address, u64 size);
+ void Finalize() override;
+
+ bool IsInitialized() const override {
+ return m_is_initialized;
+ }
+ static void PostDestroy(uintptr_t arg) {}
+
+ Result Attach(Svc::DeviceName device_name);
+ Result Detach(Svc::DeviceName device_name);
+
+ Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ u64 device_address, u32 option) {
+ R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
+ }
+
+ Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ u64 device_address, u32 option) {
+ R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
+ }
+
+ Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ u64 device_address);
+
+ static void Initialize();
+
+private:
+ Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ u64 device_address, u32 option, bool is_aligned);
+
+private:
+ KLightLock m_lock;
+ // KDevicePageTable m_table;
+ u64 m_space_address{};
+ u64 m_space_size{};
+ bool m_is_initialized{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
index ac80d60a1..ad11e84b7 100644
--- a/src/core/hle/kernel/k_dynamic_page_manager.h
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -6,9 +6,9 @@
#include <vector>
#include "common/alignment.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_spin_lock.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_results.h"
@@ -26,23 +26,23 @@ public:
KDynamicPageManager() = default;
template <typename T>
- T* GetPointer(VAddr addr) {
+ T* GetPointer(KVirtualAddress addr) {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
template <typename T>
- const T* GetPointer(VAddr addr) const {
+ const T* GetPointer(KVirtualAddress addr) const {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
- Result Initialize(VAddr memory, size_t size, size_t align) {
+ Result Initialize(KVirtualAddress memory, size_t size, size_t align) {
// We need to have positive size.
R_UNLESS(size > 0, ResultOutOfMemory);
m_backing_memory.resize(size);
// Set addresses.
m_address = memory;
- m_aligned_address = Common::AlignDown(memory, align);
+ m_aligned_address = Common::AlignDown(GetInteger(memory), align);
// Calculate extents.
const size_t managed_size = m_address + size - m_aligned_address;
@@ -79,7 +79,7 @@ public:
R_SUCCEED();
}
- VAddr GetAddress() const {
+ KVirtualAddress GetAddress() const {
return m_address;
}
size_t GetSize() const {
@@ -145,7 +145,8 @@ public:
KScopedSpinLock lk(m_lock);
// Set the bit for the free page.
- size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
+ size_t offset =
+ (reinterpret_cast<uint64_t>(pb) - GetInteger(m_aligned_address)) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);
// Decrement our used count.
@@ -158,8 +159,8 @@ private:
size_t m_used{};
size_t m_peak{};
size_t m_count{};
- VAddr m_address{};
- VAddr m_aligned_address{};
+ KVirtualAddress m_address{};
+ KVirtualAddress m_aligned_address{};
size_t m_size{};
// TODO(bunnei): Backed by host memory until we emulate kernel virtual address space.
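
The Free path above recovers a bitmap index from a raw page pointer: the distance from the aligned base, divided by the slot size. A miniature of that mapping (the names are illustrative, not the manager's real layout):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr std::size_t PageSize = 0x1000;

    struct PageBuffer {
        std::uint8_t data[PageSize];
    };

    // Turn a page pointer back into its bitmap bit: slots are laid out
    // contiguously from the aligned base, so index = byte offset / slot size.
    std::size_t PageIndex(const PageBuffer* pb, std::uint64_t aligned_address) {
        return (reinterpret_cast<std::uint64_t>(pb) - aligned_address) / sizeof(PageBuffer);
    }

    int main() {
        const std::uint64_t base = 0x100000;
        const auto* page = reinterpret_cast<const PageBuffer*>(base + 3 * sizeof(PageBuffer));
        std::printf("index=%zu\n", PageIndex(page, base)); // 3
    }
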
diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h
index 3a0ddd050..76ed4cac1 100644
--- a/src/core/hle/kernel/k_dynamic_slab_heap.h
+++ b/src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -19,7 +19,7 @@ class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
public:
constexpr KDynamicSlabHeap() = default;
- constexpr VAddr GetAddress() const {
+ constexpr KVirtualAddress GetAddress() const {
return m_address;
}
constexpr size_t GetSize() const {
@@ -35,7 +35,7 @@ public:
return m_count.load();
}
- constexpr bool IsInRange(VAddr addr) const {
+ constexpr bool IsInRange(KVirtualAddress addr) const {
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
}
@@ -115,7 +115,7 @@ private:
std::atomic<size_t> m_used{};
std::atomic<size_t> m_peak{};
std::atomic<size_t> m_count{};
- VAddr m_address{};
+ KVirtualAddress m_address{};
size_t m_size{};
};
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
index d973853ab..d92b491f8 100644
--- a/src/core/hle/kernel/k_event.cpp
+++ b/src/core/hle/kernel/k_event.cpp
@@ -7,8 +7,8 @@
namespace Kernel {
-KEvent::KEvent(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {}
+KEvent::KEvent(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer{kernel}, m_readable_event{kernel} {}
KEvent::~KEvent() = default;
@@ -36,7 +36,7 @@ void KEvent::Finalize() {
}
Result KEvent::Signal() {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
R_SUCCEED_IF(m_readable_event_destroyed);
@@ -44,7 +44,7 @@ Result KEvent::Signal() {
}
Result KEvent::Clear() {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
R_SUCCEED_IF(m_readable_event_destroyed);
diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
index 48ce7d9a0..f522b0a84 100644
--- a/src/core/hle/kernel/k_event.h
+++ b/src/core/hle/kernel/k_event.h
@@ -16,7 +16,7 @@ class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObj
KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
public:
- explicit KEvent(KernelCore& kernel_);
+ explicit KEvent(KernelCore& kernel);
~KEvent() override;
void Initialize(KProcess* owner);
diff --git a/src/core/hle/kernel/k_event_info.h b/src/core/hle/kernel/k_event_info.h
index 25b3ff594..eacfa5dc6 100644
--- a/src/core/hle/kernel/k_event_info.h
+++ b/src/core/hle/kernel/k_event_info.h
@@ -5,14 +5,15 @@
#include <array>
-#include <boost/intrusive/list.hpp>
+#include "common/intrusive_list.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
namespace Kernel {
-class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> {
+class KEventInfo : public KSlabAllocated<KEventInfo>,
+ public Common::IntrusiveListBaseNode<KEventInfo> {
public:
struct InfoCreateThread {
u32 thread_id{};
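
KEventInfo's hook swap (boost::intrusive::list_base_hook to Common::IntrusiveListBaseNode) keeps the same idea: the links live inside the object, so queueing an event allocates nothing. A self-contained miniature of the pattern follows; it shows only the intrusive shape and is not Common::IntrusiveList's real interface.

    #include <cstdio>

    struct Node {
        Node* prev{};
        Node* next{};
    };

    struct List {
        Node head{&head, &head}; // circular sentinel: empty when it points at itself

        void push_back(Node& n) {
            n.prev = head.prev;
            n.next = &head;
            head.prev->next = &n;
            head.prev = &n;
        }

        void erase(Node& n) {
            n.prev->next = n.next;
            n.next->prev = n.prev;
            n.prev = n.next = nullptr;
        }
    };

    // The object itself is the node, as with KEventInfo deriving the base hook.
    struct EventInfo : Node {
        int id{};
    };

    int main() {
        List list;
        EventInfo a, b;
        a.id = 1;
        b.id = 2;
        list.push_back(a);
        list.push_back(b);
        list.erase(a);
        for (Node* n = list.head.next; n != &list.head; n = n->next) {
            std::printf("id=%d\n", static_cast<EventInfo*>(n)->id); // prints id=2
        }
    }
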
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 37a24e7d9..d7660630c 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -90,7 +90,8 @@ public:
// Handle pseudo-handles.
if constexpr (std::derived_from<KProcess, T>) {
if (handle == Svc::PseudoHandle::CurrentProcess) {
- auto* const cur_process = m_kernel.CurrentProcess();
+ //! FIXME: this is the wrong process!
+ auto* const cur_process = m_kernel.ApplicationProcess();
ASSERT(cur_process != nullptr);
return cur_process;
}
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index 4a6b60d26..fe6a20168 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -16,7 +16,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
auto& current_thread = GetCurrentThread(kernel);
- if (auto* process = kernel.CurrentProcess(); process) {
+ if (auto* process = GetCurrentProcessPointer(kernel); process) {
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
KScopedSchedulerLock sl{kernel};
diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp
index cade99cfd..6d5a815aa 100644
--- a/src/core/hle/kernel/k_light_condition_variable.cpp
+++ b/src/core/hle/kernel/k_light_condition_variable.cpp
@@ -13,9 +13,9 @@ namespace {
class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
public:
- ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl,
+ ThreadQueueImplForKLightConditionVariable(KernelCore& kernel, KThread::WaiterList* wl,
bool term)
- : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
+ : KThreadQueue(kernel), m_wait_list(wl), m_allow_terminating_thread(term) {}
void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Only process waits if we're allowed to.
@@ -39,14 +39,15 @@ private:
void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
// Create thread queue.
- KThread* owner = GetCurrentThreadPointer(kernel);
+ KThread* owner = GetCurrentThreadPointer(m_kernel);
+ KHardwareTimer* timer{};
- ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list),
+ ThreadQueueImplForKLightConditionVariable wait_queue(m_kernel, std::addressof(m_wait_list),
allow_terminating_thread);
// Sleep the thread.
{
- KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
+ KScopedSchedulerLockAndSleep lk(m_kernel, std::addressof(timer), owner, timeout);
if (!allow_terminating_thread && owner->IsTerminationRequested()) {
lk.CancelSleep();
@@ -56,9 +57,10 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
lock->Unlock();
// Add the thread to the queue.
- wait_list.push_back(*owner);
+ m_wait_list.push_back(*owner);
// Begin waiting.
+ wait_queue.SetHardwareTimer(timer);
owner->BeginWait(std::addressof(wait_queue));
}
@@ -67,10 +69,10 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
}
void KLightConditionVariable::Broadcast() {
- KScopedSchedulerLock lk(kernel);
+ KScopedSchedulerLock lk(m_kernel);
// Signal all threads.
- for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
+ for (auto it = m_wait_list.begin(); it != m_wait_list.end(); it = m_wait_list.erase(it)) {
it->EndWait(ResultSuccess);
}
}
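
In userspace terms, Wait is the kernel's version of a condition-variable wait with timeout: release the lock, queue the thread (now registering the hardware timer through the wait queue), and reacquire on wake, while Broadcast walks the list and ends every wait. The std::condition_variable shape of the same protocol:

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::mutex lock;
    std::condition_variable cv;
    bool ready = false;

    void Waiter() {
        std::unique_lock lk(lock);
        // wait_for releases `lock` while sleeping, like Wait() calling
        // lock->Unlock() before pushing the thread on m_wait_list.
        cv.wait_for(lk, std::chrono::seconds(1), [] { return ready; });
        std::puts(ready ? "woken by broadcast" : "timed out");
    }

    int main() {
        std::thread t(Waiter);
        {
            std::lock_guard lk(lock);
            ready = true;
        }
        cv.notify_all(); // Broadcast(): wake every queued waiter
        t.join();
    }
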
diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h
index 3cabd6b4f..ab612426d 100644
--- a/src/core/hle/kernel/k_light_condition_variable.h
+++ b/src/core/hle/kernel/k_light_condition_variable.h
@@ -13,13 +13,13 @@ class KLightLock;
class KLightConditionVariable {
public:
- explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}
+ explicit KLightConditionVariable(KernelCore& kernel) : m_kernel{kernel} {}
void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
void Broadcast();
private:
- KernelCore& kernel;
- KThread::WaiterList wait_list{};
+ KernelCore& m_kernel;
+ KThread::WaiterList m_wait_list{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 43185320d..e87ee8b65 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -13,7 +13,7 @@ namespace {
class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
- explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
+ explicit ThreadQueueImplForKLightLock(KernelCore& kernel) : KThreadQueue(kernel) {}
void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
@@ -29,13 +29,13 @@ public:
} // namespace
void KLightLock::Lock() {
- const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+ const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));
while (true) {
- uintptr_t old_tag = tag.load(std::memory_order_relaxed);
+ uintptr_t old_tag = m_tag.load(std::memory_order_relaxed);
- while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
- std::memory_order_acquire)) {
+ while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
+ std::memory_order_acquire)) {
}
if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
@@ -45,30 +45,30 @@ void KLightLock::Lock() {
}
void KLightLock::Unlock() {
- const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+ const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));
uintptr_t expected = cur_thread;
- if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
+ if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
this->UnlockSlowPath(cur_thread);
}
}
bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
- ThreadQueueImplForKLightLock wait_queue(kernel);
+ ThreadQueueImplForKLightLock wait_queue(m_kernel);
// Pend the current thread waiting on the owner thread.
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Ensure we actually have locking to do.
- if (tag.load(std::memory_order_relaxed) != _owner) {
+ if (m_tag.load(std::memory_order_relaxed) != _owner) {
return false;
}
// Add the current thread as a waiter on the owner.
KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
- cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
+ cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
owner_thread->AddWaiter(cur_thread);
// Begin waiting to hold the lock.
@@ -87,18 +87,18 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
// Unlock.
{
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// Get the next owner.
- s32 num_waiters;
- KThread* next_owner = owner_thread->RemoveWaiterByKey(
- std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
+ bool has_waiters;
+ KThread* next_owner = owner_thread->RemoveKernelWaiterByKey(
+ std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
// Pass the lock to the next owner.
uintptr_t next_tag = 0;
if (next_owner != nullptr) {
next_tag =
- reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);
+ reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(has_waiters);
next_owner->EndWait(ResultSuccess);
@@ -114,12 +114,13 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
}
// Write the new tag value.
- tag.store(next_tag, std::memory_order_release);
+ m_tag.store(next_tag, std::memory_order_release);
}
}
bool KLightLock::IsLockedByCurrentThread() const {
- return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL);
+ return (m_tag.load() | 1ULL) ==
+ (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel)) | 1ULL);
}
} // namespace Kernel
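
KLightLock packs the whole lock state into one word: 0 means unlocked, otherwise the tag is the owner thread pointer, with bit 0 borrowed (pointers are aligned, so it is always free) to mean "waiters exist". That is why IsLockedByCurrentThread can OR both sides with 1 before comparing. A sketch of just the encoding, with illustrative names:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct Thread {
        int id;
    };

    std::atomic<std::uintptr_t> tag{0};

    // Uncontended Lock() fast path: 0 -> owner pointer.
    bool TryLock(Thread* cur) {
        std::uintptr_t expected = 0;
        return tag.compare_exchange_strong(expected, reinterpret_cast<std::uintptr_t>(cur),
                                           std::memory_order_acquire);
    }

    // Ignore the waiter bit, exactly like the diff's (m_tag | 1) == (cur | 1).
    bool IsLockedByCurrentThread(const Thread* cur) {
        return (tag.load() | 1) == (reinterpret_cast<std::uintptr_t>(cur) | 1);
    }

    int main() {
        alignas(2) Thread me{1};
        std::printf("locked=%d\n", TryLock(&me));
        std::printf("mine=%d\n", IsLockedByCurrentThread(&me));
    }

The unlock slow path mirrors this: the next owner's pointer becomes the new tag, and has_waiters (rather than the old num_waiters > 1 count) decides whether bit 0 is set again.
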
diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h
index 7edd950c0..626f57596 100644
--- a/src/core/hle/kernel/k_light_lock.h
+++ b/src/core/hle/kernel/k_light_lock.h
@@ -13,7 +13,7 @@ class KernelCore;
class KLightLock {
public:
- explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {}
+ explicit KLightLock(KernelCore& kernel) : m_kernel{kernel} {}
void Lock();
@@ -24,14 +24,14 @@ public:
void UnlockSlowPath(uintptr_t cur_thread);
bool IsLocked() const {
- return tag != 0;
+ return m_tag.load() != 0;
}
bool IsLockedByCurrentThread() const;
private:
- std::atomic<uintptr_t> tag{};
- KernelCore& kernel;
+ std::atomic<uintptr_t> m_tag{};
+ KernelCore& m_kernel;
};
using KScopedLightLock = KScopedLock<KLightLock>;
diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h
deleted file mode 100644
index 29ebd16b7..000000000
--- a/src/core/hle/kernel/k_linked_list.h
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <boost/intrusive/list.hpp>
-
-#include "common/assert.h"
-#include "core/hle/kernel/slab_helpers.h"
-
-namespace Kernel {
-
-class KernelCore;
-
-class KLinkedListNode : public boost::intrusive::list_base_hook<>,
- public KSlabAllocated<KLinkedListNode> {
-
-public:
- explicit KLinkedListNode(KernelCore&) {}
- KLinkedListNode() = default;
-
- void Initialize(void* it) {
- m_item = it;
- }
-
- void* GetItem() const {
- return m_item;
- }
-
-private:
- void* m_item = nullptr;
-};
-
-template <typename T>
-class KLinkedList : private boost::intrusive::list<KLinkedListNode> {
-private:
- using BaseList = boost::intrusive::list<KLinkedListNode>;
-
-public:
- template <bool Const>
- class Iterator;
-
- using value_type = T;
- using size_type = size_t;
- using difference_type = ptrdiff_t;
- using pointer = value_type*;
- using const_pointer = const value_type*;
- using reference = value_type&;
- using const_reference = const value_type&;
- using iterator = Iterator<false>;
- using const_iterator = Iterator<true>;
- using reverse_iterator = std::reverse_iterator<iterator>;
- using const_reverse_iterator = std::reverse_iterator<const_iterator>;
-
- template <bool Const>
- class Iterator {
- private:
- using BaseIterator = BaseList::iterator;
- friend class KLinkedList;
-
- public:
- using iterator_category = std::bidirectional_iterator_tag;
- using value_type = typename KLinkedList::value_type;
- using difference_type = typename KLinkedList::difference_type;
- using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
- using reference =
- std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;
-
- public:
- explicit Iterator(BaseIterator it) : m_base_it(it) {}
-
- pointer GetItem() const {
- return static_cast<pointer>(m_base_it->GetItem());
- }
-
- bool operator==(const Iterator& rhs) const {
- return m_base_it == rhs.m_base_it;
- }
-
- bool operator!=(const Iterator& rhs) const {
- return !(*this == rhs);
- }
-
- pointer operator->() const {
- return this->GetItem();
- }
-
- reference operator*() const {
- return *this->GetItem();
- }
-
- Iterator& operator++() {
- ++m_base_it;
- return *this;
- }
-
- Iterator& operator--() {
- --m_base_it;
- return *this;
- }
-
- Iterator operator++(int) {
- const Iterator it{*this};
- ++(*this);
- return it;
- }
-
- Iterator operator--(int) {
- const Iterator it{*this};
- --(*this);
- return it;
- }
-
- operator Iterator<true>() const {
- return Iterator<true>(m_base_it);
- }
-
- private:
- BaseIterator m_base_it;
- };
-
-public:
- constexpr KLinkedList(KernelCore& kernel_) : BaseList(), kernel{kernel_} {}
-
- ~KLinkedList() {
- // Erase all elements.
- for (auto it = begin(); it != end(); it = erase(it)) {
- }
-
- // Ensure we succeeded.
- ASSERT(this->empty());
- }
-
- // Iterator accessors.
- iterator begin() {
- return iterator(BaseList::begin());
- }
-
- const_iterator begin() const {
- return const_iterator(BaseList::begin());
- }
-
- iterator end() {
- return iterator(BaseList::end());
- }
-
- const_iterator end() const {
- return const_iterator(BaseList::end());
- }
-
- const_iterator cbegin() const {
- return this->begin();
- }
-
- const_iterator cend() const {
- return this->end();
- }
-
- reverse_iterator rbegin() {
- return reverse_iterator(this->end());
- }
-
- const_reverse_iterator rbegin() const {
- return const_reverse_iterator(this->end());
- }
-
- reverse_iterator rend() {
- return reverse_iterator(this->begin());
- }
-
- const_reverse_iterator rend() const {
- return const_reverse_iterator(this->begin());
- }
-
- const_reverse_iterator crbegin() const {
- return this->rbegin();
- }
-
- const_reverse_iterator crend() const {
- return this->rend();
- }
-
- // Content management.
- using BaseList::empty;
- using BaseList::size;
-
- reference back() {
- return *(--this->end());
- }
-
- const_reference back() const {
- return *(--this->end());
- }
-
- reference front() {
- return *this->begin();
- }
-
- const_reference front() const {
- return *this->begin();
- }
-
- iterator insert(const_iterator pos, reference ref) {
- KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel);
- ASSERT(new_node != nullptr);
- new_node->Initialize(std::addressof(ref));
- return iterator(BaseList::insert(pos.m_base_it, *new_node));
- }
-
- void push_back(reference ref) {
- this->insert(this->end(), ref);
- }
-
- void push_front(reference ref) {
- this->insert(this->begin(), ref);
- }
-
- void pop_back() {
- this->erase(--this->end());
- }
-
- void pop_front() {
- this->erase(this->begin());
- }
-
- iterator erase(const iterator pos) {
- KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
- iterator ret = iterator(BaseList::erase(pos.m_base_it));
- KLinkedListNode::Free(kernel, freed_node);
-
- return ret;
- }
-
-private:
- KernelCore& kernel;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 87ca65592..41a29da24 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -5,8 +5,8 @@
#include "common/alignment.h"
#include "common/assert.h"
-#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h"
@@ -282,7 +282,7 @@ class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>
private:
u16 m_device_disable_merge_left_count{};
u16 m_device_disable_merge_right_count{};
- VAddr m_address{};
+ KProcessAddress m_address{};
size_t m_num_pages{};
KMemoryState m_memory_state{KMemoryState::None};
u16 m_ipc_lock_count{};
@@ -306,7 +306,7 @@ public:
}
public:
- constexpr VAddr GetAddress() const {
+ constexpr KProcessAddress GetAddress() const {
return m_address;
}
@@ -318,11 +318,11 @@ public:
return this->GetNumPages() * PageSize;
}
- constexpr VAddr GetEndAddress() const {
+ constexpr KProcessAddress GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
- constexpr VAddr GetLastAddress() const {
+ constexpr KProcessAddress GetLastAddress() const {
return this->GetEndAddress() - 1;
}
@@ -348,7 +348,7 @@ public:
constexpr KMemoryInfo GetMemoryInfo() const {
return {
- .m_address = this->GetAddress(),
+ .m_address = GetInteger(this->GetAddress()),
.m_size = this->GetSize(),
.m_state = m_memory_state,
.m_device_disable_merge_left_count = m_device_disable_merge_left_count,
@@ -366,12 +366,12 @@ public:
public:
explicit KMemoryBlock() = default;
- constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+ constexpr KMemoryBlock(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr)
: Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(), m_address(addr), m_num_pages(np),
m_memory_state(ms), m_permission(p), m_attribute(attr) {}
- constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+ constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr) {
m_device_disable_merge_left_count = 0;
m_device_disable_merge_right_count = 0;
@@ -408,7 +408,7 @@ public:
KMemoryBlockDisableMergeAttribute::None;
}
- constexpr bool Contains(VAddr addr) const {
+ constexpr bool Contains(KProcessAddress addr) const {
return this->GetAddress() <= addr && addr <= this->GetEndAddress();
}
@@ -443,10 +443,10 @@ public:
}
}
- constexpr void Split(KMemoryBlock* block, VAddr addr) {
+ constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
ASSERT(this->GetAddress() < addr);
ASSERT(this->Contains(addr));
- ASSERT(Common::IsAligned(addr, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
block->m_address = m_address;
block->m_num_pages = (addr - this->GetAddress()) / PageSize;
@@ -471,8 +471,8 @@ public:
m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
}
- constexpr void UpdateDeviceDisableMergeStateForShareLeft(
- [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+ constexpr void UpdateDeviceDisableMergeStateForShareLeft(KMemoryPermission new_perm, bool left,
+ bool right) {
// New permission/right aren't used.
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
@@ -482,8 +482,8 @@ public:
}
}
- constexpr void UpdateDeviceDisableMergeStateForShareRight(
- [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+ constexpr void UpdateDeviceDisableMergeStateForShareRight(KMemoryPermission new_perm, bool left,
+ bool right) {
// New permission/left aren't used.
if (right) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
@@ -499,8 +499,7 @@ public:
this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
}
- constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
- bool right) {
+ constexpr void ShareToDevice(KMemoryPermission new_perm, bool left, bool right) {
// New permission isn't used.
// We must either be shared or have a zero lock count.
@@ -516,8 +515,8 @@ public:
this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
}
- constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
- [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+ constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(KMemoryPermission new_perm,
+ bool left, bool right) {
// New permission/right aren't used.
if (left) {
@@ -536,8 +535,8 @@ public:
}
}
- constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
- [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+ constexpr void UpdateDeviceDisableMergeStateForUnshareRight(KMemoryPermission new_perm,
+ bool left, bool right) {
// New permission/left aren't used.
if (right) {
@@ -556,8 +555,7 @@ public:
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
- constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
- bool right) {
+ constexpr void UnshareToDevice(KMemoryPermission new_perm, bool left, bool right) {
// New permission isn't used.
// We must be shared.
@@ -575,8 +573,7 @@ public:
this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
}
- constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
- bool right) {
+ constexpr void UnshareToDeviceRight(KMemoryPermission new_perm, bool left, bool right) {
// New permission isn't used.
// We must be shared.
@@ -594,7 +591,7 @@ public:
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
- constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+ constexpr void LockForIpc(KMemoryPermission new_perm, bool left, bool right) {
// We must either be locked or have a zero lock count.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
m_ipc_lock_count == 0);
@@ -626,8 +623,7 @@ public:
}
}
- constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
- [[maybe_unused]] bool right) {
+ constexpr void UnlockForIpc(KMemoryPermission new_perm, bool left, bool right) {
// New permission isn't used.
// We must be locked.
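
The GetInteger() calls threaded through these rewritten asserts are the explicit escape hatch out of the new typed-address wrappers. For reference, a minimal sketch of the strong-typedef idea, with illustrative names only (the real k_typed_address.h carries address-space tag types and a fuller operator set):

#include <compare>
#include <cstdint>

class ProcessAddress {
public:
    constexpr ProcessAddress() = default;
    constexpr ProcessAddress(uint64_t address) : m_address(address) {}

    constexpr ProcessAddress operator+(uint64_t offset) const {
        return ProcessAddress(m_address + offset);
    }
    constexpr uint64_t operator-(ProcessAddress rhs) const {
        return m_address - rhs.m_address;
    }
    constexpr auto operator<=>(const ProcessAddress&) const = default;

    constexpr uint64_t Value() const {
        return m_address;
    }

private:
    uint64_t m_address{};
};

// The explicit unwrap used by the rewritten asserts, e.g.
// ASSERT(Common::IsAligned(GetInteger(addr), PageSize)).
constexpr uint64_t GetInteger(ProcessAddress address) {
    return address.Value();
}

static_assert(GetInteger(ProcessAddress(0x2000) + 0x1000) == 0x3000);

Keeping the unwrap explicit is the point of the migration: raw integer arithmetic on an address now has to announce itself at the call site.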
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index cf4c1e371..ab75f550e 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -7,7 +7,8 @@ namespace Kernel {
KMemoryBlockManager::KMemoryBlockManager() = default;
-Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
+Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd,
+ KMemoryBlockSlabManager* slab_manager) {
// Allocate a block to encapsulate the address space, insert it into the tree.
KMemoryBlock* start_block = slab_manager->Allocate();
R_UNLESS(start_block != nullptr, ResultOutOfResource);
@@ -15,8 +16,8 @@ Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManag
// Set our start and end.
m_start_address = st;
m_end_address = nd;
- ASSERT(Common::IsAligned(m_start_address, PageSize));
- ASSERT(Common::IsAligned(m_end_address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(m_start_address), PageSize));
+ ASSERT(Common::IsAligned(GetInteger(m_end_address), PageSize));
// Initialize and insert the block.
start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
@@ -40,12 +41,13 @@ void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
ASSERT(m_memory_block_tree.empty());
}
-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
- size_t num_pages, size_t alignment, size_t offset,
- size_t guard_pages) const {
+KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start,
+ size_t region_num_pages, size_t num_pages,
+ size_t alignment, size_t offset,
+ size_t guard_pages) const {
if (num_pages > 0) {
- const VAddr region_end = region_start + region_num_pages * PageSize;
- const VAddr region_last = region_end - 1;
+ const KProcessAddress region_end = region_start + region_num_pages * PageSize;
+ const KProcessAddress region_last = region_end - 1;
for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
it++) {
const KMemoryInfo info = it->GetMemoryInfo();
@@ -56,17 +58,19 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
continue;
}
- VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
+ KProcessAddress area =
+ (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress();
area += guard_pages * PageSize;
- const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
+ const KProcessAddress offset_area =
+ Common::AlignDown(GetInteger(area), alignment) + offset;
area = (area <= offset_area) ? offset_area : offset_area + alignment;
- const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
- const VAddr area_last = area_end - 1;
+ const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize;
+ const KProcessAddress area_last = area_end - 1;
- if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
- area_last <= info.GetLastAddress()) {
+ if (info.GetAddress() <= GetInteger(area) && area < area_last &&
+ area_last <= region_last && area_last <= info.GetLastAddress()) {
return area;
}
}
@@ -76,7 +80,7 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
}
void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
- VAddr address, size_t num_pages) {
+ KProcessAddress address, size_t num_pages) {
// Find the iterator now that we've updated.
iterator it = this->FindIterator(address);
if (address != m_start_address) {
@@ -104,18 +108,18 @@ void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator*
}
}
-void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
- size_t num_pages, KMemoryState state, KMemoryPermission perm,
- KMemoryAttribute attr,
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
+ KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm, KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
- VAddr cur_address = address;
+ KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
@@ -168,17 +172,17 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
}
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
- VAddr address, size_t num_pages, KMemoryState test_state,
- KMemoryPermission test_perm, KMemoryAttribute test_attr,
- KMemoryState state, KMemoryPermission perm,
- KMemoryAttribute attr) {
+ KProcessAddress address, size_t num_pages,
+ KMemoryState test_state, KMemoryPermission test_perm,
+ KMemoryAttribute test_attr, KMemoryState state,
+ KMemoryPermission perm, KMemoryAttribute attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
- VAddr cur_address = address;
+ KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
@@ -230,18 +234,18 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
this->CoalesceForUpdate(allocator, address, num_pages);
}
-void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
- size_t num_pages, MemoryBlockLockFunction lock_func,
- KMemoryPermission perm) {
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator,
+ KProcessAddress address, size_t num_pages,
+ MemoryBlockLockFunction lock_func, KMemoryPermission perm) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
- VAddr cur_address = address;
+ KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
- const VAddr end_address = address + (num_pages * PageSize);
+ const KProcessAddress end_address = address + (num_pages * PageSize);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
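
FindFreeArea's candidate math is easy to lose in the diff noise. A plain-integer restatement of the per-extent computation, under the simplifying assumption that the free extent already lies inside the searched region:

#include <cstddef>
#include <cstdint>
#include <optional>

constexpr uint64_t PageSize = 0x1000;

constexpr uint64_t AlignDown(uint64_t value, uint64_t align) {
    return value - (value % align);
}

std::optional<uint64_t> FindInExtent(uint64_t free_first, uint64_t free_last,
                                     size_t num_pages, size_t alignment,
                                     size_t offset, size_t guard_pages) {
    // Skip the leading guard pages, then snap to alignment + offset,
    // rounding up when the snap moved the candidate backwards.
    uint64_t area = free_first + guard_pages * PageSize;
    const uint64_t offset_area = AlignDown(area, alignment) + offset;
    area = (area <= offset_area) ? offset_area : offset_area + alignment;

    // The pages plus their trailing guard must stay inside the extent.
    const uint64_t area_last = area + (num_pages + guard_pages) * PageSize - 1;
    if (free_first <= area && area_last <= free_last) {
        return area;
    }
    return std::nullopt;
}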
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index d382722a6..96496e990 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -7,9 +7,9 @@
#include <functional>
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -85,9 +85,10 @@ public:
public:
KMemoryBlockManager();
- using HostUnmapCallback = std::function<void(VAddr, u64)>;
+ using HostUnmapCallback = std::function<void(Common::ProcessAddress, u64)>;
- Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+ Result Initialize(KProcessAddress st, KProcessAddress nd,
+ KMemoryBlockSlabManager* slab_manager);
void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
iterator end() {
@@ -100,27 +101,28 @@ public:
return m_memory_block_tree.cend();
}
- VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
- size_t alignment, size_t offset, size_t guard_pages) const;
+ KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) const;
- void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
- KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+ void Update(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+ size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr);
- void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
- MemoryBlockLockFunction lock_func, KMemoryPermission perm);
+ void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+ size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm);
- void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+ void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr);
- iterator FindIterator(VAddr address) const {
+ iterator FindIterator(KProcessAddress address) const {
return m_memory_block_tree.find(KMemoryBlock(
address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
}
- const KMemoryBlock* FindBlock(VAddr address) const {
+ const KMemoryBlock* FindBlock(KProcessAddress address) const {
if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
return std::addressof(*it);
}
@@ -132,24 +134,20 @@ public:
bool CheckState() const;
private:
- void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+ void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages);
MemoryBlockTree m_memory_block_tree;
- VAddr m_start_address{};
- VAddr m_end_address{};
+ KProcessAddress m_start_address{};
+ KProcessAddress m_end_address{};
};
class KScopedMemoryBlockManagerAuditor {
public:
- explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
- ASSERT(m_manager->CheckState());
- }
+ explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {}
explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
: KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
- ~KScopedMemoryBlockManagerAuditor() {
- ASSERT(m_manager->CheckState());
- }
+ ~KScopedMemoryBlockManagerAuditor() = default;
private:
KMemoryBlockManager* m_manager;
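
The auditor hunk above turns both audit points into no-ops. For context, this is the shape of the scoped-audit idiom it previously implemented, as a generic sketch:

#include <cassert>

template <typename Manager>
class ScopedAuditor {
public:
    explicit ScopedAuditor(Manager* m) : m_manager(m) {
        assert(m_manager->CheckState()); // audit on entry
    }
    ~ScopedAuditor() {
        assert(m_manager->CheckState()); // audit on exit
    }

private:
    Manager* m_manager;
};

With the asserts removed, constructing the auditor still documents intent at each Update call site, but CheckState() is no longer exercised.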
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
index 72c3ee4b7..af40092c0 100644
--- a/src/core/hle/kernel/k_memory_layout.cpp
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -18,11 +18,11 @@ KMemoryRegion* AllocateRegion(KMemoryRegionAllocator& memory_region_allocator, A
} // namespace
-KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_)
- : memory_region_allocator{memory_region_allocator_} {}
+KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator)
+ : m_memory_region_allocator{memory_region_allocator} {}
void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) {
- this->insert(*AllocateRegion(memory_region_allocator, address, last_address, attr, type_id));
+ this->insert(*AllocateRegion(m_memory_region_allocator, address, last_address, attr, type_id));
}
bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
@@ -69,7 +69,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
? old_pair + (address - old_address)
: old_pair;
- this->insert(*AllocateRegion(memory_region_allocator, address, inserted_region_last,
+ this->insert(*AllocateRegion(m_memory_region_allocator, address, inserted_region_last,
new_pair, new_attr, type_id));
}
@@ -78,14 +78,15 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
? old_pair + (inserted_region_end - old_address)
: old_pair;
- this->insert(*AllocateRegion(memory_region_allocator, inserted_region_end, old_last,
+ this->insert(*AllocateRegion(m_memory_region_allocator, inserted_region_end, old_last,
after_pair, old_attr, old_type));
}
return true;
}
-VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
+KVirtualAddress KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment,
+ u32 type_id) {
// We want to find the total extents of the type id.
const auto extents = this->GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));
@@ -126,14 +127,17 @@ VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u
}
KMemoryLayout::KMemoryLayout()
- : virtual_tree{memory_region_allocator}, physical_tree{memory_region_allocator},
- virtual_linear_tree{memory_region_allocator}, physical_linear_tree{memory_region_allocator} {}
+ : m_virtual_tree{m_memory_region_allocator}, m_physical_tree{m_memory_region_allocator},
+ m_virtual_linear_tree{m_memory_region_allocator}, m_physical_linear_tree{
+ m_memory_region_allocator} {}
-void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
- VAddr linear_virtual_start) {
+void KMemoryLayout::InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start,
+ KVirtualAddress linear_virtual_start) {
// Set static differences.
- linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
- linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
+ m_linear_phys_to_virt_diff =
+ GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
+ m_linear_virt_to_phys_diff =
+ GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);
// Initialize linear trees.
for (auto& region : GetPhysicalMemoryRegionTree()) {
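
The two stored differences make each direction of the linear mapping a single unsigned add. A self-contained restatement with made-up base addresses:

#include <cstdint>

struct LinearDiffs {
    uint64_t phys_to_virt;
    uint64_t virt_to_phys;
};

constexpr LinearDiffs MakeDiffs(uint64_t phys_start, uint64_t virt_start) {
    return {virt_start - phys_start, phys_start - virt_start};
}

constexpr uint64_t ToVirtual(LinearDiffs d, uint64_t phys) {
    return phys + d.phys_to_virt;
}
constexpr uint64_t ToPhysical(LinearDiffs d, uint64_t virt) {
    return virt + d.virt_to_phys;
}

// Unsigned wraparound makes the pair self-inverse even when virt > phys.
constexpr LinearDiffs kDiffs = MakeDiffs(0x80000000ULL, 0xffff800000000000ULL);
static_assert(ToPhysical(kDiffs, ToVirtual(kDiffs, 0x80001000ULL)) == 0x80001000ULL);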
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index fd6e1d3e6..54a71df56 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -10,6 +10,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_region.h"
#include "core/hle/kernel/k_memory_region_type.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel {
@@ -67,12 +68,13 @@ constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize +
KernelSlabHeapSize + KernelPageBufferHeapSize;
-constexpr bool IsKernelAddressKey(VAddr key) {
- return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
-}
+//! NB: Use KThread::GetAddressKeyIsKernel().
+//! See explanation for deviation of GetAddressKey.
+bool IsKernelAddressKey(KProcessAddress key) = delete;
-constexpr bool IsKernelAddress(VAddr address) {
- return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
+constexpr bool IsKernelAddress(KProcessAddress address) {
+ return KernelVirtualAddressSpaceBase <= GetInteger(address) &&
+ address < KernelVirtualAddressSpaceEnd;
}
class KMemoryLayout final {
@@ -80,62 +82,62 @@ public:
KMemoryLayout();
KMemoryRegionTree& GetVirtualMemoryRegionTree() {
- return virtual_tree;
+ return m_virtual_tree;
}
const KMemoryRegionTree& GetVirtualMemoryRegionTree() const {
- return virtual_tree;
+ return m_virtual_tree;
}
KMemoryRegionTree& GetPhysicalMemoryRegionTree() {
- return physical_tree;
+ return m_physical_tree;
}
const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const {
- return physical_tree;
+ return m_physical_tree;
}
KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() {
- return virtual_linear_tree;
+ return m_virtual_linear_tree;
}
const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const {
- return virtual_linear_tree;
+ return m_virtual_linear_tree;
}
KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() {
- return physical_linear_tree;
+ return m_physical_linear_tree;
}
const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const {
- return physical_linear_tree;
+ return m_physical_linear_tree;
}
- VAddr GetLinearVirtualAddress(PAddr address) const {
- return address + linear_phys_to_virt_diff;
+ KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) const {
+ return GetInteger(address) + m_linear_phys_to_virt_diff;
}
- PAddr GetLinearPhysicalAddress(VAddr address) const {
- return address + linear_virt_to_phys_diff;
+ KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress address) const {
+ return GetInteger(address) + m_linear_virt_to_phys_diff;
}
- const KMemoryRegion* FindVirtual(VAddr address) const {
+ const KMemoryRegion* FindVirtual(KVirtualAddress address) const {
return Find(address, GetVirtualMemoryRegionTree());
}
- const KMemoryRegion* FindPhysical(PAddr address) const {
+ const KMemoryRegion* FindPhysical(KPhysicalAddress address) const {
return Find(address, GetPhysicalMemoryRegionTree());
}
- const KMemoryRegion* FindVirtualLinear(VAddr address) const {
+ const KMemoryRegion* FindVirtualLinear(KVirtualAddress address) const {
return Find(address, GetVirtualLinearMemoryRegionTree());
}
- const KMemoryRegion* FindPhysicalLinear(PAddr address) const {
+ const KMemoryRegion* FindPhysicalLinear(KPhysicalAddress address) const {
return Find(address, GetPhysicalLinearMemoryRegionTree());
}
- VAddr GetMainStackTopAddress(s32 core_id) const {
+ KVirtualAddress GetMainStackTopAddress(s32 core_id) const {
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack);
}
- VAddr GetIdleStackTopAddress(s32 core_id) const {
+ KVirtualAddress GetIdleStackTopAddress(s32 core_id) const {
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack);
}
- VAddr GetExceptionStackTopAddress(s32 core_id) const {
+ KVirtualAddress GetExceptionStackTopAddress(s32 core_id) const {
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
}
- VAddr GetSlabRegionAddress() const {
+ KVirtualAddress GetSlabRegionAddress() const {
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
.GetAddress();
}
@@ -143,10 +145,10 @@ public:
const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
}
- PAddr GetDevicePhysicalAddress(KMemoryRegionType type) const {
+ KPhysicalAddress GetDevicePhysicalAddress(KMemoryRegionType type) const {
return GetDeviceRegion(type).GetAddress();
}
- VAddr GetDeviceVirtualAddress(KMemoryRegionType type) const {
+ KVirtualAddress GetDeviceVirtualAddress(KMemoryRegionType type) const {
return GetDeviceRegion(type).GetPairAddress();
}
@@ -175,11 +177,11 @@ public:
KMemoryRegionType_VirtualDramKernelSecureAppletMemory));
}
- const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
+ const KMemoryRegion& GetVirtualLinearRegion(KVirtualAddress address) const {
return Dereference(FindVirtualLinear(address));
}
- const KMemoryRegion& GetPhysicalLinearRegion(PAddr address) const {
+ const KMemoryRegion& GetPhysicalLinearRegion(KPhysicalAddress address) const {
return Dereference(FindPhysicalLinear(address));
}
@@ -193,29 +195,32 @@ public:
return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB);
}
- bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+ bool IsHeapPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address) const {
return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
KMemoryRegionType_DramUserPool);
}
- bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address) const {
+ bool IsHeapVirtualAddress(const KMemoryRegion*& region, KVirtualAddress address) const {
return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(),
KMemoryRegionType_VirtualDramUserPool);
}
- bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address, size_t size) const {
+ bool IsHeapPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address,
+ size_t size) const {
return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
KMemoryRegionType_DramUserPool);
}
- bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address, size_t size) const {
+ bool IsHeapVirtualAddress(const KMemoryRegion*& region, KVirtualAddress address,
+ size_t size) const {
return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(),
KMemoryRegionType_VirtualDramUserPool);
}
- bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+ bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region,
+ KPhysicalAddress address) const {
return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
}
- bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address,
+ bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address,
size_t size) const {
return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
@@ -234,8 +239,8 @@ public:
return std::make_pair(total_size, kernel_size);
}
- void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
- VAddr linear_virtual_start);
+ void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start,
+ KVirtualAddress linear_virtual_start);
static size_t GetResourceRegionSizeForInit(bool use_extra_resource);
auto GetKernelRegionExtents() const {
@@ -261,8 +266,8 @@ public:
auto GetLinearRegionVirtualExtents() const {
const auto physical = GetLinearRegionPhysicalExtents();
- return KMemoryRegion(GetLinearVirtualAddress(physical.GetAddress()),
- GetLinearVirtualAddress(physical.GetLastAddress()), 0,
+ return KMemoryRegion(GetInteger(GetLinearVirtualAddress(physical.GetAddress())),
+ GetInteger(GetLinearVirtualAddress(physical.GetLastAddress())), 0,
KMemoryRegionType_None);
}
@@ -334,12 +339,12 @@ private:
static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address,
const KMemoryRegionTree& tree, KMemoryRegionType type) {
// Check if the cached region already contains the address.
- if (region != nullptr && region->Contains(address)) {
+ if (region != nullptr && region->Contains(GetInteger(address))) {
return true;
}
// Find the containing region, and update the cache.
- if (const KMemoryRegion* found = tree.Find(address);
+ if (const KMemoryRegion* found = tree.Find(GetInteger(address));
found != nullptr && found->IsDerivedFrom(type)) {
region = found;
return true;
@@ -352,11 +357,12 @@ private:
static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, size_t size,
const KMemoryRegionTree& tree, KMemoryRegionType type) {
// Get the end of the checked region.
- const u64 last_address = address + size - 1;
+ const u64 last_address = GetInteger(address) + size - 1;
// Walk the tree to verify the region is correct.
- const KMemoryRegion* cur =
- (region != nullptr && region->Contains(address)) ? region : tree.Find(address);
+ const KMemoryRegion* cur = (region != nullptr && region->Contains(GetInteger(address)))
+ ? region
+ : tree.Find(GetInteger(address));
while (cur != nullptr && cur->IsDerivedFrom(type)) {
if (last_address <= cur->GetLastAddress()) {
region = cur;
@@ -370,7 +376,7 @@ private:
template <typename AddressType>
static const KMemoryRegion* Find(AddressType address, const KMemoryRegionTree& tree) {
- return tree.Find(address);
+ return tree.Find(GetInteger(address));
}
static KMemoryRegion& Dereference(KMemoryRegion* region) {
@@ -383,7 +389,7 @@ private:
return *region;
}
- VAddr GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
+ KVirtualAddress GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
const auto& region = Dereference(
GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast<u32>(core_id)));
ASSERT(region.GetEndAddress() != 0);
@@ -391,13 +397,13 @@ private:
}
private:
- u64 linear_phys_to_virt_diff{};
- u64 linear_virt_to_phys_diff{};
- KMemoryRegionAllocator memory_region_allocator;
- KMemoryRegionTree virtual_tree;
- KMemoryRegionTree physical_tree;
- KMemoryRegionTree virtual_linear_tree;
- KMemoryRegionTree physical_linear_tree;
+ u64 m_linear_phys_to_virt_diff{};
+ u64 m_linear_virt_to_phys_diff{};
+ KMemoryRegionAllocator m_memory_region_allocator;
+ KMemoryRegionTree m_virtual_tree;
+ KMemoryRegionTree m_physical_tree;
+ KMemoryRegionTree m_virtual_linear_tree;
+ KMemoryRegionTree m_physical_linear_tree;
};
namespace Init {
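
IsTypedAddress leans on a caller-owned cache pointer to skip tree walks. A sketch of that caching contract over a hypothetical std::map-backed tree (the real KMemoryRegionTree is an intrusive red-black tree):

#include <cstdint>
#include <map>

struct Region {
    uint64_t first;
    uint64_t last;
    int type;
    bool Contains(uint64_t a) const {
        return first <= a && a <= last;
    }
};

// Hypothetical stand-in for the region tree, keyed by first address.
using Tree = std::map<uint64_t, Region>;

const Region* Find(const Tree& tree, uint64_t address) {
    auto it = tree.upper_bound(address);
    if (it == tree.begin()) {
        return nullptr;
    }
    --it;
    return it->second.Contains(address) ? &it->second : nullptr;
}

// Mirrors IsTypedAddress: trust the caller's cached region while it still
// contains the address, otherwise search the tree and refresh the cache.
bool IsTyped(const Region*& region, const Tree& tree, uint64_t address, int type) {
    if (region != nullptr && region->Contains(address)) {
        return true;
    }
    if (const Region* found = Find(tree, address); found != nullptr && found->type == type) {
        region = found;
        return true;
    }
    return false;
}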
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index cd6ea388e..74d8169e0 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -5,7 +5,6 @@
#include "common/alignment.h"
#include "common/assert.h"
-#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/device_memory.h"
@@ -44,10 +43,10 @@ KMemoryManager::KMemoryManager(Core::System& system)
KLightLock{system.Kernel()},
} {}
-void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
+void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
// Clear the management region to zero.
- const VAddr management_region_end = management_region + management_region_size;
+ const KVirtualAddress management_region_end = management_region + management_region_size;
// std::memset(GetVoidPointer(management_region), 0, management_region_size);
// Reset our manager count.
@@ -56,7 +55,7 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
// Traverse the virtual memory layout tree, initializing each manager as appropriate.
while (m_num_managers != MaxManagerCount) {
// Locate the region that should initialize the current manager.
- PAddr region_address = 0;
+ KPhysicalAddress region_address = 0;
size_t region_size = 0;
Pool region_pool = Pool::Count;
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
@@ -70,8 +69,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
continue;
}
- const PAddr cur_start = it.GetAddress();
- const PAddr cur_end = it.GetEndAddress();
+ const KPhysicalAddress cur_start = it.GetAddress();
+ const KPhysicalAddress cur_end = it.GetEndAddress();
// Validate the region.
ASSERT(cur_end != 0);
@@ -119,17 +118,17 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
// Free each region to its corresponding heap.
size_t reserved_sizes[MaxManagerCount] = {};
- const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
- const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
- const PAddr ini_last = ini_end - 1;
+ const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
+ const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
+ const KPhysicalAddress ini_last = ini_end - 1;
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
// Get the manager for the region.
auto& manager = m_managers[it.GetAttributes()];
- const PAddr cur_start = it.GetAddress();
- const PAddr cur_last = it.GetLastAddress();
- const PAddr cur_end = it.GetEndAddress();
+ const KPhysicalAddress cur_start = it.GetAddress();
+ const KPhysicalAddress cur_last = it.GetLastAddress();
+ const KPhysicalAddress cur_end = it.GetEndAddress();
if (cur_start <= ini_start && ini_last <= cur_last) {
// Free memory before the ini to the heap.
@@ -175,7 +174,8 @@ void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
UNREACHABLE();
}
-PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
+KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
+ u32 option) {
// Early return if we're allocating no pages.
if (num_pages == 0) {
return 0;
@@ -190,7 +190,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
// Loop, trying to iterate from each block.
Impl* chosen_manager = nullptr;
- PAddr allocated_block = 0;
+ KPhysicalAddress allocated_block = 0;
for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
chosen_manager = this->GetNextManager(chosen_manager, dir)) {
allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
@@ -239,7 +239,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
cur_manager = this->GetNextManager(cur_manager, dir)) {
while (num_pages >= pages_per_alloc) {
// Allocate a block.
- PAddr allocated_block = cur_manager->AllocateBlock(index, random);
+ KPhysicalAddress allocated_block = cur_manager->AllocateBlock(index, random);
if (allocated_block == 0) {
break;
}
@@ -286,7 +286,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
// Open the first reference to the pages.
for (const auto& block : *out) {
- PAddr cur_address = block.GetAddress();
+ KPhysicalAddress cur_address = block.GetAddress();
size_t remaining_pages = block.GetNumPages();
while (remaining_pages > 0) {
// Get the manager for the current address.
@@ -337,7 +337,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Iterate over the allocated blocks.
for (const auto& block : *out) {
// Get the block extents.
- const PAddr block_address = block.GetAddress();
+ const KPhysicalAddress block_address = block.GetAddress();
const size_t block_pages = block.GetNumPages();
// If it has no pages, we don't need to do anything.
@@ -348,7 +348,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Fill all the pages that we need to fill.
bool any_new = false;
{
- PAddr cur_address = block_address;
+ KPhysicalAddress cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
// Get the manager for the current address.
@@ -369,7 +369,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// If there are new pages, update tracking for the allocation.
if (any_new) {
// Update tracking for the allocation.
- PAddr cur_address = block_address;
+ KPhysicalAddress cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
// Get the manager for the current address.
@@ -400,8 +400,9 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
R_SUCCEED();
}
-size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
- VAddr management_end, Pool p) {
+size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
+ KVirtualAddress management, KVirtualAddress management_end,
+ Pool p) {
// Calculate management sizes.
const size_t ref_count_size = (size / PageSize) * sizeof(u16);
const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
@@ -417,7 +418,7 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
m_management_region = management;
m_page_reference_counts.resize(
Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
- ASSERT(Common::IsAligned(m_management_region, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(m_management_region), PageSize));
// Initialize the manager's KPageHeap.
m_heap.Initialize(address, size, management + manager_size, page_heap_size);
@@ -425,15 +426,15 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
return total_management_size;
}
-void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
UNREACHABLE();
}
-void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
+void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
UNREACHABLE();
}
-bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
+bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages,
u8 fill_pattern) {
UNREACHABLE();
}
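
Open, OpenFirst, and Close all share the same manager-walk loop. A toy restatement with plain integers and a vector of managers; the GetManager helper here is a hypothetical linear scan standing in for the attribute-indexed lookup:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <vector>

constexpr uint64_t PageSize = 0x1000;

// Toy per-pool manager with one reference count per page; illustrative only.
struct Manager {
    uint64_t base;
    std::vector<uint16_t> ref_counts;

    uint64_t End() const { return base + ref_counts.size() * PageSize; }
    size_t GetPageOffset(uint64_t a) const { return (a - base) / PageSize; }
    size_t GetPageOffsetToEnd(uint64_t a) const { return (End() - a) / PageSize; }
    void Open(uint64_t a, size_t n) {
        for (size_t i = GetPageOffset(a); n-- > 0; ++i) {
            ++ref_counts[i];
        }
    }
};

Manager& GetManager(std::vector<Manager>& managers, uint64_t address) {
    for (auto& m : managers) {
        if (m.base <= address && address < m.End()) {
            return m;
        }
    }
    std::abort(); // address not covered by any manager
}

// The Open loop, restated: clamp each step to the pages the current
// manager covers, then advance into the next manager's range.
void Open(std::vector<Manager>& managers, uint64_t address, size_t num_pages) {
    while (num_pages > 0) {
        Manager& manager = GetManager(managers, address);
        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
        manager.Open(address, cur_pages);
        num_pages -= cur_pages;
        address += cur_pages * PageSize;
    }
}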
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 401d4e644..7e4b41319 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -7,10 +7,10 @@
#include <tuple>
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/result.h"
namespace Core {
@@ -50,21 +50,21 @@ public:
explicit KMemoryManager(Core::System& system);
- void Initialize(VAddr management_region, size_t management_region_size);
+ void Initialize(KVirtualAddress management_region, size_t management_region_size);
Result InitializeOptimizedMemory(u64 process_id, Pool pool);
void FinalizeOptimizedMemory(u64 process_id, Pool pool);
- PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+ KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
u8 fill_pattern);
- Pool GetPool(PAddr address) const {
+ Pool GetPool(KPhysicalAddress address) const {
return this->GetManager(address).GetPool();
}
- void Open(PAddr address, size_t num_pages) {
+ void Open(KPhysicalAddress address, size_t num_pages) {
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
@@ -80,7 +80,7 @@ public:
}
}
- void OpenFirst(PAddr address, size_t num_pages) {
+ void OpenFirst(KPhysicalAddress address, size_t num_pages) {
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
@@ -96,7 +96,7 @@ public:
}
}
- void Close(PAddr address, size_t num_pages) {
+ void Close(KPhysicalAddress address, size_t num_pages) {
// Repeatedly close references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
@@ -199,16 +199,16 @@ private:
public:
Impl() = default;
- size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
- Pool p);
+ size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management,
+ KVirtualAddress management_end, Pool p);
- PAddr AllocateBlock(s32 index, bool random) {
+ KPhysicalAddress AllocateBlock(s32 index, bool random) {
return m_heap.AllocateBlock(index, random);
}
- PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+ KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
return m_heap.AllocateAligned(index, num_pages, align_pages);
}
- void Free(PAddr addr, size_t num_pages) {
+ void Free(KPhysicalAddress addr, size_t num_pages) {
m_heap.Free(addr, num_pages);
}
@@ -220,10 +220,10 @@ private:
UNIMPLEMENTED();
}
- void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
- void TrackOptimizedAllocation(PAddr block, size_t num_pages);
+ void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
+ void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
- bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);
+ bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
constexpr Pool GetPool() const {
return m_pool;
@@ -231,7 +231,7 @@ private:
constexpr size_t GetSize() const {
return m_heap.GetSize();
}
- constexpr PAddr GetEndAddress() const {
+ constexpr KPhysicalAddress GetEndAddress() const {
return m_heap.GetEndAddress();
}
@@ -243,10 +243,10 @@ private:
UNIMPLEMENTED();
}
- constexpr size_t GetPageOffset(PAddr address) const {
+ constexpr size_t GetPageOffset(KPhysicalAddress address) const {
return m_heap.GetPageOffset(address);
}
- constexpr size_t GetPageOffsetToEnd(PAddr address) const {
+ constexpr size_t GetPageOffsetToEnd(KPhysicalAddress address) const {
return m_heap.GetPageOffsetToEnd(address);
}
@@ -263,7 +263,7 @@ private:
return m_prev;
}
- void OpenFirst(PAddr address, size_t num_pages) {
+ void OpenFirst(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
@@ -274,7 +274,7 @@ private:
}
}
- void Open(PAddr address, size_t num_pages) {
+ void Open(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
@@ -285,7 +285,7 @@ private:
}
}
- void Close(PAddr address, size_t num_pages) {
+ void Close(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
@@ -323,18 +323,18 @@ private:
KPageHeap m_heap;
std::vector<RefCount> m_page_reference_counts;
- VAddr m_management_region{};
+ KVirtualAddress m_management_region{};
Pool m_pool{};
Impl* m_next{};
Impl* m_prev{};
};
private:
- Impl& GetManager(PAddr address) {
+ Impl& GetManager(KPhysicalAddress address) {
return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}
- const Impl& GetManager(PAddr address) const {
+ const Impl& GetManager(KPhysicalAddress address) const {
return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}
diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h
index 5037e657f..e3044f022 100644
--- a/src/core/hle/kernel/k_memory_region.h
+++ b/src/core/hle/kernel/k_memory_region.h
@@ -5,9 +5,9 @@
#include "common/assert.h"
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/k_memory_region_type.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -21,15 +21,15 @@ public:
YUZU_NON_MOVEABLE(KMemoryRegion);
constexpr KMemoryRegion() = default;
- constexpr KMemoryRegion(u64 address_, u64 last_address_)
- : address{address_}, last_address{last_address_} {}
- constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_,
- u32 type_id_)
- : address(address_), last_address(last_address_), pair_address(pair_address_),
- attributes(attributes_), type_id(type_id_) {}
- constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_)
- : KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_,
- type_id_) {}
+ constexpr KMemoryRegion(u64 address, u64 last_address)
+ : m_address{address}, m_last_address{last_address} {}
+ constexpr KMemoryRegion(u64 address, u64 last_address, u64 pair_address, u32 attributes,
+ u32 type_id)
+ : m_address(address), m_last_address(last_address), m_pair_address(pair_address),
+ m_attributes(attributes), m_type_id(type_id) {}
+ constexpr KMemoryRegion(u64 address, u64 last_address, u32 attributes, u32 type_id)
+ : KMemoryRegion(address, last_address, std::numeric_limits<u64>::max(), attributes,
+ type_id) {}
~KMemoryRegion() = default;
@@ -44,15 +44,15 @@ public:
}
constexpr u64 GetAddress() const {
- return address;
+ return m_address;
}
constexpr u64 GetPairAddress() const {
- return pair_address;
+ return m_pair_address;
}
constexpr u64 GetLastAddress() const {
- return last_address;
+ return m_last_address;
}
constexpr u64 GetEndAddress() const {
@@ -64,16 +64,16 @@ public:
}
constexpr u32 GetAttributes() const {
- return attributes;
+ return m_attributes;
}
constexpr u32 GetType() const {
- return type_id;
+ return m_type_id;
}
constexpr void SetType(u32 type) {
ASSERT(this->CanDerive(type));
- type_id = type;
+ m_type_id = type;
}
constexpr bool Contains(u64 addr) const {
@@ -94,27 +94,27 @@ public:
}
constexpr void SetPairAddress(u64 a) {
- pair_address = a;
+ m_pair_address = a;
}
constexpr void SetTypeAttribute(u32 attr) {
- type_id |= attr;
+ m_type_id |= attr;
}
private:
constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) {
- address = a;
- pair_address = p;
- last_address = la;
- attributes = r;
- type_id = t;
- }
-
- u64 address{};
- u64 last_address{};
- u64 pair_address{};
- u32 attributes{};
- u32 type_id{};
+ m_address = a;
+ m_pair_address = p;
+ m_last_address = la;
+ m_attributes = r;
+ m_type_id = t;
+ }
+
+ u64 m_address{};
+ u64 m_last_address{};
+ u64 m_pair_address{};
+ u32 m_attributes{};
+ u32 m_type_id{};
};
class KMemoryRegionTree final {
@@ -243,10 +243,10 @@ public:
void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0);
bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
- VAddr GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
+ KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
- VAddr GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id,
- size_t guard_size) {
+ KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id,
+ size_t guard_size) {
return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
}
@@ -322,7 +322,7 @@ public:
private:
TreeType m_tree{};
- KMemoryRegionAllocator& memory_region_allocator;
+ KMemoryRegionAllocator& m_memory_region_allocator;
};
class KMemoryRegionAllocator final {
@@ -338,18 +338,18 @@ public:
template <typename... Args>
KMemoryRegion* Allocate(Args&&... args) {
// Ensure we stay within the bounds of our heap.
- ASSERT(this->num_regions < MaxMemoryRegions);
+ ASSERT(m_num_regions < MaxMemoryRegions);
// Create the new region.
- KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]);
- new (region) KMemoryRegion(std::forward<Args>(args)...);
+ KMemoryRegion* region = std::addressof(m_region_heap[m_num_regions++]);
+ std::construct_at(region, std::forward<Args>(args)...);
return region;
}
private:
- std::array<KMemoryRegion, MaxMemoryRegions> region_heap{};
- size_t num_regions{};
+ std::array<KMemoryRegion, MaxMemoryRegions> m_region_heap{};
+ size_t m_num_regions{};
};
} // namespace Kernel
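
The allocator hunk swaps placement-new for C++20 std::construct_at, which forwards arguments identically but is also usable in constant evaluation. A minimal sketch of the same bump-allocation shape (the bounds check returns nullptr here where the real allocator asserts):

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>

struct Region {
    constexpr Region() = default;
    constexpr Region(uint64_t a, uint64_t la) : address(a), last_address(la) {}
    uint64_t address{};
    uint64_t last_address{};
};

class RegionAllocator {
public:
    template <typename... Args>
    Region* Allocate(Args&&... args) {
        // Stay within the fixed heap, then construct in place.
        if (m_num_regions >= m_region_heap.size()) {
            return nullptr;
        }
        Region* region = std::addressof(m_region_heap[m_num_regions++]);
        return std::construct_at(region, std::forward<Args>(args)...);
    }

private:
    std::array<Region, 16> m_region_heap{};
    size_t m_num_regions{};
};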
diff --git a/src/core/hle/kernel/k_object_name.cpp b/src/core/hle/kernel/k_object_name.cpp
new file mode 100644
index 000000000..df3a1c4c5
--- /dev/null
+++ b/src/core/hle/kernel/k_object_name.cpp
@@ -0,0 +1,102 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_object_name.h"
+
+namespace Kernel {
+
+KObjectNameGlobalData::KObjectNameGlobalData(KernelCore& kernel) : m_object_list_lock{kernel} {}
+KObjectNameGlobalData::~KObjectNameGlobalData() = default;
+
+void KObjectName::Initialize(KAutoObject* obj, const char* name) {
+ // Set member variables.
+ m_object = obj;
+ std::strncpy(m_name.data(), name, sizeof(m_name) - 1);
+ m_name[sizeof(m_name) - 1] = '\x00';
+
+ // Open a reference to the object we hold.
+ m_object->Open();
+}
+
+bool KObjectName::MatchesName(const char* name) const {
+ return std::strncmp(m_name.data(), name, sizeof(m_name)) == 0;
+}
+
+Result KObjectName::NewFromName(KernelCore& kernel, KAutoObject* obj, const char* name) {
+ // Create a new object name.
+ KObjectName* new_name = KObjectName::Allocate(kernel);
+ R_UNLESS(new_name != nullptr, ResultOutOfResource);
+
+ // Initialize the new name.
+ new_name->Initialize(obj, name);
+
+ // Check if there's an existing name.
+ {
+ // Get the global data.
+ KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
+
+ // Ensure we have exclusive access to the global list.
+ KScopedLightLock lk{gd.GetObjectListLock()};
+
+ // If the object doesn't exist, put it into the list.
+ KScopedAutoObject existing_object = FindImpl(kernel, name);
+ if (existing_object.IsNull()) {
+ gd.GetObjectList().push_back(*new_name);
+ R_SUCCEED();
+ }
+ }
+
+ // The object already exists, which is an error condition. Perform cleanup.
+ obj->Close();
+ KObjectName::Free(kernel, new_name);
+ R_THROW(ResultInvalidState);
+}
+
+Result KObjectName::Delete(KernelCore& kernel, KAutoObject* obj, const char* compare_name) {
+ // Get the global data.
+ KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
+
+ // Ensure we have exclusive access to the global list.
+ KScopedLightLock lk{gd.GetObjectListLock()};
+
+ // Find a matching entry in the list, and delete it.
+ for (auto& name : gd.GetObjectList()) {
+ if (name.MatchesName(compare_name) && obj == name.GetObject()) {
+ // We found a match, clean up its resources.
+ obj->Close();
+ gd.GetObjectList().erase(gd.GetObjectList().iterator_to(name));
+ KObjectName::Free(kernel, std::addressof(name));
+ R_SUCCEED();
+ }
+ }
+
+ // We didn't find the object in the list.
+ R_THROW(ResultNotFound);
+}
+
+KScopedAutoObject<KAutoObject> KObjectName::Find(KernelCore& kernel, const char* name) {
+ // Get the global data.
+ KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
+
+ // Ensure we have exclusive access to the global list.
+ KScopedLightLock lk{gd.GetObjectListLock()};
+
+ return FindImpl(kernel, name);
+}
+
+KScopedAutoObject<KAutoObject> KObjectName::FindImpl(KernelCore& kernel, const char* compare_name) {
+ // Get the global data.
+ KObjectNameGlobalData& gd{kernel.ObjectNameGlobalData()};
+
+ // Try to find a matching object in the global list.
+ for (const auto& name : gd.GetObjectList()) {
+ if (name.MatchesName(compare_name)) {
+ return name.GetObject();
+ }
+ }
+
+ // There's no matching entry in the list.
+ return nullptr;
+}
+
+} // namespace Kernel
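
The registry's contract: lookups and insertions share one lock, and a duplicate name fails the insert rather than shadowing the existing entry. A rough standalone analogue using std::mutex and std::string in place of KLightLock and the fixed-size name buffer (illustrative only; the real list is intrusive and holds references on the named objects):

#include <list>
#include <mutex>
#include <string>

struct NamedObject {
    std::string name;
    void* object;
};

class NameRegistry {
public:
    // Mirrors NewFromName: a duplicate under the lock is an error, not a shadow.
    bool Insert(const char* name, void* object) {
        std::scoped_lock lk{m_lock};
        if (FindImpl(name) != nullptr) {
            return false;
        }
        m_list.push_back({name, object});
        return true;
    }

    // Mirrors Find: the same lock guards lookups.
    void* Find(const char* name) {
        std::scoped_lock lk{m_lock};
        NamedObject* entry = FindImpl(name);
        return entry != nullptr ? entry->object : nullptr;
    }

private:
    NamedObject* FindImpl(const char* name) {
        for (auto& entry : m_list) {
            if (entry.name == name) {
                return &entry;
            }
        }
        return nullptr;
    }

    std::mutex m_lock;
    std::list<NamedObject> m_list;
};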
diff --git a/src/core/hle/kernel/k_object_name.h b/src/core/hle/kernel/k_object_name.h
new file mode 100644
index 000000000..a8876fe37
--- /dev/null
+++ b/src/core/hle/kernel/k_object_name.h
@@ -0,0 +1,88 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+#include <memory>
+
+#include "common/intrusive_list.h"
+
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+class KObjectNameGlobalData;
+
+class KObjectName : public KSlabAllocated<KObjectName>,
+ public Common::IntrusiveListBaseNode<KObjectName> {
+public:
+ explicit KObjectName(KernelCore&) {}
+ virtual ~KObjectName() = default;
+
+ static constexpr size_t NameLengthMax = 12;
+ using List = Common::IntrusiveListBaseTraits<KObjectName>::ListType;
+
+ static Result NewFromName(KernelCore& kernel, KAutoObject* obj, const char* name);
+ static Result Delete(KernelCore& kernel, KAutoObject* obj, const char* name);
+
+ static KScopedAutoObject<KAutoObject> Find(KernelCore& kernel, const char* name);
+
+ template <typename Derived>
+ static Result Delete(KernelCore& kernel, const char* name) {
+ // Find the object.
+ KScopedAutoObject obj = Find(kernel, name);
+ R_UNLESS(obj.IsNotNull(), ResultNotFound);
+
+ // Cast the object to the desired type.
+ Derived* derived = obj->DynamicCast<Derived*>();
+ R_UNLESS(derived != nullptr, ResultNotFound);
+
+ // Check that the object is closed.
+ R_UNLESS(derived->IsServerClosed(), ResultInvalidState);
+
+ R_RETURN(Delete(kernel, obj.GetPointerUnsafe(), name));
+ }
+
+ template <typename Derived>
+ requires(std::derived_from<Derived, KAutoObject>)
+ static KScopedAutoObject<Derived> Find(KernelCore& kernel, const char* name) {
+ return Find(kernel, name);
+ }
+
+private:
+ static KScopedAutoObject<KAutoObject> FindImpl(KernelCore& kernel, const char* name);
+
+ void Initialize(KAutoObject* obj, const char* name);
+
+ bool MatchesName(const char* name) const;
+ KAutoObject* GetObject() const {
+ return m_object;
+ }
+
+private:
+ std::array<char, NameLengthMax> m_name{};
+ KAutoObject* m_object{};
+};
+
+class KObjectNameGlobalData {
+public:
+ explicit KObjectNameGlobalData(KernelCore& kernel);
+ ~KObjectNameGlobalData();
+
+ KLightLock& GetObjectListLock() {
+ return m_object_list_lock;
+ }
+
+ KObjectName::List& GetObjectList() {
+ return m_object_list;
+ }
+
+private:
+ KLightLock m_object_list_lock;
+ KObjectName::List m_object_list;
+};
+
+} // namespace Kernel
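
NameLengthMax is 12 bytes including the terminator, and Initialize copies with strncpy plus a forced NUL, so anything longer than 11 characters truncates silently. The copy in isolation:

#include <array>
#include <cstddef>
#include <cstring>

constexpr size_t NameLengthMax = 12;

std::array<char, NameLengthMax> CopyName(const char* name) {
    std::array<char, NameLengthMax> out{};
    std::strncpy(out.data(), name, out.size() - 1);
    out[out.size() - 1] = '\0'; // terminator is forced even for long names
    return out;
}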
diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
index 0c16dded4..e9830e6d9 100644
--- a/src/core/hle/kernel/k_page_buffer.cpp
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -10,8 +10,8 @@
namespace Kernel {
-KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
- ASSERT(Common::IsAligned(phys_addr, PageSize));
+KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, KPhysicalAddress phys_addr) {
+ ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
}
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
index cfedaae61..f6a7f1e39 100644
--- a/src/core/hle/kernel/k_page_buffer.h
+++ b/src/core/hle/kernel/k_page_buffer.h
@@ -26,10 +26,10 @@ public:
explicit KPageBuffer(KernelCore&) {}
KPageBuffer() = default;
- static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
+ static KPageBuffer* FromPhysicalAddress(Core::System& system, KPhysicalAddress phys_addr);
private:
- [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{};
+ alignas(PageSize) std::array<u8, PageSize> m_buffer{};
};
static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize);
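
With [[maybe_unused]] dropped, the buffer is plainly load-bearing: alignas(PageSize) is what lets a KPageBuffer correspond one-to-one with a physical page. The minimal equivalent:

#include <array>
#include <cstddef>
#include <cstdint>

constexpr size_t PageSize = 0x1000;

struct PageBuffer {
    alignas(PageSize) std::array<uint8_t, PageSize> buffer{};
};
static_assert(sizeof(PageBuffer) == PageSize && alignof(PageBuffer) == PageSize);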
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index c07f17663..b32909f05 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -22,7 +22,7 @@ public:
constexpr explicit KBlockInfo() : m_next(nullptr) {}
constexpr void Initialize(KPhysicalAddress addr, size_t np) {
- ASSERT(Common::IsAligned(addr, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
ASSERT(static_cast<u32>(np) == np);
m_page_index = static_cast<u32>(addr / PageSize);
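
The ASSERT(static_cast<u32>(np) == np) idiom verifies that a 64-bit count round-trips through u32 before it is packed into the 32-bit field:

#include <cstdint>

constexpr bool FitsInU32(uint64_t value) {
    return static_cast<uint32_t>(value) == value;
}
static_assert(FitsInU32(0xFFFF'FFFFULL) && !FitsInU32(0x1'0000'0000ULL));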
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 7b02c7d8b..95762b5a2 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -6,14 +6,14 @@
namespace Kernel {
-void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
- size_t management_size, const size_t* block_shifts,
- size_t num_block_shifts) {
+void KPageHeap::Initialize(KPhysicalAddress address, size_t size,
+ KVirtualAddress management_address, size_t management_size,
+ const size_t* block_shifts, size_t num_block_shifts) {
// Check our assumptions.
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
- const VAddr management_end = management_address + management_size;
+ const KVirtualAddress management_end = management_address + management_size;
// Set our members.
m_heap_address = address;
@@ -31,7 +31,7 @@ void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
}
// Ensure we didn't overextend our bounds.
- ASSERT(VAddr(cur_bitmap_storage) <= management_end);
+ ASSERT(KVirtualAddress(cur_bitmap_storage) <= management_end);
}
size_t KPageHeap::GetNumFreePages() const {
@@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
return num_free;
}
-PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
+KPhysicalAddress KPageHeap::AllocateByLinearSearch(s32 index) {
const size_t needed_size = m_blocks[index].GetSize();
for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
- if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {
+ if (const KPhysicalAddress addr = m_blocks[i].PopBlock(false); addr != 0) {
if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
@@ -59,7 +59,7 @@ PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
return 0;
}
-PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
+KPhysicalAddress KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
// Get the size and required alignment.
const size_t needed_size = num_pages * PageSize;
const size_t align_size = align_pages * PageSize;
@@ -110,7 +110,7 @@ PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_page
}
// Pop a block from the index we selected.
- if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) {
+ if (KPhysicalAddress addr = m_blocks[index].PopBlock(true); addr != 0) {
// Determine how much size we have left over.
if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
leftover_size > 0) {
@@ -141,13 +141,13 @@ PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_page
return 0;
}
-void KPageHeap::FreeBlock(PAddr block, s32 index) {
+void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
do {
block = m_blocks[index++].PushBlock(block);
} while (block != 0);
}
-void KPageHeap::Free(PAddr addr, size_t num_pages) {
+void KPageHeap::Free(KPhysicalAddress addr, size_t num_pages) {
// Freeing no pages is a no-op.
if (num_pages == 0) {
return;
@@ -155,16 +155,16 @@ void KPageHeap::Free(PAddr addr, size_t num_pages) {
// Find the largest block size that we can free, and free as many as possible.
s32 big_index = static_cast<s32>(m_num_blocks) - 1;
- const PAddr start = addr;
- const PAddr end = addr + num_pages * PageSize;
- PAddr before_start = start;
- PAddr before_end = start;
- PAddr after_start = end;
- PAddr after_end = end;
+ const KPhysicalAddress start = addr;
+ const KPhysicalAddress end = addr + num_pages * PageSize;
+ KPhysicalAddress before_start = start;
+ KPhysicalAddress before_end = start;
+ KPhysicalAddress after_start = end;
+ KPhysicalAddress after_end = end;
while (big_index >= 0) {
const size_t block_size = m_blocks[big_index].GetSize();
- const PAddr big_start = Common::AlignUp(start, block_size);
- const PAddr big_end = Common::AlignDown(end, block_size);
+ const KPhysicalAddress big_start = Common::AlignUp(GetInteger(start), block_size);
+ const KPhysicalAddress big_end = Common::AlignDown(GetInteger(end), block_size);
if (big_start < big_end) {
// Free as many big blocks as we can.
for (auto block = big_start; block < big_end; block += block_size) {
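
Free's splitting logic carves [start, end) into runs of the largest block size whose aligned span fits, then retries the leftover edges at smaller sizes. A recursive sketch that emits the runs instead of pushing them into the block bitmaps (the real loop tracks the before/after edges iteratively):

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

constexpr uint64_t AlignUpTo(uint64_t value, uint64_t align) {
    return (value + align - 1) / align * align;
}
constexpr uint64_t AlignDownTo(uint64_t value, uint64_t align) {
    return value / align * align;
}

// block_sizes is sorted largest-first; emits (address, size) runs covering
// whatever portion of [start, end) the available block sizes can express.
void Carve(uint64_t start, uint64_t end, const uint64_t* block_sizes, size_t count,
           std::vector<std::pair<uint64_t, uint64_t>>& out) {
    if (start >= end || count == 0) {
        return;
    }
    const uint64_t block_size = block_sizes[0];
    const uint64_t big_start = AlignUpTo(start, block_size);
    const uint64_t big_end = AlignDownTo(end, block_size);
    if (big_start < big_end) {
        // Free as many big blocks as we can, then retry the edges smaller.
        for (uint64_t block = big_start; block < big_end; block += block_size) {
            out.emplace_back(block, block_size);
        }
        Carve(start, big_start, block_sizes + 1, count - 1, out);
        Carve(big_end, end, block_sizes + 1, count - 1, out);
    } else {
        Carve(start, end, block_sizes + 1, count - 1, out);
    }
}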
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index 9021edcf7..c55225bac 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -8,8 +8,8 @@
#include "common/alignment.h"
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel {
@@ -18,24 +18,24 @@ class KPageHeap {
public:
KPageHeap() = default;
- constexpr PAddr GetAddress() const {
+ constexpr KPhysicalAddress GetAddress() const {
return m_heap_address;
}
constexpr size_t GetSize() const {
return m_heap_size;
}
- constexpr PAddr GetEndAddress() const {
+ constexpr KPhysicalAddress GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
- constexpr size_t GetPageOffset(PAddr block) const {
+ constexpr size_t GetPageOffset(KPhysicalAddress block) const {
return (block - this->GetAddress()) / PageSize;
}
- constexpr size_t GetPageOffsetToEnd(PAddr block) const {
+ constexpr size_t GetPageOffsetToEnd(KPhysicalAddress block) const {
return (this->GetEndAddress() - block) / PageSize;
}
- void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
- size_t management_size) {
+ void Initialize(KPhysicalAddress heap_address, size_t heap_size,
+ KVirtualAddress management_address, size_t management_size) {
return this->Initialize(heap_address, heap_size, management_address, management_size,
MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts);
}
@@ -53,7 +53,7 @@ public:
m_initial_used_size = m_heap_size - free_size - reserved_size;
}
- PAddr AllocateBlock(s32 index, bool random) {
+ KPhysicalAddress AllocateBlock(s32 index, bool random) {
if (random) {
const size_t block_pages = m_blocks[index].GetNumPages();
return this->AllocateByRandom(index, block_pages, block_pages);
@@ -62,12 +62,12 @@ public:
}
}
- PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+ KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
// TODO: linear search support?
return this->AllocateByRandom(index, num_pages, align_pages);
}
- void Free(PAddr addr, size_t num_pages);
+ void Free(KPhysicalAddress addr, size_t num_pages);
static size_t CalculateManagementOverheadSize(size_t region_size) {
return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(),
@@ -125,24 +125,25 @@ private:
return this->GetNumFreeBlocks() * this->GetNumPages();
}
- u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) {
+ u64* Initialize(KPhysicalAddress addr, size_t size, size_t bs, size_t nbs,
+ u64* bit_storage) {
// Set shifts.
m_block_shift = bs;
m_next_block_shift = nbs;
// Align up the address.
- PAddr end = addr + size;
+ KPhysicalAddress end = addr + size;
const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift)
: (u64(1) << m_block_shift);
- addr = Common::AlignDown(addr, align);
- end = Common::AlignUp(end, align);
+ addr = Common::AlignDown(GetInteger(addr), align);
+ end = Common::AlignUp(GetInteger(end), align);
m_heap_address = addr;
m_end_offset = (end - addr) / (u64(1) << m_block_shift);
return m_bitmap.Initialize(bit_storage, m_end_offset);
}
- PAddr PushBlock(PAddr address) {
+ KPhysicalAddress PushBlock(KPhysicalAddress address) {
// Set the bit for the free block.
size_t offset = (address - m_heap_address) >> this->GetShift();
m_bitmap.SetBit(offset);
@@ -161,7 +162,7 @@ private:
return {};
}
- PAddr PopBlock(bool random) {
+ KPhysicalAddress PopBlock(bool random) {
// Find a free block.
s64 soffset = m_bitmap.FindFreeBlock(random);
if (soffset < 0) {
@@ -187,18 +188,19 @@ private:
private:
KPageBitmap m_bitmap;
- PAddr m_heap_address{};
+ KPhysicalAddress m_heap_address{};
uintptr_t m_end_offset{};
size_t m_block_shift{};
size_t m_next_block_shift{};
};
private:
- void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
- size_t management_size, const size_t* block_shifts, size_t num_block_shifts);
+ void Initialize(KPhysicalAddress heap_address, size_t heap_size,
+ KVirtualAddress management_address, size_t management_size,
+ const size_t* block_shifts, size_t num_block_shifts);
size_t GetNumFreePages() const;
- void FreeBlock(PAddr block, s32 index);
+ void FreeBlock(KPhysicalAddress block, s32 index);
static constexpr size_t NumMemoryBlockPageShifts{7};
static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
@@ -206,14 +208,14 @@ private:
};
private:
- PAddr AllocateByLinearSearch(s32 index);
- PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
+ KPhysicalAddress AllocateByLinearSearch(s32 index);
+ KPhysicalAddress AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
size_t num_block_shifts);
private:
- PAddr m_heap_address{};
+ KPhysicalAddress m_heap_address{};
size_t m_heap_size{};
size_t m_initial_used_size{};
size_t m_num_blocks{};
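Throughout this commit, the raw PAddr/VAddr integer aliases from common/common_types.h are replaced by the strongly typed wrappers declared in k_typed_address.h (hence the include swap above), and helpers that genuinely need a raw integer, such as Common::AlignUp and Common::AlignDown, now unwrap explicitly via GetInteger. A minimal sketch of the strong-typedef pattern, with illustrative names only; the real KTypedAddress defines more operators plus the KPhysicalAddress/KVirtualAddress/KProcessAddress aliases:

#include <compare>
#include <cstdint>

template <typename Tag>
class TypedAddress {
public:
    constexpr TypedAddress() = default;
    constexpr TypedAddress(uint64_t addr) : m_addr(addr) {} // implicit, so raw
                                                            // results wrap back up

    // Pointer-style arithmetic and comparisons stay type-safe.
    constexpr TypedAddress operator+(uint64_t offset) const {
        return TypedAddress(m_addr + offset);
    }
    constexpr uint64_t operator-(TypedAddress rhs) const {
        return m_addr - rhs.m_addr;
    }
    constexpr auto operator<=>(const TypedAddress&) const = default;

    // Raw-integer consumers must ask explicitly.
    friend constexpr uint64_t GetInteger(TypedAddress addr) {
        return addr.m_addr;
    }

private:
    uint64_t m_addr{};
};

struct PhysicalTag;
using ToyPhysicalAddress = TypedAddress<PhysicalTag>; // hypothetical alias

This is why hunks like Common::AlignUp(GetInteger(start), block_size) compile cleanly: the alignment helper sees a plain integer, and the typed variable on the left re-wraps the result through the implicit constructor.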
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 9c7ac22dc..02b5cada4 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -106,9 +106,10 @@ KPageTable::~KPageTable() = default;
Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
bool enable_das_merge, bool from_back,
- KMemoryManager::Pool pool, VAddr code_addr,
+ KMemoryManager::Pool pool, KProcessAddress code_addr,
size_t code_size, KSystemResource* system_resource,
- KResourceLimit* resource_limit) {
+ KResourceLimit* resource_limit,
+ Core::Memory::Memory& memory) {
const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
@@ -117,10 +118,13 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
};
+ // Set the tracking memory
+ m_memory = std::addressof(memory);
+
// Set our width and heap/alias sizes
m_address_space_width = GetAddressSpaceWidthFromType(as_type);
- const VAddr start = 0;
- const VAddr end{1ULL << m_address_space_width};
+ const KProcessAddress start = 0;
+ const KProcessAddress end{1ULL << m_address_space_width};
size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
@@ -135,8 +139,8 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
// Set code regions and determine remaining
constexpr size_t RegionAlignment{2_MiB};
- VAddr process_code_start{};
- VAddr process_code_end{};
+ KProcessAddress process_code_start{};
+ KProcessAddress process_code_end{};
size_t stack_region_size{};
size_t kernel_map_region_size{};
@@ -149,8 +153,8 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
m_alias_code_region_start = m_code_region_start;
m_alias_code_region_end = m_code_region_end;
- process_code_start = Common::AlignDown(code_addr, RegionAlignment);
- process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
+ process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment);
+ process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment);
} else {
stack_region_size = 0;
kernel_map_region_size = 0;
@@ -178,7 +182,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
m_resource_limit = resource_limit;
// Determine the region we can place our undetermineds in
- VAddr alloc_start{};
+ KProcessAddress alloc_start{};
size_t alloc_size{};
if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
alloc_start = m_code_region_start;
@@ -292,7 +296,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
: KMemoryManager::Direction::FromFront);
// Ensure that our regions are inside our address space
- auto IsInAddressSpace = [&](VAddr addr) {
+ auto IsInAddressSpace = [&](KProcessAddress addr) {
return m_address_space_start <= addr && addr <= m_address_space_end;
};
ASSERT(IsInAddressSpace(m_alias_region_start));
@@ -305,14 +309,14 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
ASSERT(IsInAddressSpace(m_kernel_map_region_end));
// Ensure that we selected regions that don't overlap
- const VAddr alias_start{m_alias_region_start};
- const VAddr alias_last{m_alias_region_end - 1};
- const VAddr heap_start{m_heap_region_start};
- const VAddr heap_last{m_heap_region_end - 1};
- const VAddr stack_start{m_stack_region_start};
- const VAddr stack_last{m_stack_region_end - 1};
- const VAddr kmap_start{m_kernel_map_region_start};
- const VAddr kmap_last{m_kernel_map_region_end - 1};
+ const KProcessAddress alias_start{m_alias_region_start};
+ const KProcessAddress alias_last{m_alias_region_end - 1};
+ const KProcessAddress heap_start{m_heap_region_start};
+ const KProcessAddress heap_last{m_heap_region_end - 1};
+ const KProcessAddress stack_start{m_stack_region_start};
+ const KProcessAddress stack_last{m_stack_region_end - 1};
+ const KProcessAddress kmap_start{m_kernel_map_region_start};
+ const KProcessAddress kmap_last{m_kernel_map_region_end - 1};
ASSERT(alias_last < heap_start || heap_last < alias_start);
ASSERT(alias_last < stack_start || stack_last < alias_start);
ASSERT(alias_last < kmap_start || kmap_last < alias_start);
@@ -334,9 +338,10 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
void KPageTable::Finalize() {
// Finalize memory blocks.
- m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) {
- m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
- });
+ m_memory_block_manager.Finalize(m_memory_block_slab_manager,
+ [&](KProcessAddress addr, u64 size) {
+ m_memory->UnmapRegion(*m_page_table_impl, addr, size);
+ });
// Release any insecure mapped memory.
if (m_mapped_insecure_memory) {
@@ -352,7 +357,7 @@ void KPageTable::Finalize() {
m_page_table_impl.reset();
}
-Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state,
+Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state,
KMemoryPermission perm) {
const u64 size{num_pages * PageSize};
@@ -388,7 +393,8 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
R_SUCCEED();
}
-Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -435,6 +441,9 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
KPageGroup pg{m_kernel, m_block_info_manager};
AddRegionToPages(src_address, num_pages, pg);
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
// Reprotect the source as kernel-read/not mapped.
const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
KMemoryPermission::NotMapped);
@@ -447,7 +456,10 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
});
// Map the alias pages.
- R_TRY(MapPages(dst_address, pg, new_perm));
+ const KPageProperties dst_properties = {new_perm, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(
+ this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
// We successfully mapped the alias pages, so we don't need to unprotect the src pages on
// failure.
@@ -467,7 +479,8 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
R_SUCCEED();
}
-Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
+Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
@@ -519,7 +532,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t
SCOPE_EXIT({
if (reprotected_pages && any_code_pages) {
if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
- m_system.InvalidateCpuInstructionCacheRange(dst_address, size);
+ m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size);
} else {
m_system.InvalidateCpuInstructionCaches();
}
@@ -569,9 +582,10 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t
R_SUCCEED();
}
-VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
- size_t alignment, size_t offset, size_t guard_pages) {
- VAddr address = 0;
+KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) {
+ KProcessAddress address = 0;
if (num_pages <= region_num_pages) {
if (this->IsAslrEnabled()) {
@@ -587,7 +601,7 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size
return address;
}
-Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
+Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
@@ -598,11 +612,11 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
- R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr),
+ R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)),
ResultInvalidCurrentMemory);
// Prepare tracking variables.
- PAddr cur_addr = next_entry.phys_addr;
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
size_t tot_size = cur_size;
@@ -640,7 +654,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
R_SUCCEED();
}
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
@@ -653,7 +667,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_p
// We're going to validate that the group we'd expect is the group we see.
auto cur_it = pg.begin();
- PAddr cur_block_address = cur_it->GetAddress();
+ KPhysicalAddress cur_block_address = cur_it->GetAddress();
size_t cur_block_pages = cur_it->GetNumPages();
auto UpdateCurrentIterator = [&]() {
@@ -671,12 +685,12 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_p
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
- if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) {
+ if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) {
return false;
}
// Prepare tracking variables.
- PAddr cur_addr = next_entry.phys_addr;
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
size_t tot_size = cur_size;
@@ -728,8 +742,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_p
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}
-Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
- VAddr src_addr) {
+Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size,
+ KPageTable& src_page_table, KProcessAddress src_addr) {
// Acquire the table locks.
KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
@@ -768,8 +782,8 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
}
Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
- VAddr address, size_t size, KMemoryPermission test_perm,
- KMemoryState dst_state) {
+ KProcessAddress address, size_t size,
+ KMemoryPermission test_perm, KMemoryState dst_state) {
// Validate pre-conditions.
ASSERT(this->IsLockedByCurrentThread());
ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
@@ -784,10 +798,10 @@ Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_bloc
: KMemoryPermission::UserRead;
// Get aligned extents.
- const VAddr aligned_src_start = Common::AlignDown((address), PageSize);
- const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize);
- const VAddr mapping_src_start = Common::AlignUp((address), PageSize);
- const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize);
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
const auto aligned_src_last = (aligned_src_end)-1;
const auto mapping_src_last = (mapping_src_end)-1;
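// Worked example with hypothetical values: address = 0x1234, size = 0x2000,
// PageSize = 0x1000, so the buffer ends at 0x3234. Then:
//   aligned_src_start = AlignDown(0x1234) = 0x1000 // first touched page
//   aligned_src_end   = AlignUp(0x3234)   = 0x4000 // past the last touched page
//   mapping_src_start = AlignUp(0x1234)   = 0x2000 // first wholly-contained page
//   mapping_src_end   = AlignDown(0x3234) = 0x3000 // past the last wholly-contained page
// The aligned extents cover every page the buffer touches; the mapping extents
// cover only whole pages, which can be shared with the server directly, while
// the partial head/tail pages must be copied.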
@@ -834,14 +848,15 @@ Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_bloc
test_attr_mask, KMemoryAttribute::None));
if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() &&
- info.GetAddress() < (mapping_src_end)) {
- const auto cur_start =
- info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start);
+ info.GetAddress() < GetInteger(mapping_src_end)) {
+ const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
+ ? info.GetAddress()
+ : (mapping_src_start);
const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
: (mapping_src_end);
const size_t cur_size = cur_end - cur_start;
- if (info.GetAddress() < (mapping_src_start)) {
+ if (info.GetAddress() < GetInteger(mapping_src_start)) {
++blocks_needed;
}
if (mapping_src_last < info.GetLastAddress()) {
@@ -876,30 +891,32 @@ Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_bloc
R_SUCCEED();
}
-Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
- KMemoryPermission test_perm, KMemoryState dst_state,
- KPageTable& src_page_table, bool send) {
+Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
+ KProcessAddress src_addr, KMemoryPermission test_perm,
+ KMemoryState dst_state, KPageTable& src_page_table,
+ bool send) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(src_page_table.IsLockedByCurrentThread());
// Check that we can theoretically map.
- const VAddr region_start = m_alias_region_start;
+ const KProcessAddress region_start = m_alias_region_start;
const size_t region_size = m_alias_region_end - m_alias_region_start;
R_UNLESS(size < region_size, ResultOutOfAddressSpace);
// Get aligned source extents.
- const VAddr src_start = src_addr;
- const VAddr src_end = src_addr + size;
- const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize);
- const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize);
- const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize);
- const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize);
+ const KProcessAddress src_start = src_addr;
+ const KProcessAddress src_end = src_addr + size;
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
+ const KProcessAddress mapping_src_end =
+ Common::AlignDown(GetInteger(src_start) + size, PageSize);
const size_t aligned_src_size = aligned_src_end - aligned_src_start;
const size_t mapping_src_size =
(mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
// Select a random address to map at.
- VAddr dst_addr =
+ KProcessAddress dst_addr =
this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
PageSize, 0, this->GetNumGuardPages());
@@ -924,9 +941,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Ensure that we manage page references correctly.
- PAddr start_partial_page = 0;
- PAddr end_partial_page = 0;
- VAddr cur_mapped_addr = dst_addr;
+ KPhysicalAddress start_partial_page = 0;
+ KPhysicalAddress end_partial_page = 0;
+ KProcessAddress cur_mapped_addr = dst_addr;
// If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
// free on scope exit.
@@ -971,11 +988,12 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
- bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start);
+ bool traverse_valid =
+ src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start));
ASSERT(traverse_valid);
// Prepare tracking variables.
- PAddr cur_block_addr = next_entry.phys_addr;
+ KPhysicalAddress cur_block_addr = next_entry.phys_addr;
size_t cur_block_size =
next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1));
size_t tot_block_size = cur_block_size;
@@ -983,7 +1001,7 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
// Map the start page, if we have one.
if (start_partial_page != 0) {
// Ensure the page holds correct data.
- const VAddr start_partial_virt =
+ const KVirtualAddress start_partial_virt =
GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
if (send) {
const size_t partial_offset = src_start - aligned_src_start;
@@ -996,21 +1014,22 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
clear_size = 0;
}
- std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val,
+ std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
partial_offset);
std::memcpy(
- m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset),
- m_system.Memory().GetPointer<void>(
- GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) +
- partial_offset),
+ m_memory->GetPointer<void>(GetInteger(start_partial_virt) + partial_offset),
+ m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
+ m_system.Kernel().MemoryLayout(), cur_block_addr)) +
+ partial_offset),
copy_size);
if (clear_size > 0) {
- std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset +
- copy_size),
+ std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt) +
+ partial_offset + copy_size),
fill_val, clear_size);
}
} else {
- std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize);
+ std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
+ PageSize);
}
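// Sketch of the start partial page built above when sending (offsets relative
// to the page): bytes [0, partial_offset) are fill_val padding, the next
// copy_size bytes are copied from the source block, and the final clear_size
// bytes are fill_val padding again (clear_size is nonzero only when the buffer
// also ends inside this page). On the receive path, the whole page is fill_val.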
// Map the page.
@@ -1055,7 +1074,8 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
}
// Handle the last direct-mapped page.
- if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size;
+ if (const KProcessAddress mapped_block_end =
+ aligned_src_start + tot_block_size - cur_block_size;
mapped_block_end < mapping_src_end) {
const size_t last_block_size = mapping_src_end - mapped_block_end;
@@ -1078,18 +1098,19 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
// Map the end page, if we have one.
if (end_partial_page != 0) {
// Ensure the page holds correct data.
- const VAddr end_partial_virt =
+ const KVirtualAddress end_partial_virt =
GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
if (send) {
const size_t copy_size = src_end - mapping_src_end;
- std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt),
- m_system.Memory().GetPointer<void>(GetHeapVirtualAddress(
- m_system.Kernel().MemoryLayout(), cur_block_addr)),
+ std::memcpy(m_memory->GetPointer<void>(GetInteger(end_partial_virt)),
+ m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
+ m_system.Kernel().MemoryLayout(), cur_block_addr))),
copy_size);
- std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val,
- PageSize - copy_size);
+ std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt) + copy_size),
+ fill_val, PageSize - copy_size);
} else {
- std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize);
+ std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), fill_val,
+ PageSize);
}
// Map the page.
@@ -1110,7 +1131,7 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
R_SUCCEED();
}
-Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
+Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
KPageTable& src_page_table, KMemoryPermission test_perm,
KMemoryState dst_state, bool send) {
// For convenience, alias this.
@@ -1136,8 +1157,8 @@ Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
R_TRY(allocator_result);
// Get the mapped extents.
- const VAddr src_map_start = Common::AlignUp((src_addr), PageSize);
- const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize);
+ const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
+ const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
const size_t src_map_size = src_map_end - src_map_start;
// Ensure that we clean up appropriately if we fail after this.
@@ -1166,7 +1187,8 @@ Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
R_SUCCEED();
}
-Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) {
+Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
// Validate the address.
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1190,8 +1212,8 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
KScopedPageTableUpdater updater(this);
// Get aligned extents.
- const VAddr aligned_start = Common::AlignDown((address), PageSize);
- const VAddr aligned_end = Common::AlignUp((address) + size, PageSize);
+ const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
const size_t aligned_size = aligned_end - aligned_start;
const size_t aligned_num_pages = aligned_size / PageSize;
@@ -1205,22 +1227,23 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
KMemoryBlockDisableMergeAttribute::Normal);
// Release from the resource limit as relevant.
- const VAddr mapping_start = Common::AlignUp((address), PageSize);
- const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size);
R_SUCCEED();
}
-Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) {
+Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
// Validate the address.
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Get aligned source extents.
- const VAddr mapping_start = Common::AlignUp((address), PageSize);
- const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
- const VAddr mapping_last = mapping_end - 1;
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_last = mapping_end - 1;
const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
// If nothing was mapped, we're actually done immediately.
@@ -1273,7 +1296,7 @@ Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState
KMemoryInfo cur_info = start_it->GetMemoryInfo();
// Create tracking variables.
- VAddr cur_address = cur_info.GetAddress();
+ KProcessAddress cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
@@ -1346,7 +1369,7 @@ Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState
.IsSuccess());
// Create tracking variables.
- VAddr cur_address = cur_info.GetAddress();
+ KProcessAddress cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
@@ -1433,16 +1456,16 @@ Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState
}
void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
- VAddr address, size_t size,
+ KProcessAddress address, size_t size,
KMemoryPermission prot_perm) {
ASSERT(this->IsLockedByCurrentThread());
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
// Get the mapped extents.
- const VAddr src_map_start = address;
- const VAddr src_map_end = address + size;
- const VAddr src_map_last = src_map_end - 1;
+ const KProcessAddress src_map_start = address;
+ const KProcessAddress src_map_end = address + size;
+ const KProcessAddress src_map_last = src_map_end - 1;
// This function is only invoked when there's something to do.
ASSERT(src_map_end > src_map_start);
@@ -1452,8 +1475,9 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
while (true) {
const KMemoryInfo info = it->GetMemoryInfo();
- const auto cur_start =
- info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start;
+ const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
+ ? info.GetAddress()
+ : GetInteger(src_map_start);
const auto cur_end =
src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
@@ -1463,7 +1487,7 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
(info.GetIpcLockCount() != 0 &&
(info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
// Check if we actually need to fix the protections on the block.
- if (cur_end == src_map_end || info.GetAddress() <= src_map_start ||
+ if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
(info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
OperationType::ChangePermissions)
@@ -1482,15 +1506,15 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
}
}
-Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
+Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
// Lock the physical memory lock.
KScopedLightLock phys_lk(m_map_physical_memory_lock);
// Calculate the last address for convenience.
- const VAddr last_address = address + size - 1;
+ const KProcessAddress last_address = address + size - 1;
// Define iteration variables.
- VAddr cur_address;
+ KProcessAddress cur_address;
size_t mapped_size;
// The entire mapping process can be retried.
@@ -1522,7 +1546,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// Track the memory if it's mapped.
if (info.GetState() != KMemoryState::Free) {
- mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
}
// Advance.
@@ -1575,7 +1599,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
const bool is_free = info.GetState() == KMemoryState::Free;
if (is_free) {
- if (info.GetAddress() < address) {
+ if (info.GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
@@ -1593,7 +1617,8 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// Track the memory if it's mapped.
if (!is_free) {
- checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ checked_mapped_size +=
+ KProcessAddress(info.GetEndAddress()) - cur_address;
}
// Advance.
@@ -1621,7 +1646,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// Prepare to iterate over the memory.
auto pg_it = pg.begin();
- PAddr pg_phys_addr = pg_it->GetAddress();
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
// Reset the current tracking address, and make sure we clean up on failure.
@@ -1629,7 +1654,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
cur_address = address;
ON_RESULT_FAILURE {
if (cur_address > address) {
- const VAddr last_unmap_address = cur_address - 1;
+ const KProcessAddress last_unmap_address = cur_address - 1;
// Iterate, unmapping the pages.
cur_address = address;
@@ -1646,7 +1671,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
if (info.GetState() == KMemoryState::Free) {
// Determine the range to unmap.
const size_t cur_pages =
- std::min(VAddr(info.GetEndAddress()) - cur_address,
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
last_unmap_address + 1 - cur_address) /
PageSize;
@@ -1689,9 +1714,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// If it's unmapped, we need to map it.
if (info.GetState() == KMemoryState::Free) {
// Determine the range to map.
- size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
- last_address + 1 - cur_address) /
- PageSize;
+ size_t map_pages =
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
// While we have pages to map, map them.
while (map_pages > 0) {
@@ -1748,7 +1774,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
}
}
-Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
+Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
// Lock the physical memory lock.
KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1756,13 +1782,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
KScopedLightLock lk(m_general_lock);
// Calculate the last address for convenience.
- const VAddr last_address = address + size - 1;
+ const KProcessAddress last_address = address + size - 1;
// Define iteration variables.
- VAddr map_start_address = 0;
- VAddr map_last_address = 0;
+ KProcessAddress map_start_address = 0;
+ KProcessAddress map_last_address = 0;
- VAddr cur_address;
+ KProcessAddress cur_address;
size_t mapped_size;
size_t num_allocator_blocks = 0;
@@ -1795,7 +1821,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
map_last_address =
(last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
- if (info.GetAddress() < address) {
+ if (info.GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
@@ -1848,7 +1874,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
// If the memory state is normal, we need to unmap it.
if (info.GetState() == KMemoryState::Normal) {
// Determine the range to unmap.
- const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
last_address + 1 - cur_address) /
PageSize;
@@ -1881,7 +1907,8 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
R_SUCCEED();
}
-Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
// Lock the table.
KScopedLightLock lk(m_general_lock);
@@ -1902,53 +1929,73 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
KMemoryAttribute::None));
// Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
+ Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
m_memory_block_slab_manager,
num_src_allocator_blocks);
R_TRY(src_allocator_result);
// Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
+ Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
m_memory_block_slab_manager,
num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
// Map the memory.
- KPageGroup page_linked_list{m_kernel, m_block_info_manager};
- const size_t num_pages{size / PageSize};
- const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
- KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
- const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
-
- AddRegionToPages(src_address, num_pages, page_linked_list);
{
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+        // Create a page group for the memory being mapped.
+ KPageGroup pg{m_kernel, m_block_info_manager};
+
+ // Create the page group representing the source.
+ R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
// Reprotect the source as kernel-read/not mapped.
- auto block_guard = detail::ScopeExit([&] {
- Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
- OperationType::ChangePermissions);
- });
- R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
- R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
+ const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+ const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+ const KPageProperties src_properties = {new_src_perm, false, false,
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+ OperationType::ChangePermissions));
- block_guard.Cancel();
- }
+ // Ensure that we unprotect the source pages on failure.
+ ON_RESULT_FAILURE {
+ const KPageProperties unprotect_properties = {
+ KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableHeadBodyTail};
+ ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
+ OperationType::ChangePermissions) == ResultSuccess);
+ };
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
- new_src_perm, new_src_attr,
- KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
- m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
- KMemoryState::Stack, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
+ // Map the alias pages.
+ const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
+ false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+ src_state, new_src_perm, new_src_attr,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+ }
R_SUCCEED();
}
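MapMemory above trades the old cancel-on-success guard (detail::ScopeExit plus block_guard.Cancel()) for ON_RESULT_FAILURE, which ties the rollback to the function's pending Result instead of a manual cancellation. A minimal, self-contained sketch of the guard pattern being retired; illustrative only, not yuzu's actual detail::ScopeExit:

#include <utility>

// A cancelable scope guard: the old code built one of these to restore the
// source permissions, then called Cancel() once the alias mapping succeeded.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F&& f) : m_func(std::forward<F>(f)) {}
    ~ScopeGuard() {
        if (m_active) {
            m_func(); // rollback fires on every early return unless cancelled
        }
    }
    void Cancel() {
        m_active = false; // success path: keep the new state
    }

private:
    F m_func;
    bool m_active{true};
};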
-Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
// Lock the table.
KScopedLightLock lk(m_general_lock);
@@ -1970,108 +2017,209 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
// Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
+ Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
m_memory_block_slab_manager,
num_src_allocator_blocks);
R_TRY(src_allocator_result);
// Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
+ Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
m_memory_block_slab_manager,
num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
- KPageGroup src_pages{m_kernel, m_block_info_manager};
- KPageGroup dst_pages{m_kernel, m_block_info_manager};
- const size_t num_pages{size / PageSize};
+ // Unmap the memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
- AddRegionToPages(src_address, num_pages, src_pages);
- AddRegionToPages(dst_address, num_pages, dst_pages);
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg{m_kernel, m_block_info_manager};
- R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
+ // Create the page group representing the destination.
+ R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
- {
- auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
+    // Ensure the page group is valid for the source.
+ R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
- R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
- R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
- OperationType::ChangePermissions));
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
- block_guard.Cancel();
- }
+ // Unmap the aliased copy of the pages.
+ const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(
+ this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
+
+ // Ensure that we re-map the aliased pages on failure.
+ ON_RESULT_FAILURE {
+ this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+ };
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Locked);
- m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
- KMemoryState::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
+ // Try to set the permissions for the source pages back to what they should be.
+ const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+ OperationType::ChangePermissions));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(
+ std::addressof(src_allocator), src_address, num_pages, src_state,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+ }
R_SUCCEED();
}
-Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
- KMemoryPermission perm) {
+Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm) {
ASSERT(this->IsLockedByCurrentThread());
- VAddr cur_addr{addr};
+ // Create a page group to hold the pages we allocate.
+ KPageGroup pg{m_kernel, m_block_info_manager};
- for (const auto& node : page_linked_list) {
- if (const auto result{
- Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
- result.IsError()) {
- const size_t num_pages{(addr - cur_addr) / PageSize};
+ // Allocate the pages.
+ R_TRY(
+ m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
- ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
- .IsSuccess());
+ // Ensure that the page group is closed when we're done working with it.
+ SCOPE_EXIT({ pg.Close(); });
- R_RETURN(result);
+ // Clear all pages.
+ for (const auto& it : pg) {
+ std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
+ it.GetSize());
+ }
+
+ // Map the pages.
+ R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
+}
+
+Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ KProcessAddress cur_address = address;
+
+ // Ensure that we clean up on failure.
+ ON_RESULT_FAILURE {
+ ASSERT(!reuse_ll);
+ if (cur_address != start_address) {
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
+ unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
}
+ };
- cur_addr += node.GetNumPages() * PageSize;
+ // Iterate, mapping all pages in the group.
+ for (const auto& block : pg) {
+ // Map and advance.
+ const KPageProperties cur_properties =
+ (cur_address == start_address)
+ ? properties
+ : KPageProperties{properties.perm, properties.io, properties.uncached,
+ DisableMergeAttribute::None};
+        R_TRY(this->Operate(cur_address, block.GetNumPages(), cur_properties.perm,
+                            OperationType::Map, block.GetAddress()));
+ cur_address += block.GetSize();
}
+ // We succeeded!
R_SUCCEED();
}
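Note that only the first block mapped by MapPageGroupImpl receives the caller's DisableMergeAttribute; every subsequent block is mapped with DisableMergeAttribute::None, so merge suppression is applied once at the head of the new mapping rather than at every physical fragment boundary.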
-Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
- KMemoryPermission perm) {
- // Check that the map is in range.
- const size_t num_pages{page_linked_list.GetNumPages()};
- const size_t size{num_pages * PageSize};
- R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ ASSERT(this->IsLockedByCurrentThread());
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ const KProcessAddress last_address = start_address + size - 1;
+ const KProcessAddress end_address = last_address + 1;
- // Check the memory state.
- R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None));
+ // Iterate over the memory.
+ auto pg_it = pg.begin();
+ ASSERT(pg_it != pg.end());
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
- // Map the pages.
- R_TRY(MapPages(address, page_linked_list, perm));
+ auto it = m_memory_block_manager.FindIterator(start_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
- R_SUCCEED();
+ // Determine the range to map.
+ KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address);
+ const KProcessAddress map_end_address =
+ std::min<KProcessAddress>(info.GetEndAddress(), end_address);
+ ASSERT(map_end_address != map_address);
+
+ // Determine if we should disable head merge.
+ const bool disable_head_merge =
+ info.GetAddress() >= GetInteger(start_address) &&
+ True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
+ const KPageProperties map_properties = {
+ info.GetPermission(), false, false,
+ disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
+
+ // While we have pages to map, map them.
+ size_t map_pages = (map_end_address - map_address) / PageSize;
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+            ASSERT(this->Operate(map_address, cur_pages, map_properties.perm, OperationType::Map,
+                                 pg_phys_addr) == ResultSuccess);
+
+ // Advance.
+ map_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ }
+
+ // Check that we re-mapped precisely the page group.
+ ASSERT((++pg_it) == pg.end());
}
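RemapPageGroup runs only on failure paths (see the ON_RESULT_FAILURE in UnmapMemory above), which is why its Operate calls are asserted rather than propagated: if restoring the original mapping fails, there is no further rollback left to attempt.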
-Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
- bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid,
+ KProcessAddress region_start, size_t region_num_pages,
KMemoryState state, KMemoryPermission perm) {
ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
@@ -2084,26 +2232,30 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
KScopedLightLock lk(m_general_lock);
// Find a random address to map at.
- VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
- this->GetNumGuardPages());
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
+ 0, this->GetNumGuardPages());
R_UNLESS(addr != 0, ResultOutOfMemory);
- ASSERT(Common::IsAligned(addr, alignment));
+ ASSERT(Common::IsAligned(GetInteger(addr), alignment));
ASSERT(this->CanContain(addr, num_pages * PageSize, state));
ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None)
- .IsSuccess());
+ KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
// Create an update allocator.
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
// Perform mapping operation.
if (is_pa_valid) {
- R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
} else {
- UNIMPLEMENTED();
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
}
// Update the blocks.
@@ -2116,28 +2268,45 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
R_SUCCEED();
}
-Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
- ASSERT(this->IsLockedByCurrentThread());
+Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ // Check that the map is in range.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
- VAddr cur_addr{addr};
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
- for (const auto& node : page_linked_list) {
- if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
- OperationType::Unmap)};
- result.IsError()) {
- R_RETURN(result);
- }
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
- cur_addr += node.GetNumPages() * PageSize;
- }
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
R_SUCCEED();
}
-Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
+Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
// Check that the unmap is in range.
- const size_t num_pages{page_linked_list.GetNumPages()};
- const size_t size{num_pages * PageSize};
+ const size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Lock the table.
@@ -2151,13 +2320,18 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
KMemoryAttribute::None));
// Create an update allocator.
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
// Perform the unmap.
- R_TRY(UnmapPages(address, page_linked_list));
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@@ -2168,29 +2342,130 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
R_SUCCEED();
}
-Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
- // Check that the unmap is in range.
+Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
+ R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Find a random address to map at.
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
+ 0, this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+ ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ *out_addr = addr;
+ R_SUCCEED();
+}
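A hedged caller sketch for the region-scanning overload above (assumes pt and a populated KPageGroup pg in scope; illustrative only): FindFreeArea picks an address inside the caller's region that clears the table's guard pages.

    KProcessAddress addr{};
    R_TRY(pt.MapPageGroup(std::addressof(addr), pg,
                          pt.GetAliasRegionStart(),
                          pt.GetAliasRegionSize() / PageSize,
                          KMemoryState::Normal, KMemoryPermission::UserReadWrite));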
+
+Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
const size_t size = num_pages * PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+ R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(m_general_lock);
- // Check the memory state.
- size_t num_allocator_blocks{};
+ // Check if state allows us to map.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
+ KMemoryState state) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid unmap request.
+ const size_t num_pages = pg.GetNumPages();
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to unmap.
+ size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
KMemoryState::All, state, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::None));
+ // Check that the page group is valid.
+ R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
+
// Create an update allocator.
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
- // Perform the unmap.
- R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform unmapping operation.
+ const KPageProperties properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@@ -2201,7 +2476,7 @@ Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState stat
R_SUCCEED();
}
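UnmapPageGroup above re-checks, under the lock, that the same physical pages still back the range (IsValidPageGroup) before unmapping. A hedged continuation of the mapping sketch earlier:

    R_TRY(pt.UnmapPageGroup(addr, pg, KMemoryState::Normal));
    pg.Close(); // drop the references opened when the group was allocated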
-Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) {
@@ -2226,7 +2501,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
R_SUCCEED();
}
-Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
+Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
@@ -2287,23 +2562,23 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
// Ensure cache coherency, if we're setting pages as executable.
if (is_x) {
- m_system.InvalidateCpuInstructionCacheRange(addr, size);
+ m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
}
R_SUCCEED();
}
-KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
+KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) {
KScopedLightLock lk(m_general_lock);
return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
}
-KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
+KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) {
if (!Contains(addr, 1)) {
return {
- .m_address = m_address_space_end,
- .m_size = 0 - m_address_space_end,
+ .m_address = GetInteger(m_address_space_end),
+ .m_size = 0 - GetInteger(m_address_space_end),
.m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
.m_device_disable_merge_left_count = 0,
.m_device_disable_merge_right_count = 0,
@@ -2320,7 +2595,8 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
return QueryInfoImpl(addr);
}
-Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
@@ -2357,7 +2633,7 @@ Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermi
R_SUCCEED();
}
-Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
const size_t num_pages = size / PageSize;
ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
KMemoryAttribute::SetMask);
@@ -2412,12 +2688,12 @@ Result KPageTable::SetMaxHeapSize(size_t size) {
R_SUCCEED();
}
-Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
+Result KPageTable::SetHeapSize(u64* out, size_t size) {
// Lock the physical memory mutex.
KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
// Try to perform a reduction in heap, instead of an extension.
- VAddr cur_address{};
+ KProcessAddress cur_address{};
size_t allocation_size{};
{
// Lock the table.
@@ -2468,11 +2744,11 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
m_current_heap_end = m_heap_region_start + size;
// Set the output.
- *out = m_heap_region_start;
+ *out = GetInteger(m_heap_region_start);
R_SUCCEED();
} else if (size == GetHeapSize()) {
// The size requested is exactly the current size.
- *out = m_heap_region_start;
+ *out = GetInteger(m_heap_region_start);
R_SUCCEED();
} else {
// We have to allocate memory. Determine how much to allocate and where while the table
@@ -2526,7 +2802,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
// Clear all the newly allocated pages.
for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
- std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
+ std::memset(m_memory->GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
PageSize);
}
@@ -2545,62 +2821,14 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
m_current_heap_end = m_heap_region_start + size;
// Set the output.
- *out = m_heap_region_start;
+ *out = GetInteger(m_heap_region_start);
R_SUCCEED();
}
}
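SetHeapSize now writes a plain u64 rather than a VAddr, matching the svc boundary where the heap base leaves typed-address land. A hedged usage sketch (illustrative, not from this commit):

    u64 heap_base{};
    R_TRY(pt.SetHeapSize(std::addressof(heap_base), 32ULL << 20)); // grow to 32 MiB
    R_TRY(pt.SetHeapSize(std::addressof(heap_base), 0));           // release it all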
-ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
- bool is_map_only, VAddr region_start,
- size_t region_num_pages, KMemoryState state,
- KMemoryPermission perm, PAddr map_addr) {
- KScopedLightLock lk(m_general_lock);
-
- R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
- ResultInvalidCurrentMemory);
- R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
- const VAddr addr{
- AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
- R_UNLESS(addr, ResultOutOfMemory);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
-
- if (is_map_only) {
- R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
- } else {
-        // Create a page group to hold the pages we allocate.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
- &pg, needed_num_pages,
- KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
- // Ensure that the page group is closed when we're done working with it.
- SCOPE_EXIT({ pg.Close(); });
-
- // Clear all pages.
- for (const auto& it : pg) {
- std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
- m_heap_fill_value, it.GetSize());
- }
-
- R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
- }
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- return addr;
-}
-
-Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
- KMemoryPermission perm, bool is_aligned,
- bool check_heap) {
+Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
+ size_t size, KMemoryPermission perm,
+ bool is_aligned, bool check_heap) {
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -2636,7 +2864,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address,
R_SUCCEED();
}
-Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) {
+Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
+ bool check_heap) {
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -2670,7 +2899,7 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bo
R_SUCCEED();
}
-Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
+Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -2698,7 +2927,8 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
R_SUCCEED();
}
-Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) {
+Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
+ size_t size) {
R_RETURN(this->LockMemoryAndOpen(
nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
@@ -2707,7 +2937,7 @@ Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size)
KMemoryAttribute::Locked));
}
-Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) {
+Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
@@ -2715,7 +2945,7 @@ Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) {
KMemoryAttribute::Locked, nullptr));
}
-Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
+Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) {
R_RETURN(this->LockMemoryAndOpen(
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
@@ -2723,17 +2953,17 @@ Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
KMemoryAttribute::Locked));
}
-Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) {
+Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) {
R_RETURN(this->UnlockMemory(
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
}
-bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
- auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr);
+bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const {
+ auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr));
for (u64 offset{}; offset < size; offset += PageSize) {
- if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) {
+ if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) {
return false;
}
start_ptr += PageSize;
@@ -2741,18 +2971,19 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
return true;
}
-void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) {
- VAddr addr{start};
+void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages,
+ KPageGroup& page_linked_list) {
+ KProcessAddress addr{start};
while (addr < start + (num_pages * PageSize)) {
- const PAddr paddr{GetPhysicalAddr(addr)};
+ const KPhysicalAddress paddr{GetPhysicalAddr(addr)};
ASSERT(paddr != 0);
page_linked_list.AddBlock(paddr, 1);
addr += PageSize;
}
}
-VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
- size_t align) {
+KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
+ u64 needed_num_pages, size_t align) {
if (m_enable_aslr) {
UNIMPLEMENTED();
}
@@ -2760,11 +2991,11 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u6
IsKernel() ? 1 : 4);
}
-Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
+Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
OperationType operation) {
ASSERT(this->IsLockedByCurrentThread());
- ASSERT(Common::IsAligned(addr, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
ASSERT(num_pages > 0);
ASSERT(num_pages == page_group.GetNumPages());
@@ -2777,7 +3008,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
const size_t size{node.GetNumPages() * PageSize};
// Map the pages.
- m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
+ m_memory->MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
addr += size;
}
@@ -2795,12 +3026,12 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
R_SUCCEED();
}
-Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
- OperationType operation, PAddr map_addr) {
+Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
+ OperationType operation, KPhysicalAddress map_addr) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(num_pages > 0);
- ASSERT(Common::IsAligned(addr, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
ASSERT(ContainsPages(addr, num_pages));
switch (operation) {
@@ -2810,14 +3041,14 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
this->AddRegionToPages(addr, num_pages, pages_to_close);
- m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
+ m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
break;
}
case OperationType::MapFirst:
case OperationType::Map: {
ASSERT(map_addr);
- ASSERT(Common::IsAligned(map_addr, PageSize));
- m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
+ ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
+ m_memory->MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
// Open references to pages, if we should.
if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
@@ -2854,7 +3085,7 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
}
}
-VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
+KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
@@ -2926,11 +3157,11 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const {
}
}
-bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
- const VAddr end = addr + size;
- const VAddr last = end - 1;
+bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+ const KProcessAddress end = addr + size;
+ const KProcessAddress last = end - 1;
- const VAddr region_start = this->GetRegionAddress(state);
+ const KProcessAddress region_start = this->GetRegionAddress(state);
const size_t region_size = this->GetRegionSize(state);
const bool is_in_region =
@@ -2985,21 +3216,21 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_
R_SUCCEED();
}
-Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask,
+Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
+ size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
- const VAddr last_addr = addr + size - 1;
+ const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
// If the start address isn't aligned, we need a block.
const size_t blocks_for_start_align =
- (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
+ (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
while (true) {
// Validate against the provided masks.
@@ -3018,7 +3249,7 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a
// If the end address isn't aligned, we need a block.
const size_t blocks_for_end_align =
- (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
+ (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
if (out_blocks_needed != nullptr) {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
@@ -3029,20 +3260,20 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- VAddr addr, size_t size, KMemoryState state_mask,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
- const VAddr last_addr = addr + size - 1;
+ const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
// If the start address isn't aligned, we need a block.
const size_t blocks_for_start_align =
- (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
+ (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
// Validate all blocks in the range have correct state.
const KMemoryState first_state = info.m_state;
@@ -3071,7 +3302,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
// If the end address isn't aligned, we need a block.
const size_t blocks_for_end_align =
- (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
+ (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
// Write output state.
if (out_state != nullptr) {
@@ -3089,11 +3320,12 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
R_SUCCEED();
}
-Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr) {
// Validate basic preconditions.
ASSERT((lock_attr & attr) == KMemoryAttribute::None);
ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
@@ -3123,8 +3355,8 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
attr_mask, attr));
// Get the physical address, if we're supposed to.
    if (out_paddr != nullptr) {
        ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
}
// Make the page group, if we're supposed to.
@@ -3155,7 +3387,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
R_SUCCEED();
}
-Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryPermission new_perm,
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 0a454b05b..022d15f35 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -6,7 +6,6 @@
#include <memory>
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
@@ -15,6 +14,7 @@
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -24,12 +24,36 @@ class System;
namespace Kernel {
+enum class DisableMergeAttribute : u8 {
+ None = (0U << 0),
+ DisableHead = (1U << 0),
+ DisableHeadAndBody = (1U << 1),
+ EnableHeadAndBody = (1U << 2),
+ DisableTail = (1U << 3),
+ EnableTail = (1U << 4),
+ EnableAndMergeHeadBodyTail = (1U << 5),
+ EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+ DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+
+struct KPageProperties {
+ KMemoryPermission perm;
+ bool io;
+ bool uncached;
+ DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
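The pair of static_asserts above pins KPageProperties to a trivially copyable four bytes. A standalone editorial model (stand-in enums; the real KMemoryPermission is declared elsewhere as a u8-backed enum) showing why the fields pack with no padding:

    #include <cstdint>
    #include <type_traits>

    enum class Perm : std::uint8_t {};   // stand-in for KMemoryPermission
    enum class Merge : std::uint8_t {};  // stand-in for DisableMergeAttribute

    struct Properties {                  // mirrors the KPageProperties layout
        Perm perm;                       // 1 byte
        bool io;                         // 1 byte
        bool uncached;                   // 1 byte
        Merge disable_merge_attributes;  // 1 byte -> 4 bytes total, no padding
    };
    static_assert(std::is_trivial_v<Properties>);
    static_assert(sizeof(Properties) == sizeof(std::uint32_t));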
+
class KBlockInfoManager;
class KMemoryBlockManager;
class KResourceLimit;
class KSystemResource;
class KPageTable final {
+protected:
+ struct PageLinkedList;
+
public:
enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
@@ -41,60 +65,48 @@ public:
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
- VAddr code_addr, size_t code_size, KSystemResource* system_resource,
- KResourceLimit* resource_limit);
+ KProcessAddress code_addr, size_t code_size,
+ KSystemResource* system_resource, KResourceLimit* resource_limit,
+ Core::Memory::Memory& memory);
void Finalize();
- Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
+ Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
KMemoryPermission perm);
- Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
- Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
+ Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy);
- Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
- VAddr src_addr);
- Result MapPhysicalMemory(VAddr addr, size_t size);
- Result UnmapPhysicalMemory(VAddr addr, size_t size);
- Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
- Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
- Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
- KMemoryPermission perm);
- Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
- KMemoryState state, KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
- this->GetRegionAddress(state),
- this->GetRegionSize(state) / PageSize, state, perm));
- }
- Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
- Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
- Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
- KMemoryInfo QueryInfo(VAddr addr);
- Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
- Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
+ Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
+ KProcessAddress src_addr);
+ Result MapPhysicalMemory(KProcessAddress addr, size_t size);
+ Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
+ Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
+ Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
+ Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm);
+ KMemoryInfo QueryInfo(KProcessAddress addr);
+ Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
+ Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
Result SetMaxHeapSize(size_t size);
- Result SetHeapSize(VAddr* out, size_t size);
- ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
- VAddr region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm,
- PAddr map_addr = 0);
-
- Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
+ Result SetHeapSize(u64* out, size_t size);
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
KMemoryPermission perm, bool is_aligned, bool check_heap);
- Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
+ Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
- Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
+ Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
- Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size);
- Result UnlockForIpcUserBuffer(VAddr address, size_t size);
+ Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
+ Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
- Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table,
- KMemoryPermission test_perm, KMemoryState dst_state, bool send);
- Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state);
- Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state);
+ Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+ KPageTable& src_page_table, KMemoryPermission test_perm,
+ KMemoryState dst_state, bool send);
+ Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
+ Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
- Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
- Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
- Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+ Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
+ Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
+ Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr);
@@ -111,7 +123,41 @@ public:
return m_block_info_manager;
}
- bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
+ bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
+ region_num_pages, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+
+ Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
+ void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg);
protected:
struct PageLinkedList {
@@ -130,8 +176,8 @@ protected:
m_root = n;
}
- void Push(Core::Memory::Memory& memory, VAddr addr) {
- this->Push(memory.GetPointer<Node>(addr));
+ void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
+ this->Push(memory.GetPointer<Node>(GetInteger(addr)));
}
Node* Peek() const {
@@ -166,32 +212,31 @@ private:
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
- Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
- Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
- bool is_pa_valid, VAddr region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm);
- Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
- bool IsRegionContiguous(VAddr addr, u64 size) const;
- void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
- KMemoryInfo QueryInfoImpl(VAddr addr);
- VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
- size_t align);
- Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
+ bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
+ void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
+ KMemoryInfo QueryInfoImpl(KProcessAddress addr);
+ KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
+ u64 needed_num_pages, size_t align);
+ Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
OperationType operation);
- Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
- PAddr map_addr = 0);
+ Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
+ OperationType operation, KPhysicalAddress map_addr = 0);
void FinalizeUpdate(PageLinkedList* page_list);
- VAddr GetRegionAddress(KMemoryState state) const;
+ KProcessAddress GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
- VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
- size_t alignment, size_t offset, size_t guard_pages);
+ KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages);
- Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
+ Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
- Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
+ Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
@@ -203,12 +248,12 @@ private:
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
- size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
+ Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
@@ -217,39 +262,40 @@ private:
state_mask, state, perm_mask, perm, attr_mask, attr,
ignore_attr));
}
- Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
+ Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
attr_mask, attr, ignore_attr));
}
- Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr);
- Result UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
+    Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr);
+ Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr,
const KPageGroup* pg);
- Result MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages);
- bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
+ Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+ bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return m_general_lock.IsLockedByCurrentThread();
}
- bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
+ bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
ASSERT(this->IsLockedByCurrentThread());
return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
}
- bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
+ bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
ASSERT(this->IsLockedByCurrentThread());
*out = GetPhysicalAddr(virt_addr);
@@ -257,73 +303,79 @@ private:
return *out != 0;
}
- Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address,
- size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
- Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
+ Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+ KProcessAddress address, size_t size, KMemoryPermission test_perm,
+ KMemoryState dst_state);
+ Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
KMemoryPermission test_perm, KMemoryState dst_state,
KPageTable& src_page_table, bool send);
- void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
+ void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
size_t size, KMemoryPermission prot_perm);
+ Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm);
+ Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
+
mutable KLightLock m_general_lock;
mutable KLightLock m_map_physical_memory_lock;
public:
- constexpr VAddr GetAddressSpaceStart() const {
+ constexpr KProcessAddress GetAddressSpaceStart() const {
return m_address_space_start;
}
- constexpr VAddr GetAddressSpaceEnd() const {
+ constexpr KProcessAddress GetAddressSpaceEnd() const {
return m_address_space_end;
}
constexpr size_t GetAddressSpaceSize() const {
return m_address_space_end - m_address_space_start;
}
- constexpr VAddr GetHeapRegionStart() const {
+ constexpr KProcessAddress GetHeapRegionStart() const {
return m_heap_region_start;
}
- constexpr VAddr GetHeapRegionEnd() const {
+ constexpr KProcessAddress GetHeapRegionEnd() const {
return m_heap_region_end;
}
constexpr size_t GetHeapRegionSize() const {
return m_heap_region_end - m_heap_region_start;
}
- constexpr VAddr GetAliasRegionStart() const {
+ constexpr KProcessAddress GetAliasRegionStart() const {
return m_alias_region_start;
}
- constexpr VAddr GetAliasRegionEnd() const {
+ constexpr KProcessAddress GetAliasRegionEnd() const {
return m_alias_region_end;
}
constexpr size_t GetAliasRegionSize() const {
return m_alias_region_end - m_alias_region_start;
}
- constexpr VAddr GetStackRegionStart() const {
+ constexpr KProcessAddress GetStackRegionStart() const {
return m_stack_region_start;
}
- constexpr VAddr GetStackRegionEnd() const {
+ constexpr KProcessAddress GetStackRegionEnd() const {
return m_stack_region_end;
}
constexpr size_t GetStackRegionSize() const {
return m_stack_region_end - m_stack_region_start;
}
- constexpr VAddr GetKernelMapRegionStart() const {
+ constexpr KProcessAddress GetKernelMapRegionStart() const {
return m_kernel_map_region_start;
}
- constexpr VAddr GetKernelMapRegionEnd() const {
+ constexpr KProcessAddress GetKernelMapRegionEnd() const {
return m_kernel_map_region_end;
}
- constexpr VAddr GetCodeRegionStart() const {
+ constexpr KProcessAddress GetCodeRegionStart() const {
return m_code_region_start;
}
- constexpr VAddr GetCodeRegionEnd() const {
+ constexpr KProcessAddress GetCodeRegionEnd() const {
return m_code_region_end;
}
- constexpr VAddr GetAliasCodeRegionStart() const {
+ constexpr KProcessAddress GetAliasCodeRegionStart() const {
return m_alias_code_region_start;
}
- constexpr VAddr GetAliasCodeRegionEnd() const {
+ constexpr KProcessAddress GetAliasCodeRegionEnd() const {
return m_alias_code_region_end;
}
- constexpr VAddr GetAliasCodeRegionSize() const {
+ constexpr size_t GetAliasCodeRegionSize() const {
return m_alias_code_region_end - m_alias_code_region_start;
}
size_t GetNormalMemorySize() {
@@ -336,25 +388,25 @@ public:
constexpr size_t GetHeapSize() const {
return m_current_heap_end - m_heap_region_start;
}
- constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
+ constexpr bool IsInsideAddressSpace(KProcessAddress address, size_t size) const {
return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
}
- constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
+ constexpr bool IsOutsideAliasRegion(KProcessAddress address, size_t size) const {
return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
}
- constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
+ constexpr bool IsOutsideStackRegion(KProcessAddress address, size_t size) const {
return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
}
- constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
+ constexpr bool IsInvalidRegion(KProcessAddress address, size_t size) const {
return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
}
- constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
+ constexpr bool IsInsideHeapRegion(KProcessAddress address, size_t size) const {
return address + size > m_heap_region_start && m_heap_region_end > address;
}
- constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
+ constexpr bool IsInsideAliasRegion(KProcessAddress address, size_t size) const {
return address + size > m_alias_region_start && m_alias_region_end > address;
}
- constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
+ constexpr bool IsOutsideASLRRegion(KProcessAddress address, size_t size) const {
if (IsInvalidRegion(address, size)) {
return true;
}
@@ -366,47 +418,53 @@ public:
}
return {};
}
- constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
+ constexpr bool IsInsideASLRRegion(KProcessAddress address, size_t size) const {
return !IsOutsideASLRRegion(address, size);
}
constexpr size_t GetNumGuardPages() const {
return IsKernel() ? 1 : 4;
}
- PAddr GetPhysicalAddr(VAddr addr) const {
+ KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
ASSERT(backing_addr);
- return backing_addr + addr;
+ return backing_addr + GetInteger(addr);
}
- constexpr bool Contains(VAddr addr) const {
+ constexpr bool Contains(KProcessAddress addr) const {
return m_address_space_start <= addr && addr <= m_address_space_end - 1;
}
- constexpr bool Contains(VAddr addr, size_t size) const {
+ constexpr bool Contains(KProcessAddress addr, size_t size) const {
return m_address_space_start <= addr && addr < addr + size &&
addr + size - 1 <= m_address_space_end - 1;
}
public:
- static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+ static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout,
+ KPhysicalAddress addr) {
return layout.GetLinearVirtualAddress(addr);
}
- static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+ static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
+ KVirtualAddress addr) {
return layout.GetLinearPhysicalAddress(addr);
}
- static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+ static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
+ KPhysicalAddress addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
- static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+ static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
+ KVirtualAddress addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
- static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+ static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
+ KPhysicalAddress addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
- static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+ static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
+ KVirtualAddress addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
@@ -418,7 +476,7 @@ private:
return m_enable_aslr;
}
- constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
+ constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
return (m_address_space_start <= addr) &&
(num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
(addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
@@ -438,26 +496,26 @@ private:
}
PageLinkedList* GetPageList() {
- return &m_ll;
+ return std::addressof(m_ll);
}
};
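KScopedPageTableUpdater is the RAII half of the update pattern: implementation helpers append newly allocated table pages to its list, and the list is handed back to the table on every exit path, so pages staged for a failed update are still reclaimed. An editorial stand-in model of that shape (hypothetical Table/PageList types; routing the destructor through FinalizeUpdate is an assumption consistent with the FinalizeUpdate declaration above):

    #include <memory>

    struct PageList {};
    struct Table { void FinalizeUpdate(PageList*) {} };

    class ScopedUpdater {
    public:
        explicit ScopedUpdater(Table* t) : m_table(t) {}
        ~ScopedUpdater() { m_table->FinalizeUpdate(std::addressof(m_list)); }
        ScopedUpdater(const ScopedUpdater&) = delete;
        ScopedUpdater& operator=(const ScopedUpdater&) = delete;
        PageList* GetPageList() { return std::addressof(m_list); }
    private:
        Table* m_table;
        PageList m_list{};
    };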
private:
- VAddr m_address_space_start{};
- VAddr m_address_space_end{};
- VAddr m_heap_region_start{};
- VAddr m_heap_region_end{};
- VAddr m_current_heap_end{};
- VAddr m_alias_region_start{};
- VAddr m_alias_region_end{};
- VAddr m_stack_region_start{};
- VAddr m_stack_region_end{};
- VAddr m_kernel_map_region_start{};
- VAddr m_kernel_map_region_end{};
- VAddr m_code_region_start{};
- VAddr m_code_region_end{};
- VAddr m_alias_code_region_start{};
- VAddr m_alias_code_region_end{};
+ KProcessAddress m_address_space_start{};
+ KProcessAddress m_address_space_end{};
+ KProcessAddress m_heap_region_start{};
+ KProcessAddress m_heap_region_end{};
+ KProcessAddress m_current_heap_end{};
+ KProcessAddress m_alias_region_start{};
+ KProcessAddress m_alias_region_end{};
+ KProcessAddress m_stack_region_start{};
+ KProcessAddress m_stack_region_end{};
+ KProcessAddress m_kernel_map_region_start{};
+ KProcessAddress m_kernel_map_region_end{};
+ KProcessAddress m_code_region_start{};
+ KProcessAddress m_code_region_end{};
+ KProcessAddress m_alias_code_region_start{};
+ KProcessAddress m_alias_code_region_end{};
size_t m_max_heap_size{};
size_t m_mapped_physical_memory_size{};
@@ -489,6 +547,7 @@ private:
Core::System& m_system;
KernelCore& m_kernel;
+ Core::Memory::Memory* m_memory{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_manager.h b/src/core/hle/kernel/k_page_table_manager.h
index 91a45cde3..4b0e034d0 100644
--- a/src/core/hle/kernel/k_page_table_manager.h
+++ b/src/core/hle/kernel/k_page_table_manager.h
@@ -5,9 +5,9 @@
#include <atomic>
-#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_page_table_slab_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -26,23 +26,23 @@ public:
BaseHeap::Initialize(page_allocator, pt_heap);
}
- VAddr Allocate() {
- return VAddr(BaseHeap::Allocate());
+ KVirtualAddress Allocate() {
+ return KVirtualAddress(BaseHeap::Allocate());
}
- RefCount GetRefCount(VAddr addr) const {
+ RefCount GetRefCount(KVirtualAddress addr) const {
return m_pt_heap->GetRefCount(addr);
}
- void Open(VAddr addr, int count) {
+ void Open(KVirtualAddress addr, int count) {
return m_pt_heap->Open(addr, count);
}
- bool Close(VAddr addr, int count) {
+ bool Close(KVirtualAddress addr, int count) {
return m_pt_heap->Close(addr, count);
}
- bool IsInPageTableHeap(VAddr addr) const {
+ bool IsInPageTableHeap(KVirtualAddress addr) const {
return m_pt_heap->IsInRange(addr);
}
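A hedged usage sketch of the manager's per-page reference counting (assumes a KPageTableManager& mgr; illustrative only). Close returns true exactly when the count reaches zero, so the caller knows the page may be freed:

    KVirtualAddress pt_page = mgr.Allocate();
    mgr.Open(pt_page, 1);
    // ... table entries reference pt_page ...
    if (mgr.Close(pt_page, 1)) {
        // last reference released; the page can go back to the heap (elided)
    }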
diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h
index a9543cbd0..7da0ea669 100644
--- a/src/core/hle/kernel/k_page_table_slab_heap.h
+++ b/src/core/hle/kernel/k_page_table_slab_heap.h
@@ -6,8 +6,8 @@
#include <array>
#include <vector>
-#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
@@ -20,7 +20,8 @@ public:
PageTablePage() = default;
private:
- std::array<u8, PageSize> m_buffer{};
+    // Initializer intentionally skipped: value-initializing m_buffer would zero a full page
+    // on every slab construction.
+ std::array<u8, PageSize> m_buffer;
};
static_assert(sizeof(PageTablePage) == PageSize);
@@ -44,12 +45,12 @@ public:
this->Initialize(rc);
}
- RefCount GetRefCount(VAddr addr) {
+ RefCount GetRefCount(KVirtualAddress addr) {
ASSERT(this->IsInRange(addr));
return *this->GetRefCountPointer(addr);
}
- void Open(VAddr addr, int count) {
+ void Open(KVirtualAddress addr, int count) {
ASSERT(this->IsInRange(addr));
*this->GetRefCountPointer(addr) += static_cast<RefCount>(count);
@@ -57,7 +58,7 @@ public:
ASSERT(this->GetRefCount(addr) > 0);
}
- bool Close(VAddr addr, int count) {
+ bool Close(KVirtualAddress addr, int count) {
ASSERT(this->IsInRange(addr));
ASSERT(this->GetRefCount(addr) >= count);
@@ -65,7 +66,7 @@ public:
return this->GetRefCount(addr) == 0;
}
- bool IsInPageTableHeap(VAddr addr) const {
+ bool IsInPageTableHeap(KVirtualAddress addr) const {
return this->IsInRange(addr);
}
@@ -80,7 +81,7 @@ private:
}
}
- RefCount* GetRefCountPointer(VAddr addr) {
+ RefCount* GetRefCountPointer(KVirtualAddress addr) {
return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize);
}
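The lookup above is plain page-granular indexing from the heap base. A standalone editorial model (stand-in names; a 16-bit count is assumed for RefCount):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    constexpr std::uint64_t PageSize = 0x1000;

    std::int16_t* GetRefCountPointer(std::vector<std::int16_t>& ref_counts,
                                     std::uint64_t heap_base, std::uint64_t addr) {
        const std::uint64_t index = (addr - heap_base) / PageSize;
        assert(addr >= heap_base && index < ref_counts.size());
        return ref_counts.data() + index;
    }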
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index 77d00ae2c..1621ca1d3 100644
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
@@ -1,63 +1,61 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
-KPort::KPort(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {}
+KPort::KPort(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
KPort::~KPort() = default;
-void KPort::Initialize(s32 max_sessions_, bool is_light_, const std::string& name_) {
+void KPort::Initialize(s32 max_sessions, bool is_light, uintptr_t name) {
// Open a new reference count to the initialized port.
- Open();
+ this->Open();
// Create and initialize our server/client pair.
- KAutoObject::Create(std::addressof(server));
- KAutoObject::Create(std::addressof(client));
- server.Initialize(this, name_ + ":Server");
- client.Initialize(this, max_sessions_, name_ + ":Client");
+ KAutoObject::Create(std::addressof(m_server));
+ KAutoObject::Create(std::addressof(m_client));
+ m_server.Initialize(this);
+ m_client.Initialize(this, max_sessions);
// Set our member variables.
- is_light = is_light_;
- name = name_;
- state = State::Normal;
+ m_is_light = is_light;
+ m_name = name;
+ m_state = State::Normal;
}
void KPort::OnClientClosed() {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
- if (state == State::Normal) {
- state = State::ClientClosed;
+ if (m_state == State::Normal) {
+ m_state = State::ClientClosed;
}
}
void KPort::OnServerClosed() {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
- if (state == State::Normal) {
- state = State::ServerClosed;
+ if (m_state == State::Normal) {
+ m_state = State::ServerClosed;
}
}
bool KPort::IsServerClosed() const {
- KScopedSchedulerLock sl{kernel};
- return state == State::ServerClosed;
+ KScopedSchedulerLock sl{m_kernel};
+ return m_state == State::ServerClosed;
}
Result KPort::EnqueueSession(KServerSession* session) {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
- R_UNLESS(state == State::Normal, ResultPortClosed);
+ R_UNLESS(m_state == State::Normal, ResultPortClosed);
- server.EnqueueSession(session);
-
- return ResultSuccess;
+ m_server.EnqueueSession(session);
+ R_SUCCEED();
}
} // namespace Kernel
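KPort::Initialize now records the name as a uintptr_t (in the real kernel, the address of the creator-supplied name string) instead of owning a std::string. A hedged construction sketch (treat the factory and registration helper names as assumptions from the slab/auto-object helpers):

    KPort* port = KPort::Create(kernel);                 // assumed slab-helper factory
    R_UNLESS(port != nullptr, ResultOutOfResource);
    port->Initialize(/*max_sessions=*/8, /*is_light=*/false,
                     /*name=*/reinterpret_cast<uintptr_t>("MyService"));
    KPort::Register(kernel, port);                       // assumed auto-object registration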
diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h
index 0cfc16dab..991be27ab 100644
--- a/src/core/hle/kernel/k_port.h
+++ b/src/core/hle/kernel/k_port.h
@@ -19,17 +19,20 @@ class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjec
KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);
public:
- explicit KPort(KernelCore& kernel_);
+ explicit KPort(KernelCore& kernel);
~KPort() override;
- static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+ static void PostDestroy(uintptr_t arg) {}
- void Initialize(s32 max_sessions_, bool is_light_, const std::string& name_);
+ void Initialize(s32 max_sessions, bool is_light, uintptr_t name);
void OnClientClosed();
void OnServerClosed();
+ uintptr_t GetName() const {
+ return m_name;
+ }
bool IsLight() const {
- return is_light;
+ return m_is_light;
}
bool IsServerClosed() const;
@@ -37,16 +40,16 @@ public:
Result EnqueueSession(KServerSession* session);
KClientPort& GetClientPort() {
- return client;
+ return m_client;
}
KServerPort& GetServerPort() {
- return server;
+ return m_server;
}
const KClientPort& GetClientPort() const {
- return client;
+ return m_client;
}
const KServerPort& GetServerPort() const {
- return server;
+ return m_server;
}
private:
@@ -57,10 +60,11 @@ private:
ServerClosed = 3,
};
- KServerPort server;
- KClientPort client;
- State state{State::Invalid};
- bool is_light{};
+ KServerPort m_server;
+ KClientPort m_client;
+ uintptr_t m_name;
+ State m_state{State::Invalid};
+ bool m_is_light{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index cb2512b0b..26677ec65 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -17,35 +17,41 @@ namespace Kernel {
class KThread;
template <typename T>
-concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
- { t.GetAffinityMask() } -> Common::ConvertibleTo<u64>;
- {t.SetAffinityMask(0)};
+concept KPriorityQueueAffinityMask = !
+std::is_reference_v<T>&& requires(T& t) {
+ { t.GetAffinityMask() } -> Common::ConvertibleTo<u64>;
+ { t.SetAffinityMask(0) };
- { t.GetAffinity(0) } -> std::same_as<bool>;
- {t.SetAffinity(0, false)};
- {t.SetAll()};
-};
+ { t.GetAffinity(0) } -> std::same_as<bool>;
+ { t.SetAffinity(0, false) };
+ { t.SetAll() };
+ };
template <typename T>
-concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
- {typename T::QueueEntry()};
- {(typename T::QueueEntry()).Initialize()};
- {(typename T::QueueEntry()).SetPrev(std::addressof(t))};
- {(typename T::QueueEntry()).SetNext(std::addressof(t))};
- { (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
- { (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
- { t.GetPriorityQueueEntry(0) } -> std::same_as<typename T::QueueEntry&>;
-
- {t.GetAffinityMask()};
- { std::remove_cvref_t<decltype(t.GetAffinityMask())>() } -> KPriorityQueueAffinityMask;
-
- { t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
- { t.GetPriority() } -> Common::ConvertibleTo<s32>;
- { t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
-};
+concept KPriorityQueueMember = !
+std::is_reference_v<T>&& requires(T& t) {
+ { typename T::QueueEntry() };
+ { (typename T::QueueEntry()).Initialize() };
+ { (typename T::QueueEntry()).SetPrev(std::addressof(t)) };
+ { (typename T::QueueEntry()).SetNext(std::addressof(t)) };
+ { (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
+ { (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
+ {
+ t.GetPriorityQueueEntry(0)
+ } -> std::same_as<typename T::QueueEntry&>;
+
+ { t.GetAffinityMask() };
+ {
+ std::remove_cvref_t<decltype(t.GetAffinityMask())>()
+ } -> KPriorityQueueAffinityMask;
+
+ { t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
+ { t.GetPriority() } -> Common::ConvertibleTo<s32>;
+ { t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
+ };
template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
-requires KPriorityQueueMember<Member>
+ requires KPriorityQueueMember<Member>
class KPriorityQueue {
public:
using AffinityMaskType = std::remove_cv_t<
@@ -71,11 +77,11 @@ private:
public:
class KPerCoreQueue {
private:
- std::array<Entry, NumCores> root{};
+ std::array<Entry, NumCores> m_root{};
public:
constexpr KPerCoreQueue() {
- for (auto& per_core_root : root) {
+ for (auto& per_core_root : m_root) {
per_core_root.Initialize();
}
}
@@ -85,15 +91,15 @@ public:
Entry& member_entry = member->GetPriorityQueueEntry(core);
// Get the entry associated with the end of the queue.
- Member* tail = this->root[core].GetPrev();
+ Member* tail = m_root[core].GetPrev();
Entry& tail_entry =
- (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
+ (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : m_root[core];
// Link the entries.
member_entry.SetPrev(tail);
member_entry.SetNext(nullptr);
tail_entry.SetNext(member);
- this->root[core].SetPrev(member);
+ m_root[core].SetPrev(member);
return tail == nullptr;
}
@@ -103,15 +109,15 @@ public:
Entry& member_entry = member->GetPriorityQueueEntry(core);
// Get the entry associated with the front of the queue.
- Member* head = this->root[core].GetNext();
+ Member* head = m_root[core].GetNext();
Entry& head_entry =
- (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
+ (head != nullptr) ? head->GetPriorityQueueEntry(core) : m_root[core];
// Link the entries.
member_entry.SetPrev(nullptr);
member_entry.SetNext(head);
head_entry.SetPrev(member);
- this->root[core].SetNext(member);
+ m_root[core].SetNext(member);
return (head == nullptr);
}
@@ -124,9 +130,9 @@ public:
Member* prev = member_entry.GetPrev();
Member* next = member_entry.GetNext();
Entry& prev_entry =
- (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
+ (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : m_root[core];
Entry& next_entry =
- (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
+ (next != nullptr) ? next->GetPriorityQueueEntry(core) : m_root[core];
// Unlink.
prev_entry.SetNext(next);
@@ -136,7 +142,7 @@ public:
}
constexpr Member* GetFront(s32 core) const {
- return this->root[core].GetNext();
+ return m_root[core].GetNext();
}
};
@@ -152,8 +158,8 @@ public:
return;
}
- if (this->queues[priority].PushBack(core, member)) {
- this->available_priorities[core].SetBit(priority);
+ if (m_queues[priority].PushBack(core, member)) {
+ m_available_priorities[core].SetBit(priority);
}
}
@@ -165,8 +171,8 @@ public:
return;
}
- if (this->queues[priority].PushFront(core, member)) {
- this->available_priorities[core].SetBit(priority);
+ if (m_queues[priority].PushFront(core, member)) {
+ m_available_priorities[core].SetBit(priority);
}
}
@@ -178,18 +184,17 @@ public:
return;
}
- if (this->queues[priority].Remove(core, member)) {
- this->available_priorities[core].ClearBit(priority);
+ if (m_queues[priority].Remove(core, member)) {
+ m_available_priorities[core].ClearBit(priority);
}
}
constexpr Member* GetFront(s32 core) const {
ASSERT(IsValidCore(core));
- const s32 priority =
- static_cast<s32>(this->available_priorities[core].CountLeadingZero());
+ const s32 priority = static_cast<s32>(m_available_priorities[core].CountLeadingZero());
if (priority <= LowestPriority) {
- return this->queues[priority].GetFront(core);
+ return m_queues[priority].GetFront(core);
} else {
return nullptr;
}
@@ -200,7 +205,7 @@ public:
ASSERT(IsValidPriority(priority));
if (priority <= LowestPriority) {
- return this->queues[priority].GetFront(core);
+ return m_queues[priority].GetFront(core);
} else {
return nullptr;
}
@@ -212,9 +217,9 @@ public:
Member* next = member->GetPriorityQueueEntry(core).GetNext();
if (next == nullptr) {
const s32 priority = static_cast<s32>(
- this->available_priorities[core].GetNextSet(member->GetPriority()));
+ m_available_priorities[core].GetNextSet(member->GetPriority()));
if (priority <= LowestPriority) {
- next = this->queues[priority].GetFront(core);
+ next = m_queues[priority].GetFront(core);
}
}
return next;
@@ -225,8 +230,8 @@ public:
ASSERT(IsValidPriority(priority));
if (priority <= LowestPriority) {
- this->queues[priority].Remove(core, member);
- this->queues[priority].PushFront(core, member);
+ m_queues[priority].Remove(core, member);
+ m_queues[priority].PushFront(core, member);
}
}
@@ -235,29 +240,29 @@ public:
ASSERT(IsValidPriority(priority));
if (priority <= LowestPriority) {
- this->queues[priority].Remove(core, member);
- this->queues[priority].PushBack(core, member);
- return this->queues[priority].GetFront(core);
+ m_queues[priority].Remove(core, member);
+ m_queues[priority].PushBack(core, member);
+ return m_queues[priority].GetFront(core);
} else {
return nullptr;
}
}
private:
- std::array<KPerCoreQueue, NumPriority> queues{};
- std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{};
+ std::array<KPerCoreQueue, NumPriority> m_queues{};
+ std::array<Common::BitSet64<NumPriority>, NumCores> m_available_priorities{};
};
private:
- KPriorityQueueImpl scheduled_queue;
- KPriorityQueueImpl suggested_queue;
+ KPriorityQueueImpl m_scheduled_queue;
+ KPriorityQueueImpl m_suggested_queue;
private:
- constexpr void ClearAffinityBit(u64& affinity, s32 core) {
+ static constexpr void ClearAffinityBit(u64& affinity, s32 core) {
affinity &= ~(UINT64_C(1) << core);
}
- constexpr s32 GetNextCore(u64& affinity) {
+ static constexpr s32 GetNextCore(u64& affinity) {
const s32 core = std::countr_zero(affinity);
ClearAffinityBit(affinity, core);
return core;
@@ -269,13 +274,13 @@ private:
// Push onto the scheduled queue for its core, if we can.
u64 affinity = member->GetAffinityMask().GetAffinityMask();
if (const s32 core = member->GetActiveCore(); core >= 0) {
- this->scheduled_queue.PushBack(priority, core, member);
+ m_scheduled_queue.PushBack(priority, core, member);
ClearAffinityBit(affinity, core);
}
// And suggest the thread for all other cores.
while (affinity) {
- this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+ m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
}
}
@@ -285,14 +290,14 @@ private:
// Push onto the scheduled queue for its core, if we can.
u64 affinity = member->GetAffinityMask().GetAffinityMask();
if (const s32 core = member->GetActiveCore(); core >= 0) {
- this->scheduled_queue.PushFront(priority, core, member);
+ m_scheduled_queue.PushFront(priority, core, member);
ClearAffinityBit(affinity, core);
}
// And suggest the thread for all other cores.
// Note: Nintendo pushes onto the back of the suggested queue, not the front.
while (affinity) {
- this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+ m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
}
}
@@ -302,13 +307,13 @@ private:
// Remove from the scheduled queue for its core.
u64 affinity = member->GetAffinityMask().GetAffinityMask();
if (const s32 core = member->GetActiveCore(); core >= 0) {
- this->scheduled_queue.Remove(priority, core, member);
+ m_scheduled_queue.Remove(priority, core, member);
ClearAffinityBit(affinity, core);
}
// Remove from the suggested queue for all other cores.
while (affinity) {
- this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
+ m_suggested_queue.Remove(priority, GetNextCore(affinity), member);
}
}
@@ -317,27 +322,27 @@ public:
// Getters.
constexpr Member* GetScheduledFront(s32 core) const {
- return this->scheduled_queue.GetFront(core);
+ return m_scheduled_queue.GetFront(core);
}
constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
- return this->scheduled_queue.GetFront(priority, core);
+ return m_scheduled_queue.GetFront(priority, core);
}
constexpr Member* GetSuggestedFront(s32 core) const {
- return this->suggested_queue.GetFront(core);
+ return m_suggested_queue.GetFront(core);
}
constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
- return this->suggested_queue.GetFront(priority, core);
+ return m_suggested_queue.GetFront(priority, core);
}
constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
- return this->scheduled_queue.GetNext(core, member);
+ return m_scheduled_queue.GetNext(core, member);
}
constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
- return this->suggested_queue.GetNext(core, member);
+ return m_suggested_queue.GetNext(core, member);
}
constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
@@ -369,7 +374,7 @@ public:
return;
}
- this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
+ m_scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
}
constexpr KThread* MoveToScheduledBack(Member* member) {
@@ -378,8 +383,7 @@ public:
return {};
}
- return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
- member);
+ return m_scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
}
// First class fancy operations.
@@ -419,9 +423,9 @@ public:
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
if (prev_affinity.GetAffinity(core)) {
if (core == prev_core) {
- this->scheduled_queue.Remove(priority, core, member);
+ m_scheduled_queue.Remove(priority, core, member);
} else {
- this->suggested_queue.Remove(priority, core, member);
+ m_suggested_queue.Remove(priority, core, member);
}
}
}
@@ -430,9 +434,9 @@ public:
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
if (new_affinity.GetAffinity(core)) {
if (core == new_core) {
- this->scheduled_queue.PushBack(priority, core, member);
+ m_scheduled_queue.PushBack(priority, core, member);
} else {
- this->suggested_queue.PushBack(priority, core, member);
+ m_suggested_queue.PushBack(priority, core, member);
}
}
}
@@ -452,22 +456,22 @@ public:
if (prev_core != new_core) {
// Remove from the scheduled queue for the previous core.
if (prev_core >= 0) {
- this->scheduled_queue.Remove(priority, prev_core, member);
+ m_scheduled_queue.Remove(priority, prev_core, member);
}
// Remove from the suggested queue and add to the scheduled queue for the new core.
if (new_core >= 0) {
- this->suggested_queue.Remove(priority, new_core, member);
+ m_suggested_queue.Remove(priority, new_core, member);
if (to_front) {
- this->scheduled_queue.PushFront(priority, new_core, member);
+ m_scheduled_queue.PushFront(priority, new_core, member);
} else {
- this->scheduled_queue.PushBack(priority, new_core, member);
+ m_scheduled_queue.PushBack(priority, new_core, member);
}
}
// Add to the suggested queue for the previous core.
if (prev_core >= 0) {
- this->suggested_queue.PushBack(priority, prev_core, member);
+ m_suggested_queue.PushBack(priority, prev_core, member);
}
}
}
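
The implementation above pairs, per core, one intrusive list per priority level with a 64-bit mask of non-empty priorities (SetBit/ClearBit on push/remove), so GetFront() reduces to a count-leading-zeros over that mask. Below is a toy model of just the mask bookkeeping, assuming priority p maps to bit (63 - p); Common::BitSet64 encapsulates the real layout, and these free functions are illustrative only.

#include <bit>
#include <cstdint>

// Toy model: priority p is tracked at bit (63 - p), so countl_zero()
// over the mask yields the numerically smallest (most urgent) priority.
void SetAvailable(std::uint64_t& mask, int p) {
    mask |= (UINT64_C(1) << (63 - p));
}
void ClearAvailable(std::uint64_t& mask, int p) {
    mask &= ~(UINT64_C(1) << (63 - p));
}
int NextPriority(std::uint64_t mask) {
    return mask ? std::countl_zero(mask) : 64; // 64 == nothing queued
}

KPriorityQueueImpl::GetFront() then compares the looked-up priority against LowestPriority, exactly as a caller here would check for the 64 sentinel.
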
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index a1abf5d68..efe86ad27 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -36,22 +36,23 @@ namespace {
* @param owner_process The parent process for the main thread
* @param priority The priority to give the main thread
*/
-void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) {
- const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
+void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority,
+ KProcessAddress stack_top) {
+ const KProcessAddress entry_point = owner_process.PageTable().GetCodeRegionStart();
ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1));
KThread* thread = KThread::Create(system.Kernel());
SCOPE_EXIT({ thread->Close(); });
ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
- owner_process.GetIdealCoreId(), &owner_process)
+ owner_process.GetIdealCoreId(),
+ std::addressof(owner_process))
.IsSuccess());
// Register 1 must be a handle to the main thread
Handle thread_handle{};
- owner_process.GetHandleTable().Add(&thread_handle, thread);
+ owner_process.GetHandleTable().Add(std::addressof(thread_handle), thread);
- thread->SetName("main");
thread->GetContext32().cpu_registers[0] = 0;
thread->GetContext64().cpu_registers[0] = 0;
thread->GetContext32().cpu_registers[1] = thread_handle;
@@ -71,32 +72,32 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
auto& kernel = system.Kernel();
process->name = std::move(process_name);
- process->resource_limit = res_limit;
- process->system_resource_address = 0;
- process->state = State::Created;
- process->program_id = 0;
- process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
- : kernel.CreateNewUserProcessID();
- process->capabilities.InitializeForMetadatalessProcess();
- process->is_initialized = true;
+ process->m_resource_limit = res_limit;
+ process->m_system_resource_address = 0;
+ process->m_state = State::Created;
+ process->m_program_id = 0;
+ process->m_process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
+ : kernel.CreateNewUserProcessID();
+ process->m_capabilities.InitializeForMetadatalessProcess();
+ process->m_is_initialized = true;
std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
std::uniform_int_distribution<u64> distribution;
- std::generate(process->random_entropy.begin(), process->random_entropy.end(),
+ std::generate(process->m_random_entropy.begin(), process->m_random_entropy.end(),
[&] { return distribution(rng); });
kernel.AppendNewProcess(process);
// Clear remaining fields.
- process->num_running_threads = 0;
- process->is_signaled = false;
- process->exception_thread = nullptr;
- process->is_suspended = false;
- process->schedule_count = 0;
- process->is_handle_table_initialized = false;
+ process->m_num_running_threads = 0;
+ process->m_is_signaled = false;
+ process->m_exception_thread = nullptr;
+ process->m_is_suspended = false;
+ process->m_schedule_count = 0;
+ process->m_is_handle_table_initialized = false;
// Open a reference to the resource limit.
- process->resource_limit->Open();
+ process->m_resource_limit->Open();
R_SUCCEED();
}
@@ -106,66 +107,65 @@ void KProcess::DoWorkerTaskImpl() {
}
KResourceLimit* KProcess::GetResourceLimit() const {
- return resource_limit;
+ return m_resource_limit;
}
void KProcess::IncrementRunningThreadCount() {
- ASSERT(num_running_threads.load() >= 0);
- ++num_running_threads;
+ ASSERT(m_num_running_threads.load() >= 0);
+ ++m_num_running_threads;
}
void KProcess::DecrementRunningThreadCount() {
- ASSERT(num_running_threads.load() > 0);
+ ASSERT(m_num_running_threads.load() > 0);
- if (const auto prev = num_running_threads--; prev == 1) {
+ if (const auto prev = m_num_running_threads--; prev == 1) {
// TODO(bunnei): Process termination to be implemented when multiprocess is supported.
- UNIMPLEMENTED_MSG("KProcess termination is not implemennted!");
}
}
u64 KProcess::GetTotalPhysicalMemoryAvailable() {
- const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
- page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
- main_thread_stack_size};
- if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
+ const u64 capacity{m_resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
+ m_page_table.GetNormalMemorySize() + GetSystemResourceSize() + m_image_size +
+ m_main_thread_stack_size};
+ if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
capacity != pool_size) {
LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
}
- if (capacity < memory_usage_capacity) {
+ if (capacity < m_memory_usage_capacity) {
return capacity;
}
- return memory_usage_capacity;
+ return m_memory_usage_capacity;
}
u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
- return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
+ return this->GetTotalPhysicalMemoryAvailable() - this->GetSystemResourceSize();
}
u64 KProcess::GetTotalPhysicalMemoryUsed() {
- return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
- GetSystemResourceSize();
+ return m_image_size + m_main_thread_stack_size + m_page_table.GetNormalMemorySize() +
+ this->GetSystemResourceSize();
}
u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
- return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
+ return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceUsage();
}
bool KProcess::ReleaseUserException(KThread* thread) {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
- if (exception_thread == thread) {
- exception_thread = nullptr;
+ if (m_exception_thread == thread) {
+ m_exception_thread = nullptr;
// Remove waiter thread.
- s32 num_waiters{};
- if (KThread* next = thread->RemoveWaiterByKey(
- std::addressof(num_waiters),
- reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
+ bool has_waiters{};
+ if (KThread* next = thread->RemoveKernelWaiterByKey(
+ std::addressof(has_waiters),
+ reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)));
next != nullptr) {
next->EndWait(ResultSuccess);
}
- KScheduler::SetSchedulerUpdateNeeded(kernel);
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
return true;
} else {
@@ -174,72 +174,72 @@ bool KProcess::ReleaseUserException(KThread* thread) {
}
void KProcess::PinCurrentThread(s32 core_id) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Get the current thread.
KThread* cur_thread =
- kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+ m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// If the thread isn't terminated, pin it.
if (!cur_thread->IsTerminationRequested()) {
// Pin it.
- PinThread(core_id, cur_thread);
+ this->PinThread(core_id, cur_thread);
cur_thread->Pin(core_id);
// An update is needed.
- KScheduler::SetSchedulerUpdateNeeded(kernel);
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
}
}
void KProcess::UnpinCurrentThread(s32 core_id) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Get the current thread.
KThread* cur_thread =
- kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+ m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// Unpin it.
cur_thread->Unpin();
- UnpinThread(core_id, cur_thread);
+ this->UnpinThread(core_id, cur_thread);
// An update is needed.
- KScheduler::SetSchedulerUpdateNeeded(kernel);
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
}
void KProcess::UnpinThread(KThread* thread) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Get the thread's core id.
const auto core_id = thread->GetActiveCore();
// Unpin it.
- UnpinThread(core_id, thread);
+ this->UnpinThread(core_id, thread);
thread->Unpin();
// An update is needed.
- KScheduler::SetSchedulerUpdateNeeded(kernel);
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
}
-Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
[[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
- KScopedLightLock lk(state_lock);
+ KScopedLightLock lk(m_state_lock);
// Try to find an existing info for the memory.
KSharedMemoryInfo* shemen_info = nullptr;
const auto iter = std::find_if(
- shared_memory_list.begin(), shared_memory_list.end(),
+ m_shared_memory_list.begin(), m_shared_memory_list.end(),
[shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
- if (iter != shared_memory_list.end()) {
+ if (iter != m_shared_memory_list.end()) {
shemen_info = *iter;
}
if (shemen_info == nullptr) {
- shemen_info = KSharedMemoryInfo::Allocate(kernel);
+ shemen_info = KSharedMemoryInfo::Allocate(m_kernel);
R_UNLESS(shemen_info != nullptr, ResultOutOfMemory);
shemen_info->Initialize(shmem);
- shared_memory_list.push_back(shemen_info);
+ m_shared_memory_list.push_back(shemen_info);
}
// Open a reference to the shared memory and its info.
@@ -249,24 +249,24 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
R_SUCCEED();
}
-void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
[[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
- KScopedLightLock lk(state_lock);
+ KScopedLightLock lk(m_state_lock);
KSharedMemoryInfo* shemen_info = nullptr;
const auto iter = std::find_if(
- shared_memory_list.begin(), shared_memory_list.end(),
+ m_shared_memory_list.begin(), m_shared_memory_list.end(),
[shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
- if (iter != shared_memory_list.end()) {
+ if (iter != m_shared_memory_list.end()) {
shemen_info = *iter;
}
ASSERT(shemen_info != nullptr);
if (shemen_info->Close()) {
- shared_memory_list.erase(iter);
- KSharedMemoryInfo::Free(kernel, shemen_info);
+ m_shared_memory_list.erase(iter);
+ KSharedMemoryInfo::Free(m_kernel, shemen_info);
}
// Close a reference to the shared memory.
@@ -274,22 +274,22 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a
}
void KProcess::RegisterThread(KThread* thread) {
- KScopedLightLock lk{list_lock};
+ KScopedLightLock lk{m_list_lock};
- thread_list.push_back(thread);
+ m_thread_list.push_back(thread);
}
void KProcess::UnregisterThread(KThread* thread) {
- KScopedLightLock lk{list_lock};
+ KScopedLightLock lk{m_list_lock};
- thread_list.remove(thread);
+ m_thread_list.remove(thread);
}
u64 KProcess::GetFreeThreadCount() const {
- if (resource_limit != nullptr) {
+ if (m_resource_limit != nullptr) {
const auto current_value =
- resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax);
- const auto limit_value = resource_limit->GetLimitValue(LimitableResource::ThreadCountMax);
+ m_resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax);
+ const auto limit_value = m_resource_limit->GetLimitValue(LimitableResource::ThreadCountMax);
return limit_value - current_value;
} else {
return 0;
@@ -298,87 +298,85 @@ u64 KProcess::GetFreeThreadCount() const {
Result KProcess::Reset() {
// Lock the process and the scheduler.
- KScopedLightLock lk(state_lock);
- KScopedSchedulerLock sl{kernel};
+ KScopedLightLock lk(m_state_lock);
+ KScopedSchedulerLock sl{m_kernel};
// Validate that we're in a state that we can reset.
- R_UNLESS(state != State::Terminated, ResultInvalidState);
- R_UNLESS(is_signaled, ResultInvalidState);
+ R_UNLESS(m_state != State::Terminated, ResultInvalidState);
+ R_UNLESS(m_is_signaled, ResultInvalidState);
// Clear signaled.
- is_signaled = false;
+ m_is_signaled = false;
R_SUCCEED();
}
Result KProcess::SetActivity(ProcessActivity activity) {
// Lock ourselves and the scheduler.
- KScopedLightLock lk{state_lock};
- KScopedLightLock list_lk{list_lock};
- KScopedSchedulerLock sl{kernel};
+ KScopedLightLock lk{m_state_lock};
+ KScopedLightLock list_lk{m_list_lock};
+ KScopedSchedulerLock sl{m_kernel};
// Validate our state.
- R_UNLESS(state != State::Terminating, ResultInvalidState);
- R_UNLESS(state != State::Terminated, ResultInvalidState);
+ R_UNLESS(m_state != State::Terminating, ResultInvalidState);
+ R_UNLESS(m_state != State::Terminated, ResultInvalidState);
// Either pause or resume.
if (activity == ProcessActivity::Paused) {
// Verify that we're not suspended.
- R_UNLESS(!is_suspended, ResultInvalidState);
+ R_UNLESS(!m_is_suspended, ResultInvalidState);
// Suspend all threads.
- for (auto* thread : GetThreadList()) {
+ for (auto* thread : this->GetThreadList()) {
thread->RequestSuspend(SuspendType::Process);
}
// Set ourselves as suspended.
- SetSuspended(true);
+ this->SetSuspended(true);
} else {
ASSERT(activity == ProcessActivity::Runnable);
// Verify that we're suspended.
- R_UNLESS(is_suspended, ResultInvalidState);
+ R_UNLESS(m_is_suspended, ResultInvalidState);
// Resume all threads.
- for (auto* thread : GetThreadList()) {
+ for (auto* thread : this->GetThreadList()) {
thread->Resume(SuspendType::Process);
}
// Set ourselves as resumed.
- SetSuspended(false);
+ this->SetSuspended(false);
}
R_SUCCEED();
}
Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
- program_id = metadata.GetTitleID();
- ideal_core = metadata.GetMainThreadCore();
- is_64bit_process = metadata.Is64BitProgram();
- system_resource_size = metadata.GetSystemResourceSize();
- image_size = code_size;
-
- // We currently do not support process-specific system resource
- UNIMPLEMENTED_IF(system_resource_size != 0);
+ m_program_id = metadata.GetTitleID();
+ m_ideal_core = metadata.GetMainThreadCore();
+ m_is_64bit_process = metadata.Is64BitProgram();
+ m_system_resource_size = metadata.GetSystemResourceSize();
+ m_image_size = code_size;
KScopedResourceReservation memory_reservation(
- resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size);
+ m_resource_limit, LimitableResource::PhysicalMemoryMax, code_size + m_system_resource_size);
if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
- code_size + system_resource_size);
+ code_size + m_system_resource_size);
R_RETURN(ResultLimitReached);
}
- // Initialize proces address space
- if (const Result result{page_table.InitializeForProcess(
+ // Initialize process address space
+ if (const Result result{m_page_table.InitializeForProcess(
metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
- 0x8000000, code_size, &kernel.GetSystemSystemResource(), resource_limit)};
+ 0x8000000, code_size, std::addressof(m_kernel.GetAppSystemResource()), m_resource_limit,
+ m_kernel.System().ApplicationMemory())};
result.IsError()) {
R_RETURN(result);
}
// Map process code region
- if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
- code_size / PageSize, KMemoryState::Code,
- KMemoryPermission::None)};
+ if (const Result result{m_page_table.MapProcessCode(m_page_table.GetCodeRegionStart(),
+ code_size / PageSize, KMemoryState::Code,
+ KMemoryPermission::None)};
result.IsError()) {
R_RETURN(result);
}
@@ -386,7 +384,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
if (const Result result{
- capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
+ m_capabilities.InitializeForUserProcess(caps.data(), caps.size(), m_page_table)};
result.IsError()) {
R_RETURN(result);
}
@@ -396,12 +394,14 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
case FileSys::ProgramAddressSpaceType::Is32Bit:
case FileSys::ProgramAddressSpaceType::Is36Bit:
case FileSys::ProgramAddressSpaceType::Is39Bit:
- memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
+ m_memory_usage_capacity =
+ m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart();
break;
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
- memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
- page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
+ m_memory_usage_capacity =
+ (m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart()) +
+ (m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart());
break;
default:
@@ -410,34 +410,34 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
}
// Create TLS region
- R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
+ R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
memory_reservation.Commit();
- R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
+ R_RETURN(m_handle_table.Initialize(m_capabilities.GetHandleTableSize()));
}
void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
- AllocateMainThreadStack(stack_size);
- resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
- resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size);
+ ASSERT(this->AllocateMainThreadStack(stack_size) == ResultSuccess);
+ m_resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
- const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
- ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
+ const std::size_t heap_capacity{m_memory_usage_capacity -
+ (m_main_thread_stack_size + m_image_size)};
+ ASSERT(!m_page_table.SetMaxHeapSize(heap_capacity).IsError());
- ChangeState(State::Running);
+ this->ChangeState(State::Running);
- SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
+ SetupMainThread(m_kernel.System(), *this, main_thread_priority, m_main_thread_stack_top);
}
void KProcess::PrepareForTermination() {
- ChangeState(State::Terminating);
+ this->ChangeState(State::Terminating);
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
for (auto* thread : in_thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
- if (thread == GetCurrentThreadPointer(kernel))
+ if (thread == GetCurrentThreadPointer(m_kernel))
continue;
// TODO(Subv): When are the other running/ready threads terminated?
@@ -448,24 +448,24 @@ void KProcess::PrepareForTermination() {
}
};
- stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
+ stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList());
- this->DeleteThreadLocalRegion(plr_address);
- plr_address = 0;
+ this->DeleteThreadLocalRegion(m_plr_address);
+ m_plr_address = 0;
- if (resource_limit) {
- resource_limit->Release(LimitableResource::PhysicalMemoryMax,
- main_thread_stack_size + image_size);
+ if (m_resource_limit) {
+ m_resource_limit->Release(LimitableResource::PhysicalMemoryMax,
+ m_main_thread_stack_size + m_image_size);
}
- ChangeState(State::Terminated);
+ this->ChangeState(State::Terminated);
}
void KProcess::Finalize() {
// Free all shared memory infos.
{
- auto it = shared_memory_list.begin();
- while (it != shared_memory_list.end()) {
+ auto it = m_shared_memory_list.begin();
+ while (it != m_shared_memory_list.end()) {
KSharedMemoryInfo* info = *it;
KSharedMemory* shmem = info->GetSharedMemory();
@@ -475,40 +475,40 @@ void KProcess::Finalize() {
shmem->Close();
- it = shared_memory_list.erase(it);
- KSharedMemoryInfo::Free(kernel, info);
+ it = m_shared_memory_list.erase(it);
+ KSharedMemoryInfo::Free(m_kernel, info);
}
}
// Release memory to the resource limit.
- if (resource_limit != nullptr) {
- resource_limit->Close();
- resource_limit = nullptr;
+ if (m_resource_limit != nullptr) {
+ m_resource_limit->Close();
+ m_resource_limit = nullptr;
}
// Finalize the page table.
- page_table.Finalize();
+ m_page_table.Finalize();
// Perform inherited finalization.
- KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
+ KSynchronizationObject::Finalize();
}
-Result KProcess::CreateThreadLocalRegion(VAddr* out) {
+Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
KThreadLocalPage* tlp = nullptr;
- VAddr tlr = 0;
+ KProcessAddress tlr = 0;
// See if we can get a region from a partially used TLP.
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
- if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
+ if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
tlr = it->Reserve();
ASSERT(tlr != 0);
if (it->IsAllUsed()) {
tlp = std::addressof(*it);
- partially_used_tlp_tree.erase(it);
- fully_used_tlp_tree.insert(*tlp);
+ m_partially_used_tlp_tree.erase(it);
+ m_fully_used_tlp_tree.insert(*tlp);
}
*out = tlr;
@@ -517,12 +517,12 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
}
// Allocate a new page.
- tlp = KThreadLocalPage::Allocate(kernel);
+ tlp = KThreadLocalPage::Allocate(m_kernel);
R_UNLESS(tlp != nullptr, ResultOutOfMemory);
- auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); });
+ auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(m_kernel, tlp); });
// Initialize the new page.
- R_TRY(tlp->Initialize(kernel, this));
+ R_TRY(tlp->Initialize(m_kernel, this));
// Reserve a TLR.
tlr = tlp->Reserve();
@@ -530,11 +530,11 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
// Insert into our tree.
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
if (tlp->IsAllUsed()) {
- fully_used_tlp_tree.insert(*tlp);
+ m_fully_used_tlp_tree.insert(*tlp);
} else {
- partially_used_tlp_tree.insert(*tlp);
+ m_partially_used_tlp_tree.insert(*tlp);
}
}
@@ -544,30 +544,30 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
R_SUCCEED();
}
-Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
+Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
KThreadLocalPage* page_to_free = nullptr;
// Release the region.
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Try to find the page in the partially used list.
- auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
- if (it == partially_used_tlp_tree.end()) {
+ auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
+ if (it == m_partially_used_tlp_tree.end()) {
// If we don't find it, it has to be in the fully used list.
- it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
- R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress);
+ it = m_fully_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
+ R_UNLESS(it != m_fully_used_tlp_tree.end(), ResultInvalidAddress);
// Release the region.
it->Release(addr);
// Move the page out of the fully used list.
KThreadLocalPage* tlp = std::addressof(*it);
- fully_used_tlp_tree.erase(it);
+ m_fully_used_tlp_tree.erase(it);
if (tlp->IsAllFree()) {
page_to_free = tlp;
} else {
- partially_used_tlp_tree.insert(*tlp);
+ m_partially_used_tlp_tree.insert(*tlp);
}
} else {
// Release the region.
@@ -576,7 +576,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
// Handle the all-free case.
KThreadLocalPage* tlp = std::addressof(*it);
if (tlp->IsAllFree()) {
- partially_used_tlp_tree.erase(it);
+ m_partially_used_tlp_tree.erase(it);
page_to_free = tlp;
}
}
@@ -586,19 +586,18 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
if (page_to_free != nullptr) {
page_to_free->Finalize();
- KThreadLocalPage::Free(kernel, page_to_free);
+ KThreadLocalPage::Free(m_kernel, page_to_free);
}
R_SUCCEED();
}
-bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
- DebugWatchpointType type) {
- const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+bool KProcess::InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
+ const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
return wp.type == DebugWatchpointType::None;
})};
- if (watch == watchpoints.end()) {
+ if (watch == m_watchpoints.end()) {
return false;
}
@@ -606,21 +605,21 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
watch->end_address = addr + size;
watch->type = type;
- for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
- debug_page_refcounts[page]++;
- system.Memory().MarkRegionDebug(page, PageSize, true);
+ for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+ page += PageSize) {
+ m_debug_page_refcounts[page]++;
+ this->GetMemory().MarkRegionDebug(page, PageSize, true);
}
return true;
}
-bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
- DebugWatchpointType type) {
- const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
+ const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
})};
- if (watch == watchpoints.end()) {
+ if (watch == m_watchpoints.end()) {
return false;
}
@@ -628,24 +627,24 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
watch->end_address = 0;
watch->type = DebugWatchpointType::None;
- for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
- debug_page_refcounts[page]--;
- if (!debug_page_refcounts[page]) {
- system.Memory().MarkRegionDebug(page, PageSize, false);
+ for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+ page += PageSize) {
+ m_debug_page_refcounts[page]--;
+ if (!m_debug_page_refcounts[page]) {
+ this->GetMemory().MarkRegionDebug(page, PageSize, false);
}
}
return true;
}
-void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
+void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Svc::MemoryPermission permission) {
- page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
+ m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
};
- kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
- code_set.memory.size());
+ this->GetMemory().WriteBlock(base_addr, code_set.memory.data(), code_set.memory.size());
ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
@@ -653,44 +652,60 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
}
bool KProcess::IsSignaled() const {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
- return is_signaled;
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+ return m_is_signaled;
}
-KProcess::KProcess(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
- handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
- state_lock{kernel_}, list_lock{kernel_} {}
+KProcess::KProcess(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer{kernel}, m_page_table{m_kernel.System()},
+ m_handle_table{m_kernel}, m_address_arbiter{m_kernel.System()},
+ m_condition_var{m_kernel.System()}, m_state_lock{m_kernel}, m_list_lock{m_kernel} {}
KProcess::~KProcess() = default;
void KProcess::ChangeState(State new_state) {
- if (state == new_state) {
+ if (m_state == new_state) {
return;
}
- state = new_state;
- is_signaled = true;
- NotifyAvailable();
+ m_state = new_state;
+ m_is_signaled = true;
+ this->NotifyAvailable();
}
Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
- ASSERT(stack_size);
-
- // The kernel always ensures that the given stack size is page aligned.
- main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
-
- const VAddr start{page_table.GetStackRegionStart()};
- const std::size_t size{page_table.GetStackRegionEnd() - start};
-
- CASCADE_RESULT(main_thread_stack_top,
- page_table.AllocateAndMapMemory(
- main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
- KMemoryState::Stack, KMemoryPermission::UserReadWrite));
+ // Ensure that we haven't already allocated stack.
+ ASSERT(m_main_thread_stack_size == 0);
+
+ // Ensure that we're allocating a valid stack.
+ stack_size = Common::AlignUp(stack_size, PageSize);
+ // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory);
+ R_UNLESS(stack_size + m_image_size >= m_image_size, ResultOutOfMemory);
+
+ // Place a tentative reservation of memory for our new stack.
+ KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
+ stack_size);
+ R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate and map our stack.
+ if (stack_size) {
+ KProcessAddress stack_bottom;
+ R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
+ KMemoryState::Stack, KMemoryPermission::UserReadWrite));
+
+ m_main_thread_stack_top = stack_bottom + stack_size;
+ m_main_thread_stack_size = stack_size;
+ }
- main_thread_stack_top += main_thread_stack_size;
+ // We succeeded! Commit our memory reservation.
+ mem_reservation.Commit();
R_SUCCEED();
}
+Core::Memory::Memory& KProcess::GetMemory() const {
+ // TODO: per-process memory
+ return m_kernel.System().ApplicationMemory();
+}
+
} // namespace Kernel
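
The rewritten AllocateMainThreadStack above is the clearest instance of the kernel's reserve-then-commit pattern: place a tentative reservation against the resource limit, attempt the fallible mapping, and only Commit() once everything succeeded, so any early error path releases the reservation automatically. A compressed sketch of that shape; ReserveAndMap is hypothetical and the mapping step is elided.

#include <cstddef>

#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/result.h"

Result ReserveAndMap(Kernel::KProcess* process, std::size_t bytes) {
    // Tentatively reserve memory against the process resource limit.
    Kernel::KScopedResourceReservation reservation(
        process, Kernel::Svc::LimitableResource::PhysicalMemoryMax, bytes);
    R_UNLESS(reservation.Succeeded(), Kernel::ResultLimitReached);

    // ... R_TRY(fallible mapping work) would go here; a failure returns
    // early and the guard's destructor releases the reservation ...

    reservation.Commit(); // success: make the reservation permanent
    R_SUCCEED();
}
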
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 09bf2f1d0..925981d06 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -8,7 +8,6 @@
#include <list>
#include <map>
#include <string>
-#include "common/common_types.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
@@ -16,14 +15,19 @@
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Core {
+namespace Memory {
+class Memory;
+};
+
class System;
-}
+} // namespace Core
namespace FileSys {
class ProgramMetadata;
@@ -59,8 +63,8 @@ enum class DebugWatchpointType : u8 {
DECLARE_ENUM_FLAG_OPERATORS(DebugWatchpointType);
struct DebugWatchpoint {
- VAddr start_address;
- VAddr end_address;
+ KProcessAddress start_address;
+ KProcessAddress end_address;
DebugWatchpointType type;
};
@@ -68,7 +72,7 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
public:
- explicit KProcess(KernelCore& kernel_);
+ explicit KProcess(KernelCore& kernel);
~KProcess() override;
enum class State {
@@ -107,66 +111,80 @@ public:
/// Gets a reference to the process' page table.
KPageTable& PageTable() {
- return page_table;
+ return m_page_table;
}
    /// Gets a const reference to the process' page table.
const KPageTable& PageTable() const {
- return page_table;
+ return m_page_table;
+ }
+
+ /// Gets a reference to the process' page table.
+ KPageTable& GetPageTable() {
+ return m_page_table;
+ }
+
+ /// Gets a const reference to the process' page table.
+ const KPageTable& GetPageTable() const {
+ return m_page_table;
}
/// Gets a reference to the process' handle table.
KHandleTable& GetHandleTable() {
- return handle_table;
+ return m_handle_table;
}
/// Gets a const reference to the process' handle table.
const KHandleTable& GetHandleTable() const {
- return handle_table;
+ return m_handle_table;
}
- Result SignalToAddress(VAddr address) {
- return condition_var.SignalToAddress(address);
+ /// Gets a reference to the process' memory.
+ Core::Memory::Memory& GetMemory() const;
+
+ Result SignalToAddress(KProcessAddress address) {
+ return m_condition_var.SignalToAddress(address);
}
- Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
- return condition_var.WaitForAddress(handle, address, tag);
+ Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) {
+ return m_condition_var.WaitForAddress(handle, address, tag);
}
void SignalConditionVariable(u64 cv_key, int32_t count) {
- return condition_var.Signal(cv_key, count);
+ return m_condition_var.Signal(cv_key, count);
}
- Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
- R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
+ Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) {
+ R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
}
- Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
- R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
+ Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value,
+ s32 count) {
+ R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
}
- Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+ Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) {
- R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
+ R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}
- VAddr GetProcessLocalRegionAddress() const {
- return plr_address;
+ KProcessAddress GetProcessLocalRegionAddress() const {
+ return m_plr_address;
}
/// Gets the current status of the process
State GetState() const {
- return state;
+ return m_state;
}
/// Gets the unique ID that identifies this particular process.
- u64 GetProcessID() const {
- return process_id;
+ u64 GetProcessId() const {
+ return m_process_id;
}
/// Gets the program ID corresponding to this process.
- u64 GetProgramID() const {
- return program_id;
+ u64 GetProgramId() const {
+ return m_program_id;
}
/// Gets the resource limit descriptor for this process
@@ -174,7 +192,7 @@ public:
/// Gets the ideal CPU core ID for this process
u8 GetIdealCoreId() const {
- return ideal_core;
+ return m_ideal_core;
}
/// Checks if the specified thread priority is valid.
@@ -184,17 +202,17 @@ public:
/// Gets the bitmask of allowed cores that this process' threads can run on.
u64 GetCoreMask() const {
- return capabilities.GetCoreMask();
+ return m_capabilities.GetCoreMask();
}
/// Gets the bitmask of allowed thread priorities.
u64 GetPriorityMask() const {
- return capabilities.GetPriorityMask();
+ return m_capabilities.GetPriorityMask();
}
/// Gets the amount of secure memory to allocate for memory management.
u32 GetSystemResourceSize() const {
- return system_resource_size;
+ return m_system_resource_size;
}
/// Gets the amount of secure memory currently in use for memory management.
@@ -214,67 +232,67 @@ public:
/// Whether this process is an AArch64 or AArch32 process.
bool Is64BitProcess() const {
- return is_64bit_process;
+ return m_is_64bit_process;
}
- [[nodiscard]] bool IsSuspended() const {
- return is_suspended;
+ bool IsSuspended() const {
+ return m_is_suspended;
}
void SetSuspended(bool suspended) {
- is_suspended = suspended;
+ m_is_suspended = suspended;
}
/// Gets the total running time of the process instance in ticks.
u64 GetCPUTimeTicks() const {
- return total_process_running_time_ticks;
+ return m_total_process_running_time_ticks;
}
/// Updates the total running time, adding the given ticks to it.
void UpdateCPUTimeTicks(u64 ticks) {
- total_process_running_time_ticks += ticks;
+ m_total_process_running_time_ticks += ticks;
}
- /// Gets the process schedule count, used for thread yelding
+ /// Gets the process schedule count, used for thread yielding
s64 GetScheduledCount() const {
- return schedule_count;
+ return m_schedule_count;
}
/// Increments the process schedule count, used for thread yielding.
void IncrementScheduledCount() {
- ++schedule_count;
+ ++m_schedule_count;
}
void IncrementRunningThreadCount();
void DecrementRunningThreadCount();
void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
- running_threads[core] = thread;
- running_thread_idle_counts[core] = idle_count;
+ m_running_threads[core] = thread;
+ m_running_thread_idle_counts[core] = idle_count;
}
void ClearRunningThread(KThread* thread) {
- for (size_t i = 0; i < running_threads.size(); ++i) {
- if (running_threads[i] == thread) {
- running_threads[i] = nullptr;
+ for (size_t i = 0; i < m_running_threads.size(); ++i) {
+ if (m_running_threads[i] == thread) {
+ m_running_threads[i] = nullptr;
}
}
}
[[nodiscard]] KThread* GetRunningThread(s32 core) const {
- return running_threads[core];
+ return m_running_threads[core];
}
bool ReleaseUserException(KThread* thread);
[[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
- return pinned_threads[core_id];
+ return m_pinned_threads[core_id];
}
/// Gets 8 bytes of random data for svcGetInfo RandomEntropy
u64 GetRandomEntropy(std::size_t index) const {
- return random_entropy.at(index);
+ return m_random_entropy.at(index);
}
/// Retrieves the total physical memory available to this process in bytes.
@@ -293,7 +311,7 @@ public:
/// Gets the list of all threads created with this process as their owner.
std::list<KThread*>& GetThreadList() {
- return thread_list;
+ return m_thread_list;
}
/// Registers a thread as being created under this process,
@@ -310,10 +328,10 @@ public:
/// Clears the signaled state of the process if and only if it's signaled.
///
/// @pre The process must not be already terminated. If this is called on a
- /// terminated process, then ERR_INVALID_STATE will be returned.
+ /// terminated process, then ResultInvalidState will be returned.
///
/// @pre The process must be in a signaled state. If this is called on a
- /// process instance that is not signaled, ERR_INVALID_STATE will be
+ /// process instance that is not signaled, ResultInvalidState will be
/// returned.
Result Reset();
@@ -342,18 +360,18 @@ public:
*/
void PrepareForTermination();
- void LoadModule(CodeSet code_set, VAddr base_addr);
+ void LoadModule(CodeSet code_set, KProcessAddress base_addr);
bool IsInitialized() const override {
- return is_initialized;
+ return m_is_initialized;
}
- static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+ static void PostDestroy(uintptr_t arg) {}
void Finalize() override;
u64 GetId() const override {
- return GetProcessID();
+ return GetProcessId();
}
bool IsSignaled() const override;
@@ -367,55 +385,59 @@ public:
void UnpinThread(KThread* thread);
KLightLock& GetStateLock() {
- return state_lock;
+ return m_state_lock;
}
- Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
- void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
+ Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
+ void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
- [[nodiscard]] Result CreateThreadLocalRegion(VAddr* out);
+ [[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);
// Frees a used TLS slot identified by the given address
- Result DeleteThreadLocalRegion(VAddr addr);
+ Result DeleteThreadLocalRegion(KProcessAddress addr);
///////////////////////////////////////////////////////////////////////////////////////////////
// Debug watchpoint management
// Attempts to insert a watchpoint into a free slot. Returns false if none are available.
- bool InsertWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+ bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
// Attempts to remove the watchpoint specified by the given parameters.
- bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+ bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
- return watchpoints;
+ return m_watchpoints;
+ }
+
+ const std::string& GetName() {
+ return name;
}
private:
void PinThread(s32 core_id, KThread* thread) {
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
ASSERT(thread != nullptr);
- ASSERT(pinned_threads[core_id] == nullptr);
- pinned_threads[core_id] = thread;
+ ASSERT(m_pinned_threads[core_id] == nullptr);
+ m_pinned_threads[core_id] = thread;
}
void UnpinThread(s32 core_id, KThread* thread) {
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
ASSERT(thread != nullptr);
- ASSERT(pinned_threads[core_id] == thread);
- pinned_threads[core_id] = nullptr;
+ ASSERT(m_pinned_threads[core_id] == thread);
+ m_pinned_threads[core_id] = nullptr;
}
void FinalizeHandleTable() {
// Finalize the table.
- handle_table.Finalize();
+ m_handle_table.Finalize();
// Note that the table is finalized.
- is_handle_table_initialized = false;
+ m_is_handle_table_initialized = false;
}
void ChangeState(State new_state);
@@ -424,105 +446,107 @@ private:
Result AllocateMainThreadStack(std::size_t stack_size);
/// Memory manager for this process
- KPageTable page_table;
+ KPageTable m_page_table;
/// Current status of the process
- State state{};
+ State m_state{};
/// The ID of this process
- u64 process_id = 0;
+ u64 m_process_id = 0;
/// Title ID corresponding to the process
- u64 program_id = 0;
+ u64 m_program_id = 0;
/// Specifies additional memory to be reserved for the process's memory management by the
/// system. When this is non-zero, secure memory is allocated and used for page table allocation
/// instead of using the normal global page tables/memory block management.
- u32 system_resource_size = 0;
+ u32 m_system_resource_size = 0;
/// Resource limit descriptor for this process
- KResourceLimit* resource_limit{};
+ KResourceLimit* m_resource_limit{};
- VAddr system_resource_address{};
+ KVirtualAddress m_system_resource_address{};
/// The ideal CPU core for this process, threads are scheduled on this core by default.
- u8 ideal_core = 0;
+ u8 m_ideal_core = 0;
/// Contains the parsed process capability descriptors.
- ProcessCapabilities capabilities;
+ ProcessCapabilities m_capabilities;
/// Whether or not this process is AArch64, or AArch32.
/// By default, we currently assume this is true, unless otherwise
/// specified by metadata provided to the process during loading.
- bool is_64bit_process = true;
+ bool m_is_64bit_process = true;
/// Total running time for the process in ticks.
- std::atomic<u64> total_process_running_time_ticks = 0;
+ std::atomic<u64> m_total_process_running_time_ticks = 0;
/// Per-process handle table for storing created object handles in.
- KHandleTable handle_table;
+ KHandleTable m_handle_table;
/// Per-process address arbiter.
- KAddressArbiter address_arbiter;
+ KAddressArbiter m_address_arbiter;
/// The per-process mutex lock instance used for handling various
/// forms of services, such as lock arbitration, and condition
/// variable related facilities.
- KConditionVariable condition_var;
+ KConditionVariable m_condition_var;
/// Address indicating the location of the process' dedicated TLS region.
- VAddr plr_address = 0;
+ KProcessAddress m_plr_address = 0;
/// Random values for svcGetInfo RandomEntropy
- std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
+ std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};
/// List of threads that are running with this process as their owner.
- std::list<KThread*> thread_list;
+ std::list<KThread*> m_thread_list;
/// List of shared memory that are running with this process as their owner.
- std::list<KSharedMemoryInfo*> shared_memory_list;
+ std::list<KSharedMemoryInfo*> m_shared_memory_list;
/// Address of the top of the main thread's stack
- VAddr main_thread_stack_top{};
+ KProcessAddress m_main_thread_stack_top{};
/// Size of the main thread's stack
- std::size_t main_thread_stack_size{};
+ std::size_t m_main_thread_stack_size{};
/// Memory usage capacity for the process
- std::size_t memory_usage_capacity{};
+ std::size_t m_memory_usage_capacity{};
/// Process total image size
- std::size_t image_size{};
+ std::size_t m_image_size{};
/// Schedule count of this process
- s64 schedule_count{};
+ s64 m_schedule_count{};
+
+ size_t m_memory_release_hint{};
- size_t memory_release_hint{};
+ std::string name{};
- bool is_signaled{};
- bool is_suspended{};
- bool is_immortal{};
- bool is_handle_table_initialized{};
- bool is_initialized{};
+ bool m_is_signaled{};
+ bool m_is_suspended{};
+ bool m_is_immortal{};
+ bool m_is_handle_table_initialized{};
+ bool m_is_initialized{};
- std::atomic<u16> num_running_threads{};
+ std::atomic<u16> m_num_running_threads{};
- std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
- std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
- std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
- std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> watchpoints{};
- std::map<VAddr, u64> debug_page_refcounts;
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
+ std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
+ std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
+ std::map<KProcessAddress, u64> m_debug_page_refcounts;
- KThread* exception_thread{};
+ KThread* m_exception_thread{};
- KLightLock state_lock;
- KLightLock list_lock;
+ KLightLock m_state_lock;
+ KLightLock m_list_lock;
using TLPTree =
Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
using TLPIterator = TLPTree::iterator;
- TLPTree fully_used_tlp_tree;
- TLPTree partially_used_tlp_tree;
+ TLPTree m_fully_used_tlp_tree;
+ TLPTree m_partially_used_tlp_tree;
};
} // namespace Kernel
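The KProcess hunks above do two mechanical things at once: every data member gains the `m_` prefix, and raw `VAddr` integers become typed addresses (`KProcessAddress`, `KVirtualAddress`), with `GetInteger()` as the explicit escape hatch back to an integer. A minimal sketch of what such a typed-address wrapper can look like follows; this is hypothetical, the real types come from the common typed-address utilities, which are not part of this diff:

    // Hypothetical sketch of a typed address; the real KProcessAddress /
    // KVirtualAddress definitions live elsewhere and are richer than this.
    #include <cstdint>

    template <typename Tag>
    class TypedAddress {
    public:
        constexpr TypedAddress(uint64_t address = 0) : m_address(address) {}

        // Offsetting stays within the same address space.
        constexpr TypedAddress operator+(uint64_t offset) const {
            return TypedAddress(m_address + offset);
        }

        // Explicit escape hatch, mirroring GetInteger() in the hunks above.
        friend constexpr uint64_t GetInteger(TypedAddress addr) {
            return addr.m_address;
        }

    private:
        uint64_t m_address{};
    };

    struct ProcessAddressTag {};
    using KProcessAddress = TypedAddress<ProcessAddressTag>;

The payoff is that accidentally mixing a process-space address with, say, a physical address becomes a compile error instead of a silent logic bug.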
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
index 5c942d47c..c30662666 100644
--- a/src/core/hle/kernel/k_readable_event.cpp
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -11,7 +11,7 @@
namespace Kernel {
-KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
KReadableEvent::~KReadableEvent() = default;
@@ -25,7 +25,7 @@ void KReadableEvent::Initialize(KEvent* parent) {
}
bool KReadableEvent::IsSignaled() const {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
return m_is_signaled;
}
@@ -33,7 +33,7 @@ bool KReadableEvent::IsSignaled() const {
void KReadableEvent::Destroy() {
if (m_parent) {
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
m_parent->OnReadableEventDestroyed();
}
m_parent->Close();
@@ -41,31 +41,29 @@ void KReadableEvent::Destroy() {
}
Result KReadableEvent::Signal() {
- KScopedSchedulerLock lk{kernel};
+ KScopedSchedulerLock lk{m_kernel};
if (!m_is_signaled) {
m_is_signaled = true;
this->NotifyAvailable();
}
- return ResultSuccess;
+ R_SUCCEED();
}
Result KReadableEvent::Clear() {
this->Reset();
- return ResultSuccess;
+ R_SUCCEED();
}
Result KReadableEvent::Reset() {
- KScopedSchedulerLock lk{kernel};
+ KScopedSchedulerLock lk{m_kernel};
- if (!m_is_signaled) {
- return ResultInvalidState;
- }
+ R_UNLESS(m_is_signaled, ResultInvalidState);
m_is_signaled = false;
- return ResultSuccess;
+ R_SUCCEED();
}
} // namespace Kernel
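The KReadableEvent changes above replace explicit `return ResultSuccess;` and early-return blocks with the kernel's result macros. A simplified sketch of what these macros do, assuming reduced semantics; the real definitions (in core/hle/result.h) carry extra bookkeeping:

    // Simplified stand-ins for the result macros used above.
    #define R_SUCCEED() return ResultSuccess

    #define R_RETURN(expr) return (expr)

    // Return the given result early unless the condition holds.
    #define R_UNLESS(cond, res)  \
        do {                     \
            if (!(cond)) {       \
                return (res);    \
            }                    \
        } while (0)

    // Usage, mirroring KReadableEvent::Reset() above:
    //     R_UNLESS(m_is_signaled, ResultInvalidState);
    //     m_is_signaled = false;
    //     R_SUCCEED();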
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
index 743f96bf5..d2ec36323 100644
--- a/src/core/hle/kernel/k_readable_event.h
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -17,7 +17,7 @@ class KReadableEvent : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
public:
- explicit KReadableEvent(KernelCore& kernel_);
+ explicit KReadableEvent(KernelCore& kernel);
~KReadableEvent() override;
void Initialize(KEvent* parent);
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index b9d22b414..fcee26a29 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
+#include "common/overflow.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -10,12 +11,12 @@
namespace Kernel {
constexpr s64 DefaultTimeout = 10000000000; // 10 seconds
-KResourceLimit::KResourceLimit(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_}, lock{kernel_}, cond_var{kernel_} {}
+KResourceLimit::KResourceLimit(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock{m_kernel}, m_cond_var{m_kernel} {}
KResourceLimit::~KResourceLimit() = default;
-void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) {
- core_timing = core_timing_;
+void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing) {
+ m_core_timing = core_timing;
}
void KResourceLimit::Finalize() {}
@@ -24,11 +25,11 @@ s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which);
s64 value{};
{
- KScopedLightLock lk{lock};
- value = limit_values[index];
+ KScopedLightLock lk{m_lock};
+ value = m_limit_values[index];
ASSERT(value >= 0);
- ASSERT(current_values[index] <= limit_values[index]);
- ASSERT(current_hints[index] <= current_values[index]);
+ ASSERT(m_current_values[index] <= m_limit_values[index]);
+ ASSERT(m_current_hints[index] <= m_current_values[index]);
}
return value;
}
@@ -37,11 +38,11 @@ s64 KResourceLimit::GetCurrentValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which);
s64 value{};
{
- KScopedLightLock lk{lock};
- value = current_values[index];
+ KScopedLightLock lk{m_lock};
+ value = m_current_values[index];
ASSERT(value >= 0);
- ASSERT(current_values[index] <= limit_values[index]);
- ASSERT(current_hints[index] <= current_values[index]);
+ ASSERT(m_current_values[index] <= m_limit_values[index]);
+ ASSERT(m_current_hints[index] <= m_current_values[index]);
}
return value;
}
@@ -50,11 +51,11 @@ s64 KResourceLimit::GetPeakValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which);
s64 value{};
{
- KScopedLightLock lk{lock};
- value = peak_values[index];
+ KScopedLightLock lk{m_lock};
+ value = m_peak_values[index];
ASSERT(value >= 0);
- ASSERT(current_values[index] <= limit_values[index]);
- ASSERT(current_hints[index] <= current_values[index]);
+ ASSERT(m_current_values[index] <= m_limit_values[index]);
+ ASSERT(m_current_hints[index] <= m_current_values[index]);
}
return value;
}
@@ -63,11 +64,11 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which);
s64 value{};
{
- KScopedLightLock lk(lock);
- ASSERT(current_values[index] >= 0);
- ASSERT(current_values[index] <= limit_values[index]);
- ASSERT(current_hints[index] <= current_values[index]);
- value = limit_values[index] - current_values[index];
+ KScopedLightLock lk(m_lock);
+ ASSERT(m_current_values[index] >= 0);
+ ASSERT(m_current_values[index] <= m_limit_values[index]);
+ ASSERT(m_current_hints[index] <= m_current_values[index]);
+ value = m_limit_values[index] - m_current_values[index];
}
return value;
@@ -75,51 +76,51 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
const auto index = static_cast<std::size_t>(which);
- KScopedLightLock lk(lock);
- R_UNLESS(current_values[index] <= value, ResultInvalidState);
+ KScopedLightLock lk(m_lock);
+ R_UNLESS(m_current_values[index] <= value, ResultInvalidState);
- limit_values[index] = value;
- peak_values[index] = current_values[index];
+ m_limit_values[index] = value;
+ m_peak_values[index] = m_current_values[index];
- return ResultSuccess;
+ R_SUCCEED();
}
bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
- return Reserve(which, value, core_timing->GetGlobalTimeNs().count() + DefaultTimeout);
+ return Reserve(which, value, m_core_timing->GetGlobalTimeNs().count() + DefaultTimeout);
}
bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
ASSERT(value >= 0);
const auto index = static_cast<std::size_t>(which);
- KScopedLightLock lk(lock);
+ KScopedLightLock lk(m_lock);
- ASSERT(current_hints[index] <= current_values[index]);
- if (current_hints[index] >= limit_values[index]) {
+ ASSERT(m_current_hints[index] <= m_current_values[index]);
+ if (m_current_hints[index] >= m_limit_values[index]) {
return false;
}
// Loop until we reserve or run out of time.
while (true) {
- ASSERT(current_values[index] <= limit_values[index]);
- ASSERT(current_hints[index] <= current_values[index]);
+ ASSERT(m_current_values[index] <= m_limit_values[index]);
+ ASSERT(m_current_hints[index] <= m_current_values[index]);
// If we would overflow, don't allow the reservation to succeed.
- if (current_values[index] + value <= current_values[index]) {
+ if (Common::WrappingAdd(m_current_values[index], value) <= m_current_values[index]) {
break;
}
- if (current_values[index] + value <= limit_values[index]) {
- current_values[index] += value;
- current_hints[index] += value;
- peak_values[index] = std::max(peak_values[index], current_values[index]);
+ if (m_current_values[index] + value <= m_limit_values[index]) {
+ m_current_values[index] += value;
+ m_current_hints[index] += value;
+ m_peak_values[index] = std::max(m_peak_values[index], m_current_values[index]);
return true;
}
- if (current_hints[index] + value <= limit_values[index] &&
- (timeout < 0 || core_timing->GetGlobalTimeNs().count() < timeout)) {
- waiter_count++;
- cond_var.Wait(&lock, timeout, false);
- waiter_count--;
+ if (m_current_hints[index] + value <= m_limit_values[index] &&
+ (timeout < 0 || m_core_timing->GetGlobalTimeNs().count() < timeout)) {
+ m_waiter_count++;
+ m_cond_var.Wait(std::addressof(m_lock), timeout, false);
+ m_waiter_count--;
} else {
break;
}
@@ -137,23 +138,23 @@ void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) {
ASSERT(hint >= 0);
const auto index = static_cast<std::size_t>(which);
- KScopedLightLock lk(lock);
- ASSERT(current_values[index] <= limit_values[index]);
- ASSERT(current_hints[index] <= current_values[index]);
- ASSERT(value <= current_values[index]);
- ASSERT(hint <= current_hints[index]);
+ KScopedLightLock lk(m_lock);
+ ASSERT(m_current_values[index] <= m_limit_values[index]);
+ ASSERT(m_current_hints[index] <= m_current_values[index]);
+ ASSERT(value <= m_current_values[index]);
+ ASSERT(hint <= m_current_hints[index]);
- current_values[index] -= value;
- current_hints[index] -= hint;
+ m_current_values[index] -= value;
+ m_current_hints[index] -= hint;
- if (waiter_count != 0) {
- cond_var.Broadcast();
+ if (m_waiter_count != 0) {
+ m_cond_var.Broadcast();
}
}
KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size) {
auto* resource_limit = KResourceLimit::Create(system.Kernel());
- resource_limit->Initialize(&system.CoreTiming());
+ resource_limit->Initialize(std::addressof(system.CoreTiming()));
// Initialize default resource limit values.
// TODO(bunnei): These values are the system defaults, the limits for service processes are
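The one functional change in Reserve() above is the overflow test: `current + value <= current` is undefined behavior when the signed addition overflows, so the diff routes it through `Common::WrappingAdd`. A minimal stand-in with the assumed wrap-around semantics (the real helper is in common/overflow.h, included at the top of this file):

    // Assumed shape of the overflow helper used above: two's-complement
    // wrap-around via unsigned arithmetic, which is well-defined.
    #include <cstdint>

    inline int64_t WrappingAdd(int64_t lhs, int64_t rhs) {
        return static_cast<int64_t>(static_cast<uint64_t>(lhs) +
                                    static_cast<uint64_t>(rhs));
    }

Since `value >= 0` is asserted, a wrapped sum comes out less than or equal to the original operand, so `WrappingAdd(current, value) <= current` detects exactly the overflow the old expression tried to catch, without the UB.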
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
index 2573d1b7c..15e69af56 100644
--- a/src/core/hle/kernel/k_resource_limit.h
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -28,10 +28,10 @@ class KResourceLimit final
KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
public:
- explicit KResourceLimit(KernelCore& kernel_);
+ explicit KResourceLimit(KernelCore& kernel);
~KResourceLimit() override;
- void Initialize(const Core::Timing::CoreTiming* core_timing_);
+ void Initialize(const Core::Timing::CoreTiming* core_timing);
void Finalize() override;
s64 GetLimitValue(LimitableResource which) const;
@@ -46,18 +46,18 @@ public:
void Release(LimitableResource which, s64 value);
void Release(LimitableResource which, s64 value, s64 hint);
- static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+ static void PostDestroy(uintptr_t arg) {}
private:
using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>;
- ResourceArray limit_values{};
- ResourceArray current_values{};
- ResourceArray current_hints{};
- ResourceArray peak_values{};
- mutable KLightLock lock;
- s32 waiter_count{};
- KLightConditionVariable cond_var;
- const Core::Timing::CoreTiming* core_timing{};
+ ResourceArray m_limit_values{};
+ ResourceArray m_current_values{};
+ ResourceArray m_current_hints{};
+ ResourceArray m_peak_values{};
+ mutable KLightLock m_lock;
+ s32 m_waiter_count{};
+ KLightConditionVariable m_cond_var;
+ const Core::Timing::CoreTiming* m_core_timing{};
};
KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size);
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d6676904b..75ce5a23c 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -27,7 +27,7 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
}
}
-KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
+KScheduler::KScheduler(KernelCore& kernel) : m_kernel{kernel} {
m_switch_fiber = std::make_shared<Common::Fiber>([this] {
while (true) {
ScheduleImplFiber();
@@ -47,7 +47,7 @@ void KScheduler::SetInterruptTaskRunnable() {
void KScheduler::RequestScheduleOnInterrupt() {
m_state.needs_scheduling = true;
- if (CanSchedule(kernel)) {
+ if (CanSchedule(m_kernel)) {
ScheduleOnInterrupt();
}
}
@@ -97,50 +97,50 @@ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
}
void KScheduler::Schedule() {
- ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
- ASSERT(m_core_id == GetCurrentCoreId(kernel));
+ ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
+ ASSERT(m_core_id == GetCurrentCoreId(m_kernel));
ScheduleImpl();
}
void KScheduler::ScheduleOnInterrupt() {
- GetCurrentThread(kernel).DisableDispatch();
+ GetCurrentThread(m_kernel).DisableDispatch();
Schedule();
- GetCurrentThread(kernel).EnableDispatch();
+ GetCurrentThread(m_kernel).EnableDispatch();
}
void KScheduler::PreemptSingleCore() {
- GetCurrentThread(kernel).DisableDispatch();
+ GetCurrentThread(m_kernel).DisableDispatch();
- auto* thread = GetCurrentThreadPointer(kernel);
- auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore());
+ auto* thread = GetCurrentThreadPointer(m_kernel);
+ auto& previous_scheduler = m_kernel.Scheduler(thread->GetCurrentCore());
previous_scheduler.Unload(thread);
Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber);
- GetCurrentThread(kernel).EnableDispatch();
+ GetCurrentThread(m_kernel).EnableDispatch();
}
void KScheduler::RescheduleCurrentCore() {
- ASSERT(!kernel.IsPhantomModeForSingleCore());
- ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+ ASSERT(!m_kernel.IsPhantomModeForSingleCore());
+ ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
- GetCurrentThread(kernel).EnableDispatch();
+ GetCurrentThread(m_kernel).EnableDispatch();
if (m_state.needs_scheduling.load()) {
// Disable interrupts, and then check again if rescheduling is needed.
// KScopedInterruptDisable intr_disable;
- kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
+ m_kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
}
}
void KScheduler::RescheduleCurrentCoreImpl() {
// Check that scheduling is needed.
if (m_state.needs_scheduling.load()) [[likely]] {
- GetCurrentThread(kernel).DisableDispatch();
+ GetCurrentThread(m_kernel).DisableDispatch();
Schedule();
- GetCurrentThread(kernel).EnableDispatch();
+ GetCurrentThread(m_kernel).EnableDispatch();
}
}
@@ -149,18 +149,18 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core
m_core_id = core_id;
m_idle_thread = idle_thread;
// m_state.idle_thread_stack = m_idle_thread->GetStackTop();
- // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager();
+ // m_state.interrupt_task_manager = std::addressof(kernel.GetInterruptTaskManager());
// Insert the main thread into the priority queue.
// {
- // KScopedSchedulerLock lk{kernel};
- // GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel));
- // SetSchedulerUpdateNeeded(kernel);
+ // KScopedSchedulerLock lk{m_kernel};
+ // GetPriorityQueue(m_kernel).PushBack(GetCurrentThreadPointer(m_kernel));
+ // SetSchedulerUpdateNeeded(m_kernel);
// }
// Bind interrupt handler.
// kernel.GetInterruptManager().BindHandler(
- // GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
+ // GetSchedulerInterruptHandler(m_kernel), KInterruptName::Scheduler, m_core_id,
// KInterruptController::PriorityLevel::Scheduler, false, false);
// Set the current thread.
@@ -168,7 +168,7 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core
}
void KScheduler::Activate() {
- ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+ ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
// m_state.should_count_idle = KTargetSystem::IsDebugMode();
m_is_active = true;
@@ -176,7 +176,7 @@ void KScheduler::Activate() {
}
void KScheduler::OnThreadStart() {
- GetCurrentThread(kernel).EnableDispatch();
+ GetCurrentThread(m_kernel).EnableDispatch();
}
u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
@@ -184,7 +184,8 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
prev_highest_thread != highest_thread) [[likely]] {
if (prev_highest_thread != nullptr) [[likely]] {
IncrementScheduledCount(prev_highest_thread);
- prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks());
+ prev_highest_thread->SetLastScheduledTick(
+ m_kernel.System().CoreTiming().GetClockTicks());
}
if (m_state.should_count_idle) {
if (highest_thread != nullptr) [[likely]] {
@@ -328,8 +329,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
}
void KScheduler::SwitchThread(KThread* next_thread) {
- KProcess* const cur_process = kernel.CurrentProcess();
- KThread* const cur_thread = GetCurrentThreadPointer(kernel);
+ KProcess* const cur_process = GetCurrentProcessPointer(m_kernel);
+ KThread* const cur_thread = GetCurrentThreadPointer(m_kernel);
// We never want to schedule a null thread, so use the idle thread if we don't have a next.
if (next_thread == nullptr) {
@@ -351,7 +352,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
// Update the CPU time tracking variables.
const s64 prev_tick = m_last_context_switch_time;
- const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks();
+ const s64 cur_tick = m_kernel.System().CoreTiming().GetClockTicks();
const s64 tick_diff = cur_tick - prev_tick;
cur_thread->AddCpuTime(m_core_id, tick_diff);
if (cur_process != nullptr) {
@@ -375,7 +376,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
// }
// Set the new thread.
- SetCurrentThread(kernel, next_thread);
+ SetCurrentThread(m_kernel, next_thread);
m_current_thread = next_thread;
// Set the new Thread Local region.
@@ -388,7 +389,7 @@ void KScheduler::ScheduleImpl() {
std::atomic_thread_fence(std::memory_order_seq_cst);
// Load the appropriate thread pointers for scheduling.
- KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
+ KThread* const cur_thread{GetCurrentThreadPointer(m_kernel)};
KThread* highest_priority_thread{m_state.highest_priority_thread};
// Check whether there are runnable interrupt tasks.
@@ -411,7 +412,7 @@ void KScheduler::ScheduleImpl() {
m_switch_cur_thread = cur_thread;
m_switch_highest_priority_thread = highest_priority_thread;
m_switch_from_schedule = true;
- Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);
+ Common::Fiber::YieldTo(cur_thread->m_host_context, *m_switch_fiber);
// Returning from ScheduleImpl occurs after this thread has been scheduled again.
}
@@ -450,7 +451,7 @@ void KScheduler::ScheduleImplFiber() {
// We want to try to lock the highest priority thread's context.
// Try to take it.
- while (!highest_priority_thread->context_guard.try_lock()) {
+ while (!highest_priority_thread->m_context_guard.try_lock()) {
// The highest priority thread's context is already locked.
// Check if we need scheduling. If we don't, we can retry directly.
if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
@@ -468,7 +469,7 @@ void KScheduler::ScheduleImplFiber() {
if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
// Our switch failed.
// We should unlock the thread context, and then retry.
- highest_priority_thread->context_guard.unlock();
+ highest_priority_thread->m_context_guard.unlock();
goto retry;
} else {
break;
@@ -489,30 +490,30 @@ void KScheduler::ScheduleImplFiber() {
Reload(highest_priority_thread);
// Reload the host thread.
- Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
+ Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->m_host_context);
}
void KScheduler::Unload(KThread* thread) {
- auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+ auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
cpu_core.SaveContext(thread->GetContext32());
cpu_core.SaveContext(thread->GetContext64());
// Save the TPIDR_EL0 system register in case it was modified.
- thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+ thread->SetTpidrEl0(cpu_core.GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
// Check if the thread is terminated by checking the DPC flags.
if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
// The thread isn't terminated, so we want to unlock it.
- thread->context_guard.unlock();
+ thread->m_context_guard.unlock();
}
}
void KScheduler::Reload(KThread* thread) {
- auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+ auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
- cpu_core.SetTlsAddress(thread->GetTLSAddress());
- cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+ cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress()));
+ cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0());
cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
cpu_core.ClearExclusiveState();
}
@@ -689,11 +690,11 @@ void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 prior
void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
- ASSERT(kernel.CurrentProcess() != nullptr);
+ ASSERT(GetCurrentProcessPointer(kernel) != nullptr);
// Get the current thread and process.
KThread& cur_thread = GetCurrentThread(kernel);
- KProcess& cur_process = *kernel.CurrentProcess();
+ KProcess& cur_process = GetCurrentProcess(kernel);
// If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@@ -728,11 +729,11 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
- ASSERT(kernel.CurrentProcess() != nullptr);
+ ASSERT(GetCurrentProcessPointer(kernel) != nullptr);
// Get the current thread and process.
KThread& cur_thread = GetCurrentThread(kernel);
- KProcess& cur_process = *kernel.CurrentProcess();
+ KProcess& cur_process = GetCurrentProcess(kernel);
// If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@@ -816,11 +817,11 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
void KScheduler::YieldToAnyThread(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
- ASSERT(kernel.CurrentProcess() != nullptr);
+ ASSERT(GetCurrentProcessPointer(kernel) != nullptr);
// Get the current thread and process.
KThread& cur_thread = GetCurrentThread(kernel);
- KProcess& cur_process = *kernel.CurrentProcess();
+ KProcess& cur_process = GetCurrentProcess(kernel);
// If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@@ -891,7 +892,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) {
- RescheduleCores(kernel, core_mask);
+ RescheduleCores(m_kernel, core_mask);
}
}
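All three Yield* functions above open with the same early-out: the thread records the process's scheduled count when it yields, and if that count is unchanged on the next yield request, no other thread has run in the meantime and the yield can be skipped. Condensed for illustration (the accessors are the ones used in the diff; the body is elided):

    // Condensed sketch of the shared early-out in the Yield* functions.
    void YieldSketch(KernelCore& kernel) {
        KThread& cur_thread = GetCurrentThread(kernel);
        KProcess& cur_process = GetCurrentProcess(kernel);

        // An unchanged scheduled count means no other thread ran since
        // our last yield, so rotating the queue would be a no-op.
        if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
            return;
        }

        // ... take the scheduler lock and perform the actual yield ...
    }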
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 534321d8d..d85a0c040 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -80,17 +80,17 @@ public:
return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
}
static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
- return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread();
+ return kernel.GlobalSchedulerContext().m_scheduler_lock.IsLockedByCurrentThread();
}
static bool IsSchedulerUpdateNeeded(KernelCore& kernel) {
- return kernel.GlobalSchedulerContext().scheduler_update_needed;
+ return kernel.GlobalSchedulerContext().m_scheduler_update_needed;
}
static void SetSchedulerUpdateNeeded(KernelCore& kernel) {
- kernel.GlobalSchedulerContext().scheduler_update_needed = true;
+ kernel.GlobalSchedulerContext().m_scheduler_update_needed = true;
}
static void ClearSchedulerUpdateNeeded(KernelCore& kernel) {
- kernel.GlobalSchedulerContext().scheduler_update_needed = false;
+ kernel.GlobalSchedulerContext().m_scheduler_update_needed = false;
}
static void DisableScheduling(KernelCore& kernel);
@@ -115,7 +115,7 @@ public:
private:
// Static private API.
static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) {
- return kernel.GlobalSchedulerContext().priority_queue;
+ return kernel.GlobalSchedulerContext().m_priority_queue;
}
static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
@@ -149,7 +149,7 @@ private:
KInterruptTaskManager* interrupt_task_manager{nullptr};
};
- KernelCore& kernel;
+ KernelCore& m_kernel;
SchedulingState m_state;
bool m_is_active{false};
s32 m_core_id{0};
@@ -166,7 +166,7 @@ private:
class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
public:
explicit KScopedSchedulerLock(KernelCore& kernel)
- : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {}
+ : KScopedLock(kernel.GlobalSchedulerContext().m_scheduler_lock) {}
~KScopedSchedulerLock() = default;
};
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 129d60472..caa1404f1 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -14,73 +14,67 @@
namespace Kernel {
class KernelCore;
+class GlobalSchedulerContext;
template <typename SchedulerType>
class KAbstractSchedulerLock {
public:
- explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
+ explicit KAbstractSchedulerLock(KernelCore& kernel) : m_kernel{kernel} {}
bool IsLockedByCurrentThread() const {
- return owner_thread == GetCurrentThreadPointer(kernel);
+ return m_owner_thread == GetCurrentThreadPointer(m_kernel);
}
void Lock() {
- // If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) {
- return;
- }
-
- if (IsLockedByCurrentThread()) {
- // If we already own the lock, we can just increment the count.
- ASSERT(lock_count > 0);
- lock_count++;
+ if (this->IsLockedByCurrentThread()) {
+ // If we already own the lock, the lock count should be > 0.
+ // For debug, ensure this is true.
+ ASSERT(m_lock_count > 0);
} else {
// Otherwise, we want to disable scheduling and acquire the spinlock.
- SchedulerType::DisableScheduling(kernel);
- spin_lock.Lock();
+ SchedulerType::DisableScheduling(m_kernel);
+ m_spin_lock.Lock();
- // For debug, ensure that our state is valid.
- ASSERT(lock_count == 0);
- ASSERT(owner_thread == nullptr);
+ ASSERT(m_lock_count == 0);
+ ASSERT(m_owner_thread == nullptr);
- // Increment count, take ownership.
- lock_count = 1;
- owner_thread = GetCurrentThreadPointer(kernel);
+ // Take ownership of the lock.
+ m_owner_thread = GetCurrentThreadPointer(m_kernel);
}
+
+ // Increment the lock count.
+ m_lock_count++;
}
void Unlock() {
- // If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) {
- return;
- }
-
- ASSERT(IsLockedByCurrentThread());
- ASSERT(lock_count > 0);
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(m_lock_count > 0);
// Release an instance of the lock.
- if ((--lock_count) == 0) {
+ if ((--m_lock_count) == 0) {
// Perform a memory barrier here.
std::atomic_thread_fence(std::memory_order_seq_cst);
// We're no longer going to hold the lock. Take note of what cores need scheduling.
const u64 cores_needing_scheduling =
- SchedulerType::UpdateHighestPriorityThreads(kernel);
+ SchedulerType::UpdateHighestPriorityThreads(m_kernel);
// Note that we no longer hold the lock, and unlock the spinlock.
- owner_thread = nullptr;
- spin_lock.Unlock();
+ m_owner_thread = nullptr;
+ m_spin_lock.Unlock();
// Enable scheduling, and perform a rescheduling operation.
- SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
+ SchedulerType::EnableScheduling(m_kernel, cores_needing_scheduling);
}
}
private:
- KernelCore& kernel;
- KAlignedSpinLock spin_lock{};
- s32 lock_count{};
- std::atomic<KThread*> owner_thread{};
+ friend class GlobalSchedulerContext;
+
+ KernelCore& m_kernel;
+ KAlignedSpinLock m_spin_lock{};
+ s32 m_lock_count{};
+ std::atomic<KThread*> m_owner_thread{};
};
} // namespace Kernel
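The rewritten Lock()/Unlock() pair above is a counted, owner-tracked recursive lock: a re-entrant Lock() by the owning thread only bumps the count (the increment moved out of the else-branch so both paths share it), and only the final Unlock() clears the owner, releases the spinlock, and re-enables scheduling. A reduced, generic sketch of the same shape, without the scheduler hooks:

    // Generic sketch of the recursive-lock shape above; the real class
    // also disables/enables scheduling around the spinlock.
    #include <atomic>
    #include <mutex>
    #include <thread>

    class RecursiveLockSketch {
    public:
        void Lock() {
            if (m_owner.load() != std::this_thread::get_id()) {
                m_lock.lock();
                m_owner.store(std::this_thread::get_id());
            }
            ++m_count; // Both first and nested acquisitions count.
        }

        void Unlock() {
            if (--m_count == 0) {
                m_owner.store({});
                m_lock.unlock();
            }
        }

    private:
        std::mutex m_lock;
        std::atomic<std::thread::id> m_owner{};
        int m_count{}; // Only ever touched by the owning thread.
    };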
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
index 857e21156..629a7d20d 100644
--- a/src/core/hle/kernel/k_scoped_lock.h
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -4,27 +4,29 @@
#pragma once
#include <concepts>
+#include <memory>
#include <type_traits>
namespace Kernel {
template <typename T>
-concept KLockable = !std::is_reference_v<T> && requires(T & t) {
- { t.Lock() } -> std::same_as<void>;
- { t.Unlock() } -> std::same_as<void>;
-};
+concept KLockable = !std::is_reference_v<T> && requires(T& t) {
+ { t.Lock() } -> std::same_as<void>;
+ { t.Unlock() } -> std::same_as<void>;
+ };
template <typename T>
-requires KLockable<T>
-class [[nodiscard]] KScopedLock {
+ requires KLockable<T>
+class KScopedLock {
public:
- explicit KScopedLock(T* l) : lock_ptr(l) {
- this->lock_ptr->Lock();
+ explicit KScopedLock(T* l) : KScopedLock(*l) {}
+ explicit KScopedLock(T& l) : m_lock(l) {
+ m_lock.Lock();
}
- explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) {}
~KScopedLock() {
- this->lock_ptr->Unlock();
+ m_lock.Unlock();
}
KScopedLock(const KScopedLock&) = delete;
@@ -34,7 +36,7 @@ public:
KScopedLock& operator=(KScopedLock&&) = delete;
private:
- T* lock_ptr;
+ T& m_lock;
};
} // namespace Kernel
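With the concept cleaned up above, any type exposing `void Lock()` / `void Unlock()` satisfies KLockable, and KScopedLock pairs the two calls through RAII, now holding a reference instead of a pointer. A small usage sketch (DummyLock is a stand-in type):

    // Usage sketch: any type with Lock()/Unlock() satisfies KLockable.
    struct DummyLock {
        void Lock() { /* acquire */ }
        void Unlock() { /* release */ }
    };

    void Example() {
        DummyLock lock;
        {
            Kernel::KScopedLock lk{lock}; // Lock() runs here.
            // ... critical section ...
        } // Unlock() runs on scope exit, on every path.
    }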
diff --git a/src/core/hle/kernel/k_scoped_resource_reservation.h b/src/core/hle/kernel/k_scoped_resource_reservation.h
index 436bcf9fe..2cc464612 100644
--- a/src/core/hle/kernel/k_scoped_resource_reservation.h
+++ b/src/core/hle/kernel/k_scoped_resource_reservation.h
@@ -12,20 +12,20 @@ namespace Kernel {
class KScopedResourceReservation {
public:
explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v, s64 timeout)
- : resource_limit(std::move(l)), value(v), resource(r) {
- if (resource_limit && value) {
- success = resource_limit->Reserve(resource, value, timeout);
+ : m_limit(l), m_value(v), m_resource(r) {
+ if (m_limit && m_value) {
+ m_succeeded = m_limit->Reserve(m_resource, m_value, timeout);
} else {
- success = true;
+ m_succeeded = true;
}
}
explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v = 1)
- : resource_limit(std::move(l)), value(v), resource(r) {
- if (resource_limit && value) {
- success = resource_limit->Reserve(resource, value);
+ : m_limit(l), m_value(v), m_resource(r) {
+ if (m_limit && m_value) {
+ m_succeeded = m_limit->Reserve(m_resource, m_value);
} else {
- success = true;
+ m_succeeded = true;
}
}
@@ -36,26 +36,26 @@ public:
: KScopedResourceReservation(p->GetResourceLimit(), r, v) {}
~KScopedResourceReservation() noexcept {
- if (resource_limit && value && success) {
- // resource was not committed, release the reservation.
- resource_limit->Release(resource, value);
+ if (m_limit && m_value && m_succeeded) {
+ // Resource was not committed, release the reservation.
+ m_limit->Release(m_resource, m_value);
}
}
/// Commit the resource reservation; once committed, destruction of this object does not release the resource.
void Commit() {
- resource_limit = nullptr;
+ m_limit = nullptr;
}
- [[nodiscard]] bool Succeeded() const {
- return success;
+ bool Succeeded() const {
+ return m_succeeded;
}
private:
- KResourceLimit* resource_limit{};
- s64 value;
- LimitableResource resource;
- bool success;
+ KResourceLimit* m_limit{};
+ s64 m_value{};
+ LimitableResource m_resource{};
+ bool m_succeeded{};
};
} // namespace Kernel
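KScopedResourceReservation keeps its reserve-then-commit idiom through the rename: the destructor releases the reservation unless Commit() ran, so every early failure path automatically gives the resource back. A usage sketch; the enumerator and result code are illustrative, not taken from this diff:

    // Illustrative use of the reserve/commit idiom above.
    Result CreateSomethingSketch(KProcess* process) {
        // Reserve one unit against the process's resource limit.
        // LimitableResource::ThreadCountMax is a placeholder name.
        KScopedResourceReservation reservation(process, LimitableResource::ThreadCountMax);
        R_UNLESS(reservation.Succeeded(), ResultLimitReached);

        // ... create and register the object; returning early here
        // lets the destructor release the reservation ...

        // Success: make the reservation permanent.
        reservation.Commit();
        R_SUCCEED();
    }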
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 76db65a4d..c485022f5 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -11,32 +11,39 @@
namespace Kernel {
-class [[nodiscard]] KScopedSchedulerLockAndSleep {
+class KScopedSchedulerLockAndSleep {
public:
- explicit KScopedSchedulerLockAndSleep(KernelCore& kernel_, KThread* t, s64 timeout)
- : kernel(kernel_), thread(t), timeout_tick(timeout) {
+ explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KHardwareTimer** out_timer,
+ KThread* thread, s64 timeout_tick)
+ : m_kernel(kernel), m_timeout_tick(timeout_tick), m_thread(thread), m_timer() {
// Lock the scheduler.
- kernel.GlobalSchedulerContext().scheduler_lock.Lock();
+ kernel.GlobalSchedulerContext().m_scheduler_lock.Lock();
+
+ // Set our timer only if the timeout is positive.
+ m_timer = (timeout_tick > 0) ? std::addressof(kernel.HardwareTimer()) : nullptr;
+
+ *out_timer = m_timer;
}
~KScopedSchedulerLockAndSleep() {
// Register the sleep.
- if (timeout_tick > 0) {
- kernel.HardwareTimer().RegisterTask(thread, timeout_tick);
+ if (m_timeout_tick > 0) {
+ m_timer->RegisterTask(m_thread, m_timeout_tick);
}
// Unlock the scheduler.
- kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
+ m_kernel.GlobalSchedulerContext().m_scheduler_lock.Unlock();
}
void CancelSleep() {
- timeout_tick = 0;
+ m_timeout_tick = 0;
}
private:
- KernelCore& kernel;
- KThread* thread{};
- s64 timeout_tick{};
+ KernelCore& m_kernel;
+ s64 m_timeout_tick{};
+ KThread* m_thread{};
+ KHardwareTimer* m_timer{};
};
} // namespace Kernel
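The reworked constructor above surfaces the hardware timer through an out parameter and only fetches it for positive timeouts; the destructor then registers the timeout and drops the scheduler lock in one place. A condensed caller sketch with the wait-queue plumbing elided:

    // Condensed caller pattern for the lock-and-sleep helper above.
    void WaitWithTimeoutSketch(KernelCore& kernel, KThread* thread, s64 timeout_tick) {
        KHardwareTimer* timer{};
        {
            // Takes the scheduler lock; timer is null for non-positive timeouts.
            KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), thread,
                                             timeout_tick);

            if (false /* condition already satisfied */) {
                slp.CancelSleep(); // Zero the timeout so nothing is registered.
                return;
            }

            // ... enqueue the thread on its wait queue ...
        } // Destructor registers the timeout (if any) and unlocks the scheduler.
    }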
diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp
index 16968ba97..a29d34bc1 100644
--- a/src/core/hle/kernel/k_server_port.cpp
+++ b/src/core/hle/kernel/k_server_port.cpp
@@ -12,13 +12,12 @@
namespace Kernel {
-KServerPort::KServerPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
KServerPort::~KServerPort() = default;
-void KServerPort::Initialize(KPort* parent_port_, std::string&& name_) {
+void KServerPort::Initialize(KPort* parent) {
// Set member variables.
- parent = parent_port_;
- name = std::move(name_);
+ m_parent = parent;
}
bool KServerPort::IsLight() const {
@@ -36,10 +35,10 @@ void KServerPort::CleanupSessions() {
// Get the last session in the list
KServerSession* session = nullptr;
{
- KScopedSchedulerLock sl{kernel};
- if (!session_list.empty()) {
- session = std::addressof(session_list.front());
- session_list.pop_front();
+ KScopedSchedulerLock sl{m_kernel};
+ if (!m_session_list.empty()) {
+ session = std::addressof(m_session_list.front());
+ m_session_list.pop_front();
}
}
@@ -54,13 +53,13 @@ void KServerPort::CleanupSessions() {
void KServerPort::Destroy() {
// Note with our parent that we're closed.
- parent->OnServerClosed();
+ m_parent->OnServerClosed();
// Perform necessary cleanup of our session lists.
this->CleanupSessions();
// Close our reference to our parent.
- parent->Close();
+ m_parent->Close();
}
bool KServerPort::IsSignaled() const {
@@ -68,18 +67,18 @@ bool KServerPort::IsSignaled() const {
UNIMPLEMENTED();
return false;
} else {
- return !session_list.empty();
+ return !m_session_list.empty();
}
}
void KServerPort::EnqueueSession(KServerSession* session) {
ASSERT(!this->IsLight());
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Add the session to our queue.
- session_list.push_back(*session);
- if (session_list.size() == 1) {
+ m_session_list.push_back(*session);
+ if (m_session_list.size() == 1) {
this->NotifyAvailable();
}
}
@@ -87,15 +86,15 @@ void KServerPort::EnqueueSession(KServerSession* session) {
KServerSession* KServerPort::AcceptSession() {
ASSERT(!this->IsLight());
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Return the first session in the list.
- if (session_list.empty()) {
+ if (m_session_list.empty()) {
return nullptr;
}
- KServerSession* session = std::addressof(session_list.front());
- session_list.pop_front();
+ KServerSession* session = std::addressof(m_session_list.front());
+ m_session_list.pop_front();
return session;
}
diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h
index 5fc7ee683..625280290 100644
--- a/src/core/hle/kernel/k_server_port.h
+++ b/src/core/hle/kernel/k_server_port.h
@@ -7,7 +7,7 @@
#include <string>
#include <utility>
-#include <boost/intrusive/list.hpp>
+#include "common/intrusive_list.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_synchronization_object.h"
@@ -22,17 +22,17 @@ class KServerPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);
public:
- explicit KServerPort(KernelCore& kernel_);
+ explicit KServerPort(KernelCore& kernel);
~KServerPort() override;
- void Initialize(KPort* parent_port_, std::string&& name_);
+ void Initialize(KPort* parent);
- void EnqueueSession(KServerSession* pending_session);
+ void EnqueueSession(KServerSession* session);
KServerSession* AcceptSession();
const KPort* GetParent() const {
- return parent;
+ return m_parent;
}
bool IsLight() const;
@@ -42,12 +42,12 @@ public:
bool IsSignaled() const override;
private:
- using SessionList = boost::intrusive::list<KServerSession>;
+ using SessionList = Common::IntrusiveListBaseTraits<KServerSession>::ListType;
void CleanupSessions();
- SessionList session_list;
- KPort* parent{};
+ SessionList m_session_list{};
+ KPort* m_parent{};
};
} // namespace Kernel
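The server-port header above swaps boost::intrusive::list for the project's own intrusive list (the server-session header below makes the same change): the element publicly inherits a base node, and the container type comes from a traits alias. A minimal sketch assuming an interface like the one this diff uses (push_back over references, front(), pop_front(); the real header is common/intrusive_list.h):

    // Sketch of the intrusive-list shape adopted above; the exact
    // Common::IntrusiveList interface is assumed from its usage here.
    #include "common/intrusive_list.h"

    class Item : public Common::IntrusiveListBaseNode<Item> {
    public:
        explicit Item(int v) : value(v) {}
        int value;
    };

    using ItemList = Common::IntrusiveListBaseTraits<Item>::ListType;

    void Example() {
        Item a{1};
        Item b{2};
        ItemList list;
        list.push_back(a); // Links the node; the list never owns elements.
        list.push_back(b);
        list.pop_front(); // Unlink before the element is destroyed.
    }

Unlike std::list, nothing is allocated per element; the link pointers live inside the object itself, which is why elements must outlive their list membership.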
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index aa1941f01..c66aff501 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -10,8 +10,6 @@
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/core_timing.h"
-#include "core/hle/ipc_helpers.h"
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
@@ -22,29 +20,25 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/service/hle_ipc.h"
+#include "core/hle/service/ipc_helpers.h"
#include "core/memory.h"
namespace Kernel {
using ThreadQueueImplForKServerSessionRequest = KThreadQueue;
-KServerSession::KServerSession(KernelCore& kernel_)
- : KSynchronizationObject{kernel_}, m_lock{kernel_} {}
+KServerSession::KServerSession(KernelCore& kernel)
+ : KSynchronizationObject{kernel}, m_lock{m_kernel} {}
KServerSession::~KServerSession() = default;
-void KServerSession::Initialize(KSession* parent_session_, std::string&& name_) {
- // Set member variables.
- parent = parent_session_;
- name = std::move(name_);
-}
-
void KServerSession::Destroy() {
- parent->OnServerClosed();
+ m_parent->OnServerClosed();
this->CleanupRequests();
- parent->Close();
+ m_parent->Close();
}
void KServerSession::OnClientClosed() {
@@ -62,7 +56,7 @@ void KServerSession::OnClientClosed() {
// Get the next request.
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
if (m_current_request != nullptr && m_current_request != prev_request) {
// Set the request, open a reference as we process it.
@@ -121,7 +115,7 @@ void KServerSession::OnClientClosed() {
// // Get the process and page table.
// KProcess *client_process = thread->GetOwnerProcess();
- // auto &client_pt = client_process->GetPageTable();
+ // auto& client_pt = client_process->GetPageTable();
// // Reply to the request.
// ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(),
@@ -141,10 +135,10 @@ void KServerSession::OnClientClosed() {
}
bool KServerSession::IsSignaled() const {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// If the client is closed, we're always signaled.
- if (parent->IsClientClosed()) {
+ if (m_parent->IsClientClosed()) {
return true;
}
@@ -154,17 +148,17 @@ bool KServerSession::IsSignaled() const {
Result KServerSession::OnRequest(KSessionRequest* request) {
// Create the wait queue.
- ThreadQueueImplForKServerSessionRequest wait_queue{kernel};
+ ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel};
{
// Lock the scheduler.
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Ensure that we can handle new requests.
- R_UNLESS(!parent->IsServerClosed(), ResultSessionClosed);
+ R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed);
// Check that we're not terminating.
- R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
+ R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
// Get whether we're empty.
const bool was_empty = m_request_list.empty();
@@ -182,11 +176,11 @@ Result KServerSession::OnRequest(KSessionRequest* request) {
R_SUCCEED_IF(request->GetEvent() != nullptr);
// This is a synchronous request, so we should wait for our request to complete.
- GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
- GetCurrentThread(kernel).BeginWait(&wait_queue);
+ GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
+ GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
}
- return GetCurrentThread(kernel).GetWaitResult();
+ return GetCurrentThread(m_kernel).GetWaitResult();
}
Result KServerSession::SendReply(bool is_hle) {
@@ -196,7 +190,7 @@ Result KServerSession::SendReply(bool is_hle) {
// Get the request.
KSessionRequest* request;
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Get the current request.
request = m_current_request;
@@ -219,7 +213,7 @@ Result KServerSession::SendReply(bool is_hle) {
KEvent* event = request->GetEvent();
// Check whether we're closed.
- const bool closed = (client_thread == nullptr || parent->IsClientClosed());
+ const bool closed = (client_thread == nullptr || m_parent->IsClientClosed());
Result result = ResultSuccess;
if (!closed) {
@@ -228,11 +222,11 @@ Result KServerSession::SendReply(bool is_hle) {
// HLE servers write directly to a pointer to the thread command buffer. Therefore
// the reply has already been written in this case.
} else {
- Core::Memory::Memory& memory{kernel.System().Memory()};
- KThread* server_thread{GetCurrentThreadPointer(kernel)};
+ Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()};
+ KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
- auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
+ auto* src_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress());
auto* dst_msg_buffer = memory.GetPointer(client_message);
std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
}
@@ -254,7 +248,7 @@ Result KServerSession::SendReply(bool is_hle) {
if (event != nullptr) {
// // Get the client process/page table.
// KProcess *client_process = client_thread->GetOwnerProcess();
- // KPageTable *client_page_table = &client_process->PageTable();
+ // KPageTable *client_page_table = std::addressof(client_process->PageTable());
// // If we need to, reply with an async error.
// if (R_FAILED(client_result)) {
@@ -270,7 +264,7 @@ Result KServerSession::SendReply(bool is_hle) {
event->Signal();
} else {
// End the client thread's wait.
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
if (!client_thread->IsTerminationRequested()) {
client_thread->EndWait(client_result);
@@ -278,11 +272,11 @@ Result KServerSession::SendReply(bool is_hle) {
}
}
- return result;
+ R_RETURN(result);
}
-Result KServerSession::ReceiveRequest(std::shared_ptr<HLERequestContext>* out_context,
- std::weak_ptr<SessionRequestManager> manager) {
+Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context,
+ std::weak_ptr<Service::SessionRequestManager> manager) {
// Lock the session.
KScopedLightLock lk{m_lock};
@@ -291,10 +285,10 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<HLERequestContext>* out_co
KThread* client_thread;
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Ensure that we can service the request.
- R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed);
+ R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed);
// Ensure we aren't already servicing a request.
R_UNLESS(m_current_request == nullptr, ResultNotFound);
@@ -303,7 +297,7 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<HLERequestContext>* out_co
R_UNLESS(!m_request_list.empty(), ResultNotFound);
// Pop the first request from the list.
- request = &m_request_list.front();
+ request = std::addressof(m_request_list.front());
m_request_list.pop_front();
// Get the thread for the request.
@@ -325,26 +319,27 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<HLERequestContext>* out_co
// bool recv_list_broken = false;
// Receive the message.
- Core::Memory::Memory& memory{kernel.System().Memory()};
+ Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()};
if (out_context != nullptr) {
// HLE request.
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
- *out_context = std::make_shared<HLERequestContext>(kernel, memory, this, client_thread);
+ *out_context =
+ std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread);
(*out_context)->SetSessionRequestManager(manager);
(*out_context)
->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(),
cmd_buf);
} else {
- KThread* server_thread{GetCurrentThreadPointer(kernel)};
+ KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
auto* src_msg_buffer = memory.GetPointer(client_message);
- auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
+ auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress());
std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
}
// We succeeded.
- return ResultSuccess;
+ R_SUCCEED();
}
void KServerSession::CleanupRequests() {
@@ -355,7 +350,7 @@ void KServerSession::CleanupRequests() {
// Get the next request.
KSessionRequest* request = nullptr;
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
if (m_current_request) {
// Choose the current request if we have one.
@@ -363,7 +358,7 @@ void KServerSession::CleanupRequests() {
m_current_request = nullptr;
} else if (!m_request_list.empty()) {
// Pop the request from the front of the list.
- request = &m_request_list.front();
+ request = std::addressof(m_request_list.front());
m_request_list.pop_front();
}
}
@@ -386,7 +381,8 @@ void KServerSession::CleanupRequests() {
// KProcess *client_process = (client_thread != nullptr) ?
// client_thread->GetOwnerProcess() : nullptr;
// KProcessPageTable *client_page_table = (client_process != nullptr) ?
- // &client_process->GetPageTable() : nullptr;
+ // std::addressof(client_process->GetPageTable())
+ // : nullptr;
// Cleanup the mappings.
// Result result = CleanupMap(request, server_process, client_page_table);
@@ -406,7 +402,7 @@ void KServerSession::CleanupRequests() {
event->Signal();
} else {
// End the client thread's wait.
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
if (!client_thread->IsTerminationRequested()) {
client_thread->EndWait(ResultSessionClosed);
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index 6e189af8b..403891919 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -8,42 +8,42 @@
#include <string>
#include <utility>
-#include <boost/intrusive/list.hpp>
+#include "common/intrusive_list.h"
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_session_request.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
+namespace Service {
+class HLERequestContext;
+class SessionRequestManager;
+} // namespace Service
+
namespace Kernel {
-class HLERequestContext;
class KernelCore;
class KSession;
-class SessionRequestManager;
class KThread;
class KServerSession final : public KSynchronizationObject,
- public boost::intrusive::list_base_hook<> {
+ public Common::IntrusiveListBaseNode<KServerSession> {
KERNEL_AUTOOBJECT_TRAITS(KServerSession, KSynchronizationObject);
friend class ServiceThread;
public:
- explicit KServerSession(KernelCore& kernel_);
+ explicit KServerSession(KernelCore& kernel);
~KServerSession() override;
void Destroy() override;
- void Initialize(KSession* parent_session_, std::string&& name_);
-
- KSession* GetParent() {
- return parent;
+ void Initialize(KSession* p) {
+ m_parent = p;
}
const KSession* GetParent() const {
- return parent;
+ return m_parent;
}
bool IsSignaled() const override;
@@ -52,8 +52,8 @@ public:
/// TODO: flesh these out to match the real kernel
Result OnRequest(KSessionRequest* request);
Result SendReply(bool is_hle = false);
- Result ReceiveRequest(std::shared_ptr<HLERequestContext>* out_context = nullptr,
- std::weak_ptr<SessionRequestManager> manager = {});
+ Result ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context = nullptr,
+ std::weak_ptr<Service::SessionRequestManager> manager = {});
Result SendReplyHLE() {
return SendReply(true);
@@ -64,10 +64,11 @@ private:
void CleanupRequests();
/// KSession that owns this KServerSession
- KSession* parent{};
+ KSession* m_parent{};
/// List of threads which are pending a reply.
- boost::intrusive::list<KSessionRequest> m_request_list;
+ using RequestList = Common::IntrusiveListBaseTraits<KSessionRequest>::ListType;
+ RequestList m_request_list{};
KSessionRequest* m_current_request{};
KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp
index b6f6fe9d9..44d7a8f02 100644
--- a/src/core/hle/kernel/k_session.cpp
+++ b/src/core/hle/kernel/k_session.cpp
@@ -9,68 +9,63 @@
namespace Kernel {
-KSession::KSession(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {}
+KSession::KSession(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
KSession::~KSession() = default;
-void KSession::Initialize(KClientPort* port_, const std::string& name_) {
+void KSession::Initialize(KClientPort* client_port, uintptr_t name) {
// Increment reference count.
// Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both server and client are closed
// this object will be destroyed.
- Open();
+ this->Open();
// Create our sub sessions.
- KAutoObject::Create(std::addressof(server));
- KAutoObject::Create(std::addressof(client));
+ KAutoObject::Create(std::addressof(m_server));
+ KAutoObject::Create(std::addressof(m_client));
// Initialize our sub sessions.
- server.Initialize(this, name_ + ":Server");
- client.Initialize(this, name_ + ":Client");
+ m_server.Initialize(this);
+ m_client.Initialize(this);
// Set state and name.
- SetState(State::Normal);
- name = name_;
+ this->SetState(State::Normal);
+ m_name = name;
// Set our owner process.
- process = kernel.CurrentProcess();
- process->Open();
+ //! FIXME: this is the wrong process!
+ m_process = m_kernel.ApplicationProcess();
+ m_process->Open();
// Set our port.
- port = port_;
- if (port != nullptr) {
- port->Open();
+ m_port = client_port;
+ if (m_port != nullptr) {
+ m_port->Open();
}
// Mark initialized.
- initialized = true;
+ m_initialized = true;
}
void KSession::Finalize() {
- if (port == nullptr) {
- return;
+ if (m_port != nullptr) {
+ m_port->OnSessionFinalized();
+ m_port->Close();
}
-
- port->OnSessionFinalized();
- port->Close();
}
void KSession::OnServerClosed() {
- if (GetState() != State::Normal) {
- return;
+ if (this->GetState() == State::Normal) {
+ this->SetState(State::ServerClosed);
+ m_client.OnServerClosed();
}
-
- SetState(State::ServerClosed);
- client.OnServerClosed();
}
void KSession::OnClientClosed() {
- if (GetState() != State::Normal) {
- return;
+ if (this->GetState() == State::Normal) {
+ this->SetState(State::ClientClosed);
+ m_server.OnClientClosed();
}
-
- SetState(State::ClientClosed);
- server.OnClientClosed();
}
void KSession::PostDestroy(uintptr_t arg) {
diff --git a/src/core/hle/kernel/k_session.h b/src/core/hle/kernel/k_session.h
index 93e5e6f71..f69bab088 100644
--- a/src/core/hle/kernel/k_session.h
+++ b/src/core/hle/kernel/k_session.h
@@ -18,19 +18,18 @@ class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAut
KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);
public:
- explicit KSession(KernelCore& kernel_);
+ explicit KSession(KernelCore& kernel);
~KSession() override;
- void Initialize(KClientPort* port_, const std::string& name_);
-
+ void Initialize(KClientPort* port, uintptr_t name);
void Finalize() override;
bool IsInitialized() const override {
- return initialized;
+ return m_initialized;
}
uintptr_t GetPostDestroyArgument() const override {
- return reinterpret_cast<uintptr_t>(process);
+ return reinterpret_cast<uintptr_t>(m_process);
}
static void PostDestroy(uintptr_t arg);
@@ -48,27 +47,23 @@ public:
}
KClientSession& GetClientSession() {
- return client;
+ return m_client;
}
KServerSession& GetServerSession() {
- return server;
+ return m_server;
}
const KClientSession& GetClientSession() const {
- return client;
+ return m_client;
}
const KServerSession& GetServerSession() const {
- return server;
+ return m_server;
}
const KClientPort* GetParent() const {
- return port;
- }
-
- KClientPort* GetParent() {
- return port;
+ return m_port;
}
private:
@@ -80,20 +75,20 @@ private:
};
void SetState(State state) {
- atomic_state = static_cast<u8>(state);
+ m_atomic_state = static_cast<u8>(state);
}
State GetState() const {
- return static_cast<State>(atomic_state.load(std::memory_order_relaxed));
+ return static_cast<State>(m_atomic_state.load());
}
- KServerSession server;
- KClientSession client;
- std::atomic<std::underlying_type_t<State>> atomic_state{
- static_cast<std::underlying_type_t<State>>(State::Invalid)};
- KClientPort* port{};
- KProcess* process{};
- bool initialized{};
+ KServerSession m_server;
+ KClientSession m_client;
+ KClientPort* m_port{};
+ uintptr_t m_name{};
+ KProcess* m_process{};
+ std::atomic<u8> m_atomic_state{static_cast<u8>(State::Invalid)};
+ bool m_initialized{};
};
} // namespace Kernel
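One subtle change in this header: GetState() previously loaded atomic_state with std::memory_order_relaxed, while the new m_atomic_state.load() uses the sequentially consistent default. Either is sufficient here, since state transitions happen under the kernel's locking; the new code simply takes the conservative default. A compilable sketch of the enum-in-atomic pattern:

    #include <atomic>
    #include <cstdint>

    enum class State : std::uint8_t { Invalid, Normal, ServerClosed, ClientClosed };

    class StateHolder {
    public:
        void SetState(State s) { m_atomic_state.store(static_cast<std::uint8_t>(s)); }
        State GetState() const { return static_cast<State>(m_atomic_state.load()); }
    private:
        std::atomic<std::uint8_t> m_atomic_state{static_cast<std::uint8_t>(State::Invalid)};
    };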
diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp
index 520da6aa7..9a69b4ffc 100644
--- a/src/core/hle/kernel/k_session_request.cpp
+++ b/src/core/hle/kernel/k_session_request.cpp
@@ -6,54 +6,55 @@
namespace Kernel {
-Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size,
- KMemoryState state, size_t index) {
+Result KSessionRequest::SessionMappings::PushMap(KProcessAddress client, KProcessAddress server,
+ size_t size, KMemoryState state, size_t index) {
// At most 15 buffers of each type (4-bit descriptor counts).
ASSERT(index < ((1ul << 4) - 1) * 3);
// Get the mapping.
Mapping* mapping;
if (index < NumStaticMappings) {
- mapping = &m_static_mappings[index];
+ mapping = std::addressof(m_static_mappings[index]);
} else {
// Allocate a page for the extra mappings.
if (m_mappings == nullptr) {
- KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel);
+ KPageBuffer* page_buffer = KPageBuffer::Allocate(m_kernel);
R_UNLESS(page_buffer != nullptr, ResultOutOfMemory);
m_mappings = reinterpret_cast<Mapping*>(page_buffer);
}
- mapping = &m_mappings[index - NumStaticMappings];
+ mapping = std::addressof(m_mappings[index - NumStaticMappings]);
}
// Set the mapping.
mapping->Set(client, server, size, state);
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
- KMemoryState state) {
+Result KSessionRequest::SessionMappings::PushSend(KProcessAddress client, KProcessAddress server,
+ size_t size, KMemoryState state) {
ASSERT(m_num_recv == 0);
ASSERT(m_num_exch == 0);
- return this->PushMap(client, server, size, state, m_num_send++);
+ R_RETURN(this->PushMap(client, server, size, state, m_num_send++));
}
-Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
- KMemoryState state) {
+Result KSessionRequest::SessionMappings::PushReceive(KProcessAddress client, KProcessAddress server,
+ size_t size, KMemoryState state) {
ASSERT(m_num_exch == 0);
- return this->PushMap(client, server, size, state, m_num_send + m_num_recv++);
+ R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv++));
}
-Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
+Result KSessionRequest::SessionMappings::PushExchange(KProcessAddress client,
+ KProcessAddress server, size_t size,
KMemoryState state) {
- return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++);
+ R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++));
}
void KSessionRequest::SessionMappings::Finalize() {
if (m_mappings) {
- KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings));
+ KPageBuffer::Free(m_kernel, reinterpret_cast<KPageBuffer*>(m_mappings));
m_mappings = nullptr;
}
}
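PushMap() above is a small-size optimization for IPC buffer descriptors: the first NumStaticMappings entries land in an inline array, and only when a request carries more does it allocate a single KPageBuffer page to hold the overflow. A simplified sketch of that hybrid storage, using standard allocation where the kernel uses its page-buffer slab (names and sizes here are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <memory>

    struct Mapping {
        std::size_t client{}, server{}, size{};
    };

    class SessionMappings {
        static constexpr std::size_t NumStaticMappings = 8; // illustrative count
        static constexpr std::size_t PageSize = 4096;
    public:
        void PushMap(const Mapping& m, std::size_t index) {
            // Mirrors the ASSERT above: at most 15 descriptors of each of the
            // three types (send/receive/exchange), from the 4-bit counts.
            assert(index < ((1u << 4) - 1) * 3);
            Mapping* slot;
            if (index < NumStaticMappings) {
                slot = &m_static[index]; // common case: inline storage
            } else {
                if (!m_overflow) {
                    // Lazily allocate one page worth of extra mappings; the
                    // kernel draws this from KPageBuffer and can fail with
                    // ResultOutOfMemory, where make_unique would throw instead.
                    m_overflow = std::make_unique<Mapping[]>(PageSize / sizeof(Mapping));
                }
                slot = &m_overflow[index - NumStaticMappings];
            }
            *slot = m;
        }
    private:
        Mapping m_static[NumStaticMappings]{};
        std::unique_ptr<Mapping[]> m_overflow;
    };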
diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h
index e5558bc2c..283669e0a 100644
--- a/src/core/hle/kernel/k_session_request.h
+++ b/src/core/hle/kernel/k_session_request.h
@@ -5,6 +5,8 @@
#include <array>
+#include "common/intrusive_list.h"
+
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_memory_block.h"
@@ -16,7 +18,7 @@ namespace Kernel {
class KSessionRequest final : public KSlabAllocated<KSessionRequest>,
public KAutoObject,
- public boost::intrusive::list_base_hook<> {
+ public Common::IntrusiveListBaseNode<KSessionRequest> {
KERNEL_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject);
public:
@@ -26,17 +28,17 @@ public:
class Mapping {
public:
- constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) {
+ constexpr void Set(KProcessAddress c, KProcessAddress s, size_t sz, KMemoryState st) {
m_client_address = c;
m_server_address = s;
m_size = sz;
m_state = st;
}
- constexpr VAddr GetClientAddress() const {
+ constexpr KProcessAddress GetClientAddress() const {
return m_client_address;
}
- constexpr VAddr GetServerAddress() const {
+ constexpr KProcessAddress GetServerAddress() const {
return m_server_address;
}
constexpr size_t GetSize() const {
@@ -47,14 +49,14 @@ public:
}
private:
- VAddr m_client_address;
- VAddr m_server_address;
- size_t m_size;
- KMemoryState m_state;
+ KProcessAddress m_client_address{};
+ KProcessAddress m_server_address{};
+ size_t m_size{};
+ KMemoryState m_state{};
};
public:
- explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {}
+ explicit SessionMappings(KernelCore& kernel) : m_kernel(kernel) {}
void Initialize() {}
void Finalize();
@@ -69,14 +71,17 @@ public:
return m_num_exch;
}
- Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state);
- Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state);
- Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state);
+ Result PushSend(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state);
+ Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state);
+ Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state);
- VAddr GetSendClientAddress(size_t i) const {
+ KProcessAddress GetSendClientAddress(size_t i) const {
return GetSendMapping(i).GetClientAddress();
}
- VAddr GetSendServerAddress(size_t i) const {
+ KProcessAddress GetSendServerAddress(size_t i) const {
return GetSendMapping(i).GetServerAddress();
}
size_t GetSendSize(size_t i) const {
@@ -86,10 +91,10 @@ public:
return GetSendMapping(i).GetMemoryState();
}
- VAddr GetReceiveClientAddress(size_t i) const {
+ KProcessAddress GetReceiveClientAddress(size_t i) const {
return GetReceiveMapping(i).GetClientAddress();
}
- VAddr GetReceiveServerAddress(size_t i) const {
+ KProcessAddress GetReceiveServerAddress(size_t i) const {
return GetReceiveMapping(i).GetServerAddress();
}
size_t GetReceiveSize(size_t i) const {
@@ -99,10 +104,10 @@ public:
return GetReceiveMapping(i).GetMemoryState();
}
- VAddr GetExchangeClientAddress(size_t i) const {
+ KProcessAddress GetExchangeClientAddress(size_t i) const {
return GetExchangeMapping(i).GetClientAddress();
}
- VAddr GetExchangeServerAddress(size_t i) const {
+ KProcessAddress GetExchangeServerAddress(size_t i) const {
return GetExchangeMapping(i).GetServerAddress();
}
size_t GetExchangeSize(size_t i) const {
@@ -113,7 +118,8 @@ public:
}
private:
- Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index);
+ Result PushMap(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state, size_t index);
const Mapping& GetSendMapping(size_t i) const {
ASSERT(i < m_num_send);
@@ -149,8 +155,8 @@ public:
}
private:
- KernelCore& kernel;
- std::array<Mapping, NumStaticMappings> m_static_mappings;
+ KernelCore& m_kernel;
+ std::array<Mapping, NumStaticMappings> m_static_mappings{};
Mapping* m_mappings{};
u8 m_num_send{};
u8 m_num_recv{};
@@ -158,7 +164,7 @@ public:
};
public:
- explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {}
+ explicit KSessionRequest(KernelCore& kernel) : KAutoObject(kernel), m_mappings(kernel) {}
static KSessionRequest* Create(KernelCore& kernel) {
KSessionRequest* req = KSessionRequest::Allocate(kernel);
@@ -170,13 +176,13 @@ public:
void Destroy() override {
this->Finalize();
- KSessionRequest::Free(kernel, this);
+ KSessionRequest::Free(m_kernel, this);
}
void Initialize(KEvent* event, uintptr_t address, size_t size) {
m_mappings.Initialize();
- m_thread = GetCurrentThreadPointer(kernel);
+ m_thread = GetCurrentThreadPointer(m_kernel);
m_event = event;
m_address = address;
m_size = size;
@@ -227,22 +233,25 @@ public:
return m_mappings.GetExchangeCount();
}
- Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) {
+ Result PushSend(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state) {
return m_mappings.PushSend(client, server, size, state);
}
- Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) {
+ Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state) {
return m_mappings.PushReceive(client, server, size, state);
}
- Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) {
+ Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state) {
return m_mappings.PushExchange(client, server, size, state);
}
- VAddr GetSendClientAddress(size_t i) const {
+ KProcessAddress GetSendClientAddress(size_t i) const {
return m_mappings.GetSendClientAddress(i);
}
- VAddr GetSendServerAddress(size_t i) const {
+ KProcessAddress GetSendServerAddress(size_t i) const {
return m_mappings.GetSendServerAddress(i);
}
size_t GetSendSize(size_t i) const {
@@ -252,10 +261,10 @@ public:
return m_mappings.GetSendMemoryState(i);
}
- VAddr GetReceiveClientAddress(size_t i) const {
+ KProcessAddress GetReceiveClientAddress(size_t i) const {
return m_mappings.GetReceiveClientAddress(i);
}
- VAddr GetReceiveServerAddress(size_t i) const {
+ KProcessAddress GetReceiveServerAddress(size_t i) const {
return m_mappings.GetReceiveServerAddress(i);
}
size_t GetReceiveSize(size_t i) const {
@@ -265,10 +274,10 @@ public:
return m_mappings.GetReceiveMemoryState(i);
}
- VAddr GetExchangeClientAddress(size_t i) const {
+ KProcessAddress GetExchangeClientAddress(size_t i) const {
return m_mappings.GetExchangeClientAddress(i);
}
- VAddr GetExchangeServerAddress(size_t i) const {
+ KProcessAddress GetExchangeServerAddress(size_t i) const {
return m_mappings.GetExchangeServerAddress(i);
}
size_t GetExchangeSize(size_t i) const {
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 3cf2b5d91..efb5699de 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -12,29 +12,27 @@
namespace Kernel {
-KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KSharedMemory::~KSharedMemory() = default;
-Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
- Svc::MemoryPermission owner_permission_,
- Svc::MemoryPermission user_permission_, std::size_t size_,
- std::string name_) {
+Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* owner_process,
+ Svc::MemoryPermission owner_permission,
+ Svc::MemoryPermission user_permission, std::size_t size) {
// Set members.
- owner_process = owner_process_;
- device_memory = &device_memory_;
- owner_permission = owner_permission_;
- user_permission = user_permission_;
- size = Common::AlignUp(size_, PageSize);
- name = std::move(name_);
+ m_owner_process = owner_process;
+ m_device_memory = std::addressof(device_memory);
+ m_owner_permission = owner_permission;
+ m_user_permission = user_permission;
+ m_size = Common::AlignUp(size, PageSize);
const size_t num_pages = Common::DivideUp(size, PageSize);
// Get the resource limit.
- KResourceLimit* reslimit = kernel.GetSystemResourceLimit();
+ KResourceLimit* reslimit = m_kernel.GetSystemResourceLimit();
// Reserve memory for ourselves.
KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax,
- size_);
+ size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate the memory.
@@ -42,67 +40,67 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
//! HACK: Open continuous mapping from sysmodule pool.
auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure,
KMemoryManager::Direction::FromBack);
- physical_address = kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
- R_UNLESS(physical_address != 0, ResultOutOfMemory);
+ m_physical_address = m_kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
+ R_UNLESS(m_physical_address != 0, ResultOutOfMemory);
//! Insert the result into our page group.
- page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
- page_group->AddBlock(physical_address, num_pages);
+ m_page_group.emplace(m_kernel,
+ std::addressof(m_kernel.GetSystemSystemResource().GetBlockInfoManager()));
+ m_page_group->AddBlock(m_physical_address, num_pages);
// Commit our reservation.
memory_reservation.Commit();
// Set our resource limit.
- resource_limit = reslimit;
- resource_limit->Open();
+ m_resource_limit = reslimit;
+ m_resource_limit->Open();
// Mark initialized.
- is_initialized = true;
+ m_is_initialized = true;
// Clear all pages in the memory.
- for (const auto& block : *page_group) {
- std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
+ for (const auto& block : *m_page_group) {
+ std::memset(m_device_memory->GetPointer<void>(block.GetAddress()), 0, block.GetSize());
}
- return ResultSuccess;
+ R_SUCCEED();
}
void KSharedMemory::Finalize() {
// Close and finalize the page group.
- page_group->Close();
- page_group->Finalize();
+ m_page_group->Close();
+ m_page_group->Finalize();
// Release the memory reservation.
- resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
- resource_limit->Close();
-
- // Perform inherited finalization.
- KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
+ m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, m_size);
+ m_resource_limit->Close();
}
-Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
+Result KSharedMemory::Map(KProcess& target_process, KProcessAddress address, std::size_t map_size,
Svc::MemoryPermission map_perm) {
// Validate the size.
- R_UNLESS(size == map_size, ResultInvalidSize);
+ R_UNLESS(m_size == map_size, ResultInvalidSize);
// Validate the permission.
const Svc::MemoryPermission test_perm =
- &target_process == owner_process ? owner_permission : user_permission;
+ std::addressof(target_process) == m_owner_process ? m_owner_permission : m_user_permission;
if (test_perm == Svc::MemoryPermission::DontCare) {
ASSERT(map_perm == Svc::MemoryPermission::Read || map_perm == Svc::MemoryPermission::Write);
} else {
R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission);
}
- return target_process.PageTable().MapPages(address, *page_group, KMemoryState::Shared,
- ConvertToKMemoryPermission(map_perm));
+ R_RETURN(target_process.PageTable().MapPageGroup(address, *m_page_group, KMemoryState::Shared,
+ ConvertToKMemoryPermission(map_perm)));
}
-Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
+Result KSharedMemory::Unmap(KProcess& target_process, KProcessAddress address,
+ std::size_t unmap_size) {
// Validate the size.
- R_UNLESS(size == unmap_size, ResultInvalidSize);
+ R_UNLESS(m_size == unmap_size, ResultInvalidSize);
- return target_process.PageTable().UnmapPages(address, *page_group, KMemoryState::Shared);
+ R_RETURN(
+ target_process.PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::Shared));
}
} // namespace Kernel
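Initialize() above leans on the commit-or-rollback idiom of KScopedResourceReservation: memory is reserved before any allocation, each later failure path returns early, and Commit() is called only once everything has succeeded, so the guard's destructor releases the reservation on any early exit. A minimal sketch of that guard (ResourceLimit is an illustrative stand-in):

    #include <cstddef>

    class ResourceLimit {
    public:
        bool Reserve(std::size_t n) {
            if (m_used + n > m_max) {
                return false;
            }
            m_used += n;
            return true;
        }
        void Release(std::size_t n) { m_used -= n; }
    private:
        std::size_t m_used{}, m_max{1 << 20};
    };

    class ScopedReservation {
    public:
        ScopedReservation(ResourceLimit& limit, std::size_t n)
            : m_limit(limit), m_size(n), m_ok(limit.Reserve(n)) {}
        ~ScopedReservation() {
            // Roll back unless ownership was handed off via Commit().
            if (m_ok && !m_committed) {
                m_limit.Release(m_size);
            }
        }
        bool Succeeded() const { return m_ok; }
        void Commit() { m_committed = true; }
    private:
        ResourceLimit& m_limit;
        std::size_t m_size;
        bool m_ok;
        bool m_committed{};
    };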
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 8b29f0b4a..54b23d7ac 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -6,11 +6,11 @@
#include <optional>
#include <string>
-#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -23,12 +23,12 @@ class KSharedMemory final
KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
public:
- explicit KSharedMemory(KernelCore& kernel_);
+ explicit KSharedMemory(KernelCore& kernel);
~KSharedMemory() override;
Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
Svc::MemoryPermission owner_permission_,
- Svc::MemoryPermission user_permission_, std::size_t size_, std::string name_);
+ Svc::MemoryPermission user_permission_, std::size_t size_);
/**
* Maps a shared memory block to an address in the target process' address space
@@ -37,7 +37,7 @@ public:
* @param map_size Size of the shared memory block to map
* @param permissions Memory block map permissions (specified by SVC field)
*/
- Result Map(KProcess& target_process, VAddr address, std::size_t map_size,
+ Result Map(KProcess& target_process, KProcessAddress address, std::size_t map_size,
Svc::MemoryPermission permissions);
/**
@@ -46,7 +46,7 @@ public:
* @param address Address in system memory to unmap shared memory block
* @param unmap_size Size of the shared memory block to unmap
*/
- Result Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
+ Result Unmap(KProcess& target_process, KProcessAddress address, std::size_t unmap_size);
/**
* Gets a pointer to the shared memory block
@@ -54,7 +54,7 @@ public:
* @return A pointer to the shared memory block from the specified offset
*/
u8* GetPointer(std::size_t offset = 0) {
- return device_memory->GetPointer<u8>(physical_address + offset);
+ return m_device_memory->GetPointer<u8>(m_physical_address + offset);
}
/**
@@ -63,26 +63,26 @@ public:
* @return A pointer to the shared memory block from the specified offset
*/
const u8* GetPointer(std::size_t offset = 0) const {
- return device_memory->GetPointer<u8>(physical_address + offset);
+ return m_device_memory->GetPointer<u8>(m_physical_address + offset);
}
void Finalize() override;
bool IsInitialized() const override {
- return is_initialized;
+ return m_is_initialized;
}
- static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+ static void PostDestroy(uintptr_t arg) {}
private:
- Core::DeviceMemory* device_memory{};
- KProcess* owner_process{};
- std::optional<KPageGroup> page_group{};
- Svc::MemoryPermission owner_permission{};
- Svc::MemoryPermission user_permission{};
- PAddr physical_address{};
- std::size_t size{};
- KResourceLimit* resource_limit{};
- bool is_initialized{};
+ Core::DeviceMemory* m_device_memory{};
+ KProcess* m_owner_process{};
+ std::optional<KPageGroup> m_page_group{};
+ Svc::MemoryPermission m_owner_permission{};
+ Svc::MemoryPermission m_user_permission{};
+ KPhysicalAddress m_physical_address{};
+ std::size_t m_size{};
+ KResourceLimit* m_resource_limit{};
+ bool m_is_initialized{};
};
} // namespace Kernel
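This header illustrates the commit-wide type migration well: PAddr and VAddr were bare u64 aliases, so physical, virtual, and per-process addresses could be mixed silently, whereas KPhysicalAddress and KProcessAddress from k_typed_address.h are distinct wrapper types unwrapped explicitly with GetInteger(). A rough sketch of the strong-typedef approach (TypedAddress below is an assumption about the shape, not the real implementation):

    #include <cstdint>

    template <typename Tag>
    class TypedAddress {
    public:
        constexpr TypedAddress() = default;
        constexpr explicit TypedAddress(std::uint64_t v) : m_value(v) {}
        constexpr TypedAddress operator+(std::uint64_t off) const {
            return TypedAddress(m_value + off);
        }
        friend constexpr std::uint64_t GetInteger(TypedAddress a) { return a.m_value; }
    private:
        std::uint64_t m_value{};
    };

    using PhysicalAddress = TypedAddress<struct PhysicalTag>;
    using ProcessAddress  = TypedAddress<struct ProcessTag>;

    // ProcessAddress p{0x1000}; PhysicalAddress q{0x2000};
    // p = q;                    // error: distinct types, mixing no longer compiles
    // auto raw = GetInteger(p); // explicit unwrap, as seen throughout this diff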
diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h
index 2bb6b6d08..2d8ff20d6 100644
--- a/src/core/hle/kernel/k_shared_memory_info.h
+++ b/src/core/hle/kernel/k_shared_memory_info.h
@@ -3,7 +3,7 @@
#pragma once
-#include <boost/intrusive/list.hpp>
+#include "common/intrusive_list.h"
#include "core/hle/kernel/slab_helpers.h"
@@ -12,31 +12,34 @@ namespace Kernel {
class KSharedMemory;
class KSharedMemoryInfo final : public KSlabAllocated<KSharedMemoryInfo>,
- public boost::intrusive::list_base_hook<> {
+ public Common::IntrusiveListBaseNode<KSharedMemoryInfo> {
public:
explicit KSharedMemoryInfo(KernelCore&) {}
KSharedMemoryInfo() = default;
- constexpr void Initialize(KSharedMemory* shmem) {
- shared_memory = shmem;
+ constexpr void Initialize(KSharedMemory* m) {
+ m_shared_memory = m;
+ m_reference_count = 0;
}
constexpr KSharedMemory* GetSharedMemory() const {
- return shared_memory;
+ return m_shared_memory;
}
constexpr void Open() {
- ++reference_count;
+ ++m_reference_count;
+ ASSERT(m_reference_count > 0);
}
constexpr bool Close() {
- return (--reference_count) == 0;
+ ASSERT(m_reference_count > 0);
+ return (--m_reference_count) == 0;
}
private:
- KSharedMemory* shared_memory{};
- size_t reference_count{};
+ KSharedMemory* m_shared_memory{};
+ size_t m_reference_count{};
};
} // namespace Kernel
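The new ASSERTs in Open() and Close() above turn silent reference-count corruption (wrap-around on open, double-close) into loud failures. A usage sketch of the protocol with a stand-in type; the caller frees the info block exactly when Close() reports the count reached zero:

    #include <cassert>
    #include <cstddef>

    struct SharedMemoryInfo {
        void Open() {
            ++refs;
            assert(refs > 0); // catches increment wrap-around
        }
        bool Close() {
            assert(refs > 0); // catches double-close / underflow
            return --refs == 0;
        }
        std::size_t refs{};
    };

    int main() {
        SharedMemoryInfo info{};
        info.Open();        // first mapping of the shared memory
        info.Open();        // second mapping
        (void)info.Close(); // still referenced, keep the info block
        if (info.Close()) {
            // last reference dropped: the process frees the info block here
        }
    }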
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 68469b041..334afebb7 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -89,7 +89,8 @@ private:
if (alloc_peak <= cur_peak) {
break;
}
- } while (!Common::AtomicCompareAndSwap(&m_peak, alloc_peak, cur_peak, cur_peak));
+ } while (
+ !Common::AtomicCompareAndSwap(std::addressof(m_peak), alloc_peak, cur_peak, cur_peak));
}
public:
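The loop above is a lock-free running-maximum update: reload the peak, stop if it already exceeds the new allocation, otherwise try to publish the new value with a compare-and-swap and retry on contention. The same pattern expressed with std::atomic (Common::AtomicCompareAndSwap is yuzu's wrapper; this sketch uses the standard API directly):

    #include <atomic>
    #include <cstdint>

    inline void UpdatePeak(std::atomic<std::uint64_t>& peak, std::uint64_t candidate) {
        std::uint64_t cur = peak.load(std::memory_order_relaxed);
        do {
            if (candidate <= cur) {
                break; // another thread already recorded a higher peak
            }
            // compare_exchange_weak reloads `cur` on failure, so each retry
            // compares against the freshly observed peak.
        } while (!peak.compare_exchange_weak(cur, candidate, std::memory_order_relaxed));
    }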
diff --git a/src/core/hle/kernel/k_spin_lock.cpp b/src/core/hle/kernel/k_spin_lock.cpp
index 6e16a1849..852532037 100644
--- a/src/core/hle/kernel/k_spin_lock.cpp
+++ b/src/core/hle/kernel/k_spin_lock.cpp
@@ -6,15 +6,15 @@
namespace Kernel {
void KSpinLock::Lock() {
- lck.lock();
+ m_lock.lock();
}
void KSpinLock::Unlock() {
- lck.unlock();
+ m_lock.unlock();
}
bool KSpinLock::TryLock() {
- return lck.try_lock();
+ return m_lock.try_lock();
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h
index 397a93d21..094a1e6be 100644
--- a/src/core/hle/kernel/k_spin_lock.h
+++ b/src/core/hle/kernel/k_spin_lock.h
@@ -5,26 +5,24 @@
#include <mutex>
+#include "common/common_funcs.h"
#include "core/hle/kernel/k_scoped_lock.h"
namespace Kernel {
class KSpinLock {
public:
- KSpinLock() = default;
+ explicit KSpinLock() = default;
- KSpinLock(const KSpinLock&) = delete;
- KSpinLock& operator=(const KSpinLock&) = delete;
-
- KSpinLock(KSpinLock&&) = delete;
- KSpinLock& operator=(KSpinLock&&) = delete;
+ YUZU_NON_COPYABLE(KSpinLock);
+ YUZU_NON_MOVEABLE(KSpinLock);
void Lock();
void Unlock();
- [[nodiscard]] bool TryLock();
+ bool TryLock();
private:
- std::mutex lck;
+ std::mutex m_lock;
};
// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
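KSpinLock keeps the Lock()/Unlock()/TryLock() spelling precisely so it slots into the RAII wrapper from k_scoped_lock.h. A minimal equivalent of that pairing, assuming only what this diff shows (the std::mutex backing matches the TODO above):

    #include <mutex>

    class SpinLock { // stand-in for KSpinLock
    public:
        void Lock() { m_lock.lock(); }
        void Unlock() { m_lock.unlock(); }
        bool TryLock() { return m_lock.try_lock(); }
    private:
        std::mutex m_lock;
    };

    template <typename T>
    class ScopedLock { // stand-in for KScopedLock
    public:
        explicit ScopedLock(T& lock) : m_lock(lock) { m_lock.Lock(); }
        ~ScopedLock() { m_lock.Unlock(); }
        ScopedLock(const ScopedLock&) = delete;
        ScopedLock& operator=(const ScopedLock&) = delete;
    private:
        T& m_lock;
    };

    // SpinLock lk;
    // { ScopedLock guard{lk}; /* critical section */ } // unlocks on scope exit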
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 802dca046..3e5b735b1 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -3,6 +3,7 @@
#include "common/assert.h"
#include "common/common_types.h"
+#include "common/scratch_buffer.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
@@ -17,9 +18,9 @@ namespace {
class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
public:
- ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o,
+ ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel, KSynchronizationObject** o,
KSynchronizationObject::ThreadListNode* n, s32 c)
- : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
+ : KThreadQueueWithoutEndWait(kernel), m_objects(o), m_nodes(n), m_count(c) {}
void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
Result wait_result) override {
@@ -71,25 +72,26 @@ void KSynchronizationObject::Finalize() {
KAutoObject::Finalize();
}
-Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
+Result KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
KSynchronizationObject** objects, const s32 num_objects,
s64 timeout) {
// Allocate space on stack for thread nodes.
- std::vector<ThreadListNode> thread_nodes(num_objects);
+ std::array<ThreadListNode, Svc::ArgumentHandleCountMax> thread_nodes;
// Prepare for wait.
- KThread* thread = GetCurrentThreadPointer(kernel_ctx);
- ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
- thread_nodes.data(), num_objects);
+ KThread* thread = GetCurrentThreadPointer(kernel);
+ KHardwareTimer* timer{};
+ ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel, objects, thread_nodes.data(),
+ num_objects);
{
// Setup the scheduling lock and sleep.
- KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout);
+ KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), thread, timeout);
// Check if the thread should terminate.
if (thread->IsTerminationRequested()) {
slp.CancelSleep();
- return ResultTerminationRequested;
+ R_THROW(ResultTerminationRequested);
}
// Check if any of the objects are already signaled.
@@ -99,21 +101,21 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
if (objects[i]->IsSignaled()) {
*out_index = i;
slp.CancelSleep();
- return ResultSuccess;
+ R_THROW(ResultSuccess);
}
}
// Check if the timeout is zero.
if (timeout == 0) {
slp.CancelSleep();
- return ResultTimedOut;
+ R_THROW(ResultTimedOut);
}
// Check if waiting was canceled.
if (thread->IsWaitCancelled()) {
slp.CancelSleep();
thread->ClearWaitCancelled();
- return ResultCancelled;
+ R_THROW(ResultCancelled);
}
// Add the waiters.
@@ -131,6 +133,7 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
thread->SetSyncedIndex(-1);
// Wait for an object to be signaled.
+ wait_queue.SetHardwareTimer(timer);
thread->BeginWait(std::addressof(wait_queue));
thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
}
@@ -139,16 +142,15 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
*out_index = thread->GetSyncedIndex();
// Get the wait result.
- return thread->GetWaitResult();
+ R_RETURN(thread->GetWaitResult());
}
-KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
- : KAutoObjectWithList{kernel_} {}
+KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {}
KSynchronizationObject::~KSynchronizationObject() = default;
void KSynchronizationObject::NotifyAvailable(Result result) {
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// If we're not signaled, we've nothing to notify.
if (!this->IsSignaled()) {
@@ -156,7 +158,7 @@ void KSynchronizationObject::NotifyAvailable(Result result) {
}
// Iterate over each thread.
- for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+ for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
cur_node->thread->NotifyAvailable(this, result);
}
}
@@ -166,8 +168,8 @@ std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() co
// If debugging, dump the list of waiters.
{
- KScopedSchedulerLock lock(kernel);
- for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+ KScopedSchedulerLock lock(m_kernel);
+ for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
threads.emplace_back(cur_node->thread);
}
}
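Wait() also shows the result-macro conversion applied across this diff: plain `return` statements become R_SUCCEED(), R_THROW(result), and R_RETURN(expr). In the simple case they reduce to roughly the following (a loose sketch; the real definitions also cooperate with scoped failure handlers, so treat this as an approximation, with Result and ResultSuccess standing in for yuzu's types):

    #define R_SUCCEED()         return ResultSuccess
    #define R_THROW(res)        return (res)
    #define R_RETURN(expr)      return (expr)
    #define R_UNLESS(cond, res) \
        do {                    \
            if (!(cond)) {      \
                return (res);   \
            }                   \
        } while (0)
    #define R_TRY(expr)                                              \
        do {                                                         \
            if (const auto r_try_rc = (expr); r_try_rc.IsError()) {  \
                return r_try_rc;                                     \
            }                                                        \
        } while (0)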
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index 8d8122ab7..d55a2673d 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -24,31 +24,30 @@ public:
KThread* thread{};
};
- [[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index,
- KSynchronizationObject** objects, const s32 num_objects,
- s64 timeout);
+ static Result Wait(KernelCore& kernel, s32* out_index, KSynchronizationObject** objects,
+ const s32 num_objects, s64 timeout);
void Finalize() override;
- [[nodiscard]] virtual bool IsSignaled() const = 0;
+ virtual bool IsSignaled() const = 0;
- [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
+ std::vector<KThread*> GetWaitingThreadsForDebugging() const;
void LinkNode(ThreadListNode* node_) {
// Link the node to the list.
- if (thread_list_tail == nullptr) {
- thread_list_head = node_;
+ if (m_thread_list_tail == nullptr) {
+ m_thread_list_head = node_;
} else {
- thread_list_tail->next = node_;
+ m_thread_list_tail->next = node_;
}
- thread_list_tail = node_;
+ m_thread_list_tail = node_;
}
void UnlinkNode(ThreadListNode* node_) {
// Unlink the node from the list.
ThreadListNode* prev_ptr =
- reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
+ reinterpret_cast<ThreadListNode*>(std::addressof(m_thread_list_head));
ThreadListNode* prev_val = nullptr;
ThreadListNode *prev, *tail_prev;
@@ -59,8 +58,8 @@ public:
prev_val = prev_ptr;
} while (prev_ptr != node_);
- if (thread_list_tail == node_) {
- thread_list_tail = tail_prev;
+ if (m_thread_list_tail == node_) {
+ m_thread_list_tail = tail_prev;
}
prev->next = node_->next;
@@ -78,8 +77,8 @@ protected:
}
private:
- ThreadListNode* thread_list_head{};
- ThreadListNode* thread_list_tail{};
+ ThreadListNode* m_thread_list_head{};
+ ThreadListNode* m_thread_list_tail{};
};
} // namespace Kernel
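UnlinkNode() above relies on a classic intrusive-list trick: it reinterpret_casts the address of the head pointer to a node, so the head pointer acts as the `next` field of a phantom node and a single loop can unlink the head and interior nodes identically (valid only while `next` is the node's first member). A simplified standalone version of the same singly linked waiter list:

    #include <cassert>

    struct Node {
        Node* next{}; // must stay the first member for the head-as-node trick
    };

    struct List {
        Node* head{};
        Node* tail{};

        void Link(Node* n) {
            if (tail == nullptr) {
                head = n;
            } else {
                tail->next = n;
            }
            tail = n;
        }

        void Unlink(Node* n) {
            // Pretend &head is a Node whose `next` field is `head`.
            Node* prev = reinterpret_cast<Node*>(&head);
            while (prev->next != n) {
                assert(prev->next != nullptr);
                prev = prev->next;
            }
            if (tail == n) {
                tail = (prev == reinterpret_cast<Node*>(&head)) ? nullptr : prev;
            }
            prev->next = n->next;
            n->next = nullptr;
        }
    };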
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp
index 4cc377a6c..e6c8d589a 100644
--- a/src/core/hle/kernel/k_system_resource.cpp
+++ b/src/core/hle/kernel/k_system_resource.cpp
@@ -5,9 +5,8 @@
namespace Kernel {
-Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size,
- [[maybe_unused]] KResourceLimit* resource_limit,
- [[maybe_unused]] KMemoryManager::Pool pool) {
+Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit,
+ KMemoryManager::Pool pool) {
// Unimplemented
UNREACHABLE();
}
@@ -17,8 +16,8 @@ void KSecureSystemResource::Finalize() {
UNREACHABLE();
}
-size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(
- [[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) {
+size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size,
+ KMemoryManager::Pool pool) {
// Unimplemented
UNREACHABLE();
}
diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h
index 9a991f725..6ea482185 100644
--- a/src/core/hle/kernel/k_system_resource.h
+++ b/src/core/hle/kernel/k_system_resource.h
@@ -21,7 +21,7 @@ class KSystemResource : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject);
public:
- explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {}
+ explicit KSystemResource(KernelCore& kernel) : KAutoObject(kernel) {}
protected:
void SetSecureResource() {
@@ -87,8 +87,8 @@ private:
class KSecureSystemResource final
: public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> {
public:
- explicit KSecureSystemResource(KernelCore& kernel_)
- : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) {
+ explicit KSecureSystemResource(KernelCore& kernel)
+ : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel) {
// Mark ourselves as being a secure resource.
this->SetSecureResource();
}
@@ -99,7 +99,7 @@ public:
bool IsInitialized() const {
return m_is_initialized;
}
- static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+ static void PostDestroy(uintptr_t arg) {}
size_t CalculateRequiredSecureMemorySize() const {
return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool);
@@ -130,7 +130,7 @@ private:
KBlockInfoSlabHeap m_block_info_heap;
KPageTableSlabHeap m_page_table_heap;
KResourceLimit* m_resource_limit{};
- VAddr m_resource_address{};
+ KVirtualAddress m_resource_address{};
size_t m_resource_size{};
};
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 21207fe99..adb6ec581 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -4,6 +4,8 @@
#include <algorithm>
#include <atomic>
#include <cinttypes>
+#include <condition_variable>
+#include <mutex>
#include <optional>
#include <vector>
@@ -29,36 +31,34 @@
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
#include "core/memory.h"
-#ifdef ARCHITECTURE_x86_64
-#include "core/arm/dynarmic/arm_dynarmic_32.h"
-#endif
-
namespace {
constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
-static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
+static void ResetThreadContext32(Kernel::KThread::ThreadContext32& context, u32 stack_top,
u32 entry_point, u32 arg) {
context = {};
context.cpu_registers[0] = arg;
context.cpu_registers[15] = entry_point;
context.cpu_registers[13] = stack_top;
+ context.fpscr = 0;
}
-static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
- VAddr entry_point, u64 arg) {
+static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, u64 stack_top,
+ u64 entry_point, u64 arg) {
context = {};
context.cpu_registers[0] = arg;
context.cpu_registers[18] = Kernel::KSystemControl::GenerateRandomU64() | 1;
context.pc = entry_point;
context.sp = stack_top;
- // TODO(merry): Perform a hardware test to determine the below value.
context.fpcr = 0;
+ context.fpsr = 0;
}
} // namespace
@@ -75,14 +75,14 @@ struct ThreadLocalRegion {
class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
public:
- explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
- : KThreadQueueWithoutEndWait(kernel_) {}
+ explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel)
+ : KThreadQueueWithoutEndWait(kernel) {}
};
class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
public:
- explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
- : KThreadQueue(kernel_), m_wait_list(wl) {}
+ explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel, KThread::WaiterList* wl)
+ : KThreadQueue(kernel), m_wait_list(wl) {}
void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread from the wait list.
@@ -93,17 +93,17 @@ public:
}
private:
- KThread::WaiterList* m_wait_list;
+ KThread::WaiterList* m_wait_list{};
};
} // namespace
-KThread::KThread(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
+KThread::KThread(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {}
KThread::~KThread() = default;
-Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
- s32 virt_core, KProcess* owner, ThreadType type) {
+Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top,
+ s32 prio, s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -115,7 +115,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
// First, clear the TLS address.
- tls_address = {};
+ m_tls_address = {};
// Next, assert things based on the type.
switch (type) {
@@ -139,110 +139,110 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
break;
}
- thread_type = type;
+ m_thread_type = type;
// Set the ideal core ID and affinity mask.
- virtual_ideal_core_id = virt_core;
- physical_ideal_core_id = phys_core;
- virtual_affinity_mask = 1ULL << virt_core;
- physical_affinity_mask.SetAffinity(phys_core, true);
+ m_virtual_ideal_core_id = virt_core;
+ m_physical_ideal_core_id = phys_core;
+ m_virtual_affinity_mask = 1ULL << virt_core;
+ m_physical_affinity_mask.SetAffinity(phys_core, true);
// Set the thread state.
- thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
- ? ThreadState::Runnable
- : ThreadState::Initialized;
+ m_thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
+ ? ThreadState::Runnable
+ : ThreadState::Initialized;
// Set TLS address.
- tls_address = 0;
+ m_tls_address = 0;
// Set parent and condvar tree.
- parent = nullptr;
- condvar_tree = nullptr;
+ m_parent = nullptr;
+ m_condvar_tree = nullptr;
// Set sync booleans.
- signaled = false;
- termination_requested = false;
- wait_cancelled = false;
- cancellable = false;
+ m_signaled = false;
+ m_termination_requested = false;
+ m_wait_cancelled = false;
+ m_cancellable = false;
// Set core ID and wait result.
- core_id = phys_core;
- wait_result = ResultNoSynchronizationObject;
+ m_core_id = phys_core;
+ m_wait_result = ResultNoSynchronizationObject;
// Set priorities.
- priority = prio;
- base_priority = prio;
+ m_priority = prio;
+ m_base_priority = prio;
// Initialize sleeping queue.
- wait_queue = nullptr;
+ m_wait_queue = nullptr;
// Set suspend flags.
- suspend_request_flags = 0;
- suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);
+ m_suspend_request_flags = 0;
+ m_suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);
// We're neither debug attached, nor are we nesting our priority inheritance.
- debug_attached = false;
- priority_inheritance_count = 0;
+ m_debug_attached = false;
+ m_priority_inheritance_count = 0;
// We haven't been scheduled, and we have done no light IPC.
- schedule_count = -1;
- last_scheduled_tick = 0;
- light_ipc_data = nullptr;
+ m_schedule_count = -1;
+ m_last_scheduled_tick = 0;
+ m_light_ipc_data = nullptr;
// We're not waiting for a lock, and we haven't disabled migration.
- lock_owner = nullptr;
- num_core_migration_disables = 0;
+ m_waiting_lock_info = nullptr;
+ m_num_core_migration_disables = 0;
// We have no waiters, but we do have an entrypoint.
- num_kernel_waiters = 0;
+ m_num_kernel_waiters = 0;
// Set our current core id.
- current_core_id = phys_core;
+ m_current_core_id = phys_core;
// We haven't released our resource limit hint, and we've spent no time on the cpu.
- resource_limit_release_hint = false;
- cpu_time = 0;
+ m_resource_limit_release_hint = false;
+ m_cpu_time = 0;
// Set debug context.
- stack_top = user_stack_top;
- argument = arg;
+ m_stack_top = user_stack_top;
+ m_argument = arg;
// Clear our stack parameters.
- std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
+ std::memset(static_cast<void*>(std::addressof(this->GetStackParameters())), 0,
sizeof(StackParameters));
// Set parent, if relevant.
if (owner != nullptr) {
// Setup the TLS, if needed.
if (type == ThreadType::User) {
- R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address)));
+ R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
}
- parent = owner;
- parent->Open();
+ m_parent = owner;
+ m_parent->Open();
}
// Initialize thread context.
- ResetThreadContext64(thread_context_64, user_stack_top, func, arg);
- ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top),
- static_cast<u32>(func), static_cast<u32>(arg));
+ ResetThreadContext64(m_thread_context_64, GetInteger(user_stack_top), GetInteger(func), arg);
+ ResetThreadContext32(m_thread_context_32, static_cast<u32>(GetInteger(user_stack_top)),
+ static_cast<u32>(GetInteger(func)), static_cast<u32>(arg));
// Setup the stack parameters.
- StackParameters& sp = GetStackParameters();
+ StackParameters& sp = this->GetStackParameters();
sp.cur_thread = this;
sp.disable_count = 1;
- SetInExceptionHandler();
+ this->SetInExceptionHandler();
// Set thread ID.
- thread_id = kernel.CreateNewThreadID();
+ m_thread_id = m_kernel.CreateNewThreadID();
// We initialized!
- initialized = true;
+ m_initialized = true;
// Register ourselves with our parent process.
- if (parent != nullptr) {
- parent->RegisterThread(this);
- if (parent->IsSuspended()) {
+ if (m_parent != nullptr) {
+ m_parent->RegisterThread(this);
+ if (m_parent->IsSuspended()) {
RequestSuspend(SuspendType::Process);
}
}
@@ -251,14 +251,14 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
}
Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
- ThreadType type, std::function<void()>&& init_func) {
+ KProcessAddress user_stack_top, s32 prio, s32 core,
+ KProcess* owner, ThreadType type,
+ std::function<void()>&& init_func) {
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
// Initialize emulation parameters.
- thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
- thread->is_single_core = !Settings::values.use_multi_core.GetValue();
+ thread->m_host_context = std::make_shared<Common::Fiber>(std::move(init_func));
R_SUCCEED();
}
@@ -268,7 +268,7 @@ Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) {
R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy));
// Initialize emulation parameters.
- thread->stack_parameters.disable_count = 0;
+ thread->m_stack_parameters.disable_count = 0;
R_SUCCEED();
}
@@ -291,13 +291,32 @@ Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thre
}
Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
- uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
- KProcess* owner) {
+ uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
+ s32 virt_core, KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
}
+Result KThread::InitializeServiceThread(Core::System& system, KThread* thread,
+ std::function<void()>&& func, s32 prio, s32 virt_core,
+ KProcess* owner) {
+ system.Kernel().GlobalSchedulerContext().AddThread(thread);
+ std::function<void()> func2{[&system, func{std::move(func)}] {
+ // Similar to UserModeThreadStarter.
+ system.Kernel().CurrentScheduler()->OnThreadStart();
+
+ // Run the guest function.
+ func();
+
+ // Exit.
+ Svc::ExitThread(system);
+ }};
+
+ R_RETURN(InitializeThread(thread, {}, {}, {}, prio, virt_core, owner, ThreadType::HighPriority,
+ std::move(func2)));
+}
+
void KThread::PostDestroy(uintptr_t arg) {
KProcess* owner = reinterpret_cast<KProcess*>(arg & ~1ULL);
const bool resource_limit_release_hint = (arg & 1);
@@ -310,96 +329,110 @@ void KThread::PostDestroy(uintptr_t arg) {
void KThread::Finalize() {
// If the thread has an owner process, unregister it.
- if (parent != nullptr) {
- parent->UnregisterThread(this);
+ if (m_parent != nullptr) {
+ m_parent->UnregisterThread(this);
}
// If the thread has a local region, delete it.
- if (tls_address != 0) {
- ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess());
+ if (m_tls_address != 0) {
+ ASSERT(m_parent->DeleteThreadLocalRegion(m_tls_address).IsSuccess());
}
// Release any waiters.
{
- ASSERT(lock_owner == nullptr);
- KScopedSchedulerLock sl{kernel};
+ ASSERT(m_waiting_lock_info == nullptr);
+ KScopedSchedulerLock sl{m_kernel};
+
+ // Check that we have no kernel waiters.
+ ASSERT(m_num_kernel_waiters == 0);
- auto it = waiter_list.begin();
- while (it != waiter_list.end()) {
- // Get the thread.
- KThread* const waiter = std::addressof(*it);
+ auto it = m_held_lock_info_list.begin();
+ while (it != m_held_lock_info_list.end()) {
+ // Get the lock info.
+ auto* const lock_info = std::addressof(*it);
- // The thread shouldn't be a kernel waiter.
- ASSERT(!IsKernelAddressKey(waiter->GetAddressKey()));
+ // The lock shouldn't have a kernel waiter.
+ ASSERT(!lock_info->GetIsKernelAddressKey());
- // Clear the lock owner.
- waiter->SetLockOwner(nullptr);
+ // Remove all waiters.
+ while (lock_info->GetWaiterCount() != 0) {
+ // Get the front waiter.
+ KThread* const waiter = lock_info->GetHighestPriorityWaiter();
- // Erase the waiter from our list.
- it = waiter_list.erase(it);
+ // Remove it from the lock.
+ if (lock_info->RemoveWaiter(waiter)) {
+ ASSERT(lock_info->GetWaiterCount() == 0);
+ }
+
+ // Cancel the thread's wait.
+ waiter->CancelWait(ResultInvalidState, true);
+ }
- // Cancel the thread's wait.
- waiter->CancelWait(ResultInvalidState, true);
+ // Remove the held lock from our list.
+ it = m_held_lock_info_list.erase(it);
+
+ // Free the lock info.
+ LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
}
}
// Release host emulation members.
- host_context.reset();
+ m_host_context.reset();
// Perform inherited finalization.
KSynchronizationObject::Finalize();
}
bool KThread::IsSignaled() const {
- return signaled;
+ return m_signaled;
}
void KThread::OnTimer() {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// If we're waiting, cancel the wait.
- if (GetState() == ThreadState::Waiting) {
- wait_queue->CancelWait(this, ResultTimedOut, false);
+ if (this->GetState() == ThreadState::Waiting) {
+ m_wait_queue->CancelWait(this, ResultTimedOut, false);
}
}
void KThread::StartTermination() {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Release user exception and unpin, if relevant.
- if (parent != nullptr) {
- parent->ReleaseUserException(this);
- if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
- parent->UnpinCurrentThread(core_id);
+ if (m_parent != nullptr) {
+ m_parent->ReleaseUserException(this);
+ if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
+ m_parent->UnpinCurrentThread(m_core_id);
}
}
// Set state to terminated.
- SetState(ThreadState::Terminated);
+ this->SetState(ThreadState::Terminated);
// Clear the thread's status as running in parent.
- if (parent != nullptr) {
- parent->ClearRunningThread(this);
+ if (m_parent != nullptr) {
+ m_parent->ClearRunningThread(this);
}
// Signal.
- signaled = true;
+ m_signaled = true;
KSynchronizationObject::NotifyAvailable();
// Clear previous thread in KScheduler.
- KScheduler::ClearPreviousThread(kernel, this);
+ KScheduler::ClearPreviousThread(m_kernel, this);
// Register terminated dpc flag.
- RegisterDpc(DpcFlag::Terminated);
+ this->RegisterDpc(DpcFlag::Terminated);
}
void KThread::FinishTermination() {
// Ensure that the thread is not executing on any core.
- if (parent != nullptr) {
+ if (m_parent != nullptr) {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
- core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
+ core_thread = m_kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
@@ -414,182 +447,183 @@ void KThread::DoWorkerTaskImpl() {
}
void KThread::Pin(s32 current_core) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Set ourselves as pinned.
GetStackParameters().is_pinned = true;
// Disable core migration.
- ASSERT(num_core_migration_disables == 0);
+ ASSERT(m_num_core_migration_disables == 0);
{
- ++num_core_migration_disables;
+ ++m_num_core_migration_disables;
// Save our ideal state to restore when we're unpinned.
- original_physical_ideal_core_id = physical_ideal_core_id;
- original_physical_affinity_mask = physical_affinity_mask;
+ m_original_physical_ideal_core_id = m_physical_ideal_core_id;
+ m_original_physical_affinity_mask = m_physical_affinity_mask;
// Bind ourselves to this core.
- const s32 active_core = GetActiveCore();
+ const s32 active_core = this->GetActiveCore();
- SetActiveCore(current_core);
- physical_ideal_core_id = current_core;
- physical_affinity_mask.SetAffinityMask(1ULL << current_core);
+ this->SetActiveCore(current_core);
+ m_physical_ideal_core_id = current_core;
+ m_physical_affinity_mask.SetAffinityMask(1ULL << current_core);
- if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
- original_physical_affinity_mask.GetAffinityMask()) {
- KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask,
- active_core);
+ if (active_core != current_core ||
+ m_physical_affinity_mask.GetAffinityMask() !=
+ m_original_physical_affinity_mask.GetAffinityMask()) {
+ KScheduler::OnThreadAffinityMaskChanged(m_kernel, this,
+ m_original_physical_affinity_mask, active_core);
}
}
// Disallow performing thread suspension.
{
// Update our allow flags.
- suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
- static_cast<u32>(ThreadState::SuspendShift)));
+ m_suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
+ static_cast<u32>(ThreadState::SuspendShift)));
// Update our state.
- UpdateState();
+ this->UpdateState();
}
// TODO(bunnei): Update our SVC access permissions.
- ASSERT(parent != nullptr);
+ ASSERT(m_parent != nullptr);
}
void KThread::Unpin() {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Set ourselves as unpinned.
- GetStackParameters().is_pinned = false;
+ this->GetStackParameters().is_pinned = false;
// Enable core migration.
- ASSERT(num_core_migration_disables == 1);
+ ASSERT(m_num_core_migration_disables == 1);
{
- num_core_migration_disables--;
+ m_num_core_migration_disables--;
// Restore our original state.
- const KAffinityMask old_mask = physical_affinity_mask;
+ const KAffinityMask old_mask = m_physical_affinity_mask;
- physical_ideal_core_id = original_physical_ideal_core_id;
- physical_affinity_mask = original_physical_affinity_mask;
+ m_physical_ideal_core_id = m_original_physical_ideal_core_id;
+ m_physical_affinity_mask = m_original_physical_affinity_mask;
- if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
- const s32 active_core = GetActiveCore();
+ if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+ const s32 active_core = this->GetActiveCore();
- if (!physical_affinity_mask.GetAffinity(active_core)) {
- if (physical_ideal_core_id >= 0) {
- SetActiveCore(physical_ideal_core_id);
+ if (!m_physical_affinity_mask.GetAffinity(active_core)) {
+ if (m_physical_ideal_core_id >= 0) {
+ this->SetActiveCore(m_physical_ideal_core_id);
} else {
- SetActiveCore(static_cast<s32>(
+ this->SetActiveCore(static_cast<s32>(
Common::BitSize<u64>() - 1 -
- std::countl_zero(physical_affinity_mask.GetAffinityMask())));
+ std::countl_zero(m_physical_affinity_mask.GetAffinityMask())));
}
}
- KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
+ KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
}
}
// Allow performing thread suspension (if termination hasn't been requested).
- if (!IsTerminationRequested()) {
+ if (!this->IsTerminationRequested()) {
// Update our allow flags.
- suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
- static_cast<u32>(ThreadState::SuspendShift)));
+ m_suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
+ static_cast<u32>(ThreadState::SuspendShift)));
// Update our state.
- UpdateState();
+ this->UpdateState();
}
// TODO(bunnei): Update our SVC access permissions.
- ASSERT(parent != nullptr);
+ ASSERT(m_parent != nullptr);
// Resume any threads that began waiting on us while we were pinned.
- for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) {
+ for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); ++it) {
it->EndWait(ResultSuccess);
}
}
u16 KThread::GetUserDisableCount() const {
- if (!IsUserThread()) {
+ if (!this->IsUserThread()) {
// We only emulate TLS for user threads
return {};
}
- auto& memory = kernel.System().Memory();
- return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
+ auto& memory = this->GetOwnerProcess()->GetMemory();
+ return memory.Read16(m_tls_address + offsetof(ThreadLocalRegion, disable_count));
}
void KThread::SetInterruptFlag() {
- if (!IsUserThread()) {
+ if (!this->IsUserThread()) {
// We only emulate TLS for user threads
return;
}
- auto& memory = kernel.System().Memory();
- memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
+ auto& memory = this->GetOwnerProcess()->GetMemory();
+ memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
}
void KThread::ClearInterruptFlag() {
- if (!IsUserThread()) {
+ if (!this->IsUserThread()) {
// We only emulate TLS for user threads
return;
}
- auto& memory = kernel.System().Memory();
- memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
+ auto& memory = this->GetOwnerProcess()->GetMemory();
+ memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
}
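GetUserDisableCount(), SetInterruptFlag(), and ClearInterruptFlag() all poke the guest's thread-local region by raw offset; the change here is only where the memory interface comes from (the owner process rather than the global system memory). A sketch of the access pattern against a stand-in guest memory, with the layout mirroring the ThreadLocalRegion struct defined earlier in this file (sizes and the zero-based TLS base are assumptions of the sketch):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct ThreadLocalRegion {
        std::uint16_t disable_count;
        std::uint16_t interrupt_flag;
        // ... remainder of the per-thread TLS block
    };

    // Stand-in for the emulated memory interface.
    struct GuestMemory {
        std::uint8_t data[0x200]{};
        std::uint16_t Read16(std::uint64_t off) const {
            std::uint16_t v;
            std::memcpy(&v, &data[off], sizeof(v));
            return v;
        }
        void Write16(std::uint64_t off, std::uint16_t v) {
            std::memcpy(&data[off], &v, sizeof(v));
        }
    };

    void SetInterruptFlag(GuestMemory& memory, std::uint64_t tls_address) {
        memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
    }

    std::uint16_t GetUserDisableCount(const GuestMemory& memory, std::uint64_t tls_address) {
        return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
    }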
Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Get the virtual mask.
- *out_ideal_core = virtual_ideal_core_id;
- *out_affinity_mask = virtual_affinity_mask;
+ *out_ideal_core = m_virtual_ideal_core_id;
+ *out_affinity_mask = m_virtual_affinity_mask;
R_SUCCEED();
}
Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
- KScopedSchedulerLock sl{kernel};
- ASSERT(num_core_migration_disables >= 0);
+ KScopedSchedulerLock sl{m_kernel};
+ ASSERT(m_num_core_migration_disables >= 0);
// Select between core mask and original core mask.
- if (num_core_migration_disables == 0) {
- *out_ideal_core = physical_ideal_core_id;
- *out_affinity_mask = physical_affinity_mask.GetAffinityMask();
+ if (m_num_core_migration_disables == 0) {
+ *out_ideal_core = m_physical_ideal_core_id;
+ *out_affinity_mask = m_physical_affinity_mask.GetAffinityMask();
} else {
- *out_ideal_core = original_physical_ideal_core_id;
- *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
+ *out_ideal_core = m_original_physical_ideal_core_id;
+ *out_affinity_mask = m_original_physical_affinity_mask.GetAffinityMask();
}
R_SUCCEED();
}
-Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
- ASSERT(parent != nullptr);
+Result KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
+ ASSERT(m_parent != nullptr);
ASSERT(v_affinity_mask != 0);
- KScopedLightLock lk(activity_pause_lock);
+ KScopedLightLock lk(m_activity_pause_lock);
// Set the core mask.
u64 p_affinity_mask = 0;
{
- KScopedSchedulerLock sl(kernel);
- ASSERT(num_core_migration_disables >= 0);
+ KScopedSchedulerLock sl(m_kernel);
+ ASSERT(m_num_core_migration_disables >= 0);
// If we're updating, set our ideal virtual core.
- if (core_id_ != Svc::IdealCoreNoUpdate) {
- virtual_ideal_core_id = core_id_;
+ if (core_id != Svc::IdealCoreNoUpdate) {
+ m_virtual_ideal_core_id = core_id;
} else {
// Preserve our ideal core id.
- core_id_ = virtual_ideal_core_id;
- R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
+ core_id = m_virtual_ideal_core_id;
+ R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
}
// Set our affinity mask.
- virtual_affinity_mask = v_affinity_mask;
+ m_virtual_affinity_mask = v_affinity_mask;
// Translate the virtual core to a physical core.
- if (core_id_ >= 0) {
- core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
+ if (core_id >= 0) {
+ core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id];
}
// Translate the virtual affinity mask to a physical one.
@@ -600,43 +634,43 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
}
// If we haven't disabled migration, perform an affinity change.
- if (num_core_migration_disables == 0) {
- const KAffinityMask old_mask = physical_affinity_mask;
+ if (m_num_core_migration_disables == 0) {
+ const KAffinityMask old_mask = m_physical_affinity_mask;
// Set our new ideals.
- physical_ideal_core_id = core_id_;
- physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+ m_physical_ideal_core_id = core_id;
+ m_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
- if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+ if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
const s32 active_core = GetActiveCore();
- if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) {
+ if (active_core >= 0 && !m_physical_affinity_mask.GetAffinity(active_core)) {
const s32 new_core = static_cast<s32>(
- physical_ideal_core_id >= 0
- ? physical_ideal_core_id
+ m_physical_ideal_core_id >= 0
+ ? m_physical_ideal_core_id
: Common::BitSize<u64>() - 1 -
- std::countl_zero(physical_affinity_mask.GetAffinityMask()));
+ std::countl_zero(m_physical_affinity_mask.GetAffinityMask()));
SetActiveCore(new_core);
}
- KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
+ KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
}
} else {
// Otherwise, we edit the original affinity for restoration later.
- original_physical_ideal_core_id = core_id_;
- original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+ m_original_physical_ideal_core_id = core_id;
+ m_original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
}
}
// Update the pinned waiter list.
- ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list));
+ ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel, std::addressof(m_pinned_waiter_list));
{
bool retry_update{};
do {
// Lock the scheduler.
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// Don't do any further management if our termination has been requested.
- R_SUCCEED_IF(IsTerminationRequested());
+ R_SUCCEED_IF(this->IsTerminationRequested());
// By default, we won't need to retry.
retry_update = false;
@@ -646,7 +680,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
++thread_core) {
- if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
+ if (m_kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -656,14 +690,14 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
// new mask.
if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) {
// If the thread is pinned, we want to wait until it's not pinned.
- if (GetStackParameters().is_pinned) {
+ if (this->GetStackParameters().is_pinned) {
// Verify that the current thread isn't terminating.
- R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
+ R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
ResultTerminationRequested);
// Wait until the thread isn't pinned any more.
- pinned_waiter_list.push_back(GetCurrentThread(kernel));
- GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
+ m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
+ GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
} else {
// If the thread isn't pinned, release the scheduler lock and retry until it's
// not current.
@@ -679,111 +713,137 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
void KThread::SetBasePriority(s32 value) {
ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Change our base priority.
- base_priority = value;
+ m_base_priority = value;
// Perform a priority restoration.
- RestorePriority(kernel, this);
+ RestorePriority(m_kernel, this);
+}
+
+KThread* KThread::GetLockOwner() const {
+ return m_waiting_lock_info != nullptr ? m_waiting_lock_info->GetOwner() : nullptr;
+}
+
+void KThread::IncreaseBasePriority(s32 priority) {
+ ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority);
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+ ASSERT(!this->GetStackParameters().is_pinned);
+
+ // Set our base priority.
+ if (m_base_priority > priority) {
+ m_base_priority = priority;
+
+ // Perform a priority restoration.
+ RestorePriority(m_kernel, this);
+ }
}
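// Illustrative: unlike SetBasePriority(), this can only raise urgency (lower
// value == higher priority) and never lowers it. RequestTerminate() below
// relies on that: e.g. a thread at base priority 44 is bumped to
// TerminatingThreadPriority, while one already more urgent is left untouched.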
void KThread::RequestSuspend(SuspendType type) {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Note the request in our flags.
- suspend_request_flags |=
- (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
+ m_suspend_request_flags |=
+ (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
// Try to perform the suspend.
- TrySuspend();
+ this->TrySuspend();
}
void KThread::Resume(SuspendType type) {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Clear the request in our flags.
- suspend_request_flags &=
- ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
+ m_suspend_request_flags &=
+ ~(1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
// Update our state.
this->UpdateState();
}
void KThread::WaitCancel() {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Check if we're waiting and cancellable.
- if (this->GetState() == ThreadState::Waiting && cancellable) {
- wait_cancelled = false;
- wait_queue->CancelWait(this, ResultCancelled, true);
+ if (this->GetState() == ThreadState::Waiting && m_cancellable) {
+ m_wait_cancelled = false;
+ m_wait_queue->CancelWait(this, ResultCancelled, true);
} else {
// Otherwise, note that we cancelled a wait.
- wait_cancelled = true;
+ m_wait_cancelled = true;
}
}
void KThread::TrySuspend() {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
- ASSERT(IsSuspendRequested());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+ ASSERT(this->IsSuspendRequested());
// Ensure that we have no waiters.
- if (GetNumKernelWaiters() > 0) {
+ if (this->GetNumKernelWaiters() > 0) {
return;
}
- ASSERT(GetNumKernelWaiters() == 0);
+ ASSERT(this->GetNumKernelWaiters() == 0);
// Perform the suspend.
this->UpdateState();
}
void KThread::UpdateState() {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Set our suspend flags in state.
- const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+ const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
const auto new_state =
static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
- thread_state.store(new_state, std::memory_order_relaxed);
+ m_thread_state.store(new_state, std::memory_order_relaxed);
// Note the state change in scheduler.
if (new_state != old_state) {
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
}
}
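// Sketch (illustrative) of the folding UpdateState() performs: suspend flags
// are stored pre-shifted as (1U << (ThreadState::SuspendShift + type)), so
//
//   raw_state = GetSuspendFlags() | (old_state & ThreadState::Mask);
//
// GetState() recovers the base state by masking, while any allowed-and-
// requested suspend bit keeps the thread parked until Resume() (or a cleared
// m_suspend_allowed_flags) removes it and UpdateState() runs again.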
void KThread::Continue() {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Clear our suspend flags in state.
- const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
- thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
+ const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
+ m_thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
// Note the state change in scheduler.
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
}
-void KThread::WaitUntilSuspended() {
- // Make sure we have a suspend requested.
- ASSERT(IsSuspendRequested());
+void KThread::CloneFpuStatus() {
+ // We shouldn't reach here when starting kernel threads.
+ ASSERT(this->GetOwnerProcess() != nullptr);
+ ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
- // Loop until the thread is not executing on any core.
- for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
- KThread* core_thread{};
- do {
- core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
- } while (core_thread == this);
+ if (this->GetOwnerProcess()->Is64BitProcess()) {
+ // Clone FPSR and FPCR.
+ ThreadContext64 cur_ctx{};
+ m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
+
+ this->GetContext64().fpcr = cur_ctx.fpcr;
+ this->GetContext64().fpsr = cur_ctx.fpsr;
+ } else {
+ // Clone FPSCR.
+ ThreadContext32 cur_ctx{};
+ m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
+
+ this->GetContext32().fpscr = cur_ctx.fpscr;
}
}
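// Illustrative: a newly created thread inherits only the floating-point
// status/control registers from its creator (FPCR/FPSR on AArch64, FPSCR on
// AArch32); general-purpose and vector registers start from the fresh context
// set up at initialization.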
Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Lock ourselves.
- KScopedLightLock lk(activity_pause_lock);
+ KScopedLightLock lk(m_activity_pause_lock);
// Set the activity.
{
// Lock the scheduler.
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// Verify our state.
const auto cur_state = this->GetState();
@@ -810,13 +870,13 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
// If the thread is now paused, update the pinned waiter list.
if (activity == Svc::ThreadActivity::Paused) {
- ThreadQueueImplForKThreadSetProperty wait_queue_(kernel,
- std::addressof(pinned_waiter_list));
+ ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel,
+ std::addressof(m_pinned_waiter_list));
- bool thread_is_current;
+ bool thread_is_current{};
do {
// Lock the scheduler.
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// Don't do any further management if our termination has been requested.
R_SUCCEED_IF(this->IsTerminationRequested());
@@ -827,17 +887,17 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Check whether the thread is pinned.
if (this->GetStackParameters().is_pinned) {
// Verify that the current thread isn't terminating.
- R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
+ R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
ResultTerminationRequested);
// Wait until the thread isn't pinned any more.
- pinned_waiter_list.push_back(GetCurrentThread(kernel));
- GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
+ m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
+ GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
} else {
// Check if the thread is currently running.
// If it is, we'll need to retry.
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
+ if (m_kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -849,34 +909,32 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
R_SUCCEED();
}
-Result KThread::GetThreadContext3(std::vector<u8>& out) {
+Result KThread::GetThreadContext3(Common::ScratchBuffer<u8>& out) {
// Lock ourselves.
- KScopedLightLock lk{activity_pause_lock};
+ KScopedLightLock lk{m_activity_pause_lock};
// Get the context.
{
// Lock the scheduler.
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Verify that we're suspended.
- R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+ R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
// If we're not terminating, get the thread's user context.
- if (!IsTerminationRequested()) {
- if (parent->Is64BitProcess()) {
+ if (!this->IsTerminationRequested()) {
+ if (m_parent->Is64BitProcess()) {
// Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
auto context = GetContext64();
context.pstate &= 0xFF0FFE20;
-
- out.resize(sizeof(context));
- std::memcpy(out.data(), &context, sizeof(context));
+ out.resize_destructive(sizeof(context));
+ std::memcpy(out.data(), std::addressof(context), sizeof(context));
} else {
// Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
auto context = GetContext32();
context.cpsr &= 0xFF0FFE20;
-
- out.resize(sizeof(context));
- std::memcpy(out.data(), &context, sizeof(context));
+ out.resize_destructive(sizeof(context));
+ std::memcpy(out.data(), std::addressof(context), sizeof(context));
}
}
}
@@ -884,51 +942,89 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
R_SUCCEED();
}
-void KThread::AddWaiterImpl(KThread* thread) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+
+ // Set ourselves as the lock's owner.
+ lock_info->SetOwner(this);
+
+ // Add the lock to our held list.
+ m_held_lock_info_list.push_front(*lock_info);
+}
+
+KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(KProcessAddress address_key,
+ bool is_kernel_address_key) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
- // Find the right spot to insert the waiter.
- auto it = waiter_list.begin();
- while (it != waiter_list.end()) {
- if (it->GetPriority() > thread->GetPriority()) {
- break;
+ // Try to find an existing held lock.
+ for (auto& held_lock : m_held_lock_info_list) {
+ if (held_lock.GetAddressKey() == address_key &&
+ held_lock.GetIsKernelAddressKey() == is_kernel_address_key) {
+ return std::addressof(held_lock);
}
- it++;
}
+ return nullptr;
+}
+
+void KThread::AddWaiterImpl(KThread* thread) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+ ASSERT(thread->GetConditionVariableTree() == nullptr);
+
+ // Get the thread's address key.
+ const auto address_key = thread->GetAddressKey();
+ const auto is_kernel_address_key = thread->GetIsKernelAddressKey();
+
// Keep track of how many kernel waiters we have.
- if (IsKernelAddressKey(thread->GetAddressKey())) {
- ASSERT((num_kernel_waiters++) >= 0);
- KScheduler::SetSchedulerUpdateNeeded(kernel);
+ if (is_kernel_address_key) {
+ ASSERT((m_num_kernel_waiters++) >= 0);
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ }
+
+ // Get the relevant lock info.
+ auto* lock_info = this->FindHeldLock(address_key, is_kernel_address_key);
+ if (lock_info == nullptr) {
+ // Create a new lock for the address key.
+ lock_info =
+ LockWithPriorityInheritanceInfo::Create(m_kernel, address_key, is_kernel_address_key);
+
+ // Add the new lock to our list.
+ this->AddHeldLock(lock_info);
}
- // Insert the waiter.
- waiter_list.insert(it, *thread);
- thread->SetLockOwner(this);
+ // Add the thread as waiter to the lock info.
+ lock_info->AddWaiter(thread);
}
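// Shape of the new bookkeeping (illustrative):
//
//   owner KThread
//     m_held_lock_info_list -> one LockWithPriorityInheritanceInfo per
//                              (address key, is-kernel) pair, each holding a
//                              priority-sorted tree of waiting threads
//   waiter KThread
//     m_waiting_lock_info   -> the lock info it blocks on (this replaces the
//                              old per-thread lock_owner/waiter_list pair)
//
// FindHeldLock() above is the find-or-create lookup that keeps exactly one
// lock info alive per contended key.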
void KThread::RemoveWaiterImpl(KThread* thread) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Keep track of how many kernel waiters we have.
- if (IsKernelAddressKey(thread->GetAddressKey())) {
- ASSERT((num_kernel_waiters--) > 0);
- KScheduler::SetSchedulerUpdateNeeded(kernel);
+ if (thread->GetIsKernelAddressKey()) {
+ ASSERT((m_num_kernel_waiters--) > 0);
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
}
+ // Get the info for the lock the thread is waiting on.
+ auto* lock_info = thread->GetWaitingLockInfo();
+ ASSERT(lock_info->GetOwner() == this);
+
// Remove the waiter.
- waiter_list.erase(waiter_list.iterator_to(*thread));
- thread->SetLockOwner(nullptr);
+ if (lock_info->RemoveWaiter(thread)) {
+ m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
+ LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
+ }
}
-void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
- ASSERT(kernel_ctx.GlobalSchedulerContext().IsLocked());
+void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
- while (true) {
+ while (thread != nullptr) {
// We want to inherit priority where possible.
s32 new_priority = thread->GetBasePriority();
- if (thread->HasWaiters()) {
- new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+ for (const auto& held_lock : thread->m_held_lock_info_list) {
+ new_priority =
+ std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
}
// If the priority we would inherit is not different from ours, don't do anything.
@@ -936,9 +1032,18 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
return;
}
+ // Get the owner of whatever lock this thread is waiting on.
+ KThread* const lock_owner = thread->GetLockOwner();
+
+ // If the thread is waiting on some lock, remove it as a waiter to prevent violating red
+ // black tree invariants.
+ if (lock_owner != nullptr) {
+ lock_owner->RemoveWaiterImpl(thread);
+ }
+
// Ensure we don't violate condition variable red black tree invariants.
if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
- BeforeUpdatePriority(kernel_ctx, cv_tree, thread);
+ BeforeUpdatePriority(kernel, cv_tree, thread);
}
// Change the priority.
@@ -947,148 +1052,175 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
// Restore the condition variable, if relevant.
if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
- AfterUpdatePriority(kernel_ctx, cv_tree, thread);
+ AfterUpdatePriority(kernel, cv_tree, thread);
}
- // Update the scheduler.
- KScheduler::OnThreadPriorityChanged(kernel_ctx, thread, old_priority);
-
- // Keep the lock owner up to date.
- KThread* lock_owner = thread->GetLockOwner();
- if (lock_owner == nullptr) {
- return;
+ // If we removed the thread from some lock's waiting list, add it back.
+ if (lock_owner != nullptr) {
+ lock_owner->AddWaiterImpl(thread);
}
- // Update the thread in the lock owner's sorted list, and continue inheriting.
- lock_owner->RemoveWaiterImpl(thread);
- lock_owner->AddWaiterImpl(thread);
+ // Update the scheduler.
+ KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
+
+ // Continue inheriting priority.
thread = lock_owner;
}
}
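// Standalone sketch of the inheritance rule implemented above, modelled with
// plain std containers instead of kernel objects (illustrative only; all
// names here are invented, and the kernel's priority-sorted RB tree is
// approximated with a vector scan):

#include <algorithm>
#include <vector>

struct Lock;

struct Thread {
    int base_priority;
    int priority;
    std::vector<Lock*> held_locks; // cf. m_held_lock_info_list
    Lock* waiting_on = nullptr;    // cf. m_waiting_lock_info
};

struct Lock {
    Thread* owner = nullptr;
    std::vector<Thread*> waiters;
};

// Walk the chain of lock owners, letting each inherit the most urgent
// (numerically lowest) priority among waiters on all locks it holds.
void RestorePriorityModel(Thread* thread) {
    while (thread != nullptr) {
        int wanted = thread->base_priority;
        for (const Lock* lock : thread->held_locks) {
            for (const Thread* waiter : lock->waiters) {
                wanted = std::min(wanted, waiter->priority);
            }
        }
        if (wanted == thread->priority) {
            return; // Already consistent; nothing to propagate.
        }
        thread->priority = wanted;
        // Continue with the owner of whatever lock this thread waits on,
        // so inheritance propagates transitively up the chain.
        thread = thread->waiting_on != nullptr ? thread->waiting_on->owner : nullptr;
    }
}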
void KThread::AddWaiter(KThread* thread) {
- AddWaiterImpl(thread);
- RestorePriority(kernel, this);
+ this->AddWaiterImpl(thread);
+
+ // If the thread has a higher priority than us, we should inherit.
+ if (thread->GetPriority() < this->GetPriority()) {
+ RestorePriority(m_kernel, this);
+ }
}
void KThread::RemoveWaiter(KThread* thread) {
- RemoveWaiterImpl(thread);
- RestorePriority(kernel, this);
+ this->RemoveWaiterImpl(thread);
+
+ // If our priority is the same as the thread's (and we've inherited), we may need to restore to
+ // lower priority.
+ if (this->GetPriority() == thread->GetPriority() &&
+ this->GetPriority() < this->GetBasePriority()) {
+ RestorePriority(m_kernel, this);
+ }
}
-KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
+ bool is_kernel_address_key_) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
- s32 num_waiters{};
- KThread* next_lock_owner{};
- auto it = waiter_list.begin();
- while (it != waiter_list.end()) {
- if (it->GetAddressKey() == key) {
- KThread* thread = std::addressof(*it);
+ // Get the relevant lock info.
+ auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_);
+ if (lock_info == nullptr) {
+ *out_has_waiters = false;
+ return nullptr;
+ }
- // Keep track of how many kernel waiters we have.
- if (IsKernelAddressKey(thread->GetAddressKey())) {
- ASSERT((num_kernel_waiters--) > 0);
- KScheduler::SetSchedulerUpdateNeeded(kernel);
- }
- it = waiter_list.erase(it);
+ // Remove the lock info from our held list.
+ m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
- // Update the next lock owner.
- if (next_lock_owner == nullptr) {
- next_lock_owner = thread;
- next_lock_owner->SetLockOwner(nullptr);
- } else {
- next_lock_owner->AddWaiterImpl(thread);
- }
- num_waiters++;
- } else {
- it++;
+ // Keep track of how many kernel waiters we have.
+ if (lock_info->GetIsKernelAddressKey()) {
+ m_num_kernel_waiters -= lock_info->GetWaiterCount();
+ ASSERT(m_num_kernel_waiters >= 0);
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ }
+
+ ASSERT(lock_info->GetWaiterCount() > 0);
+
+ // Remove the highest priority waiter from the lock to be the next owner.
+ KThread* next_lock_owner = lock_info->GetHighestPriorityWaiter();
+ if (lock_info->RemoveWaiter(next_lock_owner)) {
+ // The new owner was the only waiter.
+ *out_has_waiters = false;
+
+ // Free the lock info, since it has no waiters.
+ LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
+ } else {
+ // There are additional waiters on the lock.
+ *out_has_waiters = true;
+
+ // Add the lock to the new owner's held list.
+ next_lock_owner->AddHeldLock(lock_info);
+
+ // Keep track of any kernel waiters for the new owner.
+ if (lock_info->GetIsKernelAddressKey()) {
+ next_lock_owner->m_num_kernel_waiters += lock_info->GetWaiterCount();
+ ASSERT(next_lock_owner->m_num_kernel_waiters > 0);
+
+ // NOTE: No need to set scheduler update needed, because we will have already done so
+ // when removing earlier.
}
}
- // Do priority updates, if we have a next owner.
- if (next_lock_owner) {
- RestorePriority(kernel, this);
- RestorePriority(kernel, next_lock_owner);
+ // If our priority is the same as the next owner's (and we've inherited), we may need to restore
+ // to lower priority.
+ if (this->GetPriority() == next_lock_owner->GetPriority() &&
+ this->GetPriority() < this->GetBasePriority()) {
+ RestorePriority(m_kernel, this);
+ // NOTE: No need to restore priority on the next lock owner, because it was already the
+ // highest priority waiter on the lock.
}
- // Return output.
- *out_num_waiters = num_waiters;
+ // Return the next lock owner.
return next_lock_owner;
}
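// Illustrative hand-off, assuming threads B and C blocked on the same key
// with B more urgent: RemoveWaiterByKey(&has_waiters, key) detaches the lock
// info from the releasing owner, pops B as the next owner, re-attaches the
// info (still holding C) to B's held-lock list, and reports has_waiters ==
// true so the caller can keep the guest mutex flagged as contended.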
Result KThread::Run() {
while (true) {
- KScopedSchedulerLock lk{kernel};
+ KScopedSchedulerLock lk{m_kernel};
// If either this thread or the current thread are requesting termination, note it.
- R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested);
- R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
+ R_UNLESS(!this->IsTerminationRequested(), ResultTerminationRequested);
+ R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
// Ensure our thread state is correct.
- R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState);
+ R_UNLESS(this->GetState() == ThreadState::Initialized, ResultInvalidState);
// If the current thread has been asked to suspend, suspend it and retry.
- if (GetCurrentThread(kernel).IsSuspended()) {
- GetCurrentThread(kernel).UpdateState();
+ if (GetCurrentThread(m_kernel).IsSuspended()) {
+ GetCurrentThread(m_kernel).UpdateState();
continue;
}
// If we're not a kernel thread and we've been asked to suspend, suspend ourselves.
if (KProcess* owner = this->GetOwnerProcess(); owner != nullptr) {
- if (IsUserThread() && IsSuspended()) {
+ if (this->IsUserThread() && this->IsSuspended()) {
this->UpdateState();
}
owner->IncrementRunningThreadCount();
}
// Set our state and finish.
- SetState(ThreadState::Runnable);
+ this->SetState(ThreadState::Runnable);
R_SUCCEED();
}
}
void KThread::Exit() {
- ASSERT(this == GetCurrentThreadPointer(kernel));
+ ASSERT(this == GetCurrentThreadPointer(m_kernel));
// Release the thread resource hint, running thread count from parent.
- if (parent != nullptr) {
- parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1);
- resource_limit_release_hint = true;
- parent->DecrementRunningThreadCount();
+ if (m_parent != nullptr) {
+ m_parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1);
+ m_resource_limit_release_hint = true;
+ m_parent->DecrementRunningThreadCount();
}
// Perform termination.
{
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Disallow all suspension.
- suspend_allowed_flags = 0;
+ m_suspend_allowed_flags = 0;
this->UpdateState();
// Disallow all suspension.
- suspend_allowed_flags = 0;
+ m_suspend_allowed_flags = 0;
// Start termination.
- StartTermination();
+ this->StartTermination();
// Register the thread as a work task.
- KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this);
+ KWorkerTaskManager::AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
}
UNREACHABLE_MSG("KThread::Exit() would return");
}
Result KThread::Terminate() {
- ASSERT(this != GetCurrentThreadPointer(kernel));
+ ASSERT(this != GetCurrentThreadPointer(m_kernel));
// Request the thread terminate if it hasn't already.
if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
// If the thread isn't terminated, wait for it to terminate.
s32 index;
KSynchronizationObject* objects[] = {this};
- R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
+ R_TRY(KSynchronizationObject::Wait(m_kernel, std::addressof(index), objects, 1,
Svc::WaitInfinite));
}
@@ -1096,22 +1228,22 @@ Result KThread::Terminate() {
}
ThreadState KThread::RequestTerminate() {
- ASSERT(this != GetCurrentThreadPointer(kernel));
+ ASSERT(this != GetCurrentThreadPointer(m_kernel));
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Determine if this is the first termination request.
const bool first_request = [&]() -> bool {
// Perform an atomic compare-and-swap from false to true.
bool expected = false;
- return termination_requested.compare_exchange_strong(expected, true);
+ return m_termination_requested.compare_exchange_strong(expected, true);
}();
// If this is the first request, start termination procedure.
if (first_request) {
// If the thread is in initialized state, just change state to terminated.
if (this->GetState() == ThreadState::Initialized) {
- thread_state = ThreadState::Terminated;
+ m_thread_state = ThreadState::Terminated;
return ThreadState::Terminated;
}
@@ -1125,27 +1257,25 @@ ThreadState KThread::RequestTerminate() {
// If the thread is suspended, continue it.
if (this->IsSuspended()) {
- suspend_allowed_flags = 0;
+ m_suspend_allowed_flags = 0;
this->UpdateState();
}
// Change the thread's priority to be higher than any system thread's.
- if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
- this->SetBasePriority(TerminatingThreadPriority);
- }
+ this->IncreaseBasePriority(TerminatingThreadPriority);
// If the thread is runnable, send a termination interrupt to other cores.
if (this->GetState() == ThreadState::Runnable) {
- if (const u64 core_mask =
- physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
+ if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() &
+ ~(1ULL << GetCurrentCoreId(m_kernel));
core_mask != 0) {
- Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
+ Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask);
}
}
// Wake up the thread.
if (this->GetState() == ThreadState::Waiting) {
- wait_queue->CancelWait(this, ResultTerminationRequested, true);
+ m_wait_queue->CancelWait(this, ResultTerminationRequested, true);
}
}
@@ -1153,14 +1283,15 @@ ThreadState KThread::RequestTerminate() {
}
Result KThread::Sleep(s64 timeout) {
- ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
- ASSERT(this == GetCurrentThreadPointer(kernel));
+ ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+ ASSERT(this == GetCurrentThreadPointer(m_kernel));
ASSERT(timeout > 0);
- ThreadQueueImplForKThreadSleep wait_queue_(kernel);
+ ThreadQueueImplForKThreadSleep wait_queue(m_kernel);
+ KHardwareTimer* timer{};
{
// Setup the scheduling lock and sleep.
- KScopedSchedulerLockAndSleep slp(kernel, this, timeout);
+ KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), this, timeout);
// Check if the thread should terminate.
if (this->IsTerminationRequested()) {
@@ -1169,102 +1300,107 @@ Result KThread::Sleep(s64 timeout) {
}
// Wait for the sleep to end.
- this->BeginWait(std::addressof(wait_queue_));
- SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
+ wait_queue.SetHardwareTimer(timer);
+ this->BeginWait(std::addressof(wait_queue));
+ this->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
}
R_SUCCEED();
}
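// Illustrative: KScopedSchedulerLockAndSleep now hands back the hardware
// timer it armed, and the wait queue keeps it so that a wait which ends or
// is cancelled early can also cancel the pending timeout task rather than
// letting it fire spuriously.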
void KThread::RequestDummyThreadWait() {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
ASSERT(this->IsDummyThread());
// We will block when the scheduler lock is released.
- dummy_thread_runnable.store(false);
+ std::scoped_lock lock{m_dummy_thread_mutex};
+ m_dummy_thread_runnable = false;
}
void KThread::DummyThreadBeginWait() {
- if (!this->IsDummyThread() || kernel.IsPhantomModeForSingleCore()) {
+ if (!this->IsDummyThread() || m_kernel.IsPhantomModeForSingleCore()) {
// Occurs in single core mode.
return;
}
// Block until runnable is no longer false.
- dummy_thread_runnable.wait(false);
+ std::unique_lock lock{m_dummy_thread_mutex};
+ m_dummy_thread_cv.wait(lock, [this] { return m_dummy_thread_runnable; });
}
void KThread::DummyThreadEndWait() {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
ASSERT(this->IsDummyThread());
// Wake up the waiting thread.
- dummy_thread_runnable.store(true);
- dummy_thread_runnable.notify_one();
+ {
+ std::scoped_lock lock{m_dummy_thread_mutex};
+ m_dummy_thread_runnable = true;
+ }
+ m_dummy_thread_cv.notify_one();
}
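// Standalone sketch of the gate used by the dummy-thread waits above, using
// the same mutex + condition-variable pattern (illustrative only):

#include <condition_variable>
#include <mutex>

class DummyThreadGate {
public:
    // cf. RequestDummyThreadWait: mark not-runnable under the lock.
    void RequestWait() {
        std::scoped_lock lk{m_mutex};
        m_runnable = false;
    }

    // cf. DummyThreadBeginWait: block until marked runnable again.
    void BeginWait() {
        std::unique_lock lk{m_mutex};
        m_cv.wait(lk, [this] { return m_runnable; });
    }

    // cf. DummyThreadEndWait: mark runnable, then wake the waiter.
    void EndWait() {
        {
            std::scoped_lock lk{m_mutex};
            m_runnable = true;
        }
        m_cv.notify_one();
    }

private:
    std::mutex m_mutex;
    std::condition_variable m_cv;
    bool m_runnable = true;
};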
void KThread::BeginWait(KThreadQueue* queue) {
// Set our state as waiting.
- SetState(ThreadState::Waiting);
+ this->SetState(ThreadState::Waiting);
// Set our wait queue.
- wait_queue = queue;
+ m_wait_queue = queue;
}
-void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) {
+void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result) {
// Lock the scheduler.
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// If we're waiting, notify our queue that we're available.
- if (GetState() == ThreadState::Waiting) {
- wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
+ if (this->GetState() == ThreadState::Waiting) {
+ m_wait_queue->NotifyAvailable(this, signaled_object, wait_result);
}
}
-void KThread::EndWait(Result wait_result_) {
+void KThread::EndWait(Result wait_result) {
// Lock the scheduler.
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// If we're waiting, notify our queue that we're available.
- if (GetState() == ThreadState::Waiting) {
- if (wait_queue == nullptr) {
+ if (this->GetState() == ThreadState::Waiting) {
+ if (m_wait_queue == nullptr) {
// This should never happen, but avoid a hard crash below to get this logged.
ASSERT_MSG(false, "wait_queue is nullptr!");
return;
}
- wait_queue->EndWait(this, wait_result_);
+ m_wait_queue->EndWait(this, wait_result);
}
}
-void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
+void KThread::CancelWait(Result wait_result, bool cancel_timer_task) {
// Lock the scheduler.
- KScopedSchedulerLock sl(kernel);
+ KScopedSchedulerLock sl(m_kernel);
// If we're waiting, notify our queue that we're available.
- if (GetState() == ThreadState::Waiting) {
- wait_queue->CancelWait(this, wait_result_, cancel_timer_task);
+ if (this->GetState() == ThreadState::Waiting) {
+ m_wait_queue->CancelWait(this, wait_result, cancel_timer_task);
}
}
void KThread::SetState(ThreadState state) {
- KScopedSchedulerLock sl{kernel};
+ KScopedSchedulerLock sl{m_kernel};
// Clear debugging state
- SetMutexWaitAddressForDebugging({});
- SetWaitReasonForDebugging({});
+ this->SetWaitReasonForDebugging({});
- const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
- thread_state.store(
+ const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
+ m_thread_state.store(
static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
std::memory_order_relaxed);
- if (thread_state.load(std::memory_order_relaxed) != old_state) {
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ if (m_thread_state.load(std::memory_order_relaxed) != old_state) {
+ KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
}
}
std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
- return host_context;
+ return m_host_context;
}
void SetCurrentThread(KernelCore& kernel, KThread* thread) {
@@ -1279,26 +1415,39 @@ KThread& GetCurrentThread(KernelCore& kernel) {
return *GetCurrentThreadPointer(kernel);
}
+KProcess* GetCurrentProcessPointer(KernelCore& kernel) {
+ return GetCurrentThread(kernel).GetOwnerProcess();
+}
+
+KProcess& GetCurrentProcess(KernelCore& kernel) {
+ return *GetCurrentProcessPointer(kernel);
+}
+
s32 GetCurrentCoreId(KernelCore& kernel) {
return GetCurrentThread(kernel).GetCurrentCore();
}
+Core::Memory::Memory& GetCurrentMemory(KernelCore& kernel) {
+ // TODO: per-process memory
+ return kernel.System().ApplicationMemory();
+}
+
KScopedDisableDispatch::~KScopedDisableDispatch() {
// If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) {
+ if (m_kernel.IsShuttingDown()) {
return;
}
- if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
- auto* scheduler = kernel.CurrentScheduler();
+ if (GetCurrentThread(m_kernel).GetDisableDispatchCount() <= 1) {
+ auto* scheduler = m_kernel.CurrentScheduler();
- if (scheduler && !kernel.IsPhantomModeForSingleCore()) {
+ if (scheduler && !m_kernel.IsPhantomModeForSingleCore()) {
scheduler->RescheduleCurrentCore();
} else {
- KScheduler::RescheduleCurrentHLEThread(kernel);
+ KScheduler::RescheduleCurrentHLEThread(m_kernel);
}
} else {
- GetCurrentThread(kernel).EnableDispatch();
+ GetCurrentThread(m_kernel).EnableDispatch();
}
}
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 7cd94a340..dd662b3f8 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -12,10 +12,10 @@
#include <utility>
#include <vector>
-#include <boost/intrusive/list.hpp>
+#include "common/intrusive_list.h"
-#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
+#include "common/scratch_buffer.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
@@ -23,6 +23,7 @@
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_timer_task.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_common.h"
@@ -34,6 +35,9 @@ class Fiber;
}
namespace Core {
+namespace Memory {
+class Memory;
+}
class ARM_Interface;
class System;
} // namespace Core
@@ -46,7 +50,7 @@ class KProcess;
class KScheduler;
class KThreadQueue;
-using KThreadFunction = VAddr;
+using KThreadFunction = KProcessAddress;
enum class ThreadType : u32 {
Main = 0,
@@ -108,12 +112,15 @@ enum class StepState : u32 {
};
void SetCurrentThread(KernelCore& kernel, KThread* thread);
-[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
-[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
-[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
+KThread* GetCurrentThreadPointer(KernelCore& kernel);
+KThread& GetCurrentThread(KernelCore& kernel);
+KProcess* GetCurrentProcessPointer(KernelCore& kernel);
+KProcess& GetCurrentProcess(KernelCore& kernel);
+s32 GetCurrentCoreId(KernelCore& kernel);
+Core::Memory::Memory& GetCurrentMemory(KernelCore& kernel);
class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>,
- public boost::intrusive::list_base_hook<>,
+ public Common::IntrusiveListBaseNode<KThread>,
public KTimerTask {
KERNEL_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject);
@@ -126,24 +133,20 @@ public:
static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2;
- explicit KThread(KernelCore& kernel_);
+ explicit KThread(KernelCore& kernel);
~KThread() override;
public:
using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
- using WaiterList = boost::intrusive::list<KThread>;
-
- void SetName(std::string new_name) {
- name = std::move(new_name);
- }
+ using WaiterList = Common::IntrusiveListBaseTraits<KThread>::ListType;
/**
* Gets the thread's current priority
* @return The current thread's priority
*/
- [[nodiscard]] s32 GetPriority() const {
- return priority;
+ s32 GetPriority() const {
+ return m_priority;
}
/**
@@ -151,23 +154,23 @@ public:
* @param priority The new priority.
*/
void SetPriority(s32 value) {
- priority = value;
+ m_priority = value;
}
/**
* Gets the thread's nominal priority.
* @return The current thread's nominal priority.
*/
- [[nodiscard]] s32 GetBasePriority() const {
- return base_priority;
+ s32 GetBasePriority() const {
+ return m_base_priority;
}
/**
* Gets the thread's thread ID
* @return The thread's ID
*/
- [[nodiscard]] u64 GetThreadID() const {
- return thread_id;
+ u64 GetThreadId() const {
+ return m_thread_id;
}
void ContinueIfHasKernelWaiters() {
@@ -178,7 +181,7 @@ public:
void SetBasePriority(s32 value);
- [[nodiscard]] Result Run();
+ Result Run();
void Exit();
@@ -186,22 +189,22 @@ public:
ThreadState RequestTerminate();
- [[nodiscard]] u32 GetSuspendFlags() const {
- return suspend_allowed_flags & suspend_request_flags;
+ u32 GetSuspendFlags() const {
+ return m_suspend_allowed_flags & m_suspend_request_flags;
}
- [[nodiscard]] bool IsSuspended() const {
+ bool IsSuspended() const {
return GetSuspendFlags() != 0;
}
- [[nodiscard]] bool IsSuspendRequested(SuspendType type) const {
- return (suspend_request_flags &
- (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) !=
+ bool IsSuspendRequested(SuspendType type) const {
+ return (m_suspend_request_flags &
+ (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) !=
0;
}
- [[nodiscard]] bool IsSuspendRequested() const {
- return suspend_request_flags != 0;
+ bool IsSuspendRequested() const {
+ return m_suspend_request_flags != 0;
}
void RequestSuspend(SuspendType type);
@@ -214,202 +217,196 @@ public:
void Continue();
- void WaitUntilSuspended();
-
constexpr void SetSyncedIndex(s32 index) {
- synced_index = index;
+ m_synced_index = index;
}
- [[nodiscard]] constexpr s32 GetSyncedIndex() const {
- return synced_index;
+ constexpr s32 GetSyncedIndex() const {
+ return m_synced_index;
}
constexpr void SetWaitResult(Result wait_res) {
- wait_result = wait_res;
+ m_wait_result = wait_res;
}
- [[nodiscard]] constexpr Result GetWaitResult() const {
- return wait_result;
+ constexpr Result GetWaitResult() const {
+ return m_wait_result;
}
/*
 * Returns the Thread Local Storage address of this thread
- * @returns VAddr of the thread's TLS
+ * @returns Address of the thread's TLS
*/
- [[nodiscard]] VAddr GetTLSAddress() const {
- return tls_address;
+ KProcessAddress GetTlsAddress() const {
+ return m_tls_address;
}
/*
* Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
* @returns The value of the TPIDR_EL0 register.
*/
- [[nodiscard]] u64 GetTPIDR_EL0() const {
- return thread_context_64.tpidr;
+ u64 GetTpidrEl0() const {
+ return m_thread_context_64.tpidr;
}
/// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
- void SetTPIDR_EL0(u64 value) {
- thread_context_64.tpidr = value;
- thread_context_32.tpidr = static_cast<u32>(value);
+ void SetTpidrEl0(u64 value) {
+ m_thread_context_64.tpidr = value;
+ m_thread_context_32.tpidr = static_cast<u32>(value);
}
- [[nodiscard]] ThreadContext32& GetContext32() {
- return thread_context_32;
+ void CloneFpuStatus();
+
+ ThreadContext32& GetContext32() {
+ return m_thread_context_32;
}
- [[nodiscard]] const ThreadContext32& GetContext32() const {
- return thread_context_32;
+ const ThreadContext32& GetContext32() const {
+ return m_thread_context_32;
}
- [[nodiscard]] ThreadContext64& GetContext64() {
- return thread_context_64;
+ ThreadContext64& GetContext64() {
+ return m_thread_context_64;
}
- [[nodiscard]] const ThreadContext64& GetContext64() const {
- return thread_context_64;
+ const ThreadContext64& GetContext64() const {
+ return m_thread_context_64;
}
- [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
+ std::shared_ptr<Common::Fiber>& GetHostContext();
- [[nodiscard]] ThreadState GetState() const {
- return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask;
+ ThreadState GetState() const {
+ return m_thread_state.load(std::memory_order_relaxed) & ThreadState::Mask;
}
- [[nodiscard]] ThreadState GetRawState() const {
- return thread_state.load(std::memory_order_relaxed);
+ ThreadState GetRawState() const {
+ return m_thread_state.load(std::memory_order_relaxed);
}
void SetState(ThreadState state);
- [[nodiscard]] StepState GetStepState() const {
- return step_state;
+ StepState GetStepState() const {
+ return m_step_state;
}
void SetStepState(StepState state) {
- step_state = state;
+ m_step_state = state;
}
- [[nodiscard]] s64 GetLastScheduledTick() const {
- return last_scheduled_tick;
+ s64 GetLastScheduledTick() const {
+ return m_last_scheduled_tick;
}
void SetLastScheduledTick(s64 tick) {
- last_scheduled_tick = tick;
+ m_last_scheduled_tick = tick;
}
- void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) {
- cpu_time += amount;
+ void AddCpuTime(s32 core_id, s64 amount) {
+ m_cpu_time += amount;
// TODO(bunnei): Debug kernels track per-core tick counts. Should we?
}
- [[nodiscard]] s64 GetCpuTime() const {
- return cpu_time;
+ s64 GetCpuTime() const {
+ return m_cpu_time;
}
- [[nodiscard]] s32 GetActiveCore() const {
- return core_id;
+ s32 GetActiveCore() const {
+ return m_core_id;
}
void SetActiveCore(s32 core) {
- core_id = core;
+ m_core_id = core;
}
- [[nodiscard]] s32 GetCurrentCore() const {
- return current_core_id;
+ s32 GetCurrentCore() const {
+ return m_current_core_id;
}
void SetCurrentCore(s32 core) {
- current_core_id = core;
+ m_current_core_id = core;
}
- [[nodiscard]] KProcess* GetOwnerProcess() {
- return parent;
+ KProcess* GetOwnerProcess() {
+ return m_parent;
}
- [[nodiscard]] const KProcess* GetOwnerProcess() const {
- return parent;
+ const KProcess* GetOwnerProcess() const {
+ return m_parent;
}
- [[nodiscard]] bool IsUserThread() const {
- return parent != nullptr;
+ bool IsUserThread() const {
+ return m_parent != nullptr;
}
u16 GetUserDisableCount() const;
void SetInterruptFlag();
void ClearInterruptFlag();
- [[nodiscard]] KThread* GetLockOwner() const {
- return lock_owner;
- }
-
- void SetLockOwner(KThread* owner) {
- lock_owner = owner;
- }
+ KThread* GetLockOwner() const;
- [[nodiscard]] const KAffinityMask& GetAffinityMask() const {
- return physical_affinity_mask;
+ const KAffinityMask& GetAffinityMask() const {
+ return m_physical_affinity_mask;
}
- [[nodiscard]] Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+ Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
- [[nodiscard]] Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+ Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
- [[nodiscard]] Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
+ Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
- [[nodiscard]] Result SetActivity(Svc::ThreadActivity activity);
+ Result SetActivity(Svc::ThreadActivity activity);
- [[nodiscard]] Result Sleep(s64 timeout);
+ Result Sleep(s64 timeout);
- [[nodiscard]] s64 GetYieldScheduleCount() const {
- return schedule_count;
+ s64 GetYieldScheduleCount() const {
+ return m_schedule_count;
}
void SetYieldScheduleCount(s64 count) {
- schedule_count = count;
+ m_schedule_count = count;
}
void WaitCancel();
- [[nodiscard]] bool IsWaitCancelled() const {
- return wait_cancelled;
+ bool IsWaitCancelled() const {
+ return m_wait_cancelled;
}
void ClearWaitCancelled() {
- wait_cancelled = false;
+ m_wait_cancelled = false;
}
- [[nodiscard]] bool IsCancellable() const {
- return cancellable;
+ bool IsCancellable() const {
+ return m_cancellable;
}
void SetCancellable() {
- cancellable = true;
+ m_cancellable = true;
}
void ClearCancellable() {
- cancellable = false;
+ m_cancellable = false;
}
- [[nodiscard]] bool IsTerminationRequested() const {
- return termination_requested || GetRawState() == ThreadState::Terminated;
+ bool IsTerminationRequested() const {
+ return m_termination_requested || GetRawState() == ThreadState::Terminated;
}
- [[nodiscard]] u64 GetId() const override {
- return this->GetThreadID();
+ u64 GetId() const override {
+ return this->GetThreadId();
}
- [[nodiscard]] bool IsInitialized() const override {
- return initialized;
+ bool IsInitialized() const override {
+ return m_initialized;
}
- [[nodiscard]] uintptr_t GetPostDestroyArgument() const override {
- return reinterpret_cast<uintptr_t>(parent) | (resource_limit_release_hint ? 1 : 0);
+ uintptr_t GetPostDestroyArgument() const override {
+ return reinterpret_cast<uintptr_t>(m_parent) | (m_resource_limit_release_hint ? 1 : 0);
}
void Finalize() override;
- [[nodiscard]] bool IsSignaled() const override;
+ bool IsSignaled() const override;
void OnTimer();
@@ -417,22 +414,22 @@ public:
static void PostDestroy(uintptr_t arg);
- [[nodiscard]] static Result InitializeDummyThread(KThread* thread, KProcess* owner);
+ static Result InitializeDummyThread(KThread* thread, KProcess* owner);
- [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread,
- s32 virt_core);
+ static Result InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core);
- [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread,
- s32 virt_core);
+ static Result InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core);
- [[nodiscard]] static Result InitializeHighPriorityThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg,
- s32 virt_core);
+ static Result InitializeHighPriorityThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg, s32 virt_core);
- [[nodiscard]] static Result InitializeUserThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio, s32 virt_core,
- KProcess* owner);
+ static Result InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
+ uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
+ s32 virt_core, KProcess* owner);
+
+ static Result InitializeServiceThread(Core::System& system, KThread* thread,
+ std::function<void()>&& thread_func, s32 prio,
+ s32 virt_core, KProcess* owner);
public:
struct StackParameters {
@@ -446,12 +443,12 @@ public:
KThread* cur_thread;
};
- [[nodiscard]] StackParameters& GetStackParameters() {
- return stack_parameters;
+ StackParameters& GetStackParameters() {
+ return m_stack_parameters;
}
- [[nodiscard]] const StackParameters& GetStackParameters() const {
- return stack_parameters;
+ const StackParameters& GetStackParameters() const {
+ return m_stack_parameters;
}
class QueueEntry {
@@ -459,47 +456,47 @@ public:
constexpr QueueEntry() = default;
constexpr void Initialize() {
- prev = nullptr;
- next = nullptr;
+ m_prev = nullptr;
+ m_next = nullptr;
}
constexpr KThread* GetPrev() const {
- return prev;
+ return m_prev;
}
constexpr KThread* GetNext() const {
- return next;
+ return m_next;
}
constexpr void SetPrev(KThread* thread) {
- prev = thread;
+ m_prev = thread;
}
constexpr void SetNext(KThread* thread) {
- next = thread;
+ m_next = thread;
}
private:
- KThread* prev{};
- KThread* next{};
+ KThread* m_prev{};
+ KThread* m_next{};
};
- [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) {
- return per_core_priority_queue_entry[core];
+ QueueEntry& GetPriorityQueueEntry(s32 core) {
+ return m_per_core_priority_queue_entry[core];
}
- [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const {
- return per_core_priority_queue_entry[core];
+ const QueueEntry& GetPriorityQueueEntry(s32 core) const {
+ return m_per_core_priority_queue_entry[core];
}
- [[nodiscard]] s32 GetDisableDispatchCount() const {
+ s32 GetDisableDispatchCount() const {
return this->GetStackParameters().disable_count;
}
void DisableDispatch() {
- ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
+ ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() >= 0);
this->GetStackParameters().disable_count++;
}
void EnableDispatch() {
- ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
+ ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() > 0);
this->GetStackParameters().disable_count--;
}
@@ -515,7 +512,7 @@ public:
this->GetStackParameters().is_in_exception_handler = false;
}
- [[nodiscard]] bool IsInExceptionHandler() const {
+ bool IsInExceptionHandler() const {
return this->GetStackParameters().is_in_exception_handler;
}
@@ -527,11 +524,11 @@ public:
this->GetStackParameters().is_calling_svc = false;
}
- [[nodiscard]] bool IsCallingSvc() const {
+ bool IsCallingSvc() const {
return this->GetStackParameters().is_calling_svc;
}
- [[nodiscard]] u8 GetSvcId() const {
+ u8 GetSvcId() const {
return this->GetStackParameters().current_svc_id;
}
@@ -543,102 +540,94 @@ public:
this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag);
}
- [[nodiscard]] u8 GetDpc() const {
+ u8 GetDpc() const {
return this->GetStackParameters().dpc_flags;
}
- [[nodiscard]] bool HasDpc() const {
+ bool HasDpc() const {
return this->GetDpc() != 0;
}
void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
- wait_reason_for_debugging = reason;
+ m_wait_reason_for_debugging = reason;
}
- [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
- return wait_reason_for_debugging;
+ ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
+ return m_wait_reason_for_debugging;
}
- [[nodiscard]] ThreadType GetThreadType() const {
- return thread_type;
+ ThreadType GetThreadType() const {
+ return m_thread_type;
}
- [[nodiscard]] bool IsDummyThread() const {
- return GetThreadType() == ThreadType::Dummy;
+ bool IsDummyThread() const {
+ return this->GetThreadType() == ThreadType::Dummy;
}
- void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
- wait_objects_for_debugging.clear();
- wait_objects_for_debugging.reserve(objects.size());
- for (const auto& object : objects) {
- wait_objects_for_debugging.emplace_back(object);
- }
- }
+ void AddWaiter(KThread* thread);
- [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
- return wait_objects_for_debugging;
- }
+ void RemoveWaiter(KThread* thread);
- void SetMutexWaitAddressForDebugging(VAddr address) {
- mutex_wait_address_for_debugging = address;
- }
+ Result GetThreadContext3(Common::ScratchBuffer<u8>& out);
- [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
- return mutex_wait_address_for_debugging;
+ KThread* RemoveUserWaiterByKey(bool* out_has_waiters, KProcessAddress key) {
+ return this->RemoveWaiterByKey(out_has_waiters, key, false);
}
- [[nodiscard]] s32 GetIdealCoreForDebugging() const {
- return virtual_ideal_core_id;
+ KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, KProcessAddress key) {
+ return this->RemoveWaiterByKey(out_has_waiters, key, true);
}
- void AddWaiter(KThread* thread);
-
- void RemoveWaiter(KThread* thread);
-
- [[nodiscard]] Result GetThreadContext3(std::vector<u8>& out);
-
- [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
+ KProcessAddress GetAddressKey() const {
+ return m_address_key;
+ }
- [[nodiscard]] VAddr GetAddressKey() const {
- return address_key;
+ u32 GetAddressKeyValue() const {
+ return m_address_key_value;
}
- [[nodiscard]] u32 GetAddressKeyValue() const {
- return address_key_value;
+ bool GetIsKernelAddressKey() const {
+ return m_is_kernel_address_key;
}
- void SetAddressKey(VAddr key) {
- address_key = key;
+ //! NB: intentional deviation from official kernel.
+ //
+ // Separate SetAddressKey into user and kernel versions
+ // to cope with arbitrary host pointers making their way
+ // into address keys.
+
+ void SetUserAddressKey(KProcessAddress key, u32 val) {
+ ASSERT(m_waiting_lock_info == nullptr);
+ m_address_key = key;
+ m_address_key_value = val;
+ m_is_kernel_address_key = false;
}
- void SetAddressKey(VAddr key, u32 val) {
- address_key = key;
- address_key_value = val;
+ void SetKernelAddressKey(KProcessAddress key) {
+ ASSERT(m_waiting_lock_info == nullptr);
+ m_address_key = key;
+ m_is_kernel_address_key = true;
}
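// Hypothetical call sites for the split above (invented for illustration):
// a condition-variable wait tags the waiter with the guest address,
//   waiter->SetUserAddressKey(user_mutex_addr, self_handle_value);
// while an in-kernel lock may only have a host pointer available,
//   waiter->SetKernelAddressKey(KProcessAddress(reinterpret_cast<uintptr_t>(&tag)));
// so m_is_kernel_address_key stays correct for kernel-waiter accounting.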
void ClearWaitQueue() {
- wait_queue = nullptr;
+ m_wait_queue = nullptr;
}
void BeginWait(KThreadQueue* queue);
- void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_);
- void EndWait(Result wait_result_);
- void CancelWait(Result wait_result_, bool cancel_timer_task);
+ void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result);
+ void EndWait(Result wait_result);
+ void CancelWait(Result wait_result, bool cancel_timer_task);
- [[nodiscard]] bool HasWaiters() const {
- return !waiter_list.empty();
+ s32 GetNumKernelWaiters() const {
+ return m_num_kernel_waiters;
}
- [[nodiscard]] s32 GetNumKernelWaiters() const {
- return num_kernel_waiters;
+ u64 GetConditionVariableKey() const {
+ return m_condvar_key;
}
- [[nodiscard]] u64 GetConditionVariableKey() const {
- return condvar_key;
- }
-
- [[nodiscard]] u64 GetAddressArbiterKey() const {
- return condvar_key;
+ u64 GetAddressArbiterKey() const {
+ return m_condvar_key;
}
// Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and
@@ -649,20 +638,23 @@ public:
void DummyThreadBeginWait();
void DummyThreadEndWait();
- [[nodiscard]] uintptr_t GetArgument() const {
- return argument;
+ uintptr_t GetArgument() const {
+ return m_argument;
}
- [[nodiscard]] VAddr GetUserStackTop() const {
- return stack_top;
+ KProcessAddress GetUserStackTop() const {
+ return m_stack_top;
}
private:
+ KThread* RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
+ bool is_kernel_address_key);
+
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
std::array<Handle,
- Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
+ Svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject*) / sizeof(Handle))>
handles;
constexpr SyncObjectBuffer() {}
};
@@ -673,20 +665,18 @@ private:
u64 cv_key{};
s32 priority{};
- [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
+ constexpr u64 GetConditionVariableKey() const {
return cv_key;
}
- [[nodiscard]] constexpr s32 GetPriority() const {
+ constexpr s32 GetPriority() const {
return priority;
}
};
template <typename T>
- requires(
- std::same_as<T, KThread> ||
- std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
- const KThread& rhs) {
+ requires(std::same_as<T, KThread> || std::same_as<T, RedBlackKeyType>)
+ static constexpr int Compare(const T& lhs, const KThread& rhs) {
const u64 l_key = lhs.GetConditionVariableKey();
const u64 r_key = rhs.GetConditionVariableKey();
@@ -703,134 +693,262 @@ private:
};
void AddWaiterImpl(KThread* thread);
-
void RemoveWaiterImpl(KThread* thread);
+ static void RestorePriority(KernelCore& kernel, KThread* thread);
void StartTermination();
-
void FinishTermination();
- [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
- s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
+ void IncreaseBasePriority(s32 priority);
- [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func,
- uintptr_t arg, VAddr user_stack_top, s32 prio,
- s32 core, KProcess* owner, ThreadType type,
- std::function<void()>&& init_func);
+ Result Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
+ s32 virt_core, KProcess* owner, ThreadType type);
- static void RestorePriority(KernelCore& kernel_ctx, KThread* thread);
+ static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
+ KProcessAddress user_stack_top, s32 prio, s32 core,
+ KProcess* owner, ThreadType type,
+ std::function<void()>&& init_func);
// For core KThread implementation
- ThreadContext32 thread_context_32{};
- ThreadContext64 thread_context_64{};
- Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
- s32 priority{};
+ ThreadContext32 m_thread_context_32{};
+ ThreadContext64 m_thread_context_64{};
+ Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
+ s32 m_priority{};
using ConditionVariableThreadTreeTraits =
Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
- &KThread::condvar_arbiter_tree_node>;
+ &KThread::m_condvar_arbiter_tree_node>;
using ConditionVariableThreadTree =
ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
- ConditionVariableThreadTree* condvar_tree{};
- u64 condvar_key{};
- u64 virtual_affinity_mask{};
- KAffinityMask physical_affinity_mask{};
- u64 thread_id{};
- std::atomic<s64> cpu_time{};
- VAddr address_key{};
- KProcess* parent{};
- VAddr kernel_stack_top{};
- u32* light_ipc_data{};
- VAddr tls_address{};
- KLightLock activity_pause_lock;
- s64 schedule_count{};
- s64 last_scheduled_tick{};
- std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
- KThreadQueue* wait_queue{};
- WaiterList waiter_list{};
- WaiterList pinned_waiter_list{};
- KThread* lock_owner{};
- u32 address_key_value{};
- u32 suspend_request_flags{};
- u32 suspend_allowed_flags{};
- s32 synced_index{};
- Result wait_result{ResultSuccess};
- s32 base_priority{};
- s32 physical_ideal_core_id{};
- s32 virtual_ideal_core_id{};
- s32 num_kernel_waiters{};
- s32 current_core_id{};
- s32 core_id{};
- KAffinityMask original_physical_affinity_mask{};
- s32 original_physical_ideal_core_id{};
- s32 num_core_migration_disables{};
- std::atomic<ThreadState> thread_state{};
- std::atomic<bool> termination_requested{};
- bool wait_cancelled{};
- bool cancellable{};
- bool signaled{};
- bool initialized{};
- bool debug_attached{};
- s8 priority_inheritance_count{};
- bool resource_limit_release_hint{};
- StackParameters stack_parameters{};
- Common::SpinLock context_guard{};
+
+private:
+ struct LockWithPriorityInheritanceComparator {
+ struct RedBlackKeyType {
+ s32 m_priority;
+
+ constexpr s32 GetPriority() const {
+ return m_priority;
+ }
+ };
+
+ template <typename T>
+ requires(std::same_as<T, KThread> || std::same_as<T, RedBlackKeyType>)
+ static constexpr int Compare(const T& lhs, const KThread& rhs) {
+ if (lhs.GetPriority() < rhs.GetPriority()) {
+ // Sort by priority.
+ return -1;
+ } else {
+ return 1;
+ }
+ }
+ };
+ static_assert(std::same_as<Common::RedBlackKeyType<LockWithPriorityInheritanceComparator, void>,
+ LockWithPriorityInheritanceComparator::RedBlackKeyType>);
+
+ using LockWithPriorityInheritanceThreadTreeTraits =
+ Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
+ &KThread::m_condvar_arbiter_tree_node>;
+ using LockWithPriorityInheritanceThreadTree =
+ ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>;
+
+public:
+ class LockWithPriorityInheritanceInfo
+ : public KSlabAllocated<LockWithPriorityInheritanceInfo>,
+ public Common::IntrusiveListBaseNode<LockWithPriorityInheritanceInfo> {
+ public:
+ explicit LockWithPriorityInheritanceInfo(KernelCore&) {}
+
+ static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel,
+ KProcessAddress address_key,
+ bool is_kernel_address_key) {
+ // Create a new lock info.
+ auto* new_lock = LockWithPriorityInheritanceInfo::Allocate(kernel);
+ ASSERT(new_lock != nullptr);
+
+ // Set the new lock's address key.
+ new_lock->m_address_key = address_key;
+ new_lock->m_is_kernel_address_key = is_kernel_address_key;
+
+ return new_lock;
+ }
+
+ void SetOwner(KThread* new_owner) {
+ // Set new owner.
+ m_owner = new_owner;
+ }
+
+ void AddWaiter(KThread* waiter) {
+ // Insert the waiter.
+ m_tree.insert(*waiter);
+ m_waiter_count++;
+
+ waiter->SetWaitingLockInfo(this);
+ }
+
+ bool RemoveWaiter(KThread* waiter) {
+ m_tree.erase(m_tree.iterator_to(*waiter));
+
+ waiter->SetWaitingLockInfo(nullptr);
+
+ return (--m_waiter_count) == 0;
+ }
+
+ KThread* GetHighestPriorityWaiter() {
+ return std::addressof(m_tree.front());
+ }
+ const KThread* GetHighestPriorityWaiter() const {
+ return std::addressof(m_tree.front());
+ }
+
+ LockWithPriorityInheritanceThreadTree& GetThreadTree() {
+ return m_tree;
+ }
+ const LockWithPriorityInheritanceThreadTree& GetThreadTree() const {
+ return m_tree;
+ }
+
+ KProcessAddress GetAddressKey() const {
+ return m_address_key;
+ }
+ bool GetIsKernelAddressKey() const {
+ return m_is_kernel_address_key;
+ }
+ KThread* GetOwner() const {
+ return m_owner;
+ }
+ u32 GetWaiterCount() const {
+ return m_waiter_count;
+ }
+
+ private:
+ LockWithPriorityInheritanceThreadTree m_tree{};
+ KProcessAddress m_address_key{};
+ KThread* m_owner{};
+ u32 m_waiter_count{};
+ bool m_is_kernel_address_key{};
+ };
+
+ void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) {
+ m_waiting_lock_info = lock;
+ }
+
+ LockWithPriorityInheritanceInfo* GetWaitingLockInfo() {
+ return m_waiting_lock_info;
+ }
+
+ void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info);
+ LockWithPriorityInheritanceInfo* FindHeldLock(KProcessAddress address_key,
+ bool is_kernel_address_key);
+
+private:
+ using LockWithPriorityInheritanceInfoList =
+ Common::IntrusiveListBaseTraits<LockWithPriorityInheritanceInfo>::ListType;
+
+ ConditionVariableThreadTree* m_condvar_tree{};
+ u64 m_condvar_key{};
+ u64 m_virtual_affinity_mask{};
+ KAffinityMask m_physical_affinity_mask{};
+ u64 m_thread_id{};
+ std::atomic<s64> m_cpu_time{};
+ KProcessAddress m_address_key{};
+ KProcess* m_parent{};
+ KVirtualAddress m_kernel_stack_top{};
+ u32* m_light_ipc_data{};
+ KProcessAddress m_tls_address{};
+ KLightLock m_activity_pause_lock;
+ s64 m_schedule_count{};
+ s64 m_last_scheduled_tick{};
+ std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> m_per_core_priority_queue_entry{};
+ KThreadQueue* m_wait_queue{};
+ LockWithPriorityInheritanceInfoList m_held_lock_info_list{};
+ LockWithPriorityInheritanceInfo* m_waiting_lock_info{};
+ WaiterList m_pinned_waiter_list{};
+ u32 m_address_key_value{};
+ u32 m_suspend_request_flags{};
+ u32 m_suspend_allowed_flags{};
+ s32 m_synced_index{};
+ Result m_wait_result{ResultSuccess};
+ s32 m_base_priority{};
+ s32 m_physical_ideal_core_id{};
+ s32 m_virtual_ideal_core_id{};
+ s32 m_num_kernel_waiters{};
+ s32 m_current_core_id{};
+ s32 m_core_id{};
+ KAffinityMask m_original_physical_affinity_mask{};
+ s32 m_original_physical_ideal_core_id{};
+ s32 m_num_core_migration_disables{};
+ std::atomic<ThreadState> m_thread_state{};
+ std::atomic<bool> m_termination_requested{};
+ bool m_wait_cancelled{};
+ bool m_cancellable{};
+ bool m_signaled{};
+ bool m_initialized{};
+ bool m_debug_attached{};
+ s8 m_priority_inheritance_count{};
+ bool m_resource_limit_release_hint{};
+ bool m_is_kernel_address_key{};
+ StackParameters m_stack_parameters{};
+ Common::SpinLock m_context_guard{};
// For emulation
- std::shared_ptr<Common::Fiber> host_context{};
- bool is_single_core{};
- ThreadType thread_type{};
- StepState step_state{};
- std::atomic<bool> dummy_thread_runnable{true};
+ std::shared_ptr<Common::Fiber> m_host_context{};
+ ThreadType m_thread_type{};
+ StepState m_step_state{};
+ bool m_dummy_thread_runnable{true};
+ std::mutex m_dummy_thread_mutex{};
+ std::condition_variable m_dummy_thread_cv{};
// For debugging
- std::vector<KSynchronizationObject*> wait_objects_for_debugging;
- VAddr mutex_wait_address_for_debugging{};
- ThreadWaitReasonForDebugging wait_reason_for_debugging{};
- uintptr_t argument{};
- VAddr stack_top{};
+ std::vector<KSynchronizationObject*> m_wait_objects_for_debugging{};
+ KProcessAddress m_mutex_wait_address_for_debugging{};
+ ThreadWaitReasonForDebugging m_wait_reason_for_debugging{};
+ uintptr_t m_argument{};
+ KProcessAddress m_stack_top{};
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
- void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
- u32 value) {
- condvar_tree = tree;
- condvar_key = cv_key;
- address_key = address;
- address_key_value = value;
+ void SetConditionVariable(ConditionVariableThreadTree* tree, KProcessAddress address,
+ u64 cv_key, u32 value) {
+ ASSERT(m_waiting_lock_info == nullptr);
+ m_condvar_tree = tree;
+ m_condvar_key = cv_key;
+ m_address_key = address;
+ m_address_key_value = value;
+ m_is_kernel_address_key = false;
}
void ClearConditionVariable() {
- condvar_tree = nullptr;
+ m_condvar_tree = nullptr;
}
- [[nodiscard]] bool IsWaitingForConditionVariable() const {
- return condvar_tree != nullptr;
+ bool IsWaitingForConditionVariable() const {
+ return m_condvar_tree != nullptr;
}
void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
- condvar_tree = tree;
- condvar_key = address;
+ ASSERT(m_waiting_lock_info == nullptr);
+ m_condvar_tree = tree;
+ m_condvar_key = address;
}
void ClearAddressArbiter() {
- condvar_tree = nullptr;
+ m_condvar_tree = nullptr;
}
- [[nodiscard]] bool IsWaitingForAddressArbiter() const {
- return condvar_tree != nullptr;
+ bool IsWaitingForAddressArbiter() const {
+ return m_condvar_tree != nullptr;
}
- [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
- return condvar_tree;
+ ConditionVariableThreadTree* GetConditionVariableTree() const {
+ return m_condvar_tree;
}
};
class KScopedDisableDispatch {
public:
- [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
+ explicit KScopedDisableDispatch(KernelCore& kernel) : m_kernel{kernel} {
// If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) {
+ if (m_kernel.IsShuttingDown()) {
return;
}
GetCurrentThread(kernel).DisableDispatch();
@@ -839,7 +957,7 @@ public:
~KScopedDisableDispatch();
private:
- KernelCore& kernel;
+ KernelCore& m_kernel;
};
inline void KTimerTask::OnTimer() {
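
Aside: the new LockWithPriorityInheritanceInfo above replaces the old flat waiter_list with one slab-allocated record per contended lock, whose waiters sit in a priority-ordered tree that feeds RestorePriority()/IncreaseBasePriority(). A minimal host-side sketch of that bookkeeping, with a toy Thread/LockInfo and a std::multiset standing in for the kernel's intrusive, slab-allocated tree (all names here are illustrative):

#include <algorithm>
#include <cassert>
#include <set>

struct Thread {
    int base_priority;
    int priority; // effective (possibly boosted) priority
};

struct PriorityLess {
    bool operator()(const Thread* a, const Thread* b) const {
        return a->priority < b->priority; // lower value = higher priority
    }
};

struct LockInfo {
    Thread* owner = nullptr;
    std::multiset<Thread*, PriorityLess> waiters;

    void AddWaiter(Thread* t) {
        waiters.insert(t);
        // Boost the owner to the highest-priority waiter, as RestorePriority does.
        if (owner != nullptr) {
            owner->priority = std::min(owner->priority, (*waiters.begin())->priority);
        }
    }

    bool RemoveWaiter(Thread* t) {
        waiters.erase(waiters.find(t));
        if (owner != nullptr) {
            // Recompute: inherit from the remaining waiters, else fall back to base.
            owner->priority = waiters.empty()
                                  ? owner->base_priority
                                  : std::min(owner->base_priority, (*waiters.begin())->priority);
        }
        return waiters.empty();
    }
};

int main() {
    Thread owner{10, 10}, hi{2, 2};
    LockInfo lock;
    lock.owner = &owner;
    lock.AddWaiter(&hi);
    assert(owner.priority == 2); // owner boosted to the waiter's priority
    lock.RemoveWaiter(&hi);
    assert(owner.priority == 10); // boost restored once the waiter leaves
}
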
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index 563560114..b4a1e3cdb 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -16,7 +16,7 @@ namespace Kernel {
Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
// Set that this process owns us.
m_owner = process;
- m_kernel = &kernel;
+ m_kernel = std::addressof(kernel);
// Allocate a new page.
KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
@@ -37,7 +37,7 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
Result KThreadLocalPage::Finalize() {
// Get the physical address of the page.
- const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
+ const KPhysicalAddress phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
ASSERT(phys_addr);
// Unmap the page.
@@ -49,7 +49,7 @@ Result KThreadLocalPage::Finalize() {
return ResultSuccess;
}
-VAddr KThreadLocalPage::Reserve() {
+KProcessAddress KThreadLocalPage::Reserve() {
for (size_t i = 0; i < m_is_region_free.size(); i++) {
if (m_is_region_free[i]) {
m_is_region_free[i] = false;
@@ -60,7 +60,7 @@ VAddr KThreadLocalPage::Reserve() {
return 0;
}
-void KThreadLocalPage::Release(VAddr addr) {
+void KThreadLocalPage::Release(KProcessAddress addr) {
m_is_region_free[this->GetRegionIndex(addr)] = true;
}
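
Aside: Reserve()/Release() above implement a tiny slot allocator: the page is carved into Svc::ThreadLocalRegionSize regions tracked by a free-flag array, and the typed-address switch is the only functional change here. A self-contained sketch of the same allocator on raw uintptr_t (PageSize/RegionSize values are illustrative):

#include <array>
#include <cassert>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;
constexpr std::size_t RegionSize = 0x200; // stand-in for Svc::ThreadLocalRegionSize
constexpr std::size_t RegionsPerPage = PageSize / RegionSize;

class ThreadLocalPage {
public:
    explicit ThreadLocalPage(std::uintptr_t addr) : m_virt_addr{addr} {
        m_is_region_free.fill(true);
    }

    // Returns the address of a free region, or 0 when the page is exhausted.
    std::uintptr_t Reserve() {
        for (std::size_t i = 0; i < m_is_region_free.size(); i++) {
            if (m_is_region_free[i]) {
                m_is_region_free[i] = false;
                return m_virt_addr + i * RegionSize;
            }
        }
        return 0;
    }

    void Release(std::uintptr_t addr) {
        assert(addr % RegionSize == 0);
        m_is_region_free[(addr - m_virt_addr) / RegionSize] = true;
    }

private:
    std::uintptr_t m_virt_addr;
    std::array<bool, RegionsPerPage> m_is_region_free{};
};

int main() {
    ThreadLocalPage tlp{0x10000};
    const auto a = tlp.Reserve();
    const auto b = tlp.Reserve();
    assert(a == 0x10000 && b == 0x10200);
    tlp.Release(a);
    assert(tlp.Reserve() == a); // a freed slot is handed out again
}
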
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
index fe0cff084..813f32a7e 100644
--- a/src/core/hle/kernel/k_thread_local_page.h
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -27,19 +27,20 @@ public:
static_assert(RegionsPerPage > 0);
public:
- constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) {
+ constexpr explicit KThreadLocalPage(KernelCore&, KProcessAddress addr = {})
+ : m_virt_addr(addr) {
m_is_region_free.fill(true);
}
- constexpr VAddr GetAddress() const {
+ constexpr KProcessAddress GetAddress() const {
return m_virt_addr;
}
Result Initialize(KernelCore& kernel, KProcess* process);
Result Finalize();
- VAddr Reserve();
- void Release(VAddr addr);
+ KProcessAddress Reserve();
+ void Release(KProcessAddress addr);
bool IsAllUsed() const {
return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
@@ -60,7 +61,7 @@ public:
}
public:
- using RedBlackKeyType = VAddr;
+ using RedBlackKeyType = KProcessAddress;
static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) {
return v;
@@ -70,12 +71,10 @@ public:
}
template <typename T>
- requires(std::same_as<T, KThreadLocalPage> ||
- std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
- const KThreadLocalPage&
- rhs) {
- const VAddr lval = GetRedBlackKey(lhs);
- const VAddr rval = GetRedBlackKey(rhs);
+ requires(std::same_as<T, KThreadLocalPage> || std::same_as<T, RedBlackKeyType>)
+ static constexpr int Compare(const T& lhs, const KThreadLocalPage& rhs) {
+ const KProcessAddress lval = GetRedBlackKey(lhs);
+ const KProcessAddress rval = GetRedBlackKey(rhs);
if (lval < rval) {
return -1;
@@ -87,22 +86,22 @@ public:
}
private:
- constexpr VAddr GetRegionAddress(size_t i) const {
+ constexpr KProcessAddress GetRegionAddress(size_t i) const {
return this->GetAddress() + i * Svc::ThreadLocalRegionSize;
}
- constexpr bool Contains(VAddr addr) const {
+ constexpr bool Contains(KProcessAddress addr) const {
return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
}
- constexpr size_t GetRegionIndex(VAddr addr) const {
- ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize));
+ constexpr size_t GetRegionIndex(KProcessAddress addr) const {
+ ASSERT(Common::IsAligned(GetInteger(addr), Svc::ThreadLocalRegionSize));
ASSERT(this->Contains(addr));
return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize;
}
private:
- VAddr m_virt_addr{};
+ KProcessAddress m_virt_addr{};
KProcess* m_owner{};
KernelCore* m_kernel{};
std::array<bool, RegionsPerPage> m_is_region_free{};
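
Aside: several hunks in this file and in k_thread.h reflow the same pattern: a static Compare() constrained with a C++20 requires-clause so the intrusive tree can be searched either with a full node or with a bare RedBlackKeyType, avoiding a temporary node for lookups. A compact standalone illustration of the constraint (the Page type is invented for the example):

#include <cassert>
#include <concepts>
#include <cstdint>

struct Page {
    std::uintptr_t addr;

    using RedBlackKeyType = std::uintptr_t;

    static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) { return v; }
    static constexpr RedBlackKeyType GetRedBlackKey(const Page& p) { return p.addr; }

    // Accepts either another Page or a raw key, mirroring the kernel comparator.
    template <typename T>
        requires(std::same_as<T, Page> || std::same_as<T, RedBlackKeyType>)
    static constexpr int Compare(const T& lhs, const Page& rhs) {
        const auto lval = GetRedBlackKey(lhs);
        const auto rval = GetRedBlackKey(rhs);
        if (lval < rval) {
            return -1;
        } else if (lval == rval) {
            return 0;
        } else {
            return 1;
        }
    }
};

int main() {
    Page a{0x1000}, b{0x2000};
    assert(Page::Compare(a, b) == -1);                      // node vs node
    assert(Page::Compare(std::uintptr_t{0x2000}, b) == 0);  // raw key vs node
}
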
diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp
index 5f1dc97eb..61488f4ce 100644
--- a/src/core/hle/kernel/k_thread_queue.cpp
+++ b/src/core/hle/kernel/k_thread_queue.cpp
@@ -7,9 +7,10 @@
namespace Kernel {
-void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
- [[maybe_unused]] KSynchronizationObject* signaled_object,
- [[maybe_unused]] Result wait_result) {}
+void KThreadQueue::NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
+ Result wait_result) {
+ UNREACHABLE();
+}
void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
// Set the thread's wait result.
@@ -22,7 +23,9 @@ void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
waiting_thread->ClearWaitQueue();
// Cancel the thread task.
- kernel.HardwareTimer().CancelTask(waiting_thread);
+ if (m_hardware_timer != nullptr) {
+ m_hardware_timer->CancelTask(waiting_thread);
+ }
}
void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) {
@@ -36,12 +39,13 @@ void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool
waiting_thread->ClearWaitQueue();
// Cancel the thread task.
- if (cancel_timer_task) {
- kernel.HardwareTimer().CancelTask(waiting_thread);
+ if (cancel_timer_task && m_hardware_timer != nullptr) {
+ m_hardware_timer->CancelTask(waiting_thread);
}
}
-void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
- [[maybe_unused]] Result wait_result) {}
+void KThreadQueueWithoutEndWait::EndWait(KThread* waiting_thread, Result wait_result) {
+ UNREACHABLE();
+}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 8d76ece81..117af0919 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -8,24 +8,30 @@
namespace Kernel {
+class KHardwareTimer;
+
class KThreadQueue {
public:
- explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {}
+ explicit KThreadQueue(KernelCore& kernel) : m_kernel{kernel}, m_hardware_timer{} {}
virtual ~KThreadQueue() = default;
+ void SetHardwareTimer(KHardwareTimer* timer) {
+ m_hardware_timer = timer;
+ }
+
virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
Result wait_result);
virtual void EndWait(KThread* waiting_thread, Result wait_result);
virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task);
private:
- KernelCore& kernel;
- KThread::WaiterList wait_list{};
+ KernelCore& m_kernel;
+ KHardwareTimer* m_hardware_timer{};
};
class KThreadQueueWithoutEndWait : public KThreadQueue {
public:
- explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
+ explicit KThreadQueueWithoutEndWait(KernelCore& kernel) : KThreadQueue(kernel) {}
void EndWait(KThread* waiting_thread, Result wait_result) override final;
};
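
Aside: KThreadQueue now carries the KHardwareTimer* it was armed with instead of reaching through KernelCore, and EndWait()/CancelWait() cancel the timeout task only when a timer is attached. A reduced sketch of that shape, with stand-in Thread/HardwareTimer types (not yuzu's):

#include <cstdio>

struct Thread;

struct HardwareTimer {
    void CancelTask(Thread* t) { std::printf("timer task cancelled for %p\n", (void*)t); }
};

class ThreadQueue {
public:
    void SetHardwareTimer(HardwareTimer* timer) { m_hardware_timer = timer; }

    void EndWait(Thread* waiting_thread) {
        // ... wake the thread and clear its wait-queue pointer ...
        // Cancel the timeout task only if this wait was armed with a timer.
        if (m_hardware_timer != nullptr) {
            m_hardware_timer->CancelTask(waiting_thread);
        }
    }

private:
    HardwareTimer* m_hardware_timer{};
};

int main() {
    HardwareTimer timer;
    ThreadQueue untimed, timed;
    timed.SetHardwareTimer(&timer);

    Thread* t = nullptr;
    untimed.EndWait(t); // no timer attached: nothing to cancel
    timed.EndWait(t);   // timer attached: task cancelled
}
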
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index 9f34c2d46..13d34125c 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -8,32 +8,29 @@
namespace Kernel {
-KTransferMemory::KTransferMemory(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+KTransferMemory::KTransferMemory(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KTransferMemory::~KTransferMemory() = default;
-Result KTransferMemory::Initialize(VAddr address_, std::size_t size_,
- Svc::MemoryPermission owner_perm_) {
+Result KTransferMemory::Initialize(KProcessAddress address, std::size_t size,
+ Svc::MemoryPermission owner_perm) {
// Set members.
- owner = kernel.CurrentProcess();
+ m_owner = GetCurrentProcessPointer(m_kernel);
// TODO(bunnei): Lock for transfer memory
// Set remaining tracking members.
- owner->Open();
- owner_perm = owner_perm_;
- address = address_;
- size = size_;
- is_initialized = true;
+ m_owner->Open();
+ m_owner_perm = owner_perm;
+ m_address = address;
+ m_size = size;
+ m_is_initialized = true;
- return ResultSuccess;
+ R_SUCCEED();
}
-void KTransferMemory::Finalize() {
- // Perform inherited finalization.
- KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList>::Finalize();
-}
+void KTransferMemory::Finalize() {}
void KTransferMemory::PostDestroy(uintptr_t arg) {
KProcess* owner = reinterpret_cast<KProcess*>(arg);
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index 85d508ee7..54f97ccb4 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -23,41 +23,41 @@ class KTransferMemory final
KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);
public:
- explicit KTransferMemory(KernelCore& kernel_);
+ explicit KTransferMemory(KernelCore& kernel);
~KTransferMemory() override;
- Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
+ Result Initialize(KProcessAddress address, std::size_t size, Svc::MemoryPermission owner_perm);
void Finalize() override;
bool IsInitialized() const override {
- return is_initialized;
+ return m_is_initialized;
}
uintptr_t GetPostDestroyArgument() const override {
- return reinterpret_cast<uintptr_t>(owner);
+ return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
KProcess* GetOwner() const override {
- return owner;
+ return m_owner;
}
- VAddr GetSourceAddress() const {
- return address;
+ KProcessAddress GetSourceAddress() const {
+ return m_address;
}
size_t GetSize() const {
- return is_initialized ? size : 0;
+ return m_is_initialized ? m_size : 0;
}
private:
- KProcess* owner{};
- VAddr address{};
- Svc::MemoryPermission owner_perm{};
- size_t size{};
- bool is_initialized{};
+ KProcess* m_owner{};
+ KProcessAddress m_address{};
+ Svc::MemoryPermission m_owner_perm{};
+ size_t m_size{};
+ bool m_is_initialized{};
};
} // namespace Kernel
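
Aside: GetPostDestroyArgument() smuggles the owner pointer through a uintptr_t because PostDestroy() is static and runs after the object's storage is reclaimed, yet must still drop the owner reference taken in Initialize(). A minimal sketch of the hand-off, with a toy refcount standing in for KAutoObject:

#include <cassert>
#include <cstdint>

struct Process {
    int ref_count = 1;
    void Open() { ++ref_count; }
    void Close() { --ref_count; }
};

struct TransferMemory {
    Process* m_owner{};

    void Initialize(Process* owner) {
        m_owner = owner;
        m_owner->Open(); // hold the owner alive while we exist
    }

    std::uintptr_t GetPostDestroyArgument() const {
        return reinterpret_cast<std::uintptr_t>(m_owner);
    }

    // Static hook: runs after the object itself has been destroyed.
    static void PostDestroy(std::uintptr_t arg) {
        Process* owner = reinterpret_cast<Process*>(arg);
        owner->Close();
    }
};

int main() {
    Process process;
    std::uintptr_t arg;
    {
        TransferMemory tm;
        tm.Initialize(&process);
        assert(process.ref_count == 2);
        arg = tm.GetPostDestroyArgument();
    } // the object is gone; only the uintptr_t survives
    TransferMemory::PostDestroy(arg);
    assert(process.ref_count == 1);
}
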
diff --git a/src/core/hle/kernel/k_typed_address.h b/src/core/hle/kernel/k_typed_address.h
new file mode 100644
index 000000000..d57535ba0
--- /dev/null
+++ b/src/core/hle/kernel/k_typed_address.h
@@ -0,0 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/typed_address.h"
+
+namespace Kernel {
+
+using KPhysicalAddress = Common::PhysicalAddress;
+using KVirtualAddress = Common::VirtualAddress;
+using KProcessAddress = Common::ProcessAddress;
+
+} // namespace Kernel
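
Aside: much of this diff is fallout from these aliases: the strong address types no longer convert to integers implicitly, so every raw-integer boundary gains a GetInteger() call. A minimal strong-typedef sketch of why physical and process addresses stop mixing (the wrapper below is illustrative, not Common::TypedAddress itself):

#include <cstdint>

template <typename Tag>
class TypedAddress {
public:
    constexpr TypedAddress() = default;
    constexpr explicit TypedAddress(std::uint64_t v) : m_value{v} {}

    constexpr TypedAddress operator+(std::uint64_t off) const {
        return TypedAddress{m_value + off};
    }
    constexpr bool operator<(TypedAddress rhs) const { return m_value < rhs.m_value; }

    // The only escape hatch back to an integer, mirroring GetInteger().
    friend constexpr std::uint64_t GetInteger(TypedAddress a) { return a.m_value; }

private:
    std::uint64_t m_value{};
};

struct PhysicalTag {};
struct ProcessTag {};
using PhysicalAddress = TypedAddress<PhysicalTag>;
using ProcessAddress = TypedAddress<ProcessTag>;

int main() {
    ProcessAddress va{0x1000};
    PhysicalAddress pa{0x8000};

    // va = pa;              // would not compile: distinct types
    // std::uint64_t x = va; // would not compile: no implicit conversion
    const std::uint64_t aligned = GetInteger(va) & ~0xFFFull; // explicit escape
    return aligned == 0x1000 ? 0 : 1;
}
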
diff --git a/src/core/hle/kernel/k_worker_task.h b/src/core/hle/kernel/k_worker_task.h
index ef591d831..9a230c03c 100644
--- a/src/core/hle/kernel/k_worker_task.h
+++ b/src/core/hle/kernel/k_worker_task.h
@@ -9,7 +9,7 @@ namespace Kernel {
class KWorkerTask : public KSynchronizationObject {
public:
- explicit KWorkerTask(KernelCore& kernel_);
+ explicit KWorkerTask(KernelCore& kernel);
void DoWorkerTask();
};
diff --git a/src/core/hle/kernel/k_worker_task_manager.cpp b/src/core/hle/kernel/k_worker_task_manager.cpp
index 04042bf8f..8ead39591 100644
--- a/src/core/hle/kernel/k_worker_task_manager.cpp
+++ b/src/core/hle/kernel/k_worker_task_manager.cpp
@@ -10,7 +10,7 @@
namespace Kernel {
-KWorkerTask::KWorkerTask(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KWorkerTask::KWorkerTask(KernelCore& kernel) : KSynchronizationObject{kernel} {}
void KWorkerTask::DoWorkerTask() {
if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) {
diff --git a/src/core/hle/kernel/k_worker_task_manager.h b/src/core/hle/kernel/k_worker_task_manager.h
index f6618883e..8745a4ce2 100644
--- a/src/core/hle/kernel/k_worker_task_manager.h
+++ b/src/core/hle/kernel/k_worker_task_manager.h
@@ -20,7 +20,7 @@ public:
KWorkerTaskManager();
- static void AddTask(KernelCore& kernel_, WorkerType type, KWorkerTask* task);
+ static void AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task);
private:
void AddTask(KernelCore& kernel, KWorkerTask* task);
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 1fb25f221..f33600ca5 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -29,18 +29,20 @@
#include "core/hle/kernel/k_hardware_timer.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_object_name.h"
#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/service_thread.h"
#include "core/hle/result.h"
+#include "core/hle/service/server_manager.h"
#include "core/hle/service/sm/sm.h"
#include "core/memory.h"
@@ -54,9 +56,7 @@ struct KernelCore::Impl {
static constexpr size_t BlockInfoSlabHeapSize = 4000;
static constexpr size_t ReservedDynamicPageCount = 64;
- explicit Impl(Core::System& system_, KernelCore& kernel_)
- : service_threads_manager{1, "ServiceThreadsManager"},
- service_thread_barrier{2}, system{system_} {}
+ explicit Impl(Core::System& system_, KernelCore& kernel_) : system{system_} {}
void SetMulticore(bool is_multi) {
is_multicore = is_multi;
@@ -84,6 +84,7 @@ struct KernelCore::Impl {
InitializeShutdownThreads();
InitializePhysicalCores();
InitializePreemption(kernel);
+ InitializeGlobalData(kernel);
// Initialize the Dynamic Slab Heaps.
{
@@ -94,21 +95,19 @@ struct KernelCore::Impl {
pt_heap_region.GetSize());
}
- InitializeHackSharedMemory();
+ InitializeHackSharedMemory(kernel);
RegisterHostThread(nullptr);
-
- default_service_thread = &CreateServiceThread(kernel, "DefaultServiceThread");
}
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id]->Initialize((*current_process).Is64BitProcess());
- system.Memory().SetCurrentPageTable(*current_process, core_id);
+ cores[core_id]->Initialize((*application_process).Is64BitProcess());
+ system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id);
}
}
- void CloseCurrentProcess() {
- KProcess* old_process = current_process.exchange(nullptr);
+ void CloseApplicationProcess() {
+ KProcess* old_process = application_process.exchange(nullptr);
if (old_process == nullptr) {
return;
}
@@ -138,11 +137,6 @@ struct KernelCore::Impl {
preemption_event = nullptr;
- for (auto& iter : named_ports) {
- iter.second->Close();
- }
- named_ports.clear();
-
exclusive_monitor.reset();
// Cleanup persistent kernel objects
@@ -182,7 +176,7 @@ struct KernelCore::Impl {
}
}
- CloseCurrentProcess();
+ CloseApplicationProcess();
// Track kernel objects that were not freed on shutdown
{
@@ -194,6 +188,8 @@ struct KernelCore::Impl {
}
}
+ object_name_global_data.reset();
+
// Ensure that the object list container is finalized and properly shutdown.
global_object_list_container->Finalize();
global_object_list_container.reset();
@@ -203,13 +199,14 @@ struct KernelCore::Impl {
}
void CloseServices() {
- // Ensures all service threads gracefully shutdown.
- ClearServiceThreads();
+ // Ensures all servers gracefully shut down.
+ std::scoped_lock lk{server_lock};
+ server_managers.clear();
}
void InitializePhysicalCores() {
exclusive_monitor =
- Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
+ Core::MakeExclusiveMonitor(system.ApplicationMemory(), Core::Hardware::NUM_CPU_CORES);
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
const s32 core{static_cast<s32>(i)};
@@ -217,13 +214,14 @@ struct KernelCore::Impl {
cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]);
auto* main_thread{Kernel::KThread::Create(system.Kernel())};
- main_thread->SetName(fmt::format("MainThread:{}", core));
main_thread->SetCurrentCore(core);
ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
+ KThread::Register(system.Kernel(), main_thread);
auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
idle_thread->SetCurrentCore(core);
ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
+ KThread::Register(system.Kernel(), idle_thread);
schedulers[i]->Initialize(main_thread, idle_thread, core);
}
@@ -234,6 +232,7 @@ struct KernelCore::Impl {
const Core::Timing::CoreTiming& core_timing) {
system_resource_limit = KResourceLimit::Create(system.Kernel());
system_resource_limit->Initialize(&core_timing);
+ KResourceLimit::Register(kernel, system_resource_limit);
const auto sizes{memory_layout->GetTotalAndKernelMemorySizes()};
const auto total_size{sizes.first};
@@ -275,9 +274,9 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
- void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) {
+ void InitializeResourceManagers(KernelCore& kernel, KVirtualAddress address, size_t size) {
// Ensure that the buffer is suitable for our use.
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
// Ensure that we have space for our reference counts.
@@ -359,55 +358,52 @@ struct KernelCore::Impl {
ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {},
core_id)
.IsSuccess());
- shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
+ KThread::Register(system.Kernel(), shutdown_threads[core_id]);
}
}
- void MakeCurrentProcess(KProcess* process) {
- current_process = process;
+ void InitializeGlobalData(KernelCore& kernel) {
+ object_name_global_data = std::make_unique<KObjectNameGlobalData>(kernel);
}
- static inline thread_local u32 host_thread_id = UINT32_MAX;
+ void MakeApplicationProcess(KProcess* process) {
+ application_process = process;
+ }
- /// Gets the host thread ID for the caller, allocating a new one if this is the first time
- u32 GetHostThreadId(std::size_t core_id) {
- if (host_thread_id == UINT32_MAX) {
- // The first four slots are reserved for CPU core threads
- ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- host_thread_id = static_cast<u32>(core_id);
- }
+ static inline thread_local u8 host_thread_id = UINT8_MAX;
+
+ /// Sets the host thread ID for the caller.
+ u32 SetHostThreadId(std::size_t core_id) {
+ // This should only be called during core init.
+ ASSERT(host_thread_id == UINT8_MAX);
+
+ // The first four slots are reserved for CPU core threads
+ ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+ host_thread_id = static_cast<u8>(core_id);
return host_thread_id;
}
- /// Gets the host thread ID for the caller, allocating a new one if this is the first time
- u32 GetHostThreadId() {
- if (host_thread_id == UINT32_MAX) {
- host_thread_id = next_host_thread_id++;
- }
+ /// Gets the host thread ID for the caller
+ u32 GetHostThreadId() const {
return host_thread_id;
}
// Gets the dummy KThread for the caller, allocating a new one if this is the first time
KThread* GetHostDummyThread(KThread* existing_thread) {
- auto initialize = [this](KThread* thread) {
+ const auto initialize{[](KThread* thread) {
ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess());
- thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
return thread;
- };
+ }};
thread_local KThread raw_thread{system.Kernel()};
- thread_local KThread* thread = nullptr;
- if (thread == nullptr) {
- thread = (existing_thread == nullptr) ? initialize(&raw_thread) : existing_thread;
- }
-
+ thread_local KThread* thread = existing_thread ? existing_thread : initialize(&raw_thread);
return thread;
}
/// Registers a CPU core thread by allocating a host thread ID for it
void RegisterCoreThread(std::size_t core_id) {
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- const auto this_id = GetHostThreadId(core_id);
+ const auto this_id = SetHostThreadId(core_id);
if (!is_multicore) {
single_core_thread_id = this_id;
}
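
Aside: the lazy GetHostThreadId(core_id) becomes a one-shot SetHostThreadId() that may run only during core init, with the thread_local shrunk to a u8 sentinel. A small sketch of the one-time thread_local pattern:

#include <cassert>
#include <cstddef>
#include <cstdint>

namespace {
constexpr std::uint8_t InvalidId = UINT8_MAX;
thread_local std::uint8_t host_thread_id = InvalidId;
} // namespace

// Called exactly once per core thread during init.
std::uint32_t SetHostThreadId(std::size_t core_id) {
    assert(host_thread_id == InvalidId); // a second call is a logic error
    host_thread_id = static_cast<std::uint8_t>(core_id);
    return host_thread_id;
}

std::uint32_t GetHostThreadId() {
    return host_thread_id;
}

int main() {
    SetHostThreadId(2);
    assert(GetHostThreadId() == 2);
}
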
@@ -415,7 +411,6 @@ struct KernelCore::Impl {
/// Registers a new host thread by allocating a host thread ID for it
void RegisterHostThread(KThread* existing_thread) {
- [[maybe_unused]] const auto this_id = GetHostThreadId();
[[maybe_unused]] const auto dummy_thread = GetHostDummyThread(existing_thread);
}
@@ -445,11 +440,9 @@ struct KernelCore::Impl {
static inline thread_local KThread* current_thread{nullptr};
KThread* GetCurrentEmuThread() {
- const auto thread_id = GetCurrentHostThreadID();
- if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
- return GetHostDummyThread(nullptr);
+ if (!current_thread) {
+ current_thread = GetHostDummyThread(nullptr);
}
-
return current_thread;
}
@@ -473,29 +466,30 @@ struct KernelCore::Impl {
KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
// Save start and end for ease of use.
- const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase;
- const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd;
+ constexpr KVirtualAddress code_start_virt_addr = KernelVirtualAddressCodeBase;
+ constexpr KVirtualAddress code_end_virt_addr = KernelVirtualAddressCodeEnd;
// Setup the containing kernel region.
constexpr size_t KernelRegionSize = 1_GiB;
constexpr size_t KernelRegionAlign = 1_GiB;
- constexpr VAddr kernel_region_start =
- Common::AlignDown(code_start_virt_addr, KernelRegionAlign);
+ constexpr KVirtualAddress kernel_region_start =
+ Common::AlignDown(GetInteger(code_start_virt_addr), KernelRegionAlign);
size_t kernel_region_size = KernelRegionSize;
if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
- kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
+ kernel_region_size = KernelVirtualAddressSpaceEnd - GetInteger(kernel_region_start);
}
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
+ GetInteger(kernel_region_start), kernel_region_size, KMemoryRegionType_Kernel));
// Setup the code region.
constexpr size_t CodeRegionAlign = PageSize;
- constexpr VAddr code_region_start =
- Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
- constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
+ constexpr KVirtualAddress code_region_start =
+ Common::AlignDown(GetInteger(code_start_virt_addr), CodeRegionAlign);
+ constexpr KVirtualAddress code_region_end =
+ Common::AlignUp(GetInteger(code_end_virt_addr), CodeRegionAlign);
constexpr size_t code_region_size = code_region_end - code_region_start;
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- code_region_start, code_region_size, KMemoryRegionType_KernelCode));
+ GetInteger(code_region_start), code_region_size, KMemoryRegionType_KernelCode));
// Setup board-specific device physical regions.
Init::SetupDevicePhysicalMemoryRegions(*memory_layout);
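
Aside: this function repeatedly aligns a start address down and an end address up before deriving region sizes, now routed through GetInteger() on the typed addresses. A tiny worked example of the AlignDown/AlignUp arithmetic on raw integers (constants illustrative):

#include <cassert>
#include <cstdint>

constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value & ~(align - 1); // align must be a power of two
}
constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return AlignDown(value + align - 1, align);
}

int main() {
    constexpr std::uint64_t PageSize = 0x1000;
    constexpr std::uint64_t code_start = 0x8000'1234;
    constexpr std::uint64_t code_end = 0x8003'4567;

    // Mirror of the code-region computation, on raw integers.
    constexpr auto region_start = AlignDown(code_start, PageSize);
    constexpr auto region_end = AlignUp(code_end, PageSize);
    constexpr auto region_size = region_end - region_start;

    static_assert(region_start == 0x8000'1000);
    static_assert(region_end == 0x8003'5000);
    static_assert(region_size == 0x3'4000);
    assert(region_size % PageSize == 0);
}
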
@@ -531,11 +525,11 @@ struct KernelCore::Impl {
ASSERT(misc_region_size > 0);
// Setup the misc region.
- const VAddr misc_region_start =
+ const KVirtualAddress misc_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
+ GetInteger(misc_region_start), misc_region_size, KMemoryRegionType_KernelMisc));
// Determine if we'll use extra thread resources.
const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
@@ -543,11 +537,11 @@ struct KernelCore::Impl {
// Setup the stack region.
constexpr size_t StackRegionSize = 14_MiB;
constexpr size_t StackRegionAlign = KernelAslrAlignment;
- const VAddr stack_region_start =
+ const KVirtualAddress stack_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
+ GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack));
// Determine the size of the resource region.
const size_t resource_region_size =
@@ -559,29 +553,29 @@ struct KernelCore::Impl {
ASSERT(slab_region_size <= resource_region_size);
// Setup the slab region.
- const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase;
- const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size;
- const PAddr slab_start_phys_addr = code_end_phys_addr;
- const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
+ const KPhysicalAddress code_start_phys_addr = KernelPhysicalAddressCodeBase;
+ const KPhysicalAddress code_end_phys_addr = code_start_phys_addr + code_region_size;
+ const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
+ const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
constexpr size_t SlabRegionAlign = KernelAslrAlignment;
const size_t slab_region_needed_size =
- Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
- Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
- const VAddr slab_region_start =
+ Common::AlignUp(GetInteger(code_end_phys_addr) + slab_region_size, SlabRegionAlign) -
+ Common::AlignDown(GetInteger(code_end_phys_addr), SlabRegionAlign);
+ const KVirtualAddress slab_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
- (code_end_phys_addr % SlabRegionAlign);
+ (GetInteger(code_end_phys_addr) % SlabRegionAlign);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
+ GetInteger(slab_region_start), slab_region_size, KMemoryRegionType_KernelSlab));
// Setup the temp region.
constexpr size_t TempRegionSize = 128_MiB;
constexpr size_t TempRegionAlign = KernelAslrAlignment;
- const VAddr temp_region_start =
+ const KVirtualAddress temp_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
- ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
- KMemoryRegionType_KernelTemp));
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
+ GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp));
// Automatically map in devices that have auto-map attributes.
for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
@@ -607,35 +601,37 @@ struct KernelCore::Impl {
region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
// Create a virtual pair region and insert it into the tree.
- const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
+ const KPhysicalAddress map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
const size_t map_size =
- Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
- const VAddr map_virt_addr =
+ Common::AlignUp(region.GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
+ const KVirtualAddress map_virt_addr =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
- region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
+ GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
+ region.SetPairAddress(GetInteger(map_virt_addr) + region.GetAddress() -
+ GetInteger(map_phys_addr));
}
Init::SetupDramPhysicalMemoryRegions(*memory_layout);
// Insert a physical region for the kernel code region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
+ GetInteger(code_start_phys_addr), code_region_size, KMemoryRegionType_DramKernelCode));
// Insert a physical region for the kernel slab region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
+ GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
// Determine size available for kernel page table heaps, requiring > 8 MB.
- const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
+ const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
ASSERT(page_table_heap_size / 4_MiB > 2);
// Insert a physical region for the kernel page table heap region
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
+ GetInteger(slab_end_phys_addr), page_table_heap_size,
+ KMemoryRegionType_DramKernelPtHeap));
// All DRAM regions that we haven't tagged by this point will be mapped under the linear
// mapping. Tag them.
@@ -657,20 +653,21 @@ struct KernelCore::Impl {
// Setup the linear mapping region.
constexpr size_t LinearRegionAlign = 1_GiB;
- const PAddr aligned_linear_phys_start =
+ const KPhysicalAddress aligned_linear_phys_start =
Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
const size_t linear_region_size =
Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
- aligned_linear_phys_start;
- const VAddr linear_region_start =
+ GetInteger(aligned_linear_phys_start);
+ const KVirtualAddress linear_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
- const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
+ const u64 linear_region_phys_to_virt_diff =
+ GetInteger(linear_region_start) - GetInteger(aligned_linear_phys_start);
// Map and create regions for all the linearly-mapped data.
{
- PAddr cur_phys_addr = 0;
+ KPhysicalAddress cur_phys_addr = 0;
u64 cur_size = 0;
for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
@@ -689,15 +686,16 @@ struct KernelCore::Impl {
cur_size = region.GetSize();
}
- const VAddr region_virt_addr =
+ const KVirtualAddress region_virt_addr =
region.GetAddress() + linear_region_phys_to_virt_diff;
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- region_virt_addr, region.GetSize(),
+ GetInteger(region_virt_addr), region.GetSize(),
GetTypeForVirtualLinearMapping(region.GetType())));
- region.SetPairAddress(region_virt_addr);
+ region.SetPairAddress(GetInteger(region_virt_addr));
KMemoryRegion* virt_region =
- memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+ memory_layout->GetVirtualMemoryRegionTree().FindModifiable(
+ GetInteger(region_virt_addr));
ASSERT(virt_region != nullptr);
virt_region->SetPairAddress(region.GetAddress());
}
@@ -705,10 +703,11 @@ struct KernelCore::Impl {
// Insert regions for the initial page table region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
+ GetInteger(resource_end_phys_addr), KernelPageTableHeapSize,
+ KMemoryRegionType_DramKernelInitPt));
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
- KMemoryRegionType_VirtualDramKernelInitPt));
+ GetInteger(resource_end_phys_addr) + linear_region_phys_to_virt_diff,
+ KernelPageTableHeapSize, KMemoryRegionType_VirtualDramKernelInitPt));
// All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
// some pool partition. Tag them.
@@ -734,7 +733,7 @@ struct KernelCore::Impl {
memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize());
}
- void InitializeHackSharedMemory() {
+ void InitializeHackSharedMemory(KernelCore& kernel) {
// Setup memory regions for emulated processes
// TODO(bunnei): These should not be hardcoded regions initialized within the kernel
constexpr std::size_t hid_size{0x40000};
@@ -750,65 +749,24 @@ struct KernelCore::Impl {
hidbus_shared_mem = KSharedMemory::Create(system.Kernel());
hid_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, hid_size, "HID:SharedMemory");
- font_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, font_size, "Font:SharedMemory");
- irs_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, irs_size, "IRS:SharedMemory");
- time_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, time_size, "Time:SharedMemory");
- hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, hidbus_size,
- "HidBus:SharedMemory");
- }
-
- KClientPort* CreateNamedServicePort(std::string name) {
- auto search = service_interface_factory.find(name);
- if (search == service_interface_factory.end()) {
- UNIMPLEMENTED();
- return {};
- }
-
- return &search->second(system.ServiceManager(), system);
- }
-
- void RegisterNamedServiceHandler(std::string name, KServerPort* server_port) {
- auto search = service_interface_handlers.find(name);
- if (search == service_interface_handlers.end()) {
- return;
- }
-
- search->second(system.ServiceManager(), server_port);
- }
+ Svc::MemoryPermission::Read, hid_size);
+ KSharedMemory::Register(kernel, hid_shared_mem);
- Kernel::ServiceThread& CreateServiceThread(KernelCore& kernel, const std::string& name) {
- auto* ptr = new ServiceThread(kernel, name);
-
- service_threads_manager.QueueWork(
- [this, ptr]() { service_threads.emplace(ptr, std::unique_ptr<ServiceThread>(ptr)); });
-
- return *ptr;
- }
-
- void ReleaseServiceThread(Kernel::ServiceThread& service_thread) {
- auto* ptr = &service_thread;
+ font_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+ Svc::MemoryPermission::Read, font_size);
+ KSharedMemory::Register(kernel, font_shared_mem);
- if (ptr == default_service_thread) {
- // Nothing to do here, the service is using default_service_thread, which will be
- // released on shutdown.
- return;
- }
+ irs_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+ Svc::MemoryPermission::Read, irs_size);
+ KSharedMemory::Register(kernel, irs_shared_mem);
- service_threads_manager.QueueWork([this, ptr]() { service_threads.erase(ptr); });
- }
+ time_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+ Svc::MemoryPermission::Read, time_size);
+ KSharedMemory::Register(kernel, time_shared_mem);
- void ClearServiceThreads() {
- service_threads_manager.QueueWork([this] {
- service_threads.clear();
- default_service_thread = nullptr;
- service_thread_barrier.Sync();
- });
- service_thread_barrier.Sync();
+ hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+ Svc::MemoryPermission::Read, hidbus_size);
+ KSharedMemory::Register(kernel, hidbus_shared_mem);
}
std::mutex registered_objects_lock;
@@ -821,7 +779,7 @@ struct KernelCore::Impl {
// Lists all processes that exist in the current session.
std::vector<KProcess*> process_list;
- std::atomic<KProcess*> current_process{};
+ std::atomic<KProcess*> application_process{};
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
std::unique_ptr<Kernel::KHardwareTimer> hardware_timer;
@@ -838,14 +796,14 @@ struct KernelCore::Impl {
std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;
- /// Map of named ports managed by the kernel, which can be retrieved using
- /// the ConnectToPort SVC.
- std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
- std::unordered_map<std::string, ServiceInterfaceHandlerFn> service_interface_handlers;
- NamedPortTable named_ports;
+ std::unique_ptr<KObjectNameGlobalData> object_name_global_data;
+
std::unordered_set<KAutoObject*> registered_objects;
std::unordered_set<KAutoObject*> registered_in_use_objects;
+ std::mutex server_lock;
+ std::vector<std::unique_ptr<Service::ServerManager>> server_managers;
+
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores;
@@ -880,12 +838,6 @@ struct KernelCore::Impl {
// Memory layout
std::unique_ptr<KMemoryLayout> memory_layout;
- // Threads used for services
- std::unordered_map<ServiceThread*, std::unique_ptr<ServiceThread>> service_threads;
- ServiceThread* default_service_thread{};
- Common::ThreadWorker service_threads_manager;
- Common::Barrier service_thread_barrier;
-
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@@ -941,20 +893,20 @@ void KernelCore::AppendNewProcess(KProcess* process) {
impl->process_list.push_back(process);
}
-void KernelCore::MakeCurrentProcess(KProcess* process) {
- impl->MakeCurrentProcess(process);
+void KernelCore::MakeApplicationProcess(KProcess* process) {
+ impl->MakeApplicationProcess(process);
}
-KProcess* KernelCore::CurrentProcess() {
- return impl->current_process;
+KProcess* KernelCore::ApplicationProcess() {
+ return impl->application_process;
}
-const KProcess* KernelCore::CurrentProcess() const {
- return impl->current_process;
+const KProcess* KernelCore::ApplicationProcess() const {
+ return impl->application_process;
}
-void KernelCore::CloseCurrentProcess() {
- impl->CloseCurrentProcess();
+void KernelCore::CloseApplicationProcess() {
+ impl->CloseApplicationProcess();
}
const std::vector<KProcess*>& KernelCore::GetProcessList() const {
@@ -1002,7 +954,7 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
}
Kernel::KScheduler* KernelCore::CurrentScheduler() {
- u32 core_id = impl->GetCurrentHostThreadID();
+ const u32 core_id = impl->GetCurrentHostThreadID();
if (core_id >= Core::Hardware::NUM_CPU_CORES) {
// This is expected when called from not a guest thread
return {};
@@ -1036,12 +988,12 @@ void KernelCore::InvalidateAllInstructionCaches() {
}
}
-void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
+void KernelCore::InvalidateCpuInstructionCacheRange(KProcessAddress addr, std::size_t size) {
for (auto& physical_core : impl->cores) {
if (!physical_core->IsInitialized()) {
continue;
}
- physical_core->ArmInterface().InvalidateCacheRange(addr, size);
+ physical_core->ArmInterface().InvalidateCacheRange(GetInteger(addr), size);
}
}
@@ -1049,23 +1001,6 @@ void KernelCore::PrepareReschedule(std::size_t id) {
// TODO: Reimplement this
}
-void KernelCore::RegisterNamedService(std::string name, ServiceInterfaceFactory&& factory) {
- impl->service_interface_factory.emplace(std::move(name), factory);
-}
-
-void KernelCore::RegisterInterfaceForNamedService(std::string name,
- ServiceInterfaceHandlerFn&& handler) {
- impl->service_interface_handlers.emplace(std::move(name), handler);
-}
-
-KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
- return impl->CreateNamedServicePort(std::move(name));
-}
-
-void KernelCore::RegisterNamedServiceHandler(std::string name, KServerPort* server_port) {
- impl->RegisterNamedServiceHandler(std::move(name), server_port);
-}
-
void KernelCore::RegisterKernelObject(KAutoObject* object) {
std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.insert(object);
@@ -1086,8 +1021,19 @@ void KernelCore::UnregisterInUseObject(KAutoObject* object) {
impl->registered_in_use_objects.erase(object);
}
-bool KernelCore::IsValidNamedPort(NamedPortTable::const_iterator port) const {
- return port != impl->named_ports.cend();
+void KernelCore::RunServer(std::unique_ptr<Service::ServerManager>&& server_manager) {
+ auto* manager = server_manager.get();
+
+ {
+ std::scoped_lock lk{impl->server_lock};
+ if (impl->is_shutting_down) {
+ return;
+ }
+
+ impl->server_managers.emplace_back(std::move(server_manager));
+ }
+
+ manager->LoopProcess();
}
u32 KernelCore::CreateNewObjectID() {
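
Aside: RunServer() above replaces the named-port and service-thread plumbing: the kernel takes ownership of a ServerManager under a lock, refuses new servers during shutdown, and drives the processing loop on the calling thread. A reduced sketch of that ownership hand-off (ServerManager here is a stand-in, not the real Service::ServerManager):

#include <memory>
#include <mutex>
#include <vector>

struct ServerManager {
    void LoopProcess() { /* service requests until shutdown */ }
};

class Kernel {
public:
    void RunServer(std::unique_ptr<ServerManager>&& server_manager) {
        auto* manager = server_manager.get();
        {
            std::scoped_lock lk{m_server_lock};
            if (m_is_shutting_down) {
                return; // dropped: the unique_ptr frees the manager here
            }
            m_server_managers.emplace_back(std::move(server_manager));
        }
        // Loop outside the lock; the kernel now owns the manager's lifetime.
        manager->LoopProcess();
    }

private:
    std::mutex m_server_lock;
    std::vector<std::unique_ptr<ServerManager>> m_server_managers;
    bool m_is_shutting_down{};
};

int main() {
    Kernel kernel;
    kernel.RunServer(std::make_unique<ServerManager>());
}
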
@@ -1126,6 +1072,99 @@ void KernelCore::RegisterHostThread(KThread* existing_thread) {
}
}
+static std::jthread RunHostThreadFunc(KernelCore& kernel, KProcess* process,
+ std::string&& thread_name, std::function<void()>&& func) {
+ // Reserve a new thread from the process resource limit.
+ KScopedResourceReservation thread_reservation(process, LimitableResource::ThreadCountMax);
+ ASSERT(thread_reservation.Succeeded());
+
+ // Initialize the thread.
+ KThread* thread = KThread::Create(kernel);
+ ASSERT(R_SUCCEEDED(KThread::InitializeDummyThread(thread, process)));
+
+ // Commit the thread reservation.
+ thread_reservation.Commit();
+
+ // Register the thread.
+ KThread::Register(kernel, thread);
+
+ return std::jthread(
+ [&kernel, thread, thread_name{std::move(thread_name)}, func{std::move(func)}] {
+ // Set the thread name.
+ Common::SetCurrentThreadName(thread_name.c_str());
+
+ // Set the thread as current.
+ kernel.RegisterHostThread(thread);
+
+ // Run the callback.
+ func();
+
+ // Close the thread.
+ // This will free the process if it is the last reference.
+ thread->Close();
+ });
+}
+
+std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
+ std::function<void()> func) {
+ // Make a new process.
+ KProcess* process = KProcess::Create(*this);
+ ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
+ GetSystemResourceLimit())));
+
+ // Ensure that we don't hold onto any extra references.
+ SCOPE_EXIT({ process->Close(); });
+
+ // Register the new process.
+ KProcess::Register(*this, process);
+
+ // Run the host thread.
+ return RunHostThreadFunc(*this, process, std::move(process_name), std::move(func));
+}
+
+std::jthread KernelCore::RunOnHostCoreThread(std::string&& thread_name,
+ std::function<void()> func) {
+ // Get the current process.
+ KProcess* process = GetCurrentProcessPointer(*this);
+
+ // Run the host thread.
+ return RunHostThreadFunc(*this, process, std::move(thread_name), std::move(func));
+}
+
+void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function<void()> func) {
+ constexpr s32 ServiceThreadPriority = 16;
+ constexpr s32 ServiceThreadCore = 3;
+
+ // Make a new process.
+ KProcess* process = KProcess::Create(*this);
+ ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
+ GetSystemResourceLimit())));
+
+ // Ensure that we don't hold onto any extra references.
+ SCOPE_EXIT({ process->Close(); });
+
+ // Register the new process.
+ KProcess::Register(*this, process);
+
+ // Reserve a new thread from the process resource limit.
+ KScopedResourceReservation thread_reservation(process, LimitableResource::ThreadCountMax);
+ ASSERT(thread_reservation.Succeeded());
+
+ // Initialize the thread.
+ KThread* thread = KThread::Create(*this);
+ ASSERT(R_SUCCEEDED(KThread::InitializeServiceThread(
+ System(), thread, std::move(func), ServiceThreadPriority, ServiceThreadCore, process)));
+
+ // Commit the thread reservation.
+ thread_reservation.Commit();
+
+ // Register the new thread.
+ KThread::Register(*this, thread);
+
+ // Begin running the thread.
+ ASSERT(R_SUCCEEDED(thread->Run()));
+}
+
u32 KernelCore::GetCurrentHostThreadID() const {
return impl->GetCurrentHostThreadID();
}
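
Aside: RunOnHostCoreProcess/RunOnHostCoreThread funnel into RunHostThreadFunc, which creates a dummy guest thread and then hands the work to a std::jthread that registers itself, runs the callback, and closes the guest thread on exit. A simplified sketch of the capture-by-move jthread pattern (the Thread type is a toy stand-in):

#include <cstdio>
#include <functional>
#include <string>
#include <thread>

struct Thread {
    void Close() { std::puts("guest thread reference dropped"); }
};

std::jthread RunHostThreadFunc(Thread* thread, std::string&& thread_name,
                               std::function<void()>&& func) {
    // Move the name and callback into the jthread so no dangling refs remain.
    return std::jthread(
        [thread, thread_name{std::move(thread_name)}, func{std::move(func)}] {
            std::printf("host thread '%s' starting\n", thread_name.c_str());
            func();          // run the caller's work
            thread->Close(); // drop the guest-thread reference last
        });
}

int main() {
    Thread dummy;
    auto t = RunHostThreadFunc(&dummy, "SampleHostThread",
                               [] { std::puts("doing work"); });
    // ~jthread joins automatically.
}
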
@@ -1138,6 +1177,10 @@ void KernelCore::SetCurrentEmuThread(KThread* thread) {
impl->SetCurrentEmuThread(thread);
}
+KObjectNameGlobalData& KernelCore::ObjectNameGlobalData() {
+ return *impl->object_name_global_data;
+}
+
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
@@ -1146,6 +1189,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
return *impl->memory_manager;
}
+KSystemResource& KernelCore::GetAppSystemResource() {
+ return *impl->app_system_resource;
+}
+
+const KSystemResource& KernelCore::GetAppSystemResource() const {
+ return *impl->app_system_resource;
+}
+
KSystemResource& KernelCore::GetSystemSystemResource() {
return *impl->sys_system_resource;
}
@@ -1194,32 +1245,39 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
return *impl->hidbus_shared_mem;
}
-void KernelCore::Suspend(bool suspended) {
+void KernelCore::SuspendApplication(bool suspended) {
const bool should_suspend{exception_exited || suspended};
const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
- std::vector<KScopedAutoObject<KThread>> process_threads;
- {
- KScopedSchedulerLock sl{*this};
+ // Get the application process.
+ KScopedAutoObject<KProcess> process = ApplicationProcess();
+ if (process.IsNull()) {
+ return;
+ }
- if (auto* process = CurrentProcess(); process != nullptr) {
- process->SetActivity(activity);
+ // Set the new activity.
+ process->SetActivity(activity);
- if (!should_suspend) {
- // Runnable now; no need to wait.
- return;
- }
+ // Wait for process execution to stop.
+ bool must_wait{should_suspend};
+
+ // KernelCore::SuspendApplication must be called from locked context,
+ // or we could race another call to SetActivity, interfering with waiting.
+ while (must_wait) {
+ KScopedSchedulerLock sl{*this};
+
+ // Assume that all threads have finished running.
+ must_wait = false;
- for (auto* thread : process->GetThreadList()) {
- process_threads.emplace_back(thread);
+ for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
+ process.GetPointerUnsafe()) {
+ // A thread has not finished running yet.
+ // Continue waiting.
+ must_wait = true;
}
}
}
-
- // Wait for execution to stop.
- for (auto& thread : process_threads) {
- thread->WaitUntilSuspended();
- }
}
void KernelCore::ShutdownCores() {
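
Aside: SuspendApplication() above no longer snapshots and joins every process thread; it sets the activity, then repeatedly takes the scheduler lock and re-checks whether any core is still running one of the process's threads. A host-level sketch of that poll-until-drained loop, with atomics standing in for the scheduler lock and per-core state:

#include <array>
#include <atomic>
#include <chrono>
#include <cstddef>
#include <thread>

constexpr std::size_t NumCores = 4;

struct Process {};

// Per-core "currently running" owner, set by a stand-in scheduler.
std::array<std::atomic<const Process*>, NumCores> current_owner{};

void WaitUntilSuspended(const Process* process) {
    bool must_wait = true;
    while (must_wait) {
        // In the kernel this whole check runs under KScopedSchedulerLock.
        must_wait = false;
        for (std::size_t i = 0; i < NumCores; ++i) {
            if (current_owner[i].load() == process) {
                must_wait = true; // a core is still executing this process
            }
        }
        if (must_wait) {
            std::this_thread::yield();
        }
    }
}

int main() {
    Process p;
    current_owner[1].store(&p);
    std::jthread worker{[&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        current_owner[1].store(nullptr); // the thread migrates off the core
    }};
    WaitUntilSuspended(&p);
}
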
@@ -1238,9 +1296,9 @@ bool KernelCore::IsShuttingDown() const {
return impl->IsShuttingDown();
}
-void KernelCore::ExceptionalExit() {
+void KernelCore::ExceptionalExitApplication() {
exception_exited = true;
- Suspend(true);
+ SuspendApplication(true);
}
void KernelCore::EnterSVCProfile() {
@@ -1251,18 +1309,6 @@ void KernelCore::ExitSVCProfile() {
MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
}
-Kernel::ServiceThread& KernelCore::CreateServiceThread(const std::string& name) {
- return impl->CreateServiceThread(*this, name);
-}
-
-Kernel::ServiceThread& KernelCore::GetDefaultServiceThread() const {
- return *impl->default_service_thread;
-}
-
-void KernelCore::ReleaseServiceThread(Kernel::ServiceThread& service_thread) {
- impl->ReleaseServiceThread(service_thread);
-}
-
Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {
return impl->slab_resource_counts;
}
@@ -1299,4 +1345,93 @@ const Core::System& KernelCore::System() const {
return impl->system;
}
+struct KernelCore::SlabHeapContainer {
+ KSlabHeap<KClientSession> client_session;
+ KSlabHeap<KEvent> event;
+ KSlabHeap<KPort> port;
+ KSlabHeap<KProcess> process;
+ KSlabHeap<KResourceLimit> resource_limit;
+ KSlabHeap<KSession> session;
+ KSlabHeap<KSharedMemory> shared_memory;
+ KSlabHeap<KSharedMemoryInfo> shared_memory_info;
+ KSlabHeap<KThread> thread;
+ KSlabHeap<KTransferMemory> transfer_memory;
+ KSlabHeap<KCodeMemory> code_memory;
+ KSlabHeap<KDeviceAddressSpace> device_address_space;
+ KSlabHeap<KPageBuffer> page_buffer;
+ KSlabHeap<KThreadLocalPage> thread_local_page;
+ KSlabHeap<KObjectName> object_name;
+ KSlabHeap<KSessionRequest> session_request;
+ KSlabHeap<KSecureSystemResource> secure_system_resource;
+ KSlabHeap<KThread::LockWithPriorityInheritanceInfo> lock_info;
+ KSlabHeap<KEventInfo> event_info;
+ KSlabHeap<KDebug> debug;
+};
+
+template <typename T>
+KSlabHeap<T>& KernelCore::SlabHeap() {
+ if constexpr (std::is_same_v<T, KClientSession>) {
+ return slab_heap_container->client_session;
+ } else if constexpr (std::is_same_v<T, KEvent>) {
+ return slab_heap_container->event;
+ } else if constexpr (std::is_same_v<T, KPort>) {
+ return slab_heap_container->port;
+ } else if constexpr (std::is_same_v<T, KProcess>) {
+ return slab_heap_container->process;
+ } else if constexpr (std::is_same_v<T, KResourceLimit>) {
+ return slab_heap_container->resource_limit;
+ } else if constexpr (std::is_same_v<T, KSession>) {
+ return slab_heap_container->session;
+ } else if constexpr (std::is_same_v<T, KSharedMemory>) {
+ return slab_heap_container->shared_memory;
+ } else if constexpr (std::is_same_v<T, KSharedMemoryInfo>) {
+ return slab_heap_container->shared_memory_info;
+ } else if constexpr (std::is_same_v<T, KThread>) {
+ return slab_heap_container->thread;
+ } else if constexpr (std::is_same_v<T, KTransferMemory>) {
+ return slab_heap_container->transfer_memory;
+ } else if constexpr (std::is_same_v<T, KCodeMemory>) {
+ return slab_heap_container->code_memory;
+ } else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
+ return slab_heap_container->device_address_space;
+ } else if constexpr (std::is_same_v<T, KPageBuffer>) {
+ return slab_heap_container->page_buffer;
+ } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
+ return slab_heap_container->thread_local_page;
+ } else if constexpr (std::is_same_v<T, KObjectName>) {
+ return slab_heap_container->object_name;
+ } else if constexpr (std::is_same_v<T, KSessionRequest>) {
+ return slab_heap_container->session_request;
+ } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
+ return slab_heap_container->secure_system_resource;
+ } else if constexpr (std::is_same_v<T, KThread::LockWithPriorityInheritanceInfo>) {
+ return slab_heap_container->lock_info;
+ } else if constexpr (std::is_same_v<T, KEventInfo>) {
+ return slab_heap_container->event_info;
+ } else if constexpr (std::is_same_v<T, KDebug>) {
+ return slab_heap_container->debug;
+ }
+}
+
+template KSlabHeap<KClientSession>& KernelCore::SlabHeap();
+template KSlabHeap<KEvent>& KernelCore::SlabHeap();
+template KSlabHeap<KPort>& KernelCore::SlabHeap();
+template KSlabHeap<KProcess>& KernelCore::SlabHeap();
+template KSlabHeap<KResourceLimit>& KernelCore::SlabHeap();
+template KSlabHeap<KSession>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemoryInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KThread>& KernelCore::SlabHeap();
+template KSlabHeap<KTransferMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KCodeMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KDeviceAddressSpace>& KernelCore::SlabHeap();
+template KSlabHeap<KPageBuffer>& KernelCore::SlabHeap();
+template KSlabHeap<KThreadLocalPage>& KernelCore::SlabHeap();
+template KSlabHeap<KObjectName>& KernelCore::SlabHeap();
+template KSlabHeap<KSessionRequest>& KernelCore::SlabHeap();
+template KSlabHeap<KSecureSystemResource>& KernelCore::SlabHeap();
+template KSlabHeap<KThread::LockWithPriorityInheritanceInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KEventInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KDebug>& KernelCore::SlabHeap();
+
} // namespace Kernel
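// A reduced sketch of the idiom used above (Registry, Buckets, and Get are
// illustrative names, not part of this change). Moving both the container
// definition and the member-template definition into the .cpp keeps the header
// to a forward declaration, and the explicit instantiations at the end supply
// the definitions the linker needs for every supported type:
//
// registry.h
//     struct Registry {
//         template <typename T>
//         T& Get();
//     private:
//         struct Buckets;                  // opaque in the header
//         std::unique_ptr<Buckets> buckets;
//     };
//
// registry.cpp
//     struct Registry::Buckets {
//         int i;
//         double d;
//     };
//
//     template <typename T>
//     T& Registry::Get() {
//         if constexpr (std::is_same_v<T, int>) {
//             return buckets->i;
//         } else if constexpr (std::is_same_v<T, double>) {
//             return buckets->d;
//         }
//     }
//
//     template int& Registry::Get();       // explicit instantiations, as above
//     template double& Registry::Get();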
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 8d22f8d2c..d5b08eeb5 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -9,9 +9,12 @@
#include <string>
#include <unordered_map>
#include <vector>
+
+#include "common/polyfill_thread.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/svc_common.h"
namespace Core {
@@ -24,6 +27,10 @@ class CoreTiming;
struct EventType;
} // namespace Core::Timing
+namespace Service {
+class ServerManager;
+}
+
namespace Service::SM {
class ServiceManager;
}
@@ -35,14 +42,16 @@ class GlobalSchedulerContext;
class KAutoObjectWithListContainer;
class KClientSession;
class KDebug;
+class KDeviceAddressSpace;
class KDynamicPageManager;
class KEvent;
class KEventInfo;
class KHandleTable;
class KHardwareTimer;
-class KLinkedListNode;
class KMemoryLayout;
class KMemoryManager;
+class KObjectName;
+class KObjectNameGlobalData;
class KPageBuffer;
class KPageBufferSlabHeap;
class KPort;
@@ -62,13 +71,6 @@ class KTransferMemory;
class KWorkerTaskManager;
class KCodeMemory;
class PhysicalCore;
-class ServiceThread;
-class Synchronization;
-
-using ServiceInterfaceFactory =
- std::function<KClientPort&(Service::SM::ServiceManager&, Core::System&)>;
-
-using ServiceInterfaceHandlerFn = std::function<void(Service::SM::ServiceManager&, KServerPort*)>;
namespace Init {
struct KSlabResourceCounts;
@@ -77,15 +79,8 @@ struct KSlabResourceCounts;
template <typename T>
class KSlabHeap;
-using EmuThreadHandle = uintptr_t;
-constexpr EmuThreadHandle EmuThreadHandleInvalid{};
-constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
-
/// Represents a single instance of the kernel.
class KernelCore {
-private:
- using NamedPortTable = std::unordered_map<std::string, KClientPort*>;
-
public:
/// Constructs an instance of the kernel using the given System
/// instance as a context for any necessary system-related state,
@@ -130,17 +125,17 @@ public:
/// Adds the given shared pointer to an internal list of active processes.
void AppendNewProcess(KProcess* process);
- /// Makes the given process the new current process.
- void MakeCurrentProcess(KProcess* process);
+ /// Makes the given process the new application process.
+ void MakeApplicationProcess(KProcess* process);
- /// Retrieves a pointer to the current process.
- KProcess* CurrentProcess();
+ /// Retrieves a pointer to the application process.
+ KProcess* ApplicationProcess();
- /// Retrieves a const pointer to the current process.
- const KProcess* CurrentProcess() const;
+ /// Retrieves a const pointer to the application process.
+ const KProcess* ApplicationProcess() const;
- /// Closes the current process.
- void CloseCurrentProcess();
+ /// Closes the application process.
+ void CloseApplicationProcess();
/// Retrieves the list of processes.
const std::vector<KProcess*>& GetProcessList() const;
@@ -191,19 +186,7 @@ public:
void InvalidateAllInstructionCaches();
- void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
-
- /// Registers a named HLE service, passing a factory used to open a port to that service.
- void RegisterNamedService(std::string name, ServiceInterfaceFactory&& factory);
-
- /// Registers a setup function for the named HLE service.
- void RegisterInterfaceForNamedService(std::string name, ServiceInterfaceHandlerFn&& handler);
-
- /// Opens a port to a service previously registered with RegisterNamedService.
- KClientPort* CreateNamedServicePort(std::string name);
-
- /// Accepts a session on a port created by CreateNamedServicePort.
- void RegisterNamedServiceHandler(std::string name, KServerPort* server_port);
+ void InvalidateCpuInstructionCacheRange(KProcessAddress addr, std::size_t size);
/// Registers all kernel objects with the global emulation state, this is purely for tracking
/// leaks after emulation has been shutdown.
@@ -221,8 +204,8 @@ public:
/// destroyed during the current emulation session.
void UnregisterInUseObject(KAutoObject* object);
- /// Determines whether or not the given port is a valid named port.
- bool IsValidNamedPort(NamedPortTable::const_iterator port) const;
+    /// Runs the given server manager until shutdown.
+ void RunServer(std::unique_ptr<Service::ServerManager>&& server_manager);
/// Gets the current host_thread/guest_thread pointer.
KThread* GetCurrentEmuThread() const;
@@ -239,12 +222,27 @@ public:
/// Register the current thread as a non CPU core thread.
void RegisterHostThread(KThread* existing_thread = nullptr);
+ void RunOnGuestCoreProcess(std::string&& process_name, std::function<void()> func);
+
+ std::jthread RunOnHostCoreProcess(std::string&& process_name, std::function<void()> func);
+
+ std::jthread RunOnHostCoreThread(std::string&& thread_name, std::function<void()> func);
+
+ /// Gets global data for KObjectName.
+ KObjectNameGlobalData& ObjectNameGlobalData();
+
/// Gets the virtual memory manager for the kernel.
KMemoryManager& MemoryManager();
/// Gets the virtual memory manager for the kernel.
const KMemoryManager& MemoryManager() const;
+ /// Gets the application resource manager.
+ KSystemResource& GetAppSystemResource();
+
+ /// Gets the application resource manager.
+ const KSystemResource& GetAppSystemResource() const;
+
/// Gets the system resource manager.
KSystemResource& GetSystemSystemResource();
@@ -281,11 +279,11 @@ public:
/// Gets the shared memory object for HIDBus services.
const Kernel::KSharedMemory& GetHidBusSharedMem() const;
- /// Suspend/unsuspend all processes.
- void Suspend(bool suspend);
+    /// Suspend/unsuspend the application process.
+ void SuspendApplication(bool suspend);
- /// Exceptional exit all processes.
- void ExceptionalExit();
+    /// Exceptionally exit the application process.
+ void ExceptionalExitApplication();
/// Notify emulated CPU cores to shut down.
void ShutdownCores();
@@ -298,33 +296,6 @@ public:
void ExitSVCProfile();
- /**
- * Creates a host thread to execute HLE service requests, which are used to execute service
- * routines asynchronously. While these are allocated per ServerSession, these need to be owned
- * and managed outside of ServerSession to avoid a circular dependency. In general, most
- * services can just use the default service thread, and not need their own host service thread.
- * See GetDefaultServiceThread.
- * @param name String name for the ServerSession creating this thread, used for debug
- * purposes.
- * @returns A reference to the newly created service thread.
- */
- Kernel::ServiceThread& CreateServiceThread(const std::string& name);
-
- /**
- * Gets the default host service thread, which executes HLE service requests. Unless service
- * requests need to block on the host, the default service thread should be used in favor of
- * creating a new service thread.
- * @returns A reference to the default service thread.
- */
- Kernel::ServiceThread& GetDefaultServiceThread() const;
-
- /**
- * Releases a HLE service thread, instructing KernelCore to free it. This should be called when
- * the ServerSession associated with the thread is destroyed.
- * @param service_thread Service thread to release.
- */
- void ReleaseServiceThread(Kernel::ServiceThread& service_thread);
-
/// Workaround for single-core mode when preempting threads while idle.
bool IsPhantomModeForSingleCore() const;
void SetIsPhantomModeForSingleCore(bool value);
@@ -334,45 +305,7 @@ public:
/// Gets the slab heap for the specified kernel object type.
template <typename T>
- KSlabHeap<T>& SlabHeap() {
- if constexpr (std::is_same_v<T, KClientSession>) {
- return slab_heap_container->client_session;
- } else if constexpr (std::is_same_v<T, KEvent>) {
- return slab_heap_container->event;
- } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
- return slab_heap_container->linked_list_node;
- } else if constexpr (std::is_same_v<T, KPort>) {
- return slab_heap_container->port;
- } else if constexpr (std::is_same_v<T, KProcess>) {
- return slab_heap_container->process;
- } else if constexpr (std::is_same_v<T, KResourceLimit>) {
- return slab_heap_container->resource_limit;
- } else if constexpr (std::is_same_v<T, KSession>) {
- return slab_heap_container->session;
- } else if constexpr (std::is_same_v<T, KSharedMemory>) {
- return slab_heap_container->shared_memory;
- } else if constexpr (std::is_same_v<T, KSharedMemoryInfo>) {
- return slab_heap_container->shared_memory_info;
- } else if constexpr (std::is_same_v<T, KThread>) {
- return slab_heap_container->thread;
- } else if constexpr (std::is_same_v<T, KTransferMemory>) {
- return slab_heap_container->transfer_memory;
- } else if constexpr (std::is_same_v<T, KCodeMemory>) {
- return slab_heap_container->code_memory;
- } else if constexpr (std::is_same_v<T, KPageBuffer>) {
- return slab_heap_container->page_buffer;
- } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
- return slab_heap_container->thread_local_page;
- } else if constexpr (std::is_same_v<T, KSessionRequest>) {
- return slab_heap_container->session_request;
- } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
- return slab_heap_container->secure_system_resource;
- } else if constexpr (std::is_same_v<T, KEventInfo>) {
- return slab_heap_container->event_info;
- } else if constexpr (std::is_same_v<T, KDebug>) {
- return slab_heap_container->debug;
- }
- }
+ KSlabHeap<T>& SlabHeap();
/// Gets the current slab resource counts.
Init::KSlabResourceCounts& SlabResourceCounts();
@@ -418,26 +351,7 @@ private:
private:
/// Helper to encapsulate all slab heaps in a single heap allocated container
- struct SlabHeapContainer {
- KSlabHeap<KClientSession> client_session;
- KSlabHeap<KEvent> event;
- KSlabHeap<KLinkedListNode> linked_list_node;
- KSlabHeap<KPort> port;
- KSlabHeap<KProcess> process;
- KSlabHeap<KResourceLimit> resource_limit;
- KSlabHeap<KSession> session;
- KSlabHeap<KSharedMemory> shared_memory;
- KSlabHeap<KSharedMemoryInfo> shared_memory_info;
- KSlabHeap<KThread> thread;
- KSlabHeap<KTransferMemory> transfer_memory;
- KSlabHeap<KCodeMemory> code_memory;
- KSlabHeap<KPageBuffer> page_buffer;
- KSlabHeap<KThreadLocalPage> thread_local_page;
- KSlabHeap<KSessionRequest> session_request;
- KSlabHeap<KSecureSystemResource> secure_system_resource;
- KSlabHeap<KEventInfo> event_info;
- KSlabHeap<KDebug> debug;
- };
+ struct SlabHeapContainer;
std::unique_ptr<SlabHeapContainer> slab_heap_container;
};
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index 92b8b37ac..18de675cc 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -6,6 +6,7 @@
#include <array>
#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -14,7 +15,4 @@ constexpr std::size_t PageSize{1 << PageBits};
using Page = std::array<u8, PageSize>;
-using KPhysicalAddress = PAddr;
-using KProcessAddress = VAddr;
-
} // namespace Kernel
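// The aliases above move to k_typed_address.h. As a hedged sketch of what a
// typed address buys (the actual header may differ), the goal is that physical
// and process addresses become distinct types rather than two names for the
// same integer, so mixing them no longer compiles:
#include <cstdint>

template <typename Tag>
class TypedAddressSketch {
public:
    constexpr explicit TypedAddressSketch(std::uint64_t value = 0) : m_value{value} {}
    constexpr std::uint64_t GetValue() const { return m_value; }
private:
    std::uint64_t m_value;
};

using PhysicalAddressSketch = TypedAddressSketch<struct PhysicalTag>;
using ProcessAddressSketch = TypedAddressSketch<struct ProcessTag>;

// PhysicalAddressSketch p{0x1000};
// ProcessAddressSketch v = p; // does not compile: distinct types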
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 3044922ac..2e0c36129 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -10,14 +10,14 @@
namespace Kernel {
-PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_)
- : core_index{core_index_}, system{system_}, scheduler{scheduler_} {
+PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KScheduler& scheduler)
+ : m_core_index{core_index}, m_system{system}, m_scheduler{scheduler} {
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
// TODO(bunnei): Initialization relies on a core being available. We may later replace this with
// a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
auto& kernel = system.Kernel();
- arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
- system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ m_arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
+ system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
#else
#error Platform not supported yet.
#endif
@@ -25,13 +25,13 @@ PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KSche
PhysicalCore::~PhysicalCore() = default;
-void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
+void PhysicalCore::Initialize(bool is_64_bit) {
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
- auto& kernel = system.Kernel();
+ auto& kernel = m_system.Kernel();
if (!is_64_bit) {
// We already initialized a 64-bit core, replace with a 32-bit one.
- arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
- system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ m_arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
+ m_system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
}
#else
#error Platform not supported yet.
@@ -39,31 +39,30 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
}
void PhysicalCore::Run() {
- arm_interface->Run();
- arm_interface->ClearExclusiveState();
+ m_arm_interface->Run();
+ m_arm_interface->ClearExclusiveState();
}
void PhysicalCore::Idle() {
- std::unique_lock lk{guard};
- on_interrupt.wait(lk, [this] { return is_interrupted; });
+ std::unique_lock lk{m_guard};
+ m_on_interrupt.wait(lk, [this] { return m_is_interrupted; });
}
bool PhysicalCore::IsInterrupted() const {
- return is_interrupted;
+ return m_is_interrupted;
}
void PhysicalCore::Interrupt() {
- std::unique_lock lk{guard};
- is_interrupted = true;
- arm_interface->SignalInterrupt();
- on_interrupt.notify_all();
+ std::unique_lock lk{m_guard};
+ m_is_interrupted = true;
+ m_arm_interface->SignalInterrupt();
+ m_on_interrupt.notify_all();
}
void PhysicalCore::ClearInterrupt() {
- std::unique_lock lk{guard};
- is_interrupted = false;
- arm_interface->ClearInterrupt();
- on_interrupt.notify_all();
+ std::unique_lock lk{m_guard};
+ m_is_interrupted = false;
+ m_arm_interface->ClearInterrupt();
}
} // namespace Kernel
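// A self-contained sketch of the Idle/Interrupt handshake above (CoreSketch is
// an illustrative name). A core parks in wait() until the flag is set under the
// same mutex; Interrupt() wakes it; clearing needs no notify, since waiters only
// wait for the flag to become true, matching the change above.
#include <condition_variable>
#include <mutex>

struct CoreSketch {
    std::mutex guard;
    std::condition_variable on_interrupt;
    bool is_interrupted{false};

    void Idle() {
        std::unique_lock lk{guard};
        on_interrupt.wait(lk, [this] { return is_interrupted; }); // sleep until flagged
    }

    void Interrupt() {
        std::unique_lock lk{guard};
        is_interrupted = true;
        on_interrupt.notify_all(); // wake any core parked in Idle()
    }

    void ClearInterrupt() {
        std::unique_lock lk{guard};
        is_interrupted = false;
    }
};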
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index fb2ba4c6b..5cb398fdc 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -3,6 +3,7 @@
#pragma once
+#include <condition_variable>
#include <cstddef>
#include <memory>
#include <mutex>
@@ -46,46 +47,38 @@ public:
bool IsInterrupted() const;
bool IsInitialized() const {
- return arm_interface != nullptr;
+ return m_arm_interface != nullptr;
}
Core::ARM_Interface& ArmInterface() {
- return *arm_interface;
+ return *m_arm_interface;
}
const Core::ARM_Interface& ArmInterface() const {
- return *arm_interface;
- }
-
- bool IsMainCore() const {
- return core_index == 0;
- }
-
- bool IsSystemCore() const {
- return core_index == 3;
+ return *m_arm_interface;
}
std::size_t CoreIndex() const {
- return core_index;
+ return m_core_index;
}
Kernel::KScheduler& Scheduler() {
- return scheduler;
+ return m_scheduler;
}
const Kernel::KScheduler& Scheduler() const {
- return scheduler;
+ return m_scheduler;
}
private:
- const std::size_t core_index;
- Core::System& system;
- Kernel::KScheduler& scheduler;
-
- std::mutex guard;
- std::condition_variable on_interrupt;
- std::unique_ptr<Core::ARM_Interface> arm_interface;
- bool is_interrupted{};
+ const std::size_t m_core_index;
+ Core::System& m_system;
+ Kernel::KScheduler& m_scheduler;
+
+ std::mutex m_guard;
+ std::condition_variable m_on_interrupt;
+ std::unique_ptr<Core::ARM_Interface> m_arm_interface;
+ bool m_is_interrupted{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
deleted file mode 100644
index 38afa720b..000000000
--- a/src/core/hle/kernel/service_thread.cpp
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <functional>
-#include <map>
-#include <mutex>
-#include <thread>
-#include <vector>
-
-#include "common/polyfill_thread.h"
-#include "common/scope_exit.h"
-#include "common/thread.h"
-#include "core/hle/ipc_helpers.h"
-#include "core/hle/kernel/hle_ipc.h"
-#include "core/hle/kernel/k_event.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_session.h"
-#include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/service_thread.h"
-
-namespace Kernel {
-
-class ServiceThread::Impl final {
-public:
- explicit Impl(KernelCore& kernel, const std::string& service_name);
- ~Impl();
-
- void WaitAndProcessImpl();
- void SessionClosed(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager);
- void LoopProcess();
-
- void RegisterServerSession(KServerSession* session,
- std::shared_ptr<SessionRequestManager> manager);
-
-private:
- KernelCore& kernel;
- const std::string m_service_name;
-
- std::jthread m_host_thread{};
- std::mutex m_session_mutex{};
- std::map<KServerSession*, std::shared_ptr<SessionRequestManager>> m_sessions{};
- KEvent* m_wakeup_event{};
- KThread* m_thread{};
- std::atomic<bool> m_shutdown_requested{};
-};
-
-void ServiceThread::Impl::WaitAndProcessImpl() {
- // Create local list of waitable sessions.
- std::vector<KSynchronizationObject*> objs;
- std::vector<std::shared_ptr<SessionRequestManager>> managers;
-
- {
- // Lock to get the set.
- std::scoped_lock lk{m_session_mutex};
-
- // Reserve the needed quantity.
- objs.reserve(m_sessions.size() + 1);
- managers.reserve(m_sessions.size());
-
- // Copy to our local list.
- for (const auto& [session, manager] : m_sessions) {
- objs.push_back(session);
- managers.push_back(manager);
- }
-
- // Insert the wakeup event at the end.
- objs.push_back(&m_wakeup_event->GetReadableEvent());
- }
-
- // Wait on the list of sessions.
- s32 index{-1};
- Result rc = KSynchronizationObject::Wait(kernel, &index, objs.data(),
- static_cast<s32>(objs.size()), -1);
- ASSERT(!rc.IsFailure());
-
- // If this was the wakeup event, clear it and finish.
- if (index >= static_cast<s64>(objs.size() - 1)) {
- m_wakeup_event->Clear();
- return;
- }
-
- // This event is from a server session.
- auto* server_session = static_cast<KServerSession*>(objs[index]);
- auto& manager = managers[index];
-
- // Fetch the HLE request context.
- std::shared_ptr<HLERequestContext> context;
- rc = server_session->ReceiveRequest(&context, manager);
-
- // If the session was closed, handle that.
- if (rc == ResultSessionClosed) {
- SessionClosed(server_session, manager);
-
- // Finish.
- return;
- }
-
- // TODO: handle other cases
- ASSERT(rc == ResultSuccess);
-
- // Perform the request.
- Result service_rc = manager->CompleteSyncRequest(server_session, *context);
-
- // Reply to the client.
- rc = server_session->SendReplyHLE();
-
- if (rc == ResultSessionClosed || service_rc == IPC::ERR_REMOTE_PROCESS_DEAD) {
- SessionClosed(server_session, manager);
- return;
- }
-
- // TODO: handle other cases
- ASSERT(rc == ResultSuccess);
- ASSERT(service_rc == ResultSuccess);
-}
-
-void ServiceThread::Impl::SessionClosed(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager) {
- {
- // Lock to get the set.
- std::scoped_lock lk{m_session_mutex};
-
- // Erase the session.
- ASSERT(m_sessions.erase(server_session) == 1);
- }
-
- // Close our reference to the server session.
- server_session->Close();
-}
-
-void ServiceThread::Impl::LoopProcess() {
- Common::SetCurrentThreadName(m_service_name.c_str());
-
- kernel.RegisterHostThread(m_thread);
-
- while (!m_shutdown_requested.load()) {
- WaitAndProcessImpl();
- }
-}
-
-void ServiceThread::Impl::RegisterServerSession(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager) {
- // Open the server session.
- server_session->Open();
-
- {
- // Lock to get the set.
- std::scoped_lock lk{m_session_mutex};
-
- // Insert the session and manager.
- m_sessions[server_session] = manager;
- }
-
- // Signal the wakeup event.
- m_wakeup_event->Signal();
-}
-
-ServiceThread::Impl::~Impl() {
- // Shut down the processing thread.
- m_shutdown_requested.store(true);
- m_wakeup_event->Signal();
- m_host_thread.join();
-
- // Close all remaining sessions.
- for (const auto& [server_session, manager] : m_sessions) {
- server_session->Close();
- }
-
- // Destroy remaining managers.
- m_sessions.clear();
-
- // Close event.
- m_wakeup_event->GetReadableEvent().Close();
- m_wakeup_event->Close();
-
- // Close thread.
- m_thread->Close();
-}
-
-ServiceThread::Impl::Impl(KernelCore& kernel_, const std::string& service_name)
- : kernel{kernel_}, m_service_name{service_name} {
- // Initialize event.
- m_wakeup_event = KEvent::Create(kernel);
- m_wakeup_event->Initialize(nullptr);
-
- // Initialize thread.
- m_thread = KThread::Create(kernel);
- ASSERT(KThread::InitializeDummyThread(m_thread, nullptr).IsSuccess());
-
- // Start thread.
- m_host_thread = std::jthread([this] { LoopProcess(); });
-}
-
-ServiceThread::ServiceThread(KernelCore& kernel, const std::string& name)
- : impl{std::make_unique<Impl>(kernel, name)} {}
-
-ServiceThread::~ServiceThread() = default;
-
-void ServiceThread::RegisterServerSession(KServerSession* session,
- std::shared_ptr<SessionRequestManager> manager) {
- impl->RegisterServerSession(session, manager);
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h
deleted file mode 100644
index fb4325531..000000000
--- a/src/core/hle/kernel/service_thread.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <memory>
-#include <string>
-
-namespace Kernel {
-
-class HLERequestContext;
-class KernelCore;
-class KSession;
-class SessionRequestManager;
-
-class ServiceThread final {
-public:
- explicit ServiceThread(KernelCore& kernel, const std::string& name);
- ~ServiceThread();
-
- void RegisterServerSession(KServerSession* session,
- std::shared_ptr<SessionRequestManager> manager);
-
-private:
- class Impl;
- std::unique_ptr<Impl> impl;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index 0228ce188..d1bbc7670 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -66,7 +66,7 @@ private:
}
public:
- explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
+ explicit KAutoObjectWithSlabHeap(KernelCore& kernel) : Base(kernel) {}
virtual ~KAutoObjectWithSlabHeap() = default;
virtual void Destroy() override {
@@ -76,7 +76,7 @@ public:
arg = this->GetPostDestroyArgument();
this->Finalize();
}
- Free(kernel, static_cast<Derived*>(this));
+ Free(Base::m_kernel, static_cast<Derived*>(this));
if (is_initialized) {
Derived::PostDestroy(arg);
}
@@ -90,7 +90,7 @@ public:
}
size_t GetSlabIndex() const {
- return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
+ return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
}
public:
@@ -125,14 +125,11 @@ public:
static size_t GetNumRemaining(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetNumRemaining();
}
-
-protected:
- KernelCore& kernel;
};
template <typename Derived, typename Base>
class KAutoObjectWithSlabHeapAndContainer : public Base {
- static_assert(std::is_base_of<KAutoObjectWithList, Base>::value);
+ static_assert(std::is_base_of_v<KAutoObjectWithList, Base>);
private:
static Derived* Allocate(KernelCore& kernel) {
@@ -144,18 +141,18 @@ private:
}
public:
- KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
+ KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel) : Base(kernel) {}
virtual ~KAutoObjectWithSlabHeapAndContainer() {}
virtual void Destroy() override {
const bool is_initialized = this->IsInitialized();
uintptr_t arg = 0;
if (is_initialized) {
- kernel.ObjectListContainer().Unregister(this);
+ Base::m_kernel.ObjectListContainer().Unregister(this);
arg = this->GetPostDestroyArgument();
this->Finalize();
}
- Free(kernel, static_cast<Derived*>(this));
+ Free(Base::m_kernel, static_cast<Derived*>(this));
if (is_initialized) {
Derived::PostDestroy(arg);
}
@@ -169,7 +166,7 @@ public:
}
size_t GetSlabIndex() const {
- return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
+ return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
}
public:
@@ -209,9 +206,6 @@ public:
static size_t GetNumRemaining(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetNumRemaining();
}
-
-protected:
- KernelCore& kernel;
};
} // namespace Kernel
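// Why the qualification matters: in a class template, unqualified lookup does
// not search dependent bases, so once the duplicated member is removed, the
// derived templates must reach the base's member explicitly. A minimal
// illustration with assumed names:
struct ConcreteBase {
    int m_kernel = 42;
};

template <typename Base>
struct DerivedSketch : Base {
    int Get() {
        // return m_kernel;    // error: m_kernel lives in a dependent base
        return Base::m_kernel; // OK; this->m_kernel works equally well
    }
};

// DerivedSketch<ConcreteBase>{}.Get() == 42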
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index aca442196..871d541d4 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1,3129 +1,4435 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <algorithm>
-#include <cinttypes>
-#include <iterator>
-#include <mutex>
-#include <vector>
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/common_funcs.h"
-#include "common/fiber.h"
-#include "common/logging/log.h"
-#include "common/scope_exit.h"
+// This file is automatically generated using svc_generator.py.
+
+#include <type_traits>
+
+#include "core/arm/arm_interface.h"
#include "core/core.h"
-#include "core/core_timing.h"
-#include "core/debugger/debugger.h"
-#include "core/hle/kernel/k_client_port.h"
-#include "core/hle/kernel/k_client_session.h"
-#include "core/hle/kernel/k_code_memory.h"
-#include "core/hle/kernel/k_event.h"
-#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_readable_event.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_session.h"
-#include "core/hle/kernel/k_shared_memory.h"
-#include "core/hle/kernel/k_synchronization_object.h"
-#include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/k_thread_queue.h"
-#include "core/hle/kernel/k_transfer_memory.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/svc.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/hle/kernel/svc_types.h"
-#include "core/hle/kernel/svc_wrap.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-#include "core/reporter.h"
namespace Kernel::Svc {
-namespace {
-
-// Checks if address + size is greater than the given address
-// This can return false if the size causes an overflow of a 64-bit type
-// or if the given size is zero.
-constexpr bool IsValidAddressRange(VAddr address, u64 size) {
- return address + size > address;
-}
-
-// Helper function that performs the common sanity checks for svcMapMemory
-// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
-// in the same order.
-Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
- u64 size) {
- if (!Common::Is4KBAligned(dst_addr)) {
- LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
- return ResultInvalidAddress;
- }
- if (!Common::Is4KBAligned(src_addr)) {
- LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
- return ResultInvalidSize;
- }
+static uint32_t GetReg32(Core::System& system, int n) {
+ return static_cast<uint32_t>(system.CurrentArmInterface().GetReg(n));
+}
- if (size == 0) {
- LOG_ERROR(Kernel_SVC, "Size is 0");
- return ResultInvalidSize;
- }
+static void SetReg32(Core::System& system, int n, uint32_t result) {
+ system.CurrentArmInterface().SetReg(n, static_cast<uint64_t>(result));
+}
- if (!Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
- return ResultInvalidSize;
- }
+static uint64_t GetReg64(Core::System& system, int n) {
+ return system.CurrentArmInterface().GetReg(n);
+}
- if (!IsValidAddressRange(dst_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
- dst_addr, size);
- return ResultInvalidCurrentMemory;
- }
+static void SetReg64(Core::System& system, int n, uint64_t result) {
+ system.CurrentArmInterface().SetReg(n, result);
+}
- if (!IsValidAddressRange(src_addr, size)) {
- LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
- src_addr, size);
- return ResultInvalidCurrentMemory;
- }
+// Like bit_cast, but handles the case where the source and destination
+// are differently sized.
+template <typename To, typename From>
+ requires(std::is_trivial_v<To> && std::is_trivially_copyable_v<From>)
+static To Convert(const From& from) {
+ To to{};
- if (!manager.IsInsideAddressSpace(src_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
- src_addr, size);
- return ResultInvalidCurrentMemory;
+ if constexpr (sizeof(To) >= sizeof(From)) {
+ std::memcpy(std::addressof(to), std::addressof(from), sizeof(From));
+ } else {
+ std::memcpy(std::addressof(to), std::addressof(from), sizeof(To));
}
- if (manager.IsOutsideStackRegion(dst_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
- dst_addr, size);
- return ResultInvalidMemoryRegion;
- }
+ return to;
+}
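// In effect, Convert is a width-tolerant bit_cast: memcpy runs over the smaller
// of the two sizes, so on a little-endian host narrowing keeps the low bytes
// and widening zero-extends. For example (values assume little-endian):
//
//     uint64_t wide = 0x1122334455667788;
//     Convert<uint32_t>(wide);                 // 0x55667788 (low half)
//     Convert<uint64_t>(uint32_t{0x55667788}); // 0x0000000055667788
//     Convert<std::array<uint32_t, 2>>(wide);  // {0x55667788, 0x11223344}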
- if (manager.IsInsideHeapRegion(dst_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination does not fit within the heap region, addr=0x{:016X}, "
- "size=0x{:016X}",
- dst_addr, size);
- return ResultInvalidMemoryRegion;
- }
+// clang-format off
+static_assert(sizeof(ArbitrationType) == 4);
+static_assert(sizeof(BreakReason) == 4);
+static_assert(sizeof(CodeMemoryOperation) == 4);
+static_assert(sizeof(DebugThreadParam) == 4);
+static_assert(sizeof(DeviceName) == 4);
+static_assert(sizeof(HardwareBreakPointRegisterName) == 4);
+static_assert(sizeof(Handle) == 4);
+static_assert(sizeof(InfoType) == 4);
+static_assert(sizeof(InterruptType) == 4);
+static_assert(sizeof(IoPoolType) == 4);
+static_assert(sizeof(KernelDebugType) == 4);
+static_assert(sizeof(KernelTraceState) == 4);
+static_assert(sizeof(LimitableResource) == 4);
+static_assert(sizeof(MemoryMapping) == 4);
+static_assert(sizeof(MemoryPermission) == 4);
+static_assert(sizeof(PageInfo) == 4);
+static_assert(sizeof(ProcessActivity) == 4);
+static_assert(sizeof(ProcessInfoType) == 4);
+static_assert(sizeof(Result) == 4);
+static_assert(sizeof(SignalType) == 4);
+static_assert(sizeof(SystemInfoType) == 4);
+static_assert(sizeof(ThreadActivity) == 4);
+static_assert(sizeof(ilp32::LastThreadContext) == 16);
+static_assert(sizeof(ilp32::PhysicalMemoryInfo) == 16);
+static_assert(sizeof(ilp32::SecureMonitorArguments) == 32);
+static_assert(sizeof(lp64::LastThreadContext) == 32);
+static_assert(sizeof(lp64::PhysicalMemoryInfo) == 24);
+static_assert(sizeof(lp64::SecureMonitorArguments) == 64);
+static_assert(sizeof(bool) == 1);
+static_assert(sizeof(int32_t) == 4);
+static_assert(sizeof(int64_t) == 8);
+static_assert(sizeof(uint32_t) == 4);
+static_assert(sizeof(uint64_t) == 8);
- if (manager.IsInsideAliasRegion(dst_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination does not fit within the map region, addr=0x{:016X}, "
- "size=0x{:016X}",
- dst_addr, size);
- return ResultInvalidMemoryRegion;
- }
+static void SvcWrap_SetHeapSize64From32(Core::System& system) {
+ Result ret{};
+
+ uint64_t out_address{};
+ uint32_t size{};
+
+ size = Convert<uint32_t>(GetReg32(system, 1));
+
+ ret = SetHeapSize64From32(system, std::addressof(out_address), size);
- return ResultSuccess;
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_address));
}
-enum class ResourceLimitValueType {
- CurrentValue,
- LimitValue,
- PeakValue,
-};
+static void SvcWrap_SetMemoryPermission64From32(Core::System& system) {
+ Result ret{};
-} // Anonymous namespace
+ uint32_t address{};
+ uint32_t size{};
+ MemoryPermission perm{};
+
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ size = Convert<uint32_t>(GetReg32(system, 1));
+ perm = Convert<MemoryPermission>(GetReg32(system, 2));
+
+ ret = SetMemoryPermission64From32(system, address, size, perm);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
-/// Set the process heap to a given Size. It can both extend and shrink the heap.
-static Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
- LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
+static void SvcWrap_SetMemoryAttribute64From32(Core::System& system) {
+ Result ret{};
- // Validate size.
- R_UNLESS(Common::IsAligned(size, HeapSizeAlignment), ResultInvalidSize);
- R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
+ uint32_t address{};
+ uint32_t size{};
+ uint32_t mask{};
+ uint32_t attr{};
- // Set the heap size.
- R_TRY(system.Kernel().CurrentProcess()->PageTable().SetHeapSize(out_address, size));
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ size = Convert<uint32_t>(GetReg32(system, 1));
+ mask = Convert<uint32_t>(GetReg32(system, 2));
+ attr = Convert<uint32_t>(GetReg32(system, 3));
- return ResultSuccess;
+ ret = SetMemoryAttribute64From32(system, address, size, mask, attr);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
- VAddr temp_heap_addr{};
- const Result result{SetHeapSize(system, &temp_heap_addr, heap_size)};
- *heap_addr = static_cast<u32>(temp_heap_addr);
- return result;
+static void SvcWrap_MapMemory64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t dst_address{};
+ uint32_t src_address{};
+ uint32_t size{};
+
+ dst_address = Convert<uint32_t>(GetReg32(system, 0));
+ src_address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
+
+ ret = MapMemory64From32(system, dst_address, src_address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
- switch (perm) {
- case MemoryPermission::None:
- case MemoryPermission::Read:
- case MemoryPermission::ReadWrite:
- return true;
- default:
- return false;
- }
+static void SvcWrap_UnmapMemory64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t dst_address{};
+ uint32_t src_address{};
+ uint32_t size{};
+
+ dst_address = Convert<uint32_t>(GetReg32(system, 0));
+ src_address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
+
+ ret = UnmapMemory64From32(system, dst_address, src_address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result SetMemoryPermission(Core::System& system, VAddr address, u64 size,
- MemoryPermission perm) {
- LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
- perm);
+static void SvcWrap_QueryMemory64From32(Core::System& system) {
+ Result ret{};
- // Validate address / size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+ PageInfo out_page_info{};
+ uint32_t out_memory_info{};
+ uint32_t address{};
- // Validate the permission.
- R_UNLESS(IsValidSetMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+ out_memory_info = Convert<uint32_t>(GetReg32(system, 0));
+ address = Convert<uint32_t>(GetReg32(system, 2));
- // Validate that the region is in range for the current process.
- auto& page_table = system.Kernel().CurrentProcess()->PageTable();
- R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+ ret = QueryMemory64From32(system, out_memory_info, std::addressof(out_page_info), address);
- // Set the memory attribute.
- return page_table.SetMemoryPermission(address, size, perm);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_page_info));
}
-static Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
- u32 attr) {
- LOG_DEBUG(Kernel_SVC,
- "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
- size, mask, attr);
+static void SvcWrap_ExitProcess64From32(Core::System& system) {
+ ExitProcess64From32(system);
+}
+
+static void SvcWrap_CreateThread64From32(Core::System& system) {
+ Result ret{};
- // Validate address / size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+ Handle out_handle{};
+ uint32_t func{};
+ uint32_t arg{};
+ uint32_t stack_bottom{};
+ int32_t priority{};
+ int32_t core_id{};
- // Validate the attribute and mask.
- constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
- R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
- R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
+ func = Convert<uint32_t>(GetReg32(system, 1));
+ arg = Convert<uint32_t>(GetReg32(system, 2));
+ stack_bottom = Convert<uint32_t>(GetReg32(system, 3));
+ priority = Convert<int32_t>(GetReg32(system, 0));
+ core_id = Convert<int32_t>(GetReg32(system, 4));
- // Validate that the region is in range for the current process.
- auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
- R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+ ret = CreateThread64From32(system, std::addressof(out_handle), func, arg, stack_bottom, priority, core_id);
- // Set the memory attribute.
- return page_table.SetMemoryAttribute(address, size, mask, attr);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-static Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
- u32 attr) {
- return SetMemoryAttribute(system, address, size, mask, attr);
+static void SvcWrap_StartThread64From32(Core::System& system) {
+ Result ret{};
+
+ Handle thread_handle{};
+
+ thread_handle = Convert<Handle>(GetReg32(system, 0));
+
+ ret = StartThread64From32(system, thread_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-/// Maps a memory range into a different range.
-static Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
- LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
- src_addr, size);
+static void SvcWrap_ExitThread64From32(Core::System& system) {
+ ExitThread64From32(system);
+}
- auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+static void SvcWrap_SleepThread64From32(Core::System& system) {
+ int64_t ns{};
- if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
- result.IsError()) {
- return result;
- }
+ std::array<uint32_t, 2> ns_gather{};
+ ns_gather[0] = GetReg32(system, 0);
+ ns_gather[1] = GetReg32(system, 1);
+ ns = Convert<int64_t>(ns_gather);
- return page_table.MapMemory(dst_addr, src_addr, size);
+ SleepThread64From32(system, ns);
}
-static Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
- return MapMemory(system, dst_addr, src_addr, size);
+static void SvcWrap_GetThreadPriority64From32(Core::System& system) {
+ Result ret{};
+
+ int32_t out_priority{};
+ Handle thread_handle{};
+
+ thread_handle = Convert<Handle>(GetReg32(system, 1));
+
+ ret = GetThreadPriority64From32(system, std::addressof(out_priority), thread_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_priority));
}
-/// Unmaps a region that was previously mapped with svcMapMemory
-static Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
- LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
- src_addr, size);
+static void SvcWrap_SetThreadPriority64From32(Core::System& system) {
+ Result ret{};
- auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+ Handle thread_handle{};
+ int32_t priority{};
- if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
- result.IsError()) {
- return result;
- }
+ thread_handle = Convert<Handle>(GetReg32(system, 0));
+ priority = Convert<int32_t>(GetReg32(system, 1));
- return page_table.UnmapMemory(dst_addr, src_addr, size);
+ ret = SetThreadPriority64From32(system, thread_handle, priority);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
- return UnmapMemory(system, dst_addr, src_addr, size);
+static void SvcWrap_GetThreadCoreMask64From32(Core::System& system) {
+ Result ret{};
+
+ int32_t out_core_id{};
+ uint64_t out_affinity_mask{};
+ Handle thread_handle{};
+
+ thread_handle = Convert<Handle>(GetReg32(system, 2));
+
+ ret = GetThreadCoreMask64From32(system, std::addressof(out_core_id), std::addressof(out_affinity_mask), thread_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_core_id));
+ auto out_affinity_mask_scatter = Convert<std::array<uint32_t, 2>>(out_affinity_mask);
+ SetReg32(system, 2, out_affinity_mask_scatter[0]);
+ SetReg32(system, 3, out_affinity_mask_scatter[1]);
}
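// The pattern above is how the 32-bit ABI moves 64-bit values: a gather reads
// two 32-bit registers into an array and Convert fuses them, and a scatter does
// the reverse on output. Distilled from the wrappers above:
//
//     std::array<uint32_t, 2> gather{GetReg32(system, 2), GetReg32(system, 3)};
//     const uint64_t affinity_mask = Convert<uint64_t>(gather); // low, then high
//
//     const auto scatter = Convert<std::array<uint32_t, 2>>(out_affinity_mask);
//     SetReg32(system, 2, scatter[0]); // low half
//     SetReg32(system, 3, scatter[1]); // high half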
-template <typename T>
-Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u64 name) {
- auto& process = *system.CurrentProcess();
- auto& handle_table = process.GetHandleTable();
+static void SvcWrap_SetThreadCoreMask64From32(Core::System& system) {
+ Result ret{};
- // Declare the session we're going to allocate.
- T* session;
+ Handle thread_handle{};
+ int32_t core_id{};
+ uint64_t affinity_mask{};
- // Reserve a new session from the process resource limit.
- // FIXME: LimitableResource_SessionCountMax
- KScopedResourceReservation session_reservation(&process, LimitableResource::SessionCountMax);
- if (session_reservation.Succeeded()) {
- session = T::Create(system.Kernel());
- } else {
- return ResultLimitReached;
-
- // // We couldn't reserve a session. Check that we support dynamically expanding the
- // // resource limit.
- // R_UNLESS(process.GetResourceLimit() ==
- // &system.Kernel().GetSystemResourceLimit(), ResultLimitReached);
- // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached());
-
- // // Try to allocate a session from unused slab memory.
- // session = T::CreateFromUnusedSlabMemory();
- // R_UNLESS(session != nullptr, ResultLimitReached);
- // ON_RESULT_FAILURE { session->Close(); };
-
- // // If we're creating a KSession, we want to add two KSessionRequests to the heap, to
- // // prevent request exhaustion.
- // // NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's
- // // no reason to not do this statically.
- // if constexpr (std::same_as<T, KSession>) {
- // for (size_t i = 0; i < 2; i++) {
- // KSessionRequest* request = KSessionRequest::CreateFromUnusedSlabMemory();
- // R_UNLESS(request != nullptr, ResultLimitReached);
- // request->Close();
- // }
- // }
-
- // We successfully allocated a session, so add the object we allocated to the resource
- // limit.
- // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::SessionCountMax, 1);
- }
+ thread_handle = Convert<Handle>(GetReg32(system, 0));
+ core_id = Convert<int32_t>(GetReg32(system, 1));
+ std::array<uint32_t, 2> affinity_mask_gather{};
+ affinity_mask_gather[0] = GetReg32(system, 2);
+ affinity_mask_gather[1] = GetReg32(system, 3);
+ affinity_mask = Convert<uint64_t>(affinity_mask_gather);
- // Check that we successfully created a session.
- R_UNLESS(session != nullptr, ResultOutOfResource);
+ ret = SetThreadCoreMask64From32(system, thread_handle, core_id, affinity_mask);
- // Initialize the session.
- session->Initialize(nullptr, fmt::format("{}", name));
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- // Commit the session reservation.
- session_reservation.Commit();
+static void SvcWrap_GetCurrentProcessorNumber64From32(Core::System& system) {
+ int32_t ret{};
- // Ensure that we clean up the session (and its only references are handle table) on function
- // end.
- SCOPE_EXIT({
- session->GetClientSession().Close();
- session->GetServerSession().Close();
- });
+ ret = GetCurrentProcessorNumber64From32(system);
- // Register the session.
- T::Register(system.Kernel(), session);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- // Add the server session to the handle table.
- R_TRY(handle_table.Add(out_server, &session->GetServerSession()));
+static void SvcWrap_SignalEvent64From32(Core::System& system) {
+ Result ret{};
- // Add the client session to the handle table.
- const auto result = handle_table.Add(out_client, &session->GetClientSession());
+ Handle event_handle{};
- if (!R_SUCCEEDED(result)) {
- // Ensure that we maintaing a clean handle state on exit.
- handle_table.Remove(*out_server);
- }
+ event_handle = Convert<Handle>(GetReg32(system, 0));
- return result;
+ ret = SignalEvent64From32(system, event_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client,
- u32 is_light, u64 name) {
- if (is_light) {
- // return CreateSession<KLightSession>(system, out_server, out_client, name);
- return ResultUnknown;
- } else {
- return CreateSession<KSession>(system, out_server, out_client, name);
- }
+static void SvcWrap_ClearEvent64From32(Core::System& system) {
+ Result ret{};
+
+ Handle event_handle{};
+
+ event_handle = Convert<Handle>(GetReg32(system, 0));
+
+ ret = ClearEvent64From32(system, event_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-/// Connect to an OS service given the port name, returns the handle to the port to out
-static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
- auto& memory = system.Memory();
- if (!memory.IsValidVirtualAddress(port_name_address)) {
- LOG_ERROR(Kernel_SVC,
- "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
- port_name_address);
- return ResultNotFound;
- }
+static void SvcWrap_MapSharedMemory64From32(Core::System& system) {
+ Result ret{};
- static constexpr std::size_t PortNameMaxLength = 11;
- // Read 1 char beyond the max allowed port name to detect names that are too long.
- const std::string port_name = memory.ReadCString(port_name_address, PortNameMaxLength + 1);
- if (port_name.size() > PortNameMaxLength) {
- LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength,
- port_name.size());
- return ResultOutOfRange;
- }
+ Handle shmem_handle{};
+ uint32_t address{};
+ uint32_t size{};
+ MemoryPermission map_perm{};
- LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);
+ shmem_handle = Convert<Handle>(GetReg32(system, 0));
+ address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
+ map_perm = Convert<MemoryPermission>(GetReg32(system, 3));
- // Get the current handle table.
- auto& kernel = system.Kernel();
- auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ ret = MapSharedMemory64From32(system, shmem_handle, address, size, map_perm);
- // Find the client port.
- auto port = kernel.CreateNamedServicePort(port_name);
- if (!port) {
- LOG_ERROR(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
- return ResultNotFound;
- }
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- // Reserve a handle for the port.
- // NOTE: Nintendo really does write directly to the output handle here.
- R_TRY(handle_table.Reserve(out));
- auto handle_guard = SCOPE_GUARD({ handle_table.Unreserve(*out); });
+static void SvcWrap_UnmapSharedMemory64From32(Core::System& system) {
+ Result ret{};
- // Create a session.
- KClientSession* session{};
- R_TRY(port->CreateSession(std::addressof(session)));
+ Handle shmem_handle{};
+ uint32_t address{};
+ uint32_t size{};
- kernel.RegisterNamedServiceHandler(port_name, &port->GetParent()->GetServerPort());
+ shmem_handle = Convert<Handle>(GetReg32(system, 0));
+ address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
- // Register the session in the table, close the extra reference.
- handle_table.Register(*out, session);
- session->Close();
+ ret = UnmapSharedMemory64From32(system, shmem_handle, address, size);
- // We succeeded.
- handle_guard.Cancel();
- return ResultSuccess;
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle,
- u32 port_name_address) {
+static void SvcWrap_CreateTransferMemory64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint32_t address{};
+ uint32_t size{};
+ MemoryPermission map_perm{};
+
+ address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
+ map_perm = Convert<MemoryPermission>(GetReg32(system, 3));
+
+ ret = CreateTransferMemory64From32(system, std::addressof(out_handle), address, size, map_perm);
- return ConnectToNamedPort(system, out_handle, port_name_address);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-/// Makes a blocking IPC call to a service.
-static Result SendSyncRequest(Core::System& system, Handle handle) {
- auto& kernel = system.Kernel();
+static void SvcWrap_CloseHandle64From32(Core::System& system) {
+ Result ret{};
- // Create the wait queue.
- KThreadQueue wait_queue(kernel);
+ Handle handle{};
- // Get the client session from its handle.
- KScopedAutoObject session =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
- R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+ handle = Convert<Handle>(GetReg32(system, 0));
- LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
+ ret = CloseHandle64From32(system, handle);
- return session->SendSyncRequest();
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result SendSyncRequest32(Core::System& system, Handle handle) {
- return SendSyncRequest(system, handle);
+static void SvcWrap_ResetSignal64From32(Core::System& system) {
+ Result ret{};
+
+ Handle handle{};
+
+ handle = Convert<Handle>(GetReg32(system, 0));
+
+ ret = ResetSignal64From32(system, handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles,
- s32 num_handles, Handle reply_target, s64 timeout_ns) {
- auto& kernel = system.Kernel();
- auto& handle_table = GetCurrentThread(kernel).GetOwnerProcess()->GetHandleTable();
-
- // Convert handle list to object table.
- std::vector<KSynchronizationObject*> objs(num_handles);
- R_UNLESS(
- handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles, num_handles),
- ResultInvalidHandle);
-
- // Ensure handles are closed when we're done.
- SCOPE_EXIT({
- for (auto i = 0; i < num_handles; ++i) {
- objs[i]->Close();
- }
- });
-
- // Reply to the target, if one is specified.
- if (reply_target != InvalidHandle) {
- KScopedAutoObject session = handle_table.GetObject<KServerSession>(reply_target);
- R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
-
- // If we fail to reply, we want to set the output index to -1.
- // ON_RESULT_FAILURE { *out_index = -1; };
-
- // Send the reply.
- // R_TRY(session->SendReply());
-
- Result rc = session->SendReply();
- if (!R_SUCCEEDED(rc)) {
- *out_index = -1;
- return rc;
- }
- }
+static void SvcWrap_WaitSynchronization64From32(Core::System& system) {
+ Result ret{};
- // Wait for a message.
- while (true) {
- // Wait for an object.
- s32 index;
- Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(),
- static_cast<s32>(objs.size()), timeout_ns);
- if (result == ResultTimedOut) {
- return result;
- }
-
- // Receive the request.
- if (R_SUCCEEDED(result)) {
- KServerSession* session = objs[index]->DynamicCast<KServerSession*>();
- if (session != nullptr) {
- result = session->ReceiveRequest();
- if (result == ResultNotFound) {
- continue;
- }
- }
- }
-
- *out_index = index;
- return result;
- }
+ int32_t out_index{};
+ uint32_t handles{};
+ int32_t num_handles{};
+ int64_t timeout_ns{};
+
+ handles = Convert<uint32_t>(GetReg32(system, 1));
+ num_handles = Convert<int32_t>(GetReg32(system, 2));
+ std::array<uint32_t, 2> timeout_ns_gather{};
+ timeout_ns_gather[0] = GetReg32(system, 0);
+ timeout_ns_gather[1] = GetReg32(system, 3);
+ timeout_ns = Convert<int64_t>(timeout_ns_gather);
+
+ ret = WaitSynchronization64From32(system, std::addressof(out_index), handles, num_handles, timeout_ns);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_index));
}
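+// 64-bit inputs are reassembled from two 32-bit registers before the call. The
+// timeout above is gathered from r0 and r3, matching the operand layout of the
+// removed WaitSynchronization32 (timeout_low, handles, num_handles,
+// timeout_high); Convert then recombines the halves, presumably as a bit_cast
+// with the low word in element 0 on the little-endian guest.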
-/// Get the ID for the specified thread.
-static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+static void SvcWrap_CancelSynchronization64From32(Core::System& system) {
+ Result ret{};
- // Get the thread's id.
- *out_thread_id = thread->GetId();
- return ResultSuccess;
+ Handle handle{};
+
+ handle = Convert<Handle>(GetReg32(system, 0));
+
+ ret = CancelSynchronization64From32(system, handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result GetThreadId32(Core::System& system, u32* out_thread_id_low, u32* out_thread_id_high,
- Handle thread_handle) {
- u64 out_thread_id{};
- const Result result{GetThreadId(system, &out_thread_id, thread_handle)};
+static void SvcWrap_ArbitrateLock64From32(Core::System& system) {
+ Result ret{};
+
+ Handle thread_handle{};
+ uint32_t address{};
+ uint32_t tag{};
- *out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
- *out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
+ thread_handle = Convert<Handle>(GetReg32(system, 0));
+ address = Convert<uint32_t>(GetReg32(system, 1));
+ tag = Convert<uint32_t>(GetReg32(system, 2));
- return result;
+ ret = ArbitrateLock64From32(system, thread_handle, address, tag);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-/// Gets the ID of the specified process or a specified thread's owning process.
-static Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
- LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);
+static void SvcWrap_ArbitrateUnlock64From32(Core::System& system) {
+ Result ret{};
- // Get the object from the handle table.
- KScopedAutoObject obj =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KAutoObject>(
- static_cast<Handle>(handle));
- R_UNLESS(obj.IsNotNull(), ResultInvalidHandle);
+ uint32_t address{};
- // Get the process from the object.
- KProcess* process = nullptr;
- if (KProcess* p = obj->DynamicCast<KProcess*>(); p != nullptr) {
- // The object is a process, so we can use it directly.
- process = p;
- } else if (KThread* t = obj->DynamicCast<KThread*>(); t != nullptr) {
- // The object is a thread, so we want to use its parent.
- process = reinterpret_cast<KThread*>(obj.GetPointerUnsafe())->GetOwnerProcess();
- } else {
- // TODO(bunnei): This should also handle debug objects before returning.
- UNIMPLEMENTED_MSG("Debug objects not implemented");
- }
+ address = Convert<uint32_t>(GetReg32(system, 0));
- // Make sure the target process exists.
- R_UNLESS(process != nullptr, ResultInvalidHandle);
+ ret = ArbitrateUnlock64From32(system, address);
- // Get the process id.
- *out_process_id = process->GetId();
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
+
+static void SvcWrap_WaitProcessWideKeyAtomic64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t address{};
+ uint32_t cv_key{};
+ uint32_t tag{};
+ int64_t timeout_ns{};
+
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ cv_key = Convert<uint32_t>(GetReg32(system, 1));
+ tag = Convert<uint32_t>(GetReg32(system, 2));
+ std::array<uint32_t, 2> timeout_ns_gather{};
+ timeout_ns_gather[0] = GetReg32(system, 3);
+ timeout_ns_gather[1] = GetReg32(system, 4);
+ timeout_ns = Convert<int64_t>(timeout_ns_gather);
- return ResultSuccess;
+ ret = WaitProcessWideKeyAtomic64From32(system, address, cv_key, tag, timeout_ns);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result GetProcessId32(Core::System& system, u32* out_process_id_low,
- u32* out_process_id_high, Handle handle) {
- u64 out_process_id{};
- const auto result = GetProcessId(system, &out_process_id, handle);
- *out_process_id_low = static_cast<u32>(out_process_id);
- *out_process_id_high = static_cast<u32>(out_process_id >> 32);
- return result;
+static void SvcWrap_SignalProcessWideKey64From32(Core::System& system) {
+ uint32_t cv_key{};
+ int32_t count{};
+
+ cv_key = Convert<uint32_t>(GetReg32(system, 0));
+ count = Convert<int32_t>(GetReg32(system, 1));
+
+ SignalProcessWideKey64From32(system, cv_key, count);
}
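+// SVCs that return nothing, such as SignalProcessWideKey above, skip the
+// Result write-back entirely; no register is touched on exit.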
-/// Wait for the given handles to synchronize, timing out after the specified nanoseconds
-static Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
- s32 num_handles, s64 nano_seconds) {
- LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
- handles_address, num_handles, nano_seconds);
+static void SvcWrap_GetSystemTick64From32(Core::System& system) {
+ int64_t ret{};
- // Ensure number of handles is valid.
- R_UNLESS(0 <= num_handles && num_handles <= ArgumentHandleCountMax, ResultOutOfRange);
+ ret = GetSystemTick64From32(system);
- auto& kernel = system.Kernel();
- std::vector<KSynchronizationObject*> objs(num_handles);
- const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
- Handle* handles = system.Memory().GetPointer<Handle>(handles_address);
-
- // Copy user handles.
- if (num_handles > 0) {
- // Convert the handles to objects.
- R_UNLESS(handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles,
- num_handles),
- ResultInvalidHandle);
- for (const auto& obj : objs) {
- kernel.RegisterInUseObject(obj);
- }
- }
+ auto ret_scatter = Convert<std::array<uint32_t, 2>>(ret);
+ SetReg32(system, 0, ret_scatter[0]);
+ SetReg32(system, 1, ret_scatter[1]);
+}
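+// The reverse of the gather: GetSystemTick's 64-bit return value is converted
+// into a two-word array and scattered across r0/r1, since there is no Result
+// to occupy r0.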
+
+static void SvcWrap_ConnectToNamedPort64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint32_t name{};
- // Ensure handles are closed when we're done.
- SCOPE_EXIT({
- for (s32 i = 0; i < num_handles; ++i) {
- kernel.UnregisterInUseObject(objs[i]);
- objs[i]->Close();
- }
- });
+ name = Convert<uint32_t>(GetReg32(system, 1));
- return KSynchronizationObject::Wait(kernel, index, objs.data(), static_cast<s32>(objs.size()),
- nano_seconds);
+ ret = ConnectToNamedPort64From32(system, std::addressof(out_handle), name);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-static Result WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
- s32 num_handles, u32 timeout_high, s32* index) {
- const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
- return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds);
+static void SvcWrap_SendSyncRequest64From32(Core::System& system) {
+ Result ret{};
+
+ Handle session_handle{};
+
+ session_handle = Convert<Handle>(GetReg32(system, 0));
+
+ ret = SendSyncRequest64From32(system, session_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-/// Resumes a thread waiting on WaitSynchronization
-static Result CancelSynchronization(Core::System& system, Handle handle) {
- LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle);
+static void SvcWrap_SendSyncRequestWithUserBuffer64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t message_buffer{};
+ uint32_t message_buffer_size{};
+ Handle session_handle{};
+
+ message_buffer = Convert<uint32_t>(GetReg32(system, 0));
+ message_buffer_size = Convert<uint32_t>(GetReg32(system, 1));
+ session_handle = Convert<Handle>(GetReg32(system, 2));
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+ ret = SendSyncRequestWithUserBuffer64From32(system, message_buffer, message_buffer_size, session_handle);
- // Cancel the thread's wait.
- thread->WaitCancel();
- return ResultSuccess;
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result CancelSynchronization32(Core::System& system, Handle handle) {
- return CancelSynchronization(system, handle);
+static void SvcWrap_SendAsyncRequestWithUserBuffer64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_event_handle{};
+ uint32_t message_buffer{};
+ uint32_t message_buffer_size{};
+ Handle session_handle{};
+
+ message_buffer = Convert<uint32_t>(GetReg32(system, 1));
+ message_buffer_size = Convert<uint32_t>(GetReg32(system, 2));
+ session_handle = Convert<Handle>(GetReg32(system, 3));
+
+ ret = SendAsyncRequestWithUserBuffer64From32(system, std::addressof(out_event_handle), message_buffer, message_buffer_size, session_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_event_handle));
}
-/// Attempts to lock a mutex
-static Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
- LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
- thread_handle, address, tag);
+static void SvcWrap_GetProcessId64From32(Core::System& system) {
+ Result ret{};
- // Validate the input address.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})",
- address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(u32))) {
- LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
- return ResultInvalidAddress;
- }
+ uint64_t out_process_id{};
+ Handle process_handle{};
+
+ process_handle = Convert<Handle>(GetReg32(system, 1));
+
+ ret = GetProcessId64From32(system, std::addressof(out_process_id), process_handle);
- return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_process_id_scatter = Convert<std::array<uint32_t, 2>>(out_process_id);
+ SetReg32(system, 1, out_process_id_scatter[0]);
+ SetReg32(system, 2, out_process_id_scatter[1]);
}
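+// 64-bit output parameters scatter one register later to leave r0 for the
+// Result: the process id's halves land in r1/r2, in the same low-then-high
+// order the removed GetProcessId32 produced.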
-static Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag) {
- return ArbitrateLock(system, thread_handle, address, tag);
+static void SvcWrap_GetThreadId64From32(Core::System& system) {
+ Result ret{};
+
+ uint64_t out_thread_id{};
+ Handle thread_handle{};
+
+ thread_handle = Convert<Handle>(GetReg32(system, 1));
+
+ ret = GetThreadId64From32(system, std::addressof(out_thread_id), thread_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_thread_id_scatter = Convert<std::array<uint32_t, 2>>(out_thread_id);
+ SetReg32(system, 1, out_thread_id_scatter[0]);
+ SetReg32(system, 2, out_thread_id_scatter[1]);
}
-/// Unlock a mutex
-static Result ArbitrateUnlock(Core::System& system, VAddr address) {
- LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
+static void SvcWrap_Break64From32(Core::System& system) {
+ BreakReason break_reason{};
+ uint32_t arg{};
+ uint32_t size{};
- // Validate the input address.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC,
- "Attempting to arbitrate an unlock on a kernel address (address={:08X})",
- address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(u32))) {
- LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
- return ResultInvalidAddress;
- }
+ break_reason = Convert<BreakReason>(GetReg32(system, 0));
+ arg = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
- return system.Kernel().CurrentProcess()->SignalToAddress(address);
-}
-
-static Result ArbitrateUnlock32(Core::System& system, u32 address) {
- return ArbitrateUnlock(system, address);
-}
-
-/// Break program execution
-static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
- BreakReason break_reason =
- static_cast<BreakReason>(reason & ~static_cast<u32>(BreakReason::NotificationOnlyFlag));
- bool notification_only = (reason & static_cast<u32>(BreakReason::NotificationOnlyFlag)) != 0;
-
- bool has_dumped_buffer{};
- std::vector<u8> debug_buffer;
-
- const auto handle_debug_buffer = [&](VAddr addr, u64 sz) {
- if (sz == 0 || addr == 0 || has_dumped_buffer) {
- return;
- }
-
- auto& memory = system.Memory();
-
- // This typically is an error code so we're going to assume this is the case
- if (sz == sizeof(u32)) {
- LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", memory.Read32(addr));
- } else {
- // We don't know what's in here so we'll hexdump it
- debug_buffer.resize(sz);
- memory.ReadBlock(addr, debug_buffer.data(), sz);
- std::string hexdump;
- for (std::size_t i = 0; i < debug_buffer.size(); i++) {
- hexdump += fmt::format("{:02X} ", debug_buffer[i]);
- if (i != 0 && i % 16 == 0) {
- hexdump += '\n';
- }
- }
- LOG_CRITICAL(Debug_Emulated, "debug_buffer=\n{}", hexdump);
- }
- has_dumped_buffer = true;
- };
- switch (break_reason) {
- case BreakReason::Panic:
- LOG_CRITICAL(Debug_Emulated, "Userspace PANIC! info1=0x{:016X}, info2=0x{:016X}", info1,
- info2);
- handle_debug_buffer(info1, info2);
- break;
- case BreakReason::Assert:
- LOG_CRITICAL(Debug_Emulated, "Userspace Assertion failed! info1=0x{:016X}, info2=0x{:016X}",
- info1, info2);
- handle_debug_buffer(info1, info2);
- break;
- case BreakReason::User:
- LOG_WARNING(Debug_Emulated, "Userspace Break! 0x{:016X} with size 0x{:016X}", info1, info2);
- handle_debug_buffer(info1, info2);
- break;
- case BreakReason::PreLoadDll:
- LOG_INFO(Debug_Emulated,
- "Userspace Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", info1,
- info2);
- break;
- case BreakReason::PostLoadDll:
- LOG_INFO(Debug_Emulated, "Userspace Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
- info2);
- break;
- case BreakReason::PreUnloadDll:
- LOG_INFO(Debug_Emulated,
- "Userspace Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", info1,
- info2);
- break;
- case BreakReason::PostUnloadDll:
- LOG_INFO(Debug_Emulated, "Userspace Unloaded an NRO at 0x{:016X} with size 0x{:016X}",
- info1, info2);
- break;
- case BreakReason::CppException:
- LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered.");
- break;
- default:
- LOG_WARNING(
- Debug_Emulated,
- "Signalling debugger, Unknown break reason {:#X}, info1=0x{:016X}, info2=0x{:016X}",
- reason, info1, info2);
- handle_debug_buffer(info1, info2);
- break;
- }
+ Break64From32(system, break_reason, arg, size);
+}
- system.GetReporter().SaveSvcBreakReport(reason, notification_only, info1, info2,
- has_dumped_buffer ? std::make_optional(debug_buffer)
- : std::nullopt);
+static void SvcWrap_OutputDebugString64From32(Core::System& system) {
+ Result ret{};
- if (!notification_only) {
- LOG_CRITICAL(
- Debug_Emulated,
- "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
- reason, info1, info2);
+ uint32_t debug_str{};
+ uint32_t len{};
- handle_debug_buffer(info1, info2);
+ debug_str = Convert<uint32_t>(GetReg32(system, 0));
+ len = Convert<uint32_t>(GetReg32(system, 1));
- auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
- const auto thread_processor_id = current_thread->GetActiveCore();
- system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
- }
+ ret = OutputDebugString64From32(system, debug_str, len);
- if (system.DebuggerEnabled()) {
- auto* thread = system.Kernel().GetCurrentEmuThread();
- system.GetDebugger().NotifyThreadStopped(thread);
- thread->RequestSuspend(Kernel::SuspendType::Debug);
- }
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
- Break(system, reason, info1, info2);
+static void SvcWrap_ReturnFromException64From32(Core::System& system) {
+ Result result{};
+
+ result = Convert<Result>(GetReg32(system, 0));
+
+ ReturnFromException64From32(system, result);
}
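+// Unusually, ReturnFromException consumes a Result as an input: it is read
+// out of r0 rather than written back to it.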
-/// Used to output a message on a debug hardware unit - does nothing on a retail unit
-static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
- if (len == 0) {
- return;
- }
+static void SvcWrap_GetInfo64From32(Core::System& system) {
+ Result ret{};
- std::string str(len, '\0');
- system.Memory().ReadBlock(address, str.data(), str.size());
- LOG_DEBUG(Debug_Emulated, "{}", str);
-}
-
-static void OutputDebugString32(Core::System& system, u32 address, u32 len) {
- OutputDebugString(system, address, len);
-}
-
-/// Gets system/memory information for the current process
-static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
- u64 info_sub_id) {
- LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
- info_sub_id, handle);
-
- const auto info_id_type = static_cast<InfoType>(info_id);
-
- switch (info_id_type) {
- case InfoType::CoreMask:
- case InfoType::PriorityMask:
- case InfoType::AliasRegionAddress:
- case InfoType::AliasRegionSize:
- case InfoType::HeapRegionAddress:
- case InfoType::HeapRegionSize:
- case InfoType::AslrRegionAddress:
- case InfoType::AslrRegionSize:
- case InfoType::StackRegionAddress:
- case InfoType::StackRegionSize:
- case InfoType::TotalMemorySize:
- case InfoType::UsedMemorySize:
- case InfoType::SystemResourceSizeTotal:
- case InfoType::SystemResourceSizeUsed:
- case InfoType::ProgramId:
- case InfoType::UserExceptionContextAddress:
- case InfoType::TotalNonSystemMemorySize:
- case InfoType::UsedNonSystemMemorySize:
- case InfoType::IsApplication:
- case InfoType::FreeThreadCount: {
- if (info_sub_id != 0) {
- LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
- info_sub_id);
- return ResultInvalidEnumValue;
- }
-
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
- info_id, info_sub_id, handle);
- return ResultInvalidHandle;
- }
-
- switch (info_id_type) {
- case InfoType::CoreMask:
- *result = process->GetCoreMask();
- return ResultSuccess;
-
- case InfoType::PriorityMask:
- *result = process->GetPriorityMask();
- return ResultSuccess;
-
- case InfoType::AliasRegionAddress:
- *result = process->PageTable().GetAliasRegionStart();
- return ResultSuccess;
-
- case InfoType::AliasRegionSize:
- *result = process->PageTable().GetAliasRegionSize();
- return ResultSuccess;
-
- case InfoType::HeapRegionAddress:
- *result = process->PageTable().GetHeapRegionStart();
- return ResultSuccess;
-
- case InfoType::HeapRegionSize:
- *result = process->PageTable().GetHeapRegionSize();
- return ResultSuccess;
-
- case InfoType::AslrRegionAddress:
- *result = process->PageTable().GetAliasCodeRegionStart();
- return ResultSuccess;
-
- case InfoType::AslrRegionSize:
- *result = process->PageTable().GetAliasCodeRegionSize();
- return ResultSuccess;
-
- case InfoType::StackRegionAddress:
- *result = process->PageTable().GetStackRegionStart();
- return ResultSuccess;
-
- case InfoType::StackRegionSize:
- *result = process->PageTable().GetStackRegionSize();
- return ResultSuccess;
-
- case InfoType::TotalMemorySize:
- *result = process->GetTotalPhysicalMemoryAvailable();
- return ResultSuccess;
-
- case InfoType::UsedMemorySize:
- *result = process->GetTotalPhysicalMemoryUsed();
- return ResultSuccess;
-
- case InfoType::SystemResourceSizeTotal:
- *result = process->GetSystemResourceSize();
- return ResultSuccess;
-
- case InfoType::SystemResourceSizeUsed:
- LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
- *result = process->GetSystemResourceUsage();
- return ResultSuccess;
-
- case InfoType::ProgramId:
- *result = process->GetProgramID();
- return ResultSuccess;
-
- case InfoType::UserExceptionContextAddress:
- *result = process->GetProcessLocalRegionAddress();
- return ResultSuccess;
-
- case InfoType::TotalNonSystemMemorySize:
- *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
- return ResultSuccess;
-
- case InfoType::UsedNonSystemMemorySize:
- *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
- return ResultSuccess;
-
- case InfoType::FreeThreadCount:
- *result = process->GetFreeThreadCount();
- return ResultSuccess;
-
- default:
- break;
- }
-
- LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
- return ResultInvalidEnumValue;
- }
+ uint64_t out{};
+ InfoType info_type{};
+ Handle handle{};
+ uint64_t info_subtype{};
- case InfoType::DebuggerAttached:
- *result = 0;
- return ResultSuccess;
-
- case InfoType::ResourceLimit: {
- if (handle != 0) {
- LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle);
- return ResultInvalidHandle;
- }
-
- if (info_sub_id != 0) {
- LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
- info_sub_id);
- return ResultInvalidCombination;
- }
-
- KProcess* const current_process = system.Kernel().CurrentProcess();
- KHandleTable& handle_table = current_process->GetHandleTable();
- const auto resource_limit = current_process->GetResourceLimit();
- if (!resource_limit) {
- *result = Svc::InvalidHandle;
- // Yes, the kernel considers this a successful operation.
- return ResultSuccess;
- }
-
- Handle resource_handle{};
- R_TRY(handle_table.Add(&resource_handle, resource_limit));
-
- *result = resource_handle;
- return ResultSuccess;
- }
+ info_type = Convert<InfoType>(GetReg32(system, 1));
+ handle = Convert<Handle>(GetReg32(system, 2));
+ std::array<uint32_t, 2> info_subtype_gather{};
+ info_subtype_gather[0] = GetReg32(system, 0);
+ info_subtype_gather[1] = GetReg32(system, 3);
+ info_subtype = Convert<uint64_t>(info_subtype_gather);
- case InfoType::RandomEntropy:
- if (handle != 0) {
- LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}",
- handle);
- return ResultInvalidHandle;
- }
-
- if (info_sub_id >= KProcess::RANDOM_ENTROPY_SIZE) {
- LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
- KProcess::RANDOM_ENTROPY_SIZE, info_sub_id);
- return ResultInvalidCombination;
- }
-
- *result = system.Kernel().CurrentProcess()->GetRandomEntropy(info_sub_id);
- return ResultSuccess;
-
- case InfoType::InitialProcessIdRange:
- LOG_WARNING(Kernel_SVC,
- "(STUBBED) Attempted to query privileged process id bounds, returned 0");
- *result = 0;
- return ResultSuccess;
-
- case InfoType::ThreadTickCount: {
- constexpr u64 num_cpus = 4;
- if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
- LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
- info_sub_id);
- return ResultInvalidCombination;
- }
-
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(
- static_cast<Handle>(handle));
- if (thread.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
- static_cast<Handle>(handle));
- return ResultInvalidHandle;
- }
-
- const auto& core_timing = system.CoreTiming();
- const auto& scheduler = *system.Kernel().CurrentScheduler();
- const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
- const bool same_thread = current_thread == thread.GetPointerUnsafe();
-
- const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime();
- u64 out_ticks = 0;
- if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
- const u64 thread_ticks = current_thread->GetCpuTime();
-
- out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
- } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
- out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
- }
-
- *result = out_ticks;
- return ResultSuccess;
- }
- case InfoType::IdleTickCount: {
- // Verify the input handle is invalid.
- R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
-
- // Verify the requested core is valid.
- const bool core_valid =
- (info_sub_id == 0xFFFFFFFFFFFFFFFF) ||
- (info_sub_id == static_cast<u64>(system.Kernel().CurrentPhysicalCoreIndex()));
- R_UNLESS(core_valid, ResultInvalidCombination);
-
- // Get the idle tick count.
- *result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
- return ResultSuccess;
- }
- case InfoType::MesosphereCurrentProcess: {
- // Verify the input handle is invalid.
- R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
+ ret = GetInfo64From32(system, std::addressof(out), info_type, handle, info_subtype);
- // Verify the sub-type is valid.
- R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_scatter = Convert<std::array<uint32_t, 2>>(out);
+ SetReg32(system, 1, out_scatter[0]);
+ SetReg32(system, 2, out_scatter[1]);
+}
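+// Another non-adjacent gather: info_subtype's halves come from r0 and r3,
+// mirroring the removed GetInfo32 signature (sub_id_low, info_id, handle,
+// sub_id_high).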
- // Get the handle table.
- KProcess* current_process = system.Kernel().CurrentProcess();
- KHandleTable& handle_table = current_process->GetHandleTable();
+static void SvcWrap_FlushEntireDataCache64From32(Core::System& system) {
+ FlushEntireDataCache64From32(system);
+}
- // Get a new handle for the current process.
- Handle tmp;
- R_TRY(handle_table.Add(&tmp, current_process));
+static void SvcWrap_FlushDataCache64From32(Core::System& system) {
+ Result ret{};
- // Set the output.
- *result = tmp;
+ uint32_t address{};
+ uint32_t size{};
- // We succeeded.
- return ResultSuccess;
- }
- default:
- LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
- return ResultInvalidEnumValue;
- }
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ size = Convert<uint32_t>(GetReg32(system, 1));
+
+ ret = FlushDataCache64From32(system, address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
- u32 info_id, u32 handle, u32 sub_id_high) {
- const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
- u64 res_value{};
+static void SvcWrap_MapPhysicalMemory64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t address{};
+ uint32_t size{};
- const Result result{GetInfo(system, &res_value, info_id, handle, sub_id)};
- *result_high = static_cast<u32>(res_value >> 32);
- *result_low = static_cast<u32>(res_value & std::numeric_limits<u32>::max());
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ size = Convert<uint32_t>(GetReg32(system, 1));
- return result;
+ ret = MapPhysicalMemory64From32(system, address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-/// Maps memory at a desired address
-static Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
- LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
+static void SvcWrap_UnmapPhysicalMemory64From32(Core::System& system) {
+ Result ret{};
- if (!Common::Is4KBAligned(addr)) {
- LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
- return ResultInvalidAddress;
- }
+ uint32_t address{};
+ uint32_t size{};
- if (!Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
- return ResultInvalidSize;
- }
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ size = Convert<uint32_t>(GetReg32(system, 1));
- if (size == 0) {
- LOG_ERROR(Kernel_SVC, "Size is zero");
- return ResultInvalidSize;
- }
+ ret = UnmapPhysicalMemory64From32(system, address, size);
- if (!(addr < addr + size)) {
- LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
- return ResultInvalidMemoryRegion;
- }
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- KProcess* const current_process{system.Kernel().CurrentProcess()};
- auto& page_table{current_process->PageTable()};
+static void SvcWrap_GetDebugFutureThreadInfo64From32(Core::System& system) {
+ Result ret{};
- if (current_process->GetSystemResourceSize() == 0) {
- LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
- return ResultInvalidState;
- }
+ ilp32::LastThreadContext out_context{};
+ uint64_t out_thread_id{};
+ Handle debug_handle{};
+ int64_t ns{};
- if (!page_table.IsInsideAddressSpace(addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
- size);
- return ResultInvalidMemoryRegion;
- }
+ debug_handle = Convert<Handle>(GetReg32(system, 2));
+ std::array<uint32_t, 2> ns_gather{};
+ ns_gather[0] = GetReg32(system, 0);
+ ns_gather[1] = GetReg32(system, 1);
+ ns = Convert<int64_t>(ns_gather);
- if (page_table.IsOutsideAliasRegion(addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
- size);
- return ResultInvalidMemoryRegion;
- }
+ ret = GetDebugFutureThreadInfo64From32(system, std::addressof(out_context), std::addressof(out_thread_id), debug_handle, ns);
- return page_table.MapPhysicalMemory(addr, size);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_context_scatter = Convert<std::array<uint32_t, 4>>(out_context);
+ SetReg32(system, 1, out_context_scatter[0]);
+ SetReg32(system, 2, out_context_scatter[1]);
+ SetReg32(system, 3, out_context_scatter[2]);
+ SetReg32(system, 4, out_context_scatter[3]);
+ auto out_thread_id_scatter = Convert<std::array<uint32_t, 2>>(out_thread_id);
+ SetReg32(system, 5, out_thread_id_scatter[0]);
+ SetReg32(system, 6, out_thread_id_scatter[1]);
}
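+// Aggregate outputs follow the same pattern: the ilp32::LastThreadContext is
+// converted into a four-word array spread across r1-r4, with the 64-bit
+// thread id following in r5/r6.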
-static Result MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
- return MapPhysicalMemory(system, addr, size);
+static void SvcWrap_GetLastThreadInfo64From32(Core::System& system) {
+ Result ret{};
+
+ ilp32::LastThreadContext out_context{};
+ uint64_t out_tls_address{};
+ uint32_t out_flags{};
+
+ ret = GetLastThreadInfo64From32(system, std::addressof(out_context), std::addressof(out_tls_address), std::addressof(out_flags));
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_context_scatter = Convert<std::array<uint32_t, 4>>(out_context);
+ SetReg32(system, 1, out_context_scatter[0]);
+ SetReg32(system, 2, out_context_scatter[1]);
+ SetReg32(system, 3, out_context_scatter[2]);
+ SetReg32(system, 4, out_context_scatter[3]);
+ SetReg32(system, 5, Convert<uint32_t>(out_tls_address));
+ SetReg32(system, 6, Convert<uint32_t>(out_flags));
}
-/// Unmaps memory previously mapped via MapPhysicalMemory
-static Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
- LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
+static void SvcWrap_GetResourceLimitLimitValue64From32(Core::System& system) {
+ Result ret{};
- if (!Common::Is4KBAligned(addr)) {
- LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
- return ResultInvalidAddress;
- }
+ int64_t out_limit_value{};
+ Handle resource_limit_handle{};
+ LimitableResource which{};
- if (!Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
- return ResultInvalidSize;
- }
+ resource_limit_handle = Convert<Handle>(GetReg32(system, 1));
+ which = Convert<LimitableResource>(GetReg32(system, 2));
- if (size == 0) {
- LOG_ERROR(Kernel_SVC, "Size is zero");
- return ResultInvalidSize;
- }
+ ret = GetResourceLimitLimitValue64From32(system, std::addressof(out_limit_value), resource_limit_handle, which);
- if (!(addr < addr + size)) {
- LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
- return ResultInvalidMemoryRegion;
- }
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_limit_value_scatter = Convert<std::array<uint32_t, 2>>(out_limit_value);
+ SetReg32(system, 1, out_limit_value_scatter[0]);
+ SetReg32(system, 2, out_limit_value_scatter[1]);
+}
- KProcess* const current_process{system.Kernel().CurrentProcess()};
- auto& page_table{current_process->PageTable()};
+static void SvcWrap_GetResourceLimitCurrentValue64From32(Core::System& system) {
+ Result ret{};
- if (current_process->GetSystemResourceSize() == 0) {
- LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
- return ResultInvalidState;
- }
+ int64_t out_current_value{};
+ Handle resource_limit_handle{};
+ LimitableResource which{};
- if (!page_table.IsInsideAddressSpace(addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
- size);
- return ResultInvalidMemoryRegion;
- }
+ resource_limit_handle = Convert<Handle>(GetReg32(system, 1));
+ which = Convert<LimitableResource>(GetReg32(system, 2));
- if (page_table.IsOutsideAliasRegion(addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
- size);
- return ResultInvalidMemoryRegion;
- }
+ ret = GetResourceLimitCurrentValue64From32(system, std::addressof(out_current_value), resource_limit_handle, which);
- return page_table.UnmapPhysicalMemory(addr, size);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_current_value_scatter = Convert<std::array<uint32_t, 2>>(out_current_value);
+ SetReg32(system, 1, out_current_value_scatter[0]);
+ SetReg32(system, 2, out_current_value_scatter[1]);
}
-static Result UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
- return UnmapPhysicalMemory(system, addr, size);
+static void SvcWrap_SetThreadActivity64From32(Core::System& system) {
+ Result ret{};
+
+ Handle thread_handle{};
+ ThreadActivity thread_activity{};
+
+ thread_handle = Convert<Handle>(GetReg32(system, 0));
+ thread_activity = Convert<ThreadActivity>(GetReg32(system, 1));
+
+ ret = SetThreadActivity64From32(system, thread_handle, thread_activity);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
+
+static void SvcWrap_GetThreadContext364From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t out_context{};
+ Handle thread_handle{};
+
+ out_context = Convert<uint32_t>(GetReg32(system, 0));
+ thread_handle = Convert<Handle>(GetReg32(system, 1));
+
+ ret = GetThreadContext364From32(system, out_context, thread_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
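+// Despite the out_ prefix, out_context here is a guest address passed by value
+// in r0; the removed GetThreadContext wrote the context through that pointer
+// into guest memory rather than returning it in registers.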
-/// Sets the thread activity
-static Result SetThreadActivity(Core::System& system, Handle thread_handle,
- ThreadActivity thread_activity) {
- LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
- thread_activity);
+static void SvcWrap_WaitForAddress64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t address{};
+ ArbitrationType arb_type{};
+ int32_t value{};
+ int64_t timeout_ns{};
+
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ arb_type = Convert<ArbitrationType>(GetReg32(system, 1));
+ value = Convert<int32_t>(GetReg32(system, 2));
+ std::array<uint32_t, 2> timeout_ns_gather{};
+ timeout_ns_gather[0] = GetReg32(system, 3);
+ timeout_ns_gather[1] = GetReg32(system, 4);
+ timeout_ns = Convert<int64_t>(timeout_ns_gather);
- // Validate the activity.
- constexpr auto IsValidThreadActivity = [](ThreadActivity activity) {
- return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused;
- };
- R_UNLESS(IsValidThreadActivity(thread_activity), ResultInvalidEnumValue);
+ ret = WaitForAddress64From32(system, address, arb_type, value, timeout_ns);
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
+
+static void SvcWrap_SignalToAddress64From32(Core::System& system) {
+ Result ret{};
- // Check that the activity is being set on a non-current thread for the current process.
- R_UNLESS(thread->GetOwnerProcess() == system.Kernel().CurrentProcess(), ResultInvalidHandle);
- R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(system.Kernel()), ResultBusy);
+ uint32_t address{};
+ SignalType signal_type{};
+ int32_t value{};
+ int32_t count{};
- // Set the activity.
- R_TRY(thread->SetActivity(thread_activity));
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ signal_type = Convert<SignalType>(GetReg32(system, 1));
+ value = Convert<int32_t>(GetReg32(system, 2));
+ count = Convert<int32_t>(GetReg32(system, 3));
- return ResultSuccess;
+ ret = SignalToAddress64From32(system, address, signal_type, value, count);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result SetThreadActivity32(Core::System& system, Handle thread_handle,
- Svc::ThreadActivity thread_activity) {
- return SetThreadActivity(system, thread_handle, thread_activity);
+static void SvcWrap_SynchronizePreemptionState64From32(Core::System& system) {
+ SynchronizePreemptionState64From32(system);
}
-/// Gets the thread context
-static Result GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
- LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
- thread_handle);
+static void SvcWrap_GetResourceLimitPeakValue64From32(Core::System& system) {
+ Result ret{};
- auto& kernel = system.Kernel();
+ int64_t out_peak_value{};
+ Handle resource_limit_handle{};
+ LimitableResource which{};
- // Get the thread from its handle.
- KScopedAutoObject thread =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+ resource_limit_handle = Convert<Handle>(GetReg32(system, 1));
+ which = Convert<LimitableResource>(GetReg32(system, 2));
- // Require the handle be to a non-current thread in the current process.
- const auto* current_process = kernel.CurrentProcess();
- R_UNLESS(current_process == thread->GetOwnerProcess(), ResultInvalidId);
+ ret = GetResourceLimitPeakValue64From32(system, std::addressof(out_peak_value), resource_limit_handle, which);
- // Verify that the thread isn't terminated.
- R_UNLESS(thread->GetState() != ThreadState::Terminated, ResultTerminationRequested);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_peak_value_scatter = Convert<std::array<uint32_t, 2>>(out_peak_value);
+ SetReg32(system, 1, out_peak_value_scatter[0]);
+ SetReg32(system, 2, out_peak_value_scatter[1]);
+}
+
+static void SvcWrap_CreateIoPool64From32(Core::System& system) {
+ Result ret{};
- /// Check that the thread is not the current one.
- /// NOTE: Nintendo does not check this, and thus the following loop will deadlock.
- R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(kernel), ResultInvalidId);
+ Handle out_handle{};
+ IoPoolType which{};
- // Try to get the thread context until the thread isn't current on any core.
- while (true) {
- KScopedSchedulerLock sl{kernel};
+ which = Convert<IoPoolType>(GetReg32(system, 1));
- // TODO(bunnei): Enforce that thread is suspended for debug here.
+ ret = CreateIoPool64From32(system, std::addressof(out_handle), which);
- // If the thread's raw state isn't runnable, check if it's current on some core.
- if (thread->GetRawState() != ThreadState::Runnable) {
- bool current = false;
- for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
- current = true;
- break;
- }
- }
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
+}
- // If the thread is current, retry until it isn't.
- if (current) {
- continue;
- }
- }
+static void SvcWrap_CreateIoRegion64From32(Core::System& system) {
+ Result ret{};
- // Get the thread context.
- std::vector<u8> context;
- R_TRY(thread->GetThreadContext3(context));
+ Handle out_handle{};
+ Handle io_pool{};
+ uint64_t physical_address{};
+ uint32_t size{};
+ MemoryMapping mapping{};
+ MemoryPermission perm{};
- // Copy the thread context to user space.
- system.Memory().WriteBlock(out_context, context.data(), context.size());
+ io_pool = Convert<Handle>(GetReg32(system, 1));
+ std::array<uint32_t, 2> physical_address_gather{};
+ physical_address_gather[0] = GetReg32(system, 2);
+ physical_address_gather[1] = GetReg32(system, 3);
+ physical_address = Convert<uint64_t>(physical_address_gather);
+ size = Convert<uint32_t>(GetReg32(system, 0));
+ mapping = Convert<MemoryMapping>(GetReg32(system, 4));
+ perm = Convert<MemoryPermission>(GetReg32(system, 5));
- return ResultSuccess;
- }
+ ret = CreateIoRegion64From32(system, std::addressof(out_handle), io_pool, physical_address, size, mapping, perm);
- return ResultSuccess;
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
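+// When a 64-bit argument occupies an aligned register pair, the remaining
+// 32-bit arguments appear to back-fill the gaps: physical_address takes r2/r3
+// above, pushing size down into r0. KernelDebug below back-fills the same way
+// (arg1 from r1 and r4), as do ReadWriteRegister and QueryIoMapping.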
-static Result GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
- return GetThreadContext(system, out_context, thread_handle);
+static void SvcWrap_KernelDebug64From32(Core::System& system) {
+ KernelDebugType kern_debug_type{};
+ uint64_t arg0{};
+ uint64_t arg1{};
+ uint64_t arg2{};
+
+ kern_debug_type = Convert<KernelDebugType>(GetReg32(system, 0));
+ std::array<uint32_t, 2> arg0_gather{};
+ arg0_gather[0] = GetReg32(system, 2);
+ arg0_gather[1] = GetReg32(system, 3);
+ arg0 = Convert<uint64_t>(arg0_gather);
+ std::array<uint32_t, 2> arg1_gather{};
+ arg1_gather[0] = GetReg32(system, 1);
+ arg1_gather[1] = GetReg32(system, 4);
+ arg1 = Convert<uint64_t>(arg1_gather);
+ std::array<uint32_t, 2> arg2_gather{};
+ arg2_gather[0] = GetReg32(system, 5);
+ arg2_gather[1] = GetReg32(system, 6);
+ arg2 = Convert<uint64_t>(arg2_gather);
+
+ KernelDebug64From32(system, kern_debug_type, arg0, arg1, arg2);
}
-/// Gets the priority for the specified thread
-static Result GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
- LOG_TRACE(Kernel_SVC, "called");
+static void SvcWrap_ChangeKernelTraceState64From32(Core::System& system) {
+ KernelTraceState kern_trace_state{};
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+ kern_trace_state = Convert<KernelTraceState>(GetReg32(system, 0));
- // Get the thread's priority.
- *out_priority = thread->GetPriority();
- return ResultSuccess;
+ ChangeKernelTraceState64From32(system, kern_trace_state);
}
-static Result GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
- return GetThreadPriority(system, out_priority, handle);
+static void SvcWrap_CreateSession64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_server_session_handle{};
+ Handle out_client_session_handle{};
+ bool is_light{};
+ uint32_t name{};
+
+ is_light = Convert<bool>(GetReg32(system, 2));
+ name = Convert<uint32_t>(GetReg32(system, 3));
+
+ ret = CreateSession64From32(system, std::addressof(out_server_session_handle), std::addressof(out_client_session_handle), is_light, name);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_server_session_handle));
+ SetReg32(system, 2, Convert<uint32_t>(out_client_session_handle));
}
-/// Sets the priority for the specified thread
-static Result SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
- // Get the current process.
- KProcess& process = *system.Kernel().CurrentProcess();
+static void SvcWrap_AcceptSession64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ Handle port{};
- // Validate the priority.
- R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority,
- ResultInvalidPriority);
- R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority);
+ port = Convert<Handle>(GetReg32(system, 1));
- // Get the thread from its handle.
- KScopedAutoObject thread = process.GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+ ret = AcceptSession64From32(system, std::addressof(out_handle), port);
- // Set the thread priority.
- thread->SetBasePriority(priority);
- return ResultSuccess;
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-static Result SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
- return SetThreadPriority(system, thread_handle, priority);
+static void SvcWrap_ReplyAndReceive64From32(Core::System& system) {
+ Result ret{};
+
+ int32_t out_index{};
+ uint32_t handles{};
+ int32_t num_handles{};
+ Handle reply_target{};
+ int64_t timeout_ns{};
+
+ handles = Convert<uint32_t>(GetReg32(system, 1));
+ num_handles = Convert<int32_t>(GetReg32(system, 2));
+ reply_target = Convert<Handle>(GetReg32(system, 3));
+ std::array<uint32_t, 2> timeout_ns_gather{};
+ timeout_ns_gather[0] = GetReg32(system, 0);
+ timeout_ns_gather[1] = GetReg32(system, 4);
+ timeout_ns = Convert<int64_t>(timeout_ns_gather);
+
+ ret = ReplyAndReceive64From32(system, std::addressof(out_index), handles, num_handles, reply_target, timeout_ns);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_index));
}
-/// Get which CPU core is executing the current thread
-static u32 GetCurrentProcessorNumber(Core::System& system) {
- LOG_TRACE(Kernel_SVC, "called");
- return static_cast<u32>(system.CurrentPhysicalCore().CoreIndex());
+static void SvcWrap_ReplyAndReceiveWithUserBuffer64From32(Core::System& system) {
+ Result ret{};
+
+ int32_t out_index{};
+ uint32_t message_buffer{};
+ uint32_t message_buffer_size{};
+ uint32_t handles{};
+ int32_t num_handles{};
+ Handle reply_target{};
+ int64_t timeout_ns{};
+
+ message_buffer = Convert<uint32_t>(GetReg32(system, 1));
+ message_buffer_size = Convert<uint32_t>(GetReg32(system, 2));
+ handles = Convert<uint32_t>(GetReg32(system, 3));
+ num_handles = Convert<int32_t>(GetReg32(system, 0));
+ reply_target = Convert<Handle>(GetReg32(system, 4));
+ std::array<uint32_t, 2> timeout_ns_gather{};
+ timeout_ns_gather[0] = GetReg32(system, 5);
+ timeout_ns_gather[1] = GetReg32(system, 6);
+ timeout_ns = Convert<int64_t>(timeout_ns_gather);
+
+ ret = ReplyAndReceiveWithUserBuffer64From32(system, std::addressof(out_index), message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_index));
}
-static u32 GetCurrentProcessorNumber32(Core::System& system) {
- return GetCurrentProcessorNumber(system);
+static void SvcWrap_CreateEvent64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_write_handle{};
+ Handle out_read_handle{};
+
+ ret = CreateEvent64From32(system, std::addressof(out_write_handle), std::addressof(out_read_handle));
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_write_handle));
+ SetReg32(system, 2, Convert<uint32_t>(out_read_handle));
}
-namespace {
+static void SvcWrap_MapIoRegion64From32(Core::System& system) {
+ Result ret{};
-constexpr bool IsValidSharedMemoryPermission(Svc::MemoryPermission perm) {
- switch (perm) {
- case Svc::MemoryPermission::Read:
- case Svc::MemoryPermission::ReadWrite:
- return true;
- default:
- return false;
- }
+ Handle io_region{};
+ uint32_t address{};
+ uint32_t size{};
+ MemoryPermission perm{};
+
+ io_region = Convert<Handle>(GetReg32(system, 0));
+ address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
+ perm = Convert<MemoryPermission>(GetReg32(system, 3));
+
+ ret = MapIoRegion64From32(system, io_region, address, size, perm);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-[[maybe_unused]] constexpr bool IsValidRemoteSharedMemoryPermission(Svc::MemoryPermission perm) {
- return IsValidSharedMemoryPermission(perm) || perm == Svc::MemoryPermission::DontCare;
+static void SvcWrap_UnmapIoRegion64From32(Core::System& system) {
+ Result ret{};
+
+ Handle io_region{};
+ uint32_t address{};
+ uint32_t size{};
+
+ io_region = Convert<Handle>(GetReg32(system, 0));
+ address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
+
+ ret = UnmapIoRegion64From32(system, io_region, address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) {
- switch (perm) {
- case Svc::MemoryPermission::None:
- case Svc::MemoryPermission::Read:
- case Svc::MemoryPermission::ReadWrite:
- case Svc::MemoryPermission::ReadExecute:
- return true;
- default:
- return false;
- }
+static void SvcWrap_MapPhysicalMemoryUnsafe64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t address{};
+ uint32_t size{};
+
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ size = Convert<uint32_t>(GetReg32(system, 1));
+
+ ret = MapPhysicalMemoryUnsafe64From32(system, address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-constexpr bool IsValidMapCodeMemoryPermission(Svc::MemoryPermission perm) {
- return perm == Svc::MemoryPermission::ReadWrite;
+static void SvcWrap_UnmapPhysicalMemoryUnsafe64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t address{};
+ uint32_t size{};
+
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ size = Convert<uint32_t>(GetReg32(system, 1));
+
+ ret = UnmapPhysicalMemoryUnsafe64From32(system, address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-constexpr bool IsValidMapToOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
- return perm == Svc::MemoryPermission::Read || perm == Svc::MemoryPermission::ReadExecute;
+static void SvcWrap_SetUnsafeLimit64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t limit{};
+
+ limit = Convert<uint32_t>(GetReg32(system, 0));
+
+ ret = SetUnsafeLimit64From32(system, limit);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-constexpr bool IsValidUnmapCodeMemoryPermission(Svc::MemoryPermission perm) {
- return perm == Svc::MemoryPermission::None;
+static void SvcWrap_CreateCodeMemory64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint32_t address{};
+ uint32_t size{};
+
+ address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
+
+ ret = CreateCodeMemory64From32(system, std::addressof(out_handle), address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
- return perm == Svc::MemoryPermission::None;
+static void SvcWrap_ControlCodeMemory64From32(Core::System& system) {
+ Result ret{};
+
+ Handle code_memory_handle{};
+ CodeMemoryOperation operation{};
+ uint64_t address{};
+ uint64_t size{};
+ MemoryPermission perm{};
+
+ code_memory_handle = Convert<Handle>(GetReg32(system, 0));
+ operation = Convert<CodeMemoryOperation>(GetReg32(system, 1));
+ std::array<uint32_t, 2> address_gather{};
+ address_gather[0] = GetReg32(system, 2);
+ address_gather[1] = GetReg32(system, 3);
+ address = Convert<uint64_t>(address_gather);
+ std::array<uint32_t, 2> size_gather{};
+ size_gather[0] = GetReg32(system, 4);
+ size_gather[1] = GetReg32(system, 5);
+ size = Convert<uint64_t>(size_gather);
+ perm = Convert<MemoryPermission>(GetReg32(system, 6));
+
+ ret = ControlCodeMemory64From32(system, code_memory_handle, operation, address, size, perm);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-} // Anonymous namespace
+static void SvcWrap_SleepSystem64From32(Core::System& system) {
+ SleepSystem64From32(system);
+}
-static Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
- Svc::MemoryPermission map_perm) {
- LOG_TRACE(Kernel_SVC,
- "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
- shmem_handle, address, size, map_perm);
+static void SvcWrap_ReadWriteRegister64From32(Core::System& system) {
+ Result ret{};
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+ uint32_t out_value{};
+ uint64_t address{};
+ uint32_t mask{};
+ uint32_t value{};
- // Validate the permission.
- R_UNLESS(IsValidSharedMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
+ std::array<uint32_t, 2> address_gather{};
+ address_gather[0] = GetReg32(system, 2);
+ address_gather[1] = GetReg32(system, 3);
+ address = Convert<uint64_t>(address_gather);
+ mask = Convert<uint32_t>(GetReg32(system, 0));
+ value = Convert<uint32_t>(GetReg32(system, 1));
- // Get the current process.
- auto& process = *system.Kernel().CurrentProcess();
- auto& page_table = process.PageTable();
+ ret = ReadWriteRegister64From32(system, std::addressof(out_value), address, mask, value);
- // Get the shared memory.
- KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
- R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_value));
+}
- // Verify that the mapping is in range.
- R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
+static void SvcWrap_SetProcessActivity64From32(Core::System& system) {
+ Result ret{};
- // Add the shared memory to the process.
- R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size));
+ Handle process_handle{};
+ ProcessActivity process_activity{};
- // Ensure that we clean up the shared memory if we fail to map it.
- auto guard =
- SCOPE_GUARD({ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); });
+ process_handle = Convert<Handle>(GetReg32(system, 0));
+ process_activity = Convert<ProcessActivity>(GetReg32(system, 1));
- // Map the shared memory.
- R_TRY(shmem->Map(process, address, size, map_perm));
+ ret = SetProcessActivity64From32(system, process_handle, process_activity);
- // We succeeded.
- guard.Cancel();
- return ResultSuccess;
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size,
- Svc::MemoryPermission map_perm) {
- return MapSharedMemory(system, shmem_handle, address, size, map_perm);
+static void SvcWrap_CreateSharedMemory64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint32_t size{};
+ MemoryPermission owner_perm{};
+ MemoryPermission remote_perm{};
+
+ size = Convert<uint32_t>(GetReg32(system, 1));
+ owner_perm = Convert<MemoryPermission>(GetReg32(system, 2));
+ remote_perm = Convert<MemoryPermission>(GetReg32(system, 3));
+
+ ret = CreateSharedMemory64From32(system, std::addressof(out_handle), size, owner_perm, remote_perm);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-static Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
- u64 size) {
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+static void SvcWrap_MapTransferMemory64From32(Core::System& system) {
+ Result ret{};
+
+ Handle trmem_handle{};
+ uint32_t address{};
+ uint32_t size{};
+ MemoryPermission owner_perm{};
- // Get the current process.
- auto& process = *system.Kernel().CurrentProcess();
- auto& page_table = process.PageTable();
+ trmem_handle = Convert<Handle>(GetReg32(system, 0));
+ address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
+ owner_perm = Convert<MemoryPermission>(GetReg32(system, 3));
- // Get the shared memory.
- KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
- R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
+ ret = MapTransferMemory64From32(system, trmem_handle, address, size, owner_perm);
- // Verify that the mapping is in range.
- R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
+
+static void SvcWrap_UnmapTransferMemory64From32(Core::System& system) {
+ Result ret{};
- // Unmap the shared memory.
- R_TRY(shmem->Unmap(process, address, size));
+ Handle trmem_handle{};
+ uint32_t address{};
+ uint32_t size{};
- // Remove the shared memory from the process.
- process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
+ trmem_handle = Convert<Handle>(GetReg32(system, 0));
+ address = Convert<uint32_t>(GetReg32(system, 1));
+ size = Convert<uint32_t>(GetReg32(system, 2));
- return ResultSuccess;
+ ret = UnmapTransferMemory64From32(system, trmem_handle, address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
- u32 size) {
- return UnmapSharedMemory(system, shmem_handle, address, size);
+static void SvcWrap_CreateInterruptEvent64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_read_handle{};
+ int32_t interrupt_id{};
+ InterruptType interrupt_type{};
+
+ interrupt_id = Convert<int32_t>(GetReg32(system, 1));
+ interrupt_type = Convert<InterruptType>(GetReg32(system, 2));
+
+ ret = CreateInterruptEvent64From32(system, std::addressof(out_read_handle), interrupt_id, interrupt_type);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_read_handle));
}
-static Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
- u64 size, Svc::MemoryPermission perm) {
- LOG_TRACE(Kernel_SVC,
- "called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
- process_handle, address, size, perm);
+static void SvcWrap_QueryPhysicalAddress64From32(Core::System& system) {
+ Result ret{};
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
- R_UNLESS(address == static_cast<uintptr_t>(address), ResultInvalidCurrentMemory);
- R_UNLESS(size == static_cast<size_t>(size), ResultInvalidCurrentMemory);
+ ilp32::PhysicalMemoryInfo out_info{};
+ uint32_t address{};
- // Validate the memory permission.
- R_UNLESS(IsValidProcessMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+ address = Convert<uint32_t>(GetReg32(system, 1));
- // Get the process from its handle.
- KScopedAutoObject process =
- system.CurrentProcess()->GetHandleTable().GetObject<KProcess>(process_handle);
- R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
+ ret = QueryPhysicalAddress64From32(system, std::addressof(out_info), address);
- // Validate that the address is in range.
- auto& page_table = process->PageTable();
- R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_info_scatter = Convert<std::array<uint32_t, 4>>(out_info);
+ SetReg32(system, 1, out_info_scatter[0]);
+ SetReg32(system, 2, out_info_scatter[1]);
+ SetReg32(system, 3, out_info_scatter[2]);
+ SetReg32(system, 4, out_info_scatter[3]);
+}
- // Set the memory permission.
- return page_table.SetProcessMemoryPermission(address, size, perm);
+static void SvcWrap_QueryIoMapping64From32(Core::System& system) {
+ Result ret{};
+
+ uint64_t out_address{};
+ uint64_t out_size{};
+ uint64_t physical_address{};
+ uint32_t size{};
+
+ std::array<uint32_t, 2> physical_address_gather{};
+ physical_address_gather[0] = GetReg32(system, 2);
+ physical_address_gather[1] = GetReg32(system, 3);
+ physical_address = Convert<uint64_t>(physical_address_gather);
+ size = Convert<uint32_t>(GetReg32(system, 0));
+
+ ret = QueryIoMapping64From32(system, std::addressof(out_address), std::addressof(out_size), physical_address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_address));
+ SetReg32(system, 2, Convert<uint32_t>(out_size));
}
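
Aside, not part of this change: the *_gather/*_scatter arrays reassemble 64-bit values that the 32-bit SVC ABI splits across two w registers, and Convert<> then reinterprets the two-word array as a u64. An equivalent in explicit shift-and-or form, under the assumption (implied by the array layout on a little-endian host) that the low word is stored first:

#include <array>
#include <cstdint>

// Reassemble a 64-bit value from its two 32-bit register halves.
constexpr uint64_t GatherU64(std::array<uint32_t, 2> parts) {
    return static_cast<uint64_t>(parts[0]) |
           (static_cast<uint64_t>(parts[1]) << 32);
}

// Split a 64-bit value back into halves for the output registers.
constexpr std::array<uint32_t, 2> ScatterU64(uint64_t value) {
    return {static_cast<uint32_t>(value), static_cast<uint32_t>(value >> 32)};
}

static_assert(GatherU64({0x89abcdefu, 0x01234567u}) == 0x0123456789abcdefull);
static_assert(ScatterU64(0x0123456789abcdefull)[1] == 0x01234567u);

Note that the two halves are not always adjacent registers: wrappers such as SvcWrap_SetProcessMemoryPermission64From32 below gather a size from w1 and w4, which is consistent with the 32-bit ABI aligning 64-bit arguments to register pairs and packing the remaining halves into whatever registers are left over.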
-static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
- LOG_TRACE(Kernel_SVC,
- "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
- dst_address, process_handle, src_address, size);
+static void SvcWrap_CreateDeviceAddressSpace64From32(Core::System& system) {
+ Result ret{};
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
- R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
+ Handle out_handle{};
+ uint64_t das_address{};
+ uint64_t das_size{};
- // Get the processes.
- KProcess* dst_process = system.CurrentProcess();
- KScopedAutoObject src_process =
- dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
- R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
+ std::array<uint32_t, 2> das_address_gather{};
+ das_address_gather[0] = GetReg32(system, 2);
+ das_address_gather[1] = GetReg32(system, 3);
+ das_address = Convert<uint64_t>(das_address_gather);
+ std::array<uint32_t, 2> das_size_gather{};
+ das_size_gather[0] = GetReg32(system, 0);
+ das_size_gather[1] = GetReg32(system, 1);
+ das_size = Convert<uint64_t>(das_size_gather);
- // Get the page tables.
- auto& dst_pt = dst_process->PageTable();
- auto& src_pt = src_process->PageTable();
+ ret = CreateDeviceAddressSpace64From32(system, std::addressof(out_handle), das_address, das_size);
- // Validate that the mapping is in range.
- R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
- R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
- ResultInvalidMemoryRegion);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
+}
+
+static void SvcWrap_AttachDeviceAddressSpace64From32(Core::System& system) {
+ Result ret{};
- // Create a new page group.
- KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
- R_TRY(src_pt.MakeAndOpenPageGroup(
- std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
- KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::None));
+ DeviceName device_name{};
+ Handle das_handle{};
- // Map the group.
- R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
- KMemoryPermission::UserReadWrite));
+ device_name = Convert<DeviceName>(GetReg32(system, 0));
+ das_handle = Convert<Handle>(GetReg32(system, 1));
- return ResultSuccess;
+ ret = AttachDeviceAddressSpace64From32(system, device_name, das_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
- LOG_TRACE(Kernel_SVC,
- "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
- dst_address, process_handle, src_address, size);
+static void SvcWrap_DetachDeviceAddressSpace64From32(Core::System& system) {
+ Result ret{};
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
- R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
+ DeviceName device_name{};
+ Handle das_handle{};
- // Get the processes.
- KProcess* dst_process = system.CurrentProcess();
- KScopedAutoObject src_process =
- dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
- R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
-
- // Get the page tables.
- auto& dst_pt = dst_process->PageTable();
- auto& src_pt = src_process->PageTable();
-
- // Validate that the mapping is in range.
- R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
- R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
- ResultInvalidMemoryRegion);
-
- // Unmap the memory.
- R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
-
- return ResultSuccess;
-}
-
-static Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
-
- // Get kernel instance.
- auto& kernel = system.Kernel();
+ device_name = Convert<DeviceName>(GetReg32(system, 0));
+ das_handle = Convert<Handle>(GetReg32(system, 1));
- // Validate address / size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+ ret = DetachDeviceAddressSpace64From32(system, device_name, das_handle);
- // Create the code memory.
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
+
+static void SvcWrap_MapDeviceAddressSpaceByForce64From32(Core::System& system) {
+ Result ret{};
- KCodeMemory* code_mem = KCodeMemory::Create(kernel);
- R_UNLESS(code_mem != nullptr, ResultOutOfResource);
+ Handle das_handle{};
+ Handle process_handle{};
+ uint64_t process_address{};
+ uint32_t size{};
+ uint64_t device_address{};
+ uint32_t option{};
- // Verify that the region is in range.
- R_UNLESS(system.CurrentProcess()->PageTable().Contains(address, size),
- ResultInvalidCurrentMemory);
+ das_handle = Convert<Handle>(GetReg32(system, 0));
+ process_handle = Convert<Handle>(GetReg32(system, 1));
+ std::array<uint32_t, 2> process_address_gather{};
+ process_address_gather[0] = GetReg32(system, 2);
+ process_address_gather[1] = GetReg32(system, 3);
+ process_address = Convert<uint64_t>(process_address_gather);
+ size = Convert<uint32_t>(GetReg32(system, 4));
+ std::array<uint32_t, 2> device_address_gather{};
+ device_address_gather[0] = GetReg32(system, 5);
+ device_address_gather[1] = GetReg32(system, 6);
+ device_address = Convert<uint64_t>(device_address_gather);
+ option = Convert<uint32_t>(GetReg32(system, 7));
- // Initialize the code memory.
- R_TRY(code_mem->Initialize(system.DeviceMemory(), address, size));
+ ret = MapDeviceAddressSpaceByForce64From32(system, das_handle, process_handle, process_address, size, device_address, option);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- // Register the code memory.
- KCodeMemory::Register(kernel, code_mem);
+static void SvcWrap_MapDeviceAddressSpaceAligned64From32(Core::System& system) {
+ Result ret{};
- // Add the code memory to the handle table.
- R_TRY(system.CurrentProcess()->GetHandleTable().Add(out, code_mem));
+ Handle das_handle{};
+ Handle process_handle{};
+ uint64_t process_address{};
+ uint32_t size{};
+ uint64_t device_address{};
+ uint32_t option{};
- code_mem->Close();
+ das_handle = Convert<Handle>(GetReg32(system, 0));
+ process_handle = Convert<Handle>(GetReg32(system, 1));
+ std::array<uint32_t, 2> process_address_gather{};
+ process_address_gather[0] = GetReg32(system, 2);
+ process_address_gather[1] = GetReg32(system, 3);
+ process_address = Convert<uint64_t>(process_address_gather);
+ size = Convert<uint32_t>(GetReg32(system, 4));
+ std::array<uint32_t, 2> device_address_gather{};
+ device_address_gather[0] = GetReg32(system, 5);
+ device_address_gather[1] = GetReg32(system, 6);
+ device_address = Convert<uint64_t>(device_address_gather);
+ option = Convert<uint32_t>(GetReg32(system, 7));
- return ResultSuccess;
+ ret = MapDeviceAddressSpaceAligned64From32(system, das_handle, process_handle, process_address, size, device_address, option);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
- return CreateCodeMemory(system, out, address, size);
+static void SvcWrap_UnmapDeviceAddressSpace64From32(Core::System& system) {
+ Result ret{};
+
+ Handle das_handle{};
+ Handle process_handle{};
+ uint64_t process_address{};
+ uint32_t size{};
+ uint64_t device_address{};
+
+ das_handle = Convert<Handle>(GetReg32(system, 0));
+ process_handle = Convert<Handle>(GetReg32(system, 1));
+ std::array<uint32_t, 2> process_address_gather{};
+ process_address_gather[0] = GetReg32(system, 2);
+ process_address_gather[1] = GetReg32(system, 3);
+ process_address = Convert<uint64_t>(process_address_gather);
+ size = Convert<uint32_t>(GetReg32(system, 4));
+ std::array<uint32_t, 2> device_address_gather{};
+ device_address_gather[0] = GetReg32(system, 5);
+ device_address_gather[1] = GetReg32(system, 6);
+ device_address = Convert<uint64_t>(device_address_gather);
+
+ ret = UnmapDeviceAddressSpace64From32(system, das_handle, process_handle, process_address, size, device_address);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
- VAddr address, size_t size, Svc::MemoryPermission perm) {
+static void SvcWrap_InvalidateProcessDataCache64From32(Core::System& system) {
+ Result ret{};
- LOG_TRACE(Kernel_SVC,
- "called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
- "permission=0x{:X}",
- code_memory_handle, operation, address, size, perm);
+ Handle process_handle{};
+ uint64_t address{};
+ uint64_t size{};
- // Validate the address / size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+ process_handle = Convert<Handle>(GetReg32(system, 0));
+ std::array<uint32_t, 2> address_gather{};
+ address_gather[0] = GetReg32(system, 2);
+ address_gather[1] = GetReg32(system, 3);
+ address = Convert<uint64_t>(address_gather);
+ std::array<uint32_t, 2> size_gather{};
+ size_gather[0] = GetReg32(system, 1);
+ size_gather[1] = GetReg32(system, 4);
+ size = Convert<uint64_t>(size_gather);
- // Get the code memory from its handle.
- KScopedAutoObject code_mem =
- system.CurrentProcess()->GetHandleTable().GetObject<KCodeMemory>(code_memory_handle);
- R_UNLESS(code_mem.IsNotNull(), ResultInvalidHandle);
+ ret = InvalidateProcessDataCache64From32(system, process_handle, address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- // NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process.
- // This enables homebrew usage of these SVCs for JIT.
+static void SvcWrap_StoreProcessDataCache64From32(Core::System& system) {
+ Result ret{};
- // Perform the operation.
- switch (static_cast<CodeMemoryOperation>(operation)) {
- case CodeMemoryOperation::Map: {
- // Check that the region is in range.
- R_UNLESS(
- system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
- ResultInvalidMemoryRegion);
+ Handle process_handle{};
+ uint64_t address{};
+ uint64_t size{};
- // Check the memory permission.
- R_UNLESS(IsValidMapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+ process_handle = Convert<Handle>(GetReg32(system, 0));
+ std::array<uint32_t, 2> address_gather{};
+ address_gather[0] = GetReg32(system, 2);
+ address_gather[1] = GetReg32(system, 3);
+ address = Convert<uint64_t>(address_gather);
+ std::array<uint32_t, 2> size_gather{};
+ size_gather[0] = GetReg32(system, 1);
+ size_gather[1] = GetReg32(system, 4);
+ size = Convert<uint64_t>(size_gather);
- // Map the memory.
- R_TRY(code_mem->Map(address, size));
- } break;
- case CodeMemoryOperation::Unmap: {
- // Check that the region is in range.
- R_UNLESS(
- system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
- ResultInvalidMemoryRegion);
+ ret = StoreProcessDataCache64From32(system, process_handle, address, size);
- // Check the memory permission.
- R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- // Unmap the memory.
- R_TRY(code_mem->Unmap(address, size));
- } break;
- case CodeMemoryOperation::MapToOwner: {
- // Check that the region is in range.
- R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
- KMemoryState::GeneratedCode),
- ResultInvalidMemoryRegion);
+static void SvcWrap_FlushProcessDataCache64From32(Core::System& system) {
+ Result ret{};
- // Check the memory permission.
- R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+ Handle process_handle{};
+ uint64_t address{};
+ uint64_t size{};
- // Map the memory to its owner.
- R_TRY(code_mem->MapToOwner(address, size, perm));
- } break;
- case CodeMemoryOperation::UnmapFromOwner: {
- // Check that the region is in range.
- R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
- KMemoryState::GeneratedCode),
- ResultInvalidMemoryRegion);
+ process_handle = Convert<Handle>(GetReg32(system, 0));
+ std::array<uint32_t, 2> address_gather{};
+ address_gather[0] = GetReg32(system, 2);
+ address_gather[1] = GetReg32(system, 3);
+ address = Convert<uint64_t>(address_gather);
+ std::array<uint32_t, 2> size_gather{};
+ size_gather[0] = GetReg32(system, 1);
+ size_gather[1] = GetReg32(system, 4);
+ size = Convert<uint64_t>(size_gather);
- // Check the memory permission.
- R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
-
- // Unmap the memory from its owner.
- R_TRY(code_mem->UnmapFromOwner(address, size));
- } break;
- default:
- return ResultInvalidEnumValue;
- }
+ ret = FlushProcessDataCache64From32(system, process_handle, address, size);
- return ResultSuccess;
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
- u64 address, u64 size, Svc::MemoryPermission perm) {
- return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
+static void SvcWrap_DebugActiveProcess64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint64_t process_id{};
+
+ std::array<uint32_t, 2> process_id_gather{};
+ process_id_gather[0] = GetReg32(system, 2);
+ process_id_gather[1] = GetReg32(system, 3);
+ process_id = Convert<uint64_t>(process_id_gather);
+
+ ret = DebugActiveProcess64From32(system, std::addressof(out_handle), process_id);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-static Result QueryProcessMemory(Core::System& system, VAddr memory_info_address,
- VAddr page_info_address, Handle process_handle, VAddr address) {
- LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
- process_handle);
- return ResultInvalidHandle;
- }
+static void SvcWrap_BreakDebugProcess64From32(Core::System& system) {
+ Result ret{};
- auto& memory{system.Memory()};
- const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()};
+ Handle debug_handle{};
- memory.Write64(memory_info_address + 0x00, memory_info.base_address);
- memory.Write64(memory_info_address + 0x08, memory_info.size);
- memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff);
- memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attribute));
- memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.permission));
- memory.Write32(memory_info_address + 0x1c, memory_info.ipc_count);
- memory.Write32(memory_info_address + 0x20, memory_info.device_count);
- memory.Write32(memory_info_address + 0x24, 0);
+ debug_handle = Convert<Handle>(GetReg32(system, 0));
- // Page info appears to be currently unused by the kernel and is always set to zero.
- memory.Write32(page_info_address, 0);
+ ret = BreakDebugProcess64From32(system, debug_handle);
- return ResultSuccess;
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result QueryMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
- VAddr query_address) {
- LOG_TRACE(Kernel_SVC,
- "called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
- "query_address=0x{:016X}",
- memory_info_address, page_info_address, query_address);
+static void SvcWrap_TerminateDebugProcess64From32(Core::System& system) {
+ Result ret{};
+
+ Handle debug_handle{};
+
+ debug_handle = Convert<Handle>(GetReg32(system, 0));
+
+ ret = TerminateDebugProcess64From32(system, debug_handle);
- return QueryProcessMemory(system, memory_info_address, page_info_address, CurrentProcess,
- query_address);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result QueryMemory32(Core::System& system, u32 memory_info_address, u32 page_info_address,
- u32 query_address) {
- return QueryMemory(system, memory_info_address, page_info_address, query_address);
+static void SvcWrap_GetDebugEvent64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t out_info{};
+ Handle debug_handle{};
+
+ out_info = Convert<uint32_t>(GetReg32(system, 0));
+ debug_handle = Convert<Handle>(GetReg32(system, 1));
+
+ ret = GetDebugEvent64From32(system, out_info, debug_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
- u64 src_address, u64 size) {
- LOG_DEBUG(Kernel_SVC,
- "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
- "src_address=0x{:016X}, size=0x{:016X}",
- process_handle, dst_address, src_address, size);
+static void SvcWrap_ContinueDebugEvent64From32(Core::System& system) {
+ Result ret{};
- if (!Common::Is4KBAligned(src_address)) {
- LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
- src_address);
- return ResultInvalidAddress;
- }
+ Handle debug_handle{};
+ uint32_t flags{};
+ uint32_t thread_ids{};
+ int32_t num_thread_ids{};
- if (!Common::Is4KBAligned(dst_address)) {
- LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
- dst_address);
- return ResultInvalidAddress;
- }
+ debug_handle = Convert<Handle>(GetReg32(system, 0));
+ flags = Convert<uint32_t>(GetReg32(system, 1));
+ thread_ids = Convert<uint32_t>(GetReg32(system, 2));
+ num_thread_ids = Convert<int32_t>(GetReg32(system, 3));
- if (size == 0 || !Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
- return ResultInvalidSize;
- }
+ ret = ContinueDebugEvent64From32(system, debug_handle, flags, thread_ids, num_thread_ids);
- if (!IsValidAddressRange(dst_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination address range overflows the address space (dst_address=0x{:016X}, "
- "size=0x{:016X}).",
- dst_address, size);
- return ResultInvalidCurrentMemory;
- }
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- if (!IsValidAddressRange(src_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source address range overflows the address space (src_address=0x{:016X}, "
- "size=0x{:016X}).",
- src_address, size);
- return ResultInvalidCurrentMemory;
- }
+static void SvcWrap_GetProcessList64From32(Core::System& system) {
+ Result ret{};
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
- process_handle);
- return ResultInvalidHandle;
- }
+ int32_t out_num_processes{};
+ uint32_t out_process_ids{};
+ int32_t max_out_count{};
- auto& page_table = process->PageTable();
- if (!page_table.IsInsideAddressSpace(src_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source address range is not within the address space (src_address=0x{:016X}, "
- "size=0x{:016X}).",
- src_address, size);
- return ResultInvalidCurrentMemory;
- }
+ out_process_ids = Convert<uint32_t>(GetReg32(system, 1));
+ max_out_count = Convert<int32_t>(GetReg32(system, 2));
- if (!page_table.IsInsideASLRRegion(dst_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
- "size=0x{:016X}).",
- dst_address, size);
- return ResultInvalidMemoryRegion;
- }
+ ret = GetProcessList64From32(system, std::addressof(out_num_processes), out_process_ids, max_out_count);
- return page_table.MapCodeMemory(dst_address, src_address, size);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_num_processes));
}
-static Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
- u64 src_address, u64 size) {
- LOG_DEBUG(Kernel_SVC,
- "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
- "size=0x{:016X}",
- process_handle, dst_address, src_address, size);
+static void SvcWrap_GetThreadList64From32(Core::System& system) {
+ Result ret{};
- if (!Common::Is4KBAligned(dst_address)) {
- LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
- dst_address);
- return ResultInvalidAddress;
- }
+ int32_t out_num_threads{};
+ uint32_t out_thread_ids{};
+ int32_t max_out_count{};
+ Handle debug_handle{};
- if (!Common::Is4KBAligned(src_address)) {
- LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
- src_address);
- return ResultInvalidAddress;
- }
+ out_thread_ids = Convert<uint32_t>(GetReg32(system, 1));
+ max_out_count = Convert<int32_t>(GetReg32(system, 2));
+ debug_handle = Convert<Handle>(GetReg32(system, 3));
- if (size == 0 || !Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
- return ResultInvalidSize;
- }
+ ret = GetThreadList64From32(system, std::addressof(out_num_threads), out_thread_ids, max_out_count, debug_handle);
- if (!IsValidAddressRange(dst_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination address range overflows the address space (dst_address=0x{:016X}, "
- "size=0x{:016X}).",
- dst_address, size);
- return ResultInvalidCurrentMemory;
- }
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_num_threads));
+}
- if (!IsValidAddressRange(src_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source address range overflows the address space (src_address=0x{:016X}, "
- "size=0x{:016X}).",
- src_address, size);
- return ResultInvalidCurrentMemory;
- }
+static void SvcWrap_GetDebugThreadContext64From32(Core::System& system) {
+ Result ret{};
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
- process_handle);
- return ResultInvalidHandle;
- }
+ uint32_t out_context{};
+ Handle debug_handle{};
+ uint64_t thread_id{};
+ uint32_t context_flags{};
- auto& page_table = process->PageTable();
- if (!page_table.IsInsideAddressSpace(src_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source address range is not within the address space (src_address=0x{:016X}, "
- "size=0x{:016X}).",
- src_address, size);
- return ResultInvalidCurrentMemory;
- }
+ out_context = Convert<uint32_t>(GetReg32(system, 0));
+ debug_handle = Convert<Handle>(GetReg32(system, 1));
+ std::array<uint32_t, 2> thread_id_gather{};
+ thread_id_gather[0] = GetReg32(system, 2);
+ thread_id_gather[1] = GetReg32(system, 3);
+ thread_id = Convert<uint64_t>(thread_id_gather);
+ context_flags = Convert<uint32_t>(GetReg32(system, 4));
- if (!page_table.IsInsideASLRRegion(dst_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
- "size=0x{:016X}).",
- dst_address, size);
- return ResultInvalidMemoryRegion;
- }
+ ret = GetDebugThreadContext64From32(system, out_context, debug_handle, thread_id, context_flags);
- return page_table.UnmapCodeMemory(dst_address, src_address, size,
- KPageTable::ICacheInvalidationStrategy::InvalidateAll);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-/// Exits the current process
-static void ExitProcess(Core::System& system) {
- auto* current_process = system.Kernel().CurrentProcess();
+static void SvcWrap_SetDebugThreadContext64From32(Core::System& system) {
+ Result ret{};
+
+ Handle debug_handle{};
+ uint64_t thread_id{};
+ uint32_t context{};
+ uint32_t context_flags{};
- LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
- ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
- "Process has already exited");
+ debug_handle = Convert<Handle>(GetReg32(system, 0));
+ std::array<uint32_t, 2> thread_id_gather{};
+ thread_id_gather[0] = GetReg32(system, 2);
+ thread_id_gather[1] = GetReg32(system, 3);
+ thread_id = Convert<uint64_t>(thread_id_gather);
+ context = Convert<uint32_t>(GetReg32(system, 1));
+ context_flags = Convert<uint32_t>(GetReg32(system, 4));
- system.Exit();
+ ret = SetDebugThreadContext64From32(system, debug_handle, thread_id, context, context_flags);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static void ExitProcess32(Core::System& system) {
- ExitProcess(system);
+static void SvcWrap_QueryDebugProcessMemory64From32(Core::System& system) {
+ Result ret{};
+
+ PageInfo out_page_info{};
+ uint32_t out_memory_info{};
+ Handle process_handle{};
+ uint32_t address{};
+
+ out_memory_info = Convert<uint32_t>(GetReg32(system, 0));
+ process_handle = Convert<Handle>(GetReg32(system, 2));
+ address = Convert<uint32_t>(GetReg32(system, 3));
+
+ ret = QueryDebugProcessMemory64From32(system, out_memory_info, std::addressof(out_page_info), process_handle, address);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_page_info));
}
-namespace {
+static void SvcWrap_ReadDebugProcessMemory64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t buffer{};
+ Handle debug_handle{};
+ uint32_t address{};
+ uint32_t size{};
+
+ buffer = Convert<uint32_t>(GetReg32(system, 0));
+ debug_handle = Convert<Handle>(GetReg32(system, 1));
+ address = Convert<uint32_t>(GetReg32(system, 2));
+ size = Convert<uint32_t>(GetReg32(system, 3));
-constexpr bool IsValidVirtualCoreId(int32_t core_id) {
- return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
+ ret = ReadDebugProcessMemory64From32(system, buffer, debug_handle, address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-} // Anonymous namespace
+static void SvcWrap_WriteDebugProcessMemory64From32(Core::System& system) {
+ Result ret{};
-/// Creates a new thread
-static Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
- VAddr stack_bottom, u32 priority, s32 core_id) {
- LOG_DEBUG(Kernel_SVC,
- "called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
- "priority=0x{:08X}, core_id=0x{:08X}",
- entry_point, arg, stack_bottom, priority, core_id);
+ Handle debug_handle{};
+ uint32_t buffer{};
+ uint32_t address{};
+ uint32_t size{};
- // Adjust core id, if it's the default magic.
- auto& kernel = system.Kernel();
- auto& process = *kernel.CurrentProcess();
- if (core_id == IdealCoreUseProcessValue) {
- core_id = process.GetIdealCoreId();
- }
+ debug_handle = Convert<Handle>(GetReg32(system, 0));
+ buffer = Convert<uint32_t>(GetReg32(system, 1));
+ address = Convert<uint32_t>(GetReg32(system, 2));
+ size = Convert<uint32_t>(GetReg32(system, 3));
- // Validate arguments.
- if (!IsValidVirtualCoreId(core_id)) {
- LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id);
- return ResultInvalidCoreId;
- }
- if (((1ULL << core_id) & process.GetCoreMask()) == 0) {
- LOG_ERROR(Kernel_SVC, "Core ID doesn't fall within allowable cores (id={})", core_id);
- return ResultInvalidCoreId;
- }
+ ret = WriteDebugProcessMemory64From32(system, debug_handle, buffer, address, size);
- if (HighestThreadPriority > priority || priority > LowestThreadPriority) {
- LOG_ERROR(Kernel_SVC, "Invalid priority specified (priority={})", priority);
- return ResultInvalidPriority;
- }
- if (!process.CheckThreadPriority(priority)) {
- LOG_ERROR(Kernel_SVC, "Invalid allowable thread priority (priority={})", priority);
- return ResultInvalidPriority;
- }
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- // Reserve a new thread from the process resource limit (waiting up to 100ms).
- KScopedResourceReservation thread_reservation(
- kernel.CurrentProcess(), LimitableResource::ThreadCountMax, 1,
- system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
- if (!thread_reservation.Succeeded()) {
- LOG_ERROR(Kernel_SVC, "Could not reserve a new thread");
- return ResultLimitReached;
- }
+static void SvcWrap_SetHardwareBreakPoint64From32(Core::System& system) {
+ Result ret{};
- // Create the thread.
- KThread* thread = KThread::Create(kernel);
- if (!thread) {
- LOG_ERROR(Kernel_SVC, "Unable to create new threads. Thread creation limit reached.");
- return ResultOutOfResource;
- }
- SCOPE_EXIT({ thread->Close(); });
+ HardwareBreakPointRegisterName name{};
+ uint64_t flags{};
+ uint64_t value{};
- // Initialize the thread.
- {
- KScopedLightLock lk{process.GetStateLock()};
- R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom,
- priority, core_id, &process));
- }
+ name = Convert<HardwareBreakPointRegisterName>(GetReg32(system, 0));
+ std::array<uint32_t, 2> flags_gather{};
+ flags_gather[0] = GetReg32(system, 2);
+ flags_gather[1] = GetReg32(system, 3);
+ flags = Convert<uint64_t>(flags_gather);
+ std::array<uint32_t, 2> value_gather{};
+ value_gather[0] = GetReg32(system, 1);
+ value_gather[1] = GetReg32(system, 4);
+ value = Convert<uint64_t>(value_gather);
- // Set the thread name for debugging purposes.
- thread->SetName(fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *out_handle));
+ ret = SetHardwareBreakPoint64From32(system, name, flags, value);
- // Commit the thread reservation.
- thread_reservation.Commit();
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+}
- // Register the new thread.
- KThread::Register(kernel, thread);
+static void SvcWrap_GetDebugThreadParam64From32(Core::System& system) {
+ Result ret{};
- // Add the thread to the handle table.
- R_TRY(process.GetHandleTable().Add(out_handle, thread));
+ uint64_t out_64{};
+ uint32_t out_32{};
+ Handle debug_handle{};
+ uint64_t thread_id{};
+ DebugThreadParam param{};
- return ResultSuccess;
+ debug_handle = Convert<Handle>(GetReg32(system, 2));
+ std::array<uint32_t, 2> thread_id_gather{};
+ thread_id_gather[0] = GetReg32(system, 0);
+ thread_id_gather[1] = GetReg32(system, 1);
+ thread_id = Convert<uint64_t>(thread_id_gather);
+ param = Convert<DebugThreadParam>(GetReg32(system, 3));
+
+ ret = GetDebugThreadParam64From32(system, std::addressof(out_64), std::addressof(out_32), debug_handle, thread_id, param);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_64_scatter = Convert<std::array<uint32_t, 2>>(out_64);
+ SetReg32(system, 1, out_64_scatter[0]);
+ SetReg32(system, 2, out_64_scatter[1]);
+ SetReg32(system, 3, Convert<uint32_t>(out_32));
}
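
Aside, not part of this change: GetDebugThreadParam's wrapper shows the output side of the same packing, with the Result in w0, a 64-bit output scattered across w1/w2, and a trailing 32-bit output in w3. A compact sketch of just that packing step; the regs array is a stand-in for the guest register file.

#include <array>
#include <cstdint>

// Pack (Result, u64 output, u32 output) into w0..w3, mirroring the
// scatter sequence in the wrapper above.
inline void PackResults(std::array<uint32_t, 8>& regs, uint32_t ret,
                        uint64_t out_64, uint32_t out_32) {
    regs[0] = ret;                                 // w0 <- Result
    regs[1] = static_cast<uint32_t>(out_64);       // w1 <- low half
    regs[2] = static_cast<uint32_t>(out_64 >> 32); // w2 <- high half
    regs[3] = out_32;                              // w3 <- 32-bit output
}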
-static Result CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
- u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
- return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
+static void SvcWrap_GetSystemInfo64From32(Core::System& system) {
+ Result ret{};
+
+ uint64_t out{};
+ SystemInfoType info_type{};
+ Handle handle{};
+ uint64_t info_subtype{};
+
+ info_type = Convert<SystemInfoType>(GetReg32(system, 1));
+ handle = Convert<Handle>(GetReg32(system, 2));
+ std::array<uint32_t, 2> info_subtype_gather{};
+ info_subtype_gather[0] = GetReg32(system, 0);
+ info_subtype_gather[1] = GetReg32(system, 3);
+ info_subtype = Convert<uint64_t>(info_subtype_gather);
+
+ ret = GetSystemInfo64From32(system, std::addressof(out), info_type, handle, info_subtype);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_scatter = Convert<std::array<uint32_t, 2>>(out);
+ SetReg32(system, 1, out_scatter[0]);
+ SetReg32(system, 2, out_scatter[1]);
}
-/// Starts the thread for the provided handle
-static Result StartThread(Core::System& system, Handle thread_handle) {
- LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
+static void SvcWrap_CreatePort64From32(Core::System& system) {
+ Result ret{};
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+ Handle out_server_handle{};
+ Handle out_client_handle{};
+ int32_t max_sessions{};
+ bool is_light{};
+ uint32_t name{};
- // Try to start the thread.
- R_TRY(thread->Run());
+ max_sessions = Convert<int32_t>(GetReg32(system, 2));
+ is_light = Convert<bool>(GetReg32(system, 3));
+ name = Convert<uint32_t>(GetReg32(system, 0));
- // If we succeeded, persist a reference to the thread.
- thread->Open();
- system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
+ ret = CreatePort64From32(system, std::addressof(out_server_handle), std::addressof(out_client_handle), max_sessions, is_light, name);
- return ResultSuccess;
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_server_handle));
+ SetReg32(system, 2, Convert<uint32_t>(out_client_handle));
}
-static Result StartThread32(Core::System& system, Handle thread_handle) {
- return StartThread(system, thread_handle);
+static void SvcWrap_ManageNamedPort64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_server_handle{};
+ uint32_t name{};
+ int32_t max_sessions{};
+
+ name = Convert<uint32_t>(GetReg32(system, 1));
+ max_sessions = Convert<int32_t>(GetReg32(system, 2));
+
+ ret = ManageNamedPort64From32(system, std::addressof(out_server_handle), name, max_sessions);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_server_handle));
}
-/// Called when a thread exits
-static void ExitThread(Core::System& system) {
- LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
+static void SvcWrap_ConnectToPort64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ Handle port{};
- auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
- system.GlobalSchedulerContext().RemoveThread(current_thread);
- current_thread->Exit();
- system.Kernel().UnregisterInUseObject(current_thread);
+ port = Convert<Handle>(GetReg32(system, 1));
+
+ ret = ConnectToPort64From32(system, std::addressof(out_handle), port);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-static void ExitThread32(Core::System& system) {
- ExitThread(system);
+static void SvcWrap_SetProcessMemoryPermission64From32(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ uint64_t address{};
+ uint64_t size{};
+ MemoryPermission perm{};
+
+ process_handle = Convert<Handle>(GetReg32(system, 0));
+ std::array<uint32_t, 2> address_gather{};
+ address_gather[0] = GetReg32(system, 2);
+ address_gather[1] = GetReg32(system, 3);
+ address = Convert<uint64_t>(address_gather);
+ std::array<uint32_t, 2> size_gather{};
+ size_gather[0] = GetReg32(system, 1);
+ size_gather[1] = GetReg32(system, 4);
+ size = Convert<uint64_t>(size_gather);
+ perm = Convert<MemoryPermission>(GetReg32(system, 5));
+
+ ret = SetProcessMemoryPermission64From32(system, process_handle, address, size, perm);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-/// Sleep the current thread
-static void SleepThread(Core::System& system, s64 nanoseconds) {
- auto& kernel = system.Kernel();
- const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);
-
- LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
-
- // When the input tick is positive, sleep.
- if (nanoseconds > 0) {
- // Convert the timeout from nanoseconds to ticks.
- // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
-
- // Sleep.
- // NOTE: Nintendo does not check the result of this sleep.
- static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
- } else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
- KScheduler::YieldWithoutCoreMigration(kernel);
- } else if (yield_type == Svc::YieldType::WithCoreMigration) {
- KScheduler::YieldWithCoreMigration(kernel);
- } else if (yield_type == Svc::YieldType::ToAnyThread) {
- KScheduler::YieldToAnyThread(kernel);
- } else {
- // Nintendo does nothing at all if an otherwise invalid value is passed.
- ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
- }
+static void SvcWrap_MapProcessMemory64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t dst_address{};
+ Handle process_handle{};
+ uint64_t src_address{};
+ uint32_t size{};
+
+ dst_address = Convert<uint32_t>(GetReg32(system, 0));
+ process_handle = Convert<Handle>(GetReg32(system, 1));
+ std::array<uint32_t, 2> src_address_gather{};
+ src_address_gather[0] = GetReg32(system, 2);
+ src_address_gather[1] = GetReg32(system, 3);
+ src_address = Convert<uint64_t>(src_address_gather);
+ size = Convert<uint32_t>(GetReg32(system, 4));
+
+ ret = MapProcessMemory64From32(system, dst_address, process_handle, src_address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
- const auto nanoseconds = static_cast<s64>(u64{nanoseconds_low} | (u64{nanoseconds_high} << 32));
- SleepThread(system, nanoseconds);
+static void SvcWrap_UnmapProcessMemory64From32(Core::System& system) {
+ Result ret{};
+
+ uint32_t dst_address{};
+ Handle process_handle{};
+ uint64_t src_address{};
+ uint32_t size{};
+
+ dst_address = Convert<uint32_t>(GetReg32(system, 0));
+ process_handle = Convert<Handle>(GetReg32(system, 1));
+ std::array<uint32_t, 2> src_address_gather{};
+ src_address_gather[0] = GetReg32(system, 2);
+ src_address_gather[1] = GetReg32(system, 3);
+ src_address = Convert<uint64_t>(src_address_gather);
+ size = Convert<uint32_t>(GetReg32(system, 4));
+
+ ret = UnmapProcessMemory64From32(system, dst_address, process_handle, src_address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-/// Wait process wide key atomic
-static Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
- s64 timeout_ns) {
- LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
- cv_key, tag, timeout_ns);
+static void SvcWrap_QueryProcessMemory64From32(Core::System& system) {
+ Result ret{};
- // Validate input.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(s32))) {
- LOG_ERROR(Kernel_SVC, "Address must be 4 byte aligned (address={:08X})", address);
- return ResultInvalidAddress;
- }
+ PageInfo out_page_info{};
+ uint32_t out_memory_info{};
+ Handle process_handle{};
+ uint64_t address{};
- // Convert timeout from nanoseconds to ticks.
- s64 timeout{};
- if (timeout_ns > 0) {
- const s64 offset_tick(timeout_ns);
- if (offset_tick > 0) {
- timeout = offset_tick + 2;
- if (timeout <= 0) {
- timeout = std::numeric_limits<s64>::max();
- }
- } else {
- timeout = std::numeric_limits<s64>::max();
- }
- } else {
- timeout = timeout_ns;
- }
+ out_memory_info = Convert<uint32_t>(GetReg32(system, 0));
+ process_handle = Convert<Handle>(GetReg32(system, 2));
+ std::array<uint32_t, 2> address_gather{};
+ address_gather[0] = GetReg32(system, 1);
+ address_gather[1] = GetReg32(system, 3);
+ address = Convert<uint64_t>(address_gather);
+
+ ret = QueryProcessMemory64From32(system, out_memory_info, std::addressof(out_page_info), process_handle, address);
- // Wait on the condition variable.
- return system.Kernel().CurrentProcess()->WaitConditionVariable(
- address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_page_info));
}
-static Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
- u32 timeout_ns_low, u32 timeout_ns_high) {
- const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
- return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
+static void SvcWrap_MapProcessCodeMemory64From32(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ uint64_t dst_address{};
+ uint64_t src_address{};
+ uint64_t size{};
+
+ process_handle = Convert<Handle>(GetReg32(system, 0));
+ std::array<uint32_t, 2> dst_address_gather{};
+ dst_address_gather[0] = GetReg32(system, 2);
+ dst_address_gather[1] = GetReg32(system, 3);
+ dst_address = Convert<uint64_t>(dst_address_gather);
+ std::array<uint32_t, 2> src_address_gather{};
+ src_address_gather[0] = GetReg32(system, 1);
+ src_address_gather[1] = GetReg32(system, 4);
+ src_address = Convert<uint64_t>(src_address_gather);
+ std::array<uint32_t, 2> size_gather{};
+ size_gather[0] = GetReg32(system, 5);
+ size_gather[1] = GetReg32(system, 6);
+ size = Convert<uint64_t>(size_gather);
+
+ ret = MapProcessCodeMemory64From32(system, process_handle, dst_address, src_address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-/// Signal process wide key
-static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
- LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
+static void SvcWrap_UnmapProcessCodeMemory64From32(Core::System& system) {
+ Result ret{};
- // Signal the condition variable.
- return system.Kernel().CurrentProcess()->SignalConditionVariable(
- Common::AlignDown(cv_key, sizeof(u32)), count);
+ Handle process_handle{};
+ uint64_t dst_address{};
+ uint64_t src_address{};
+ uint64_t size{};
+
+ process_handle = Convert<Handle>(GetReg32(system, 0));
+ std::array<uint32_t, 2> dst_address_gather{};
+ dst_address_gather[0] = GetReg32(system, 2);
+ dst_address_gather[1] = GetReg32(system, 3);
+ dst_address = Convert<uint64_t>(dst_address_gather);
+ std::array<uint32_t, 2> src_address_gather{};
+ src_address_gather[0] = GetReg32(system, 1);
+ src_address_gather[1] = GetReg32(system, 4);
+ src_address = Convert<uint64_t>(src_address_gather);
+ std::array<uint32_t, 2> size_gather{};
+ size_gather[0] = GetReg32(system, 5);
+ size_gather[1] = GetReg32(system, 6);
+ size = Convert<uint64_t>(size_gather);
+
+ ret = UnmapProcessCodeMemory64From32(system, process_handle, dst_address, src_address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
- SignalProcessWideKey(system, cv_key, count);
+static void SvcWrap_CreateProcess64From32(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint32_t parameters{};
+ uint32_t caps{};
+ int32_t num_caps{};
+
+ parameters = Convert<uint32_t>(GetReg32(system, 1));
+ caps = Convert<uint32_t>(GetReg32(system, 2));
+ num_caps = Convert<int32_t>(GetReg32(system, 3));
+
+ ret = CreateProcess64From32(system, std::addressof(out_handle), parameters, caps, num_caps);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-namespace {
+static void SvcWrap_StartProcess64From32(Core::System& system) {
+ Result ret{};
-constexpr bool IsValidSignalType(Svc::SignalType type) {
- switch (type) {
- case Svc::SignalType::Signal:
- case Svc::SignalType::SignalAndIncrementIfEqual:
- case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
- return true;
- default:
- return false;
- }
+ Handle process_handle{};
+ int32_t priority{};
+ int32_t core_id{};
+ uint64_t main_thread_stack_size{};
+
+ process_handle = Convert<Handle>(GetReg32(system, 0));
+ priority = Convert<int32_t>(GetReg32(system, 1));
+ core_id = Convert<int32_t>(GetReg32(system, 2));
+ std::array<uint32_t, 2> main_thread_stack_size_gather{};
+ main_thread_stack_size_gather[0] = GetReg32(system, 3);
+ main_thread_stack_size_gather[1] = GetReg32(system, 4);
+ main_thread_stack_size = Convert<uint64_t>(main_thread_stack_size_gather);
+
+ ret = StartProcess64From32(system, process_handle, priority, core_id, main_thread_stack_size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
- switch (type) {
- case Svc::ArbitrationType::WaitIfLessThan:
- case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
- case Svc::ArbitrationType::WaitIfEqual:
- return true;
- default:
- return false;
- }
+static void SvcWrap_TerminateProcess64From32(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+
+ process_handle = Convert<Handle>(GetReg32(system, 0));
+
+ ret = TerminateProcess64From32(system, process_handle);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-} // namespace
+static void SvcWrap_GetProcessInfo64From32(Core::System& system) {
+ Result ret{};
-// Wait for an address (via Address Arbiter)
-static Result WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
- s32 value, s64 timeout_ns) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
- address, arb_type, value, timeout_ns);
+ int64_t out_info{};
+ Handle process_handle{};
+ ProcessInfoType info_type{};
- // Validate input.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(s32))) {
- LOG_ERROR(Kernel_SVC, "Wait address must be 4 byte aligned (address={:08X})", address);
- return ResultInvalidAddress;
- }
- if (!IsValidArbitrationType(arb_type)) {
- LOG_ERROR(Kernel_SVC, "Invalid arbitration type specified (type={})", arb_type);
- return ResultInvalidEnumValue;
- }
+ process_handle = Convert<Handle>(GetReg32(system, 1));
+ info_type = Convert<ProcessInfoType>(GetReg32(system, 2));
- // Convert timeout from nanoseconds to ticks.
- s64 timeout{};
- if (timeout_ns > 0) {
- const s64 offset_tick(timeout_ns);
- if (offset_tick > 0) {
- timeout = offset_tick + 2;
- if (timeout <= 0) {
- timeout = std::numeric_limits<s64>::max();
- }
- } else {
- timeout = std::numeric_limits<s64>::max();
- }
- } else {
- timeout = timeout_ns;
- }
+ ret = GetProcessInfo64From32(system, std::addressof(out_info), process_handle, info_type);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ auto out_info_scatter = Convert<std::array<uint32_t, 2>>(out_info);
+ SetReg32(system, 1, out_info_scatter[0]);
+ SetReg32(system, 2, out_info_scatter[1]);
+}
+
+static void SvcWrap_CreateResourceLimit64From32(Core::System& system) {
+ Result ret{};
- return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
+ Handle out_handle{};
+
+ ret = CreateResourceLimit64From32(system, std::addressof(out_handle));
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
+ SetReg32(system, 1, Convert<uint32_t>(out_handle));
}
-static Result WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
- s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
- const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
- return WaitForAddress(system, address, arb_type, value, timeout);
+static void SvcWrap_SetResourceLimitLimitValue64From32(Core::System& system) {
+ Result ret{};
+
+ Handle resource_limit_handle{};
+ LimitableResource which{};
+ int64_t limit_value{};
+
+ resource_limit_handle = Convert<Handle>(GetReg32(system, 0));
+ which = Convert<LimitableResource>(GetReg32(system, 1));
+ std::array<uint32_t, 2> limit_value_gather{};
+ limit_value_gather[0] = GetReg32(system, 2);
+ limit_value_gather[1] = GetReg32(system, 3);
+ limit_value = Convert<int64_t>(limit_value_gather);
+
+ ret = SetResourceLimitLimitValue64From32(system, resource_limit_handle, which, limit_value);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-// Signals to an address (via Address Arbiter)
-static Result SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
- s32 value, s32 count) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
- address, signal_type, value, count);
+static void SvcWrap_MapInsecureMemory64From32(Core::System& system) {
+ Result ret{};
- // Validate input.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(s32))) {
- LOG_ERROR(Kernel_SVC, "Signaled address must be 4 byte aligned (address={:08X})", address);
- return ResultInvalidAddress;
- }
- if (!IsValidSignalType(signal_type)) {
- LOG_ERROR(Kernel_SVC, "Invalid signal type specified (type={})", signal_type);
- return ResultInvalidEnumValue;
- }
+ uint32_t address{};
+ uint32_t size{};
+
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ size = Convert<uint32_t>(GetReg32(system, 1));
- return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
- count);
+ ret = MapInsecureMemory64From32(system, address, size);
+
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static void SynchronizePreemptionState(Core::System& system) {
- auto& kernel = system.Kernel();
+static void SvcWrap_UnmapInsecureMemory64From32(Core::System& system) {
+ Result ret{};
- // Lock the scheduler.
- KScopedSchedulerLock sl{kernel};
+ uint32_t address{};
+ uint32_t size{};
- // If the current thread is pinned, unpin it.
- KProcess* cur_process = system.Kernel().CurrentProcess();
- const auto core_id = GetCurrentCoreId(kernel);
+ address = Convert<uint32_t>(GetReg32(system, 0));
+ size = Convert<uint32_t>(GetReg32(system, 1));
- if (cur_process->GetPinnedThread(core_id) == GetCurrentThreadPointer(kernel)) {
- // Clear the current thread's interrupt flag.
- GetCurrentThread(kernel).ClearInterruptFlag();
+ ret = UnmapInsecureMemory64From32(system, address, size);
- // Unpin the current thread.
- cur_process->UnpinCurrentThread(core_id);
- }
+ SetReg32(system, 0, Convert<uint32_t>(ret));
}
-static Result SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
- s32 value, s32 count) {
- return SignalToAddress(system, address, signal_type, value, count);
+static void SvcWrap_SetHeapSize64(Core::System& system) {
+ Result ret{};
+
+ uint64_t out_address{};
+ uint64_t size{};
+
+ size = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = SetHeapSize64(system, std::addressof(out_address), size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_address));
}
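
Aside, not part of this change: from SetHeapSize64 onward these are the native 64-bit wrappers, which need no gather/scatter because every argument and output fits a single X register. A minimal sketch of that simpler read-call-write shape; the types and callee are stand-ins for illustration, not the emulator's real interfaces.

#include <array>
#include <cstdint>

namespace sketch64 {

using Regs = std::array<uint64_t, 8>; // stand-in for the guest's x0-x7

// Hypothetical implementation: returns an address for a requested size.
inline uint32_t SetHeapSizeLike(uint64_t* out_address, uint64_t size) {
    *out_address = 0x80000000ull + size; // placeholder arithmetic
    return 0;                            // ResultSuccess
}

// The 64-bit wrapper shape: one register per argument, Result in x0,
// outputs in x1 and up; the 32-bit Result is simply zero-extended.
inline void SvcWrap_SetHeapSizeLike(Regs& regs) {
    uint64_t out_address{};
    const uint32_t ret = SetHeapSizeLike(&out_address, regs[1]);
    regs[0] = ret;
    regs[1] = out_address;
}

} // namespace sketch64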
-static void KernelDebug([[maybe_unused]] Core::System& system,
- [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1,
- [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) {
- // Intentionally do nothing, as this does nothing in released kernel binaries.
+static void SvcWrap_SetMemoryPermission64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ uint64_t size{};
+ MemoryPermission perm{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ size = Convert<uint64_t>(GetReg64(system, 1));
+ perm = Convert<MemoryPermission>(GetReg64(system, 2));
+
+ ret = SetMemoryPermission64(system, address, size, perm);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static void ChangeKernelTraceState([[maybe_unused]] Core::System& system,
- [[maybe_unused]] u32 trace_state) {
- // Intentionally do nothing, as this does nothing in released kernel binaries.
+static void SvcWrap_SetMemoryAttribute64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ uint64_t size{};
+ uint32_t mask{};
+ uint32_t attr{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ size = Convert<uint64_t>(GetReg64(system, 1));
+ mask = Convert<uint32_t>(GetReg64(system, 2));
+ attr = Convert<uint32_t>(GetReg64(system, 3));
+
+ ret = SetMemoryAttribute64(system, address, size, mask, attr);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-/// This returns the total CPU ticks elapsed since the CPU was powered-on
-static u64 GetSystemTick(Core::System& system) {
- LOG_TRACE(Kernel_SVC, "called");
+static void SvcWrap_MapMemory64(Core::System& system) {
+ Result ret{};
- auto& core_timing = system.CoreTiming();
+ uint64_t dst_address{};
+ uint64_t src_address{};
+ uint64_t size{};
- // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
- const u64 result{core_timing.GetClockTicks()};
+ dst_address = Convert<uint64_t>(GetReg64(system, 0));
+ src_address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
- if (!system.Kernel().IsMulticore()) {
- core_timing.AddTicks(400U);
- }
+ ret = MapMemory64(system, dst_address, src_address, size);
- return result;
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
- const auto time = GetSystemTick(system);
- *time_low = static_cast<u32>(time);
- *time_high = static_cast<u32>(time >> 32);
+static void SvcWrap_UnmapMemory64(Core::System& system) {
+ Result ret{};
+
+ uint64_t dst_address{};
+ uint64_t src_address{};
+ uint64_t size{};
+
+ dst_address = Convert<uint64_t>(GetReg64(system, 0));
+ src_address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = UnmapMemory64(system, dst_address, src_address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-/// Close a handle
-static Result CloseHandle(Core::System& system, Handle handle) {
- LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
+static void SvcWrap_QueryMemory64(Core::System& system) {
+ Result ret{};
- // Remove the handle.
- R_UNLESS(system.Kernel().CurrentProcess()->GetHandleTable().Remove(handle),
- ResultInvalidHandle);
+ PageInfo out_page_info{};
+ uint64_t out_memory_info{};
+ uint64_t address{};
- return ResultSuccess;
+ out_memory_info = Convert<uint64_t>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 2));
+
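+    // out_memory_info is a guest address: the implementation writes the
+    // MemoryInfo record into guest memory, and only the PageInfo comes back
+    // by register.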
+ ret = QueryMemory64(system, out_memory_info, std::addressof(out_page_info), address);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_page_info));
}
-static Result CloseHandle32(Core::System& system, Handle handle) {
- return CloseHandle(system, handle);
+static void SvcWrap_ExitProcess64(Core::System& system) {
+ ExitProcess64(system);
}
-/// Clears the signaled state of an event or process.
-static Result ResetSignal(Core::System& system, Handle handle) {
- LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
+static void SvcWrap_CreateThread64(Core::System& system) {
+ Result ret{};
- // Get the current handle table.
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ Handle out_handle{};
+ uint64_t func{};
+ uint64_t arg{};
+ uint64_t stack_bottom{};
+ int32_t priority{};
+ int32_t core_id{};
- // Try to reset as readable event.
- {
- KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle);
- if (readable_event.IsNotNull()) {
- return readable_event->Reset();
- }
- }
+ func = Convert<uint64_t>(GetReg64(system, 1));
+ arg = Convert<uint64_t>(GetReg64(system, 2));
+ stack_bottom = Convert<uint64_t>(GetReg64(system, 3));
+ priority = Convert<int32_t>(GetReg64(system, 4));
+ core_id = Convert<int32_t>(GetReg64(system, 5));
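+    // Inputs begin at register 1; registers 0 and 1 are overwritten on return
+    // with the Result and the new thread handle.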
- // Try to reset as process.
- {
- KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
- if (process.IsNotNull()) {
- return process->Reset();
- }
- }
+ ret = CreateThread64(system, std::addressof(out_handle), func, arg, stack_bottom, priority,
+     core_id);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_StartThread64(Core::System& system) {
+ Result ret{};
+
+ Handle thread_handle{};
- LOG_ERROR(Kernel_SVC, "invalid handle (0x{:08X})", handle);
+ thread_handle = Convert<Handle>(GetReg64(system, 0));
- return ResultInvalidHandle;
+ ret = StartThread64(system, thread_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result ResetSignal32(Core::System& system, Handle handle) {
- return ResetSignal(system, handle);
+static void SvcWrap_ExitThread64(Core::System& system) {
+ ExitThread64(system);
}
-namespace {
+static void SvcWrap_SleepThread64(Core::System& system) {
+ int64_t ns{};
-constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
- switch (perm) {
- case MemoryPermission::None:
- case MemoryPermission::Read:
- case MemoryPermission::ReadWrite:
- return true;
- default:
- return false;
- }
+ ns = Convert<int64_t>(GetReg64(system, 0));
+
+ SleepThread64(system, ns);
}
-} // Anonymous namespace
+static void SvcWrap_GetThreadPriority64(Core::System& system) {
+ Result ret{};
-/// Creates a TransferMemory object
-static Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
- MemoryPermission map_perm) {
- auto& kernel = system.Kernel();
+ int32_t out_priority{};
+ Handle thread_handle{};
+
+ thread_handle = Convert<Handle>(GetReg64(system, 1));
+
+ ret = GetThreadPriority64(system, std::addressof(out_priority), thread_handle);
- // Validate the size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_priority));
+}
+
+static void SvcWrap_SetThreadPriority64(Core::System& system) {
+ Result ret{};
+
+ Handle thread_handle{};
+ int32_t priority{};
+
+ thread_handle = Convert<Handle>(GetReg64(system, 0));
+ priority = Convert<int32_t>(GetReg64(system, 1));
- // Validate the permissions.
- R_UNLESS(IsValidTransferMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
+ ret = SetThreadPriority64(system, thread_handle, priority);
- // Get the current process and handle table.
- auto& process = *kernel.CurrentProcess();
- auto& handle_table = process.GetHandleTable();
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_GetThreadCoreMask64(Core::System& system) {
+ Result ret{};
- // Reserve a new transfer memory from the process resource limit.
- KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(),
- LimitableResource::TransferMemoryCountMax);
- R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached);
+ int32_t out_core_id{};
+ uint64_t out_affinity_mask{};
+ Handle thread_handle{};
- // Create the transfer memory.
- KTransferMemory* trmem = KTransferMemory::Create(kernel);
- R_UNLESS(trmem != nullptr, ResultOutOfResource);
+ thread_handle = Convert<Handle>(GetReg64(system, 2));
- // Ensure the only reference is in the handle table when we're done.
- SCOPE_EXIT({ trmem->Close(); });
+ ret = GetThreadCoreMask64(system, std::addressof(out_core_id),
+     std::addressof(out_affinity_mask), thread_handle);
- // Ensure that the region is in range.
- R_UNLESS(process.PageTable().Contains(address, size), ResultInvalidCurrentMemory);
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_core_id));
+ SetReg64(system, 2, Convert<uint64_t>(out_affinity_mask));
+}
- // Initialize the transfer memory.
- R_TRY(trmem->Initialize(address, size, map_perm));
+static void SvcWrap_SetThreadCoreMask64(Core::System& system) {
+ Result ret{};
- // Commit the reservation.
- trmem_reservation.Commit();
+ Handle thread_handle{};
+ int32_t core_id{};
+ uint64_t affinity_mask{};
- // Register the transfer memory.
- KTransferMemory::Register(kernel, trmem);
+ thread_handle = Convert<Handle>(GetReg64(system, 0));
+ core_id = Convert<int32_t>(GetReg64(system, 1));
+ affinity_mask = Convert<uint64_t>(GetReg64(system, 2));
- // Add the transfer memory to the handle table.
- R_TRY(handle_table.Add(out, trmem));
+ ret = SetThreadCoreMask64(system, thread_handle, core_id, affinity_mask);
- return ResultSuccess;
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
- MemoryPermission map_perm) {
- return CreateTransferMemory(system, out, address, size, map_perm);
+static void SvcWrap_GetCurrentProcessorNumber64(Core::System& system) {
+ int32_t ret{};
+
+ ret = GetCurrentProcessorNumber64(system);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
- u64* out_affinity_mask) {
- LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
+static void SvcWrap_SignalEvent64(Core::System& system) {
+ Result ret{};
+
+ Handle event_handle{};
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+ event_handle = Convert<Handle>(GetReg64(system, 0));
- // Get the core mask.
- R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask));
+ ret = SignalEvent64(system, event_handle);
- return ResultSuccess;
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
- u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
- u64 out_affinity_mask{};
- const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
- *out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
- *out_affinity_mask_low = static_cast<u32>(out_affinity_mask);
- return result;
+static void SvcWrap_ClearEvent64(Core::System& system) {
+ Result ret{};
+
+ Handle event_handle{};
+
+ event_handle = Convert<Handle>(GetReg64(system, 0));
+
+ ret = ClearEvent64(system, event_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
- u64 affinity_mask) {
- // Determine the core id/affinity mask.
- if (core_id == IdealCoreUseProcessValue) {
- core_id = system.Kernel().CurrentProcess()->GetIdealCoreId();
- affinity_mask = (1ULL << core_id);
- } else {
- // Validate the affinity mask.
- const u64 process_core_mask = system.Kernel().CurrentProcess()->GetCoreMask();
- R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, ResultInvalidCoreId);
- R_UNLESS(affinity_mask != 0, ResultInvalidCombination);
-
- // Validate the core id.
- if (IsValidVirtualCoreId(core_id)) {
- R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, ResultInvalidCombination);
- } else {
- R_UNLESS(core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare,
- ResultInvalidCoreId);
- }
- }
+static void SvcWrap_MapSharedMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle shmem_handle{};
+ uint64_t address{};
+ uint64_t size{};
+ MemoryPermission map_perm{};
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+ shmem_handle = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+ map_perm = Convert<MemoryPermission>(GetReg64(system, 3));
- // Set the core mask.
- R_TRY(thread->SetCoreMask(core_id, affinity_mask));
+ ret = MapSharedMemory64(system, shmem_handle, address, size, map_perm);
- return ResultSuccess;
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
- u32 affinity_mask_low, u32 affinity_mask_high) {
- const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
- return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
+static void SvcWrap_UnmapSharedMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle shmem_handle{};
+ uint64_t address{};
+ uint64_t size{};
+
+ shmem_handle = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = UnmapSharedMemory64(system, shmem_handle, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result SignalEvent(Core::System& system, Handle event_handle) {
- LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
+static void SvcWrap_CreateTransferMemory64(Core::System& system) {
+ Result ret{};
- // Get the current handle table.
- const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ Handle out_handle{};
+ uint64_t address{};
+ uint64_t size{};
+ MemoryPermission map_perm{};
- // Get the event.
- KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
- R_UNLESS(event.IsNotNull(), ResultInvalidHandle);
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+ map_perm = Convert<MemoryPermission>(GetReg64(system, 3));
- return event->Signal();
+ ret = CreateTransferMemory64(system, std::addressof(out_handle), address, size, map_perm);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
}
-static Result SignalEvent32(Core::System& system, Handle event_handle) {
- return SignalEvent(system, event_handle);
+static void SvcWrap_CloseHandle64(Core::System& system) {
+ Result ret{};
+
+ Handle handle{};
+
+ handle = Convert<Handle>(GetReg64(system, 0));
+
+ ret = CloseHandle64(system, handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result ClearEvent(Core::System& system, Handle event_handle) {
- LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
+static void SvcWrap_ResetSignal64(Core::System& system) {
+ Result ret{};
- // Get the current handle table.
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ Handle handle{};
- // Try to clear the writable event.
- {
- KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
- if (event.IsNotNull()) {
- return event->Clear();
- }
- }
+ handle = Convert<Handle>(GetReg64(system, 0));
- // Try to clear the readable event.
- {
- KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
- if (readable_event.IsNotNull()) {
- return readable_event->Clear();
- }
- }
+ ret = ResetSignal64(system, handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
- LOG_ERROR(Kernel_SVC, "Event handle does not exist, event_handle=0x{:08X}", event_handle);
+static void SvcWrap_WaitSynchronization64(Core::System& system) {
+ Result ret{};
- return ResultInvalidHandle;
+ int32_t out_index{};
+ uint64_t handles{};
+ int32_t num_handles{};
+ int64_t timeout_ns{};
+
+ handles = Convert<uint64_t>(GetReg64(system, 1));
+ num_handles = Convert<int32_t>(GetReg64(system, 2));
+ timeout_ns = Convert<int64_t>(GetReg64(system, 3));
+
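+    // handles is a guest pointer to an array of num_handles Handle values.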
+ ret = WaitSynchronization64(system, std::addressof(out_index), handles, num_handles,
+     timeout_ns);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_index));
}
-static Result ClearEvent32(Core::System& system, Handle event_handle) {
- return ClearEvent(system, event_handle);
+static void SvcWrap_CancelSynchronization64(Core::System& system) {
+ Result ret{};
+
+ Handle handle{};
+
+ handle = Convert<Handle>(GetReg64(system, 0));
+
+ ret = CancelSynchronization64(system, handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
- LOG_DEBUG(Kernel_SVC, "called");
+static void SvcWrap_ArbitrateLock64(Core::System& system) {
+ Result ret{};
- // Get the kernel reference and handle table.
- auto& kernel = system.Kernel();
- auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ Handle thread_handle{};
+ uint64_t address{};
+ uint32_t tag{};
- // Reserve a new event from the process resource limit
- KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
- LimitableResource::EventCountMax);
- R_UNLESS(event_reservation.Succeeded(), ResultLimitReached);
+ thread_handle = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ tag = Convert<uint32_t>(GetReg64(system, 2));
- // Create a new event.
- KEvent* event = KEvent::Create(kernel);
- R_UNLESS(event != nullptr, ResultOutOfResource);
+ ret = ArbitrateLock64(system, thread_handle, address, tag);
- // Initialize the event.
- event->Initialize(kernel.CurrentProcess());
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_ArbitrateUnlock64(Core::System& system) {
+ Result ret{};
- // Commit the thread reservation.
- event_reservation.Commit();
+ uint64_t address{};
- // Ensure that we clean up the event (and that its only references are in the handle table) on function end.
- SCOPE_EXIT({
- event->GetReadableEvent().Close();
- event->Close();
- });
+ address = Convert<uint64_t>(GetReg64(system, 0));
- // Register the event.
- KEvent::Register(kernel, event);
+ ret = ArbitrateUnlock64(system, address);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
- // Add the event to the handle table.
- R_TRY(handle_table.Add(out_write, event));
+static void SvcWrap_WaitProcessWideKeyAtomic64(Core::System& system) {
+ Result ret{};
- // Ensure that we maintain a clean handle state on exit.
- auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); });
+ uint64_t address{};
+ uint64_t cv_key{};
+ uint32_t tag{};
+ int64_t timeout_ns{};
- // Add the readable event to the handle table.
- R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ cv_key = Convert<uint64_t>(GetReg64(system, 1));
+ tag = Convert<uint32_t>(GetReg64(system, 2));
+ timeout_ns = Convert<int64_t>(GetReg64(system, 3));
- // We succeeded.
- handle_guard.Cancel();
- return ResultSuccess;
+ ret = WaitProcessWideKeyAtomic64(system, address, cv_key, tag, timeout_ns);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
- return CreateEvent(system, out_write, out_read);
+static void SvcWrap_SignalProcessWideKey64(Core::System& system) {
+ uint64_t cv_key{};
+ int32_t count{};
+
+ cv_key = Convert<uint64_t>(GetReg64(system, 0));
+ count = Convert<int32_t>(GetReg64(system, 1));
+
+ SignalProcessWideKey64(system, cv_key, count);
}
-static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
- LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
+static void SvcWrap_GetSystemTick64(Core::System& system) {
+ int64_t ret{};
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
- process_handle);
- return ResultInvalidHandle;
- }
+ ret = GetSystemTick64(system);
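+    // No Result for this SVC: ret holds the raw tick count (cntpct_el0) and
+    // is written straight to register 0 below.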
- const auto info_type = static_cast<ProcessInfoType>(type);
- if (info_type != ProcessInfoType::ProcessState) {
- LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead", type);
- return ResultInvalidEnumValue;
- }
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_ConnectToNamedPort64(Core::System& system) {
+ Result ret{};
- *out = static_cast<u64>(process->GetState());
- return ResultSuccess;
+ Handle out_handle{};
+ uint64_t name{};
+
+ name = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = ConnectToNamedPort64(system, std::addressof(out_handle), name);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
}
-static Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
- LOG_DEBUG(Kernel_SVC, "called");
+static void SvcWrap_SendSyncRequest64(Core::System& system) {
+ Result ret{};
- // Create a new resource limit.
- auto& kernel = system.Kernel();
- KResourceLimit* resource_limit = KResourceLimit::Create(kernel);
- R_UNLESS(resource_limit != nullptr, ResultOutOfResource);
+ Handle session_handle{};
- // Ensure we don't leak a reference to the limit.
- SCOPE_EXIT({ resource_limit->Close(); });
+ session_handle = Convert<Handle>(GetReg64(system, 0));
- // Initialize the resource limit.
- resource_limit->Initialize(&system.CoreTiming());
+ ret = SendSyncRequest64(system, session_handle);
- // Register the limit.
- KResourceLimit::Register(kernel, resource_limit);
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_SendSyncRequestWithUserBuffer64(Core::System& system) {
+ Result ret{};
+
+ uint64_t message_buffer{};
+ uint64_t message_buffer_size{};
+ Handle session_handle{};
+
+ message_buffer = Convert<uint64_t>(GetReg64(system, 0));
+ message_buffer_size = Convert<uint64_t>(GetReg64(system, 1));
+ session_handle = Convert<Handle>(GetReg64(system, 2));
- // Add the limit to the handle table.
- R_TRY(kernel.CurrentProcess()->GetHandleTable().Add(out_handle, resource_limit));
+ ret = SendSyncRequestWithUserBuffer64(system, message_buffer, message_buffer_size,
+     session_handle);
- return ResultSuccess;
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
- Handle resource_limit_handle, LimitableResource which) {
- LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
- which);
+static void SvcWrap_SendAsyncRequestWithUserBuffer64(Core::System& system) {
+ Result ret{};
- // Validate the resource.
- R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
+ Handle out_event_handle{};
+ uint64_t message_buffer{};
+ uint64_t message_buffer_size{};
+ Handle session_handle{};
- // Get the resource limit.
- auto& kernel = system.Kernel();
- KScopedAutoObject resource_limit =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
- R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
+ message_buffer = Convert<uint64_t>(GetReg64(system, 1));
+ message_buffer_size = Convert<uint64_t>(GetReg64(system, 2));
+ session_handle = Convert<Handle>(GetReg64(system, 3));
- // Get the limit value.
- *out_limit_value = resource_limit->GetLimitValue(which);
+ ret = SendAsyncRequestWithUserBuffer64(system, std::addressof(out_event_handle), message_buffer,
+     message_buffer_size, session_handle);
- return ResultSuccess;
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_event_handle));
}
-static Result GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
- Handle resource_limit_handle, LimitableResource which) {
- LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
- which);
+static void SvcWrap_GetProcessId64(Core::System& system) {
+ Result ret{};
- // Validate the resource.
- R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
+ uint64_t out_process_id{};
+ Handle process_handle{};
- // Get the resource limit.
- auto& kernel = system.Kernel();
- KScopedAutoObject resource_limit =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
- R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
+ process_handle = Convert<Handle>(GetReg64(system, 1));
- // Get the current value.
- *out_current_value = resource_limit->GetCurrentValue(which);
+ ret = GetProcessId64(system, std::addressof(out_process_id), process_handle);
- return ResultSuccess;
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_process_id));
}
-static Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
- LimitableResource which, u64 limit_value) {
- LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}",
- resource_limit_handle, which, limit_value);
+static void SvcWrap_GetThreadId64(Core::System& system) {
+ Result ret{};
- // Validate the resource.
- R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
+ uint64_t out_thread_id{};
+ Handle thread_handle{};
- // Get the resource limit.
- auto& kernel = system.Kernel();
- KScopedAutoObject resource_limit =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
- R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
+ thread_handle = Convert<Handle>(GetReg64(system, 1));
- // Set the limit value.
- R_TRY(resource_limit->SetLimitValue(which, limit_value));
+ ret = GetThreadId64(system, std::addressof(out_thread_id), thread_handle);
- return ResultSuccess;
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_thread_id));
}
-static Result GetProcessList(Core::System& system, u32* out_num_processes, VAddr out_process_ids,
- u32 out_process_ids_size) {
- LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
- out_process_ids, out_process_ids_size);
+static void SvcWrap_Break64(Core::System& system) {
+ BreakReason break_reason{};
+ uint64_t arg{};
+ uint64_t size{};
- // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
- if ((out_process_ids_size & 0xF0000000) != 0) {
- LOG_ERROR(Kernel_SVC,
- "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
- out_process_ids_size);
- return ResultOutOfRange;
- }
+ break_reason = Convert<BreakReason>(GetReg64(system, 0));
+ arg = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
- const auto& kernel = system.Kernel();
- const auto total_copy_size = out_process_ids_size * sizeof(u64);
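+    // Break64 is void in this interface, so nothing is marshalled back; a
+    // fatal break does not return to the caller.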
+ Break64(system, break_reason, arg, size);
+}
- if (out_process_ids_size > 0 && !kernel.CurrentProcess()->PageTable().IsInsideAddressSpace(
- out_process_ids, total_copy_size)) {
- LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
- out_process_ids, out_process_ids + total_copy_size);
- return ResultInvalidCurrentMemory;
- }
+static void SvcWrap_OutputDebugString64(Core::System& system) {
+ Result ret{};
- auto& memory = system.Memory();
- const auto& process_list = kernel.GetProcessList();
- const auto num_processes = process_list.size();
- const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);
+ uint64_t debug_str{};
+ uint64_t len{};
- for (std::size_t i = 0; i < copy_amount; ++i) {
- memory.Write64(out_process_ids, process_list[i]->GetProcessID());
- out_process_ids += sizeof(u64);
- }
+ debug_str = Convert<uint64_t>(GetReg64(system, 0));
+ len = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = OutputDebugString64(system, debug_str, len);
- *out_num_processes = static_cast<u32>(num_processes);
- return ResultSuccess;
+ SetReg64(system, 0, Convert<uint64_t>(ret));
}
-static Result GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
- u32 out_thread_ids_size, Handle debug_handle) {
- // TODO: Handle this case when debug events are supported.
- UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
+static void SvcWrap_ReturnFromException64(Core::System& system) {
+ Result result{};
- LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
- out_thread_ids, out_thread_ids_size);
+ result = Convert<Result>(GetReg64(system, 0));
- // If the size is negative or larger than INT32_MAX / sizeof(u64)
- if ((out_thread_ids_size & 0xF0000000) != 0) {
- LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
- out_thread_ids_size);
- return ResultOutOfRange;
- }
+ ReturnFromException64(system, result);
+}
- auto* const current_process = system.Kernel().CurrentProcess();
- const auto total_copy_size = out_thread_ids_size * sizeof(u64);
+static void SvcWrap_GetInfo64(Core::System& system) {
+ Result ret{};
- if (out_thread_ids_size > 0 &&
- !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) {
- LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
- out_thread_ids, out_thread_ids + total_copy_size);
- return ResultInvalidCurrentMemory;
- }
+ uint64_t out{};
+ InfoType info_type{};
+ Handle handle{};
+ uint64_t info_subtype{};
- auto& memory = system.Memory();
- const auto& thread_list = current_process->GetThreadList();
- const auto num_threads = thread_list.size();
- const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);
+ info_type = Convert<InfoType>(GetReg64(system, 1));
+ handle = Convert<Handle>(GetReg64(system, 2));
+ info_subtype = Convert<uint64_t>(GetReg64(system, 3));
- auto list_iter = thread_list.cbegin();
- for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
- memory.Write64(out_thread_ids, (*list_iter)->GetThreadID());
- out_thread_ids += sizeof(u64);
- }
+ ret = GetInfo64(system, std::addressof(out), info_type, handle, info_subtype);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out));
+}
+
+static void SvcWrap_FlushEntireDataCache64(Core::System& system) {
+ FlushEntireDataCache64(system);
+}
+
+static void SvcWrap_FlushDataCache64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ uint64_t size{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ size = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = FlushDataCache64(system, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_MapPhysicalMemory64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ uint64_t size{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ size = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = MapPhysicalMemory64(system, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_UnmapPhysicalMemory64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ uint64_t size{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ size = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = UnmapPhysicalMemory64(system, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_GetDebugFutureThreadInfo64(Core::System& system) {
+ Result ret{};
+
+ lp64::LastThreadContext out_context{};
+ uint64_t out_thread_id{};
+ Handle debug_handle{};
+ int64_t ns{};
+
+ debug_handle = Convert<Handle>(GetReg64(system, 2));
+ ns = Convert<int64_t>(GetReg64(system, 3));
+
+ ret = GetDebugFutureThreadInfo64(system, std::addressof(out_context),
+     std::addressof(out_thread_id), debug_handle, ns);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
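+    // lp64::LastThreadContext is four 64-bit words (fp, sp, lr, pc), so it is
+    // scattered across registers 1-4; the thread id follows in register 5.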
+ auto out_context_scatter = Convert<std::array<uint64_t, 4>>(out_context);
+ SetReg64(system, 1, out_context_scatter[0]);
+ SetReg64(system, 2, out_context_scatter[1]);
+ SetReg64(system, 3, out_context_scatter[2]);
+ SetReg64(system, 4, out_context_scatter[3]);
+ SetReg64(system, 5, Convert<uint64_t>(out_thread_id));
+}
+
+static void SvcWrap_GetLastThreadInfo64(Core::System& system) {
+ Result ret{};
+
+ lp64::LastThreadContext out_context{};
+ uint64_t out_tls_address{};
+ uint32_t out_flags{};
+
+ ret = GetLastThreadInfo64(system, std::addressof(out_context), std::addressof(out_tls_address),
+     std::addressof(out_flags));
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ auto out_context_scatter = Convert<std::array<uint64_t, 4>>(out_context);
+ SetReg64(system, 1, out_context_scatter[0]);
+ SetReg64(system, 2, out_context_scatter[1]);
+ SetReg64(system, 3, out_context_scatter[2]);
+ SetReg64(system, 4, out_context_scatter[3]);
+ SetReg64(system, 5, Convert<uint64_t>(out_tls_address));
+ SetReg64(system, 6, Convert<uint64_t>(out_flags));
+}
+
+static void SvcWrap_GetResourceLimitLimitValue64(Core::System& system) {
+ Result ret{};
+
+ int64_t out_limit_value{};
+ Handle resource_limit_handle{};
+ LimitableResource which{};
+
+ resource_limit_handle = Convert<Handle>(GetReg64(system, 1));
+ which = Convert<LimitableResource>(GetReg64(system, 2));
+
+ ret = GetResourceLimitLimitValue64(system, std::addressof(out_limit_value),
+     resource_limit_handle, which);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_limit_value));
+}
+
+static void SvcWrap_GetResourceLimitCurrentValue64(Core::System& system) {
+ Result ret{};
+
+ int64_t out_current_value{};
+ Handle resource_limit_handle{};
+ LimitableResource which{};
+
+ resource_limit_handle = Convert<Handle>(GetReg64(system, 1));
+ which = Convert<LimitableResource>(GetReg64(system, 2));
+
+ ret = GetResourceLimitCurrentValue64(system, std::addressof(out_current_value),
+     resource_limit_handle, which);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_current_value));
+}
+
+static void SvcWrap_SetThreadActivity64(Core::System& system) {
+ Result ret{};
+
+ Handle thread_handle{};
+ ThreadActivity thread_activity{};
+
+ thread_handle = Convert<Handle>(GetReg64(system, 0));
+ thread_activity = Convert<ThreadActivity>(GetReg64(system, 1));
+
+ ret = SetThreadActivity64(system, thread_handle, thread_activity);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_GetThreadContext364(Core::System& system) {
+ Result ret{};
+
+ uint64_t out_context{};
+ Handle thread_handle{};
+
+ out_context = Convert<uint64_t>(GetReg64(system, 0));
+ thread_handle = Convert<Handle>(GetReg64(system, 1));
+
+ ret = GetThreadContext364(system, out_context, thread_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_WaitForAddress64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ ArbitrationType arb_type{};
+ int32_t value{};
+ int64_t timeout_ns{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ arb_type = Convert<ArbitrationType>(GetReg64(system, 1));
+ value = Convert<int32_t>(GetReg64(system, 2));
+ timeout_ns = Convert<int64_t>(GetReg64(system, 3));
+
+ ret = WaitForAddress64(system, address, arb_type, value, timeout_ns);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_SignalToAddress64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ SignalType signal_type{};
+ int32_t value{};
+ int32_t count{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ signal_type = Convert<SignalType>(GetReg64(system, 1));
+ value = Convert<int32_t>(GetReg64(system, 2));
+ count = Convert<int32_t>(GetReg64(system, 3));
+
+ ret = SignalToAddress64(system, address, signal_type, value, count);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_SynchronizePreemptionState64(Core::System& system) {
+ SynchronizePreemptionState64(system);
+}
+
+static void SvcWrap_GetResourceLimitPeakValue64(Core::System& system) {
+ Result ret{};
+
+ int64_t out_peak_value{};
+ Handle resource_limit_handle{};
+ LimitableResource which{};
+
+ resource_limit_handle = Convert<Handle>(GetReg64(system, 1));
+ which = Convert<LimitableResource>(GetReg64(system, 2));
+
+ ret = GetResourceLimitPeakValue64(system, std::addressof(out_peak_value), resource_limit_handle,
+     which);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_peak_value));
+}
+
+static void SvcWrap_CreateIoPool64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ IoPoolType which{};
+
+ which = Convert<IoPoolType>(GetReg64(system, 1));
+
+ ret = CreateIoPool64(system, std::addressof(out_handle), which);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_CreateIoRegion64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ Handle io_pool{};
+ uint64_t physical_address{};
+ uint64_t size{};
+ MemoryMapping mapping{};
+ MemoryPermission perm{};
+
+ io_pool = Convert<Handle>(GetReg64(system, 1));
+ physical_address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+ mapping = Convert<MemoryMapping>(GetReg64(system, 4));
+ perm = Convert<MemoryPermission>(GetReg64(system, 5));
+
+ ret = CreateIoRegion64(system, std::addressof(out_handle), io_pool, physical_address, size,
+     mapping, perm);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_KernelDebug64(Core::System& system) {
+ KernelDebugType kern_debug_type{};
+ uint64_t arg0{};
+ uint64_t arg1{};
+ uint64_t arg2{};
+
+ kern_debug_type = Convert<KernelDebugType>(GetReg64(system, 0));
+ arg0 = Convert<uint64_t>(GetReg64(system, 1));
+ arg1 = Convert<uint64_t>(GetReg64(system, 2));
+ arg2 = Convert<uint64_t>(GetReg64(system, 3));
+
+ KernelDebug64(system, kern_debug_type, arg0, arg1, arg2);
+}
+
+static void SvcWrap_ChangeKernelTraceState64(Core::System& system) {
+ KernelTraceState kern_trace_state{};
+
+ kern_trace_state = Convert<KernelTraceState>(GetReg64(system, 0));
+
+ ChangeKernelTraceState64(system, kern_trace_state);
+}
- *out_num_threads = static_cast<u32>(num_threads);
- return ResultSuccess;
-}
-
-static Result FlushProcessDataCache32(Core::System& system, Handle process_handle, u64 address,
- u64 size) {
- // Validate address/size.
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS(address == static_cast<uintptr_t>(address), ResultInvalidCurrentMemory);
- R_UNLESS(size == static_cast<size_t>(size), ResultInvalidCurrentMemory);
-
- // Get the process from its handle.
- KScopedAutoObject process =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KProcess>(process_handle);
- R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
-
- // Verify the region is within range.
- auto& page_table = process->PageTable();
- R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
-
- // Perform the operation.
- R_RETURN(system.Memory().FlushDataCache(*process, address, size));
-}
-
-namespace {
-struct FunctionDef {
- using Func = void(Core::System&);
-
- u32 id;
- Func* func;
- const char* name;
-};
-} // namespace
-
-static const FunctionDef SVC_Table_32[] = {
- {0x00, nullptr, "Unknown0"},
- {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"},
- {0x02, nullptr, "SetMemoryPermission32"},
- {0x03, SvcWrap32<SetMemoryAttribute32>, "SetMemoryAttribute32"},
- {0x04, SvcWrap32<MapMemory32>, "MapMemory32"},
- {0x05, SvcWrap32<UnmapMemory32>, "UnmapMemory32"},
- {0x06, SvcWrap32<QueryMemory32>, "QueryMemory32"},
- {0x07, SvcWrap32<ExitProcess32>, "ExitProcess32"},
- {0x08, SvcWrap32<CreateThread32>, "CreateThread32"},
- {0x09, SvcWrap32<StartThread32>, "StartThread32"},
- {0x0a, SvcWrap32<ExitThread32>, "ExitThread32"},
- {0x0b, SvcWrap32<SleepThread32>, "SleepThread32"},
- {0x0c, SvcWrap32<GetThreadPriority32>, "GetThreadPriority32"},
- {0x0d, SvcWrap32<SetThreadPriority32>, "SetThreadPriority32"},
- {0x0e, SvcWrap32<GetThreadCoreMask32>, "GetThreadCoreMask32"},
- {0x0f, SvcWrap32<SetThreadCoreMask32>, "SetThreadCoreMask32"},
- {0x10, SvcWrap32<GetCurrentProcessorNumber32>, "GetCurrentProcessorNumber32"},
- {0x11, SvcWrap32<SignalEvent32>, "SignalEvent32"},
- {0x12, SvcWrap32<ClearEvent32>, "ClearEvent32"},
- {0x13, SvcWrap32<MapSharedMemory32>, "MapSharedMemory32"},
- {0x14, SvcWrap32<UnmapSharedMemory32>, "UnmapSharedMemory32"},
- {0x15, SvcWrap32<CreateTransferMemory32>, "CreateTransferMemory32"},
- {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"},
- {0x17, SvcWrap32<ResetSignal32>, "ResetSignal32"},
- {0x18, SvcWrap32<WaitSynchronization32>, "WaitSynchronization32"},
- {0x19, SvcWrap32<CancelSynchronization32>, "CancelSynchronization32"},
- {0x1a, SvcWrap32<ArbitrateLock32>, "ArbitrateLock32"},
- {0x1b, SvcWrap32<ArbitrateUnlock32>, "ArbitrateUnlock32"},
- {0x1c, SvcWrap32<WaitProcessWideKeyAtomic32>, "WaitProcessWideKeyAtomic32"},
- {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"},
- {0x1e, SvcWrap32<GetSystemTick32>, "GetSystemTick32"},
- {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"},
- {0x20, nullptr, "SendSyncRequestLight32"},
- {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"},
- {0x22, nullptr, "SendSyncRequestWithUserBuffer32"},
- {0x23, nullptr, "SendAsyncRequestWithUserBuffer32"},
- {0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"},
- {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"},
- {0x26, SvcWrap32<Break32>, "Break32"},
- {0x27, SvcWrap32<OutputDebugString32>, "OutputDebugString32"},
- {0x28, nullptr, "ReturnFromException32"},
- {0x29, SvcWrap32<GetInfo32>, "GetInfo32"},
- {0x2a, nullptr, "FlushEntireDataCache32"},
- {0x2b, nullptr, "FlushDataCache32"},
- {0x2c, SvcWrap32<MapPhysicalMemory32>, "MapPhysicalMemory32"},
- {0x2d, SvcWrap32<UnmapPhysicalMemory32>, "UnmapPhysicalMemory32"},
- {0x2e, nullptr, "GetDebugFutureThreadInfo32"},
- {0x2f, nullptr, "GetLastThreadInfo32"},
- {0x30, nullptr, "GetResourceLimitLimitValue32"},
- {0x31, nullptr, "GetResourceLimitCurrentValue32"},
- {0x32, SvcWrap32<SetThreadActivity32>, "SetThreadActivity32"},
- {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"},
- {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"},
- {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"},
- {0x36, SvcWrap32<SynchronizePreemptionState>, "SynchronizePreemptionState32"},
- {0x37, nullptr, "GetResourceLimitPeakValue32"},
- {0x38, nullptr, "Unknown38"},
- {0x39, nullptr, "CreateIoPool32"},
- {0x3a, nullptr, "CreateIoRegion32"},
- {0x3b, nullptr, "Unknown3b"},
- {0x3c, nullptr, "KernelDebug32"},
- {0x3d, nullptr, "ChangeKernelTraceState32"},
- {0x3e, nullptr, "Unknown3e"},
- {0x3f, nullptr, "Unknown3f"},
- {0x40, nullptr, "CreateSession32"},
- {0x41, nullptr, "AcceptSession32"},
- {0x42, nullptr, "ReplyAndReceiveLight32"},
- {0x43, nullptr, "ReplyAndReceive32"},
- {0x44, nullptr, "ReplyAndReceiveWithUserBuffer32"},
- {0x45, SvcWrap32<CreateEvent32>, "CreateEvent32"},
- {0x46, nullptr, "MapIoRegion32"},
- {0x47, nullptr, "UnmapIoRegion32"},
- {0x48, nullptr, "MapPhysicalMemoryUnsafe32"},
- {0x49, nullptr, "UnmapPhysicalMemoryUnsafe32"},
- {0x4a, nullptr, "SetUnsafeLimit32"},
- {0x4b, SvcWrap32<CreateCodeMemory32>, "CreateCodeMemory32"},
- {0x4c, SvcWrap32<ControlCodeMemory32>, "ControlCodeMemory32"},
- {0x4d, nullptr, "SleepSystem32"},
- {0x4e, nullptr, "ReadWriteRegister32"},
- {0x4f, nullptr, "SetProcessActivity32"},
- {0x50, nullptr, "CreateSharedMemory32"},
- {0x51, nullptr, "MapTransferMemory32"},
- {0x52, nullptr, "UnmapTransferMemory32"},
- {0x53, nullptr, "CreateInterruptEvent32"},
- {0x54, nullptr, "QueryPhysicalAddress32"},
- {0x55, nullptr, "QueryIoMapping32"},
- {0x56, nullptr, "CreateDeviceAddressSpace32"},
- {0x57, nullptr, "AttachDeviceAddressSpace32"},
- {0x58, nullptr, "DetachDeviceAddressSpace32"},
- {0x59, nullptr, "MapDeviceAddressSpaceByForce32"},
- {0x5a, nullptr, "MapDeviceAddressSpaceAligned32"},
- {0x5b, nullptr, "MapDeviceAddressSpace32"},
- {0x5c, nullptr, "UnmapDeviceAddressSpace32"},
- {0x5d, nullptr, "InvalidateProcessDataCache32"},
- {0x5e, nullptr, "StoreProcessDataCache32"},
- {0x5F, SvcWrap32<FlushProcessDataCache32>, "FlushProcessDataCache32"},
- {0x60, nullptr, "DebugActiveProcess32"},
- {0x61, nullptr, "BreakDebugProcess32"},
- {0x62, nullptr, "TerminateDebugProcess32"},
- {0x63, nullptr, "GetDebugEvent32"},
- {0x64, nullptr, "ContinueDebugEvent32"},
- {0x65, nullptr, "GetProcessList32"},
- {0x66, nullptr, "GetThreadList32"},
- {0x67, nullptr, "GetDebugThreadContext32"},
- {0x68, nullptr, "SetDebugThreadContext32"},
- {0x69, nullptr, "QueryDebugProcessMemory32"},
- {0x6A, nullptr, "ReadDebugProcessMemory32"},
- {0x6B, nullptr, "WriteDebugProcessMemory32"},
- {0x6C, nullptr, "SetHardwareBreakPoint32"},
- {0x6D, nullptr, "GetDebugThreadParam32"},
- {0x6E, nullptr, "Unknown6E"},
- {0x6f, nullptr, "GetSystemInfo32"},
- {0x70, nullptr, "CreatePort32"},
- {0x71, nullptr, "ManageNamedPort32"},
- {0x72, nullptr, "ConnectToPort32"},
- {0x73, nullptr, "SetProcessMemoryPermission32"},
- {0x74, nullptr, "MapProcessMemory32"},
- {0x75, nullptr, "UnmapProcessMemory32"},
- {0x76, nullptr, "QueryProcessMemory32"},
- {0x77, nullptr, "MapProcessCodeMemory32"},
- {0x78, nullptr, "UnmapProcessCodeMemory32"},
- {0x79, nullptr, "CreateProcess32"},
- {0x7A, nullptr, "StartProcess32"},
- {0x7B, nullptr, "TerminateProcess32"},
- {0x7C, nullptr, "GetProcessInfo32"},
- {0x7D, nullptr, "CreateResourceLimit32"},
- {0x7E, nullptr, "SetResourceLimitLimitValue32"},
- {0x7F, nullptr, "CallSecureMonitor32"},
- {0x80, nullptr, "Unknown"},
- {0x81, nullptr, "Unknown"},
- {0x82, nullptr, "Unknown"},
- {0x83, nullptr, "Unknown"},
- {0x84, nullptr, "Unknown"},
- {0x85, nullptr, "Unknown"},
- {0x86, nullptr, "Unknown"},
- {0x87, nullptr, "Unknown"},
- {0x88, nullptr, "Unknown"},
- {0x89, nullptr, "Unknown"},
- {0x8A, nullptr, "Unknown"},
- {0x8B, nullptr, "Unknown"},
- {0x8C, nullptr, "Unknown"},
- {0x8D, nullptr, "Unknown"},
- {0x8E, nullptr, "Unknown"},
- {0x8F, nullptr, "Unknown"},
- {0x90, nullptr, "Unknown"},
- {0x91, nullptr, "Unknown"},
- {0x92, nullptr, "Unknown"},
- {0x93, nullptr, "Unknown"},
- {0x94, nullptr, "Unknown"},
- {0x95, nullptr, "Unknown"},
- {0x96, nullptr, "Unknown"},
- {0x97, nullptr, "Unknown"},
- {0x98, nullptr, "Unknown"},
- {0x99, nullptr, "Unknown"},
- {0x9A, nullptr, "Unknown"},
- {0x9B, nullptr, "Unknown"},
- {0x9C, nullptr, "Unknown"},
- {0x9D, nullptr, "Unknown"},
- {0x9E, nullptr, "Unknown"},
- {0x9F, nullptr, "Unknown"},
- {0xA0, nullptr, "Unknown"},
- {0xA1, nullptr, "Unknown"},
- {0xA2, nullptr, "Unknown"},
- {0xA3, nullptr, "Unknown"},
- {0xA4, nullptr, "Unknown"},
- {0xA5, nullptr, "Unknown"},
- {0xA6, nullptr, "Unknown"},
- {0xA7, nullptr, "Unknown"},
- {0xA8, nullptr, "Unknown"},
- {0xA9, nullptr, "Unknown"},
- {0xAA, nullptr, "Unknown"},
- {0xAB, nullptr, "Unknown"},
- {0xAC, nullptr, "Unknown"},
- {0xAD, nullptr, "Unknown"},
- {0xAE, nullptr, "Unknown"},
- {0xAF, nullptr, "Unknown"},
- {0xB0, nullptr, "Unknown"},
- {0xB1, nullptr, "Unknown"},
- {0xB2, nullptr, "Unknown"},
- {0xB3, nullptr, "Unknown"},
- {0xB4, nullptr, "Unknown"},
- {0xB5, nullptr, "Unknown"},
- {0xB6, nullptr, "Unknown"},
- {0xB7, nullptr, "Unknown"},
- {0xB8, nullptr, "Unknown"},
- {0xB9, nullptr, "Unknown"},
- {0xBA, nullptr, "Unknown"},
- {0xBB, nullptr, "Unknown"},
- {0xBC, nullptr, "Unknown"},
- {0xBD, nullptr, "Unknown"},
- {0xBE, nullptr, "Unknown"},
- {0xBF, nullptr, "Unknown"},
-};
-
-static const FunctionDef SVC_Table_64[] = {
- {0x00, nullptr, "Unknown0"},
- {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"},
- {0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"},
- {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"},
- {0x04, SvcWrap64<MapMemory>, "MapMemory"},
- {0x05, SvcWrap64<UnmapMemory>, "UnmapMemory"},
- {0x06, SvcWrap64<QueryMemory>, "QueryMemory"},
- {0x07, SvcWrap64<ExitProcess>, "ExitProcess"},
- {0x08, SvcWrap64<CreateThread>, "CreateThread"},
- {0x09, SvcWrap64<StartThread>, "StartThread"},
- {0x0A, SvcWrap64<ExitThread>, "ExitThread"},
- {0x0B, SvcWrap64<SleepThread>, "SleepThread"},
- {0x0C, SvcWrap64<GetThreadPriority>, "GetThreadPriority"},
- {0x0D, SvcWrap64<SetThreadPriority>, "SetThreadPriority"},
- {0x0E, SvcWrap64<GetThreadCoreMask>, "GetThreadCoreMask"},
- {0x0F, SvcWrap64<SetThreadCoreMask>, "SetThreadCoreMask"},
- {0x10, SvcWrap64<GetCurrentProcessorNumber>, "GetCurrentProcessorNumber"},
- {0x11, SvcWrap64<SignalEvent>, "SignalEvent"},
- {0x12, SvcWrap64<ClearEvent>, "ClearEvent"},
- {0x13, SvcWrap64<MapSharedMemory>, "MapSharedMemory"},
- {0x14, SvcWrap64<UnmapSharedMemory>, "UnmapSharedMemory"},
- {0x15, SvcWrap64<CreateTransferMemory>, "CreateTransferMemory"},
- {0x16, SvcWrap64<CloseHandle>, "CloseHandle"},
- {0x17, SvcWrap64<ResetSignal>, "ResetSignal"},
- {0x18, SvcWrap64<WaitSynchronization>, "WaitSynchronization"},
- {0x19, SvcWrap64<CancelSynchronization>, "CancelSynchronization"},
- {0x1A, SvcWrap64<ArbitrateLock>, "ArbitrateLock"},
- {0x1B, SvcWrap64<ArbitrateUnlock>, "ArbitrateUnlock"},
- {0x1C, SvcWrap64<WaitProcessWideKeyAtomic>, "WaitProcessWideKeyAtomic"},
- {0x1D, SvcWrap64<SignalProcessWideKey>, "SignalProcessWideKey"},
- {0x1E, SvcWrap64<GetSystemTick>, "GetSystemTick"},
- {0x1F, SvcWrap64<ConnectToNamedPort>, "ConnectToNamedPort"},
- {0x20, nullptr, "SendSyncRequestLight"},
- {0x21, SvcWrap64<SendSyncRequest>, "SendSyncRequest"},
- {0x22, nullptr, "SendSyncRequestWithUserBuffer"},
- {0x23, nullptr, "SendAsyncRequestWithUserBuffer"},
- {0x24, SvcWrap64<GetProcessId>, "GetProcessId"},
- {0x25, SvcWrap64<GetThreadId>, "GetThreadId"},
- {0x26, SvcWrap64<Break>, "Break"},
- {0x27, SvcWrap64<OutputDebugString>, "OutputDebugString"},
- {0x28, nullptr, "ReturnFromException"},
- {0x29, SvcWrap64<GetInfo>, "GetInfo"},
- {0x2A, nullptr, "FlushEntireDataCache"},
- {0x2B, nullptr, "FlushDataCache"},
- {0x2C, SvcWrap64<MapPhysicalMemory>, "MapPhysicalMemory"},
- {0x2D, SvcWrap64<UnmapPhysicalMemory>, "UnmapPhysicalMemory"},
- {0x2E, nullptr, "GetDebugFutureThreadInfo"},
- {0x2F, nullptr, "GetLastThreadInfo"},
- {0x30, SvcWrap64<GetResourceLimitLimitValue>, "GetResourceLimitLimitValue"},
- {0x31, SvcWrap64<GetResourceLimitCurrentValue>, "GetResourceLimitCurrentValue"},
- {0x32, SvcWrap64<SetThreadActivity>, "SetThreadActivity"},
- {0x33, SvcWrap64<GetThreadContext>, "GetThreadContext"},
- {0x34, SvcWrap64<WaitForAddress>, "WaitForAddress"},
- {0x35, SvcWrap64<SignalToAddress>, "SignalToAddress"},
- {0x36, SvcWrap64<SynchronizePreemptionState>, "SynchronizePreemptionState"},
- {0x37, nullptr, "GetResourceLimitPeakValue"},
- {0x38, nullptr, "Unknown38"},
- {0x39, nullptr, "CreateIoPool"},
- {0x3A, nullptr, "CreateIoRegion"},
- {0x3B, nullptr, "Unknown3B"},
- {0x3C, SvcWrap64<KernelDebug>, "KernelDebug"},
- {0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"},
- {0x3E, nullptr, "Unknown3e"},
- {0x3F, nullptr, "Unknown3f"},
- {0x40, SvcWrap64<CreateSession>, "CreateSession"},
- {0x41, nullptr, "AcceptSession"},
- {0x42, nullptr, "ReplyAndReceiveLight"},
- {0x43, SvcWrap64<ReplyAndReceive>, "ReplyAndReceive"},
- {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"},
- {0x45, SvcWrap64<CreateEvent>, "CreateEvent"},
- {0x46, nullptr, "MapIoRegion"},
- {0x47, nullptr, "UnmapIoRegion"},
- {0x48, nullptr, "MapPhysicalMemoryUnsafe"},
- {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
- {0x4A, nullptr, "SetUnsafeLimit"},
- {0x4B, SvcWrap64<CreateCodeMemory>, "CreateCodeMemory"},
- {0x4C, SvcWrap64<ControlCodeMemory>, "ControlCodeMemory"},
- {0x4D, nullptr, "SleepSystem"},
- {0x4E, nullptr, "ReadWriteRegister"},
- {0x4F, nullptr, "SetProcessActivity"},
- {0x50, nullptr, "CreateSharedMemory"},
- {0x51, nullptr, "MapTransferMemory"},
- {0x52, nullptr, "UnmapTransferMemory"},
- {0x53, nullptr, "CreateInterruptEvent"},
- {0x54, nullptr, "QueryPhysicalAddress"},
- {0x55, nullptr, "QueryIoMapping"},
- {0x56, nullptr, "CreateDeviceAddressSpace"},
- {0x57, nullptr, "AttachDeviceAddressSpace"},
- {0x58, nullptr, "DetachDeviceAddressSpace"},
- {0x59, nullptr, "MapDeviceAddressSpaceByForce"},
- {0x5A, nullptr, "MapDeviceAddressSpaceAligned"},
- {0x5B, nullptr, "MapDeviceAddressSpace"},
- {0x5C, nullptr, "UnmapDeviceAddressSpace"},
- {0x5D, nullptr, "InvalidateProcessDataCache"},
- {0x5E, nullptr, "StoreProcessDataCache"},
- {0x5F, nullptr, "FlushProcessDataCache"},
- {0x60, nullptr, "DebugActiveProcess"},
- {0x61, nullptr, "BreakDebugProcess"},
- {0x62, nullptr, "TerminateDebugProcess"},
- {0x63, nullptr, "GetDebugEvent"},
- {0x64, nullptr, "ContinueDebugEvent"},
- {0x65, SvcWrap64<GetProcessList>, "GetProcessList"},
- {0x66, SvcWrap64<GetThreadList>, "GetThreadList"},
- {0x67, nullptr, "GetDebugThreadContext"},
- {0x68, nullptr, "SetDebugThreadContext"},
- {0x69, nullptr, "QueryDebugProcessMemory"},
- {0x6A, nullptr, "ReadDebugProcessMemory"},
- {0x6B, nullptr, "WriteDebugProcessMemory"},
- {0x6C, nullptr, "SetHardwareBreakPoint"},
- {0x6D, nullptr, "GetDebugThreadParam"},
- {0x6E, nullptr, "Unknown6E"},
- {0x6F, nullptr, "GetSystemInfo"},
- {0x70, nullptr, "CreatePort"},
- {0x71, nullptr, "ManageNamedPort"},
- {0x72, nullptr, "ConnectToPort"},
- {0x73, SvcWrap64<SetProcessMemoryPermission>, "SetProcessMemoryPermission"},
- {0x74, SvcWrap64<MapProcessMemory>, "MapProcessMemory"},
- {0x75, SvcWrap64<UnmapProcessMemory>, "UnmapProcessMemory"},
- {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"},
- {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"},
- {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"},
- {0x79, nullptr, "CreateProcess"},
- {0x7A, nullptr, "StartProcess"},
- {0x7B, nullptr, "TerminateProcess"},
- {0x7C, SvcWrap64<GetProcessInfo>, "GetProcessInfo"},
- {0x7D, SvcWrap64<CreateResourceLimit>, "CreateResourceLimit"},
- {0x7E, SvcWrap64<SetResourceLimitLimitValue>, "SetResourceLimitLimitValue"},
- {0x7F, nullptr, "CallSecureMonitor"},
- {0x80, nullptr, "Unknown"},
- {0x81, nullptr, "Unknown"},
- {0x82, nullptr, "Unknown"},
- {0x83, nullptr, "Unknown"},
- {0x84, nullptr, "Unknown"},
- {0x85, nullptr, "Unknown"},
- {0x86, nullptr, "Unknown"},
- {0x87, nullptr, "Unknown"},
- {0x88, nullptr, "Unknown"},
- {0x89, nullptr, "Unknown"},
- {0x8A, nullptr, "Unknown"},
- {0x8B, nullptr, "Unknown"},
- {0x8C, nullptr, "Unknown"},
- {0x8D, nullptr, "Unknown"},
- {0x8E, nullptr, "Unknown"},
- {0x8F, nullptr, "Unknown"},
- {0x90, nullptr, "Unknown"},
- {0x91, nullptr, "Unknown"},
- {0x92, nullptr, "Unknown"},
- {0x93, nullptr, "Unknown"},
- {0x94, nullptr, "Unknown"},
- {0x95, nullptr, "Unknown"},
- {0x96, nullptr, "Unknown"},
- {0x97, nullptr, "Unknown"},
- {0x98, nullptr, "Unknown"},
- {0x99, nullptr, "Unknown"},
- {0x9A, nullptr, "Unknown"},
- {0x9B, nullptr, "Unknown"},
- {0x9C, nullptr, "Unknown"},
- {0x9D, nullptr, "Unknown"},
- {0x9E, nullptr, "Unknown"},
- {0x9F, nullptr, "Unknown"},
- {0xA0, nullptr, "Unknown"},
- {0xA1, nullptr, "Unknown"},
- {0xA2, nullptr, "Unknown"},
- {0xA3, nullptr, "Unknown"},
- {0xA4, nullptr, "Unknown"},
- {0xA5, nullptr, "Unknown"},
- {0xA6, nullptr, "Unknown"},
- {0xA7, nullptr, "Unknown"},
- {0xA8, nullptr, "Unknown"},
- {0xA9, nullptr, "Unknown"},
- {0xAA, nullptr, "Unknown"},
- {0xAB, nullptr, "Unknown"},
- {0xAC, nullptr, "Unknown"},
- {0xAD, nullptr, "Unknown"},
- {0xAE, nullptr, "Unknown"},
- {0xAF, nullptr, "Unknown"},
- {0xB0, nullptr, "Unknown"},
- {0xB1, nullptr, "Unknown"},
- {0xB2, nullptr, "Unknown"},
- {0xB3, nullptr, "Unknown"},
- {0xB4, nullptr, "Unknown"},
- {0xB5, nullptr, "Unknown"},
- {0xB6, nullptr, "Unknown"},
- {0xB7, nullptr, "Unknown"},
- {0xB8, nullptr, "Unknown"},
- {0xB9, nullptr, "Unknown"},
- {0xBA, nullptr, "Unknown"},
- {0xBB, nullptr, "Unknown"},
- {0xBC, nullptr, "Unknown"},
- {0xBD, nullptr, "Unknown"},
- {0xBE, nullptr, "Unknown"},
- {0xBF, nullptr, "Unknown"},
-};
-
-static const FunctionDef* GetSVCInfo32(u32 func_num) {
- if (func_num >= std::size(SVC_Table_32)) {
- LOG_ERROR(Kernel_SVC, "Unknown svc=0x{:02X}", func_num);
- return nullptr;
+static void SvcWrap_CreateSession64(Core::System& system) {
+ Result ret{};
+
+ Handle out_server_session_handle{};
+ Handle out_client_session_handle{};
+ bool is_light{};
+ uint64_t name{};
+
+ is_light = Convert<bool>(GetReg64(system, 2));
+ name = Convert<uint64_t>(GetReg64(system, 3));
+
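+    // Two handles are returned: the server session in register 1 and the
+    // client session in register 2.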
+ ret = CreateSession64(system, std::addressof(out_server_session_handle),
+     std::addressof(out_client_session_handle), is_light, name);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_server_session_handle));
+ SetReg64(system, 2, Convert<uint64_t>(out_client_session_handle));
+}
+
+static void SvcWrap_AcceptSession64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ Handle port{};
+
+ port = Convert<Handle>(GetReg64(system, 1));
+
+ ret = AcceptSession64(system, std::addressof(out_handle), port);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_ReplyAndReceive64(Core::System& system) {
+ Result ret{};
+
+ int32_t out_index{};
+ uint64_t handles{};
+ int32_t num_handles{};
+ Handle reply_target{};
+ int64_t timeout_ns{};
+
+ handles = Convert<uint64_t>(GetReg64(system, 1));
+ num_handles = Convert<int32_t>(GetReg64(system, 2));
+ reply_target = Convert<Handle>(GetReg64(system, 3));
+ timeout_ns = Convert<int64_t>(GetReg64(system, 4));
+
+ ret = ReplyAndReceive64(system, std::addressof(out_index), handles, num_handles, reply_target,
+     timeout_ns);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_index));
+}
+
+static void SvcWrap_ReplyAndReceiveWithUserBuffer64(Core::System& system) {
+ Result ret{};
+
+ int32_t out_index{};
+ uint64_t message_buffer{};
+ uint64_t message_buffer_size{};
+ uint64_t handles{};
+ int32_t num_handles{};
+ Handle reply_target{};
+ int64_t timeout_ns{};
+
+ message_buffer = Convert<uint64_t>(GetReg64(system, 1));
+ message_buffer_size = Convert<uint64_t>(GetReg64(system, 2));
+ handles = Convert<uint64_t>(GetReg64(system, 3));
+ num_handles = Convert<int32_t>(GetReg64(system, 4));
+ reply_target = Convert<Handle>(GetReg64(system, 5));
+ timeout_ns = Convert<int64_t>(GetReg64(system, 6));
+
+ ret = ReplyAndReceiveWithUserBuffer64(system, std::addressof(out_index), message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_index));
+}
+
+static void SvcWrap_CreateEvent64(Core::System& system) {
+ Result ret{};
+
+ Handle out_write_handle{};
+ Handle out_read_handle{};
+
+ ret = CreateEvent64(system, std::addressof(out_write_handle), std::addressof(out_read_handle));
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_write_handle));
+ SetReg64(system, 2, Convert<uint64_t>(out_read_handle));
+}
+
+static void SvcWrap_MapIoRegion64(Core::System& system) {
+ Result ret{};
+
+ Handle io_region{};
+ uint64_t address{};
+ uint64_t size{};
+ MemoryPermission perm{};
+
+ io_region = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+ perm = Convert<MemoryPermission>(GetReg64(system, 3));
+
+ ret = MapIoRegion64(system, io_region, address, size, perm);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_UnmapIoRegion64(Core::System& system) {
+ Result ret{};
+
+ Handle io_region{};
+ uint64_t address{};
+ uint64_t size{};
+
+ io_region = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = UnmapIoRegion64(system, io_region, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_MapPhysicalMemoryUnsafe64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ uint64_t size{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ size = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = MapPhysicalMemoryUnsafe64(system, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_UnmapPhysicalMemoryUnsafe64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ uint64_t size{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ size = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = UnmapPhysicalMemoryUnsafe64(system, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_SetUnsafeLimit64(Core::System& system) {
+ Result ret{};
+
+ uint64_t limit{};
+
+ limit = Convert<uint64_t>(GetReg64(system, 0));
+
+ ret = SetUnsafeLimit64(system, limit);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_CreateCodeMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint64_t address{};
+ uint64_t size{};
+
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = CreateCodeMemory64(system, std::addressof(out_handle), address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_ControlCodeMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle code_memory_handle{};
+ CodeMemoryOperation operation{};
+ uint64_t address{};
+ uint64_t size{};
+ MemoryPermission perm{};
+
+ code_memory_handle = Convert<Handle>(GetReg64(system, 0));
+ operation = Convert<CodeMemoryOperation>(GetReg64(system, 1));
+ address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+ perm = Convert<MemoryPermission>(GetReg64(system, 4));
+
+ ret = ControlCodeMemory64(system, code_memory_handle, operation, address, size, perm);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_SleepSystem64(Core::System& system) {
+ SleepSystem64(system);
+}
+
+static void SvcWrap_ReadWriteRegister64(Core::System& system) {
+ Result ret{};
+
+ uint32_t out_value{};
+ uint64_t address{};
+ uint32_t mask{};
+ uint32_t value{};
+
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ mask = Convert<uint32_t>(GetReg64(system, 2));
+ value = Convert<uint32_t>(GetReg64(system, 3));
+
+ ret = ReadWriteRegister64(system, std::addressof(out_value), address, mask, value);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_value));
+}
+
+static void SvcWrap_SetProcessActivity64(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ ProcessActivity process_activity{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 0));
+ process_activity = Convert<ProcessActivity>(GetReg64(system, 1));
+
+ ret = SetProcessActivity64(system, process_handle, process_activity);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_CreateSharedMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint64_t size{};
+ MemoryPermission owner_perm{};
+ MemoryPermission remote_perm{};
+
+ size = Convert<uint64_t>(GetReg64(system, 1));
+ owner_perm = Convert<MemoryPermission>(GetReg64(system, 2));
+ remote_perm = Convert<MemoryPermission>(GetReg64(system, 3));
+
+ ret = CreateSharedMemory64(system, std::addressof(out_handle), size, owner_perm, remote_perm);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_MapTransferMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle trmem_handle{};
+ uint64_t address{};
+ uint64_t size{};
+ MemoryPermission owner_perm{};
+
+ trmem_handle = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+ owner_perm = Convert<MemoryPermission>(GetReg64(system, 3));
+
+ ret = MapTransferMemory64(system, trmem_handle, address, size, owner_perm);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_UnmapTransferMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle trmem_handle{};
+ uint64_t address{};
+ uint64_t size{};
+
+ trmem_handle = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = UnmapTransferMemory64(system, trmem_handle, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_CreateInterruptEvent64(Core::System& system) {
+ Result ret{};
+
+ Handle out_read_handle{};
+ int32_t interrupt_id{};
+ InterruptType interrupt_type{};
+
+ interrupt_id = Convert<int32_t>(GetReg64(system, 1));
+ interrupt_type = Convert<InterruptType>(GetReg64(system, 2));
+
+ ret = CreateInterruptEvent64(system, std::addressof(out_read_handle), interrupt_id, interrupt_type);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_read_handle));
+}
+
+static void SvcWrap_QueryPhysicalAddress64(Core::System& system) {
+ Result ret{};
+
+ lp64::PhysicalMemoryInfo out_info{};
+ uint64_t address{};
+
+ address = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = QueryPhysicalAddress64(system, std::addressof(out_info), address);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ auto out_info_scatter = Convert<std::array<uint64_t, 3>>(out_info);
+ SetReg64(system, 1, out_info_scatter[0]);
+ SetReg64(system, 2, out_info_scatter[1]);
+ SetReg64(system, 3, out_info_scatter[2]);
+}
+
+static void SvcWrap_QueryIoMapping64(Core::System& system) {
+ Result ret{};
+
+ uint64_t out_address{};
+ uint64_t out_size{};
+ uint64_t physical_address{};
+ uint64_t size{};
+
+ physical_address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = QueryIoMapping64(system, std::addressof(out_address), std::addressof(out_size), physical_address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_address));
+ SetReg64(system, 2, Convert<uint64_t>(out_size));
+}
+
+static void SvcWrap_CreateDeviceAddressSpace64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint64_t das_address{};
+ uint64_t das_size{};
+
+ das_address = Convert<uint64_t>(GetReg64(system, 1));
+ das_size = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = CreateDeviceAddressSpace64(system, std::addressof(out_handle), das_address, das_size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_AttachDeviceAddressSpace64(Core::System& system) {
+ Result ret{};
+
+ DeviceName device_name{};
+ Handle das_handle{};
+
+ device_name = Convert<DeviceName>(GetReg64(system, 0));
+ das_handle = Convert<Handle>(GetReg64(system, 1));
+
+ ret = AttachDeviceAddressSpace64(system, device_name, das_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_DetachDeviceAddressSpace64(Core::System& system) {
+ Result ret{};
+
+ DeviceName device_name{};
+ Handle das_handle{};
+
+ device_name = Convert<DeviceName>(GetReg64(system, 0));
+ das_handle = Convert<Handle>(GetReg64(system, 1));
+
+ ret = DetachDeviceAddressSpace64(system, device_name, das_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_MapDeviceAddressSpaceByForce64(Core::System& system) {
+ Result ret{};
+
+ Handle das_handle{};
+ Handle process_handle{};
+ uint64_t process_address{};
+ uint64_t size{};
+ uint64_t device_address{};
+ uint32_t option{};
+
+ das_handle = Convert<Handle>(GetReg64(system, 0));
+ process_handle = Convert<Handle>(GetReg64(system, 1));
+ process_address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+ device_address = Convert<uint64_t>(GetReg64(system, 4));
+ option = Convert<uint32_t>(GetReg64(system, 5));
+
+ ret = MapDeviceAddressSpaceByForce64(system, das_handle, process_handle, process_address, size, device_address, option);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_MapDeviceAddressSpaceAligned64(Core::System& system) {
+ Result ret{};
+
+ Handle das_handle{};
+ Handle process_handle{};
+ uint64_t process_address{};
+ uint64_t size{};
+ uint64_t device_address{};
+ uint32_t option{};
+
+ das_handle = Convert<Handle>(GetReg64(system, 0));
+ process_handle = Convert<Handle>(GetReg64(system, 1));
+ process_address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+ device_address = Convert<uint64_t>(GetReg64(system, 4));
+ option = Convert<uint32_t>(GetReg64(system, 5));
+
+ ret = MapDeviceAddressSpaceAligned64(system, das_handle, process_handle, process_address, size, device_address, option);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_UnmapDeviceAddressSpace64(Core::System& system) {
+ Result ret{};
+
+ Handle das_handle{};
+ Handle process_handle{};
+ uint64_t process_address{};
+ uint64_t size{};
+ uint64_t device_address{};
+
+ das_handle = Convert<Handle>(GetReg64(system, 0));
+ process_handle = Convert<Handle>(GetReg64(system, 1));
+ process_address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+ device_address = Convert<uint64_t>(GetReg64(system, 4));
+
+ ret = UnmapDeviceAddressSpace64(system, das_handle, process_handle, process_address, size, device_address);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_InvalidateProcessDataCache64(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ uint64_t address{};
+ uint64_t size{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = InvalidateProcessDataCache64(system, process_handle, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_StoreProcessDataCache64(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ uint64_t address{};
+ uint64_t size{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = StoreProcessDataCache64(system, process_handle, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_FlushProcessDataCache64(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ uint64_t address{};
+ uint64_t size{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = FlushProcessDataCache64(system, process_handle, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_DebugActiveProcess64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint64_t process_id{};
+
+ process_id = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = DebugActiveProcess64(system, std::addressof(out_handle), process_id);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_BreakDebugProcess64(Core::System& system) {
+ Result ret{};
+
+ Handle debug_handle{};
+
+ debug_handle = Convert<Handle>(GetReg64(system, 0));
+
+ ret = BreakDebugProcess64(system, debug_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_TerminateDebugProcess64(Core::System& system) {
+ Result ret{};
+
+ Handle debug_handle{};
+
+ debug_handle = Convert<Handle>(GetReg64(system, 0));
+
+ ret = TerminateDebugProcess64(system, debug_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_GetDebugEvent64(Core::System& system) {
+ Result ret{};
+
+ uint64_t out_info{};
+ Handle debug_handle{};
+
+ out_info = Convert<uint64_t>(GetReg64(system, 0));
+ debug_handle = Convert<Handle>(GetReg64(system, 1));
+
+ ret = GetDebugEvent64(system, out_info, debug_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_ContinueDebugEvent64(Core::System& system) {
+ Result ret{};
+
+ Handle debug_handle{};
+ uint32_t flags{};
+ uint64_t thread_ids{};
+ int32_t num_thread_ids{};
+
+ debug_handle = Convert<Handle>(GetReg64(system, 0));
+ flags = Convert<uint32_t>(GetReg64(system, 1));
+ thread_ids = Convert<uint64_t>(GetReg64(system, 2));
+ num_thread_ids = Convert<int32_t>(GetReg64(system, 3));
+
+ ret = ContinueDebugEvent64(system, debug_handle, flags, thread_ids, num_thread_ids);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_GetProcessList64(Core::System& system) {
+ Result ret{};
+
+ int32_t out_num_processes{};
+ uint64_t out_process_ids{};
+ int32_t max_out_count{};
+
+ out_process_ids = Convert<uint64_t>(GetReg64(system, 1));
+ max_out_count = Convert<int32_t>(GetReg64(system, 2));
+
+ ret = GetProcessList64(system, std::addressof(out_num_processes), out_process_ids, max_out_count);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_num_processes));
+}
+
+static void SvcWrap_GetThreadList64(Core::System& system) {
+ Result ret{};
+
+ int32_t out_num_threads{};
+ uint64_t out_thread_ids{};
+ int32_t max_out_count{};
+ Handle debug_handle{};
+
+ out_thread_ids = Convert<uint64_t>(GetReg64(system, 1));
+ max_out_count = Convert<int32_t>(GetReg64(system, 2));
+ debug_handle = Convert<Handle>(GetReg64(system, 3));
+
+ ret = GetThreadList64(system, std::addressof(out_num_threads), out_thread_ids, max_out_count, debug_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_num_threads));
+}
+
+static void SvcWrap_GetDebugThreadContext64(Core::System& system) {
+ Result ret{};
+
+ uint64_t out_context{};
+ Handle debug_handle{};
+ uint64_t thread_id{};
+ uint32_t context_flags{};
+
+ out_context = Convert<uint64_t>(GetReg64(system, 0));
+ debug_handle = Convert<Handle>(GetReg64(system, 1));
+ thread_id = Convert<uint64_t>(GetReg64(system, 2));
+ context_flags = Convert<uint32_t>(GetReg64(system, 3));
+
+ ret = GetDebugThreadContext64(system, out_context, debug_handle, thread_id, context_flags);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_SetDebugThreadContext64(Core::System& system) {
+ Result ret{};
+
+ Handle debug_handle{};
+ uint64_t thread_id{};
+ uint64_t context{};
+ uint32_t context_flags{};
+
+ debug_handle = Convert<Handle>(GetReg64(system, 0));
+ thread_id = Convert<uint64_t>(GetReg64(system, 1));
+ context = Convert<uint64_t>(GetReg64(system, 2));
+ context_flags = Convert<uint32_t>(GetReg64(system, 3));
+
+ ret = SetDebugThreadContext64(system, debug_handle, thread_id, context, context_flags);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_QueryDebugProcessMemory64(Core::System& system) {
+ Result ret{};
+
+ PageInfo out_page_info{};
+ uint64_t out_memory_info{};
+ Handle process_handle{};
+ uint64_t address{};
+
+ out_memory_info = Convert<uint64_t>(GetReg64(system, 0));
+ process_handle = Convert<Handle>(GetReg64(system, 2));
+ address = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = QueryDebugProcessMemory64(system, out_memory_info, std::addressof(out_page_info), process_handle, address);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_page_info));
+}
+
+static void SvcWrap_ReadDebugProcessMemory64(Core::System& system) {
+ Result ret{};
+
+ uint64_t buffer{};
+ Handle debug_handle{};
+ uint64_t address{};
+ uint64_t size{};
+
+ buffer = Convert<uint64_t>(GetReg64(system, 0));
+ debug_handle = Convert<Handle>(GetReg64(system, 1));
+ address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = ReadDebugProcessMemory64(system, buffer, debug_handle, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_WriteDebugProcessMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle debug_handle{};
+ uint64_t buffer{};
+ uint64_t address{};
+ uint64_t size{};
+
+ debug_handle = Convert<Handle>(GetReg64(system, 0));
+ buffer = Convert<uint64_t>(GetReg64(system, 1));
+ address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = WriteDebugProcessMemory64(system, debug_handle, buffer, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_SetHardwareBreakPoint64(Core::System& system) {
+ Result ret{};
+
+ HardwareBreakPointRegisterName name{};
+ uint64_t flags{};
+ uint64_t value{};
+
+ name = Convert<HardwareBreakPointRegisterName>(GetReg64(system, 0));
+ flags = Convert<uint64_t>(GetReg64(system, 1));
+ value = Convert<uint64_t>(GetReg64(system, 2));
+
+ ret = SetHardwareBreakPoint64(system, name, flags, value);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_GetDebugThreadParam64(Core::System& system) {
+ Result ret{};
+
+ uint64_t out_64{};
+ uint32_t out_32{};
+ Handle debug_handle{};
+ uint64_t thread_id{};
+ DebugThreadParam param{};
+
+ debug_handle = Convert<Handle>(GetReg64(system, 2));
+ thread_id = Convert<uint64_t>(GetReg64(system, 3));
+ param = Convert<DebugThreadParam>(GetReg64(system, 4));
+
+ ret = GetDebugThreadParam64(system, std::addressof(out_64), std::addressof(out_32), debug_handle, thread_id, param);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_64));
+ SetReg64(system, 2, Convert<uint64_t>(out_32));
+}
+
+static void SvcWrap_GetSystemInfo64(Core::System& system) {
+ Result ret{};
+
+ uint64_t out{};
+ SystemInfoType info_type{};
+ Handle handle{};
+ uint64_t info_subtype{};
+
+ info_type = Convert<SystemInfoType>(GetReg64(system, 1));
+ handle = Convert<Handle>(GetReg64(system, 2));
+ info_subtype = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = GetSystemInfo64(system, std::addressof(out), info_type, handle, info_subtype);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out));
+}
+
+static void SvcWrap_CreatePort64(Core::System& system) {
+ Result ret{};
+
+ Handle out_server_handle{};
+ Handle out_client_handle{};
+ int32_t max_sessions{};
+ bool is_light{};
+ uint64_t name{};
+
+ max_sessions = Convert<int32_t>(GetReg64(system, 2));
+ is_light = Convert<bool>(GetReg64(system, 3));
+ name = Convert<uint64_t>(GetReg64(system, 4));
+
+ ret = CreatePort64(system, std::addressof(out_server_handle), std::addressof(out_client_handle), max_sessions, is_light, name);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_server_handle));
+ SetReg64(system, 2, Convert<uint64_t>(out_client_handle));
+}
+
+static void SvcWrap_ManageNamedPort64(Core::System& system) {
+ Result ret{};
+
+ Handle out_server_handle{};
+ uint64_t name{};
+ int32_t max_sessions{};
+
+ name = Convert<uint64_t>(GetReg64(system, 1));
+ max_sessions = Convert<int32_t>(GetReg64(system, 2));
+
+ ret = ManageNamedPort64(system, std::addressof(out_server_handle), name, max_sessions);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_server_handle));
+}
+
+static void SvcWrap_ConnectToPort64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ Handle port{};
+
+ port = Convert<Handle>(GetReg64(system, 1));
+
+ ret = ConnectToPort64(system, std::addressof(out_handle), port);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_SetProcessMemoryPermission64(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ uint64_t address{};
+ uint64_t size{};
+ MemoryPermission perm{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 0));
+ address = Convert<uint64_t>(GetReg64(system, 1));
+ size = Convert<uint64_t>(GetReg64(system, 2));
+ perm = Convert<MemoryPermission>(GetReg64(system, 3));
+
+ ret = SetProcessMemoryPermission64(system, process_handle, address, size, perm);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_MapProcessMemory64(Core::System& system) {
+ Result ret{};
+
+ uint64_t dst_address{};
+ Handle process_handle{};
+ uint64_t src_address{};
+ uint64_t size{};
+
+ dst_address = Convert<uint64_t>(GetReg64(system, 0));
+ process_handle = Convert<Handle>(GetReg64(system, 1));
+ src_address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = MapProcessMemory64(system, dst_address, process_handle, src_address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_UnmapProcessMemory64(Core::System& system) {
+ Result ret{};
+
+ uint64_t dst_address{};
+ Handle process_handle{};
+ uint64_t src_address{};
+ uint64_t size{};
+
+ dst_address = Convert<uint64_t>(GetReg64(system, 0));
+ process_handle = Convert<Handle>(GetReg64(system, 1));
+ src_address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = UnmapProcessMemory64(system, dst_address, process_handle, src_address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_QueryProcessMemory64(Core::System& system) {
+ Result ret{};
+
+ PageInfo out_page_info{};
+ uint64_t out_memory_info{};
+ Handle process_handle{};
+ uint64_t address{};
+
+ out_memory_info = Convert<uint64_t>(GetReg64(system, 0));
+ process_handle = Convert<Handle>(GetReg64(system, 2));
+ address = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = QueryProcessMemory64(system, out_memory_info, std::addressof(out_page_info), process_handle, address);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_page_info));
+}
+
+static void SvcWrap_MapProcessCodeMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ uint64_t dst_address{};
+ uint64_t src_address{};
+ uint64_t size{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 0));
+ dst_address = Convert<uint64_t>(GetReg64(system, 1));
+ src_address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = MapProcessCodeMemory64(system, process_handle, dst_address, src_address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_UnmapProcessCodeMemory64(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ uint64_t dst_address{};
+ uint64_t src_address{};
+ uint64_t size{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 0));
+ dst_address = Convert<uint64_t>(GetReg64(system, 1));
+ src_address = Convert<uint64_t>(GetReg64(system, 2));
+ size = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = UnmapProcessCodeMemory64(system, process_handle, dst_address, src_address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_CreateProcess64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+ uint64_t parameters{};
+ uint64_t caps{};
+ int32_t num_caps{};
+
+ parameters = Convert<uint64_t>(GetReg64(system, 1));
+ caps = Convert<uint64_t>(GetReg64(system, 2));
+ num_caps = Convert<int32_t>(GetReg64(system, 3));
+
+ ret = CreateProcess64(system, std::addressof(out_handle), parameters, caps, num_caps);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_StartProcess64(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+ int32_t priority{};
+ int32_t core_id{};
+ uint64_t main_thread_stack_size{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 0));
+ priority = Convert<int32_t>(GetReg64(system, 1));
+ core_id = Convert<int32_t>(GetReg64(system, 2));
+ main_thread_stack_size = Convert<uint64_t>(GetReg64(system, 3));
+
+ ret = StartProcess64(system, process_handle, priority, core_id, main_thread_stack_size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_TerminateProcess64(Core::System& system) {
+ Result ret{};
+
+ Handle process_handle{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 0));
+
+ ret = TerminateProcess64(system, process_handle);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_GetProcessInfo64(Core::System& system) {
+ Result ret{};
+
+ int64_t out_info{};
+ Handle process_handle{};
+ ProcessInfoType info_type{};
+
+ process_handle = Convert<Handle>(GetReg64(system, 1));
+ info_type = Convert<ProcessInfoType>(GetReg64(system, 2));
+
+ ret = GetProcessInfo64(system, std::addressof(out_info), process_handle, info_type);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_info));
+}
+
+static void SvcWrap_CreateResourceLimit64(Core::System& system) {
+ Result ret{};
+
+ Handle out_handle{};
+
+ ret = CreateResourceLimit64(system, std::addressof(out_handle));
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+ SetReg64(system, 1, Convert<uint64_t>(out_handle));
+}
+
+static void SvcWrap_SetResourceLimitLimitValue64(Core::System& system) {
+ Result ret{};
+
+ Handle resource_limit_handle{};
+ LimitableResource which{};
+ int64_t limit_value{};
+
+ resource_limit_handle = Convert<Handle>(GetReg64(system, 0));
+ which = Convert<LimitableResource>(GetReg64(system, 1));
+ limit_value = Convert<int64_t>(GetReg64(system, 2));
+
+ ret = SetResourceLimitLimitValue64(system, resource_limit_handle, which, limit_value);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_MapInsecureMemory64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ uint64_t size{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ size = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = MapInsecureMemory64(system, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void SvcWrap_UnmapInsecureMemory64(Core::System& system) {
+ Result ret{};
+
+ uint64_t address{};
+ uint64_t size{};
+
+ address = Convert<uint64_t>(GetReg64(system, 0));
+ size = Convert<uint64_t>(GetReg64(system, 1));
+
+ ret = UnmapInsecureMemory64(system, address, size);
+
+ SetReg64(system, 0, Convert<uint64_t>(ret));
+}
+
+static void Call32(Core::System& system, u32 imm) {
+ switch (static_cast<SvcId>(imm)) {
+ case SvcId::SetHeapSize:
+ return SvcWrap_SetHeapSize64From32(system);
+ case SvcId::SetMemoryPermission:
+ return SvcWrap_SetMemoryPermission64From32(system);
+ case SvcId::SetMemoryAttribute:
+ return SvcWrap_SetMemoryAttribute64From32(system);
+ case SvcId::MapMemory:
+ return SvcWrap_MapMemory64From32(system);
+ case SvcId::UnmapMemory:
+ return SvcWrap_UnmapMemory64From32(system);
+ case SvcId::QueryMemory:
+ return SvcWrap_QueryMemory64From32(system);
+ case SvcId::ExitProcess:
+ return SvcWrap_ExitProcess64From32(system);
+ case SvcId::CreateThread:
+ return SvcWrap_CreateThread64From32(system);
+ case SvcId::StartThread:
+ return SvcWrap_StartThread64From32(system);
+ case SvcId::ExitThread:
+ return SvcWrap_ExitThread64From32(system);
+ case SvcId::SleepThread:
+ return SvcWrap_SleepThread64From32(system);
+ case SvcId::GetThreadPriority:
+ return SvcWrap_GetThreadPriority64From32(system);
+ case SvcId::SetThreadPriority:
+ return SvcWrap_SetThreadPriority64From32(system);
+ case SvcId::GetThreadCoreMask:
+ return SvcWrap_GetThreadCoreMask64From32(system);
+ case SvcId::SetThreadCoreMask:
+ return SvcWrap_SetThreadCoreMask64From32(system);
+ case SvcId::GetCurrentProcessorNumber:
+ return SvcWrap_GetCurrentProcessorNumber64From32(system);
+ case SvcId::SignalEvent:
+ return SvcWrap_SignalEvent64From32(system);
+ case SvcId::ClearEvent:
+ return SvcWrap_ClearEvent64From32(system);
+ case SvcId::MapSharedMemory:
+ return SvcWrap_MapSharedMemory64From32(system);
+ case SvcId::UnmapSharedMemory:
+ return SvcWrap_UnmapSharedMemory64From32(system);
+ case SvcId::CreateTransferMemory:
+ return SvcWrap_CreateTransferMemory64From32(system);
+ case SvcId::CloseHandle:
+ return SvcWrap_CloseHandle64From32(system);
+ case SvcId::ResetSignal:
+ return SvcWrap_ResetSignal64From32(system);
+ case SvcId::WaitSynchronization:
+ return SvcWrap_WaitSynchronization64From32(system);
+ case SvcId::CancelSynchronization:
+ return SvcWrap_CancelSynchronization64From32(system);
+ case SvcId::ArbitrateLock:
+ return SvcWrap_ArbitrateLock64From32(system);
+ case SvcId::ArbitrateUnlock:
+ return SvcWrap_ArbitrateUnlock64From32(system);
+ case SvcId::WaitProcessWideKeyAtomic:
+ return SvcWrap_WaitProcessWideKeyAtomic64From32(system);
+ case SvcId::SignalProcessWideKey:
+ return SvcWrap_SignalProcessWideKey64From32(system);
+ case SvcId::GetSystemTick:
+ return SvcWrap_GetSystemTick64From32(system);
+ case SvcId::ConnectToNamedPort:
+ return SvcWrap_ConnectToNamedPort64From32(system);
+ case SvcId::SendSyncRequestLight:
+ return SvcWrap_SendSyncRequestLight64From32(system);
+ case SvcId::SendSyncRequest:
+ return SvcWrap_SendSyncRequest64From32(system);
+ case SvcId::SendSyncRequestWithUserBuffer:
+ return SvcWrap_SendSyncRequestWithUserBuffer64From32(system);
+ case SvcId::SendAsyncRequestWithUserBuffer:
+ return SvcWrap_SendAsyncRequestWithUserBuffer64From32(system);
+ case SvcId::GetProcessId:
+ return SvcWrap_GetProcessId64From32(system);
+ case SvcId::GetThreadId:
+ return SvcWrap_GetThreadId64From32(system);
+ case SvcId::Break:
+ return SvcWrap_Break64From32(system);
+ case SvcId::OutputDebugString:
+ return SvcWrap_OutputDebugString64From32(system);
+ case SvcId::ReturnFromException:
+ return SvcWrap_ReturnFromException64From32(system);
+ case SvcId::GetInfo:
+ return SvcWrap_GetInfo64From32(system);
+ case SvcId::FlushEntireDataCache:
+ return SvcWrap_FlushEntireDataCache64From32(system);
+ case SvcId::FlushDataCache:
+ return SvcWrap_FlushDataCache64From32(system);
+ case SvcId::MapPhysicalMemory:
+ return SvcWrap_MapPhysicalMemory64From32(system);
+ case SvcId::UnmapPhysicalMemory:
+ return SvcWrap_UnmapPhysicalMemory64From32(system);
+ case SvcId::GetDebugFutureThreadInfo:
+ return SvcWrap_GetDebugFutureThreadInfo64From32(system);
+ case SvcId::GetLastThreadInfo:
+ return SvcWrap_GetLastThreadInfo64From32(system);
+ case SvcId::GetResourceLimitLimitValue:
+ return SvcWrap_GetResourceLimitLimitValue64From32(system);
+ case SvcId::GetResourceLimitCurrentValue:
+ return SvcWrap_GetResourceLimitCurrentValue64From32(system);
+ case SvcId::SetThreadActivity:
+ return SvcWrap_SetThreadActivity64From32(system);
+ case SvcId::GetThreadContext3:
+ return SvcWrap_GetThreadContext364From32(system);
+ case SvcId::WaitForAddress:
+ return SvcWrap_WaitForAddress64From32(system);
+ case SvcId::SignalToAddress:
+ return SvcWrap_SignalToAddress64From32(system);
+ case SvcId::SynchronizePreemptionState:
+ return SvcWrap_SynchronizePreemptionState64From32(system);
+ case SvcId::GetResourceLimitPeakValue:
+ return SvcWrap_GetResourceLimitPeakValue64From32(system);
+ case SvcId::CreateIoPool:
+ return SvcWrap_CreateIoPool64From32(system);
+ case SvcId::CreateIoRegion:
+ return SvcWrap_CreateIoRegion64From32(system);
+ case SvcId::KernelDebug:
+ return SvcWrap_KernelDebug64From32(system);
+ case SvcId::ChangeKernelTraceState:
+ return SvcWrap_ChangeKernelTraceState64From32(system);
+ case SvcId::CreateSession:
+ return SvcWrap_CreateSession64From32(system);
+ case SvcId::AcceptSession:
+ return SvcWrap_AcceptSession64From32(system);
+ case SvcId::ReplyAndReceiveLight:
+ return SvcWrap_ReplyAndReceiveLight64From32(system);
+ case SvcId::ReplyAndReceive:
+ return SvcWrap_ReplyAndReceive64From32(system);
+ case SvcId::ReplyAndReceiveWithUserBuffer:
+ return SvcWrap_ReplyAndReceiveWithUserBuffer64From32(system);
+ case SvcId::CreateEvent:
+ return SvcWrap_CreateEvent64From32(system);
+ case SvcId::MapIoRegion:
+ return SvcWrap_MapIoRegion64From32(system);
+ case SvcId::UnmapIoRegion:
+ return SvcWrap_UnmapIoRegion64From32(system);
+ case SvcId::MapPhysicalMemoryUnsafe:
+ return SvcWrap_MapPhysicalMemoryUnsafe64From32(system);
+ case SvcId::UnmapPhysicalMemoryUnsafe:
+ return SvcWrap_UnmapPhysicalMemoryUnsafe64From32(system);
+ case SvcId::SetUnsafeLimit:
+ return SvcWrap_SetUnsafeLimit64From32(system);
+ case SvcId::CreateCodeMemory:
+ return SvcWrap_CreateCodeMemory64From32(system);
+ case SvcId::ControlCodeMemory:
+ return SvcWrap_ControlCodeMemory64From32(system);
+ case SvcId::SleepSystem:
+ return SvcWrap_SleepSystem64From32(system);
+ case SvcId::ReadWriteRegister:
+ return SvcWrap_ReadWriteRegister64From32(system);
+ case SvcId::SetProcessActivity:
+ return SvcWrap_SetProcessActivity64From32(system);
+ case SvcId::CreateSharedMemory:
+ return SvcWrap_CreateSharedMemory64From32(system);
+ case SvcId::MapTransferMemory:
+ return SvcWrap_MapTransferMemory64From32(system);
+ case SvcId::UnmapTransferMemory:
+ return SvcWrap_UnmapTransferMemory64From32(system);
+ case SvcId::CreateInterruptEvent:
+ return SvcWrap_CreateInterruptEvent64From32(system);
+ case SvcId::QueryPhysicalAddress:
+ return SvcWrap_QueryPhysicalAddress64From32(system);
+ case SvcId::QueryIoMapping:
+ return SvcWrap_QueryIoMapping64From32(system);
+ case SvcId::CreateDeviceAddressSpace:
+ return SvcWrap_CreateDeviceAddressSpace64From32(system);
+ case SvcId::AttachDeviceAddressSpace:
+ return SvcWrap_AttachDeviceAddressSpace64From32(system);
+ case SvcId::DetachDeviceAddressSpace:
+ return SvcWrap_DetachDeviceAddressSpace64From32(system);
+ case SvcId::MapDeviceAddressSpaceByForce:
+ return SvcWrap_MapDeviceAddressSpaceByForce64From32(system);
+ case SvcId::MapDeviceAddressSpaceAligned:
+ return SvcWrap_MapDeviceAddressSpaceAligned64From32(system);
+ case SvcId::UnmapDeviceAddressSpace:
+ return SvcWrap_UnmapDeviceAddressSpace64From32(system);
+ case SvcId::InvalidateProcessDataCache:
+ return SvcWrap_InvalidateProcessDataCache64From32(system);
+ case SvcId::StoreProcessDataCache:
+ return SvcWrap_StoreProcessDataCache64From32(system);
+ case SvcId::FlushProcessDataCache:
+ return SvcWrap_FlushProcessDataCache64From32(system);
+ case SvcId::DebugActiveProcess:
+ return SvcWrap_DebugActiveProcess64From32(system);
+ case SvcId::BreakDebugProcess:
+ return SvcWrap_BreakDebugProcess64From32(system);
+ case SvcId::TerminateDebugProcess:
+ return SvcWrap_TerminateDebugProcess64From32(system);
+ case SvcId::GetDebugEvent:
+ return SvcWrap_GetDebugEvent64From32(system);
+ case SvcId::ContinueDebugEvent:
+ return SvcWrap_ContinueDebugEvent64From32(system);
+ case SvcId::GetProcessList:
+ return SvcWrap_GetProcessList64From32(system);
+ case SvcId::GetThreadList:
+ return SvcWrap_GetThreadList64From32(system);
+ case SvcId::GetDebugThreadContext:
+ return SvcWrap_GetDebugThreadContext64From32(system);
+ case SvcId::SetDebugThreadContext:
+ return SvcWrap_SetDebugThreadContext64From32(system);
+ case SvcId::QueryDebugProcessMemory:
+ return SvcWrap_QueryDebugProcessMemory64From32(system);
+ case SvcId::ReadDebugProcessMemory:
+ return SvcWrap_ReadDebugProcessMemory64From32(system);
+ case SvcId::WriteDebugProcessMemory:
+ return SvcWrap_WriteDebugProcessMemory64From32(system);
+ case SvcId::SetHardwareBreakPoint:
+ return SvcWrap_SetHardwareBreakPoint64From32(system);
+ case SvcId::GetDebugThreadParam:
+ return SvcWrap_GetDebugThreadParam64From32(system);
+ case SvcId::GetSystemInfo:
+ return SvcWrap_GetSystemInfo64From32(system);
+ case SvcId::CreatePort:
+ return SvcWrap_CreatePort64From32(system);
+ case SvcId::ManageNamedPort:
+ return SvcWrap_ManageNamedPort64From32(system);
+ case SvcId::ConnectToPort:
+ return SvcWrap_ConnectToPort64From32(system);
+ case SvcId::SetProcessMemoryPermission:
+ return SvcWrap_SetProcessMemoryPermission64From32(system);
+ case SvcId::MapProcessMemory:
+ return SvcWrap_MapProcessMemory64From32(system);
+ case SvcId::UnmapProcessMemory:
+ return SvcWrap_UnmapProcessMemory64From32(system);
+ case SvcId::QueryProcessMemory:
+ return SvcWrap_QueryProcessMemory64From32(system);
+ case SvcId::MapProcessCodeMemory:
+ return SvcWrap_MapProcessCodeMemory64From32(system);
+ case SvcId::UnmapProcessCodeMemory:
+ return SvcWrap_UnmapProcessCodeMemory64From32(system);
+ case SvcId::CreateProcess:
+ return SvcWrap_CreateProcess64From32(system);
+ case SvcId::StartProcess:
+ return SvcWrap_StartProcess64From32(system);
+ case SvcId::TerminateProcess:
+ return SvcWrap_TerminateProcess64From32(system);
+ case SvcId::GetProcessInfo:
+ return SvcWrap_GetProcessInfo64From32(system);
+ case SvcId::CreateResourceLimit:
+ return SvcWrap_CreateResourceLimit64From32(system);
+ case SvcId::SetResourceLimitLimitValue:
+ return SvcWrap_SetResourceLimitLimitValue64From32(system);
+ case SvcId::CallSecureMonitor:
+ return SvcWrap_CallSecureMonitor64From32(system);
+ case SvcId::MapInsecureMemory:
+ return SvcWrap_MapInsecureMemory64From32(system);
+ case SvcId::UnmapInsecureMemory:
+ return SvcWrap_UnmapInsecureMemory64From32(system);
+ default:
+ LOG_CRITICAL(Kernel_SVC, "Unknown SVC {:x}!", imm);
+ break;
}
- return &SVC_Table_32[func_num];
}
-static const FunctionDef* GetSVCInfo64(u32 func_num) {
- if (func_num >= std::size(SVC_Table_64)) {
- LOG_ERROR(Kernel_SVC, "Unknown svc=0x{:02X}", func_num);
- return nullptr;
+static void Call64(Core::System& system, u32 imm) {
+ switch (static_cast<SvcId>(imm)) {
+ case SvcId::SetHeapSize:
+ return SvcWrap_SetHeapSize64(system);
+ case SvcId::SetMemoryPermission:
+ return SvcWrap_SetMemoryPermission64(system);
+ case SvcId::SetMemoryAttribute:
+ return SvcWrap_SetMemoryAttribute64(system);
+ case SvcId::MapMemory:
+ return SvcWrap_MapMemory64(system);
+ case SvcId::UnmapMemory:
+ return SvcWrap_UnmapMemory64(system);
+ case SvcId::QueryMemory:
+ return SvcWrap_QueryMemory64(system);
+ case SvcId::ExitProcess:
+ return SvcWrap_ExitProcess64(system);
+ case SvcId::CreateThread:
+ return SvcWrap_CreateThread64(system);
+ case SvcId::StartThread:
+ return SvcWrap_StartThread64(system);
+ case SvcId::ExitThread:
+ return SvcWrap_ExitThread64(system);
+ case SvcId::SleepThread:
+ return SvcWrap_SleepThread64(system);
+ case SvcId::GetThreadPriority:
+ return SvcWrap_GetThreadPriority64(system);
+ case SvcId::SetThreadPriority:
+ return SvcWrap_SetThreadPriority64(system);
+ case SvcId::GetThreadCoreMask:
+ return SvcWrap_GetThreadCoreMask64(system);
+ case SvcId::SetThreadCoreMask:
+ return SvcWrap_SetThreadCoreMask64(system);
+ case SvcId::GetCurrentProcessorNumber:
+ return SvcWrap_GetCurrentProcessorNumber64(system);
+ case SvcId::SignalEvent:
+ return SvcWrap_SignalEvent64(system);
+ case SvcId::ClearEvent:
+ return SvcWrap_ClearEvent64(system);
+ case SvcId::MapSharedMemory:
+ return SvcWrap_MapSharedMemory64(system);
+ case SvcId::UnmapSharedMemory:
+ return SvcWrap_UnmapSharedMemory64(system);
+ case SvcId::CreateTransferMemory:
+ return SvcWrap_CreateTransferMemory64(system);
+ case SvcId::CloseHandle:
+ return SvcWrap_CloseHandle64(system);
+ case SvcId::ResetSignal:
+ return SvcWrap_ResetSignal64(system);
+ case SvcId::WaitSynchronization:
+ return SvcWrap_WaitSynchronization64(system);
+ case SvcId::CancelSynchronization:
+ return SvcWrap_CancelSynchronization64(system);
+ case SvcId::ArbitrateLock:
+ return SvcWrap_ArbitrateLock64(system);
+ case SvcId::ArbitrateUnlock:
+ return SvcWrap_ArbitrateUnlock64(system);
+ case SvcId::WaitProcessWideKeyAtomic:
+ return SvcWrap_WaitProcessWideKeyAtomic64(system);
+ case SvcId::SignalProcessWideKey:
+ return SvcWrap_SignalProcessWideKey64(system);
+ case SvcId::GetSystemTick:
+ return SvcWrap_GetSystemTick64(system);
+ case SvcId::ConnectToNamedPort:
+ return SvcWrap_ConnectToNamedPort64(system);
+ case SvcId::SendSyncRequestLight:
+ return SvcWrap_SendSyncRequestLight64(system);
+ case SvcId::SendSyncRequest:
+ return SvcWrap_SendSyncRequest64(system);
+ case SvcId::SendSyncRequestWithUserBuffer:
+ return SvcWrap_SendSyncRequestWithUserBuffer64(system);
+ case SvcId::SendAsyncRequestWithUserBuffer:
+ return SvcWrap_SendAsyncRequestWithUserBuffer64(system);
+ case SvcId::GetProcessId:
+ return SvcWrap_GetProcessId64(system);
+ case SvcId::GetThreadId:
+ return SvcWrap_GetThreadId64(system);
+ case SvcId::Break:
+ return SvcWrap_Break64(system);
+ case SvcId::OutputDebugString:
+ return SvcWrap_OutputDebugString64(system);
+ case SvcId::ReturnFromException:
+ return SvcWrap_ReturnFromException64(system);
+ case SvcId::GetInfo:
+ return SvcWrap_GetInfo64(system);
+ case SvcId::FlushEntireDataCache:
+ return SvcWrap_FlushEntireDataCache64(system);
+ case SvcId::FlushDataCache:
+ return SvcWrap_FlushDataCache64(system);
+ case SvcId::MapPhysicalMemory:
+ return SvcWrap_MapPhysicalMemory64(system);
+ case SvcId::UnmapPhysicalMemory:
+ return SvcWrap_UnmapPhysicalMemory64(system);
+ case SvcId::GetDebugFutureThreadInfo:
+ return SvcWrap_GetDebugFutureThreadInfo64(system);
+ case SvcId::GetLastThreadInfo:
+ return SvcWrap_GetLastThreadInfo64(system);
+ case SvcId::GetResourceLimitLimitValue:
+ return SvcWrap_GetResourceLimitLimitValue64(system);
+ case SvcId::GetResourceLimitCurrentValue:
+ return SvcWrap_GetResourceLimitCurrentValue64(system);
+ case SvcId::SetThreadActivity:
+ return SvcWrap_SetThreadActivity64(system);
+ case SvcId::GetThreadContext3:
+ return SvcWrap_GetThreadContext364(system);
+ case SvcId::WaitForAddress:
+ return SvcWrap_WaitForAddress64(system);
+ case SvcId::SignalToAddress:
+ return SvcWrap_SignalToAddress64(system);
+ case SvcId::SynchronizePreemptionState:
+ return SvcWrap_SynchronizePreemptionState64(system);
+ case SvcId::GetResourceLimitPeakValue:
+ return SvcWrap_GetResourceLimitPeakValue64(system);
+ case SvcId::CreateIoPool:
+ return SvcWrap_CreateIoPool64(system);
+ case SvcId::CreateIoRegion:
+ return SvcWrap_CreateIoRegion64(system);
+ case SvcId::KernelDebug:
+ return SvcWrap_KernelDebug64(system);
+ case SvcId::ChangeKernelTraceState:
+ return SvcWrap_ChangeKernelTraceState64(system);
+ case SvcId::CreateSession:
+ return SvcWrap_CreateSession64(system);
+ case SvcId::AcceptSession:
+ return SvcWrap_AcceptSession64(system);
+ case SvcId::ReplyAndReceiveLight:
+ return SvcWrap_ReplyAndReceiveLight64(system);
+ case SvcId::ReplyAndReceive:
+ return SvcWrap_ReplyAndReceive64(system);
+ case SvcId::ReplyAndReceiveWithUserBuffer:
+ return SvcWrap_ReplyAndReceiveWithUserBuffer64(system);
+ case SvcId::CreateEvent:
+ return SvcWrap_CreateEvent64(system);
+ case SvcId::MapIoRegion:
+ return SvcWrap_MapIoRegion64(system);
+ case SvcId::UnmapIoRegion:
+ return SvcWrap_UnmapIoRegion64(system);
+ case SvcId::MapPhysicalMemoryUnsafe:
+ return SvcWrap_MapPhysicalMemoryUnsafe64(system);
+ case SvcId::UnmapPhysicalMemoryUnsafe:
+ return SvcWrap_UnmapPhysicalMemoryUnsafe64(system);
+ case SvcId::SetUnsafeLimit:
+ return SvcWrap_SetUnsafeLimit64(system);
+ case SvcId::CreateCodeMemory:
+ return SvcWrap_CreateCodeMemory64(system);
+ case SvcId::ControlCodeMemory:
+ return SvcWrap_ControlCodeMemory64(system);
+ case SvcId::SleepSystem:
+ return SvcWrap_SleepSystem64(system);
+ case SvcId::ReadWriteRegister:
+ return SvcWrap_ReadWriteRegister64(system);
+ case SvcId::SetProcessActivity:
+ return SvcWrap_SetProcessActivity64(system);
+ case SvcId::CreateSharedMemory:
+ return SvcWrap_CreateSharedMemory64(system);
+ case SvcId::MapTransferMemory:
+ return SvcWrap_MapTransferMemory64(system);
+ case SvcId::UnmapTransferMemory:
+ return SvcWrap_UnmapTransferMemory64(system);
+ case SvcId::CreateInterruptEvent:
+ return SvcWrap_CreateInterruptEvent64(system);
+ case SvcId::QueryPhysicalAddress:
+ return SvcWrap_QueryPhysicalAddress64(system);
+ case SvcId::QueryIoMapping:
+ return SvcWrap_QueryIoMapping64(system);
+ case SvcId::CreateDeviceAddressSpace:
+ return SvcWrap_CreateDeviceAddressSpace64(system);
+ case SvcId::AttachDeviceAddressSpace:
+ return SvcWrap_AttachDeviceAddressSpace64(system);
+ case SvcId::DetachDeviceAddressSpace:
+ return SvcWrap_DetachDeviceAddressSpace64(system);
+ case SvcId::MapDeviceAddressSpaceByForce:
+ return SvcWrap_MapDeviceAddressSpaceByForce64(system);
+ case SvcId::MapDeviceAddressSpaceAligned:
+ return SvcWrap_MapDeviceAddressSpaceAligned64(system);
+ case SvcId::UnmapDeviceAddressSpace:
+ return SvcWrap_UnmapDeviceAddressSpace64(system);
+ case SvcId::InvalidateProcessDataCache:
+ return SvcWrap_InvalidateProcessDataCache64(system);
+ case SvcId::StoreProcessDataCache:
+ return SvcWrap_StoreProcessDataCache64(system);
+ case SvcId::FlushProcessDataCache:
+ return SvcWrap_FlushProcessDataCache64(system);
+ case SvcId::DebugActiveProcess:
+ return SvcWrap_DebugActiveProcess64(system);
+ case SvcId::BreakDebugProcess:
+ return SvcWrap_BreakDebugProcess64(system);
+ case SvcId::TerminateDebugProcess:
+ return SvcWrap_TerminateDebugProcess64(system);
+ case SvcId::GetDebugEvent:
+ return SvcWrap_GetDebugEvent64(system);
+ case SvcId::ContinueDebugEvent:
+ return SvcWrap_ContinueDebugEvent64(system);
+ case SvcId::GetProcessList:
+ return SvcWrap_GetProcessList64(system);
+ case SvcId::GetThreadList:
+ return SvcWrap_GetThreadList64(system);
+ case SvcId::GetDebugThreadContext:
+ return SvcWrap_GetDebugThreadContext64(system);
+ case SvcId::SetDebugThreadContext:
+ return SvcWrap_SetDebugThreadContext64(system);
+ case SvcId::QueryDebugProcessMemory:
+ return SvcWrap_QueryDebugProcessMemory64(system);
+ case SvcId::ReadDebugProcessMemory:
+ return SvcWrap_ReadDebugProcessMemory64(system);
+ case SvcId::WriteDebugProcessMemory:
+ return SvcWrap_WriteDebugProcessMemory64(system);
+ case SvcId::SetHardwareBreakPoint:
+ return SvcWrap_SetHardwareBreakPoint64(system);
+ case SvcId::GetDebugThreadParam:
+ return SvcWrap_GetDebugThreadParam64(system);
+ case SvcId::GetSystemInfo:
+ return SvcWrap_GetSystemInfo64(system);
+ case SvcId::CreatePort:
+ return SvcWrap_CreatePort64(system);
+ case SvcId::ManageNamedPort:
+ return SvcWrap_ManageNamedPort64(system);
+ case SvcId::ConnectToPort:
+ return SvcWrap_ConnectToPort64(system);
+ case SvcId::SetProcessMemoryPermission:
+ return SvcWrap_SetProcessMemoryPermission64(system);
+ case SvcId::MapProcessMemory:
+ return SvcWrap_MapProcessMemory64(system);
+ case SvcId::UnmapProcessMemory:
+ return SvcWrap_UnmapProcessMemory64(system);
+ case SvcId::QueryProcessMemory:
+ return SvcWrap_QueryProcessMemory64(system);
+ case SvcId::MapProcessCodeMemory:
+ return SvcWrap_MapProcessCodeMemory64(system);
+ case SvcId::UnmapProcessCodeMemory:
+ return SvcWrap_UnmapProcessCodeMemory64(system);
+ case SvcId::CreateProcess:
+ return SvcWrap_CreateProcess64(system);
+ case SvcId::StartProcess:
+ return SvcWrap_StartProcess64(system);
+ case SvcId::TerminateProcess:
+ return SvcWrap_TerminateProcess64(system);
+ case SvcId::GetProcessInfo:
+ return SvcWrap_GetProcessInfo64(system);
+ case SvcId::CreateResourceLimit:
+ return SvcWrap_CreateResourceLimit64(system);
+ case SvcId::SetResourceLimitLimitValue:
+ return SvcWrap_SetResourceLimitLimitValue64(system);
+ case SvcId::CallSecureMonitor:
+ return SvcWrap_CallSecureMonitor64(system);
+ case SvcId::MapInsecureMemory:
+ return SvcWrap_MapInsecureMemory64(system);
+ case SvcId::UnmapInsecureMemory:
+ return SvcWrap_UnmapInsecureMemory64(system);
+ default:
+ LOG_CRITICAL(Kernel_SVC, "Unknown SVC {:x}!", imm);
+ break;
}
- return &SVC_Table_64[func_num];
}
-void Call(Core::System& system, u32 immediate) {
+// clang-format on
+
+void Call(Core::System& system, u32 imm) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
- auto* thread = GetCurrentThreadPointer(kernel);
- thread->SetIsCallingSvc();
-
- const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
- : GetSVCInfo32(immediate);
- if (info) {
- if (info->func) {
- info->func(system);
- } else {
- LOG_CRITICAL(Kernel_SVC, "Unimplemented SVC function {}(..)", info->name);
- }
+ if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
+ Call64(system, imm);
} else {
- LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate);
+ Call32(system, imm);
}
kernel.ExitSVCProfile();
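
The generated wrappers above all follow one marshaling shape: read each input from its guest register, call the typed SVC implementation, then scatter the Result and any output parameters back into registers r0, r1, r2, and so on. The following standalone sketch shows that shape in miniature; it uses a toy register file and a hypothetical CreateEventImpl stand-in, not yuzu's actual Core::System or Convert machinery.

// Minimal sketch of the generated-wrapper pattern, under stated assumptions:
// ToySystem, GetReg64/SetReg64, and CreateEventImpl are illustrative stand-ins.
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

using Handle = uint32_t;
struct Result { uint32_t raw; };

// Toy stand-in for the guest CPU context that the real wrappers read and write.
struct ToySystem {
    std::array<uint64_t, 8> regs{};
};

static uint64_t GetReg64(ToySystem& system, std::size_t i) { return system.regs[i]; }
static void SetReg64(ToySystem& system, std::size_t i, uint64_t v) { system.regs[i] = v; }

// Move a typed value through a register word by bit reinterpretation,
// mirroring how the generated code passes enums and handles as uint64_t.
// (Partial copies zero-extend; assumes a little-endian host, as a sketch.)
template <typename To, typename From>
static To Convert(const From& from) {
    To to{};
    std::memcpy(&to, &from, std::min(sizeof(to), sizeof(from)));
    return to;
}

// Hypothetical SVC implementation with two output handles, like CreateEvent.
static Result CreateEventImpl(ToySystem&, Handle* out_write, Handle* out_read) {
    *out_write = 0x1234;
    *out_read = 0x5678;
    return Result{0};
}

// The wrapper: declare locals for outputs, call the implementation, then
// scatter the Result and each output parameter back into r0, r1, r2.
static void SvcWrap_CreateEvent(ToySystem& system) {
    Handle out_write{};
    Handle out_read{};
    const Result ret = CreateEventImpl(system, &out_write, &out_read);
    SetReg64(system, 0, Convert<uint64_t>(ret.raw));
    SetReg64(system, 1, Convert<uint64_t>(out_write));
    SetReg64(system, 2, Convert<uint64_t>(out_read));
}

int main() {
    ToySystem system{};
    SvcWrap_CreateEvent(system);
    std::cout << std::hex << GetReg64(system, 0) << ' ' << GetReg64(system, 1)
              << ' ' << GetReg64(system, 2) << '\n'; // prints: 0 1234 5678
}

Outputs always land in ascending registers starting with the Result in r0, which is why every generated wrapper above ends with the same run of SetReg64 calls.
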
diff --git a/src/core/hle/kernel/svc.h b/src/core/hle/kernel/svc.h
index 13f061b83..ac4696008 100644
--- a/src/core/hle/kernel/svc.h
+++ b/src/core/hle/kernel/svc.h
@@ -1,16 +1,536 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+// This file is automatically generated using svc_generator.py.
+
#pragma once
-#include "common/common_types.h"
namespace Core {
class System;
}
+#include "common/common_types.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
namespace Kernel::Svc {
-void Call(Core::System& system, u32 immediate);
+// clang-format off
+Result SetHeapSize(Core::System& system, uint64_t* out_address, uint64_t size);
+Result SetMemoryPermission(Core::System& system, uint64_t address, uint64_t size, MemoryPermission perm);
+Result SetMemoryAttribute(Core::System& system, uint64_t address, uint64_t size, uint32_t mask, uint32_t attr);
+Result MapMemory(Core::System& system, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result UnmapMemory(Core::System& system, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result QueryMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info, uint64_t address);
+void ExitProcess(Core::System& system);
+Result CreateThread(Core::System& system, Handle* out_handle, uint64_t func, uint64_t arg, uint64_t stack_bottom, int32_t priority, int32_t core_id);
+Result StartThread(Core::System& system, Handle thread_handle);
+void ExitThread(Core::System& system);
+void SleepThread(Core::System& system, int64_t ns);
+Result GetThreadPriority(Core::System& system, int32_t* out_priority, Handle thread_handle);
+Result SetThreadPriority(Core::System& system, Handle thread_handle, int32_t priority);
+Result GetThreadCoreMask(Core::System& system, int32_t* out_core_id, uint64_t* out_affinity_mask, Handle thread_handle);
+Result SetThreadCoreMask(Core::System& system, Handle thread_handle, int32_t core_id, uint64_t affinity_mask);
+int32_t GetCurrentProcessorNumber(Core::System& system);
+Result SignalEvent(Core::System& system, Handle event_handle);
+Result ClearEvent(Core::System& system, Handle event_handle);
+Result MapSharedMemory(Core::System& system, Handle shmem_handle, uint64_t address, uint64_t size, MemoryPermission map_perm);
+Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, uint64_t address, uint64_t size);
+Result CreateTransferMemory(Core::System& system, Handle* out_handle, uint64_t address, uint64_t size, MemoryPermission map_perm);
+Result CloseHandle(Core::System& system, Handle handle);
+Result ResetSignal(Core::System& system, Handle handle);
+Result WaitSynchronization(Core::System& system, int32_t* out_index, uint64_t handles, int32_t num_handles, int64_t timeout_ns);
+Result CancelSynchronization(Core::System& system, Handle handle);
+Result ArbitrateLock(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag);
+Result ArbitrateUnlock(Core::System& system, uint64_t address);
+Result WaitProcessWideKeyAtomic(Core::System& system, uint64_t address, uint64_t cv_key, uint32_t tag, int64_t timeout_ns);
+void SignalProcessWideKey(Core::System& system, uint64_t cv_key, int32_t count);
+int64_t GetSystemTick(Core::System& system);
+Result ConnectToNamedPort(Core::System& system, Handle* out_handle, uint64_t name);
+Result SendSyncRequest(Core::System& system, Handle session_handle);
+Result SendSyncRequestWithUserBuffer(Core::System& system, uint64_t message_buffer, uint64_t message_buffer_size, Handle session_handle);
+Result SendAsyncRequestWithUserBuffer(Core::System& system, Handle* out_event_handle, uint64_t message_buffer, uint64_t message_buffer_size, Handle session_handle);
+Result GetProcessId(Core::System& system, uint64_t* out_process_id, Handle process_handle);
+Result GetThreadId(Core::System& system, uint64_t* out_thread_id, Handle thread_handle);
+void Break(Core::System& system, BreakReason break_reason, uint64_t arg, uint64_t size);
+Result OutputDebugString(Core::System& system, uint64_t debug_str, uint64_t len);
+void ReturnFromException(Core::System& system, Result result);
+Result GetInfo(Core::System& system, uint64_t* out, InfoType info_type, Handle handle, uint64_t info_subtype);
+void FlushEntireDataCache(Core::System& system);
+Result FlushDataCache(Core::System& system, uint64_t address, uint64_t size);
+Result MapPhysicalMemory(Core::System& system, uint64_t address, uint64_t size);
+Result UnmapPhysicalMemory(Core::System& system, uint64_t address, uint64_t size);
+Result GetDebugFutureThreadInfo(Core::System& system, lp64::LastThreadContext* out_context, uint64_t* out_thread_id, Handle debug_handle, int64_t ns);
+Result GetLastThreadInfo(Core::System& system, lp64::LastThreadContext* out_context, uint64_t* out_tls_address, uint32_t* out_flags);
+Result GetResourceLimitLimitValue(Core::System& system, int64_t* out_limit_value, Handle resource_limit_handle, LimitableResource which);
+Result GetResourceLimitCurrentValue(Core::System& system, int64_t* out_current_value, Handle resource_limit_handle, LimitableResource which);
+Result SetThreadActivity(Core::System& system, Handle thread_handle, ThreadActivity thread_activity);
+Result GetThreadContext3(Core::System& system, uint64_t out_context, Handle thread_handle);
+Result WaitForAddress(Core::System& system, uint64_t address, ArbitrationType arb_type, int32_t value, int64_t timeout_ns);
+Result SignalToAddress(Core::System& system, uint64_t address, SignalType signal_type, int32_t value, int32_t count);
+void SynchronizePreemptionState(Core::System& system);
+Result GetResourceLimitPeakValue(Core::System& system, int64_t* out_peak_value, Handle resource_limit_handle, LimitableResource which);
+Result CreateIoPool(Core::System& system, Handle* out_handle, IoPoolType which);
+Result CreateIoRegion(Core::System& system, Handle* out_handle, Handle io_pool, uint64_t physical_address, uint64_t size, MemoryMapping mapping, MemoryPermission perm);
+void KernelDebug(Core::System& system, KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2);
+void ChangeKernelTraceState(Core::System& system, KernelTraceState kern_trace_state);
+Result CreateSession(Core::System& system, Handle* out_server_session_handle, Handle* out_client_session_handle, bool is_light, uint64_t name);
+Result AcceptSession(Core::System& system, Handle* out_handle, Handle port);
+Result ReplyAndReceive(Core::System& system, int32_t* out_index, uint64_t handles, int32_t num_handles, Handle reply_target, int64_t timeout_ns);
+Result ReplyAndReceiveWithUserBuffer(Core::System& system, int32_t* out_index, uint64_t message_buffer, uint64_t message_buffer_size, uint64_t handles, int32_t num_handles, Handle reply_target, int64_t timeout_ns);
+Result CreateEvent(Core::System& system, Handle* out_write_handle, Handle* out_read_handle);
+Result MapIoRegion(Core::System& system, Handle io_region, uint64_t address, uint64_t size, MemoryPermission perm);
+Result UnmapIoRegion(Core::System& system, Handle io_region, uint64_t address, uint64_t size);
+Result MapPhysicalMemoryUnsafe(Core::System& system, uint64_t address, uint64_t size);
+Result UnmapPhysicalMemoryUnsafe(Core::System& system, uint64_t address, uint64_t size);
+Result SetUnsafeLimit(Core::System& system, uint64_t limit);
+Result CreateCodeMemory(Core::System& system, Handle* out_handle, uint64_t address, uint64_t size);
+Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, CodeMemoryOperation operation, uint64_t address, uint64_t size, MemoryPermission perm);
+void SleepSystem(Core::System& system);
+Result ReadWriteRegister(Core::System& system, uint32_t* out_value, uint64_t address, uint32_t mask, uint32_t value);
+Result SetProcessActivity(Core::System& system, Handle process_handle, ProcessActivity process_activity);
+Result CreateSharedMemory(Core::System& system, Handle* out_handle, uint64_t size, MemoryPermission owner_perm, MemoryPermission remote_perm);
+Result MapTransferMemory(Core::System& system, Handle trmem_handle, uint64_t address, uint64_t size, MemoryPermission owner_perm);
+Result UnmapTransferMemory(Core::System& system, Handle trmem_handle, uint64_t address, uint64_t size);
+Result CreateInterruptEvent(Core::System& system, Handle* out_read_handle, int32_t interrupt_id, InterruptType interrupt_type);
+Result QueryPhysicalAddress(Core::System& system, lp64::PhysicalMemoryInfo* out_info, uint64_t address);
+Result QueryIoMapping(Core::System& system, uint64_t* out_address, uint64_t* out_size, uint64_t physical_address, uint64_t size);
+Result CreateDeviceAddressSpace(Core::System& system, Handle* out_handle, uint64_t das_address, uint64_t das_size);
+Result AttachDeviceAddressSpace(Core::System& system, DeviceName device_name, Handle das_handle);
+Result DetachDeviceAddressSpace(Core::System& system, DeviceName device_name, Handle das_handle);
+Result MapDeviceAddressSpaceByForce(Core::System& system, Handle das_handle, Handle process_handle, uint64_t process_address, uint64_t size, uint64_t device_address, uint32_t option);
+Result MapDeviceAddressSpaceAligned(Core::System& system, Handle das_handle, Handle process_handle, uint64_t process_address, uint64_t size, uint64_t device_address, uint32_t option);
+Result UnmapDeviceAddressSpace(Core::System& system, Handle das_handle, Handle process_handle, uint64_t process_address, uint64_t size, uint64_t device_address);
+Result InvalidateProcessDataCache(Core::System& system, Handle process_handle, uint64_t address, uint64_t size);
+Result StoreProcessDataCache(Core::System& system, Handle process_handle, uint64_t address, uint64_t size);
+Result FlushProcessDataCache(Core::System& system, Handle process_handle, uint64_t address, uint64_t size);
+Result DebugActiveProcess(Core::System& system, Handle* out_handle, uint64_t process_id);
+Result BreakDebugProcess(Core::System& system, Handle debug_handle);
+Result TerminateDebugProcess(Core::System& system, Handle debug_handle);
+Result GetDebugEvent(Core::System& system, uint64_t out_info, Handle debug_handle);
+Result ContinueDebugEvent(Core::System& system, Handle debug_handle, uint32_t flags, uint64_t thread_ids, int32_t num_thread_ids);
+Result GetProcessList(Core::System& system, int32_t* out_num_processes, uint64_t out_process_ids, int32_t max_out_count);
+Result GetThreadList(Core::System& system, int32_t* out_num_threads, uint64_t out_thread_ids, int32_t max_out_count, Handle debug_handle);
+Result GetDebugThreadContext(Core::System& system, uint64_t out_context, Handle debug_handle, uint64_t thread_id, uint32_t context_flags);
+Result SetDebugThreadContext(Core::System& system, Handle debug_handle, uint64_t thread_id, uint64_t context, uint32_t context_flags);
+Result QueryDebugProcessMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info, Handle process_handle, uint64_t address);
+Result ReadDebugProcessMemory(Core::System& system, uint64_t buffer, Handle debug_handle, uint64_t address, uint64_t size);
+Result WriteDebugProcessMemory(Core::System& system, Handle debug_handle, uint64_t buffer, uint64_t address, uint64_t size);
+Result SetHardwareBreakPoint(Core::System& system, HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value);
+Result GetDebugThreadParam(Core::System& system, uint64_t* out_64, uint32_t* out_32, Handle debug_handle, uint64_t thread_id, DebugThreadParam param);
+Result GetSystemInfo(Core::System& system, uint64_t* out, SystemInfoType info_type, Handle handle, uint64_t info_subtype);
+Result CreatePort(Core::System& system, Handle* out_server_handle, Handle* out_client_handle, int32_t max_sessions, bool is_light, uint64_t name);
+Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t name, int32_t max_sessions);
+Result ConnectToPort(Core::System& system, Handle* out_handle, Handle port);
+Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, uint64_t address, uint64_t size, MemoryPermission perm);
+Result MapProcessMemory(Core::System& system, uint64_t dst_address, Handle process_handle, uint64_t src_address, uint64_t size);
+Result UnmapProcessMemory(Core::System& system, uint64_t dst_address, Handle process_handle, uint64_t src_address, uint64_t size);
+Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info, Handle process_handle, uint64_t address);
+Result MapProcessCodeMemory(Core::System& system, Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result CreateProcess(Core::System& system, Handle* out_handle, uint64_t parameters, uint64_t caps, int32_t num_caps);
+Result StartProcess(Core::System& system, Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size);
+Result TerminateProcess(Core::System& system, Handle process_handle);
+Result GetProcessInfo(Core::System& system, int64_t* out_info, Handle process_handle, ProcessInfoType info_type);
+Result CreateResourceLimit(Core::System& system, Handle* out_handle);
+Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle, LimitableResource which, int64_t limit_value);
+Result MapInsecureMemory(Core::System& system, uint64_t address, uint64_t size);
+Result UnmapInsecureMemory(Core::System& system, uint64_t address, uint64_t size);
+
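+// Each SVC above also has two generated ABI flavors: a "64From32" variant
+// (32-bit process on the 64-bit kernel, declared next) and a "64" variant
+// (native 64-bit, declared after that). Both are thin forwarders to the
+// canonical implementation.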
+Result SetHeapSize64From32(Core::System& system, uint64_t* out_address, uint32_t size);
+Result SetMemoryPermission64From32(Core::System& system, uint32_t address, uint32_t size, MemoryPermission perm);
+Result SetMemoryAttribute64From32(Core::System& system, uint32_t address, uint32_t size, uint32_t mask, uint32_t attr);
+Result MapMemory64From32(Core::System& system, uint32_t dst_address, uint32_t src_address, uint32_t size);
+Result UnmapMemory64From32(Core::System& system, uint32_t dst_address, uint32_t src_address, uint32_t size);
+Result QueryMemory64From32(Core::System& system, uint32_t out_memory_info, PageInfo* out_page_info, uint32_t address);
+void ExitProcess64From32(Core::System& system);
+Result CreateThread64From32(Core::System& system, Handle* out_handle, uint32_t func, uint32_t arg, uint32_t stack_bottom, int32_t priority, int32_t core_id);
+Result StartThread64From32(Core::System& system, Handle thread_handle);
+void ExitThread64From32(Core::System& system);
+void SleepThread64From32(Core::System& system, int64_t ns);
+Result GetThreadPriority64From32(Core::System& system, int32_t* out_priority, Handle thread_handle);
+Result SetThreadPriority64From32(Core::System& system, Handle thread_handle, int32_t priority);
+Result GetThreadCoreMask64From32(Core::System& system, int32_t* out_core_id, uint64_t* out_affinity_mask, Handle thread_handle);
+Result SetThreadCoreMask64From32(Core::System& system, Handle thread_handle, int32_t core_id, uint64_t affinity_mask);
+int32_t GetCurrentProcessorNumber64From32(Core::System& system);
+Result SignalEvent64From32(Core::System& system, Handle event_handle);
+Result ClearEvent64From32(Core::System& system, Handle event_handle);
+Result MapSharedMemory64From32(Core::System& system, Handle shmem_handle, uint32_t address, uint32_t size, MemoryPermission map_perm);
+Result UnmapSharedMemory64From32(Core::System& system, Handle shmem_handle, uint32_t address, uint32_t size);
+Result CreateTransferMemory64From32(Core::System& system, Handle* out_handle, uint32_t address, uint32_t size, MemoryPermission map_perm);
+Result CloseHandle64From32(Core::System& system, Handle handle);
+Result ResetSignal64From32(Core::System& system, Handle handle);
+Result WaitSynchronization64From32(Core::System& system, int32_t* out_index, uint32_t handles, int32_t num_handles, int64_t timeout_ns);
+Result CancelSynchronization64From32(Core::System& system, Handle handle);
+Result ArbitrateLock64From32(Core::System& system, Handle thread_handle, uint32_t address, uint32_t tag);
+Result ArbitrateUnlock64From32(Core::System& system, uint32_t address);
+Result WaitProcessWideKeyAtomic64From32(Core::System& system, uint32_t address, uint32_t cv_key, uint32_t tag, int64_t timeout_ns);
+void SignalProcessWideKey64From32(Core::System& system, uint32_t cv_key, int32_t count);
+int64_t GetSystemTick64From32(Core::System& system);
+Result ConnectToNamedPort64From32(Core::System& system, Handle* out_handle, uint32_t name);
+Result SendSyncRequest64From32(Core::System& system, Handle session_handle);
+Result SendSyncRequestWithUserBuffer64From32(Core::System& system, uint32_t message_buffer, uint32_t message_buffer_size, Handle session_handle);
+Result SendAsyncRequestWithUserBuffer64From32(Core::System& system, Handle* out_event_handle, uint32_t message_buffer, uint32_t message_buffer_size, Handle session_handle);
+Result GetProcessId64From32(Core::System& system, uint64_t* out_process_id, Handle process_handle);
+Result GetThreadId64From32(Core::System& system, uint64_t* out_thread_id, Handle thread_handle);
+void Break64From32(Core::System& system, BreakReason break_reason, uint32_t arg, uint32_t size);
+Result OutputDebugString64From32(Core::System& system, uint32_t debug_str, uint32_t len);
+void ReturnFromException64From32(Core::System& system, Result result);
+Result GetInfo64From32(Core::System& system, uint64_t* out, InfoType info_type, Handle handle, uint64_t info_subtype);
+void FlushEntireDataCache64From32(Core::System& system);
+Result FlushDataCache64From32(Core::System& system, uint32_t address, uint32_t size);
+Result MapPhysicalMemory64From32(Core::System& system, uint32_t address, uint32_t size);
+Result UnmapPhysicalMemory64From32(Core::System& system, uint32_t address, uint32_t size);
+Result GetDebugFutureThreadInfo64From32(Core::System& system, ilp32::LastThreadContext* out_context, uint64_t* out_thread_id, Handle debug_handle, int64_t ns);
+Result GetLastThreadInfo64From32(Core::System& system, ilp32::LastThreadContext* out_context, uint64_t* out_tls_address, uint32_t* out_flags);
+Result GetResourceLimitLimitValue64From32(Core::System& system, int64_t* out_limit_value, Handle resource_limit_handle, LimitableResource which);
+Result GetResourceLimitCurrentValue64From32(Core::System& system, int64_t* out_current_value, Handle resource_limit_handle, LimitableResource which);
+Result SetThreadActivity64From32(Core::System& system, Handle thread_handle, ThreadActivity thread_activity);
+Result GetThreadContext364From32(Core::System& system, uint32_t out_context, Handle thread_handle);
+Result WaitForAddress64From32(Core::System& system, uint32_t address, ArbitrationType arb_type, int32_t value, int64_t timeout_ns);
+Result SignalToAddress64From32(Core::System& system, uint32_t address, SignalType signal_type, int32_t value, int32_t count);
+void SynchronizePreemptionState64From32(Core::System& system);
+Result GetResourceLimitPeakValue64From32(Core::System& system, int64_t* out_peak_value, Handle resource_limit_handle, LimitableResource which);
+Result CreateIoPool64From32(Core::System& system, Handle* out_handle, IoPoolType which);
+Result CreateIoRegion64From32(Core::System& system, Handle* out_handle, Handle io_pool, uint64_t physical_address, uint32_t size, MemoryMapping mapping, MemoryPermission perm);
+void KernelDebug64From32(Core::System& system, KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2);
+void ChangeKernelTraceState64From32(Core::System& system, KernelTraceState kern_trace_state);
+Result CreateSession64From32(Core::System& system, Handle* out_server_session_handle, Handle* out_client_session_handle, bool is_light, uint32_t name);
+Result AcceptSession64From32(Core::System& system, Handle* out_handle, Handle port);
+Result ReplyAndReceive64From32(Core::System& system, int32_t* out_index, uint32_t handles, int32_t num_handles, Handle reply_target, int64_t timeout_ns);
+Result ReplyAndReceiveWithUserBuffer64From32(Core::System& system, int32_t* out_index, uint32_t message_buffer, uint32_t message_buffer_size, uint32_t handles, int32_t num_handles, Handle reply_target, int64_t timeout_ns);
+Result CreateEvent64From32(Core::System& system, Handle* out_write_handle, Handle* out_read_handle);
+Result MapIoRegion64From32(Core::System& system, Handle io_region, uint32_t address, uint32_t size, MemoryPermission perm);
+Result UnmapIoRegion64From32(Core::System& system, Handle io_region, uint32_t address, uint32_t size);
+Result MapPhysicalMemoryUnsafe64From32(Core::System& system, uint32_t address, uint32_t size);
+Result UnmapPhysicalMemoryUnsafe64From32(Core::System& system, uint32_t address, uint32_t size);
+Result SetUnsafeLimit64From32(Core::System& system, uint32_t limit);
+Result CreateCodeMemory64From32(Core::System& system, Handle* out_handle, uint32_t address, uint32_t size);
+Result ControlCodeMemory64From32(Core::System& system, Handle code_memory_handle, CodeMemoryOperation operation, uint64_t address, uint64_t size, MemoryPermission perm);
+void SleepSystem64From32(Core::System& system);
+Result ReadWriteRegister64From32(Core::System& system, uint32_t* out_value, uint64_t address, uint32_t mask, uint32_t value);
+Result SetProcessActivity64From32(Core::System& system, Handle process_handle, ProcessActivity process_activity);
+Result CreateSharedMemory64From32(Core::System& system, Handle* out_handle, uint32_t size, MemoryPermission owner_perm, MemoryPermission remote_perm);
+Result MapTransferMemory64From32(Core::System& system, Handle trmem_handle, uint32_t address, uint32_t size, MemoryPermission owner_perm);
+Result UnmapTransferMemory64From32(Core::System& system, Handle trmem_handle, uint32_t address, uint32_t size);
+Result CreateInterruptEvent64From32(Core::System& system, Handle* out_read_handle, int32_t interrupt_id, InterruptType interrupt_type);
+Result QueryPhysicalAddress64From32(Core::System& system, ilp32::PhysicalMemoryInfo* out_info, uint32_t address);
+Result QueryIoMapping64From32(Core::System& system, uint64_t* out_address, uint64_t* out_size, uint64_t physical_address, uint32_t size);
+Result CreateDeviceAddressSpace64From32(Core::System& system, Handle* out_handle, uint64_t das_address, uint64_t das_size);
+Result AttachDeviceAddressSpace64From32(Core::System& system, DeviceName device_name, Handle das_handle);
+Result DetachDeviceAddressSpace64From32(Core::System& system, DeviceName device_name, Handle das_handle);
+Result MapDeviceAddressSpaceByForce64From32(Core::System& system, Handle das_handle, Handle process_handle, uint64_t process_address, uint32_t size, uint64_t device_address, uint32_t option);
+Result MapDeviceAddressSpaceAligned64From32(Core::System& system, Handle das_handle, Handle process_handle, uint64_t process_address, uint32_t size, uint64_t device_address, uint32_t option);
+Result UnmapDeviceAddressSpace64From32(Core::System& system, Handle das_handle, Handle process_handle, uint64_t process_address, uint32_t size, uint64_t device_address);
+Result InvalidateProcessDataCache64From32(Core::System& system, Handle process_handle, uint64_t address, uint64_t size);
+Result StoreProcessDataCache64From32(Core::System& system, Handle process_handle, uint64_t address, uint64_t size);
+Result FlushProcessDataCache64From32(Core::System& system, Handle process_handle, uint64_t address, uint64_t size);
+Result DebugActiveProcess64From32(Core::System& system, Handle* out_handle, uint64_t process_id);
+Result BreakDebugProcess64From32(Core::System& system, Handle debug_handle);
+Result TerminateDebugProcess64From32(Core::System& system, Handle debug_handle);
+Result GetDebugEvent64From32(Core::System& system, uint32_t out_info, Handle debug_handle);
+Result ContinueDebugEvent64From32(Core::System& system, Handle debug_handle, uint32_t flags, uint32_t thread_ids, int32_t num_thread_ids);
+Result GetProcessList64From32(Core::System& system, int32_t* out_num_processes, uint32_t out_process_ids, int32_t max_out_count);
+Result GetThreadList64From32(Core::System& system, int32_t* out_num_threads, uint32_t out_thread_ids, int32_t max_out_count, Handle debug_handle);
+Result GetDebugThreadContext64From32(Core::System& system, uint32_t out_context, Handle debug_handle, uint64_t thread_id, uint32_t context_flags);
+Result SetDebugThreadContext64From32(Core::System& system, Handle debug_handle, uint64_t thread_id, uint32_t context, uint32_t context_flags);
+Result QueryDebugProcessMemory64From32(Core::System& system, uint32_t out_memory_info, PageInfo* out_page_info, Handle process_handle, uint32_t address);
+Result ReadDebugProcessMemory64From32(Core::System& system, uint32_t buffer, Handle debug_handle, uint32_t address, uint32_t size);
+Result WriteDebugProcessMemory64From32(Core::System& system, Handle debug_handle, uint32_t buffer, uint32_t address, uint32_t size);
+Result SetHardwareBreakPoint64From32(Core::System& system, HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value);
+Result GetDebugThreadParam64From32(Core::System& system, uint64_t* out_64, uint32_t* out_32, Handle debug_handle, uint64_t thread_id, DebugThreadParam param);
+Result GetSystemInfo64From32(Core::System& system, uint64_t* out, SystemInfoType info_type, Handle handle, uint64_t info_subtype);
+Result CreatePort64From32(Core::System& system, Handle* out_server_handle, Handle* out_client_handle, int32_t max_sessions, bool is_light, uint32_t name);
+Result ManageNamedPort64From32(Core::System& system, Handle* out_server_handle, uint32_t name, int32_t max_sessions);
+Result ConnectToPort64From32(Core::System& system, Handle* out_handle, Handle port);
+Result SetProcessMemoryPermission64From32(Core::System& system, Handle process_handle, uint64_t address, uint64_t size, MemoryPermission perm);
+Result MapProcessMemory64From32(Core::System& system, uint32_t dst_address, Handle process_handle, uint64_t src_address, uint32_t size);
+Result UnmapProcessMemory64From32(Core::System& system, uint32_t dst_address, Handle process_handle, uint64_t src_address, uint32_t size);
+Result QueryProcessMemory64From32(Core::System& system, uint32_t out_memory_info, PageInfo* out_page_info, Handle process_handle, uint64_t address);
+Result MapProcessCodeMemory64From32(Core::System& system, Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result UnmapProcessCodeMemory64From32(Core::System& system, Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result CreateProcess64From32(Core::System& system, Handle* out_handle, uint32_t parameters, uint32_t caps, int32_t num_caps);
+Result StartProcess64From32(Core::System& system, Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size);
+Result TerminateProcess64From32(Core::System& system, Handle process_handle);
+Result GetProcessInfo64From32(Core::System& system, int64_t* out_info, Handle process_handle, ProcessInfoType info_type);
+Result CreateResourceLimit64From32(Core::System& system, Handle* out_handle);
+Result SetResourceLimitLimitValue64From32(Core::System& system, Handle resource_limit_handle, LimitableResource which, int64_t limit_value);
+Result MapInsecureMemory64From32(Core::System& system, uint32_t address, uint32_t size);
+Result UnmapInsecureMemory64From32(Core::System& system, uint32_t address, uint32_t size);
+
+Result SetHeapSize64(Core::System& system, uint64_t* out_address, uint64_t size);
+Result SetMemoryPermission64(Core::System& system, uint64_t address, uint64_t size, MemoryPermission perm);
+Result SetMemoryAttribute64(Core::System& system, uint64_t address, uint64_t size, uint32_t mask, uint32_t attr);
+Result MapMemory64(Core::System& system, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result UnmapMemory64(Core::System& system, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result QueryMemory64(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info, uint64_t address);
+void ExitProcess64(Core::System& system);
+Result CreateThread64(Core::System& system, Handle* out_handle, uint64_t func, uint64_t arg, uint64_t stack_bottom, int32_t priority, int32_t core_id);
+Result StartThread64(Core::System& system, Handle thread_handle);
+void ExitThread64(Core::System& system);
+void SleepThread64(Core::System& system, int64_t ns);
+Result GetThreadPriority64(Core::System& system, int32_t* out_priority, Handle thread_handle);
+Result SetThreadPriority64(Core::System& system, Handle thread_handle, int32_t priority);
+Result GetThreadCoreMask64(Core::System& system, int32_t* out_core_id, uint64_t* out_affinity_mask, Handle thread_handle);
+Result SetThreadCoreMask64(Core::System& system, Handle thread_handle, int32_t core_id, uint64_t affinity_mask);
+int32_t GetCurrentProcessorNumber64(Core::System& system);
+Result SignalEvent64(Core::System& system, Handle event_handle);
+Result ClearEvent64(Core::System& system, Handle event_handle);
+Result MapSharedMemory64(Core::System& system, Handle shmem_handle, uint64_t address, uint64_t size, MemoryPermission map_perm);
+Result UnmapSharedMemory64(Core::System& system, Handle shmem_handle, uint64_t address, uint64_t size);
+Result CreateTransferMemory64(Core::System& system, Handle* out_handle, uint64_t address, uint64_t size, MemoryPermission map_perm);
+Result CloseHandle64(Core::System& system, Handle handle);
+Result ResetSignal64(Core::System& system, Handle handle);
+Result WaitSynchronization64(Core::System& system, int32_t* out_index, uint64_t handles, int32_t num_handles, int64_t timeout_ns);
+Result CancelSynchronization64(Core::System& system, Handle handle);
+Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag);
+Result ArbitrateUnlock64(Core::System& system, uint64_t address);
+Result WaitProcessWideKeyAtomic64(Core::System& system, uint64_t address, uint64_t cv_key, uint32_t tag, int64_t timeout_ns);
+void SignalProcessWideKey64(Core::System& system, uint64_t cv_key, int32_t count);
+int64_t GetSystemTick64(Core::System& system);
+Result ConnectToNamedPort64(Core::System& system, Handle* out_handle, uint64_t name);
+Result SendSyncRequest64(Core::System& system, Handle session_handle);
+Result SendSyncRequestWithUserBuffer64(Core::System& system, uint64_t message_buffer, uint64_t message_buffer_size, Handle session_handle);
+Result SendAsyncRequestWithUserBuffer64(Core::System& system, Handle* out_event_handle, uint64_t message_buffer, uint64_t message_buffer_size, Handle session_handle);
+Result GetProcessId64(Core::System& system, uint64_t* out_process_id, Handle process_handle);
+Result GetThreadId64(Core::System& system, uint64_t* out_thread_id, Handle thread_handle);
+void Break64(Core::System& system, BreakReason break_reason, uint64_t arg, uint64_t size);
+Result OutputDebugString64(Core::System& system, uint64_t debug_str, uint64_t len);
+void ReturnFromException64(Core::System& system, Result result);
+Result GetInfo64(Core::System& system, uint64_t* out, InfoType info_type, Handle handle, uint64_t info_subtype);
+void FlushEntireDataCache64(Core::System& system);
+Result FlushDataCache64(Core::System& system, uint64_t address, uint64_t size);
+Result MapPhysicalMemory64(Core::System& system, uint64_t address, uint64_t size);
+Result UnmapPhysicalMemory64(Core::System& system, uint64_t address, uint64_t size);
+Result GetDebugFutureThreadInfo64(Core::System& system, lp64::LastThreadContext* out_context, uint64_t* out_thread_id, Handle debug_handle, int64_t ns);
+Result GetLastThreadInfo64(Core::System& system, lp64::LastThreadContext* out_context, uint64_t* out_tls_address, uint32_t* out_flags);
+Result GetResourceLimitLimitValue64(Core::System& system, int64_t* out_limit_value, Handle resource_limit_handle, LimitableResource which);
+Result GetResourceLimitCurrentValue64(Core::System& system, int64_t* out_current_value, Handle resource_limit_handle, LimitableResource which);
+Result SetThreadActivity64(Core::System& system, Handle thread_handle, ThreadActivity thread_activity);
+Result GetThreadContext364(Core::System& system, uint64_t out_context, Handle thread_handle);
+Result WaitForAddress64(Core::System& system, uint64_t address, ArbitrationType arb_type, int32_t value, int64_t timeout_ns);
+Result SignalToAddress64(Core::System& system, uint64_t address, SignalType signal_type, int32_t value, int32_t count);
+void SynchronizePreemptionState64(Core::System& system);
+Result GetResourceLimitPeakValue64(Core::System& system, int64_t* out_peak_value, Handle resource_limit_handle, LimitableResource which);
+Result CreateIoPool64(Core::System& system, Handle* out_handle, IoPoolType which);
+Result CreateIoRegion64(Core::System& system, Handle* out_handle, Handle io_pool, uint64_t physical_address, uint64_t size, MemoryMapping mapping, MemoryPermission perm);
+void KernelDebug64(Core::System& system, KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2);
+void ChangeKernelTraceState64(Core::System& system, KernelTraceState kern_trace_state);
+Result CreateSession64(Core::System& system, Handle* out_server_session_handle, Handle* out_client_session_handle, bool is_light, uint64_t name);
+Result AcceptSession64(Core::System& system, Handle* out_handle, Handle port);
+Result ReplyAndReceive64(Core::System& system, int32_t* out_index, uint64_t handles, int32_t num_handles, Handle reply_target, int64_t timeout_ns);
+Result ReplyAndReceiveWithUserBuffer64(Core::System& system, int32_t* out_index, uint64_t message_buffer, uint64_t message_buffer_size, uint64_t handles, int32_t num_handles, Handle reply_target, int64_t timeout_ns);
+Result CreateEvent64(Core::System& system, Handle* out_write_handle, Handle* out_read_handle);
+Result MapIoRegion64(Core::System& system, Handle io_region, uint64_t address, uint64_t size, MemoryPermission perm);
+Result UnmapIoRegion64(Core::System& system, Handle io_region, uint64_t address, uint64_t size);
+Result MapPhysicalMemoryUnsafe64(Core::System& system, uint64_t address, uint64_t size);
+Result UnmapPhysicalMemoryUnsafe64(Core::System& system, uint64_t address, uint64_t size);
+Result SetUnsafeLimit64(Core::System& system, uint64_t limit);
+Result CreateCodeMemory64(Core::System& system, Handle* out_handle, uint64_t address, uint64_t size);
+Result ControlCodeMemory64(Core::System& system, Handle code_memory_handle, CodeMemoryOperation operation, uint64_t address, uint64_t size, MemoryPermission perm);
+void SleepSystem64(Core::System& system);
+Result ReadWriteRegister64(Core::System& system, uint32_t* out_value, uint64_t address, uint32_t mask, uint32_t value);
+Result SetProcessActivity64(Core::System& system, Handle process_handle, ProcessActivity process_activity);
+Result CreateSharedMemory64(Core::System& system, Handle* out_handle, uint64_t size, MemoryPermission owner_perm, MemoryPermission remote_perm);
+Result MapTransferMemory64(Core::System& system, Handle trmem_handle, uint64_t address, uint64_t size, MemoryPermission owner_perm);
+Result UnmapTransferMemory64(Core::System& system, Handle trmem_handle, uint64_t address, uint64_t size);
+Result CreateInterruptEvent64(Core::System& system, Handle* out_read_handle, int32_t interrupt_id, InterruptType interrupt_type);
+Result QueryPhysicalAddress64(Core::System& system, lp64::PhysicalMemoryInfo* out_info, uint64_t address);
+Result QueryIoMapping64(Core::System& system, uint64_t* out_address, uint64_t* out_size, uint64_t physical_address, uint64_t size);
+Result CreateDeviceAddressSpace64(Core::System& system, Handle* out_handle, uint64_t das_address, uint64_t das_size);
+Result AttachDeviceAddressSpace64(Core::System& system, DeviceName device_name, Handle das_handle);
+Result DetachDeviceAddressSpace64(Core::System& system, DeviceName device_name, Handle das_handle);
+Result MapDeviceAddressSpaceByForce64(Core::System& system, Handle das_handle, Handle process_handle, uint64_t process_address, uint64_t size, uint64_t device_address, uint32_t option);
+Result MapDeviceAddressSpaceAligned64(Core::System& system, Handle das_handle, Handle process_handle, uint64_t process_address, uint64_t size, uint64_t device_address, uint32_t option);
+Result UnmapDeviceAddressSpace64(Core::System& system, Handle das_handle, Handle process_handle, uint64_t process_address, uint64_t size, uint64_t device_address);
+Result InvalidateProcessDataCache64(Core::System& system, Handle process_handle, uint64_t address, uint64_t size);
+Result StoreProcessDataCache64(Core::System& system, Handle process_handle, uint64_t address, uint64_t size);
+Result FlushProcessDataCache64(Core::System& system, Handle process_handle, uint64_t address, uint64_t size);
+Result DebugActiveProcess64(Core::System& system, Handle* out_handle, uint64_t process_id);
+Result BreakDebugProcess64(Core::System& system, Handle debug_handle);
+Result TerminateDebugProcess64(Core::System& system, Handle debug_handle);
+Result GetDebugEvent64(Core::System& system, uint64_t out_info, Handle debug_handle);
+Result ContinueDebugEvent64(Core::System& system, Handle debug_handle, uint32_t flags, uint64_t thread_ids, int32_t num_thread_ids);
+Result GetProcessList64(Core::System& system, int32_t* out_num_processes, uint64_t out_process_ids, int32_t max_out_count);
+Result GetThreadList64(Core::System& system, int32_t* out_num_threads, uint64_t out_thread_ids, int32_t max_out_count, Handle debug_handle);
+Result GetDebugThreadContext64(Core::System& system, uint64_t out_context, Handle debug_handle, uint64_t thread_id, uint32_t context_flags);
+Result SetDebugThreadContext64(Core::System& system, Handle debug_handle, uint64_t thread_id, uint64_t context, uint32_t context_flags);
+Result QueryDebugProcessMemory64(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info, Handle process_handle, uint64_t address);
+Result ReadDebugProcessMemory64(Core::System& system, uint64_t buffer, Handle debug_handle, uint64_t address, uint64_t size);
+Result WriteDebugProcessMemory64(Core::System& system, Handle debug_handle, uint64_t buffer, uint64_t address, uint64_t size);
+Result SetHardwareBreakPoint64(Core::System& system, HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value);
+Result GetDebugThreadParam64(Core::System& system, uint64_t* out_64, uint32_t* out_32, Handle debug_handle, uint64_t thread_id, DebugThreadParam param);
+Result GetSystemInfo64(Core::System& system, uint64_t* out, SystemInfoType info_type, Handle handle, uint64_t info_subtype);
+Result CreatePort64(Core::System& system, Handle* out_server_handle, Handle* out_client_handle, int32_t max_sessions, bool is_light, uint64_t name);
+Result ManageNamedPort64(Core::System& system, Handle* out_server_handle, uint64_t name, int32_t max_sessions);
+Result ConnectToPort64(Core::System& system, Handle* out_handle, Handle port);
+Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address, uint64_t size, MemoryPermission perm);
+Result MapProcessMemory64(Core::System& system, uint64_t dst_address, Handle process_handle, uint64_t src_address, uint64_t size);
+Result UnmapProcessMemory64(Core::System& system, uint64_t dst_address, Handle process_handle, uint64_t src_address, uint64_t size);
+Result QueryProcessMemory64(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info, Handle process_handle, uint64_t address);
+Result MapProcessCodeMemory64(Core::System& system, Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result UnmapProcessCodeMemory64(Core::System& system, Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size);
+Result CreateProcess64(Core::System& system, Handle* out_handle, uint64_t parameters, uint64_t caps, int32_t num_caps);
+Result StartProcess64(Core::System& system, Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size);
+Result TerminateProcess64(Core::System& system, Handle process_handle);
+Result GetProcessInfo64(Core::System& system, int64_t* out_info, Handle process_handle, ProcessInfoType info_type);
+Result CreateResourceLimit64(Core::System& system, Handle* out_handle);
+Result SetResourceLimitLimitValue64(Core::System& system, Handle resource_limit_handle, LimitableResource which, int64_t limit_value);
+Result MapInsecureMemory64(Core::System& system, uint64_t address, uint64_t size);
+Result UnmapInsecureMemory64(Core::System& system, uint64_t address, uint64_t size);
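+
+// A representative forwarder pair (a sketch; the real definitions live in the
+// per-SVC .cpp files added by this change):
+//
+//   Result SetHeapSize64(Core::System& system, uint64_t* out_address, uint64_t size) {
+//       R_RETURN(SetHeapSize(system, out_address, size));
+//   }
+//
+//   Result SetHeapSize64From32(Core::System& system, uint64_t* out_address, uint32_t size) {
+//       R_RETURN(SetHeapSize(system, out_address, size));
+//   }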
+
+enum class SvcId : u32 {
+ SetHeapSize = 0x1,
+ SetMemoryPermission = 0x2,
+ SetMemoryAttribute = 0x3,
+ MapMemory = 0x4,
+ UnmapMemory = 0x5,
+ QueryMemory = 0x6,
+ ExitProcess = 0x7,
+ CreateThread = 0x8,
+ StartThread = 0x9,
+ ExitThread = 0xa,
+ SleepThread = 0xb,
+ GetThreadPriority = 0xc,
+ SetThreadPriority = 0xd,
+ GetThreadCoreMask = 0xe,
+ SetThreadCoreMask = 0xf,
+ GetCurrentProcessorNumber = 0x10,
+ SignalEvent = 0x11,
+ ClearEvent = 0x12,
+ MapSharedMemory = 0x13,
+ UnmapSharedMemory = 0x14,
+ CreateTransferMemory = 0x15,
+ CloseHandle = 0x16,
+ ResetSignal = 0x17,
+ WaitSynchronization = 0x18,
+ CancelSynchronization = 0x19,
+ ArbitrateLock = 0x1a,
+ ArbitrateUnlock = 0x1b,
+ WaitProcessWideKeyAtomic = 0x1c,
+ SignalProcessWideKey = 0x1d,
+ GetSystemTick = 0x1e,
+ ConnectToNamedPort = 0x1f,
+ SendSyncRequestLight = 0x20,
+ SendSyncRequest = 0x21,
+ SendSyncRequestWithUserBuffer = 0x22,
+ SendAsyncRequestWithUserBuffer = 0x23,
+ GetProcessId = 0x24,
+ GetThreadId = 0x25,
+ Break = 0x26,
+ OutputDebugString = 0x27,
+ ReturnFromException = 0x28,
+ GetInfo = 0x29,
+ FlushEntireDataCache = 0x2a,
+ FlushDataCache = 0x2b,
+ MapPhysicalMemory = 0x2c,
+ UnmapPhysicalMemory = 0x2d,
+ GetDebugFutureThreadInfo = 0x2e,
+ GetLastThreadInfo = 0x2f,
+ GetResourceLimitLimitValue = 0x30,
+ GetResourceLimitCurrentValue = 0x31,
+ SetThreadActivity = 0x32,
+ GetThreadContext3 = 0x33,
+ WaitForAddress = 0x34,
+ SignalToAddress = 0x35,
+ SynchronizePreemptionState = 0x36,
+ GetResourceLimitPeakValue = 0x37,
+ CreateIoPool = 0x39,
+ CreateIoRegion = 0x3a,
+ KernelDebug = 0x3c,
+ ChangeKernelTraceState = 0x3d,
+ CreateSession = 0x40,
+ AcceptSession = 0x41,
+ ReplyAndReceiveLight = 0x42,
+ ReplyAndReceive = 0x43,
+ ReplyAndReceiveWithUserBuffer = 0x44,
+ CreateEvent = 0x45,
+ MapIoRegion = 0x46,
+ UnmapIoRegion = 0x47,
+ MapPhysicalMemoryUnsafe = 0x48,
+ UnmapPhysicalMemoryUnsafe = 0x49,
+ SetUnsafeLimit = 0x4a,
+ CreateCodeMemory = 0x4b,
+ ControlCodeMemory = 0x4c,
+ SleepSystem = 0x4d,
+ ReadWriteRegister = 0x4e,
+ SetProcessActivity = 0x4f,
+ CreateSharedMemory = 0x50,
+ MapTransferMemory = 0x51,
+ UnmapTransferMemory = 0x52,
+ CreateInterruptEvent = 0x53,
+ QueryPhysicalAddress = 0x54,
+ QueryIoMapping = 0x55,
+ CreateDeviceAddressSpace = 0x56,
+ AttachDeviceAddressSpace = 0x57,
+ DetachDeviceAddressSpace = 0x58,
+ MapDeviceAddressSpaceByForce = 0x59,
+ MapDeviceAddressSpaceAligned = 0x5a,
+ UnmapDeviceAddressSpace = 0x5c,
+ InvalidateProcessDataCache = 0x5d,
+ StoreProcessDataCache = 0x5e,
+ FlushProcessDataCache = 0x5f,
+ DebugActiveProcess = 0x60,
+ BreakDebugProcess = 0x61,
+ TerminateDebugProcess = 0x62,
+ GetDebugEvent = 0x63,
+ ContinueDebugEvent = 0x64,
+ GetProcessList = 0x65,
+ GetThreadList = 0x66,
+ GetDebugThreadContext = 0x67,
+ SetDebugThreadContext = 0x68,
+ QueryDebugProcessMemory = 0x69,
+ ReadDebugProcessMemory = 0x6a,
+ WriteDebugProcessMemory = 0x6b,
+ SetHardwareBreakPoint = 0x6c,
+ GetDebugThreadParam = 0x6d,
+ GetSystemInfo = 0x6f,
+ CreatePort = 0x70,
+ ManageNamedPort = 0x71,
+ ConnectToPort = 0x72,
+ SetProcessMemoryPermission = 0x73,
+ MapProcessMemory = 0x74,
+ UnmapProcessMemory = 0x75,
+ QueryProcessMemory = 0x76,
+ MapProcessCodeMemory = 0x77,
+ UnmapProcessCodeMemory = 0x78,
+ CreateProcess = 0x79,
+ StartProcess = 0x7a,
+ TerminateProcess = 0x7b,
+ GetProcessInfo = 0x7c,
+ CreateResourceLimit = 0x7d,
+ SetResourceLimitLimitValue = 0x7e,
+ CallSecureMonitor = 0x7f,
+ MapInsecureMemory = 0x90,
+ UnmapInsecureMemory = 0x91,
+};
+// clang-format on
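+
+// Spot-checks against the SVC numbering above (a sketch; gaps in the table are
+// IDs that are unused or not handled here):
+static_assert(static_cast<u32>(SvcId::SetHeapSize) == 0x01);
+static_assert(static_cast<u32>(SvcId::CallSecureMonitor) == 0x7f);
+static_assert(static_cast<u32>(SvcId::UnmapInsecureMemory) == 0x91);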
+
+// Custom ABI: these entry points take a raw argument block rather than the
+// generated marshalling. Light IPC exchanges a small register-passed message,
+// and CallSecureMonitor forwards a block of general-purpose register values.
+Result ReplyAndReceiveLight(Core::System& system, Handle handle, uint32_t* args);
+Result ReplyAndReceiveLight64From32(Core::System& system, Handle handle, uint32_t* args);
+Result ReplyAndReceiveLight64(Core::System& system, Handle handle, uint32_t* args);
+
+Result SendSyncRequestLight(Core::System& system, Handle session_handle, uint32_t* args);
+Result SendSyncRequestLight64From32(Core::System& system, Handle session_handle, uint32_t* args);
+Result SendSyncRequestLight64(Core::System& system, Handle session_handle, uint32_t* args);
+
+void CallSecureMonitor(Core::System& system, lp64::SecureMonitorArguments* args);
+void CallSecureMonitor64From32(Core::System& system, ilp32::SecureMonitorArguments* args);
+void CallSecureMonitor64(Core::System& system, lp64::SecureMonitorArguments* args);
+
+// Defined in svc_light_ipc.cpp.
+void SvcWrap_ReplyAndReceiveLight64From32(Core::System& system);
+void SvcWrap_ReplyAndReceiveLight64(Core::System& system);
+
+void SvcWrap_SendSyncRequestLight64From32(Core::System& system);
+void SvcWrap_SendSyncRequestLight64(Core::System& system);
+
+// Defined in svc_secure_monitor_call.cpp.
+void SvcWrap_CallSecureMonitor64From32(Core::System& system);
+void SvcWrap_CallSecureMonitor64(Core::System& system);
+
+// Perform a supervisor call by index.
+void Call(Core::System& system, u32 imm);
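+//
+// A minimal sketch of the dispatch (assumed shape; the real dispatcher also
+// selects the 64 vs. 64From32 flavor from the caller's address width):
+//
+//   void Call(Core::System& system, u32 imm) {
+//       switch (static_cast<SvcId>(imm)) {
+//       case SvcId::CallSecureMonitor:
+//           return SvcWrap_CallSecureMonitor64(system);
+//       // ... one SvcWrap_* thunk per SvcId ...
+//       default:
+//           LOG_ERROR(Kernel_SVC, "unknown SVC {:#x}", imm);
+//       }
+//   }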
} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_activity.cpp b/src/core/hle/kernel/svc/svc_activity.cpp
new file mode 100644
index 000000000..63bc08555
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_activity.cpp
@@ -0,0 +1,66 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+/// Sets the thread activity
+Result SetThreadActivity(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
+ LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
+ thread_activity);
+
+ // Validate the activity.
+ static constexpr auto IsValidThreadActivity = [](ThreadActivity activity) {
+ return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused;
+ };
+ R_UNLESS(IsValidThreadActivity(thread_activity), ResultInvalidEnumValue);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Check that the activity is being set on a non-current thread for the current process.
+ R_UNLESS(thread->GetOwnerProcess() == GetCurrentProcessPointer(system.Kernel()),
+ ResultInvalidHandle);
+ R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(system.Kernel()), ResultBusy);
+
+ // Set the activity.
+ R_TRY(thread->SetActivity(thread_activity));
+
+    R_SUCCEED();
+}
+
+Result SetProcessActivity(Core::System& system, Handle process_handle,
+ ProcessActivity process_activity) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result SetThreadActivity64(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
+ return SetThreadActivity(system, thread_handle, thread_activity);
+}
+
+Result SetProcessActivity64(Core::System& system, Handle process_handle,
+ ProcessActivity process_activity) {
+ return SetProcessActivity(system, process_handle, process_activity);
+}
+
+Result SetThreadActivity64From32(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
+ return SetThreadActivity(system, thread_handle, thread_activity);
+}
+
+Result SetProcessActivity64From32(Core::System& system, Handle process_handle,
+ ProcessActivity process_activity) {
+ return SetProcessActivity(system, process_handle, process_activity);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_address_arbiter.cpp b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
new file mode 100644
index 000000000..04cc5ea64
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
@@ -0,0 +1,105 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidSignalType(Svc::SignalType type) {
+ switch (type) {
+ case Svc::SignalType::Signal:
+ case Svc::SignalType::SignalAndIncrementIfEqual:
+ case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
+ return true;
+ default:
+ return false;
+ }
+}
+
+constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
+ switch (type) {
+ case Svc::ArbitrationType::WaitIfLessThan:
+ case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
+ case Svc::ArbitrationType::WaitIfEqual:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace
+
+// Wait for an address (via Address Arbiter)
+Result WaitForAddress(Core::System& system, u64 address, ArbitrationType arb_type, s32 value,
+ s64 timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
+ address, arb_type, value, timeout_ns);
+
+ // Validate input.
+ R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
+ R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress);
+ R_UNLESS(IsValidArbitrationType(arb_type), ResultInvalidEnumValue);
+
+ // Convert timeout from nanoseconds to ticks.
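+    // A strictly positive timeout gets a two-tick margin so the wait cannot
+    // expire before the requested deadline, saturating to "wait forever" on
+    // overflow; zero (poll) and negative (infinite) values pass through.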
+ s64 timeout{};
+ if (timeout_ns > 0) {
+ const s64 offset_tick(timeout_ns);
+ if (offset_tick > 0) {
+ timeout = offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = timeout_ns;
+ }
+
+ R_RETURN(
+ GetCurrentProcess(system.Kernel()).WaitAddressArbiter(address, arb_type, value, timeout));
+}
+
+// Signals to an address (via Address Arbiter)
+Result SignalToAddress(Core::System& system, u64 address, SignalType signal_type, s32 value,
+ s32 count) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
+ address, signal_type, value, count);
+
+ // Validate input.
+ R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
+ R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress);
+ R_UNLESS(IsValidSignalType(signal_type), ResultInvalidEnumValue);
+
+ R_RETURN(GetCurrentProcess(system.Kernel())
+ .SignalAddressArbiter(address, signal_type, value, count));
+}
+
+Result WaitForAddress64(Core::System& system, u64 address, ArbitrationType arb_type, s32 value,
+ s64 timeout_ns) {
+ R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns));
+}
+
+Result SignalToAddress64(Core::System& system, u64 address, SignalType signal_type, s32 value,
+ s32 count) {
+ R_RETURN(SignalToAddress(system, address, signal_type, value, count));
+}
+
+Result WaitForAddress64From32(Core::System& system, u32 address, ArbitrationType arb_type,
+ s32 value, s64 timeout_ns) {
+ R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns));
+}
+
+Result SignalToAddress64From32(Core::System& system, u32 address, SignalType signal_type, s32 value,
+ s32 count) {
+ R_RETURN(SignalToAddress(system, address, signal_type, value, count));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_address_translation.cpp b/src/core/hle/kernel/svc/svc_address_translation.cpp
new file mode 100644
index 000000000..e65a11cda
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_address_translation.cpp
@@ -0,0 +1,50 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+Result QueryPhysicalAddress(Core::System& system, lp64::PhysicalMemoryInfo* out_info,
+ uint64_t address) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result QueryIoMapping(Core::System& system, uint64_t* out_address, uint64_t* out_size,
+ uint64_t physical_address, uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result QueryPhysicalAddress64(Core::System& system, lp64::PhysicalMemoryInfo* out_info,
+ uint64_t address) {
+ R_RETURN(QueryPhysicalAddress(system, out_info, address));
+}
+
+Result QueryIoMapping64(Core::System& system, uint64_t* out_address, uint64_t* out_size,
+ uint64_t physical_address, uint64_t size) {
+ R_RETURN(QueryIoMapping(system, out_address, out_size, physical_address, size));
+}
+
+Result QueryPhysicalAddress64From32(Core::System& system, ilp32::PhysicalMemoryInfo* out_info,
+ uint32_t address) {
+ lp64::PhysicalMemoryInfo info{};
+ R_TRY(QueryPhysicalAddress(system, std::addressof(info), address));
+
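+    // Narrow for the 32-bit caller; a 32-bit process's mappings are expected
+    // to fit in the low 4 GiB, so the truncation is benign.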
+ *out_info = {
+ .physical_address = info.physical_address,
+ .virtual_address = static_cast<u32>(info.virtual_address),
+ .size = static_cast<u32>(info.size),
+ };
+ R_SUCCEED();
+}
+
+Result QueryIoMapping64From32(Core::System& system, uint64_t* out_address, uint64_t* out_size,
+ uint64_t physical_address, uint32_t size) {
+    R_RETURN(QueryIoMapping(system, out_address, out_size, physical_address, size));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_cache.cpp b/src/core/hle/kernel/svc/svc_cache.cpp
new file mode 100644
index 000000000..082942dab
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_cache.cpp
@@ -0,0 +1,98 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Kernel::Svc {
+
+void FlushEntireDataCache(Core::System& system) {
+ UNIMPLEMENTED();
+}
+
+Result FlushDataCache(Core::System& system, uint64_t address, uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result InvalidateProcessDataCache(Core::System& system, Handle process_handle, uint64_t address,
+ uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result StoreProcessDataCache(Core::System& system, Handle process_handle, uint64_t address,
+ uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result FlushProcessDataCache(Core::System& system, Handle process_handle, u64 address, u64 size) {
+ // Validate address/size.
+ R_UNLESS(size > 0, ResultInvalidSize);
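+    // Reject values that would truncate in the host's address space; on a
+    // 64-bit host these casts are no-ops and the checks always pass.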
+    R_UNLESS(address == static_cast<uintptr_t>(address), ResultInvalidCurrentMemory);
+    R_UNLESS(size == static_cast<size_t>(size), ResultInvalidCurrentMemory);
+
+ // Get the process from its handle.
+ KScopedAutoObject process =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KProcess>(process_handle);
+ R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
+
+ // Verify the region is within range.
+ auto& page_table = process->PageTable();
+ R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Perform the operation.
+ R_RETURN(GetCurrentMemory(system.Kernel()).FlushDataCache(address, size));
+}
+
+void FlushEntireDataCache64(Core::System& system) {
+ FlushEntireDataCache(system);
+}
+
+Result FlushDataCache64(Core::System& system, uint64_t address, uint64_t size) {
+ R_RETURN(FlushDataCache(system, address, size));
+}
+
+Result InvalidateProcessDataCache64(Core::System& system, Handle process_handle, uint64_t address,
+ uint64_t size) {
+ R_RETURN(InvalidateProcessDataCache(system, process_handle, address, size));
+}
+
+Result StoreProcessDataCache64(Core::System& system, Handle process_handle, uint64_t address,
+ uint64_t size) {
+ R_RETURN(StoreProcessDataCache(system, process_handle, address, size));
+}
+
+Result FlushProcessDataCache64(Core::System& system, Handle process_handle, uint64_t address,
+ uint64_t size) {
+ R_RETURN(FlushProcessDataCache(system, process_handle, address, size));
+}
+
+void FlushEntireDataCache64From32(Core::System& system) {
+    FlushEntireDataCache(system);
+}
+
+Result FlushDataCache64From32(Core::System& system, uint32_t address, uint32_t size) {
+ R_RETURN(FlushDataCache(system, address, size));
+}
+
+Result InvalidateProcessDataCache64From32(Core::System& system, Handle process_handle,
+ uint64_t address, uint64_t size) {
+ R_RETURN(InvalidateProcessDataCache(system, process_handle, address, size));
+}
+
+Result StoreProcessDataCache64From32(Core::System& system, Handle process_handle, uint64_t address,
+ uint64_t size) {
+ R_RETURN(StoreProcessDataCache(system, process_handle, address, size));
+}
+
+Result FlushProcessDataCache64From32(Core::System& system, Handle process_handle, uint64_t address,
+ uint64_t size) {
+ R_RETURN(FlushProcessDataCache(system, process_handle, address, size));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_code_memory.cpp b/src/core/hle/kernel/svc/svc_code_memory.cpp
new file mode 100644
index 000000000..687baff82
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_code_memory.cpp
@@ -0,0 +1,171 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_code_memory.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidMapCodeMemoryPermission(MemoryPermission perm) {
+ return perm == MemoryPermission::ReadWrite;
+}
+
+constexpr bool IsValidMapToOwnerCodeMemoryPermission(MemoryPermission perm) {
+ return perm == MemoryPermission::Read || perm == MemoryPermission::ReadExecute;
+}
+
+constexpr bool IsValidUnmapCodeMemoryPermission(MemoryPermission perm) {
+ return perm == MemoryPermission::None;
+}
+
+constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(MemoryPermission perm) {
+ return perm == MemoryPermission::None;
+}
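+
+// Permission matrix enforced by ControlCodeMemory below:
+//   Map            -> ReadWrite
+//   Unmap          -> None
+//   MapToOwner     -> Read or ReadExecute
+//   UnmapFromOwner -> None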
+
+} // namespace
+
+Result CreateCodeMemory(Core::System& system, Handle* out, u64 address, uint64_t size) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
+
+ // Get kernel instance.
+ auto& kernel = system.Kernel();
+
+ // Validate address / size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+    // Create the code memory.
+    KCodeMemory* code_mem = KCodeMemory::Create(kernel);
+ R_UNLESS(code_mem != nullptr, ResultOutOfResource);
+ SCOPE_EXIT({ code_mem->Close(); });
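+    // The creation reference is always dropped on exit; on success, the handle
+    // table holds its own reference from the Add call below.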
+
+ // Verify that the region is in range.
+ R_UNLESS(GetCurrentProcess(system.Kernel()).PageTable().Contains(address, size),
+ ResultInvalidCurrentMemory);
+
+ // Initialize the code memory.
+ R_TRY(code_mem->Initialize(system.DeviceMemory(), address, size));
+
+ // Register the code memory.
+ KCodeMemory::Register(kernel, code_mem);
+
+ // Add the code memory to the handle table.
+ R_TRY(GetCurrentProcess(system.Kernel()).GetHandleTable().Add(out, code_mem));
+
+ R_SUCCEED();
+}
+
+Result ControlCodeMemory(Core::System& system, Handle code_memory_handle,
+ CodeMemoryOperation operation, u64 address, uint64_t size,
+ MemoryPermission perm) {
+ LOG_TRACE(Kernel_SVC,
+ "called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
+ "permission=0x{:X}",
+ code_memory_handle, operation, address, size, perm);
+
+ // Validate the address / size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Get the code memory from its handle.
+ KScopedAutoObject code_mem = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KCodeMemory>(code_memory_handle);
+ R_UNLESS(code_mem.IsNotNull(), ResultInvalidHandle);
+
+ // NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process.
+ // This enables homebrew usage of these SVCs for JIT.
+
+ // Perform the operation.
+ switch (operation) {
+ case CodeMemoryOperation::Map: {
+ // Check that the region is in range.
+ R_UNLESS(GetCurrentProcess(system.Kernel())
+ .PageTable()
+ .CanContain(address, size, KMemoryState::CodeOut),
+ ResultInvalidMemoryRegion);
+
+ // Check the memory permission.
+ R_UNLESS(IsValidMapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Map the memory.
+ R_TRY(code_mem->Map(address, size));
+ } break;
+ case CodeMemoryOperation::Unmap: {
+ // Check that the region is in range.
+ R_UNLESS(GetCurrentProcess(system.Kernel())
+ .PageTable()
+ .CanContain(address, size, KMemoryState::CodeOut),
+ ResultInvalidMemoryRegion);
+
+ // Check the memory permission.
+ R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Unmap the memory.
+ R_TRY(code_mem->Unmap(address, size));
+ } break;
+ case CodeMemoryOperation::MapToOwner: {
+ // Check that the region is in range.
+ R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
+ KMemoryState::GeneratedCode),
+ ResultInvalidMemoryRegion);
+
+ // Check the memory permission.
+ R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Map the memory to its owner.
+ R_TRY(code_mem->MapToOwner(address, size, perm));
+ } break;
+ case CodeMemoryOperation::UnmapFromOwner: {
+ // Check that the region is in range.
+ R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
+ KMemoryState::GeneratedCode),
+ ResultInvalidMemoryRegion);
+
+ // Check the memory permission.
+ R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Unmap the memory from its owner.
+ R_TRY(code_mem->UnmapFromOwner(address, size));
+ } break;
+ default:
+ R_THROW(ResultInvalidEnumValue);
+ }
+
+ R_SUCCEED();
+}
+
+Result CreateCodeMemory64(Core::System& system, Handle* out_handle, uint64_t address,
+ uint64_t size) {
+ R_RETURN(CreateCodeMemory(system, out_handle, address, size));
+}
+
+Result ControlCodeMemory64(Core::System& system, Handle code_memory_handle,
+ CodeMemoryOperation operation, uint64_t address, uint64_t size,
+ MemoryPermission perm) {
+ R_RETURN(ControlCodeMemory(system, code_memory_handle, operation, address, size, perm));
+}
+
+Result CreateCodeMemory64From32(Core::System& system, Handle* out_handle, uint32_t address,
+ uint32_t size) {
+ R_RETURN(CreateCodeMemory(system, out_handle, address, size));
+}
+
+Result ControlCodeMemory64From32(Core::System& system, Handle code_memory_handle,
+ CodeMemoryOperation operation, uint64_t address, uint64_t size,
+ MemoryPermission perm) {
+ R_RETURN(ControlCodeMemory(system, code_memory_handle, operation, address, size, perm));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_condition_variable.cpp b/src/core/hle/kernel/svc/svc_condition_variable.cpp
new file mode 100644
index 000000000..ca120d67e
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_condition_variable.cpp
@@ -0,0 +1,72 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+/// Atomically waits on a process-wide key (condition variable)
+Result WaitProcessWideKeyAtomic(Core::System& system, u64 address, u64 cv_key, u32 tag,
+ s64 timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
+ cv_key, tag, timeout_ns);
+
+ // Validate input.
+ R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
+ R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress);
+
+    // Compute the timeout in ticks, adding a small margin and saturating to the
+    // maximum s64 value on overflow.
+ s64 timeout{};
+ if (timeout_ns > 0) {
+ const s64 offset_tick(timeout_ns);
+ if (offset_tick > 0) {
+ timeout = offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = timeout_ns;
+ }
+
+ // Wait on the condition variable.
+ R_RETURN(
+ GetCurrentProcess(system.Kernel())
+ .WaitConditionVariable(address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout));
+}
+
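+// For illustration: the SVC above is the primitive behind userland condition variables.
+// Broadly, the kernel atomically releases the mutex word at `address` and sleeps the
+// thread on `cv_key`; once signalled (or timed out), the thread re-acquires the mutex
+// tagged with `tag` before returning.
+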
+/// Signals a process-wide key (condition variable)
+void SignalProcessWideKey(Core::System& system, u64 cv_key, s32 count) {
+ LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
+
+ // Signal the condition variable.
+ return GetCurrentProcess(system.Kernel())
+ .SignalConditionVariable(Common::AlignDown(cv_key, sizeof(u32)), count);
+}
+
+Result WaitProcessWideKeyAtomic64(Core::System& system, uint64_t address, uint64_t cv_key,
+ uint32_t tag, int64_t timeout_ns) {
+ R_RETURN(WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns));
+}
+
+void SignalProcessWideKey64(Core::System& system, uint64_t cv_key, int32_t count) {
+ SignalProcessWideKey(system, cv_key, count);
+}
+
+Result WaitProcessWideKeyAtomic64From32(Core::System& system, uint32_t address, uint32_t cv_key,
+ uint32_t tag, int64_t timeout_ns) {
+ R_RETURN(WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns));
+}
+
+void SignalProcessWideKey64From32(Core::System& system, uint32_t cv_key, int32_t count) {
+ SignalProcessWideKey(system, cv_key, count);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_debug.cpp b/src/core/hle/kernel/svc/svc_debug.cpp
new file mode 100644
index 000000000..a4d1f700e
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_debug.cpp
@@ -0,0 +1,194 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+Result DebugActiveProcess(Core::System& system, Handle* out_handle, uint64_t process_id) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result BreakDebugProcess(Core::System& system, Handle debug_handle) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result TerminateDebugProcess(Core::System& system, Handle debug_handle) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result GetDebugEvent(Core::System& system, uint64_t out_info, Handle debug_handle) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result ContinueDebugEvent(Core::System& system, Handle debug_handle, uint32_t flags,
+ uint64_t user_thread_ids, int32_t num_thread_ids) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result GetDebugThreadContext(Core::System& system, uint64_t out_context, Handle debug_handle,
+ uint64_t thread_id, uint32_t context_flags) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result SetDebugThreadContext(Core::System& system, Handle debug_handle, uint64_t thread_id,
+ uint64_t user_context, uint32_t context_flags) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result QueryDebugProcessMemory(Core::System& system, uint64_t out_memory_info,
+ PageInfo* out_page_info, Handle process_handle, uint64_t address) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result ReadDebugProcessMemory(Core::System& system, uint64_t buffer, Handle debug_handle,
+ uint64_t address, uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result WriteDebugProcessMemory(Core::System& system, Handle debug_handle, uint64_t buffer,
+ uint64_t address, uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result SetHardwareBreakPoint(Core::System& system, HardwareBreakPointRegisterName name,
+ uint64_t flags, uint64_t value) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result GetDebugThreadParam(Core::System& system, uint64_t* out_64, uint32_t* out_32,
+ Handle debug_handle, uint64_t thread_id, DebugThreadParam param) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result DebugActiveProcess64(Core::System& system, Handle* out_handle, uint64_t process_id) {
+ R_RETURN(DebugActiveProcess(system, out_handle, process_id));
+}
+
+Result BreakDebugProcess64(Core::System& system, Handle debug_handle) {
+ R_RETURN(BreakDebugProcess(system, debug_handle));
+}
+
+Result TerminateDebugProcess64(Core::System& system, Handle debug_handle) {
+ R_RETURN(TerminateDebugProcess(system, debug_handle));
+}
+
+Result GetDebugEvent64(Core::System& system, uint64_t out_info, Handle debug_handle) {
+ R_RETURN(GetDebugEvent(system, out_info, debug_handle));
+}
+
+Result ContinueDebugEvent64(Core::System& system, Handle debug_handle, uint32_t flags,
+ uint64_t thread_ids, int32_t num_thread_ids) {
+ R_RETURN(ContinueDebugEvent(system, debug_handle, flags, thread_ids, num_thread_ids));
+}
+
+Result GetDebugThreadContext64(Core::System& system, uint64_t out_context, Handle debug_handle,
+ uint64_t thread_id, uint32_t context_flags) {
+ R_RETURN(GetDebugThreadContext(system, out_context, debug_handle, thread_id, context_flags));
+}
+
+Result SetDebugThreadContext64(Core::System& system, Handle debug_handle, uint64_t thread_id,
+ uint64_t context, uint32_t context_flags) {
+ R_RETURN(SetDebugThreadContext(system, debug_handle, thread_id, context, context_flags));
+}
+
+Result QueryDebugProcessMemory64(Core::System& system, uint64_t out_memory_info,
+ PageInfo* out_page_info, Handle debug_handle, uint64_t address) {
+ R_RETURN(
+ QueryDebugProcessMemory(system, out_memory_info, out_page_info, debug_handle, address));
+}
+
+Result ReadDebugProcessMemory64(Core::System& system, uint64_t buffer, Handle debug_handle,
+ uint64_t address, uint64_t size) {
+ R_RETURN(ReadDebugProcessMemory(system, buffer, debug_handle, address, size));
+}
+
+Result WriteDebugProcessMemory64(Core::System& system, Handle debug_handle, uint64_t buffer,
+ uint64_t address, uint64_t size) {
+ R_RETURN(WriteDebugProcessMemory(system, debug_handle, buffer, address, size));
+}
+
+Result SetHardwareBreakPoint64(Core::System& system, HardwareBreakPointRegisterName name,
+ uint64_t flags, uint64_t value) {
+ R_RETURN(SetHardwareBreakPoint(system, name, flags, value));
+}
+
+Result GetDebugThreadParam64(Core::System& system, uint64_t* out_64, uint32_t* out_32,
+ Handle debug_handle, uint64_t thread_id, DebugThreadParam param) {
+ R_RETURN(GetDebugThreadParam(system, out_64, out_32, debug_handle, thread_id, param));
+}
+
+Result DebugActiveProcess64From32(Core::System& system, Handle* out_handle, uint64_t process_id) {
+ R_RETURN(DebugActiveProcess(system, out_handle, process_id));
+}
+
+Result BreakDebugProcess64From32(Core::System& system, Handle debug_handle) {
+ R_RETURN(BreakDebugProcess(system, debug_handle));
+}
+
+Result TerminateDebugProcess64From32(Core::System& system, Handle debug_handle) {
+ R_RETURN(TerminateDebugProcess(system, debug_handle));
+}
+
+Result GetDebugEvent64From32(Core::System& system, uint32_t out_info, Handle debug_handle) {
+ R_RETURN(GetDebugEvent(system, out_info, debug_handle));
+}
+
+Result ContinueDebugEvent64From32(Core::System& system, Handle debug_handle, uint32_t flags,
+ uint32_t thread_ids, int32_t num_thread_ids) {
+ R_RETURN(ContinueDebugEvent(system, debug_handle, flags, thread_ids, num_thread_ids));
+}
+
+Result GetDebugThreadContext64From32(Core::System& system, uint32_t out_context,
+ Handle debug_handle, uint64_t thread_id,
+ uint32_t context_flags) {
+ R_RETURN(GetDebugThreadContext(system, out_context, debug_handle, thread_id, context_flags));
+}
+
+Result SetDebugThreadContext64From32(Core::System& system, Handle debug_handle, uint64_t thread_id,
+ uint32_t context, uint32_t context_flags) {
+ R_RETURN(SetDebugThreadContext(system, debug_handle, thread_id, context, context_flags));
+}
+
+Result QueryDebugProcessMemory64From32(Core::System& system, uint32_t out_memory_info,
+ PageInfo* out_page_info, Handle debug_handle,
+ uint32_t address) {
+ R_RETURN(
+ QueryDebugProcessMemory(system, out_memory_info, out_page_info, debug_handle, address));
+}
+
+Result ReadDebugProcessMemory64From32(Core::System& system, uint32_t buffer, Handle debug_handle,
+ uint32_t address, uint32_t size) {
+ R_RETURN(ReadDebugProcessMemory(system, buffer, debug_handle, address, size));
+}
+
+Result WriteDebugProcessMemory64From32(Core::System& system, Handle debug_handle, uint32_t buffer,
+ uint32_t address, uint32_t size) {
+ R_RETURN(WriteDebugProcessMemory(system, debug_handle, buffer, address, size));
+}
+
+Result SetHardwareBreakPoint64From32(Core::System& system, HardwareBreakPointRegisterName name,
+ uint64_t flags, uint64_t value) {
+ R_RETURN(SetHardwareBreakPoint(system, name, flags, value));
+}
+
+Result GetDebugThreadParam64From32(Core::System& system, uint64_t* out_64, uint32_t* out_32,
+ Handle debug_handle, uint64_t thread_id,
+ DebugThreadParam param) {
+ R_RETURN(GetDebugThreadParam(system, out_64, out_32, debug_handle, thread_id, param));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_debug_string.cpp b/src/core/hle/kernel/svc/svc_debug_string.cpp
new file mode 100644
index 000000000..4c14ce668
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_debug_string.cpp
@@ -0,0 +1,30 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc.h"
+#include "core/memory.h"
+
+namespace Kernel::Svc {
+
+/// Used to output a message on a debug hardware unit - does nothing on a retail unit
+Result OutputDebugString(Core::System& system, u64 address, u64 len) {
+ R_SUCCEED_IF(len == 0);
+
+ std::string str(len, '\0');
+ GetCurrentMemory(system.Kernel()).ReadBlock(address, str.data(), str.size());
+ LOG_DEBUG(Debug_Emulated, "{}", str);
+
+ R_SUCCEED();
+}
+
+Result OutputDebugString64(Core::System& system, uint64_t debug_str, uint64_t len) {
+ R_RETURN(OutputDebugString(system, debug_str, len));
+}
+
+Result OutputDebugString64From32(Core::System& system, uint32_t debug_str, uint32_t len) {
+ R_RETURN(OutputDebugString(system, debug_str, len));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_device_address_space.cpp b/src/core/hle/kernel/svc/svc_device_address_space.cpp
new file mode 100644
index 000000000..ec3143e67
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_device_address_space.cpp
@@ -0,0 +1,258 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_device_address_space.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+constexpr inline u64 DeviceAddressSpaceAlignMask = (1ULL << 22) - 1;
+
+constexpr bool IsProcessAndDeviceAligned(uint64_t process_address, uint64_t device_address) {
+ return (process_address & DeviceAddressSpaceAlignMask) ==
+ (device_address & DeviceAddressSpaceAlignMask);
+}
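+// That is, the two addresses must share the same offset within a 4 MiB (1 << 22) block:
+// e.g. 0x10340000 and 0x80340000 are compatible (offset 0x340000 in both), while
+// 0x10340000 and 0x80341000 are not.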
+
+Result CreateDeviceAddressSpace(Core::System& system, Handle* out, uint64_t das_address,
+ uint64_t das_size) {
+ // Validate input.
+ R_UNLESS(Common::IsAligned(das_address, PageSize), ResultInvalidMemoryRegion);
+ R_UNLESS(Common::IsAligned(das_size, PageSize), ResultInvalidMemoryRegion);
+ R_UNLESS(das_size > 0, ResultInvalidMemoryRegion);
+ R_UNLESS((das_address < das_address + das_size), ResultInvalidMemoryRegion);
+
+ // Create the device address space.
+ KDeviceAddressSpace* das = KDeviceAddressSpace::Create(system.Kernel());
+ R_UNLESS(das != nullptr, ResultOutOfResource);
+ SCOPE_EXIT({ das->Close(); });
+
+ // Initialize the device address space.
+ R_TRY(das->Initialize(das_address, das_size));
+
+ // Register the device address space.
+ KDeviceAddressSpace::Register(system.Kernel(), das);
+
+ // Add to the handle table.
+ R_TRY(GetCurrentProcess(system.Kernel()).GetHandleTable().Add(out, das));
+
+ R_SUCCEED();
+}
+
+Result AttachDeviceAddressSpace(Core::System& system, DeviceName device_name, Handle das_handle) {
+ // Get the device address space.
+ KScopedAutoObject das = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KDeviceAddressSpace>(das_handle);
+ R_UNLESS(das.IsNotNull(), ResultInvalidHandle);
+
+ // Attach.
+ R_RETURN(das->Attach(device_name));
+}
+
+Result DetachDeviceAddressSpace(Core::System& system, DeviceName device_name, Handle das_handle) {
+ // Get the device address space.
+ KScopedAutoObject das = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KDeviceAddressSpace>(das_handle);
+ R_UNLESS(das.IsNotNull(), ResultInvalidHandle);
+
+ // Detach.
+ R_RETURN(das->Detach(device_name));
+}
+
+constexpr bool IsValidDeviceMemoryPermission(MemoryPermission device_perm) {
+ switch (device_perm) {
+ case MemoryPermission::Read:
+ case MemoryPermission::Write:
+ case MemoryPermission::ReadWrite:
+ return true;
+ default:
+ return false;
+ }
+}
+
+Result MapDeviceAddressSpaceByForce(Core::System& system, Handle das_handle, Handle process_handle,
+ uint64_t process_address, uint64_t size,
+ uint64_t device_address, u32 option) {
+ // Decode the option.
+ const MapDeviceAddressSpaceOption option_pack{option};
+ const auto device_perm = option_pack.permission;
+ const auto reserved = option_pack.reserved;
+
+ // Validate input.
+ R_UNLESS(Common::IsAligned(process_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(device_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((process_address < process_address + size), ResultInvalidCurrentMemory);
+ R_UNLESS((device_address < device_address + size), ResultInvalidMemoryRegion);
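+    // NOTE: The following check is a tautology as written (process_address is already a
+    //       u64); it appears to mirror an address-width check in the reference kernel.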
+ R_UNLESS((process_address == static_cast<uint64_t>(process_address)),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(IsValidDeviceMemoryPermission(device_perm), ResultInvalidNewMemoryPermission);
+ R_UNLESS(reserved == 0, ResultInvalidEnumValue);
+
+ // Get the device address space.
+ KScopedAutoObject das = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KDeviceAddressSpace>(das_handle);
+ R_UNLESS(das.IsNotNull(), ResultInvalidHandle);
+
+ // Get the process.
+ KScopedAutoObject process =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KProcess>(process_handle);
+ R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
+
+ // Validate that the process address is within range.
+ auto& page_table = process->PageTable();
+ R_UNLESS(page_table.Contains(process_address, size), ResultInvalidCurrentMemory);
+
+ // Map.
+ R_RETURN(
+ das->MapByForce(std::addressof(page_table), process_address, size, device_address, option));
+}
+
+Result MapDeviceAddressSpaceAligned(Core::System& system, Handle das_handle, Handle process_handle,
+ uint64_t process_address, uint64_t size,
+ uint64_t device_address, u32 option) {
+ // Decode the option.
+ const MapDeviceAddressSpaceOption option_pack{option};
+ const auto device_perm = option_pack.permission;
+ const auto reserved = option_pack.reserved;
+
+ // Validate input.
+ R_UNLESS(Common::IsAligned(process_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(device_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(IsProcessAndDeviceAligned(process_address, device_address), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((process_address < process_address + size), ResultInvalidCurrentMemory);
+ R_UNLESS((device_address < device_address + size), ResultInvalidMemoryRegion);
+ R_UNLESS((process_address == static_cast<uint64_t>(process_address)),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(IsValidDeviceMemoryPermission(device_perm), ResultInvalidNewMemoryPermission);
+ R_UNLESS(reserved == 0, ResultInvalidEnumValue);
+
+ // Get the device address space.
+ KScopedAutoObject das = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KDeviceAddressSpace>(das_handle);
+ R_UNLESS(das.IsNotNull(), ResultInvalidHandle);
+
+ // Get the process.
+ KScopedAutoObject process =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KProcess>(process_handle);
+ R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
+
+ // Validate that the process address is within range.
+ auto& page_table = process->PageTable();
+ R_UNLESS(page_table.Contains(process_address, size), ResultInvalidCurrentMemory);
+
+ // Map.
+ R_RETURN(
+ das->MapAligned(std::addressof(page_table), process_address, size, device_address, option));
+}
+
+Result UnmapDeviceAddressSpace(Core::System& system, Handle das_handle, Handle process_handle,
+ uint64_t process_address, uint64_t size, uint64_t device_address) {
+ // Validate input.
+ R_UNLESS(Common::IsAligned(process_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(device_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((process_address < process_address + size), ResultInvalidCurrentMemory);
+ R_UNLESS((device_address < device_address + size), ResultInvalidMemoryRegion);
+ R_UNLESS((process_address == static_cast<uint64_t>(process_address)),
+ ResultInvalidCurrentMemory);
+
+ // Get the device address space.
+ KScopedAutoObject das = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KDeviceAddressSpace>(das_handle);
+ R_UNLESS(das.IsNotNull(), ResultInvalidHandle);
+
+ // Get the process.
+ KScopedAutoObject process =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KProcess>(process_handle);
+ R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
+
+ // Validate that the process address is within range.
+ auto& page_table = process->PageTable();
+ R_UNLESS(page_table.Contains(process_address, size), ResultInvalidCurrentMemory);
+
+ R_RETURN(das->Unmap(std::addressof(page_table), process_address, size, device_address));
+}
+
+Result CreateDeviceAddressSpace64(Core::System& system, Handle* out_handle, uint64_t das_address,
+ uint64_t das_size) {
+ R_RETURN(CreateDeviceAddressSpace(system, out_handle, das_address, das_size));
+}
+
+Result AttachDeviceAddressSpace64(Core::System& system, DeviceName device_name, Handle das_handle) {
+ R_RETURN(AttachDeviceAddressSpace(system, device_name, das_handle));
+}
+
+Result DetachDeviceAddressSpace64(Core::System& system, DeviceName device_name, Handle das_handle) {
+ R_RETURN(DetachDeviceAddressSpace(system, device_name, das_handle));
+}
+
+Result MapDeviceAddressSpaceByForce64(Core::System& system, Handle das_handle,
+ Handle process_handle, uint64_t process_address,
+ uint64_t size, uint64_t device_address, u32 option) {
+ R_RETURN(MapDeviceAddressSpaceByForce(system, das_handle, process_handle, process_address, size,
+ device_address, option));
+}
+
+Result MapDeviceAddressSpaceAligned64(Core::System& system, Handle das_handle,
+ Handle process_handle, uint64_t process_address,
+ uint64_t size, uint64_t device_address, u32 option) {
+ R_RETURN(MapDeviceAddressSpaceAligned(system, das_handle, process_handle, process_address, size,
+ device_address, option));
+}
+
+Result UnmapDeviceAddressSpace64(Core::System& system, Handle das_handle, Handle process_handle,
+ uint64_t process_address, uint64_t size, uint64_t device_address) {
+ R_RETURN(UnmapDeviceAddressSpace(system, das_handle, process_handle, process_address, size,
+ device_address));
+}
+
+Result CreateDeviceAddressSpace64From32(Core::System& system, Handle* out_handle,
+ uint64_t das_address, uint64_t das_size) {
+ R_RETURN(CreateDeviceAddressSpace(system, out_handle, das_address, das_size));
+}
+
+Result AttachDeviceAddressSpace64From32(Core::System& system, DeviceName device_name,
+ Handle das_handle) {
+ R_RETURN(AttachDeviceAddressSpace(system, device_name, das_handle));
+}
+
+Result DetachDeviceAddressSpace64From32(Core::System& system, DeviceName device_name,
+ Handle das_handle) {
+ R_RETURN(DetachDeviceAddressSpace(system, device_name, das_handle));
+}
+
+Result MapDeviceAddressSpaceByForce64From32(Core::System& system, Handle das_handle,
+ Handle process_handle, uint64_t process_address,
+ uint32_t size, uint64_t device_address, u32 option) {
+ R_RETURN(MapDeviceAddressSpaceByForce(system, das_handle, process_handle, process_address, size,
+ device_address, option));
+}
+
+Result MapDeviceAddressSpaceAligned64From32(Core::System& system, Handle das_handle,
+ Handle process_handle, uint64_t process_address,
+ uint32_t size, uint64_t device_address, u32 option) {
+ R_RETURN(MapDeviceAddressSpaceAligned(system, das_handle, process_handle, process_address, size,
+ device_address, option));
+}
+
+Result UnmapDeviceAddressSpace64From32(Core::System& system, Handle das_handle,
+ Handle process_handle, uint64_t process_address,
+ uint32_t size, uint64_t device_address) {
+ R_RETURN(UnmapDeviceAddressSpace(system, das_handle, process_handle, process_address, size,
+ device_address));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_event.cpp b/src/core/hle/kernel/svc/svc_event.cpp
new file mode 100644
index 000000000..901202e6a
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_event.cpp
@@ -0,0 +1,120 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+Result SignalEvent(Core::System& system, Handle event_handle) {
+ LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
+
+ // Get the current handle table.
+ const KHandleTable& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+
+ // Get the event.
+ KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
+ R_UNLESS(event.IsNotNull(), ResultInvalidHandle);
+
+ R_RETURN(event->Signal());
+}
+
+Result ClearEvent(Core::System& system, Handle event_handle) {
+ LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
+
+ // Get the current handle table.
+ const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+
+ // Try to clear the writable event.
+ {
+ KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
+ if (event.IsNotNull()) {
+ R_RETURN(event->Clear());
+ }
+ }
+
+ // Try to clear the readable event.
+ {
+ KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
+ if (readable_event.IsNotNull()) {
+ R_RETURN(readable_event->Clear());
+ }
+ }
+
+ R_THROW(ResultInvalidHandle);
+}
+
+Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
+ LOG_DEBUG(Kernel_SVC, "called");
+
+ // Get the kernel reference and handle table.
+ auto& kernel = system.Kernel();
+ auto& handle_table = GetCurrentProcess(kernel).GetHandleTable();
+
+ // Reserve a new event from the process resource limit
+ KScopedResourceReservation event_reservation(GetCurrentProcessPointer(kernel),
+ LimitableResource::EventCountMax);
+ R_UNLESS(event_reservation.Succeeded(), ResultLimitReached);
+
+ // Create a new event.
+ KEvent* event = KEvent::Create(kernel);
+ R_UNLESS(event != nullptr, ResultOutOfResource);
+
+ // Initialize the event.
+ event->Initialize(GetCurrentProcessPointer(kernel));
+
+    // Commit the event reservation.
+ event_reservation.Commit();
+
+    // Ensure that we clean up the event (and that its only remaining references are in
+    // the handle table) when the function ends.
+ SCOPE_EXIT({
+ event->GetReadableEvent().Close();
+ event->Close();
+ });
+
+ // Register the event.
+ KEvent::Register(kernel, event);
+
+ // Add the event to the handle table.
+ R_TRY(handle_table.Add(out_write, event));
+
+ // Ensure that we maintain a clean handle state on exit.
+ ON_RESULT_FAILURE {
+ handle_table.Remove(*out_write);
+ };
+
+ // Add the readable event to the handle table.
+ R_RETURN(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
+}
+
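+// For illustration (guest-side pseudocode using this file's names; handles hypothetical):
+//   Handle w, r;
+//   CreateEvent(&w, &r);   // w = writable event handle, r = readable event handle
+//   SignalEvent(w);        // wakes threads synchronized on r
+//   ClearEvent(r);         // resets the event after the wait completes
+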
+Result SignalEvent64(Core::System& system, Handle event_handle) {
+ R_RETURN(SignalEvent(system, event_handle));
+}
+
+Result ClearEvent64(Core::System& system, Handle event_handle) {
+ R_RETURN(ClearEvent(system, event_handle));
+}
+
+Result CreateEvent64(Core::System& system, Handle* out_write_handle, Handle* out_read_handle) {
+ R_RETURN(CreateEvent(system, out_write_handle, out_read_handle));
+}
+
+Result SignalEvent64From32(Core::System& system, Handle event_handle) {
+ R_RETURN(SignalEvent(system, event_handle));
+}
+
+Result ClearEvent64From32(Core::System& system, Handle event_handle) {
+ R_RETURN(ClearEvent(system, event_handle));
+}
+
+Result CreateEvent64From32(Core::System& system, Handle* out_write_handle,
+ Handle* out_read_handle) {
+ R_RETURN(CreateEvent(system, out_write_handle, out_read_handle));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_exception.cpp b/src/core/hle/kernel/svc/svc_exception.cpp
new file mode 100644
index 000000000..580cf2f75
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_exception.cpp
@@ -0,0 +1,137 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/debugger/debugger.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/memory.h"
+#include "core/reporter.h"
+
+namespace Kernel::Svc {
+
+/// Break program execution
+void Break(Core::System& system, BreakReason reason, u64 info1, u64 info2) {
+ BreakReason break_reason =
+ reason & static_cast<BreakReason>(~BreakReason::NotificationOnlyFlag);
+ bool notification_only = True(reason & BreakReason::NotificationOnlyFlag);
+
+ bool has_dumped_buffer{};
+ std::vector<u8> debug_buffer;
+
+ const auto handle_debug_buffer = [&](u64 addr, u64 sz) {
+ if (sz == 0 || addr == 0 || has_dumped_buffer) {
+ return;
+ }
+
+ auto& memory = GetCurrentMemory(system.Kernel());
+
+        // A 4-byte buffer is typically an error code, so assume that is the case.
+ if (sz == sizeof(u32)) {
+ LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", memory.Read32(addr));
+ } else {
+            // We don't know what's in here, so hexdump it.
+            debug_buffer.resize(sz);
+            memory.ReadBlock(addr, debug_buffer.data(), sz);
+            std::string hexdump;
+            for (std::size_t i = 0; i < debug_buffer.size(); i++) {
+                hexdump += fmt::format("{:02X} ", debug_buffer[i]);
+                // Break the dump into rows of 16 bytes.
+                if ((i + 1) % 16 == 0) {
+                    hexdump += '\n';
+                }
+            }
+ LOG_CRITICAL(Debug_Emulated, "debug_buffer=\n{}", hexdump);
+ }
+ has_dumped_buffer = true;
+ };
+ switch (break_reason) {
+ case BreakReason::Panic:
+ LOG_CRITICAL(Debug_Emulated, "Userspace PANIC! info1=0x{:016X}, info2=0x{:016X}", info1,
+ info2);
+ handle_debug_buffer(info1, info2);
+ break;
+ case BreakReason::Assert:
+ LOG_CRITICAL(Debug_Emulated, "Userspace Assertion failed! info1=0x{:016X}, info2=0x{:016X}",
+ info1, info2);
+ handle_debug_buffer(info1, info2);
+ break;
+ case BreakReason::User:
+ LOG_WARNING(Debug_Emulated, "Userspace Break! 0x{:016X} with size 0x{:016X}", info1, info2);
+ handle_debug_buffer(info1, info2);
+ break;
+ case BreakReason::PreLoadDll:
+ LOG_INFO(Debug_Emulated,
+ "Userspace Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", info1,
+ info2);
+ break;
+ case BreakReason::PostLoadDll:
+ LOG_INFO(Debug_Emulated, "Userspace Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
+ info2);
+ break;
+ case BreakReason::PreUnloadDll:
+ LOG_INFO(Debug_Emulated,
+ "Userspace Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", info1,
+ info2);
+ break;
+ case BreakReason::PostUnloadDll:
+ LOG_INFO(Debug_Emulated, "Userspace Unloaded an NRO at 0x{:016X} with size 0x{:016X}",
+ info1, info2);
+ break;
+ case BreakReason::CppException:
+ LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered.");
+ break;
+ default:
+ LOG_WARNING(
+ Debug_Emulated,
+ "Signalling debugger, Unknown break reason {:#X}, info1=0x{:016X}, info2=0x{:016X}",
+ reason, info1, info2);
+ handle_debug_buffer(info1, info2);
+ break;
+ }
+
+ system.GetReporter().SaveSvcBreakReport(
+ static_cast<u32>(reason), notification_only, info1, info2,
+ has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
+
+ if (!notification_only) {
+ LOG_CRITICAL(
+ Debug_Emulated,
+ "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
+ reason, info1, info2);
+
+ handle_debug_buffer(info1, info2);
+
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
+ const auto thread_processor_id = current_thread->GetActiveCore();
+ system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
+ }
+
+ if (system.DebuggerEnabled()) {
+ auto* thread = system.Kernel().GetCurrentEmuThread();
+ system.GetDebugger().NotifyThreadStopped(thread);
+ thread->RequestSuspend(Kernel::SuspendType::Debug);
+ }
+}
+
+void ReturnFromException(Core::System& system, Result result) {
+ UNIMPLEMENTED();
+}
+
+void Break64(Core::System& system, BreakReason break_reason, uint64_t arg, uint64_t size) {
+ Break(system, break_reason, arg, size);
+}
+
+void Break64From32(Core::System& system, BreakReason break_reason, uint32_t arg, uint32_t size) {
+ Break(system, break_reason, arg, size);
+}
+
+void ReturnFromException64(Core::System& system, Result result) {
+ ReturnFromException(system, result);
+}
+
+void ReturnFromException64From32(Core::System& system, Result result) {
+ ReturnFromException(system, result);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
new file mode 100644
index 000000000..445cdd87b
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -0,0 +1,277 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Gets system/memory information about the system or a given kernel object
+Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle handle,
+ u64 info_sub_id) {
+ LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}",
+ info_id_type, info_sub_id, handle);
+
+ u32 info_id = static_cast<u32>(info_id_type);
+
+ switch (info_id_type) {
+ case InfoType::CoreMask:
+ case InfoType::PriorityMask:
+ case InfoType::AliasRegionAddress:
+ case InfoType::AliasRegionSize:
+ case InfoType::HeapRegionAddress:
+ case InfoType::HeapRegionSize:
+ case InfoType::AslrRegionAddress:
+ case InfoType::AslrRegionSize:
+ case InfoType::StackRegionAddress:
+ case InfoType::StackRegionSize:
+ case InfoType::TotalMemorySize:
+ case InfoType::UsedMemorySize:
+ case InfoType::SystemResourceSizeTotal:
+ case InfoType::SystemResourceSizeUsed:
+ case InfoType::ProgramId:
+ case InfoType::UserExceptionContextAddress:
+ case InfoType::TotalNonSystemMemorySize:
+ case InfoType::UsedNonSystemMemorySize:
+ case InfoType::IsApplication:
+ case InfoType::FreeThreadCount: {
+ R_UNLESS(info_sub_id == 0, ResultInvalidEnumValue);
+
+ const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
+ R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
+
+ switch (info_id_type) {
+ case InfoType::CoreMask:
+ *result = process->GetCoreMask();
+ R_SUCCEED();
+
+ case InfoType::PriorityMask:
+ *result = process->GetPriorityMask();
+ R_SUCCEED();
+
+ case InfoType::AliasRegionAddress:
+ *result = GetInteger(process->PageTable().GetAliasRegionStart());
+ R_SUCCEED();
+
+ case InfoType::AliasRegionSize:
+ *result = process->PageTable().GetAliasRegionSize();
+ R_SUCCEED();
+
+ case InfoType::HeapRegionAddress:
+ *result = GetInteger(process->PageTable().GetHeapRegionStart());
+ R_SUCCEED();
+
+ case InfoType::HeapRegionSize:
+ *result = process->PageTable().GetHeapRegionSize();
+ R_SUCCEED();
+
+ case InfoType::AslrRegionAddress:
+ *result = GetInteger(process->PageTable().GetAliasCodeRegionStart());
+ R_SUCCEED();
+
+ case InfoType::AslrRegionSize:
+ *result = process->PageTable().GetAliasCodeRegionSize();
+ R_SUCCEED();
+
+ case InfoType::StackRegionAddress:
+ *result = GetInteger(process->PageTable().GetStackRegionStart());
+ R_SUCCEED();
+
+ case InfoType::StackRegionSize:
+ *result = process->PageTable().GetStackRegionSize();
+ R_SUCCEED();
+
+ case InfoType::TotalMemorySize:
+ *result = process->GetTotalPhysicalMemoryAvailable();
+ R_SUCCEED();
+
+ case InfoType::UsedMemorySize:
+ *result = process->GetTotalPhysicalMemoryUsed();
+ R_SUCCEED();
+
+ case InfoType::SystemResourceSizeTotal:
+ *result = process->GetSystemResourceSize();
+ R_SUCCEED();
+
+ case InfoType::SystemResourceSizeUsed:
+ LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
+ *result = process->GetSystemResourceUsage();
+ R_SUCCEED();
+
+ case InfoType::ProgramId:
+ *result = process->GetProgramId();
+ R_SUCCEED();
+
+ case InfoType::UserExceptionContextAddress:
+ *result = GetInteger(process->GetProcessLocalRegionAddress());
+ R_SUCCEED();
+
+ case InfoType::TotalNonSystemMemorySize:
+ *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
+ R_SUCCEED();
+
+ case InfoType::UsedNonSystemMemorySize:
+ *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
+ R_SUCCEED();
+
+ case InfoType::IsApplication:
+ LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
+ *result = true;
+ R_SUCCEED();
+
+ case InfoType::FreeThreadCount:
+ *result = process->GetFreeThreadCount();
+ R_SUCCEED();
+
+ default:
+ break;
+ }
+
+ LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
+ R_THROW(ResultInvalidEnumValue);
+ }
+
+ case InfoType::DebuggerAttached:
+ *result = 0;
+ R_SUCCEED();
+
+ case InfoType::ResourceLimit: {
+ R_UNLESS(handle == 0, ResultInvalidHandle);
+ R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
+
+ KProcess* const current_process = GetCurrentProcessPointer(system.Kernel());
+ KHandleTable& handle_table = current_process->GetHandleTable();
+ const auto resource_limit = current_process->GetResourceLimit();
+ if (!resource_limit) {
+ *result = Svc::InvalidHandle;
+ // Yes, the kernel considers this a successful operation.
+ R_SUCCEED();
+ }
+
+ Handle resource_handle{};
+ R_TRY(handle_table.Add(std::addressof(resource_handle), resource_limit));
+
+ *result = resource_handle;
+ R_SUCCEED();
+ }
+
+ case InfoType::RandomEntropy:
+ R_UNLESS(handle == 0, ResultInvalidHandle);
+ R_UNLESS(info_sub_id < KProcess::RANDOM_ENTROPY_SIZE, ResultInvalidCombination);
+
+ *result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id);
+ R_SUCCEED();
+
+ case InfoType::InitialProcessIdRange:
+ LOG_WARNING(Kernel_SVC,
+ "(STUBBED) Attempted to query privileged process id bounds, returned 0");
+ *result = 0;
+ R_SUCCEED();
+
+ case InfoType::ThreadTickCount: {
+ constexpr u64 num_cpus = 4;
+ if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
+ LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
+ info_sub_id);
+ R_THROW(ResultInvalidCombination);
+ }
+
+ KScopedAutoObject thread = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KThread>(static_cast<Handle>(handle));
+ if (thread.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
+ static_cast<Handle>(handle));
+ R_THROW(ResultInvalidHandle);
+ }
+
+ const auto& core_timing = system.CoreTiming();
+ const auto& scheduler = *system.Kernel().CurrentScheduler();
+ const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
+ const bool same_thread = current_thread == thread.GetPointerUnsafe();
+
+ const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime();
+ u64 out_ticks = 0;
+ if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
+ const u64 thread_ticks = current_thread->GetCpuTime();
+
+ out_ticks = thread_ticks + (core_timing.GetClockTicks() - prev_ctx_ticks);
+ } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
+ out_ticks = core_timing.GetClockTicks() - prev_ctx_ticks;
+ }
+
+ *result = out_ticks;
+ R_SUCCEED();
+ }
+ case InfoType::IdleTickCount: {
+ // Verify the input handle is invalid.
+ R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
+
+ // Verify the requested core is valid.
+ const bool core_valid =
+ (info_sub_id == 0xFFFFFFFFFFFFFFFF) ||
+ (info_sub_id == static_cast<u64>(system.Kernel().CurrentPhysicalCoreIndex()));
+ R_UNLESS(core_valid, ResultInvalidCombination);
+
+ // Get the idle tick count.
+ *result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
+ R_SUCCEED();
+ }
+ case InfoType::MesosphereCurrentProcess: {
+ // Verify the input handle is invalid.
+ R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
+
+ // Verify the sub-type is valid.
+ R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
+
+ // Get the handle table.
+ KProcess* current_process = GetCurrentProcessPointer(system.Kernel());
+ KHandleTable& handle_table = current_process->GetHandleTable();
+
+ // Get a new handle for the current process.
+ Handle tmp;
+ R_TRY(handle_table.Add(std::addressof(tmp), current_process));
+
+ // Set the output.
+ *result = tmp;
+
+ // We succeeded.
+ R_SUCCEED();
+ }
+ default:
+ LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
+ R_THROW(ResultInvalidEnumValue);
+ }
+}
+
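+// For illustration (guest-side pseudocode; 0xFFFF8001 is the pseudo-handle real kernels
+// accept for the current process):
+//   u64 heap_base{};
+//   GetInfo(&heap_base, InfoType::HeapRegionAddress, 0xFFFF8001, 0);
+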
+Result GetSystemInfo(Core::System& system, uint64_t* out, SystemInfoType info_type, Handle handle,
+ uint64_t info_subtype) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result GetInfo64(Core::System& system, uint64_t* out, InfoType info_type, Handle handle,
+ uint64_t info_subtype) {
+ R_RETURN(GetInfo(system, out, info_type, handle, info_subtype));
+}
+
+Result GetSystemInfo64(Core::System& system, uint64_t* out, SystemInfoType info_type, Handle handle,
+ uint64_t info_subtype) {
+ R_RETURN(GetSystemInfo(system, out, info_type, handle, info_subtype));
+}
+
+Result GetInfo64From32(Core::System& system, uint64_t* out, InfoType info_type, Handle handle,
+ uint64_t info_subtype) {
+ R_RETURN(GetInfo(system, out, info_type, handle, info_subtype));
+}
+
+Result GetSystemInfo64From32(Core::System& system, uint64_t* out, SystemInfoType info_type,
+ Handle handle, uint64_t info_subtype) {
+ R_RETURN(GetSystemInfo(system, out, info_type, handle, info_subtype));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_insecure_memory.cpp b/src/core/hle/kernel/svc/svc_insecure_memory.cpp
new file mode 100644
index 000000000..00457c6bf
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_insecure_memory.cpp
@@ -0,0 +1,35 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+Result MapInsecureMemory(Core::System& system, uint64_t address, uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result UnmapInsecureMemory(Core::System& system, uint64_t address, uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result MapInsecureMemory64(Core::System& system, uint64_t address, uint64_t size) {
+ R_RETURN(MapInsecureMemory(system, address, size));
+}
+
+Result UnmapInsecureMemory64(Core::System& system, uint64_t address, uint64_t size) {
+ R_RETURN(UnmapInsecureMemory(system, address, size));
+}
+
+Result MapInsecureMemory64From32(Core::System& system, uint32_t address, uint32_t size) {
+ R_RETURN(MapInsecureMemory(system, address, size));
+}
+
+Result UnmapInsecureMemory64From32(Core::System& system, uint32_t address, uint32_t size) {
+ R_RETURN(UnmapInsecureMemory(system, address, size));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_interrupt_event.cpp b/src/core/hle/kernel/svc/svc_interrupt_event.cpp
new file mode 100644
index 000000000..768b30a1f
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_interrupt_event.cpp
@@ -0,0 +1,25 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+Result CreateInterruptEvent(Core::System& system, Handle* out, int32_t interrupt_id,
+ InterruptType type) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result CreateInterruptEvent64(Core::System& system, Handle* out_read_handle, int32_t interrupt_id,
+ InterruptType interrupt_type) {
+ R_RETURN(CreateInterruptEvent(system, out_read_handle, interrupt_id, interrupt_type));
+}
+
+Result CreateInterruptEvent64From32(Core::System& system, Handle* out_read_handle,
+ int32_t interrupt_id, InterruptType interrupt_type) {
+ R_RETURN(CreateInterruptEvent(system, out_read_handle, interrupt_id, interrupt_type));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_io_pool.cpp b/src/core/hle/kernel/svc/svc_io_pool.cpp
new file mode 100644
index 000000000..f01817e24
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_io_pool.cpp
@@ -0,0 +1,71 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+Result CreateIoPool(Core::System& system, Handle* out, IoPoolType pool_type) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result CreateIoRegion(Core::System& system, Handle* out, Handle io_pool_handle, uint64_t phys_addr,
+ uint64_t size, MemoryMapping mapping, MemoryPermission perm) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result MapIoRegion(Core::System& system, Handle io_region_handle, uint64_t address, uint64_t size,
+ MemoryPermission map_perm) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result UnmapIoRegion(Core::System& system, Handle io_region_handle, uint64_t address,
+ uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result CreateIoPool64(Core::System& system, Handle* out_handle, IoPoolType pool_type) {
+ R_RETURN(CreateIoPool(system, out_handle, pool_type));
+}
+
+Result CreateIoRegion64(Core::System& system, Handle* out_handle, Handle io_pool,
+ uint64_t physical_address, uint64_t size, MemoryMapping mapping,
+ MemoryPermission perm) {
+ R_RETURN(CreateIoRegion(system, out_handle, io_pool, physical_address, size, mapping, perm));
+}
+
+Result MapIoRegion64(Core::System& system, Handle io_region, uint64_t address, uint64_t size,
+ MemoryPermission perm) {
+ R_RETURN(MapIoRegion(system, io_region, address, size, perm));
+}
+
+Result UnmapIoRegion64(Core::System& system, Handle io_region, uint64_t address, uint64_t size) {
+ R_RETURN(UnmapIoRegion(system, io_region, address, size));
+}
+
+Result CreateIoPool64From32(Core::System& system, Handle* out_handle, IoPoolType pool_type) {
+ R_RETURN(CreateIoPool(system, out_handle, pool_type));
+}
+
+Result CreateIoRegion64From32(Core::System& system, Handle* out_handle, Handle io_pool,
+ uint64_t physical_address, uint32_t size, MemoryMapping mapping,
+ MemoryPermission perm) {
+ R_RETURN(CreateIoRegion(system, out_handle, io_pool, physical_address, size, mapping, perm));
+}
+
+Result MapIoRegion64From32(Core::System& system, Handle io_region, uint32_t address, uint32_t size,
+ MemoryPermission perm) {
+ R_RETURN(MapIoRegion(system, io_region, address, size, perm));
+}
+
+Result UnmapIoRegion64From32(Core::System& system, Handle io_region, uint32_t address,
+ uint32_t size) {
+ R_RETURN(UnmapIoRegion(system, io_region, address, size));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp
new file mode 100644
index 000000000..60247df2e
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_ipc.cpp
@@ -0,0 +1,173 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "common/scratch_buffer.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_client_session.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_server_session.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Makes a blocking IPC call to a service.
+Result SendSyncRequest(Core::System& system, Handle handle) {
+ // Get the client session from its handle.
+ KScopedAutoObject session =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KClientSession>(handle);
+ R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+
+ LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}", handle);
+
+ R_RETURN(session->SendSyncRequest());
+}
+
+Result SendSyncRequestWithUserBuffer(Core::System& system, uint64_t message_buffer,
+ uint64_t message_buffer_size, Handle session_handle) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result SendAsyncRequestWithUserBuffer(Core::System& system, Handle* out_event_handle,
+ uint64_t message_buffer, uint64_t message_buffer_size,
+ Handle session_handle) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result ReplyAndReceive(Core::System& system, s32* out_index, uint64_t handles_addr, s32 num_handles,
+ Handle reply_target, s64 timeout_ns) {
+ auto& kernel = system.Kernel();
+ auto& handle_table = GetCurrentProcess(kernel).GetHandleTable();
+
+ R_UNLESS(0 <= num_handles && num_handles <= ArgumentHandleCountMax, ResultOutOfRange);
+ R_UNLESS(GetCurrentMemory(kernel).IsValidVirtualAddressRange(
+ handles_addr, static_cast<u64>(sizeof(Handle) * num_handles)),
+ ResultInvalidPointer);
+
+ std::array<Handle, Svc::ArgumentHandleCountMax> handles;
+ GetCurrentMemory(kernel).ReadBlock(handles_addr, handles.data(), sizeof(Handle) * num_handles);
+
+ // Convert handle list to object table.
+ std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> objs;
+ R_UNLESS(handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles.data(),
+ num_handles),
+ ResultInvalidHandle);
+
+ // Ensure handles are closed when we're done.
+ SCOPE_EXIT({
+ for (auto i = 0; i < num_handles; ++i) {
+ objs[i]->Close();
+ }
+ });
+
+ // Reply to the target, if one is specified.
+ if (reply_target != InvalidHandle) {
+ KScopedAutoObject session = handle_table.GetObject<KServerSession>(reply_target);
+ R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+
+ // If we fail to reply, we want to set the output index to -1.
+ ON_RESULT_FAILURE {
+ *out_index = -1;
+ };
+
+ // Send the reply.
+ R_TRY(session->SendReply());
+ }
+
+ // Wait for a message.
+ while (true) {
+ // Wait for an object.
+ s32 index;
+ Result result = KSynchronizationObject::Wait(kernel, std::addressof(index), objs.data(),
+ num_handles, timeout_ns);
+ if (result == ResultTimedOut) {
+ R_RETURN(result);
+ }
+
+ // Receive the request.
+ if (R_SUCCEEDED(result)) {
+ KServerSession* session = objs[index]->DynamicCast<KServerSession*>();
+ if (session != nullptr) {
+ result = session->ReceiveRequest();
+ if (result == ResultNotFound) {
+ continue;
+ }
+ }
+ }
+
+ *out_index = index;
+ R_RETURN(result);
+ }
+}
+
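+// For illustration, a guest-side server loop over this SVC (pseudocode; `sessions` is a
+// hypothetical handle array):
+//   Handle reply = InvalidHandle;
+//   for (;;) {
+//       s32 idx;
+//       Result rc = ReplyAndReceive(&idx, sessions, count, reply, -1);
+//       reply = InvalidHandle;
+//       if (R_SUCCEEDED(rc)) {
+//           // ... parse the request on sessions[idx] ...
+//           reply = sessions[idx]; // send the reply on the next iteration
+//       }
+//   }
+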
+Result ReplyAndReceiveWithUserBuffer(Core::System& system, int32_t* out_index,
+ uint64_t message_buffer, uint64_t message_buffer_size,
+ uint64_t handles, int32_t num_handles, Handle reply_target,
+ int64_t timeout_ns) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result SendSyncRequest64(Core::System& system, Handle session_handle) {
+ R_RETURN(SendSyncRequest(system, session_handle));
+}
+
+Result SendSyncRequestWithUserBuffer64(Core::System& system, uint64_t message_buffer,
+ uint64_t message_buffer_size, Handle session_handle) {
+ R_RETURN(
+ SendSyncRequestWithUserBuffer(system, message_buffer, message_buffer_size, session_handle));
+}
+
+Result SendAsyncRequestWithUserBuffer64(Core::System& system, Handle* out_event_handle,
+ uint64_t message_buffer, uint64_t message_buffer_size,
+ Handle session_handle) {
+ R_RETURN(SendAsyncRequestWithUserBuffer(system, out_event_handle, message_buffer,
+ message_buffer_size, session_handle));
+}
+
+Result ReplyAndReceive64(Core::System& system, int32_t* out_index, uint64_t handles,
+ int32_t num_handles, Handle reply_target, int64_t timeout_ns) {
+ R_RETURN(ReplyAndReceive(system, out_index, handles, num_handles, reply_target, timeout_ns));
+}
+
+Result ReplyAndReceiveWithUserBuffer64(Core::System& system, int32_t* out_index,
+ uint64_t message_buffer, uint64_t message_buffer_size,
+ uint64_t handles, int32_t num_handles, Handle reply_target,
+ int64_t timeout_ns) {
+ R_RETURN(ReplyAndReceiveWithUserBuffer(system, out_index, message_buffer, message_buffer_size,
+ handles, num_handles, reply_target, timeout_ns));
+}
+
+Result SendSyncRequest64From32(Core::System& system, Handle session_handle) {
+ R_RETURN(SendSyncRequest(system, session_handle));
+}
+
+Result SendSyncRequestWithUserBuffer64From32(Core::System& system, uint32_t message_buffer,
+ uint32_t message_buffer_size, Handle session_handle) {
+ R_RETURN(
+ SendSyncRequestWithUserBuffer(system, message_buffer, message_buffer_size, session_handle));
+}
+
+Result SendAsyncRequestWithUserBuffer64From32(Core::System& system, Handle* out_event_handle,
+ uint32_t message_buffer, uint32_t message_buffer_size,
+ Handle session_handle) {
+ R_RETURN(SendAsyncRequestWithUserBuffer(system, out_event_handle, message_buffer,
+ message_buffer_size, session_handle));
+}
+
+Result ReplyAndReceive64From32(Core::System& system, int32_t* out_index, uint32_t handles,
+ int32_t num_handles, Handle reply_target, int64_t timeout_ns) {
+ R_RETURN(ReplyAndReceive(system, out_index, handles, num_handles, reply_target, timeout_ns));
+}
+
+Result ReplyAndReceiveWithUserBuffer64From32(Core::System& system, int32_t* out_index,
+ uint32_t message_buffer, uint32_t message_buffer_size,
+ uint32_t handles, int32_t num_handles,
+ Handle reply_target, int64_t timeout_ns) {
+ R_RETURN(ReplyAndReceiveWithUserBuffer(system, out_index, message_buffer, message_buffer_size,
+ handles, num_handles, reply_target, timeout_ns));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_kernel_debug.cpp b/src/core/hle/kernel/svc/svc_kernel_debug.cpp
new file mode 100644
index 000000000..cee048279
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_kernel_debug.cpp
@@ -0,0 +1,35 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+void KernelDebug(Core::System& system, KernelDebugType kernel_debug_type, u64 arg0, u64 arg1,
+ u64 arg2) {
+ // Intentionally do nothing, as this does nothing in released kernel binaries.
+}
+
+void ChangeKernelTraceState(Core::System& system, KernelTraceState trace_state) {
+ // Intentionally do nothing, as this does nothing in released kernel binaries.
+}
+
+void KernelDebug64(Core::System& system, KernelDebugType kern_debug_type, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2) {
+ KernelDebug(system, kern_debug_type, arg0, arg1, arg2);
+}
+
+void ChangeKernelTraceState64(Core::System& system, KernelTraceState kern_trace_state) {
+ ChangeKernelTraceState(system, kern_trace_state);
+}
+
+void KernelDebug64From32(Core::System& system, KernelDebugType kern_debug_type, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2) {
+ KernelDebug(system, kern_debug_type, arg0, arg1, arg2);
+}
+
+void ChangeKernelTraceState64From32(Core::System& system, KernelTraceState kern_trace_state) {
+ ChangeKernelTraceState(system, kern_trace_state);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_light_ipc.cpp b/src/core/hle/kernel/svc/svc_light_ipc.cpp
new file mode 100644
index 000000000..b76ce984c
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_light_ipc.cpp
@@ -0,0 +1,73 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/arm/arm_interface.h"
+#include "core/core.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+Result SendSyncRequestLight(Core::System& system, Handle session_handle, u32* args) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result ReplyAndReceiveLight(Core::System& system, Handle session_handle, u32* args) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result SendSyncRequestLight64(Core::System& system, Handle session_handle, u32* args) {
+ R_RETURN(SendSyncRequestLight(system, session_handle, args));
+}
+
+Result ReplyAndReceiveLight64(Core::System& system, Handle session_handle, u32* args) {
+ R_RETURN(ReplyAndReceiveLight(system, session_handle, args));
+}
+
+Result SendSyncRequestLight64From32(Core::System& system, Handle session_handle, u32* args) {
+ R_RETURN(SendSyncRequestLight(system, session_handle, args));
+}
+
+Result ReplyAndReceiveLight64From32(Core::System& system, Handle session_handle, u32* args) {
+ R_RETURN(ReplyAndReceiveLight(system, session_handle, args));
+}
+
+// Custom ABI implementation for light IPC.
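+// The session handle arrives in register W0 and the seven message words in W1-W7; the
+// result and the updated message words are written back to the same registers, so the
+// wrappers below marshal registers by hand instead of using the generated SVC ABI.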
+
+template <typename F>
+static void SvcWrap_LightIpc(Core::System& system, F&& cb) {
+ auto& core = system.CurrentArmInterface();
+ std::array<u32, 7> arguments{};
+
+ Handle session_handle = static_cast<Handle>(core.GetReg(0));
+ for (int i = 0; i < 7; i++) {
+ arguments[i] = static_cast<u32>(core.GetReg(i + 1));
+ }
+
+ Result ret = cb(system, session_handle, arguments.data());
+
+ core.SetReg(0, ret.raw);
+ for (int i = 0; i < 7; i++) {
+ core.SetReg(i + 1, arguments[i]);
+ }
+}
+
+void SvcWrap_SendSyncRequestLight64(Core::System& system) {
+ SvcWrap_LightIpc(system, SendSyncRequestLight64);
+}
+
+void SvcWrap_ReplyAndReceiveLight64(Core::System& system) {
+ SvcWrap_LightIpc(system, ReplyAndReceiveLight64);
+}
+
+void SvcWrap_SendSyncRequestLight64From32(Core::System& system) {
+ SvcWrap_LightIpc(system, SendSyncRequestLight64From32);
+}
+
+void SvcWrap_ReplyAndReceiveLight64From32(Core::System& system) {
+ SvcWrap_LightIpc(system, ReplyAndReceiveLight64From32);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_lock.cpp b/src/core/hle/kernel/svc/svc_lock.cpp
new file mode 100644
index 000000000..1d7bc4246
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_lock.cpp
@@ -0,0 +1,51 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Attempts to lock a mutex
+Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u32 tag) {
+ LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
+ thread_handle, address, tag);
+
+ // Validate the input address.
+ R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
+ R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
+
+ R_RETURN(GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag));
+}
+
+/// Unlocks a mutex
+Result ArbitrateUnlock(Core::System& system, u64 address) {
+ LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
+
+ // Validate the input address.
+ R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
+ R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
+
+ R_RETURN(GetCurrentProcess(system.Kernel()).SignalToAddress(address));
+}
+
+Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) {
+ R_RETURN(ArbitrateLock(system, thread_handle, address, tag));
+}
+
+Result ArbitrateUnlock64(Core::System& system, uint64_t address) {
+ R_RETURN(ArbitrateUnlock(system, address));
+}
+
+Result ArbitrateLock64From32(Core::System& system, Handle thread_handle, uint32_t address,
+ uint32_t tag) {
+ R_RETURN(ArbitrateLock(system, thread_handle, address, tag));
+}
+
+Result ArbitrateUnlock64From32(Core::System& system, uint32_t address) {
+ R_RETURN(ArbitrateUnlock(system, address));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
new file mode 100644
index 000000000..5dcb7f045
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -0,0 +1,216 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
+ switch (perm) {
+ case MemoryPermission::None:
+ case MemoryPermission::Read:
+ case MemoryPermission::ReadWrite:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Checks that address + size is strictly greater than the given address.
+// This fails when the addition overflows the 64-bit type
+// or when the given size is zero.
+constexpr bool IsValidAddressRange(u64 address, u64 size) {
+ return address + size > address;
+}
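+// For example: with address = 0xFFFF'FFFF'FFFF'F000 and size = 0x2000, address + size
+// wraps to 0x1000 and the range is rejected; size == 0 is rejected for the same reason,
+// since address + 0 == address.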
+
+// Helper function that performs the common sanity checks for svcMapMemory
+// and svcUnmapMemory. This works because both functions perform their
+// sanity checks in the same order.
+Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) {
+ if (!Common::Is4KBAligned(dst_addr)) {
+ LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
+ R_THROW(ResultInvalidAddress);
+ }
+
+ if (!Common::Is4KBAligned(src_addr)) {
+ LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
+ R_THROW(ResultInvalidSize);
+ }
+
+ if (size == 0) {
+ LOG_ERROR(Kernel_SVC, "Size is 0");
+ R_THROW(ResultInvalidSize);
+ }
+
+ if (!Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
+ R_THROW(ResultInvalidSize);
+ }
+
+ if (!IsValidAddressRange(dst_addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
+ dst_addr, size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ if (!IsValidAddressRange(src_addr, size)) {
+ LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
+ src_addr, size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ if (!manager.IsInsideAddressSpace(src_addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
+ src_addr, size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ if (manager.IsOutsideStackRegion(dst_addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
+ dst_addr, size);
+ R_THROW(ResultInvalidMemoryRegion);
+ }
+
+    if (manager.IsInsideHeapRegion(dst_addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Destination overlaps the heap region, addr=0x{:016X}, "
+                  "size=0x{:016X}",
+                  dst_addr, size);
+        R_THROW(ResultInvalidMemoryRegion);
+    }
+
+    if (manager.IsInsideAliasRegion(dst_addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Destination overlaps the alias (map) region, addr=0x{:016X}, "
+                  "size=0x{:016X}",
+                  dst_addr, size);
+        R_THROW(ResultInvalidMemoryRegion);
+    }
+
+ R_SUCCEED();
+}
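+// Taken together, these checks require the source range to lie inside the address space
+// and the destination range to lie inside the stack region while overlapping neither the
+// heap region nor the alias region.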
+
+} // namespace
+
+Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
+ LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
+ perm);
+
+ // Validate address / size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Validate the permission.
+ R_UNLESS(IsValidSetMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Validate that the region is in range for the current process.
+ auto& page_table = GetCurrentProcess(system.Kernel()).PageTable();
+ R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Set the memory permission.
+ R_RETURN(page_table.SetMemoryPermission(address, size, perm));
+}
+
+Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask, u32 attr) {
+ LOG_DEBUG(Kernel_SVC,
+ "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
+ size, mask, attr);
+
+ // Validate address / size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Validate the attribute and mask.
+ constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
+ R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
+ R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
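+    // The two checks above encode subset relations: (mask | attr) == mask requires that
+    // attr only sets bits already in mask, and (mask | attr | SupportedMask) == SupportedMask
+    // requires that mask stays within SupportedMask. For example, mask = attr = Uncached
+    // passes, while any bit outside Uncached in either value fails with
+    // ResultInvalidCombination.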
+
+ // Validate that the region is in range for the current process.
+ auto& page_table{GetCurrentProcess(system.Kernel()).PageTable()};
+ R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Set the memory attribute.
+ R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr));
+}
+
+/// Maps a memory range into a different range.
+Result MapMemory(Core::System& system, u64 dst_addr, u64 src_addr, u64 size) {
+ LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
+ src_addr, size);
+
+ auto& page_table{GetCurrentProcess(system.Kernel()).PageTable()};
+
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ result.IsError()) {
+ return result;
+ }
+
+ R_RETURN(page_table.MapMemory(dst_addr, src_addr, size));
+}
+
+/// Unmaps a region that was previously mapped with svcMapMemory
+Result UnmapMemory(Core::System& system, u64 dst_addr, u64 src_addr, u64 size) {
+ LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
+ src_addr, size);
+
+ auto& page_table{GetCurrentProcess(system.Kernel()).PageTable()};
+
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ result.IsError()) {
+ return result;
+ }
+
+ R_RETURN(page_table.UnmapMemory(dst_addr, src_addr, size));
+}
+
+Result SetMemoryPermission64(Core::System& system, uint64_t address, uint64_t size,
+ MemoryPermission perm) {
+ R_RETURN(SetMemoryPermission(system, address, size, perm));
+}
+
+Result SetMemoryAttribute64(Core::System& system, uint64_t address, uint64_t size, uint32_t mask,
+ uint32_t attr) {
+ R_RETURN(SetMemoryAttribute(system, address, size, mask, attr));
+}
+
+Result MapMemory64(Core::System& system, uint64_t dst_address, uint64_t src_address,
+ uint64_t size) {
+ R_RETURN(MapMemory(system, dst_address, src_address, size));
+}
+
+Result UnmapMemory64(Core::System& system, uint64_t dst_address, uint64_t src_address,
+ uint64_t size) {
+ R_RETURN(UnmapMemory(system, dst_address, src_address, size));
+}
+
+Result SetMemoryPermission64From32(Core::System& system, uint32_t address, uint32_t size,
+ MemoryPermission perm) {
+ R_RETURN(SetMemoryPermission(system, address, size, perm));
+}
+
+Result SetMemoryAttribute64From32(Core::System& system, uint32_t address, uint32_t size,
+ uint32_t mask, uint32_t attr) {
+ R_RETURN(SetMemoryAttribute(system, address, size, mask, attr));
+}
+
+Result MapMemory64From32(Core::System& system, uint32_t dst_address, uint32_t src_address,
+ uint32_t size) {
+ R_RETURN(MapMemory(system, dst_address, src_address, size));
+}
+
+Result UnmapMemory64From32(Core::System& system, uint32_t dst_address, uint32_t src_address,
+ uint32_t size) {
+ R_RETURN(UnmapMemory(system, dst_address, src_address, size));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
new file mode 100644
index 000000000..c2fbfb59a
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -0,0 +1,183 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Sets the process heap to a given size. It can both extend and shrink the heap.
+Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
+ LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
+
+ // Validate size.
+ R_UNLESS(Common::IsAligned(size, HeapSizeAlignment), ResultInvalidSize);
+ R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
+
+ // Set the heap size.
+ R_RETURN(GetCurrentProcess(system.Kernel()).PageTable().SetHeapSize(out_address, size));
+}
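+// NOTE: HeapSizeAlignment is the heap growth granularity (assumed here to be 2 MiB, as on
+// the target kernel), so e.g. a request of 0x300000 bytes fails the alignment check with
+// ResultInvalidSize, while 0x400000 passes.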
+
+/// Maps memory at a desired address
+Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
+ LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
+
+ if (!Common::Is4KBAligned(addr)) {
+ LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
+ R_THROW(ResultInvalidAddress);
+ }
+
+ if (!Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
+ R_THROW(ResultInvalidSize);
+ }
+
+ if (size == 0) {
+ LOG_ERROR(Kernel_SVC, "Size is zero");
+ R_THROW(ResultInvalidSize);
+ }
+
+ if (!(addr < addr + size)) {
+ LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
+ R_THROW(ResultInvalidMemoryRegion);
+ }
+
+ KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
+ auto& page_table{current_process->PageTable()};
+
+ if (current_process->GetSystemResourceSize() == 0) {
+ LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
+ R_THROW(ResultInvalidState);
+ }
+
+ if (!page_table.IsInsideAddressSpace(addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
+ size);
+ R_THROW(ResultInvalidMemoryRegion);
+ }
+
+ if (page_table.IsOutsideAliasRegion(addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
+ size);
+ R_THROW(ResultInvalidMemoryRegion);
+ }
+
+ R_RETURN(page_table.MapPhysicalMemory(addr, size));
+}
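+// NOTE: The GetSystemResourceSize() == 0 check above rejects processes that never reserved
+// a personal system resource; on the real kernel, memory mapped into the alias region this
+// way is accounted against that resource.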
+
+/// Unmaps memory previously mapped via MapPhysicalMemory
+Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
+ LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
+
+ if (!Common::Is4KBAligned(addr)) {
+ LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
+ R_THROW(ResultInvalidAddress);
+ }
+
+ if (!Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
+ R_THROW(ResultInvalidSize);
+ }
+
+ if (size == 0) {
+ LOG_ERROR(Kernel_SVC, "Size is zero");
+ R_THROW(ResultInvalidSize);
+ }
+
+ if (!(addr < addr + size)) {
+ LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
+ R_THROW(ResultInvalidMemoryRegion);
+ }
+
+ KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
+ auto& page_table{current_process->PageTable()};
+
+ if (current_process->GetSystemResourceSize() == 0) {
+ LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
+ R_THROW(ResultInvalidState);
+ }
+
+ if (!page_table.IsInsideAddressSpace(addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
+ size);
+ R_THROW(ResultInvalidMemoryRegion);
+ }
+
+ if (page_table.IsOutsideAliasRegion(addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
+ size);
+ R_THROW(ResultInvalidMemoryRegion);
+ }
+
+ R_RETURN(page_table.UnmapPhysicalMemory(addr, size));
+}
+
+Result MapPhysicalMemoryUnsafe(Core::System& system, uint64_t address, uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result UnmapPhysicalMemoryUnsafe(Core::System& system, uint64_t address, uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result SetUnsafeLimit(Core::System& system, uint64_t limit) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result SetHeapSize64(Core::System& system, uint64_t* out_address, uint64_t size) {
+ R_RETURN(SetHeapSize(system, out_address, size));
+}
+
+Result MapPhysicalMemory64(Core::System& system, uint64_t address, uint64_t size) {
+ R_RETURN(MapPhysicalMemory(system, address, size));
+}
+
+Result UnmapPhysicalMemory64(Core::System& system, uint64_t address, uint64_t size) {
+ R_RETURN(UnmapPhysicalMemory(system, address, size));
+}
+
+Result MapPhysicalMemoryUnsafe64(Core::System& system, uint64_t address, uint64_t size) {
+ R_RETURN(MapPhysicalMemoryUnsafe(system, address, size));
+}
+
+Result UnmapPhysicalMemoryUnsafe64(Core::System& system, uint64_t address, uint64_t size) {
+ R_RETURN(UnmapPhysicalMemoryUnsafe(system, address, size));
+}
+
+Result SetUnsafeLimit64(Core::System& system, uint64_t limit) {
+ R_RETURN(SetUnsafeLimit(system, limit));
+}
+
+Result SetHeapSize64From32(Core::System& system, uint64_t* out_address, uint32_t size) {
+ R_RETURN(SetHeapSize(system, out_address, size));
+}
+
+Result MapPhysicalMemory64From32(Core::System& system, uint32_t address, uint32_t size) {
+ R_RETURN(MapPhysicalMemory(system, address, size));
+}
+
+Result UnmapPhysicalMemory64From32(Core::System& system, uint32_t address, uint32_t size) {
+ R_RETURN(UnmapPhysicalMemory(system, address, size));
+}
+
+Result MapPhysicalMemoryUnsafe64From32(Core::System& system, uint32_t address, uint32_t size) {
+ R_RETURN(MapPhysicalMemoryUnsafe(system, address, size));
+}
+
+Result UnmapPhysicalMemoryUnsafe64From32(Core::System& system, uint32_t address, uint32_t size) {
+ R_RETURN(UnmapPhysicalMemoryUnsafe(system, address, size));
+}
+
+Result SetUnsafeLimit64From32(Core::System& system, uint32_t limit) {
+ R_RETURN(SetUnsafeLimit(system, limit));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_port.cpp b/src/core/hle/kernel/svc/svc_port.cpp
new file mode 100644
index 000000000..abba757c7
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_port.cpp
@@ -0,0 +1,159 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_client_port.h"
+#include "core/hle/kernel/k_client_session.h"
+#include "core/hle/kernel/k_object_name.h"
+#include "core/hle/kernel/k_port.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+Result ConnectToNamedPort(Core::System& system, Handle* out, u64 user_name) {
+ // Copy the provided name from user memory to kernel memory.
+ auto string_name =
+ GetCurrentMemory(system.Kernel()).ReadCString(user_name, KObjectName::NameLengthMax);
+
+ std::array<char, KObjectName::NameLengthMax> name{};
+ std::strncpy(name.data(), string_name.c_str(), KObjectName::NameLengthMax - 1);
+
+    // Validate the name.
+ R_UNLESS(name[sizeof(name) - 1] == '\x00', ResultOutOfRange);
+
+ // Get the current handle table.
+ auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+
+ // Find the client port.
+ auto port = KObjectName::Find<KClientPort>(system.Kernel(), name.data());
+ R_UNLESS(port.IsNotNull(), ResultNotFound);
+
+ // Reserve a handle for the port.
+ // NOTE: Nintendo really does write directly to the output handle here.
+ R_TRY(handle_table.Reserve(out));
+ ON_RESULT_FAILURE {
+ handle_table.Unreserve(*out);
+ };
+
+ // Create a session.
+ KClientSession* session;
+ R_TRY(port->CreateSession(std::addressof(session)));
+
+ // Register the session in the table, close the extra reference.
+ handle_table.Register(*out, session);
+ session->Close();
+
+ // We succeeded.
+ R_SUCCEED();
+}
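+// A hypothetical user-mode call sequence for the SVC above (libnx-style names, illustrative
+// only; "sm:" is the named port registered by the service manager):
+//
+//     Handle sm_session{};
+//     if (R_SUCCEEDED(svcConnectToNamedPort(&sm_session, "sm:"))) {
+//         // ... use sm_session for IPC, then svcCloseHandle(sm_session) ...
+//     }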
+
+Result CreatePort(Core::System& system, Handle* out_server, Handle* out_client,
+ int32_t max_sessions, bool is_light, uint64_t name) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result ConnectToPort(Core::System& system, Handle* out_handle, Handle port) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t user_name,
+ int32_t max_sessions) {
+ // Copy the provided name from user memory to kernel memory.
+ auto string_name =
+ GetCurrentMemory(system.Kernel()).ReadCString(user_name, KObjectName::NameLengthMax);
+
+    // Copy the name into a fixed-size, null-terminated buffer.
+ std::array<char, KObjectName::NameLengthMax> name{};
+ std::strncpy(name.data(), string_name.c_str(), KObjectName::NameLengthMax - 1);
+
+    // Validate the session count and the name.
+ R_UNLESS(max_sessions >= 0, ResultOutOfRange);
+ R_UNLESS(name[sizeof(name) - 1] == '\x00', ResultOutOfRange);
+
+ if (max_sessions > 0) {
+ // Get the current handle table.
+ auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+
+ // Create a new port.
+ KPort* port = KPort::Create(system.Kernel());
+ R_UNLESS(port != nullptr, ResultOutOfResource);
+
+ // Initialize the new port.
+ port->Initialize(max_sessions, false, 0);
+
+ // Register the port.
+ KPort::Register(system.Kernel(), port);
+
+ // Ensure that our only reference to the port is in the handle table when we're done.
+ SCOPE_EXIT({
+ port->GetClientPort().Close();
+ port->GetServerPort().Close();
+ });
+
+ // Register the handle in the table.
+ R_TRY(handle_table.Add(out_server_handle, std::addressof(port->GetServerPort())));
+ ON_RESULT_FAILURE {
+ handle_table.Remove(*out_server_handle);
+ };
+
+ // Create a new object name.
+ R_TRY(KObjectName::NewFromName(system.Kernel(), std::addressof(port->GetClientPort()),
+ name.data()));
+ } else /* if (max_sessions == 0) */ {
+ // Ensure that this else case is correct.
+ ASSERT(max_sessions == 0);
+
+ // If we're closing, there's no server handle.
+ *out_server_handle = InvalidHandle;
+
+ // Delete the object.
+ R_TRY(KObjectName::Delete<KClientPort>(system.Kernel(), name.data()));
+ }
+
+ R_SUCCEED();
+}
+
+Result ConnectToNamedPort64(Core::System& system, Handle* out_handle, uint64_t name) {
+ R_RETURN(ConnectToNamedPort(system, out_handle, name));
+}
+
+Result CreatePort64(Core::System& system, Handle* out_server_handle, Handle* out_client_handle,
+ int32_t max_sessions, bool is_light, uint64_t name) {
+ R_RETURN(
+ CreatePort(system, out_server_handle, out_client_handle, max_sessions, is_light, name));
+}
+
+Result ManageNamedPort64(Core::System& system, Handle* out_server_handle, uint64_t name,
+ int32_t max_sessions) {
+ R_RETURN(ManageNamedPort(system, out_server_handle, name, max_sessions));
+}
+
+Result ConnectToPort64(Core::System& system, Handle* out_handle, Handle port) {
+ R_RETURN(ConnectToPort(system, out_handle, port));
+}
+
+Result ConnectToNamedPort64From32(Core::System& system, Handle* out_handle, uint32_t name) {
+ R_RETURN(ConnectToNamedPort(system, out_handle, name));
+}
+
+Result CreatePort64From32(Core::System& system, Handle* out_server_handle,
+ Handle* out_client_handle, int32_t max_sessions, bool is_light,
+ uint32_t name) {
+ R_RETURN(
+ CreatePort(system, out_server_handle, out_client_handle, max_sessions, is_light, name));
+}
+
+Result ManageNamedPort64From32(Core::System& system, Handle* out_server_handle, uint32_t name,
+ int32_t max_sessions) {
+ R_RETURN(ManageNamedPort(system, out_server_handle, name, max_sessions));
+}
+
+Result ConnectToPort64From32(Core::System& system, Handle* out_handle, Handle port) {
+ R_RETURN(ConnectToPort(system, out_handle, port));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_power_management.cpp b/src/core/hle/kernel/svc/svc_power_management.cpp
new file mode 100644
index 000000000..f605a0317
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_power_management.cpp
@@ -0,0 +1,21 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+void SleepSystem(Core::System& system) {
+ UNIMPLEMENTED();
+}
+
+void SleepSystem64(Core::System& system) {
+ return SleepSystem(system);
+}
+
+void SleepSystem64From32(Core::System& system) {
+ return SleepSystem(system);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_process.cpp b/src/core/hle/kernel/svc/svc_process.cpp
new file mode 100644
index 000000000..619ed16a3
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_process.cpp
@@ -0,0 +1,194 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Exits the current process
+void ExitProcess(Core::System& system) {
+ auto* current_process = GetCurrentProcessPointer(system.Kernel());
+
+ LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessId());
+ ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
+ "Process has already exited");
+
+ system.Exit();
+}
+
+/// Gets the ID of the specified process or a specified thread's owning process.
+Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
+ LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);
+
+ // Get the object from the handle table.
+    KScopedAutoObject obj =
+        GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KAutoObject>(handle);
+ R_UNLESS(obj.IsNotNull(), ResultInvalidHandle);
+
+ // Get the process from the object.
+ KProcess* process = nullptr;
+ if (KProcess* p = obj->DynamicCast<KProcess*>(); p != nullptr) {
+ // The object is a process, so we can use it directly.
+ process = p;
+    } else if (KThread* t = obj->DynamicCast<KThread*>(); t != nullptr) {
+        // The object is a thread, so we want to use its owning process.
+        process = t->GetOwnerProcess();
+ } else {
+ // TODO(bunnei): This should also handle debug objects before returning.
+ UNIMPLEMENTED_MSG("Debug objects not implemented");
+ }
+
+ // Make sure the target process exists.
+ R_UNLESS(process != nullptr, ResultInvalidHandle);
+
+ // Get the process id.
+ *out_process_id = process->GetId();
+
+ R_SUCCEED();
+}
+
+Result GetProcessList(Core::System& system, s32* out_num_processes, u64 out_process_ids,
+ int32_t out_process_ids_size) {
+ LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
+ out_process_ids, out_process_ids_size);
+
+ // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
+ if ((out_process_ids_size & 0xF0000000) != 0) {
+ LOG_ERROR(Kernel_SVC,
+ "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
+ out_process_ids_size);
+ R_THROW(ResultOutOfRange);
+ }
+
+ auto& kernel = system.Kernel();
+ const auto total_copy_size = out_process_ids_size * sizeof(u64);
+
+ if (out_process_ids_size > 0 && !GetCurrentProcess(kernel).PageTable().IsInsideAddressSpace(
+ out_process_ids, total_copy_size)) {
+ LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+ out_process_ids, out_process_ids + total_copy_size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ auto& memory = GetCurrentMemory(kernel);
+ const auto& process_list = kernel.GetProcessList();
+ const auto num_processes = process_list.size();
+ const auto copy_amount =
+ std::min(static_cast<std::size_t>(out_process_ids_size), num_processes);
+
+ for (std::size_t i = 0; i < copy_amount; ++i) {
+ memory.Write64(out_process_ids, process_list[i]->GetProcessId());
+ out_process_ids += sizeof(u64);
+ }
+
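+    // Note that the reported count is the total number of processes, which may exceed the
+    // number of IDs actually copied when the caller's buffer is too small.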
+    *out_num_processes = static_cast<s32>(num_processes);
+ R_SUCCEED();
+}
+
+Result GetProcessInfo(Core::System& system, s64* out, Handle process_handle,
+ ProcessInfoType info_type) {
+ LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, info_type);
+
+ const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
+ if (process.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
+ process_handle);
+ R_THROW(ResultInvalidHandle);
+ }
+
+ if (info_type != ProcessInfoType::ProcessState) {
+ LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead",
+ info_type);
+ R_THROW(ResultInvalidEnumValue);
+ }
+
+ *out = static_cast<s64>(process->GetState());
+ R_SUCCEED();
+}
+
+Result CreateProcess(Core::System& system, Handle* out_handle, uint64_t parameters, uint64_t caps,
+ int32_t num_caps) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result StartProcess(Core::System& system, Handle process_handle, int32_t priority, int32_t core_id,
+ uint64_t main_thread_stack_size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result TerminateProcess(Core::System& system, Handle process_handle) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+void ExitProcess64(Core::System& system) {
+ ExitProcess(system);
+}
+
+Result GetProcessId64(Core::System& system, uint64_t* out_process_id, Handle process_handle) {
+ R_RETURN(GetProcessId(system, out_process_id, process_handle));
+}
+
+Result GetProcessList64(Core::System& system, int32_t* out_num_processes, uint64_t out_process_ids,
+ int32_t max_out_count) {
+ R_RETURN(GetProcessList(system, out_num_processes, out_process_ids, max_out_count));
+}
+
+Result CreateProcess64(Core::System& system, Handle* out_handle, uint64_t parameters, uint64_t caps,
+ int32_t num_caps) {
+ R_RETURN(CreateProcess(system, out_handle, parameters, caps, num_caps));
+}
+
+Result StartProcess64(Core::System& system, Handle process_handle, int32_t priority,
+ int32_t core_id, uint64_t main_thread_stack_size) {
+ R_RETURN(StartProcess(system, process_handle, priority, core_id, main_thread_stack_size));
+}
+
+Result TerminateProcess64(Core::System& system, Handle process_handle) {
+ R_RETURN(TerminateProcess(system, process_handle));
+}
+
+Result GetProcessInfo64(Core::System& system, int64_t* out_info, Handle process_handle,
+ ProcessInfoType info_type) {
+ R_RETURN(GetProcessInfo(system, out_info, process_handle, info_type));
+}
+
+void ExitProcess64From32(Core::System& system) {
+ ExitProcess(system);
+}
+
+Result GetProcessId64From32(Core::System& system, uint64_t* out_process_id, Handle process_handle) {
+ R_RETURN(GetProcessId(system, out_process_id, process_handle));
+}
+
+Result GetProcessList64From32(Core::System& system, int32_t* out_num_processes,
+ uint32_t out_process_ids, int32_t max_out_count) {
+ R_RETURN(GetProcessList(system, out_num_processes, out_process_ids, max_out_count));
+}
+
+Result CreateProcess64From32(Core::System& system, Handle* out_handle, uint32_t parameters,
+ uint32_t caps, int32_t num_caps) {
+ R_RETURN(CreateProcess(system, out_handle, parameters, caps, num_caps));
+}
+
+Result StartProcess64From32(Core::System& system, Handle process_handle, int32_t priority,
+ int32_t core_id, uint64_t main_thread_stack_size) {
+ R_RETURN(StartProcess(system, process_handle, priority, core_id, main_thread_stack_size));
+}
+
+Result TerminateProcess64From32(Core::System& system, Handle process_handle) {
+ R_RETURN(TerminateProcess(system, process_handle));
+}
+
+Result GetProcessInfo64From32(Core::System& system, int64_t* out_info, Handle process_handle,
+ ProcessInfoType info_type) {
+ R_RETURN(GetProcessInfo(system, out_info, process_handle, info_type));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
new file mode 100644
index 000000000..aee0f2f36
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -0,0 +1,320 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidAddressRange(u64 address, u64 size) {
+ return address + size > address;
+}
+
+constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) {
+ switch (perm) {
+ case Svc::MemoryPermission::None:
+ case Svc::MemoryPermission::Read:
+ case Svc::MemoryPermission::ReadWrite:
+ case Svc::MemoryPermission::ReadExecute:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace
+
+Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, u64 address,
+ u64 size, Svc::MemoryPermission perm) {
+ LOG_TRACE(Kernel_SVC,
+ "called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
+ process_handle, address, size, perm);
+
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
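+    // NOTE: The two width checks below are vacuous for 64-bit inputs; they mirror the
+    // kernel's pointer-width validation.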
+ R_UNLESS(address == static_cast<uint64_t>(address), ResultInvalidCurrentMemory);
+ R_UNLESS(size == static_cast<uint64_t>(size), ResultInvalidCurrentMemory);
+
+ // Validate the memory permission.
+ R_UNLESS(IsValidProcessMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Get the process from its handle.
+ KScopedAutoObject process =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KProcess>(process_handle);
+ R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
+
+ // Validate that the address is in range.
+ auto& page_table = process->PageTable();
+ R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Set the memory permission.
+ R_RETURN(page_table.SetProcessMemoryPermission(address, size, perm));
+}
+
+Result MapProcessMemory(Core::System& system, u64 dst_address, Handle process_handle,
+ u64 src_address, u64 size) {
+ LOG_TRACE(Kernel_SVC,
+ "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
+ dst_address, process_handle, src_address, size);
+
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
+ R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
+
+ // Get the processes.
+ KProcess* dst_process = GetCurrentProcessPointer(system.Kernel());
+ KScopedAutoObject src_process =
+ dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
+ R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
+
+ // Get the page tables.
+ auto& dst_pt = dst_process->PageTable();
+ auto& src_pt = src_process->PageTable();
+
+ // Validate that the mapping is in range.
+ R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
+ ResultInvalidMemoryRegion);
+
+ // Create a new page group.
+ KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
+ R_TRY(src_pt.MakeAndOpenPageGroup(
+ std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
+ KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Map the group.
+ R_RETURN(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode,
+ KMemoryPermission::UserReadWrite));
+}
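+// NOTE: The mapping above works by snapshotting the physical pages backing the source range
+// into a KPageGroup (which holds open references so the pages stay live), then mapping that
+// same group into the current process as SharedCode memory.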
+
+Result UnmapProcessMemory(Core::System& system, u64 dst_address, Handle process_handle,
+ u64 src_address, u64 size) {
+ LOG_TRACE(Kernel_SVC,
+ "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
+ dst_address, process_handle, src_address, size);
+
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
+ R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
+
+ // Get the processes.
+ KProcess* dst_process = GetCurrentProcessPointer(system.Kernel());
+ KScopedAutoObject src_process =
+ dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
+ R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
+
+ // Get the page tables.
+ auto& dst_pt = dst_process->PageTable();
+ auto& src_pt = src_process->PageTable();
+
+ // Validate that the mapping is in range.
+ R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
+ ResultInvalidMemoryRegion);
+
+ // Unmap the memory.
+ R_RETURN(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
+}
+
+Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
+ "src_address=0x{:016X}, size=0x{:016X}",
+ process_handle, dst_address, src_address, size);
+
+ if (!Common::Is4KBAligned(src_address)) {
+ LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
+ src_address);
+ R_THROW(ResultInvalidAddress);
+ }
+
+ if (!Common::Is4KBAligned(dst_address)) {
+ LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
+ dst_address);
+ R_THROW(ResultInvalidAddress);
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
+ R_THROW(ResultInvalidSize);
+ }
+
+ if (!IsValidAddressRange(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range overflows the address space (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ if (!IsValidAddressRange(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range overflows the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
+ if (process.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
+ process_handle);
+ R_THROW(ResultInvalidHandle);
+ }
+
+ auto& page_table = process->PageTable();
+ if (!page_table.IsInsideAddressSpace(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range is not within the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ if (!page_table.IsInsideASLRRegion(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ R_THROW(ResultInvalidMemoryRegion);
+ }
+
+ R_RETURN(page_table.MapCodeMemory(dst_address, src_address, size));
+}
+
+Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
+ "size=0x{:016X}",
+ process_handle, dst_address, src_address, size);
+
+ if (!Common::Is4KBAligned(dst_address)) {
+ LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
+ dst_address);
+ R_THROW(ResultInvalidAddress);
+ }
+
+ if (!Common::Is4KBAligned(src_address)) {
+ LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
+ src_address);
+ R_THROW(ResultInvalidAddress);
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
+ R_THROW(ResultInvalidSize);
+ }
+
+ if (!IsValidAddressRange(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range overflows the address space (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ if (!IsValidAddressRange(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range overflows the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
+ if (process.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
+ process_handle);
+ R_THROW(ResultInvalidHandle);
+ }
+
+ auto& page_table = process->PageTable();
+ if (!page_table.IsInsideAddressSpace(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range is not within the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ if (!page_table.IsInsideASLRRegion(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ R_THROW(ResultInvalidMemoryRegion);
+ }
+
+ R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size,
+ KPageTable::ICacheInvalidationStrategy::InvalidateAll));
+}
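+// NOTE: InvalidateAll is the conservative instruction-cache strategy: because the unmapped
+// range may have contained executable pages, the whole icache is invalidated rather than
+// only the affected lines.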
+
+Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address,
+ uint64_t size, MemoryPermission perm) {
+ R_RETURN(SetProcessMemoryPermission(system, process_handle, address, size, perm));
+}
+
+Result MapProcessMemory64(Core::System& system, uint64_t dst_address, Handle process_handle,
+ uint64_t src_address, uint64_t size) {
+ R_RETURN(MapProcessMemory(system, dst_address, process_handle, src_address, size));
+}
+
+Result UnmapProcessMemory64(Core::System& system, uint64_t dst_address, Handle process_handle,
+ uint64_t src_address, uint64_t size) {
+ R_RETURN(UnmapProcessMemory(system, dst_address, process_handle, src_address, size));
+}
+
+Result MapProcessCodeMemory64(Core::System& system, Handle process_handle, uint64_t dst_address,
+ uint64_t src_address, uint64_t size) {
+ R_RETURN(MapProcessCodeMemory(system, process_handle, dst_address, src_address, size));
+}
+
+Result UnmapProcessCodeMemory64(Core::System& system, Handle process_handle, uint64_t dst_address,
+ uint64_t src_address, uint64_t size) {
+ R_RETURN(UnmapProcessCodeMemory(system, process_handle, dst_address, src_address, size));
+}
+
+Result SetProcessMemoryPermission64From32(Core::System& system, Handle process_handle,
+ uint64_t address, uint64_t size, MemoryPermission perm) {
+ R_RETURN(SetProcessMemoryPermission(system, process_handle, address, size, perm));
+}
+
+Result MapProcessMemory64From32(Core::System& system, uint32_t dst_address, Handle process_handle,
+ uint64_t src_address, uint32_t size) {
+ R_RETURN(MapProcessMemory(system, dst_address, process_handle, src_address, size));
+}
+
+Result UnmapProcessMemory64From32(Core::System& system, uint32_t dst_address, Handle process_handle,
+ uint64_t src_address, uint32_t size) {
+ R_RETURN(UnmapProcessMemory(system, dst_address, process_handle, src_address, size));
+}
+
+Result MapProcessCodeMemory64From32(Core::System& system, Handle process_handle,
+ uint64_t dst_address, uint64_t src_address, uint64_t size) {
+ R_RETURN(MapProcessCodeMemory(system, process_handle, dst_address, src_address, size));
+}
+
+Result UnmapProcessCodeMemory64From32(Core::System& system, Handle process_handle,
+ uint64_t dst_address, uint64_t src_address, uint64_t size) {
+ R_RETURN(UnmapProcessCodeMemory(system, process_handle, dst_address, src_address, size));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_processor.cpp b/src/core/hle/kernel/svc/svc_processor.cpp
new file mode 100644
index 000000000..7602ce6c0
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_processor.cpp
@@ -0,0 +1,25 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/logging/log.h"
+#include "core/core.h"
+#include "core/hle/kernel/physical_core.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Get which CPU core is executing the current thread
+int32_t GetCurrentProcessorNumber(Core::System& system) {
+ LOG_TRACE(Kernel_SVC, "called");
+ return static_cast<int32_t>(system.CurrentPhysicalCore().CoreIndex());
+}
+
+int32_t GetCurrentProcessorNumber64(Core::System& system) {
+ return GetCurrentProcessorNumber(system);
+}
+
+int32_t GetCurrentProcessorNumber64From32(Core::System& system) {
+ return GetCurrentProcessorNumber(system);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
new file mode 100644
index 000000000..4d9fcd25f
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -0,0 +1,65 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+Result QueryMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info,
+ u64 query_address) {
+ LOG_TRACE(Kernel_SVC,
+ "called, out_memory_info=0x{:016X}, "
+ "query_address=0x{:016X}",
+ out_memory_info, query_address);
+
+ // Query memory is just QueryProcessMemory on the current process.
+ R_RETURN(
+ QueryProcessMemory(system, out_memory_info, out_page_info, CurrentProcess, query_address));
+}
+
+Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info,
+ Handle process_handle, uint64_t address) {
+ LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
+ const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
+ if (process.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
+ process_handle);
+ R_THROW(ResultInvalidHandle);
+ }
+
+ auto& current_memory{GetCurrentMemory(system.Kernel())};
+ const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()};
+
+ current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info));
+
+ //! This is supposed to be part of the QueryInfo call.
+ *out_page_info = {};
+
+ R_SUCCEED();
+}
+
+Result QueryMemory64(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info,
+ uint64_t address) {
+ R_RETURN(QueryMemory(system, out_memory_info, out_page_info, address));
+}
+
+Result QueryProcessMemory64(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info,
+ Handle process_handle, uint64_t address) {
+ R_RETURN(QueryProcessMemory(system, out_memory_info, out_page_info, process_handle, address));
+}
+
+Result QueryMemory64From32(Core::System& system, uint32_t out_memory_info, PageInfo* out_page_info,
+ uint32_t address) {
+ R_RETURN(QueryMemory(system, out_memory_info, out_page_info, address));
+}
+
+Result QueryProcessMemory64From32(Core::System& system, uint32_t out_memory_info,
+ PageInfo* out_page_info, Handle process_handle,
+ uint64_t address) {
+ R_RETURN(QueryProcessMemory(system, out_memory_info, out_page_info, process_handle, address));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_register.cpp b/src/core/hle/kernel/svc/svc_register.cpp
new file mode 100644
index 000000000..b883e6618
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_register.cpp
@@ -0,0 +1,27 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+Result ReadWriteRegister(Core::System& system, uint32_t* out, uint64_t address, uint32_t mask,
+ uint32_t value) {
+ *out = 0;
+
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result ReadWriteRegister64(Core::System& system, uint32_t* out_value, uint64_t address,
+ uint32_t mask, uint32_t value) {
+ R_RETURN(ReadWriteRegister(system, out_value, address, mask, value));
+}
+
+Result ReadWriteRegister64From32(Core::System& system, uint32_t* out_value, uint64_t address,
+ uint32_t mask, uint32_t value) {
+ R_RETURN(ReadWriteRegister(system, out_value, address, mask, value));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_resource_limit.cpp b/src/core/hle/kernel/svc/svc_resource_limit.cpp
new file mode 100644
index 000000000..732bc017e
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_resource_limit.cpp
@@ -0,0 +1,145 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
+ LOG_DEBUG(Kernel_SVC, "called");
+
+ // Create a new resource limit.
+ auto& kernel = system.Kernel();
+ KResourceLimit* resource_limit = KResourceLimit::Create(kernel);
+ R_UNLESS(resource_limit != nullptr, ResultOutOfResource);
+
+ // Ensure we don't leak a reference to the limit.
+ SCOPE_EXIT({ resource_limit->Close(); });
+
+ // Initialize the resource limit.
+ resource_limit->Initialize(std::addressof(system.CoreTiming()));
+
+ // Register the limit.
+ KResourceLimit::Register(kernel, resource_limit);
+
+ // Add the limit to the handle table.
+ R_RETURN(GetCurrentProcess(kernel).GetHandleTable().Add(out_handle, resource_limit));
+}
+
+Result GetResourceLimitLimitValue(Core::System& system, s64* out_limit_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
+ which);
+
+ // Validate the resource.
+ R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
+
+ // Get the resource limit.
+ KScopedAutoObject resource_limit = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KResourceLimit>(resource_limit_handle);
+ R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
+
+ // Get the limit value.
+ *out_limit_value = resource_limit->GetLimitValue(which);
+
+ R_SUCCEED();
+}
+
+Result GetResourceLimitCurrentValue(Core::System& system, s64* out_current_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
+ which);
+
+ // Validate the resource.
+ R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
+
+ // Get the resource limit.
+ KScopedAutoObject resource_limit = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KResourceLimit>(resource_limit_handle);
+ R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
+
+ // Get the current value.
+ *out_current_value = resource_limit->GetCurrentValue(which);
+
+ R_SUCCEED();
+}
+
+Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
+ LimitableResource which, s64 limit_value) {
+ LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}",
+ resource_limit_handle, which, limit_value);
+
+ // Validate the resource.
+ R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
+
+ // Get the resource limit.
+ KScopedAutoObject resource_limit = GetCurrentProcess(system.Kernel())
+ .GetHandleTable()
+ .GetObject<KResourceLimit>(resource_limit_handle);
+ R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
+
+ // Set the limit value.
+ R_RETURN(resource_limit->SetLimitValue(which, limit_value));
+}
+
+Result GetResourceLimitPeakValue(Core::System& system, int64_t* out_peak_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result GetResourceLimitLimitValue64(Core::System& system, int64_t* out_limit_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ R_RETURN(GetResourceLimitLimitValue(system, out_limit_value, resource_limit_handle, which));
+}
+
+Result GetResourceLimitCurrentValue64(Core::System& system, int64_t* out_current_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ R_RETURN(GetResourceLimitCurrentValue(system, out_current_value, resource_limit_handle, which));
+}
+
+Result GetResourceLimitPeakValue64(Core::System& system, int64_t* out_peak_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ R_RETURN(GetResourceLimitPeakValue(system, out_peak_value, resource_limit_handle, which));
+}
+
+Result CreateResourceLimit64(Core::System& system, Handle* out_handle) {
+ R_RETURN(CreateResourceLimit(system, out_handle));
+}
+
+Result SetResourceLimitLimitValue64(Core::System& system, Handle resource_limit_handle,
+ LimitableResource which, int64_t limit_value) {
+ R_RETURN(SetResourceLimitLimitValue(system, resource_limit_handle, which, limit_value));
+}
+
+Result GetResourceLimitLimitValue64From32(Core::System& system, int64_t* out_limit_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ R_RETURN(GetResourceLimitLimitValue(system, out_limit_value, resource_limit_handle, which));
+}
+
+Result GetResourceLimitCurrentValue64From32(Core::System& system, int64_t* out_current_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ R_RETURN(GetResourceLimitCurrentValue(system, out_current_value, resource_limit_handle, which));
+}
+
+Result GetResourceLimitPeakValue64From32(Core::System& system, int64_t* out_peak_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ R_RETURN(GetResourceLimitPeakValue(system, out_peak_value, resource_limit_handle, which));
+}
+
+Result CreateResourceLimit64From32(Core::System& system, Handle* out_handle) {
+ R_RETURN(CreateResourceLimit(system, out_handle));
+}
+
+Result SetResourceLimitLimitValue64From32(Core::System& system, Handle resource_limit_handle,
+ LimitableResource which, int64_t limit_value) {
+ R_RETURN(SetResourceLimitLimitValue(system, resource_limit_handle, which, limit_value));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp b/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp
new file mode 100644
index 000000000..62c781551
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp
@@ -0,0 +1,53 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/physical_core.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+void CallSecureMonitor(Core::System& system, lp64::SecureMonitorArguments* args) {
+ UNIMPLEMENTED();
+}
+
+void CallSecureMonitor64(Core::System& system, lp64::SecureMonitorArguments* args) {
+ CallSecureMonitor(system, args);
+}
+
+void CallSecureMonitor64From32(Core::System& system, ilp32::SecureMonitorArguments* args) {
+ // CallSecureMonitor64From32 is not supported.
+ UNIMPLEMENTED_MSG("CallSecureMonitor64From32");
+}
+
+// Custom ABI for CallSecureMonitor.
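+// The wrappers below marshal all eight argument registers (X0..X7, or W0..W7 for 32-bit
+// callers) in both directions, since the secure monitor call convention uses the same
+// registers for inputs and results.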
+
+void SvcWrap_CallSecureMonitor64(Core::System& system) {
+ auto& core = system.CurrentPhysicalCore().ArmInterface();
+ lp64::SecureMonitorArguments args{};
+ for (int i = 0; i < 8; i++) {
+ args.r[i] = core.GetReg(i);
+ }
+
+ CallSecureMonitor64(system, std::addressof(args));
+
+ for (int i = 0; i < 8; i++) {
+ core.SetReg(i, args.r[i]);
+ }
+}
+
+void SvcWrap_CallSecureMonitor64From32(Core::System& system) {
+ auto& core = system.CurrentPhysicalCore().ArmInterface();
+ ilp32::SecureMonitorArguments args{};
+ for (int i = 0; i < 8; i++) {
+ args.r[i] = static_cast<u32>(core.GetReg(i));
+ }
+
+ CallSecureMonitor64From32(system, std::addressof(args));
+
+ for (int i = 0; i < 8; i++) {
+ core.SetReg(i, args.r[i]);
+ }
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_session.cpp b/src/core/hle/kernel/svc/svc_session.cpp
new file mode 100644
index 000000000..01b8a52ad
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_session.cpp
@@ -0,0 +1,127 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_session.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+template <typename T>
+Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, uint64_t name) {
+ auto& process = GetCurrentProcess(system.Kernel());
+ auto& handle_table = process.GetHandleTable();
+
+ // Declare the session we're going to allocate.
+ T* session;
+
+ // Reserve a new session from the process resource limit.
+ // FIXME: LimitableResource_SessionCountMax
+ KScopedResourceReservation session_reservation(std::addressof(process),
+ LimitableResource::SessionCountMax);
+ if (session_reservation.Succeeded()) {
+ session = T::Create(system.Kernel());
+ } else {
+ R_THROW(ResultLimitReached);
+
+ // // We couldn't reserve a session. Check that we support dynamically expanding the
+ // // resource limit.
+ // R_UNLESS(process.GetResourceLimit() ==
+ // std::addressof(system.Kernel().GetSystemResourceLimit()), ResultLimitReached);
+ // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached());
+
+ // // Try to allocate a session from unused slab memory.
+ // session = T::CreateFromUnusedSlabMemory();
+ // R_UNLESS(session != nullptr, ResultLimitReached);
+ // ON_RESULT_FAILURE { session->Close(); };
+
+ // // If we're creating a KSession, we want to add two KSessionRequests to the heap, to
+ // // prevent request exhaustion.
+ // // NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's
+ // // no reason to not do this statically.
+ // if constexpr (std::same_as<T, KSession>) {
+ // for (size_t i = 0; i < 2; i++) {
+ // KSessionRequest* request = KSessionRequest::CreateFromUnusedSlabMemory();
+ // R_UNLESS(request != nullptr, ResultLimitReached);
+ // request->Close();
+ // }
+ // }
+
+ // We successfully allocated a session, so add the object we allocated to the resource
+ // limit.
+ // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::SessionCountMax, 1);
+ }
+
+ // Check that we successfully created a session.
+ R_UNLESS(session != nullptr, ResultOutOfResource);
+
+ // Initialize the session.
+ session->Initialize(nullptr, name);
+
+ // Commit the session reservation.
+ session_reservation.Commit();
+
+    // Ensure that we clean up the session on function exit, so that its only remaining
+    // references are the ones held by the handle table.
+ SCOPE_EXIT({
+ session->GetClientSession().Close();
+ session->GetServerSession().Close();
+ });
+
+ // Register the session.
+ T::Register(system.Kernel(), session);
+
+ // Add the server session to the handle table.
+ R_TRY(handle_table.Add(out_server, std::addressof(session->GetServerSession())));
+
+ // Ensure that we maintain a clean handle state on exit.
+ ON_RESULT_FAILURE {
+ handle_table.Remove(*out_server);
+ };
+
+ // Add the client session to the handle table.
+ R_RETURN(handle_table.Add(out_client, std::addressof(session->GetClientSession())));
+}
+
+} // namespace
+
+Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, bool is_light,
+ u64 name) {
+ if (is_light) {
+ // return CreateSession<KLightSession>(system, out_server, out_client, name);
+ R_THROW(ResultNotImplemented);
+ } else {
+ R_RETURN(CreateSession<KSession>(system, out_server, out_client, name));
+ }
+}
+
+Result AcceptSession(Core::System& system, Handle* out_handle, Handle port_handle) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result CreateSession64(Core::System& system, Handle* out_server_session_handle,
+ Handle* out_client_session_handle, bool is_light, uint64_t name) {
+ R_RETURN(CreateSession(system, out_server_session_handle, out_client_session_handle, is_light,
+ name));
+}
+
+Result AcceptSession64(Core::System& system, Handle* out_handle, Handle port) {
+ R_RETURN(AcceptSession(system, out_handle, port));
+}
+
+Result CreateSession64From32(Core::System& system, Handle* out_server_session_handle,
+ Handle* out_client_session_handle, bool is_light, uint32_t name) {
+ R_RETURN(CreateSession(system, out_server_session_handle, out_client_session_handle, is_light,
+ name));
+}
+
+Result AcceptSession64From32(Core::System& system, Handle* out_handle, Handle port) {
+ R_RETURN(AcceptSession(system, out_handle, port));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_shared_memory.cpp b/src/core/hle/kernel/svc/svc_shared_memory.cpp
new file mode 100644
index 000000000..a698596aa
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_shared_memory.cpp
@@ -0,0 +1,130 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_shared_memory.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidSharedMemoryPermission(MemoryPermission perm) {
+ switch (perm) {
+ case MemoryPermission::Read:
+ case MemoryPermission::ReadWrite:
+ return true;
+ default:
+ return false;
+ }
+}
+
+[[maybe_unused]] constexpr bool IsValidRemoteSharedMemoryPermission(MemoryPermission perm) {
+ return IsValidSharedMemoryPermission(perm) || perm == MemoryPermission::DontCare;
+}
+
+} // namespace
+
+Result MapSharedMemory(Core::System& system, Handle shmem_handle, u64 address, u64 size,
+ Svc::MemoryPermission map_perm) {
+ LOG_TRACE(Kernel_SVC,
+ "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
+ shmem_handle, address, size, map_perm);
+
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
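+    // NOTE: The (address < address + size) check rejects ranges that would wrap the
+    // 64-bit address space, e.g. address = 0xFFFF'FFFF'FFFF'F000 with size = 0x2000.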
+
+ // Validate the permission.
+ R_UNLESS(IsValidSharedMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
+
+ // Get the current process.
+ auto& process = GetCurrentProcess(system.Kernel());
+ auto& page_table = process.PageTable();
+
+ // Get the shared memory.
+ KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
+ R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
+
+ // Verify that the mapping is in range.
+ R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
+
+ // Add the shared memory to the process.
+ R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size));
+
+ // Ensure that we clean up the shared memory if we fail to map it.
+ ON_RESULT_FAILURE {
+ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
+ };
+
+ // Map the shared memory.
+ R_RETURN(shmem->Map(process, address, size, map_perm));
+}
+
+Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, u64 address, u64 size) {
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Get the current process.
+ auto& process = GetCurrentProcess(system.Kernel());
+ auto& page_table = process.PageTable();
+
+ // Get the shared memory.
+ KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
+ R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
+
+ // Verify that the mapping is in range.
+ R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
+
+ // Unmap the shared memory.
+ R_TRY(shmem->Unmap(process, address, size));
+
+ // Remove the shared memory from the process.
+ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
+
+ R_SUCCEED();
+}
+
+Result CreateSharedMemory(Core::System& system, Handle* out_handle, uint64_t size,
+ MemoryPermission owner_perm, MemoryPermission remote_perm) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result MapSharedMemory64(Core::System& system, Handle shmem_handle, uint64_t address, uint64_t size,
+ MemoryPermission map_perm) {
+ R_RETURN(MapSharedMemory(system, shmem_handle, address, size, map_perm));
+}
+
+Result UnmapSharedMemory64(Core::System& system, Handle shmem_handle, uint64_t address,
+ uint64_t size) {
+ R_RETURN(UnmapSharedMemory(system, shmem_handle, address, size));
+}
+
+Result CreateSharedMemory64(Core::System& system, Handle* out_handle, uint64_t size,
+ MemoryPermission owner_perm, MemoryPermission remote_perm) {
+ R_RETURN(CreateSharedMemory(system, out_handle, size, owner_perm, remote_perm));
+}
+
+Result MapSharedMemory64From32(Core::System& system, Handle shmem_handle, uint32_t address,
+ uint32_t size, MemoryPermission map_perm) {
+ R_RETURN(MapSharedMemory(system, shmem_handle, address, size, map_perm));
+}
+
+Result UnmapSharedMemory64From32(Core::System& system, Handle shmem_handle, uint32_t address,
+ uint32_t size) {
+ R_RETURN(UnmapSharedMemory(system, shmem_handle, address, size));
+}
+
+Result CreateSharedMemory64From32(Core::System& system, Handle* out_handle, uint32_t size,
+ MemoryPermission owner_perm, MemoryPermission remote_perm) {
+ R_RETURN(CreateSharedMemory(system, out_handle, size, owner_perm, remote_perm));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
new file mode 100644
index 000000000..53df5bcd8
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -0,0 +1,175 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "common/scratch_buffer.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Close a handle
+Result CloseHandle(Core::System& system, Handle handle) {
+ LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
+
+ // Remove the handle.
+ R_UNLESS(GetCurrentProcess(system.Kernel()).GetHandleTable().Remove(handle),
+ ResultInvalidHandle);
+
+ R_SUCCEED();
+}
+
+/// Clears the signaled state of an event or process.
+Result ResetSignal(Core::System& system, Handle handle) {
+ LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
+
+ // Get the current handle table.
+ const auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
+
+ // Try to reset as readable event.
+ {
+ KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle);
+ if (readable_event.IsNotNull()) {
+ R_RETURN(readable_event->Reset());
+ }
+ }
+
+ // Try to reset as process.
+ {
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
+ if (process.IsNotNull()) {
+ R_RETURN(process->Reset());
+ }
+ }
+
+ R_THROW(ResultInvalidHandle);
+}
+
+static Result WaitSynchronization(Core::System& system, int32_t* out_index, const Handle* handles,
+ int32_t num_handles, int64_t timeout_ns) {
+ // Ensure number of handles is valid.
+ R_UNLESS(0 <= num_handles && num_handles <= Svc::ArgumentHandleCountMax, ResultOutOfRange);
+
+ // Get the synchronization context.
+ auto& kernel = system.Kernel();
+ auto& handle_table = GetCurrentProcess(kernel).GetHandleTable();
+ std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> objs;
+
+ // Copy user handles.
+ if (num_handles > 0) {
+ // Convert the handles to objects.
+ R_UNLESS(handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles,
+ num_handles),
+ ResultInvalidHandle);
+ }
+
+ // Ensure handles are closed when we're done.
+ SCOPE_EXIT({
+ for (auto i = 0; i < num_handles; ++i) {
+ objs[i]->Close();
+ }
+ });
+
+ // Wait on the objects.
+ Result res =
+ KSynchronizationObject::Wait(kernel, out_index, objs.data(), num_handles, timeout_ns);
+
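+    // NOTE: A closed session is reported to the caller as a successful wait.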
+ R_SUCCEED_IF(res == ResultSessionClosed);
+ R_RETURN(res);
+}
+
+/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
+Result WaitSynchronization(Core::System& system, int32_t* out_index, u64 user_handles,
+ int32_t num_handles, int64_t timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called user_handles={:#x}, num_handles={}, timeout_ns={}", user_handles,
+ num_handles, timeout_ns);
+
+ // Ensure number of handles is valid.
+ R_UNLESS(0 <= num_handles && num_handles <= Svc::ArgumentHandleCountMax, ResultOutOfRange);
+ std::array<Handle, Svc::ArgumentHandleCountMax> handles;
+ if (num_handles > 0) {
+ GetCurrentMemory(system.Kernel())
+ .ReadBlock(user_handles, handles.data(), num_handles * sizeof(Handle));
+ }
+
+ R_RETURN(WaitSynchronization(system, out_index, handles.data(), num_handles, timeout_ns));
+}
+
+/// Resumes a thread waiting on WaitSynchronization
+Result CancelSynchronization(Core::System& system, Handle handle) {
+ LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KThread>(handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Cancel the thread's wait.
+ thread->WaitCancel();
+ R_SUCCEED();
+}
+
+void SynchronizePreemptionState(Core::System& system) {
+ auto& kernel = system.Kernel();
+
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // If the current thread is pinned, unpin it.
+ KProcess* cur_process = GetCurrentProcessPointer(kernel);
+ const auto core_id = GetCurrentCoreId(kernel);
+
+ if (cur_process->GetPinnedThread(core_id) == GetCurrentThreadPointer(kernel)) {
+ // Clear the current thread's interrupt flag.
+ GetCurrentThread(kernel).ClearInterruptFlag();
+
+ // Unpin the current thread.
+ cur_process->UnpinCurrentThread(core_id);
+ }
+}
+
+Result CloseHandle64(Core::System& system, Handle handle) {
+ R_RETURN(CloseHandle(system, handle));
+}
+
+Result ResetSignal64(Core::System& system, Handle handle) {
+ R_RETURN(ResetSignal(system, handle));
+}
+
+Result WaitSynchronization64(Core::System& system, int32_t* out_index, uint64_t handles,
+ int32_t num_handles, int64_t timeout_ns) {
+ R_RETURN(WaitSynchronization(system, out_index, handles, num_handles, timeout_ns));
+}
+
+Result CancelSynchronization64(Core::System& system, Handle handle) {
+ R_RETURN(CancelSynchronization(system, handle));
+}
+
+void SynchronizePreemptionState64(Core::System& system) {
+ SynchronizePreemptionState(system);
+}
+
+Result CloseHandle64From32(Core::System& system, Handle handle) {
+ R_RETURN(CloseHandle(system, handle));
+}
+
+Result ResetSignal64From32(Core::System& system, Handle handle) {
+ R_RETURN(ResetSignal(system, handle));
+}
+
+Result WaitSynchronization64From32(Core::System& system, int32_t* out_index, uint32_t handles,
+ int32_t num_handles, int64_t timeout_ns) {
+ R_RETURN(WaitSynchronization(system, out_index, handles, num_handles, timeout_ns));
+}
+
+Result CancelSynchronization64From32(Core::System& system, Handle handle) {
+ R_RETURN(CancelSynchronization(system, handle));
+}
+
+void SynchronizePreemptionState64From32(Core::System& system) {
+ SynchronizePreemptionState(system);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
new file mode 100644
index 000000000..36b94e6bf
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -0,0 +1,412 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidVirtualCoreId(int32_t core_id) {
+ return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
+}
+
+} // Anonymous namespace
+
+/// Creates a new thread
+Result CreateThread(Core::System& system, Handle* out_handle, u64 entry_point, u64 arg,
+ u64 stack_bottom, s32 priority, s32 core_id) {
+ LOG_DEBUG(Kernel_SVC,
+ "called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
+ "priority=0x{:08X}, core_id=0x{:08X}",
+ entry_point, arg, stack_bottom, priority, core_id);
+
+ // Adjust core id, if it's the default magic.
+ auto& kernel = system.Kernel();
+ auto& process = GetCurrentProcess(kernel);
+ if (core_id == IdealCoreUseProcessValue) {
+ core_id = process.GetIdealCoreId();
+ }
+
+ // Validate arguments.
+ R_UNLESS(IsValidVirtualCoreId(core_id), ResultInvalidCoreId);
+ R_UNLESS(((1ull << core_id) & process.GetCoreMask()) != 0, ResultInvalidCoreId);
+
+ R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority,
+ ResultInvalidPriority);
+ R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority);
+
+ // Reserve a new thread from the process resource limit (waiting up to 100ms).
+ KScopedResourceReservation thread_reservation(
+ std::addressof(process), LimitableResource::ThreadCountMax, 1,
+ system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
+ R_UNLESS(thread_reservation.Succeeded(), ResultLimitReached);
+
+ // Create the thread.
+ KThread* thread = KThread::Create(kernel);
+    R_UNLESS(thread != nullptr, ResultOutOfResource);
+ SCOPE_EXIT({ thread->Close(); });
+
+ // Initialize the thread.
+ {
+ KScopedLightLock lk{process.GetStateLock()};
+ R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom,
+ priority, core_id, std::addressof(process)));
+ }
+
+ // Commit the thread reservation.
+ thread_reservation.Commit();
+
+ // Clone the current fpu status to the new thread.
+ thread->CloneFpuStatus();
+
+ // Register the new thread.
+ KThread::Register(kernel, thread);
+
+ // Add the thread to the handle table.
+ R_RETURN(process.GetHandleTable().Add(out_handle, thread));
+}
+
+/// Starts the thread for the provided handle
+Result StartThread(Core::System& system, Handle thread_handle) {
+ LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Try to start the thread.
+ R_TRY(thread->Run());
+
+ // If we succeeded, persist a reference to the thread.
+ thread->Open();
+ system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
+
+ R_SUCCEED();
+}
+
+/// Called when a thread exits
+void ExitThread(Core::System& system) {
+ LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
+
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
+ system.GlobalSchedulerContext().RemoveThread(current_thread);
+ current_thread->Exit();
+ system.Kernel().UnregisterInUseObject(current_thread);
+}
+
+/// Sleep the current thread
+void SleepThread(Core::System& system, s64 nanoseconds) {
+ auto& kernel = system.Kernel();
+ const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);
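+    // Non-positive timeouts select a yield type rather than a sleep duration (e.g. a
+    // value of 0 requests YieldType::WithoutCoreMigration).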
+
+ LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
+
+    // When the input timeout is positive, sleep.
+ if (nanoseconds > 0) {
+ // Convert the timeout from nanoseconds to ticks.
+ // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
+
+ // Sleep.
+ // NOTE: Nintendo does not check the result of this sleep.
+ static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
+ } else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
+ KScheduler::YieldWithoutCoreMigration(kernel);
+ } else if (yield_type == Svc::YieldType::WithCoreMigration) {
+ KScheduler::YieldWithCoreMigration(kernel);
+ } else if (yield_type == Svc::YieldType::ToAnyThread) {
+ KScheduler::YieldToAnyThread(kernel);
+ } else {
+ // Nintendo does nothing at all if an otherwise invalid value is passed.
+ ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
+ }
+}
+
+/// Gets the thread context
+Result GetThreadContext3(Core::System& system, u64 out_context, Handle thread_handle) {
+ LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
+ thread_handle);
+
+ auto& kernel = system.Kernel();
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ GetCurrentProcess(kernel).GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+    // Require that the handle refers to a thread owned by the current process.
+ const auto* current_process = GetCurrentProcessPointer(kernel);
+ R_UNLESS(current_process == thread->GetOwnerProcess(), ResultInvalidId);
+
+ // Verify that the thread isn't terminated.
+ R_UNLESS(thread->GetState() != ThreadState::Terminated, ResultTerminationRequested);
+
+    // Check that the thread is not the current one.
+    // NOTE: Nintendo does not check this, and thus the following loop will deadlock.
+    R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(kernel), ResultInvalidId);
+
+ // Try to get the thread context until the thread isn't current on any core.
+ while (true) {
+ KScopedSchedulerLock sl{kernel};
+
+ // TODO(bunnei): Enforce that thread is suspended for debug here.
+
+ // If the thread's raw state isn't runnable, check if it's current on some core.
+ if (thread->GetRawState() != ThreadState::Runnable) {
+ bool current = false;
+ for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
+ current = true;
+ break;
+ }
+ }
+
+ // If the thread is current, retry until it isn't.
+ if (current) {
+ continue;
+ }
+ }
+
+ // Get the thread context.
+ static thread_local Common::ScratchBuffer<u8> context;
+ R_TRY(thread->GetThreadContext3(context));
+
+ // Copy the thread context to user space.
+ GetCurrentMemory(kernel).WriteBlock(out_context, context.data(), context.size());
+
+ R_SUCCEED();
+ }
+}
+
+/// Gets the priority for the specified thread
+Result GetThreadPriority(Core::System& system, s32* out_priority, Handle handle) {
+ LOG_TRACE(Kernel_SVC, "called");
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KThread>(handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Get the thread's priority.
+ *out_priority = thread->GetPriority();
+ R_SUCCEED();
+}
+
+/// Sets the priority for the specified thread
+Result SetThreadPriority(Core::System& system, Handle thread_handle, s32 priority) {
+ // Get the current process.
+ KProcess& process = GetCurrentProcess(system.Kernel());
+
+ // Validate the priority.
+ R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority,
+ ResultInvalidPriority);
+ R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread = process.GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Set the thread priority.
+ thread->SetBasePriority(priority);
+ R_SUCCEED();
+}
+
+Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_ids,
+ s32 out_thread_ids_size, Handle debug_handle) {
+ // TODO: Handle this case when debug events are supported.
+ UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
+
+ LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
+ out_thread_ids, out_thread_ids_size);
+
+    // Reject a size that is negative or larger than INT32_MAX / sizeof(u64).
+ if ((out_thread_ids_size & 0xF0000000) != 0) {
+ LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
+ out_thread_ids_size);
+ R_THROW(ResultOutOfRange);
+ }
+
+ auto* const current_process = GetCurrentProcessPointer(system.Kernel());
+ const auto total_copy_size = out_thread_ids_size * sizeof(u64);
+
+ if (out_thread_ids_size > 0 &&
+ !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) {
+ LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+ out_thread_ids, out_thread_ids + total_copy_size);
+ R_THROW(ResultInvalidCurrentMemory);
+ }
+
+ auto& memory = GetCurrentMemory(system.Kernel());
+ const auto& thread_list = current_process->GetThreadList();
+ const auto num_threads = thread_list.size();
+ const auto copy_amount = std::min(static_cast<std::size_t>(out_thread_ids_size), num_threads);
+
+ auto list_iter = thread_list.cbegin();
+ for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
+ memory.Write64(out_thread_ids, (*list_iter)->GetThreadId());
+ out_thread_ids += sizeof(u64);
+ }
+
+    *out_num_threads = static_cast<s32>(num_threads);
+ R_SUCCEED();
+}
+
+Result GetThreadCoreMask(Core::System& system, s32* out_core_id, u64* out_affinity_mask,
+ Handle thread_handle) {
+ LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Get the core mask.
+ R_RETURN(thread->GetCoreMask(out_core_id, out_affinity_mask));
+}
+
+Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
+ u64 affinity_mask) {
+ // Determine the core id/affinity mask.
+ if (core_id == IdealCoreUseProcessValue) {
+ core_id = GetCurrentProcess(system.Kernel()).GetIdealCoreId();
+ affinity_mask = (1ULL << core_id);
+ } else {
+ // Validate the affinity mask.
+ const u64 process_core_mask = GetCurrentProcess(system.Kernel()).GetCoreMask();
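+        // NOTE: (mask | allowed) == allowed holds exactly when mask is a subset of allowed.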
+ R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, ResultInvalidCoreId);
+ R_UNLESS(affinity_mask != 0, ResultInvalidCombination);
+
+ // Validate the core id.
+ if (IsValidVirtualCoreId(core_id)) {
+ R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, ResultInvalidCombination);
+ } else {
+ R_UNLESS(core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare,
+ ResultInvalidCoreId);
+ }
+ }
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Set the core mask.
+ R_RETURN(thread->SetCoreMask(core_id, affinity_mask));
+}
+
+/// Get the ID for the specified thread.
+Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Get the thread's id.
+ *out_thread_id = thread->GetId();
+ R_SUCCEED();
+}
+
+Result CreateThread64(Core::System& system, Handle* out_handle, uint64_t func, uint64_t arg,
+ uint64_t stack_bottom, int32_t priority, int32_t core_id) {
+ R_RETURN(CreateThread(system, out_handle, func, arg, stack_bottom, priority, core_id));
+}
+
+Result StartThread64(Core::System& system, Handle thread_handle) {
+ R_RETURN(StartThread(system, thread_handle));
+}
+
+void ExitThread64(Core::System& system) {
+ return ExitThread(system);
+}
+
+void SleepThread64(Core::System& system, int64_t ns) {
+ return SleepThread(system, ns);
+}
+
+Result GetThreadPriority64(Core::System& system, int32_t* out_priority, Handle thread_handle) {
+ R_RETURN(GetThreadPriority(system, out_priority, thread_handle));
+}
+
+Result SetThreadPriority64(Core::System& system, Handle thread_handle, int32_t priority) {
+ R_RETURN(SetThreadPriority(system, thread_handle, priority));
+}
+
+Result GetThreadCoreMask64(Core::System& system, int32_t* out_core_id, uint64_t* out_affinity_mask,
+ Handle thread_handle) {
+ R_RETURN(GetThreadCoreMask(system, out_core_id, out_affinity_mask, thread_handle));
+}
+
+Result SetThreadCoreMask64(Core::System& system, Handle thread_handle, int32_t core_id,
+ uint64_t affinity_mask) {
+ R_RETURN(SetThreadCoreMask(system, thread_handle, core_id, affinity_mask));
+}
+
+Result GetThreadId64(Core::System& system, uint64_t* out_thread_id, Handle thread_handle) {
+ R_RETURN(GetThreadId(system, out_thread_id, thread_handle));
+}
+
+Result GetThreadContext364(Core::System& system, uint64_t out_context, Handle thread_handle) {
+ R_RETURN(GetThreadContext3(system, out_context, thread_handle));
+}
+
+Result GetThreadList64(Core::System& system, int32_t* out_num_threads, uint64_t out_thread_ids,
+ int32_t max_out_count, Handle debug_handle) {
+ R_RETURN(GetThreadList(system, out_num_threads, out_thread_ids, max_out_count, debug_handle));
+}
+
+Result CreateThread64From32(Core::System& system, Handle* out_handle, uint32_t func, uint32_t arg,
+ uint32_t stack_bottom, int32_t priority, int32_t core_id) {
+ R_RETURN(CreateThread(system, out_handle, func, arg, stack_bottom, priority, core_id));
+}
+
+Result StartThread64From32(Core::System& system, Handle thread_handle) {
+ R_RETURN(StartThread(system, thread_handle));
+}
+
+void ExitThread64From32(Core::System& system) {
+ return ExitThread(system);
+}
+
+void SleepThread64From32(Core::System& system, int64_t ns) {
+ return SleepThread(system, ns);
+}
+
+Result GetThreadPriority64From32(Core::System& system, int32_t* out_priority,
+ Handle thread_handle) {
+ R_RETURN(GetThreadPriority(system, out_priority, thread_handle));
+}
+
+Result SetThreadPriority64From32(Core::System& system, Handle thread_handle, int32_t priority) {
+ R_RETURN(SetThreadPriority(system, thread_handle, priority));
+}
+
+Result GetThreadCoreMask64From32(Core::System& system, int32_t* out_core_id,
+ uint64_t* out_affinity_mask, Handle thread_handle) {
+ R_RETURN(GetThreadCoreMask(system, out_core_id, out_affinity_mask, thread_handle));
+}
+
+Result SetThreadCoreMask64From32(Core::System& system, Handle thread_handle, int32_t core_id,
+ uint64_t affinity_mask) {
+ R_RETURN(SetThreadCoreMask(system, thread_handle, core_id, affinity_mask));
+}
+
+Result GetThreadId64From32(Core::System& system, uint64_t* out_thread_id, Handle thread_handle) {
+ R_RETURN(GetThreadId(system, out_thread_id, thread_handle));
+}
+
+Result GetThreadContext364From32(Core::System& system, uint32_t out_context, Handle thread_handle) {
+ R_RETURN(GetThreadContext3(system, out_context, thread_handle));
+}
+
+Result GetThreadList64From32(Core::System& system, int32_t* out_num_threads,
+ uint32_t out_thread_ids, int32_t max_out_count, Handle debug_handle) {
+ R_RETURN(GetThreadList(system, out_num_threads, out_thread_ids, max_out_count, debug_handle));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_thread_profiler.cpp b/src/core/hle/kernel/svc/svc_thread_profiler.cpp
new file mode 100644
index 000000000..40de7708b
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_thread_profiler.cpp
@@ -0,0 +1,60 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+Result GetDebugFutureThreadInfo(Core::System& system, lp64::LastThreadContext* out_context,
+ uint64_t* out_thread_id, Handle debug_handle, int64_t ns) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result GetLastThreadInfo(Core::System& system, lp64::LastThreadContext* out_context,
+ uint64_t* out_tls_address, uint32_t* out_flags) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result GetDebugFutureThreadInfo64(Core::System& system, lp64::LastThreadContext* out_context,
+ uint64_t* out_thread_id, Handle debug_handle, int64_t ns) {
+ R_RETURN(GetDebugFutureThreadInfo(system, out_context, out_thread_id, debug_handle, ns));
+}
+
+Result GetLastThreadInfo64(Core::System& system, lp64::LastThreadContext* out_context,
+ uint64_t* out_tls_address, uint32_t* out_flags) {
+ R_RETURN(GetLastThreadInfo(system, out_context, out_tls_address, out_flags));
+}
+
+Result GetDebugFutureThreadInfo64From32(Core::System& system, ilp32::LastThreadContext* out_context,
+ uint64_t* out_thread_id, Handle debug_handle, int64_t ns) {
+ lp64::LastThreadContext context{};
+ R_TRY(
+ GetDebugFutureThreadInfo(system, std::addressof(context), out_thread_id, debug_handle, ns));
+
+ *out_context = {
+ .fp = static_cast<u32>(context.fp),
+ .sp = static_cast<u32>(context.sp),
+ .lr = static_cast<u32>(context.lr),
+ .pc = static_cast<u32>(context.pc),
+ };
+ R_SUCCEED();
+}
+
+Result GetLastThreadInfo64From32(Core::System& system, ilp32::LastThreadContext* out_context,
+ uint64_t* out_tls_address, uint32_t* out_flags) {
+ lp64::LastThreadContext context{};
+ R_TRY(GetLastThreadInfo(system, std::addressof(context), out_tls_address, out_flags));
+
+ *out_context = {
+ .fp = static_cast<u32>(context.fp),
+ .sp = static_cast<u32>(context.sp),
+ .lr = static_cast<u32>(context.lr),
+ .pc = static_cast<u32>(context.pc),
+ };
+ R_SUCCEED();
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_tick.cpp b/src/core/hle/kernel/svc/svc_tick.cpp
new file mode 100644
index 000000000..7dd7c6e51
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_tick.cpp
@@ -0,0 +1,27 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Returns the total CPU ticks elapsed since the CPU was powered on.
+int64_t GetSystemTick(Core::System& system) {
+ LOG_TRACE(Kernel_SVC, "called");
+
+ // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
+ return static_cast<int64_t>(system.CoreTiming().GetClockTicks());
+}
+
+int64_t GetSystemTick64(Core::System& system) {
+ return GetSystemTick(system);
+}
+
+int64_t GetSystemTick64From32(Core::System& system) {
+ return GetSystemTick(system);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_transfer_memory.cpp b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
new file mode 100644
index 000000000..82d469a37
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
@@ -0,0 +1,115 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_transfer_memory.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
+ switch (perm) {
+ case MemoryPermission::None:
+ case MemoryPermission::Read:
+ case MemoryPermission::ReadWrite:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // Anonymous namespace
+
+/// Creates a TransferMemory object
+Result CreateTransferMemory(Core::System& system, Handle* out, u64 address, u64 size,
+ MemoryPermission map_perm) {
+ auto& kernel = system.Kernel();
+
+ // Validate the size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Validate the permissions.
+ R_UNLESS(IsValidTransferMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
+
+ // Get the current process and handle table.
+ auto& process = GetCurrentProcess(kernel);
+ auto& handle_table = process.GetHandleTable();
+
+ // Reserve a new transfer memory from the process resource limit.
+ KScopedResourceReservation trmem_reservation(std::addressof(process),
+ LimitableResource::TransferMemoryCountMax);
+ R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached);
+
+ // Create the transfer memory.
+ KTransferMemory* trmem = KTransferMemory::Create(kernel);
+ R_UNLESS(trmem != nullptr, ResultOutOfResource);
+
+ // Ensure the only reference is in the handle table when we're done.
+ SCOPE_EXIT({ trmem->Close(); });
+
+ // Ensure that the region is in range.
+ R_UNLESS(process.PageTable().Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Initialize the transfer memory.
+ R_TRY(trmem->Initialize(address, size, map_perm));
+
+ // Commit the reservation.
+ trmem_reservation.Commit();
+
+ // Register the transfer memory.
+ KTransferMemory::Register(kernel, trmem);
+
+ // Add the transfer memory to the handle table.
+ R_RETURN(handle_table.Add(out, trmem));
+}
+
+Result MapTransferMemory(Core::System& system, Handle trmem_handle, uint64_t address, uint64_t size,
+ MemoryPermission owner_perm) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result UnmapTransferMemory(Core::System& system, Handle trmem_handle, uint64_t address,
+ uint64_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result MapTransferMemory64(Core::System& system, Handle trmem_handle, uint64_t address,
+ uint64_t size, MemoryPermission owner_perm) {
+ R_RETURN(MapTransferMemory(system, trmem_handle, address, size, owner_perm));
+}
+
+Result UnmapTransferMemory64(Core::System& system, Handle trmem_handle, uint64_t address,
+ uint64_t size) {
+ R_RETURN(UnmapTransferMemory(system, trmem_handle, address, size));
+}
+
+Result CreateTransferMemory64(Core::System& system, Handle* out_handle, uint64_t address,
+ uint64_t size, MemoryPermission map_perm) {
+ R_RETURN(CreateTransferMemory(system, out_handle, address, size, map_perm));
+}
+
+Result MapTransferMemory64From32(Core::System& system, Handle trmem_handle, uint32_t address,
+ uint32_t size, MemoryPermission owner_perm) {
+ R_RETURN(MapTransferMemory(system, trmem_handle, address, size, owner_perm));
+}
+
+Result UnmapTransferMemory64From32(Core::System& system, Handle trmem_handle, uint32_t address,
+ uint32_t size) {
+ R_RETURN(UnmapTransferMemory(system, trmem_handle, address, size));
+}
+
+Result CreateTransferMemory64From32(Core::System& system, Handle* out_handle, uint32_t address,
+ uint32_t size, MemoryPermission map_perm) {
+ R_RETURN(CreateTransferMemory(system, out_handle, address, size, map_perm));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_generator.py b/src/core/hle/kernel/svc_generator.py
new file mode 100644
index 000000000..7fcbb1ba1
--- /dev/null
+++ b/src/core/hle/kernel/svc_generator.py
@@ -0,0 +1,716 @@
+# SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# Raw SVC definitions from the kernel.
+#
+# Avoid modifying the prototypes; see below for how to customize generation
+# for a given typename.
+SVCS = [
+ [0x01, "Result SetHeapSize(Address* out_address, Size size);"],
+ [0x02, "Result SetMemoryPermission(Address address, Size size, MemoryPermission perm);"],
+ [0x03, "Result SetMemoryAttribute(Address address, Size size, uint32_t mask, uint32_t attr);"],
+ [0x04, "Result MapMemory(Address dst_address, Address src_address, Size size);"],
+ [0x05, "Result UnmapMemory(Address dst_address, Address src_address, Size size);"],
+ [0x06, "Result QueryMemory(Address out_memory_info, PageInfo* out_page_info, Address address);"],
+ [0x07, "void ExitProcess();"],
+ [0x08, "Result CreateThread(Handle* out_handle, ThreadFunc func, Address arg, Address stack_bottom, int32_t priority, int32_t core_id);"],
+ [0x09, "Result StartThread(Handle thread_handle);"],
+ [0x0A, "void ExitThread();"],
+ [0x0B, "void SleepThread(int64_t ns);"],
+ [0x0C, "Result GetThreadPriority(int32_t* out_priority, Handle thread_handle);"],
+ [0x0D, "Result SetThreadPriority(Handle thread_handle, int32_t priority);"],
+ [0x0E, "Result GetThreadCoreMask(int32_t* out_core_id, uint64_t* out_affinity_mask, Handle thread_handle);"],
+ [0x0F, "Result SetThreadCoreMask(Handle thread_handle, int32_t core_id, uint64_t affinity_mask);"],
+ [0x10, "int32_t GetCurrentProcessorNumber();"],
+ [0x11, "Result SignalEvent(Handle event_handle);"],
+ [0x12, "Result ClearEvent(Handle event_handle);"],
+ [0x13, "Result MapSharedMemory(Handle shmem_handle, Address address, Size size, MemoryPermission map_perm);"],
+ [0x14, "Result UnmapSharedMemory(Handle shmem_handle, Address address, Size size);"],
+ [0x15, "Result CreateTransferMemory(Handle* out_handle, Address address, Size size, MemoryPermission map_perm);"],
+ [0x16, "Result CloseHandle(Handle handle);"],
+ [0x17, "Result ResetSignal(Handle handle);"],
+ [0x18, "Result WaitSynchronization(int32_t* out_index, Address handles, int32_t num_handles, int64_t timeout_ns);"],
+ [0x19, "Result CancelSynchronization(Handle handle);"],
+ [0x1A, "Result ArbitrateLock(Handle thread_handle, Address address, uint32_t tag);"],
+ [0x1B, "Result ArbitrateUnlock(Address address);"],
+ [0x1C, "Result WaitProcessWideKeyAtomic(Address address, Address cv_key, uint32_t tag, int64_t timeout_ns);"],
+ [0x1D, "void SignalProcessWideKey(Address cv_key, int32_t count);"],
+ [0x1E, "int64_t GetSystemTick();"],
+ [0x1F, "Result ConnectToNamedPort(Handle* out_handle, Address name);"],
+ [0x20, "Result SendSyncRequestLight(Handle session_handle);"],
+ [0x21, "Result SendSyncRequest(Handle session_handle);"],
+ [0x22, "Result SendSyncRequestWithUserBuffer(Address message_buffer, Size message_buffer_size, Handle session_handle);"],
+ [0x23, "Result SendAsyncRequestWithUserBuffer(Handle* out_event_handle, Address message_buffer, Size message_buffer_size, Handle session_handle);"],
+ [0x24, "Result GetProcessId(uint64_t* out_process_id, Handle process_handle);"],
+ [0x25, "Result GetThreadId(uint64_t* out_thread_id, Handle thread_handle);"],
+ [0x26, "void Break(BreakReason break_reason, Address arg, Size size);"],
+ [0x27, "Result OutputDebugString(Address debug_str, Size len);"],
+ [0x28, "void ReturnFromException(Result result);"],
+ [0x29, "Result GetInfo(uint64_t* out, InfoType info_type, Handle handle, uint64_t info_subtype);"],
+ [0x2A, "void FlushEntireDataCache();"],
+ [0x2B, "Result FlushDataCache(Address address, Size size);"],
+ [0x2C, "Result MapPhysicalMemory(Address address, Size size);"],
+ [0x2D, "Result UnmapPhysicalMemory(Address address, Size size);"],
+ [0x2E, "Result GetDebugFutureThreadInfo(LastThreadContext* out_context, uint64_t* out_thread_id, Handle debug_handle, int64_t ns);"],
+ [0x2F, "Result GetLastThreadInfo(LastThreadContext* out_context, Address* out_tls_address, uint32_t* out_flags);"],
+ [0x30, "Result GetResourceLimitLimitValue(int64_t* out_limit_value, Handle resource_limit_handle, LimitableResource which);"],
+ [0x31, "Result GetResourceLimitCurrentValue(int64_t* out_current_value, Handle resource_limit_handle, LimitableResource which);"],
+ [0x32, "Result SetThreadActivity(Handle thread_handle, ThreadActivity thread_activity);"],
+ [0x33, "Result GetThreadContext3(Address out_context, Handle thread_handle);"],
+ [0x34, "Result WaitForAddress(Address address, ArbitrationType arb_type, int32_t value, int64_t timeout_ns);"],
+ [0x35, "Result SignalToAddress(Address address, SignalType signal_type, int32_t value, int32_t count);"],
+ [0x36, "void SynchronizePreemptionState();"],
+ [0x37, "Result GetResourceLimitPeakValue(int64_t* out_peak_value, Handle resource_limit_handle, LimitableResource which);"],
+
+ [0x39, "Result CreateIoPool(Handle* out_handle, IoPoolType which);"],
+ [0x3A, "Result CreateIoRegion(Handle* out_handle, Handle io_pool, PhysicalAddress physical_address, Size size, MemoryMapping mapping, MemoryPermission perm);"],
+
+ [0x3C, "void KernelDebug(KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2);"],
+ [0x3D, "void ChangeKernelTraceState(KernelTraceState kern_trace_state);"],
+
+ [0x40, "Result CreateSession(Handle* out_server_session_handle, Handle* out_client_session_handle, bool is_light, Address name);"],
+ [0x41, "Result AcceptSession(Handle* out_handle, Handle port);"],
+ [0x42, "Result ReplyAndReceiveLight(Handle handle);"],
+ [0x43, "Result ReplyAndReceive(int32_t* out_index, Address handles, int32_t num_handles, Handle reply_target, int64_t timeout_ns);"],
+ [0x44, "Result ReplyAndReceiveWithUserBuffer(int32_t* out_index, Address message_buffer, Size message_buffer_size, Address handles, int32_t num_handles, Handle reply_target, int64_t timeout_ns);"],
+ [0x45, "Result CreateEvent(Handle* out_write_handle, Handle* out_read_handle);"],
+ [0x46, "Result MapIoRegion(Handle io_region, Address address, Size size, MemoryPermission perm);"],
+ [0x47, "Result UnmapIoRegion(Handle io_region, Address address, Size size);"],
+ [0x48, "Result MapPhysicalMemoryUnsafe(Address address, Size size);"],
+ [0x49, "Result UnmapPhysicalMemoryUnsafe(Address address, Size size);"],
+ [0x4A, "Result SetUnsafeLimit(Size limit);"],
+ [0x4B, "Result CreateCodeMemory(Handle* out_handle, Address address, Size size);"],
+ [0x4C, "Result ControlCodeMemory(Handle code_memory_handle, CodeMemoryOperation operation, uint64_t address, uint64_t size, MemoryPermission perm);"],
+ [0x4D, "void SleepSystem();"],
+ [0x4E, "Result ReadWriteRegister(uint32_t* out_value, PhysicalAddress address, uint32_t mask, uint32_t value);"],
+ [0x4F, "Result SetProcessActivity(Handle process_handle, ProcessActivity process_activity);"],
+ [0x50, "Result CreateSharedMemory(Handle* out_handle, Size size, MemoryPermission owner_perm, MemoryPermission remote_perm);"],
+ [0x51, "Result MapTransferMemory(Handle trmem_handle, Address address, Size size, MemoryPermission owner_perm);"],
+ [0x52, "Result UnmapTransferMemory(Handle trmem_handle, Address address, Size size);"],
+ [0x53, "Result CreateInterruptEvent(Handle* out_read_handle, int32_t interrupt_id, InterruptType interrupt_type);"],
+ [0x54, "Result QueryPhysicalAddress(PhysicalMemoryInfo* out_info, Address address);"],
+ [0x55, "Result QueryIoMapping(Address* out_address, Size* out_size, PhysicalAddress physical_address, Size size);"],
+ [0x56, "Result CreateDeviceAddressSpace(Handle* out_handle, uint64_t das_address, uint64_t das_size);"],
+ [0x57, "Result AttachDeviceAddressSpace(DeviceName device_name, Handle das_handle);"],
+ [0x58, "Result DetachDeviceAddressSpace(DeviceName device_name, Handle das_handle);"],
+ [0x59, "Result MapDeviceAddressSpaceByForce(Handle das_handle, Handle process_handle, uint64_t process_address, Size size, uint64_t device_address, uint32_t option);"],
+ [0x5A, "Result MapDeviceAddressSpaceAligned(Handle das_handle, Handle process_handle, uint64_t process_address, Size size, uint64_t device_address, uint32_t option);"],
+ [0x5C, "Result UnmapDeviceAddressSpace(Handle das_handle, Handle process_handle, uint64_t process_address, Size size, uint64_t device_address);"],
+ [0x5D, "Result InvalidateProcessDataCache(Handle process_handle, uint64_t address, uint64_t size);"],
+ [0x5E, "Result StoreProcessDataCache(Handle process_handle, uint64_t address, uint64_t size);"],
+ [0x5F, "Result FlushProcessDataCache(Handle process_handle, uint64_t address, uint64_t size);"],
+ [0x60, "Result DebugActiveProcess(Handle* out_handle, uint64_t process_id);"],
+ [0x61, "Result BreakDebugProcess(Handle debug_handle);"],
+ [0x62, "Result TerminateDebugProcess(Handle debug_handle);"],
+ [0x63, "Result GetDebugEvent(Address out_info, Handle debug_handle);"],
+ [0x64, "Result ContinueDebugEvent(Handle debug_handle, uint32_t flags, Address thread_ids, int32_t num_thread_ids);"],
+ [0x65, "Result GetProcessList(int32_t* out_num_processes, Address out_process_ids, int32_t max_out_count);"],
+ [0x66, "Result GetThreadList(int32_t* out_num_threads, Address out_thread_ids, int32_t max_out_count, Handle debug_handle);"],
+ [0x67, "Result GetDebugThreadContext(Address out_context, Handle debug_handle, uint64_t thread_id, uint32_t context_flags);"],
+ [0x68, "Result SetDebugThreadContext(Handle debug_handle, uint64_t thread_id, Address context, uint32_t context_flags);"],
+ [0x69, "Result QueryDebugProcessMemory(Address out_memory_info, PageInfo* out_page_info, Handle process_handle, Address address);"],
+ [0x6A, "Result ReadDebugProcessMemory(Address buffer, Handle debug_handle, Address address, Size size);"],
+ [0x6B, "Result WriteDebugProcessMemory(Handle debug_handle, Address buffer, Address address, Size size);"],
+ [0x6C, "Result SetHardwareBreakPoint(HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value);"],
+ [0x6D, "Result GetDebugThreadParam(uint64_t* out_64, uint32_t* out_32, Handle debug_handle, uint64_t thread_id, DebugThreadParam param);"],
+
+ [0x6F, "Result GetSystemInfo(uint64_t* out, SystemInfoType info_type, Handle handle, uint64_t info_subtype);"],
+ [0x70, "Result CreatePort(Handle* out_server_handle, Handle* out_client_handle, int32_t max_sessions, bool is_light, Address name);"],
+ [0x71, "Result ManageNamedPort(Handle* out_server_handle, Address name, int32_t max_sessions);"],
+ [0x72, "Result ConnectToPort(Handle* out_handle, Handle port);"],
+ [0x73, "Result SetProcessMemoryPermission(Handle process_handle, uint64_t address, uint64_t size, MemoryPermission perm);"],
+ [0x74, "Result MapProcessMemory(Address dst_address, Handle process_handle, uint64_t src_address, Size size);"],
+ [0x75, "Result UnmapProcessMemory(Address dst_address, Handle process_handle, uint64_t src_address, Size size);"],
+ [0x76, "Result QueryProcessMemory(Address out_memory_info, PageInfo* out_page_info, Handle process_handle, uint64_t address);"],
+ [0x77, "Result MapProcessCodeMemory(Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size);"],
+ [0x78, "Result UnmapProcessCodeMemory(Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size);"],
+ [0x79, "Result CreateProcess(Handle* out_handle, Address parameters, Address caps, int32_t num_caps);"],
+ [0x7A, "Result StartProcess(Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size);"],
+ [0x7B, "Result TerminateProcess(Handle process_handle);"],
+ [0x7C, "Result GetProcessInfo(int64_t* out_info, Handle process_handle, ProcessInfoType info_type);"],
+ [0x7D, "Result CreateResourceLimit(Handle* out_handle);"],
+ [0x7E, "Result SetResourceLimitLimitValue(Handle resource_limit_handle, LimitableResource which, int64_t limit_value);"],
+ [0x7F, "void CallSecureMonitor(SecureMonitorArguments args);"],
+
+ [0x90, "Result MapInsecureMemory(Address address, Size size);"],
+ [0x91, "Result UnmapInsecureMemory(Address address, Size size);"],
+]
+
+# These use a custom ABI and therefore require hand-written wrappers.
+SKIP_WRAPPERS = {
+ 0x20: "SendSyncRequestLight",
+ 0x42: "ReplyAndReceiveLight",
+ 0x7F: "CallSecureMonitor",
+}
+
+BIT_32 = 0
+BIT_64 = 1
+
+REG_SIZES = [4, 8]
+SUFFIX_NAMES = ["64From32", "64"]
+TYPE_SIZES = {
+ # SVC types
+ "ArbitrationType": 4,
+ "BreakReason": 4,
+ "CodeMemoryOperation": 4,
+ "DebugThreadParam": 4,
+ "DeviceName": 4,
+ "HardwareBreakPointRegisterName": 4,
+ "Handle": 4,
+ "InfoType": 4,
+ "InterruptType": 4,
+ "IoPoolType": 4,
+ "KernelDebugType": 4,
+ "KernelTraceState": 4,
+ "LimitableResource": 4,
+ "MemoryMapping": 4,
+ "MemoryPermission": 4,
+ "PageInfo": 4,
+ "ProcessActivity": 4,
+ "ProcessInfoType": 4,
+ "Result": 4,
+ "SignalType": 4,
+ "SystemInfoType": 4,
+ "ThreadActivity": 4,
+
+ # Arch-specific types
+ "ilp32::LastThreadContext": 16,
+ "ilp32::PhysicalMemoryInfo": 16,
+ "ilp32::SecureMonitorArguments": 32,
+ "lp64::LastThreadContext": 32,
+ "lp64::PhysicalMemoryInfo": 24,
+ "lp64::SecureMonitorArguments": 64,
+
+ # Generic types
+ "bool": 1,
+ "int32_t": 4,
+ "int64_t": 8,
+ "uint32_t": 4,
+ "uint64_t": 8,
+ "void": 0,
+}
+
+TYPE_REPLACEMENTS = {
+ "Address": ["uint32_t", "uint64_t"],
+ "LastThreadContext": ["ilp32::LastThreadContext", "lp64::LastThreadContext"],
+ "PhysicalAddress": ["uint64_t", "uint64_t"],
+ "PhysicalMemoryInfo": ["ilp32::PhysicalMemoryInfo", "lp64::PhysicalMemoryInfo"],
+ "SecureMonitorArguments": ["ilp32::SecureMonitorArguments", "lp64::SecureMonitorArguments"],
+ "Size": ["uint32_t", "uint64_t"],
+ "ThreadFunc": ["uint32_t", "uint64_t"],
+}
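+
+# For example, an "Address" parameter is emitted as uint32_t in the 64From32
+# wrappers and as uint64_t in the native 64-bit wrappers.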
+
+# Statically verify that the hardcoded sizes match the intended
+# sizes in C++.
+def emit_size_check():
+ lines = []
+
+ for type, size in TYPE_SIZES.items():
+ if type != "void":
+ lines.append(f"static_assert(sizeof({type}) == {size});")
+
+ return "\n".join(lines)
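+
+# For example, the TYPE_SIZES entry ("Handle", 4) produces the generated line:
+#   static_assert(sizeof(Handle) == 4);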
+
+
+# Replaces a type with an arch-specific one, if it exists.
+def substitute_type(name, bitness):
+ if name in TYPE_REPLACEMENTS:
+ return TYPE_REPLACEMENTS[name][bitness]
+ else:
+ return name
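+
+# e.g. substitute_type("Size", BIT_32) == "uint32_t", while a type with no
+# replacement entry (such as "Handle") is returned unchanged.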
+
+
+class Argument:
+ def __init__(self, type_name, var_name, is_output, is_outptr, is_address):
+ self.type_name = type_name
+ self.var_name = var_name
+ self.is_output = is_output
+ self.is_outptr = is_outptr
+ self.is_address = is_address
+
+
+# Parses C-style string declarations for SVCs.
+def parse_declaration(declaration, bitness):
+ return_type, rest = declaration.split(" ", 1)
+ func_name, rest = rest.split("(", 1)
+ arg_names, rest = rest.split(")", 1)
+ argument_types = []
+
+ return_type = substitute_type(return_type, bitness)
+ assert return_type in TYPE_SIZES, f"Unknown type '{return_type}'"
+
+ if arg_names:
+ for arg_name in arg_names.split(", "):
+ type_name, var_name = arg_name.replace("*", "").split(" ", 1)
+
+ # All outputs must contain out_ in the name.
+ is_output = var_name == "out" or var_name.find("out_") != -1
+
+ # User-pointer outputs are not written to registers.
+ is_outptr = is_output and arg_name.find("*") == -1
+
+ # Special handling is performed for output addresses to avoid awkwardness
+ # in conversion for the 32-bit equivalents.
+ is_address = is_output and not is_outptr and \
+ type_name in ["Address", "Size"]
+ type_name = substitute_type(type_name, bitness)
+
+ assert type_name in TYPE_SIZES, f"Unknown type '{type_name}'"
+
+ argument_types.append(
+ Argument(type_name, var_name, is_output, is_outptr, is_address))
+
+ return (return_type, func_name, argument_types)
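+
+# For example, parsing "Result GetThreadId(uint64_t* out_thread_id, Handle thread_handle);"
+# with BIT_64 yields ("Result", "GetThreadId", [...]), where out_thread_id is marked as a
+# register output and thread_handle as an input.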
+
+
+class RegisterAllocator:
+ def __init__(self, num_regs, byte_size, parameter_count):
+ self.registers = {}
+ self.num_regs = num_regs
+ self.byte_size = byte_size
+ self.parameter_count = parameter_count
+
+ # Mark the given register as allocated, for use in layout
+ # calculation if the NGRN exceeds the ABI parameter count.
+ def allocate(self, i):
+ assert i not in self.registers, f"Register R{i} already allocated"
+ self.registers[i] = True
+ return i
+
+ # Calculate the next available location for a register;
+ # the NGRN has exceeded the ABI parameter count.
+ def allocate_first_free(self):
+ for i in range(0, self.num_regs):
+ if i in self.registers:
+ continue
+
+ self.allocate(i)
+ return i
+
+ assert False, "No registers available"
+
+ # Add a single register at the given NGRN.
+ # If the index exceeds the ABI parameter count, try to find a
+ # location to add it. Returns the output location and increment.
+ def add_single(self, ngrn):
+ if ngrn >= self.parameter_count:
+ return (self.allocate_first_free(), 0)
+ else:
+ return (self.allocate(ngrn), 1)
+
+ # Add registers at the given NGRN for a data type of
+ # the given size. Returns the output locations and increment.
+ def add(self, ngrn, data_size, align=True):
+ if data_size <= self.byte_size:
+ r, i = self.add_single(ngrn)
+ return ([r], i)
+
+ regs = []
+ inc = ngrn % 2 if align else 0
+ remaining_size = data_size
+ while remaining_size > 0:
+ r, i = self.add_single(ngrn + inc)
+ regs.append(r)
+ inc += i
+ remaining_size -= self.byte_size
+
+ return (regs, inc)
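+
+    # e.g. with aapcs32 (byte_size=4), add(1, 8) aligns the value to an even register
+    # pair and returns ([2, 3], 3): R1 is skipped and R2/R3 hold the 8-byte value.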
+
+
+def reg_alloc(bitness):
+ if bitness == 0:
+        # aapcs32: 8 4-byte registers, 4 of which pass parameters
+ return RegisterAllocator(8, 4, 4)
+ elif bitness == 1:
+        # aapcs64: 8 8-byte registers, all of which pass parameters
+ return RegisterAllocator(8, 8, 8)
+
+
+# Converts a parsed SVC declaration into register lists for
+# the return value, outputs, and inputs.
+def get_registers(parse_result, bitness):
+ output_alloc = reg_alloc(bitness)
+ input_alloc = reg_alloc(bitness)
+ return_type, _, arguments = parse_result
+
+ return_write = []
+ output_writes = []
+ input_reads = []
+
+ input_ngrn = 0
+ output_ngrn = 0
+
+ # Run the input calculation.
+ for arg in arguments:
+ if arg.is_output and not arg.is_outptr:
+ input_ngrn += 1
+ continue
+
+ regs, increment = input_alloc.add(
+ input_ngrn, TYPE_SIZES[arg.type_name], align=True)
+ input_reads.append([arg.type_name, arg.var_name, regs])
+ input_ngrn += increment
+
+ # Include the return value if this SVC returns a value.
+ if return_type != "void":
+ regs, increment = output_alloc.add(
+ output_ngrn, TYPE_SIZES[return_type], align=False)
+ return_write.append([return_type, regs])
+ output_ngrn += increment
+
+ # Run the output calculation.
+ for arg in arguments:
+ if not arg.is_output or arg.is_outptr:
+ continue
+
+ regs, increment = output_alloc.add(
+ output_ngrn, TYPE_SIZES[arg.type_name], align=False)
+ output_writes.append(
+ [arg.type_name, arg.var_name, regs, arg.is_address])
+ output_ngrn += increment
+
+ return (return_write, output_writes, input_reads)
+
+
+# Collects possibly multiple source registers into the named C++ value.
+def emit_gather(sources, name, type_name, reg_size):
+ get_fn = f"GetReg{reg_size*8}"
+
+ if len(sources) == 1:
+ s, = sources
+ line = f"{name} = Convert<{type_name}>({get_fn}(system, {s}));"
+ return [line]
+
+ var_type = f"std::array<uint{reg_size*8}_t, {len(sources)}>"
+ lines = [
+ f"{var_type} {name}_gather{{}};"
+ ]
+ for i in range(0, len(sources)):
+ lines.append(
+ f"{name}_gather[{i}] = {get_fn}(system, {sources[i]});")
+
+ lines.append(f"{name} = Convert<{type_name}>({name}_gather);")
+ return lines
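+
+# For a 64-bit value read from 32-bit registers R1/R2, emit_gather produces:
+#   std::array<uint32_t, 2> timeout_ns_gather{};
+#   timeout_ns_gather[0] = GetReg32(system, 1);
+#   timeout_ns_gather[1] = GetReg32(system, 2);
+#   timeout_ns = Convert<int64_t>(timeout_ns_gather);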
+
+
+# Produces one or more statements which assign the named C++ value
+# into possibly multiple registers.
+def emit_scatter(destinations, name, reg_size):
+ set_fn = f"SetReg{reg_size*8}"
+ reg_type = f"uint{reg_size*8}_t"
+
+ if len(destinations) == 1:
+ d, = destinations
+ line = f"{set_fn}(system, {d}, Convert<{reg_type}>({name}));"
+ return [line]
+
+ var_type = f"std::array<{reg_type}, {len(destinations)}>"
+ lines = [
+ f"auto {name}_scatter = Convert<{var_type}>({name});"
+ ]
+
+ for i in range(0, len(destinations)):
+ lines.append(
+ f"{set_fn}(system, {destinations[i]}, {name}_scatter[{i}]);")
+
+ return lines
+
+
+def emit_lines(lines, indent=' '):
+ output_lines = []
+ first = True
+ for line in lines:
+ if line and not first:
+ output_lines.append(indent + line)
+ else:
+ output_lines.append(line)
+ first = False
+
+ return "\n".join(output_lines)
+
+
+# Emit a C++ function to wrap a guest SVC.
+def emit_wrapper(wrapped_fn, suffix, register_info, arguments, byte_size):
+ return_write, output_writes, input_reads = register_info
+ lines = [
+ f"static void SvcWrap_{wrapped_fn}{suffix}(Core::System& system) {{"
+ ]
+
+ # Get everything ready.
+ for return_type, _ in return_write:
+ lines.append(f"{return_type} ret{{}};")
+ if return_write:
+ lines.append("")
+
+ for output_type, var_name, _, is_address in output_writes:
+ output_type = "uint64_t" if is_address else output_type
+ lines.append(f"{output_type} {var_name}{{}};")
+ for input_type, var_name, _ in input_reads:
+ lines.append(f"{input_type} {var_name}{{}};")
+
+ if output_writes or input_reads:
+ lines.append("")
+
+ for input_type, var_name, sources in input_reads:
+ lines += emit_gather(sources, var_name, input_type, byte_size)
+ if input_reads:
+ lines.append("")
+
+ # Build the call.
+ call_arguments = ["system"]
+ for arg in arguments:
+ if arg.is_output and not arg.is_outptr:
+ call_arguments.append(f"std::addressof({arg.var_name})")
+ else:
+ call_arguments.append(arg.var_name)
+
+ line = ""
+ if return_write:
+ line += "ret = "
+
+ line += f"{wrapped_fn}{suffix}({', '.join(call_arguments)});"
+ lines.append(line)
+
+ if return_write or output_writes:
+ lines.append("")
+
+ # Write back the return value and outputs.
+ for _, destinations in return_write:
+ lines += emit_scatter(destinations, "ret", byte_size)
+ for _, var_name, destinations, _ in output_writes:
+ lines += emit_scatter(destinations, var_name, byte_size)
+
+ # Finish.
+ return emit_lines(lines) + "\n}"
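+
+# An emitted wrapper gathers inputs from guest registers, invokes the C++
+# implementation, and scatters the results back; abbreviated example output
+# (blank lines omitted):
+#   static void SvcWrap_GetThreadId64(Core::System& system) {
+#       Result ret{};
+#       uint64_t out_thread_id{};
+#       Handle thread_handle{};
+#       thread_handle = Convert<Handle>(GetReg64(system, 1));
+#       ret = GetThreadId64(system, std::addressof(out_thread_id), thread_handle);
+#       SetReg64(system, 0, Convert<uint64_t>(ret));
+#       SetReg64(system, 1, Convert<uint64_t>(out_thread_id));
+#   }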
+
+
+COPYRIGHT = """\
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+// This file is automatically generated using svc_generator.py.
+"""
+
+PROLOGUE_H = """
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
+namespace Core {
+class System;
+}
+
+namespace Kernel::Svc {
+
+// clang-format off
+"""
+
+EPILOGUE_H = """
+// clang-format on
+
+// Custom ABI.
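+// These SVCs exchange their arguments through a pointer block rather than the
+// standard register convention, so their wrappers are written by hand
+// (presumably via SKIP_WRAPPERS in svc_generator.py).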
+Result ReplyAndReceiveLight(Core::System& system, Handle handle, uint32_t* args);
+Result ReplyAndReceiveLight64From32(Core::System& system, Handle handle, uint32_t* args);
+Result ReplyAndReceiveLight64(Core::System& system, Handle handle, uint32_t* args);
+
+Result SendSyncRequestLight(Core::System& system, Handle session_handle, uint32_t* args);
+Result SendSyncRequestLight64From32(Core::System& system, Handle session_handle, uint32_t* args);
+Result SendSyncRequestLight64(Core::System& system, Handle session_handle, uint32_t* args);
+
+void CallSecureMonitor(Core::System& system, lp64::SecureMonitorArguments* args);
+void CallSecureMonitor64From32(Core::System& system, ilp32::SecureMonitorArguments* args);
+void CallSecureMonitor64(Core::System& system, lp64::SecureMonitorArguments* args);
+
+// Defined in svc_light_ipc.cpp.
+void SvcWrap_ReplyAndReceiveLight64From32(Core::System& system);
+void SvcWrap_ReplyAndReceiveLight64(Core::System& system);
+
+void SvcWrap_SendSyncRequestLight64From32(Core::System& system);
+void SvcWrap_SendSyncRequestLight64(Core::System& system);
+
+// Defined in svc_secure_monitor_call.cpp.
+void SvcWrap_CallSecureMonitor64From32(Core::System& system);
+void SvcWrap_CallSecureMonitor64(Core::System& system);
+
+// Perform a supervisor call by index.
+void Call(Core::System& system, u32 imm);
+
+} // namespace Kernel::Svc
+"""
+
+PROLOGUE_CPP = """
+#include <cstring>
+#include <type_traits>
+
+#include "core/arm/arm_interface.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+static uint32_t GetReg32(Core::System& system, int n) {
+ return static_cast<uint32_t>(system.CurrentArmInterface().GetReg(n));
+}
+
+static void SetReg32(Core::System& system, int n, uint32_t result) {
+ system.CurrentArmInterface().SetReg(n, static_cast<uint64_t>(result));
+}
+
+static uint64_t GetReg64(Core::System& system, int n) {
+ return system.CurrentArmInterface().GetReg(n);
+}
+
+static void SetReg64(Core::System& system, int n, uint64_t result) {
+ system.CurrentArmInterface().SetReg(n, result);
+}
+
+// Like bit_cast, but handles the case when the source and dest
+// are differently-sized.
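+// When To is larger the result is zero-extended (To is value-initialized);
+// when To is smaller only the first sizeof(To) bytes are copied, which keeps
+// the low-order part of the value on little-endian hosts.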
+template <typename To, typename From>
+ requires(std::is_trivial_v<To> && std::is_trivially_copyable_v<From>)
+static To Convert(const From& from) {
+ To to{};
+
+ if constexpr (sizeof(To) >= sizeof(From)) {
+ std::memcpy(std::addressof(to), std::addressof(from), sizeof(From));
+ } else {
+ std::memcpy(std::addressof(to), std::addressof(from), sizeof(To));
+ }
+
+ return to;
+}
+
+// clang-format off
+"""
+
+EPILOGUE_CPP = """
+// clang-format on
+
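+// Top-level SVC entry point: wraps dispatch in Enter/ExitSVCProfile and
+// selects the 32- or 64-bit dispatch table based on the current process.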
+void Call(Core::System& system, u32 imm) {
+ auto& kernel = system.Kernel();
+ kernel.EnterSVCProfile();
+
+    if (GetCurrentProcess(kernel).Is64BitProcess()) {
+ Call64(system, imm);
+ } else {
+ Call32(system, imm);
+ }
+
+ kernel.ExitSVCProfile();
+}
+
+} // namespace Kernel::Svc
+"""
+
+
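+# Emit the per-ABI dispatcher (Call32 or Call64): a switch over SvcId that
+# forwards to the matching generated SvcWrap_* function.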
+def emit_call(bitness, names, suffix):
+ bit_size = REG_SIZES[bitness]*8
+ indent = " "
+ lines = [
+ f"static void Call{bit_size}(Core::System& system, u32 imm) {{",
+ f"{indent}switch (static_cast<SvcId>(imm)) {{"
+ ]
+
+ for _, name in names:
+ lines.append(f"{indent}case SvcId::{name}:")
+ lines.append(f"{indent*2}return SvcWrap_{name}{suffix}(system);")
+
+ lines.append(f"{indent}default:")
+ lines.append(
+ f"{indent*2}LOG_CRITICAL(Kernel_SVC, \"Unknown SVC {{:x}}!\", imm);")
+ lines.append(f"{indent*2}break;")
+ lines.append(f"{indent}}}")
+ lines.append("}")
+
+ return "\n".join(lines)
+
+
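+# Build a C++ declaration for an SVC implementation; non-outptr outputs become
+# pointer parameters, e.g. (hypothetical entry):
+# "Result GetProcessId(Core::System& system, uint64_t* out_process_id, Handle process_handle);"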
+def build_fn_declaration(return_type, name, arguments):
+ arg_list = ["Core::System& system"]
+ for arg in arguments:
+ type_name = "uint64_t" if arg.is_address else arg.type_name
+ pointer = "*" if arg.is_output and not arg.is_outptr else ""
+ arg_list.append(f"{type_name}{pointer} {arg.var_name}")
+
+ return f"{return_type} {name}({', '.join(arg_list)});"
+
+
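+# Build the SvcId enum, mapping each SVC name to its immediate value.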
+def build_enum_declarations():
+ lines = ["enum class SvcId : u32 {"]
+ indent = " "
+
+ for imm, decl in SVCS:
+ _, name, _ = parse_declaration(decl, BIT_64)
+ lines.append(f"{indent}{name} = {hex(imm)},")
+
+ lines.append("};")
+ return "\n".join(lines)
+
+
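+# Drive the generator: parse every SVC declaration, build the wrappers and
+# dispatch tables, and write the svc.h / svc.cpp outputs.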
+def main():
+ arch_fw_declarations = [[], []]
+ svc_fw_declarations = []
+ wrapper_fns = []
+ names = []
+
+ for imm, decl in SVCS:
+ return_type, name, arguments = parse_declaration(decl, BIT_64)
+
+ if imm not in SKIP_WRAPPERS:
+ svc_fw_declarations.append(
+ build_fn_declaration(return_type, name, arguments))
+
+ names.append([imm, name])
+
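+    # Emit wrappers for both ABIs; per SUFFIX_NAMES, bitness 0 produces the
+    # "64From32" (32-bit) variants and bitness 1 the native "64" variants.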
+ for bitness in range(2):
+ byte_size = REG_SIZES[bitness]
+ suffix = SUFFIX_NAMES[bitness]
+
+ for imm, decl in SVCS:
+ if imm in SKIP_WRAPPERS:
+ continue
+
+ parse_result = parse_declaration(decl, bitness)
+ return_type, name, arguments = parse_result
+
+ register_info = get_registers(parse_result, bitness)
+ wrapper_fns.append(
+ emit_wrapper(name, suffix, register_info, arguments, byte_size))
+ arch_fw_declarations[bitness].append(
+ build_fn_declaration(return_type, name + suffix, arguments))
+
+ call_32 = emit_call(BIT_32, names, SUFFIX_NAMES[BIT_32])
+ call_64 = emit_call(BIT_64, names, SUFFIX_NAMES[BIT_64])
+ enum_decls = build_enum_declarations()
+
+ with open("svc.h", "w") as f:
+ f.write(COPYRIGHT)
+ f.write(PROLOGUE_H)
+ f.write("\n".join(svc_fw_declarations))
+ f.write("\n\n")
+ f.write("\n".join(arch_fw_declarations[BIT_32]))
+ f.write("\n\n")
+ f.write("\n".join(arch_fw_declarations[BIT_64]))
+ f.write("\n\n")
+ f.write(enum_decls)
+ f.write(EPILOGUE_H)
+
+ with open("svc.cpp", "w") as f:
+ f.write(COPYRIGHT)
+ f.write(PROLOGUE_CPP)
+ f.write(emit_size_check())
+ f.write("\n\n")
+ f.write("\n\n".join(wrapper_fns))
+ f.write("\n\n")
+ f.write(call_32)
+ f.write("\n\n")
+ f.write(call_64)
+ f.write(EPILOGUE_CPP)
+
+ print(f"Done (emitted {len(names)} definitions)")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
index b7ca53085..e1ad78607 100644
--- a/src/core/hle/kernel/svc_results.h
+++ b/src/core/hle/kernel/svc_results.h
@@ -11,6 +11,7 @@ namespace Kernel {
constexpr Result ResultOutOfSessions{ErrorModule::Kernel, 7};
constexpr Result ResultInvalidArgument{ErrorModule::Kernel, 14};
+constexpr Result ResultNotImplemented{ErrorModule::Kernel, 33};
constexpr Result ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
constexpr Result ResultTerminationRequested{ErrorModule::Kernel, 59};
constexpr Result ResultInvalidSize{ErrorModule::Kernel, 101};
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 33eebcef6..7f380ca4f 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -3,6 +3,9 @@
#pragma once
+#include <bitset>
+
+#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
@@ -148,6 +151,7 @@ enum class InfoType : u32 {
FreeThreadCount = 24,
ThreadTickCount = 25,
IsSvcPermitted = 26,
+ IoRegionHint = 27,
MesosphereMeta = 65000,
MesosphereCurrentProcess = 65001,
@@ -165,6 +169,7 @@ enum class BreakReason : u32 {
NotificationOnlyFlag = 0x80000000,
};
+DECLARE_ENUM_FLAG_OPERATORS(BreakReason);
enum class DebugEvent : u32 {
CreateProcess = 0,
@@ -248,7 +253,7 @@ struct LastThreadContext {
};
struct PhysicalMemoryInfo {
- PAddr physical_address;
+ u64 physical_address;
u64 virtual_address;
u64 size;
};
@@ -354,7 +359,7 @@ struct LastThreadContext {
};
struct PhysicalMemoryInfo {
- PAddr physical_address;
+ u64 physical_address;
u32 virtual_address;
u32 size;
};
@@ -496,6 +501,19 @@ enum class MemoryMapping : u32 {
Memory = 2,
};
+enum class MapDeviceAddressSpaceFlag : u32 {
+ None = (0U << 0),
+ NotIoRegister = (1U << 0),
+};
+DECLARE_ENUM_FLAG_OPERATORS(MapDeviceAddressSpaceFlag);
+
+union MapDeviceAddressSpaceOption {
+ u32 raw;
+ BitField<0, 16, MemoryPermission> permission;
+ BitField<16, 1, MapDeviceAddressSpaceFlag> flags;
+ BitField<17, 15, u32> reserved;
+};
+
enum class KernelDebugType : u32 {
Thread = 0,
ThreadCallStack = 1,
@@ -580,6 +598,11 @@ enum class ProcessInfoType : u32 {
ProcessState = 0,
};
+enum class ProcessActivity : u32 {
+ Runnable,
+ Paused,
+};
+
struct CreateProcessParameter {
std::array<char, 12> name;
u32 version;
@@ -592,4 +615,12 @@ struct CreateProcessParameter {
};
static_assert(sizeof(CreateProcessParameter) == 0x30);
+constexpr size_t NumSupervisorCalls = 0xC0;
+using SvcAccessFlagSet = std::bitset<NumSupervisorCalls>;
+
+enum class InitialProcessIdRangeInfo : u64 {
+ Minimum = 0,
+ Maximum = 1,
+};
+
} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_version.h b/src/core/hle/kernel/svc_version.h
new file mode 100644
index 000000000..3eb95aa7b
--- /dev/null
+++ b/src/core/hle/kernel/svc_version.h
@@ -0,0 +1,60 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+#include "common/literals.h"
+
+namespace Kernel::Svc {
+
+constexpr inline u32 ConvertToSvcMajorVersion(u32 sdk) {
+ return sdk + 4;
+}
+constexpr inline u32 ConvertToSdkMajorVersion(u32 svc) {
+ return svc - 4;
+}
+
+constexpr inline u32 ConvertToSvcMinorVersion(u32 sdk) {
+ return sdk;
+}
+constexpr inline u32 ConvertToSdkMinorVersion(u32 svc) {
+ return svc;
+}
+
+union KernelVersion {
+ u32 value;
+ BitField<0, 4, u32> minor_version;
+ BitField<4, 13, u32> major_version;
+};
+
+constexpr inline u32 EncodeKernelVersion(u32 major, u32 minor) {
+ return decltype(KernelVersion::minor_version)::FormatValue(minor) |
+ decltype(KernelVersion::major_version)::FormatValue(major);
+}
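+// e.g. EncodeKernelVersion(15, 3) packs minor=3 into bits [0,4) and
+// major=15 into bits [4,17), yielding (15 << 4) | 3 == 0xF3.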
+
+constexpr inline u32 GetKernelMajorVersion(u32 encoded) {
+ return decltype(KernelVersion::major_version)::ExtractValue(encoded);
+}
+
+constexpr inline u32 GetKernelMinorVersion(u32 encoded) {
+ return decltype(KernelVersion::minor_version)::ExtractValue(encoded);
+}
+
+// Nintendo doesn't support programs targeting SVC versions < 3.0.
+constexpr inline u32 RequiredKernelMajorVersion = 3;
+constexpr inline u32 RequiredKernelMinorVersion = 0;
+constexpr inline u32 RequiredKernelVersion =
+ EncodeKernelVersion(RequiredKernelMajorVersion, RequiredKernelMinorVersion);
+
+// This is the highest SVC version supported, to be updated on new kernel releases.
+// NOTE: Official kernel versions have SVC major = SDK major + 4, SVC minor = SDK minor.
+constexpr inline u32 SupportedKernelMajorVersion = ConvertToSvcMajorVersion(15);
+constexpr inline u32 SupportedKernelMinorVersion = ConvertToSvcMinorVersion(3);
+constexpr inline u32 SupportedKernelVersion =
+ EncodeKernelVersion(SupportedKernelMajorVersion, SupportedKernelMinorVersion);
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
deleted file mode 100644
index 1ea8c7fbc..000000000
--- a/src/core/hle/kernel/svc_wrap.h
+++ /dev/null
@@ -1,733 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include "common/common_types.h"
-#include "core/arm/arm_interface.h"
-#include "core/core.h"
-#include "core/hle/kernel/svc_types.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-static inline u64 Param(const Core::System& system, int n) {
- return system.CurrentArmInterface().GetReg(n);
-}
-
-static inline u32 Param32(const Core::System& system, int n) {
- return static_cast<u32>(system.CurrentArmInterface().GetReg(n));
-}
-
-/**
- * HLE a function return from the current ARM userland process
- * @param system System context
- * @param result Result to return
- */
-static inline void FuncReturn(Core::System& system, u64 result) {
- system.CurrentArmInterface().SetReg(0, result);
-}
-
-static inline void FuncReturn32(Core::System& system, u32 result) {
- system.CurrentArmInterface().SetReg(0, (u64)result);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////
-// Function wrappers that return type Result
-
-template <Result func(Core::System&, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0)).raw);
-}
-
-template <Result func(Core::System&, u64, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw);
-}
-
-template <Result func(Core::System&, u32)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
-}
-
-template <Result func(Core::System&, u32, u32)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(
- system,
- func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw);
-}
-
-// Used by SetThreadActivity
-template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
- static_cast<Svc::ThreadActivity>(Param(system, 1)))
- .raw);
-}
-
-template <Result func(Core::System&, u32, u64, u64, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
- Param(system, 2), Param(system, 3))
- .raw);
-}
-
-// Used by MapProcessMemory and UnmapProcessMemory
-template <Result func(Core::System&, u64, u32, u64, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
- Param(system, 2), Param(system, 3))
- .raw);
-}
-
-// Used by ControlCodeMemory
-template <Result func(Core::System&, Handle, u32, VAddr, size_t, Svc::MemoryPermission)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
- static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3),
- static_cast<Svc::MemoryPermission>(Param(system, 4)))
- .raw);
-}
-
-template <Result func(Core::System&, u32*)>
-void SvcWrap64(Core::System& system) {
- u32 param = 0;
- const u32 retval = func(system, &param).raw;
- system.CurrentArmInterface().SetReg(1, param);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u32*, u32)>
-void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
- const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u32*, u32*)>
-void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
- u32 param_2 = 0;
- const u32 retval = func(system, &param_1, &param_2).raw;
-
- auto& arm_interface = system.CurrentArmInterface();
- arm_interface.SetReg(1, param_1);
- arm_interface.SetReg(2, param_2);
-
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u32*, u64)>
-void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
- const u32 retval = func(system, &param_1, Param(system, 1)).raw;
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u32*, u64, u32)>
-void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
- const u32 retval =
- func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2))).raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u64*, u32)>
-void SvcWrap64(Core::System& system) {
- u64 param_1 = 0;
- const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u64, u32)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1))).raw);
-}
-
-template <Result func(Core::System&, u64*, u64)>
-void SvcWrap64(Core::System& system) {
- u64 param_1 = 0;
- const u32 retval = func(system, &param_1, Param(system, 1)).raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u64*, u32, u32)>
-void SvcWrap64(Core::System& system) {
- u64 param_1 = 0;
- const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1)),
- static_cast<u32>(Param(system, 2)))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-// Used by GetResourceLimitLimitValue.
-template <Result func(Core::System&, u64*, Handle, LimitableResource)>
-void SvcWrap64(Core::System& system) {
- u64 param_1 = 0;
- const u32 retval = func(system, &param_1, static_cast<Handle>(Param(system, 1)),
- static_cast<LimitableResource>(Param(system, 2)))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u32, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw);
-}
-
-// Used by SetResourceLimitLimitValue
-template <Result func(Core::System&, Handle, LimitableResource, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
- static_cast<LimitableResource>(Param(system, 1)), Param(system, 2))
- .raw);
-}
-
-// Used by SetThreadCoreMask
-template <Result func(Core::System&, Handle, s32, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
- static_cast<s32>(Param(system, 1)), Param(system, 2))
- .raw);
-}
-
-// Used by GetThreadCoreMask
-template <Result func(Core::System&, Handle, s32*, u64*)>
-void SvcWrap64(Core::System& system) {
- s32 param_1 = 0;
- u64 param_2 = 0;
- const Result retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
-
- system.CurrentArmInterface().SetReg(1, param_1);
- system.CurrentArmInterface().SetReg(2, param_2);
- FuncReturn(system, retval.raw);
-}
-
-template <Result func(Core::System&, u64, u64, u32, u32)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
- static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
- .raw);
-}
-
-template <Result func(Core::System&, u64, u64, u32, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
- static_cast<u32>(Param(system, 2)), Param(system, 3))
- .raw);
-}
-
-template <Result func(Core::System&, u32, u64, u32)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
- static_cast<u32>(Param(system, 2)))
- .raw);
-}
-
-template <Result func(Core::System&, u64, u64, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), Param(system, 1), Param(system, 2)).raw);
-}
-
-template <Result func(Core::System&, u64, u64, u32)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(
- system,
- func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw);
-}
-
-// Used by SetMemoryPermission
-template <Result func(Core::System&, u64, u64, Svc::MemoryPermission)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
- static_cast<Svc::MemoryPermission>(Param(system, 2)))
- .raw);
-}
-
-// Used by MapSharedMemory
-template <Result func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), Param(system, 1),
- Param(system, 2), static_cast<Svc::MemoryPermission>(Param(system, 3)))
- .raw);
-}
-
-template <Result func(Core::System&, u32, u64, u64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(
- system,
- func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)).raw);
-}
-
-// Used by WaitSynchronization
-template <Result func(Core::System&, s32*, u64, s32, s64)>
-void SvcWrap64(Core::System& system) {
- s32 param_1 = 0;
- const u32 retval = func(system, &param_1, Param(system, 1), static_cast<s32>(Param(system, 2)),
- static_cast<s64>(Param(system, 3)))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u64, u64, u32, s64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
- static_cast<u32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
- .raw);
-}
-
-// Used by GetInfo
-template <Result func(Core::System&, u64*, u64, Handle, u64)>
-void SvcWrap64(Core::System& system) {
- u64 param_1 = 0;
- const u32 retval = func(system, &param_1, Param(system, 1),
- static_cast<Handle>(Param(system, 2)), Param(system, 3))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, u32*, u64, u64, u64, u32, s32)>
-void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
- const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3),
- static_cast<u32>(Param(system, 4)), static_cast<s32>(Param(system, 5)))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-// Used by CreateTransferMemory
-template <Result func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
-void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
- const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2),
- static_cast<Svc::MemoryPermission>(Param(system, 3)))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-// Used by CreateCodeMemory
-template <Result func(Core::System&, Handle*, VAddr, size_t)>
-void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
- const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2)).raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-template <Result func(Core::System&, Handle*, u64, u32, u32)>
-void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
- const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
- static_cast<u32>(Param(system, 3)))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-// Used by CreateSession
-template <Result func(Core::System&, Handle*, Handle*, u32, u64)>
-void SvcWrap64(Core::System& system) {
- Handle param_1 = 0;
- Handle param_2 = 0;
- const u32 retval = func(system, &param_1, &param_2, static_cast<u32>(Param(system, 2)),
- static_cast<u32>(Param(system, 3)))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- system.CurrentArmInterface().SetReg(2, param_2);
- FuncReturn(system, retval);
-}
-
-// Used by ReplyAndReceive
-template <Result func(Core::System&, s32*, Handle*, s32, Handle, s64)>
-void SvcWrap64(Core::System& system) {
- s32 param_1 = 0;
- s32 num_handles = static_cast<s32>(Param(system, 2));
-
- std::vector<Handle> handles(num_handles);
- system.Memory().ReadBlock(Param(system, 1), handles.data(), num_handles * sizeof(Handle));
-
- const u32 retval = func(system, &param_1, handles.data(), num_handles,
- static_cast<s32>(Param(system, 3)), static_cast<s64>(Param(system, 4)))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-// Used by WaitForAddress
-template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system,
- func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
- .raw);
-}
-
-// Used by SignalToAddress
-template <Result func(Core::System&, u64, Svc::SignalType, s32, s32)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system,
- func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
- .raw);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////
-// Function wrappers that return type u32
-
-template <u32 func(Core::System&)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system));
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////
-// Function wrappers that return type u64
-
-template <u64 func(Core::System&)>
-void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system));
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////
-/// Function wrappers that return type void
-
-template <void func(Core::System&)>
-void SvcWrap64(Core::System& system) {
- func(system);
-}
-
-template <void func(Core::System&, u32)>
-void SvcWrap64(Core::System& system) {
- func(system, static_cast<u32>(Param(system, 0)));
-}
-
-template <void func(Core::System&, u32, u64, u64, u64)>
-void SvcWrap64(Core::System& system) {
- func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2),
- Param(system, 3));
-}
-
-template <void func(Core::System&, s64)>
-void SvcWrap64(Core::System& system) {
- func(system, static_cast<s64>(Param(system, 0)));
-}
-
-template <void func(Core::System&, u64, s32)>
-void SvcWrap64(Core::System& system) {
- func(system, Param(system, 0), static_cast<s32>(Param(system, 1)));
-}
-
-template <void func(Core::System&, u64, u64)>
-void SvcWrap64(Core::System& system) {
- func(system, Param(system, 0), Param(system, 1));
-}
-
-template <void func(Core::System&, u64, u64, u64)>
-void SvcWrap64(Core::System& system) {
- func(system, Param(system, 0), Param(system, 1), Param(system, 2));
-}
-
-template <void func(Core::System&, u32, u64, u64)>
-void SvcWrap64(Core::System& system) {
- func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2));
-}
-
-// Used by QueryMemory32, ArbitrateLock32
-template <Result func(Core::System&, u32, u32, u32)>
-void SvcWrap32(Core::System& system) {
- FuncReturn32(system,
- func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw);
-}
-
-// Used by Break32
-template <void func(Core::System&, u32, u32, u32)>
-void SvcWrap32(Core::System& system) {
- func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2));
-}
-
-// Used by ExitProcess32, ExitThread32
-template <void func(Core::System&)>
-void SvcWrap32(Core::System& system) {
- func(system);
-}
-
-// Used by GetCurrentProcessorNumber32
-template <u32 func(Core::System&)>
-void SvcWrap32(Core::System& system) {
- FuncReturn32(system, func(system));
-}
-
-// Used by SleepThread32
-template <void func(Core::System&, u32, u32)>
-void SvcWrap32(Core::System& system) {
- func(system, Param32(system, 0), Param32(system, 1));
-}
-
-// Used by CreateThread32
-template <Result func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
-void SvcWrap32(Core::System& system) {
- Handle param_1 = 0;
-
- const u32 retval = func(system, &param_1, Param32(system, 0), Param32(system, 1),
- Param32(system, 2), Param32(system, 3), Param32(system, 4))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-// Used by GetInfo32
-template <Result func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
-void SvcWrap32(Core::System& system) {
- u32 param_1 = 0;
- u32 param_2 = 0;
-
- const u32 retval = func(system, &param_1, &param_2, Param32(system, 0), Param32(system, 1),
- Param32(system, 2), Param32(system, 3))
- .raw;
-
- system.CurrentArmInterface().SetReg(1, param_1);
- system.CurrentArmInterface().SetReg(2, param_2);
- FuncReturn(system, retval);
-}
-
-// Used by GetThreadPriority32, ConnectToNamedPort32
-template <Result func(Core::System&, u32*, u32)>
-void SvcWrap32(Core::System& system) {
- u32 param_1 = 0;
- const u32 retval = func(system, &param_1, Param32(system, 1)).raw;
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-// Used by GetThreadId32
-template <Result func(Core::System&, u32*, u32*, u32)>
-void SvcWrap32(Core::System& system) {
- u32 param_1 = 0;
- u32 param_2 = 0;
-
- const u32 retval = func(system, &param_1, &param_2, Param32(system, 1)).raw;
- system.CurrentArmInterface().SetReg(1, param_1);
- system.CurrentArmInterface().SetReg(2, param_2);
- FuncReturn(system, retval);
-}
-
-// Used by GetSystemTick32
-template <void func(Core::System&, u32*, u32*)>
-void SvcWrap32(Core::System& system) {
- u32 param_1 = 0;
- u32 param_2 = 0;
-
- func(system, &param_1, &param_2);
- system.CurrentArmInterface().SetReg(0, param_1);
- system.CurrentArmInterface().SetReg(1, param_2);
-}
-
-// Used by CreateEvent32
-template <Result func(Core::System&, Handle*, Handle*)>
-void SvcWrap32(Core::System& system) {
- Handle param_1 = 0;
- Handle param_2 = 0;
-
- const u32 retval = func(system, &param_1, &param_2).raw;
- system.CurrentArmInterface().SetReg(1, param_1);
- system.CurrentArmInterface().SetReg(2, param_2);
- FuncReturn(system, retval);
-}
-
-// Used by GetThreadId32
-template <Result func(Core::System&, Handle, u32*, u32*, u32*)>
-void SvcWrap32(Core::System& system) {
- u32 param_1 = 0;
- u32 param_2 = 0;
- u32 param_3 = 0;
-
- const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
- system.CurrentArmInterface().SetReg(1, param_1);
- system.CurrentArmInterface().SetReg(2, param_2);
- system.CurrentArmInterface().SetReg(3, param_3);
- FuncReturn(system, retval);
-}
-
-// Used by GetThreadCoreMask32
-template <Result func(Core::System&, Handle, s32*, u32*, u32*)>
-void SvcWrap32(Core::System& system) {
- s32 param_1 = 0;
- u32 param_2 = 0;
- u32 param_3 = 0;
-
- const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
- system.CurrentArmInterface().SetReg(1, param_1);
- system.CurrentArmInterface().SetReg(2, param_2);
- system.CurrentArmInterface().SetReg(3, param_3);
- FuncReturn(system, retval);
-}
-
-// Used by SignalProcessWideKey32
-template <void func(Core::System&, u32, s32)>
-void SvcWrap32(Core::System& system) {
- func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1)));
-}
-
-// Used by SetThreadActivity32
-template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
-void SvcWrap32(Core::System& system) {
- const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
- static_cast<Svc::ThreadActivity>(Param(system, 1)))
- .raw;
- FuncReturn(system, retval);
-}
-
-// Used by SetThreadPriority32
-template <Result func(Core::System&, Handle, u32)>
-void SvcWrap32(Core::System& system) {
- const u32 retval =
- func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw;
- FuncReturn(system, retval);
-}
-
-// Used by SetMemoryAttribute32
-template <Result func(Core::System&, Handle, u32, u32, u32)>
-void SvcWrap32(Core::System& system) {
- const u32 retval =
- func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
- static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
- .raw;
- FuncReturn(system, retval);
-}
-
-// Used by MapSharedMemory32
-template <Result func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
-void SvcWrap32(Core::System& system) {
- const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
- static_cast<u32>(Param(system, 1)), static_cast<u32>(Param(system, 2)),
- static_cast<Svc::MemoryPermission>(Param(system, 3)))
- .raw;
- FuncReturn(system, retval);
-}
-
-// Used by SetThreadCoreMask32
-template <Result func(Core::System&, Handle, s32, u32, u32)>
-void SvcWrap32(Core::System& system) {
- const u32 retval =
- func(system, static_cast<Handle>(Param(system, 0)), static_cast<s32>(Param(system, 1)),
- static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
- .raw;
- FuncReturn(system, retval);
-}
-
-// Used by WaitProcessWideKeyAtomic32
-template <Result func(Core::System&, u32, u32, Handle, u32, u32)>
-void SvcWrap32(Core::System& system) {
- const u32 retval =
- func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
- static_cast<Handle>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
- static_cast<u32>(Param(system, 4)))
- .raw;
- FuncReturn(system, retval);
-}
-
-// Used by WaitForAddress32
-template <Result func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
-void SvcWrap32(Core::System& system) {
- const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
- static_cast<Svc::ArbitrationType>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
- static_cast<u32>(Param(system, 4)))
- .raw;
- FuncReturn(system, retval);
-}
-
-// Used by SignalToAddress32
-template <Result func(Core::System&, u32, Svc::SignalType, s32, s32)>
-void SvcWrap32(Core::System& system) {
- const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
- static_cast<Svc::SignalType>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
- .raw;
- FuncReturn(system, retval);
-}
-
-// Used by SendSyncRequest32, ArbitrateUnlock32
-template <Result func(Core::System&, u32)>
-void SvcWrap32(Core::System& system) {
- FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
-}
-
-// Used by CreateTransferMemory32
-template <Result func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
-void SvcWrap32(Core::System& system) {
- Handle handle = 0;
- const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2),
- static_cast<Svc::MemoryPermission>(Param32(system, 3)))
- .raw;
- system.CurrentArmInterface().SetReg(1, handle);
- FuncReturn(system, retval);
-}
-
-// Used by WaitSynchronization32
-template <Result func(Core::System&, u32, u32, s32, u32, s32*)>
-void SvcWrap32(Core::System& system) {
- s32 param_1 = 0;
- const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
- Param32(system, 3), &param_1)
- .raw;
- system.CurrentArmInterface().SetReg(1, param_1);
- FuncReturn(system, retval);
-}
-
-// Used by CreateCodeMemory32
-template <Result func(Core::System&, Handle*, u32, u32)>
-void SvcWrap32(Core::System& system) {
- Handle handle = 0;
-
- const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2)).raw;
-
- system.CurrentArmInterface().SetReg(1, handle);
- FuncReturn(system, retval);
-}
-
-// Used by ControlCodeMemory32
-template <Result func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
-void SvcWrap32(Core::System& system) {
- const u32 retval =
- func(system, Param32(system, 0), Param32(system, 1), Param(system, 2), Param(system, 4),
- static_cast<Svc::MemoryPermission>(Param32(system, 6)))
- .raw;
-
- FuncReturn(system, retval);
-}
-
-// Used by Invalidate/Store/FlushProcessDataCache32
-template <Result func(Core::System&, Handle, u64, u64)>
-void SvcWrap32(Core::System& system) {
- const u64 address = (Param(system, 3) << 32) | Param(system, 2);
- const u64 size = (Param(system, 4) << 32) | Param(system, 1);
- FuncReturn32(system, func(system, Param32(system, 0), address, size).raw);
-}
-
-} // namespace Kernel