Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp | 7
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 44
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 21
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp | 17
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h | 19
-rw-r--r--  src/core/hle/kernel/k_client_port.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_client_port.h | 9
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_client_session.h | 6
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_code_memory.h | 20
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 18
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h | 6
-rw-r--r--  src/core/hle/kernel/k_handle_table.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_handle_table.h | 8
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.cpp | 3
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp | 3
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp | 16
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h | 16
-rw-r--r--  src/core/hle/kernel/k_page_group.h (renamed from src/core/hle/kernel/k_page_linked_list.h) | 10
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 178
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 173
-rw-r--r--  src/core/hle/kernel/k_port.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_port.h | 2
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 113
-rw-r--r--  src/core/hle/kernel/k_process.h | 66
-rw-r--r--  src/core/hle/kernel/k_readable_event.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_readable_event.h | 6
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_resource_limit.h | 4
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 739
-rw-r--r--  src/core/hle/kernel/k_scheduler.h | 227
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h | 2
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_server_session.h | 12
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp | 21
-rw-r--r--  src/core/hle/kernel/k_shared_memory.h | 23
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp | 13
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h | 8
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 135
-rw-r--r--  src/core/hle/kernel/k_thread.h | 89
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.h | 4
-rw-r--r--  src/core/hle/kernel/k_thread_queue.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h | 9
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.h | 4
-rw-r--r--  src/core/hle/kernel/k_writable_event.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_writable_event.h | 4
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 132
-rw-r--r--  src/core/hle/kernel/kernel.h | 14
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 30
-rw-r--r--  src/core/hle/kernel/physical_core.h | 17
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 43
-rw-r--r--  src/core/hle/kernel/process_capability.h | 38
-rw-r--r--  src/core/hle/kernel/svc.cpp | 338
-rw-r--r--  src/core/hle/kernel/svc_results.h | 58
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 124
-rw-r--r--  src/core/hle/kernel/time_manager.cpp | 20
60 files changed, 1540 insertions, 1415 deletions
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 164436b26..65576b8c4 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -41,12 +41,7 @@ void GlobalSchedulerContext::PreemptThreads() {
ASSERT(IsLocked());
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
const u32 priority = preemption_priorities[core_id];
- kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
-
- // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result
- // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system
- // interrupts that may have occurred.
- kernel.PhysicalCore(core_id).Interrupt();
+ KScheduler::RotateScheduledQueue(kernel, core_id, priority);
}
}
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 4650d25b0..5b3feec66 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -188,8 +188,8 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
rp.Skip(1, false); // The command is actually an u64, but we don't use the high part.
}
-ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
- u32_le* src_cmdbuf) {
+Result HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
+ u32_le* src_cmdbuf) {
ParseCommandBuffer(handle_table, src_cmdbuf, true);
if (command_header->IsCloseCommand()) {
@@ -202,7 +202,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTab
return ResultSuccess;
}
-ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
+Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
auto current_offset = handles_offset;
auto& owner_process = *requesting_thread.GetOwnerProcess();
auto& handle_table = owner_process.GetHandleTable();
@@ -287,18 +287,52 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
BufferDescriptorB().size() > buffer_index &&
BufferDescriptorB()[buffer_index].Size() >= size,
{ return 0; }, "BufferDescriptorB is invalid, index={}, size={}", buffer_index, size);
- memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
+ WriteBufferB(buffer, size, buffer_index);
} else {
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorC().size() > buffer_index &&
BufferDescriptorC()[buffer_index].Size() >= size,
{ return 0; }, "BufferDescriptorC is invalid, index={}, size={}", buffer_index, size);
- memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
+ WriteBufferC(buffer, size, buffer_index);
}
return size;
}
+std::size_t HLERequestContext::WriteBufferB(const void* buffer, std::size_t size,
+ std::size_t buffer_index) const {
+ if (buffer_index >= BufferDescriptorB().size() || size == 0) {
+ return 0;
+ }
+
+ const auto buffer_size{BufferDescriptorB()[buffer_index].Size()};
+ if (size > buffer_size) {
+ LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
+ buffer_size);
+ size = buffer_size; // TODO(bunnei): This needs to be HW tested
+ }
+
+ memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
+ return size;
+}
+
+std::size_t HLERequestContext::WriteBufferC(const void* buffer, std::size_t size,
+ std::size_t buffer_index) const {
+ if (buffer_index >= BufferDescriptorC().size() || size == 0) {
+ return 0;
+ }
+
+ const auto buffer_size{BufferDescriptorC()[buffer_index].Size()};
+ if (size > buffer_size) {
+ LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
+ buffer_size);
+ size = buffer_size; // TODO(bunnei): This needs to be HW tested
+ }
+
+ memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
+ return size;
+}
+
std::size_t HLERequestContext::GetReadBufferSize(std::size_t buffer_index) const {
const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
BufferDescriptorA()[buffer_index].Size()};
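Note on the new helpers: WriteBufferB and WriteBufferC clamp oversized writes to the descriptor's size rather than asserting, and let callers target a specific descriptor explicitly. A minimal sketch of how a service handler might use them — the handler name, payload source, and response layout here are hypothetical, not code from this commit:

    // Hypothetical HLE handler: reply with a payload through buffer descriptor B.
    void ReadFile(Kernel::HLERequestContext& ctx) {
        const std::vector<u8> payload = GetFileContents(); // assumed data source
        // Returns the byte count actually written, clamped to the descriptor size.
        const std::size_t written = ctx.WriteBufferB(payload.data(), payload.size(), 0);
        IPC::ResponseBuilder rb{ctx, 4};
        rb.Push(ResultSuccess);
        rb.Push<u64>(written);
    }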
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 0ddc8df9e..99265ce90 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -18,7 +18,7 @@
#include "core/hle/ipc.h"
#include "core/hle/kernel/svc_common.h"
-union ResultCode;
+union Result;
namespace Core::Memory {
class Memory;
@@ -71,10 +71,10 @@ public:
* it should be used to differentiate which client (As in ClientSession) we're answering to.
* TODO(Subv): Use a wrapper structure to hold all the information relevant to
* this request (ServerSession, Originator thread, Translated command buffer, etc).
- * @returns ResultCode the result code of the translate operation.
+ * @returns Result the result code of the translate operation.
*/
- virtual ResultCode HandleSyncRequest(Kernel::KServerSession& session,
- Kernel::HLERequestContext& context) = 0;
+ virtual Result HandleSyncRequest(Kernel::KServerSession& session,
+ Kernel::HLERequestContext& context) = 0;
/**
* Signals that a client has just connected to this HLE handler and keeps the
@@ -212,11 +212,10 @@ public:
}
/// Populates this context with data from the requesting process/thread.
- ResultCode PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
- u32_le* src_cmdbuf);
+ Result PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf);
/// Writes data from this context back to the requesting process/thread.
- ResultCode WriteToOutgoingCommandBuffer(KThread& requesting_thread);
+ Result WriteToOutgoingCommandBuffer(KThread& requesting_thread);
u32_le GetHipcCommand() const {
return command;
@@ -278,6 +277,14 @@ public:
std::size_t WriteBuffer(const void* buffer, std::size_t size,
std::size_t buffer_index = 0) const;
+ /// Helper function to write buffer B
+ std::size_t WriteBufferB(const void* buffer, std::size_t size,
+ std::size_t buffer_index = 0) const;
+
+ /// Helper function to write buffer C
+ std::size_t WriteBufferC(const void* buffer, std::size_t size,
+ std::size_t buffer_index = 0) const;
+
/* Helper function to write a buffer using the appropriate buffer descriptor
*
* @tparam T an arbitrary container that satisfies the
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 04cf86d52..f85b11557 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -90,8 +90,7 @@ public:
explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// If the thread is waiting on an address arbiter, remove it from the tree.
if (waiting_thread->IsWaitingForAddressArbiter()) {
m_tree->erase(m_tree->iterator_to(*waiting_thread));
@@ -108,7 +107,7 @@ private:
} // namespace
-ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
+Result KAddressArbiter::Signal(VAddr addr, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -131,7 +130,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
return ResultSuccess;
}
-ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -164,7 +163,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
return ResultSuccess;
}
-ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -232,9 +231,9 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
return ResultSuccess;
}
-ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
+Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{
@@ -285,9 +284,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
return cur_thread->GetWaitResult();
}
-ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
+Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index 5fa19d386..e4085ae22 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -8,7 +8,7 @@
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/svc_types.h"
-union ResultCode;
+union Result;
namespace Core {
class System;
@@ -25,8 +25,7 @@ public:
explicit KAddressArbiter(Core::System& system_);
~KAddressArbiter();
- [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
- s32 count) {
+ [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
switch (type) {
case Svc::SignalType::Signal:
return Signal(addr, count);
@@ -39,8 +38,8 @@ public:
return ResultUnknown;
}
- [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
- s64 timeout) {
+ [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
+ s64 timeout) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
return WaitIfLessThan(addr, value, false, timeout);
@@ -54,11 +53,11 @@ public:
}
private:
- [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
- [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
- [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
- [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
- [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+ [[nodiscard]] Result Signal(VAddr addr, s32 count);
+ [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
+ [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
ThreadTree thread_tree;
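For reference, these public dispatchers are what the SVC layer calls into. A rough sketch of the kernel-side call path — the GetAddressArbiter accessor and the alignment check shown are assumptions for illustration, not code from this commit:

    // Sketch: route svcWaitForAddress into the current process's arbiter.
    Result WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType type,
                          s32 value, s64 timeout_ns) {
        // Arbitration values are 32-bit, so the address must be 4-byte aligned.
        R_UNLESS(Common::IsAligned(address, sizeof(s32)), ResultInvalidAddress);
        return system.Kernel().CurrentProcess()->GetAddressArbiter().WaitForAddress(
            address, type, value, timeout_ns);
    }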
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index ef168fe87..3cb22ff4d 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2021 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
#include "core/hle/kernel/hle_ipc.h"
@@ -59,8 +58,8 @@ bool KClientPort::IsSignaled() const {
return num_sessions < max_sessions;
}
-ResultCode KClientPort::CreateSession(KClientSession** out,
- std::shared_ptr<SessionRequestManager> session_manager) {
+Result KClientPort::CreateSession(KClientSession** out,
+ std::shared_ptr<SessionRequestManager> session_manager) {
// Reserve a new session from the resource limit.
KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
LimitableResource::Sessions);
diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h
index 54bb05e20..e17eff28f 100644
--- a/src/core/hle/kernel/k_client_port.h
+++ b/src/core/hle/kernel/k_client_port.h
@@ -1,6 +1,5 @@
-// Copyright 2016 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2016 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -53,8 +52,8 @@ public:
void Destroy() override;
bool IsSignaled() const override;
- ResultCode CreateSession(KClientSession** out,
- std::shared_ptr<SessionRequestManager> session_manager = nullptr);
+ Result CreateSession(KClientSession** out,
+ std::shared_ptr<SessionRequestManager> session_manager = nullptr);
private:
std::atomic<s32> num_sessions{};
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index 731af079c..b2a887b14 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -21,8 +21,8 @@ void KClientSession::Destroy() {
void KClientSession::OnServerClosed() {}
-ResultCode KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing) {
+Result KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing) {
// Signal the server session that new data is available
return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing);
}
diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h
index 7a7ec8450..0c750d756 100644
--- a/src/core/hle/kernel/k_client_session.h
+++ b/src/core/hle/kernel/k_client_session.h
@@ -9,7 +9,7 @@
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
-union ResultCode;
+union Result;
namespace Core::Memory {
class Memory;
@@ -46,8 +46,8 @@ public:
return parent;
}
- ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing);
+ Result SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing);
void OnServerClosed();
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 4ae40ec8e..da57ceb21 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -7,7 +7,7 @@
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
@@ -19,7 +19,7 @@ namespace Kernel {
KCodeMemory::KCodeMemory(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
-ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
+Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
// Set members.
m_owner = kernel.CurrentProcess();
@@ -62,7 +62,7 @@ void KCodeMemory::Finalize() {
m_owner->Close();
}
-ResultCode KCodeMemory::Map(VAddr address, size_t size) {
+Result KCodeMemory::Map(VAddr address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -82,7 +82,7 @@ ResultCode KCodeMemory::Map(VAddr address, size_t size) {
return ResultSuccess;
}
-ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
+Result KCodeMemory::Unmap(VAddr address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -99,7 +99,7 @@ ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
return ResultSuccess;
}
-ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
+Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -133,7 +133,7 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
return ResultSuccess;
}
-ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
+Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index ab06b6f29..2e7e1436a 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -7,7 +7,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
@@ -29,20 +29,20 @@ class KCodeMemory final
public:
explicit KCodeMemory(KernelCore& kernel_);
- ResultCode Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
- void Finalize();
+ Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
+ void Finalize() override;
- ResultCode Map(VAddr address, size_t size);
- ResultCode Unmap(VAddr address, size_t size);
- ResultCode MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
- ResultCode UnmapFromOwner(VAddr address, size_t size);
+ Result Map(VAddr address, size_t size);
+ Result Unmap(VAddr address, size_t size);
+ Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
+ Result UnmapFromOwner(VAddr address, size_t size);
- bool IsInitialized() const {
+ bool IsInitialized() const override {
return m_is_initialized;
}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
- KProcess* GetOwner() const {
+ KProcess* GetOwner() const override {
return m_owner;
}
VAddr GetSourceAddress() const {
@@ -53,7 +53,7 @@ public:
}
private:
- KPageLinkedList m_page_group{};
+ KPageGroup m_page_group{};
KProcess* m_owner{};
VAddr m_address{};
KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 43bcd253d..124149697 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -61,8 +61,7 @@ public:
explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
: KThreadQueue(kernel_) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
@@ -80,8 +79,7 @@ public:
KernelCore& kernel_, KConditionVariable::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(waiting_thread);
@@ -105,8 +103,8 @@ KConditionVariable::KConditionVariable(Core::System& system_)
KConditionVariable::~KConditionVariable() = default;
-ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
- KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
+Result KConditionVariable::SignalToAddress(VAddr addr) {
+ KThread* owner_thread = GetCurrentThreadPointer(kernel);
// Signal the address.
{
@@ -126,7 +124,7 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
}
// Write the value to userspace.
- ResultCode result{ResultSuccess};
+ Result result{ResultSuccess};
if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
@@ -146,8 +144,8 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
}
}
-ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
// Wait for the address.
@@ -261,7 +259,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
}
}
-ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
+Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 7bc749d98..fad4ed011 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -25,12 +25,12 @@ public:
~KConditionVariable();
// Arbitration
- [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
- [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
+ [[nodiscard]] Result SignalToAddress(VAddr addr);
+ [[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value);
// Condition variable
void Signal(u64 cv_key, s32 count);
- [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
+ [[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
private:
void SignalImpl(KThread* thread);
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index c453927ad..e830ca46e 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -8,7 +8,7 @@ namespace Kernel {
KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
KHandleTable::~KHandleTable() = default;
-ResultCode KHandleTable::Finalize() {
+Result KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
@@ -62,7 +62,7 @@ bool KHandleTable::Remove(Handle handle) {
return true;
}
-ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
+Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
@@ -85,7 +85,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
return ResultSuccess;
}
-ResultCode KHandleTable::Reserve(Handle* out_handle) {
+Result KHandleTable::Reserve(Handle* out_handle) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index befdb2ec9..0864a737c 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -30,7 +30,7 @@ public:
explicit KHandleTable(KernelCore& kernel_);
~KHandleTable();
- ResultCode Initialize(s32 size) {
+ Result Initialize(s32 size) {
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
// Initialize all fields.
@@ -60,7 +60,7 @@ public:
return m_max_count;
}
- ResultCode Finalize();
+ Result Finalize();
bool Remove(Handle handle);
template <typename T = KAutoObject>
@@ -100,10 +100,10 @@ public:
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
- ResultCode Reserve(Handle* out_handle);
+ Result Reserve(Handle* out_handle);
void Unreserve(Handle handle);
- ResultCode Add(Handle* out_handle, KAutoObject* obj);
+ Result Add(Handle* out_handle, KAutoObject* obj);
void Register(Handle handle, KAutoObject* obj);
template <typename T>
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index cf9ed80d0..1b577a5b3 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -6,6 +6,7 @@
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
namespace Kernel::KInterruptManager {
@@ -15,8 +16,10 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
return;
}
- auto& scheduler = kernel.Scheduler(core_id);
- auto& current_thread = *scheduler.GetCurrentThread();
+ // Acknowledge the interrupt.
+ kernel.PhysicalCore(core_id).ClearInterrupt();
+
+ auto& current_thread = GetCurrentThread(kernel);
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
@@ -26,8 +29,11 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
process->PinCurrentThread(core_id);
// Set the interrupt flag for the thread.
- scheduler.GetCurrentThread()->SetInterruptFlag();
+ GetCurrentThread(kernel).SetInterruptFlag();
}
+
+ // Request interrupt scheduling.
+ kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}
} // namespace Kernel::KInterruptManager
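The handler now owns the acknowledge/reschedule sequence: the raising side sets the core's interrupt flag, and HandleInterrupt clears it before asking the current scheduler to reschedule. Condensed from the interleaved hunk above, the new flow is:

    // Raising side (e.g. scheduler rotation): kernel.PhysicalCore(core_id).Interrupt();
    // Handling side, per this hunk:
    kernel.PhysicalCore(core_id).ClearInterrupt();            // acknowledge the interrupt
    if (GetCurrentThread(kernel).GetUserDisableCount() &&
        !process->GetPinnedThread(core_id)) {
        process->PinCurrentThread(core_id);                   // pin while dispatch is user-disabled
        GetCurrentThread(kernel).SetInterruptFlag();
    }
    kernel.CurrentScheduler()->RequestScheduleOnInterrupt();  // defer the actual switch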
diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp
index a40f35f45..cade99cfd 100644
--- a/src/core/hle/kernel/k_light_condition_variable.cpp
+++ b/src/core/hle/kernel/k_light_condition_variable.cpp
@@ -17,8 +17,7 @@ public:
bool term)
: KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Only process waits if we're allowed to.
if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
return;
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 0225734b4..43185320d 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -15,8 +15,7 @@ class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(waiting_thread);
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 58e540f31..5b0a9963a 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -11,7 +11,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
@@ -208,8 +208,8 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
return allocated_block;
}
-ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
- Direction dir, bool random) {
+Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
+ Direction dir, bool random) {
// Choose a heap based on our page size request.
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, ResultOutOfMemory);
@@ -257,7 +257,7 @@ ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t nu
return ResultSuccess;
}
-ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option) {
+Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
ASSERT(out != nullptr);
ASSERT(out->GetNumPages() == 0);
@@ -293,8 +293,8 @@ ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_page
return ResultSuccess;
}
-ResultCode KMemoryManager::AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages,
- u32 option, u64 process_id, u8 fill_pattern) {
+Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
+ u64 process_id, u8 fill_pattern) {
ASSERT(out != nullptr);
ASSERT(out->GetNumPages() == 0);
@@ -370,12 +370,12 @@ void KMemoryManager::Close(PAddr address, size_t num_pages) {
}
}
-void KMemoryManager::Close(const KPageLinkedList& pg) {
+void KMemoryManager::Close(const KPageGroup& pg) {
for (const auto& node : pg.Nodes()) {
Close(node.GetAddress(), node.GetNumPages());
}
}
-void KMemoryManager::Open(const KPageLinkedList& pg) {
+void KMemoryManager::Open(const KPageGroup& pg) {
for (const auto& node : pg.Nodes()) {
Open(node.GetAddress(), node.GetNumPages());
}
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index c7923cb82..dcb9b6348 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -19,7 +19,7 @@ class System;
namespace Kernel {
-class KPageLinkedList;
+class KPageGroup;
class KMemoryManager final {
public:
@@ -65,17 +65,17 @@ public:
}
PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
- ResultCode AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option);
- ResultCode AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, u32 option,
- u64 process_id, u8 fill_pattern);
+ Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
+ Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
+ u8 fill_pattern);
static constexpr size_t MaxManagerCount = 10;
void Close(PAddr address, size_t num_pages);
- void Close(const KPageLinkedList& pg);
+ void Close(const KPageGroup& pg);
void Open(PAddr address, size_t num_pages);
- void Open(const KPageLinkedList& pg);
+ void Open(const KPageGroup& pg);
public:
static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -262,8 +262,8 @@ private:
}
}
- ResultCode AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
- Direction dir, bool random);
+ Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
+ bool random);
private:
Core::System& system;
diff --git a/src/core/hle/kernel/k_page_linked_list.h b/src/core/hle/kernel/k_page_group.h
index 1f79c8330..968753992 100644
--- a/src/core/hle/kernel/k_page_linked_list.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -12,7 +12,7 @@
namespace Kernel {
-class KPageLinkedList final {
+class KPageGroup final {
public:
class Node final {
public:
@@ -36,8 +36,8 @@ public:
};
public:
- KPageLinkedList() = default;
- KPageLinkedList(u64 address, u64 num_pages) {
+ KPageGroup() = default;
+ KPageGroup(u64 address, u64 num_pages) {
ASSERT(AddBlock(address, num_pages).IsSuccess());
}
@@ -57,7 +57,7 @@ public:
return num_pages;
}
- bool IsEqual(KPageLinkedList& other) const {
+ bool IsEqual(KPageGroup& other) const {
auto this_node = nodes.begin();
auto other_node = other.nodes.begin();
while (this_node != nodes.end() && other_node != other.nodes.end()) {
@@ -72,7 +72,7 @@ public:
return this_node == nodes.end() && other_node == other.nodes.end();
}
- ResultCode AddBlock(u64 address, u64 num_pages) {
+ Result AddBlock(u64 address, u64 num_pages) {
if (!num_pages) {
return ResultSuccess;
}
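KPageGroup keeps the old KPageLinkedList interface, so call sites change only the type name. A minimal usage sketch under that assumption — allocate pages into a group, then walk its physically contiguous nodes (MapRange is a hypothetical consumer, and the Direction argument is assumed):

    Result AllocateAndMapExample(KernelCore& kernel, size_t num_pages) {
        KPageGroup pg;
        // Allocation signature as updated in k_memory_manager.h above.
        R_TRY(kernel.MemoryManager().AllocateAndOpen(
            std::addressof(pg), num_pages,
            KMemoryManager::EncodeOption(KMemoryManager::Pool::Application,
                                         KMemoryManager::Direction::FromFront)));
        for (const auto& node : pg.Nodes()) {
            // Each node is one physically contiguous run of pages.
            MapRange(node.GetAddress(), node.GetNumPages());
        }
        return ResultSuccess;
    }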
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 504e22cb9..d975de844 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -9,7 +9,7 @@
#include "core/hle/kernel/k_address_space_info.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -47,9 +47,9 @@ KPageTable::KPageTable(Core::System& system_)
KPageTable::~KPageTable() = default;
-ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
- bool enable_aslr, VAddr code_addr,
- std::size_t code_size, KMemoryManager::Pool pool) {
+Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+ VAddr code_addr, std::size_t code_size,
+ KMemoryManager::Pool pool) {
const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
@@ -65,7 +65,6 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
- ASSERT(start <= code_addr);
ASSERT(code_addr < code_addr + code_size);
ASSERT(code_addr + code_size - 1 <= end - 1);
@@ -258,8 +257,8 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
return InitializeMemoryLayout(start, end);
}
-ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
+Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
const u64 size{num_pages * PageSize};
// Validate the mapping request.
@@ -272,7 +271,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
- KPageLinkedList pg;
+ KPageGroup pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
&pg, num_pages,
KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
@@ -284,7 +283,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
return ResultSuccess;
}
-ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -314,7 +313,7 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
const std::size_t num_pages = size / PageSize;
// Create page groups for the memory being mapped.
- KPageLinkedList pg;
+ KPageGroup pg;
AddRegionToPages(src_address, num_pages, pg);
// Reprotect the source as kernel-read/not mapped.
@@ -345,8 +344,8 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
return ResultSuccess;
}
-ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy) {
+Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -490,7 +489,7 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
return address;
}
-ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
+Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
@@ -542,7 +541,7 @@ ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num
return ResultSuccess;
}
-bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
@@ -631,8 +630,8 @@ bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}
-ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
- KPageTable& src_page_table, VAddr src_addr) {
+Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+ VAddr src_addr) {
KScopedLightLock lk(general_lock);
const std::size_t num_pages{size / PageSize};
@@ -661,7 +660,7 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
return ResultSuccess;
}
-ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
@@ -722,7 +721,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the new memory.
- KPageLinkedList pg;
+ KPageGroup pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
&pg, (size - mapped_size) / PageSize,
KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
@@ -904,7 +903,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
}
}
-ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
@@ -973,7 +972,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
}
// Make a page group for the unmap region.
- KPageLinkedList pg;
+ KPageGroup pg;
{
auto& impl = this->PageTableImpl();
@@ -1135,7 +1134,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryState src_state{};
@@ -1148,7 +1147,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
return ResultInvalidCurrentMemory;
}
- KPageLinkedList page_linked_list;
+ KPageGroup page_linked_list;
const std::size_t num_pages{size / PageSize};
AddRegionToPages(src_addr, num_pages, page_linked_list);
@@ -1174,7 +1173,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
return ResultSuccess;
}
-ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryState src_state{};
@@ -1189,8 +1188,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
KMemoryPermission::None, KMemoryAttribute::Mask,
KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
- KPageLinkedList src_pages;
- KPageLinkedList dst_pages;
+ KPageGroup src_pages;
+ KPageGroup dst_pages;
const std::size_t num_pages{size / PageSize};
AddRegionToPages(src_addr, num_pages, src_pages);
@@ -1216,8 +1215,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
return ResultSuccess;
}
-ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
- KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
+ KMemoryPermission perm) {
ASSERT(this->IsLockedByCurrentThread());
VAddr cur_addr{addr};
@@ -1240,8 +1239,8 @@ ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_l
return ResultSuccess;
}
-ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list,
- KMemoryState state, KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
+ KMemoryPermission perm) {
// Check that the map is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
@@ -1264,10 +1263,10 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
return ResultSuccess;
}
-ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
- PAddr phys_addr, bool is_pa_valid, VAddr region_start,
- std::size_t region_num_pages, KMemoryState state,
- KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
+ PAddr phys_addr, bool is_pa_valid, VAddr region_start,
+ std::size_t region_num_pages, KMemoryState state,
+ KMemoryPermission perm) {
ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
// Ensure this is a valid map request.
@@ -1304,7 +1303,7 @@ ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::siz
return ResultSuccess;
}
-ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
+Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
ASSERT(this->IsLockedByCurrentThread());
VAddr cur_addr{addr};
@@ -1322,8 +1321,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
return ResultSuccess;
}
-ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
- KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) {
// Check that the unmap is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
@@ -1346,7 +1344,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
return ResultSuccess;
}
-ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
// Check that the unmap is in range.
const std::size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1370,10 +1368,10 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
return ResultSuccess;
}
-ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
// Ensure that the page group isn't null.
ASSERT(out != nullptr);
@@ -1395,8 +1393,8 @@ ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address,
return ResultSuccess;
}
-ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
+ Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
@@ -1468,7 +1466,7 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
return QueryInfoImpl(addr);
}
-ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
+Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
KScopedLightLock lk(general_lock);
KMemoryState state{};
@@ -1486,7 +1484,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
return ResultSuccess;
}
-ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
+Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryState state{};
@@ -1501,8 +1499,8 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
+ Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
@@ -1529,7 +1527,7 @@ ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
return ResultSuccess;
}
-ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
const size_t num_pages = size / PageSize;
ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
KMemoryAttribute::SetMask);
@@ -1564,7 +1562,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
return ResultSuccess;
}
-ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
+Result KPageTable::SetMaxHeapSize(std::size_t size) {
// Lock the table.
KScopedLightLock lk(general_lock);
@@ -1576,7 +1574,7 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
// Lock the physical memory mutex.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
@@ -1643,7 +1641,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the heap extension.
- KPageLinkedList pg;
+ KPageGroup pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
&pg, allocation_size / PageSize,
KMemoryManager::EncodeOption(memory_pool, allocation_option)));
@@ -1718,7 +1716,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
if (is_map_only) {
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
- KPageLinkedList page_group;
+ KPageGroup page_group;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
&page_group, needed_num_pages,
KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
@@ -1730,11 +1728,11 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
return addr;
}
-ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
+Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryPermission perm{};
- if (const ResultCode result{CheckMemoryState(
+ if (const Result result{CheckMemoryState(
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
@@ -1753,11 +1751,11 @@ ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
+Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryPermission perm{};
- if (const ResultCode result{CheckMemoryState(
+ if (const Result result{CheckMemoryState(
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
@@ -1776,7 +1774,7 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
return ResultSuccess;
}
-ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size) {
+Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
return this->LockMemoryAndOpen(
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
@@ -1786,15 +1784,14 @@ ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::
KMemoryAttribute::Locked);
}
-ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size,
- const KPageLinkedList& pg) {
+Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
return this->UnlockMemory(
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
}
-ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
+Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
block_manager = std::make_unique<KMemoryBlockManager>(start, end);
return ResultSuccess;
@@ -1819,7 +1816,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
}
void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
- KPageLinkedList& page_linked_list) {
+ KPageGroup& page_linked_list) {
VAddr addr{start};
while (addr < start + (num_pages * PageSize)) {
const PAddr paddr{GetPhysicalAddr(addr)};
@@ -1838,8 +1835,8 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_page
IsKernel() ? 1 : 4);
}
-ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
- OperationType operation) {
+Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+ OperationType operation) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(Common::IsAligned(addr, PageSize));
@@ -1863,8 +1860,8 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
return ResultSuccess;
}
-ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
- OperationType operation, PAddr map_addr) {
+Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+ OperationType operation, PAddr map_addr) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(num_pages > 0);
@@ -2006,10 +2003,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
}
}
-ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
+Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
// Validate the states match expectation.
R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory);
R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory);
@@ -2018,12 +2015,11 @@ ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState st
return ResultSuccess;
}
-ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
- std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm,
- KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
+Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
+ std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
@@ -2061,12 +2057,12 @@ ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed
return ResultSuccess;
}
-ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
- VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
+ VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
@@ -2123,11 +2119,11 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
return ResultSuccess;
}
-ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
- size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
// Validate basic preconditions.
ASSERT((lock_attr & attr) == KMemoryAttribute::None);
ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
@@ -2181,11 +2177,11 @@ ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_pad
return ResultSuccess;
}
-ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
+Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr, const KPageGroup* pg) {
// Validate basic preconditions.
ASSERT((attr_mask & lock_attr) == lock_attr);
ASSERT((attr & lock_attr) == lock_attr);
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 6312eb682..25774f232 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -33,51 +33,49 @@ public:
explicit KPageTable(Core::System& system_);
~KPageTable();
- ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- VAddr code_addr, std::size_t code_size,
- KMemoryManager::Pool pool);
- ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
- KMemoryPermission perm);
- ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
- ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy);
- ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
- VAddr src_addr);
- ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
- ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
- ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
- ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
- ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
- KMemoryPermission perm);
- ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
- PAddr phys_addr, KMemoryState state, KMemoryPermission perm) {
+ Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+ VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
+ Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+ Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy);
+ Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+ VAddr src_addr);
+ Result MapPhysicalMemory(VAddr addr, std::size_t size);
+ Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
+ Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+ Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+ Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+ KMemoryState state, KMemoryPermission perm) {
return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
state, perm);
}
- ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
- ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
- ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm);
+ Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
+ Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
+ Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
- ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
- ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
- ResultCode SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
- ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
- ResultCode SetMaxHeapSize(std::size_t size);
- ResultCode SetHeapSize(VAddr* out, std::size_t size);
+ Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
+ Result ResetTransferMemory(VAddr addr, std::size_t size);
+ Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
+ Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
+ Result SetMaxHeapSize(std::size_t size);
+ Result SetHeapSize(VAddr* out, std::size_t size);
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
KMemoryPermission perm, PAddr map_addr = 0);
- ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
- ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
- ResultCode LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size);
- ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageLinkedList& pg);
- ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr);
+ Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
+ Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
+ Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
+ Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
+ Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
Common::PageTable& PageTableImpl() {
return page_table_impl;
@@ -102,83 +100,78 @@ private:
KMemoryAttribute::IpcLocked |
KMemoryAttribute::DeviceShared;
- ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
- ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
- KMemoryPermission perm);
- ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
- PAddr phys_addr, bool is_pa_valid, VAddr region_start,
- std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
- ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
+ Result InitializeMemoryLayout(VAddr start, VAddr end);
+ Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
+ Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+ bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm);
+ Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
- void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
+ void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(VAddr addr);
VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
std::size_t align);
- ResultCode Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
- OperationType operation);
- ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
- OperationType operation, PAddr map_addr = 0);
+ Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+ OperationType operation);
+ Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+ OperationType operation, PAddr map_addr = 0);
VAddr GetRegionAddress(KMemoryState state) const;
std::size_t GetRegionSize(KMemoryState state) const;
VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
std::size_t alignment, std::size_t offset, std::size_t guard_pages);
- ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
- std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const;
- ResultCode CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
+ Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
perm, attr_mask, attr);
}
- ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const;
- ResultCode CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
- VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- ResultCode CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
+ std::size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+ Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
}
- ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
attr_mask, attr, ignore_attr);
}
- ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr);
- ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr,
- const KPageLinkedList* pg);
+ Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+ Result UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+ const KPageGroup* pg);
- ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
- bool IsValidPageGroup(const KPageLinkedList& pg, VAddr addr, size_t num_pages);
+ Result MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages);
+ bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();
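The rewritten header keeps the pattern of thin CheckMemoryState/CheckMemoryStateContiguous overloads that forward to the full signature, passing nullptr for every out-parameter the caller does not need, so the full version must tolerate null outputs. A sketch of that forwarding idiom with hypothetical names:

#include <cstddef>

struct Validator {
    // Full form: every out-parameter is optional.
    int Check(int* out_state, std::size_t* out_blocks_needed, int addr) const {
        if (out_state != nullptr) {
            *out_state = addr & 0xF;
        }
        if (out_blocks_needed != nullptr) {
            *out_blocks_needed = 1;
        }
        return 0; // success
    }

    // Convenience overload, as in the header above: validate and discard.
    int Check(int addr) const {
        return this->Check(nullptr, nullptr, addr);
    }
};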
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index 51c2cd1ef..7a5a9dc2a 100644
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
@@ -50,7 +50,7 @@ bool KPort::IsServerClosed() const {
return state == State::ServerClosed;
}
-ResultCode KPort::EnqueueSession(KServerSession* session) {
+Result KPort::EnqueueSession(KServerSession* session) {
KScopedSchedulerLock sl{kernel};
R_UNLESS(state == State::Normal, ResultPortClosed);
diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h
index 1bfecf8c3..0cfc16dab 100644
--- a/src/core/hle/kernel/k_port.h
+++ b/src/core/hle/kernel/k_port.h
@@ -34,7 +34,7 @@ public:
bool IsServerClosed() const;
- ResultCode EnqueueSession(KServerSession* session);
+ Result EnqueueSession(KServerSession* session);
KClientPort& GetClientPort() {
return client;
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index cd863e715..d3e99665f 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1,6 +1,5 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <bitset>
@@ -57,23 +56,18 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
thread->GetContext64().cpu_registers[0] = 0;
thread->GetContext32().cpu_registers[1] = thread_handle;
thread->GetContext64().cpu_registers[1] = thread_handle;
- thread->DisableDispatch();
- auto& kernel = system.Kernel();
- // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
- {
- KScopedSchedulerLock lock{kernel};
- thread->SetState(ThreadState::Runnable);
-
- if (system.DebuggerEnabled()) {
- thread->RequestSuspend(SuspendType::Debug);
- }
+ if (system.DebuggerEnabled()) {
+ thread->RequestSuspend(SuspendType::Debug);
}
+
+ // Run our thread.
+ void(thread->Run());
}
} // Anonymous namespace
-ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type, KResourceLimit* res_limit) {
+Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
+ ProcessType type, KResourceLimit* res_limit) {
auto& kernel = system.Kernel();
process->name = std::move(process_name);
@@ -166,7 +160,7 @@ bool KProcess::ReleaseUserException(KThread* thread) {
std::addressof(num_waiters),
reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
next != nullptr) {
- next->SetState(ThreadState::Runnable);
+ next->EndWait(ResultSuccess);
}
KScheduler::SetSchedulerUpdateNeeded(kernel);
@@ -181,7 +175,8 @@ void KProcess::PinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// If the thread isn't terminated, pin it.
if (!cur_thread->IsTerminationRequested()) {
@@ -198,7 +193,8 @@ void KProcess::UnpinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// Unpin it.
cur_thread->Unpin();
@@ -222,8 +218,8 @@ void KProcess::UnpinThread(KThread* thread) {
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
-ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
- [[maybe_unused]] size_t size) {
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+ [[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(state_lock);
@@ -287,7 +283,7 @@ void KProcess::UnregisterThread(KThread* thread) {
thread_list.remove(thread);
}
-ResultCode KProcess::Reset() {
+Result KProcess::Reset() {
// Lock the process and the scheduler.
KScopedLightLock lk(state_lock);
KScopedSchedulerLock sl{kernel};
@@ -301,7 +297,7 @@ ResultCode KProcess::Reset() {
return ResultSuccess;
}
-ResultCode KProcess::SetActivity(ProcessActivity activity) {
+Result KProcess::SetActivity(ProcessActivity activity) {
// Lock ourselves and the scheduler.
KScopedLightLock lk{state_lock};
KScopedLightLock list_lk{list_lock};
@@ -345,8 +341,7 @@ ResultCode KProcess::SetActivity(ProcessActivity activity) {
return ResultSuccess;
}
-ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
- std::size_t code_size) {
+Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
program_id = metadata.GetTitleID();
ideal_core = metadata.GetMainThreadCore();
is_64bit_process = metadata.Is64BitProgram();
@@ -361,24 +356,24 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
return ResultLimitReached;
}
// Initialize process address space
- if (const ResultCode result{
- page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
- code_size, KMemoryManager::Pool::Application)};
+ if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
+ 0x8000000, code_size,
+ KMemoryManager::Pool::Application)};
result.IsError()) {
return result;
}
// Map process code region
- if (const ResultCode result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
- code_size / PageSize, KMemoryState::Code,
- KMemoryPermission::None)};
+ if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
+ code_size / PageSize, KMemoryState::Code,
+ KMemoryPermission::None)};
result.IsError()) {
return result;
}
// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
- if (const ResultCode result{
+ if (const Result result{
capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
result.IsError()) {
return result;
@@ -425,11 +420,11 @@ void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
- for (auto& thread : in_thread_list) {
+ for (auto* thread : in_thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
- if (thread == kernel.CurrentScheduler()->GetCurrentThread())
+ if (thread == GetCurrentThreadPointer(kernel))
continue;
// TODO(Subv): When are the other running/ready threads terminated?
@@ -485,7 +480,7 @@ void KProcess::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
}
-ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
+Result KProcess::CreateThreadLocalRegion(VAddr* out) {
KThreadLocalPage* tlp = nullptr;
VAddr tlr = 0;
@@ -536,7 +531,7 @@ ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
return ResultSuccess;
}
-ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
+Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
KThreadLocalPage* page_to_free = nullptr;
// Release the region.
@@ -584,6 +579,52 @@ ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
return ResultSuccess;
}
+bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
+ DebugWatchpointType type) {
+ const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+ return wp.type == DebugWatchpointType::None;
+ })};
+
+ if (watch == watchpoints.end()) {
+ return false;
+ }
+
+ watch->start_address = addr;
+ watch->end_address = addr + size;
+ watch->type = type;
+
+ for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+ debug_page_refcounts[page]++;
+ system.Memory().MarkRegionDebug(page, PageSize, true);
+ }
+
+ return true;
+}
+
+bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
+ DebugWatchpointType type) {
+ const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+ return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
+ })};
+
+ if (watch == watchpoints.end()) {
+ return false;
+ }
+
+ watch->start_address = 0;
+ watch->end_address = 0;
+ watch->type = DebugWatchpointType::None;
+
+ for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+ debug_page_refcounts[page]--;
+ if (!debug_page_refcounts[page]) {
+ system.Memory().MarkRegionDebug(page, PageSize, false);
+ }
+ }
+
+ return true;
+}
+
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Svc::MemoryPermission permission) {
@@ -621,7 +662,7 @@ void KProcess::ChangeStatus(ProcessStatus new_status) {
NotifyAvailable();
}
-ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) {
+Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
ASSERT(stack_size);
// The kernel always ensures that the given stack size is page aligned.
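InsertWatchpoint and RemoveWatchpoint above mark every page a watchpoint touches as debug memory and keep a per-page refcount so that removing one watchpoint never unmarks a page still covered by another. A self-contained sketch of that bookkeeping; MarkPageDebug is a hypothetical stand-in for system.Memory().MarkRegionDebug:

#include <cstdint>
#include <map>

using VAddr = std::uint64_t;
constexpr VAddr PageSize = 0x1000;

std::map<VAddr, std::uint64_t> debug_page_refcounts;

// Hypothetical stand-in for the memory system's debug-marking call above.
void MarkPageDebug(VAddr page, bool debug) {
    (void)page;
    (void)debug;
}

void AddWatchedRange(VAddr addr, std::uint64_t size) {
    for (VAddr page = addr & ~(PageSize - 1); page < addr + size; page += PageSize) {
        debug_page_refcounts[page]++;
        MarkPageDebug(page, true);
    }
}

void RemoveWatchedRange(VAddr addr, std::uint64_t size) {
    for (VAddr page = addr & ~(PageSize - 1); page < addr + size; page += PageSize) {
        if (--debug_page_refcounts[page] == 0) {
            MarkPageDebug(page, false); // last watchpoint on this page is gone
        }
    }
}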
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index e562a79b8..d56d73bab 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -1,12 +1,12 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <cstddef>
#include <list>
+#include <map>
#include <string>
#include "common/common_types.h"
#include "core/hle/kernel/k_address_arbiter.h"
@@ -68,6 +68,20 @@ enum class ProcessActivity : u32 {
Paused,
};
+enum class DebugWatchpointType : u8 {
+ None = 0,
+ Read = 1 << 0,
+ Write = 1 << 1,
+ ReadOrWrite = Read | Write,
+};
+DECLARE_ENUM_FLAG_OPERATORS(DebugWatchpointType);
+
+struct DebugWatchpoint {
+ VAddr start_address;
+ VAddr end_address;
+ DebugWatchpointType type;
+};
+
class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask> {
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
@@ -95,8 +109,8 @@ public:
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
- static ResultCode Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type, KResourceLimit* res_limit);
+ static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
+ ProcessType type, KResourceLimit* res_limit);
/// Gets a reference to the process' page table.
KPageTable& PageTable() {
@@ -118,11 +132,11 @@ public:
return handle_table;
}
- ResultCode SignalToAddress(VAddr address) {
+ Result SignalToAddress(VAddr address) {
return condition_var.SignalToAddress(address);
}
- ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
+ Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
return condition_var.WaitForAddress(handle, address, tag);
}
@@ -130,17 +144,16 @@ public:
return condition_var.Signal(cv_key, count);
}
- ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+ Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
return condition_var.Wait(address, cv_key, tag, ns);
}
- ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
- s32 count) {
+ Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
return address_arbiter.SignalToAddress(address, signal_type, value, count);
}
- ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
- s64 timeout) {
+ Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+ s64 timeout) {
return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
}
@@ -307,7 +320,7 @@ public:
/// @pre The process must be in a signaled state. If this is called on a
/// process instance that is not signaled, ERR_INVALID_STATE will be
/// returned.
- ResultCode Reset();
+ Result Reset();
/**
* Loads process-specific configuration info with metadata provided
@@ -318,7 +331,7 @@ public:
* @returns ResultSuccess if all relevant metadata was able to be
* loaded and parsed. Otherwise, an error code is returned.
*/
- ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
+ Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
/**
* Starts the main application thread for this process.
@@ -352,7 +365,7 @@ public:
void DoWorkerTaskImpl();
- ResultCode SetActivity(ProcessActivity activity);
+ Result SetActivity(ProcessActivity activity);
void PinCurrentThread(s32 core_id);
void UnpinCurrentThread(s32 core_id);
@@ -362,17 +375,30 @@ public:
return state_lock;
}
- ResultCode AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
+ Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
- [[nodiscard]] ResultCode CreateThreadLocalRegion(VAddr* out);
+ [[nodiscard]] Result CreateThreadLocalRegion(VAddr* out);
// Frees a used TLS slot identified by the given address
- ResultCode DeleteThreadLocalRegion(VAddr addr);
+ Result DeleteThreadLocalRegion(VAddr addr);
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////
+ // Debug watchpoint management
+
+ // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
+ bool InsertWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+
+ // Attempts to remove the watchpoint specified by the given parameters.
+ bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+
+ const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
+ return watchpoints;
+ }
private:
void PinThread(s32 core_id, KThread* thread) {
@@ -395,7 +421,7 @@ private:
void ChangeStatus(ProcessStatus new_status);
/// Allocates the main thread stack for the process, given the stack size in bytes.
- ResultCode AllocateMainThreadStack(std::size_t stack_size);
+ Result AllocateMainThreadStack(std::size_t stack_size);
/// Memory manager for this process
std::unique_ptr<KPageTable> page_table;
@@ -478,6 +504,8 @@ private:
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
+ std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> watchpoints{};
+ std::map<VAddr, u64> debug_page_refcounts;
KThread* exception_thread{};
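DebugWatchpointType is a bit-flag enum; inside the enum body the fixed u8 underlying type already makes Read | Write well-formed, and DECLARE_ENUM_FLAG_OPERATORS supplies the bitwise operators for combining values after the definition. A sketch of roughly what such a macro provides for one operator (an assumption about its shape, not the actual expansion):

#include <cstdint>
#include <type_traits>

enum class WatchType : std::uint8_t {
    None = 0,
    Read = 1 << 0,
    Write = 1 << 1,
    ReadOrWrite = Read | Write,
};

// One of the operators a flag-operator macro would generate.
constexpr WatchType operator|(WatchType lhs, WatchType rhs) {
    using U = std::underlying_type_t<WatchType>;
    return static_cast<WatchType>(static_cast<U>(lhs) | static_cast<U>(rhs));
}

constexpr bool Any(WatchType type) {
    return static_cast<std::underlying_type_t<WatchType>>(type) != 0;
}

static_assert(Any(WatchType::Read | WatchType::Write));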
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
index dddba554d..94c5464fe 100644
--- a/src/core/hle/kernel/k_readable_event.cpp
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -27,7 +27,7 @@ void KReadableEvent::Destroy() {
}
}
-ResultCode KReadableEvent::Signal() {
+Result KReadableEvent::Signal() {
KScopedSchedulerLock lk{kernel};
if (!is_signaled) {
@@ -38,13 +38,13 @@ ResultCode KReadableEvent::Signal() {
return ResultSuccess;
}
-ResultCode KReadableEvent::Clear() {
+Result KReadableEvent::Clear() {
Reset();
return ResultSuccess;
}
-ResultCode KReadableEvent::Reset() {
+Result KReadableEvent::Reset() {
KScopedSchedulerLock lk{kernel};
if (!is_signaled) {
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
index 5065c7cc0..18dcad289 100644
--- a/src/core/hle/kernel/k_readable_event.h
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -33,9 +33,9 @@ public:
bool IsSignaled() const override;
void Destroy() override;
- ResultCode Signal();
- ResultCode Clear();
- ResultCode Reset();
+ Result Signal();
+ Result Clear();
+ Result Reset();
private:
bool is_signaled{};
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index 3e0ecffdb..010dcf99e 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -73,7 +73,7 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
return value;
}
-ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
+Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
const auto index = static_cast<std::size_t>(which);
KScopedLightLock lk(lock);
R_UNLESS(current_values[index] <= value, ResultInvalidState);
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
index 43bf74b8d..65c98c979 100644
--- a/src/core/hle/kernel/k_resource_limit.h
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -8,7 +8,7 @@
#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h"
-union ResultCode;
+union Result;
namespace Core::Timing {
class CoreTiming;
@@ -46,7 +46,7 @@ public:
s64 GetPeakValue(LimitableResource which) const;
s64 GetFreeValue(LimitableResource which) const;
- ResultCode SetLimitValue(LimitableResource which, s64 value);
+ Result SetLimitValue(LimitableResource which, s64 value);
bool Reserve(LimitableResource which, s64 value);
bool Reserve(LimitableResource which, s64 value, s64 timeout);
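SetLimitValue above enforces one invariant: a limit can never be set below the resource's current usage (R_UNLESS(current_values[index] <= value, ResultInvalidState)). A tiny sketch of that rule with plain integers:

#include <cstdint>

// Returns false where the kernel would return ResultInvalidState.
bool SetLimit(std::int64_t current_usage, std::int64_t& limit, std::int64_t value) {
    if (current_usage > value) {
        return false; // cannot shrink a limit below what is already in use
    }
    limit = value;
    return true;
}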
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 2d4e8637b..c34ce7a17 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -27,69 +27,185 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
}
}
-void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
- auto scheduler = kernel.CurrentScheduler();
-
- u32 current_core{0xF};
- bool must_context_switch{};
- if (scheduler) {
- current_core = scheduler->core_id;
- // TODO(bunnei): Should be set to true when we deprecate single core
- must_context_switch = !kernel.IsPhantomModeForSingleCore();
- }
-
- while (cores_pending_reschedule != 0) {
- const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
- ASSERT(core < Core::Hardware::NUM_CPU_CORES);
- if (!must_context_switch || core != current_core) {
- auto& phys_core = kernel.PhysicalCore(core);
- phys_core.Interrupt();
+KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
+ m_switch_fiber = std::make_shared<Common::Fiber>([this] {
+ while (true) {
+ ScheduleImplFiber();
}
- cores_pending_reschedule &= ~(1ULL << core);
+ });
+
+ m_state.needs_scheduling = true;
+}
+
+KScheduler::~KScheduler() = default;
+
+void KScheduler::SetInterruptTaskRunnable() {
+ m_state.interrupt_task_runnable = true;
+ m_state.needs_scheduling = true;
+}
+
+void KScheduler::RequestScheduleOnInterrupt() {
+ m_state.needs_scheduling = true;
+
+ if (CanSchedule(kernel)) {
+ ScheduleOnInterrupt();
}
+}
- for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
- if (kernel.PhysicalCore(core_id).IsInterrupted()) {
- KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_id));
- }
+void KScheduler::DisableScheduling(KernelCore& kernel) {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
+ GetCurrentThread(kernel).DisableDispatch();
+}
+
+void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1);
+
+ auto* scheduler{kernel.CurrentScheduler()};
+
+ if (!scheduler || kernel.IsPhantomModeForSingleCore()) {
+ KScheduler::RescheduleCores(kernel, cores_needing_scheduling);
+ KScheduler::RescheduleCurrentHLEThread(kernel);
+ return;
}
- if (must_context_switch) {
- auto core_scheduler = kernel.CurrentScheduler();
- kernel.ExitSVCProfile();
- core_scheduler->RescheduleCurrentCore();
- kernel.EnterSVCProfile();
+ scheduler->RescheduleOtherCores(cores_needing_scheduling);
+
+ if (GetCurrentThread(kernel).GetDisableDispatchCount() > 1) {
+ GetCurrentThread(kernel).EnableDispatch();
+ } else {
+ scheduler->RescheduleCurrentCore();
}
}
+void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) {
+ // HACK: we cannot schedule from this thread; it is not a core thread
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+
+ // Special case to ensure that dummy threads which are waiting actually block
+ GetCurrentThread(kernel).IfDummyThreadTryWait();
+
+ ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
+ GetCurrentThread(kernel).EnableDispatch();
+}
+
+u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
+ if (IsSchedulerUpdateNeeded(kernel)) {
+ return UpdateHighestPriorityThreadsImpl(kernel);
+ } else {
+ return 0;
+ }
+}
+
+void KScheduler::Schedule() {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+ ASSERT(m_core_id == GetCurrentCoreId(kernel));
+
+ ScheduleImpl();
+}
+
+void KScheduler::ScheduleOnInterrupt() {
+ GetCurrentThread(kernel).DisableDispatch();
+ Schedule();
+ GetCurrentThread(kernel).EnableDispatch();
+}
+
+void KScheduler::PreemptSingleCore() {
+ GetCurrentThread(kernel).DisableDispatch();
+
+ auto* thread = GetCurrentThreadPointer(kernel);
+ auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore());
+ previous_scheduler.Unload(thread);
+
+ Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber);
+
+ GetCurrentThread(kernel).EnableDispatch();
+}
+
+void KScheduler::RescheduleCurrentCore() {
+ ASSERT(!kernel.IsPhantomModeForSingleCore());
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+
+ GetCurrentThread(kernel).EnableDispatch();
+
+ if (m_state.needs_scheduling.load()) {
+ // Disable interrupts, and then check again if rescheduling is needed.
+ // KScopedInterruptDisable intr_disable;
+
+ kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
+ }
+}
+
+void KScheduler::RescheduleCurrentCoreImpl() {
+ // Check that scheduling is needed.
+ if (m_state.needs_scheduling.load()) [[likely]] {
+ GetCurrentThread(kernel).DisableDispatch();
+ Schedule();
+ GetCurrentThread(kernel).EnableDispatch();
+ }
+}
+
+void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) {
+ // Set core ID/idle thread/interrupt task manager.
+ m_core_id = core_id;
+ m_idle_thread = idle_thread;
+ // m_state.idle_thread_stack = m_idle_thread->GetStackTop();
+ // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager();
+
+ // Insert the main thread into the priority queue.
+ // {
+ // KScopedSchedulerLock lk{kernel};
+ // GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel));
+ // SetSchedulerUpdateNeeded(kernel);
+ // }
+
+ // Bind interrupt handler.
+ // kernel.GetInterruptManager().BindHandler(
+ // GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
+ // KInterruptController::PriorityLevel::Scheduler, false, false);
+
+ // Set the current thread.
+ m_current_thread = main_thread;
+}
+
+void KScheduler::Activate() {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+
+ // m_state.should_count_idle = KTargetSystem::IsDebugMode();
+ m_is_active = true;
+ RescheduleCurrentCore();
+}
+
+void KScheduler::OnThreadStart() {
+ GetCurrentThread(kernel).EnableDispatch();
+}
+
u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
- KScopedSpinLock lk{guard};
- if (KThread* prev_highest_thread = state.highest_priority_thread;
- prev_highest_thread != highest_thread) {
- if (prev_highest_thread != nullptr) {
+ if (KThread* prev_highest_thread = m_state.highest_priority_thread;
+ prev_highest_thread != highest_thread) [[likely]] {
+ if (prev_highest_thread != nullptr) [[likely]] {
IncrementScheduledCount(prev_highest_thread);
- prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
+ prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks());
}
- if (state.should_count_idle) {
- if (highest_thread != nullptr) {
+ if (m_state.should_count_idle) {
+ if (highest_thread != nullptr) [[likely]] {
if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
- process->SetRunningThread(core_id, highest_thread, state.idle_count);
+ process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count);
}
} else {
- state.idle_count++;
+ m_state.idle_count++;
}
}
- state.highest_priority_thread = highest_thread;
- state.needs_scheduling.store(true);
- return (1ULL << core_id);
+ m_state.highest_priority_thread = highest_thread;
+ m_state.needs_scheduling = true;
+ return (1ULL << m_core_id);
} else {
return 0;
}
}
u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// Clear that we need to update.
ClearSchedulerUpdateNeeded(kernel);
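DisableScheduling and EnableScheduling above maintain a per-thread disable-dispatch count that nests like a reentrant guard: inner calls merely unwind the count, and only the outermost EnableScheduling actually reschedules the current core. A sketch of that discipline (hypothetical guard type, not the kernel's bookkeeping):

#include <cassert>
#include <functional>

struct DispatchCounter {
    int disable_count = 0;

    void Disable() {
        ++disable_count;
    }

    // Nested calls only unwind the count; the outermost call triggers the
    // actual reschedule, mirroring EnableScheduling above.
    void Enable(const std::function<void()>& reschedule_current_core) {
        assert(disable_count >= 1);
        if (--disable_count == 0) {
            reschedule_current_core();
        }
    }
};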
@@ -98,18 +214,20 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
auto& priority_queue = GetPriorityQueue(kernel);
- /// We want to go over all cores, finding the highest priority thread and determining if
- /// scheduling is needed for that core.
+ // We want to go over all cores, finding the highest priority thread and determining if
+ // scheduling is needed for that core.
for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
if (top_thread != nullptr) {
- // If the thread has no waiters, we need to check if the process has a thread pinned.
- if (top_thread->GetNumKernelWaiters() == 0) {
- if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
- if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
- pinned != nullptr && pinned != top_thread) {
- // We prefer our parent's pinned thread if possible. However, we also don't
- // want to schedule un-runnable threads.
+ // We need to check if the thread's process has a pinned thread.
+ if (KProcess* parent = top_thread->GetOwnerProcess()) {
+ // Check that there's a pinned thread other than the current top thread.
+ if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
+ pinned != nullptr && pinned != top_thread) {
+ // We need to prefer threads with kernel waiters to the pinned thread.
+ if (top_thread->GetNumKernelWaiters() ==
+ 0 /* && top_thread != parent->GetExceptionThread() */) {
+ // If the pinned thread is runnable, use it.
if (pinned->GetRawState() == ThreadState::Runnable) {
top_thread = pinned;
} else {
@@ -129,7 +247,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
while (idle_cores != 0) {
- const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
+ const s32 core_id = static_cast<s32>(std::countr_zero(idle_cores));
+
if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
size_t num_candidates = 0;
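The idle-core loop above walks a 64-bit mask one set bit at a time with std::countr_zero, the same pattern the scheduler uses wherever a cores_needing_scheduling mask is turned into per-core work. A generic sketch:

#include <bit>
#include <cstdint>

// Visit each set bit index in ascending order, as the idle-core loop does.
template <typename F>
void ForEachSetBit(std::uint64_t mask, F&& visit) {
    while (mask != 0) {
        visit(std::countr_zero(mask));
        mask &= mask - 1; // clear the lowest set bit
    }
}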
@@ -150,7 +269,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// The suggested thread isn't bound to its core, so we can migrate it!
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested);
-
top_threads[core_id] = suggested;
cores_needing_scheduling |=
kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
@@ -183,7 +301,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Perform the migration.
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(candidate_core, suggested);
-
top_threads[core_id] = suggested;
cores_needing_scheduling |=
kernel.Scheduler(core_id).UpdateHighestPriorityThread(
@@ -200,24 +317,210 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
return cores_needing_scheduling;
}
+void KScheduler::SwitchThread(KThread* next_thread) {
+ KProcess* const cur_process = kernel.CurrentProcess();
+ KThread* const cur_thread = GetCurrentThreadPointer(kernel);
+
+ // We never want to schedule a null thread, so use the idle thread if we don't have a next.
+ if (next_thread == nullptr) {
+ next_thread = m_idle_thread;
+ }
+
+ if (next_thread->GetCurrentCore() != m_core_id) {
+ next_thread->SetCurrentCore(m_core_id);
+ }
+
+ // If we're not actually switching thread, there's nothing to do.
+ if (next_thread == cur_thread) {
+ return;
+ }
+
+ // Next thread is now known not to be nullptr, and must not be dispatchable.
+ ASSERT(next_thread->GetDisableDispatchCount() == 1);
+ ASSERT(!next_thread->IsDummyThread());
+
+ // Update the CPU time tracking variables.
+ const s64 prev_tick = m_last_context_switch_time;
+ const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks();
+ const s64 tick_diff = cur_tick - prev_tick;
+ cur_thread->AddCpuTime(m_core_id, tick_diff);
+ if (cur_process != nullptr) {
+ cur_process->UpdateCPUTimeTicks(tick_diff);
+ }
+ m_last_context_switch_time = cur_tick;
+
+ // Update our previous thread.
+ if (cur_process != nullptr) {
+ if (!cur_thread->IsTerminationRequested() && cur_thread->GetActiveCore() == m_core_id)
+ [[likely]] {
+ m_state.prev_thread = cur_thread;
+ } else {
+ m_state.prev_thread = nullptr;
+ }
+ }
+
+ // Switch the current process, if we're switching processes.
+ // if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) {
+ // KProcess::Switch(cur_process, next_process);
+ // }
+
+ // Set the new thread.
+ SetCurrentThread(kernel, next_thread);
+ m_current_thread = next_thread;
+
+ // Set the new Thread Local region.
+ // cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
+}
+
+void KScheduler::ScheduleImpl() {
+ // First, clear the needs scheduling bool.
+ m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+
+ // Load the appropriate thread pointers for scheduling.
+ KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
+ KThread* highest_priority_thread{m_state.highest_priority_thread};
+
+ // Check whether there are runnable interrupt tasks.
+ if (m_state.interrupt_task_runnable) {
+ // The interrupt task is runnable.
+ // We want to switch to the interrupt task/idle thread.
+ highest_priority_thread = nullptr;
+ }
+
+ // If there aren't, we want to check if the highest priority thread is the same as the current
+ // thread.
+ if (highest_priority_thread == cur_thread) {
+ // If they're the same, then we can just return.
+ return;
+ }
+
+ // The highest priority thread is not the same as the current thread.
+ // Jump to the switcher and continue executing from there.
+ m_switch_cur_thread = cur_thread;
+ m_switch_highest_priority_thread = highest_priority_thread;
+ m_switch_from_schedule = true;
+ Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);
+
+ // Returning from ScheduleImpl occurs after this thread has been scheduled again.
+}
+
+void KScheduler::ScheduleImplFiber() {
+ KThread* const cur_thread{m_switch_cur_thread};
+ KThread* highest_priority_thread{m_switch_highest_priority_thread};
+
+ // If we're not coming from scheduling (i.e., we came from single-core preemption),
+ // we should restart the scheduling loop directly. Not accurate to HOS.
+ if (!m_switch_from_schedule) {
+ goto retry;
+ }
+
+ // Mark that we are not coming from scheduling anymore.
+ m_switch_from_schedule = false;
+
+ // Save the original thread context.
+ Unload(cur_thread);
+
+ // The current thread's context has been entirely taken care of.
+ // Now we want to loop until we successfully switch the thread context.
+ while (true) {
+ // We're starting to try to do the context switch.
+ // Check if the highest priority thread is null.
+ if (!highest_priority_thread) {
+ // The next thread is nullptr!
+
+ // Switch to the idle thread. Note: HOS treats idling as a special case for
+ // performance. This is not *required* for yuzu's purposes, and for singlecore
+ // compatibility, we can just move the logic that would go here into the execution
+ // of the idle thread. If we ever remove singlecore, we should implement this
+ // accurately to HOS.
+ highest_priority_thread = m_idle_thread;
+ }
+
+ // We want to try to lock the highest priority thread's context.
+ // Try to take it.
+ while (!highest_priority_thread->context_guard.try_lock()) {
+ // The highest priority thread's context is already locked.
+ // Check if we need scheduling. If we don't, we can retry directly.
+ if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+ // If we do, another core is interfering, and we must start again.
+ goto retry;
+ }
+ }
+
+ // It's time to switch the thread.
+ // Switch to the highest priority thread.
+ SwitchThread(highest_priority_thread);
+
+ // Check if we need scheduling. If we do, then we can't complete the switch and should
+ // retry.
+ if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+ // Our switch failed.
+ // We should unlock the thread context, and then retry.
+ highest_priority_thread->context_guard.unlock();
+ goto retry;
+ } else {
+ break;
+ }
+
+ retry:
+
+ // We failed to successfully do the context switch, and need to retry.
+ // Clear needs_scheduling.
+ m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+
+ // Refresh the highest priority thread.
+ highest_priority_thread = m_state.highest_priority_thread;
+ }
+
+ // Reload the guest thread context.
+ Reload(highest_priority_thread);
+
+ // Reload the host thread.
+ Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
+}
+
+void KScheduler::Unload(KThread* thread) {
+ auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+ cpu_core.SaveContext(thread->GetContext32());
+ cpu_core.SaveContext(thread->GetContext64());
+ // Save the TPIDR_EL0 system register in case it was modified.
+ thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+
+ // Check if the thread is terminated by checking the DPC flags.
+ if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
+ // The thread isn't terminated, so we want to unlock it.
+ thread->context_guard.unlock();
+ }
+}
+
+void KScheduler::Reload(KThread* thread) {
+ auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+ cpu_core.LoadContext(thread->GetContext32());
+ cpu_core.LoadContext(thread->GetContext64());
+ cpu_core.SetTlsAddress(thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+ cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
+ cpu_core.ClearExclusiveState();
+}
+
void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
// Get an atomic reference to the core scheduler's previous thread.
- std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
- static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
+ auto& prev_thread{kernel.Scheduler(i).m_state.prev_thread};
// Atomically clear the previous thread if it's our target.
KThread* compare = thread;
- prev_thread.compare_exchange_strong(compare, nullptr);
+ prev_thread.compare_exchange_strong(compare, nullptr, std::memory_order_seq_cst);
}
}
void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// Check if the state has changed, because if it hasn't there's nothing to do.
- const auto cur_state = thread->GetRawState();
+ const ThreadState cur_state = thread->GetRawState();
if (cur_state == old_state) {
return;
}
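The retry loop in ScheduleImplFiber above pairs a try_lock spin on the target thread's context guard with rechecks of needs_scheduling: if another core flags new scheduling work at any point, the half-finished switch is abandoned and restarted against a fresh highest-priority thread. A self-contained sketch of that acquire-or-retry shape, with plain std::mutex/std::atomic stand-ins for the kernel's types:

#include <atomic>
#include <mutex>

std::atomic<bool> needs_scheduling{false};
std::mutex context_guard;

// Returns true with context_guard held, or false when the caller must
// refresh its candidate thread and retry, mirroring the goto retry above.
bool TryAcquireTargetContext() {
    while (!context_guard.try_lock()) {
        if (needs_scheduling.load(std::memory_order_seq_cst)) {
            return false; // another core interfered while we spun
        }
    }
    if (needs_scheduling.load(std::memory_order_seq_cst)) {
        context_guard.unlock(); // acquired too late; the switch is stale
        return false;
    }
    return true;
}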
@@ -237,12 +540,12 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa
}
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// If the thread is runnable, we want to change its priority in the queue.
if (thread->GetRawState() == ThreadState::Runnable) {
GetPriorityQueue(kernel).ChangePriority(old_priority,
- thread == kernel.GetCurrentEmuThread(), thread);
+ thread == GetCurrentThreadPointer(kernel), thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
}
@@ -250,7 +553,7 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3
void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// If the thread is runnable, we want to change its affinity in the queue.
if (thread->GetRawState() == ThreadState::Runnable) {
@@ -260,15 +563,14 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread
}
}
-void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
- ASSERT(system.GlobalSchedulerContext().IsLocked());
+void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority) {
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// Get a reference to the priority queue.
- auto& kernel = system.Kernel();
auto& priority_queue = GetPriorityQueue(kernel);
// Rotate the front of the queue to the end.
- KThread* top_thread = priority_queue.GetScheduledFront(cpu_core_id, priority);
+ KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
KThread* next_thread = nullptr;
if (top_thread != nullptr) {
next_thread = priority_queue.MoveToScheduledBack(top_thread);
@@ -280,7 +582,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
// While we have a suggested thread, try to migrate it!
{
- KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id, priority);
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
@@ -301,7 +603,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
// to the front of the queue.
if (top_on_suggested_core == nullptr ||
top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
- suggested->SetActiveCore(cpu_core_id);
+ suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
@@ -309,22 +611,21 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
}
// Get the next suggestion.
- suggested = priority_queue.GetSamePriorityNext(cpu_core_id, suggested);
+ suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
}
}
// Now that we might have migrated a thread with the same priority, check if we can do better.
-
{
- KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id);
- if (best_thread == GetCurrentThread()) {
- best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread);
+ KThread* best_thread = priority_queue.GetScheduledFront(core_id);
+ if (best_thread == GetCurrentThreadPointer(kernel)) {
+ best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
}
// If the best thread we can choose has a priority the same or worse than ours, try to
// migrate a higher priority thread.
if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
- KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id);
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// If the suggestion's priority is the same as ours, don't bother.
if (suggested->GetPriority() >= best_thread->GetPriority()) {
@@ -343,7 +644,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
if (top_on_suggested_core == nullptr ||
top_on_suggested_core->GetPriority() >=
HighestCoreMigrationAllowedPriority) {
- suggested->SetActiveCore(cpu_core_id);
+ suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
@@ -351,7 +652,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
}
// Get the next suggestion.
- suggested = priority_queue.GetSuggestedNext(cpu_core_id, suggested);
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
}
}
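RotateScheduledQueue above performs the round-robin step of preemption: the thread at the front of a priority bucket moves to the back so the next thread at the same priority gets a turn, before any cross-core migration is attempted. A std::deque stand-in for the kernel's intrusive priority queue:

#include <deque>

// Move the front thread of one priority bucket to the back and return the
// new front, as the rotation above does for the preempted priority level.
template <typename T>
T* RotateFront(std::deque<T*>& bucket) {
    if (bucket.empty()) {
        return nullptr;
    }
    T* running = bucket.front();
    bucket.pop_front();
    bucket.push_back(running);
    return bucket.front();
}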
@@ -360,71 +661,13 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
SetSchedulerUpdateNeeded(kernel);
}
-bool KScheduler::CanSchedule(KernelCore& kernel) {
- return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1;
-}
-
-bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
- return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
-}
-
-void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
- kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
-}
-
-void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
- kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
-}
-
-void KScheduler::DisableScheduling(KernelCore& kernel) {
- // If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) {
- return;
- }
-
- ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
- GetCurrentThreadPointer(kernel)->DisableDispatch();
-}
-
-void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
- // If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) {
- return;
- }
-
- auto* current_thread = GetCurrentThreadPointer(kernel);
-
- ASSERT(current_thread->GetDisableDispatchCount() >= 1);
-
- if (current_thread->GetDisableDispatchCount() > 1) {
- current_thread->EnableDispatch();
- } else {
- RescheduleCores(kernel, cores_needing_scheduling);
- }
-
- // Special case to ensure dummy threads that are waiting block.
- current_thread->IfDummyThreadTryWait();
-}
-
-u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
- if (IsSchedulerUpdateNeeded(kernel)) {
- return UpdateHighestPriorityThreadsImpl(kernel);
- } else {
- return 0;
- }
-}
-
-KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
- return kernel.GlobalSchedulerContext().priority_queue;
-}
-
void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -437,7 +680,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Perform the yield.
{
- KScopedSchedulerLock lock(kernel);
+ KScopedSchedulerLock sl{kernel};
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
@@ -463,7 +706,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -476,7 +719,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
// Perform the yield.
{
- KScopedSchedulerLock lock(kernel);
+ KScopedSchedulerLock sl{kernel};
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
@@ -496,7 +739,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
if (KThread* running_on_suggested_core =
(suggested_core >= 0)
- ? kernel.Scheduler(suggested_core).state.highest_priority_thread
+ ? kernel.Scheduler(suggested_core).m_state.highest_priority_thread
: nullptr;
running_on_suggested_core != suggested) {
// If the current thread's priority is higher than our suggestion's we prefer
@@ -551,7 +794,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -564,7 +807,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
// Perform the yield.
{
- KScopedSchedulerLock lock(kernel);
+ KScopedSchedulerLock sl{kernel};
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
@@ -621,219 +864,19 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
}
}
-KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, core_id{core_id_} {
- switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
- state.needs_scheduling.store(true);
- state.interrupt_task_thread_runnable = false;
- state.should_count_idle = false;
- state.idle_count = 0;
- state.idle_thread_stack = nullptr;
- state.highest_priority_thread = nullptr;
-}
-
-void KScheduler::Finalize() {
- if (idle_thread) {
- idle_thread->Close();
- idle_thread = nullptr;
- }
-}
-
-KScheduler::~KScheduler() {
- ASSERT(!idle_thread);
-}
-
-KThread* KScheduler::GetCurrentThread() const {
- if (auto result = current_thread.load(); result) {
- return result;
- }
- return idle_thread;
-}
-
-u64 KScheduler::GetLastContextSwitchTicks() const {
- return last_context_switch_time;
-}
-
-void KScheduler::RescheduleCurrentCore() {
- ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
-
- auto& phys_core = system.Kernel().PhysicalCore(core_id);
- if (phys_core.IsInterrupted()) {
- phys_core.ClearInterrupt();
+void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
+ if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) {
+ RescheduleCores(kernel, core_mask);
}
-
- guard.Lock();
- if (state.needs_scheduling.load()) {
- Schedule();
- } else {
- GetCurrentThread()->EnableDispatch();
- guard.Unlock();
- }
-}
-
-void KScheduler::OnThreadStart() {
- SwitchContextStep2();
}
-void KScheduler::Unload(KThread* thread) {
- ASSERT(thread);
-
- LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
-
- if (thread->IsCallingSvc()) {
- thread->ClearIsCallingSvc();
- }
-
- auto& physical_core = system.Kernel().PhysicalCore(core_id);
- if (!physical_core.IsInitialized()) {
- return;
- }
-
- Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
- cpu_core.SaveContext(thread->GetContext32());
- cpu_core.SaveContext(thread->GetContext64());
- // Save the TPIDR_EL0 system register in case it was modified.
- thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
-
- if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
- prev_thread = thread;
- } else {
- prev_thread = nullptr;
- }
-
- thread->context_guard.unlock();
-}
-
-void KScheduler::Reload(KThread* thread) {
- LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
-
- Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
- cpu_core.LoadContext(thread->GetContext32());
- cpu_core.LoadContext(thread->GetContext64());
- cpu_core.SetTlsAddress(thread->GetTLSAddress());
- cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
-}
-
-void KScheduler::SwitchContextStep2() {
- // Load context of new thread
- Reload(GetCurrentThread());
-
- RescheduleCurrentCore();
-}
-
-void KScheduler::ScheduleImpl() {
- KThread* previous_thread = GetCurrentThread();
- KThread* next_thread = state.highest_priority_thread;
-
- state.needs_scheduling.store(false);
-
- // We never want to schedule a null thread, so use the idle thread if we don't have a next.
- if (next_thread == nullptr) {
- next_thread = idle_thread;
- }
-
- if (next_thread->GetCurrentCore() != core_id) {
- next_thread->SetCurrentCore(core_id);
- }
-
- // We never want to schedule a dummy thread, as these are only used by host threads for locking.
- if (next_thread->GetThreadType() == ThreadType::Dummy) {
- ASSERT_MSG(false, "Dummy threads should never be scheduled!");
- next_thread = idle_thread;
- }
-
- // If we're not actually switching thread, there's nothing to do.
- if (next_thread == current_thread.load()) {
- previous_thread->EnableDispatch();
- guard.Unlock();
- return;
- }
-
- // Update the CPU time tracking variables.
- KProcess* const previous_process = system.Kernel().CurrentProcess();
- UpdateLastContextSwitchTime(previous_thread, previous_process);
-
- // Save context for previous thread
- Unload(previous_thread);
-
- std::shared_ptr<Common::Fiber>* old_context;
- old_context = &previous_thread->GetHostContext();
-
- // Set the new thread.
- current_thread.store(next_thread);
-
- guard.Unlock();
-
- Common::Fiber::YieldTo(*old_context, *switch_fiber);
- /// When a thread wakes up, it may be resumed by the scheduler of another core.
- auto& next_scheduler = *system.Kernel().CurrentScheduler();
- next_scheduler.SwitchContextStep2();
-}
-
-void KScheduler::OnSwitch(void* this_scheduler) {
- KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
- sched->SwitchToCurrent();
-}
-
-void KScheduler::SwitchToCurrent() {
- while (true) {
- {
- KScopedSpinLock lk{guard};
- current_thread.store(state.highest_priority_thread);
- state.needs_scheduling.store(false);
+void KScheduler::RescheduleCores(KernelCore& kernel, u64 core_mask) {
+ // Send an IPI to each core that needs rescheduling.
+ for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ if (core_mask & (1ULL << i)) {
+ kernel.PhysicalCore(i).Interrupt();
}
- const auto is_switch_pending = [this] {
- KScopedSpinLock lk{guard};
- return state.needs_scheduling.load();
- };
- do {
- auto next_thread = current_thread.load();
- if (next_thread != nullptr) {
- const auto locked = next_thread->context_guard.try_lock();
- if (state.needs_scheduling.load()) {
- next_thread->context_guard.unlock();
- break;
- }
- if (next_thread->GetActiveCore() != core_id) {
- next_thread->context_guard.unlock();
- break;
- }
- if (!locked) {
- continue;
- }
- }
- auto thread = next_thread ? next_thread : idle_thread;
- Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
- } while (!is_switch_pending());
}
}
-void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) {
- const u64 prev_switch_ticks = last_context_switch_time;
- const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
- const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
-
- if (thread != nullptr) {
- thread->AddCpuTime(core_id, update_ticks);
- }
-
- if (process != nullptr) {
- process->UpdateCPUTimeTicks(update_ticks);
- }
-
- last_context_switch_time = most_recent_switch_ticks;
-}
-
-void KScheduler::Initialize() {
- idle_thread = KThread::Create(system.Kernel());
- ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess());
- idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
-}
-
-KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
- : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}
-
-KScopedSchedulerLock::~KScopedSchedulerLock() = default;
-
} // namespace Kernel
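
The core-mask walk that RescheduleCores() performs above is a plain bit-scan over a u64 mask. A minimal, self-contained sketch of the same pattern follows; kNumCpuCores and notify_core are illustrative stand-ins for Core::Hardware::NUM_CPU_CORES and kernel.PhysicalCore(i).Interrupt(), not names from this diff.

#include <cstddef>
#include <cstdint>
#include <functional>

constexpr std::size_t kNumCpuCores = 4; // stand-in for Core::Hardware::NUM_CPU_CORES

void ForEachCoreInMask(std::uint64_t core_mask,
                       const std::function<void(std::size_t)>& notify_core) {
    for (std::size_t i = 0; i < kNumCpuCores; ++i) {
        if (core_mask & (1ULL << i)) {
            notify_core(i); // stands in for raising an IPI on core i
        }
    }
}

// RescheduleOtherCores() above first clears the caller's own bit:
//   cores_needing_scheduling & ~(1ULL << m_core_id)
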
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 729e006f2..534321d8d 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -11,6 +11,7 @@
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/k_scoped_lock.h"
#include "core/hle/kernel/k_spin_lock.h"
+#include "core/hle/kernel/k_thread.h"
namespace Common {
class Fiber;
@@ -23,188 +24,150 @@ class System;
namespace Kernel {
class KernelCore;
+class KInterruptTaskManager;
class KProcess;
-class SchedulerLock;
class KThread;
+class KScopedDisableDispatch;
+class KScopedSchedulerLock;
+class KScopedSchedulerLockAndSleep;
class KScheduler final {
public:
- explicit KScheduler(Core::System& system_, s32 core_id_);
- ~KScheduler();
-
- void Finalize();
+ YUZU_NON_COPYABLE(KScheduler);
+ YUZU_NON_MOVEABLE(KScheduler);
- /// Reschedules to the next available thread (call after current thread is suspended)
- void RescheduleCurrentCore();
+ using LockType = KAbstractSchedulerLock<KScheduler>;
- /// Reschedules cores pending reschedule, to be called on EnableScheduling.
- static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule);
+ explicit KScheduler(KernelCore& kernel);
+ ~KScheduler();
- /// The next two are for SingleCore Only.
- /// Unload current thread before preempting core.
+ void Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id);
+ void Activate();
+ void OnThreadStart();
void Unload(KThread* thread);
-
- /// Reload current thread after core preemption.
void Reload(KThread* thread);
- /// Gets the current running thread
- [[nodiscard]] KThread* GetCurrentThread() const;
+ void SetInterruptTaskRunnable();
+ void RequestScheduleOnInterrupt();
+ void PreemptSingleCore();
- /// Gets the idle thread
- [[nodiscard]] KThread* GetIdleThread() const {
- return idle_thread;
+ u64 GetIdleCount() {
+ return m_state.idle_count;
}
- /// Returns true if the scheduler is idle
- [[nodiscard]] bool IsIdle() const {
- return GetCurrentThread() == idle_thread;
+ KThread* GetIdleThread() const {
+ return m_idle_thread;
}
- /// Gets the timestamp for the last context switch in ticks.
- [[nodiscard]] u64 GetLastContextSwitchTicks() const;
-
- [[nodiscard]] bool ContextSwitchPending() const {
- return state.needs_scheduling.load(std::memory_order_relaxed);
+ bool IsIdle() const {
+ return m_current_thread.load() == m_idle_thread;
}
- void Initialize();
-
- void OnThreadStart();
+ KThread* GetPreviousThread() const {
+ return m_state.prev_thread;
+ }
- [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
- return switch_fiber;
+ KThread* GetSchedulerCurrentThread() const {
+ return m_current_thread.load();
}
- [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
- return switch_fiber;
+ s64 GetLastContextSwitchTime() const {
+ return m_last_context_switch_time;
}
- [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);
+ // Static public API.
+ static bool CanSchedule(KernelCore& kernel) {
+ return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
+ }
+ static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread();
+ }
- /**
- * Takes a thread and moves it to the back of its priority list.
- *
- * @note This operation can be redundant, in which case no scheduling change occurs.
- */
- static void YieldWithoutCoreMigration(KernelCore& kernel);
+ static bool IsSchedulerUpdateNeeded(KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().scheduler_update_needed;
+ }
+ static void SetSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed = true;
+ }
+ static void ClearSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed = false;
+ }
- /**
- * Takes a thread and moves it to the back of its priority list.
- * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
- * a better priority than the next thread in the core.
- *
- * @note This operation can be redundant, in which case no scheduling change occurs.
- */
- static void YieldWithCoreMigration(KernelCore& kernel);
+ static void DisableScheduling(KernelCore& kernel);
+ static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
- /**
- * Takes a thread and moves it out of the scheduling queue
- * and into the suggested queue. If no thread can be scheduled afterwards in that core,
- * a suggested thread is obtained instead.
- *
- * @note This operation can be redundant, in which case no scheduling change occurs.
- */
- static void YieldToAnyThread(KernelCore& kernel);
+ static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
static void ClearPreviousThread(KernelCore& kernel, KThread* thread);
- /// Notify the scheduler a thread's status has changed.
static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);
-
- /// Notify the scheduler a thread's priority has changed.
static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);
-
- /// Notify the scheduler a thread's core and/or affinity mask has changed.
static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core);
- static bool CanSchedule(KernelCore& kernel);
- static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
- static void SetSchedulerUpdateNeeded(KernelCore& kernel);
- static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
- static void DisableScheduling(KernelCore& kernel);
- static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
- [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
+ static void RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority);
+ static void RescheduleCores(KernelCore& kernel, u64 cores_needing_scheduling);
+
+ static void YieldWithoutCoreMigration(KernelCore& kernel);
+ static void YieldWithCoreMigration(KernelCore& kernel);
+ static void YieldToAnyThread(KernelCore& kernel);
private:
- friend class GlobalSchedulerContext;
-
- /**
- * Takes care of selecting the new scheduled threads in three steps:
- *
- * 1. First, a thread is selected from the top of the priority queue. If no thread
- * is obtained then we move to step two, else we are done.
- *
- * 2. Second, we try to get a suggested thread that's not assigned to any core or
- * that is not the top thread in that core.
- *
- * 3. Third, if no suggested thread is found, we do a second pass and pick a running
- * thread in another core and swap it with its current thread.
- *
- * returns the cores needing scheduling.
- */
- [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
-
- [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
-
- void RotateScheduledQueue(s32 cpu_core_id, s32 priority);
-
- void Schedule() {
- ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
- this->ScheduleImpl();
+ // Static private API.
+ static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().priority_queue;
}
+ static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
- /// Switches the CPU's active thread context to that of the specified thread
- void ScheduleImpl();
-
- /// When a thread wakes up, it must run this through its new scheduler
- void SwitchContextStep2();
+ static void RescheduleCurrentHLEThread(KernelCore& kernel);
- /**
- * Called on every context switch to update the internal timestamp
- * This also updates the running time ticks for the given thread and
- * process using the following difference:
- *
- * ticks += most_recent_ticks - last_context_switch_ticks
- *
- * The internal tick timestamp for the scheduler is simply the
- * most recent tick count retrieved. No special arithmetic is
- * applied to it.
- */
- void UpdateLastContextSwitchTime(KThread* thread, KProcess* process);
+ // Instanced private API.
+ void ScheduleImpl();
+ void ScheduleImplFiber();
+ void SwitchThread(KThread* next_thread);
- static void OnSwitch(void* this_scheduler);
- void SwitchToCurrent();
+ void Schedule();
+ void ScheduleOnInterrupt();
- KThread* prev_thread{};
- std::atomic<KThread*> current_thread{};
+ void RescheduleOtherCores(u64 cores_needing_scheduling);
+ void RescheduleCurrentCore();
+ void RescheduleCurrentCoreImpl();
- KThread* idle_thread{};
+ u64 UpdateHighestPriorityThread(KThread* thread);
- std::shared_ptr<Common::Fiber> switch_fiber{};
+private:
+ friend class KScopedDisableDispatch;
struct SchedulingState {
- std::atomic<bool> needs_scheduling{};
- bool interrupt_task_thread_runnable{};
- bool should_count_idle{};
- u64 idle_count{};
- KThread* highest_priority_thread{};
- void* idle_thread_stack{};
+ std::atomic<bool> needs_scheduling{false};
+ bool interrupt_task_runnable{false};
+ bool should_count_idle{false};
+ u64 idle_count{0};
+ KThread* highest_priority_thread{nullptr};
+ void* idle_thread_stack{nullptr};
+ std::atomic<KThread*> prev_thread{nullptr};
+ KInterruptTaskManager* interrupt_task_manager{nullptr};
};
- SchedulingState state;
-
- Core::System& system;
- u64 last_context_switch_time{};
- const s32 core_id;
-
- KSpinLock guard{};
+ KernelCore& kernel;
+ SchedulingState m_state;
+ bool m_is_active{false};
+ s32 m_core_id{0};
+ s64 m_last_context_switch_time{0};
+ KThread* m_idle_thread{nullptr};
+ std::atomic<KThread*> m_current_thread{nullptr};
+
+ std::shared_ptr<Common::Fiber> m_switch_fiber{};
+ KThread* m_switch_cur_thread{};
+ KThread* m_switch_highest_priority_thread{};
+ bool m_switch_from_schedule{};
};
-class [[nodiscard]] KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
+class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
public:
- explicit KScopedSchedulerLock(KernelCore& kernel);
- ~KScopedSchedulerLock();
+ explicit KScopedSchedulerLock(KernelCore& kernel)
+ : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {}
+ ~KScopedSchedulerLock() = default;
};
} // namespace Kernel
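
Every yield path in k_scheduler.cpp now uses the header-only KScopedSchedulerLock defined above. A minimal usage sketch, assuming only the declarations in this header (the function name is illustrative):

// Illustrative call site, not actual yuzu code.
void ExampleSchedulerCriticalSection(Kernel::KernelCore& kernel) {
    // Acquires the global scheduler lock in the constructor...
    Kernel::KScopedSchedulerLock sl{kernel};

    // ...so scheduler state read or written here is consistent, e.g. the
    // GetRawState() checks performed by the yield functions.

}   // ...and releases it automatically here, even on early return.
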
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 4fa256970..73314b45e 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -5,9 +5,11 @@
#include <atomic>
#include "common/assert.h"
+#include "core/hle/kernel/k_interrupt_manager.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
namespace Kernel {
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 60f8ed470..802c646a6 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -79,7 +79,7 @@ std::size_t KServerSession::NumDomainRequestHandlers() const {
return manager->DomainHandlerCount();
}
-ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
+Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
if (!context.HasDomainMessageHeader()) {
return ResultSuccess;
}
@@ -123,7 +123,7 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
return ResultSuccess;
}
-ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
+Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
@@ -143,8 +143,8 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
return ResultSuccess;
}
-ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
- ResultCode result = ResultSuccess;
+Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
+ Result result = ResultSuccess;
// If the session has been converted to a domain, handle the domain request
if (manager->HasSessionRequestHandler(context)) {
@@ -173,8 +173,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
return result;
}
-ResultCode KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing) {
+Result KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing) {
return QueueSyncRequest(thread, memory);
}
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index b628a843f..6d0821945 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -73,10 +73,10 @@ public:
* @param memory Memory context to handle the sync request under.
* @param core_timing Core timing context to schedule the request event under.
*
- * @returns ResultCode from the operation.
+ * @returns Result from the operation.
*/
- ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing);
+ Result HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing);
/// Adds a new domain request handler to the collection of request handlers within
/// this ServerSession instance.
@@ -103,14 +103,14 @@ public:
private:
/// Queues a sync request from the emulated application.
- ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
+ Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
/// Completes a sync request from the emulated application.
- ResultCode CompleteSyncRequest(HLERequestContext& context);
+ Result CompleteSyncRequest(HLERequestContext& context);
/// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
/// object handle.
- ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);
+ Result HandleDomainSyncRequest(Kernel::HLERequestContext& context);
/// This session's HLE request handlers
std::shared_ptr<SessionRequestManager> manager;
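
Call sites only see the spelling change from the ResultCode -> Result rename. A hedged caller-side sketch of the propagation pattern, assuming the IsError()/ResultSuccess helpers from core/hle/result.h (the helper name is illustrative):

Result ForwardSyncRequest(Kernel::KServerSession& session, Kernel::KThread* thread,
                          Core::Memory::Memory& memory,
                          Core::Timing::CoreTiming& core_timing) {
    const Result result = session.HandleSyncRequest(thread, memory, core_timing);
    if (result.IsError()) {
        return result; // propagate the failure unchanged
    }
    return ResultSuccess;
}
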
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 51d7538ca..8ff1545b6 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "core/core.h"
@@ -18,12 +17,10 @@ KSharedMemory::~KSharedMemory() {
kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
}
-ResultCode KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
- KPageLinkedList&& page_list_,
- Svc::MemoryPermission owner_permission_,
- Svc::MemoryPermission user_permission_,
- PAddr physical_address_, std::size_t size_,
- std::string name_) {
+Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+ KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+ Svc::MemoryPermission user_permission_, PAddr physical_address_,
+ std::size_t size_, std::string name_) {
// Set members.
owner_process = owner_process_;
device_memory = &device_memory_;
@@ -67,8 +64,8 @@ void KSharedMemory::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
}
-ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
- Svc::MemoryPermission permissions) {
+Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
+ Svc::MemoryPermission permissions) {
const u64 page_count{(map_size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) {
@@ -86,7 +83,7 @@ ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size
ConvertToKMemoryPermission(permissions));
}
-ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
+Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
const u64 page_count{(unmap_size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) {
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 81de36136..34cb98456 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -9,7 +8,7 @@
#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -26,10 +25,10 @@ public:
explicit KSharedMemory(KernelCore& kernel_);
~KSharedMemory() override;
- ResultCode Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
- KPageLinkedList&& page_list_, Svc::MemoryPermission owner_permission_,
- Svc::MemoryPermission user_permission_, PAddr physical_address_,
- std::size_t size_, std::string name_);
+ Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+ KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+ Svc::MemoryPermission user_permission_, PAddr physical_address_,
+ std::size_t size_, std::string name_);
/**
* Maps a shared memory block to an address in the target process' address space
@@ -38,8 +37,8 @@ public:
* @param map_size Size of the shared memory block to map
* @param permissions Memory block map permissions (specified by SVC field)
*/
- ResultCode Map(KProcess& target_process, VAddr address, std::size_t map_size,
- Svc::MemoryPermission permissions);
+ Result Map(KProcess& target_process, VAddr address, std::size_t map_size,
+ Svc::MemoryPermission permissions);
/**
* Unmaps a shared memory block from an address in the target process' address space
@@ -47,7 +46,7 @@ public:
* @param address Address in system memory to unmap shared memory block
* @param unmap_size Size of the shared memory block to unmap
*/
- ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
+ Result Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
/**
* Gets a pointer to the shared memory block
@@ -77,7 +76,7 @@ public:
private:
Core::DeviceMemory* device_memory;
KProcess* owner_process{};
- KPageLinkedList page_list;
+ KPageGroup page_list;
Svc::MemoryPermission owner_permission{};
Svc::MemoryPermission user_permission{};
PAddr physical_address{};
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 8554144d5..802dca046 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -22,7 +22,7 @@ public:
: KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
- ResultCode wait_result) override {
+ Result wait_result) override {
// Determine the sync index, and unlink all nodes.
s32 sync_index = -1;
for (auto i = 0; i < m_count; ++i) {
@@ -45,8 +45,7 @@ public:
KThreadQueue::EndWait(waiting_thread, wait_result);
}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove all nodes from our list.
for (auto i = 0; i < m_count; ++i) {
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
@@ -72,9 +71,9 @@ void KSynchronizationObject::Finalize() {
KAutoObject::Finalize();
}
-ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
- KSynchronizationObject** objects, const s32 num_objects,
- s64 timeout) {
+Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout) {
// Allocate space on stack for thread nodes.
std::vector<ThreadListNode> thread_nodes(num_objects);
@@ -148,7 +147,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
KSynchronizationObject::~KSynchronizationObject() = default;
-void KSynchronizationObject::NotifyAvailable(ResultCode result) {
+void KSynchronizationObject::NotifyAvailable(Result result) {
KScopedSchedulerLock sl(kernel);
// If we're not signaled, we've nothing to notify.
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index d7540d6c7..8d8122ab7 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -24,9 +24,9 @@ public:
KThread* thread{};
};
- [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
- KSynchronizationObject** objects, const s32 num_objects,
- s64 timeout);
+ [[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout);
void Finalize() override;
@@ -72,7 +72,7 @@ protected:
virtual void OnFinalizeSynchronizationObject() {}
- void NotifyAvailable(ResultCode result);
+ void NotifyAvailable(Result result);
void NotifyAvailable() {
return this->NotifyAvailable(ResultSuccess);
}
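
A hedged usage sketch for the Wait() signature above, assuming the R_TRY macro from core/hle/result.h; the function name and the wait-forever timeout convention are illustrative:

Result WaitAnyExample(Kernel::KernelCore& kernel,
                      Kernel::KSynchronizationObject** objects, s32 num_objects) {
    s32 signaled_index = -1;
    // A negative timeout conventionally means "wait indefinitely".
    R_TRY(Kernel::KSynchronizationObject::Wait(kernel, &signaled_index, objects,
                                               num_objects, -1));
    // signaled_index now identifies which object signaled first.
    return ResultSuccess;
}
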
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 8d48a7901..174afc80d 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -80,8 +80,7 @@ public:
explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
: KThreadQueue(kernel_), m_wait_list(wl) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread from the wait list.
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
@@ -99,8 +98,8 @@ KThread::KThread(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
KThread::~KThread() = default;
-ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
- s32 virt_core, KProcess* owner, ThreadType type) {
+Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -225,7 +224,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
// Setup the stack parameters.
StackParameters& sp = GetStackParameters();
sp.cur_thread = this;
- sp.disable_count = 0;
+ sp.disable_count = 1;
SetInExceptionHandler();
// Set thread ID.
@@ -245,46 +244,51 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
return ResultSuccess;
}
-ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
- ThreadType type, std::function<void(void*)>&& init_func,
- void* init_func_parameter) {
+Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
+ VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
+ ThreadType type, std::function<void()>&& init_func) {
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
// Initialize emulation parameters.
- thread->host_context =
- std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+ thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
thread->is_single_core = !Settings::values.use_multi_core.GetValue();
return ResultSuccess;
}
-ResultCode KThread::InitializeDummyThread(KThread* thread) {
- return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy);
+Result KThread::InitializeDummyThread(KThread* thread) {
+ // Initialize the thread.
+ R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy));
+
+ // Initialize emulation parameters.
+ thread->stack_parameters.disable_count = 0;
+
+ return ResultSuccess;
}
-ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
+Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
- Core::CpuManager::GetIdleThreadStartFunc(),
- system.GetCpuManager().GetStartFuncParameter());
+ system.GetCpuManager().GetGuestActivateFunc());
}
-ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg,
- s32 virt_core) {
+Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
+ return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
+ system.GetCpuManager().GetIdleThreadStartFunc());
+}
+
+Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg, s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
- Core::CpuManager::GetShutdownThreadStartFunc(),
- system.GetCpuManager().GetStartFuncParameter());
+ system.GetCpuManager().GetShutdownThreadStartFunc());
}
-ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
- s32 prio, s32 virt_core, KProcess* owner) {
+Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
+ uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
+ KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
- ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
- system.GetCpuManager().GetStartFuncParameter());
+ ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
}
void KThread::PostDestroy(uintptr_t arg) {
@@ -315,14 +319,20 @@ void KThread::Finalize() {
auto it = waiter_list.begin();
while (it != waiter_list.end()) {
- // Clear the lock owner
- it->SetLockOwner(nullptr);
+ // Get the thread.
+ KThread* const waiter = std::addressof(*it);
+
+ // The thread shouldn't be a kernel waiter.
+ ASSERT(!IsKernelAddressKey(waiter->GetAddressKey()));
+
+ // Clear the lock owner.
+ waiter->SetLockOwner(nullptr);
// Erase the waiter from our list.
it = waiter_list.erase(it);
// Cancel the thread's wait.
- it->CancelWait(ResultInvalidState, true);
+ waiter->CancelWait(ResultInvalidState, true);
}
}
@@ -382,7 +392,7 @@ void KThread::FinishTermination() {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
- core_thread = kernel.Scheduler(i).GetCurrentThread();
+ core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
@@ -487,9 +497,7 @@ void KThread::Unpin() {
// Resume any threads that began waiting on us while we were pinned.
for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) {
- if (it->GetState() == ThreadState::Waiting) {
- it->SetState(ThreadState::Runnable);
- }
+ it->EndWait(ResultSuccess);
}
}
@@ -523,7 +531,7 @@ void KThread::ClearInterruptFlag() {
memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
}
-ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel};
// Get the virtual mask.
@@ -533,7 +541,7 @@ ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
return ResultSuccess;
}
-ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel};
ASSERT(num_core_migration_disables >= 0);
@@ -549,7 +557,7 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
return ResultSuccess;
}
-ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
+Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
ASSERT(parent != nullptr);
ASSERT(v_affinity_mask != 0);
KScopedLightLock lk(activity_pause_lock);
@@ -631,7 +639,7 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
++thread_core) {
- if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
+ if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -748,7 +756,20 @@ void KThread::Continue() {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
-ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
+void KThread::WaitUntilSuspended() {
+ // Make sure we have a suspend requested.
+ ASSERT(IsSuspendRequested());
+
+ // Loop until the thread is not executing on any core.
+ for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ KThread* core_thread{};
+ do {
+ core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
+ } while (core_thread == this);
+ }
+}
+
+Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Lock ourselves.
KScopedLightLock lk(activity_pause_lock);
@@ -809,7 +830,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
// Check if the thread is currently running.
// If it is, we'll need to retry.
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (kernel.Scheduler(i).GetCurrentThread() == this) {
+ if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -821,7 +842,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
return ResultSuccess;
}
-ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
+Result KThread::GetThreadContext3(std::vector<u8>& out) {
// Lock ourselves.
KScopedLightLock lk{activity_pause_lock};
@@ -871,6 +892,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
// Keep track of how many kernel waiters we have.
if (IsKernelAddressKey(thread->GetAddressKey())) {
ASSERT((num_kernel_waiters++) >= 0);
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
// Insert the waiter.
@@ -884,6 +906,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
// Keep track of how many kernel waiters we have.
if (IsKernelAddressKey(thread->GetAddressKey())) {
ASSERT((num_kernel_waiters--) > 0);
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
// Remove the waiter.
@@ -959,6 +982,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
// Keep track of how many kernel waiters we have.
if (IsKernelAddressKey(thread->GetAddressKey())) {
ASSERT((num_kernel_waiters--) > 0);
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
it = waiter_list.erase(it);
@@ -986,7 +1010,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
return next_lock_owner;
}
-ResultCode KThread::Run() {
+Result KThread::Run() {
while (true) {
KScopedSchedulerLock lk{kernel};
@@ -1014,8 +1038,6 @@ ResultCode KThread::Run() {
// Set our state and finish.
SetState(ThreadState::Runnable);
- DisableDispatch();
-
return ResultSuccess;
}
}
@@ -1047,9 +1069,11 @@ void KThread::Exit() {
// Register the thread as a work task.
KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this);
}
+
+ UNREACHABLE_MSG("KThread::Exit() would return");
}
-ResultCode KThread::Sleep(s64 timeout) {
+Result KThread::Sleep(s64 timeout) {
ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
ASSERT(this == GetCurrentThreadPointer(kernel));
ASSERT(timeout > 0);
@@ -1082,6 +1106,8 @@ void KThread::IfDummyThreadTryWait() {
return;
}
+ ASSERT(!kernel.IsPhantomModeForSingleCore());
+
// Block until we are no longer waiting.
std::unique_lock lk(dummy_wait_lock);
dummy_wait_cv.wait(
@@ -1105,7 +1131,7 @@ void KThread::BeginWait(KThreadQueue* queue) {
wait_queue = queue;
}
-void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
+void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
@@ -1115,7 +1141,7 @@ void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCod
}
}
-void KThread::EndWait(ResultCode wait_result_) {
+void KThread::EndWait(Result wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
@@ -1134,7 +1160,7 @@ void KThread::EndWait(ResultCode wait_result_) {
}
}
-void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
+void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
@@ -1164,6 +1190,10 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
return host_context;
}
+void SetCurrentThread(KernelCore& kernel, KThread* thread) {
+ kernel.SetCurrentEmuThread(thread);
+}
+
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
return kernel.GetCurrentEmuThread();
}
@@ -1182,16 +1212,13 @@ KScopedDisableDispatch::~KScopedDisableDispatch() {
return;
}
- // Skip the reschedule if single-core, as dispatch tracking is disabled here.
- if (!Settings::values.use_multi_core.GetValue()) {
- return;
- }
-
if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
- auto scheduler = kernel.CurrentScheduler();
+ auto* scheduler = kernel.CurrentScheduler();
- if (scheduler) {
+ if (scheduler && !kernel.IsPhantomModeForSingleCore()) {
scheduler->RescheduleCurrentCore();
+ } else {
+ KScheduler::RescheduleCurrentHLEThread(kernel);
}
} else {
GetCurrentThread(kernel).EnableDispatch();
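
The destructor logic above pairs with the DisableDispatch() call made by the KScopedDisableDispatch constructor. A minimal sketch of the intended pairing (the function name is illustrative, not a real call site):

void RunWithoutPreemption(Kernel::KernelCore& kernel) {
    Kernel::KScopedDisableDispatch dd{kernel}; // increments disable_count
    // Work here will not be preempted by the emulated scheduler.
}   // destructor decrements the count; at zero it reschedules the current
    // core, or the HLE thread in phantom mode, per the branch above
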
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index f4d83f99a..9ee20208e 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -106,6 +106,7 @@ enum class StepState : u32 {
StepPerformed, ///< Thread has stepped, waiting to be scheduled again
};
+void SetCurrentThread(KernelCore& kernel, KThread* thread);
[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
@@ -175,7 +176,7 @@ public:
void SetBasePriority(s32 value);
- [[nodiscard]] ResultCode Run();
+ [[nodiscard]] Result Run();
void Exit();
@@ -207,6 +208,8 @@ public:
void Continue();
+ void WaitUntilSuspended();
+
constexpr void SetSyncedIndex(s32 index) {
synced_index = index;
}
@@ -215,11 +218,11 @@ public:
return synced_index;
}
- constexpr void SetWaitResult(ResultCode wait_res) {
+ constexpr void SetWaitResult(Result wait_res) {
wait_result = wait_res;
}
- [[nodiscard]] constexpr ResultCode GetWaitResult() const {
+ [[nodiscard]] constexpr Result GetWaitResult() const {
return wait_result;
}
@@ -342,15 +345,15 @@ public:
return physical_affinity_mask;
}
- [[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+ [[nodiscard]] Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
- [[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+ [[nodiscard]] Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
- [[nodiscard]] ResultCode SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
+ [[nodiscard]] Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
- [[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);
+ [[nodiscard]] Result SetActivity(Svc::ThreadActivity activity);
- [[nodiscard]] ResultCode Sleep(s64 timeout);
+ [[nodiscard]] Result Sleep(s64 timeout);
[[nodiscard]] s64 GetYieldScheduleCount() const {
return schedule_count;
@@ -408,20 +411,22 @@ public:
static void PostDestroy(uintptr_t arg);
- [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
+ [[nodiscard]] static Result InitializeDummyThread(KThread* thread);
+
+ [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread,
+ s32 virt_core);
- [[nodiscard]] static ResultCode InitializeIdleThread(Core::System& system, KThread* thread,
- s32 virt_core);
+ [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread,
+ s32 virt_core);
- [[nodiscard]] static ResultCode InitializeHighPriorityThread(Core::System& system,
- KThread* thread,
- KThreadFunction func,
- uintptr_t arg, s32 virt_core);
+ [[nodiscard]] static Result InitializeHighPriorityThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg,
+ s32 virt_core);
- [[nodiscard]] static ResultCode InitializeUserThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio,
- s32 virt_core, KProcess* owner);
+ [[nodiscard]] static Result InitializeUserThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg,
+ VAddr user_stack_top, s32 prio, s32 virt_core,
+ KProcess* owner);
public:
struct StackParameters {
@@ -478,39 +483,16 @@ public:
return per_core_priority_queue_entry[core];
}
- [[nodiscard]] bool IsKernelThread() const {
- return GetActiveCore() == 3;
- }
-
- [[nodiscard]] bool IsDispatchTrackingDisabled() const {
- return is_single_core || IsKernelThread();
- }
-
[[nodiscard]] s32 GetDisableDispatchCount() const {
- if (IsDispatchTrackingDisabled()) {
- // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
- return 1;
- }
-
return this->GetStackParameters().disable_count;
}
void DisableDispatch() {
- if (IsDispatchTrackingDisabled()) {
- // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
- return;
- }
-
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
this->GetStackParameters().disable_count++;
}
void EnableDispatch() {
- if (IsDispatchTrackingDisabled()) {
- // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
- return;
- }
-
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
this->GetStackParameters().disable_count--;
}
@@ -607,7 +589,7 @@ public:
void RemoveWaiter(KThread* thread);
- [[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);
+ [[nodiscard]] Result GetThreadContext3(std::vector<u8>& out);
[[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
@@ -633,9 +615,9 @@ public:
}
void BeginWait(KThreadQueue* queue);
- void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
- void EndWait(ResultCode wait_result_);
- void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
+ void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_);
+ void EndWait(Result wait_result_);
+ void CancelWait(Result wait_result_, bool cancel_timer_task);
[[nodiscard]] bool HasWaiters() const {
return !waiter_list.empty();
@@ -721,14 +703,13 @@ private:
void FinishTermination();
- [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
- s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
+ [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
+ s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
- [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
- uintptr_t arg, VAddr user_stack_top, s32 prio,
- s32 core, KProcess* owner, ThreadType type,
- std::function<void(void*)>&& init_func,
- void* init_func_parameter);
+ [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func,
+ uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 core, KProcess* owner, ThreadType type,
+ std::function<void()>&& init_func);
static void RestorePriority(KernelCore& kernel_ctx, KThread* thread);
@@ -765,7 +746,7 @@ private:
u32 suspend_request_flags{};
u32 suspend_allowed_flags{};
s32 synced_index{};
- ResultCode wait_result{ResultSuccess};
+ Result wait_result{ResultSuccess};
s32 base_priority{};
s32 physical_ideal_core_id{};
s32 virtual_ideal_core_id{};
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index fbdc40b3a..563560114 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -13,7 +13,7 @@
namespace Kernel {
-ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
+Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
// Set that this process owns us.
m_owner = process;
m_kernel = &kernel;
@@ -35,7 +35,7 @@ ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
return ResultSuccess;
}
-ResultCode KThreadLocalPage::Finalize() {
+Result KThreadLocalPage::Finalize() {
// Get the physical address of the page.
const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
ASSERT(phys_addr);
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
index a4fe43ee5..0a7f22680 100644
--- a/src/core/hle/kernel/k_thread_local_page.h
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -34,8 +34,8 @@ public:
return m_virt_addr;
}
- ResultCode Initialize(KernelCore& kernel, KProcess* process);
- ResultCode Finalize();
+ Result Initialize(KernelCore& kernel, KProcess* process);
+ Result Finalize();
VAddr Reserve();
void Release(VAddr addr);
diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp
index 1c338904a..9f4e081ba 100644
--- a/src/core/hle/kernel/k_thread_queue.cpp
+++ b/src/core/hle/kernel/k_thread_queue.cpp
@@ -9,9 +9,9 @@ namespace Kernel {
void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
[[maybe_unused]] KSynchronizationObject* signaled_object,
- [[maybe_unused]] ResultCode wait_result) {}
+ [[maybe_unused]] Result wait_result) {}
-void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
+void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);
@@ -25,8 +25,7 @@ void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
}
-void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) {
+void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);
@@ -43,6 +42,6 @@ void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
}
void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
- [[maybe_unused]] ResultCode wait_result) {}
+ [[maybe_unused]] Result wait_result) {}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 4a7dbdd47..8d76ece81 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -14,10 +14,9 @@ public:
virtual ~KThreadQueue() = default;
virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
- ResultCode wait_result);
- virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
- virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task);
+ Result wait_result);
+ virtual void EndWait(KThread* waiting_thread, Result wait_result);
+ virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task);
private:
KernelCore& kernel;
@@ -28,7 +27,7 @@ class KThreadQueueWithoutEndWait : public KThreadQueue {
public:
explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
- void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
+ void EndWait(KThread* waiting_thread, Result wait_result) override final;
};
} // namespace Kernel
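
Subclasses override the Result-taking hooks declared above; a skeletal example that defers to the base-class behavior (the class name and body are illustrative, mirroring ThreadQueueImplForKSynchronizationObjectWait earlier in this diff):

class ThreadQueueExample final : public Kernel::KThreadQueue {
public:
    explicit ThreadQueueExample(Kernel::KernelCore& kernel_) : KThreadQueue(kernel_) {}

    void CancelWait(Kernel::KThread* waiting_thread, Result wait_result,
                    bool cancel_timer_task) override {
        // Unlink any custom bookkeeping for waiting_thread here, then let the
        // base class set the wait result and wake the thread.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }
};
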
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index 1ed4b0f6f..b0320eb73 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -13,8 +13,8 @@ KTransferMemory::KTransferMemory(KernelCore& kernel_)
KTransferMemory::~KTransferMemory() = default;
-ResultCode KTransferMemory::Initialize(VAddr address_, std::size_t size_,
- Svc::MemoryPermission owner_perm_) {
+Result KTransferMemory::Initialize(VAddr address_, std::size_t size_,
+ Svc::MemoryPermission owner_perm_) {
// Set members.
owner = kernel.CurrentProcess();
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index 9ad80ba30..85d508ee7 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -7,7 +7,7 @@
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
-union ResultCode;
+union Result;
namespace Core::Memory {
class Memory;
@@ -26,7 +26,7 @@ public:
explicit KTransferMemory(KernelCore& kernel_);
~KTransferMemory() override;
- ResultCode Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
+ Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
void Finalize() override;
diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp
index 26c8489ad..ff88c5acd 100644
--- a/src/core/hle/kernel/k_writable_event.cpp
+++ b/src/core/hle/kernel/k_writable_event.cpp
@@ -18,11 +18,11 @@ void KWritableEvent::Initialize(KEvent* parent_event_, std::string&& name_) {
parent->GetReadableEvent().Open();
}
-ResultCode KWritableEvent::Signal() {
+Result KWritableEvent::Signal() {
return parent->GetReadableEvent().Signal();
}
-ResultCode KWritableEvent::Clear() {
+Result KWritableEvent::Clear() {
return parent->GetReadableEvent().Clear();
}
diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h
index e289e80c4..3fd0c7d0a 100644
--- a/src/core/hle/kernel/k_writable_event.h
+++ b/src/core/hle/kernel/k_writable_event.h
@@ -25,8 +25,8 @@ public:
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
void Initialize(KEvent* parent_, std::string&& name_);
- ResultCode Signal();
- ResultCode Clear();
+ Result Signal();
+ Result Clear();
KEvent* GetParent() const {
return parent;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 73593c7a0..ce7fa8275 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -17,7 +17,6 @@
#include "common/thread.h"
#include "common/thread_worker.h"
#include "core/arm/arm_interface.h"
-#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
@@ -64,8 +63,6 @@ struct KernelCore::Impl {
is_phantom_mode_for_singlecore = false;
- InitializePhysicalCores();
-
// Derive the initial memory layout from the emulated board
Init::InitializeSlabResourceCounts(kernel);
DeriveInitialMemoryLayout();
@@ -75,16 +72,16 @@ struct KernelCore::Impl {
InitializeSystemResourceLimit(kernel, system.CoreTiming());
InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system);
- InitializeSchedulers();
InitializeShutdownThreads();
InitializePreemption(kernel);
+ InitializePhysicalCores();
RegisterHostThread();
}
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id].Initialize((*current_process).Is64BitProcess());
+ cores[core_id]->Initialize((*current_process).Is64BitProcess());
system.Memory().SetCurrentPageTable(*current_process, core_id);
}
}
@@ -95,26 +92,16 @@ struct KernelCore::Impl {
process_list.clear();
- // Close all open server sessions and ports.
- std::unordered_set<KAutoObject*> server_objects_;
- {
- std::scoped_lock lk(server_objects_lock);
- server_objects_ = server_objects;
- server_objects.clear();
- }
- for (auto* server_object : server_objects_) {
- server_object->Close();
- }
-
- // Ensures all service threads shut down gracefully.
- ClearServiceThreads();
+ CloseServices();
next_object_id = 0;
next_kernel_process_id = KProcess::InitialKIPIDMin;
next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1;
- cores.clear();
+ for (auto& core : cores) {
+ core = nullptr;
+ }
global_handle_table->Finalize();
global_handle_table.reset();
@@ -148,7 +135,6 @@ struct KernelCore::Impl {
shutdown_threads[core_id] = nullptr;
}
- schedulers[core_id]->Finalize();
schedulers[core_id].reset();
}
@@ -191,18 +177,41 @@ struct KernelCore::Impl {
global_object_list_container.reset();
}
+ void CloseServices() {
+ // Close all open server sessions and ports.
+ std::unordered_set<KAutoObject*> server_objects_;
+ {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects_ = server_objects;
+ server_objects.clear();
+ }
+ for (auto* server_object : server_objects_) {
+ server_object->Close();
+ }
+
+ // Ensures all service threads shut down gracefully.
+ ClearServiceThreads();
+ }
+
void InitializePhysicalCores() {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
- cores.emplace_back(i, system, *schedulers[i], interrupts);
- }
- }
+ const s32 core{static_cast<s32>(i)};
- void InitializeSchedulers() {
- for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- cores[i].Scheduler().Initialize();
+ schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel());
+ cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]);
+
+ auto* main_thread{Kernel::KThread::Create(system.Kernel())};
+ main_thread->SetName(fmt::format("MainThread:{}", core));
+ main_thread->SetCurrentCore(core);
+ ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
+
+ auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
+ idle_thread->SetCurrentCore(core);
+ ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
+
+ schedulers[i]->Initialize(main_thread, idle_thread, core);
}
}
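
Beyond the rename, this hunk changes bring-up semantics: scheduler construction, physical-core construction, and main/idle thread creation now happen in a single pass, replacing the former InitializeSchedulers() step. Condensed as a sketch, assuming the project headers; BringUpCore is a hypothetical helper, not code from the commit:

    // Hypothetical helper condensing the per-core loop above.
    void BringUpCore(Core::System& system, std::size_t i,
                     std::unique_ptr<Kernel::KScheduler>& scheduler,
                     std::unique_ptr<Kernel::PhysicalCore>& core_out) {
        const s32 core = static_cast<s32>(i);
        scheduler = std::make_unique<Kernel::KScheduler>(system.Kernel());
        core_out = std::make_unique<Kernel::PhysicalCore>(i, system, *scheduler);

        auto* main_thread = Kernel::KThread::Create(system.Kernel());
        main_thread->SetName(fmt::format("MainThread:{}", core));
        main_thread->SetCurrentCore(core);
        ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());

        auto* idle_thread = Kernel::KThread::Create(system.Kernel());
        idle_thread->SetCurrentCore(core);
        ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());

        // The scheduler owns both threads from the start; no later
        // InitializeSchedulers() pass is needed.
        scheduler->Initialize(main_thread, idle_thread, core);
    }
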
@@ -234,17 +243,18 @@ struct KernelCore::Impl {
void InitializePreemption(KernelCore& kernel) {
preemption_event = Core::Timing::CreateEvent(
- "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
+ "PreemptionCallback",
+ [this, &kernel](std::uintptr_t, s64 time,
+ std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
{
KScopedSchedulerLock lock(kernel);
global_scheduler_context->PreemptThreads();
}
- const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
- system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ return std::nullopt;
});
const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
- system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
void InitializeShutdownThreads() {
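
The preemption callback no longer reschedules itself by hand: under the new CoreTiming signature it returns std::optional<std::chrono::nanoseconds>, where std::nullopt keeps the period passed to ScheduleLoopingEvent and a value would override the next interval (the first argument reads as the initial delay and the second as the repeat period, going by the call above). A minimal sketch under that signature; the event name and body are placeholders:

    auto tick_event = Core::Timing::CreateEvent(
        "ExampleTick",
        [](std::uintptr_t, s64 /*time*/,
           std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
            // Periodic work goes here.
            return std::nullopt; // keep the configured 10 ms period
        });
    const auto period = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
    system.CoreTiming().ScheduleLoopingEvent(period, period, tick_event);
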
@@ -254,7 +264,6 @@ struct KernelCore::Impl {
core_id)
.IsSuccess());
shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
- shutdown_threads[core_id]->DisableDispatch();
}
}
@@ -332,6 +341,8 @@ struct KernelCore::Impl {
return is_shutting_down.load(std::memory_order_relaxed);
}
+ static inline thread_local KThread* current_thread{nullptr};
+
KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
@@ -342,7 +353,12 @@ struct KernelCore::Impl {
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
}
- return schedulers[thread_id]->GetCurrentThread();
+
+ return current_thread;
+ }
+
+ void SetCurrentEmuThread(KThread* thread) {
+ current_thread = thread;
}
void DeriveInitialMemoryLayout() {
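
Current-thread tracking moves from the per-core scheduler to a host-thread-local pointer: GetCurrentEmuThread() now reads the thread_local set by SetCurrentEmuThread(), falling back to the dummy thread for host threads that are not emulating a core. Condensed from this hunk, with the elided thread-id lookup restored from the surrounding context:

    static inline thread_local KThread* current_thread{nullptr};

    KThread* GetCurrentEmuThread() {
        const u32 thread_id = GetCurrentHostThreadID();
        if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
            return GetHostDummyThread(); // host-only threads get a dummy KThread
        }
        return current_thread; // whatever this host thread last set
    }

    void SetCurrentEmuThread(KThread* thread) {
        current_thread = thread;
    }
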
@@ -746,7 +762,7 @@ struct KernelCore::Impl {
std::unordered_set<KAutoObject*> registered_in_use_objects;
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
- std::vector<Kernel::PhysicalCore> cores;
+ std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores;
// Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
@@ -770,7 +786,6 @@ struct KernelCore::Impl {
Common::ThreadWorker service_threads_manager;
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads;
- std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
bool is_multicore{};
@@ -806,6 +821,10 @@ void KernelCore::Shutdown() {
impl->Shutdown();
}
+void KernelCore::CloseServices() {
+ impl->CloseServices();
+}
+
const KResourceLimit* KernelCore::GetSystemResourceLimit() const {
return impl->system_resource_limit;
}
@@ -855,11 +874,11 @@ const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
}
Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
- return impl->cores[id];
+ return *impl->cores[id];
}
const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
- return impl->cores[id];
+ return *impl->cores[id];
}
size_t KernelCore::CurrentPhysicalCoreIndex() const {
@@ -871,11 +890,11 @@ size_t KernelCore::CurrentPhysicalCoreIndex() const {
}
Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
- return impl->cores[CurrentPhysicalCoreIndex()];
+ return *impl->cores[CurrentPhysicalCoreIndex()];
}
const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
- return impl->cores[CurrentPhysicalCoreIndex()];
+ return *impl->cores[CurrentPhysicalCoreIndex()];
}
Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -887,15 +906,6 @@ Kernel::KScheduler* KernelCore::CurrentScheduler() {
return impl->schedulers[core_id].get();
}
-std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
- return impl->interrupts;
-}
-
-const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
- const {
- return impl->interrupts;
-}
-
Kernel::TimeManager& KernelCore::TimeManager() {
return impl->time_manager;
}
@@ -920,24 +930,18 @@ const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
return *impl->global_object_list_container;
}
-void KernelCore::InterruptAllPhysicalCores() {
- for (auto& physical_core : impl->cores) {
- physical_core.Interrupt();
- }
-}
-
void KernelCore::InvalidateAllInstructionCaches() {
for (auto& physical_core : impl->cores) {
- physical_core.ArmInterface().ClearInstructionCache();
+ physical_core->ArmInterface().ClearInstructionCache();
}
}
void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
for (auto& physical_core : impl->cores) {
- if (!physical_core.IsInitialized()) {
+ if (!physical_core->IsInitialized()) {
continue;
}
- physical_core.ArmInterface().InvalidateCacheRange(addr, size);
+ physical_core->ArmInterface().InvalidateCacheRange(addr, size);
}
}
@@ -1025,6 +1029,10 @@ KThread* KernelCore::GetCurrentEmuThread() const {
return impl->GetCurrentEmuThread();
}
+void KernelCore::SetCurrentEmuThread(KThread* thread) {
+ impl->SetCurrentEmuThread(thread);
+}
+
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
@@ -1079,14 +1087,22 @@ void KernelCore::Suspend(bool suspended) {
for (auto* process : GetProcessList()) {
process->SetActivity(activity);
+
+ if (should_suspend) {
+ // Wait for execution to stop
+ for (auto* thread : process->GetThreadList()) {
+ thread->WaitUntilSuspended();
+ }
+ }
}
}
void KernelCore::ShutdownCores() {
+ KScopedSchedulerLock lk{*this};
+
for (auto* thread : impl->shutdown_threads) {
void(thread->Run());
}
- InterruptAllPhysicalCores();
}
bool KernelCore::IsMulticore() const {
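
Taken together, the kernel.cpp changes give teardown an explicit order: Suspend(true) now blocks until each guest thread reports it has stopped, service shutdown is exposed as CloseServices(), and ShutdownCores() runs under the scheduler lock instead of interrupting cores directly. A hypothetical caller, using only the public interface shown in this commit:

    // Hypothetical teardown sequence; not code from the diff.
    void StopEmulation(Kernel::KernelCore& kernel) {
        kernel.Suspend(true);    // blocks until every guest thread is parked
        kernel.CloseServices();  // close server sessions/ports, join service threads
        kernel.ShutdownCores();  // runs the shutdown threads under the scheduler lock
        kernel.Shutdown();       // release remaining kernel resources
    }
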
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 4e7beab0e..bcf016a97 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -9,14 +9,12 @@
#include <string>
#include <unordered_map>
#include <vector>
-#include "core/arm/cpu_interrupt_handler.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/svc_common.h"
namespace Core {
-class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
} // namespace Core
@@ -109,6 +107,9 @@ public:
/// Clears all resources in use by the kernel instance.
void Shutdown();
+ /// Close all active services in use by the kernel instance.
+ void CloseServices();
+
/// Retrieves a shared pointer to the system resource limit instance.
const KResourceLimit* GetSystemResourceLimit() const;
@@ -180,12 +181,6 @@ public:
const KAutoObjectWithListContainer& ObjectListContainer() const;
- std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();
-
- const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
-
- void InterruptAllPhysicalCores();
-
void InvalidateAllInstructionCaches();
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
@@ -226,6 +221,9 @@ public:
/// Gets the current host_thread/guest_thread pointer.
KThread* GetCurrentEmuThread() const;
+ /// Sets the current guest_thread pointer.
+ void SetCurrentEmuThread(KThread* thread);
+
/// Gets the current host_thread handle.
u32 GetCurrentHostThreadID() const;
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index a5b16ae2e..d4375962f 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -1,7 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
-#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#include "core/core.h"
@@ -11,16 +10,14 @@
namespace Kernel {
-PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_,
- Core::CPUInterrupts& interrupts_)
- : core_index{core_index_}, system{system_}, scheduler{scheduler_},
- interrupts{interrupts_}, guard{std::make_unique<std::mutex>()} {
+PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_)
+ : core_index{core_index_}, system{system_}, scheduler{scheduler_} {
#ifdef ARCHITECTURE_x86_64
// TODO(bunnei): Initialization relies on a core being available. We may later replace this with
// a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
auto& kernel = system.Kernel();
arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
- system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
#else
#error Platform not supported yet.
#endif
@@ -34,7 +31,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
if (!is_64_bit) {
// We already initialized a 64-bit core, replace with a 32-bit one.
arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
- system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
}
#else
#error Platform not supported yet.
@@ -43,27 +40,30 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
void PhysicalCore::Run() {
arm_interface->Run();
+ arm_interface->ClearExclusiveState();
}
void PhysicalCore::Idle() {
- interrupts[core_index].AwaitInterrupt();
+ std::unique_lock lk{guard};
+ on_interrupt.wait(lk, [this] { return is_interrupted; });
}
bool PhysicalCore::IsInterrupted() const {
- return interrupts[core_index].IsInterrupted();
+ return is_interrupted;
}
void PhysicalCore::Interrupt() {
- guard->lock();
- interrupts[core_index].SetInterrupt(true);
+ std::unique_lock lk{guard};
+ is_interrupted = true;
arm_interface->SignalInterrupt();
- guard->unlock();
+ on_interrupt.notify_all();
}
void PhysicalCore::ClearInterrupt() {
- guard->lock();
- interrupts[core_index].SetInterrupt(false);
- guard->unlock();
+ std::unique_lock lk{guard};
+ is_interrupted = false;
+ arm_interface->ClearInterrupt();
+ on_interrupt.notify_all();
}
} // namespace Kernel
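
The per-core interrupt plumbing drops the shared Core::CPUInterruptHandler array in favor of state owned by each PhysicalCore: a mutex-guarded flag plus a condition variable. The pattern, distilled (member names as in the diff; the surrounding class is omitted):

    std::mutex guard;
    std::condition_variable on_interrupt;
    bool is_interrupted{};

    void Idle() {
        std::unique_lock lk{guard};
        on_interrupt.wait(lk, [&] { return is_interrupted; }); // park until interrupted
    }

    void Interrupt() {
        std::unique_lock lk{guard};
        is_interrupted = true;
        // (the commit's version also calls arm_interface->SignalInterrupt() here)
        on_interrupt.notify_all(); // wake a core parked in Idle()
    }
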
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 898d1e5db..2fc8d4be2 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -14,7 +14,6 @@ class KScheduler;
} // namespace Kernel
namespace Core {
-class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
} // namespace Core
@@ -23,15 +22,11 @@ namespace Kernel {
class PhysicalCore {
public:
- PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_,
- Core::CPUInterrupts& interrupts_);
+ PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_);
~PhysicalCore();
- PhysicalCore(const PhysicalCore&) = delete;
- PhysicalCore& operator=(const PhysicalCore&) = delete;
-
- PhysicalCore(PhysicalCore&&) = default;
- PhysicalCore& operator=(PhysicalCore&&) = delete;
+ YUZU_NON_COPYABLE(PhysicalCore);
+ YUZU_NON_MOVEABLE(PhysicalCore);
/// Initialize the core for the specified parameters.
void Initialize(bool is_64_bit);
@@ -86,9 +81,11 @@ private:
const std::size_t core_index;
Core::System& system;
Kernel::KScheduler& scheduler;
- Core::CPUInterrupts& interrupts;
- std::unique_ptr<std::mutex> guard;
+
+ std::mutex guard;
+ std::condition_variable on_interrupt;
std::unique_ptr<Core::ARM_Interface> arm_interface;
+ bool is_interrupted{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 54872626e..773319ad8 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -68,9 +68,9 @@ u32 GetFlagBitOffset(CapabilityType type) {
} // Anonymous namespace
-ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
+Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
+ std::size_t num_capabilities,
+ KPageTable& page_table) {
Clear();
// Allow all cores and priorities.
@@ -81,9 +81,9 @@ ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabiliti
return ParseCapabilities(capabilities, num_capabilities, page_table);
}
-ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
+Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
+ std::size_t num_capabilities,
+ KPageTable& page_table) {
Clear();
return ParseCapabilities(capabilities, num_capabilities, page_table);
@@ -107,9 +107,8 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
can_force_debug = true;
}
-ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
+Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table) {
u32 set_flags = 0;
u32 set_svc_bits = 0;
@@ -155,8 +154,8 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
return ResultSuccess;
}
-ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
- u32 flag, KPageTable& page_table) {
+Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
+ KPageTable& page_table) {
const auto type = GetCapabilityType(flag);
if (type == CapabilityType::Unset) {
@@ -224,7 +223,7 @@ void ProcessCapabilities::Clear() {
can_force_debug = false;
}
-ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
+Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) {
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
priority_mask, core_mask);
@@ -266,7 +265,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
+Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
const u32 index = flags >> 29;
const u32 svc_bit = 1U << index;
@@ -290,23 +289,23 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
- KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
+ KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
+Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
constexpr u32 interrupt_ignore_value = 0x3FF;
const u32 interrupt0 = (flags >> 12) & 0x3FF;
const u32 interrupt1 = (flags >> 22) & 0x3FF;
@@ -333,7 +332,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
+Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
@@ -344,7 +343,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
+Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
// Yes, the internal member variable is checked in the actual kernel here.
// This might look odd for options that are only allowed to be initialized
// just once, however the kernel has a separate initialization function for
@@ -364,7 +363,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
+Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
@@ -375,7 +374,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
+Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
index 7f3a2339d..ff05dc5ff 100644
--- a/src/core/hle/kernel/process_capability.h
+++ b/src/core/hle/kernel/process_capability.h
@@ -7,7 +7,7 @@
#include "common/common_types.h"
-union ResultCode;
+union Result;
namespace Kernel {
@@ -86,8 +86,8 @@ public:
/// @returns ResultSuccess if this capabilities instance was able to be initialized,
/// otherwise, an error code upon failure.
///
- ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
+ Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table);
/// Initializes this process capabilities instance for a userland process.
///
@@ -99,8 +99,8 @@ public:
/// @returns ResultSuccess if this capabilities instance was able to be initialized,
/// otherwise, an error code upon failure.
///
- ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
+ Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table);
/// Initializes this process capabilities instance for a process that does not
/// have any metadata to parse.
@@ -185,8 +185,8 @@ private:
///
/// @return ResultSuccess if no errors occur, otherwise an error code.
///
- ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
+ Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table);
/// Attempts to parse a capability descriptor that is only represented by a
/// single flag set.
@@ -200,8 +200,8 @@ private:
///
/// @return ResultSuccess if no errors occurred, otherwise an error code.
///
- ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
- KPageTable& page_table);
+ Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
+ KPageTable& page_table);
/// Clears the internal state of this process capability instance. Necessary
/// to have a sane starting point, since we allow running executables without
@@ -219,34 +219,34 @@ private:
void Clear();
/// Handles flags related to the priority and core number capability flags.
- ResultCode HandlePriorityCoreNumFlags(u32 flags);
+ Result HandlePriorityCoreNumFlags(u32 flags);
/// Handles flags related to determining the allowable SVC mask.
- ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);
+ Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);
/// Handles flags related to mapping physical memory pages.
- ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
+ Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
/// Handles flags related to mapping IO pages.
- ResultCode HandleMapIOFlags(u32 flags, KPageTable& page_table);
+ Result HandleMapIOFlags(u32 flags, KPageTable& page_table);
/// Handles flags related to mapping physical memory regions.
- ResultCode HandleMapRegionFlags(u32 flags, KPageTable& page_table);
+ Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);
/// Handles flags related to the interrupt capability flags.
- ResultCode HandleInterruptFlags(u32 flags);
+ Result HandleInterruptFlags(u32 flags);
/// Handles flags related to the program type.
- ResultCode HandleProgramTypeFlags(u32 flags);
+ Result HandleProgramTypeFlags(u32 flags);
/// Handles flags related to the handle table size.
- ResultCode HandleHandleTableFlags(u32 flags);
+ Result HandleHandleTableFlags(u32 flags);
/// Handles flags related to the kernel version capability flags.
- ResultCode HandleKernelVersionFlags(u32 flags);
+ Result HandleKernelVersionFlags(u32 flags);
/// Handles flags related to debug-specific capabilities.
- ResultCode HandleDebugFlags(u32 flags);
+ Result HandleDebugFlags(u32 flags);
SyscallCapabilities svc_capabilities;
InterruptCapabilities interrupt_capabilities;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2ff6d5fa6..27e5a805d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -58,8 +58,8 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) {
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
-ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
- u64 size) {
+Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
+ u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
return ResultInvalidAddress;
@@ -135,7 +135,7 @@ enum class ResourceLimitValueType {
} // Anonymous namespace
/// Set the process heap to a given size. It can both extend and shrink the heap.
-static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
+static Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
// Validate size.
@@ -148,9 +148,9 @@ static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size
return ResultSuccess;
}
-static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
+static Result SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
VAddr temp_heap_addr{};
- const ResultCode result{SetHeapSize(system, &temp_heap_addr, heap_size)};
+ const Result result{SetHeapSize(system, &temp_heap_addr, heap_size)};
*heap_addr = static_cast<u32>(temp_heap_addr);
return result;
}
@@ -166,8 +166,8 @@ constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
}
}
-static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 size,
- MemoryPermission perm) {
+static Result SetMemoryPermission(Core::System& system, VAddr address, u64 size,
+ MemoryPermission perm) {
LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X}", address, size,
perm);
@@ -188,8 +188,8 @@ static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 s
return page_table.SetMemoryPermission(address, size, perm);
}
-static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
- u32 attr) {
+static Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
+ u32 attr) {
LOG_DEBUG(Kernel_SVC,
"called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
size, mask, attr);
@@ -213,19 +213,19 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
return page_table.SetMemoryAttribute(address, size, mask, attr);
}
-static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
- u32 attr) {
+static Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
+ u32 attr) {
return SetMemoryAttribute(system, address, size, mask, attr);
}
/// Maps a memory range into a different range.
-static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+static Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
- if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
result.IsError()) {
return result;
}
@@ -233,18 +233,18 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
return page_table.MapMemory(dst_addr, src_addr, size);
}
-static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+static Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
return MapMemory(system, dst_addr, src_addr, size);
}
/// Unmaps a region that was previously mapped with svcMapMemory
-static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+static Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
- if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
result.IsError()) {
return result;
}
@@ -252,12 +252,12 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
return page_table.UnmapMemory(dst_addr, src_addr, size);
}
-static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
return UnmapMemory(system, dst_addr, src_addr, size);
}
/// Connect to an OS service given the port name, returns the handle to the port to out
-static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
+static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
auto& memory = system.Memory();
if (!memory.IsValidVirtualAddress(port_name_address)) {
LOG_ERROR(Kernel_SVC,
@@ -307,14 +307,14 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr po
return ResultSuccess;
}
-static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
- u32 port_name_address) {
+static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle,
+ u32 port_name_address) {
return ConnectToNamedPort(system, out_handle, port_name_address);
}
/// Makes a blocking IPC call to an OS service.
-static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
+static Result SendSyncRequest(Core::System& system, Handle handle) {
auto& kernel = system.Kernel();
// Create the wait queue.
@@ -327,7 +327,6 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
- auto thread = kernel.CurrentScheduler()->GetCurrentThread();
{
KScopedSchedulerLock lock(kernel);
@@ -337,15 +336,15 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
}
- return thread->GetWaitResult();
+ return GetCurrentThread(kernel).GetWaitResult();
}
-static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
+static Result SendSyncRequest32(Core::System& system, Handle handle) {
return SendSyncRequest(system, handle);
}
/// Get the ID for the specified thread.
-static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
+static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
// Get the thread from its handle.
KScopedAutoObject thread =
system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
@@ -356,10 +355,10 @@ static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle t
return ResultSuccess;
}
-static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
- u32* out_thread_id_high, Handle thread_handle) {
+static Result GetThreadId32(Core::System& system, u32* out_thread_id_low, u32* out_thread_id_high,
+ Handle thread_handle) {
u64 out_thread_id{};
- const ResultCode result{GetThreadId(system, &out_thread_id, thread_handle)};
+ const Result result{GetThreadId(system, &out_thread_id, thread_handle)};
*out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
*out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
@@ -368,7 +367,7 @@ static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
}
/// Gets the ID of the specified process or a specified thread's owning process.
-static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
+static Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);
// Get the object from the handle table.
@@ -399,8 +398,8 @@ static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle
return ResultSuccess;
}
-static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
- u32* out_process_id_high, Handle handle) {
+static Result GetProcessId32(Core::System& system, u32* out_process_id_low,
+ u32* out_process_id_high, Handle handle) {
u64 out_process_id{};
const auto result = GetProcessId(system, &out_process_id, handle);
*out_process_id_low = static_cast<u32>(out_process_id);
@@ -409,8 +408,8 @@ static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
}
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
- s32 num_handles, s64 nano_seconds) {
+static Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
+ s32 num_handles, s64 nano_seconds) {
LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
handles_address, num_handles, nano_seconds);
@@ -445,14 +444,14 @@ static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr ha
nano_seconds);
}
-static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
- s32 num_handles, u32 timeout_high, s32* index) {
+static Result WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
+ s32 num_handles, u32 timeout_high, s32* index) {
const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds);
}
/// Resumes a thread waiting on WaitSynchronization
-static ResultCode CancelSynchronization(Core::System& system, Handle handle) {
+static Result CancelSynchronization(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle);
// Get the thread from its handle.
@@ -465,13 +464,12 @@ static ResultCode CancelSynchronization(Core::System& system, Handle handle) {
return ResultSuccess;
}
-static ResultCode CancelSynchronization32(Core::System& system, Handle handle) {
+static Result CancelSynchronization32(Core::System& system, Handle handle) {
return CancelSynchronization(system, handle);
}
/// Attempts to lock a mutex
-static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
- u32 tag) {
+static Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
thread_handle, address, tag);
@@ -489,13 +487,12 @@ static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAdd
return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
}
-static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
- u32 tag) {
+static Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag) {
return ArbitrateLock(system, thread_handle, address, tag);
}
/// Unlock a mutex
-static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
+static Result ArbitrateUnlock(Core::System& system, VAddr address) {
LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
// Validate the input address.
@@ -513,7 +510,7 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
return system.Kernel().CurrentProcess()->SignalToAddress(address);
}
-static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
+static Result ArbitrateUnlock32(Core::System& system, u32 address) {
return ArbitrateUnlock(system, address);
}
@@ -624,7 +621,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
handle_debug_buffer(info1, info2);
- auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const auto thread_processor_id = current_thread->GetActiveCore();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
}
@@ -656,8 +653,8 @@ static void OutputDebugString32(Core::System& system, u32 address, u32 len) {
}
/// Gets system/memory information for the current process
-static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
- u64 info_sub_id) {
+static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
+ u64 info_sub_id) {
LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
info_sub_id, handle);
@@ -692,6 +689,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
// 6.0.0+
TotalPhysicalMemoryAvailableWithoutSystemResource = 21,
TotalPhysicalMemoryUsedWithoutSystemResource = 22,
+
+ // Homebrew only
+ MesosphereCurrentProcess = 65001,
};
const auto info_id_type = static_cast<GetInfoType>(info_id);
@@ -884,10 +884,10 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
const auto& core_timing = system.CoreTiming();
const auto& scheduler = *system.Kernel().CurrentScheduler();
- const auto* const current_thread = scheduler.GetCurrentThread();
+ const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const bool same_thread = current_thread == thread.GetPointerUnsafe();
- const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
+ const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime();
u64 out_ticks = 0;
if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
const u64 thread_ticks = current_thread->GetCpuTime();
@@ -914,18 +914,39 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
*result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
return ResultSuccess;
}
+ case GetInfoType::MesosphereCurrentProcess: {
+ // Verify the input handle is invalid.
+ R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
+
+ // Verify the sub-type is valid.
+ R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
+
+ // Get the handle table.
+ KProcess* current_process = system.Kernel().CurrentProcess();
+ KHandleTable& handle_table = current_process->GetHandleTable();
+
+ // Get a new handle for the current process.
+ Handle tmp;
+ R_TRY(handle_table.Add(&tmp, current_process));
+
+ // Set the output.
+ *result = tmp;
+
+ // We succeeded.
+ return ResultSuccess;
+ }
default:
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
return ResultInvalidEnumValue;
}
}
-static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
- u32 info_id, u32 handle, u32 sub_id_high) {
+static Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
+ u32 info_id, u32 handle, u32 sub_id_high) {
const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
u64 res_value{};
- const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)};
+ const Result result{GetInfo(system, &res_value, info_id, handle, sub_id)};
*result_high = static_cast<u32>(res_value >> 32);
*result_low = static_cast<u32>(res_value & std::numeric_limits<u32>::max());
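
GetInfo also gains the homebrew-only MesosphereCurrentProcess type (id 65001), which hands back a fresh handle to the calling process. A hypothetical emulator-side use, assuming only the GetInfo entry point shown above; GetOwnProcessHandle is illustrative:

    // Hypothetical helper; id 65001 and the handle/sub-id checks come from the
    // new GetInfo case above.
    Handle GetOwnProcessHandle(Core::System& system) {
        u64 out{};
        const Result result = GetInfo(system, &out, 65001 /* MesosphereCurrentProcess */,
                                      InvalidHandle, /*info_sub_id=*/0);
        ASSERT(result.IsSuccess());
        return static_cast<Handle>(out);
    }
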
@@ -933,7 +954,7 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h
}
/// Maps memory at a desired address
-static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+static Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -981,12 +1002,12 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
return page_table.MapPhysicalMemory(addr, size);
}
-static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+static Result MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
return MapPhysicalMemory(system, addr, size);
}
/// Unmaps memory previously mapped via MapPhysicalMemory
-static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+static Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -1034,13 +1055,13 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
return page_table.UnmapPhysicalMemory(addr, size);
}
-static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+static Result UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
return UnmapPhysicalMemory(system, addr, size);
}
/// Sets the thread activity
-static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
- ThreadActivity thread_activity) {
+static Result SetThreadActivity(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
thread_activity);
@@ -1065,13 +1086,13 @@ static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode SetThreadActivity32(Core::System& system, Handle thread_handle,
- Svc::ThreadActivity thread_activity) {
+static Result SetThreadActivity32(Core::System& system, Handle thread_handle,
+ Svc::ThreadActivity thread_activity) {
return SetThreadActivity(system, thread_handle, thread_activity);
}
/// Gets the thread context
-static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
+static Result GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
thread_handle);
@@ -1103,7 +1124,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
if (thread->GetRawState() != ThreadState::Runnable) {
bool current = false;
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetCurrentThread()) {
+ if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
current = true;
break;
}
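
Note the renamed accessor here: GetSchedulerCurrentThread() is what a core is physically running, distinct from the thread-local current emu thread introduced in kernel.cpp. The check this hunk performs, extracted as a hypothetical helper:

    // Hypothetical helper mirroring the loop above: is this thread currently
    // scheduled on any core?
    bool IsRunningOnAnyCore(Kernel::KernelCore& kernel, const Kernel::KThread* thread) {
        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
            if (thread == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
                return true;
            }
        }
        return false;
    }
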
@@ -1128,12 +1149,12 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
return ResultSuccess;
}
-static ResultCode GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
+static Result GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
return GetThreadContext(system, out_context, thread_handle);
}
/// Gets the priority for the specified thread
-static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
+static Result GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
LOG_TRACE(Kernel_SVC, "called");
// Get the thread from its handle.
@@ -1146,12 +1167,12 @@ static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Han
return ResultSuccess;
}
-static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
+static Result GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
return GetThreadPriority(system, out_priority, handle);
}
/// Sets the priority for the specified thread
-static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
+static Result SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
// Get the current process.
KProcess& process = *system.Kernel().CurrentProcess();
@@ -1169,7 +1190,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
+static Result SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
return SetThreadPriority(system, thread_handle, priority);
}
@@ -1229,8 +1250,8 @@ constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission p
} // Anonymous namespace
-static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
- u64 size, Svc::MemoryPermission map_perm) {
+static Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
+ Svc::MemoryPermission map_perm) {
LOG_TRACE(Kernel_SVC,
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
shmem_handle, address, size, map_perm);
@@ -1270,13 +1291,13 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAd
return ResultSuccess;
}
-static ResultCode MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
- u32 size, Svc::MemoryPermission map_perm) {
+static Result MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size,
+ Svc::MemoryPermission map_perm) {
return MapSharedMemory(system, shmem_handle, address, size, map_perm);
}
-static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
- u64 size) {
+static Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
+ u64 size) {
// Validate the address/size.
R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
@@ -1303,13 +1324,13 @@ static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, V
return ResultSuccess;
}
-static ResultCode UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
- u32 size) {
+static Result UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
+ u32 size) {
return UnmapSharedMemory(system, shmem_handle, address, size);
}
-static ResultCode SetProcessMemoryPermission(Core::System& system, Handle process_handle,
- VAddr address, u64 size, Svc::MemoryPermission perm) {
+static Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
+ u64 size, Svc::MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,
"called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
process_handle, address, size, perm);
@@ -1338,8 +1359,8 @@ static ResultCode SetProcessMemoryPermission(Core::System& system, Handle proces
return page_table.SetProcessMemoryPermission(address, size, perm);
}
-static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
+static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size) {
LOG_TRACE(Kernel_SVC,
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
dst_address, process_handle, src_address, size);
@@ -1368,7 +1389,7 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
ResultInvalidMemoryRegion);
// Create a new page group.
- KPageLinkedList pg;
+ KPageGroup pg;
R_TRY(src_pt.MakeAndOpenPageGroup(
std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
@@ -1381,8 +1402,8 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
return ResultSuccess;
}
-static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
+static Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size) {
LOG_TRACE(Kernel_SVC,
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
dst_address, process_handle, src_address, size);
@@ -1416,7 +1437,7 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
return ResultSuccess;
}
-static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
+static Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
// Get kernel instance.
@@ -1451,12 +1472,12 @@ static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr addr
return ResultSuccess;
}
-static ResultCode CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
+static Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
return CreateCodeMemory(system, out, address, size);
}
-static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
- VAddr address, size_t size, Svc::MemoryPermission perm) {
+static Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
+ VAddr address, size_t size, Svc::MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,
"called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
@@ -1534,15 +1555,13 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han
return ResultSuccess;
}
-static ResultCode ControlCodeMemory32(Core::System& system, Handle code_memory_handle,
- u32 operation, u64 address, u64 size,
- Svc::MemoryPermission perm) {
+static Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
+ u64 address, u64 size, Svc::MemoryPermission perm) {
return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
}
-static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
- VAddr page_info_address, Handle process_handle,
- VAddr address) {
+static Result QueryProcessMemory(Core::System& system, VAddr memory_info_address,
+ VAddr page_info_address, Handle process_handle, VAddr address) {
LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
@@ -1570,8 +1589,8 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
return ResultSuccess;
}
-static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
- VAddr page_info_address, VAddr query_address) {
+static Result QueryMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
+ VAddr query_address) {
LOG_TRACE(Kernel_SVC,
"called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
"query_address=0x{:016X}",
@@ -1581,13 +1600,13 @@ static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
query_address);
}
-static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address,
- u32 page_info_address, u32 query_address) {
+static Result QueryMemory32(Core::System& system, u32 memory_info_address, u32 page_info_address,
+ u32 query_address) {
return QueryMemory(system, memory_info_address, page_info_address, query_address);
}
-static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
- u64 src_address, u64 size) {
+static Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
LOG_DEBUG(Kernel_SVC,
"called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
"src_address=0x{:016X}, size=0x{:016X}",
@@ -1654,8 +1673,8 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
return page_table.MapCodeMemory(dst_address, src_address, size);
}
-static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
- u64 dst_address, u64 src_address, u64 size) {
+static Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
LOG_DEBUG(Kernel_SVC,
"called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
"size=0x{:016X}",
@@ -1747,8 +1766,8 @@ constexpr bool IsValidVirtualCoreId(int32_t core_id) {
} // Anonymous namespace
/// Creates a new thread
-static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
- VAddr stack_bottom, u32 priority, s32 core_id) {
+static Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
+ VAddr stack_bottom, u32 priority, s32 core_id) {
LOG_DEBUG(Kernel_SVC,
"called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
"priority=0x{:08X}, core_id=0x{:08X}",
@@ -1819,13 +1838,13 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
return ResultSuccess;
}
-static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
- u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
+static Result CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
+ u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
}
/// Starts the thread for the provided handle
-static ResultCode StartThread(Core::System& system, Handle thread_handle) {
+static Result StartThread(Core::System& system, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
// Get the thread from its handle.
@@ -1843,7 +1862,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
return ResultSuccess;
}
-static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
+static Result StartThread32(Core::System& system, Handle thread_handle) {
return StartThread(system, thread_handle);
}
@@ -1851,7 +1870,7 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
static void ExitThread(Core::System& system) {
LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
- auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
system.GlobalSchedulerContext().RemoveThread(current_thread);
current_thread->Exit();
system.Kernel().UnregisterInUseObject(current_thread);
@@ -1894,8 +1913,8 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
}
/// Wait process wide key atomic
-static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
- u32 tag, s64 timeout_ns) {
+static Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
+ s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
cv_key, tag, timeout_ns);
@@ -1930,8 +1949,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address,
address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
}
-static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
- u32 timeout_ns_low, u32 timeout_ns_high) {
+static Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
+ u32 timeout_ns_low, u32 timeout_ns_high) {
const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
}
@@ -1976,8 +1995,8 @@ constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
} // namespace
// Wait for an address (via Address Arbiter)
-static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
- s32 value, s64 timeout_ns) {
+static Result WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
+ s32 value, s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
address, arb_type, value, timeout_ns);
@@ -2014,15 +2033,15 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::Arbit
return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
}
-static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
- s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
+static Result WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
+ s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
return WaitForAddress(system, address, arb_type, value, timeout);
}
// Signals to an address (via Address Arbiter)
-static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
- s32 value, s32 count) {
+static Result SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
address, signal_type, value, count);
@@ -2063,8 +2082,8 @@ static void SynchronizePreemptionState(Core::System& system) {
}
}
-static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
- s32 value, s32 count) {
+static Result SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
return SignalToAddress(system, address, signal_type, value, count);
}
@@ -2102,7 +2121,7 @@ static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high)
}
/// Close a handle
-static ResultCode CloseHandle(Core::System& system, Handle handle) {
+static Result CloseHandle(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
// Remove the handle.
@@ -2112,12 +2131,12 @@ static ResultCode CloseHandle(Core::System& system, Handle handle) {
return ResultSuccess;
}
-static ResultCode CloseHandle32(Core::System& system, Handle handle) {
+static Result CloseHandle32(Core::System& system, Handle handle) {
return CloseHandle(system, handle);
}
/// Clears the signaled state of an event or process.
-static ResultCode ResetSignal(Core::System& system, Handle handle) {
+static Result ResetSignal(Core::System& system, Handle handle) {
LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
// Get the current handle table.
@@ -2144,7 +2163,7 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) {
return ResultInvalidHandle;
}
-static ResultCode ResetSignal32(Core::System& system, Handle handle) {
+static Result ResetSignal32(Core::System& system, Handle handle) {
return ResetSignal(system, handle);
}
@@ -2164,8 +2183,8 @@ constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
} // Anonymous namespace
/// Creates a TransferMemory object
-static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
- MemoryPermission map_perm) {
+static Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
+ MemoryPermission map_perm) {
auto& kernel = system.Kernel();
// Validate the size.
@@ -2211,13 +2230,13 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr
return ResultSuccess;
}
-static ResultCode CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
- MemoryPermission map_perm) {
+static Result CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
+ MemoryPermission map_perm) {
return CreateTransferMemory(system, out, address, size, map_perm);
}
-static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
- u64* out_affinity_mask) {
+static Result GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u64* out_affinity_mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
// Get the thread from its handle.
@@ -2231,8 +2250,8 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
- u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
+static Result GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
u64 out_affinity_mask{};
const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
*out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
@@ -2240,8 +2259,8 @@ static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle
return result;
}
-static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
- u64 affinity_mask) {
+static Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
+ u64 affinity_mask) {
// Determine the core id/affinity mask.
if (core_id == IdealCoreUseProcessValue) {
core_id = system.Kernel().CurrentProcess()->GetIdealCoreId();
@@ -2272,13 +2291,13 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
- u32 affinity_mask_low, u32 affinity_mask_high) {
+static Result SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
+ u32 affinity_mask_low, u32 affinity_mask_high) {
const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
}
-static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
+static Result SignalEvent(Core::System& system, Handle event_handle) {
LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
// Get the current handle table.
@@ -2291,11 +2310,11 @@ static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
return writable_event->Signal();
}
-static ResultCode SignalEvent32(Core::System& system, Handle event_handle) {
+static Result SignalEvent32(Core::System& system, Handle event_handle) {
return SignalEvent(system, event_handle);
}
-static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
+static Result ClearEvent(Core::System& system, Handle event_handle) {
LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
// Get the current handle table.
@@ -2322,11 +2341,11 @@ static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
return ResultInvalidHandle;
}
-static ResultCode ClearEvent32(Core::System& system, Handle event_handle) {
+static Result ClearEvent32(Core::System& system, Handle event_handle) {
return ClearEvent(system, event_handle);
}
-static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
+static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
LOG_DEBUG(Kernel_SVC, "called");
// Get the kernel reference and handle table.
@@ -2371,11 +2390,11 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o
return ResultSuccess;
}
-static ResultCode CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
+static Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
return CreateEvent(system, out_write, out_read);
}
-static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
+static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
// This function currently only allows retrieving a process' status.
@@ -2401,7 +2420,7 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
return ResultSuccess;
}
-static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
+static Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
LOG_DEBUG(Kernel_SVC, "called");
// Create a new resource limit.
@@ -2424,9 +2443,8 @@ static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle)
return ResultSuccess;
}
-static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
- Handle resource_limit_handle,
- LimitableResource which) {
+static Result GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
+ Handle resource_limit_handle, LimitableResource which) {
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
which);
@@ -2445,9 +2463,8 @@ static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limi
return ResultSuccess;
}
-static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
- Handle resource_limit_handle,
- LimitableResource which) {
+static Result GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
+ Handle resource_limit_handle, LimitableResource which) {
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
which);
@@ -2466,8 +2483,8 @@ static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_cu
return ResultSuccess;
}
-static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
- LimitableResource which, u64 limit_value) {
+static Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
+ LimitableResource which, u64 limit_value) {
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}",
resource_limit_handle, which, limit_value);
@@ -2486,8 +2503,8 @@ static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resour
return ResultSuccess;
}
-static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
- VAddr out_process_ids, u32 out_process_ids_size) {
+static Result GetProcessList(Core::System& system, u32* out_num_processes, VAddr out_process_ids,
+ u32 out_process_ids_size) {
LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
out_process_ids, out_process_ids_size);
@@ -2523,8 +2540,8 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
return ResultSuccess;
}
-static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
- u32 out_thread_ids_size, Handle debug_handle) {
+static Result GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
+ u32 out_thread_ids_size, Handle debug_handle) {
// TODO: Handle this case when debug events are supported.
UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
@@ -2563,9 +2580,9 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return ResultSuccess;
}
-static ResultCode FlushProcessDataCache32([[maybe_unused]] Core::System& system,
- [[maybe_unused]] Handle handle,
- [[maybe_unused]] u32 address, [[maybe_unused]] u32 size) {
+static Result FlushProcessDataCache32([[maybe_unused]] Core::System& system,
+ [[maybe_unused]] Handle handle, [[maybe_unused]] u32 address,
+ [[maybe_unused]] u32 size) {
// Note(Blinkhawk): For the purposes of emulating the data cache this is mostly a no-op,
// as all emulation happens at the same cache level on the host architecture, so the data
// cache does not need flushing.
@@ -2993,7 +3010,7 @@ void Call(Core::System& system, u32 immediate) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
- auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
+ auto* thread = GetCurrentThreadPointer(kernel);
thread->SetIsCallingSvc();
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
@@ -3009,11 +3026,6 @@ void Call(Core::System& system, u32 immediate) {
}
kernel.ExitSVCProfile();
-
- if (!thread->IsCallingSvc()) {
- auto* host_context = thread->GetHostContext().get();
- host_context->Rewind();
- }
}
} // namespace Kernel::Svc
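
Editor's note: the svc.cpp hunks above are a mechanical type rename (ResultCode to Result) plus two behavioral tweaks: the current thread is now fetched via GetCurrentThreadPointer() rather than through the scheduler, and the host-context Rewind() call after an SVC is removed. A minimal sketch of the post-rename handler shape follows; the handler and dispatch names here are illustrative, not actual svc.cpp entries, and only the Result/.raw contract is taken from the hunks above.

// Sketch only: hypothetical handler showing the renamed return type.
static Result ExampleSvc(Core::System& system, Handle handle) {
    if (handle == InvalidHandle) {
        // Error constants now have type Result (see svc_results.h below).
        return ResultInvalidHandle;
    }
    return ResultSuccess;
}
// The wrapper templates in svc_wrap.h unpack guest registers, call the typed
// handler, and write the handler's raw Result value back for the guest, e.g.:
//   SvcWrap64<ExampleSvc>(system);
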
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
index fab12d070..f27cade33 100644
--- a/src/core/hle/kernel/svc_results.h
+++ b/src/core/hle/kernel/svc_results.h
@@ -9,34 +9,34 @@ namespace Kernel {
// Confirmed Switch kernel error codes
-constexpr ResultCode ResultOutOfSessions{ErrorModule::Kernel, 7};
-constexpr ResultCode ResultInvalidArgument{ErrorModule::Kernel, 14};
-constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
-constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
-constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101};
-constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
-constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103};
-constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
-constexpr ResultCode ResultOutOfHandles{ErrorModule::Kernel, 105};
-constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
-constexpr ResultCode ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
-constexpr ResultCode ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
-constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
-constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
-constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
-constexpr ResultCode ResultInvalidPointer{ErrorModule::Kernel, 115};
-constexpr ResultCode ResultInvalidCombination{ErrorModule::Kernel, 116};
-constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
-constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
-constexpr ResultCode ResultOutOfRange{ErrorModule::Kernel, 119};
-constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
-constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121};
-constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
-constexpr ResultCode ResultSessionClosed{ErrorModule::Kernel, 123};
-constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
-constexpr ResultCode ResultReservedUsed{ErrorModule::Kernel, 126};
-constexpr ResultCode ResultPortClosed{ErrorModule::Kernel, 131};
-constexpr ResultCode ResultLimitReached{ErrorModule::Kernel, 132};
-constexpr ResultCode ResultInvalidId{ErrorModule::Kernel, 519};
+constexpr Result ResultOutOfSessions{ErrorModule::Kernel, 7};
+constexpr Result ResultInvalidArgument{ErrorModule::Kernel, 14};
+constexpr Result ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
+constexpr Result ResultTerminationRequested{ErrorModule::Kernel, 59};
+constexpr Result ResultInvalidSize{ErrorModule::Kernel, 101};
+constexpr Result ResultInvalidAddress{ErrorModule::Kernel, 102};
+constexpr Result ResultOutOfResource{ErrorModule::Kernel, 103};
+constexpr Result ResultOutOfMemory{ErrorModule::Kernel, 104};
+constexpr Result ResultOutOfHandles{ErrorModule::Kernel, 105};
+constexpr Result ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
+constexpr Result ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
+constexpr Result ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
+constexpr Result ResultInvalidPriority{ErrorModule::Kernel, 112};
+constexpr Result ResultInvalidCoreId{ErrorModule::Kernel, 113};
+constexpr Result ResultInvalidHandle{ErrorModule::Kernel, 114};
+constexpr Result ResultInvalidPointer{ErrorModule::Kernel, 115};
+constexpr Result ResultInvalidCombination{ErrorModule::Kernel, 116};
+constexpr Result ResultTimedOut{ErrorModule::Kernel, 117};
+constexpr Result ResultCancelled{ErrorModule::Kernel, 118};
+constexpr Result ResultOutOfRange{ErrorModule::Kernel, 119};
+constexpr Result ResultInvalidEnumValue{ErrorModule::Kernel, 120};
+constexpr Result ResultNotFound{ErrorModule::Kernel, 121};
+constexpr Result ResultBusy{ErrorModule::Kernel, 122};
+constexpr Result ResultSessionClosed{ErrorModule::Kernel, 123};
+constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
+constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
+constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
+constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
+constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};
} // namespace Kernel
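
Editor's note: every constant above keeps its ErrorModule/description pair and changes only its declared type. For orientation, here is a minimal sketch of what a Result type must support to satisfy the usages in these diffs (constexpr construction from a module and description, plus a public raw value). This is not the real shared result-header definition; the bit layout (module in the low 9 bits, description above) is an assumption.

// Minimal sketch under the assumptions stated above.
struct Result {
    u32 raw;
    constexpr Result(ErrorModule module, u32 description)
        : raw{(static_cast<u32>(module) & 0x1FF) | (description << 9)} {}
    constexpr bool IsSuccess() const { return raw == 0; }
    constexpr bool IsError() const { return raw != 0; }
};
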
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 2271bb80c..4bc49087e 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -33,24 +33,24 @@ static inline void FuncReturn32(Core::System& system, u32 result) {
}
////////////////////////////////////////////////////////////////////////////////////////////////////
-// Function wrappers that return type ResultCode
+// Function wrappers that return type Result
-template <ResultCode func(Core::System&, u64)>
+template <Result func(Core::System&, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0)).raw);
}
-template <ResultCode func(Core::System&, u64, u64)>
+template <Result func(Core::System&, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw);
}
-template <ResultCode func(Core::System&, u32)>
+template <Result func(Core::System&, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}
-template <ResultCode func(Core::System&, u32, u32)>
+template <Result func(Core::System&, u32, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(
system,
@@ -58,14 +58,14 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetThreadActivity
-template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
static_cast<Svc::ThreadActivity>(Param(system, 1)))
.raw);
}
-template <ResultCode func(Core::System&, u32, u64, u64, u64)>
+template <Result func(Core::System&, u32, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
Param(system, 2), Param(system, 3))
@@ -73,7 +73,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by MapProcessMemory and UnmapProcessMemory
-template <ResultCode func(Core::System&, u64, u32, u64, u64)>
+template <Result func(Core::System&, u64, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
Param(system, 2), Param(system, 3))
@@ -81,7 +81,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by ControlCodeMemory
-template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3),
@@ -89,7 +89,7 @@ void SvcWrap64(Core::System& system) {
.raw);
}
-template <ResultCode func(Core::System&, u32*)>
+template <Result func(Core::System&, u32*)>
void SvcWrap64(Core::System& system) {
u32 param = 0;
const u32 retval = func(system, &param).raw;
@@ -97,7 +97,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u32)>
+template <Result func(Core::System&, u32*, u32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
@@ -105,7 +105,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u32*)>
+template <Result func(Core::System&, u32*, u32*)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -118,7 +118,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u64)>
+template <Result func(Core::System&, u32*, u64)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1)).raw;
@@ -126,7 +126,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u64, u32)>
+template <Result func(Core::System&, u32*, u64, u32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval =
@@ -136,7 +136,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64*, u32)>
+template <Result func(Core::System&, u64*, u32)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
@@ -145,12 +145,12 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64, u32)>
+template <Result func(Core::System&, u64, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1))).raw);
}
-template <ResultCode func(Core::System&, u64*, u64)>
+template <Result func(Core::System&, u64*, u64)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1)).raw;
@@ -159,7 +159,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64*, u32, u32)>
+template <Result func(Core::System&, u64*, u32, u32)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1)),
@@ -171,7 +171,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetResourceLimitLimitValue.
-template <ResultCode func(Core::System&, u64*, Handle, LimitableResource)>
+template <Result func(Core::System&, u64*, Handle, LimitableResource)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<Handle>(Param(system, 1)),
@@ -182,13 +182,13 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32, u64)>
+template <Result func(Core::System&, u32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw);
}
// Used by SetResourceLimitLimitValue
-template <ResultCode func(Core::System&, Handle, LimitableResource, u64)>
+template <Result func(Core::System&, Handle, LimitableResource, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
static_cast<LimitableResource>(Param(system, 1)), Param(system, 2))
@@ -196,7 +196,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetThreadCoreMask
-template <ResultCode func(Core::System&, Handle, s32, u64)>
+template <Result func(Core::System&, Handle, s32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
static_cast<s32>(Param(system, 1)), Param(system, 2))
@@ -204,44 +204,44 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetThreadCoreMask
-template <ResultCode func(Core::System&, Handle, s32*, u64*)>
+template <Result func(Core::System&, Handle, s32*, u64*)>
void SvcWrap64(Core::System& system) {
s32 param_1 = 0;
u64 param_2 = 0;
- const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
+ const Result retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
system.CurrentArmInterface().SetReg(1, param_1);
system.CurrentArmInterface().SetReg(2, param_2);
FuncReturn(system, retval.raw);
}
-template <ResultCode func(Core::System&, u64, u64, u32, u32)>
+template <Result func(Core::System&, u64, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
.raw);
}
-template <ResultCode func(Core::System&, u64, u64, u32, u64)>
+template <Result func(Core::System&, u64, u64, u32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<u32>(Param(system, 2)), Param(system, 3))
.raw);
}
-template <ResultCode func(Core::System&, u32, u64, u32)>
+template <Result func(Core::System&, u32, u64, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
static_cast<u32>(Param(system, 2)))
.raw);
}
-template <ResultCode func(Core::System&, u64, u64, u64)>
+template <Result func(Core::System&, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1), Param(system, 2)).raw);
}
-template <ResultCode func(Core::System&, u64, u64, u32)>
+template <Result func(Core::System&, u64, u64, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(
system,
@@ -249,7 +249,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetMemoryPermission
-template <ResultCode func(Core::System&, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<Svc::MemoryPermission>(Param(system, 2)))
@@ -257,14 +257,14 @@ void SvcWrap64(Core::System& system) {
}
// Used by MapSharedMemory
-template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), Param(system, 1),
Param(system, 2), static_cast<Svc::MemoryPermission>(Param(system, 3)))
.raw);
}
-template <ResultCode func(Core::System&, u32, u64, u64)>
+template <Result func(Core::System&, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(
system,
@@ -272,7 +272,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by WaitSynchronization
-template <ResultCode func(Core::System&, s32*, u64, s32, s64)>
+template <Result func(Core::System&, s32*, u64, s32, s64)>
void SvcWrap64(Core::System& system) {
s32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), static_cast<s32>(Param(system, 2)),
@@ -283,7 +283,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64, u64, u32, s64)>
+template <Result func(Core::System&, u64, u64, u32, s64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<u32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
@@ -291,7 +291,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetInfo
-template <ResultCode func(Core::System&, u64*, u64, Handle, u64)>
+template <Result func(Core::System&, u64*, u64, Handle, u64)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1),
@@ -302,7 +302,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u64, u64, u64, u32, s32)>
+template <Result func(Core::System&, u32*, u64, u64, u64, u32, s32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3),
@@ -314,7 +314,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by CreateTransferMemory
-template <ResultCode func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2),
@@ -326,7 +326,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by CreateCodeMemory
-template <ResultCode func(Core::System&, Handle*, u64, u64)>
+template <Result func(Core::System&, Handle*, u64, u64)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2)).raw;
@@ -335,7 +335,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, Handle*, u64, u32, u32)>
+template <Result func(Core::System&, Handle*, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
@@ -347,7 +347,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by WaitForAddress
-template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
+template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system,
func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
@@ -356,7 +356,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by SignalToAddress
-template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
+template <Result func(Core::System&, u64, Svc::SignalType, s32, s32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system,
func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
@@ -425,7 +425,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by QueryMemory32, ArbitrateLock32
-template <ResultCode func(Core::System&, u32, u32, u32)>
+template <Result func(Core::System&, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
FuncReturn32(system,
func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw);
@@ -456,7 +456,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by CreateThread32
-template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
+template <Result func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
void SvcWrap32(Core::System& system) {
Handle param_1 = 0;
@@ -469,7 +469,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetInfo32
-template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
+template <Result func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -484,7 +484,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadPriority32, ConnectToNamedPort32
-template <ResultCode func(Core::System&, u32*, u32)>
+template <Result func(Core::System&, u32*, u32)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param32(system, 1)).raw;
@@ -493,7 +493,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadId32
-template <ResultCode func(Core::System&, u32*, u32*, u32)>
+template <Result func(Core::System&, u32*, u32*, u32)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -516,7 +516,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by CreateEvent32
-template <ResultCode func(Core::System&, Handle*, Handle*)>
+template <Result func(Core::System&, Handle*, Handle*)>
void SvcWrap32(Core::System& system) {
Handle param_1 = 0;
Handle param_2 = 0;
@@ -528,7 +528,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadId32
-template <ResultCode func(Core::System&, Handle, u32*, u32*, u32*)>
+template <Result func(Core::System&, Handle, u32*, u32*, u32*)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -542,7 +542,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadCoreMask32
-template <ResultCode func(Core::System&, Handle, s32*, u32*, u32*)>
+template <Result func(Core::System&, Handle, s32*, u32*, u32*)>
void SvcWrap32(Core::System& system) {
s32 param_1 = 0;
u32 param_2 = 0;
@@ -562,7 +562,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetThreadActivity32
-template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
static_cast<Svc::ThreadActivity>(Param(system, 1)))
@@ -571,7 +571,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetThreadPriority32
-template <ResultCode func(Core::System&, Handle, u32)>
+template <Result func(Core::System&, Handle, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw;
@@ -579,7 +579,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetMemoryAttribute32
-template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
+template <Result func(Core::System&, Handle, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
@@ -589,7 +589,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by MapSharedMemory32
-template <ResultCode func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), static_cast<u32>(Param(system, 2)),
@@ -599,7 +599,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetThreadCoreMask32
-template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
+template <Result func(Core::System&, Handle, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<Handle>(Param(system, 0)), static_cast<s32>(Param(system, 1)),
@@ -609,7 +609,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitProcessWideKeyAtomic32
-template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
+template <Result func(Core::System&, u32, u32, Handle, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
@@ -620,7 +620,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitForAddress32
-template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
+template <Result func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
static_cast<Svc::ArbitrationType>(Param(system, 1)),
@@ -631,7 +631,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SignalToAddress32
-template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
+template <Result func(Core::System&, u32, Svc::SignalType, s32, s32)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
static_cast<Svc::SignalType>(Param(system, 1)),
@@ -641,13 +641,13 @@ void SvcWrap32(Core::System& system) {
}
// Used by SendSyncRequest32, ArbitrateUnlock32
-template <ResultCode func(Core::System&, u32)>
+template <Result func(Core::System&, u32)>
void SvcWrap32(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}
// Used by CreateTransferMemory32
-template <ResultCode func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
Handle handle = 0;
const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2),
@@ -658,7 +658,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitSynchronization32
-template <ResultCode func(Core::System&, u32, u32, s32, u32, s32*)>
+template <Result func(Core::System&, u32, u32, s32, u32, s32*)>
void SvcWrap32(Core::System& system) {
s32 param_1 = 0;
const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
@@ -669,7 +669,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by CreateCodeMemory32
-template <ResultCode func(Core::System&, Handle*, u32, u32)>
+template <Result func(Core::System&, Handle*, u32, u32)>
void SvcWrap32(Core::System& system) {
Handle handle = 0;
@@ -680,7 +680,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by ControlCodeMemory32
-template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, Param32(system, 0), Param32(system, 1), Param(system, 2), Param(system, 4),
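
Editor's note: the svc_wrap.h changes above are pure type-swaps; the template selection machinery is untouched. A hedged sketch of how one of these wrappers is selected at a call site follows; the dispatch function shown is hypothetical, since the real table in svc.cpp is built from FunctionDef entries not quoted here.

// Hypothetical call site illustrating overload selection.
void DispatchCloseHandle(Core::System& system) {
    // Picks the SvcWrap64 overload whose non-type template parameter matches
    // the handler signature Result(Core::System&, u32) and invokes it, which
    // in turn forwards CloseHandle(...).raw to the guest's return register.
    SvcWrap64<CloseHandle>(system);
}
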
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 2724c3782..5ee72c432 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -11,15 +11,17 @@
namespace Kernel {
TimeManager::TimeManager(Core::System& system_) : system{system_} {
- time_manager_event_type =
- Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
- [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
- KThread* thread = reinterpret_cast<KThread*>(thread_handle);
- {
- KScopedSchedulerLock sl(system.Kernel());
- thread->OnTimer();
- }
- });
+ time_manager_event_type = Core::Timing::CreateEvent(
+ "Kernel::TimeManagerCallback",
+ [this](std::uintptr_t thread_handle, s64 time,
+ std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
+ KThread* thread = reinterpret_cast<KThread*>(thread_handle);
+ {
+ KScopedSchedulerLock sl(system.Kernel());
+ thread->OnTimer();
+ }
+ return std::nullopt;
+ });
}
void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
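
Editor's note: the time_manager.cpp hunk reflects a new Core::Timing::CreateEvent callback contract: the callback now receives the scheduled time as an extra s64 parameter and returns std::optional<std::chrono::nanoseconds>, where a non-empty value presumably asks the timing core to re-arm the event after that delay and std::nullopt (as returned above) means fire once. A sketch of a periodic event under that assumed contract; the event name and interval are illustrative.

// Sketch, assuming the reschedule semantics described above.
auto tick_event = Core::Timing::CreateEvent(
    "Kernel::ExampleTick",
    [](std::uintptr_t user_data, s64 time,
       std::chrono::nanoseconds late) -> std::optional<std::chrono::nanoseconds> {
        // Returning an interval re-arms the event; the TimeManager callback
        // above returns std::nullopt because thread timeouts fire once.
        return std::chrono::milliseconds{10};
    });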