Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 317
-rw-r--r--  src/core/hle/kernel/address_arbiter.h | 91
-rw-r--r--  src/core/hle/kernel/client_port.cpp | 7
-rw-r--r--  src/core/hle/kernel/client_port.h | 2
-rw-r--r--  src/core/hle/kernel/client_session.cpp | 19
-rw-r--r--  src/core/hle/kernel/client_session.h | 14
-rw-r--r--  src/core/hle/kernel/errors.h | 40
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp | 4
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h | 19
-rw-r--r--  src/core/hle/kernel/handle_table.cpp | 16
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 34
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 25
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp | 341
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h | 70
-rw-r--r--  src/core/hle/kernel/k_affinity_mask.h | 2
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 345
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h | 59
-rw-r--r--  src/core/hle/kernel/k_event.cpp | 32
-rw-r--r--  src/core/hle/kernel/k_event.h | 57
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.h | 57
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp | 130
-rw-r--r--  src/core/hle/kernel/k_light_lock.h | 41
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h | 26
-rw-r--r--  src/core/hle/kernel/k_readable_event.cpp | 56
-rw-r--r--  src/core/hle/kernel/k_readable_event.h | 51
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp | 152
-rw-r--r--  src/core/hle/kernel/k_resource_limit.h | 81
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 290
-rw-r--r--  src/core/hle/kernel/k_scheduler.h | 50
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h | 36
-rw-r--r--  src/core/hle/kernel/k_scoped_resource_reservation.h | 67
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 18
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp | 171
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h | 59
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 1048
-rw-r--r--  src/core/hle/kernel/k_thread.h | 768
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h | 81
-rw-r--r--  src/core/hle/kernel/k_writable_event.cpp | 27
-rw-r--r--  src/core/hle/kernel/k_writable_event.h | 44
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 134
-rw-r--r--  src/core/hle/kernel/kernel.h | 29
-rw-r--r--  src/core/hle/kernel/memory/address_space_info.cpp | 2
-rw-r--r--  src/core/hle/kernel/memory/memory_layout.h | 19
-rw-r--r--  src/core/hle/kernel/memory/memory_manager.cpp | 6
-rw-r--r--  src/core/hle/kernel/memory/page_heap.h | 4
-rw-r--r--  src/core/hle/kernel/memory/page_table.cpp | 87
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 170
-rw-r--r--  src/core/hle/kernel/mutex.h | 42
-rw-r--r--  src/core/hle/kernel/object.cpp | 6
-rw-r--r--  src/core/hle/kernel/object.h | 14
-rw-r--r--  src/core/hle/kernel/process.cpp | 210
-rw-r--r--  src/core/hle/kernel/process.h | 156
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 38
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 54
-rw-r--r--  src/core/hle/kernel/readable_event.h | 57
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp | 73
-rw-r--r--  src/core/hle/kernel/resource_limit.h | 104
-rw-r--r--  src/core/hle/kernel/server_port.cpp | 20
-rw-r--r--  src/core/hle/kernel/server_port.h | 9
-rw-r--r--  src/core/hle/kernel/server_session.cpp | 31
-rw-r--r--  src/core/hle/kernel/server_session.h | 24
-rw-r--r--  src/core/hle/kernel/session.cpp | 22
-rw-r--r--  src/core/hle/kernel/session.h | 8
-rw-r--r--  src/core/hle/kernel/shared_memory.cpp | 11
-rw-r--r--  src/core/hle/kernel/shared_memory.h | 2
-rw-r--r--  src/core/hle/kernel/svc.cpp | 1262
-rw-r--r--  src/core/hle/kernel/svc_common.h | 14
-rw-r--r--  src/core/hle/kernel/svc_results.h | 41
-rw-r--r--  src/core/hle/kernel/svc_types.h | 30
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 103
-rw-r--r--  src/core/hle/kernel/synchronization.cpp | 116
-rw-r--r--  src/core/hle/kernel/synchronization.h | 44
-rw-r--r--  src/core/hle/kernel/synchronization_object.cpp | 49
-rw-r--r--  src/core/hle/kernel/synchronization_object.h | 77
-rw-r--r--  src/core/hle/kernel/thread.cpp | 478
-rw-r--r--  src/core/hle/kernel/thread.h | 731
-rw-r--r--  src/core/hle/kernel/time_manager.cpp | 47
-rw-r--r--  src/core/hle/kernel/time_manager.h | 10
-rw-r--r--  src/core/hle/kernel/transfer_memory.cpp | 2
-rw-r--r--  src/core/hle/kernel/transfer_memory.h | 2
-rw-r--r--  src/core/hle/kernel/writable_event.cpp | 45
-rw-r--r--  src/core/hle/kernel/writable_event.h | 59
82 files changed, 5353 insertions(+), 3836 deletions(-)
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
deleted file mode 100644
index 20ffa7d47..000000000
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "core/arm/exclusive_monitor.h"
-#include "core/core.h"
-#include "core/hle/kernel/address_arbiter.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/kernel/time_manager.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-// Wake up num_to_wake (or all) threads in a vector.
-void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
- s32 num_to_wake) {
- // Only process up to 'num_to_wake' threads, unless 'num_to_wake' is <= 0, in which
- // case process them all.
- std::size_t last = waiting_threads.size();
- if (num_to_wake > 0) {
- last = std::min(last, static_cast<std::size_t>(num_to_wake));
- }
-
- // Signal the waiting threads.
- for (std::size_t i = 0; i < last; i++) {
- waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
- RemoveThread(waiting_threads[i]);
- waiting_threads[i]->WaitForArbitration(false);
- waiting_threads[i]->ResumeFromWait();
- }
-}
-
-AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
-AddressArbiter::~AddressArbiter() = default;
-
-ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
- s32 num_to_wake) {
- switch (type) {
- case SignalType::Signal:
- return SignalToAddressOnly(address, num_to_wake);
- case SignalType::IncrementAndSignalIfEqual:
- return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
- case SignalType::ModifyByWaitingCountAndSignalIfEqual:
- return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
- default:
- return ERR_INVALID_ENUM_VALUE;
- }
-}
-
-ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
- KScopedSchedulerLock lock(system.Kernel());
- const std::vector<std::shared_ptr<Thread>> waiting_threads =
- GetThreadsWaitingOnAddress(address);
- WakeThreads(waiting_threads, num_to_wake);
- return RESULT_SUCCESS;
-}
-
-ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
- s32 num_to_wake) {
- KScopedSchedulerLock lock(system.Kernel());
- auto& memory = system.Memory();
-
- // Ensure that we can write to the address.
- if (!memory.IsValidVirtualAddress(address)) {
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- const std::size_t current_core = system.CurrentCoreIndex();
- auto& monitor = system.Monitor();
- u32 current_value;
- do {
- current_value = monitor.ExclusiveRead32(current_core, address);
-
- if (current_value != static_cast<u32>(value)) {
- return ERR_INVALID_STATE;
- }
- current_value++;
- } while (!monitor.ExclusiveWrite32(current_core, address, current_value));
-
- return SignalToAddressOnly(address, num_to_wake);
-}
-
-ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
- s32 num_to_wake) {
- KScopedSchedulerLock lock(system.Kernel());
- auto& memory = system.Memory();
-
- // Ensure that we can write to the address.
- if (!memory.IsValidVirtualAddress(address)) {
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- // Get threads waiting on the address.
- const std::vector<std::shared_ptr<Thread>> waiting_threads =
- GetThreadsWaitingOnAddress(address);
-
- const std::size_t current_core = system.CurrentCoreIndex();
- auto& monitor = system.Monitor();
- s32 updated_value;
- do {
- updated_value = monitor.ExclusiveRead32(current_core, address);
-
- if (updated_value != value) {
- return ERR_INVALID_STATE;
- }
- // Determine the modified value depending on the waiting count.
- if (num_to_wake <= 0) {
- if (waiting_threads.empty()) {
- updated_value = value + 1;
- } else {
- updated_value = value - 1;
- }
- } else {
- if (waiting_threads.empty()) {
- updated_value = value + 1;
- } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
- updated_value = value - 1;
- } else {
- updated_value = value;
- }
- }
- } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
-
- WakeThreads(waiting_threads, num_to_wake);
- return RESULT_SUCCESS;
-}
-
-ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
- s64 timeout_ns) {
- switch (type) {
- case ArbitrationType::WaitIfLessThan:
- return WaitForAddressIfLessThan(address, value, timeout_ns, false);
- case ArbitrationType::DecrementAndWaitIfLessThan:
- return WaitForAddressIfLessThan(address, value, timeout_ns, true);
- case ArbitrationType::WaitIfEqual:
- return WaitForAddressIfEqual(address, value, timeout_ns);
- default:
- return ERR_INVALID_ENUM_VALUE;
- }
-}
-
-ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
- bool should_decrement) {
- auto& memory = system.Memory();
- auto& kernel = system.Kernel();
- Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
-
- Handle event_handle = InvalidHandle;
- {
- KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
-
- if (current_thread->IsPendingTermination()) {
- lock.CancelSleep();
- return ERR_THREAD_TERMINATING;
- }
-
- // Ensure that we can read the address.
- if (!memory.IsValidVirtualAddress(address)) {
- lock.CancelSleep();
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- s32 current_value = static_cast<s32>(memory.Read32(address));
- if (current_value >= value) {
- lock.CancelSleep();
- return ERR_INVALID_STATE;
- }
-
- current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
-
- s32 decrement_value;
-
- const std::size_t current_core = system.CurrentCoreIndex();
- auto& monitor = system.Monitor();
- do {
- current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
- if (should_decrement) {
- decrement_value = current_value - 1;
- } else {
- decrement_value = current_value;
- }
- } while (
- !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
-
- // Short-circuit without rescheduling, if timeout is zero.
- if (timeout == 0) {
- lock.CancelSleep();
- return RESULT_TIMEOUT;
- }
-
- current_thread->SetArbiterWaitAddress(address);
- InsertThread(SharedFrom(current_thread));
- current_thread->SetStatus(ThreadStatus::WaitArb);
- current_thread->WaitForArbitration(true);
- }
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- KScopedSchedulerLock lock(kernel);
- if (current_thread->IsWaitingForArbitration()) {
- RemoveThread(SharedFrom(current_thread));
- current_thread->WaitForArbitration(false);
- }
- }
-
- return current_thread->GetSignalingResult();
-}
-
-ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
- auto& memory = system.Memory();
- auto& kernel = system.Kernel();
- Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
-
- Handle event_handle = InvalidHandle;
- {
- KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
-
- if (current_thread->IsPendingTermination()) {
- lock.CancelSleep();
- return ERR_THREAD_TERMINATING;
- }
-
- // Ensure that we can read the address.
- if (!memory.IsValidVirtualAddress(address)) {
- lock.CancelSleep();
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- s32 current_value = static_cast<s32>(memory.Read32(address));
- if (current_value != value) {
- lock.CancelSleep();
- return ERR_INVALID_STATE;
- }
-
- // Short-circuit without rescheduling, if timeout is zero.
- if (timeout == 0) {
- lock.CancelSleep();
- return RESULT_TIMEOUT;
- }
-
- current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
- current_thread->SetArbiterWaitAddress(address);
- InsertThread(SharedFrom(current_thread));
- current_thread->SetStatus(ThreadStatus::WaitArb);
- current_thread->WaitForArbitration(true);
- }
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- KScopedSchedulerLock lock(kernel);
- if (current_thread->IsWaitingForArbitration()) {
- RemoveThread(SharedFrom(current_thread));
- current_thread->WaitForArbitration(false);
- }
- }
-
- return current_thread->GetSignalingResult();
-}
-
-void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
- const VAddr arb_addr = thread->GetArbiterWaitAddress();
- std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
-
- const auto iter =
- std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
- return entry->GetPriority() >= thread->GetPriority();
- });
-
- if (iter == thread_list.cend()) {
- thread_list.push_back(std::move(thread));
- } else {
- thread_list.insert(iter, std::move(thread));
- }
-}
-
-void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
- const VAddr arb_addr = thread->GetArbiterWaitAddress();
- std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
-
- const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
- [&thread](const auto& entry) { return thread == entry; });
-
- if (iter != thread_list.cend()) {
- thread_list.erase(iter);
- }
-}
-
-std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
- VAddr address) const {
- const auto iter = arb_threads.find(address);
- if (iter == arb_threads.cend()) {
- return {};
- }
-
- const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
- return {thread_list.cbegin(), thread_list.cend()};
-}
-} // namespace Kernel
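
The removed arbiter kept one std::list of waiters per address, ordered at insertion time by thread priority (InsertThread above). A minimal standalone sketch of that insertion policy, with FakeThread standing in for Kernel::Thread; because the comparison uses >=, a new waiter lands ahead of existing waiters of equal priority, and WakeThreads always services the list from the front (lowest priority value, i.e. most urgent, first):

    #include <algorithm>
    #include <list>
    #include <memory>

    struct FakeThread {
        int priority; // lower value = more urgent, as in the Horizon kernel
    };

    // Keep the list sorted by ascending priority value, so the most urgent
    // waiter sits at the front and is woken first.
    void InsertSorted(std::list<std::shared_ptr<FakeThread>>& waiters,
                      std::shared_ptr<FakeThread> thread) {
        const auto iter =
            std::find_if(waiters.cbegin(), waiters.cend(), [&thread](const auto& entry) {
                return entry->priority >= thread->priority;
            });
        if (iter == waiters.cend()) {
            waiters.push_back(std::move(thread));
        } else {
            waiters.insert(iter, std::move(thread));
        }
    }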
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
deleted file mode 100644
index b91edc67d..000000000
--- a/src/core/hle/kernel/address_arbiter.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <list>
-#include <memory>
-#include <unordered_map>
-#include <vector>
-
-#include "common/common_types.h"
-
-union ResultCode;
-
-namespace Core {
-class System;
-}
-
-namespace Kernel {
-
-class Thread;
-
-class AddressArbiter {
-public:
- enum class ArbitrationType {
- WaitIfLessThan = 0,
- DecrementAndWaitIfLessThan = 1,
- WaitIfEqual = 2,
- };
-
- enum class SignalType {
- Signal = 0,
- IncrementAndSignalIfEqual = 1,
- ModifyByWaitingCountAndSignalIfEqual = 2,
- };
-
- explicit AddressArbiter(Core::System& system);
- ~AddressArbiter();
-
- AddressArbiter(const AddressArbiter&) = delete;
- AddressArbiter& operator=(const AddressArbiter&) = delete;
-
- AddressArbiter(AddressArbiter&&) = default;
- AddressArbiter& operator=(AddressArbiter&&) = delete;
-
- /// Signals an address being waited on with a particular signaling type.
- ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);
-
- /// Waits on an address with a particular arbitration type.
- ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
-
-private:
- /// Signals an address being waited on.
- ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
-
- /// Signals an address being waited on and increments its value if equal to the value argument.
- ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
-
- /// Signals an address being waited on and modifies its value based on waiting thread count if
- /// equal to the value argument.
- ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
- s32 num_to_wake);
-
- /// Waits on an address if the value passed is less than the argument value,
- /// optionally decrementing.
- ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
- bool should_decrement);
-
- /// Waits on an address if the value passed is equal to the argument value.
- ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
-
- /// Wake up num_to_wake (or all) threads in a vector.
- void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
-
- /// Insert a thread into the address arbiter container
- void InsertThread(std::shared_ptr<Thread> thread);
-
- /// Removes a thread from the address arbiter container
- void RemoveThread(std::shared_ptr<Thread> thread);
-
- // Gets the threads waiting on an address.
- std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
-
- /// List of threads waiting for an address arbiter
- std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;
-
- Core::System& system;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index 8aff2227a..0b6957e31 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -4,11 +4,11 @@
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/session.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel {
@@ -21,7 +21,7 @@ std::shared_ptr<ServerPort> ClientPort::GetServerPort() const {
ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
if (active_sessions >= max_sessions) {
- return ERR_MAX_CONNECTIONS_REACHED;
+ return ResultMaxConnectionsReached;
}
active_sessions++;
@@ -33,9 +33,6 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
server_port->AppendPendingSession(std::move(server));
}
- // Wake the threads waiting on the ServerPort
- server_port->Signal();
-
return MakeResult(std::move(client));
}
diff --git a/src/core/hle/kernel/client_port.h b/src/core/hle/kernel/client_port.h
index 9762bbf0d..77559ebf9 100644
--- a/src/core/hle/kernel/client_port.h
+++ b/src/core/hle/kernel/client_port.h
@@ -51,6 +51,8 @@ public:
*/
void ConnectionClosed();
+ void Finalize() override {}
+
private:
std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp
index be9eba519..e230f365a 100644
--- a/src/core/hle/kernel/client_session.cpp
+++ b/src/core/hle/kernel/client_session.cpp
@@ -3,16 +3,16 @@
// Refer to the license.txt file included.
#include "core/hle/kernel/client_session.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/result.h"
namespace Kernel {
-ClientSession::ClientSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
+ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ClientSession::~ClientSession() {
// This destructor will be called automatically when the last ClientSession handle is closed by
@@ -22,15 +22,6 @@ ClientSession::~ClientSession() {
}
}
-bool ClientSession::ShouldWait(const Thread* thread) const {
- UNIMPLEMENTED();
- return {};
-}
-
-void ClientSession::Acquire(Thread* thread) {
- UNIMPLEMENTED();
-}
-
bool ClientSession::IsSignaled() const {
UNIMPLEMENTED();
return true;
@@ -47,12 +38,12 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern
return MakeResult(std::move(client_session));
}
-ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread,
+ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing) {
// Keep ServerSession alive until we're done working with it.
if (!parent->Server()) {
- return ERR_SESSION_CLOSED_BY_REMOTE;
+ return ResultSessionClosedByRemote;
}
// Signal the server session that new data is available
diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h
index e5e0690c2..85aafeaf4 100644
--- a/src/core/hle/kernel/client_session.h
+++ b/src/core/hle/kernel/client_session.h
@@ -7,7 +7,7 @@
#include <memory>
#include <string>
-#include "core/hle/kernel/synchronization_object.h"
+#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
union ResultCode;
@@ -24,9 +24,9 @@ namespace Kernel {
class KernelCore;
class Session;
-class Thread;
+class KThread;
-class ClientSession final : public SynchronizationObject {
+class ClientSession final : public KSynchronizationObject {
public:
explicit ClientSession(KernelCore& kernel);
~ClientSession() override;
@@ -46,15 +46,13 @@ public:
return HANDLE_TYPE;
}
- ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
+ ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing);
- bool ShouldWait(const Thread* thread) const override;
-
- void Acquire(Thread* thread) override;
-
bool IsSignaled() const override;
+ void Finalize() override {}
+
private:
static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
std::shared_ptr<Session> parent,
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
deleted file mode 100644
index d4e5d88cf..000000000
--- a/src/core/hle/kernel/errors.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "core/hle/result.h"
-
-namespace Kernel {
-
-// Confirmed Switch kernel error codes
-
-constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
-constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
-constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
-constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
-constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
-constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
-constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
-constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
-constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
-constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
-constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
-constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
-constexpr ResultCode ERR_INVALID_THREAD_PRIORITY{ErrorModule::Kernel, 112};
-constexpr ResultCode ERR_INVALID_HANDLE{ErrorModule::Kernel, 114};
-constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
-constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
-constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
-constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
-constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
-constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
-constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};
-constexpr ResultCode ERR_BUSY{ErrorModule::Kernel, 122};
-constexpr ResultCode ERR_SESSION_CLOSED_BY_REMOTE{ErrorModule::Kernel, 123};
-constexpr ResultCode ERR_INVALID_STATE{ErrorModule::Kernel, 125};
-constexpr ResultCode ERR_RESERVED_VALUE{ErrorModule::Kernel, 126};
-constexpr ResultCode ERR_RESOURCE_LIMIT_EXCEEDED{ErrorModule::Kernel, 132};
-
-} // namespace Kernel
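
These constants were superseded by svc_results.h, which the call sites in this diff reference under Result* names. A hedged sketch of the replacements for the codes exercised in this commit, pairing the new names visible in the hunks with the module/description numbers from the file just removed (svc_results.h itself is outside this excerpt, so treat the exact contents as an assumption):

    // Sketch only: names taken from call sites in this diff, numbers from the
    // removed errors.h above.
    constexpr ResultCode ResultMaxConnectionsReached{ErrorModule::Kernel, 7};
    constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
    constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
    constexpr ResultCode ResultHandleTableFull{ErrorModule::Kernel, 105};
    constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
    constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
    constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
    constexpr ResultCode ResultSessionClosedByRemote{ErrorModule::Kernel, 123};
    constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};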
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index a133e8ed0..c6838649f 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
GlobalSchedulerContext::~GlobalSchedulerContext() = default;
-void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
+void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
std::scoped_lock lock{global_list_guard};
thread_list.push_back(std::move(thread));
}
-void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
+void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
std::scoped_lock lock{global_list_guard};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 5c7b89290..11592843e 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -12,7 +12,8 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc_types.h"
namespace Kernel {
@@ -20,8 +21,12 @@ class KernelCore;
class SchedulerLock;
using KSchedulerPriorityQueue =
- KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
-constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+ KPriorityQueue<KThread, Core::Hardware::NUM_CPU_CORES, Svc::LowestThreadPriority,
+ Svc::HighestThreadPriority>;
+
+static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+static_assert(Svc::LowestThreadPriority >= HighestCoreMigrationAllowedPriority);
+static_assert(Svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);
class GlobalSchedulerContext final {
friend class KScheduler;
@@ -33,13 +38,13 @@ public:
~GlobalSchedulerContext();
/// Adds a new thread to the scheduler
- void AddThread(std::shared_ptr<Thread> thread);
+ void AddThread(std::shared_ptr<KThread> thread);
/// Removes a thread from the scheduler
- void RemoveThread(std::shared_ptr<Thread> thread);
+ void RemoveThread(std::shared_ptr<KThread> thread);
/// Returns a list of all threads managed by the scheduler
- [[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
+ [[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
return thread_list;
}
@@ -74,7 +79,7 @@ private:
LockType scheduler_lock;
/// Lists all thread ids that aren't deleted/etc.
- std::vector<std::shared_ptr<Thread>> thread_list;
+ std::vector<std::shared_ptr<KThread>> thread_list;
Common::SpinLock global_list_guard{};
};
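
The two static_asserts read inverted at first glance; they rely on the Horizon convention that numerically lower priority values are more urgent, so together they pin HighestCoreMigrationAllowedPriority inside the valid priority range. A sketch with the assumed SVC constants (defined in svc_common.h, which is not shown in this excerpt):

    // Assumed values from the Horizon SVC ABI: 0 is the most urgent priority.
    constexpr s32 HighestThreadPriority = 0;
    constexpr s32 LowestThreadPriority = 63;
    constexpr s32 HighestCoreMigrationAllowedPriority = 2;

    static_assert(LowestThreadPriority >= HighestCoreMigrationAllowedPriority);  // 63 >= 2
    static_assert(HighestThreadPriority <= HighestCoreMigrationAllowedPriority); // 0 <= 2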
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 40988b0fd..f96d34078 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -6,12 +6,12 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel {
namespace {
@@ -33,7 +33,7 @@ HandleTable::~HandleTable() = default;
ResultCode HandleTable::SetSize(s32 handle_table_size) {
if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
// Values less than or equal to zero indicate to use the maximum allowable
@@ -53,7 +53,7 @@ ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
const u16 slot = next_free_slot;
if (slot >= table_size) {
LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
- return ERR_HANDLE_TABLE_FULL;
+ return ResultHandleTableFull;
}
next_free_slot = generations[slot];
@@ -76,7 +76,7 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
std::shared_ptr<Object> object = GetGeneric(handle);
if (object == nullptr) {
LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
return Create(std::move(object));
}
@@ -84,11 +84,15 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
ResultCode HandleTable::Close(Handle handle) {
if (!IsValid(handle)) {
LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
const u16 slot = GetSlot(handle);
+ if (objects[slot].use_count() == 1) {
+ objects[slot]->Finalize();
+ }
+
objects[slot] = nullptr;
generations[slot] = next_free_slot;
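
Close() now runs a kernel-side teardown hook when the table drops the last strong reference: use_count() == 1 means no other handle or shared_ptr still refers to the object, so Finalize() executes exactly once, before the destructor. A minimal standalone sketch of the pattern (Object here is a stand-in for Kernel::Object):

    #include <memory>

    struct Object {
        virtual ~Object() = default;
        virtual void Finalize() {} // kernel-side teardown, distinct from the destructor
    };

    void CloseSlot(std::shared_ptr<Object>& slot) {
        // If this slot owns the last strong reference, run Finalize() before
        // releasing it; otherwise a later Close() on another handle will.
        if (slot.use_count() == 1) {
            slot->Finalize();
        }
        slot = nullptr; // destructor runs here if that was the last reference
    }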
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 83decf6cf..161d9f782 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -14,19 +14,19 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/server_session.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
-#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
namespace Kernel {
@@ -48,7 +48,7 @@ void SessionRequestHandler::ClientDisconnected(
HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> server_session,
- std::shared_ptr<Thread> thread)
+ std::shared_ptr<KThread> thread)
: server_session(std::move(server_session)),
thread(std::move(thread)), kernel{kernel}, memory{memory} {
cmd_buf[0] = 0;
@@ -182,7 +182,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTabl
return RESULT_SUCCESS;
}
-ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
+ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) {
auto& owner_process = *thread.GetOwnerProcess();
auto& handle_table = owner_process.GetHandleTable();
@@ -338,6 +338,28 @@ std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) cons
return 0;
}
+bool HLERequestContext::CanReadBuffer(std::size_t buffer_index) const {
+ const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
+ BufferDescriptorA()[buffer_index].Size()};
+
+ if (is_buffer_a) {
+ return BufferDescriptorA().size() > buffer_index;
+ } else {
+ return BufferDescriptorX().size() > buffer_index;
+ }
+}
+
+bool HLERequestContext::CanWriteBuffer(std::size_t buffer_index) const {
+ const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
+ BufferDescriptorB()[buffer_index].Size()};
+
+ if (is_buffer_b) {
+ return BufferDescriptorB().size() > buffer_index;
+ } else {
+ return BufferDescriptorC().size() > buffer_index;
+ }
+}
+
std::string HLERequestContext::Description() const {
if (!command_header) {
return "No command header available";
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index b112e1ebd..9a769781b 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -40,9 +40,9 @@ class HLERequestContext;
class KernelCore;
class Process;
class ServerSession;
-class Thread;
-class ReadableEvent;
-class WritableEvent;
+class KThread;
+class KReadableEvent;
+class KWritableEvent;
enum class ThreadWakeupReason;
@@ -110,7 +110,7 @@ class HLERequestContext {
public:
explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> session,
- std::shared_ptr<Thread> thread);
+ std::shared_ptr<KThread> thread);
~HLERequestContext();
/// Returns a pointer to the IPC command buffer for this request.
@@ -126,15 +126,12 @@ public:
return server_session;
}
- using WakeupCallback = std::function<void(
- std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
-
/// Populates this context with data from the requesting process/thread.
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
u32_le* src_cmdbuf);
/// Writes data from this context back to the requesting process/thread.
- ResultCode WriteToOutgoingCommandBuffer(Thread& thread);
+ ResultCode WriteToOutgoingCommandBuffer(KThread& thread);
u32_le GetCommand() const {
return command;
@@ -207,6 +204,12 @@ public:
/// Helper function to get the size of the output buffer
std::size_t GetWriteBufferSize(std::size_t buffer_index = 0) const;
+ /// Helper function to test whether the input buffer at buffer_index can be read
+ bool CanReadBuffer(std::size_t buffer_index = 0) const;
+
+ /// Helper function to test whether the output buffer at buffer_index can be written
+ bool CanWriteBuffer(std::size_t buffer_index = 0) const;
+
template <typename T>
std::shared_ptr<T> GetCopyObject(std::size_t index) {
return DynamicObjectCast<T>(copy_objects.at(index));
@@ -261,11 +264,11 @@ public:
std::string Description() const;
- Thread& GetThread() {
+ KThread& GetThread() {
return *thread;
}
- const Thread& GetThread() const {
+ const KThread& GetThread() const {
return *thread;
}
@@ -280,7 +283,7 @@ private:
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
std::shared_ptr<Kernel::ServerSession> server_session;
- std::shared_ptr<Thread> thread;
+ std::shared_ptr<KThread> thread;
// TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
new file mode 100644
index 000000000..7018f56da
--- /dev/null
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -0,0 +1,341 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/arm/exclusive_monitor.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/time_manager.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
+KAddressArbiter::KAddressArbiter(Core::System& system_)
+ : system{system_}, kernel{system.Kernel()} {}
+KAddressArbiter::~KAddressArbiter() = default;
+
+namespace {
+
+bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
+ *out = system.Memory().Read32(address);
+ return true;
+}
+
+bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
+ auto& monitor = system.Monitor();
+ const auto current_core = system.CurrentCoreIndex();
+
+ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // TODO(bunnei): We should call CanAccessAtomic(..) here.
+
+ // Load the value from the address.
+ const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+
+ // Compare it to the desired one.
+ if (current_value < value) {
+ // If less than, we want to try to decrement.
+ const s32 decrement_value = current_value - 1;
+
+ // Decrement and try to store.
+ if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
+ // If we failed to store, try again.
+ DecrementIfLessThan(system, out, address, value);
+ }
+ } else {
+ // Otherwise, clear our exclusive hold and finish
+ monitor.ClearExclusive();
+ }
+
+ // We're done.
+ *out = current_value;
+ return true;
+}
+
+bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
+ auto& monitor = system.Monitor();
+ const auto current_core = system.CurrentCoreIndex();
+
+ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // TODO(bunnei): We should call CanAccessAtomic(..) here.
+
+ // Load the value from the address.
+ const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+
+ // Compare it to the desired one.
+ if (current_value == value) {
+ // If equal, we want to try to write the new value.
+
+ // Try to store.
+ if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
+ // If we failed to store, try again.
+ UpdateIfEqual(system, out, address, value, new_value);
+ }
+ } else {
+ // Otherwise, clear our exclusive hold and finish.
+ monitor.ClearExclusive();
+ }
+
+ // We're done.
+ *out = current_value;
+ return true;
+}
+
+} // namespace
+
+ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
+ // Perform signaling.
+ s32 num_waiters{};
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ auto it = thread_tree.nfind_light({addr, -1});
+ while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ (it->GetAddressArbiterKey() == addr)) {
+ KThread* target_thread = std::addressof(*it);
+ target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+
+ ASSERT(target_thread->IsWaitingForAddressArbiter());
+ target_thread->Wakeup();
+
+ it = thread_tree.erase(it);
+ target_thread->ClearAddressArbiter();
+ ++num_waiters;
+ }
+ }
+ return RESULT_SUCCESS;
+}
+
+ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
+ // Perform signaling.
+ s32 num_waiters{};
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ // Check the userspace value.
+ s32 user_value{};
+ if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) {
+ LOG_ERROR(Kernel, "Invalid current memory!");
+ return ResultInvalidCurrentMemory;
+ }
+ if (user_value != value) {
+ return ResultInvalidState;
+ }
+
+ auto it = thread_tree.nfind_light({addr, -1});
+ while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ (it->GetAddressArbiterKey() == addr)) {
+ KThread* target_thread = std::addressof(*it);
+ target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+
+ ASSERT(target_thread->IsWaitingForAddressArbiter());
+ target_thread->Wakeup();
+
+ it = thread_tree.erase(it);
+ target_thread->ClearAddressArbiter();
+ ++num_waiters;
+ }
+ }
+ return RESULT_SUCCESS;
+}
+
+ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
+ // Perform signaling.
+ s32 num_waiters{};
+ {
+ [[maybe_unused]] const KScopedSchedulerLock sl(kernel);
+
+ auto it = thread_tree.nfind_light({addr, -1});
+ // Determine the updated value.
+ s32 new_value{};
+ if (count <= 0) {
+ if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+ new_value = value - 2;
+ } else {
+ new_value = value + 1;
+ }
+ } else {
+ if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+ auto tmp_it = it;
+ s32 tmp_num_waiters{};
+ while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
+ if (tmp_num_waiters++ >= count) {
+ break;
+ }
+ }
+
+ if (tmp_num_waiters < count) {
+ new_value = value - 1;
+ } else {
+ new_value = value;
+ }
+ } else {
+ new_value = value + 1;
+ }
+ }
+
+ // Check the userspace value.
+ s32 user_value{};
+ bool succeeded{};
+ if (value != new_value) {
+ succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value);
+ } else {
+ succeeded = ReadFromUser(system, &user_value, addr);
+ }
+
+ if (!succeeded) {
+ LOG_ERROR(Kernel, "Invalid current memory!");
+ return ResultInvalidCurrentMemory;
+ }
+ if (user_value != value) {
+ return ResultInvalidState;
+ }
+
+ while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ (it->GetAddressArbiterKey() == addr)) {
+ KThread* target_thread = std::addressof(*it);
+ target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+
+ ASSERT(target_thread->IsWaitingForAddressArbiter());
+ target_thread->Wakeup();
+
+ it = thread_tree.erase(it);
+ target_thread->ClearAddressArbiter();
+ ++num_waiters;
+ }
+ }
+ return RESULT_SUCCESS;
+}
+
+ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
+ // Prepare to wait.
+ KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ {
+ KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
+
+ // Check that the thread isn't terminating.
+ if (cur_thread->IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Set the synced object.
+ cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
+
+ // Read the value from userspace.
+ s32 user_value{};
+ bool succeeded{};
+ if (decrement) {
+ succeeded = DecrementIfLessThan(system, &user_value, addr, value);
+ } else {
+ succeeded = ReadFromUser(system, &user_value, addr);
+ }
+
+ if (!succeeded) {
+ slp.CancelSleep();
+ return ResultInvalidCurrentMemory;
+ }
+
+ // Check that the value is less than the specified one.
+ if (user_value >= value) {
+ slp.CancelSleep();
+ return ResultInvalidState;
+ }
+
+ // Check that the timeout is non-zero.
+ if (timeout == 0) {
+ slp.CancelSleep();
+ return ResultTimedOut;
+ }
+
+ // Set the arbiter.
+ cur_thread->SetAddressArbiter(&thread_tree, addr);
+ thread_tree.insert(*cur_thread);
+ cur_thread->SetState(ThreadState::Waiting);
+ cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
+ }
+
+ // Cancel the timer wait.
+ kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
+
+ // Remove from the address arbiter.
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ if (cur_thread->IsWaitingForAddressArbiter()) {
+ thread_tree.erase(thread_tree.iterator_to(*cur_thread));
+ cur_thread->ClearAddressArbiter();
+ }
+ }
+
+ // Get the result.
+ KSynchronizationObject* dummy{};
+ return cur_thread->GetWaitResult(&dummy);
+}
+
+ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
+ // Prepare to wait.
+ KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ {
+ KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
+
+ // Check that the thread isn't terminating.
+ if (cur_thread->IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Set the synced object.
+ cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
+
+ // Read the value from userspace.
+ s32 user_value{};
+ if (!ReadFromUser(system, &user_value, addr)) {
+ slp.CancelSleep();
+ return ResultInvalidCurrentMemory;
+ }
+
+ // Check that the value is equal.
+ if (value != user_value) {
+ slp.CancelSleep();
+ return ResultInvalidState;
+ }
+
+ // Check that the timeout is non-zero.
+ if (timeout == 0) {
+ slp.CancelSleep();
+ return ResultTimedOut;
+ }
+
+ // Set the arbiter.
+ cur_thread->SetAddressArbiter(&thread_tree, addr);
+ thread_tree.insert(*cur_thread);
+ cur_thread->SetState(ThreadState::Waiting);
+ cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
+ }
+
+ // Cancel the timer wait.
+ kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
+
+ // Remove from the address arbiter.
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ if (cur_thread->IsWaitingForAddressArbiter()) {
+ thread_tree.erase(thread_tree.iterator_to(*cur_thread));
+ cur_thread->ClearAddressArbiter();
+ }
+ }
+
+ // Get the result.
+ KSynchronizationObject* dummy{};
+ return cur_thread->GetWaitResult(&dummy);
+}
+
+} // namespace Kernel
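
DecrementIfLessThan and UpdateIfEqual above retry by calling themselves when the exclusive store fails, which is the LDREX/STREX pattern written as recursion; note that after such a retry the outer call still executes *out = current_value with its stale first read. An equivalent iterative sketch, using std::atomic's compare-exchange as a stand-in for the exclusive monitor, avoids both the recursion depth and the stale write-back:

    #include <atomic>

    // Sketch: decrement the word if it is less than 'value', returning the
    // value observed at the successful exchange (or at the failed test).
    int DecrementIfLessThan(std::atomic<int>& addr, int value) {
        int observed = addr.load();
        while (observed < value) {
            // compare_exchange_weak may fail like a failed STREX; on failure it
            // reloads 'observed' and the loop re-tests the condition.
            if (addr.compare_exchange_weak(observed, observed - 1)) {
                break;
            }
        }
        return observed;
    }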
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
new file mode 100644
index 000000000..8d379b524
--- /dev/null
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -0,0 +1,70 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/svc_types.h"
+
+union ResultCode;
+
+namespace Core {
+class System;
+}
+
+namespace Kernel {
+
+class KernelCore;
+
+class KAddressArbiter {
+public:
+ using ThreadTree = KConditionVariable::ThreadTree;
+
+ explicit KAddressArbiter(Core::System& system_);
+ ~KAddressArbiter();
+
+ [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
+ s32 count) {
+ switch (type) {
+ case Svc::SignalType::Signal:
+ return Signal(addr, count);
+ case Svc::SignalType::SignalAndIncrementIfEqual:
+ return SignalAndIncrementIfEqual(addr, value, count);
+ case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
+ return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
+ }
+ UNREACHABLE();
+ return RESULT_UNKNOWN;
+ }
+
+ [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
+ s64 timeout) {
+ switch (type) {
+ case Svc::ArbitrationType::WaitIfLessThan:
+ return WaitIfLessThan(addr, value, false, timeout);
+ case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
+ return WaitIfLessThan(addr, value, true, timeout);
+ case Svc::ArbitrationType::WaitIfEqual:
+ return WaitIfEqual(addr, value, timeout);
+ }
+ UNREACHABLE();
+ return RESULT_UNKNOWN;
+ }
+
+private:
+ [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
+ [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
+ [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+
+ ThreadTree thread_tree;
+
+ Core::System& system;
+ KernelCore& kernel;
+};
+
+} // namespace Kernel
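
Both entry points expect an already-validated address and simply dispatch on the SVC enum. A hedged sketch of what the svc.cpp side presumably looks like (the real handler is part of this commit's svc.cpp but outside this excerpt; the alignment check and the per-process accessor name are assumptions):

    // Sketch of the SVC-side dispatch; helper names are assumptions.
    static ResultCode WaitForAddress(Core::System& system, VAddr address,
                                     Svc::ArbitrationType type, s32 value, s64 timeout_ns) {
        // Reject misaligned words before touching the arbiter.
        if (!Common::IsAligned(address, sizeof(s32))) {
            return ResultInvalidAddress;
        }
        return system.Kernel().CurrentProcess()->GetAddressArbiter().WaitForAddress(
            address, type, value, timeout_ns);
    }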
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
index dd73781cd..b906895fc 100644
--- a/src/core/hle/kernel/k_affinity_mask.h
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -27,7 +27,7 @@ public:
}
[[nodiscard]] constexpr bool GetAffinity(s32 core) const {
- return this->mask & GetCoreBit(core);
+ return (this->mask & GetCoreBit(core)) != 0;
}
constexpr void SetAffinity(s32 core, bool set) {
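
The GetAffinity change only makes the u64-to-bool conversion explicit: mask & GetCoreBit(core) yields a u64, and returning that from a [[nodiscard]] constexpr bool draws implicit-narrowing warnings; comparing against zero keeps the behavior. A tiny standalone sketch with an assumed shape for the helper:

    #include <cstdint>

    // Assumed shape of the helper used above (defined elsewhere in this header).
    constexpr std::uint64_t GetCoreBit(int core) {
        return std::uint64_t{1} << core;
    }

    static_assert((std::uint64_t{0b0101} & GetCoreBit(2)) != 0); // core 2 is set
    static_assert((std::uint64_t{0b0101} & GetCoreBit(1)) == 0); // core 1 is clear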
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
new file mode 100644
index 000000000..170d8fa0d
--- /dev/null
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -0,0 +1,345 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <vector>
+
+#include "core/arm/exclusive_monitor.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/svc_common.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
+namespace {
+
+bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
+ *out = system.Memory().Read32(address);
+ return true;
+}
+
+bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
+ system.Memory().Write32(address, *p);
+ return true;
+}
+
+bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
+ u32 new_orr_mask) {
+ auto& monitor = system.Monitor();
+ const auto current_core = system.CurrentCoreIndex();
+
+ // Load the value from the address.
+ const auto expected = monitor.ExclusiveRead32(current_core, address);
+
+ // Orr in the new mask.
+ u32 value = expected | new_orr_mask;
+
+ // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
+ if (!expected) {
+ value = if_zero;
+ }
+
+ // Try to store.
+ if (!monitor.ExclusiveWrite32(current_core, address, value)) {
+ // If we failed to store, try again.
+ return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
+ }
+
+ // We're done.
+ *out = expected;
+ return true;
+}
+
+} // namespace
+
+KConditionVariable::KConditionVariable(Core::System& system_)
+ : system{system_}, kernel{system.Kernel()} {}
+
+KConditionVariable::~KConditionVariable() = default;
+
+ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
+ KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ // Signal the address.
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ // Remove waiter thread.
+ s32 num_waiters{};
+ KThread* next_owner_thread =
+ owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+
+ // Determine the next tag.
+ u32 next_value{};
+ if (next_owner_thread) {
+ next_value = next_owner_thread->GetAddressKeyValue();
+ if (num_waiters > 1) {
+ next_value |= Svc::HandleWaitMask;
+ }
+
+ next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+ next_owner_thread->Wakeup();
+ }
+
+ // Write the value to userspace.
+ if (!WriteToUser(system, addr, std::addressof(next_value))) {
+ if (next_owner_thread) {
+ next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
+ }
+
+ return ResultInvalidCurrentMemory;
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
+ KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ // Wait for the address.
+ {
+ std::shared_ptr<KThread> owner_thread;
+ ASSERT(!owner_thread);
+ {
+ KScopedSchedulerLock sl(kernel);
+ cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+
+ // Check if the thread should terminate.
+ R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
+
+ {
+ // Read the tag from userspace.
+ u32 test_tag{};
+ R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
+ ResultInvalidCurrentMemory);
+
+ // If the tag isn't the handle (with wait mask), we're done.
+ R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
+
+ // Get the lock owner thread.
+ owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle);
+ R_UNLESS(owner_thread, ResultInvalidHandle);
+
+ // Update the lock.
+ cur_thread->SetAddressKey(addr, value);
+ owner_thread->AddWaiter(cur_thread);
+ cur_thread->SetState(ThreadState::Waiting);
+ cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
+ cur_thread->SetMutexWaitAddressForDebugging(addr);
+ }
+ }
+ ASSERT(owner_thread);
+ }
+
+ // Remove the thread as a waiter from the lock owner.
+ {
+ KScopedSchedulerLock sl(kernel);
+ KThread* owner_thread = cur_thread->GetLockOwner();
+ if (owner_thread != nullptr) {
+ owner_thread->RemoveWaiter(cur_thread);
+ }
+ }
+
+ // Get the wait result.
+ KSynchronizationObject* dummy{};
+ return cur_thread->GetWaitResult(std::addressof(dummy));
+}
+
+KThread* KConditionVariable::SignalImpl(KThread* thread) {
+ // Check pre-conditions.
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Update the tag.
+ VAddr address = thread->GetAddressKey();
+ u32 own_tag = thread->GetAddressKeyValue();
+
+ u32 prev_tag{};
+ bool can_access{};
+ {
+ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // TODO(bunnei): We should call CanAccessAtomic(..) here.
+ can_access = true;
+ if (can_access) {
+ UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
+ Svc::HandleWaitMask);
+ }
+ }
+
+ KThread* thread_to_close = nullptr;
+ if (can_access) {
+ if (prev_tag == InvalidHandle) {
+ // If nobody held the lock previously, we're all good.
+ thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+ thread->Wakeup();
+ } else {
+ // Get the previous owner.
+ auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(
+ prev_tag & ~Svc::HandleWaitMask);
+
+ if (owner_thread) {
+ // Add the thread as a waiter on the owner.
+ owner_thread->AddWaiter(thread);
+ thread_to_close = owner_thread.get();
+ } else {
+ // The lock was tagged with a thread that doesn't exist.
+ thread->SetSyncedObject(nullptr, ResultInvalidState);
+ thread->Wakeup();
+ }
+ }
+ } else {
+ // If the address wasn't accessible, note so.
+ thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
+ thread->Wakeup();
+ }
+
+ return thread_to_close;
+}
+
+void KConditionVariable::Signal(u64 cv_key, s32 count) {
+ // Prepare for signaling.
+ constexpr int MaxThreads = 16;
+
+ // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
+ // std::shared_ptr.
+ std::vector<std::shared_ptr<KThread>> thread_list;
+ std::array<KThread*, MaxThreads> thread_array;
+ s32 num_to_close{};
+
+ // Perform signaling.
+ s32 num_waiters{};
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ auto it = thread_tree.nfind_light({cv_key, -1});
+ while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+ (it->GetConditionVariableKey() == cv_key)) {
+ KThread* target_thread = std::addressof(*it);
+
+ if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
+ if (num_to_close < MaxThreads) {
+ thread_array[num_to_close++] = thread;
+ } else {
+ thread_list.push_back(SharedFrom(thread));
+ }
+ }
+
+ it = thread_tree.erase(it);
+ target_thread->ClearConditionVariable();
+ ++num_waiters;
+ }
+
+ // If we have no waiters, clear the has waiter flag.
+ if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
+ const u32 has_waiter_flag{};
+ WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
+ }
+ }
+
+ // Close threads in the array.
+ for (auto i = 0; i < num_to_close; ++i) {
+ thread_array[i]->Close();
+ }
+
+ // Close threads in the list.
+ for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
+ (*it)->Close();
+ }
+}
+
+ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
+ // Prepare to wait.
+ KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ {
+ KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
+
+ // Set the synced object.
+ cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
+
+ // Check that the thread isn't terminating.
+ if (cur_thread->IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Update the value and process for the next owner.
+ {
+ // Remove waiter thread.
+ s32 num_waiters{};
+ KThread* next_owner_thread =
+ cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+
+ // Update for the next owner thread.
+ u32 next_value{};
+ if (next_owner_thread != nullptr) {
+ // Get the next tag value.
+ next_value = next_owner_thread->GetAddressKeyValue();
+ if (num_waiters > 1) {
+ next_value |= Svc::HandleWaitMask;
+ }
+
+ // Wake up the next owner.
+ next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
+ next_owner_thread->Wakeup();
+ }
+
+ // Write to the cv key.
+ {
+ const u32 has_waiter_flag = 1;
+ WriteToUser(system, key, std::addressof(has_waiter_flag));
+ // TODO(bunnei): We should call DataMemoryBarrier(..) here.
+ }
+
+ // Write the value to userspace.
+ if (!WriteToUser(system, addr, std::addressof(next_value))) {
+ slp.CancelSleep();
+ return ResultInvalidCurrentMemory;
+ }
+ }
+
+ // Update condition variable tracking.
+ {
+ cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
+ thread_tree.insert(*cur_thread);
+ }
+
+ // If the timeout is non-zero, set the thread as waiting.
+ if (timeout != 0) {
+ cur_thread->SetState(ThreadState::Waiting);
+ cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
+ cur_thread->SetMutexWaitAddressForDebugging(addr);
+ }
+ }
+
+ // Cancel the timer wait.
+ kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
+
+ // Remove from the condition variable.
+ {
+ KScopedSchedulerLock sl(kernel);
+
+ if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
+ owner->RemoveWaiter(cur_thread);
+ }
+
+ if (cur_thread->IsWaitingForConditionVariable()) {
+ thread_tree.erase(thread_tree.iterator_to(*cur_thread));
+ cur_thread->ClearConditionVariable();
+ }
+ }
+
+ // Get the result.
+ KSynchronizationObject* dummy{};
+ return cur_thread->GetWaitResult(std::addressof(dummy));
+}
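+
+// Illustrative pairing (a sketch of the intended guest-visible flow, assuming the usual
+// svcWaitProcessWideKeyAtomic/svcSignalProcessWideKey entry points):
+//
+//     // Waiter: atomically releases the mutex word at `addr`, sleeps on `key`, and
+//     // re-acquires the mutex on wake (or returns ResultTimedOut on timeout).
+//     cv.Wait(addr, key, value, timeout);
+//
+//     // Signaler: wakes up to `count` waiters; a count <= 0 wakes all of them.
+//     cv.Signal(key, count);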
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
new file mode 100644
index 000000000..861dbd420
--- /dev/null
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -0,0 +1,59 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/common_types.h"
+
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/result.h"
+
+namespace Core {
+class System;
+}
+
+namespace Kernel {
+
+class KConditionVariable {
+public:
+ using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
+
+ explicit KConditionVariable(Core::System& system_);
+ ~KConditionVariable();
+
+ // Arbitration
+ [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
+ [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
+
+ // Condition variable
+ void Signal(u64 cv_key, s32 count);
+ [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
+
+private:
+ [[nodiscard]] KThread* SignalImpl(KThread* thread);
+
+ ThreadTree thread_tree;
+
+ Core::System& system;
+ KernelCore& kernel;
+};
+
+inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
+ KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ tree->erase(tree->iterator_to(*thread));
+}
+
+inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
+ KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ tree->insert(*thread);
+}
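+
+// Illustrative call pattern (a sketch, not part of this change): these helpers are meant
+// to bracket a priority update so the intrusive tree stays correctly ordered, e.g.:
+//
+//     BeforeUpdatePriority(kernel, std::addressof(tree), thread);
+//     thread->SetPriority(new_priority); // Hypothetical mutator.
+//     AfterUpdatePriority(kernel, std::addressof(tree), thread);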
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
new file mode 100644
index 000000000..bb2fa4ad5
--- /dev/null
+++ b/src/core/hle/kernel/k_event.cpp
@@ -0,0 +1,32 @@
+// Copyright 2021 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/k_writable_event.h"
+
+namespace Kernel {
+
+KEvent::KEvent(KernelCore& kernel, std::string&& name) : Object{kernel, std::move(name)} {}
+
+KEvent::~KEvent() = default;
+
+std::shared_ptr<KEvent> KEvent::Create(KernelCore& kernel, std::string&& name) {
+ return std::make_shared<KEvent>(kernel, std::move(name));
+}
+
+void KEvent::Initialize() {
+ // Create our sub events.
+ readable_event = std::make_shared<KReadableEvent>(kernel, GetName() + ":Readable");
+ writable_event = std::make_shared<KWritableEvent>(kernel, GetName() + ":Writable");
+
+ // Initialize our sub events.
+ readable_event->Initialize(this);
+ writable_event->Initialize(this);
+
+ // Mark initialized.
+ initialized = true;
+}
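+
+// Minimal usage sketch (illustrative; assumes KWritableEvent::Signal() forwards to the
+// readable half, per k_writable_event.cpp elsewhere in this change):
+//
+//     auto event = KEvent::Create(kernel, "Example:Event");
+//     event->Initialize();
+//     event->GetWritableEvent()->Signal();
+//     event->GetReadableEvent()->Clear();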
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
new file mode 100644
index 000000000..2fb887129
--- /dev/null
+++ b/src/core/hle/kernel/k_event.h
@@ -0,0 +1,57 @@
+// Copyright 2021 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/object.h"
+
+namespace Kernel {
+
+class KernelCore;
+class KReadableEvent;
+class KWritableEvent;
+
+class KEvent final : public Object {
+public:
+ explicit KEvent(KernelCore& kernel, std::string&& name);
+ ~KEvent() override;
+
+ static std::shared_ptr<KEvent> Create(KernelCore& kernel, std::string&& name);
+
+ void Initialize();
+
+ void Finalize() override {}
+
+ std::string GetTypeName() const override {
+ return "KEvent";
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::Event;
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ std::shared_ptr<KReadableEvent>& GetReadableEvent() {
+ return readable_event;
+ }
+
+ std::shared_ptr<KWritableEvent>& GetWritableEvent() {
+ return writable_event;
+ }
+
+ const std::shared_ptr<KReadableEvent>& GetReadableEvent() const {
+ return readable_event;
+ }
+
+ const std::shared_ptr<KWritableEvent>& GetWritableEvent() const {
+ return writable_event;
+ }
+
+private:
+ std::shared_ptr<KReadableEvent> readable_event;
+ std::shared_ptr<KWritableEvent> writable_event;
+ bool initialized{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h
new file mode 100644
index 000000000..362d0db28
--- /dev/null
+++ b/src/core/hle/kernel/k_light_condition_variable.h
@@ -0,0 +1,57 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/time_manager.h"
+
+namespace Kernel {
+class KernelCore;
+
+class KLightConditionVariable {
+public:
+ explicit KLightConditionVariable(KernelCore& kernel) : thread_queue(kernel), kernel(kernel) {}
+
+ void Wait(KLightLock* lock, s64 timeout = -1) {
+ WaitImpl(lock, timeout);
+ lock->Lock();
+ }
+
+ void Broadcast() {
+ KScopedSchedulerLock lk{kernel};
+ while (thread_queue.WakeupFrontThread() != nullptr) {
+ // We want to signal all threads, and so should continue waking up until there's nothing
+ // to wake.
+ }
+ }
+
+private:
+ void WaitImpl(KLightLock* lock, s64 timeout) {
+ KThread* owner = GetCurrentThreadPointer(kernel);
+
+ // Sleep the thread.
+ {
+ KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
+ lock->Unlock();
+
+ if (!thread_queue.SleepThread(owner)) {
+ lk.CancelSleep();
+ return;
+ }
+ }
+
+ // Cancel the timer task that the sleep set up.
+ kernel.TimeManager().UnscheduleTimeEvent(owner);
+ }
+ KThreadQueue thread_queue;
+ KernelCore& kernel;
+};
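+
+// Illustrative wait-loop sketch (not part of this change): usage mirrors a classic
+// condition variable, with the predicate re-checked under the lock, as in
+// KResourceLimit::Reserve():
+//
+//     KScopedLightLock lk{lock};
+//     while (!predicate()) { // `predicate` is hypothetical.
+//         cond_var.Wait(std::addressof(lock), timeout);
+//     }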
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
new file mode 100644
index 000000000..f974022e8
--- /dev/null
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -0,0 +1,130 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+void KLightLock::Lock() {
+ const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+ const uintptr_t cur_thread_tag = (cur_thread | 1);
+
+ while (true) {
+ uintptr_t old_tag = tag.load(std::memory_order_relaxed);
+
+ while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1,
+ std::memory_order_acquire)) {
+ if ((old_tag | 1) == cur_thread_tag) {
+ return;
+ }
+ }
+
+ if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
+ break;
+ }
+
+ LockSlowPath(old_tag | 1, cur_thread);
+ }
+}
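+
+// A note on the tag encoding used above: a zero tag means unlocked; otherwise the tag
+// holds the owner KThread pointer, with bit 0 set while at least one waiter exists:
+//
+//     tag == 0             -> unlocked
+//     tag == owner         -> locked, uncontended
+//     tag == (owner | 1)   -> locked, contended (unlock takes the slow path)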
+
+void KLightLock::Unlock() {
+ const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+ uintptr_t expected = cur_thread;
+ do {
+ if (expected != cur_thread) {
+ return UnlockSlowPath(cur_thread);
+ }
+ } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
+}
+
+void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
+ KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
+
+ // Pend the current thread waiting on the owner thread.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ // Ensure we actually have locking to do.
+ if (tag.load(std::memory_order_relaxed) != _owner) {
+ return;
+ }
+
+ // Add the current thread as a waiter on the owner.
+ KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
+ cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
+ owner_thread->AddWaiter(cur_thread);
+
+ // Set thread states.
+ if (cur_thread->GetState() == ThreadState::Runnable) {
+ cur_thread->SetState(ThreadState::Waiting);
+ } else {
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
+ }
+
+ if (owner_thread->IsSuspended()) {
+ owner_thread->ContinueIfHasKernelWaiters();
+ }
+ }
+
+ // We're no longer waiting on the lock owner.
+ {
+ KScopedSchedulerLock sl{kernel};
+ KThread* owner_thread = cur_thread->GetLockOwner();
+ if (owner_thread) {
+ owner_thread->RemoveWaiter(cur_thread);
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
+ }
+ }
+}
+
+void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
+ KThread* owner_thread = reinterpret_cast<KThread*>(_cur_thread);
+
+ // Unlock.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ // Get the next owner.
+ s32 num_waiters = 0;
+ KThread* next_owner = owner_thread->RemoveWaiterByKey(
+ std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
+
+ // Pass the lock to the next owner.
+ uintptr_t next_tag = 0;
+ if (next_owner) {
+ next_tag = reinterpret_cast<uintptr_t>(next_owner);
+ if (num_waiters > 1) {
+ next_tag |= 0x1;
+ }
+
+ if (next_owner->GetState() == ThreadState::Waiting) {
+ next_owner->SetState(ThreadState::Runnable);
+ } else {
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
+ }
+
+ if (next_owner->IsSuspended()) {
+ next_owner->ContinueIfHasKernelWaiters();
+ }
+ }
+
+ // We may have unsuspended in the process of acquiring the lock, so we'll re-suspend now if
+ // so.
+ if (owner_thread->IsSuspended()) {
+ owner_thread->TrySuspend();
+ }
+
+ // Write the new tag value.
+ tag.store(next_tag);
+ }
+}
+
+bool KLightLock::IsLockedByCurrentThread() const {
+ return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL);
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h
new file mode 100644
index 000000000..f4c45f76a
--- /dev/null
+++ b/src/core/hle/kernel/k_light_lock.h
@@ -0,0 +1,41 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_scoped_lock.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+class KLightLock {
+public:
+ explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {}
+
+ void Lock();
+
+ void Unlock();
+
+ void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
+
+ void UnlockSlowPath(uintptr_t cur_thread);
+
+ bool IsLocked() const {
+ return tag != 0;
+ }
+
+ bool IsLockedByCurrentThread() const;
+
+private:
+ std::atomic<uintptr_t> tag{};
+ KernelCore& kernel;
+};
+
+using KScopedLightLock = KScopedLock<KLightLock>;
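+
+// Minimal usage sketch (illustrative; assumes KScopedLock can be constructed from a lock
+// reference):
+//
+//     KLightLock lock{kernel};
+//     {
+//         KScopedLightLock lk{lock};
+//         // ... critical section ...
+//     } // Unlocked on scope exit.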
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 99fb8fe93..4aa669d95 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -8,27 +8,27 @@
#pragma once
#include <array>
+#include <bit>
#include <concepts>
#include "common/assert.h"
#include "common/bit_set.h"
-#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/concepts.h"
namespace Kernel {
-class Thread;
+class KThread;
template <typename T>
concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
{ t.GetAffinityMask() }
->Common::ConvertibleTo<u64>;
- {t.SetAffinityMask(std::declval<u64>())};
+ {t.SetAffinityMask(0)};
- { t.GetAffinity(std::declval<int32_t>()) }
+ { t.GetAffinity(0) }
->std::same_as<bool>;
- {t.SetAffinity(std::declval<int32_t>(), std::declval<bool>())};
+ {t.SetAffinity(0, false)};
{t.SetAll()};
};
@@ -42,11 +42,11 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
->std::same_as<T*>;
{ (typename T::QueueEntry()).GetPrev() }
->std::same_as<T*>;
- { t.GetPriorityQueueEntry(std::declval<s32>()) }
+ { t.GetPriorityQueueEntry(0) }
->std::same_as<typename T::QueueEntry&>;
{t.GetAffinityMask()};
- { typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() }
+ { std::remove_cvref_t<decltype(t.GetAffinityMask())>() }
->KPriorityQueueAffinityMask;
{ t.GetActiveCore() }
@@ -55,17 +55,17 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
->Common::ConvertibleTo<s32>;
};
-template <typename Member, size_t _NumCores, int LowestPriority, int HighestPriority>
+template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
requires KPriorityQueueMember<Member> class KPriorityQueue {
public:
- using AffinityMaskType = typename std::remove_cv_t<
- typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>;
+ using AffinityMaskType = std::remove_cv_t<
+ std::remove_reference_t<decltype(std::declval<Member>().GetAffinityMask())>>;
static_assert(LowestPriority >= 0);
static_assert(HighestPriority >= 0);
static_assert(LowestPriority >= HighestPriority);
static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
- static constexpr size_t NumCores = _NumCores;
+ static constexpr size_t NumCores = NumCores_;
static constexpr bool IsValidCore(s32 core) {
return 0 <= core && core < static_cast<s32>(NumCores);
@@ -268,7 +268,7 @@ private:
}
constexpr s32 GetNextCore(u64& affinity) {
- const s32 core = Common::CountTrailingZeroes64(affinity);
+ const s32 core = std::countr_zero(affinity);
ClearAffinityBit(affinity, core);
return core;
}
@@ -367,7 +367,7 @@ public:
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
}
- constexpr Thread* MoveToScheduledBack(Member* member) {
+ constexpr KThread* MoveToScheduledBack(Member* member) {
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
member);
}
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
new file mode 100644
index 000000000..4b4d34857
--- /dev/null
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -0,0 +1,56 @@
+// Copyright 2021 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include "common/assert.h"
+#include "common/common_funcs.h"
+#include "common/logging/log.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+KReadableEvent::KReadableEvent(KernelCore& kernel, std::string&& name)
+ : KSynchronizationObject{kernel, std::move(name)} {}
+KReadableEvent::~KReadableEvent() = default;
+
+bool KReadableEvent::IsSignaled() const {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ return is_signaled;
+}
+
+ResultCode KReadableEvent::Signal() {
+ KScopedSchedulerLock lk{kernel};
+
+ if (!is_signaled) {
+ is_signaled = true;
+ NotifyAvailable();
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KReadableEvent::Clear() {
+ Reset();
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KReadableEvent::Reset() {
+ KScopedSchedulerLock lk{kernel};
+
+ if (!is_signaled) {
+ return ResultInvalidState;
+ }
+
+ is_signaled = false;
+ return RESULT_SUCCESS;
+}
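+
+// Semantics recap (derived from the code above): Signal() latches the event and notifies
+// waiters; Reset() fails with ResultInvalidState when the event is not signaled, while
+// Clear() performs the same reset but always reports success.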
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
new file mode 100644
index 000000000..e6f0fd900
--- /dev/null
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -0,0 +1,51 @@
+// Copyright 2021 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/object.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KernelCore;
+class KEvent;
+
+class KReadableEvent final : public KSynchronizationObject {
+public:
+ explicit KReadableEvent(KernelCore& kernel, std::string&& name);
+ ~KReadableEvent() override;
+
+ std::string GetTypeName() const override {
+ return "KReadableEvent";
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ KEvent* GetParent() const {
+ return parent;
+ }
+
+ void Initialize(KEvent* parent_) {
+ is_signaled = false;
+ parent = parent_;
+ }
+
+ bool IsSignaled() const override;
+ void Finalize() override {}
+
+ ResultCode Signal();
+ ResultCode Clear();
+ ResultCode Reset();
+
+private:
+ bool is_signaled{};
+ KEvent* parent{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
new file mode 100644
index 000000000..d7a4a38e6
--- /dev/null
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -0,0 +1,152 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#include "common/assert.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/core_timing_util.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+constexpr s64 DefaultTimeout = 10000000000; // 10 seconds, in nanoseconds
+
+KResourceLimit::KResourceLimit(KernelCore& kernel, Core::System& system)
+ : Object{kernel}, lock{kernel}, cond_var{kernel}, kernel{kernel}, system{system} {}
+KResourceLimit::~KResourceLimit() = default;
+
+s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
+ const auto index = static_cast<std::size_t>(which);
+ s64 value{};
+ {
+ KScopedLightLock lk{lock};
+ value = limit_values[index];
+ ASSERT(value >= 0);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ }
+ return value;
+}
+
+s64 KResourceLimit::GetCurrentValue(LimitableResource which) const {
+ const auto index = static_cast<std::size_t>(which);
+ s64 value{};
+ {
+ KScopedLightLock lk{lock};
+ value = current_values[index];
+ ASSERT(value >= 0);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ }
+ return value;
+}
+
+s64 KResourceLimit::GetPeakValue(LimitableResource which) const {
+ const auto index = static_cast<std::size_t>(which);
+ s64 value{};
+ {
+ KScopedLightLock lk{lock};
+ value = peak_values[index];
+ ASSERT(value >= 0);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ }
+ return value;
+}
+
+s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
+ const auto index = static_cast<std::size_t>(which);
+ s64 value{};
+ {
+ KScopedLightLock lk(lock);
+ ASSERT(current_values[index] >= 0);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ value = limit_values[index] - current_values[index];
+ }
+
+ return value;
+}
+
+ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
+ const auto index = static_cast<std::size_t>(which);
+ KScopedLightLock lk(lock);
+ R_UNLESS(current_values[index] <= value, ResultInvalidState);
+
+ limit_values[index] = value;
+
+ return RESULT_SUCCESS;
+}
+
+bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
+ return Reserve(which, value, system.CoreTiming().GetGlobalTimeNs().count() + DefaultTimeout);
+}
+
+bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
+ ASSERT(value >= 0);
+ const auto index = static_cast<std::size_t>(which);
+ KScopedLightLock lk(lock);
+
+ ASSERT(current_hints[index] <= current_values[index]);
+ if (current_hints[index] >= limit_values[index]) {
+ return false;
+ }
+
+ // Loop until we reserve or run out of time.
+ while (true) {
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+
+ // If we would overflow, don't allow the reservation to succeed.
+ if (current_values[index] + value <= current_values[index]) {
+ break;
+ }
+
+ if (current_values[index] + value <= limit_values[index]) {
+ current_values[index] += value;
+ current_hints[index] += value;
+ peak_values[index] = std::max(peak_values[index], current_values[index]);
+ return true;
+ }
+
+ if (current_hints[index] + value <= limit_values[index] &&
+ (timeout < 0 || system.CoreTiming().GetGlobalTimeNs().count() < timeout)) {
+ waiter_count++;
+ cond_var.Wait(&lock, timeout);
+ waiter_count--;
+ } else {
+ break;
+ }
+ }
+
+ return false;
+}
+
+void KResourceLimit::Release(LimitableResource which, s64 value) {
+ Release(which, value, value);
+}
+
+void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) {
+ ASSERT(value >= 0);
+ ASSERT(hint >= 0);
+
+ const auto index = static_cast<std::size_t>(which);
+ KScopedLightLock lk(lock);
+ ASSERT(current_values[index] <= limit_values[index]);
+ ASSERT(current_hints[index] <= current_values[index]);
+ ASSERT(value <= current_values[index]);
+ ASSERT(hint <= current_hints[index]);
+
+ current_values[index] -= value;
+ current_hints[index] -= hint;
+
+ if (waiter_count != 0) {
+ cond_var.Broadcast();
+ }
+}
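+
+// Illustrative reserve/release pairing (a sketch, not code from this change); the
+// KScopedResourceReservation helper added in this change wraps this pattern in RAII:
+//
+//     if (!resource_limit->Reserve(LimitableResource::Threads, 1)) {
+//         return; // Out of resources.
+//     }
+//     // ... create the thread; when it is later destroyed:
+//     resource_limit->Release(LimitableResource::Threads, 1);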
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
new file mode 100644
index 000000000..58ae456f1
--- /dev/null
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -0,0 +1,81 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include <array>
+#include "common/common_types.h"
+#include "core/hle/kernel/k_light_condition_variable.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/object.h"
+
+union ResultCode;
+
+namespace Core {
+class System;
+}
+
+namespace Kernel {
+class KernelCore;
+enum class LimitableResource : u32 {
+ PhysicalMemory = 0,
+ Threads = 1,
+ Events = 2,
+ TransferMemory = 3,
+ Sessions = 4,
+
+ Count,
+};
+
+constexpr bool IsValidResourceType(LimitableResource type) {
+ return type < LimitableResource::Count;
+}
+
+class KResourceLimit final : public Object {
+public:
+ explicit KResourceLimit(KernelCore& kernel, Core::System& system);
+ ~KResourceLimit();
+
+ s64 GetLimitValue(LimitableResource which) const;
+ s64 GetCurrentValue(LimitableResource which) const;
+ s64 GetPeakValue(LimitableResource which) const;
+ s64 GetFreeValue(LimitableResource which) const;
+
+ ResultCode SetLimitValue(LimitableResource which, s64 value);
+
+ bool Reserve(LimitableResource which, s64 value);
+ bool Reserve(LimitableResource which, s64 value, s64 timeout);
+ void Release(LimitableResource which, s64 value);
+ void Release(LimitableResource which, s64 value, s64 hint);
+
+ std::string GetTypeName() const override {
+ return "KResourceLimit";
+ }
+ std::string GetName() const override {
+ return GetTypeName();
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ void Finalize() override {}
+
+private:
+ using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>;
+ ResourceArray limit_values{};
+ ResourceArray current_values{};
+ ResourceArray current_hints{};
+ ResourceArray peak_values{};
+ mutable KLightLock lock;
+ s32 waiter_count{};
+ KLightConditionVariable cond_var;
+ KernelCore& kernel;
+ Core::System& system;
+};
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index c5fd82a6b..bb5f43b53 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -5,6 +5,8 @@
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+#include <bit>
+
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/fiber.h"
@@ -15,28 +17,33 @@
#include "core/cpu_manager.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
-static void IncrementScheduledCount(Kernel::Thread* thread) {
+static void IncrementScheduledCount(Kernel::KThread* thread) {
if (auto process = thread->GetOwnerProcess(); process) {
process->IncrementScheduledCount();
}
}
-void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
- Core::EmuThreadHandle global_thread) {
- u32 current_core = global_thread.host_handle;
- bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
- (current_core < Core::Hardware::NUM_CPU_CORES);
+void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
+ auto scheduler = kernel.CurrentScheduler();
+
+ u32 current_core{0xF}; // Sentinel value; never matches a valid core ID.
+ bool must_context_switch{};
+ if (scheduler) {
+ current_core = scheduler->core_id;
+ // TODO(bunnei): Should be set to true when we deprecate single core
+ must_context_switch = !kernel.IsPhantomModeForSingleCore();
+ }
while (cores_pending_reschedule != 0) {
- u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
+ const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
ASSERT(core < Core::Hardware::NUM_CPU_CORES);
if (!must_context_switch || core != current_core) {
auto& phys_core = kernel.PhysicalCore(core);
@@ -54,28 +61,27 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
}
}
-u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
+u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
std::scoped_lock lock{guard};
- if (Thread* prev_highest_thread = this->state.highest_priority_thread;
+ if (KThread* prev_highest_thread = state.highest_priority_thread;
prev_highest_thread != highest_thread) {
if (prev_highest_thread != nullptr) {
IncrementScheduledCount(prev_highest_thread);
prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
}
- if (this->state.should_count_idle) {
+ if (state.should_count_idle) {
if (highest_thread != nullptr) {
- // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
- // process->SetRunningThread(this->core_id, highest_thread,
- // this->state.idle_count);
- //}
+ if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
+ process->SetRunningThread(core_id, highest_thread, state.idle_count);
+ }
} else {
- this->state.idle_count++;
+ state.idle_count++;
}
}
- this->state.highest_priority_thread = highest_thread;
- this->state.needs_scheduling = true;
- return (1ULL << this->core_id);
+ state.highest_priority_thread = highest_thread;
+ state.needs_scheduling.store(true);
+ return (1ULL << core_id);
} else {
return 0;
}
@@ -88,16 +94,29 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
ClearSchedulerUpdateNeeded(kernel);
u64 cores_needing_scheduling = 0, idle_cores = 0;
- Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
+ KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
auto& priority_queue = GetPriorityQueue(kernel);
// We want to go over all cores, finding the highest priority thread and determining if
// scheduling is needed for that core.
for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
+ KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
if (top_thread != nullptr) {
// If the thread has no waiters, we need to check if the process has a thread pinned.
- // TODO(bunnei): Implement thread pinning
+ if (top_thread->GetNumKernelWaiters() == 0) {
+ if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
+ if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
+ pinned != nullptr && pinned != top_thread) {
+ // We prefer our parent's pinned thread if possible. However, we also don't
+ // want to schedule un-runnable threads.
+ if (pinned->GetRawState() == ThreadState::Runnable) {
+ top_thread = pinned;
+ } else {
+ top_thread = nullptr;
+ }
+ }
+ }
+ }
} else {
idle_cores |= (1ULL << core_id);
}
@@ -109,8 +128,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
while (idle_cores != 0) {
- u32 core_id = Common::CountTrailingZeroes64(idle_cores);
- if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
+ const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
+ if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
size_t num_candidates = 0;
@@ -118,7 +137,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
- if (Thread* top_thread =
+ if (KThread* top_thread =
(suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
top_thread != suggested) {
// Make sure we're not dealing with threads too high priority for migration.
@@ -150,7 +169,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Check if there's some other thread that can run on the candidate core.
const s32 candidate_core = migration_candidates[i];
suggested = top_threads[candidate_core];
- if (Thread* next_on_candidate_core =
+ if (KThread* next_on_candidate_core =
priority_queue.GetScheduledNext(candidate_core, suggested);
next_on_candidate_core != nullptr) {
// The candidate core can run some other thread! We'll migrate its current
@@ -180,22 +199,35 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
return cores_needing_scheduling;
}
-void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
+void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
+ // Get an atomic reference to the core scheduler's previous thread.
+ std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
+ static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
+
+ // Atomically clear the previous thread if it's our target.
+ KThread* compare = thread;
+ prev_thread.compare_exchange_strong(compare, nullptr);
+ }
+}
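+// Using std::atomic_ref allows an atomic compare-and-swap on each scheduler's plain
+// `prev_thread` member without declaring the member itself std::atomic; the static_assert
+// above confirms that pointer-sized operations are lock-free on the target.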
+
+void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Check if the state has changed, because if it hasn't there's nothing to do.
- const auto cur_state = thread->scheduling_state;
+ const auto cur_state = thread->GetRawState();
if (cur_state == old_state) {
return;
}
// Update the priority queues.
- if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ if (old_state == ThreadState::Runnable) {
// If we were previously runnable, then we're not runnable now, and we should remove.
GetPriorityQueue(kernel).Remove(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
- } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ } else if (cur_state == ThreadState::Runnable) {
// If we're now runnable, then we weren't previously, and we should add.
GetPriorityQueue(kernel).PushBack(thread);
IncrementScheduledCount(thread);
@@ -203,13 +235,11 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 ol
}
}
-void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
- u32 old_priority) {
-
+void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// If the thread is runnable, we want to change its priority in the queue.
- if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ if (thread->GetRawState() == ThreadState::Runnable) {
GetPriorityQueue(kernel).ChangePriority(
old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
IncrementScheduledCount(thread);
@@ -217,12 +247,12 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thr
}
}
-void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// If the thread is runnable, we want to change its affinity in the queue.
- if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ if (thread->GetRawState() == ThreadState::Runnable) {
GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
@@ -237,8 +267,8 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
auto& priority_queue = GetPriorityQueue(kernel);
// Rotate the front of the queue to the end.
- Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
- Thread* next_thread = nullptr;
+ KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
+ KThread* next_thread = nullptr;
if (top_thread != nullptr) {
next_thread = priority_queue.MoveToScheduledBack(top_thread);
if (next_thread != top_thread) {
@@ -249,11 +279,11 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
// While we have a suggested thread, try to migrate it!
{
- Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
- if (Thread* top_on_suggested_core =
+ if (KThread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
@@ -285,15 +315,15 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
// Now that we might have migrated a thread with the same priority, check if we can do better.
{
- Thread* best_thread = priority_queue.GetScheduledFront(core_id);
+ KThread* best_thread = priority_queue.GetScheduledFront(core_id);
if (best_thread == GetCurrentThread()) {
best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
}
// If the best thread we can choose has a priority the same or worse than ours, try to
// migrate a higher priority thread.
- if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
- Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+ if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// If the suggestion's priority is the same as ours, don't bother.
if (suggested->GetPriority() >= best_thread->GetPriority()) {
@@ -302,7 +332,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
- if (Thread* top_on_suggested_core =
+ if (KThread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
@@ -352,12 +382,14 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
}
}
-void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
- Core::EmuThreadHandle global_thread) {
+void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
- scheduler->GetCurrentThread()->EnableDispatch();
+ ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
+ if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
+ scheduler->GetCurrentThread()->EnableDispatch();
+ }
}
- RescheduleCores(kernel, cores_needing_scheduling, global_thread);
+ RescheduleCores(kernel, cores_needing_scheduling);
}
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -372,15 +404,13 @@ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
return kernel.GlobalSchedulerContext().priority_queue;
}
-void KScheduler::YieldWithoutCoreMigration() {
- auto& kernel = system.Kernel();
-
+void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- Thread& cur_thread = *GetCurrentThread();
+ KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -395,10 +425,10 @@ void KScheduler::YieldWithoutCoreMigration() {
{
KScopedSchedulerLock lock(kernel);
- const auto cur_state = cur_thread.scheduling_state;
- if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ const auto cur_state = cur_thread.GetRawState();
+ if (cur_state == ThreadState::Runnable) {
// Put the current thread at the back of the queue.
- Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+ KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));
// If the next thread is different, we have an update to perform.
@@ -413,15 +443,13 @@ void KScheduler::YieldWithoutCoreMigration() {
}
}
-void KScheduler::YieldWithCoreMigration() {
- auto& kernel = system.Kernel();
-
+void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- Thread& cur_thread = *GetCurrentThread();
+ KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -436,23 +464,23 @@ void KScheduler::YieldWithCoreMigration() {
{
KScopedSchedulerLock lock(kernel);
- const auto cur_state = cur_thread.scheduling_state;
- if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ const auto cur_state = cur_thread.GetRawState();
+ if (cur_state == ThreadState::Runnable) {
// Get the current active core.
const s32 core_id = cur_thread.GetActiveCore();
// Put the current thread at the back of the queue.
- Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+ KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));
// While we have a suggested thread, try to migrate it!
bool recheck = false;
- Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// Check if the suggested thread is the thread running on its core.
const s32 suggested_core = suggested->GetActiveCore();
- if (Thread* running_on_suggested_core =
+ if (KThread* running_on_suggested_core =
(suggested_core >= 0)
? kernel.Scheduler(suggested_core).state.highest_priority_thread
: nullptr;
@@ -503,15 +531,13 @@ void KScheduler::YieldWithCoreMigration() {
}
}
-void KScheduler::YieldToAnyThread() {
- auto& kernel = system.Kernel();
-
+void KScheduler::YieldToAnyThread(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- Thread& cur_thread = *GetCurrentThread();
+ KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -526,8 +552,8 @@ void KScheduler::YieldToAnyThread() {
{
KScopedSchedulerLock lock(kernel);
- const auto cur_state = cur_thread.scheduling_state;
- if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ const auto cur_state = cur_thread.GetRawState();
+ if (cur_state == ThreadState::Runnable) {
// Get the current active core.
const s32 core_id = cur_thread.GetActiveCore();
@@ -539,11 +565,11 @@ void KScheduler::YieldToAnyThread() {
// If there's nothing scheduled, we can try to perform a migration.
if (priority_queue.GetScheduledFront(core_id) == nullptr) {
// While we have a suggested thread, try to migrate it!
- Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
- if (Thread* top_on_suggested_core =
+ if (KThread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
@@ -581,22 +607,21 @@ void KScheduler::YieldToAnyThread() {
}
}
-KScheduler::KScheduler(Core::System& system, std::size_t core_id)
- : system(system), core_id(core_id) {
+KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) {
switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
- this->state.needs_scheduling = true;
- this->state.interrupt_task_thread_runnable = false;
- this->state.should_count_idle = false;
- this->state.idle_count = 0;
- this->state.idle_thread_stack = nullptr;
- this->state.highest_priority_thread = nullptr;
+ state.needs_scheduling.store(true);
+ state.interrupt_task_thread_runnable = false;
+ state.should_count_idle = false;
+ state.idle_count = 0;
+ state.idle_thread_stack = nullptr;
+ state.highest_priority_thread = nullptr;
}
KScheduler::~KScheduler() = default;
-Thread* KScheduler::GetCurrentThread() const {
- if (current_thread) {
- return current_thread;
+KThread* KScheduler::GetCurrentThread() const {
+ if (auto result = current_thread.load(); result) {
+ return result;
}
return idle_thread;
}
@@ -613,7 +638,7 @@ void KScheduler::RescheduleCurrentCore() {
phys_core.ClearInterrupt();
}
guard.lock();
- if (this->state.needs_scheduling) {
+ if (state.needs_scheduling.load()) {
Schedule();
} else {
guard.unlock();
@@ -624,67 +649,76 @@ void KScheduler::OnThreadStart() {
SwitchContextStep2();
}
-void KScheduler::Unload(Thread* thread) {
+void KScheduler::Unload(KThread* thread) {
+ LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+
if (thread) {
- thread->SetIsRunning(false);
- if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
+ if (thread->IsCallingSvc()) {
system.ArmInterface(core_id).ExceptionalExit();
- thread->SetContinuousOnSVC(false);
+ thread->ClearIsCallingSvc();
}
- if (!thread->IsHLEThread() && !thread->HasExited()) {
+ if (!thread->IsTerminationRequested()) {
+ prev_thread = thread;
+
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.SaveContext(thread->GetContext32());
cpu_core.SaveContext(thread->GetContext64());
// Save the TPIDR_EL0 system register in case it was modified.
thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
+ } else {
+ prev_thread = nullptr;
}
thread->context_guard.unlock();
}
}
-void KScheduler::Reload(Thread* thread) {
- if (thread) {
- ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
- "Thread must be runnable.");
+void KScheduler::Reload(KThread* thread) {
+ LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
- // Cancel any outstanding wakeup events for this thread
- thread->SetIsRunning(true);
- thread->SetWasRunning(false);
+ if (thread) {
+ ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
auto* const thread_owner_process = thread->GetOwnerProcess();
if (thread_owner_process != nullptr) {
system.Kernel().MakeCurrentProcess(thread_owner_process);
}
- if (!thread->IsHLEThread()) {
- Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
- cpu_core.LoadContext(thread->GetContext32());
- cpu_core.LoadContext(thread->GetContext64());
- cpu_core.SetTlsAddress(thread->GetTLSAddress());
- cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
- }
+
+ Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+ cpu_core.LoadContext(thread->GetContext32());
+ cpu_core.LoadContext(thread->GetContext64());
+ cpu_core.SetTlsAddress(thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
}
}
void KScheduler::SwitchContextStep2() {
// Load context of new thread
- Reload(current_thread);
+ Reload(current_thread.load());
RescheduleCurrentCore();
}
void KScheduler::ScheduleImpl() {
- Thread* previous_thread = current_thread;
- current_thread = state.highest_priority_thread;
+ KThread* previous_thread = current_thread.load();
+ KThread* next_thread = state.highest_priority_thread;
- this->state.needs_scheduling = false;
+ state.needs_scheduling = false;
+
+ // We never want to schedule a null thread, so use the idle thread if we don't have a next.
+ if (next_thread == nullptr) {
+ next_thread = idle_thread;
+ }
- if (current_thread == previous_thread) {
+ // If we're not actually switching threads, there's nothing to do.
+ if (next_thread == current_thread.load()) {
guard.unlock();
return;
}
+ current_thread.store(next_thread);
+
Process* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -715,28 +749,29 @@ void KScheduler::SwitchToCurrent() {
while (true) {
{
std::scoped_lock lock{guard};
- current_thread = state.highest_priority_thread;
- this->state.needs_scheduling = false;
+ current_thread.store(state.highest_priority_thread);
+ state.needs_scheduling.store(false);
}
const auto is_switch_pending = [this] {
std::scoped_lock lock{guard};
- return state.needs_scheduling.load(std::memory_order_relaxed);
+ return state.needs_scheduling.load();
};
do {
- if (current_thread != nullptr && !current_thread->IsHLEThread()) {
- current_thread->context_guard.lock();
- if (!current_thread->IsRunnable()) {
- current_thread->context_guard.unlock();
+ auto next_thread = current_thread.load();
+ if (next_thread != nullptr) {
+ next_thread->context_guard.lock();
+ if (next_thread->GetRawState() != ThreadState::Runnable) {
+ next_thread->context_guard.unlock();
break;
}
- if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
- current_thread->context_guard.unlock();
+ if (next_thread->GetActiveCore() != core_id) {
+ next_thread->context_guard.unlock();
break;
}
}
std::shared_ptr<Common::Fiber>* next_context;
- if (current_thread != nullptr) {
- next_context = &current_thread->GetHostContext();
+ if (next_thread != nullptr) {
+ next_context = &next_thread->GetHostContext();
} else {
next_context = &idle_thread->GetHostContext();
}
@@ -745,13 +780,13 @@ void KScheduler::SwitchToCurrent() {
}
}
-void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
+void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) {
const u64 prev_switch_ticks = last_context_switch_time;
const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
if (thread != nullptr) {
- thread->UpdateCPUTimeTicks(update_ticks);
+ thread->AddCpuTime(core_id, update_ticks);
}
if (process != nullptr) {
@@ -765,15 +800,10 @@ void KScheduler::Initialize() {
std::string name = "Idle Thread Id:" + std::to_string(core_id);
std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
- ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
- auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
- nullptr, std::move(init_func), init_func_parameter);
+ auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
+ KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
+ nullptr, std::move(init_func), init_func_parameter);
idle_thread = thread_res.Unwrap().get();
-
- {
- KScopedSchedulerLock lock{system.Kernel()};
- idle_thread->SetStatus(ThreadStatus::Ready);
- }
}
KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index e84abc84c..f595b9a5c 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -29,29 +29,33 @@ namespace Kernel {
class KernelCore;
class Process;
class SchedulerLock;
-class Thread;
+class KThread;
class KScheduler final {
public:
- explicit KScheduler(Core::System& system, std::size_t core_id);
+ explicit KScheduler(Core::System& system, s32 core_id);
~KScheduler();
/// Reschedules to the next available thread (call after current thread is suspended)
void RescheduleCurrentCore();
/// Reschedules cores pending reschedule, to be called on EnableScheduling.
- static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
- Core::EmuThreadHandle global_thread);
+ static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule);
/// The next two are for SingleCore Only.
/// Unload current thread before preempting core.
- void Unload(Thread* thread);
+ void Unload(KThread* thread);
/// Reload current thread after core preemption.
- void Reload(Thread* thread);
+ void Reload(KThread* thread);
/// Gets the current running thread
- [[nodiscard]] Thread* GetCurrentThread() const;
+ [[nodiscard]] KThread* GetCurrentThread() const;
+
+ /// Returns true if the scheduler is idle
+ [[nodiscard]] bool IsIdle() const {
+ return GetCurrentThread() == idle_thread;
+ }
/// Gets the timestamp for the last context switch in ticks.
[[nodiscard]] u64 GetLastContextSwitchTicks() const;
@@ -72,14 +76,14 @@ public:
return switch_fiber;
}
- [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);
+ [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);
/**
* Takes a thread and moves it to the back of its priority list.
*
* @note This operation can be redundant, in which case no scheduling change occurs.
*/
- void YieldWithoutCoreMigration();
+ static void YieldWithoutCoreMigration(KernelCore& kernel);
/**
* Takes a thread and moves it to the back of its priority list.
@@ -88,7 +92,7 @@ public:
*
* @note This operation can be redundant, in which case no scheduling change occurs.
*/
- void YieldWithCoreMigration();
+ static void YieldWithCoreMigration(KernelCore& kernel);
/**
* Takes a thread and moves it out of the scheduling queue
* and into the suggested queue.
*
* @note This operation can be redundant, in which case no scheduling change occurs.
*/
- void YieldToAnyThread();
+ static void YieldToAnyThread(KernelCore& kernel);
+
+ static void ClearPreviousThread(KernelCore& kernel, KThread* thread);
/// Notify the scheduler a thread's status has changed.
- static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
+ static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);
/// Notify the scheduler a thread's priority has changed.
- static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
- u32 old_priority);
+ static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);
/// Notify the scheduler a thread's core and/or affinity mask has changed.
- static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+ static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core);
static bool CanSchedule(KernelCore& kernel);
@@ -115,8 +120,7 @@ public:
static void SetSchedulerUpdateNeeded(KernelCore& kernel);
static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
static void DisableScheduling(KernelCore& kernel);
- static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
- Core::EmuThreadHandle global_thread);
+ static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
[[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
private:
@@ -164,13 +168,15 @@ private:
* most recent tick count retrieved. No special arithmetic is
* applied to it.
*/
- void UpdateLastContextSwitchTime(Thread* thread, Process* process);
+ void UpdateLastContextSwitchTime(KThread* thread, Process* process);
static void OnSwitch(void* this_scheduler);
void SwitchToCurrent();
- Thread* current_thread{};
- Thread* idle_thread{};
+ KThread* prev_thread{};
+ std::atomic<KThread*> current_thread{};
+
+ KThread* idle_thread{};
std::shared_ptr<Common::Fiber> switch_fiber{};
@@ -179,7 +185,7 @@ private:
bool interrupt_task_thread_runnable{};
bool should_count_idle{};
u64 idle_count{};
- Thread* highest_priority_thread{};
+ KThread* highest_priority_thread{};
void* idle_thread_stack{};
};
@@ -187,7 +193,7 @@ private:
Core::System& system;
u64 last_context_switch_time{};
- const std::size_t core_id;
+ const s32 core_id;
Common::SpinLock guard{};
};
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 2f1c1f691..169455d18 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -10,6 +10,7 @@
#include "common/assert.h"
#include "common/spin_lock.h"
#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
@@ -19,49 +20,48 @@ class KernelCore;
template <typename SchedulerType>
class KAbstractSchedulerLock {
public:
- explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}
+ explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
bool IsLockedByCurrentThread() const {
- return this->owner_thread == kernel.GetCurrentEmuThreadID();
+ return owner_thread == GetCurrentThreadPointer(kernel);
}
void Lock() {
- if (this->IsLockedByCurrentThread()) {
+ if (IsLockedByCurrentThread()) {
// If we already own the lock, we can just increment the count.
- ASSERT(this->lock_count > 0);
- this->lock_count++;
+ ASSERT(lock_count > 0);
+ lock_count++;
} else {
// Otherwise, we want to disable scheduling and acquire the spinlock.
SchedulerType::DisableScheduling(kernel);
- this->spin_lock.lock();
+ spin_lock.lock();
// For debug, ensure that our state is valid.
- ASSERT(this->lock_count == 0);
- ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());
+ ASSERT(lock_count == 0);
+ ASSERT(owner_thread == nullptr);
// Increment count, take ownership.
- this->lock_count = 1;
- this->owner_thread = kernel.GetCurrentEmuThreadID();
+ lock_count = 1;
+ owner_thread = GetCurrentThreadPointer(kernel);
}
}
void Unlock() {
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(this->lock_count > 0);
+ ASSERT(IsLockedByCurrentThread());
+ ASSERT(lock_count > 0);
// Release an instance of the lock.
- if ((--this->lock_count) == 0) {
+ if ((--lock_count) == 0) {
// We're no longer going to hold the lock. Take note of what cores need scheduling.
const u64 cores_needing_scheduling =
SchedulerType::UpdateHighestPriorityThreads(kernel);
- Core::EmuThreadHandle leaving_thread = owner_thread;
// Note that we no longer hold the lock, and unlock the spinlock.
- this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
- this->spin_lock.unlock();
+ owner_thread = nullptr;
+ spin_lock.unlock();
// Enable scheduling, and perform a rescheduling operation.
- SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
+ SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
}
}
@@ -69,7 +69,7 @@ private:
KernelCore& kernel;
Common::SpinLock spin_lock{};
s32 lock_count{};
- Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()};
+ KThread* owner_thread{};
};
} // namespace Kernel
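The scheme above (an owner identity plus a recursion count in front of a non-recursive lock) is worth seeing in isolation. A minimal stand-alone sketch, with std::thread::id standing in for the KThread pointer and std::mutex standing in for the spinlock and scheduling calls:

    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    // Model of KAbstractSchedulerLock's recursion scheme only; none of the
    // scheduling side effects are reproduced here.
    class RecursiveOwnerLock {
    public:
        bool IsLockedByCurrentThread() const {
            return owner.load(std::memory_order_relaxed) == std::this_thread::get_id();
        }
        void Lock() {
            if (IsLockedByCurrentThread()) {
                ++count; // Re-entry from the owner: just bump the count.
            } else {
                mutex.lock(); // First entry: acquire, then record ownership.
                assert(count == 0);
                count = 1;
                owner.store(std::this_thread::get_id(), std::memory_order_relaxed);
            }
        }
        void Unlock() {
            assert(IsLockedByCurrentThread() && count > 0);
            if (--count == 0) {
                owner.store({}, std::memory_order_relaxed); // Drop ownership first.
                mutex.unlock();
            }
        }
    private:
        std::mutex mutex;
        std::atomic<std::thread::id> owner{};
        int count{};
    };

    int main() {
        RecursiveOwnerLock lock;
        lock.Lock();
        lock.Lock();   // Nested acquisition by the owner succeeds immediately.
        lock.Unlock();
        lock.Unlock(); // The underlying mutex is released only here.
    }

Tracking the owner by identity rather than by a handle is exactly what the diff above switches to, and it is what keeps IsLockedByCurrentThread() a single pointer comparison.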
diff --git a/src/core/hle/kernel/k_scoped_resource_reservation.h b/src/core/hle/kernel/k_scoped_resource_reservation.h
new file mode 100644
index 000000000..c5deca00b
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_resource_reservation.h
@@ -0,0 +1,67 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/process.h"
+
+namespace Kernel {
+
+class KScopedResourceReservation {
+public:
+ explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
+ s64 v, s64 timeout)
+ : resource_limit(std::move(l)), value(v), resource(r) {
+ if (resource_limit && value) {
+ success = resource_limit->Reserve(resource, value, timeout);
+ } else {
+ success = true;
+ }
+ }
+
+ explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
+ s64 v = 1)
+ : resource_limit(std::move(l)), value(v), resource(r) {
+ if (resource_limit && value) {
+ success = resource_limit->Reserve(resource, value);
+ } else {
+ success = true;
+ }
+ }
+
+ explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v, s64 t)
+ : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {}
+
+ explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v = 1)
+ : KScopedResourceReservation(p->GetResourceLimit(), r, v) {}
+
+ ~KScopedResourceReservation() noexcept {
+ if (resource_limit && value && success) {
+ // The reservation was never committed; release it back to the limit.
+ resource_limit->Release(resource, value);
+ }
+ }
+
+ /// Commit the resource reservation; once committed, destruction of this object does not release the resource
+ void Commit() {
+ resource_limit = nullptr;
+ }
+
+ [[nodiscard]] bool Succeeded() const {
+ return success;
+ }
+
+private:
+ std::shared_ptr<KResourceLimit> resource_limit;
+ s64 value;
+ LimitableResource resource;
+ bool success;
+};
+
+} // namespace Kernel
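A hypothetical call site for the guard (the function name, the process pointer, and ResultLimitReached are assumptions; LimitableResource::Threads and the Succeeded()/Commit() contract come from the class above). The point of the pattern: reserve first, fail fast, and commit only once the resource actually exists, so every early return releases the reservation automatically.

    ResultCode CreateOneThread(Process* process) {
        // Reserve one thread slot against the owner's resource limit.
        KScopedResourceReservation reservation(process, LimitableResource::Threads);
        if (!reservation.Succeeded()) {
            return ResultLimitReached; // Nothing was reserved, nothing to release.
        }

        // ... create and initialize the thread here; any failure return
        // releases the reservation in ~KScopedResourceReservation ...

        reservation.Commit(); // Success: the destructor becomes a no-op.
        return RESULT_SUCCESS;
    }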
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 2bb3817fa..f8189e107 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -9,27 +9,24 @@
#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
class KScopedSchedulerLockAndSleep {
public:
- explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
- s64 timeout)
- : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
- event_handle = InvalidHandle;
-
+ explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KThread* t, s64 timeout)
+ : kernel(kernel), thread(t), timeout_tick(timeout) {
// Lock the scheduler.
kernel.GlobalSchedulerContext().scheduler_lock.Lock();
}
~KScopedSchedulerLockAndSleep() {
// Register the sleep.
- if (this->timeout_tick > 0) {
- kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
+ if (timeout_tick > 0) {
+ kernel.TimeManager().ScheduleTimeEvent(thread, timeout_tick);
}
// Unlock the scheduler.
@@ -37,13 +34,12 @@ public:
}
void CancelSleep() {
- this->timeout_tick = 0;
+ timeout_tick = 0;
}
private:
KernelCore& kernel;
- Handle& event_handle;
- Thread* thread{};
+ KThread* thread{};
s64 timeout_tick{};
};
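Typical use, mirroring KThread::Sleep later in this commit (kernel, current_thread, and timeout are caller context; ShouldBailOut() is a placeholder for whatever early-exit test applies):

    {
        KScopedSchedulerLockAndSleep slp{kernel, current_thread, timeout};

        if (ShouldBailOut()) { // e.g. already signaled, or timeout == 0
            slp.CancelSleep(); // Zeroes timeout_tick; no timer gets registered.
            return ResultTimedOut;
        }

        current_thread->SetState(ThreadState::Waiting);
    } // Destructor: schedules the time event (unless cancelled) and unlocks.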
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
new file mode 100644
index 000000000..82f72a0fe
--- /dev/null
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -0,0 +1,171 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout) {
+ // Allocate space on stack for thread nodes.
+ std::vector<ThreadListNode> thread_nodes(num_objects);
+
+ // Prepare for wait.
+ KThread* thread = kernel.CurrentScheduler()->GetCurrentThread();
+
+ {
+ // Set up the scheduling lock and sleep.
+ KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};
+
+ // Check if any of the objects are already signaled.
+ for (auto i = 0; i < num_objects; ++i) {
+ ASSERT(objects[i] != nullptr);
+
+ if (objects[i]->IsSignaled()) {
+ *out_index = i;
+ slp.CancelSleep();
+ return RESULT_SUCCESS;
+ }
+ }
+
+ // Check if the timeout is zero.
+ if (timeout == 0) {
+ slp.CancelSleep();
+ return ResultTimedOut;
+ }
+
+ // Check if the thread should terminate.
+ if (thread->IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Check if waiting was canceled.
+ if (thread->IsWaitCancelled()) {
+ slp.CancelSleep();
+ thread->ClearWaitCancelled();
+ return ResultCancelled;
+ }
+
+ // Add the waiters.
+ for (auto i = 0; i < num_objects; ++i) {
+ thread_nodes[i].thread = thread;
+ thread_nodes[i].next = nullptr;
+
+ if (objects[i]->thread_list_tail == nullptr) {
+ objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
+ } else {
+ objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
+ }
+
+ objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
+ }
+
+ // For debugging only
+ thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
+
+ // Mark the thread as waiting.
+ thread->SetCancellable();
+ thread->SetSyncedObject(nullptr, ResultTimedOut);
+ thread->SetState(ThreadState::Waiting);
+ thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
+ }
+
+ // The lock/sleep is done, so we should be able to get our result.
+
+ // Thread is no longer cancellable.
+ thread->ClearCancellable();
+
+ // For debugging only
+ thread->SetWaitObjectsForDebugging({});
+
+ // Cancel the timer as needed.
+ kernel.TimeManager().UnscheduleTimeEvent(thread);
+
+ // Get the wait result.
+ ResultCode wait_result{RESULT_SUCCESS};
+ s32 sync_index = -1;
+ {
+ KScopedSchedulerLock lock(kernel);
+ KSynchronizationObject* synced_obj;
+ wait_result = thread->GetWaitResult(std::addressof(synced_obj));
+
+ for (auto i = 0; i < num_objects; ++i) {
+ // Unlink the object from the list.
+ ThreadListNode* prev_ptr =
+ reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
+ ThreadListNode* prev_val = nullptr;
+ ThreadListNode *prev, *tail_prev;
+
+ do {
+ prev = prev_ptr;
+ prev_ptr = prev_ptr->next;
+ tail_prev = prev_val;
+ prev_val = prev_ptr;
+ } while (prev_ptr != std::addressof(thread_nodes[i]));
+
+ if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
+ objects[i]->thread_list_tail = tail_prev;
+ }
+
+ prev->next = thread_nodes[i].next;
+
+ if (objects[i] == synced_obj) {
+ sync_index = i;
+ }
+ }
+ }
+
+ // Set output.
+ *out_index = sync_index;
+ return wait_result;
+}
+
+KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}
+
+KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name)
+ : Object{kernel, std::move(name)} {}
+
+KSynchronizationObject::~KSynchronizationObject() = default;
+
+void KSynchronizationObject::NotifyAvailable(ResultCode result) {
+ KScopedSchedulerLock lock(kernel);
+
+ // If we're not signaled, we've nothing to notify.
+ if (!this->IsSignaled()) {
+ return;
+ }
+
+ // Iterate over each thread.
+ for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+ KThread* thread = cur_node->thread;
+ if (thread->GetState() == ThreadState::Waiting) {
+ thread->SetSyncedObject(this, result);
+ thread->SetState(ThreadState::Runnable);
+ }
+ }
+}
+
+std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
+ std::vector<KThread*> threads;
+
+ // Gather the list of waiting threads under the scheduler lock.
+ {
+ KScopedSchedulerLock lock(kernel);
+ for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+ threads.emplace_back(cur_node->thread);
+ }
+ }
+
+ return threads;
+}
+} // namespace Kernel
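A minimal hypothetical subclass shows the contract: implement IsSignaled(), flip the state under the scheduler lock, and call NotifyAvailable() to wake queued waiters. The nested KScopedSchedulerLock inside NotifyAvailable is safe because the lock is recursive; Object's remaining virtual interface (e.g. GetHandleType()) is omitted here, and KReadableEvent elsewhere in this commit is the real-world equivalent.

    class KSimpleEvent final : public KSynchronizationObject {
    public:
        explicit KSimpleEvent(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}

        bool IsSignaled() const override {
            return signaled;
        }

        void Signal() {
            KScopedSchedulerLock lock(kernel);
            signaled = true;
            NotifyAvailable(); // Wakes every thread queued in our waiter list.
        }

    private:
        bool signaled{};
    };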
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
new file mode 100644
index 000000000..5803718fd
--- /dev/null
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -0,0 +1,59 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "core/hle/kernel/object.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KernelCore;
+class Synchronization;
+class KThread;
+
+/// Class that represents a Kernel object that a thread can be waiting on
+class KSynchronizationObject : public Object {
+public:
+ struct ThreadListNode {
+ ThreadListNode* next{};
+ KThread* thread{};
+ };
+
+ [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout);
+
+ [[nodiscard]] virtual bool IsSignaled() const = 0;
+
+ [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
+
+protected:
+ explicit KSynchronizationObject(KernelCore& kernel);
+ explicit KSynchronizationObject(KernelCore& kernel, std::string&& name);
+ virtual ~KSynchronizationObject();
+
+ void NotifyAvailable(ResultCode result);
+ void NotifyAvailable() {
+ return this->NotifyAvailable(RESULT_SUCCESS);
+ }
+
+private:
+ ThreadListNode* thread_list_head{};
+ ThreadListNode* thread_list_tail{};
+};
+
+// Specialization of DynamicObjectCast for KSynchronizationObjects
+template <>
+inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
+ std::shared_ptr<Object> object) {
+ if (object != nullptr && object->IsWaitable()) {
+ return std::static_pointer_cast<KSynchronizationObject>(object);
+ }
+ return nullptr;
+}
+
+} // namespace Kernel
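A hedged sketch of a caller (svcWaitSynchronization in svc.cpp is the real consumer; kernel, event_a, event_b, and timeout are assumed context):

    KSynchronizationObject* objects[2]{event_a, event_b};
    s32 out_index = -1;
    const ResultCode result =
        KSynchronizationObject::Wait(kernel, &out_index, objects, 2, timeout);
    if (result.IsSuccess()) {
        // objects[out_index] satisfied the wait.
    } else if (result == ResultTimedOut) {
        // The timeout elapsed first; out_index remains -1.
    }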
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
new file mode 100644
index 000000000..e5620da5a
--- /dev/null
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -0,0 +1,1048 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <cinttypes>
+#include <cstring>
+#include <optional>
+#include <vector>
+
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/fiber.h"
+#include "common/logging/log.h"
+#include "common/scope_exit.h"
+#include "common/thread_queue_list.h"
+#include "core/core.h"
+#include "core/cpu_manager.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/memory/memory_layout.h"
+#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/time_manager.h"
+#include "core/hle/result.h"
+#include "core/memory.h"
+
+#ifdef ARCHITECTURE_x86_64
+#include "core/arm/dynarmic/arm_dynarmic_32.h"
+#include "core/arm/dynarmic/arm_dynarmic_64.h"
+#endif
+
+namespace {
+static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
+ u32 entry_point, u32 arg) {
+ context = {};
+ context.cpu_registers[0] = arg;
+ context.cpu_registers[15] = entry_point;
+ context.cpu_registers[13] = stack_top;
+}
+
+static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
+ VAddr entry_point, u64 arg) {
+ context = {};
+ context.cpu_registers[0] = arg;
+ context.pc = entry_point;
+ context.sp = stack_top;
+ // TODO(merry): Perform a hardware test to determine the below value.
+ context.fpcr = 0;
+}
+} // namespace
+
+namespace Kernel {
+
+KThread::KThread(KernelCore& kernel)
+ : KSynchronizationObject{kernel}, activity_pause_lock{kernel} {}
+KThread::~KThread() = default;
+
+ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 virt_core, Process* owner, ThreadType type) {
+ // Assert parameters are valid.
+ ASSERT((type == ThreadType::Main) ||
+ (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
+ ASSERT((owner != nullptr) || (type != ThreadType::User));
+ ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));
+
+ // Convert the virtual core to a physical core.
+ const s32 phys_core = Core::Hardware::VirtualToPhysicalCoreMap[virt_core];
+ ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+
+ // First, clear the TLS address.
+ tls_address = {};
+
+ // Next, assert things based on the type.
+ switch (type) {
+ case ThreadType::Main:
+ ASSERT(arg == 0);
+ [[fallthrough]];
+ case ThreadType::HighPriority:
+ [[fallthrough]];
+ case ThreadType::User:
+ ASSERT(((owner == nullptr) ||
+ (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
+ ASSERT(((owner == nullptr) ||
+ (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
+ break;
+ case ThreadType::Kernel:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
+ break;
+ }
+ thread_type_for_debugging = type;
+
+ // Set the ideal core ID and affinity mask.
+ virtual_ideal_core_id = virt_core;
+ physical_ideal_core_id = phys_core;
+ virtual_affinity_mask = 1ULL << virt_core;
+ physical_affinity_mask.SetAffinity(phys_core, true);
+
+ // Set the thread state.
+ thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized;
+
+ // Set TLS address.
+ tls_address = 0;
+
+ // Set parent and condvar tree.
+ parent = nullptr;
+ condvar_tree = nullptr;
+
+ // Set sync booleans.
+ signaled = false;
+ termination_requested = false;
+ wait_cancelled = false;
+ cancellable = false;
+
+ // Set core ID and wait result.
+ core_id = phys_core;
+ wait_result = ResultNoSynchronizationObject;
+
+ // Set priorities.
+ priority = prio;
+ base_priority = prio;
+
+ // Set sync object and waiting lock to null.
+ synced_object = nullptr;
+
+ // Initialize sleeping queue.
+ sleeping_queue = nullptr;
+
+ // Set suspend flags.
+ suspend_request_flags = 0;
+ suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);
+
+ // We're neither debug attached, nor are we nesting our priority inheritance.
+ debug_attached = false;
+ priority_inheritance_count = 0;
+
+ // We haven't been scheduled, and we have done no light IPC.
+ schedule_count = -1;
+ last_scheduled_tick = 0;
+ light_ipc_data = nullptr;
+
+ // We're not waiting for a lock, and we haven't disabled migration.
+ lock_owner = nullptr;
+ num_core_migration_disables = 0;
+
+ // We have no waiters, but we do have an entrypoint.
+ num_kernel_waiters = 0;
+
+ // Set our current core id.
+ current_core_id = phys_core;
+
+ // We haven't released our resource limit hint, and we've spent no time on the cpu.
+ resource_limit_release_hint = false;
+ cpu_time = 0;
+
+ // Clear our stack parameters.
+ std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
+ sizeof(StackParameters));
+
+ // Set up the TLS, if needed.
+ if (type == ThreadType::User) {
+ tls_address = owner->CreateTLSRegion();
+ }
+
+ // Set parent, if relevant.
+ if (owner != nullptr) {
+ parent = owner;
+ parent->IncrementThreadCount();
+ }
+
+ // Initialize thread context.
+ ResetThreadContext64(thread_context_64, user_stack_top, func, arg);
+ ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top),
+ static_cast<u32>(func), static_cast<u32>(arg));
+
+ // Set up the stack parameters.
+ StackParameters& sp = GetStackParameters();
+ sp.cur_thread = this;
+ sp.disable_count = 1;
+ SetInExceptionHandler();
+
+ // Set thread ID.
+ thread_id = kernel.CreateNewThreadID();
+
+ // We initialized!
+ initialized = true;
+
+ // Register ourselves with our parent process.
+ if (parent != nullptr) {
+ parent->RegisterThread(this);
+ if (parent->IsSuspended()) {
+ RequestSuspend(SuspendType::Process);
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
+ VAddr user_stack_top, s32 prio, s32 core, Process* owner,
+ ThreadType type) {
+ // Initialize the thread.
+ R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
+
+ return RESULT_SUCCESS;
+}
+
+void KThread::Finalize() {
+ // If the thread has an owner process, unregister it.
+ if (parent != nullptr) {
+ parent->UnregisterThread(this);
+ }
+
+ // If the thread has a local region, delete it.
+ if (tls_address != 0) {
+ parent->FreeTLSRegion(tls_address);
+ }
+
+ // Release any waiters.
+ {
+ ASSERT(lock_owner == nullptr);
+ KScopedSchedulerLock sl{kernel};
+
+ auto it = waiter_list.begin();
+ while (it != waiter_list.end()) {
+ // The thread shouldn't be a kernel waiter.
+ it->SetLockOwner(nullptr);
+ it->SetSyncedObject(nullptr, ResultInvalidState);
+ it->Wakeup();
+ it = waiter_list.erase(it);
+ }
+ }
+
+ // Decrement the parent process's thread count.
+ if (parent != nullptr) {
+ parent->DecrementThreadCount();
+ parent->GetResourceLimit()->Release(LimitableResource::Threads, 1);
+ }
+}
+
+bool KThread::IsSignaled() const {
+ return signaled;
+}
+
+void KThread::Wakeup() {
+ KScopedSchedulerLock sl{kernel};
+
+ if (GetState() == ThreadState::Waiting) {
+ if (sleeping_queue != nullptr) {
+ sleeping_queue->WakeupThread(this);
+ } else {
+ SetState(ThreadState::Runnable);
+ }
+ }
+}
+
+void KThread::StartTermination() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Release user exception and unpin, if relevant.
+ if (parent != nullptr) {
+ parent->ReleaseUserException(this);
+ if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
+ parent->UnpinCurrentThread();
+ }
+ }
+
+ // Set state to terminated.
+ SetState(ThreadState::Terminated);
+
+ // Clear the thread's status as running in parent.
+ if (parent != nullptr) {
+ parent->ClearRunningThread(this);
+ }
+
+ // Signal.
+ signaled = true;
+ NotifyAvailable();
+
+ // Clear previous thread in KScheduler.
+ KScheduler::ClearPreviousThread(kernel, this);
+
+ // Register terminated dpc flag.
+ RegisterDpc(DpcFlag::Terminated);
+}
+
+void KThread::Pin() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Set ourselves as pinned.
+ GetStackParameters().is_pinned = true;
+
+ // Disable core migration.
+ ASSERT(num_core_migration_disables == 0);
+ {
+ ++num_core_migration_disables;
+
+ // Save our ideal state to restore when we're unpinned.
+ original_physical_ideal_core_id = physical_ideal_core_id;
+ original_physical_affinity_mask = physical_affinity_mask;
+
+ // Bind ourselves to this core.
+ const s32 active_core = GetActiveCore();
+ const s32 current_core = GetCurrentCoreId(kernel);
+
+ SetActiveCore(current_core);
+ physical_ideal_core_id = current_core;
+ physical_affinity_mask.SetAffinityMask(1ULL << current_core);
+
+ if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
+ original_physical_affinity_mask.GetAffinityMask()) {
+ KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask,
+ active_core);
+ }
+ }
+
+ // Disallow performing thread suspension.
+ {
+ // Update our allow flags.
+ suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
+ static_cast<u32>(ThreadState::SuspendShift)));
+
+ // Update our state.
+ const ThreadState old_state = thread_state;
+ thread_state = static_cast<ThreadState>(GetSuspendFlags() |
+ static_cast<u32>(old_state & ThreadState::Mask));
+ if (thread_state != old_state) {
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ }
+ }
+
+ // TODO(bunnei): Update our SVC access permissions.
+ ASSERT(parent != nullptr);
+}
+
+void KThread::Unpin() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Set ourselves as unpinned.
+ GetStackParameters().is_pinned = false;
+
+ // Enable core migration.
+ ASSERT(num_core_migration_disables == 1);
+ {
+ num_core_migration_disables--;
+
+ // Restore our original state.
+ const KAffinityMask old_mask = physical_affinity_mask;
+
+ physical_ideal_core_id = original_physical_ideal_core_id;
+ physical_affinity_mask = original_physical_affinity_mask;
+
+ if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+ const s32 active_core = GetActiveCore();
+
+ if (!physical_affinity_mask.GetAffinity(active_core)) {
+ if (physical_ideal_core_id >= 0) {
+ SetActiveCore(physical_ideal_core_id);
+ } else {
+ SetActiveCore(static_cast<s32>(
+ Common::BitSize<u64>() - 1 -
+ std::countl_zero(physical_affinity_mask.GetAffinityMask())));
+ }
+ }
+ KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
+ }
+ }
+
+ // Allow performing thread suspension (if termination hasn't been requested).
+ {
+ // Update our allow flags.
+ if (!IsTerminationRequested()) {
+ suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
+ static_cast<u32>(ThreadState::SuspendShift)));
+ }
+
+ // Update our state.
+ const ThreadState old_state = thread_state;
+ thread_state = static_cast<ThreadState>(GetSuspendFlags() |
+ static_cast<u32>(old_state & ThreadState::Mask));
+ if (thread_state != old_state) {
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ }
+ }
+
+ // TODO(bunnei): Update our SVC access permissions.
+ ASSERT(parent != nullptr);
+
+ // Resume any threads that began waiting on us while we were pinned.
+ for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) {
+ if (it->GetState() == ThreadState::Waiting) {
+ it->SetState(ThreadState::Runnable);
+ }
+ }
+}
+
+ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Get the virtual mask.
+ *out_ideal_core = virtual_ideal_core_id;
+ *out_affinity_mask = virtual_affinity_mask;
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+ KScopedSchedulerLock sl{kernel};
+ ASSERT(num_core_migration_disables >= 0);
+
+ // Select between core mask and original core mask.
+ if (num_core_migration_disables == 0) {
+ *out_ideal_core = physical_ideal_core_id;
+ *out_affinity_mask = physical_affinity_mask.GetAffinityMask();
+ } else {
+ *out_ideal_core = original_physical_ideal_core_id;
+ *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
+ ASSERT(parent != nullptr);
+ ASSERT(v_affinity_mask != 0);
+ KScopedLightLock lk{activity_pause_lock};
+
+ // Set the core mask.
+ u64 p_affinity_mask = 0;
+ {
+ KScopedSchedulerLock sl{kernel};
+ ASSERT(num_core_migration_disables >= 0);
+
+ // If the core id is no-update magic, preserve the ideal core id.
+ if (core_id == Svc::IdealCoreNoUpdate) {
+ core_id = virtual_ideal_core_id;
+ R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
+ }
+
+ // Set the virtual core/affinity mask.
+ virtual_ideal_core_id = core_id;
+ virtual_affinity_mask = v_affinity_mask;
+
+ // Translate the virtual core to a physical core.
+ if (core_id >= 0) {
+ core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id];
+ }
+
+ // Translate the virtual affinity mask to a physical one.
+ while (v_affinity_mask != 0) {
+ const u64 next = std::countr_zero(v_affinity_mask);
+ v_affinity_mask &= ~(1ULL << next);
+ p_affinity_mask |= (1ULL << Core::Hardware::VirtualToPhysicalCoreMap[next]);
+ }
+
+ // If we haven't disabled migration, perform an affinity change.
+ if (num_core_migration_disables == 0) {
+ const KAffinityMask old_mask = physical_affinity_mask;
+
+ // Set our new ideals.
+ physical_ideal_core_id = core_id;
+ physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+
+ if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+ const s32 active_core = GetActiveCore();
+
+ if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) {
+ const s32 new_core = static_cast<s32>(
+ physical_ideal_core_id >= 0
+ ? physical_ideal_core_id
+ : Common::BitSize<u64>() - 1 -
+ std::countl_zero(physical_affinity_mask.GetAffinityMask()));
+ SetActiveCore(new_core);
+ }
+ KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
+ }
+ } else {
+ // Otherwise, we edit the original affinity for restoration later.
+ original_physical_ideal_core_id = core_id;
+ original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+ }
+ }
+
+ // Update the pinned waiter list.
+ {
+ bool retry_update{};
+ bool thread_is_pinned{};
+ do {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Don't do any further management if our termination has been requested.
+ R_SUCCEED_IF(IsTerminationRequested());
+
+ // By default, we won't need to retry.
+ retry_update = false;
+
+ // Check if the thread is currently running.
+ bool thread_is_current{};
+ s32 thread_core;
+ for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
+ ++thread_core) {
+ if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
+ thread_is_current = true;
+ break;
+ }
+ }
+
+ // If the thread is currently running, check whether it's no longer allowed under the
+ // new mask.
+ if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) {
+ // If the thread is pinned, we want to wait until it's not pinned.
+ if (GetStackParameters().is_pinned) {
+ // Verify that the current thread isn't terminating.
+ R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
+ ResultTerminationRequested);
+
+ // Note that the thread was pinned.
+ thread_is_pinned = true;
+
+ // Wait until the thread isn't pinned any more.
+ pinned_waiter_list.push_back(GetCurrentThread(kernel));
+ GetCurrentThread(kernel).SetState(ThreadState::Waiting);
+ } else {
+ // If the thread isn't pinned, release the scheduler lock and retry until it's
+ // not current.
+ retry_update = true;
+ }
+ }
+ } while (retry_update);
+
+ // If the thread was pinned, it no longer is, and we should remove the current thread from
+ // our waiter list.
+ if (thread_is_pinned) {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Remove from the list.
+ pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
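The mask-translation loop above deserves a stand-alone look: std::countr_zero peels one set bit per iteration, and the table remaps each virtual core to its physical counterpart. A sketch with an identity table standing in for Core::Hardware::VirtualToPhysicalCoreMap:

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr int map[4] = {0, 1, 2, 3};  // placeholder remap table
        std::uint64_t v_mask = 0b1010;        // virtual cores 1 and 3
        std::uint64_t p_mask = 0;
        while (v_mask != 0) {
            const int next = std::countr_zero(v_mask);  // lowest set bit
            v_mask &= ~(std::uint64_t{1} << next);      // clear it
            p_mask |= std::uint64_t{1} << map[next];    // set the remapped bit
        }
        std::printf("physical mask: 0x%llX\n",
                    static_cast<unsigned long long>(p_mask));  // 0xA here
    }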
+
+void KThread::SetBasePriority(s32 value) {
+ ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
+
+ KScopedSchedulerLock sl{kernel};
+
+ // Change our base priority.
+ base_priority = value;
+
+ // Perform a priority restoration.
+ RestorePriority(kernel, this);
+}
+
+void KThread::RequestSuspend(SuspendType type) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Note the request in our flags.
+ suspend_request_flags |=
+ (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
+
+ // Try to perform the suspend.
+ TrySuspend();
+}
+
+void KThread::Resume(SuspendType type) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Clear the request in our flags.
+ suspend_request_flags &=
+ ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
+
+ // Update our state.
+ const ThreadState old_state = thread_state;
+ thread_state = static_cast<ThreadState>(GetSuspendFlags() |
+ static_cast<u32>(old_state & ThreadState::Mask));
+ if (thread_state != old_state) {
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ }
+}
+
+void KThread::WaitCancel() {
+ KScopedSchedulerLock sl{kernel};
+
+ // Check if we're waiting and cancellable.
+ if (GetState() == ThreadState::Waiting && cancellable) {
+ if (sleeping_queue != nullptr) {
+ sleeping_queue->WakeupThread(this);
+ wait_cancelled = true;
+ } else {
+ SetSyncedObject(nullptr, ResultCancelled);
+ SetState(ThreadState::Runnable);
+ wait_cancelled = false;
+ }
+ } else {
+ // Otherwise, note that we cancelled a wait.
+ wait_cancelled = true;
+ }
+}
+
+void KThread::TrySuspend() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSuspendRequested());
+
+ // Ensure that we have no waiters.
+ if (GetNumKernelWaiters() > 0) {
+ return;
+ }
+ ASSERT(GetNumKernelWaiters() == 0);
+
+ // Perform the suspend.
+ Suspend();
+}
+
+void KThread::Suspend() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSuspendRequested());
+
+ // Set our suspend flags in state.
+ const auto old_state = thread_state;
+ thread_state = static_cast<ThreadState>(GetSuspendFlags()) | (old_state & ThreadState::Mask);
+
+ // Note the state change in scheduler.
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+}
+
+void KThread::Continue() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Clear our suspend flags in state.
+ const auto old_state = thread_state;
+ thread_state = old_state & ThreadState::Mask;
+
+ // Note the state change in scheduler.
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+}
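Suspend() and Continue() only ever edit the bits at and above ThreadState::SuspendShift; the base state survives untouched in the low bits. A stand-alone illustration of the encoding, with constants copied from the ThreadState enum in k_thread.h:

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr std::uint16_t SuspendShift = 4;
        constexpr std::uint16_t Mask = (1 << SuspendShift) - 1;
        constexpr std::uint16_t Runnable = 2;
        constexpr std::uint16_t ThreadSuspended = 1 << (1 + SuspendShift);

        std::uint16_t state = Runnable | ThreadSuspended;  // Suspend(): 0x22
        std::printf("raw 0x%02X, base 0x%02X, suspended: %s\n",
                    static_cast<unsigned>(state),
                    static_cast<unsigned>(state & Mask),
                    (state & ~Mask) ? "yes" : "no");

        state &= Mask;  // Continue(): strip the suspend bits, keep the base.
        std::printf("after Continue(): 0x%02X\n", static_cast<unsigned>(state));
    }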
+
+ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
+ // Lock ourselves.
+ KScopedLightLock lk(activity_pause_lock);
+
+ // Set the activity.
+ {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Verify our state.
+ const auto cur_state = GetState();
+ R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
+ ResultInvalidState);
+
+ // Either pause or resume.
+ if (activity == Svc::ThreadActivity::Paused) {
+ // Verify that we're not suspended.
+ R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+
+ // Suspend.
+ RequestSuspend(SuspendType::Thread);
+ } else {
+ ASSERT(activity == Svc::ThreadActivity::Runnable);
+
+ // Verify that we're suspended.
+ R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+
+ // Resume.
+ Resume(SuspendType::Thread);
+ }
+ }
+
+ // If the thread is now paused, update the pinned waiter list.
+ if (activity == Svc::ThreadActivity::Paused) {
+ bool thread_is_pinned{};
+ bool thread_is_current{};
+ do {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Don't do any further management if our termination has been requested.
+ R_SUCCEED_IF(IsTerminationRequested());
+
+ // Check whether the thread is pinned.
+ if (GetStackParameters().is_pinned) {
+ // Verify that the current thread isn't terminating.
+ R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
+ ResultTerminationRequested);
+
+ // Note that the thread was pinned and not current.
+ thread_is_pinned = true;
+ thread_is_current = false;
+
+ // Wait until the thread isn't pinned any more.
+ pinned_waiter_list.push_back(GetCurrentThread(kernel));
+ GetCurrentThread(kernel).SetState(ThreadState::Waiting);
+ } else {
+ // Check if the thread is currently running.
+ // If it is, we'll need to retry.
+ thread_is_current = false;
+
+ for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ if (kernel.Scheduler(i).GetCurrentThread() == this) {
+ thread_is_current = true;
+ break;
+ }
+ }
+ }
+ } while (thread_is_current);
+
+ // If the thread was pinned, it no longer is, and we should remove the current thread from
+ // our waiter list.
+ if (thread_is_pinned) {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Remove from the list.
+ pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
+ // Lock ourselves.
+ KScopedLightLock lk{activity_pause_lock};
+
+ // Get the context.
+ {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // Verify that we're suspended.
+ R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+
+ // If we're not terminating, get the thread's user context.
+ if (!IsTerminationRequested()) {
+ if (parent->Is64BitProcess()) {
+ // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
+ auto context = GetContext64();
+ context.pstate &= 0xFF0FFE20;
+
+ out.resize(sizeof(context));
+ std::memcpy(out.data(), &context, sizeof(context));
+ } else {
+ // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
+ auto context = GetContext32();
+ context.cpsr &= 0xFF0FFE20;
+
+ out.resize(sizeof(context));
+ std::memcpy(out.data(), &context, sizeof(context));
+ }
+ }
+ }
+
+ return RESULT_SUCCESS;
+}
+
+void KThread::AddWaiterImpl(KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Find the right spot to insert the waiter.
+ auto it = waiter_list.begin();
+ while (it != waiter_list.end()) {
+ if (it->GetPriority() > thread->GetPriority()) {
+ break;
+ }
+ it++;
+ }
+
+ // Keep track of how many kernel waiters we have.
+ if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+ ASSERT((num_kernel_waiters++) >= 0);
+ }
+
+ // Insert the waiter.
+ waiter_list.insert(it, *thread);
+ thread->SetLockOwner(this);
+}
+
+void KThread::RemoveWaiterImpl(KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Keep track of how many kernel waiters we have.
+ if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+ ASSERT((num_kernel_waiters--) > 0);
+ }
+
+ // Remove the waiter.
+ waiter_list.erase(waiter_list.iterator_to(*thread));
+ thread->SetLockOwner(nullptr);
+}
+
+void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ while (true) {
+ // We want to inherit priority where possible.
+ s32 new_priority = thread->GetBasePriority();
+ if (thread->HasWaiters()) {
+ new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+ }
+
+ // If the priority we would inherit is not different from ours, don't do anything.
+ if (new_priority == thread->GetPriority()) {
+ return;
+ }
+
+ // Ensure we don't violate condition variable red black tree invariants.
+ if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+ BeforeUpdatePriority(kernel, cv_tree, thread);
+ }
+
+ // Change the priority.
+ const s32 old_priority = thread->GetPriority();
+ thread->SetPriority(new_priority);
+
+ // Restore the condition variable, if relevant.
+ if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+ AfterUpdatePriority(kernel, cv_tree, thread);
+ }
+
+ // Update the scheduler.
+ KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
+
+ // Keep the lock owner up to date.
+ KThread* lock_owner = thread->GetLockOwner();
+ if (lock_owner == nullptr) {
+ return;
+ }
+
+ // Update the thread in the lock owner's sorted list, and continue inheriting.
+ lock_owner->RemoveWaiterImpl(thread);
+ lock_owner->AddWaiterImpl(thread);
+ thread = lock_owner;
+ }
+}
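RestorePriority() is a classic priority-inheritance walk. A stand-alone model of just the propagation step (lower number means stronger priority; the kernel's sorted waiter_list is reduced here to a min over a vector):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct ModelThread {
        int base_priority;
        int priority;
        std::vector<ModelThread*> waiters;  // the kernel keeps this sorted
        ModelThread* lock_owner{};
    };

    void Restore(ModelThread* t) {
        while (t != nullptr) {
            int wanted = t->base_priority;
            for (const ModelThread* w : t->waiters) {
                wanted = std::min(wanted, w->priority);
            }
            if (wanted == t->priority) {
                return;           // Nothing changed; the chain is stable.
            }
            t->priority = wanted; // Inherit, then walk up to our own owner.
            t = t->lock_owner;
        }
    }

    int main() {
        ModelThread low{40, 40}, high{10, 10};
        low.waiters.push_back(&high);  // high blocks on a lock held by low
        high.lock_owner = &low;
        Restore(&low);
        std::printf("low now runs at priority %d\n", low.priority);  // 10
    }

The kernel version additionally re-sorts the updated thread inside its owner's waiter list (the RemoveWaiterImpl/AddWaiterImpl pair) before continuing up the chain.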
+
+void KThread::AddWaiter(KThread* thread) {
+ AddWaiterImpl(thread);
+ RestorePriority(kernel, this);
+}
+
+void KThread::RemoveWaiter(KThread* thread) {
+ RemoveWaiterImpl(thread);
+ RestorePriority(kernel, this);
+}
+
+KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ s32 num_waiters{};
+ KThread* next_lock_owner{};
+ auto it = waiter_list.begin();
+ while (it != waiter_list.end()) {
+ if (it->GetAddressKey() == key) {
+ KThread* thread = std::addressof(*it);
+
+ // Keep track of how many kernel waiters we have.
+ if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+ ASSERT((num_kernel_waiters--) > 0);
+ }
+ it = waiter_list.erase(it);
+
+ // Update the next lock owner.
+ if (next_lock_owner == nullptr) {
+ next_lock_owner = thread;
+ next_lock_owner->SetLockOwner(nullptr);
+ } else {
+ next_lock_owner->AddWaiterImpl(thread);
+ }
+ num_waiters++;
+ } else {
+ it++;
+ }
+ }
+
+ // Do priority updates, if we have a next owner.
+ if (next_lock_owner) {
+ RestorePriority(kernel, this);
+ RestorePriority(kernel, next_lock_owner);
+ }
+
+ // Return output.
+ *out_num_waiters = num_waiters;
+ return next_lock_owner;
+}
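A hedged sketch of the intended consumer, a mutex-release path (the real one lands in KConditionVariable in this same commit; mutex_addr, cur_thread, and the use of Svc::HandleWaitMask as the contention bit are assumptions here):

    s32 num_waiters{};
    KThread* next_owner = cur_thread->RemoveWaiterByKey(&num_waiters, mutex_addr);

    u32 new_tag = 0; // Value to store back to the guest mutex word (omitted).
    if (next_owner != nullptr) {
        new_tag = next_owner->GetAddressKeyValue(); // New owner's handle tag.
        if (num_waiters > 1) {
            new_tag |= Svc::HandleWaitMask; // Others still queued: contended.
        }
        next_owner->SetSyncedObject(nullptr, RESULT_SUCCESS);
        next_owner->Wakeup();
    }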
+
+ResultCode KThread::Run() {
+ while (true) {
+ KScopedSchedulerLock lk{kernel};
+
+ // If either this thread or the current thread are requesting termination, note it.
+ R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested);
+ R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
+
+ // Ensure our thread state is correct.
+ R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState);
+
+ // If the current thread has been asked to suspend, suspend it and retry.
+ if (GetCurrentThread(kernel).IsSuspended()) {
+ GetCurrentThread(kernel).Suspend();
+ continue;
+ }
+
+ // If we're not a kernel thread and we've been asked to suspend, suspend ourselves.
+ if (IsUserThread() && IsSuspended()) {
+ Suspend();
+ }
+
+ // Set our state and finish.
+ SetState(ThreadState::Runnable);
+ return RESULT_SUCCESS;
+ }
+}
+
+void KThread::Exit() {
+ ASSERT(this == GetCurrentThreadPointer(kernel));
+
+ // Release the thread resource hint from parent.
+ if (parent != nullptr) {
+ // TODO(bunnei): Hint that the resource is about to be released.
+ resource_limit_release_hint = true;
+ }
+
+ // Perform termination.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ // Disallow all suspension.
+ suspend_allowed_flags = 0;
+
+ // Start termination.
+ StartTermination();
+ }
+}
+
+ResultCode KThread::Sleep(s64 timeout) {
+ ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(this == GetCurrentThreadPointer(kernel));
+ ASSERT(timeout > 0);
+
+ {
+ // Set up the scheduling lock and sleep.
+ KScopedSchedulerLockAndSleep slp{kernel, this, timeout};
+
+ // Check if the thread should terminate.
+ if (IsTerminationRequested()) {
+ slp.CancelSleep();
+ return ResultTerminationRequested;
+ }
+
+ // Mark the thread as waiting.
+ SetState(ThreadState::Waiting);
+ SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
+ }
+
+ // The lock/sleep is done.
+
+ // Cancel the timer.
+ kernel.TimeManager().UnscheduleTimeEvent(this);
+
+ return RESULT_SUCCESS;
+}
+
+void KThread::SetState(ThreadState state) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Clear debugging state
+ SetMutexWaitAddressForDebugging({});
+ SetWaitReasonForDebugging({});
+
+ const ThreadState old_state = thread_state;
+ thread_state =
+ static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
+ if (thread_state != old_state) {
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ }
+}
+
+std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
+ return host_context;
+}
+
+ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
+ std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id,
+ VAddr stack_top, Process* owner_process) {
+ std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
+ void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+ return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
+ owner_process, std::move(init_func), init_func_parameter);
+}
+
+ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
+ std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id,
+ VAddr stack_top, Process* owner_process,
+ std::function<void(void*)>&& thread_start_func,
+ void* thread_start_parameter) {
+ auto& kernel = system.Kernel();
+
+ std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);
+
+ if (const auto result =
+ thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority,
+ processor_id, owner_process, type_flags);
+ result.IsError()) {
+ return result;
+ }
+
+ thread->name = name;
+
+ auto& scheduler = kernel.GlobalSchedulerContext();
+ scheduler.AddThread(thread);
+
+ thread->host_context =
+ std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
+
+ return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
+}
+
+KThread* GetCurrentThreadPointer(KernelCore& kernel) {
+ return kernel.GetCurrentEmuThread();
+}
+
+KThread& GetCurrentThread(KernelCore& kernel) {
+ return *GetCurrentThreadPointer(kernel);
+}
+
+s32 GetCurrentCoreId(KernelCore& kernel) {
+ return GetCurrentThread(kernel).GetCurrentCore();
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
new file mode 100644
index 000000000..c8ac656a4
--- /dev/null
+++ b/src/core/hle/kernel/k_thread.h
@@ -0,0 +1,768 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <span>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <boost/intrusive/list.hpp>
+
+#include "common/common_types.h"
+#include "common/intrusive_red_black_tree.h"
+#include "common/spin_lock.h"
+#include "core/arm/arm_interface.h"
+#include "core/hle/kernel/k_affinity_mask.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/svc_common.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
+namespace Common {
+class Fiber;
+}
+
+namespace Core {
+class ARM_Interface;
+class System;
+} // namespace Core
+
+namespace Kernel {
+
+class GlobalSchedulerContext;
+class KernelCore;
+class Process;
+class KScheduler;
+class KThreadQueue;
+
+using KThreadFunction = VAddr;
+
+enum class ThreadType : u32 {
+ Main = 0,
+ Kernel = 1,
+ HighPriority = 2,
+ User = 3,
+};
+DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
+
+enum class SuspendType : u32 {
+ Process = 0,
+ Thread = 1,
+ Debug = 2,
+ Backtrace = 3,
+ Init = 4,
+
+ Count,
+};
+
+enum class ThreadState : u16 {
+ Initialized = 0,
+ Waiting = 1,
+ Runnable = 2,
+ Terminated = 3,
+
+ SuspendShift = 4,
+ Mask = (1 << SuspendShift) - 1,
+
+ ProcessSuspended = (1 << (0 + SuspendShift)),
+ ThreadSuspended = (1 << (1 + SuspendShift)),
+ DebugSuspended = (1 << (2 + SuspendShift)),
+ BacktraceSuspended = (1 << (3 + SuspendShift)),
+ InitSuspended = (1 << (4 + SuspendShift)),
+
+ SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
+};
+DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
+
+enum class DpcFlag : u32 {
+ Terminating = (1 << 0),
+ Terminated = (1 << 1),
+};
+
+enum class ThreadWaitReasonForDebugging : u32 {
+ None, ///< Thread is not waiting
+ Sleep, ///< Thread is waiting due to a SleepThread SVC
+ IPC, ///< Thread is waiting for the reply from an IPC request
+ Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
+ ConditionVar, ///< Thread is waiting due to a WaitProcessWideKey SVC
+ Arbitration, ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
+ Suspended, ///< Thread is waiting due to process suspension
+};
+
+[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
+[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
+[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
+
+class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
+ friend class KScheduler;
+ friend class Process;
+
+public:
+ static constexpr s32 DefaultThreadPriority = 44;
+ static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
+
+ explicit KThread(KernelCore& kernel);
+ ~KThread() override;
+
+public:
+ using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
+ using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
+ using WaiterList = boost::intrusive::list<KThread>;
+
+ /**
+ * Creates and returns a new thread. The new thread is immediately scheduled.
+ * @param system The instance of the whole system
+ * @param type_flags The type of the new thread (see ThreadType)
+ * @param name The friendly name desired for the thread
+ * @param entry_point The address at which the thread should start execution
+ * @param priority The thread's priority
+ * @param arg User data to pass to the thread
+ * @param processor_id The ID of the processor on which the thread should run
+ * @param stack_top The address of the thread's stack top
+ * @param owner_process The parent process for the thread; if null, it's a kernel thread
+ * @return A shared pointer to the newly created thread
+ */
+ [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
+ Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
+
+ /**
+ * Creates and returns a new thread. The new thread is immediately scheduled.
+ * @param system The instance of the whole system
+ * @param type_flags The type of the new thread (see ThreadType)
+ * @param name The friendly name desired for the thread
+ * @param entry_point The address at which the thread should start execution
+ * @param priority The thread's priority
+ * @param arg User data to pass to the thread
+ * @param processor_id The ID of the processor on which the thread should run
+ * @param stack_top The address of the thread's stack top
+ * @param owner_process The parent process for the thread; if null, it's a kernel thread
+ * @param thread_start_func The function where the host context will start.
+ * @param thread_start_parameter The parameter which will be passed to the host context on init
+ * @return A shared pointer to the newly created thread
+ */
+ [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
+ Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
+ u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
+ std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
+
+ [[nodiscard]] std::string GetName() const override {
+ return name;
+ }
+
+ void SetName(std::string new_name) {
+ name = std::move(new_name);
+ }
+
+ [[nodiscard]] std::string GetTypeName() const override {
+ return "Thread";
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
+ [[nodiscard]] HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ /**
+ * Gets the thread's current priority
+ * @return The current thread's priority
+ */
+ [[nodiscard]] s32 GetPriority() const {
+ return priority;
+ }
+
+ /**
+ * Sets the thread's current priority.
+ * @param priority The new priority.
+ */
+ void SetPriority(s32 value) {
+ priority = value;
+ }
+
+ /**
+ * Gets the thread's nominal priority.
+ * @return The current thread's nominal priority.
+ */
+ [[nodiscard]] s32 GetBasePriority() const {
+ return base_priority;
+ }
+
+ /**
+ * Gets the thread's thread ID
+ * @return The thread's ID
+ */
+ [[nodiscard]] u64 GetThreadID() const {
+ return thread_id;
+ }
+
+ void ContinueIfHasKernelWaiters() {
+ if (GetNumKernelWaiters() > 0) {
+ Continue();
+ }
+ }
+
+ void Wakeup();
+
+ void SetBasePriority(s32 value);
+
+ [[nodiscard]] ResultCode Run();
+
+ void Exit();
+
+ [[nodiscard]] u32 GetSuspendFlags() const {
+ return suspend_allowed_flags & suspend_request_flags;
+ }
+
+ [[nodiscard]] bool IsSuspended() const {
+ return GetSuspendFlags() != 0;
+ }
+
+ [[nodiscard]] bool IsSuspendRequested(SuspendType type) const {
+ return (suspend_request_flags &
+ (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) !=
+ 0;
+ }
+
+ [[nodiscard]] bool IsSuspendRequested() const {
+ return suspend_request_flags != 0;
+ }
+
+ void RequestSuspend(SuspendType type);
+
+ void Resume(SuspendType type);
+
+ void TrySuspend();
+
+ void Continue();
+
+ void Suspend();
+
+ void Finalize() override;
+
+ bool IsSignaled() const override;
+
+ void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
+ synced_object = obj;
+ wait_result = wait_res;
+ }
+
+ [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
+ *out = synced_object;
+ return wait_result;
+ }
+
+ /**
+ * Returns the Thread Local Storage address of this thread
+ * @returns VAddr of the thread's TLS
+ */
+ [[nodiscard]] VAddr GetTLSAddress() const {
+ return tls_address;
+ }
+
+ /**
+ * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
+ * @returns The value of the TPIDR_EL0 register.
+ */
+ [[nodiscard]] u64 GetTPIDR_EL0() const {
+ return thread_context_64.tpidr;
+ }
+
+ /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
+ void SetTPIDR_EL0(u64 value) {
+ thread_context_64.tpidr = value;
+ thread_context_32.tpidr = static_cast<u32>(value);
+ }
+
+ [[nodiscard]] ThreadContext32& GetContext32() {
+ return thread_context_32;
+ }
+
+ [[nodiscard]] const ThreadContext32& GetContext32() const {
+ return thread_context_32;
+ }
+
+ [[nodiscard]] ThreadContext64& GetContext64() {
+ return thread_context_64;
+ }
+
+ [[nodiscard]] const ThreadContext64& GetContext64() const {
+ return thread_context_64;
+ }
+
+ [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
+
+ [[nodiscard]] ThreadState GetState() const {
+ return thread_state & ThreadState::Mask;
+ }
+
+ [[nodiscard]] ThreadState GetRawState() const {
+ return thread_state;
+ }
+
+ void SetState(ThreadState state);
+
+ [[nodiscard]] s64 GetLastScheduledTick() const {
+ return last_scheduled_tick;
+ }
+
+ void SetLastScheduledTick(s64 tick) {
+ last_scheduled_tick = tick;
+ }
+
+ void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) {
+ cpu_time += amount;
+ // TODO(bunnei): Debug kernels track per-core tick counts. Should we?
+ }
+
+ [[nodiscard]] s64 GetCpuTime() const {
+ return cpu_time;
+ }
+
+ [[nodiscard]] s32 GetActiveCore() const {
+ return core_id;
+ }
+
+ void SetActiveCore(s32 core) {
+ core_id = core;
+ }
+
+ [[nodiscard]] s32 GetCurrentCore() const {
+ return current_core_id;
+ }
+
+ void SetCurrentCore(s32 core) {
+ current_core_id = core;
+ }
+
+ [[nodiscard]] Process* GetOwnerProcess() {
+ return parent;
+ }
+
+ [[nodiscard]] const Process* GetOwnerProcess() const {
+ return parent;
+ }
+
+ [[nodiscard]] bool IsUserThread() const {
+ return parent != nullptr;
+ }
+
+ [[nodiscard]] KThread* GetLockOwner() const {
+ return lock_owner;
+ }
+
+ void SetLockOwner(KThread* owner) {
+ lock_owner = owner;
+ }
+
+ [[nodiscard]] const KAffinityMask& GetAffinityMask() const {
+ return physical_affinity_mask;
+ }
+
+ [[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+
+ [[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+
+ [[nodiscard]] ResultCode SetCoreMask(s32 core_id, u64 v_affinity_mask);
+
+ [[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);
+
+ [[nodiscard]] ResultCode Sleep(s64 timeout);
+
+ [[nodiscard]] s64 GetYieldScheduleCount() const {
+ return schedule_count;
+ }
+
+ void SetYieldScheduleCount(s64 count) {
+ schedule_count = count;
+ }
+
+ void WaitCancel();
+
+ [[nodiscard]] bool IsWaitCancelled() const {
+ return wait_cancelled;
+ }
+
+ void ClearWaitCancelled() {
+ wait_cancelled = false;
+ }
+
+ [[nodiscard]] bool IsCancellable() const {
+ return cancellable;
+ }
+
+ void SetCancellable() {
+ cancellable = true;
+ }
+
+ void ClearCancellable() {
+ cancellable = false;
+ }
+
+ [[nodiscard]] bool IsTerminationRequested() const {
+ return termination_requested || GetRawState() == ThreadState::Terminated;
+ }
+
+ struct StackParameters {
+ u8 svc_permission[0x10];
+ std::atomic<u8> dpc_flags;
+ u8 current_svc_id;
+ bool is_calling_svc;
+ bool is_in_exception_handler;
+ bool is_pinned;
+ s32 disable_count;
+ KThread* cur_thread;
+ };
+
+ [[nodiscard]] StackParameters& GetStackParameters() {
+ return stack_parameters;
+ }
+
+ [[nodiscard]] const StackParameters& GetStackParameters() const {
+ return stack_parameters;
+ }
+
+ class QueueEntry {
+ public:
+ constexpr QueueEntry() = default;
+
+ constexpr void Initialize() {
+ prev = nullptr;
+ next = nullptr;
+ }
+
+ constexpr KThread* GetPrev() const {
+ return prev;
+ }
+ constexpr KThread* GetNext() const {
+ return next;
+ }
+ constexpr void SetPrev(KThread* thread) {
+ prev = thread;
+ }
+ constexpr void SetNext(KThread* thread) {
+ next = thread;
+ }
+
+ private:
+ KThread* prev{};
+ KThread* next{};
+ };
+
+ [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) {
+ return per_core_priority_queue_entry[core];
+ }
+
+ [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const {
+ return per_core_priority_queue_entry[core];
+ }
+
+ void SetSleepingQueue(KThreadQueue* q) {
+ sleeping_queue = q;
+ }
+
+ [[nodiscard]] s32 GetDisableDispatchCount() const {
+ return this->GetStackParameters().disable_count;
+ }
+
+ void DisableDispatch() {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
+ this->GetStackParameters().disable_count++;
+ }
+
+ void EnableDispatch() {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
+ this->GetStackParameters().disable_count--;
+ }
+
+ void Pin();
+
+ void Unpin();
+
+ void SetInExceptionHandler() {
+ this->GetStackParameters().is_in_exception_handler = true;
+ }
+
+ void ClearInExceptionHandler() {
+ this->GetStackParameters().is_in_exception_handler = false;
+ }
+
+ [[nodiscard]] bool IsInExceptionHandler() const {
+ return this->GetStackParameters().is_in_exception_handler;
+ }
+
+ void SetIsCallingSvc() {
+ this->GetStackParameters().is_calling_svc = true;
+ }
+
+ void ClearIsCallingSvc() {
+ this->GetStackParameters().is_calling_svc = false;
+ }
+
+ [[nodiscard]] bool IsCallingSvc() const {
+ return this->GetStackParameters().is_calling_svc;
+ }
+
+ [[nodiscard]] u8 GetSvcId() const {
+ return this->GetStackParameters().current_svc_id;
+ }
+
+ void RegisterDpc(DpcFlag flag) {
+ this->GetStackParameters().dpc_flags |= static_cast<u8>(flag);
+ }
+
+ void ClearDpc(DpcFlag flag) {
+ this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag);
+ }
+
+ [[nodiscard]] u8 GetDpc() const {
+ return this->GetStackParameters().dpc_flags;
+ }
+
+ [[nodiscard]] bool HasDpc() const {
+ return this->GetDpc() != 0;
+ }
+
+ void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
+ wait_reason_for_debugging = reason;
+ }
+
+ [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
+ return wait_reason_for_debugging;
+ }
+
+ [[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
+ return thread_type_for_debugging;
+ }
+
+ void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
+ wait_objects_for_debugging.clear();
+ wait_objects_for_debugging.reserve(objects.size());
+ for (const auto& object : objects) {
+ wait_objects_for_debugging.emplace_back(object);
+ }
+ }
+
+ [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
+ return wait_objects_for_debugging;
+ }
+
+ void SetMutexWaitAddressForDebugging(VAddr address) {
+ mutex_wait_address_for_debugging = address;
+ }
+
+ [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
+ return mutex_wait_address_for_debugging;
+ }
+
+ [[nodiscard]] s32 GetIdealCoreForDebugging() const {
+ return virtual_ideal_core_id;
+ }
+
+ void AddWaiter(KThread* thread);
+
+ void RemoveWaiter(KThread* thread);
+
+ [[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);
+
+ [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
+
+ [[nodiscard]] VAddr GetAddressKey() const {
+ return address_key;
+ }
+
+ [[nodiscard]] u32 GetAddressKeyValue() const {
+ return address_key_value;
+ }
+
+ void SetAddressKey(VAddr key) {
+ address_key = key;
+ }
+
+ void SetAddressKey(VAddr key, u32 val) {
+ address_key = key;
+ address_key_value = val;
+ }
+
+ [[nodiscard]] bool HasWaiters() const {
+ return !waiter_list.empty();
+ }
+
+ [[nodiscard]] s32 GetNumKernelWaiters() const {
+ return num_kernel_waiters;
+ }
+
+ [[nodiscard]] u64 GetConditionVariableKey() const {
+ return condvar_key;
+ }
+
+ [[nodiscard]] u64 GetAddressArbiterKey() const {
+ return condvar_key;
+ }
+
+private:
+ static constexpr size_t PriorityInheritanceCountMax = 10;
+ union SyncObjectBuffer {
+ std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
+ std::array<Handle,
+ Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
+ handles;
+ constexpr SyncObjectBuffer() {}
+ };
+ static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
+
+ struct ConditionVariableComparator {
+ struct LightCompareType {
+ u64 cv_key{};
+ s32 priority{};
+
+ [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
+ return cv_key;
+ }
+
+ [[nodiscard]] constexpr s32 GetPriority() const {
+ return priority;
+ }
+ };
+
+ template <typename T>
+ requires(
+ std::same_as<T, KThread> ||
+ std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
+ const KThread& rhs) {
+ const u64 l_key = lhs.GetConditionVariableKey();
+ const u64 r_key = rhs.GetConditionVariableKey();
+
+ if (l_key < r_key) {
+ // Sort first by key
+ return -1;
+ } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
+ // And then by priority.
+ return -1;
+ } else {
+ return 1;
+ }
+ }
+ };
+
+ void AddWaiterImpl(KThread* thread);
+
+ void RemoveWaiterImpl(KThread* thread);
+
+ void StartTermination();
+
+ [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
+ s32 prio, s32 virt_core, Process* owner, ThreadType type);
+
+ [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
+ uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 core, Process* owner, ThreadType type);
+
+ static void RestorePriority(KernelCore& kernel, KThread* thread);
+
+ // For core KThread implementation
+ ThreadContext32 thread_context_32{};
+ ThreadContext64 thread_context_64{};
+ Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
+ s32 priority{};
+ using ConditionVariableThreadTreeTraits =
+ Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
+ &KThread::condvar_arbiter_tree_node>;
+ using ConditionVariableThreadTree =
+ ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
+ ConditionVariableThreadTree* condvar_tree{};
+ u64 condvar_key{};
+ u64 virtual_affinity_mask{};
+ KAffinityMask physical_affinity_mask{};
+ u64 thread_id{};
+ std::atomic<s64> cpu_time{};
+ KSynchronizationObject* synced_object{};
+ VAddr address_key{};
+ Process* parent{};
+ VAddr kernel_stack_top{};
+ u32* light_ipc_data{};
+ VAddr tls_address{};
+ KLightLock activity_pause_lock;
+ s64 schedule_count{};
+ s64 last_scheduled_tick{};
+ std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
+ KThreadQueue* sleeping_queue{};
+ WaiterList waiter_list{};
+ WaiterList pinned_waiter_list{};
+ KThread* lock_owner{};
+ u32 address_key_value{};
+ u32 suspend_request_flags{};
+ u32 suspend_allowed_flags{};
+ ResultCode wait_result{RESULT_SUCCESS};
+ s32 base_priority{};
+ s32 physical_ideal_core_id{};
+ s32 virtual_ideal_core_id{};
+ s32 num_kernel_waiters{};
+ s32 current_core_id{};
+ s32 core_id{};
+ KAffinityMask original_physical_affinity_mask{};
+ s32 original_physical_ideal_core_id{};
+ s32 num_core_migration_disables{};
+ ThreadState thread_state{};
+ std::atomic<bool> termination_requested{};
+ bool wait_cancelled{};
+ bool cancellable{};
+ bool signaled{};
+ bool initialized{};
+ bool debug_attached{};
+ s8 priority_inheritance_count{};
+ bool resource_limit_release_hint{};
+ StackParameters stack_parameters{};
+ Common::SpinLock context_guard{};
+
+ // For emulation
+ std::shared_ptr<Common::Fiber> host_context{};
+
+ // For debugging
+ std::vector<KSynchronizationObject*> wait_objects_for_debugging;
+ VAddr mutex_wait_address_for_debugging{};
+ ThreadWaitReasonForDebugging wait_reason_for_debugging{};
+ ThreadType thread_type_for_debugging{};
+ std::string name;
+
+public:
+ using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
+
+ void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
+ u32 value) {
+ condvar_tree = tree;
+ condvar_key = cv_key;
+ address_key = address;
+ address_key_value = value;
+ }
+
+ void ClearConditionVariable() {
+ condvar_tree = nullptr;
+ }
+
+ [[nodiscard]] bool IsWaitingForConditionVariable() const {
+ return condvar_tree != nullptr;
+ }
+
+ void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
+ condvar_tree = tree;
+ condvar_key = address;
+ }
+
+ void ClearAddressArbiter() {
+ condvar_tree = nullptr;
+ }
+
+ [[nodiscard]] bool IsWaitingForAddressArbiter() const {
+ return condvar_tree != nullptr;
+ }
+
+ [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
+ return condvar_tree;
+ }
+};
+
+} // namespace Kernel
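The QueueEntry links above give each thread one embedded {prev, next} pair per CPU core, so the scheduler's per-core priority queues can be maintained without any heap allocation. A minimal, self-contained sketch of that intrusive-link idea (the Thread/PerCoreList names are illustrative stand-ins, not yuzu's types):

#include <cassert>
#include <cstddef>

struct Thread; // hypothetical stand-in for KThread

struct QueueEntry {
    Thread* prev{};
    Thread* next{};
};

constexpr std::size_t NumCores = 4;

struct Thread {
    QueueEntry entries[NumCores]; // one link pair per core, as in KThread
};

struct PerCoreList {
    Thread* head{};
    Thread* tail{};
    std::size_t core{};

    void push_back(Thread* t) {
        QueueEntry& e = t->entries[core];
        e.prev = tail;
        e.next = nullptr;
        if (tail != nullptr) {
            tail->entries[core].next = t;
        } else {
            head = t;
        }
        tail = t;
    }
};

int main() {
    Thread a{}, b{};
    PerCoreList list{.core = 2};
    list.push_back(&a);
    list.push_back(&b);
    assert(list.head == &a && a.entries[2].next == &b);
}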
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
new file mode 100644
index 000000000..c52eba249
--- /dev/null
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -0,0 +1,81 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/k_thread.h"
+
+namespace Kernel {
+
+class KThreadQueue {
+public:
+ explicit KThreadQueue(KernelCore& kernel) : kernel{kernel} {}
+
+ bool IsEmpty() const {
+ return wait_list.empty();
+ }
+
+ KThread::WaiterList::iterator begin() {
+ return wait_list.begin();
+ }
+ KThread::WaiterList::iterator end() {
+ return wait_list.end();
+ }
+
+ bool SleepThread(KThread* t) {
+ KScopedSchedulerLock sl{kernel};
+
+ // If the thread needs terminating, don't enqueue it.
+ if (t->IsTerminationRequested()) {
+ return false;
+ }
+
+ // Set the thread's queue and mark it as waiting.
+ t->SetSleepingQueue(this);
+ t->SetState(ThreadState::Waiting);
+
+ // Add the thread to the queue.
+ wait_list.push_back(*t);
+
+ return true;
+ }
+
+ void WakeupThread(KThread* t) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Remove the thread from the queue.
+ wait_list.erase(wait_list.iterator_to(*t));
+
+ // Mark the thread as no longer sleeping.
+ t->SetState(ThreadState::Runnable);
+ t->SetSleepingQueue(nullptr);
+ }
+
+ KThread* WakeupFrontThread() {
+ KScopedSchedulerLock sl{kernel};
+
+ if (wait_list.empty()) {
+ return nullptr;
+ } else {
+ // Remove the thread from the queue.
+ auto it = wait_list.begin();
+ KThread* thread = std::addressof(*it);
+ wait_list.erase(it);
+
+ ASSERT(thread->GetState() == ThreadState::Waiting);
+
+ // Mark the thread as no longer sleeping.
+ thread->SetState(ThreadState::Runnable);
+ thread->SetSleepingQueue(nullptr);
+
+ return thread;
+ }
+ }
+
+private:
+ KernelCore& kernel;
+ KThread::WaiterList wait_list{};
+};
+
+} // namespace Kernel
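A compact model of the KThreadQueue contract above, using a plain std::deque as a stand-in for the intrusive WaiterList and ignoring the scheduler lock: SleepThread refuses a thread whose termination has been requested, and WakeupFrontThread pops in FIFO order, returning nullptr when the queue is empty.

#include <cassert>
#include <deque>

enum class State { Runnable, Waiting };

struct Thread {
    State state{State::Runnable};
    bool termination_requested{};
};

class WaitQueue {
public:
    bool SleepThread(Thread* t) {
        if (t->termination_requested) {
            return false; // as in KThreadQueue::SleepThread
        }
        t->state = State::Waiting;
        queue.push_back(t);
        return true;
    }

    Thread* WakeupFrontThread() {
        if (queue.empty()) {
            return nullptr;
        }
        Thread* t = queue.front();
        queue.pop_front();
        assert(t->state == State::Waiting);
        t->state = State::Runnable;
        return t;
    }

private:
    std::deque<Thread*> queue;
};

int main() {
    Thread a{}, b{.termination_requested = true};
    WaitQueue q;
    assert(q.SleepThread(&a));
    assert(!q.SleepThread(&b));
    assert(q.WakeupFrontThread() == &a);
    assert(q.WakeupFrontThread() == nullptr);
}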
diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp
new file mode 100644
index 000000000..25c52edb2
--- /dev/null
+++ b/src/core/hle/kernel/k_writable_event.cpp
@@ -0,0 +1,27 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/k_writable_event.h"
+
+namespace Kernel {
+
+KWritableEvent::KWritableEvent(KernelCore& kernel, std::string&& name)
+ : Object{kernel, std::move(name)} {}
+KWritableEvent::~KWritableEvent() = default;
+
+void KWritableEvent::Initialize(KEvent* parent_) {
+ parent = parent_;
+}
+
+ResultCode KWritableEvent::Signal() {
+ return parent->GetReadableEvent()->Signal();
+}
+
+ResultCode KWritableEvent::Clear() {
+ return parent->GetReadableEvent()->Clear();
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h
new file mode 100644
index 000000000..518f5448d
--- /dev/null
+++ b/src/core/hle/kernel/k_writable_event.h
@@ -0,0 +1,44 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/object.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KernelCore;
+class KEvent;
+
+class KWritableEvent final : public Object {
+public:
+ explicit KWritableEvent(KernelCore& kernel, std::string&& name);
+ ~KWritableEvent() override;
+
+ std::string GetTypeName() const override {
+ return "KWritableEvent";
+ }
+
+ static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent;
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ void Initialize(KEvent* parent_);
+
+ void Finalize() override {}
+
+ ResultCode Signal();
+ ResultCode Clear();
+
+ KEvent* GetParent() const {
+ return parent;
+ }
+
+private:
+ KEvent* parent{};
+};
+
+} // namespace Kernel
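The writable half above holds no state of its own: Signal and Clear forward to the readable half owned by the parent KEvent, which keeps the signaled state that waiters observe. A simplified sketch of that split (the class names and members here are illustrative, not yuzu's API):

#include <cassert>

class ReadableEvent {
public:
    void Signal() { signaled = true; }
    void Clear() { signaled = false; }
    bool IsSignaled() const { return signaled; }

private:
    bool signaled{};
};

class WritableEvent {
public:
    explicit WritableEvent(ReadableEvent* r) : readable{r} {}
    void Signal() { readable->Signal(); } // mirrors KWritableEvent::Signal
    void Clear() { readable->Clear(); }

private:
    ReadableEvent* readable;
};

int main() {
    ReadableEvent r;
    WritableEvent w{&r};
    w.Signal();
    assert(r.IsSignaled());
    w.Clear();
    assert(!r.IsSignaled());
}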
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index e8ece8164..b6e6f115e 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -26,20 +26,19 @@
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/client_port.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/slab_heap.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/shared_memory.h"
-#include "core/hle/kernel/synchronization.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
@@ -51,22 +50,23 @@ namespace Kernel {
struct KernelCore::Impl {
explicit Impl(Core::System& system, KernelCore& kernel)
- : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
- system} {}
+ : time_manager{system}, global_handle_table{kernel}, system{system} {}
void SetMulticore(bool is_multicore) {
this->is_multicore = is_multicore;
}
void Initialize(KernelCore& kernel) {
+ global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
+
RegisterHostThread();
- global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
service_thread_manager =
std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
+ is_phantom_mode_for_singlecore = false;
InitializePhysicalCores();
- InitializeSystemResourceLimit(kernel);
+ InitializeSystemResourceLimit(kernel, system);
InitializeMemoryLayout();
InitializePreemption(kernel);
InitializeSchedulers();
@@ -118,34 +118,40 @@ struct KernelCore::Impl {
void InitializePhysicalCores() {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
- for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
cores.emplace_back(i, system, *schedulers[i], interrupts);
}
}
void InitializeSchedulers() {
- for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
cores[i].Scheduler().Initialize();
}
}
// Creates the default system resource limit
- void InitializeSystemResourceLimit(KernelCore& kernel) {
- system_resource_limit = ResourceLimit::Create(kernel);
+ void InitializeSystemResourceLimit(KernelCore& kernel, Core::System& system) {
+ system_resource_limit = std::make_shared<KResourceLimit>(kernel, system);
// If setting the default system values fails, then something seriously wrong has occurred.
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x100000000)
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, 0x100000000)
+ .IsSuccess());
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess());
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 700).IsSuccess());
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200)
.IsSuccess());
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::Threads, 800).IsSuccess());
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::Events, 700).IsSuccess());
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::TransferMemory, 200).IsSuccess());
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::Sessions, 900).IsSuccess());
+ ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 933).IsSuccess());
- if (!system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0) ||
- !system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0x60000)) {
+ // Derived from recent software updates. The kernel reserves 27MB.
+ constexpr u64 kernel_size{0x1b00000};
+ if (!system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size)) {
UNREACHABLE();
}
+ // Reserve secure applet memory, introduced in firmware 5.0.0
+ constexpr u64 secure_applet_memory_size{0x400000};
+ ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
+ secure_applet_memory_size));
}
void InitializePreemption(KernelCore& kernel) {
@@ -170,11 +176,9 @@ struct KernelCore::Impl {
std::string name = "Suspend Thread Id:" + std::to_string(i);
std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
- const auto type =
- static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
- auto thread_res =
- Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
- nullptr, std::move(init_func), init_func_parameter);
+ auto thread_res = KThread::Create(system, ThreadType::HighPriority, std::move(name), 0,
+ 0, 0, static_cast<u32>(i), 0, nullptr,
+ std::move(init_func), init_func_parameter);
suspend_threads[i] = std::move(thread_res).Unwrap();
}
@@ -209,6 +213,17 @@ struct KernelCore::Impl {
return host_thread_id;
}
+ // Gets the dummy KThread for the caller, allocating a new one on the first call from the current host thread
+ KThread* GetHostDummyThread() {
+ const thread_local auto thread =
+ KThread::Create(
+ system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0,
+ KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr,
+ []([[maybe_unused]] void* arg) { UNREACHABLE(); }, nullptr)
+ .Unwrap();
+ return thread.get();
+ }
+
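GetHostDummyThread relies on a function-local thread_local, so each host thread allocates its dummy KThread exactly once, on first use. The same pattern in isolation:

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int> next_id{0};

int GetPerThreadId() {
    thread_local const int id = next_id++; // initialized once per OS thread
    return id;
}

int main() {
    const int a = GetPerThreadId();
    assert(a == GetPerThreadId()); // stable within a thread
    std::thread([a] { assert(GetPerThreadId() != a); }).join();
}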
/// Registers a CPU core thread by allocating a host thread ID for it
void RegisterCoreThread(std::size_t core_id) {
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
@@ -221,6 +236,7 @@ struct KernelCore::Impl {
/// Registers a new host thread by allocating a host thread ID for it
void RegisterHostThread() {
[[maybe_unused]] const auto this_id = GetHostThreadId();
+ [[maybe_unused]] const auto dummy_thread = GetHostDummyThread();
}
[[nodiscard]] u32 GetCurrentHostThreadID() {
@@ -231,20 +247,21 @@ struct KernelCore::Impl {
return this_id;
}
- [[nodiscard]] Core::EmuThreadHandle GetCurrentEmuThreadID() {
- Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
- result.host_handle = GetCurrentHostThreadID();
- if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
- return result;
- }
- const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
- const Kernel::Thread* current = sched.GetCurrentThread();
- if (current != nullptr && !current->IsPhantomMode()) {
- result.guest_handle = current->GetGlobalHandle();
- } else {
- result.guest_handle = InvalidHandle;
+ bool IsPhantomModeForSingleCore() const {
+ return is_phantom_mode_for_singlecore;
+ }
+
+ void SetIsPhantomModeForSingleCore(bool value) {
+ ASSERT(!is_multicore);
+ is_phantom_mode_for_singlecore = value;
+ }
+
+ KThread* GetCurrentEmuThread() {
+ const auto thread_id = GetCurrentHostThreadID();
+ if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
+ return GetHostDummyThread();
}
- return result;
+ return schedulers[thread_id]->GetCurrentThread();
}
void InitializeMemoryLayout() {
@@ -291,8 +308,11 @@ struct KernelCore::Impl {
// Allocate slab heaps
user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>();
+ constexpr u64 user_slab_heap_size{0x1ef000};
+ // Reserve slab heaps
+ ASSERT(
+ system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
// Initialize slab heaps
- constexpr u64 user_slab_heap_size{0x3de000};
user_slab_heap_pages->Initialize(
system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
user_slab_heap_size);
@@ -307,10 +327,9 @@ struct KernelCore::Impl {
std::vector<std::shared_ptr<Process>> process_list;
Process* current_process = nullptr;
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
- Kernel::Synchronization synchronization;
Kernel::TimeManager time_manager;
- std::shared_ptr<ResourceLimit> system_resource_limit;
+ std::shared_ptr<KResourceLimit> system_resource_limit;
std::shared_ptr<Core::Timing::EventType> preemption_event;
@@ -345,11 +364,12 @@ struct KernelCore::Impl {
// the release of itself
std::unique_ptr<Common::ThreadWorker> service_thread_manager;
- std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
+ std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
bool is_multicore{};
+ bool is_phantom_mode_for_singlecore{};
u32 single_core_thread_id{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
@@ -379,12 +399,12 @@ void KernelCore::Shutdown() {
impl->Shutdown();
}
-std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
+std::shared_ptr<KResourceLimit> KernelCore::GetSystemResourceLimit() const {
return impl->system_resource_limit;
}
-std::shared_ptr<Thread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
- return impl->global_handle_table.Get<Thread>(handle);
+std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
+ return impl->global_handle_table.Get<KThread>(handle);
}
void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
@@ -461,14 +481,6 @@ const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Kern
return impl->interrupts;
}
-Kernel::Synchronization& KernelCore::Synchronization() {
- return impl->synchronization;
-}
-
-const Kernel::Synchronization& KernelCore::Synchronization() const {
- return impl->synchronization;
-}
-
Kernel::TimeManager& KernelCore::TimeManager() {
return impl->time_manager;
}
@@ -557,8 +569,8 @@ u32 KernelCore::GetCurrentHostThreadID() const {
return impl->GetCurrentHostThreadID();
}
-Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadID() const {
- return impl->GetCurrentEmuThreadID();
+KThread* KernelCore::GetCurrentEmuThread() const {
+ return impl->GetCurrentEmuThread();
}
Memory::MemoryManager& KernelCore::MemoryManager() {
@@ -613,9 +625,11 @@ void KernelCore::Suspend(bool in_suspention) {
const bool should_suspend = exception_exited || in_suspention;
{
KScopedSchedulerLock lock(*this);
- ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
+ const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- impl->suspend_threads[i]->SetStatus(status);
+ impl->suspend_threads[i]->SetState(state);
+ impl->suspend_threads[i]->SetWaitReasonForDebugging(
+ ThreadWaitReasonForDebugging::Suspended);
}
}
}
@@ -654,4 +668,12 @@ void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> servi
});
}
+bool KernelCore::IsPhantomModeForSingleCore() const {
+ return impl->IsPhantomModeForSingleCore();
+}
+
+void KernelCore::SetIsPhantomModeForSingleCore(bool value) {
+ impl->SetIsPhantomModeForSingleCore(value);
+}
+
} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index e3169f5a7..806a0d986 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -33,20 +33,23 @@ template <typename T>
class SlabHeap;
} // namespace Memory
-class AddressArbiter;
class ClientPort;
class GlobalSchedulerContext;
class HandleTable;
class PhysicalCore;
class Process;
-class ResourceLimit;
+class KResourceLimit;
class KScheduler;
class SharedMemory;
class ServiceThread;
class Synchronization;
-class Thread;
+class KThread;
class TimeManager;
+using EmuThreadHandle = uintptr_t;
+constexpr EmuThreadHandle EmuThreadHandleInvalid{};
+constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
+
/// Represents a single instance of the kernel.
class KernelCore {
private:
@@ -82,10 +85,10 @@ public:
void Shutdown();
/// Retrieves a shared pointer to the system resource limit instance.
- std::shared_ptr<ResourceLimit> GetSystemResourceLimit() const;
+ std::shared_ptr<KResourceLimit> GetSystemResourceLimit() const;
/// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
- std::shared_ptr<Thread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
+ std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
/// Adds the given shared pointer to an internal list of active processes.
void AppendNewProcess(std::shared_ptr<Process> process);
@@ -129,12 +132,6 @@ public:
/// Gets an instance of the current physical CPU core.
const Kernel::PhysicalCore& CurrentPhysicalCore() const;
- /// Gets the an instance of the Synchronization Interface.
- Kernel::Synchronization& Synchronization();
-
- /// Gets the an instance of the Synchronization Interface.
- const Kernel::Synchronization& Synchronization() const;
-
/// Gets an instance of the TimeManager Interface.
Kernel::TimeManager& TimeManager();
@@ -168,8 +165,8 @@ public:
/// Determines whether or not the given port is a valid named port.
bool IsValidNamedPort(NamedPortTable::const_iterator port) const;
- /// Gets the current host_thread/guest_thread handle.
- Core::EmuThreadHandle GetCurrentEmuThreadID() const;
+ /// Gets the current host_thread/guest_thread pointer.
+ KThread* GetCurrentEmuThread() const;
/// Gets the current host_thread handle.
u32 GetCurrentHostThreadID() const;
@@ -244,10 +241,14 @@ public:
*/
void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread);
+ /// Workaround for single-core mode when preempting threads while idle.
+ bool IsPhantomModeForSingleCore() const;
+ void SetIsPhantomModeForSingleCore(bool value);
+
private:
friend class Object;
friend class Process;
- friend class Thread;
+ friend class KThread;
/// Creates a new object ID, incrementing the internal object ID counter.
u32 CreateNewObjectID();
diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index e4288cab4..6cf43ba24 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
}
UNREACHABLE();
+ return 0;
}
std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
}
UNREACHABLE();
+ return 0;
}
} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_layout.h b/src/core/hle/kernel/memory/memory_layout.h
index 9b3d6267a..c7c0b2f49 100644
--- a/src/core/hle/kernel/memory/memory_layout.h
+++ b/src/core/hle/kernel/memory/memory_layout.h
@@ -5,9 +5,28 @@
#pragma once
#include "common/common_types.h"
+#include "core/device_memory.h"
namespace Kernel::Memory {
+constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
+constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
+constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48;
+constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
+constexpr std::size_t KernelVirtualAddressSpaceEnd =
+ KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
+constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;
+constexpr std::size_t KernelVirtualAddressSpaceSize =
+ KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
+
+constexpr bool IsKernelAddressKey(VAddr key) {
+ return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
+}
+
+constexpr bool IsKernelAddress(VAddr address) {
+ return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
+}
+
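Worked values for the constants above, assuming the 39-bit width: the base wraps around to the top of the 64-bit address space, and the final ASLR-alignment unit is excluded from the end. Note that IsKernelAddressKey uses the inclusive Last bound while IsKernelAddress uses the exclusive End bound. A static_assert check of the arithmetic:

#include <cstddef>

constexpr std::size_t Align = 2 * 1024 * 1024;
constexpr std::size_t Width = 1ULL << 39;
constexpr std::size_t Base = 0ULL - Width;
constexpr std::size_t End = Base + (Width - Align);

static_assert(Base == 0xFFFFFF8000000000);
static_assert(End == 0xFFFFFFFFFFE00000);
static_assert(End - Base == Width - Align);

int main() {}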
class MemoryRegion final {
friend class MemoryLayout;
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
index acf13585c..77f135cdc 100644
--- a/src/core/hle/kernel/memory/memory_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -8,9 +8,9 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel::Memory {
@@ -95,7 +95,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
// Choose a heap based on our page size request
const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
if (heap_index < 0) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
// TODO (bunnei): Support multiple managers
@@ -140,7 +140,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
// Only succeed if we allocated as many pages as we wanted
if (num_pages) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
// We succeeded!
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
index 22b0de860..131093284 100644
--- a/src/core/hle/kernel/memory/page_heap.h
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -8,11 +8,11 @@
#pragma once
#include <array>
+#include <bit>
#include <vector>
#include "common/alignment.h"
#include "common/assert.h"
-#include "common/bit_util.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
@@ -105,7 +105,7 @@ private:
ASSERT(depth == 0);
return -1;
}
- offset = offset * 64 + Common::CountTrailingZeroes64(v);
+ offset = offset * 64 + static_cast<u32>(std::countr_zero(v));
++depth;
} while (depth < static_cast<s32>(used_depths));
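std::countr_zero, used above in place of Common::CountTrailingZeroes64, returns the index of the lowest set bit, and the bit width (64 here) for a zero word:

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
    std::uint64_t v = 0b10100000;            // bits 5 and 7 set
    assert(std::countr_zero(v) == 5);        // index of lowest set bit
    v &= v - 1;                              // clear that bit
    assert(std::countr_zero(v) == 7);
    assert(std::countr_zero(std::uint64_t{0}) == 64);
}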
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index 080886554..00ed9b881 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -6,7 +6,7 @@
#include "common/assert.h"
#include "common/scope_exit.h"
#include "core/core.h"
-#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/address_space_info.h"
#include "core/hle/kernel/memory/memory_block.h"
@@ -15,7 +15,7 @@
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/memory/system_control.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/resource_limit.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
namespace Kernel::Memory {
@@ -141,7 +141,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t
(alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
if (alloc_size < needed_size) {
UNREACHABLE();
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
const std::size_t remaining_size{alloc_size - needed_size};
@@ -277,11 +277,11 @@ ResultCode PageTable::MapProcessCode(VAddr addr, std::size_t num_pages, MemorySt
const u64 size{num_pages * PageSize};
if (!CanContain(addr, size, state)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (IsRegionMapped(addr, size)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
PageLinkedList page_linked_list;
@@ -307,7 +307,7 @@ ResultCode PageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::
MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
if (IsRegionMapped(dst_addr, size)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
PageLinkedList page_linked_list;
@@ -409,27 +409,25 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
return RESULT_SUCCESS;
}
- auto process{system.Kernel().CurrentProcess()};
const std::size_t remaining_size{size - mapped_size};
const std::size_t remaining_pages{remaining_size / PageSize};
- if (process->GetResourceLimit() &&
- !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
- return ERR_RESOURCE_LIMIT_EXCEEDED;
+ // Reserve the memory from the process resource limit.
+ KScopedResourceReservation memory_reservation(
+ system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+ remaining_size);
+ if (!memory_reservation.Succeeded()) {
+ LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
+ return ResultResourceLimitedExceeded;
}
PageLinkedList page_linked_list;
- {
- auto block_guard = detail::ScopeExit([&] {
- system.Kernel().MemoryManager().Free(page_linked_list, remaining_pages, memory_pool);
- process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, remaining_size);
- });
- CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
- memory_pool));
+ CASCADE_CODE(
+ system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, memory_pool));
- block_guard.Cancel();
- }
+ // We succeeded, so commit the memory reservation.
+ memory_reservation.Commit();
MapPhysicalMemory(page_linked_list, addr, end_addr);
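The KScopedResourceReservation introduced above is an RAII guard: if the function exits on an error path without calling Commit(), the destructor returns the reservation to the limit. A simplified stand-in showing both paths (the classes and names here are illustrative, not yuzu's implementation):

#include <cassert>
#include <cstdint>

class Limit {
public:
    explicit Limit(std::uint64_t max) : limit{max} {}
    bool Reserve(std::uint64_t n) {
        if (current + n > limit) return false;
        current += n;
        return true;
    }
    void Release(std::uint64_t n) { current -= n; }
    std::uint64_t Current() const { return current; }

private:
    std::uint64_t limit;
    std::uint64_t current{};
};

class ScopedReservation {
public:
    ScopedReservation(Limit& l, std::uint64_t n)
        : limit{l}, amount{n}, succeeded{l.Reserve(n)} {}
    ~ScopedReservation() {
        if (succeeded && !committed) {
            limit.Release(amount); // roll back on failure paths
        }
    }
    bool Succeeded() const { return succeeded; }
    void Commit() { committed = true; }

private:
    Limit& limit;
    std::uint64_t amount;
    bool succeeded;
    bool committed{};
};

int main() {
    Limit mem{100};
    {
        ScopedReservation r(mem, 40);
        assert(r.Succeeded());
        // error path: no Commit(), so the destructor releases the 40 units
    }
    assert(mem.Current() == 0);
    ScopedReservation r(mem, 40);
    r.Commit(); // success path keeps the reservation
    assert(mem.Current() == 40);
}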
@@ -454,12 +452,12 @@ ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) {
if (info.state == MemoryState::Normal) {
if (info.attribute != MemoryAttribute::None) {
- result = ERR_INVALID_ADDRESS_STATE;
+ result = ResultInvalidCurrentMemory;
return;
}
mapped_size += GetSizeInRange(info, addr, end_addr);
} else if (info.state != MemoryState::Free) {
- result = ERR_INVALID_ADDRESS_STATE;
+ result = ResultInvalidCurrentMemory;
}
});
@@ -474,7 +472,7 @@ ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
CASCADE_CODE(UnmapMemory(addr, size));
auto process{system.Kernel().CurrentProcess()};
- process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, mapped_size);
+ process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
physical_memory_usage -= mapped_size;
return RESULT_SUCCESS;
@@ -526,7 +524,7 @@ ResultCode PageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
if (IsRegionMapped(dst_addr, size)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
PageLinkedList page_linked_list;
@@ -577,7 +575,7 @@ ResultCode PageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
AddRegionToPages(dst_addr, num_pages, dst_pages);
if (!dst_pages.IsEqual(src_pages)) {
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
{
@@ -626,11 +624,11 @@ ResultCode PageTable::MapPages(VAddr addr, PageLinkedList& page_linked_list, Mem
const std::size_t size{num_pages * PageSize};
if (!CanContain(addr, size, state)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (IsRegionMapped(addr, num_pages * PageSize)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
CASCADE_CODE(MapPages(addr, page_linked_list, perm));
@@ -768,7 +766,7 @@ ResultCode PageTable::SetHeapCapacity(std::size_t new_heap_capacity) {
ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
if (size > heap_region_end - heap_region_start) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
const u64 previous_heap_size{GetHeapSize()};
@@ -781,10 +779,14 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
const u64 delta{size - previous_heap_size};
- auto process{system.Kernel().CurrentProcess()};
- if (process->GetResourceLimit() && delta != 0 &&
- !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
- return ERR_RESOURCE_LIMIT_EXCEEDED;
+ // Reserve memory for the heap extension.
+ KScopedResourceReservation memory_reservation(
+ system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+ delta);
+
+ if (!memory_reservation.Succeeded()) {
+ LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
+ return ResultResourceLimitedExceeded;
}
PageLinkedList page_linked_list;
@@ -794,12 +796,15 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
if (IsRegionMapped(current_heap_addr, delta)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
CASCADE_CODE(
Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup));
+ // Allocation succeeded; commit the resource reservation.
+ memory_reservation.Commit();
+
block_manager->Update(current_heap_addr, num_pages, MemoryState::Normal,
MemoryPermission::ReadAndWrite);
@@ -816,17 +821,17 @@ ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, s
std::lock_guard lock{page_table_lock};
if (!CanContain(region_start, region_num_pages * PageSize, state)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (region_num_pages <= needed_num_pages) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
const VAddr addr{
AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
if (!addr) {
- return ERR_OUT_OF_MEMORY;
+ return ResultOutOfMemory;
}
if (is_map_only) {
@@ -1105,13 +1110,13 @@ constexpr ResultCode PageTable::CheckMemoryState(const MemoryInfo& info, MemoryS
MemoryAttribute attr) const {
// Validate the states match expectation
if ((info.state & state_mask) != state) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if ((info.perm & perm_mask) != perm) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if ((info.attribute & attr_mask) != attr) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
return RESULT_SUCCESS;
@@ -1138,14 +1143,14 @@ ResultCode PageTable::CheckMemoryState(MemoryState* out_state, MemoryPermission*
while (true) {
// Validate the current block
if (!(info.state == first_state)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!(info.perm == first_perm)) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!((info.attribute | static_cast<MemoryAttribute>(ignore_attr)) ==
(first_attr | static_cast<MemoryAttribute>(ignore_attr)))) {
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
// Validate against the provided masks
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
deleted file mode 100644
index 4f8075e0e..000000000
--- a/src/core/hle/kernel/mutex.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <memory>
-#include <utility>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/logging/log.h"
-#include "core/core.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/mutex.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-/// Returns the number of threads that are waiting for a mutex, and the highest priority one among
-/// those.
-static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
- const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {
-
- std::shared_ptr<Thread> highest_priority_thread;
- u32 num_waiters = 0;
-
- for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
- if (thread->GetMutexWaitAddress() != mutex_addr)
- continue;
-
- ++num_waiters;
- if (highest_priority_thread == nullptr ||
- thread->GetPriority() < highest_priority_thread->GetPriority()) {
- highest_priority_thread = thread;
- }
- }
-
- return {highest_priority_thread, num_waiters};
-}
-
-/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
-static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
- std::shared_ptr<Thread> new_owner) {
- current_thread->RemoveMutexWaiter(new_owner);
- const auto threads = current_thread->GetMutexWaitingThreads();
- for (const auto& thread : threads) {
- if (thread->GetMutexWaitAddress() != mutex_addr)
- continue;
-
- ASSERT(thread->GetLockOwner() == current_thread.get());
- current_thread->RemoveMutexWaiter(thread);
- if (new_owner != thread)
- new_owner->AddMutexWaiter(thread);
- }
-}
-
-Mutex::Mutex(Core::System& system) : system{system} {}
-Mutex::~Mutex() = default;
-
-ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
- Handle requesting_thread_handle) {
- // The mutex address must be 4-byte aligned
- if ((address % sizeof(u32)) != 0) {
- LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
- return ERR_INVALID_ADDRESS;
- }
-
- auto& kernel = system.Kernel();
- std::shared_ptr<Thread> current_thread =
- SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
- {
- KScopedSchedulerLock lock(kernel);
- // The mutex address must be 4-byte aligned
- if ((address % sizeof(u32)) != 0) {
- return ERR_INVALID_ADDRESS;
- }
-
- const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
- std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
- std::shared_ptr<Thread> requesting_thread =
- handle_table.Get<Thread>(requesting_thread_handle);
-
- // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of
- // another thread.
- ASSERT(requesting_thread == current_thread);
-
- current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
-
- const u32 addr_value = system.Memory().Read32(address);
-
- // If the mutex isn't being held, just return success.
- if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
- return RESULT_SUCCESS;
- }
-
- if (holding_thread == nullptr) {
- return ERR_INVALID_HANDLE;
- }
-
- // Wait until the mutex is released
- current_thread->SetMutexWaitAddress(address);
- current_thread->SetWaitHandle(requesting_thread_handle);
-
- current_thread->SetStatus(ThreadStatus::WaitMutex);
-
- // Update the lock holder thread's priority to prevent priority inversion.
- holding_thread->AddMutexWaiter(current_thread);
- }
-
- {
- KScopedSchedulerLock lock(kernel);
- auto* owner = current_thread->GetLockOwner();
- if (owner != nullptr) {
- owner->RemoveMutexWaiter(current_thread);
- }
- }
- return current_thread->GetSignalingResult();
-}
-
-std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
- VAddr address) {
- // The mutex address must be 4-byte aligned
- if ((address % sizeof(u32)) != 0) {
- LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
- return {ERR_INVALID_ADDRESS, nullptr};
- }
-
- auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
- if (new_owner == nullptr) {
- system.Memory().Write32(address, 0);
- return {RESULT_SUCCESS, nullptr};
- }
- // Transfer the ownership of the mutex from the previous owner to the new one.
- TransferMutexOwnership(address, owner, new_owner);
- u32 mutex_value = new_owner->GetWaitHandle();
- if (num_waiters >= 2) {
- // Notify the guest that there are still some threads waiting for the mutex
- mutex_value |= Mutex::MutexHasWaitersFlag;
- }
- new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
- new_owner->SetLockOwner(nullptr);
- new_owner->ResumeFromWait();
-
- system.Memory().Write32(address, mutex_value);
- return {RESULT_SUCCESS, new_owner};
-}
-
-ResultCode Mutex::Release(VAddr address) {
- auto& kernel = system.Kernel();
- KScopedSchedulerLock lock(kernel);
-
- std::shared_ptr<Thread> current_thread =
- SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
-
- auto [result, new_owner] = Unlock(current_thread, address);
-
- if (result != RESULT_SUCCESS && new_owner != nullptr) {
- new_owner->SetSynchronizationResults(nullptr, result);
- }
-
- return result;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
deleted file mode 100644
index 3b81dc3df..000000000
--- a/src/core/hle/kernel/mutex.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "common/common_types.h"
-
-union ResultCode;
-
-namespace Core {
-class System;
-}
-
-namespace Kernel {
-
-class Mutex final {
-public:
- explicit Mutex(Core::System& system);
- ~Mutex();
-
- /// Flag that indicates that a mutex still has threads waiting for it.
- static constexpr u32 MutexHasWaitersFlag = 0x40000000;
- /// Mask of the bits in a mutex address value that contain the mutex owner.
- static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
-
- /// Attempts to acquire a mutex at the specified address.
- ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
- Handle requesting_thread_handle);
-
- /// Unlocks a mutex for owner at address
- std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
- VAddr address);
-
- /// Releases the mutex at the specified address.
- ResultCode Release(VAddr address);
-
-private:
- Core::System& system;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp
index 2c571792b..d7f40c403 100644
--- a/src/core/hle/kernel/object.cpp
+++ b/src/core/hle/kernel/object.cpp
@@ -8,7 +8,10 @@
namespace Kernel {
-Object::Object(KernelCore& kernel) : kernel{kernel}, object_id{kernel.CreateNewObjectID()} {}
+Object::Object(KernelCore& kernel_)
+ : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{"[UNKNOWN KERNEL OBJECT]"} {}
+Object::Object(KernelCore& kernel_, std::string&& name_)
+ : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{std::move(name_)} {}
Object::~Object() = default;
bool Object::IsWaitable() const {
@@ -21,6 +24,7 @@ bool Object::IsWaitable() const {
return true;
case HandleType::Unknown:
+ case HandleType::Event:
case HandleType::WritableEvent:
case HandleType::SharedMemory:
case HandleType::TransferMemory:
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index e3391e2af..501e58b33 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -18,6 +18,7 @@ using Handle = u32;
enum class HandleType : u32 {
Unknown,
+ Event,
WritableEvent,
ReadableEvent,
SharedMemory,
@@ -34,7 +35,8 @@ enum class HandleType : u32 {
class Object : NonCopyable, public std::enable_shared_from_this<Object> {
public:
- explicit Object(KernelCore& kernel);
+ explicit Object(KernelCore& kernel_);
+ explicit Object(KernelCore& kernel_, std::string&& name_);
virtual ~Object();
/// Returns a unique identifier for the object. For debugging purposes only.
@@ -46,22 +48,30 @@ public:
return "[BAD KERNEL OBJECT TYPE]";
}
virtual std::string GetName() const {
- return "[UNKNOWN KERNEL OBJECT]";
+ return name;
}
virtual HandleType GetHandleType() const = 0;
+ void Close() {
+ // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use
+ // when we implement KAutoObject instead of using shared_ptr.
+ }
+
/**
* Check if a thread can wait on the object
* @return True if a thread can wait on the object, otherwise false
*/
bool IsWaitable() const;
+ virtual void Finalize() = 0;
+
protected:
/// The kernel instance this object was created under.
KernelCore& kernel;
private:
std::atomic<u32> object_id{0};
+ std::string name;
};
template <typename T>
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b905b486a..47b3ac57b 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -14,15 +14,16 @@
#include "core/device_memory.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
-#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/memory/slab_heap.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/lock.h"
#include "core/memory.h"
#include "core/settings.h"
@@ -38,11 +39,11 @@ namespace {
*/
void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
- ThreadType type = THREADTYPE_USER;
- auto thread_res = Thread::Create(system, type, "main", entry_point, priority, 0,
- owner_process.GetIdealCore(), stack_top, &owner_process);
+ ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
+ auto thread_res = KThread::Create(system, ThreadType::User, "main", entry_point, priority, 0,
+ owner_process.GetIdealCoreId(), stack_top, &owner_process);
- std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap();
+ std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();
// Register 1 must be a handle to the main thread
const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
@@ -55,7 +56,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
// Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
{
KScopedSchedulerLock lock{kernel};
- thread->SetStatus(ThreadStatus::Ready);
+ thread->SetState(ThreadState::Runnable);
}
}
} // Anonymous namespace
@@ -117,7 +118,10 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
std::shared_ptr<Process> process = std::make_shared<Process>(system);
process->name = std::move(name);
- process->resource_limit = ResourceLimit::Create(kernel);
+
+ // TODO: This is inaccurate
+ // The process should hold a reference to the kernel-wide resource limit.
+ process->resource_limit = std::make_shared<KResourceLimit>(kernel, system);
process->status = ProcessStatus::Created;
process->program_id = 0;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
@@ -133,12 +137,32 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
return process;
}
-std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
+std::shared_ptr<KResourceLimit> Process::GetResourceLimit() const {
return resource_limit;
}
+void Process::IncrementThreadCount() {
+ ASSERT(num_threads >= 0);
+ num_created_threads++;
+
+ if (const auto count = ++num_threads; count > peak_num_threads) {
+ peak_num_threads = count;
+ }
+}
+
+void Process::DecrementThreadCount() {
+ ASSERT(num_threads > 0);
+
+ if (const auto count = --num_threads; count == 0) {
+ UNIMPLEMENTED_MSG("Process termination is not implemented!");
+ }
+}
+
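IncrementThreadCount above uses a C++17 if-with-initializer so the post-increment result stays scoped to the peak check; the same pattern in isolation:

#include <cassert>

struct Counts {
    int num_threads{};
    int peak_num_threads{};

    void Increment() {
        if (const auto count = ++num_threads; count > peak_num_threads) {
            peak_num_threads = count;
        }
    }
    void Decrement() {
        assert(num_threads > 0);
        --num_threads;
    }
};

int main() {
    Counts c;
    c.Increment();
    c.Increment();
    c.Decrement();
    c.Increment();
    assert(c.num_threads == 2 && c.peak_num_threads == 2);
}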
u64 Process::GetTotalPhysicalMemoryAvailable() const {
- const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
+ // TODO: This is expected to always return the application memory pool size after accurately
+ // reserving kernel resources. The current workaround uses a process-local resource limit of
+ // application memory pool size, which is inaccurate.
+ const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
main_thread_stack_size};
@@ -162,68 +186,79 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}
-void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) {
- VAddr cond_var_addr = thread->GetCondVarWaitAddress();
- std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
- auto it = thread_list.begin();
- while (it != thread_list.end()) {
- const std::shared_ptr<Thread> current_thread = *it;
- if (current_thread->GetPriority() > thread->GetPriority()) {
- thread_list.insert(it, thread);
- return;
+bool Process::ReleaseUserException(KThread* thread) {
+ KScopedSchedulerLock sl{kernel};
+
+ if (exception_thread == thread) {
+ exception_thread = nullptr;
+
+ // Remove waiter thread.
+ s32 num_waiters{};
+ KThread* next = thread->RemoveWaiterByKey(
+ std::addressof(num_waiters),
+ reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
+ if (next != nullptr) {
+ if (next->GetState() == ThreadState::Waiting) {
+ next->SetState(ThreadState::Runnable);
+ } else {
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
+ }
}
- ++it;
+
+ return true;
+ } else {
+ return false;
}
- thread_list.push_back(thread);
}
-void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
- VAddr cond_var_addr = thread->GetCondVarWaitAddress();
- std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
- auto it = thread_list.begin();
- while (it != thread_list.end()) {
- const std::shared_ptr<Thread> current_thread = *it;
- if (current_thread.get() == thread.get()) {
- thread_list.erase(it);
- return;
- }
- ++it;
- }
+void Process::PinCurrentThread() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Get the current thread.
+ const s32 core_id = GetCurrentCoreId(kernel);
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
+
+ // Pin it.
+ PinThread(core_id, cur_thread);
+ cur_thread->Pin();
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
-std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
- const VAddr cond_var_addr) {
- std::vector<std::shared_ptr<Thread>> result{};
- std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
- auto it = thread_list.begin();
- while (it != thread_list.end()) {
- std::shared_ptr<Thread> current_thread = *it;
- result.push_back(current_thread);
- ++it;
- }
- return result;
+void Process::UnpinCurrentThread() {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Get the current thread.
+ const s32 core_id = GetCurrentCoreId(kernel);
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
+
+ // Unpin it.
+ cur_thread->Unpin();
+ UnpinThread(core_id, cur_thread);
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
-void Process::RegisterThread(const Thread* thread) {
+void Process::RegisterThread(const KThread* thread) {
thread_list.push_back(thread);
}
-void Process::UnregisterThread(const Thread* thread) {
+void Process::UnregisterThread(const KThread* thread) {
thread_list.remove(thread);
}
-ResultCode Process::ClearSignalState() {
- KScopedSchedulerLock lock(system.Kernel());
- if (status == ProcessStatus::Exited) {
- LOG_ERROR(Kernel, "called on a terminated process instance.");
- return ERR_INVALID_STATE;
- }
+ResultCode Process::Reset() {
+ // Lock the process and the scheduler.
+ KScopedLightLock lk(state_lock);
+ KScopedSchedulerLock sl{kernel};
- if (!is_signaled) {
- LOG_ERROR(Kernel, "called on a process instance that isn't signaled.");
- return ERR_INVALID_STATE;
- }
+ // Validate that we're in a state that we can reset.
+ R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
+ R_UNLESS(is_signaled, ResultInvalidState);
+ // Clear signaled.
is_signaled = false;
return RESULT_SUCCESS;
}
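R_UNLESS, used in Reset above, returns the given result from the enclosing function when its condition does not hold. A minimal stand-in (the real macro lives in yuzu's kernel headers; ResultCode is simplified to an int here):

#include <cassert>

using ResultCode = int;
constexpr ResultCode ResultSuccess = 0;
constexpr ResultCode ResultInvalidState = 1;

#define R_UNLESS(expr, res)  \
    do {                     \
        if (!(expr)) {       \
            return res;      \
        }                    \
    } while (0)

ResultCode Reset(bool exited, bool& is_signaled) {
    R_UNLESS(!exited, ResultInvalidState);
    R_UNLESS(is_signaled, ResultInvalidState);
    is_signaled = false;
    return ResultSuccess;
}

int main() {
    bool signaled = true;
    assert(Reset(false, signaled) == ResultSuccess);
    assert(!signaled);
    assert(Reset(false, signaled) == ResultInvalidState); // not signaled
}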
@@ -236,6 +271,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
system_resource_size = metadata.GetSystemResourceSize();
image_size = code_size;
+ // Set initial resource limits
+ resource_limit->SetLimitValue(
+ LimitableResource::PhysicalMemory,
+ kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
+ KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
+ code_size + system_resource_size);
+ if (!memory_reservation.Succeeded()) {
+ LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
+ code_size + system_resource_size);
+ return ResultResourceLimitedExceeded;
+ }
// Initialize process address space
if (const ResultCode result{
page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
@@ -277,24 +323,22 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
UNREACHABLE();
}
- // Set initial resource limits
- resource_limit->SetLimitValue(
- ResourceType::PhysicalMemory,
- kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
- resource_limit->SetLimitValue(ResourceType::Threads, 608);
- resource_limit->SetLimitValue(ResourceType::Events, 700);
- resource_limit->SetLimitValue(ResourceType::TransferMemory, 128);
- resource_limit->SetLimitValue(ResourceType::Sessions, 894);
- ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size));
+ resource_limit->SetLimitValue(LimitableResource::Threads, 608);
+ resource_limit->SetLimitValue(LimitableResource::Events, 700);
+ resource_limit->SetLimitValue(LimitableResource::TransferMemory, 128);
+ resource_limit->SetLimitValue(LimitableResource::Sessions, 894);
// Create TLS region
tls_region_address = CreateTLSRegion();
+ memory_reservation.Commit();
return handle_table.SetSize(capabilities.GetHandleTableSize());
}
void Process::Run(s32 main_thread_priority, u64 stack_size) {
AllocateMainThreadStack(stack_size);
+ resource_limit->Reserve(LimitableResource::Threads, 1);
+ resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());
@@ -302,14 +346,12 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
ChangeStatus(ProcessStatus::Running);
SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
- resource_limit->Reserve(ResourceType::Threads, 1);
- resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
}
void Process::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
- const auto stop_threads = [this](const std::vector<std::shared_ptr<Thread>>& thread_list) {
+ const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) {
for (auto& thread : thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
@@ -318,10 +360,10 @@ void Process::PrepareForTermination() {
continue;
// TODO(Subv): When are the other running/ready threads terminated?
- ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynch,
+ ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
"Exiting processes with non-waiting threads is currently unimplemented");
- thread->Stop();
+ thread->Exit();
}
};
@@ -330,6 +372,11 @@ void Process::PrepareForTermination() {
FreeTLSRegion(tls_region_address);
tls_region_address = 0;
+ if (resource_limit) {
+ resource_limit->Release(LimitableResource::PhysicalMemory,
+ main_thread_stack_size + image_size);
+ }
+
ChangeStatus(ProcessStatus::Exited);
}
@@ -406,21 +453,18 @@ void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite);
}
+bool Process::IsSignaled() const {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ return is_signaled;
+}
+
Process::Process(Core::System& system)
- : SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>(
- system)},
- handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}
+ : KSynchronizationObject{system.Kernel()},
+ page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
+ address_arbiter{system}, condition_var{system}, state_lock{system.Kernel()}, system{system} {}
Process::~Process() = default;
-void Process::Acquire(Thread* thread) {
- ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
-}
-
-bool Process::ShouldWait(const Thread* thread) const {
- return !is_signaled;
-}
-
void Process::ChangeStatus(ProcessStatus new_status) {
if (status == new_status) {
return;
@@ -428,7 +472,7 @@ void Process::ChangeStatus(ProcessStatus new_status) {
status = new_status;
is_signaled = true;
- Signal();
+ NotifyAvailable();
}
ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index e412e58aa..320b0f347 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -11,11 +11,11 @@
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
-#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/mutex.h"
+#include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
namespace Core {
@@ -29,8 +29,8 @@ class ProgramMetadata;
namespace Kernel {
class KernelCore;
-class ResourceLimit;
-class Thread;
+class KResourceLimit;
+class KThread;
class TLSPage;
struct CodeSet;
@@ -63,7 +63,7 @@ enum class ProcessStatus {
DebugBreak,
};
-class Process final : public SynchronizationObject {
+class Process final : public KSynchronizationObject {
public:
explicit Process(Core::System& system);
~Process() override;
@@ -123,24 +123,30 @@ public:
return handle_table;
}
- /// Gets a reference to the process' address arbiter.
- AddressArbiter& GetAddressArbiter() {
- return address_arbiter;
+ ResultCode SignalToAddress(VAddr address) {
+ return condition_var.SignalToAddress(address);
}
- /// Gets a const reference to the process' address arbiter.
- const AddressArbiter& GetAddressArbiter() const {
- return address_arbiter;
+ ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
+ return condition_var.WaitForAddress(handle, address, tag);
}
- /// Gets a reference to the process' mutex lock.
- Mutex& GetMutex() {
- return mutex;
+ void SignalConditionVariable(u64 cv_key, int32_t count) {
+ return condition_var.Signal(cv_key, count);
}
- /// Gets a const reference to the process' mutex lock
- const Mutex& GetMutex() const {
- return mutex;
+ ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+ return condition_var.Wait(address, cv_key, tag, ns);
+ }
+
+ ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
+ s32 count) {
+ return address_arbiter.SignalToAddress(address, signal_type, value, count);
+ }
+
+ ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+ s64 timeout) {
+ return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
}
/// Gets the address to the process' dedicated TLS region.
@@ -164,13 +170,18 @@ public:
}
/// Gets the resource limit descriptor for this process
- std::shared_ptr<ResourceLimit> GetResourceLimit() const;
+ std::shared_ptr<KResourceLimit> GetResourceLimit() const;
/// Gets the ideal CPU core ID for this process
- u8 GetIdealCore() const {
+ u8 GetIdealCoreId() const {
return ideal_core;
}
+ /// Checks if the specified thread priority is valid.
+ bool CheckThreadPriority(s32 prio) const {
+ return ((1ULL << prio) & GetPriorityMask()) != 0;
+ }
+
/// Gets the bitmask of allowed cores that this process' threads can run on.
u64 GetCoreMask() const {
return capabilities.GetCoreMask();
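// CheckThreadPriority and GetCoreMask both treat process capabilities as
// bitsets indexed by priority/core number. A quick worked example with a
// hypothetical mask allowing priorities 24..59 (values illustrative):
#include <cstdint>
constexpr std::uint64_t priority_mask = ((1ULL << 60) - 1) & ~((1ULL << 24) - 1);
static_assert(((1ULL << 44) & priority_mask) != 0); // priority 44 is allowed
static_assert(((1ULL << 10) & priority_mask) == 0); // priority 10 is rejected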
@@ -206,6 +217,14 @@ public:
return is_64bit_process;
}
+ [[nodiscard]] bool IsSuspended() const {
+ return is_suspended;
+ }
+
+ void SetSuspended(bool suspended) {
+ is_suspended = suspended;
+ }
+
/// Gets the total running time of the process instance in ticks.
u64 GetCPUTimeTicks() const {
return total_process_running_time_ticks;
@@ -226,6 +245,33 @@ public:
++schedule_count;
}
+ void IncrementThreadCount();
+ void DecrementThreadCount();
+
+ void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
+ running_threads[core] = thread;
+ running_thread_idle_counts[core] = idle_count;
+ }
+
+ void ClearRunningThread(KThread* thread) {
+ for (size_t i = 0; i < running_threads.size(); ++i) {
+ if (running_threads[i] == thread) {
+ running_threads[i] = nullptr;
+ }
+ }
+ }
+
+ [[nodiscard]] KThread* GetRunningThread(s32 core) const {
+ return running_threads[core];
+ }
+
+ bool ReleaseUserException(KThread* thread);
+
+ [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ return pinned_threads[core_id];
+ }
+
/// Gets 8 bytes of random data for svcGetInfo RandomEntropy
u64 GetRandomEntropy(std::size_t index) const {
return random_entropy.at(index);
@@ -246,26 +292,17 @@ public:
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
/// Gets the list of all threads created with this process as their owner.
- const std::list<const Thread*>& GetThreadList() const {
+ const std::list<const KThread*>& GetThreadList() const {
return thread_list;
}
- /// Insert a thread into the condition variable wait container
- void InsertConditionVariableThread(std::shared_ptr<Thread> thread);
-
- /// Remove a thread from the condition variable wait container
- void RemoveConditionVariableThread(std::shared_ptr<Thread> thread);
-
- /// Obtain all condition variable threads waiting for some address
- std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr cond_var_addr);
-
/// Registers a thread as being created under this process,
/// adding it to this process' thread list.
- void RegisterThread(const Thread* thread);
+ void RegisterThread(const KThread* thread);
/// Unregisters a thread from this process, removing it
/// from this process' thread list.
- void UnregisterThread(const Thread* thread);
+ void UnregisterThread(const KThread* thread);
/// Clears the signaled state of the process if and only if it's signaled.
///
@@ -275,7 +312,7 @@ public:
/// @pre The process must be in a signaled state. If this is called on a
/// process instance that is not signaled, ResultInvalidState will be
/// returned.
- ResultCode ClearSignalState();
+ ResultCode Reset();
/**
* Loads process-specifics configuration info with metadata provided
@@ -304,6 +341,17 @@ public:
void LoadModule(CodeSet code_set, VAddr base_addr);
+ bool IsSignaled() const override;
+
+ void Finalize() override {}
+
+ void PinCurrentThread();
+ void UnpinCurrentThread();
+
+ KLightLock& GetStateLock() {
+ return state_lock;
+ }
+
///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management
@@ -314,11 +362,19 @@ public:
void FreeTLSRegion(VAddr tls_address);
private:
- /// Checks if the specified thread should wait until this process is available.
- bool ShouldWait(const Thread* thread) const override;
+ void PinThread(s32 core_id, KThread* thread) {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ ASSERT(thread != nullptr);
+ ASSERT(pinned_threads[core_id] == nullptr);
+ pinned_threads[core_id] = thread;
+ }
- /// Acquires/locks this process for the specified thread if it's available.
- void Acquire(Thread* thread) override;
+ void UnpinThread(s32 core_id, KThread* thread) {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ ASSERT(thread != nullptr);
+ ASSERT(pinned_threads[core_id] == thread);
+ pinned_threads[core_id] = nullptr;
+ }
/// Changes the process status. If the status is different
/// from the current process status, then this will trigger
@@ -346,7 +402,7 @@ private:
u32 system_resource_size = 0;
/// Resource limit descriptor for this process
- std::shared_ptr<ResourceLimit> resource_limit;
+ std::shared_ptr<KResourceLimit> resource_limit;
/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0;
@@ -373,12 +429,12 @@ private:
HandleTable handle_table;
/// Per-process address arbiter.
- AddressArbiter address_arbiter;
+ KAddressArbiter address_arbiter;
/// The per-process mutex lock instance used for handling various
/// forms of services, such as lock arbitration, and condition
/// variable related facilities.
- Mutex mutex;
+ KConditionVariable condition_var;
/// Address indicating the location of the process' dedicated TLS region.
VAddr tls_region_address = 0;
@@ -387,10 +443,7 @@ private:
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
/// List of threads that are running with this process as their owner.
- std::list<const Thread*> thread_list;
-
- /// List of threads waiting for a condition variable
- std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;
+ std::list<const KThread*> thread_list;
/// Address of the top of the main thread's stack
VAddr main_thread_stack_top{};
@@ -410,6 +463,21 @@ private:
/// Schedule count of this process
s64 schedule_count{};
+ bool is_signaled{};
+ bool is_suspended{};
+
+ std::atomic<s32> num_created_threads{};
+ std::atomic<u16> num_threads{};
+ u16 peak_num_threads{};
+
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
+ std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
+
+ KThread* exception_thread{};
+
+ KLightLock state_lock;
+
/// System context
Core::System& system;
};
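// The new state_lock member is a KLightLock, always taken through a scoped
// guard (see KScopedLightLock in Process::Reset()). A minimal sketch of that
// guard, assuming the usual Lock()/Unlock() pairing from k_light_lock.h:
class KScopedLightLock {
public:
    explicit KScopedLightLock(KLightLock& l) : lock(l) {
        lock.Lock(); // Blocks until the light lock is held.
    }
    ~KScopedLightLock() {
        lock.Unlock();
    }
    KScopedLightLock(const KScopedLightLock&) = delete;
    KScopedLightLock& operator=(const KScopedLightLock&) = delete;
private:
    KLightLock& lock;
};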
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 0f128c586..7c567049e 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -2,12 +2,14 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <bit>
+
#include "common/bit_util.h"
#include "common/logging/log.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process_capability.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel {
namespace {
@@ -60,7 +62,7 @@ constexpr CapabilityType GetCapabilityType(u32 value) {
u32 GetFlagBitOffset(CapabilityType type) {
const auto value = static_cast<u32>(type);
- return static_cast<u32>(Common::BitSize<u32>() - Common::CountLeadingZeroes32(value));
+ return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value)));
}
} // Anonymous namespace
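// The std::countl_zero change is behavior-preserving: for non-zero u32
// inputs it matches Common::CountLeadingZeroes32, so GetFlagBitOffset still
// yields 32 minus the leading-zero count. A quick C++20 check:
#include <bit>
#include <cstdint>
static_assert(std::countl_zero(std::uint32_t{0b111}) == 29); // offset 32 - 29 == 3
static_assert(std::countl_zero(std::uint32_t{1} << 31) == 0); // MSB set -> offset 32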
@@ -121,13 +123,13 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
// If there's only one, then there's a problem.
if (i >= num_capabilities) {
LOG_ERROR(Kernel, "Invalid combination! i={}", i);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
const auto size_flags = capabilities[i];
if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
@@ -157,7 +159,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
const auto type = GetCapabilityType(flag);
if (type == CapabilityType::Unset) {
- return ERR_INVALID_CAPABILITY_DESCRIPTOR;
+ return ResultInvalidCapabilityDescriptor;
}
// Bail early on ignorable entries, as one would expect,
@@ -174,7 +176,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
LOG_ERROR(Kernel,
"Attempted to initialize flags that may only be initialized once. set_flags={}",
set_flags);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
set_flags |= set_flag;
@@ -200,7 +202,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
}
LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
- return ERR_INVALID_CAPABILITY_DESCRIPTOR;
+ return ResultInvalidCapabilityDescriptor;
}
void ProcessCapabilities::Clear() {
@@ -223,7 +225,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) {
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
priority_mask, core_mask);
- return ERR_INVALID_CAPABILITY_DESCRIPTOR;
+ return ResultInvalidCapabilityDescriptor;
}
const u32 core_num_min = (flags >> 16) & 0xFF;
@@ -231,7 +233,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (core_num_min > core_num_max) {
LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
core_num_min, core_num_max);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
const u32 priority_min = (flags >> 10) & 0x3F;
@@ -240,13 +242,13 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
LOG_ERROR(Kernel,
"Priority min is greater than priority max! priority_min={}, priority_max={}",
priority_min, priority_max);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
// The switch only has 4 usable cores.
if (core_num_max >= 4) {
LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
- return ERR_INVALID_PROCESSOR_ID;
+ return ResultInvalidCoreId;
}
const auto make_mask = [](u64 min, u64 max) {
@@ -267,7 +269,7 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
// If we've already set this svc before, bail.
if ((set_svc_bits & svc_bit) != 0) {
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
set_svc_bits |= svc_bit;
@@ -281,7 +283,7 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
if (svc_number >= svc_capabilities.size()) {
LOG_ERROR(Kernel, "Process svc capability is out of range! svc_number={}", svc_number);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
svc_capabilities[svc_number] = true;
@@ -319,7 +321,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
if (interrupt >= interrupt_capabilities.size()) {
LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
interrupt);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
interrupt_capabilities[interrupt] = true;
@@ -332,7 +334,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ERR_RESERVED_VALUE;
+ return ResultReservedValue;
}
program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
@@ -352,7 +354,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
LOG_ERROR(Kernel,
"Kernel version is non zero or flags are too small! major_version={}, flags={}",
major_version, flags);
- return ERR_INVALID_CAPABILITY_DESCRIPTOR;
+ return ResultInvalidCapabilityDescriptor;
}
kernel_version = flags;
@@ -363,7 +365,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ERR_RESERVED_VALUE;
+ return ResultReservedValue;
}
handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
@@ -374,7 +376,7 @@ ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ERR_RESERVED_VALUE;
+ return ResultReservedValue;
}
is_debuggable = (flags & 0x20000) != 0;
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
deleted file mode 100644
index cea262ce0..000000000
--- a/src/core/hle/kernel/readable_event.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include "common/assert.h"
-#include "common/logging/log.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/thread.h"
-
-namespace Kernel {
-
-ReadableEvent::ReadableEvent(KernelCore& kernel) : SynchronizationObject{kernel} {}
-ReadableEvent::~ReadableEvent() = default;
-
-bool ReadableEvent::ShouldWait(const Thread* thread) const {
- return !is_signaled;
-}
-
-void ReadableEvent::Acquire(Thread* thread) {
- ASSERT_MSG(IsSignaled(), "object unavailable!");
-}
-
-void ReadableEvent::Signal() {
- if (is_signaled) {
- return;
- }
-
- is_signaled = true;
- SynchronizationObject::Signal();
-}
-
-void ReadableEvent::Clear() {
- is_signaled = false;
-}
-
-ResultCode ReadableEvent::Reset() {
- KScopedSchedulerLock lock(kernel);
- if (!is_signaled) {
- LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
- GetObjectId(), GetTypeName(), GetName());
- return ERR_INVALID_STATE;
- }
-
- Clear();
-
- return RESULT_SUCCESS;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
deleted file mode 100644
index 3264dd066..000000000
--- a/src/core/hle/kernel/readable_event.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/synchronization_object.h"
-
-union ResultCode;
-
-namespace Kernel {
-
-class KernelCore;
-class WritableEvent;
-
-class ReadableEvent final : public SynchronizationObject {
- friend class WritableEvent;
-
-public:
- ~ReadableEvent() override;
-
- std::string GetTypeName() const override {
- return "ReadableEvent";
- }
- std::string GetName() const override {
- return name;
- }
-
- static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
- HandleType GetHandleType() const override {
- return HANDLE_TYPE;
- }
-
- bool ShouldWait(const Thread* thread) const override;
- void Acquire(Thread* thread) override;
-
- /// Unconditionally clears the readable event's state.
- void Clear();
-
- /// Clears the readable event's state if and only if it
- /// has already been signaled.
- ///
- /// @pre The event must be in a signaled state. If this event
- /// is in an unsignaled state and this function is called,
- /// then ERR_INVALID_STATE will be returned.
- ResultCode Reset();
-
- void Signal() override;
-
-private:
- explicit ReadableEvent(KernelCore& kernel);
-
- std::string name; ///< Name of event (optional)
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
deleted file mode 100644
index 7bf50339d..000000000
--- a/src/core/hle/kernel/resource_limit.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/resource_limit.h"
-#include "core/hle/result.h"
-
-namespace Kernel {
-namespace {
-constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
- return static_cast<std::size_t>(type);
-}
-} // Anonymous namespace
-
-ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
-ResourceLimit::~ResourceLimit() = default;
-
-bool ResourceLimit::Reserve(ResourceType resource, s64 amount) {
- return Reserve(resource, amount, 10000000000);
-}
-
-bool ResourceLimit::Reserve(ResourceType resource, s64 amount, u64 timeout) {
- const std::size_t index{ResourceTypeToIndex(resource)};
-
- s64 new_value = current[index] + amount;
- if (new_value > limit[index] && available[index] + amount <= limit[index]) {
- // TODO(bunnei): This is wrong for multicore, we should wait the calling thread for timeout
- new_value = current[index] + amount;
- }
-
- if (new_value <= limit[index]) {
- current[index] = new_value;
- return true;
- }
- return false;
-}
-
-void ResourceLimit::Release(ResourceType resource, u64 amount) {
- Release(resource, amount, amount);
-}
-
-void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
- const std::size_t index{ResourceTypeToIndex(resource)};
-
- current[index] -= used_amount;
- available[index] -= available_amount;
-}
-
-std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
- return std::make_shared<ResourceLimit>(kernel);
-}
-
-s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
- return limit.at(ResourceTypeToIndex(resource)) - current.at(ResourceTypeToIndex(resource));
-}
-
-s64 ResourceLimit::GetMaxResourceValue(ResourceType resource) const {
- return limit.at(ResourceTypeToIndex(resource));
-}
-
-ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
- const std::size_t index{ResourceTypeToIndex(resource)};
- if (current[index] <= value) {
- limit[index] = value;
- return RESULT_SUCCESS;
- } else {
- LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}", resource,
- value, index);
- return ERR_INVALID_STATE;
- }
-}
-} // namespace Kernel
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
deleted file mode 100644
index 936cc4d0f..000000000
--- a/src/core/hle/kernel/resource_limit.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <array>
-#include <memory>
-
-#include "common/common_types.h"
-#include "core/hle/kernel/object.h"
-
-union ResultCode;
-
-namespace Kernel {
-
-class KernelCore;
-
-enum class ResourceType : u32 {
- PhysicalMemory,
- Threads,
- Events,
- TransferMemory,
- Sessions,
-
- // Used as a count, not an actual type.
- ResourceTypeCount
-};
-
-constexpr bool IsValidResourceType(ResourceType type) {
- return type < ResourceType::ResourceTypeCount;
-}
-
-class ResourceLimit final : public Object {
-public:
- explicit ResourceLimit(KernelCore& kernel);
- ~ResourceLimit() override;
-
- /// Creates a resource limit object.
- static std::shared_ptr<ResourceLimit> Create(KernelCore& kernel);
-
- std::string GetTypeName() const override {
- return "ResourceLimit";
- }
- std::string GetName() const override {
- return GetTypeName();
- }
-
- static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
- HandleType GetHandleType() const override {
- return HANDLE_TYPE;
- }
-
- bool Reserve(ResourceType resource, s64 amount);
- bool Reserve(ResourceType resource, s64 amount, u64 timeout);
- void Release(ResourceType resource, u64 amount);
- void Release(ResourceType resource, u64 used_amount, u64 available_amount);
-
- /**
- * Gets the current value for the specified resource.
- * @param resource Requested resource type
- * @returns The current value of the resource type
- */
- s64 GetCurrentResourceValue(ResourceType resource) const;
-
- /**
- * Gets the max value for the specified resource.
- * @param resource Requested resource type
- * @returns The max value of the resource type
- */
- s64 GetMaxResourceValue(ResourceType resource) const;
-
- /**
- * Sets the limit value for a given resource type.
- *
- * @param resource The resource type to apply the limit to.
- * @param value The limit to apply to the given resource type.
- *
- * @return A result code indicating if setting the limit value
- * was successful or not.
- *
- * @note The supplied limit value *must* be greater than or equal to
- * the current resource value for the given resource type,
- * otherwise ERR_INVALID_STATE will be returned.
- */
- ResultCode SetLimitValue(ResourceType resource, s64 value);
-
-private:
- // TODO(Subv): Increment resource limit current values in their respective Kernel::T::Create
- // functions
- //
- // Currently we have no way of distinguishing if a Create was called by the running application,
- // or by a service module. Approach this once we have separated the service modules into their
- // own processes
-
- using ResourceArray =
- std::array<s64, static_cast<std::size_t>(ResourceType::ResourceTypeCount)>;
-
- ResourceArray limit{};
- ResourceArray current{};
- ResourceArray available{};
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index a549ae9d7..5d17346ad 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -5,20 +5,20 @@
#include <tuple>
#include "common/assert.h"
#include "core/hle/kernel/client_port.h"
-#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel {
-ServerPort::ServerPort(KernelCore& kernel) : SynchronizationObject{kernel} {}
+ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ServerPort::~ServerPort() = default;
ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
if (pending_sessions.empty()) {
- return ERR_NOT_FOUND;
+ return ResultNotFound;
}
auto session = std::move(pending_sessions.back());
@@ -28,15 +28,9 @@ ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
pending_sessions.push_back(std::move(pending_session));
-}
-
-bool ServerPort::ShouldWait(const Thread* thread) const {
- // If there are no pending sessions, we wait until a new one is added.
- return pending_sessions.empty();
-}
-
-void ServerPort::Acquire(Thread* thread) {
- ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
+ if (pending_sessions.size() == 1) {
+ NotifyAvailable();
+ }
}
bool ServerPort::IsSignaled() const {
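// AppendPendingSession now calls NotifyAvailable() only on the empty ->
// non-empty transition: KSynchronizationObject waiters re-test IsSignaled()
// when woken, so notifying an already-signaled port again would be redundant.
// The same edge-triggered pattern in isolation (sketch; NotifyAvailable is
// the assumed wake-up hook):
#include <utility>
#include <vector>
struct Item {};
void NotifyAvailable(); // Wakes threads currently waiting on this object.
void Push(std::vector<Item>& queue, Item item) {
    queue.push_back(std::move(item));
    if (queue.size() == 1) {
        // Only the first pending entry changes the signaled state.
        NotifyAvailable();
    }
}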
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index 41b191b86..29b4f2509 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -9,8 +9,8 @@
#include <utility>
#include <vector>
#include "common/common_types.h"
+#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
namespace Kernel {
@@ -20,7 +20,7 @@ class KernelCore;
class ServerSession;
class SessionRequestHandler;
-class ServerPort final : public SynchronizationObject {
+class ServerPort final : public KSynchronizationObject {
public:
explicit ServerPort(KernelCore& kernel);
~ServerPort() override;
@@ -79,11 +79,10 @@ public:
/// waiting to be accepted by this port.
void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);
- bool ShouldWait(const Thread* thread) const override;
- void Acquire(Thread* thread) override;
-
bool IsSignaled() const override;
+ void Finalize() override {}
+
private:
/// ServerSessions waiting to be accepted by the port
std::vector<std::shared_ptr<ServerSession>> pending_sessions;
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index b40fe3916..790dbb998 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -15,16 +15,16 @@
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
-#include "core/hle/kernel/thread.h"
#include "core/memory.h"
namespace Kernel {
-ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
+ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ServerSession::~ServerSession() {
kernel.ReleaseServiceThread(service_thread);
@@ -42,16 +42,6 @@ ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kern
return MakeResult(std::move(session));
}
-bool ServerSession::ShouldWait(const Thread* thread) const {
- // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
- if (!parent->Client()) {
- return false;
- }
-
- // Wait if we have no pending requests, or if we're currently handling a request.
- return pending_requesting_threads.empty() || currently_handling != nullptr;
-}
-
bool ServerSession::IsSignaled() const {
// Closed sessions should never wait; an error will be returned from svcReplyAndReceive.
if (!parent->Client()) {
@@ -62,15 +52,6 @@ bool ServerSession::IsSignaled() const {
return !pending_requesting_threads.empty() && currently_handling == nullptr;
}
-void ServerSession::Acquire(Thread* thread) {
- ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
- // We are now handling a request, pop it from the stack.
- // TODO(Subv): What happens if the client endpoint is closed before any requests are made?
- ASSERT(!pending_requesting_threads.empty());
- currently_handling = pending_requesting_threads.back();
- pending_requesting_threads.pop_back();
-}
-
void ServerSession::ClientDisconnected() {
// We keep a shared pointer to the hle handler to keep it alive throughout
// the call to ClientDisconnected, as ClientDisconnected invalidates the
@@ -135,7 +116,7 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
return RESULT_SUCCESS;
}
-ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
+ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread,
Core::Memory::Memory& memory) {
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
auto context =
@@ -172,15 +153,15 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
{
KScopedSchedulerLock lock(kernel);
if (!context.IsThreadWaiting()) {
- context.GetThread().ResumeFromWait();
- context.GetThread().SetSynchronizationResults(nullptr, result);
+ context.GetThread().Wakeup();
+ context.GetThread().SetSyncedObject(nullptr, result);
}
}
return result;
}
-ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
+ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread,
Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing) {
return QueueSyncRequest(std::move(thread), memory);
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index e8d1d99ea..c42d5ee59 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -10,8 +10,8 @@
#include <vector>
#include "common/threadsafe_queue.h"
+#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/service_thread.h"
-#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
namespace Core::Memory {
@@ -29,7 +29,7 @@ class HLERequestContext;
class KernelCore;
class Session;
class SessionRequestHandler;
-class Thread;
+class KThread;
/**
* Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS
@@ -43,7 +43,7 @@ class Thread;
* After the server replies to the request, the response is marshalled back to the caller's
* TLS buffer and control is transferred back to it.
*/
-class ServerSession final : public SynchronizationObject {
+class ServerSession final : public KSynchronizationObject {
friend class ServiceThread;
public:
@@ -77,8 +77,6 @@ public:
return parent.get();
}
- bool IsSignaled() const override;
-
/**
* Sets the HLE handler for the session. This handler will be called to service IPC requests
* instead of the regular IPC machinery. (The regular IPC machinery is currently not
@@ -97,13 +95,9 @@ public:
*
* @returns ResultCode from the operation.
*/
- ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
+ ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing);
- bool ShouldWait(const Thread* thread) const override;
-
- void Acquire(Thread* thread) override;
-
/// Called when a client disconnection occurs.
void ClientDisconnected();
@@ -130,9 +124,13 @@ public:
convert_to_domain = true;
}
+ bool IsSignaled() const override;
+
+ void Finalize() override {}
+
private:
/// Queues a sync request from the emulated application.
- ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
+ ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory);
/// Completes a sync request from the emulated application.
ResultCode CompleteSyncRequest(HLERequestContext& context);
@@ -153,12 +151,12 @@ private:
/// List of threads that are pending a response after a sync request. This list is processed in
/// a LIFO manner, thus, the last request will be dispatched first.
/// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
- std::vector<std::shared_ptr<Thread>> pending_requesting_threads;
+ std::vector<std::shared_ptr<KThread>> pending_requesting_threads;
/// Thread whose request is currently being handled. A request is considered "handled" when a
/// response is sent via svcReplyAndReceive.
/// TODO(Subv): Find a better name for this.
- std::shared_ptr<Thread> currently_handling;
+ std::shared_ptr<KThread> currently_handling;
/// When set to True, converts the session to a domain at the end of the command
bool convert_to_domain{};
diff --git a/src/core/hle/kernel/session.cpp b/src/core/hle/kernel/session.cpp
index e4dd53e24..8830d4e91 100644
--- a/src/core/hle/kernel/session.cpp
+++ b/src/core/hle/kernel/session.cpp
@@ -4,15 +4,23 @@
#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
namespace Kernel {
-Session::Session(KernelCore& kernel) : SynchronizationObject{kernel} {}
-Session::~Session() = default;
+Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
+Session::~Session() {
+ // Release the session resource that was reserved when the Session pair was created.
+ kernel.GetSystemResourceLimit()->Release(LimitableResource::Sessions, 1);
+}
Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
+ // Reserve a new session from the resource limit.
+ KScopedResourceReservation session_reservation(kernel.GetSystemResourceLimit(),
+ LimitableResource::Sessions);
+ ASSERT(session_reservation.Succeeded());
auto session{std::make_shared<Session>(kernel)};
auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()};
auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()};
@@ -21,21 +29,13 @@ Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
session->client = client_session;
session->server = server_session;
+ session_reservation.Commit();
return std::make_pair(std::move(client_session), std::move(server_session));
}
-bool Session::ShouldWait(const Thread* thread) const {
- UNIMPLEMENTED();
- return {};
-}
-
bool Session::IsSignaled() const {
UNIMPLEMENTED();
return true;
}
-void Session::Acquire(Thread* thread) {
- UNIMPLEMENTED();
-}
-
} // namespace Kernel
diff --git a/src/core/hle/kernel/session.h b/src/core/hle/kernel/session.h
index 7cd9c0d77..fa3c5651a 100644
--- a/src/core/hle/kernel/session.h
+++ b/src/core/hle/kernel/session.h
@@ -8,7 +8,7 @@
#include <string>
#include <utility>
-#include "core/hle/kernel/synchronization_object.h"
+#include "core/hle/kernel/k_synchronization_object.h"
namespace Kernel {
@@ -19,7 +19,7 @@ class ServerSession;
* Parent structure to link the client and server endpoints of a session with their associated
* client port.
*/
-class Session final : public SynchronizationObject {
+class Session final : public KSynchronizationObject {
public:
explicit Session(KernelCore& kernel);
~Session() override;
@@ -37,11 +37,9 @@ public:
return HANDLE_TYPE;
}
- bool ShouldWait(const Thread* thread) const override;
-
bool IsSignaled() const override;
- void Acquire(Thread* thread) override;
+ void Finalize() override {}
std::shared_ptr<ClientSession> Client() {
if (auto result{client.lock()}) {
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index 0cd467110..2eadd51d7 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -4,6 +4,7 @@
#include "common/assert.h"
#include "core/core.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/shared_memory.h"
@@ -13,7 +14,9 @@ namespace Kernel {
SharedMemory::SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
: Object{kernel}, device_memory{device_memory} {}
-SharedMemory::~SharedMemory() = default;
+SharedMemory::~SharedMemory() {
+ kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
+}
std::shared_ptr<SharedMemory> SharedMemory::Create(
KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
@@ -21,6 +24,11 @@ std::shared_ptr<SharedMemory> SharedMemory::Create(
Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
std::string name) {
+ const auto resource_limit = kernel.GetSystemResourceLimit();
+ KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
+ size);
+ ASSERT(memory_reservation.Succeeded());
+
std::shared_ptr<SharedMemory> shared_memory{
std::make_shared<SharedMemory>(kernel, device_memory)};
@@ -32,6 +40,7 @@ std::shared_ptr<SharedMemory> SharedMemory::Create(
shared_memory->size = size;
shared_memory->name = name;
+ memory_reservation.Commit();
return shared_memory;
}
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index 0ef87235c..623bd8b11 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -71,6 +71,8 @@ public:
return device_memory.GetPointer(physical_address + offset);
}
+ void Finalize() override {}
+
private:
Core::DeviceMemory& device_memory;
Process* owner_process{};
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index de3ed25da..31d899e06 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -10,39 +10,44 @@
#include "common/alignment.h"
#include "common/assert.h"
+#include "common/common_funcs.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
+#include "common/scope_exit.h"
#include "common/string_util.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/cpu_manager.h"
-#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block.h"
+#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/memory/page_table.h"
-#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/kernel/svc_wrap.h"
-#include "core/hle/kernel/synchronization.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/kernel/transfer_memory.h"
-#include "core/hle/kernel/writable_event.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/hle/service/service.h"
@@ -66,49 +71,49 @@ ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr ds
VAddr src_addr, u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(src_addr)) {
LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is 0");
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!IsValidAddressRange(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!IsValidAddressRange(src_addr, size)) {
LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
src_addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!manager.IsInsideAddressSpace(src_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
src_addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (manager.IsOutsideStackRegion(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (manager.IsInsideHeapRegion(dst_addr, size)) {
@@ -116,7 +121,7 @@ ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr ds
"Destination does not fit within the heap region, addr=0x{:016X}, "
"size=0x{:016X}",
dst_addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (manager.IsInsideAliasRegion(dst_addr, size)) {
@@ -124,7 +129,7 @@ ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr ds
"Destination does not fit within the map region, addr=0x{:016X}, "
"size=0x{:016X}",
dst_addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return RESULT_SUCCESS;
@@ -133,33 +138,40 @@ ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr ds
enum class ResourceLimitValueType {
CurrentValue,
LimitValue,
+ PeakValue,
};
ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit,
u32 resource_type, ResourceLimitValueType value_type) {
std::lock_guard lock{HLE::g_hle_lock};
- const auto type = static_cast<ResourceType>(resource_type);
+ const auto type = static_cast<LimitableResource>(resource_type);
if (!IsValidResourceType(type)) {
LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
const auto* const current_process = system.Kernel().CurrentProcess();
ASSERT(current_process != nullptr);
const auto resource_limit_object =
- current_process->GetHandleTable().Get<ResourceLimit>(resource_limit);
+ current_process->GetHandleTable().Get<KResourceLimit>(resource_limit);
if (!resource_limit_object) {
LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}",
resource_limit);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
- if (value_type == ResourceLimitValueType::CurrentValue) {
- return MakeResult(resource_limit_object->GetCurrentResourceValue(type));
+ switch (value_type) {
+ case ResourceLimitValueType::CurrentValue:
+ return MakeResult(resource_limit_object->GetCurrentValue(type));
+ case ResourceLimitValueType::LimitValue:
+ return MakeResult(resource_limit_object->GetLimitValue(type));
+ case ResourceLimitValueType::PeakValue:
+ return MakeResult(resource_limit_object->GetPeakValue(type));
+ default:
+ LOG_ERROR(Kernel_SVC, "Invalid resource value_type: '{}'", value_type);
+ return ResultInvalidEnumValue;
}
-
- return MakeResult(resource_limit_object->GetMaxResourceValue(type));
}
} // Anonymous namespace
@@ -172,12 +184,12 @@ static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_s
if ((heap_size % 0x200000) != 0) {
LOG_ERROR(Kernel_SVC, "The heap size is not a multiple of 2MB, heap_size=0x{:016X}",
heap_size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (heap_size >= 0x200000000) {
LOG_ERROR(Kernel_SVC, "The heap size is not less than 8GB, heap_size=0x{:016X}", heap_size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
@@ -203,19 +215,19 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
if (!Common::Is4KBAligned(address)) {
LOG_ERROR(Kernel_SVC, "Address not page aligned (0x{:016X})", address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Invalid size (0x{:X}). Size must be non-zero and page aligned.",
size);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!IsValidAddressRange(address, size)) {
LOG_ERROR(Kernel_SVC, "Address range overflowed (Address: 0x{:016X}, Size: 0x{:016X})",
address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto attributes{static_cast<Memory::MemoryAttribute>(mask | attribute)};
@@ -224,7 +236,7 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
LOG_ERROR(Kernel_SVC,
"Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: {:X}",
attribute, mask);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
@@ -288,7 +300,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
LOG_ERROR(Kernel_SVC,
"Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
port_name_address);
- return ERR_NOT_FOUND;
+ return ResultNotFound;
}
static constexpr std::size_t PortNameMaxLength = 11;
@@ -297,7 +309,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
if (port_name.size() > PortNameMaxLength) {
LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength,
port_name.size());
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);
@@ -306,11 +318,9 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
const auto it = kernel.FindNamedPort(port_name);
if (!kernel.IsValidNamedPort(it)) {
LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
- return ERR_NOT_FOUND;
+ return ResultNotFound;
}
- ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Sessions, 1));
-
auto client_port = it->second;
std::shared_ptr<ClientSession> client_session;
@@ -335,7 +345,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
if (!session) {
LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
@@ -343,28 +353,13 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
auto thread = kernel.CurrentScheduler()->GetCurrentThread();
{
KScopedSchedulerLock lock(kernel);
- thread->InvalidateHLECallback();
- thread->SetStatus(ThreadStatus::WaitIPC);
+ thread->SetState(ThreadState::Waiting);
+ thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
}
- if (thread->HasHLECallback()) {
- Handle event_handle = thread->GetHLETimeEvent();
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- KScopedSchedulerLock lock(kernel);
- auto* sync_object = thread->GetHLESyncObject();
- sync_object->RemoveWaitingThread(SharedFrom(thread));
- }
-
- thread->InvokeHLECallback(SharedFrom(thread));
- }
-
- return thread->GetSignalingResult();
+ KSynchronizationObject* dummy{};
+ return thread->GetWaitResult(std::addressof(dummy));
}
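// The removed HLE-callback plumbing is replaced by a producer/consumer pair:
// the waker records the signaling object and result with SetSyncedObject()
// before waking the thread, and the woken thread reads both back with
// GetWaitResult(). A hypothetical minimal pairing (bodies illustrative, not
// the real KThread):
class KSynchronizationObject;
class KThreadSketch {
public:
    // Called by the waker (e.g. CompleteSyncRequest) before waking the thread.
    void SetSyncedObject(KSynchronizationObject* obj, ResultCode result) {
        synced_object = obj;
        wait_result = result;
    }
    // Called by the woken thread to learn how its wait ended.
    ResultCode GetWaitResult(KSynchronizationObject** out) const {
        *out = synced_object;
        return wait_result;
    }
private:
    KSynchronizationObject* synced_object{};
    ResultCode wait_result{RESULT_SUCCESS};
};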
static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -372,27 +367,29 @@ static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
}
/// Get the ID for the specified thread.
-static ResultCode GetThreadId(Core::System& system, u64* thread_id, Handle thread_handle) {
+static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", thread_handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- *thread_id = thread->GetThreadID();
+ // Get the thread's id.
+ *out_thread_id = thread->GetThreadID();
return RESULT_SUCCESS;
}
-static ResultCode GetThreadId32(Core::System& system, u32* thread_id_low, u32* thread_id_high,
- Handle thread_handle) {
- u64 thread_id{};
- const ResultCode result{GetThreadId(system, &thread_id, thread_handle)};
+static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
+ u32* out_thread_id_high, Handle thread_handle) {
+ u64 out_thread_id{};
+ const ResultCode result{GetThreadId(system, &out_thread_id, thread_handle)};
- *thread_id_low = static_cast<u32>(thread_id >> 32);
- *thread_id_high = static_cast<u32>(thread_id & std::numeric_limits<u32>::max());
+ *out_thread_id_low = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
+ *out_thread_id_high = static_cast<u32>(out_thread_id >> 32);
return result;
}
@@ -408,12 +405,12 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
return RESULT_SUCCESS;
}
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
if (thread) {
const Process* const owner_process = thread->GetOwnerProcess();
if (!owner_process) {
LOG_ERROR(Kernel_SVC, "Non-existent owning process encountered.");
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
*process_id = owner_process->GetProcessID();
@@ -423,7 +420,7 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
// NOTE: This should also handle debug objects before returning.
LOG_ERROR(Kernel_SVC, "Handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* process_id_high,
@@ -436,7 +433,7 @@ static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32*
}
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address,
+static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
u64 handle_count, s64 nano_seconds) {
LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
handles_address, handle_count, nano_seconds);
@@ -446,7 +443,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
LOG_ERROR(Kernel_SVC,
"Handle address is not a valid virtual address, handle_address=0x{:016X}",
handles_address);
- return ERR_INVALID_POINTER;
+ return ResultInvalidPointer;
}
static constexpr u64 MaxHandles = 0x40;
@@ -454,32 +451,30 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
if (handle_count > MaxHandles) {
LOG_ERROR(Kernel_SVC, "Handle count specified is too large, expected {} but got {}",
MaxHandles, handle_count);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
auto& kernel = system.Kernel();
- Thread::ThreadSynchronizationObjects objects(handle_count);
+ std::vector<KSynchronizationObject*> objects(handle_count);
const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
for (u64 i = 0; i < handle_count; ++i) {
const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
- const auto object = handle_table.Get<SynchronizationObject>(handle);
+ const auto object = handle_table.Get<KSynchronizationObject>(handle);
if (object == nullptr) {
LOG_ERROR(Kernel_SVC, "Object is a nullptr");
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
- objects[i] = object;
+ objects[i] = object.get();
}
- auto& synchronization = kernel.Synchronization();
- const auto [result, handle_result] = synchronization.WaitFor(objects, nano_seconds);
- *index = handle_result;
- return result;
+ return KSynchronizationObject::Wait(kernel, index, objects.data(),
+ static_cast<s32>(objects.size()), nano_seconds);
}
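// KSynchronizationObject::Wait takes an out-index, a raw pointer array, a
// count, and a timeout in nanoseconds (-1 waits forever, 0 polls). A hedged
// usage sketch; kernel, event_a, and event_b stand in for a kernel reference
// and any two waitable objects:
#include <array>
s32 signaled_index = -1;
std::array<KSynchronizationObject*, 2> objs{event_a, event_b};
const ResultCode wait_result = KSynchronizationObject::Wait(
    kernel, &signaled_index, objs.data(), static_cast<s32>(objs.size()), -1);
// On RESULT_SUCCESS, objs[signaled_index] is the object that was signaled.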
static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
- s32 handle_count, u32 timeout_high, Handle* index) {
+ s32 handle_count, u32 timeout_high, s32* index) {
const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
}
@@ -488,15 +483,17 @@ static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u
static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) {
LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
+
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
- thread_handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- thread->CancelWait();
+ // Cancel the thread's wait.
+ thread->WaitCancel();
return RESULT_SUCCESS;
}
@@ -504,56 +501,53 @@ static ResultCode CancelSynchronization32(Core::System& system, Handle thread_ha
return CancelSynchronization(system, thread_handle);
}
-/// Attempts to locks a mutex, creating it if it does not already exist
-static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
- VAddr mutex_addr, Handle requesting_thread_handle) {
- LOG_TRACE(Kernel_SVC,
- "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, "
- "requesting_current_thread_handle=0x{:08X}",
- holding_thread_handle, mutex_addr, requesting_thread_handle);
+/// Attempts to lock a mutex
+static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
+ u32 tag) {
+ LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
+ thread_handle, address, tag);
- if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
- mutex_addr);
- return ERR_INVALID_ADDRESS_STATE;
+ // Validate the input address.
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})",
+ address);
+ return ResultInvalidCurrentMemory;
}
-
- if (!Common::IsWordAligned(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
- return ERR_INVALID_ADDRESS;
+ if (!Common::IsAligned(address, sizeof(u32))) {
+ LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
+ return ResultInvalidAddress;
}
- auto* const current_process = system.Kernel().CurrentProcess();
- return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
- requesting_thread_handle);
+ return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
}
-static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
- u32 mutex_addr, Handle requesting_thread_handle) {
- return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle);
+static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
+ u32 tag) {
+ return ArbitrateLock(system, thread_handle, address, tag);
}
/// Unlock a mutex
-static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
- LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
+static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
+ LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
- if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
- mutex_addr);
- return ERR_INVALID_ADDRESS_STATE;
- }
+ // Validate the input address.
- if (!Common::IsWordAligned(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
- return ERR_INVALID_ADDRESS;
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC,
+ "Attempting to arbitrate an unlock on a kernel address (address={:08X})",
+ address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(u32))) {
+ LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
+ return ResultInvalidAddress;
}
- auto* const current_process = system.Kernel().CurrentProcess();
- return current_process->GetMutex().Release(mutex_addr);
+ return system.Kernel().CurrentProcess()->SignalToAddress(address);
}
-static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
- return ArbitrateUnlock(system, mutex_addr);
+static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
+ return ArbitrateUnlock(system, address);
}
enum class BreakType : u32 {
@@ -664,7 +658,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
handle_debug_buffer(info1, info2);
auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
- const auto thread_processor_id = current_thread->GetProcessorID();
+ const auto thread_processor_id = current_thread->GetActiveCore();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
}
}
@@ -748,7 +742,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
if (info_sub_id != 0) {
LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
info_sub_id);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
const auto& current_process_handle_table =
@@ -757,7 +751,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
if (!process) {
LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
info_id, info_sub_id, handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
switch (info_id_type) {
@@ -839,7 +833,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
}
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
case GetInfoType::IsCurrentProcessBeingDebugged:
@@ -849,13 +843,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
case GetInfoType::RegisterResourceLimit: {
if (handle != 0) {
LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
if (info_sub_id != 0) {
LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
info_sub_id);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
Process* const current_process = system.Kernel().CurrentProcess();
@@ -880,13 +874,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
if (handle != 0) {
LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}",
handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
if (info_sub_id >= Process::RANDOM_ENTROPY_SIZE) {
LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
Process::RANDOM_ENTROPY_SIZE, info_sub_id);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
*result = system.Kernel().CurrentProcess()->GetRandomEntropy(info_sub_id);
@@ -903,15 +897,15 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
info_sub_id);
- return ERR_INVALID_COMBINATION;
+ return ResultInvalidCombination;
}
- const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<Thread>(
+ const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<KThread>(
static_cast<Handle>(handle));
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
static_cast<Handle>(handle));
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
const auto& core_timing = system.CoreTiming();
@@ -922,7 +916,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
u64 out_ticks = 0;
if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
- const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks();
+ const u64 thread_ticks = current_thread->GetCpuTime();
out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
} else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
@@ -935,7 +929,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
default:
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
}
@@ -958,22 +952,22 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is zero");
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!(addr < addr + size)) {
LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
Process* const current_process{system.Kernel().CurrentProcess()};
@@ -981,21 +975,21 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
if (current_process->GetSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
- return ERR_INVALID_STATE;
+ return ResultInvalidState;
}
if (!page_table.IsInsideAddressSpace(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (page_table.IsOutsideAliasRegion(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return page_table.MapPhysicalMemory(addr, size);
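
The !(addr < addr + size) guard rejects ranges that wrap around the top of the 64-bit address space. A worked example of the case it catches (values chosen for illustration):

    #include <cstdint>

    constexpr bool RangeOverflows(uint64_t addr, uint64_t size) {
        return !(addr < addr + size);  // unsigned add wraps on overflow
    }

    // 0xFFFFFFFFFFFFF000 + 0x2000 wraps to 0x1000, so the range is rejected.
    static_assert(RangeOverflows(0xFFFFFFFFFFFFF000ULL, 0x2000ULL));
    static_assert(!RangeOverflows(0x100000ULL, 0x1000ULL));
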
@@ -1012,22 +1006,22 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is zero");
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!(addr < addr + size)) {
LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
Process* const current_process{system.Kernel().CurrentProcess()};
@@ -1035,21 +1029,21 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
if (current_process->GetSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
- return ERR_INVALID_STATE;
+ return ResultInvalidState;
}
if (!page_table.IsInsideAddressSpace(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (page_table.IsOutsideAliasRegion(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return page_table.UnmapPhysicalMemory(addr, size);
@@ -1060,128 +1054,139 @@ static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size
}
/// Sets the thread activity
-static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) {
- LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity);
- if (activity > static_cast<u32>(ThreadActivity::Paused)) {
- return ERR_INVALID_ENUM_VALUE;
+static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
+ LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
+ thread_activity);
+
+ // Validate the activity.
+ constexpr auto IsValidThreadActivity = [](ThreadActivity activity) {
+ return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused;
+ };
+ if (!IsValidThreadActivity(thread_activity)) {
+ LOG_ERROR(Kernel_SVC, "Invalid thread activity value provided (activity={})",
+ thread_activity);
+ return ResultInvalidEnumValue;
}
- const auto* current_process = system.Kernel().CurrentProcess();
- const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
+ // Get the thread from its handle.
+ auto& kernel = system.Kernel();
+ const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- if (thread->GetOwnerProcess() != current_process) {
- LOG_ERROR(Kernel_SVC,
- "The current process does not own the current thread, thread_handle={:08X} "
- "thread_pid={}, "
- "current_process_pid={}",
- handle, thread->GetOwnerProcess()->GetProcessID(),
- current_process->GetProcessID());
- return ERR_INVALID_HANDLE;
+ // Check that the activity is being set on a non-current thread for the current process.
+ if (thread->GetOwnerProcess() != kernel.CurrentProcess()) {
+ LOG_ERROR(Kernel_SVC, "Invalid owning process for the created thread.");
+ return ResultInvalidHandle;
+ }
+ if (thread.get() == GetCurrentThreadPointer(kernel)) {
+ LOG_ERROR(Kernel_SVC, "Thread is busy");
+ return ResultBusy;
}
- if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
- LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
- return ERR_BUSY;
+ // Set the activity.
+ const auto set_result = thread->SetActivity(thread_activity);
+ if (set_result.IsError()) {
+ LOG_ERROR(Kernel_SVC, "Failed to set thread activity.");
+ return set_result;
}
- return thread->SetActivity(static_cast<ThreadActivity>(activity));
+ return RESULT_SUCCESS;
}
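
The validator admits only the two activities the SVC defines. Assuming Svc::ThreadActivity carries the ABI values Runnable = 0 and Paused = 1, the check reduces to:

    #include <cstdint>

    enum class ThreadActivity : uint32_t {
        Runnable = 0,  // assumed ABI value
        Paused = 1,    // assumed ABI value
    };

    // Any other value is rejected with ResultInvalidEnumValue above.
    constexpr bool IsValidThreadActivity(ThreadActivity activity) {
        return activity == ThreadActivity::Runnable ||
               activity == ThreadActivity::Paused;
    }
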
-static ResultCode SetThreadActivity32(Core::System& system, Handle handle, u32 activity) {
- return SetThreadActivity(system, handle, activity);
+static ResultCode SetThreadActivity32(Core::System& system, Handle thread_handle,
+ Svc::ThreadActivity thread_activity) {
+ return SetThreadActivity(system, thread_handle, thread_activity);
}
/// Gets the thread context
-static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, Handle handle) {
- LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle);
+static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
+ LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
+ thread_handle);
+ // Get the thread from its handle.
const auto* current_process = system.Kernel().CurrentProcess();
- const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
+ const std::shared_ptr<KThread> thread =
+ current_process->GetHandleTable().Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={})", thread_handle);
+ return ResultInvalidHandle;
}
+ // Require the handle be to a non-current thread in the current process.
if (thread->GetOwnerProcess() != current_process) {
- LOG_ERROR(Kernel_SVC,
- "The current process does not own the current thread, thread_handle={:08X} "
- "thread_pid={}, "
- "current_process_pid={}",
- handle, thread->GetOwnerProcess()->GetProcessID(),
- current_process->GetProcessID());
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Thread owning process is not the current process.");
+ return ResultInvalidHandle;
}
-
if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
- LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
- return ERR_BUSY;
+ LOG_ERROR(Kernel_SVC, "Current thread is busy.");
+ return ResultBusy;
}
- Core::ARM_Interface::ThreadContext64 ctx = thread->GetContext64();
- // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
- ctx.pstate &= 0xFF0FFE20;
-
- // If 64-bit, we can just write the context registers directly and we're good.
- // However, if 32-bit, we have to ensure some registers are zeroed out.
- if (!current_process->Is64BitProcess()) {
- std::fill(ctx.cpu_registers.begin() + 15, ctx.cpu_registers.end(), 0);
- std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(), u128{});
+ // Get the thread context.
+ std::vector<u8> context;
+ const auto context_result = thread->GetThreadContext3(context);
+ if (context_result.IsError()) {
+ LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve thread context (result: {})",
+ context_result.raw);
+ return context_result;
}
- system.Memory().WriteBlock(thread_context, &ctx, sizeof(ctx));
+ // Copy the thread context to user space.
+ system.Memory().WriteBlock(out_context, context.data(), context.size());
+
return RESULT_SUCCESS;
}
-static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) {
- return GetThreadContext(system, thread_context, handle);
+static ResultCode GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
+ return GetThreadContext(system, out_context, thread_handle);
}
/// Gets the priority for the specified thread
-static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) {
+static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
LOG_TRACE(Kernel_SVC, "called");
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
if (!thread) {
- *priority = 0;
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", handle);
+ return ResultInvalidHandle;
}
- *priority = thread->GetPriority();
+ // Get the thread's priority.
+ *out_priority = thread->GetPriority();
return RESULT_SUCCESS;
}
-static ResultCode GetThreadPriority32(Core::System& system, u32* priority, Handle handle) {
- return GetThreadPriority(system, priority, handle);
+static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
+ return GetThreadPriority(system, out_priority, handle);
}
/// Sets the priority for the specified thread
static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) {
LOG_TRACE(Kernel_SVC, "called");
- if (priority > THREADPRIO_LOWEST) {
- LOG_ERROR(
- Kernel_SVC,
- "An invalid priority was specified, expected {} but got {} for thread_handle={:08X}",
- THREADPRIO_LOWEST, priority, handle);
- return ERR_INVALID_THREAD_PRIORITY;
+ // Validate the priority.
+ if (HighestThreadPriority > priority || priority > LowestThreadPriority) {
+ LOG_ERROR(Kernel_SVC, "Invalid thread priority specified (priority={})", priority);
+ return ResultInvalidPriority;
}
- const auto* const current_process = system.Kernel().CurrentProcess();
-
- std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
+ // Get the thread from its handle.
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid handle provided (handle={:08X})", handle);
+ return ResultInvalidHandle;
}
- thread->SetPriority(priority);
-
+ // Set the thread priority.
+ thread->SetBasePriority(priority);
return RESULT_SUCCESS;
}
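
The new bounds check accepts the inclusive range [HighestThreadPriority, LowestThreadPriority]. On Horizon these are conventionally 0 (highest) and 63 (lowest); a sketch under that assumption:

    #include <cstdint>

    constexpr uint32_t HighestThreadPriority = 0;   // assumed value
    constexpr uint32_t LowestThreadPriority = 63;   // assumed value

    constexpr bool IsValidPriority(uint32_t priority) {
        return HighestThreadPriority <= priority && priority <= LowestThreadPriority;
    }

    static_assert(IsValidPriority(28));
    static_assert(!IsValidPriority(64));
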
@@ -1208,23 +1213,23 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is 0");
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!IsValidAddressRange(addr, size)) {
LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto permission_type = static_cast<Memory::MemoryPermission>(permissions);
@@ -1232,7 +1237,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
Memory::MemoryPermission::ReadAndWrite) {
LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}",
permissions);
- return ERR_INVALID_MEMORY_PERMISSIONS;
+ return ResultInvalidMemoryPermissions;
}
auto* const current_process{system.Kernel().CurrentProcess()};
@@ -1243,7 +1248,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
"Addr does not fit within the valid region, addr=0x{:016X}, "
"size=0x{:016X}",
addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (page_table.IsInsideHeapRegion(addr, size)) {
@@ -1251,7 +1256,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
"Addr does not fit within the heap region, addr=0x{:016X}, "
"size=0x{:016X}",
addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
if (page_table.IsInsideAliasRegion(addr, size)) {
@@ -1259,14 +1264,14 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
"Address does not fit within the map region, addr=0x{:016X}, "
"size=0x{:016X}",
addr, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
auto shared_memory{current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle)};
if (!shared_memory) {
LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}",
shared_memory_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
return shared_memory->Map(*current_process, addr, size, permission_type);
@@ -1287,7 +1292,7 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
if (!process) {
LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
process_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
auto& memory{system.Memory()};
@@ -1334,18 +1339,18 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
if (!Common::Is4KBAligned(src_address)) {
LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
src_address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(dst_address)) {
LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
dst_address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!IsValidAddressRange(dst_address, size)) {
@@ -1353,7 +1358,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
"Destination address range overflows the address space (dst_address=0x{:016X}, "
"size=0x{:016X}).",
dst_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!IsValidAddressRange(src_address, size)) {
@@ -1361,7 +1366,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
"Source address range overflows the address space (src_address=0x{:016X}, "
"size=0x{:016X}).",
src_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
@@ -1369,7 +1374,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
if (!process) {
LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
process_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
auto& page_table = process->PageTable();
@@ -1378,7 +1383,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
"Source address range is not within the address space (src_address=0x{:016X}, "
"size=0x{:016X}).",
src_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!page_table.IsInsideASLRRegion(dst_address, size)) {
@@ -1386,7 +1391,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
"Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
"size=0x{:016X}).",
dst_address, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return page_table.MapProcessCodeMemory(dst_address, src_address, size);
@@ -1402,18 +1407,18 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
if (!Common::Is4KBAligned(dst_address)) {
LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
dst_address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(src_address)) {
LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
src_address);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
    if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
- return ERR_INVALID_SIZE;
+ return ResultInvalidSize;
}
if (!IsValidAddressRange(dst_address, size)) {
@@ -1421,7 +1426,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
"Destination address range overflows the address space (dst_address=0x{:016X}, "
"size=0x{:016X}).",
dst_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!IsValidAddressRange(src_address, size)) {
@@ -1429,7 +1434,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
"Source address range overflows the address space (src_address=0x{:016X}, "
"size=0x{:016X}).",
src_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
@@ -1437,7 +1442,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
if (!process) {
LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
process_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
auto& page_table = process->PageTable();
@@ -1446,7 +1451,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
"Source address range is not within the address space (src_address=0x{:016X}, "
"size=0x{:016X}).",
src_address, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
if (!page_table.IsInsideASLRRegion(dst_address, size)) {
@@ -1454,7 +1459,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
"Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
"size=0x{:016X}).",
dst_address, size);
- return ERR_INVALID_MEMORY_RANGE;
+ return ResultInvalidMemoryRange;
}
return page_table.UnmapProcessCodeMemory(dst_address, src_address, size);
@@ -1472,62 +1477,67 @@ static void ExitProcess(Core::System& system) {
current_process->PrepareForTermination();
// Kill the current thread
- system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop();
+ system.Kernel().CurrentScheduler()->GetCurrentThread()->Exit();
}
static void ExitProcess32(Core::System& system) {
ExitProcess(system);
}
+static constexpr bool IsValidCoreId(int32_t core_id) {
+ return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
+}
+
/// Creates a new thread
static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
- VAddr stack_top, u32 priority, s32 processor_id) {
+ VAddr stack_bottom, u32 priority, s32 core_id) {
LOG_DEBUG(Kernel_SVC,
- "called entrypoint=0x{:08X}, arg=0x{:08X}, stacktop=0x{:08X}, "
- "threadpriority=0x{:08X}, processorid=0x{:08X} : created handle=0x{:08X}",
- entry_point, arg, stack_top, priority, processor_id, *out_handle);
-
- auto* const current_process = system.Kernel().CurrentProcess();
+ "called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
+ "priority=0x{:08X}, core_id=0x{:08X}",
+ entry_point, arg, stack_bottom, priority, core_id);
- if (processor_id == THREADPROCESSORID_IDEAL) {
- // Set the target CPU to the one specified by the process.
- processor_id = current_process->GetIdealCore();
- ASSERT(processor_id != THREADPROCESSORID_IDEAL);
+ // Adjust core id, if it's the default magic.
+ auto& kernel = system.Kernel();
+ auto& process = *kernel.CurrentProcess();
+ if (core_id == IdealCoreUseProcessValue) {
+ core_id = process.GetIdealCoreId();
}
- if (processor_id < THREADPROCESSORID_0 || processor_id > THREADPROCESSORID_3) {
- LOG_ERROR(Kernel_SVC, "Invalid thread processor ID: {}", processor_id);
- return ERR_INVALID_PROCESSOR_ID;
+ // Validate arguments.
+ if (!IsValidCoreId(core_id)) {
+ LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id);
+ return ResultInvalidCoreId;
}
-
- const u64 core_mask = current_process->GetCoreMask();
- if ((core_mask | (1ULL << processor_id)) != core_mask) {
- LOG_ERROR(Kernel_SVC, "Invalid thread core specified ({})", processor_id);
- return ERR_INVALID_PROCESSOR_ID;
+ if (((1ULL << core_id) & process.GetCoreMask()) == 0) {
+ LOG_ERROR(Kernel_SVC, "Core ID doesn't fall within allowable cores (id={})", core_id);
+ return ResultInvalidCoreId;
}
- if (priority > THREADPRIO_LOWEST) {
- LOG_ERROR(Kernel_SVC,
- "Invalid thread priority specified ({}). Must be within the range 0-64",
- priority);
- return ERR_INVALID_THREAD_PRIORITY;
+ if (HighestThreadPriority > priority || priority > LowestThreadPriority) {
+ LOG_ERROR(Kernel_SVC, "Invalid priority specified (priority={})", priority);
+ return ResultInvalidPriority;
}
-
- if (((1ULL << priority) & current_process->GetPriorityMask()) == 0) {
- LOG_ERROR(Kernel_SVC, "Invalid thread priority specified ({})", priority);
- return ERR_INVALID_THREAD_PRIORITY;
+ if (!process.CheckThreadPriority(priority)) {
+ LOG_ERROR(Kernel_SVC, "Invalid allowable thread priority (priority={})", priority);
+ return ResultInvalidPriority;
}
- auto& kernel = system.Kernel();
-
- ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));
+ KScopedResourceReservation thread_reservation(
+ kernel.CurrentProcess(), LimitableResource::Threads, 1,
+ system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
+ if (!thread_reservation.Succeeded()) {
+ LOG_ERROR(Kernel_SVC, "Could not reserve a new thread");
+ return ResultResourceLimitedExceeded;
+ }
- ThreadType type = THREADTYPE_USER;
- CASCADE_RESULT(std::shared_ptr<Thread> thread,
- Thread::Create(system, type, "", entry_point, priority, arg, processor_id,
- stack_top, current_process));
+ std::shared_ptr<KThread> thread;
+ {
+ KScopedLightLock lk{process.GetStateLock()};
+ CASCADE_RESULT(thread, KThread::Create(system, ThreadType::User, "", entry_point, priority,
+ arg, core_id, stack_bottom, &process));
+ }
- const auto new_thread_handle = current_process->GetHandleTable().Create(thread);
+ const auto new_thread_handle = process.GetHandleTable().Create(thread);
if (new_thread_handle.Failed()) {
LOG_ERROR(Kernel_SVC, "Failed to create handle with error=0x{:X}",
new_thread_handle.Code().raw);
@@ -1538,6 +1548,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
// Set the thread name for debugging purposes.
thread->SetName(
fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));
+ thread_reservation.Commit();
return RESULT_SUCCESS;
}
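
KScopedResourceReservation is the usual scoped-reservation RAII shape: reserve in the constructor, roll back in the destructor unless Commit() transferred ownership, which is why the Commit() call sits after every fallible step. A simplified sketch of the pattern (types hypothetical, not the emulator's actual class):

    #include <cstdint>

    // Hypothetical stand-in for the kernel's resource bookkeeping.
    struct ResourceLimit {
        int64_t used = 0;
        int64_t max = 0;
        bool Reserve(int64_t n) {
            if (used + n > max) return false;
            used += n;
            return true;
        }
        void Release(int64_t n) { used -= n; }
    };

    class ScopedReservation {
    public:
        explicit ScopedReservation(ResourceLimit& limit) : limit_{limit} {
            succeeded_ = limit_.Reserve(1);
        }
        ~ScopedReservation() {
            if (succeeded_ && !committed_) {
                limit_.Release(1);  // error path: undo the reservation
            }
        }
        bool Succeeded() const { return succeeded_; }
        void Commit() { committed_ = true; }

    private:
        ResourceLimit& limit_;
        bool succeeded_ = false;
        bool committed_ = false;
    };
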
@@ -1551,17 +1562,24 @@ static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 p
static ResultCode StartThread(Core::System& system, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
- thread_handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
+ // Try to start the thread.
+ const auto run_result = thread->Run();
+ if (run_result.IsError()) {
+ LOG_ERROR(Kernel_SVC,
+ "Unable to successfuly start thread (thread handle={:08X}, result={})",
+ thread_handle, run_result.raw);
+ return run_result;
+ }
- return thread->Start();
+ return RESULT_SUCCESS;
}
static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
@@ -1574,7 +1592,7 @@ static void ExitThread(Core::System& system) {
auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
- current_thread->Stop();
+ current_thread->Exit();
}
static void ExitThread32(Core::System& system) {
@@ -1583,34 +1601,28 @@ static void ExitThread32(Core::System& system) {
/// Sleep the current thread
static void SleepThread(Core::System& system, s64 nanoseconds) {
- LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
+ auto& kernel = system.Kernel();
+ const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);
- enum class SleepType : s64 {
- YieldWithoutCoreMigration = 0,
- YieldWithCoreMigration = -1,
- YieldAndWaitForLoadBalancing = -2,
- };
+ LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
- auto& scheduler = *system.Kernel().CurrentScheduler();
- if (nanoseconds <= 0) {
- switch (static_cast<SleepType>(nanoseconds)) {
- case SleepType::YieldWithoutCoreMigration: {
- scheduler.YieldWithoutCoreMigration();
- break;
- }
- case SleepType::YieldWithCoreMigration: {
- scheduler.YieldWithCoreMigration();
- break;
- }
- case SleepType::YieldAndWaitForLoadBalancing: {
- scheduler.YieldToAnyThread();
- break;
- }
- default:
- UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
- }
+ // When the input tick is positive, sleep.
+ if (nanoseconds > 0) {
+ // Convert the timeout from nanoseconds to ticks.
+ // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
+
+ // Sleep.
+ // NOTE: Nintendo does not check the result of this sleep.
+ static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
+ } else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
+ KScheduler::YieldWithoutCoreMigration(kernel);
+ } else if (yield_type == Svc::YieldType::WithCoreMigration) {
+ KScheduler::YieldWithCoreMigration(kernel);
+ } else if (yield_type == Svc::YieldType::ToAnyThread) {
+ KScheduler::YieldToAnyThread(kernel);
} else {
- scheduler.GetCurrentThread()->Sleep(nanoseconds);
+ // Nintendo does nothing at all if an otherwise invalid value is passed.
+ UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
}
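
The yield dispatch keys off the sentinel, non-positive timeout values. Assuming Svc::YieldType mirrors the svcSleepThread ABI, the mapping is:

    #include <cstdint>

    // Sentinel timeouts interpreted as yields rather than sleeps
    // (assumed ABI values).
    enum class YieldType : int64_t {
        WithoutCoreMigration = 0,
        WithCoreMigration = -1,
        ToAnyThread = -2,
    };
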
@@ -1620,224 +1632,159 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
}
/// Wait process wide key atomic
-static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr,
- VAddr condition_variable_addr, Handle thread_handle,
- s64 nano_seconds) {
- LOG_TRACE(
- Kernel_SVC,
- "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
- mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
-
- if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
- LOG_ERROR(
- Kernel_SVC,
- "Given mutex address must not be within the kernel address space. address=0x{:016X}",
- mutex_addr);
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- if (!Common::IsWordAligned(mutex_addr)) {
- LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}",
- mutex_addr);
- return ERR_INVALID_ADDRESS;
- }
-
- ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
- auto& kernel = system.Kernel();
- Handle event_handle;
- Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
- auto* const current_process = kernel.CurrentProcess();
- {
- KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
- const auto& handle_table = current_process->GetHandleTable();
- std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
- ASSERT(thread);
-
- current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
-
- if (thread->IsPendingTermination()) {
- lock.CancelSleep();
- return ERR_THREAD_TERMINATING;
- }
-
- const auto release_result = current_process->GetMutex().Release(mutex_addr);
- if (release_result.IsError()) {
- lock.CancelSleep();
- return release_result;
- }
-
- if (nano_seconds == 0) {
- lock.CancelSleep();
- return RESULT_TIMEOUT;
- }
-
- current_thread->SetCondVarWaitAddress(condition_variable_addr);
- current_thread->SetMutexWaitAddress(mutex_addr);
- current_thread->SetWaitHandle(thread_handle);
- current_thread->SetStatus(ThreadStatus::WaitCondVar);
- current_process->InsertConditionVariableThread(SharedFrom(current_thread));
- }
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- KScopedSchedulerLock lock(kernel);
-
- auto* owner = current_thread->GetLockOwner();
- if (owner != nullptr) {
- owner->RemoveMutexWaiter(SharedFrom(current_thread));
+static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
+ u32 tag, s64 timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
+ cv_key, tag, timeout_ns);
+
+ // Validate input.
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(s32))) {
+ LOG_ERROR(Kernel_SVC, "Address must be 4 byte aligned (address={:08X})", address);
+ return ResultInvalidAddress;
+ }
+
+ // Convert timeout from nanoseconds to ticks.
+ s64 timeout{};
+ if (timeout_ns > 0) {
+ const s64 offset_tick(timeout_ns);
+ if (offset_tick > 0) {
+ timeout = offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
}
-
- current_process->RemoveConditionVariableThread(SharedFrom(current_thread));
+ } else {
+ timeout = timeout_ns;
}
- // Note: Deliberately don't attempt to inherit the lock owner's priority.
- return current_thread->GetSignalingResult();
+ // Wait on the condition variable.
+ return system.Kernel().CurrentProcess()->WaitConditionVariable(
+ address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
}
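
The timeout massaging above (repeated in WaitForAddress below) pads positive timeouts by two ticks and saturates to an effectively infinite wait on signed overflow; zero and negative values (poll / wait forever) pass through untouched. Factored out as a standalone sketch:

    #include <cstdint>
    #include <limits>

    // Mirror of the inline conversion; in this port the nanosecond count is
    // used directly as the tick count.
    static int64_t ConvertTimeout(int64_t timeout_ns) {
        if (timeout_ns <= 0) {
            return timeout_ns;  // poll (0) or infinite wait (negative)
        }
        const int64_t padded = timeout_ns + 2;
        return padded > 0 ? padded : std::numeric_limits<int64_t>::max();
    }
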
-static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
- u32 condition_variable_addr, Handle thread_handle,
- u32 nanoseconds_low, u32 nanoseconds_high) {
- const auto nanoseconds = static_cast<s64>(nanoseconds_low | (u64{nanoseconds_high} << 32));
- return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle,
- nanoseconds);
+static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
+ u32 timeout_ns_low, u32 timeout_ns_high) {
+ const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
+ return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
}
/// Signal process wide key
-static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) {
- LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
- condition_variable_addr, target);
+static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
+ LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
- ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
+ // Signal the condition variable.
+ return system.Kernel().CurrentProcess()->SignalConditionVariable(
+ Common::AlignDown(cv_key, sizeof(u32)), count);
+}
- // Retrieve a list of all threads that are waiting for this condition variable.
- auto& kernel = system.Kernel();
- KScopedSchedulerLock lock(kernel);
- auto* const current_process = kernel.CurrentProcess();
- std::vector<std::shared_ptr<Thread>> waiting_threads =
- current_process->GetConditionVariableThreads(condition_variable_addr);
-
- // Only process up to 'target' threads, unless 'target' is less equal 0, in which case process
- // them all.
- std::size_t last = waiting_threads.size();
- if (target > 0) {
- last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
- }
- for (std::size_t index = 0; index < last; ++index) {
- auto& thread = waiting_threads[index];
-
- ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
-
- // liberate Cond Var Thread.
- current_process->RemoveConditionVariableThread(thread);
-
- const std::size_t current_core = system.CurrentCoreIndex();
- auto& monitor = system.Monitor();
-
- // Atomically read the value of the mutex.
- u32 mutex_val = 0;
- u32 update_val = 0;
- const VAddr mutex_address = thread->GetMutexWaitAddress();
- do {
- // If the mutex is not yet acquired, acquire it.
- mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);
-
- if (mutex_val != 0) {
- update_val = mutex_val | Mutex::MutexHasWaitersFlag;
- } else {
- update_val = thread->GetWaitHandle();
- }
- } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
- monitor.ClearExclusive();
- if (mutex_val == 0) {
- // We were able to acquire the mutex, resume this thread.
- auto* const lock_owner = thread->GetLockOwner();
- if (lock_owner != nullptr) {
- lock_owner->RemoveMutexWaiter(thread);
- }
+static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
+ SignalProcessWideKey(system, cv_key, count);
+}
- thread->SetLockOwner(nullptr);
- thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
- thread->ResumeFromWait();
- } else {
- // The mutex is already owned by some other thread, make this thread wait on it.
- const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- auto owner = handle_table.Get<Thread>(owner_handle);
- ASSERT(owner);
- if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
- thread->SetStatus(ThreadStatus::WaitMutex);
- }
+namespace {
- owner->AddMutexWaiter(thread);
- }
+constexpr bool IsValidSignalType(Svc::SignalType type) {
+ switch (type) {
+ case Svc::SignalType::Signal:
+ case Svc::SignalType::SignalAndIncrementIfEqual:
+ case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
+ return true;
+ default:
+ return false;
}
}
-static void SignalProcessWideKey32(Core::System& system, u32 condition_variable_addr, s32 target) {
- SignalProcessWideKey(system, condition_variable_addr, target);
+constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
+ switch (type) {
+ case Svc::ArbitrationType::WaitIfLessThan:
+ case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
+ case Svc::ArbitrationType::WaitIfEqual:
+ return true;
+ default:
+ return false;
+ }
}
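
Both validators whitelist the three values each SVC accepts. Assuming the Svc enums carry the ABI values 0 through 2:

    #include <cstdint>

    // Assumed ABI values for the address-arbiter SVCs.
    enum class SignalType : uint32_t {
        Signal = 0,
        SignalAndIncrementIfEqual = 1,
        SignalAndModifyByWaitingCountIfEqual = 2,
    };

    enum class ArbitrationType : uint32_t {
        WaitIfLessThan = 0,
        DecrementAndWaitIfLessThan = 1,
        WaitIfEqual = 2,
    };
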
-// Wait for an address (via Address Arbiter)
-static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value,
- s64 timeout) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address,
- type, value, timeout);
-
- // If the passed address is a kernel virtual address, return invalid memory state.
- if (Core::Memory::IsKernelVirtualAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
- return ERR_INVALID_ADDRESS_STATE;
- }
+} // namespace
- // If the address is not properly aligned to 4 bytes, return invalid address.
- if (!Common::IsWordAligned(address)) {
- LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
- return ERR_INVALID_ADDRESS;
+// Wait for an address (via Address Arbiter)
+static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
+ s32 value, s64 timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
+ address, arb_type, value, timeout_ns);
+
+ // Validate input.
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(s32))) {
+ LOG_ERROR(Kernel_SVC, "Wait address must be 4 byte aligned (address={:08X})", address);
+ return ResultInvalidAddress;
+ }
+ if (!IsValidArbitrationType(arb_type)) {
+ LOG_ERROR(Kernel_SVC, "Invalid arbitration type specified (type={})", arb_type);
+ return ResultInvalidEnumValue;
+ }
+
+ // Convert timeout from nanoseconds to ticks.
+ s64 timeout{};
+ if (timeout_ns > 0) {
+ const s64 offset_tick(timeout_ns);
+ if (offset_tick > 0) {
+ timeout = offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = timeout_ns;
}
- const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
- auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
- const ResultCode result =
- address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
- return result;
+ return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
}
-static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
- u32 timeout_low, u32 timeout_high) {
- const auto timeout = static_cast<s64>(timeout_low | (u64{timeout_high} << 32));
- return WaitForAddress(system, address, type, value, timeout);
+static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
+ s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
+ const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
+ return WaitForAddress(system, address, arb_type, value, timeout);
}
// Signals to an address (via Address Arbiter)
-static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
- s32 num_to_wake) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
- address, type, value, num_to_wake);
+static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
+ address, signal_type, value, count);
- // If the passed address is a kernel virtual address, return invalid memory state.
- if (Core::Memory::IsKernelVirtualAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
- return ERR_INVALID_ADDRESS_STATE;
+ // Validate input.
+ if (Memory::IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address);
+ return ResultInvalidCurrentMemory;
}
-
- // If the address is not properly aligned to 4 bytes, return invalid address.
- if (!Common::IsWordAligned(address)) {
- LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
- return ERR_INVALID_ADDRESS;
+ if (!Common::IsAligned(address, sizeof(s32))) {
+ LOG_ERROR(Kernel_SVC, "Signaled address must be 4 byte aligned (address={:08X})", address);
+ return ResultInvalidAddress;
+ }
+ if (!IsValidSignalType(signal_type)) {
+ LOG_ERROR(Kernel_SVC, "Invalid signal type specified (type={})", signal_type);
+ return ResultInvalidEnumValue;
}
- const auto signal_type = static_cast<AddressArbiter::SignalType>(type);
- auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
- return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
+ return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
+ count);
}
-static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
- s32 num_to_wake) {
- return SignalToAddress(system, address, type, value, num_to_wake);
+static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
+ return SignalToAddress(system, address, signal_type, value, count);
}
static void KernelDebug([[maybe_unused]] Core::System& system,
@@ -1889,20 +1836,28 @@ static ResultCode CloseHandle32(Core::System& system, Handle handle) {
static ResultCode ResetSignal(Core::System& system, Handle handle) {
LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
+ // Get the current handle table.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- auto event = handle_table.Get<ReadableEvent>(handle);
- if (event) {
- return event->Reset();
+ // Try to reset as readable event.
+ {
+ auto readable_event = handle_table.Get<KReadableEvent>(handle);
+ if (readable_event) {
+ return readable_event->Reset();
+ }
}
- auto process = handle_table.Get<Process>(handle);
- if (process) {
- return process->ClearSignalState();
+ // Try to reset as process.
+ {
+ auto process = handle_table.Get<Process>(handle);
+ if (process) {
+ return process->Reset();
+ }
}
- LOG_ERROR(Kernel_SVC, "Invalid handle (0x{:08X})", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "invalid handle (0x{:08X})", handle);
+
+ return ResultInvalidHandle;
}
static ResultCode ResetSignal32(Core::System& system, Handle handle) {
@@ -1918,18 +1873,18 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address ({:016X}) is not page aligned!", addr);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!Common::Is4KBAligned(size) || size == 0) {
LOG_ERROR(Kernel_SVC, "Size ({:016X}) is not page aligned or equal to zero!", size);
- return ERR_INVALID_ADDRESS;
+ return ResultInvalidAddress;
}
if (!IsValidAddressRange(addr, size)) {
LOG_ERROR(Kernel_SVC, "Address and size cause overflow! (address={:016X}, size={:016X})",
addr, size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
const auto perms{static_cast<Memory::MemoryPermission>(permissions)};
@@ -1937,10 +1892,17 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
perms == Memory::MemoryPermission::Write) {
LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! (perms={:08X})",
permissions);
- return ERR_INVALID_MEMORY_PERMISSIONS;
+ return ResultInvalidMemoryPermissions;
}
auto& kernel = system.Kernel();
+ // Reserve a new transfer memory from the process resource limit.
+ KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(),
+ LimitableResource::TransferMemory);
+ if (!trmem_reservation.Succeeded()) {
+ LOG_ERROR(Kernel_SVC, "Could not reserve a new transfer memory");
+ return ResultResourceLimitedExceeded;
+ }
auto transfer_mem_handle = TransferMemory::Create(kernel, system.Memory(), addr, size, perms);
if (const auto reserve_result{transfer_mem_handle->Reserve()}; reserve_result.IsError()) {
@@ -1952,6 +1914,7 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
if (result.Failed()) {
return result.Code();
}
+ trmem_reservation.Commit();
*handle = *result;
return RESULT_SUCCESS;
@@ -1962,171 +1925,204 @@ static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u
return CreateTransferMemory(system, handle, addr, size, permissions);
}
-static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core,
- u64* mask) {
+static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u64* out_affinity_mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
+ // Get the thread from its handle.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
- thread_handle);
- *core = 0;
- *mask = 0;
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle specified (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- *core = thread->GetIdealCore();
- *mask = thread->GetAffinityMask().GetAffinityMask();
+ // Get the core mask.
+ const auto result = thread->GetCoreMask(out_core_id, out_affinity_mask);
+ if (result.IsError()) {
+ LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve core mask (result={})", result.raw);
+ return result;
+ }
return RESULT_SUCCESS;
}
-static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, u32* core,
- u32* mask_low, u32* mask_high) {
- u64 mask{};
- const auto result = GetThreadCoreMask(system, thread_handle, core, &mask);
- *mask_high = static_cast<u32>(mask >> 32);
- *mask_low = static_cast<u32>(mask);
+static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
+ u64 out_affinity_mask{};
+ const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
+ *out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
+ *out_affinity_mask_low = static_cast<u32>(out_affinity_mask);
return result;
}
-static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core,
+static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
u64 affinity_mask) {
- LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}",
- thread_handle, core, affinity_mask);
-
- const auto* const current_process = system.Kernel().CurrentProcess();
+ LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core_id=0x{:X}, affinity_mask=0x{:016X}",
+ thread_handle, core_id, affinity_mask);
- if (core == static_cast<u32>(THREADPROCESSORID_IDEAL)) {
- const u8 ideal_cpu_core = current_process->GetIdealCore();
+ const auto& current_process = *system.Kernel().CurrentProcess();
- ASSERT(ideal_cpu_core != static_cast<u8>(THREADPROCESSORID_IDEAL));
-
- // Set the target CPU to the ideal core specified by the process.
- core = ideal_cpu_core;
- affinity_mask = 1ULL << core;
+ // Determine the core id/affinity mask.
+ if (core_id == Svc::IdealCoreUseProcessValue) {
+ core_id = current_process.GetIdealCoreId();
+ affinity_mask = (1ULL << core_id);
} else {
- const u64 core_mask = current_process->GetCoreMask();
-
- if ((core_mask | affinity_mask) != core_mask) {
- LOG_ERROR(
- Kernel_SVC,
- "Invalid processor ID specified (core_mask=0x{:08X}, affinity_mask=0x{:016X})",
- core_mask, affinity_mask);
- return ERR_INVALID_PROCESSOR_ID;
+ // Validate the affinity mask.
+ const u64 process_core_mask = current_process.GetCoreMask();
+ if ((affinity_mask | process_core_mask) != process_core_mask) {
+ LOG_ERROR(Kernel_SVC,
+ "Affinity mask does match the process core mask (affinity mask={:016X}, core "
+ "mask={:016X})",
+ affinity_mask, process_core_mask);
+ return ResultInvalidCoreId;
}
-
if (affinity_mask == 0) {
- LOG_ERROR(Kernel_SVC, "Specfified affinity mask is zero.");
- return ERR_INVALID_COMBINATION;
+ LOG_ERROR(Kernel_SVC, "Affinity mask is zero.");
+ return ResultInvalidCombination;
}
- if (core < Core::Hardware::NUM_CPU_CORES) {
- if ((affinity_mask & (1ULL << core)) == 0) {
- LOG_ERROR(Kernel_SVC,
- "Core is not enabled for the current mask, core={}, mask={:016X}", core,
- affinity_mask);
- return ERR_INVALID_COMBINATION;
+ // Validate the core id.
+ if (IsValidCoreId(core_id)) {
+ if (((1ULL << core_id) & affinity_mask) == 0) {
+ LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id);
+ return ResultInvalidCombination;
+ }
+ } else {
+ if (core_id != Svc::IdealCoreNoUpdate && core_id != Svc::IdealCoreDontCare) {
+ LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id);
+ return ResultInvalidCoreId;
}
- } else if (core != static_cast<u32>(THREADPROCESSORID_DONT_CARE) &&
- core != static_cast<u32>(THREADPROCESSORID_DONT_UPDATE)) {
- LOG_ERROR(Kernel_SVC, "Invalid processor ID specified (core={}).", core);
- return ERR_INVALID_PROCESSOR_ID;
}
}
- const auto& handle_table = current_process->GetHandleTable();
- const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
+ // Get the thread from its handle.
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
if (!thread) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
- thread_handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Invalid thread handle (handle={:08X})", thread_handle);
+ return ResultInvalidHandle;
}
- return thread->SetCoreAndAffinityMask(core, affinity_mask);
+ // Set the core mask.
+ const auto set_result = thread->SetCoreMask(core_id, affinity_mask);
+ if (set_result.IsError()) {
+ LOG_ERROR(Kernel_SVC, "Unable to successfully set core mask (result={})", set_result.raw);
+ return set_result;
+ }
+ return RESULT_SUCCESS;
}
-static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
+static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
u32 affinity_mask_low, u32 affinity_mask_high) {
const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
- return SetThreadCoreMask(system, thread_handle, core, affinity_mask);
+ return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
}
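
Taken together, the validation above enforces three rules: an explicit affinity mask must be a subset of the process core mask, it must be non-zero, and when a physical core id is named it must itself be enabled in the mask; the negative ids are sentinels. A minimal standalone sketch of those rules follows (the constant kNumCpuCores and the function names are illustrative, not yuzu APIs):

    #include <cstdint>

    constexpr int32_t IdealCoreDontCare = -1;
    constexpr int32_t IdealCoreUseProcessValue = -2;
    constexpr int32_t IdealCoreNoUpdate = -3;
    constexpr uint32_t kNumCpuCores = 4; // assumption: four guest CPU cores

    enum class CoreMaskCheck { Ok, InvalidCoreId, InvalidCombination };

    CoreMaskCheck ValidateCoreMask(int32_t core_id, uint64_t affinity_mask,
                                   uint64_t process_core_mask) {
        if (core_id == IdealCoreUseProcessValue) {
            return CoreMaskCheck::Ok; // mask is derived from the process ideal core
        }
        // The mask may only name cores the process is allowed to use.
        if ((affinity_mask | process_core_mask) != process_core_mask) {
            return CoreMaskCheck::InvalidCoreId;
        }
        if (affinity_mask == 0) {
            return CoreMaskCheck::InvalidCombination;
        }
        if (core_id >= 0 && static_cast<uint32_t>(core_id) < kNumCpuCores) {
            // A concrete ideal core must itself be enabled in the mask.
            return ((1ULL << core_id) & affinity_mask) != 0
                       ? CoreMaskCheck::Ok
                       : CoreMaskCheck::InvalidCombination;
        }
        return (core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare)
                   ? CoreMaskCheck::Ok
                   : CoreMaskCheck::InvalidCoreId;
    }
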
-static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) {
- LOG_DEBUG(Kernel_SVC, "called");
+static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
+ LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
auto& kernel = system.Kernel();
- const auto [readable_event, writable_event] =
- WritableEvent::CreateEventPair(kernel, "CreateEvent");
+ // Get the current handle table.
+ const HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();
- HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();
-
- const auto write_create_result = handle_table.Create(writable_event);
- if (write_create_result.Failed()) {
- return write_create_result.Code();
- }
- *write_handle = *write_create_result;
- const auto read_create_result = handle_table.Create(readable_event);
- if (read_create_result.Failed()) {
- handle_table.Close(*write_create_result);
- return read_create_result.Code();
+ // Get the writable event.
+ auto writable_event = handle_table.Get<KWritableEvent>(event_handle);
+ if (!writable_event) {
+ LOG_ERROR(Kernel_SVC, "Invalid event handle provided (handle={:08X})", event_handle);
+ return ResultInvalidHandle;
}
- *read_handle = *read_create_result;
- LOG_DEBUG(Kernel_SVC,
- "successful. Writable event handle=0x{:08X}, Readable event handle=0x{:08X}",
- *write_create_result, *read_create_result);
- return RESULT_SUCCESS;
+ return writable_event->Signal();
}
-static ResultCode CreateEvent32(Core::System& system, Handle* write_handle, Handle* read_handle) {
- return CreateEvent(system, write_handle, read_handle);
+static ResultCode SignalEvent32(Core::System& system, Handle event_handle) {
+ return SignalEvent(system, event_handle);
}
-static ResultCode ClearEvent(Core::System& system, Handle handle) {
- LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle);
+static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
+ LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
+ // Get the current handle table.
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- auto writable_event = handle_table.Get<WritableEvent>(handle);
- if (writable_event) {
- writable_event->Clear();
- return RESULT_SUCCESS;
+ // Try to clear the writable event.
+ {
+ auto writable_event = handle_table.Get<KWritableEvent>(event_handle);
+ if (writable_event) {
+ return writable_event->Clear();
+ }
}
- auto readable_event = handle_table.Get<ReadableEvent>(handle);
- if (readable_event) {
- readable_event->Clear();
- return RESULT_SUCCESS;
+ // Try to clear the readable event.
+ {
+ auto readable_event = handle_table.Get<KReadableEvent>(event_handle);
+ if (readable_event) {
+ return readable_event->Clear();
+ }
}
- LOG_ERROR(Kernel_SVC, "Event handle does not exist, handle=0x{:08X}", handle);
- return ERR_INVALID_HANDLE;
+ LOG_ERROR(Kernel_SVC, "Event handle does not exist, event_handle=0x{:08X}", event_handle);
+
+ return ResultInvalidHandle;
}
-static ResultCode ClearEvent32(Core::System& system, Handle handle) {
- return ClearEvent(system, handle);
+static ResultCode ClearEvent32(Core::System& system, Handle event_handle) {
+ return ClearEvent(system, event_handle);
}
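
ClearEvent accepts either endpoint of the event pair: the handle is tried first as a KWritableEvent and then as a KReadableEvent, since both sides clear the same underlying signaled state. A toy model of that shared state, illustrative only (the real objects also wake and requeue waiters under the scheduler lock):

    #include <atomic>

    // Stand-in for the KEvent pair: one signaled flag shared by the
    // writable (signal) and readable (wait/clear) endpoints.
    class ToyEvent {
    public:
        void Signal() { signaled.store(true); }  // svcSignalEvent
        void Clear() { signaled.store(false); }  // svcClearEvent, via either handle
        bool IsSignaled() const { return signaled.load(); }

    private:
        std::atomic<bool> signaled{false};
    };
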
-static ResultCode SignalEvent(Core::System& system, Handle handle) {
- LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle);
+static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
+ LOG_DEBUG(Kernel_SVC, "called");
- HandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- auto writable_event = handle_table.Get<WritableEvent>(handle);
+ // Get the kernel reference and handle table.
+ auto& kernel = system.Kernel();
+ HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ // Reserve a new event from the process resource limit.
+ KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
+ LimitableResource::Events);
+ if (!event_reservation.Succeeded()) {
+ LOG_ERROR(Kernel, "Could not reserve a new event");
+ return ResultResourceLimitedExceeded;
- if (!writable_event) {
- LOG_ERROR(Kernel_SVC, "Non-existent writable event handle used (0x{:08X})", handle);
- return ERR_INVALID_HANDLE;
}
+ // Create a new event.
+ const auto event = KEvent::Create(kernel, "CreateEvent");
+ if (!event) {
+ LOG_ERROR(Kernel_SVC, "Unable to create new events. Event creation limit reached.");
+ return ResultOutOfResource;
+ }
- writable_event->Signal();
+ // Initialize the event.
+ event->Initialize();
+
+ // Commit the event reservation.
+ event_reservation.Commit();
+
+ // Add the writable event to the handle table.
+ const auto write_create_result = handle_table.Create(event->GetWritableEvent());
+ if (write_create_result.Failed()) {
+ return write_create_result.Code();
+ }
+ *out_write = *write_create_result;
+
+ // Ensure that we maintain a clean handle state on exit.
+ auto handle_guard = SCOPE_GUARD({ handle_table.Close(*write_create_result); });
+
+ // Add the readable event to the handle table.
+ const auto read_create_result = handle_table.Create(event->GetReadableEvent());
+ if (read_create_result.Failed()) {
+ return read_create_result.Code();
+ }
+ *out_read = *read_create_result;
+
+ // We succeeded.
+ handle_guard.Cancel();
return RESULT_SUCCESS;
}
-static ResultCode SignalEvent32(Core::System& system, Handle handle) {
- return SignalEvent(system, handle);
+static ResultCode CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
+ return CreateEvent(system, out_write, out_read);
}
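
The create path relies on two scope-exit helpers: KScopedResourceReservation releases the reserved event slot on destruction unless Commit() was called, and SCOPE_GUARD closes the already-created write handle unless the guard is cancelled, so every early return unwinds cleanly. A generic sketch of the same commit/cancel idiom, using a stand-in class rather than the actual yuzu types:

    #include <functional>
    #include <utility>

    class ScopedReservation {
    public:
        ScopedReservation(bool ok, std::function<void()> release)
            : succeeded(ok), on_release(std::move(release)) {}
        ~ScopedReservation() {
            // Roll the reservation back unless it was committed.
            if (succeeded && !committed && on_release) {
                on_release();
            }
        }
        bool Succeeded() const { return succeeded; }
        void Commit() { committed = true; }

    private:
        bool succeeded;
        bool committed = false;
        std::function<void()> on_release;
    };
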
static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
@@ -2142,13 +2138,13 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
if (!process) {
LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
process_handle);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
const auto info_type = static_cast<InfoType>(type);
if (info_type != InfoType::Status) {
LOG_ERROR(Kernel_SVC, "Expected info_type to be Status but got {} instead", type);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
*out = static_cast<u64>(process->GetStatus());
@@ -2160,7 +2156,7 @@ static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle)
LOG_DEBUG(Kernel_SVC, "called");
auto& kernel = system.Kernel();
- auto resource_limit = ResourceLimit::Create(kernel);
+ auto resource_limit = std::make_shared<KResourceLimit>(kernel, system);
auto* const current_process = kernel.CurrentProcess();
ASSERT(current_process != nullptr);
@@ -2207,30 +2203,30 @@ static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resour
LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}, Value={}", resource_limit,
resource_type, value);
- const auto type = static_cast<ResourceType>(resource_type);
+ const auto type = static_cast<LimitableResource>(resource_type);
if (!IsValidResourceType(type)) {
LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
- return ERR_INVALID_ENUM_VALUE;
+ return ResultInvalidEnumValue;
}
auto* const current_process = system.Kernel().CurrentProcess();
ASSERT(current_process != nullptr);
auto resource_limit_object =
- current_process->GetHandleTable().Get<ResourceLimit>(resource_limit);
+ current_process->GetHandleTable().Get<KResourceLimit>(resource_limit);
if (!resource_limit_object) {
LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}",
resource_limit);
- return ERR_INVALID_HANDLE;
+ return ResultInvalidHandle;
}
const auto set_result = resource_limit_object->SetLimitValue(type, static_cast<s64>(value));
if (set_result.IsError()) {
- LOG_ERROR(
- Kernel_SVC,
- "Attempted to lower resource limit ({}) for category '{}' below its current value ({})",
- resource_limit_object->GetMaxResourceValue(type), resource_type,
- resource_limit_object->GetCurrentResourceValue(type));
+ LOG_ERROR(Kernel_SVC,
+ "Attempted to lower resource limit ({}) for category '{}' below its current "
+ "value ({})",
+ resource_limit_object->GetLimitValue(type), resource_type,
+ resource_limit_object->GetCurrentValue(type));
return set_result;
}
@@ -2247,7 +2243,7 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
LOG_ERROR(Kernel_SVC,
"Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
out_process_ids_size);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
const auto& kernel = system.Kernel();
@@ -2257,7 +2253,7 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
out_process_ids, total_copy_size)) {
LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
out_process_ids, out_process_ids + total_copy_size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
auto& memory = system.Memory();
@@ -2286,7 +2282,7 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
if ((out_thread_ids_size & 0xF0000000) != 0) {
LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
out_thread_ids_size);
- return ERR_OUT_OF_RANGE;
+ return ResultOutOfRange;
}
const auto* const current_process = system.Kernel().CurrentProcess();
@@ -2296,7 +2292,7 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
!current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) {
LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
out_thread_ids, out_thread_ids + total_copy_size);
- return ERR_INVALID_ADDRESS_STATE;
+ return ResultInvalidCurrentMemory;
}
auto& memory = system.Memory();
@@ -2614,7 +2610,7 @@ void Call(Core::System& system, u32 immediate) {
kernel.EnterSVCProfile();
auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
- thread->SetContinuousOnSVC(true);
+ thread->SetIsCallingSvc();
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
: GetSVCInfo32(immediate);
@@ -2630,7 +2626,7 @@ void Call(Core::System& system, u32 immediate) {
kernel.ExitSVCProfile();
- if (!thread->IsContinuousOnSVC()) {
+ if (!thread->IsCallingSvc()) {
auto* host_context = thread->GetHostContext().get();
host_context->Rewind();
}
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
new file mode 100644
index 000000000..4af049551
--- /dev/null
+++ b/src/core/hle/kernel/svc_common.h
@@ -0,0 +1,14 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel::Svc {
+
+constexpr s32 ArgumentHandleCountMax = 0x40;
+constexpr u32 HandleWaitMask{1u << 30};
+
+} // namespace Kernel::Svc
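
HandleWaitMask is the bit the userland lock protocol ORs into a lock word next to the owner's thread handle to record that waiters exist; this reading is an assumption based on how the arbiter and condition-variable code use the constant, not something the header itself states. A sketch:

    #include <cstdint>

    constexpr uint32_t HandleWaitMask = 1u << 30;

    constexpr uint32_t MakeTag(uint32_t owner_handle, bool has_waiters) {
        return owner_handle | (has_waiters ? HandleWaitMask : 0);
    }

    constexpr uint32_t OwnerOf(uint32_t tag) {
        return tag & ~HandleWaitMask;
    }

    constexpr bool HasWaiters(uint32_t tag) {
        return (tag & HandleWaitMask) != 0;
    }

    // 0x8001 is a hypothetical thread handle value.
    static_assert(HasWaiters(MakeTag(0x8001, true)));
    static_assert(OwnerOf(MakeTag(0x8001, true)) == 0x8001);
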
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
new file mode 100644
index 000000000..a26d9f2c9
--- /dev/null
+++ b/src/core/hle/kernel/svc_results.h
@@ -0,0 +1,41 @@
+// Copyright 2018 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+// Confirmed Switch kernel error codes
+
+constexpr ResultCode ResultMaxConnectionsReached{ErrorModule::Kernel, 7};
+constexpr ResultCode ResultInvalidCapabilityDescriptor{ErrorModule::Kernel, 14};
+constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
+constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
+constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101};
+constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
+constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103};
+constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
+constexpr ResultCode ResultHandleTableFull{ErrorModule::Kernel, 105};
+constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
+constexpr ResultCode ResultInvalidMemoryPermissions{ErrorModule::Kernel, 108};
+constexpr ResultCode ResultInvalidMemoryRange{ErrorModule::Kernel, 110};
+constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
+constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
+constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
+constexpr ResultCode ResultInvalidPointer{ErrorModule::Kernel, 115};
+constexpr ResultCode ResultInvalidCombination{ErrorModule::Kernel, 116};
+constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
+constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
+constexpr ResultCode ResultOutOfRange{ErrorModule::Kernel, 119};
+constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
+constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121};
+constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
+constexpr ResultCode ResultSessionClosedByRemote{ErrorModule::Kernel, 123};
+constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
+constexpr ResultCode ResultReservedValue{ErrorModule::Kernel, 126};
+constexpr ResultCode ResultResourceLimitedExceeded{ErrorModule::Kernel, 132};
+
+} // namespace Kernel
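
For orientation, ResultCode packs the module id in the low 9 bits and the description above it, which is why these entries correspond to the raw values games log, e.g. 0xE401 for an invalid handle. A standalone sketch of the packing (MakeResult is a stand-in for the ResultCode constructor):

    #include <cstdint>

    constexpr uint32_t MakeResult(uint32_t module, uint32_t description) {
        return (module & 0x1FF) | ((description & 0x1FFF) << 9);
    }

    // ErrorModule::Kernel is module 1, so {Kernel, 114} yields the
    // familiar raw value 0xE401 for ResultInvalidHandle.
    static_assert(MakeResult(1, 114) == 0xE401);
    static_assert(MakeResult(1, 117) == 0xEA01); // ResultTimedOut
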
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 11e1d8e2d..ec463b97c 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -65,4 +65,34 @@ struct MemoryInfo {
u32 padding{};
};
+enum class SignalType : u32 {
+ Signal = 0,
+ SignalAndIncrementIfEqual = 1,
+ SignalAndModifyByWaitingCountIfEqual = 2,
+};
+
+enum class ArbitrationType : u32 {
+ WaitIfLessThan = 0,
+ DecrementAndWaitIfLessThan = 1,
+ WaitIfEqual = 2,
+};
+
+enum class YieldType : s64 {
+ WithoutCoreMigration = 0,
+ WithCoreMigration = -1,
+ ToAnyThread = -2,
+};
+
+enum class ThreadActivity : u32 {
+ Runnable = 0,
+ Paused = 1,
+};
+
+constexpr inline s32 IdealCoreDontCare = -1;
+constexpr inline s32 IdealCoreUseProcessValue = -2;
+constexpr inline s32 IdealCoreNoUpdate = -3;
+
+constexpr inline s32 LowestThreadPriority = 63;
+constexpr inline s32 HighestThreadPriority = 0;
+
} // namespace Kernel::Svc
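
The negative YieldType values double as the nanosecond argument of svcSleepThread: a positive count sleeps, while 0, -1, and -2 select the three yield flavors. A sketch of that dispatch, with placeholder handlers standing in for the scheduler's yield paths:

    #include <cstdint>

    enum class YieldType : int64_t {
        WithoutCoreMigration = 0,
        WithCoreMigration = -1,
        ToAnyThread = -2,
    };

    void SleepOrYield(int64_t nanoseconds) {
        switch (static_cast<YieldType>(nanoseconds)) {
        case YieldType::WithoutCoreMigration: /* yield, stay on this core */ break;
        case YieldType::WithCoreMigration:    /* yield, allow core migration */ break;
        case YieldType::ToAnyThread:          /* yield to any runnable thread */ break;
        default:                              /* nanoseconds > 0: real sleep */ break;
        }
    }
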
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 0b6dd9df0..96afd544b 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -7,6 +7,7 @@
#include "common/common_types.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
+#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
namespace Kernel {
@@ -57,6 +58,14 @@ void SvcWrap64(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw);
}
+// Used by SetThreadActivity
+template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+void SvcWrap64(Core::System& system) {
+ FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
+ static_cast<Svc::ThreadActivity>(Param(system, 1)))
+ .raw);
+}
+
template <ResultCode func(Core::System&, u32, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
@@ -157,9 +166,18 @@ void SvcWrap64(Core::System& system) {
.raw);
}
-template <ResultCode func(Core::System&, u32, u32*, u64*)>
+// Used by SetThreadCoreMask
+template <ResultCode func(Core::System&, Handle, s32, u64)>
void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
+ FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
+ static_cast<s32>(Param(system, 1)), Param(system, 2))
+ .raw);
+}
+
+// Used by GetThreadCoreMask
+template <ResultCode func(Core::System&, Handle, s32*, u64*)>
+void SvcWrap64(Core::System& system) {
+ s32 param_1 = 0;
u64 param_2 = 0;
const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
@@ -215,9 +233,10 @@ void SvcWrap64(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)).raw);
}
-template <ResultCode func(Core::System&, u32*, u64, u64, s64)>
+// Used by WaitSynchronization
+template <ResultCode func(Core::System&, s32*, u64, u64, s64)>
void SvcWrap64(Core::System& system) {
- u32 param_1 = 0;
+ s32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
static_cast<s64>(Param(system, 3)))
.raw;
@@ -276,18 +295,22 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64, u32, s32, s64)>
+// Used by WaitForAddress
+template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
- .raw);
+ FuncReturn(system,
+ func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
+ static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
+ .raw);
}
-template <ResultCode func(Core::System&, u64, u32, s32, s32)>
+// Used by SignalToAddress
+template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
void SvcWrap64(Core::System& system) {
- FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
- .raw);
+ FuncReturn(system,
+ func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
+ static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
+ .raw);
}
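
Each of these templates adapts the uniform guest register file to one typed SVC signature: arguments are narrowed or cast out of u64 registers, and the result travels back through the first register. A reduced model of the mechanism, where GuestRegs and Wrap64 are stand-ins for the real helpers:

    #include <array>
    #include <cstdint>

    struct GuestRegs {
        std::array<uint64_t, 8> x{}; // X0-X7 carry SVC arguments and results
    };

    using ResultRaw = uint32_t;

    // One wrapper instantiation per SVC signature; a dispatch table can then
    // store the resulting void(GuestRegs&) function pointers uniformly.
    template <ResultRaw (*func)(uint64_t, int32_t, int64_t)>
    void Wrap64(GuestRegs& regs) {
        regs.x[0] = func(regs.x[0], static_cast<int32_t>(regs.x[1]),
                         static_cast<int64_t>(regs.x[2]));
    }
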
////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -467,12 +490,35 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
+// Used by GetThreadCoreMask32
+template <ResultCode func(Core::System&, Handle, s32*, u32*, u32*)>
+void SvcWrap32(Core::System& system) {
+ s32 param_1 = 0;
+ u32 param_2 = 0;
+ u32 param_3 = 0;
+
+ const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
+ system.CurrentArmInterface().SetReg(1, param_1);
+ system.CurrentArmInterface().SetReg(2, param_2);
+ system.CurrentArmInterface().SetReg(3, param_3);
+ FuncReturn(system, retval);
+}
+
// Used by SignalProcessWideKey32
template <void func(Core::System&, u32, s32)>
void SvcWrap32(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1)));
}
+// Used by SetThreadActivity32
+template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
+ static_cast<Svc::ThreadActivity>(Param(system, 1)))
+ .raw;
+ FuncReturn(system, retval);
+}
+
// Used by SetThreadPriority32
template <ResultCode func(Core::System&, Handle, u32)>
void SvcWrap32(Core::System& system) {
@@ -481,7 +527,7 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
-// Used by SetThreadCoreMask32
+// Used by SetMemoryAttribute32
template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
@@ -491,6 +537,16 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
+// Used by SetThreadCoreMask32
+template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval =
+ func(system, static_cast<Handle>(Param(system, 0)), static_cast<s32>(Param(system, 1)),
+ static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
+ .raw;
+ FuncReturn(system, retval);
+}
+
// Used by WaitProcessWideKeyAtomic32
template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
void SvcWrap32(Core::System& system) {
@@ -503,22 +559,23 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitForAddress32
-template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)>
+template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
- static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)),
- static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4)))
+ static_cast<Svc::ArbitrationType>(Param(system, 1)),
+ static_cast<s32>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
+ static_cast<u32>(Param(system, 4)))
.raw;
FuncReturn(system, retval);
}
// Used by SignalToAddress32
-template <ResultCode func(Core::System&, u32, u32, s32, s32)>
+template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
void SvcWrap32(Core::System& system) {
- const u32 retval =
- func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
- static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
- .raw;
+ const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
+ static_cast<Svc::SignalType>(Param(system, 1)),
+ static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
+ .raw;
FuncReturn(system, retval);
}
@@ -539,9 +596,9 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitSynchronization32
-template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
+template <ResultCode func(Core::System&, u32, u32, s32, u32, s32*)>
void SvcWrap32(Core::System& system) {
- u32 param_1 = 0;
+ s32 param_1 = 0;
const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
Param32(system, 3), &param_1)
.raw;
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
deleted file mode 100644
index d3f520ea2..000000000
--- a/src/core/hle/kernel/synchronization.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "core/core.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/synchronization.h"
-#include "core/hle/kernel/synchronization_object.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/kernel/time_manager.h"
-
-namespace Kernel {
-
-Synchronization::Synchronization(Core::System& system) : system{system} {}
-
-void Synchronization::SignalObject(SynchronizationObject& obj) const {
- auto& kernel = system.Kernel();
- KScopedSchedulerLock lock(kernel);
- if (obj.IsSignaled()) {
- for (auto thread : obj.GetWaitingThreads()) {
- if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
- if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
- ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
- ASSERT(thread->IsWaitingSync());
- }
- thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
- thread->ResumeFromWait();
- }
- }
- obj.ClearWaitingThreads();
- }
-}
-
-std::pair<ResultCode, Handle> Synchronization::WaitFor(
- std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
- auto& kernel = system.Kernel();
- auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
- Handle event_handle = InvalidHandle;
- {
- KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
- const auto itr =
- std::find_if(sync_objects.begin(), sync_objects.end(),
- [thread](const std::shared_ptr<SynchronizationObject>& object) {
- return object->IsSignaled();
- });
-
- if (itr != sync_objects.end()) {
- // We found a ready object, acquire it and set the result value
- SynchronizationObject* object = itr->get();
- object->Acquire(thread);
- const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
- lock.CancelSleep();
- return {RESULT_SUCCESS, index};
- }
-
- if (nano_seconds == 0) {
- lock.CancelSleep();
- return {RESULT_TIMEOUT, InvalidHandle};
- }
-
- if (thread->IsPendingTermination()) {
- lock.CancelSleep();
- return {ERR_THREAD_TERMINATING, InvalidHandle};
- }
-
- if (thread->IsSyncCancelled()) {
- thread->SetSyncCancelled(false);
- lock.CancelSleep();
- return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
- }
-
- for (auto& object : sync_objects) {
- object->AddWaitingThread(SharedFrom(thread));
- }
-
- thread->SetSynchronizationObjects(&sync_objects);
- thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
- thread->SetStatus(ThreadStatus::WaitSynch);
- thread->SetWaitingSync(true);
- }
- thread->SetWaitingSync(false);
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
-
- {
- KScopedSchedulerLock lock(kernel);
- ResultCode signaling_result = thread->GetSignalingResult();
- SynchronizationObject* signaling_object = thread->GetSignalingObject();
- thread->SetSynchronizationObjects(nullptr);
- auto shared_thread = SharedFrom(thread);
- for (auto& obj : sync_objects) {
- obj->RemoveWaitingThread(shared_thread);
- }
- if (signaling_object != nullptr) {
- const auto itr = std::find_if(
- sync_objects.begin(), sync_objects.end(),
- [signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
- return object.get() == signaling_object;
- });
- ASSERT(itr != sync_objects.end());
- signaling_object->Acquire(thread);
- const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
- return {signaling_result, index};
- }
- return {signaling_result, -1};
- }
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization.h b/src/core/hle/kernel/synchronization.h
deleted file mode 100644
index 379f4b1d3..000000000
--- a/src/core/hle/kernel/synchronization.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <memory>
-#include <utility>
-#include <vector>
-
-#include "core/hle/kernel/object.h"
-#include "core/hle/result.h"
-
-namespace Core {
-class System;
-} // namespace Core
-
-namespace Kernel {
-
-class SynchronizationObject;
-
-/**
- * The 'Synchronization' class is an interface for handling synchronization methods
- * used by Synchronization objects and synchronization SVCs. This centralizes processing of
- * such
- */
-class Synchronization {
-public:
- explicit Synchronization(Core::System& system);
-
- /// Signals a synchronization object, waking up all its waiting threads
- void SignalObject(SynchronizationObject& obj) const;
-
- /// Tries to see if waiting for any of the sync_objects is necessary, if not
- /// it returns Success and the handle index of the signaled sync object. In
- /// case not, the current thread will be locked and wait for nano_seconds or
- /// for a synchronization object to signal.
- std::pair<ResultCode, Handle> WaitFor(
- std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds);
-
-private:
- Core::System& system;
-};
-} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization_object.cpp b/src/core/hle/kernel/synchronization_object.cpp
deleted file mode 100644
index ba4d39157..000000000
--- a/src/core/hle/kernel/synchronization_object.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "common/logging/log.h"
-#include "core/core.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/synchronization.h"
-#include "core/hle/kernel/synchronization_object.h"
-#include "core/hle/kernel/thread.h"
-
-namespace Kernel {
-
-SynchronizationObject::SynchronizationObject(KernelCore& kernel) : Object{kernel} {}
-SynchronizationObject::~SynchronizationObject() = default;
-
-void SynchronizationObject::Signal() {
- kernel.Synchronization().SignalObject(*this);
-}
-
-void SynchronizationObject::AddWaitingThread(std::shared_ptr<Thread> thread) {
- auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
- if (itr == waiting_threads.end())
- waiting_threads.push_back(std::move(thread));
-}
-
-void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) {
- auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
- // If a thread passed multiple handles to the same object,
- // the kernel might attempt to remove the thread from the object's
- // waiting threads list multiple times.
- if (itr != waiting_threads.end())
- waiting_threads.erase(itr);
-}
-
-void SynchronizationObject::ClearWaitingThreads() {
- waiting_threads.clear();
-}
-
-const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
- return waiting_threads;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h
deleted file mode 100644
index 7408ed51f..000000000
--- a/src/core/hle/kernel/synchronization_object.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <atomic>
-#include <memory>
-#include <vector>
-
-#include "core/hle/kernel/object.h"
-
-namespace Kernel {
-
-class KernelCore;
-class Synchronization;
-class Thread;
-
-/// Class that represents a Kernel object that a thread can be waiting on
-class SynchronizationObject : public Object {
-public:
- explicit SynchronizationObject(KernelCore& kernel);
- ~SynchronizationObject() override;
-
- /**
- * Check if the specified thread should wait until the object is available
- * @param thread The thread about which we're deciding.
- * @return True if the current thread should wait due to this object being unavailable
- */
- virtual bool ShouldWait(const Thread* thread) const = 0;
-
- /// Acquire/lock the object for the specified thread if it is available
- virtual void Acquire(Thread* thread) = 0;
-
- /// Signal this object
- virtual void Signal();
-
- virtual bool IsSignaled() const {
- return is_signaled;
- }
-
- /**
- * Add a thread to wait on this object
- * @param thread Pointer to thread to add
- */
- void AddWaitingThread(std::shared_ptr<Thread> thread);
-
- /**
- * Removes a thread from waiting on this object (e.g. if it was resumed already)
- * @param thread Pointer to thread to remove
- */
- void RemoveWaitingThread(std::shared_ptr<Thread> thread);
-
- /// Get a const reference to the waiting threads list for debug use
- const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
-
- void ClearWaitingThreads();
-
-protected:
- std::atomic_bool is_signaled{}; // Tells if this sync object is signaled
-
-private:
- /// Threads waiting for this object to become available
- std::vector<std::shared_ptr<Thread>> waiting_threads;
-};
-
-// Specialization of DynamicObjectCast for SynchronizationObjects
-template <>
-inline std::shared_ptr<SynchronizationObject> DynamicObjectCast<SynchronizationObject>(
- std::shared_ptr<Object> object) {
- if (object != nullptr && object->IsWaitable()) {
- return std::static_pointer_cast<SynchronizationObject>(object);
- }
- return nullptr;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
deleted file mode 100644
index a4f9e0d97..000000000
--- a/src/core/hle/kernel/thread.cpp
+++ /dev/null
@@ -1,478 +0,0 @@
-// Copyright 2014 Citra Emulator Project / PPSSPP Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include <cinttypes>
-#include <optional>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "common/fiber.h"
-#include "common/logging/log.h"
-#include "common/thread_queue_list.h"
-#include "core/core.h"
-#include "core/cpu_manager.h"
-#include "core/hardware_properties.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/kernel/time_manager.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-#ifdef ARCHITECTURE_x86_64
-#include "core/arm/dynarmic/arm_dynarmic_32.h"
-#include "core/arm/dynarmic/arm_dynarmic_64.h"
-#endif
-
-namespace Kernel {
-
-bool Thread::ShouldWait(const Thread* thread) const {
- return status != ThreadStatus::Dead;
-}
-
-bool Thread::IsSignaled() const {
- return status == ThreadStatus::Dead;
-}
-
-void Thread::Acquire(Thread* thread) {
- ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
-}
-
-Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
-Thread::~Thread() = default;
-
-void Thread::Stop() {
- {
- KScopedSchedulerLock lock(kernel);
- SetStatus(ThreadStatus::Dead);
- Signal();
- kernel.GlobalHandleTable().Close(global_handle);
-
- if (owner_process) {
- owner_process->UnregisterThread(this);
-
- // Mark the TLS slot in the thread's page as free.
- owner_process->FreeTLSRegion(tls_address);
- }
- has_exited = true;
- }
- global_handle = 0;
-}
-
-void Thread::ResumeFromWait() {
- KScopedSchedulerLock lock(kernel);
- switch (status) {
- case ThreadStatus::Paused:
- case ThreadStatus::WaitSynch:
- case ThreadStatus::WaitHLEEvent:
- case ThreadStatus::WaitSleep:
- case ThreadStatus::WaitIPC:
- case ThreadStatus::WaitMutex:
- case ThreadStatus::WaitCondVar:
- case ThreadStatus::WaitArb:
- case ThreadStatus::Dormant:
- break;
-
- case ThreadStatus::Ready:
- // The thread's wakeup callback must have already been cleared when the thread was first
- // awoken.
- ASSERT(hle_callback == nullptr);
- // If the thread is waiting on multiple wait objects, it might be awoken more than once
- // before actually resuming. We can ignore subsequent wakeups if the thread status has
- // already been set to ThreadStatus::Ready.
- return;
- case ThreadStatus::Dead:
- // This should never happen, as threads must complete before being stopped.
- DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
- GetObjectId());
- return;
- }
-
- SetStatus(ThreadStatus::Ready);
-}
-
-void Thread::OnWakeUp() {
- KScopedSchedulerLock lock(kernel);
- SetStatus(ThreadStatus::Ready);
-}
-
-ResultCode Thread::Start() {
- KScopedSchedulerLock lock(kernel);
- SetStatus(ThreadStatus::Ready);
- return RESULT_SUCCESS;
-}
-
-void Thread::CancelWait() {
- KScopedSchedulerLock lock(kernel);
- if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
- is_sync_cancelled = true;
- return;
- }
- // TODO(Blinkhawk): Implement cancel of server session
- is_sync_cancelled = false;
- SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
- SetStatus(ThreadStatus::Ready);
-}
-
-static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
- u32 entry_point, u32 arg) {
- context = {};
- context.cpu_registers[0] = arg;
- context.cpu_registers[15] = entry_point;
- context.cpu_registers[13] = stack_top;
-}
-
-static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
- VAddr entry_point, u64 arg) {
- context = {};
- context.cpu_registers[0] = arg;
- context.pc = entry_point;
- context.sp = stack_top;
- // TODO(merry): Perform a hardware test to determine the below value.
- context.fpcr = 0;
-}
-
-std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
- return host_context;
-}
-
-ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
- std::string name, VAddr entry_point, u32 priority,
- u64 arg, s32 processor_id, VAddr stack_top,
- Process* owner_process) {
- std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
- void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
- return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
- owner_process, std::move(init_func), init_func_parameter);
-}
-
-ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
- std::string name, VAddr entry_point, u32 priority,
- u64 arg, s32 processor_id, VAddr stack_top,
- Process* owner_process,
- std::function<void(void*)>&& thread_start_func,
- void* thread_start_parameter) {
- auto& kernel = system.Kernel();
- // Check if priority is in ranged. Lowest priority -> highest priority id.
- if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
- LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
- return ERR_INVALID_THREAD_PRIORITY;
- }
-
- if (processor_id > THREADPROCESSORID_MAX) {
- LOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id);
- return ERR_INVALID_PROCESSOR_ID;
- }
-
- if (owner_process) {
- if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) {
- LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
- // TODO (bunnei): Find the correct error code to use here
- return RESULT_UNKNOWN;
- }
- }
-
- std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
-
- thread->thread_id = kernel.CreateNewThreadID();
- thread->status = ThreadStatus::Dormant;
- thread->entry_point = entry_point;
- thread->stack_top = stack_top;
- thread->disable_count = 1;
- thread->tpidr_el0 = 0;
- thread->nominal_priority = thread->current_priority = priority;
- thread->schedule_count = -1;
- thread->last_scheduled_tick = 0;
- thread->processor_id = processor_id;
- thread->ideal_core = processor_id;
- thread->affinity_mask.SetAffinity(processor_id, true);
- thread->wait_objects = nullptr;
- thread->mutex_wait_address = 0;
- thread->condvar_wait_address = 0;
- thread->wait_handle = 0;
- thread->name = std::move(name);
- thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
- thread->owner_process = owner_process;
- thread->type = type_flags;
- if ((type_flags & THREADTYPE_IDLE) == 0) {
- auto& scheduler = kernel.GlobalSchedulerContext();
- scheduler.AddThread(thread);
- }
- if (owner_process) {
- thread->tls_address = thread->owner_process->CreateTLSRegion();
- thread->owner_process->RegisterThread(thread.get());
- } else {
- thread->tls_address = 0;
- }
-
- // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
- // to initialize the context
- if ((type_flags & THREADTYPE_HLE) == 0) {
- ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
- static_cast<u32>(entry_point), static_cast<u32>(arg));
- ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
- }
- thread->host_context =
- std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
-
- return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
-}
-
-void Thread::SetPriority(u32 priority) {
- KScopedSchedulerLock lock(kernel);
- ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
- "Invalid priority value.");
- nominal_priority = priority;
- UpdatePriority();
-}
-
-void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
- signaling_object = object;
- signaling_result = result;
-}
-
-s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
- ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
- const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
- return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
-}
-
-VAddr Thread::GetCommandBufferAddress() const {
- // Offset from the start of TLS at which the IPC command buffer begins.
- constexpr u64 command_header_offset = 0x80;
- return GetTLSAddress() + command_header_offset;
-}
-
-void Thread::SetStatus(ThreadStatus new_status) {
- if (new_status == status) {
- return;
- }
-
- switch (new_status) {
- case ThreadStatus::Ready:
- SetSchedulingStatus(ThreadSchedStatus::Runnable);
- break;
- case ThreadStatus::Dormant:
- SetSchedulingStatus(ThreadSchedStatus::None);
- break;
- case ThreadStatus::Dead:
- SetSchedulingStatus(ThreadSchedStatus::Exited);
- break;
- default:
- SetSchedulingStatus(ThreadSchedStatus::Paused);
- break;
- }
-
- status = new_status;
-}
-
-void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
- if (thread->lock_owner.get() == this) {
- // If the thread is already waiting for this thread to release the mutex, ensure that the
- // waiters list is consistent and return without doing anything.
- const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(iter != wait_mutex_threads.end());
- return;
- }
-
- // A thread can't wait on two different mutexes at the same time.
- ASSERT(thread->lock_owner == nullptr);
-
- // Ensure that the thread is not already in the list of mutex waiters
- const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(iter == wait_mutex_threads.end());
-
- // Keep the list in an ordered fashion
- const auto insertion_point = std::find_if(
- wait_mutex_threads.begin(), wait_mutex_threads.end(),
- [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
- wait_mutex_threads.insert(insertion_point, thread);
- thread->lock_owner = SharedFrom(this);
-
- UpdatePriority();
-}
-
-void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
- ASSERT(thread->lock_owner.get() == this);
-
- // Ensure that the thread is in the list of mutex waiters
- const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(iter != wait_mutex_threads.end());
-
- wait_mutex_threads.erase(iter);
-
- thread->lock_owner = nullptr;
- UpdatePriority();
-}
-
-void Thread::UpdatePriority() {
- // If any of the threads waiting on the mutex have a higher priority
- // (taking into account priority inheritance), then this thread inherits
- // that thread's priority.
- u32 new_priority = nominal_priority;
- if (!wait_mutex_threads.empty()) {
- if (wait_mutex_threads.front()->current_priority < new_priority) {
- new_priority = wait_mutex_threads.front()->current_priority;
- }
- }
-
- if (new_priority == current_priority) {
- return;
- }
-
- if (GetStatus() == ThreadStatus::WaitCondVar) {
- owner_process->RemoveConditionVariableThread(SharedFrom(this));
- }
-
- SetCurrentPriority(new_priority);
-
- if (GetStatus() == ThreadStatus::WaitCondVar) {
- owner_process->InsertConditionVariableThread(SharedFrom(this));
- }
-
- if (!lock_owner) {
- return;
- }
-
- // Ensure that the thread is within the correct location in the waiting list.
- auto old_owner = lock_owner;
- lock_owner->RemoveMutexWaiter(SharedFrom(this));
- old_owner->AddMutexWaiter(SharedFrom(this));
-
- // Recursively update the priority of the thread that depends on the priority of this one.
- lock_owner->UpdatePriority();
-}
-
-bool Thread::AllSynchronizationObjectsReady() const {
- return std::none_of(wait_objects->begin(), wait_objects->end(),
- [this](const std::shared_ptr<SynchronizationObject>& object) {
- return object->ShouldWait(this);
- });
-}
-
-bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
- ASSERT(hle_callback);
- return hle_callback(std::move(thread));
-}
-
-ResultCode Thread::SetActivity(ThreadActivity value) {
- KScopedSchedulerLock lock(kernel);
-
- auto sched_status = GetSchedulingStatus();
-
- if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
- return ERR_INVALID_STATE;
- }
-
- if (IsPendingTermination()) {
- return RESULT_SUCCESS;
- }
-
- if (value == ThreadActivity::Paused) {
- if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
- return ERR_INVALID_STATE;
- }
- AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
- } else {
- if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
- return ERR_INVALID_STATE;
- }
- RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
- }
- return RESULT_SUCCESS;
-}
-
-ResultCode Thread::Sleep(s64 nanoseconds) {
- Handle event_handle{};
- {
- KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
- SetStatus(ThreadStatus::WaitSleep);
- }
-
- if (event_handle != InvalidHandle) {
- auto& time_manager = kernel.TimeManager();
- time_manager.UnscheduleTimeEvent(event_handle);
- }
- return RESULT_SUCCESS;
-}
-
-void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
- const u32 old_state = scheduling_state;
- pausing_state |= static_cast<u32>(flag);
- const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
- scheduling_state = base_scheduling | pausing_state;
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
-}
-
-void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
- const u32 old_state = scheduling_state;
- pausing_state &= ~static_cast<u32>(flag);
- const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
- scheduling_state = base_scheduling | pausing_state;
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
-}
-
-void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
- const u32 old_state = scheduling_state;
- scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
- static_cast<u32>(new_status);
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
-}
-
-void Thread::SetCurrentPriority(u32 new_priority) {
- const u32 old_priority = std::exchange(current_priority, new_priority);
- KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
- old_priority);
-}
-
-ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
- KScopedSchedulerLock lock(kernel);
- const auto HighestSetCore = [](u64 mask, u32 max_cores) {
- for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
- if (((mask >> core) & 1) != 0) {
- return core;
- }
- }
- return -1;
- };
-
- const bool use_override = affinity_override_count != 0;
- if (new_core == THREADPROCESSORID_DONT_UPDATE) {
- new_core = use_override ? ideal_core_override : ideal_core;
- if ((new_affinity_mask & (1ULL << new_core)) == 0) {
- LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}",
- new_core, new_affinity_mask);
- return ERR_INVALID_COMBINATION;
- }
- }
- if (use_override) {
- ideal_core_override = new_core;
- } else {
- const auto old_affinity_mask = affinity_mask;
- affinity_mask.SetAffinityMask(new_affinity_mask);
- ideal_core = new_core;
- if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
- const s32 old_core = processor_id;
- if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
- if (static_cast<s32>(ideal_core) < 0) {
- processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
- Core::Hardware::NUM_CPU_CORES);
- } else {
- processor_id = ideal_core;
- }
- }
- KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
- }
- }
- return RESULT_SUCCESS;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
deleted file mode 100644
index 11ef29888..000000000
--- a/src/core/hle/kernel/thread.h
+++ /dev/null
@@ -1,731 +0,0 @@
-// Copyright 2014 Citra Emulator Project / PPSSPP Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <array>
-#include <functional>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "common/common_types.h"
-#include "common/spin_lock.h"
-#include "core/arm/arm_interface.h"
-#include "core/hle/kernel/k_affinity_mask.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/synchronization_object.h"
-#include "core/hle/result.h"
-
-namespace Common {
-class Fiber;
-}
-
-namespace Core {
-class ARM_Interface;
-class System;
-} // namespace Core
-
-namespace Kernel {
-
-class GlobalSchedulerContext;
-class KernelCore;
-class Process;
-class KScheduler;
-
-enum ThreadPriority : u32 {
- THREADPRIO_HIGHEST = 0, ///< Highest thread priority
- THREADPRIO_MAX_CORE_MIGRATION = 2, ///< Highest priority for a core migration
- THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
- THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
- THREADPRIO_LOWEST = 63, ///< Lowest thread priority
- THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities.
-};
-
-enum ThreadType : u32 {
- THREADTYPE_USER = 0x1,
- THREADTYPE_KERNEL = 0x2,
- THREADTYPE_HLE = 0x4,
- THREADTYPE_IDLE = 0x8,
- THREADTYPE_SUSPEND = 0x10,
-};
-
-enum ThreadProcessorId : s32 {
- /// Indicates that no particular processor core is preferred.
- THREADPROCESSORID_DONT_CARE = -1,
-
- /// Run thread on the ideal core specified by the process.
- THREADPROCESSORID_IDEAL = -2,
-
- /// Indicates that the preferred processor ID shouldn't be updated in
- /// a core mask setting operation.
- THREADPROCESSORID_DONT_UPDATE = -3,
-
- THREADPROCESSORID_0 = 0, ///< Run thread on core 0
- THREADPROCESSORID_1 = 1, ///< Run thread on core 1
- THREADPROCESSORID_2 = 2, ///< Run thread on core 2
- THREADPROCESSORID_3 = 3, ///< Run thread on core 3
- THREADPROCESSORID_MAX = 4, ///< Processor ID must be less than this
-
- /// Allowed CPU mask
- THREADPROCESSORID_DEFAULT_MASK = (1 << THREADPROCESSORID_0) | (1 << THREADPROCESSORID_1) |
- (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3)
-};
-
-enum class ThreadStatus {
- Ready, ///< Ready to run
- Paused, ///< Paused by SetThreadActivity or debug
- WaitHLEEvent, ///< Waiting for hle event to finish
- WaitSleep, ///< Waiting due to a SleepThread SVC
- WaitIPC, ///< Waiting for the reply from an IPC request
- WaitSynch, ///< Waiting due to WaitSynchronization
- WaitMutex, ///< Waiting due to an ArbitrateLock svc
- WaitCondVar, ///< Waiting due to an WaitProcessWideKey svc
- WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
- Dormant, ///< Created but not yet made ready
- Dead ///< Run to completion, or forcefully terminated
-};
-
-enum class ThreadWakeupReason {
- Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
- Timeout // The thread was woken up due to a wait timeout.
-};
-
-enum class ThreadActivity : u32 {
- Normal = 0,
- Paused = 1,
-};
-
-enum class ThreadSchedStatus : u32 {
- None = 0,
- Paused = 1,
- Runnable = 2,
- Exited = 3,
-};
-
-enum class ThreadSchedFlags : u32 {
- ProcessPauseFlag = 1 << 4,
- ThreadPauseFlag = 1 << 5,
- ProcessDebugPauseFlag = 1 << 6,
- KernelInitPauseFlag = 1 << 8,
-};
-
-enum class ThreadSchedMasks : u32 {
- LowMask = 0x000f,
- HighMask = 0xfff0,
- ForcePauseMask = 0x0070,
-};
-
-class Thread final : public SynchronizationObject {
-public:
- explicit Thread(KernelCore& kernel);
- ~Thread() override;
-
- using MutexWaitingThreads = std::vector<std::shared_ptr<Thread>>;
-
- using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
- using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
-
- using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;
-
- using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;
-
- /**
- * Creates and returns a new thread. The new thread is immediately scheduled
- * @param system The instance of the whole system
- * @param name The friendly name desired for the thread
- * @param entry_point The address at which the thread should start execution
- * @param priority The thread's priority
- * @param arg User data to pass to the thread
- * @param processor_id The ID(s) of the processors on which the thread is desired to be run
- * @param stack_top The address of the thread's stack top
- * @param owner_process The parent process for the thread, if null, it's a kernel thread
- * @return A shared pointer to the newly created thread
- */
- static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
- std::string name, VAddr entry_point,
- u32 priority, u64 arg, s32 processor_id,
- VAddr stack_top, Process* owner_process);
-
- /**
- * Creates and returns a new thread. The new thread is immediately scheduled
- * @param system The instance of the whole system
- * @param name The friendly name desired for the thread
- * @param entry_point The address at which the thread should start execution
- * @param priority The thread's priority
- * @param arg User data to pass to the thread
- * @param processor_id The ID(s) of the processors on which the thread is desired to be run
- * @param stack_top The address of the thread's stack top
- * @param owner_process The parent process for the thread, if null, it's a kernel thread
- * @param thread_start_func The function where the host context will start.
- * @param thread_start_parameter The parameter which will passed to host context on init
- * @return A shared pointer to the newly created thread
- */
- static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
- std::string name, VAddr entry_point,
- u32 priority, u64 arg, s32 processor_id,
- VAddr stack_top, Process* owner_process,
- std::function<void(void*)>&& thread_start_func,
- void* thread_start_parameter);
-
- std::string GetName() const override {
- return name;
- }
-
- void SetName(std::string new_name) {
- name = std::move(new_name);
- }
-
- std::string GetTypeName() const override {
- return "Thread";
- }
-
- static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
- HandleType GetHandleType() const override {
- return HANDLE_TYPE;
- }
-
- bool ShouldWait(const Thread* thread) const override;
- void Acquire(Thread* thread) override;
- bool IsSignaled() const override;
-
- /**
- * Gets the thread's current priority
- * @return The current thread's priority
- */
- u32 GetPriority() const {
- return current_priority;
- }
-
- /**
- * Gets the thread's nominal priority.
- * @return The current thread's nominal priority.
- */
- u32 GetNominalPriority() const {
- return nominal_priority;
- }
-
- /**
- * Sets the thread's current priority
- * @param priority The new priority
- */
- void SetPriority(u32 priority);
-
- /// Adds a thread to the list of threads that are waiting for a lock held by this thread.
- void AddMutexWaiter(std::shared_ptr<Thread> thread);
-
- /// Removes a thread from the list of threads that are waiting for a lock held by this thread.
- void RemoveMutexWaiter(std::shared_ptr<Thread> thread);
-
- /// Recalculates the current priority taking into account priority inheritance.
- void UpdatePriority();
-
- /// Changes the core that the thread is running or scheduled to run on.
- ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
-
- /**
- * Gets the thread's thread ID
- * @return The thread's ID
- */
- u64 GetThreadID() const {
- return thread_id;
- }
-
- /// Resumes a thread from waiting
- void ResumeFromWait();
-
- void OnWakeUp();
-
- ResultCode Start();
-
- /// Cancels a waiting operation that this thread may or may not be within.
- ///
- /// When the thread is within a waiting state, this will set the thread's
- /// waiting result to signal a canceled wait. The function will then resume
- /// this thread.
- void CancelWait();
-
- void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);
-
- SynchronizationObject* GetSignalingObject() const {
- return signaling_object;
- }
-
- ResultCode GetSignalingResult() const {
- return signaling_result;
- }
-
- /**
- * Retrieves the index that this particular object occupies in the list of objects
- * that the thread passed to WaitSynchronization, starting the search from the last element.
- *
- * It is used to set the output index of WaitSynchronization when the thread is awakened.
- *
- * When a thread wakes up due to an object signal, the kernel will use the index of the last
- * matching object in the wait objects list in case of having multiple instances of the same
- * object in the list.
- *
- * @param object Object to query the index of.
- */
- s32 GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const;
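// A minimal sketch of the reverse search described above (assumed shape;
// requires <algorithm>): find the last matching entry, then convert the
// reverse iterator back into a forward index.
const auto it = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
if (it == wait_objects->rend()) {
    return -1; // object is not in the wait list
}
return static_cast<s32>(std::distance(it, wait_objects->rend()) - 1);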
-
- /**
- * Stops the thread, invalidating it for further use.
- */
- void Stop();
-
- /**
- * Returns the Thread Local Storage address of this thread.
- * @return VAddr of the thread's TLS
- */
- VAddr GetTLSAddress() const {
- return tls_address;
- }
-
- /**
- * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
- * @return The value of the TPIDR_EL0 register.
- */
- u64 GetTPIDR_EL0() const {
- return tpidr_el0;
- }
-
- /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
- void SetTPIDR_EL0(u64 value) {
- tpidr_el0 = value;
- }
-
- /**
- * Returns the address of this thread's command buffer, located in its TLS.
- * @return VAddr of the thread's command buffer.
- */
- VAddr GetCommandBufferAddress() const;
-
- ThreadContext32& GetContext32() {
- return context_32;
- }
-
- const ThreadContext32& GetContext32() const {
- return context_32;
- }
-
- ThreadContext64& GetContext64() {
- return context_64;
- }
-
- const ThreadContext64& GetContext64() const {
- return context_64;
- }
-
- bool IsHLEThread() const {
- return (type & THREADTYPE_HLE) != 0;
- }
-
- bool IsSuspendThread() const {
- return (type & THREADTYPE_SUSPEND) != 0;
- }
-
- bool IsIdleThread() const {
- return (type & THREADTYPE_IDLE) != 0;
- }
-
- bool WasRunning() const {
- return was_running;
- }
-
- void SetWasRunning(bool value) {
- was_running = value;
- }
-
- std::shared_ptr<Common::Fiber>& GetHostContext();
-
- ThreadStatus GetStatus() const {
- return status;
- }
-
- void SetStatus(ThreadStatus new_status);
-
- s64 GetLastScheduledTick() const {
- return this->last_scheduled_tick;
- }
-
- void SetLastScheduledTick(s64 tick) {
- this->last_scheduled_tick = tick;
- }
-
- u64 GetTotalCPUTimeTicks() const {
- return total_cpu_time_ticks;
- }
-
- void UpdateCPUTimeTicks(u64 ticks) {
- total_cpu_time_ticks += ticks;
- }
-
- s32 GetProcessorID() const {
- return processor_id;
- }
-
- s32 GetActiveCore() const {
- return GetProcessorID();
- }
-
- void SetProcessorID(s32 new_core) {
- processor_id = new_core;
- }
-
- void SetActiveCore(s32 new_core) {
- processor_id = new_core;
- }
-
- Process* GetOwnerProcess() {
- return owner_process;
- }
-
- const Process* GetOwnerProcess() const {
- return owner_process;
- }
-
- const ThreadSynchronizationObjects& GetSynchronizationObjects() const {
- return *wait_objects;
- }
-
- void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) {
- wait_objects = objects;
- }
-
- void ClearSynchronizationObjects() {
- for (const auto& waiting_object : *wait_objects) {
- waiting_object->RemoveWaitingThread(SharedFrom(this));
- }
- wait_objects->clear();
- }
-
- /// Determines whether all the objects this thread is waiting on are ready.
- bool AllSynchronizationObjectsReady() const;
-
- const MutexWaitingThreads& GetMutexWaitingThreads() const {
- return wait_mutex_threads;
- }
-
- Thread* GetLockOwner() const {
- return lock_owner.get();
- }
-
- void SetLockOwner(std::shared_ptr<Thread> owner) {
- lock_owner = std::move(owner);
- }
-
- VAddr GetCondVarWaitAddress() const {
- return condvar_wait_address;
- }
-
- void SetCondVarWaitAddress(VAddr address) {
- condvar_wait_address = address;
- }
-
- VAddr GetMutexWaitAddress() const {
- return mutex_wait_address;
- }
-
- void SetMutexWaitAddress(VAddr address) {
- mutex_wait_address = address;
- }
-
- Handle GetWaitHandle() const {
- return wait_handle;
- }
-
- void SetWaitHandle(Handle handle) {
- wait_handle = handle;
- }
-
- VAddr GetArbiterWaitAddress() const {
- return arb_wait_address;
- }
-
- void SetArbiterWaitAddress(VAddr address) {
- arb_wait_address = address;
- }
-
- bool HasHLECallback() const {
- return hle_callback != nullptr;
- }
-
- void SetHLECallback(HLECallback callback) {
- hle_callback = std::move(callback);
- }
-
- void SetHLETimeEvent(Handle time_event) {
- hle_time_event = time_event;
- }
-
- void SetHLESyncObject(SynchronizationObject* object) {
- hle_object = object;
- }
-
- Handle GetHLETimeEvent() const {
- return hle_time_event;
- }
-
- SynchronizationObject* GetHLESyncObject() const {
- return hle_object;
- }
-
- void InvalidateHLECallback() {
- SetHLECallback(nullptr);
- }
-
- bool InvokeHLECallback(std::shared_ptr<Thread> thread);
-
- u32 GetIdealCore() const {
- return ideal_core;
- }
-
- const KAffinityMask& GetAffinityMask() const {
- return affinity_mask;
- }
-
- ResultCode SetActivity(ThreadActivity value);
-
- /// Sleeps this thread for the given amount of nanoseconds.
- ResultCode Sleep(s64 nanoseconds);
-
- s64 GetYieldScheduleCount() const {
- return this->schedule_count;
- }
-
- void SetYieldScheduleCount(s64 count) {
- this->schedule_count = count;
- }
-
- ThreadSchedStatus GetSchedulingStatus() const {
- return static_cast<ThreadSchedStatus>(scheduling_state &
- static_cast<u32>(ThreadSchedMasks::LowMask));
- }
-
- bool IsRunnable() const {
- return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
- }
-
- bool IsRunning() const {
- return is_running;
- }
-
- void SetIsRunning(bool value) {
- is_running = value;
- }
-
- bool IsSyncCancelled() const {
- return is_sync_cancelled;
- }
-
- void SetSyncCancelled(bool value) {
- is_sync_cancelled = value;
- }
-
- Handle GetGlobalHandle() const {
- return global_handle;
- }
-
- bool IsWaitingForArbitration() const {
- return waiting_for_arbitration;
- }
-
- void WaitForArbitration(bool set) {
- waiting_for_arbitration = set;
- }
-
- bool IsWaitingSync() const {
- return is_waiting_on_sync;
- }
-
- void SetWaitingSync(bool is_waiting) {
- is_waiting_on_sync = is_waiting;
- }
-
- bool IsPendingTermination() const {
- return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited;
- }
-
- bool IsPaused() const {
- return pausing_state != 0;
- }
-
- bool IsContinuousOnSVC() const {
- return is_continuous_on_svc;
- }
-
- void SetContinuousOnSVC(bool is_continuous) {
- is_continuous_on_svc = is_continuous;
- }
-
- bool IsPhantomMode() const {
- return is_phantom_mode;
- }
-
- void SetPhantomMode(bool phantom) {
- is_phantom_mode = phantom;
- }
-
- bool HasExited() const {
- return has_exited;
- }
-
- class QueueEntry {
- public:
- constexpr QueueEntry() = default;
-
- constexpr void Initialize() {
- this->prev = nullptr;
- this->next = nullptr;
- }
-
- constexpr Thread* GetPrev() const {
- return this->prev;
- }
- constexpr Thread* GetNext() const {
- return this->next;
- }
- constexpr void SetPrev(Thread* thread) {
- this->prev = thread;
- }
- constexpr void SetNext(Thread* thread) {
- this->next = thread;
- }
-
- private:
- Thread* prev{};
- Thread* next{};
- };
-
- QueueEntry& GetPriorityQueueEntry(s32 core) {
- return this->per_core_priority_queue_entry[core];
- }
-
- const QueueEntry& GetPriorityQueueEntry(s32 core) const {
- return this->per_core_priority_queue_entry[core];
- }
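// Because every thread embeds one QueueEntry per core, the scheduler can
// link threads into per-core priority queues without allocating list nodes.
// A hypothetical insert-after helper (not part of this diff) shows the idea:
void LinkAfter(s32 core, Thread* pos, Thread* thread) {
    auto& pos_entry = pos->GetPriorityQueueEntry(core);
    Thread* const next = pos_entry.GetNext();
    thread->GetPriorityQueueEntry(core).SetPrev(pos);
    thread->GetPriorityQueueEntry(core).SetNext(next);
    if (next != nullptr) {
        next->GetPriorityQueueEntry(core).SetPrev(thread);
    }
    pos_entry.SetNext(thread);
}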
-
- s32 GetDisableDispatchCount() const {
- return disable_count;
- }
-
- void DisableDispatch() {
- ASSERT(GetDisableDispatchCount() >= 0);
- disable_count++;
- }
-
- void EnableDispatch() {
- ASSERT(GetDisableDispatchCount() > 0);
- disable_count--;
- }
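// The counted DisableDispatch()/EnableDispatch() pair invites an RAII
// wrapper; a hypothetical guard (illustrative, not part of this change):
class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(Thread& thread_) : thread{thread_} {
        thread.DisableDispatch();
    }
    ~ScopedDisableDispatch() {
        thread.EnableDispatch();
    }

private:
    Thread& thread;
};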
-
-private:
- friend class GlobalSchedulerContext;
- friend class KScheduler;
- friend class Process;
-
- void SetSchedulingStatus(ThreadSchedStatus new_status);
- void AddSchedulingFlag(ThreadSchedFlags flag);
- void RemoveSchedulingFlag(ThreadSchedFlags flag);
-
- void SetCurrentPriority(u32 new_priority);
-
- Common::SpinLock context_guard{};
- ThreadContext32 context_32{};
- ThreadContext64 context_64{};
- std::shared_ptr<Common::Fiber> host_context{};
-
- ThreadStatus status = ThreadStatus::Dormant;
- u32 scheduling_state = 0;
-
- u64 thread_id = 0;
-
- VAddr entry_point = 0;
- VAddr stack_top = 0;
- std::atomic_int disable_count = 0;
-
- ThreadType type;
-
- /// Nominal thread priority, as set by the emulated application.
- /// The nominal priority is the thread priority without priority
- /// inheritance taken into account.
- u32 nominal_priority = 0;
-
- /// Current thread priority. This may change over the course of the
- /// thread's lifetime in order to facilitate priority inheritance.
- u32 current_priority = 0;
-
- u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
- s64 schedule_count{};
- s64 last_scheduled_tick{};
-
- s32 processor_id = 0;
-
- VAddr tls_address = 0; ///< Virtual address of the Thread Local Storage of the thread
- u64 tpidr_el0 = 0; ///< TPIDR_EL0 read/write system register.
-
- /// Process that owns this thread
- Process* owner_process;
-
- /// Objects that the thread is waiting on, in the same order as they were
- /// passed to WaitSynchronization.
- ThreadSynchronizationObjects* wait_objects;
-
- SynchronizationObject* signaling_object;
- ResultCode signaling_result{RESULT_SUCCESS};
-
- /// List of threads that are waiting for a mutex that is held by this thread.
- MutexWaitingThreads wait_mutex_threads;
-
- /// Thread that owns the lock that this thread is waiting for.
- std::shared_ptr<Thread> lock_owner;
-
- /// If waiting on a ConditionVariable, this is the ConditionVariable address
- VAddr condvar_wait_address = 0;
- /// If waiting on a Mutex, this is the mutex address
- VAddr mutex_wait_address = 0;
- /// The handle used to wait for the mutex.
- Handle wait_handle = 0;
-
- /// If waiting for an AddressArbiter, this is the address being waited on.
- VAddr arb_wait_address{0};
- bool waiting_for_arbitration{};
-
- /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
- Handle global_handle = 0;
-
- /// Callback for HLE Events
- HLECallback hle_callback;
- Handle hle_time_event;
- SynchronizationObject* hle_object;
-
- KScheduler* scheduler = nullptr;
-
- std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
-
- u32 ideal_core{0xFFFFFFFF};
- KAffinityMask affinity_mask{};
-
- s32 ideal_core_override = -1;
- u32 affinity_override_count = 0;
-
- u32 pausing_state = 0;
- bool is_running = false;
- bool is_waiting_on_sync = false;
- bool is_sync_cancelled = false;
-
- bool is_continuous_on_svc = false;
-
- bool will_be_terminated = false;
- bool is_phantom_mode = false;
- bool has_exited = false;
-
- bool was_running = false;
-
- std::string name;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 79628e2b4..fd0630019 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -8,8 +8,8 @@
#include "core/core_timing_util.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
@@ -18,53 +18,30 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
time_manager_event_type = Core::Timing::CreateEvent(
"Kernel::TimeManagerCallback",
[this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
- const KScopedSchedulerLock lock(system.Kernel());
- const auto proper_handle = static_cast<Handle>(thread_handle);
-
- std::shared_ptr<Thread> thread;
+ std::shared_ptr<KThread> thread;
{
std::lock_guard lock{mutex};
- if (cancelled_events[proper_handle]) {
- return;
- }
- thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
- }
-
- if (thread) {
- // Thread can be null if process has exited
- thread->OnWakeUp();
+ thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle));
}
+ thread->Wakeup();
});
}
-void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
+void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
std::lock_guard lock{mutex};
- event_handle = timetask->GetGlobalHandle();
if (nanoseconds > 0) {
- ASSERT(timetask);
- ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
- ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
+ ASSERT(thread);
+ ASSERT(thread->GetState() != ThreadState::Runnable);
system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
- time_manager_event_type, event_handle);
- } else {
- event_handle = InvalidHandle;
- }
- cancelled_events[event_handle] = false;
-}
-
-void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
- std::lock_guard lock{mutex};
- if (event_handle == InvalidHandle) {
- return;
+ time_manager_event_type,
+ reinterpret_cast<uintptr_t>(thread));
}
- system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
- cancelled_events[event_handle] = true;
}
-void TimeManager::CancelTimeEvent(Thread* time_task) {
+void TimeManager::UnscheduleTimeEvent(KThread* thread) {
std::lock_guard lock{mutex};
- const Handle event_handle = time_task->GetGlobalHandle();
- UnscheduleTimeEvent(event_handle);
+ system.CoreTiming().UnscheduleEvent(time_manager_event_type,
+ reinterpret_cast<uintptr_t>(thread));
}
} // namespace Kernel
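With the cancelled_events map gone, the KThread pointer itself serves as the
CoreTiming userdata, so arming and cancelling a timeout are now symmetric. A
caller-side sketch against the signatures above (the real call sites live in
the new KThread wait paths and may differ):

    auto& time_manager = kernel.TimeManager();          // accessor assumed
    time_manager.ScheduleTimeEvent(thread, timeout_ns); // arm the wakeup
    // ... the thread blocks ...
    time_manager.UnscheduleTimeEvent(thread);           // cancel on early wake

Note that the callback re-wraps the raw pointer via SharedFrom, so the thread
must still be alive when the event fires; cancelling on early wake-up is what
upholds that invariant now that the per-handle cancellation flags are gone.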
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index f39df39a0..0d7f05f30 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -20,7 +20,7 @@ struct EventType;
namespace Kernel {
-class Thread;
+class KThread;
/**
* The `TimeManager` takes care of scheduling time events on threads and executes their TimeUp
@@ -31,18 +31,14 @@ public:
explicit TimeManager(Core::System& system);
/// Schedule a time event on the `time_task` thread that will expire in `nanoseconds`
- /// returns a non-invalid handle in `event_handle` if correctly scheduled
- void ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds);
+ void ScheduleTimeEvent(KThread* time_task, s64 nanoseconds);
/// Unschedule an existing time event
- void UnscheduleTimeEvent(Handle event_handle);
-
- void CancelTimeEvent(Thread* time_task);
+ void UnscheduleTimeEvent(KThread* thread);
private:
Core::System& system;
std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
- std::unordered_map<Handle, bool> cancelled_events;
std::mutex mutex;
};
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
index 765f408c3..6b0fc1591 100644
--- a/src/core/hle/kernel/transfer_memory.cpp
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
@@ -17,6 +18,7 @@ TransferMemory::TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory)
TransferMemory::~TransferMemory() {
// Release memory region when transfer memory is destroyed
Reset();
+ owner_process->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1);
}
std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel,
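The destructor-side Release() pairs with a reservation taken at creation
time, presumably through the KScopedResourceReservation helper added in this
change (see the diffstat). A sketch of the assumed creation-side pairing,
with the error constant and constructor shape assumed rather than shown here:

    KScopedResourceReservation reservation(process, LimitableResource::TransferMemory);
    if (!reservation.Succeeded()) {
        return ERR_RESOURCE_LIMIT_EXCEEDED;
    }
    auto transfer_memory = TransferMemory::Create(kernel, memory /* , ... */);
    reservation.Commit(); // the destructor's Release() balances this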
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
index 05e9f7464..777799d12 100644
--- a/src/core/hle/kernel/transfer_memory.h
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -72,6 +72,8 @@ public:
/// is closed.
ResultCode Reset();
+ void Finalize() override {}
+
private:
/// The base address for the memory managed by this instance.
VAddr base_address{};
diff --git a/src/core/hle/kernel/writable_event.cpp b/src/core/hle/kernel/writable_event.cpp
deleted file mode 100644
index fc2f7c424..000000000
--- a/src/core/hle/kernel/writable_event.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include "common/assert.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/kernel/writable_event.h"
-
-namespace Kernel {
-
-WritableEvent::WritableEvent(KernelCore& kernel) : Object{kernel} {}
-WritableEvent::~WritableEvent() = default;
-
-EventPair WritableEvent::CreateEventPair(KernelCore& kernel, std::string name) {
- std::shared_ptr<WritableEvent> writable_event(new WritableEvent(kernel));
- std::shared_ptr<ReadableEvent> readable_event(new ReadableEvent(kernel));
-
- writable_event->name = name + ":Writable";
- writable_event->readable = readable_event;
- readable_event->name = name + ":Readable";
-
- return {std::move(readable_event), std::move(writable_event)};
-}
-
-std::shared_ptr<ReadableEvent> WritableEvent::GetReadableEvent() const {
- return readable;
-}
-
-void WritableEvent::Signal() {
- readable->Signal();
-}
-
-void WritableEvent::Clear() {
- readable->Clear();
-}
-
-bool WritableEvent::IsSignaled() const {
- return readable->IsSignaled();
-}
-
-} // namespace Kernel
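The Writable/Readable pair is superseded by KEvent and its two faces
(k_event.*, k_readable_event.*, k_writable_event.* in the diffstat). A sketch
of the equivalent flow under the new objects, with the factory and accessor
names assumed from the new headers rather than shown in this hunk:

    auto event = KEvent::Create(kernel, "IPC:Event"); // factory name assumed
    event->Initialize();
    event->GetWritableEvent()->Signal();
    const bool signaled = event->GetReadableEvent()->IsSignaled();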
diff --git a/src/core/hle/kernel/writable_event.h b/src/core/hle/kernel/writable_event.h
deleted file mode 100644
index 6189cf65c..000000000
--- a/src/core/hle/kernel/writable_event.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <memory>
-
-#include "core/hle/kernel/object.h"
-
-namespace Kernel {
-
-class KernelCore;
-class ReadableEvent;
-class WritableEvent;
-
-struct EventPair {
- std::shared_ptr<ReadableEvent> readable;
- std::shared_ptr<WritableEvent> writable;
-};
-
-class WritableEvent final : public Object {
-public:
- ~WritableEvent() override;
-
- /**
- * Creates an event
- * @param kernel The kernel instance to create this event under.
- * @param name Optional name of event
- */
- static EventPair CreateEventPair(KernelCore& kernel, std::string name = "Unknown");
-
- std::string GetTypeName() const override {
- return "WritableEvent";
- }
- std::string GetName() const override {
- return name;
- }
-
- static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent;
- HandleType GetHandleType() const override {
- return HANDLE_TYPE;
- }
-
- std::shared_ptr<ReadableEvent> GetReadableEvent() const;
-
- void Signal();
- void Clear();
- bool IsSignaled() const;
-
-private:
- explicit WritableEvent(KernelCore& kernel);
-
- std::shared_ptr<ReadableEvent> readable;
-
- std::string name; ///< Name of event (optional)
-};
-
-} // namespace Kernel