Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 187
-rw-r--r--  src/core/hle/kernel/address_arbiter.h | 80
-rw-r--r--  src/core/hle/kernel/client_port.cpp | 9
-rw-r--r--  src/core/hle/kernel/client_session.cpp | 14
-rw-r--r--  src/core/hle/kernel/client_session.h | 9
-rw-r--r--  src/core/hle/kernel/code_set.cpp | 12
-rw-r--r--  src/core/hle/kernel/code_set.h | 89
-rw-r--r--  src/core/hle/kernel/errors.h | 1
-rw-r--r--  src/core/hle/kernel/handle_table.cpp | 40
-rw-r--r--  src/core/hle/kernel/handle_table.h | 25
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 22
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 25
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 27
-rw-r--r--  src/core/hle/kernel/kernel.h | 25
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 35
-rw-r--r--  src/core/hle/kernel/mutex.h | 20
-rw-r--r--  src/core/hle/kernel/object.cpp | 2
-rw-r--r--  src/core/hle/kernel/object.h | 2
-rw-r--r--  src/core/hle/kernel/process.cpp | 75
-rw-r--r--  src/core/hle/kernel/process.h | 121
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 4
-rw-r--r--  src/core/hle/kernel/process_capability.h | 4
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 2
-rw-r--r--  src/core/hle/kernel/readable_event.h | 2
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp | 7
-rw-r--r--  src/core/hle/kernel/resource_limit.h | 11
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 80
-rw-r--r--  src/core/hle/kernel/scheduler.h | 12
-rw-r--r--  src/core/hle/kernel/server_port.cpp | 6
-rw-r--r--  src/core/hle/kernel/server_port.h | 35
-rw-r--r--  src/core/hle/kernel/server_session.cpp | 93
-rw-r--r--  src/core/hle/kernel/server_session.h | 55
-rw-r--r--  src/core/hle/kernel/shared_memory.cpp | 16
-rw-r--r--  src/core/hle/kernel/shared_memory.h | 10
-rw-r--r--  src/core/hle/kernel/svc.cpp | 361
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 8
-rw-r--r--  src/core/hle/kernel/thread.cpp | 113
-rw-r--r--  src/core/hle/kernel/thread.h | 32
-rw-r--r--  src/core/hle/kernel/transfer_memory.cpp | 73
-rw-r--r--  src/core/hle/kernel/transfer_memory.h | 91
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 145
-rw-r--r--  src/core/hle/kernel/vm_manager.h | 93
-rw-r--r--  src/core/hle/kernel/wait_object.h | 2
43 files changed, 1433 insertions(+), 642 deletions(-)
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 57157beb4..c8842410b 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -9,6 +9,7 @@
#include "common/common_types.h"
#include "core/core.h"
#include "core/core_cpu.h"
+#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
@@ -18,58 +19,15 @@
#include "core/memory.h"
namespace Kernel {
-namespace AddressArbiter {
-
-// Performs actual address waiting logic.
-static ResultCode WaitForAddress(VAddr address, s64 timeout) {
- SharedPtr<Thread> current_thread = GetCurrentThread();
- current_thread->SetArbiterWaitAddress(address);
- current_thread->SetStatus(ThreadStatus::WaitArb);
- current_thread->InvalidateWakeupCallback();
-
- current_thread->WakeAfterDelay(timeout);
-
- Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
- return RESULT_TIMEOUT;
-}
-
-// Gets the threads waiting on an address.
-static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) {
- const auto RetrieveWaitingThreads = [](std::size_t core_index,
- std::vector<SharedPtr<Thread>>& waiting_threads,
- VAddr arb_addr) {
- const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- const auto& thread_list = scheduler.GetThreadList();
-
- for (const auto& thread : thread_list) {
- if (thread->GetArbiterWaitAddress() == arb_addr)
- waiting_threads.push_back(thread);
- }
- };
-
- // Retrieve all threads that are waiting for this address.
- std::vector<SharedPtr<Thread>> threads;
- RetrieveWaitingThreads(0, threads, address);
- RetrieveWaitingThreads(1, threads, address);
- RetrieveWaitingThreads(2, threads, address);
- RetrieveWaitingThreads(3, threads, address);
-
- // Sort them by priority, such that the highest priority ones come first.
- std::sort(threads.begin(), threads.end(),
- [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
- return lhs->GetPriority() < rhs->GetPriority();
- });
-
- return threads;
-}
-
+namespace {
// Wake up num_to_wake (or all) threads in a vector.
-static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
+void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
    // Only process up to 'num_to_wake' threads, unless 'num_to_wake' is <= 0, in which case
    // process them all.
std::size_t last = waiting_threads.size();
- if (num_to_wake > 0)
- last = num_to_wake;
+ if (num_to_wake > 0) {
+ last = std::min(last, static_cast<std::size_t>(num_to_wake));
+ }
// Signal the waiting threads.
for (std::size_t i = 0; i < last; i++) {
@@ -79,88 +37,114 @@ static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num
waiting_threads[i]->ResumeFromWait();
}
}
+} // Anonymous namespace
+
+AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
+AddressArbiter::~AddressArbiter() = default;
+
+ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
+ s32 num_to_wake) {
+ switch (type) {
+ case SignalType::Signal:
+ return SignalToAddressOnly(address, num_to_wake);
+ case SignalType::IncrementAndSignalIfEqual:
+ return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
+ case SignalType::ModifyByWaitingCountAndSignalIfEqual:
+ return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
+ default:
+ return ERR_INVALID_ENUM_VALUE;
+ }
+}
-// Signals an address being waited on.
-ResultCode SignalToAddress(VAddr address, s32 num_to_wake) {
- std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address);
-
+ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
+ const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address);
WakeThreads(waiting_threads, num_to_wake);
return RESULT_SUCCESS;
}
-// Signals an address being waited on and increments its value if equal to the value argument.
-ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake) {
+ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
+ s32 num_to_wake) {
// Ensure that we can write to the address.
if (!Memory::IsValidVirtualAddress(address)) {
return ERR_INVALID_ADDRESS_STATE;
}
- if (static_cast<s32>(Memory::Read32(address)) == value) {
- Memory::Write32(address, static_cast<u32>(value + 1));
- } else {
+ if (static_cast<s32>(Memory::Read32(address)) != value) {
return ERR_INVALID_STATE;
}
- return SignalToAddress(address, num_to_wake);
+ Memory::Write32(address, static_cast<u32>(value + 1));
+ return SignalToAddressOnly(address, num_to_wake);
}
-// Signals an address being waited on and modifies its value based on waiting thread count if equal
-// to the value argument.
-ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
- s32 num_to_wake) {
+ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
+ s32 num_to_wake) {
// Ensure that we can write to the address.
if (!Memory::IsValidVirtualAddress(address)) {
return ERR_INVALID_ADDRESS_STATE;
}
// Get threads waiting on the address.
- std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address);
+ const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address);
// Determine the modified value depending on the waiting count.
s32 updated_value;
if (waiting_threads.empty()) {
- updated_value = value - 1;
- } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
updated_value = value + 1;
+ } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
+ updated_value = value - 1;
} else {
updated_value = value;
}
- if (static_cast<s32>(Memory::Read32(address)) == value) {
- Memory::Write32(address, static_cast<u32>(updated_value));
- } else {
+ if (static_cast<s32>(Memory::Read32(address)) != value) {
return ERR_INVALID_STATE;
}
+ Memory::Write32(address, static_cast<u32>(updated_value));
WakeThreads(waiting_threads, num_to_wake);
return RESULT_SUCCESS;
}
-// Waits on an address if the value passed is less than the argument value, optionally decrementing.
-ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, bool should_decrement) {
+ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
+ s64 timeout_ns) {
+ switch (type) {
+ case ArbitrationType::WaitIfLessThan:
+ return WaitForAddressIfLessThan(address, value, timeout_ns, false);
+ case ArbitrationType::DecrementAndWaitIfLessThan:
+ return WaitForAddressIfLessThan(address, value, timeout_ns, true);
+ case ArbitrationType::WaitIfEqual:
+ return WaitForAddressIfEqual(address, value, timeout_ns);
+ default:
+ return ERR_INVALID_ENUM_VALUE;
+ }
+}
+
+ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
+ bool should_decrement) {
// Ensure that we can read the address.
if (!Memory::IsValidVirtualAddress(address)) {
return ERR_INVALID_ADDRESS_STATE;
}
- s32 cur_value = static_cast<s32>(Memory::Read32(address));
- if (cur_value < value) {
- if (should_decrement) {
- Memory::Write32(address, static_cast<u32>(cur_value - 1));
- }
- } else {
+ const s32 cur_value = static_cast<s32>(Memory::Read32(address));
+ if (cur_value >= value) {
return ERR_INVALID_STATE;
}
+
+ if (should_decrement) {
+ Memory::Write32(address, static_cast<u32>(cur_value - 1));
+ }
+
// Short-circuit without rescheduling, if timeout is zero.
if (timeout == 0) {
return RESULT_TIMEOUT;
}
- return WaitForAddress(address, timeout);
+ return WaitForAddressImpl(address, timeout);
}
-// Waits on an address if the value passed is equal to the argument value.
-ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
+ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
// Ensure that we can read the address.
if (!Memory::IsValidVirtualAddress(address)) {
return ERR_INVALID_ADDRESS_STATE;
@@ -174,7 +158,48 @@ ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
return RESULT_TIMEOUT;
}
- return WaitForAddress(address, timeout);
+ return WaitForAddressImpl(address, timeout);
+}
+
+ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) {
+ SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread();
+ current_thread->SetArbiterWaitAddress(address);
+ current_thread->SetStatus(ThreadStatus::WaitArb);
+ current_thread->InvalidateWakeupCallback();
+
+ current_thread->WakeAfterDelay(timeout);
+
+ system.CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
+ return RESULT_TIMEOUT;
+}
+
+std::vector<SharedPtr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) const {
+ const auto RetrieveWaitingThreads = [this](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr arb_addr) {
+ const auto& scheduler = system.Scheduler(core_index);
+ const auto& thread_list = scheduler.GetThreadList();
+
+ for (const auto& thread : thread_list) {
+ if (thread->GetArbiterWaitAddress() == arb_addr) {
+ waiting_threads.push_back(thread);
+ }
+ }
+ };
+
+ // Retrieve all threads that are waiting for this address.
+ std::vector<SharedPtr<Thread>> threads;
+ RetrieveWaitingThreads(0, threads, address);
+ RetrieveWaitingThreads(1, threads, address);
+ RetrieveWaitingThreads(2, threads, address);
+ RetrieveWaitingThreads(3, threads, address);
+
+ // Sort them by priority, such that the highest priority ones come first.
+ std::sort(threads.begin(), threads.end(),
+ [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
+ return lhs->GetPriority() < rhs->GetPriority();
+ });
+
+ return threads;
}
-} // namespace AddressArbiter
} // namespace Kernel
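
The swapped branches in ModifyByWaitingCountAndSignalToAddressIfEqual are the behavioral fix in this file: the old code incremented the value when waking every waiter and decremented it when there were none, which was backwards. A standalone sketch of the corrected update rule (the helper name is illustrative, not part of the diff):

#include <cstddef>
#include <cstdint>

// no waiters       -> value + 1
// waking all of them -> value - 1
// otherwise        -> value unchanged
std::int32_t UpdatedValue(std::size_t num_waiters, std::int32_t num_to_wake,
                          std::int32_t value) {
    if (num_waiters == 0) {
        return value + 1;
    }
    if (num_to_wake <= 0 || num_waiters <= static_cast<std::size_t>(num_to_wake)) {
        return value - 1;
    }
    return value;
}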
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
index e3657b8e9..ed0d0e69f 100644
--- a/src/core/hle/kernel/address_arbiter.h
+++ b/src/core/hle/kernel/address_arbiter.h
@@ -4,31 +4,77 @@
#pragma once
+#include <vector>
+
#include "common/common_types.h"
+#include "core/hle/kernel/object.h"
union ResultCode;
+namespace Core {
+class System;
+}
+
namespace Kernel {
-namespace AddressArbiter {
-enum class ArbitrationType {
- WaitIfLessThan = 0,
- DecrementAndWaitIfLessThan = 1,
- WaitIfEqual = 2,
-};
+class Thread;
-enum class SignalType {
- Signal = 0,
- IncrementAndSignalIfEqual = 1,
- ModifyByWaitingCountAndSignalIfEqual = 2,
-};
+class AddressArbiter {
+public:
+ enum class ArbitrationType {
+ WaitIfLessThan = 0,
+ DecrementAndWaitIfLessThan = 1,
+ WaitIfEqual = 2,
+ };
+
+ enum class SignalType {
+ Signal = 0,
+ IncrementAndSignalIfEqual = 1,
+ ModifyByWaitingCountAndSignalIfEqual = 2,
+ };
+
+ explicit AddressArbiter(Core::System& system);
+ ~AddressArbiter();
+
+ AddressArbiter(const AddressArbiter&) = delete;
+ AddressArbiter& operator=(const AddressArbiter&) = delete;
+
+ AddressArbiter(AddressArbiter&&) = default;
+ AddressArbiter& operator=(AddressArbiter&&) = delete;
+
+ /// Signals an address being waited on with a particular signaling type.
+ ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);
-ResultCode SignalToAddress(VAddr address, s32 num_to_wake);
-ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
-ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
+ /// Waits on an address with a particular arbitration type.
+ ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
-ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, bool should_decrement);
-ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
-} // namespace AddressArbiter
+private:
+ /// Signals an address being waited on.
+ ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
+
+ /// Signals an address being waited on and increments its value if equal to the value argument.
+ ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
+
+ /// Signals an address being waited on and modifies its value based on waiting thread count if
+ /// equal to the value argument.
+ ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
+ s32 num_to_wake);
+
+ /// Waits on an address if the value passed is less than the argument value,
+ /// optionally decrementing.
+ ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
+ bool should_decrement);
+
+ /// Waits on an address if the value passed is equal to the argument value.
+ ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
+
+    /// Waits on the given address with a timeout in nanoseconds.
+ ResultCode WaitForAddressImpl(VAddr address, s64 timeout);
+
+    /// Gets the threads waiting on an address.
+ std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
+
+ Core::System& system;
+};
} // namespace Kernel
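
Since the arbiter is now an instance owned per-process (see the process.h changes later in this diff), call sites move from free functions to member calls through the owning process. A hedged sketch of a caller; the matching svc.cpp changes are not shown in this excerpt, so the function name here is illustrative:

// Sketch only, assuming yuzu's usual System/KernelCore accessors.
ResultCode DoSignalToAddress(Core::System& system, VAddr address, u32 raw_type,
                             s32 value, s32 num_to_wake) {
    auto& arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
    const auto type = static_cast<Kernel::AddressArbiter::SignalType>(raw_type);

    // Out-of-range enum values are rejected inside SignalToAddress with
    // ERR_INVALID_ENUM_VALUE, so no pre-validation is needed here.
    return arbiter.SignalToAddress(address, type, value, num_to_wake);
}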
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index d4c91d529..aa432658e 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -33,10 +33,11 @@ ResultVal<SharedPtr<ClientSession>> ClientPort::Connect() {
// Create a new session pair, let the created sessions inherit the parent port's HLE handler.
auto sessions = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this);
- if (server_port->hle_handler)
- server_port->hle_handler->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions));
- else
- server_port->pending_sessions.push_back(std::get<SharedPtr<ServerSession>>(sessions));
+ if (server_port->HasHLEHandler()) {
+ server_port->GetHLEHandler()->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions));
+ } else {
+ server_port->AppendPendingSession(std::get<SharedPtr<ServerSession>>(sessions));
+ }
// Wake the threads waiting on the ServerPort
server_port->WakeupAllWaitingThreads();
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp
index 704e82824..c17baa50a 100644
--- a/src/core/hle/kernel/client_session.cpp
+++ b/src/core/hle/kernel/client_session.cpp
@@ -17,21 +17,11 @@ ClientSession::~ClientSession() {
// This destructor will be called automatically when the last ClientSession handle is closed by
// the emulated application.
- // Local references to ServerSession and SessionRequestHandler are necessary to guarantee they
+ // A local reference to the ServerSession is necessary to guarantee it
// will be kept alive until after ClientDisconnected() returns.
SharedPtr<ServerSession> server = parent->server;
if (server) {
- std::shared_ptr<SessionRequestHandler> hle_handler = server->hle_handler;
- if (hle_handler)
- hle_handler->ClientDisconnected(server);
-
- // TODO(Subv): Force a wake up of all the ServerSession's waiting threads and set
- // their WaitSynchronization result to 0xC920181A.
-
- // Clean up the list of client threads with pending requests, they are unneeded now that the
- // client endpoint is closed.
- server->pending_requesting_threads.clear();
- server->currently_handling = nullptr;
+ server->ClientDisconnected();
}
parent->client = nullptr;
diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h
index 4c18de69c..b1f39aad7 100644
--- a/src/core/hle/kernel/client_session.h
+++ b/src/core/hle/kernel/client_session.h
@@ -36,14 +36,15 @@ public:
ResultCode SendSyncRequest(SharedPtr<Thread> thread);
- std::string name; ///< Name of client port (optional)
+private:
+ explicit ClientSession(KernelCore& kernel);
+ ~ClientSession() override;
/// The parent session, which links to the server endpoint.
std::shared_ptr<Session> parent;
-private:
- explicit ClientSession(KernelCore& kernel);
- ~ClientSession() override;
+ /// Name of the client session (optional)
+ std::string name;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/code_set.cpp b/src/core/hle/kernel/code_set.cpp
new file mode 100644
index 000000000..1f434e9af
--- /dev/null
+++ b/src/core/hle/kernel/code_set.cpp
@@ -0,0 +1,12 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/code_set.h"
+
+namespace Kernel {
+
+CodeSet::CodeSet() = default;
+CodeSet::~CodeSet() = default;
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
new file mode 100644
index 000000000..879957dcb
--- /dev/null
+++ b/src/core/hle/kernel/code_set.h
@@ -0,0 +1,89 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+/**
+ * Represents executable data that may be loaded into a kernel process.
+ *
+ * A code set consists of three basic segments:
+ * - A code (AKA text) segment,
+ * - A read-only data segment (rodata)
+ * - A data segment
+ *
+ * The code segment is the portion of the object file that contains
+ * executable instructions.
+ *
+ * The read-only data segment is the portion of the object file that
+ * contains (as one would expect) read-only data, such as fixed constant
+ * values and data structures.
+ *
+ * The data segment is similar to the read-only data segment -- it contains
+ * variables and data structures that have predefined values; however,
+ * entities within this segment can be modified.
+ */
+struct CodeSet final {
+ /// A single segment within a code set.
+ struct Segment final {
+ /// The byte offset that this segment is located at.
+ std::size_t offset = 0;
+
+ /// The address to map this segment to.
+ VAddr addr = 0;
+
+ /// The size of this segment in bytes.
+ u32 size = 0;
+ };
+
+ explicit CodeSet();
+ ~CodeSet();
+
+ CodeSet(const CodeSet&) = delete;
+ CodeSet& operator=(const CodeSet&) = delete;
+
+ CodeSet(CodeSet&&) = default;
+ CodeSet& operator=(CodeSet&&) = default;
+
+ Segment& CodeSegment() {
+ return segments[0];
+ }
+
+ const Segment& CodeSegment() const {
+ return segments[0];
+ }
+
+ Segment& RODataSegment() {
+ return segments[1];
+ }
+
+ const Segment& RODataSegment() const {
+ return segments[1];
+ }
+
+ Segment& DataSegment() {
+ return segments[2];
+ }
+
+ const Segment& DataSegment() const {
+ return segments[2];
+ }
+
+ /// The overall data that backs this code set.
+ std::vector<u8> memory;
+
+ /// The segments that comprise this code set.
+ std::array<Segment, 3> segments;
+
+ /// The entry point address for this code set.
+ VAddr entrypoint = 0;
+};
+
+} // namespace Kernel
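
For context on how this structure is consumed: a loader fills in the backing memory and the three segment descriptors before handing the set to Process::LoadModule (the process.cpp hunk below moves the memory vector into a shared block at map time). A rough sketch with a purely hypothetical layout:

// Illustrative only; offsets, addresses, and sizes are made up.
Kernel::CodeSet BuildExampleCodeSet(std::vector<u8> program_image) {
    Kernel::CodeSet code_set;

    code_set.CodeSegment().offset = 0;
    code_set.CodeSegment().addr = 0; // Relative; LoadModule applies base_addr.
    code_set.CodeSegment().size = 0x1000;

    code_set.RODataSegment().offset = 0x1000;
    code_set.RODataSegment().addr = 0x1000;
    code_set.RODataSegment().size = 0x800;

    code_set.DataSegment().offset = 0x1800;
    code_set.DataSegment().addr = 0x1800;
    code_set.DataSegment().size = 0x800;

    code_set.entrypoint = code_set.CodeSegment().addr;
    code_set.memory = std::move(program_image);
    return code_set;
}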
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index d17eb0cb6..8097b3863 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -14,6 +14,7 @@ constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
+constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index c8acde5b1..bdfaa977f 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -14,32 +14,47 @@
namespace Kernel {
namespace {
constexpr u16 GetSlot(Handle handle) {
- return handle >> 15;
+ return static_cast<u16>(handle >> 15);
}
constexpr u16 GetGeneration(Handle handle) {
- return handle & 0x7FFF;
+ return static_cast<u16>(handle & 0x7FFF);
}
} // Anonymous namespace
HandleTable::HandleTable() {
- next_generation = 1;
Clear();
}
HandleTable::~HandleTable() = default;
+ResultCode HandleTable::SetSize(s32 handle_table_size) {
+ if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
+ return ERR_OUT_OF_MEMORY;
+ }
+
+ // Values less than or equal to zero indicate to use the maximum allowable
+ // size for the handle table in the actual kernel, so we ignore the given
+ // value in that case, since we assume this by default unless this function
+ // is called.
+ if (handle_table_size > 0) {
+ table_size = static_cast<u16>(handle_table_size);
+ }
+
+ return RESULT_SUCCESS;
+}
+
ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) {
DEBUG_ASSERT(obj != nullptr);
- u16 slot = next_free_slot;
- if (slot >= generations.size()) {
+ const u16 slot = next_free_slot;
+ if (slot >= table_size) {
LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
return ERR_HANDLE_TABLE_FULL;
}
next_free_slot = generations[slot];
- u16 generation = next_generation++;
+ const u16 generation = next_generation++;
// Overflow count so it fits in the 15 bits dedicated to the generation in the handle.
// Horizon OS uses zero to represent an invalid handle, so skip to 1.
@@ -64,10 +79,11 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
}
ResultCode HandleTable::Close(Handle handle) {
- if (!IsValid(handle))
+ if (!IsValid(handle)) {
return ERR_INVALID_HANDLE;
+ }
- u16 slot = GetSlot(handle);
+ const u16 slot = GetSlot(handle);
objects[slot] = nullptr;
@@ -77,10 +93,10 @@ ResultCode HandleTable::Close(Handle handle) {
}
bool HandleTable::IsValid(Handle handle) const {
- std::size_t slot = GetSlot(handle);
- u16 generation = GetGeneration(handle);
+ const std::size_t slot = GetSlot(handle);
+ const u16 generation = GetGeneration(handle);
- return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation;
+ return slot < table_size && objects[slot] != nullptr && generations[slot] == generation;
}
SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const {
@@ -97,7 +113,7 @@ SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const {
}
void HandleTable::Clear() {
- for (u16 i = 0; i < MAX_COUNT; ++i) {
+ for (u16 i = 0; i < table_size; ++i) {
generations[i] = i + 1;
objects[i] = nullptr;
}
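
The casts added to GetSlot and GetGeneration only silence narrowing warnings; the packing itself is unchanged: the slot index sits above bit 15 and the 15-bit generation below it. A standalone round-trip sketch (MakeHandle is illustrative; the table composes handles inline):

#include <cstdint>

using Handle = std::uint32_t;

constexpr Handle MakeHandle(std::uint16_t slot, std::uint16_t generation) {
    return (static_cast<Handle>(slot) << 15) | (generation & 0x7FFF);
}

static_assert(MakeHandle(3, 42) >> 15 == 3);
static_assert((MakeHandle(3, 42) & 0x7FFF) == 42);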
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h
index 89a3bc740..44901391b 100644
--- a/src/core/hle/kernel/handle_table.h
+++ b/src/core/hle/kernel/handle_table.h
@@ -50,6 +50,20 @@ public:
~HandleTable();
/**
+ * Sets the number of handles that may be in use at one time
+ * for this handle table.
+ *
+ * @param handle_table_size The desired size to limit the handle table to.
+ *
+ * @returns an error code indicating if initialization was successful.
+ * If initialization was not successful, then ERR_OUT_OF_MEMORY
+ * will be returned.
+ *
+ * @pre handle_table_size must be within the range [0, 1024]
+ */
+ ResultCode SetSize(s32 handle_table_size);
+
+ /**
* Allocates a handle for the given object.
* @return The created Handle or one of the following errors:
* - `ERR_HANDLE_TABLE_FULL`: the maximum number of handles has been exceeded.
@@ -104,13 +118,20 @@ private:
std::array<u16, MAX_COUNT> generations;
/**
+ * The limited size of the handle table. This can be specified by process
+ * capabilities in order to restrict the overall number of handles that
+ * can be created in a process instance
+ */
+ u16 table_size = static_cast<u16>(MAX_COUNT);
+
+ /**
* Global counter of the number of created handles. Stored in `generations` when a handle is
* created, and wraps around to 1 when it hits 0x8000.
*/
- u16 next_generation;
+ u16 next_generation = 1;
/// Head of the free slots linked list.
- u16 next_free_slot;
+ u16 next_free_slot = 0;
};
} // namespace Kernel
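
A short usage sketch of the SetSize semantics documented above, assuming MAX_COUNT is the 1024 stated in the precondition:

Kernel::HandleTable table;

table.SetSize(256);  // Values in (0, 1024] limit the table to that many handles.
table.SetSize(0);    // Zero or negative keeps the maximum allowable size.
table.SetSize(2000); // Above MAX_COUNT: returns ERR_OUT_OF_MEMORY.

In this diff the only caller shown is Process::LoadFromMetadata, which applies the capability-specified size before the process starts running.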
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 5dd855db8..fe710eb6e 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -86,7 +86,7 @@ HLERequestContext::~HLERequestContext() = default;
void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf,
bool incoming) {
IPC::RequestParser rp(src_cmdbuf);
- command_header = std::make_shared<IPC::CommandHeader>(rp.PopRaw<IPC::CommandHeader>());
+ command_header = rp.PopRaw<IPC::CommandHeader>();
if (command_header->type == IPC::CommandType::Close) {
// Close does not populate the rest of the IPC header
@@ -95,8 +95,7 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_
// If handle descriptor is present, add size of it
if (command_header->enable_handle_descriptor) {
- handle_descriptor_header =
- std::make_shared<IPC::HandleDescriptorHeader>(rp.PopRaw<IPC::HandleDescriptorHeader>());
+ handle_descriptor_header = rp.PopRaw<IPC::HandleDescriptorHeader>();
if (handle_descriptor_header->send_current_pid) {
rp.Skip(2, false);
}
@@ -140,16 +139,15 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_
// If this is an incoming message, only CommandType "Request" has a domain header
// All outgoing domain messages have the domain header, if only incoming has it
if (incoming || domain_message_header) {
- domain_message_header =
- std::make_shared<IPC::DomainMessageHeader>(rp.PopRaw<IPC::DomainMessageHeader>());
+ domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
} else {
- if (Session()->IsDomain())
+ if (Session()->IsDomain()) {
LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
+ }
}
}
- data_payload_header =
- std::make_shared<IPC::DataPayloadHeader>(rp.PopRaw<IPC::DataPayloadHeader>());
+ data_payload_header = rp.PopRaw<IPC::DataPayloadHeader>();
data_payload_offset = rp.GetCurrentOffset();
@@ -264,11 +262,11 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.
std::size_t domain_offset = size - domain_message_header->num_objects;
- auto& request_handlers = server_session->domain_request_handlers;
- for (auto& object : domain_objects) {
- request_handlers.emplace_back(object);
- dst_cmdbuf[domain_offset++] = static_cast<u32_le>(request_handlers.size());
+ for (const auto& object : domain_objects) {
+ server_session->AppendDomainRequestHandler(object);
+ dst_cmdbuf[domain_offset++] =
+ static_cast<u32_le>(server_session->NumDomainRequestHandlers());
}
}
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index cb1c5aff3..2bdd9f02c 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -6,6 +6,7 @@
#include <array>
#include <memory>
+#include <optional>
#include <string>
#include <type_traits>
#include <vector>
@@ -15,6 +16,8 @@
#include "core/hle/ipc.h"
#include "core/hle/kernel/object.h"
+union ResultCode;
+
namespace Service {
class ServiceFrameworkBase;
}
@@ -166,12 +169,12 @@ public:
return buffer_c_desciptors;
}
- const IPC::DomainMessageHeader* GetDomainMessageHeader() const {
- return domain_message_header.get();
+ const IPC::DomainMessageHeader& GetDomainMessageHeader() const {
+ return domain_message_header.value();
}
bool HasDomainMessageHeader() const {
- return domain_message_header != nullptr;
+ return domain_message_header.has_value();
}
/// Helper function to read a buffer using the appropriate buffer descriptor
@@ -208,14 +211,12 @@ public:
template <typename T>
SharedPtr<T> GetCopyObject(std::size_t index) {
- ASSERT(index < copy_objects.size());
- return DynamicObjectCast<T>(copy_objects[index]);
+ return DynamicObjectCast<T>(copy_objects.at(index));
}
template <typename T>
SharedPtr<T> GetMoveObject(std::size_t index) {
- ASSERT(index < move_objects.size());
- return DynamicObjectCast<T>(move_objects[index]);
+ return DynamicObjectCast<T>(move_objects.at(index));
}
void AddMoveObject(SharedPtr<Object> object) {
@@ -232,7 +233,7 @@ public:
template <typename T>
std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const {
- return std::static_pointer_cast<T>(domain_request_handlers[index]);
+ return std::static_pointer_cast<T>(domain_request_handlers.at(index));
}
void SetDomainRequestHandlers(
@@ -272,10 +273,10 @@ private:
boost::container::small_vector<SharedPtr<Object>, 8> copy_objects;
boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;
- std::shared_ptr<IPC::CommandHeader> command_header;
- std::shared_ptr<IPC::HandleDescriptorHeader> handle_descriptor_header;
- std::shared_ptr<IPC::DataPayloadHeader> data_payload_header;
- std::shared_ptr<IPC::DomainMessageHeader> domain_message_header;
+ std::optional<IPC::CommandHeader> command_header;
+ std::optional<IPC::HandleDescriptorHeader> handle_descriptor_header;
+ std::optional<IPC::DataPayloadHeader> data_payload_header;
+ std::optional<IPC::DomainMessageHeader> domain_message_header;
std::vector<IPC::BufferDescriptorX> buffer_x_desciptors;
std::vector<IPC::BufferDescriptorABW> buffer_a_desciptors;
std::vector<IPC::BufferDescriptorABW> buffer_b_desciptors;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 7a524ce5a..3f14bfa86 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -12,6 +12,7 @@
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
@@ -28,12 +29,12 @@ namespace Kernel {
* @param thread_handle The handle of the thread that's been awoken
* @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
*/
-static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_late) {
+static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
const auto proper_handle = static_cast<Handle>(thread_handle);
const auto& system = Core::System::GetInstance();
// Lock the global kernel mutex when we enter the kernel HLE.
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+ std::lock_guard lock{HLE::g_hle_lock};
SharedPtr<Thread> thread =
system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle);
@@ -61,7 +62,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_
if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
thread->GetWaitHandle() != 0) {
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex ||
+ thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->SetMutexWaitAddress(0);
thread->SetCondVarWaitAddress(0);
thread->SetWaitHandle(0);
@@ -86,6 +88,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_
}
struct KernelCore::Impl {
+ explicit Impl(Core::System& system) : system{system} {}
+
void Initialize(KernelCore& kernel) {
Shutdown();
@@ -111,7 +115,7 @@ struct KernelCore::Impl {
// Creates the default system resource limit
void InitializeSystemResourceLimit(KernelCore& kernel) {
- system_resource_limit = ResourceLimit::Create(kernel, "System");
+ system_resource_limit = ResourceLimit::Create(kernel);
// If setting the default system values fails, then something seriously wrong has occurred.
ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000)
@@ -124,7 +128,7 @@ struct KernelCore::Impl {
void InitializeThreads() {
thread_wakeup_event_type =
- CoreTiming::RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
+ system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
}
std::atomic<u32> next_object_id{0};
@@ -137,7 +141,7 @@ struct KernelCore::Impl {
SharedPtr<ResourceLimit> system_resource_limit;
- CoreTiming::EventType* thread_wakeup_event_type = nullptr;
+ Core::Timing::EventType* thread_wakeup_event_type = nullptr;
// TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
// allowing us to simply use a pool index or similar.
Kernel::HandleTable thread_wakeup_callback_handle_table;
@@ -145,9 +149,12 @@ struct KernelCore::Impl {
/// Map of named ports managed by the kernel, which can be retrieved using
/// the ConnectToPort SVC.
NamedPortTable named_ports;
+
+ // System context
+ Core::System& system;
};
-KernelCore::KernelCore() : impl{std::make_unique<Impl>()} {}
+KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
KernelCore::~KernelCore() {
Shutdown();
}
@@ -184,6 +191,10 @@ const Process* KernelCore::CurrentProcess() const {
return impl->current_process;
}
+const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const {
+ return impl->process_list;
+}
+
void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) {
impl->named_ports.emplace(std::move(name), std::move(port));
}
@@ -213,7 +224,7 @@ u64 KernelCore::CreateNewProcessID() {
return impl->next_process_id++;
}
-CoreTiming::EventType* KernelCore::ThreadWakeupCallbackEventType() const {
+Core::Timing::EventType* KernelCore::ThreadWakeupCallbackEventType() const {
return impl->thread_wakeup_event_type;
}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index c643a6401..6b8738599 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -8,15 +8,18 @@
#include <unordered_map>
#include "core/hle/kernel/object.h"
-template <typename T>
-class ResultVal;
+namespace Core {
+class System;
+}
-namespace CoreTiming {
+namespace Core::Timing {
+class CoreTiming;
struct EventType;
-}
+} // namespace Core::Timing
namespace Kernel {
+class AddressArbiter;
class ClientPort;
class HandleTable;
class Process;
@@ -29,7 +32,14 @@ private:
using NamedPortTable = std::unordered_map<std::string, SharedPtr<ClientPort>>;
public:
- KernelCore();
+ /// Constructs an instance of the kernel using the given System
+ /// instance as a context for any necessary system-related state,
+ /// such as threads, CPU core state, etc.
+ ///
+ /// @post After execution of the constructor, the provided System
+ /// object *must* outlive the kernel instance itself.
+ ///
+ explicit KernelCore(Core::System& system);
~KernelCore();
KernelCore(const KernelCore&) = delete;
@@ -62,6 +72,9 @@ public:
/// Retrieves a const pointer to the current process.
const Process* CurrentProcess() const;
+ /// Retrieves the list of processes.
+ const std::vector<SharedPtr<Process>>& GetProcessList() const;
+
/// Adds a port to the named port table
void AddNamedPort(std::string name, SharedPtr<ClientPort> port);
@@ -89,7 +102,7 @@ private:
u64 CreateNewThreadID();
/// Retrieves the event type used for thread wakeup callbacks.
- CoreTiming::EventType* ThreadWakeupCallbackEventType() const;
+ Core::Timing::EventType* ThreadWakeupCallbackEventType() const;
/// Provides a reference to the thread wakeup callback handle table.
Kernel::HandleTable& ThreadWakeupCallbackHandleTable();
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 0743670ad..98e87313b 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <map>
#include <utility>
#include <vector>
@@ -10,8 +9,11 @@
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -57,41 +59,47 @@ static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_t
}
}
-ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle holding_thread_handle,
+Mutex::Mutex(Core::System& system) : system{system} {}
+Mutex::~Mutex() = default;
+
+ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
Handle requesting_thread_handle) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
return ERR_INVALID_ADDRESS;
}
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ Thread* const current_thread = system.CurrentScheduler().GetCurrentThread();
SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle);
// TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another
// thread.
- ASSERT(requesting_thread == GetCurrentThread());
+ ASSERT(requesting_thread == current_thread);
- u32 addr_value = Memory::Read32(address);
+ const u32 addr_value = Memory::Read32(address);
// If the mutex isn't being held, just return success.
if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
return RESULT_SUCCESS;
}
- if (holding_thread == nullptr)
+ if (holding_thread == nullptr) {
return ERR_INVALID_HANDLE;
+ }
// Wait until the mutex is released
- GetCurrentThread()->SetMutexWaitAddress(address);
- GetCurrentThread()->SetWaitHandle(requesting_thread_handle);
+ current_thread->SetMutexWaitAddress(address);
+ current_thread->SetWaitHandle(requesting_thread_handle);
- GetCurrentThread()->SetStatus(ThreadStatus::WaitMutex);
- GetCurrentThread()->InvalidateWakeupCallback();
+ current_thread->SetStatus(ThreadStatus::WaitMutex);
+ current_thread->InvalidateWakeupCallback();
// Update the lock holder thread's priority to prevent priority inversion.
- holding_thread->AddMutexWaiter(GetCurrentThread());
+ holding_thread->AddMutexWaiter(current_thread);
- Core::System::GetInstance().PrepareReschedule();
+ system.PrepareReschedule();
return RESULT_SUCCESS;
}
@@ -102,7 +110,8 @@ ResultCode Mutex::Release(VAddr address) {
return ERR_INVALID_ADDRESS;
}
- auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address);
+ auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
+ auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
// There are no more threads waiting for the mutex, release it completely.
if (thread == nullptr) {
@@ -111,7 +120,7 @@ ResultCode Mutex::Release(VAddr address) {
}
// Transfer the ownership of the mutex from the previous owner to the new one.
- TransferMutexOwnership(address, GetCurrentThread(), thread);
+ TransferMutexOwnership(address, current_thread, thread);
u32 mutex_value = thread->GetWaitHandle();
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
index 81e62d497..b904de2e8 100644
--- a/src/core/hle/kernel/mutex.h
+++ b/src/core/hle/kernel/mutex.h
@@ -5,32 +5,34 @@
#pragma once
#include "common/common_types.h"
-#include "core/hle/kernel/object.h"
union ResultCode;
-namespace Kernel {
+namespace Core {
+class System;
+}
-class HandleTable;
-class Thread;
+namespace Kernel {
class Mutex final {
public:
+ explicit Mutex(Core::System& system);
+ ~Mutex();
+
/// Flag that indicates that a mutex still has threads waiting for it.
static constexpr u32 MutexHasWaitersFlag = 0x40000000;
/// Mask of the bits in a mutex address value that contain the mutex owner.
static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
/// Attempts to acquire a mutex at the specified address.
- static ResultCode TryAcquire(HandleTable& handle_table, VAddr address,
- Handle holding_thread_handle, Handle requesting_thread_handle);
+ ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
+ Handle requesting_thread_handle);
/// Releases the mutex at the specified address.
- static ResultCode Release(VAddr address);
+ ResultCode Release(VAddr address);
private:
- Mutex() = default;
- ~Mutex() = default;
+ Core::System& system;
};
} // namespace Kernel
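
The two public constants describe how a mutex word is laid out in guest memory: it holds the owning thread's handle, with bit 30 set while other threads are queued on the mutex (TryAcquire checks for exactly holding_thread_handle | MutexHasWaitersFlag). A standalone decoding sketch with illustrative helper names:

#include <cstdint>

constexpr std::uint32_t MutexHasWaitersFlag = 0x40000000;
constexpr std::uint32_t MutexOwnerMask = 0xBFFFFFFF; // == ~MutexHasWaitersFlag

constexpr bool HasWaiters(std::uint32_t mutex_value) {
    return (mutex_value & MutexHasWaitersFlag) != 0;
}

constexpr std::uint32_t OwnerHandle(std::uint32_t mutex_value) {
    return mutex_value & MutexOwnerMask;
}

static_assert(HasWaiters(0x0000ABCD | MutexHasWaitersFlag));
static_assert(OwnerHandle(0x0000ABCD | MutexHasWaitersFlag) == 0x0000ABCD);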
diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp
index 8870463d0..10431e94c 100644
--- a/src/core/hle/kernel/object.cpp
+++ b/src/core/hle/kernel/object.cpp
@@ -23,7 +23,7 @@ bool Object::IsWaitable() const {
case HandleType::Unknown:
case HandleType::WritableEvent:
case HandleType::SharedMemory:
- case HandleType::AddressArbiter:
+ case HandleType::TransferMemory:
case HandleType::ResourceLimit:
case HandleType::ClientPort:
case HandleType::ClientSession:
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index 4c2505908..332876c27 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -22,9 +22,9 @@ enum class HandleType : u32 {
WritableEvent,
ReadableEvent,
SharedMemory,
+ TransferMemory,
Thread,
Process,
- AddressArbiter,
ResourceLimit,
ClientPort,
ServerPort,
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index c5aa19afa..041267318 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -5,10 +5,12 @@
#include <algorithm>
#include <memory>
#include <random>
+#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
@@ -31,7 +33,7 @@ namespace {
*/
void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) {
// Setup page table so we can write to memory
- SetCurrentPageTable(&owner_process.VMManager().page_table);
+ Memory::SetCurrentPageTable(&owner_process.VMManager().page_table);
// Initialize new "main" thread
const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress();
@@ -50,12 +52,10 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_poi
}
} // Anonymous namespace
-CodeSet::CodeSet() = default;
-CodeSet::~CodeSet() = default;
-
-SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) {
- SharedPtr<Process> process(new Process(kernel));
+SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) {
+ auto& kernel = system.Kernel();
+ SharedPtr<Process> process(new Process(system));
process->name = std::move(name);
process->resource_limit = kernel.GetSystemResourceLimit();
process->status = ProcessStatus::Created;
@@ -76,6 +76,18 @@ SharedPtr<ResourceLimit> Process::GetResourceLimit() const {
return resource_limit;
}
+u64 Process::GetTotalPhysicalMemoryUsed() const {
+ return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size;
+}
+
+void Process::RegisterThread(const Thread* thread) {
+ thread_list.push_back(thread);
+}
+
+void Process::UnregisterThread(const Thread* thread) {
+ thread_list.remove(thread);
+}
+
ResultCode Process::ClearSignalState() {
if (status == ProcessStatus::Exited) {
LOG_ERROR(Kernel, "called on a terminated process instance.");
@@ -99,17 +111,26 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
vm_manager.Reset(metadata.GetAddressSpaceType());
const auto& caps = metadata.GetKernelCapabilities();
- return capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager);
+ const auto capability_init_result =
+ capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager);
+ if (capability_init_result.IsError()) {
+ return capability_init_result;
+ }
+
+ return handle_table.SetSize(capabilities.GetHandleTableSize());
}
-void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
+void Process::Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size) {
+ // The kernel always ensures that the given stack size is page aligned.
+ main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
+
// Allocate and map the main thread stack
// TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part
// of the user address space.
+ const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
vm_manager
- .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size,
- std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
- MemoryState::Stack)
+ .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size),
+ 0, main_thread_stack_size, MemoryState::Stack)
.Unwrap();
vm_manager.LogLayout();
@@ -126,7 +147,7 @@ void Process::PrepareForTermination() {
if (thread->GetOwnerProcess() != this)
continue;
- if (thread == GetCurrentThread())
+ if (thread == system.CurrentScheduler().GetCurrentThread())
continue;
// TODO(Subv): When are the other running/ready threads terminated?
@@ -138,7 +159,6 @@ void Process::PrepareForTermination() {
}
};
- const auto& system = Core::System::GetInstance();
stop_threads(system.Scheduler(0).GetThreadList());
stop_threads(system.Scheduler(1).GetThreadList());
stop_threads(system.Scheduler(2).GetThreadList());
@@ -206,35 +226,38 @@ void Process::FreeTLSSlot(VAddr tls_address) {
}
void Process::LoadModule(CodeSet module_, VAddr base_addr) {
- const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
+ const auto memory = std::make_shared<std::vector<u8>>(std::move(module_.memory));
+
+ const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) {
const auto vma = vm_manager
- .MapMemoryBlock(segment.addr + base_addr, module_.memory,
- segment.offset, segment.size, memory_state)
+ .MapMemoryBlock(segment.addr + base_addr, memory, segment.offset,
+ segment.size, memory_state)
.Unwrap();
vm_manager.Reprotect(vma, permissions);
};
// Map CodeSet segments
- MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::CodeStatic);
- MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeMutable);
- MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable);
+ MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
+ MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
+ MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
+
+ code_memory_size += module_.memory.size();
// Clear instruction cache in CPU JIT
- Core::System::GetInstance().ArmInterface(0).ClearInstructionCache();
- Core::System::GetInstance().ArmInterface(1).ClearInstructionCache();
- Core::System::GetInstance().ArmInterface(2).ClearInstructionCache();
- Core::System::GetInstance().ArmInterface(3).ClearInstructionCache();
+ system.InvalidateCpuInstructionCaches();
}
-Kernel::Process::Process(KernelCore& kernel) : WaitObject{kernel} {}
-Kernel::Process::~Process() {}
+Process::Process(Core::System& system)
+ : WaitObject{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}
+
+Process::~Process() = default;
void Process::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
}
-bool Process::ShouldWait(Thread* thread) const {
+bool Process::ShouldWait(const Thread* thread) const {
return !is_signaled;
}
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index dcc57ae9f..f060f2a3b 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -7,17 +7,23 @@
#include <array>
#include <bitset>
#include <cstddef>
-#include <memory>
+#include <list>
#include <string>
#include <vector>
#include <boost/container/static_vector.hpp>
#include "common/common_types.h"
+#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/kernel/wait_object.h"
#include "core/hle/result.h"
+namespace Core {
+class System;
+}
+
namespace FileSys {
class ProgramMetadata;
}
@@ -28,13 +34,7 @@ class KernelCore;
class ResourceLimit;
class Thread;
-struct AddressMapping {
- // Address and size must be page-aligned
- VAddr address;
- u64 size;
- bool read_only;
- bool unk_flag;
-};
+struct CodeSet;
enum class MemoryRegion : u16 {
APPLICATION = 1,
@@ -60,46 +60,6 @@ enum class ProcessStatus {
DebugBreak,
};
-struct CodeSet final {
- struct Segment {
- std::size_t offset = 0;
- VAddr addr = 0;
- u32 size = 0;
- };
-
- explicit CodeSet();
- ~CodeSet();
-
- Segment& CodeSegment() {
- return segments[0];
- }
-
- const Segment& CodeSegment() const {
- return segments[0];
- }
-
- Segment& RODataSegment() {
- return segments[1];
- }
-
- const Segment& RODataSegment() const {
- return segments[1];
- }
-
- Segment& DataSegment() {
- return segments[2];
- }
-
- const Segment& DataSegment() const {
- return segments[2];
- }
-
- std::shared_ptr<std::vector<u8>> memory;
-
- std::array<Segment, 3> segments;
- VAddr entrypoint = 0;
-};
-
class Process final : public WaitObject {
public:
enum : u64 {
@@ -116,7 +76,7 @@ public:
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
- static SharedPtr<Process> Create(KernelCore& kernel, std::string&& name);
+ static SharedPtr<Process> Create(Core::System& system, std::string&& name);
std::string GetTypeName() const override {
return "Process";
@@ -150,6 +110,26 @@ public:
return handle_table;
}
+ /// Gets a reference to the process' address arbiter.
+ AddressArbiter& GetAddressArbiter() {
+ return address_arbiter;
+ }
+
+ /// Gets a const reference to the process' address arbiter.
+ const AddressArbiter& GetAddressArbiter() const {
+ return address_arbiter;
+ }
+
+ /// Gets a reference to the process' mutex lock.
+ Mutex& GetMutex() {
+ return mutex;
+ }
+
+ /// Gets a const reference to the process' mutex lock
+ const Mutex& GetMutex() const {
+ return mutex;
+ }
+
/// Gets the current status of the process
ProcessStatus GetStatus() const {
return status;
@@ -207,6 +187,22 @@ public:
return random_entropy.at(index);
}
+ /// Retrieves the total physical memory used by this process in bytes.
+ u64 GetTotalPhysicalMemoryUsed() const;
+
+ /// Gets the list of all threads created with this process as their owner.
+ const std::list<const Thread*>& GetThreadList() const {
+ return thread_list;
+ }
+
+ /// Registers a thread as being created under this process,
+ /// adding it to this process' thread list.
+ void RegisterThread(const Thread* thread);
+
+ /// Unregisters a thread from this process, removing it
+ /// from this process' thread list.
+ void UnregisterThread(const Thread* thread);
+
/// Clears the signaled state of the process if and only if it's signaled.
///
/// @pre The process must not be already terminated. If this is called on a
@@ -231,7 +227,7 @@ public:
/**
* Applies address space changes and launches the process main thread.
*/
- void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size);
+ void Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size);
/**
* Prepares a process for termination by stopping all of its threads
@@ -251,11 +247,11 @@ public:
void FreeTLSSlot(VAddr tls_address);
private:
- explicit Process(KernelCore& kernel);
+ explicit Process(Core::System& system);
~Process() override;
/// Checks if the specified thread should wait until this process is available.
- bool ShouldWait(Thread* thread) const override;
+ bool ShouldWait(const Thread* thread) const override;
/// Acquires/locks this process for the specified thread if it's available.
void Acquire(Thread* thread) override;
@@ -268,6 +264,12 @@ private:
/// Memory manager for this process.
Kernel::VMManager vm_manager;
+ /// Size of the main thread's stack in bytes.
+ u64 main_thread_stack_size = 0;
+
+ /// Size of the loaded code memory in bytes.
+ u64 code_memory_size = 0;
+
/// Current status of the process
ProcessStatus status;
@@ -309,9 +311,24 @@ private:
/// Per-process handle table for storing created object handles in.
HandleTable handle_table;
+ /// Per-process address arbiter.
+ AddressArbiter address_arbiter;
+
+ /// The per-process mutex lock instance used for handling various
+ /// forms of services, such as lock arbitration, and condition
+ /// variable related facilities.
+ Mutex mutex;
+
/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy;
+ /// List of threads that are running with this process as their owner.
+ std::list<const Thread*> thread_list;
+
+ /// System context
+ Core::System& system;
+
+ /// Name of this process
std::string name;
};
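
With the new main_thread_stack_size and code_memory_size fields, GetTotalPhysicalMemoryUsed (defined in the process.cpp hunk above) reduces to a three-term sum. A worked example with made-up numbers:

#include <cstdint>
using u64 = std::uint64_t; // As in yuzu's common_types.h

// Illustrative arithmetic only.
const u64 heap_size = 0x200000;              // Current heap size: 2 MiB
const u64 main_thread_stack_size = 0x10000;  // Page-aligned stack: 64 KiB
const u64 code_memory_size = 0x80000;        // All loaded CodeSet images: 512 KiB

// Mirrors Process::GetTotalPhysicalMemoryUsed():
const u64 total = heap_size + main_thread_stack_size + code_memory_size; // 0x290000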
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 3a2164b25..583e35b79 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -96,7 +96,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
interrupt_capabilities.set();
// Allow using the maximum possible amount of handles
- handle_table_size = static_cast<u32>(HandleTable::MAX_COUNT);
+ handle_table_size = static_cast<s32>(HandleTable::MAX_COUNT);
// Allow all debugging capabilities.
is_debuggable = true;
@@ -337,7 +337,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
return ERR_RESERVED_VALUE;
}
- handle_table_size = (flags >> 16) & 0x3FF;
+ handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
return RESULT_SUCCESS;
}
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
index fbc8812a3..5cdd80747 100644
--- a/src/core/hle/kernel/process_capability.h
+++ b/src/core/hle/kernel/process_capability.h
@@ -156,7 +156,7 @@ public:
}
/// Gets the number of total allowable handles for the process' handle table.
- u32 GetHandleTableSize() const {
+ s32 GetHandleTableSize() const {
return handle_table_size;
}
@@ -252,7 +252,7 @@ private:
u64 core_mask = 0;
u64 priority_mask = 0;
- u32 handle_table_size = 0;
+ s32 handle_table_size = 0;
u32 kernel_version = 0;
ProgramType program_type = ProgramType::SysModule;
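
For reference, the handle table size in a capability descriptor occupies ten bits starting at bit 16, which is why the hunk above masks with 0x3FF; the result is now kept signed so it can flow directly into HandleTable::SetSize. A standalone sketch of the extraction:

#include <cstdint>

// Mirrors the (flags >> 16) & 0x3FF extraction in HandleHandleTableFlags.
constexpr std::int32_t HandleTableSizeFromFlags(std::uint32_t flags) {
    return static_cast<std::int32_t>((flags >> 16) & 0x3FF);
}

static_assert(HandleTableSizeFromFlags(512u << 16) == 512);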
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 0e5083f70..c2b798a4e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -14,7 +14,7 @@ namespace Kernel {
ReadableEvent::ReadableEvent(KernelCore& kernel) : WaitObject{kernel} {}
ReadableEvent::~ReadableEvent() = default;
-bool ReadableEvent::ShouldWait(Thread* thread) const {
+bool ReadableEvent::ShouldWait(const Thread* thread) const {
return !signaled;
}
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
index 77a9c362c..2eb9dcbb7 100644
--- a/src/core/hle/kernel/readable_event.h
+++ b/src/core/hle/kernel/readable_event.h
@@ -36,7 +36,7 @@ public:
return HANDLE_TYPE;
}
- bool ShouldWait(Thread* thread) const override;
+ bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
/// Unconditionally clears the readable event's state.
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 2f9695005..173f69915 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -16,11 +16,8 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
ResourceLimit::~ResourceLimit() = default;
-SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel, std::string name) {
- SharedPtr<ResourceLimit> resource_limit(new ResourceLimit(kernel));
-
- resource_limit->name = std::move(name);
- return resource_limit;
+SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
+ return new ResourceLimit(kernel);
}
s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
index 59dc11c22..70e09858a 100644
--- a/src/core/hle/kernel/resource_limit.h
+++ b/src/core/hle/kernel/resource_limit.h
@@ -31,16 +31,14 @@ constexpr bool IsValidResourceType(ResourceType type) {
class ResourceLimit final : public Object {
public:
- /**
- * Creates a resource limit object.
- */
- static SharedPtr<ResourceLimit> Create(KernelCore& kernel, std::string name = "Unknown");
+ /// Creates a resource limit object.
+ static SharedPtr<ResourceLimit> Create(KernelCore& kernel);
std::string GetTypeName() const override {
return "ResourceLimit";
}
std::string GetName() const override {
- return name;
+ return GetTypeName();
}
static const HandleType HANDLE_TYPE = HandleType::ResourceLimit;
@@ -95,9 +93,6 @@ private:
ResourceArray limits{};
/// Current resource limit values.
ResourceArray values{};
-
- /// Name of resource limit object.
- std::string name;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index df4d6cf0a..ac501bf7f 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -19,7 +19,8 @@ namespace Kernel {
std::mutex Scheduler::scheduler_mutex;
-Scheduler::Scheduler(Core::ARM_Interface& cpu_core) : cpu_core(cpu_core) {}
+Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core)
+ : cpu_core{cpu_core}, system{system} {}
Scheduler::~Scheduler() {
for (auto& thread : thread_list) {
@@ -28,8 +29,8 @@ Scheduler::~Scheduler() {
}
bool Scheduler::HaveReadyThreads() const {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
- return ready_queue.get_first() != nullptr;
+ std::lock_guard lock{scheduler_mutex};
+ return !ready_queue.empty();
}
Thread* Scheduler::GetCurrentThread() const {
@@ -45,23 +46,28 @@ Thread* Scheduler::PopNextReadyThread() {
Thread* thread = GetCurrentThread();
if (thread && thread->GetStatus() == ThreadStatus::Running) {
+ if (ready_queue.empty()) {
+ return thread;
+ }
// We have to do better than the current thread.
// Only switch if the queue's head has a strictly higher priority.
- next = ready_queue.pop_first_better(thread->GetPriority());
- if (!next) {
- // Otherwise just keep going with the current thread
+ next = ready_queue.front();
+ if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
next = thread;
}
} else {
- next = ready_queue.pop_first();
+ if (ready_queue.empty()) {
+ return nullptr;
+ }
+ next = ready_queue.front();
}
return next;
}
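
PopNextReadyThread now peeks at the head of a Common::MultiLevelQueue rather than
popping from the old ThreadQueueList. A toy stand-in (not the actual
Common::MultiLevelQueue implementation) illustrates the semantics the scheduler
relies on: one FIFO bucket per priority level, where a lower level number means a
higher priority.

    #include <array>
    #include <cstddef>
    #include <list>

    template <typename T, std::size_t Levels>
    class ToyMultiLevelQueue {
    public:
        void add(const T& item, std::size_t priority, bool push_back = true) {
            auto& bucket = buckets[priority];
            if (push_back) {
                bucket.push_back(item);
            } else {
                bucket.push_front(item);
            }
        }

        void remove(const T& item, std::size_t priority) {
            buckets[priority].remove(item);
        }

        bool empty() const {
            for (const auto& bucket : buckets) {
                if (!bucket.empty()) {
                    return false;
                }
            }
            return true;
        }

        // Head of the lowest-numbered (best-priority) non-empty bucket, or a
        // default-constructed T (nullptr for pointer types) when nothing is ready.
        T front() const {
            for (const auto& bucket : buckets) {
                if (!bucket.empty()) {
                    return bucket.front();
                }
            }
            return T{};
        }

    private:
        std::array<std::list<T>, Levels> buckets{};
    };

With Thread* as T, the priority comparison above keeps the running thread unless a
strictly better one is ready, matching the old pop_first_better behaviour without
mutating the queue.
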
void Scheduler::SwitchContext(Thread* new_thread) {
- Thread* const previous_thread = GetCurrentThread();
- Process* const previous_process = Core::CurrentProcess();
+ Thread* previous_thread = GetCurrentThread();
+ Process* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -74,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
if (previous_thread->GetStatus() == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
- ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
+ ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
previous_thread->SetStatus(ThreadStatus::Ready);
}
}
@@ -89,13 +95,13 @@ void Scheduler::SwitchContext(Thread* new_thread) {
current_thread = new_thread;
- ready_queue.remove(new_thread->GetPriority(), new_thread);
+ ready_queue.remove(new_thread, new_thread->GetPriority());
new_thread->SetStatus(ThreadStatus::Running);
auto* const thread_owner_process = current_thread->GetOwnerProcess();
if (previous_process != thread_owner_process) {
- Core::System::GetInstance().Kernel().MakeCurrentProcess(thread_owner_process);
- SetCurrentPageTable(&Core::CurrentProcess()->VMManager().page_table);
+ system.Kernel().MakeCurrentProcess(thread_owner_process);
+ Memory::SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
}
cpu_core.LoadContext(new_thread->GetContext());
@@ -111,7 +117,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
const u64 prev_switch_ticks = last_context_switch_time;
- const u64 most_recent_switch_ticks = CoreTiming::GetTicks();
+ const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks();
const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
if (thread != nullptr) {
@@ -126,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
}
void Scheduler::Reschedule() {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
Thread* cur = GetCurrentThread();
Thread* next = PopNextReadyThread();
@@ -142,51 +148,54 @@ void Scheduler::Reschedule() {
SwitchContext(next);
}
-void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+void Scheduler::AddThread(SharedPtr<Thread> thread) {
+ std::lock_guard lock{scheduler_mutex};
thread_list.push_back(std::move(thread));
- ready_queue.prepare(priority);
}
void Scheduler::RemoveThread(Thread* thread) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
}
void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
- ready_queue.push_back(priority, thread);
+ ready_queue.add(thread, priority);
}
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
- ready_queue.remove(priority, thread);
+ ready_queue.remove(thread, priority);
}
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
+ if (thread->GetPriority() == priority) {
+ return;
+ }
// If thread was ready, adjust queues
if (thread->GetStatus() == ThreadStatus::Ready)
- ready_queue.move(thread, thread->GetPriority(), priority);
- else
- ready_queue.prepare(priority);
+ ready_queue.adjust(thread, thread->GetPriority(), priority);
}
Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
- std::lock_guard<std::mutex> lock(scheduler_mutex);
+ std::lock_guard lock{scheduler_mutex};
const u32 mask = 1U << core;
- return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) {
- return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority;
- });
+ for (auto* thread : ready_queue) {
+ if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
+ return thread;
+ }
+ }
+ return nullptr;
}
void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
@@ -198,8 +207,7 @@ void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
// Yield this thread -- sleep for zero time and force reschedule to different thread
- WaitCurrentThread_Sleep();
- GetCurrentThread()->WakeAfterDelay(0);
+ GetCurrentThread()->Sleep(0);
}
void Scheduler::YieldWithLoadBalancing(Thread* thread) {
@@ -214,8 +222,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) {
ASSERT(priority < THREADPRIO_COUNT);
// Sleep for zero time to be able to force reschedule to different thread
- WaitCurrentThread_Sleep();
- GetCurrentThread()->WakeAfterDelay(0);
+ GetCurrentThread()->Sleep(0);
Thread* suggested_thread = nullptr;
@@ -223,8 +230,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) {
// Take the first non-nullptr one
for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) {
const auto res =
- Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread(
- core, priority);
+ system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority);
// If scheduler provides a suggested thread
if (res != nullptr) {
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 97ced4dfc..b29bf7be8 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -7,13 +7,14 @@
#include <mutex>
#include <vector>
#include "common/common_types.h"
-#include "common/thread_queue_list.h"
+#include "common/multi_level_queue.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
namespace Core {
class ARM_Interface;
-}
+class System;
+} // namespace Core
namespace Kernel {
@@ -21,7 +22,7 @@ class Process;
class Scheduler final {
public:
- explicit Scheduler(Core::ARM_Interface& cpu_core);
+ explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core);
~Scheduler();
/// Returns whether there are any threads that are ready to run.
@@ -37,7 +38,7 @@ public:
u64 GetLastContextSwitchTicks() const;
/// Adds a new thread to the scheduler
- void AddThread(SharedPtr<Thread> thread, u32 priority);
+ void AddThread(SharedPtr<Thread> thread);
/// Removes a thread from the scheduler
void RemoveThread(Thread* thread);
@@ -155,13 +156,14 @@ private:
std::vector<SharedPtr<Thread>> thread_list;
/// Lists only ready thread ids.
- Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
+ Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
SharedPtr<Thread> current_thread = nullptr;
Core::ARM_Interface& cpu_core;
u64 last_context_switch_time = 0;
+ Core::System& system;
static std::mutex scheduler_mutex;
};
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index d6ceeb2da..708fdf9e1 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -26,7 +26,11 @@ ResultVal<SharedPtr<ServerSession>> ServerPort::Accept() {
return MakeResult(std::move(session));
}
-bool ServerPort::ShouldWait(Thread* thread) const {
+void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session) {
+ pending_sessions.push_back(std::move(pending_session));
+}
+
+bool ServerPort::ShouldWait(const Thread* thread) const {
// If there are no pending sessions, we wait until a new one is added.
return pending_sessions.empty();
}
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index e52f8245f..76293cb8b 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -22,6 +22,8 @@ class SessionRequestHandler;
class ServerPort final : public WaitObject {
public:
+ using HLEHandler = std::shared_ptr<SessionRequestHandler>;
+
/**
* Creates a pair of ServerPort and an associated ClientPort.
*
@@ -51,29 +53,44 @@ public:
*/
ResultVal<SharedPtr<ServerSession>> Accept();
+ /// Whether or not this server port has an HLE handler available.
+ bool HasHLEHandler() const {
+ return hle_handler != nullptr;
+ }
+
+ /// Gets the HLE handler for this port.
+ HLEHandler GetHLEHandler() const {
+ return hle_handler;
+ }
+
/**
* Sets the HLE handler template for the port. ServerSessions created by connecting to this port
* will inherit a reference to this handler.
*/
- void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) {
+ void SetHleHandler(HLEHandler hle_handler_) {
hle_handler = std::move(hle_handler_);
}
- std::string name; ///< Name of port (optional)
+ /// Appends a ServerSession to the collection of ServerSessions
+ /// waiting to be accepted by this port.
+ void AppendPendingSession(SharedPtr<ServerSession> pending_session);
+
+ bool ShouldWait(const Thread* thread) const override;
+ void Acquire(Thread* thread) override;
+
+private:
+ explicit ServerPort(KernelCore& kernel);
+ ~ServerPort() override;
/// ServerSessions waiting to be accepted by the port
std::vector<SharedPtr<ServerSession>> pending_sessions;
/// This session's HLE request handler template (optional)
/// ServerSessions created from this port inherit a reference to this handler.
- std::shared_ptr<SessionRequestHandler> hle_handler;
-
- bool ShouldWait(Thread* thread) const override;
- void Acquire(Thread* thread) override;
+ HLEHandler hle_handler;
-private:
- explicit ServerPort(KernelCore& kernel);
- ~ServerPort() override;
+ /// Name of the port (optional)
+ std::string name;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 027434f92..40cec143e 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -46,7 +46,7 @@ ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelCore& kernel, st
return MakeResult(std::move(server_session));
}
-bool ServerSession::ShouldWait(Thread* thread) const {
+bool ServerSession::ShouldWait(const Thread* thread) const {
// Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
if (parent->client == nullptr)
return false;
@@ -63,42 +63,71 @@ void ServerSession::Acquire(Thread* thread) {
pending_requesting_threads.pop_back();
}
+void ServerSession::ClientDisconnected() {
+ // We keep a shared pointer to the HLE handler to keep it alive throughout
+ // the call to ClientDisconnected, as ClientDisconnected invalidates the
+ // hle_handler member itself while it executes.
+ std::shared_ptr<SessionRequestHandler> handler = hle_handler;
+ if (handler) {
+ // Note that after this returns, this server session's hle_handler is
+ // invalidated (set to null).
+ handler->ClientDisconnected(this);
+ }
+
+ // TODO(Subv): Force a wake up of all the ServerSession's waiting threads and set
+ // their WaitSynchronization result to 0xC920181A.
+
+ // Clean up the list of client threads with pending requests, they are unneeded now that the
+ // client endpoint is closed.
+ pending_requesting_threads.clear();
+ currently_handling = nullptr;
+}
+
+void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) {
+ domain_request_handlers.push_back(std::move(handler));
+}
+
+std::size_t ServerSession::NumDomainRequestHandlers() const {
+ return domain_request_handlers.size();
+}
+
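
The keep-alive copy in ClientDisconnected is a general shared_ptr idiom worth
spelling out. A minimal sketch with illustrative types (Session and Handler here
are not the kernel's classes):

    #include <memory>

    struct Session;

    struct Handler {
        void ClientDisconnected(Session& session);
    };

    struct Session {
        std::shared_ptr<Handler> hle_handler;

        void Disconnect() {
            // Copy the member first: the callee may null out hle_handler, and
            // this local copy keeps the Handler alive until we return.
            std::shared_ptr<Handler> handler = hle_handler;
            if (handler) {
                handler->ClientDisconnected(*this);
            }
        }
    };

    void Handler::ClientDisconnected(Session& session) {
        session.hle_handler = nullptr; // invalidates the member, not the local copy
    }

    int main() {
        Session session;
        session.hle_handler = std::make_shared<Handler>();
        session.Disconnect(); // safe: the handler outlives the member reset
        return 0;
    }
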
ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
- auto* const domain_message_header = context.GetDomainMessageHeader();
- if (domain_message_header) {
- // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
- context.SetDomainRequestHandlers(domain_request_handlers);
-
- // If there is a DomainMessageHeader, then this is CommandType "Request"
- const u32 object_id{context.GetDomainMessageHeader()->object_id};
- switch (domain_message_header->command) {
- case IPC::DomainMessageHeader::CommandType::SendMessage:
- if (object_id > domain_request_handlers.size()) {
- LOG_CRITICAL(IPC,
- "object_id {} is too big! This probably means a recent service call "
- "to {} needed to return a new interface!",
- object_id, name);
- UNREACHABLE();
- return RESULT_SUCCESS; // Ignore error if asserts are off
- }
- return domain_request_handlers[object_id - 1]->HandleSyncRequest(context);
-
- case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
- LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
-
- domain_request_handlers[object_id - 1] = nullptr;
-
- IPC::ResponseBuilder rb{context, 2};
- rb.Push(RESULT_SUCCESS);
- return RESULT_SUCCESS;
- }
+ if (!context.HasDomainMessageHeader()) {
+ return RESULT_SUCCESS;
+ }
+
+ // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
+ context.SetDomainRequestHandlers(domain_request_handlers);
+
+ // If there is a DomainMessageHeader, then this is CommandType "Request"
+ const auto& domain_message_header = context.GetDomainMessageHeader();
+ const u32 object_id{domain_message_header.object_id};
+ switch (domain_message_header.command) {
+ case IPC::DomainMessageHeader::CommandType::SendMessage:
+ if (object_id > domain_request_handlers.size()) {
+ LOG_CRITICAL(IPC,
+ "object_id {} is too big! This probably means a recent service call "
+ "to {} needed to return a new interface!",
+ object_id, name);
+ UNREACHABLE();
+ return RESULT_SUCCESS; // Ignore error if asserts are off
}
+ return domain_request_handlers[object_id - 1]->HandleSyncRequest(context);
- LOG_CRITICAL(IPC, "Unknown domain command={}",
- static_cast<int>(domain_message_header->command.Value()));
- ASSERT(false);
+ case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
+ LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
+
+ domain_request_handlers[object_id - 1] = nullptr;
+
+ IPC::ResponseBuilder rb{context, 2};
+ rb.Push(RESULT_SUCCESS);
+ return RESULT_SUCCESS;
+ }
}
+ LOG_CRITICAL(IPC, "Unknown domain command={}",
+ static_cast<int>(domain_message_header.command.Value()));
+ ASSERT(false);
return RESULT_SUCCESS;
}
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index e0e9d64c8..79b84bade 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -46,6 +46,14 @@ public:
return HANDLE_TYPE;
}
+ Session* GetParent() {
+ return parent.get();
+ }
+
+ const Session* GetParent() const {
+ return parent.get();
+ }
+
using SessionPair = std::tuple<SharedPtr<ServerSession>, SharedPtr<ClientSession>>;
/**
@@ -74,27 +82,20 @@ public:
*/
ResultCode HandleSyncRequest(SharedPtr<Thread> thread);
- bool ShouldWait(Thread* thread) const override;
+ bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
- std::string name; ///< The name of this session (optional)
- std::shared_ptr<Session> parent; ///< The parent session, which links to the client endpoint.
- std::shared_ptr<SessionRequestHandler>
- hle_handler; ///< This session's HLE request handler (applicable when not a domain)
+ /// Called when a client disconnection occurs.
+ void ClientDisconnected();
- /// This is the list of domain request handlers (after conversion to a domain)
- std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
-
- /// List of threads that are pending a response after a sync request. This list is processed in
- /// a LIFO manner, thus, the last request will be dispatched first.
- /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
- std::vector<SharedPtr<Thread>> pending_requesting_threads;
+ /// Adds a new domain request handler to the collection of request handlers within
+ /// this ServerSession instance.
+ void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler);
- /// Thread whose request is currently being handled. A request is considered "handled" when a
- /// response is sent via svcReplyAndReceive.
- /// TODO(Subv): Find a better name for this.
- SharedPtr<Thread> currently_handling;
+ /// Retrieves the total number of domain request handlers that have been
+ /// appended to this ServerSession instance.
+ std::size_t NumDomainRequestHandlers() const;
/// Returns true if the session has been converted to a domain, otherwise false
bool IsDomain() const {
@@ -129,8 +130,30 @@ private:
/// object handle.
ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);
+ /// The parent session, which links to the client endpoint.
+ std::shared_ptr<Session> parent;
+
+ /// This session's HLE request handler (applicable when not a domain)
+ std::shared_ptr<SessionRequestHandler> hle_handler;
+
+ /// This is the list of domain request handlers (after conversion to a domain)
+ std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
+
+ /// List of threads that are pending a response after a sync request. This list is processed in
+ /// a LIFO manner, thus, the last request will be dispatched first.
+ /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
+ std::vector<SharedPtr<Thread>> pending_requesting_threads;
+
+ /// Thread whose request is currently being handled. A request is considered "handled" when a
+ /// response is sent via svcReplyAndReceive.
+ /// TODO(Subv): Find a better name for this.
+ SharedPtr<Thread> currently_handling;
+
/// When set to True, converts the session to a domain at the end of the command
bool convert_to_domain{};
+
+ /// The name of this session (optional)
+ std::string name;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index 22d0c1dd5..f15c5ee36 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -6,11 +6,9 @@
#include "common/assert.h"
#include "common/logging/log.h"
-#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
-#include "core/memory.h"
namespace Kernel {
@@ -34,8 +32,8 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_
shared_memory->backing_block_offset = 0;
// Refresh the address mappings for the current process.
- if (Core::CurrentProcess() != nullptr) {
- Core::CurrentProcess()->VMManager().RefreshMemoryBlockMappings(
+ if (kernel.CurrentProcess() != nullptr) {
+ kernel.CurrentProcess()->VMManager().RefreshMemoryBlockMappings(
shared_memory->backing_block.get());
}
} else {
@@ -120,7 +118,15 @@ ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermi
ConvertPermissions(permissions));
}
-ResultCode SharedMemory::Unmap(Process& target_process, VAddr address) {
+ResultCode SharedMemory::Unmap(Process& target_process, VAddr address, u64 unmap_size) {
+ if (unmap_size != size) {
+ LOG_ERROR(Kernel,
+ "Invalid size passed to Unmap. Size must be equal to the size of the "
+ "memory managed. Shared memory size=0x{:016X}, Unmap size=0x{:016X}",
+ size, unmap_size);
+ return ERR_INVALID_SIZE;
+ }
+
// TODO(Subv): Verify what happens if the application tries to unmap an address that is not
// mapped to a SharedMemory.
return target_process.VMManager().UnmapRange(address, size);
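
A standalone sketch of the guard this hunk introduces (the enum and helper are
illustrative): partial unmaps are rejected, so callers must pass the full managed
size.

    #include <cstdint>

    enum class UnmapResult { Success, InvalidSize };

    // Mirrors the size check added to SharedMemory::Unmap above.
    UnmapResult UnmapChecked(std::uint64_t managed_size, std::uint64_t unmap_size) {
        if (unmap_size != managed_size) {
            return UnmapResult::InvalidSize; // surfaces as ERR_INVALID_SIZE
        }
        // The real code would unmap [address, address + managed_size) here.
        return UnmapResult::Success;
    }
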
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index dab2a6bea..37e18c443 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -104,11 +104,17 @@ public:
/**
* Unmaps a shared memory block from the specified address in system memory
+ *
* @param target_process Process from which to unmap the memory block.
- * @param address Address in system memory where the shared memory block is mapped
+ * @param address Address in system memory where the shared memory block is mapped.
+ * @param unmap_size The number of bytes to unmap from this shared memory instance.
+ *
* @return Result code of the unmap operation
+ *
+ * @pre The size to unmap must equal the size of the memory managed by the
+ * SharedMemory instance itself, otherwise ERR_INVALID_SIZE will be returned.
*/
- ResultCode Unmap(Process& target_process, VAddr address);
+ ResultCode Unmap(Process& target_process, VAddr address, u64 unmap_size);
/**
* Gets a pointer to the shared memory block
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 7cfecb68c..ab10db3df 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -20,6 +20,7 @@
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
+#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
@@ -31,6 +32,7 @@
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
@@ -47,23 +49,6 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) {
return address + size > address;
}
-// Checks if a given address range lies within a larger address range.
-constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
- VAddr address_range_end) {
- const VAddr end_address = address + size - 1;
- return address_range_begin <= address && end_address <= address_range_end - 1;
-}
-
-bool IsInsideAddressSpace(const VMManager& vm, VAddr address, u64 size) {
- return IsInsideAddressRange(address, size, vm.GetAddressSpaceBaseAddress(),
- vm.GetAddressSpaceEndAddress());
-}
-
-bool IsInsideNewMapRegion(const VMManager& vm, VAddr address, u64 size) {
- return IsInsideAddressRange(address, size, vm.GetNewMapRegionBaseAddress(),
- vm.GetNewMapRegionEndAddress());
-}
-
// 8 GiB
constexpr u64 MAIN_MEMORY_SIZE = 0x200000000;
@@ -105,14 +90,14 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add
return ERR_INVALID_ADDRESS_STATE;
}
- if (!IsInsideAddressSpace(vm_manager, src_addr, size)) {
+ if (!vm_manager.IsWithinAddressSpace(src_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
src_addr, size);
return ERR_INVALID_ADDRESS_STATE;
}
- if (!IsInsideNewMapRegion(vm_manager, dst_addr, size)) {
+ if (!vm_manager.IsWithinNewMapRegion(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination is not within the new map region, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
@@ -190,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
return ERR_INVALID_SIZE;
}
- auto& vm_manager = Core::CurrentProcess()->VMManager();
- const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress();
- const auto alloc_result =
- vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
-
+ auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager();
+ const auto alloc_result = vm_manager.SetHeapSize(heap_size);
if (alloc_result.Failed()) {
return alloc_result.Code();
}
@@ -238,7 +220,7 @@ static ResultCode SetMemoryPermission(VAddr addr, u64 size, u32 prot) {
auto* const current_process = Core::CurrentProcess();
auto& vm_manager = current_process->VMManager();
- if (!IsInsideAddressSpace(vm_manager, addr, size)) {
+ if (!vm_manager.IsWithinAddressSpace(addr, size)) {
LOG_ERROR(Kernel_SVC,
"Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
size);
@@ -299,7 +281,7 @@ static ResultCode SetMemoryAttribute(VAddr address, u64 size, u32 mask, u32 attr
}
auto& vm_manager = Core::CurrentProcess()->VMManager();
- if (!IsInsideAddressSpace(vm_manager, address, size)) {
+ if (!vm_manager.IsWithinAddressSpace(address, size)) {
LOG_ERROR(Kernel_SVC,
"Given address (0x{:016X}) is outside the bounds of the address space.", address);
return ERR_INVALID_ADDRESS_STATE;
@@ -567,9 +549,9 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
return ERR_INVALID_ADDRESS;
}
- auto& handle_table = Core::CurrentProcess()->GetHandleTable();
- return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle,
- requesting_thread_handle);
+ auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
+ requesting_thread_handle);
}
/// Unlock a mutex
@@ -587,7 +569,8 @@ static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
return ERR_INVALID_ADDRESS;
}
- return Mutex::Release(mutex_addr);
+ auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ return current_process->GetMutex().Release(mutex_addr);
}
enum class BreakType : u32 {
@@ -726,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
HeapRegionBaseAddr = 4,
HeapRegionSize = 5,
TotalMemoryUsage = 6,
- TotalHeapUsage = 7,
+ TotalPhysicalMemoryUsed = 7,
IsCurrentProcessBeingDebugged = 8,
RegisterResourceLimit = 9,
IdleTickCount = 10,
@@ -762,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
case GetInfoType::NewMapRegionBaseAddr:
case GetInfoType::NewMapRegionSize:
case GetInfoType::TotalMemoryUsage:
- case GetInfoType::TotalHeapUsage:
+ case GetInfoType::TotalPhysicalMemoryUsed:
case GetInfoType::IsVirtualAddressMemoryEnabled:
case GetInfoType::PersonalMmHeapUsage:
case GetInfoType::TitleId:
@@ -822,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
*result = process->VMManager().GetTotalMemoryUsage();
return RESULT_SUCCESS;
- case GetInfoType::TotalHeapUsage:
- *result = process->VMManager().GetTotalHeapUsage();
+ case GetInfoType::TotalPhysicalMemoryUsed:
+ *result = process->GetTotalPhysicalMemoryUsed();
return RESULT_SUCCESS;
case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -918,6 +901,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
}
const auto& system = Core::System::GetInstance();
+ const auto& core_timing = system.CoreTiming();
const auto& scheduler = system.CurrentScheduler();
const auto* const current_thread = scheduler.GetCurrentThread();
const bool same_thread = current_thread == thread;
@@ -927,9 +911,9 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks();
- out_ticks = thread_ticks + (CoreTiming::GetTicks() - prev_ctx_ticks);
+ out_ticks = thread_ticks + (core_timing.GetTicks() - prev_ctx_ticks);
} else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
- out_ticks = CoreTiming::GetTicks() - prev_ctx_ticks;
+ out_ticks = core_timing.GetTicks() - prev_ctx_ticks;
}
*result = out_ticks;
@@ -1156,7 +1140,7 @@ static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64
return ERR_INVALID_MEMORY_RANGE;
}
- return shared_memory->Unmap(*current_process, addr);
+ return shared_memory->Unmap(*current_process, addr, size);
}
static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address,
@@ -1299,10 +1283,14 @@ static ResultCode StartThread(Handle thread_handle) {
/// Called when a thread exits
static void ExitThread() {
- LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC());
+ auto& system = Core::System::GetInstance();
- ExitCurrentThread();
- Core::System::GetInstance().PrepareReschedule();
+ LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
+
+ auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
+ current_thread->Stop();
+ system.CurrentScheduler().RemoveThread(current_thread);
+ system.PrepareReschedule();
}
/// Sleep the current thread
@@ -1315,32 +1303,32 @@ static void SleepThread(s64 nanoseconds) {
YieldAndWaitForLoadBalancing = -2,
};
+ auto& system = Core::System::GetInstance();
+ auto& scheduler = system.CurrentScheduler();
+ auto* const current_thread = scheduler.GetCurrentThread();
+
if (nanoseconds <= 0) {
- auto& scheduler{Core::System::GetInstance().CurrentScheduler()};
switch (static_cast<SleepType>(nanoseconds)) {
case SleepType::YieldWithoutLoadBalancing:
- scheduler.YieldWithoutLoadBalancing(GetCurrentThread());
+ scheduler.YieldWithoutLoadBalancing(current_thread);
break;
case SleepType::YieldWithLoadBalancing:
- scheduler.YieldWithLoadBalancing(GetCurrentThread());
+ scheduler.YieldWithLoadBalancing(current_thread);
break;
case SleepType::YieldAndWaitForLoadBalancing:
- scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread());
+ scheduler.YieldAndWaitForLoadBalancing(current_thread);
break;
default:
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
} else {
- // Sleep current thread and check for next thread to schedule
- WaitCurrentThread_Sleep();
-
- // Create an event to wake the thread up after the specified nanosecond delay has passed
- GetCurrentThread()->WakeAfterDelay(nanoseconds);
+ current_thread->Sleep(nanoseconds);
}
// Reschedule all CPU cores
- for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i)
- Core::System::GetInstance().CpuCore(i).PrepareReschedule();
+ for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) {
+ system.CpuCore(i).PrepareReschedule();
+ }
}
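
SleepThread overloads its duration argument: positive values request a real sleep,
while non-positive sentinels select a yield strategy. A self-contained sketch of
the dispatch, assuming the 0 and -1 sentinels from the enum this hunk truncates
(-2 is visible above); print statements stand in for the scheduler calls.

    #include <cstdint>
    #include <iostream>

    void SleepOrYield(std::int64_t nanoseconds) {
        if (nanoseconds > 0) {
            std::cout << "sleep for " << nanoseconds << " ns\n";
            return;
        }
        switch (nanoseconds) {
        case 0: // YieldWithoutLoadBalancing
            std::cout << "yield on the current core only\n";
            break;
        case -1: // YieldWithLoadBalancing
            std::cout << "yield, allowing migration to another core\n";
            break;
        case -2: // YieldAndWaitForLoadBalancing
            std::cout << "yield and wait to be picked up by another core\n";
            break;
        default:
            std::cout << "invalid yield type\n";
            break;
        }
    }
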
/// Wait process wide key atomic
@@ -1351,17 +1339,21 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
"called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
- const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
+ auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
ASSERT(thread);
- CASCADE_CODE(Mutex::Release(mutex_addr));
+ const auto release_result = current_process->GetMutex().Release(mutex_addr);
+ if (release_result.IsError()) {
+ return release_result;
+ }
SharedPtr<Thread> current_thread = GetCurrentThread();
current_thread->SetCondVarWaitAddress(condition_variable_addr);
current_thread->SetMutexWaitAddress(mutex_addr);
current_thread->SetWaitHandle(thread_handle);
- current_thread->SetStatus(ThreadStatus::WaitMutex);
+ current_thread->SetStatus(ThreadStatus::WaitCondVar);
current_thread->InvalidateWakeupCallback();
current_thread->WakeAfterDelay(nano_seconds);
@@ -1405,10 +1397,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// them all.
std::size_t last = waiting_threads.size();
if (target != -1)
- last = target;
+ last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
// If there are no threads waiting on this condition variable, just exit
- if (last > waiting_threads.size())
+ if (last == 0)
return RESULT_SUCCESS;
for (std::size_t index = 0; index < last; ++index) {
@@ -1416,6 +1408,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
+ // Liberate the condition variable thread.
+ thread->SetCondVarWaitAddress(0);
+
std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
auto& monitor = Core::System::GetInstance().Monitor();
@@ -1434,10 +1429,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
}
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
thread->GetWaitHandle()));
-
if (mutex_val == 0) {
// We were able to acquire the mutex, resume this thread.
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->ResumeFromWait();
auto* const lock_owner = thread->GetLockOwner();
@@ -1447,8 +1441,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
thread->SetLockOwner(nullptr);
thread->SetMutexWaitAddress(0);
- thread->SetCondVarWaitAddress(0);
thread->SetWaitHandle(0);
+ Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
} else {
// Atomically signal that the mutex now has a waiting thread.
do {
@@ -1467,12 +1461,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
auto owner = handle_table.Get<Thread>(owner_handle);
ASSERT(owner);
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->InvalidateWakeupCallback();
+ thread->SetStatus(ThreadStatus::WaitMutex);
owner->AddMutexWaiter(thread);
-
- Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
}
}
@@ -1494,20 +1487,10 @@ static ResultCode WaitForAddress(VAddr address, u32 type, s32 value, s64 timeout
return ERR_INVALID_ADDRESS;
}
- switch (static_cast<AddressArbiter::ArbitrationType>(type)) {
- case AddressArbiter::ArbitrationType::WaitIfLessThan:
- return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, false);
- case AddressArbiter::ArbitrationType::DecrementAndWaitIfLessThan:
- return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, true);
- case AddressArbiter::ArbitrationType::WaitIfEqual:
- return AddressArbiter::WaitForAddressIfEqual(address, value, timeout);
- default:
- LOG_ERROR(Kernel_SVC,
- "Invalid arbitration type, expected WaitIfLessThan, DecrementAndWaitIfLessThan "
- "or WaitIfEqual but got {}",
- type);
- return ERR_INVALID_ENUM_VALUE;
- }
+ const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
+ auto& address_arbiter =
+ Core::System::GetInstance().Kernel().CurrentProcess()->GetAddressArbiter();
+ return address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
}
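
The deleted switch documents exactly what the per-process arbiter must dispatch
on. A standalone reconstruction of that mapping, assuming the member function
keeps the same semantics (the enum's numeric values are assumed here, and result
codes are simplified to plain integers):

    #include <cstdint>

    enum class ArbitrationType : std::uint32_t {
        WaitIfLessThan = 0,
        DecrementAndWaitIfLessThan = 1,
        WaitIfEqual = 2,
    };

    // 0 stands in for the underlying wait call, 1 for ERR_INVALID_ENUM_VALUE.
    int DispatchWait(ArbitrationType type) {
        switch (type) {
        case ArbitrationType::WaitIfLessThan:
            return 0; // WaitForAddressIfLessThan(address, value, timeout, false)
        case ArbitrationType::DecrementAndWaitIfLessThan:
            return 0; // WaitForAddressIfLessThan(address, value, timeout, true)
        case ArbitrationType::WaitIfEqual:
            return 0; // WaitForAddressIfEqual(address, value, timeout)
        default:
            return 1;
        }
    }
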
// Signals to an address (via Address Arbiter)
@@ -1525,31 +1508,21 @@ static ResultCode SignalToAddress(VAddr address, u32 type, s32 value, s32 num_to
return ERR_INVALID_ADDRESS;
}
- switch (static_cast<AddressArbiter::SignalType>(type)) {
- case AddressArbiter::SignalType::Signal:
- return AddressArbiter::SignalToAddress(address, num_to_wake);
- case AddressArbiter::SignalType::IncrementAndSignalIfEqual:
- return AddressArbiter::IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
- case AddressArbiter::SignalType::ModifyByWaitingCountAndSignalIfEqual:
- return AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(address, value,
- num_to_wake);
- default:
- LOG_ERROR(Kernel_SVC,
- "Invalid signal type, expected Signal, IncrementAndSignalIfEqual "
- "or ModifyByWaitingCountAndSignalIfEqual but got {}",
- type);
- return ERR_INVALID_ENUM_VALUE;
- }
+ const auto signal_type = static_cast<AddressArbiter::SignalType>(type);
+ auto& address_arbiter =
+ Core::System::GetInstance().Kernel().CurrentProcess()->GetAddressArbiter();
+ return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
}
/// This returns the total CPU ticks elapsed since the CPU was powered-on
static u64 GetSystemTick() {
LOG_TRACE(Kernel_SVC, "called");
- const u64 result{CoreTiming::GetTicks()};
+ auto& core_timing = Core::System::GetInstance().CoreTiming();
+ const u64 result{core_timing.GetTicks()};
// Advance time to defeat dumb games that busy-wait for the frame to end.
- CoreTiming::AddTicks(400);
+ core_timing.AddTicks(400);
return result;
}
@@ -1612,14 +1585,121 @@ static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32
}
auto& kernel = Core::System::GetInstance().Kernel();
- auto process = kernel.CurrentProcess();
- auto& handle_table = process->GetHandleTable();
- const auto shared_mem_handle = SharedMemory::Create(kernel, process, size, perms, perms, addr);
+ auto transfer_mem_handle = TransferMemory::Create(kernel, addr, size, perms);
- CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
+ auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ const auto result = handle_table.Create(std::move(transfer_mem_handle));
+ if (result.Failed()) {
+ return result.Code();
+ }
+
+ *handle = *result;
return RESULT_SUCCESS;
}
+static ResultCode MapTransferMemory(Handle handle, VAddr address, u64 size, u32 permission_raw) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}",
+ handle, address, size, permission_raw);
+
+ if (!Common::Is4KBAligned(address)) {
+ LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).",
+ address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
+ size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size overflows the 64-bit range (address=0x{:016X}, "
+ "size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto permissions = static_cast<MemoryPermission>(permission_raw);
+ if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read &&
+ permissions != MemoryPermission::ReadWrite) {
+ LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).",
+ permission_raw);
+ return ERR_INVALID_STATE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto* const current_process = kernel.CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
+
+ auto transfer_memory = handle_table.Get<TransferMemory>(handle);
+ if (!transfer_memory) {
+ LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
+ handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size don't fully fit within the ASLR region "
+ "(address=0x{:016X}, size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return transfer_memory->MapMemory(address, size, permissions);
+}
+
+static ResultCode UnmapTransferMemory(Handle handle, VAddr address, u64 size) {
+ LOG_DEBUG(Kernel_SVC, "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle,
+ address, size);
+
+ if (!Common::Is4KBAligned(address)) {
+ LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).",
+ address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
+ size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size overflows the 64-bit range (address=0x{:016X}, "
+ "size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto* const current_process = kernel.CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
+
+ auto transfer_memory = handle_table.Get<TransferMemory>(handle);
+ if (!transfer_memory) {
+ LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
+ handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size don't fully fit within the ASLR region "
+ "(address=0x{:016X}, size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return transfer_memory->UnmapMemory(address, size);
+}
+
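
Both transfer memory SVCs share the same validation ladder. The two primitive
checks are easy to state standalone; this sketch assumes Common::Is4KBAligned has
the obvious contract, and the range check has the same shape as the
IsValidAddressRange helper defined earlier in this file.

    #include <cstdint>

    constexpr bool Is4KBAligned(std::uint64_t value) {
        return (value & 0xFFF) == 0;
    }

    // Overflow-safe "address + size does not wrap the 64-bit space" test.
    // Note that a size of zero is rejected as well.
    constexpr bool IsValidAddressRange(std::uint64_t address, std::uint64_t size) {
        return address + size > address;
    }

    static_assert(Is4KBAligned(0x2000));
    static_assert(!Is4KBAligned(0x2001));
    static_assert(IsValidAddressRange(0x1000, 0x1000));
    static_assert(!IsValidAddressRange(0xFFFFFFFFFFFFF000ULL, 0x2000)); // wraps
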
static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
@@ -1903,6 +1983,83 @@ static ResultCode SetResourceLimitLimitValue(Handle resource_limit, u32 resource
return RESULT_SUCCESS;
}
+static ResultCode GetProcessList(u32* out_num_processes, VAddr out_process_ids,
+ u32 out_process_ids_size) {
+ LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
+ out_process_ids, out_process_ids_size);
+
+ // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
+ if ((out_process_ids_size & 0xF0000000) != 0) {
+ LOG_ERROR(Kernel_SVC,
+ "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
+ out_process_ids_size);
+ return ERR_OUT_OF_RANGE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto& vm_manager = kernel.CurrentProcess()->VMManager();
+ const auto total_copy_size = out_process_ids_size * sizeof(u64);
+
+ if (out_process_ids_size > 0 &&
+ !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) {
+ LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+ out_process_ids, out_process_ids + total_copy_size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& process_list = kernel.GetProcessList();
+ const auto num_processes = process_list.size();
+ const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);
+
+ for (std::size_t i = 0; i < copy_amount; ++i) {
+ Memory::Write64(out_process_ids, process_list[i]->GetProcessID());
+ out_process_ids += sizeof(u64);
+ }
+
+ *out_num_processes = static_cast<u32>(num_processes);
+ return RESULT_SUCCESS;
+}
+
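
The mask test reads tersely, but it is just a range check: rejecting any count
with one of the top four bits set is equivalent to requiring the count to stay
within [0, 0x0FFFFFFF], which keeps count * sizeof(u64) within INT32_MAX. A
two-line equivalent (the helper name is illustrative):

    #include <cstdint>

    // 0x0FFFFFFF * sizeof(u64) == 0x7FFFFFF8, still within a signed 32-bit range.
    constexpr bool IsValidIdCount(std::uint32_t count) {
        return (count & 0xF0000000) == 0;
    }

    static_assert(IsValidIdCount(0x0FFFFFFF));
    static_assert(!IsValidIdCount(0x10000000));
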
+ResultCode GetThreadList(u32* out_num_threads, VAddr out_thread_ids, u32 out_thread_ids_size,
+ Handle debug_handle) {
+ // TODO: Handle this case when debug events are supported.
+ UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
+
+ LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
+ out_thread_ids, out_thread_ids_size);
+
+ // If the size is negative or larger than INT32_MAX / sizeof(u64)
+ if ((out_thread_ids_size & 0xF0000000) != 0) {
+ LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
+ out_thread_ids_size);
+ return ERR_OUT_OF_RANGE;
+ }
+
+ const auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
+ const auto& vm_manager = current_process->VMManager();
+ const auto total_copy_size = out_thread_ids_size * sizeof(u64);
+
+ if (out_thread_ids_size > 0 &&
+ !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) {
+ LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+ out_thread_ids, out_thread_ids + total_copy_size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& thread_list = current_process->GetThreadList();
+ const auto num_threads = thread_list.size();
+ const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);
+
+ auto list_iter = thread_list.cbegin();
+ for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
+ Memory::Write64(out_thread_ids, (*list_iter)->GetThreadID());
+ out_thread_ids += sizeof(u64);
+ }
+
+ *out_num_threads = static_cast<u32>(num_threads);
+ return RESULT_SUCCESS;
+}
+
namespace {
struct FunctionDef {
using Func = void();
@@ -1995,8 +2152,8 @@ static const FunctionDef SVC_Table[] = {
{0x4E, nullptr, "ReadWriteRegister"},
{0x4F, nullptr, "SetProcessActivity"},
{0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"},
- {0x51, nullptr, "MapTransferMemory"},
- {0x52, nullptr, "UnmapTransferMemory"},
+ {0x51, SvcWrap<MapTransferMemory>, "MapTransferMemory"},
+ {0x52, SvcWrap<UnmapTransferMemory>, "UnmapTransferMemory"},
{0x53, nullptr, "CreateInterruptEvent"},
{0x54, nullptr, "QueryPhysicalAddress"},
{0x55, nullptr, "QueryIoMapping"},
@@ -2015,8 +2172,8 @@ static const FunctionDef SVC_Table[] = {
{0x62, nullptr, "TerminateDebugProcess"},
{0x63, nullptr, "GetDebugEvent"},
{0x64, nullptr, "ContinueDebugEvent"},
- {0x65, nullptr, "GetProcessList"},
- {0x66, nullptr, "GetThreadList"},
+ {0x65, SvcWrap<GetProcessList>, "GetProcessList"},
+ {0x66, SvcWrap<GetThreadList>, "GetThreadList"},
{0x67, nullptr, "GetDebugThreadContext"},
{0x68, nullptr, "SetDebugThreadContext"},
{0x69, nullptr, "QueryDebugProcessMemory"},
@@ -2058,7 +2215,7 @@ void CallSVC(u32 immediate) {
MICROPROFILE_SCOPE(Kernel_SVC);
// Lock the global kernel mutex when we enter the kernel HLE.
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+ std::lock_guard lock{HLE::g_hle_lock};
const FunctionDef* info = GetSVCInfo(immediate);
if (info) {
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 2a2c2c5ea..b3733680f 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -78,6 +78,14 @@ void SvcWrap() {
FuncReturn(retval);
}
+template <ResultCode func(u32*, u64, u32)>
+void SvcWrap() {
+ u32 param_1 = 0;
+ const u32 retval = func(&param_1, Param(1), static_cast<u32>(Param(2))).raw;
+ Core::CurrentArmInterface().SetReg(1, param_1);
+ FuncReturn(retval);
+}
+
template <ResultCode func(u64*, u32)>
void SvcWrap() {
u64 param_1 = 0;
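
Each SvcWrap specialization is a small marshalling shim between guest registers
and a typed C++ function. A toy model of what the new (u32*, u64, u32) wrapper
does (GuestRegs is illustrative; the real code reads and writes registers through
Core::CurrentArmInterface):

    #include <array>
    #include <cstdint>

    struct GuestRegs {
        std::array<std::uint64_t, 31> x{}; // X0 through X30
    };

    using Svc = std::uint32_t (*)(std::uint32_t* out, std::uint64_t, std::uint32_t);

    void Dispatch(GuestRegs& regs, Svc func) {
        std::uint32_t out = 0;
        // Inputs arrive in X1/X2; the result code goes back in W0 and the
        // by-pointer output in W1, mirroring the template above.
        const std::uint32_t retval =
            func(&out, regs.x[1], static_cast<std::uint32_t>(regs.x[2]));
        regs.x[0] = retval;
        regs.x[1] = out;
    }
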
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index d3984dfc4..1b891f632 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -7,8 +7,6 @@
#include <optional>
#include <vector>
-#include <boost/range/algorithm_ext/erase.hpp>
-
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
@@ -30,7 +28,7 @@
namespace Kernel {
-bool Thread::ShouldWait(Thread* thread) const {
+bool Thread::ShouldWait(const Thread* thread) const {
return status != ThreadStatus::Dead;
}
@@ -43,7 +41,8 @@ Thread::~Thread() = default;
void Thread::Stop() {
// Cancel any outstanding wakeup events for this thread
- CoreTiming::UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), callback_handle);
+ Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
+ callback_handle);
kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle);
callback_handle = 0;
@@ -63,21 +62,12 @@ void Thread::Stop() {
}
wait_objects.clear();
+ owner_process->UnregisterThread(this);
+
// Mark the TLS slot in the thread's page as free.
owner_process->FreeTLSSlot(tls_address);
}
-void WaitCurrentThread_Sleep() {
- Thread* thread = GetCurrentThread();
- thread->SetStatus(ThreadStatus::WaitSleep);
-}
-
-void ExitCurrentThread() {
- Thread* thread = GetCurrentThread();
- thread->Stop();
- Core::System::GetInstance().CurrentScheduler().RemoveThread(thread);
-}
-
void Thread::WakeAfterDelay(s64 nanoseconds) {
// Don't schedule a wakeup if the thread wants to wait forever
if (nanoseconds == -1)
@@ -85,12 +75,14 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
// This function might be called from any thread so we have to be cautious and use the
// thread-safe version of ScheduleEvent.
- CoreTiming::ScheduleEventThreadsafe(CoreTiming::nsToCycles(nanoseconds),
- kernel.ThreadWakeupCallbackEventType(), callback_handle);
+ Core::System::GetInstance().CoreTiming().ScheduleEventThreadsafe(
+ Core::Timing::nsToCycles(nanoseconds), kernel.ThreadWakeupCallbackEventType(),
+ callback_handle);
}
void Thread::CancelWakeupTimer() {
- CoreTiming::UnscheduleEventThreadsafe(kernel.ThreadWakeupCallbackEventType(), callback_handle);
+ Core::System::GetInstance().CoreTiming().UnscheduleEventThreadsafe(
+ kernel.ThreadWakeupCallbackEventType(), callback_handle);
}
static std::optional<s32> GetNextProcessorId(u64 mask) {
@@ -115,6 +107,7 @@ void Thread::ResumeFromWait() {
case ThreadStatus::WaitSleep:
case ThreadStatus::WaitIPC:
case ThreadStatus::WaitMutex:
+ case ThreadStatus::WaitCondVar:
case ThreadStatus::WaitArb:
break;
@@ -181,14 +174,13 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
return ERR_INVALID_PROCESSOR_ID;
}
- // TODO(yuriks): Other checks, returning 0xD9001BEA
-
if (!Memory::IsValidVirtualAddress(owner_process, entry_point)) {
LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
// TODO (bunnei): Find the correct error code to use here
return ResultCode(-1);
}
+ auto& system = Core::System::GetInstance();
SharedPtr<Thread> thread(new Thread(kernel));
thread->thread_id = kernel.CreateNewThreadID();
@@ -197,7 +189,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->stack_top = stack_top;
thread->tpidr_el0 = 0;
thread->nominal_priority = thread->current_priority = priority;
- thread->last_running_ticks = CoreTiming::GetTicks();
+ thread->last_running_ticks = system.CoreTiming().GetTicks();
thread->processor_id = processor_id;
thread->ideal_core = processor_id;
thread->affinity_mask = 1ULL << processor_id;
@@ -208,10 +200,12 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->name = std::move(name);
thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
thread->owner_process = &owner_process;
- thread->scheduler = &Core::System::GetInstance().Scheduler(processor_id);
- thread->scheduler->AddThread(thread, priority);
+ thread->scheduler = &system.Scheduler(processor_id);
+ thread->scheduler->AddThread(thread);
thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
+ thread->owner_process->RegisterThread(thread.get());
+
// TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
// to initialize the context
ResetThreadContext(thread->context, stack_top, entry_point, arg);
@@ -239,16 +233,16 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
context.cpu_registers[1] = output;
}
-s32 Thread::GetWaitObjectIndex(WaitObject* object) const {
+s32 Thread::GetWaitObjectIndex(const WaitObject* object) const {
ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything");
- auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
+ const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1);
}
VAddr Thread::GetCommandBufferAddress() const {
// Offset from the start of TLS at which the IPC command buffer begins.
- static constexpr int CommandHeaderOffset = 0x80;
- return GetTLSAddress() + CommandHeaderOffset;
+ constexpr u64 command_header_offset = 0x80;
+ return GetTLSAddress() + command_header_offset;
}
void Thread::SetStatus(ThreadStatus new_status) {
@@ -257,7 +251,7 @@ void Thread::SetStatus(ThreadStatus new_status) {
}
if (status == ThreadStatus::Running) {
- last_running_ticks = CoreTiming::GetTicks();
+ last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
}
status = new_status;
@@ -267,8 +261,8 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
if (thread->lock_owner == this) {
// If the thread is already waiting for this thread to release the mutex, ensure that the
// waiters list is consistent and return without doing anything.
- auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(itr != wait_mutex_threads.end());
+ const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
+ ASSERT(iter != wait_mutex_threads.end());
return;
}
@@ -276,11 +270,16 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
ASSERT(thread->lock_owner == nullptr);
// Ensure that the thread is not already in the list of mutex waiters
- auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(itr == wait_mutex_threads.end());
-
+ const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
+ ASSERT(iter == wait_mutex_threads.end());
+
+ // Keep the list in an ordered fashion
+ const auto insertion_point = std::find_if(
+ wait_mutex_threads.begin(), wait_mutex_threads.end(),
+ [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
+ wait_mutex_threads.insert(insertion_point, thread);
thread->lock_owner = this;
- wait_mutex_threads.emplace_back(std::move(thread));
+
UpdatePriority();
}
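
The insertion-point search keeps wait_mutex_threads sorted ascending by priority
value, and the strict greater-than comparison preserves FIFO order among equal
priorities. A compilable miniature of the same pattern over plain ints:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    void InsertSorted(std::vector<int>& priorities, int priority) {
        // First entry with a strictly greater value; equal entries stay ahead.
        const auto insertion_point =
            std::find_if(priorities.begin(), priorities.end(),
                         [priority](int entry) { return entry > priority; });
        priorities.insert(insertion_point, priority);
    }

    int main() {
        std::vector<int> v;
        for (const int p : {30, 10, 20, 10}) {
            InsertSorted(v, p);
        }
        assert((v == std::vector<int>{10, 10, 20, 30})); // second 10 after the first
        return 0;
    }
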
@@ -288,32 +287,44 @@ void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) {
ASSERT(thread->lock_owner == this);
// Ensure that the thread is in the list of mutex waiters
- auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
- ASSERT(itr != wait_mutex_threads.end());
+ const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
+ ASSERT(iter != wait_mutex_threads.end());
+
+ wait_mutex_threads.erase(iter);
- boost::remove_erase(wait_mutex_threads, thread);
thread->lock_owner = nullptr;
UpdatePriority();
}
void Thread::UpdatePriority() {
- // Find the highest priority among all the threads that are waiting for this thread's lock
+ // If any of the threads waiting on the mutex have a higher priority
+ // (taking into account priority inheritance), then this thread inherits
+ // that thread's priority.
u32 new_priority = nominal_priority;
- for (const auto& thread : wait_mutex_threads) {
- if (thread->nominal_priority < new_priority)
- new_priority = thread->nominal_priority;
+ if (!wait_mutex_threads.empty()) {
+ if (wait_mutex_threads.front()->current_priority < new_priority) {
+ new_priority = wait_mutex_threads.front()->current_priority;
+ }
}
- if (new_priority == current_priority)
+ if (new_priority == current_priority) {
return;
+ }
scheduler->SetThreadPriority(this, new_priority);
-
current_priority = new_priority;
+ if (!lock_owner) {
+ return;
+ }
+
+ // Ensure that the thread is at the correct position in the owner's waiter list.
+ auto old_owner = lock_owner;
+ lock_owner->RemoveMutexWaiter(this);
+ old_owner->AddMutexWaiter(this);
+
// Recursively update the priority of the thread that depends on the priority of this one.
- if (lock_owner)
- lock_owner->UpdatePriority();
+ lock_owner->UpdatePriority();
}
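// Illustrative scenario (hypothetical threads and priorities, not from this
// change): on Horizon, lower values mean higher priority. If thread B
// (priority 10) blocks on a mutex held by thread A (nominal priority 40):
//
//   A->AddMutexWaiter(B); // inserts B in priority order and calls
//                         // UpdatePriority(): A's current_priority becomes
//                         // 10 while its nominal_priority stays 40
//
// Keeping the waiter list sorted means only the front element has to be
// inspected to compute the inherited priority.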
void Thread::ChangeCore(u32 core, u64 mask) {
@@ -345,7 +356,7 @@ void Thread::ChangeScheduler() {
if (*new_processor_id != processor_id) {
// Remove thread from previous core's scheduler
scheduler->RemoveThread(this);
- next_scheduler.AddThread(this, current_priority);
+ next_scheduler.AddThread(this);
}
processor_id = *new_processor_id;
@@ -360,7 +371,7 @@ void Thread::ChangeScheduler() {
system.CpuCore(processor_id).PrepareReschedule();
}
-bool Thread::AllWaitObjectsReady() {
+bool Thread::AllWaitObjectsReady() const {
return std::none_of(
wait_objects.begin(), wait_objects.end(),
[this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); });
@@ -389,6 +400,14 @@ void Thread::SetActivity(ThreadActivity value) {
}
}
+void Thread::Sleep(s64 nanoseconds) {
+ // Sleep the current thread and check for the next thread to schedule
+ SetStatus(ThreadStatus::WaitSleep);
+
+ // Create an event to wake the thread up after the specified nanosecond delay has passed
+ WakeAfterDelay(nanoseconds);
+}
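// Simplified call-site sketch (assumed; the real SVC handler in svc.cpp also
// special-cases yield values):
//
//   void SleepThread(s64 nanoseconds) {
//       GetCurrentThread()->Sleep(nanoseconds);
//   }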
+
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index c48b21aba..73e5d1bb4 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -51,7 +51,8 @@ enum class ThreadStatus {
WaitIPC, ///< Waiting for the reply from an IPC request
WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false
WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true
- WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc
+ WaitMutex, ///< Waiting due to an ArbitrateLock svc
+ WaitCondVar, ///< Waiting due to a WaitProcessWideKey svc
WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
Dormant, ///< Created but not yet made ready
Dead ///< Run to completion, or forcefully terminated
@@ -110,7 +111,7 @@ public:
return HANDLE_TYPE;
}
- bool ShouldWait(Thread* thread) const override;
+ bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
/**
@@ -204,7 +205,7 @@ public:
* object in the list.
* @param object Object to query the index of.
*/
- s32 GetWaitObjectIndex(WaitObject* object) const;
+ s32 GetWaitObjectIndex(const WaitObject* object) const;
/**
* Stops a thread, invalidating it from further use
@@ -298,7 +299,7 @@ public:
}
/// Determines whether all the objects this thread is waiting on are ready.
- bool AllWaitObjectsReady();
+ bool AllWaitObjectsReady() const;
const MutexWaitingThreads& GetMutexWaitingThreads() const {
return wait_mutex_threads;
@@ -383,6 +384,9 @@ public:
void SetActivity(ThreadActivity value);
+ /// Sleeps this thread for the given amount of nanoseconds.
+ void Sleep(s64 nanoseconds);
+
private:
explicit Thread(KernelCore& kernel);
~Thread() override;
@@ -398,8 +402,14 @@ private:
VAddr entry_point = 0;
VAddr stack_top = 0;
- u32 nominal_priority = 0; ///< Nominal thread priority, as set by the emulated application
- u32 current_priority = 0; ///< Current thread priority, can be temporarily changed
+ /// Nominal thread priority, as set by the emulated application.
+ /// The nominal priority is the thread priority without priority
+ /// inheritance taken into account.
+ u32 nominal_priority = 0;
+
+ /// Current thread priority. This may change over the course of the
+ /// thread's lifetime in order to facilitate priority inheritance.
+ u32 current_priority = 0;
u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
u64 last_running_ticks = 0; ///< CPU tick when thread was last running
@@ -460,14 +470,4 @@ private:
*/
Thread* GetCurrentThread();
-/**
- * Waits the current thread on a sleep
- */
-void WaitCurrentThread_Sleep();
-
-/**
- * Stops the current thread and removes it from the thread_list
- */
-void ExitCurrentThread();
-
} // namespace Kernel
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
new file mode 100644
index 000000000..23228e1b5
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -0,0 +1,73 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/shared_memory.h"
+#include "core/hle/kernel/transfer_memory.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {}
+TransferMemory::~TransferMemory() = default;
+
+SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address,
+ size_t size, MemoryPermission permissions) {
+ SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)};
+
+ transfer_memory->base_address = base_address;
+ transfer_memory->memory_size = size;
+ transfer_memory->owner_permissions = permissions;
+ transfer_memory->owner_process = kernel.CurrentProcess();
+
+ return transfer_memory;
+}
+
+ResultCode TransferMemory::MapMemory(VAddr address, size_t size, MemoryPermission permissions) {
+ if (memory_size != size) {
+ return ERR_INVALID_SIZE;
+ }
+
+ if (owner_permissions != permissions) {
+ return ERR_INVALID_STATE;
+ }
+
+ if (is_mapped) {
+ return ERR_INVALID_STATE;
+ }
+
+ const auto map_state = owner_permissions == MemoryPermission::None
+ ? MemoryState::TransferMemoryIsolated
+ : MemoryState::TransferMemory;
+ auto& vm_manager = owner_process->VMManager();
+ const auto map_result = vm_manager.MapMemoryBlock(
+ address, std::make_shared<std::vector<u8>>(size), 0, size, map_state);
+
+ if (map_result.Failed()) {
+ return map_result.Code();
+ }
+
+ is_mapped = true;
+ return RESULT_SUCCESS;
+}
+
+ResultCode TransferMemory::UnmapMemory(VAddr address, size_t size) {
+ if (memory_size != size) {
+ return ERR_INVALID_SIZE;
+ }
+
+ auto& vm_manager = owner_process->VMManager();
+ const auto result = vm_manager.UnmapRange(address, size);
+
+ if (result.IsError()) {
+ return result;
+ }
+
+ is_mapped = false;
+ return RESULT_SUCCESS;
+}
+
+} // namespace Kernel
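// Usage sketch (hypothetical caller; handle-table registration omitted):
//
//   auto tmem = TransferMemory::Create(kernel, src_address, size,
//                                      MemoryPermission::ReadWrite);
//   // Mapping must repeat the size and permissions used at creation:
//   const ResultCode map_result =
//       tmem->MapMemory(dst_address, size, MemoryPermission::ReadWrite);
//   if (map_result.IsError()) {
//       return map_result;
//   }
//   // ... use the mapping ...
//   tmem->UnmapMemory(dst_address, size); // size must match again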
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
new file mode 100644
index 000000000..ec294951e
--- /dev/null
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -0,0 +1,91 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/object.h"
+
+union ResultCode;
+
+namespace Kernel {
+
+class KernelCore;
+class Process;
+
+enum class MemoryPermission : u32;
+
+/// Defines the interface for transfer memory objects.
+///
+/// Transfer memory is typically used to transfer memory
+/// between separate process instances, hence the name.
+///
+class TransferMemory final : public Object {
+public:
+ static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory;
+
+ static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, size_t size,
+ MemoryPermission permissions);
+
+ TransferMemory(const TransferMemory&) = delete;
+ TransferMemory& operator=(const TransferMemory&) = delete;
+
+ TransferMemory(TransferMemory&&) = delete;
+ TransferMemory& operator=(TransferMemory&&) = delete;
+
+ std::string GetTypeName() const override {
+ return "TransferMemory";
+ }
+
+ std::string GetName() const override {
+ return GetTypeName();
+ }
+
+ HandleType GetHandleType() const override {
+ return HANDLE_TYPE;
+ }
+
+ /// Attempts to map transfer memory with the given range and memory permissions.
+ ///
+ /// @param address The base address at which to begin mapping memory.
+ /// @param size The size of the memory to map, in bytes.
+ /// @param permissions The memory permissions to check against when mapping memory.
+ ///
+ /// @pre The given address, size, and memory permissions must all match
+ /// the same values that were given when creating the transfer memory
+ /// instance.
+ ///
+ ResultCode MapMemory(VAddr address, size_t size, MemoryPermission permissions);
+
+ /// Unmaps the transfer memory within the given range.
+ ///
+ /// @param address The base address to begin unmapping memory at.
+ /// @param size The size of the memory to unmap, in bytes.
+ ///
+ /// @pre The given address and size must be the same as the ones used
+ /// to create the transfer memory instance.
+ ///
+ ResultCode UnmapMemory(VAddr address, size_t size);
+
+private:
+ explicit TransferMemory(KernelCore& kernel);
+ ~TransferMemory() override;
+
+ /// The base address for the memory managed by this instance.
+ VAddr base_address = 0;
+
+ /// Size of the memory, in bytes, that this instance manages.
+ size_t memory_size = 0;
+
+ /// The memory permissions that are applied to this instance.
+ MemoryPermission owner_permissions{};
+
+ /// The process that this transfer memory instance was created under.
+ Process* owner_process = nullptr;
+
+ /// Whether or not this transfer memory instance has mapped memory.
+ bool is_mapped = false;
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 10ad94aa6..ec0a480ce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -7,34 +7,42 @@
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
+#include "common/memory_hook.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
-#include "core/memory_hook.h"
#include "core/memory_setup.h"
namespace Kernel {
-
-static const char* GetMemoryStateName(MemoryState state) {
+namespace {
+const char* GetMemoryStateName(MemoryState state) {
static constexpr const char* names[] = {
- "Unmapped", "Io",
- "Normal", "CodeStatic",
- "CodeMutable", "Heap",
- "Shared", "Unknown1",
- "ModuleCodeStatic", "ModuleCodeMutable",
- "IpcBuffer0", "Stack",
- "ThreadLocal", "TransferMemoryIsolated",
- "TransferMemory", "ProcessMemory",
- "Inaccessible", "IpcBuffer1",
- "IpcBuffer3", "KernelStack",
+ "Unmapped", "Io",
+ "Normal", "Code",
+ "CodeData", "Heap",
+ "Shared", "Unknown1",
+ "ModuleCode", "ModuleCodeData",
+ "IpcBuffer0", "Stack",
+ "ThreadLocal", "TransferMemoryIsolated",
+ "TransferMemory", "ProcessMemory",
+ "Inaccessible", "IpcBuffer1",
+ "IpcBuffer3", "KernelStack",
};
return names[ToSvcMemoryState(state)];
}
+// Checks if a given address range lies within a larger address range.
+constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
+ VAddr address_range_end) {
+ const VAddr end_address = address + size - 1;
+ return address_range_begin <= address && end_address <= address_range_end - 1;
+}
+} // Anonymous namespace
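// Worked example: IsInsideAddressRange(0x1000, 0x1000, 0x0000, 0x2000)
// computes end_address = 0x1FFF and evaluates
//   0x0000 <= 0x1000 && 0x1FFF <= 0x1FFF -> true
// i.e. address_range_end is exclusive, so a range ending exactly at the
// region boundary is still considered inside.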
+
bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
ASSERT(base + size == next.base);
if (permissions != next.permissions || state != next.state || attribute != next.attribute ||
@@ -169,7 +177,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
MemoryState state,
- Memory::MemoryHookPointer mmio_handler) {
+ Common::MemoryHookPointer mmio_handler) {
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
VirtualMemoryArea& final_vma = vma_handle->second;
@@ -248,59 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
return RESULT_SUCCESS;
}
-ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
- if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() ||
- target + size < target) {
- return ERR_INVALID_ADDRESS;
+ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
+ if (size > GetHeapRegionSize()) {
+ return ERR_OUT_OF_MEMORY;
+ }
+
+ // No need to do any additional work if the heap is already the given size.
+ if (size == GetCurrentHeapSize()) {
+ return MakeResult(heap_region_base);
}
if (heap_memory == nullptr) {
// Initialize heap
- heap_memory = std::make_shared<std::vector<u8>>();
- heap_start = heap_end = target;
+ heap_memory = std::make_shared<std::vector<u8>>(size);
+ heap_end = heap_region_base + size;
} else {
- UnmapRange(heap_start, heap_end - heap_start);
- }
-
- // If necessary, expand backing vector to cover new heap extents.
- if (target < heap_start) {
- heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
- heap_start = target;
- RefreshMemoryBlockMappings(heap_memory.get());
- }
- if (target + size > heap_end) {
- heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
- heap_end = target + size;
- RefreshMemoryBlockMappings(heap_memory.get());
+ UnmapRange(heap_region_base, GetCurrentHeapSize());
}
- ASSERT(heap_end - heap_start == heap_memory->size());
- CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size,
- MemoryState::Heap));
- Reprotect(vma, perms);
+ // If necessary, expand the backing vector to cover the new heap extent
+ // when growing. Otherwise, shrink the backing memory if a smaller heap
+ // has been requested.
+ const u64 old_heap_size = GetCurrentHeapSize();
+ if (size > old_heap_size) {
+ const u64 alloc_size = size - old_heap_size;
- heap_used = size;
-
- return MakeResult<VAddr>(heap_end - size);
-}
+ heap_memory->insert(heap_memory->end(), alloc_size, 0);
+ RefreshMemoryBlockMappings(heap_memory.get());
+ } else if (size < old_heap_size) {
+ heap_memory->resize(size);
+ heap_memory->shrink_to_fit();
-ResultCode VMManager::HeapFree(VAddr target, u64 size) {
- if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() ||
- target + size < target) {
- return ERR_INVALID_ADDRESS;
+ RefreshMemoryBlockMappings(heap_memory.get());
}
- if (size == 0) {
- return RESULT_SUCCESS;
- }
+ heap_end = heap_region_base + size;
+ ASSERT(GetCurrentHeapSize() == heap_memory->size());
- const ResultCode result = UnmapRange(target, size);
- if (result.IsError()) {
- return result;
+ const auto mapping_result =
+ MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
+ if (mapping_result.Failed()) {
+ return mapping_result.Code();
}
- heap_used -= size;
- return RESULT_SUCCESS;
+ return MakeResult<VAddr>(heap_region_base);
}
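// Behavior sketch (illustrative sizes, not part of the patch):
//
//   vm.SetHeapSize(0x200000);  // first call: allocates and maps 2 MiB at
//                              // heap_region_base
//   vm.SetHeapSize(0x200000);  // same size: returns heap_region_base, no work
//   vm.SetHeapSize(0x100000);  // smaller: shrinks the backing vector, remaps
//   vm.SetHeapSize(vm.GetHeapRegionSize() + 1); // ERR_OUT_OF_MEMORY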
MemoryInfo VMManager::QueryMemory(VAddr address) const {
@@ -592,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
heap_region_base = map_region_end;
heap_region_end = heap_region_base + heap_region_size;
+ heap_end = heap_region_base;
new_map_region_base = heap_region_end;
new_map_region_end = new_map_region_base + new_map_region_size;
@@ -618,7 +618,7 @@ void VMManager::ClearPageTable() {
std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
page_table.special_regions.clear();
std::fill(page_table.attributes.begin(), page_table.attributes.end(),
- Memory::PageType::Unmapped);
+ Common::PageType::Unmapped);
}
VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,
@@ -686,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const {
return 0xF8000000;
}
-u64 VMManager::GetTotalHeapUsage() const {
- return heap_used;
-}
-
VAddr VMManager::GetAddressSpaceBaseAddress() const {
return address_space_base;
}
@@ -706,6 +702,11 @@ u64 VMManager::GetAddressSpaceWidth() const {
return address_space_width;
}
+bool VMManager::IsWithinAddressSpace(VAddr address, u64 size) const {
+ return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(),
+ GetAddressSpaceEndAddress());
+}
+
VAddr VMManager::GetASLRRegionBaseAddress() const {
return aslr_region_base;
}
@@ -750,6 +751,11 @@ u64 VMManager::GetCodeRegionSize() const {
return code_region_end - code_region_base;
}
+bool VMManager::IsWithinCodeRegion(VAddr address, u64 size) const {
+ return IsInsideAddressRange(address, size, GetCodeRegionBaseAddress(),
+ GetCodeRegionEndAddress());
+}
+
VAddr VMManager::GetHeapRegionBaseAddress() const {
return heap_region_base;
}
@@ -762,6 +768,15 @@ u64 VMManager::GetHeapRegionSize() const {
return heap_region_end - heap_region_base;
}
+u64 VMManager::GetCurrentHeapSize() const {
+ return heap_end - heap_region_base;
+}
+
+bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
+ return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
+ GetHeapRegionEndAddress());
+}
+
VAddr VMManager::GetMapRegionBaseAddress() const {
return map_region_base;
}
@@ -774,6 +789,10 @@ u64 VMManager::GetMapRegionSize() const {
return map_region_end - map_region_base;
}
+bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const {
+ return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress());
+}
+
VAddr VMManager::GetNewMapRegionBaseAddress() const {
return new_map_region_base;
}
@@ -786,6 +805,11 @@ u64 VMManager::GetNewMapRegionSize() const {
return new_map_region_end - new_map_region_base;
}
+bool VMManager::IsWithinNewMapRegion(VAddr address, u64 size) const {
+ return IsInsideAddressRange(address, size, GetNewMapRegionBaseAddress(),
+ GetNewMapRegionEndAddress());
+}
+
VAddr VMManager::GetTLSIORegionBaseAddress() const {
return tls_io_region_base;
}
@@ -798,4 +822,9 @@ u64 VMManager::GetTLSIORegionSize() const {
return tls_io_region_end - tls_io_region_base;
}
+bool VMManager::IsWithinTLSIORegion(VAddr address, u64 size) const {
+ return IsInsideAddressRange(address, size, GetTLSIORegionBaseAddress(),
+ GetTLSIORegionEndAddress());
+}
+
} // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 6091533bc..6f484b7bf 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -9,9 +9,10 @@
#include <tuple>
#include <vector>
#include "common/common_types.h"
+#include "common/memory_hook.h"
+#include "common/page_table.h"
#include "core/hle/result.h"
#include "core/memory.h"
-#include "core/memory_hook.h"
namespace FileSys {
enum class ProgramAddressSpaceType : u8;
@@ -164,12 +165,12 @@ enum class MemoryState : u32 {
Unmapped = 0x00,
Io = 0x01 | FlagMapped,
Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
- CodeStatic = 0x03 | CodeFlags | FlagMapProcess,
- CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory,
+ Code = 0x03 | CodeFlags | FlagMapProcess,
+ CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory,
Heap = 0x05 | DataFlags | FlagCodeMemory,
Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
- ModuleCodeStatic = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
- ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
+ ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
+ ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,
IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,
@@ -290,7 +291,7 @@ struct VirtualMemoryArea {
// Settings for type = MMIO
/// Physical address of the register area this VMA maps to.
PAddr paddr = 0;
- Memory::MemoryHookPointer mmio_handler = nullptr;
+ Common::MemoryHookPointer mmio_handler = nullptr;
/// Tests if this area can be merged to the right with `next`.
bool CanBeMergedWith(const VirtualMemoryArea& next) const;
@@ -368,7 +369,7 @@ public:
* @param mmio_handler The handler that will implement read and write for this MMIO region.
*/
ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
- Memory::MemoryHookPointer mmio_handler);
+ Common::MemoryHookPointer mmio_handler);
/// Unmaps a range of addresses, splitting VMAs as necessary.
ResultCode UnmapRange(VAddr target, u64 size);
@@ -379,11 +380,41 @@ public:
/// Changes the permissions of a range of addresses, splitting VMAs as necessary.
ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
- ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
- ResultCode HeapFree(VAddr target, u64 size);
-
ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
+ /// Attempts to allocate a heap with the given size.
+ ///
+ /// @param size The size of the heap to allocate in bytes.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size that is equal to the size of the current heap,
+ /// then this function will do nothing and return the current
+ /// heap's starting address, as there's no need to perform
+ /// any additional heap allocation work.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size less than the current heap's size, then
+ /// this function will attempt to shrink the heap.
+ ///
+ /// @note If a heap is currently allocated, and this is called
+ /// with a size larger than the current heap's size, then
+ /// this function will attempt to extend the size of the heap.
+ ///
+ /// @returns A result indicating either success or failure.
+ /// <p>
+ /// If successful, this function will return a result
+ /// containing the starting address to the allocated heap.
+ /// <p>
+ /// If unsuccessful, this function will return a result
+ /// containing an error code.
+ ///
+ /// @pre The given size must lie within the allowable heap
+ /// memory region managed by this VMManager instance.
+ /// Failure to abide by this will result in ERR_OUT_OF_MEMORY
+ /// being returned as the result.
+ ///
+ ResultVal<VAddr> SetHeapSize(u64 size);
+
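// Call-site sketch (assumed caller, e.g. an svcSetHeapSize handler):
//
//   const auto alloc_result = vm_manager.SetHeapSize(requested_size);
//   if (alloc_result.Failed()) {
//       return alloc_result.Code();
//   }
//   const VAddr heap_base = *alloc_result;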
/// Queries the memory manager for information about the given address.
///
/// @param address The address to query the memory manager about for information.
@@ -417,9 +448,6 @@ public:
/// Gets the total memory usage, used by svcGetInfo
u64 GetTotalMemoryUsage() const;
- /// Gets the total heap usage, used by svcGetInfo
- u64 GetTotalHeapUsage() const;
-
/// Gets the address space base address
VAddr GetAddressSpaceBaseAddress() const;
@@ -432,18 +460,21 @@ public:
/// Gets the address space width in bits.
u64 GetAddressSpaceWidth() const;
+ /// Determines whether or not the given address range lies within the address space.
+ bool IsWithinAddressSpace(VAddr address, u64 size) const;
+
/// Gets the base address of the ASLR region.
VAddr GetASLRRegionBaseAddress() const;
/// Gets the end address of the ASLR region.
VAddr GetASLRRegionEndAddress() const;
- /// Determines whether or not the specified address range is within the ASLR region.
- bool IsWithinASLRRegion(VAddr address, u64 size) const;
-
/// Gets the size of the ASLR region
u64 GetASLRRegionSize() const;
+ /// Determines whether or not the specified address range is within the ASLR region.
+ bool IsWithinASLRRegion(VAddr address, u64 size) const;
+
/// Gets the base address of the code region.
VAddr GetCodeRegionBaseAddress() const;
@@ -453,6 +484,9 @@ public:
/// Gets the total size of the code region in bytes.
u64 GetCodeRegionSize() const;
+ /// Determines whether or not the specified range is within the code region.
+ bool IsWithinCodeRegion(VAddr address, u64 size) const;
+
/// Gets the base address of the heap region.
VAddr GetHeapRegionBaseAddress() const;
@@ -462,6 +496,16 @@ public:
/// Gets the total size of the heap region in bytes.
u64 GetHeapRegionSize() const;
+ /// Gets the total size of the current heap in bytes.
+ ///
+ /// @note This is the current allocated heap size, not the size
+ /// of the region it's allowed to exist within.
+ ///
+ u64 GetCurrentHeapSize() const;
+
+ /// Determines whether or not the specified range is within the heap region.
+ bool IsWithinHeapRegion(VAddr address, u64 size) const;
+
/// Gets the base address of the map region.
VAddr GetMapRegionBaseAddress() const;
@@ -471,6 +515,9 @@ public:
/// Gets the total size of the map region in bytes.
u64 GetMapRegionSize() const;
+ /// Determines whether or not the specified range is within the map region.
+ bool IsWithinMapRegion(VAddr address, u64 size) const;
+
/// Gets the base address of the new map region.
VAddr GetNewMapRegionBaseAddress() const;
@@ -480,6 +527,9 @@ public:
/// Gets the total size of the new map region in bytes.
u64 GetNewMapRegionSize() const;
+ /// Determines whether or not the given address range is within the new map region.
+ bool IsWithinNewMapRegion(VAddr address, u64 size) const;
+
/// Gets the base address of the TLS IO region.
VAddr GetTLSIORegionBaseAddress() const;
@@ -489,9 +539,12 @@ public:
/// Gets the total size of the TLS IO region in bytes.
u64 GetTLSIORegionSize() const;
+ /// Determines if the given address range is within the TLS IO region.
+ bool IsWithinTLSIORegion(VAddr address, u64 size) const;
+
/// Each VMManager has its own page table, which is set as the main one when the owning process
/// is scheduled.
- Memory::PageTable page_table;
+ Common::PageTable page_table{Memory::PAGE_BITS};
private:
using VMAIter = VMAMap::iterator;
@@ -606,9 +659,9 @@ private:
// This makes deallocation and reallocation of holes fast and keeps process memory contiguous
// in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
std::shared_ptr<std::vector<u8>> heap_memory;
- // The left/right bounds of the address space covered by heap_memory.
- VAddr heap_start = 0;
+
+ // The end of the currently allocated heap. This is not an inclusive
+ // end of the range. This is essentially 'base_address + current_size'.
VAddr heap_end = 0;
- u64 heap_used = 0;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h
index 5987fb971..04464a51a 100644
--- a/src/core/hle/kernel/wait_object.h
+++ b/src/core/hle/kernel/wait_object.h
@@ -24,7 +24,7 @@ public:
* @param thread The thread about which we're deciding.
* @return True if the current thread should wait due to this object being unavailable
*/
- virtual bool ShouldWait(Thread* thread) const = 0;
+ virtual bool ShouldWait(const Thread* thread) const = 0;
/// Acquire/lock the object for the specified thread if it is available
virtual void Acquire(Thread* thread) = 0;