Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 25
-rw-r--r--  src/core/hle/kernel/errors.h | 7
-rw-r--r--  src/core/hle/kernel/handle_table.cpp | 2
-rw-r--r--  src/core/hle/kernel/handle_table.h | 2
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 27
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 20
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 5
-rw-r--r--  src/core/hle/kernel/object.h | 3
-rw-r--r--  src/core/hle/kernel/process.cpp | 114
-rw-r--r--  src/core/hle/kernel/process.h | 162
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 16
-rw-r--r--  src/core/hle/kernel/scheduler.h | 4
-rw-r--r--  src/core/hle/kernel/shared_memory.cpp | 16
-rw-r--r--  src/core/hle/kernel/shared_memory.h | 2
-rw-r--r--  src/core/hle/kernel/svc.cpp | 269
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 78
-rw-r--r--  src/core/hle/kernel/thread.cpp | 69
-rw-r--r--  src/core/hle/kernel/thread.h | 17
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 182
-rw-r--r--  src/core/hle/kernel/vm_manager.h | 104
-rw-r--r--  src/core/hle/kernel/wait_object.cpp | 2
-rw-r--r--  src/core/hle/kernel/wait_object.h | 2
22 files changed, 798 insertions, 330 deletions
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 6657accd5..93577591f 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -35,16 +35,17 @@ static ResultCode WaitForAddress(VAddr address, s64 timeout) {
// Gets the threads waiting on an address.
static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) {
- const auto RetrieveWaitingThreads =
- [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr arb_addr) {
- const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
-
- for (auto& thread : thread_list) {
- if (thread->arb_wait_address == arb_addr)
- waiting_threads.push_back(thread);
- }
- };
+ const auto RetrieveWaitingThreads = [](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr arb_addr) {
+ const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+ auto& thread_list = scheduler->GetThreadList();
+
+ for (auto& thread : thread_list) {
+ if (thread->arb_wait_address == arb_addr)
+ waiting_threads.push_back(thread);
+ }
+ };
// Retrieve all threads that are waiting for this address.
std::vector<SharedPtr<Thread>> threads;
@@ -66,12 +67,12 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
// them all.
- size_t last = waiting_threads.size();
+ std::size_t last = waiting_threads.size();
if (num_to_wake > 0)
last = num_to_wake;
// Signal the waiting threads.
- for (size_t i = 0; i < last; i++) {
+ for (std::size_t i = 0; i < last; i++) {
ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb);
waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
waiting_threads[i]->arb_wait_address = 0;
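For reference, the branch above reduces to this standalone sketch of the wake-count rule (names are hypothetical; the committed code indexes the waiter list directly rather than returning a count):

    #include <cstddef>
    #include <cstdint>

    // A non-positive num_to_wake means "wake every waiting thread".
    std::size_t NumThreadsToWake(std::int32_t num_to_wake, std::size_t num_waiting) {
        return num_to_wake > 0 ? static_cast<std::size_t>(num_to_wake) : num_waiting;
    }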
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index ad39c8271..e5fa67ae8 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -17,6 +17,7 @@ enum {
// Confirmed Switch OS error codes
MaxConnectionsReached = 7,
+ InvalidSize = 101,
InvalidAddress = 102,
HandleTableFull = 105,
InvalidMemoryState = 106,
@@ -29,6 +30,8 @@ enum {
SynchronizationCanceled = 118,
TooLarge = 119,
InvalidEnumValue = 120,
+ NoSuchEntry = 121,
+ AlreadyRegistered = 122,
InvalidState = 125,
ResourceLimitExceeded = 132,
};
@@ -55,6 +58,8 @@ constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS(ErrorModule::Kernel,
ErrCodes::InvalidMemoryPermissions);
constexpr ResultCode ERR_INVALID_HANDLE(ErrorModule::Kernel, ErrCodes::InvalidHandle);
constexpr ResultCode ERR_INVALID_PROCESSOR_ID(ErrorModule::Kernel, ErrCodes::InvalidProcessorId);
+constexpr ResultCode ERR_INVALID_SIZE(ErrorModule::Kernel, ErrCodes::InvalidSize);
+constexpr ResultCode ERR_ALREADY_REGISTERED(ErrorModule::Kernel, ErrCodes::AlreadyRegistered);
constexpr ResultCode ERR_INVALID_STATE(ErrorModule::Kernel, ErrCodes::InvalidState);
constexpr ResultCode ERR_INVALID_THREAD_PRIORITY(ErrorModule::Kernel,
ErrCodes::InvalidThreadPriority);
@@ -63,7 +68,7 @@ constexpr ResultCode ERR_INVALID_OBJECT_ADDR(-1);
constexpr ResultCode ERR_NOT_AUTHORIZED(-1);
/// Alternate code returned instead of ERR_INVALID_HANDLE in some code paths.
constexpr ResultCode ERR_INVALID_HANDLE_OS(-1);
-constexpr ResultCode ERR_NOT_FOUND(-1);
+constexpr ResultCode ERR_NOT_FOUND(ErrorModule::Kernel, ErrCodes::NoSuchEntry);
constexpr ResultCode RESULT_TIMEOUT(ErrorModule::Kernel, ErrCodes::Timeout);
/// Returned when Accept() is called on a port with no sessions to be accepted.
constexpr ResultCode ERR_NO_PENDING_SESSIONS(-1);
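The new constants follow the usual Horizon result-code layout; a sketch of the packing, assuming the standard 9-bit module / 13-bit description split that yuzu's ResultCode constructor performs internally:

    #include <cstdint>

    constexpr std::uint32_t MakeResultCode(std::uint32_t module, std::uint32_t description) {
        return (module & 0x1FF) | ((description & 0x1FFF) << 9);
    }

    // e.g. the corrected ERR_NOT_FOUND combines Kernel (module 1) with
    // NoSuchEntry (121), which error applets would render along the
    // lines of 2001-0121.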
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 3a079b9a9..5ee5c05e3 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -65,7 +65,7 @@ ResultCode HandleTable::Close(Handle handle) {
}
bool HandleTable::IsValid(Handle handle) const {
- size_t slot = GetSlot(handle);
+ std::size_t slot = GetSlot(handle);
u16 generation = GetGeneration(handle);
return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation;
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h
index cac928adb..9e2f33e8a 100644
--- a/src/core/hle/kernel/handle_table.h
+++ b/src/core/hle/kernel/handle_table.h
@@ -93,7 +93,7 @@ private:
* This is the maximum limit of handles allowed per process in CTR-OS. It can be further
* reduced by ExHeader values, but this is not emulated here.
*/
- static const size_t MAX_COUNT = 4096;
+ static const std::size_t MAX_COUNT = 4096;
static u16 GetSlot(Handle handle) {
return handle >> 15;
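GetSlot's shift implies the handle layout: the slot index lives in the upper bits, with a 15-bit generation counter in the lower bits. A sketch of the inverse operation (MakeHandle is illustrative, not part of this diff):

    #include <cstdint>

    using Handle = std::uint32_t;

    // Pack a table slot and generation counter back into a handle.
    constexpr Handle MakeHandle(std::uint16_t slot, std::uint16_t generation) {
        return (static_cast<Handle>(slot) << 15) | (generation & 0x7FFF);
    }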
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 7264be906..72fb9d250 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -42,9 +42,9 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
Kernel::SharedPtr<Kernel::Event> event) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
- thread->wakeup_callback =
- [context = *this, callback](ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index) mutable -> bool {
+ thread->wakeup_callback = [context = *this, callback](
+ ThreadWakeupReason reason, SharedPtr<Thread> thread,
+ SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
ASSERT(thread->status == ThreadStatus::WaitHLEEvent);
callback(thread, context, reason);
context.WriteToOutgoingCommandBuffer(*thread);
@@ -199,8 +199,8 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb
}
// The data_size already includes the payload header, the padding and the domain header.
- size_t size = data_payload_offset + command_header->data_size -
- sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+ std::size_t size = data_payload_offset + command_header->data_size -
+ sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
std::copy_n(src_cmdbuf, size, cmd_buf.begin());
@@ -217,8 +217,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ParseCommandBuffer(cmd_buf.data(), false);
// The data_size already includes the payload header, the padding and the domain header.
- size_t size = data_payload_offset + command_header->data_size -
- sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+ std::size_t size = data_payload_offset + command_header->data_size -
+ sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
@@ -229,7 +229,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
"Handle descriptor bit set but no handles to translate");
// We write the translated handles at a specific offset in the command buffer, this space
// was already reserved when writing the header.
- size_t current_offset =
+ std::size_t current_offset =
(sizeof(IPC::CommandHeader) + sizeof(IPC::HandleDescriptorHeader)) / sizeof(u32);
ASSERT_MSG(!handle_descriptor_header->send_current_pid, "Sending PID is not implemented");
@@ -258,7 +258,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ASSERT(domain_message_header->num_objects == domain_objects.size());
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.
- size_t domain_offset = size - domain_message_header->num_objects;
+ std::size_t domain_offset = size - domain_message_header->num_objects;
auto& request_handlers = server_session->domain_request_handlers;
for (auto& object : domain_objects) {
@@ -291,14 +291,15 @@ std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const {
return buffer;
}
-size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffer_index) const {
+std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
+ int buffer_index) const {
if (size == 0) {
LOG_WARNING(Core, "skip empty buffer write");
return 0;
}
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
- const size_t buffer_size{GetWriteBufferSize(buffer_index)};
+ const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
if (size > buffer_size) {
LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
buffer_size);
@@ -314,13 +315,13 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffe
return size;
}
-size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()};
return is_buffer_a ? BufferDescriptorA()[buffer_index].Size()
: BufferDescriptorX()[buffer_index].Size();
}
-size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
return is_buffer_b ? BufferDescriptorB()[buffer_index].Size()
: BufferDescriptorC()[buffer_index].Size();
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index f0d07f1b6..894479ee0 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -170,7 +170,7 @@ public:
std::vector<u8> ReadBuffer(int buffer_index = 0) const;
/// Helper function to write a buffer using the appropriate buffer descriptor
- size_t WriteBuffer(const void* buffer, size_t size, int buffer_index = 0) const;
+ std::size_t WriteBuffer(const void* buffer, std::size_t size, int buffer_index = 0) const;
/* Helper function to write a buffer using the appropriate buffer descriptor
*
@@ -182,7 +182,7 @@ public:
*/
template <typename ContiguousContainer,
typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>>
- size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
+ std::size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
using ContiguousType = typename ContiguousContainer::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
@@ -193,19 +193,19 @@ public:
}
/// Helper function to get the size of the input buffer
- size_t GetReadBufferSize(int buffer_index = 0) const;
+ std::size_t GetReadBufferSize(int buffer_index = 0) const;
/// Helper function to get the size of the output buffer
- size_t GetWriteBufferSize(int buffer_index = 0) const;
+ std::size_t GetWriteBufferSize(int buffer_index = 0) const;
template <typename T>
- SharedPtr<T> GetCopyObject(size_t index) {
+ SharedPtr<T> GetCopyObject(std::size_t index) {
ASSERT(index < copy_objects.size());
return DynamicObjectCast<T>(copy_objects[index]);
}
template <typename T>
- SharedPtr<T> GetMoveObject(size_t index) {
+ SharedPtr<T> GetMoveObject(std::size_t index) {
ASSERT(index < move_objects.size());
return DynamicObjectCast<T>(move_objects[index]);
}
@@ -223,7 +223,7 @@ public:
}
template <typename T>
- std::shared_ptr<T> GetDomainRequestHandler(size_t index) const {
+ std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const {
return std::static_pointer_cast<T>(domain_request_handlers[index]);
}
@@ -240,15 +240,15 @@ public:
domain_objects.clear();
}
- size_t NumMoveObjects() const {
+ std::size_t NumMoveObjects() const {
return move_objects.size();
}
- size_t NumCopyObjects() const {
+ std::size_t NumCopyObjects() const {
return copy_objects.size();
}
- size_t NumDomainObjects() const {
+ std::size_t NumDomainObjects() const {
return domain_objects.size();
}
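Service handlers typically reply through the templated container overload declared above; a minimal usage sketch (the payload is hypothetical):

    #include <vector>
    #include "core/hle/kernel/hle_ipc.h"

    void WriteReply(Kernel::HLERequestContext& ctx) {
        std::vector<u8> reply(0x100, 0); // hypothetical response bytes
        ctx.WriteBuffer(reply);          // uses descriptor B if present, else C
    }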
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 36bf0b677..81675eac5 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -16,6 +16,7 @@
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
+#include "core/memory.h"
namespace Kernel {
@@ -62,7 +63,7 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho
Handle requesting_thread_handle) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
- return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidAddress);
+ return ERR_INVALID_ADDRESS;
}
SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
@@ -100,7 +101,7 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho
ResultCode Mutex::Release(VAddr address) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
- return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidAddress);
+ return ERR_INVALID_ADDRESS;
}
auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address);
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index b054cbf7d..9eb72315c 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -6,7 +6,6 @@
#include <atomic>
#include <string>
-#include <utility>
#include <boost/smart_ptr/intrusive_ptr.hpp>
@@ -97,7 +96,7 @@ using SharedPtr = boost::intrusive_ptr<T>;
template <typename T>
inline SharedPtr<T> DynamicObjectCast(SharedPtr<Object> object) {
if (object != nullptr && object->GetHandleType() == T::HANDLE_TYPE) {
- return boost::static_pointer_cast<T>(std::move(object));
+ return boost::static_pointer_cast<T>(object);
}
return nullptr;
}
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b025e323f..dc9fc8470 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -7,10 +7,13 @@
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/logging/log.h"
+#include "core/core.h"
+#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
+#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
@@ -32,16 +35,24 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) {
process->name = std::move(name);
process->flags.raw = 0;
process->flags.memory_region.Assign(MemoryRegion::APPLICATION);
+ process->resource_limit = kernel.ResourceLimitForCategory(ResourceLimitCategory::APPLICATION);
process->status = ProcessStatus::Created;
process->program_id = 0;
process->process_id = kernel.CreateNewProcessID();
+ process->svc_access_mask.set();
kernel.AppendNewProcess(process);
return process;
}
-void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
- for (size_t i = 0; i < len; ++i) {
+void Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
+ program_id = metadata.GetTitleID();
+ is_64bit_process = metadata.Is64BitProgram();
+ vm_manager.Reset(metadata.GetAddressSpaceType());
+}
+
+void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
+ for (std::size_t i = 0; i < len; ++i) {
u32 descriptor = kernel_caps[i];
u32 type = descriptor >> 20;
@@ -117,7 +128,7 @@ void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
// TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part
// of the user address space.
vm_manager
- .MapMemoryBlock(Memory::STACK_AREA_VADDR_END - stack_size,
+ .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size,
std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
MemoryState::Mapped)
.Unwrap();
@@ -125,7 +136,92 @@ void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
vm_manager.LogLayout();
status = ProcessStatus::Running;
- Kernel::SetupMainThread(kernel, entry_point, main_thread_priority, this);
+ Kernel::SetupMainThread(kernel, entry_point, main_thread_priority, *this);
+}
+
+void Process::PrepareForTermination() {
+ status = ProcessStatus::Exited;
+
+ const auto stop_threads = [this](const std::vector<SharedPtr<Thread>>& thread_list) {
+ for (auto& thread : thread_list) {
+ if (thread->owner_process != this)
+ continue;
+
+ if (thread == GetCurrentThread())
+ continue;
+
+ // TODO(Subv): When are the other running/ready threads terminated?
+ ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny ||
+ thread->status == ThreadStatus::WaitSynchAll,
+ "Exiting processes with non-waiting threads is currently unimplemented");
+
+ thread->Stop();
+ }
+ };
+
+ auto& system = Core::System::GetInstance();
+ stop_threads(system.Scheduler(0)->GetThreadList());
+ stop_threads(system.Scheduler(1)->GetThreadList());
+ stop_threads(system.Scheduler(2)->GetThreadList());
+ stop_threads(system.Scheduler(3)->GetThreadList());
+}
+
+/**
+ * Finds a free location for the TLS section of a thread.
+ * @param tls_slots The TLS page array of the thread's owner process.
+ * Returns a tuple of (page, slot, alloc_needed) where:
+ * page: The index of the first allocated TLS page that has free slots.
+ * slot: The index of the first free slot in the indicated page.
+ * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
+ */
+static std::tuple<std::size_t, std::size_t, bool> FindFreeThreadLocalSlot(
+ const std::vector<std::bitset<8>>& tls_slots) {
+ // Iterate over all the allocated pages, and try to find one where not all slots are used.
+ for (std::size_t page = 0; page < tls_slots.size(); ++page) {
+ const auto& page_tls_slots = tls_slots[page];
+ if (!page_tls_slots.all()) {
+ // We found a page with at least one free slot, find which slot it is
+ for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
+ if (!page_tls_slots.test(slot)) {
+ return std::make_tuple(page, slot, false);
+ }
+ }
+ }
+ }
+
+ return std::make_tuple(0, 0, true);
+}
+
+VAddr Process::MarkNextAvailableTLSSlotAsUsed(Thread& thread) {
+ auto [available_page, available_slot, needs_allocation] = FindFreeThreadLocalSlot(tls_slots);
+ const VAddr tls_begin = vm_manager.GetTLSIORegionBaseAddress();
+
+ if (needs_allocation) {
+ tls_slots.emplace_back(0); // The page is completely available at the start
+ available_page = tls_slots.size() - 1;
+ available_slot = 0; // Use the first slot in the new page
+
+ // Allocate some memory from the end of the linear heap for this region.
+ auto& tls_memory = thread.GetTLSMemory();
+ tls_memory->insert(tls_memory->end(), Memory::PAGE_SIZE, 0);
+
+ vm_manager.RefreshMemoryBlockMappings(tls_memory.get());
+
+ vm_manager.MapMemoryBlock(tls_begin + available_page * Memory::PAGE_SIZE, tls_memory, 0,
+ Memory::PAGE_SIZE, MemoryState::ThreadLocal);
+ }
+
+ tls_slots[available_page].set(available_slot);
+
+ return tls_begin + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE;
+}
+
+void Process::FreeTLSSlot(VAddr tls_address) {
+ const VAddr tls_base = tls_address - vm_manager.GetTLSIORegionBaseAddress();
+ const VAddr tls_page = tls_base / Memory::PAGE_SIZE;
+ const VAddr tls_slot = (tls_base % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
+
+ tls_slots[tls_page].reset(tls_slot);
}
void Process::LoadModule(SharedPtr<CodeSet> module_, VAddr base_addr) {
@@ -145,8 +241,8 @@ void Process::LoadModule(SharedPtr<CodeSet> module_, VAddr base_addr) {
}
ResultVal<VAddr> Process::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
- if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
- target + size < target) {
+ if (target < vm_manager.GetHeapRegionBaseAddress() ||
+ target + size > vm_manager.GetHeapRegionEndAddress() || target + size < target) {
return ERR_INVALID_ADDRESS;
}
@@ -181,8 +277,8 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u64 size, VMAPermission per
}
ResultCode Process::HeapFree(VAddr target, u32 size) {
- if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
- target + size < target) {
+ if (target < vm_manager.GetHeapRegionBaseAddress() ||
+ target + size > vm_manager.GetHeapRegionEndAddress() || target + size < target) {
return ERR_INVALID_ADDRESS;
}
@@ -211,7 +307,7 @@ ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
"Shared memory exceeds bounds of mapped block");
const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
- size_t backing_block_offset = vma->second.offset + vma_offset;
+ std::size_t backing_block_offset = vma->second.offset + vma_offset;
CASCADE_RESULT(auto new_vma,
vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
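The TLS bookkeeping added here packs eight 0x200-byte entries into each 0x1000-byte page, which is why one std::bitset<8> tracks each page. The address arithmetic as a standalone sketch (constants mirror Memory::PAGE_SIZE and Memory::TLS_ENTRY_SIZE):

    #include <cstddef>
    #include <cstdint>

    using VAddr = std::uint64_t;

    constexpr std::size_t kPageSize = 0x1000;    // Memory::PAGE_SIZE
    constexpr std::size_t kTlsEntrySize = 0x200; // Memory::TLS_ENTRY_SIZE

    // Address of a TLS slot, given the base of the TLS/IO region.
    constexpr VAddr TlsSlotAddress(VAddr tls_begin, std::size_t page, std::size_t slot) {
        return tls_begin + page * kPageSize + slot * kTlsEntrySize;
    }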
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 1587d40c1..590e0c73d 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -17,6 +17,10 @@
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/vm_manager.h"
+namespace FileSys {
+class ProgramMetadata;
+}
+
namespace Kernel {
class KernelCore;
@@ -59,7 +63,7 @@ class ResourceLimit;
struct CodeSet final : public Object {
struct Segment {
- size_t offset = 0;
+ std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
};
@@ -131,6 +135,121 @@ public:
return HANDLE_TYPE;
}
+ /// Gets a reference to the process' memory manager.
+ Kernel::VMManager& VMManager() {
+ return vm_manager;
+ }
+
+ /// Gets a const reference to the process' memory manager.
+ const Kernel::VMManager& VMManager() const {
+ return vm_manager;
+ }
+
+ /// Gets the current status of the process
+ ProcessStatus GetStatus() const {
+ return status;
+ }
+
+ /// Gets the unique ID that identifies this particular process.
+ u32 GetProcessID() const {
+ return process_id;
+ }
+
+ /// Gets the title ID corresponding to this process.
+ u64 GetTitleID() const {
+ return program_id;
+ }
+
+ /// Gets the resource limit descriptor for this process
+ ResourceLimit& GetResourceLimit() {
+ return *resource_limit;
+ }
+
+ /// Gets the resource limit descriptor for this process
+ const ResourceLimit& GetResourceLimit() const {
+ return *resource_limit;
+ }
+
+ /// Gets the default CPU ID for this process
+ u8 GetDefaultProcessorID() const {
+ return ideal_processor;
+ }
+
+ /// Gets the bitmask of allowed CPUs that this process' threads can run on.
+ u32 GetAllowedProcessorMask() const {
+ return allowed_processor_mask;
+ }
+
+ /// Gets the bitmask of allowed thread priorities.
+ u32 GetAllowedThreadPriorityMask() const {
+ return allowed_thread_priority_mask;
+ }
+
+ u32 IsVirtualMemoryEnabled() const {
+ return is_virtual_address_memory_enabled;
+ }
+
+ /// Whether this process is an AArch64 or AArch32 process.
+ bool Is64BitProcess() const {
+ return is_64bit_process;
+ }
+
+ /**
+ * Loads process-specific configuration info with metadata provided
+ * by an executable.
+ *
+ * @param metadata The provided metadata to load process-specific info.
+ */
+ void LoadFromMetadata(const FileSys::ProgramMetadata& metadata);
+
+ /**
+ * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
+ * to this process.
+ */
+ void ParseKernelCaps(const u32* kernel_caps, std::size_t len);
+
+ /**
+ * Applies address space changes and launches the process main thread.
+ */
+ void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size);
+
+ /**
+ * Prepares a process for termination by stopping all of its threads
+ * and clearing any other resources.
+ */
+ void PrepareForTermination();
+
+ void LoadModule(SharedPtr<CodeSet> module_, VAddr base_addr);
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////
+ // Memory Management
+
+ // Marks the next available TLS slot as used and returns the address of the slot.
+ VAddr MarkNextAvailableTLSSlotAsUsed(Thread& thread);
+
+ // Frees a used TLS slot identified by the given address
+ void FreeTLSSlot(VAddr tls_address);
+
+ ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
+ ResultCode HeapFree(VAddr target, u32 size);
+
+ ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size);
+
+ ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size);
+
+private:
+ explicit Process(KernelCore& kernel);
+ ~Process() override;
+
+ /// Memory manager for this process.
+ Kernel::VMManager vm_manager;
+
+ /// Current status of the process
+ ProcessStatus status;
+
+ /// The ID of this process
+ u32 process_id = 0;
+
/// Title ID corresponding to the process
u64 program_id;
@@ -140,7 +259,7 @@ public:
/// The process may only call SVCs which have the corresponding bit set.
std::bitset<0x80> svc_access_mask;
/// Maximum size of the handle table for the process.
- unsigned int handle_table_size = 0x200;
+ u32 handle_table_size = 0x200;
/// Special memory ranges mapped into this processes address space. This is used to give
/// processes access to specific I/O regions and device memory.
boost::container::static_vector<AddressMapping, 8> address_mappings;
@@ -154,29 +273,6 @@ public:
u32 allowed_processor_mask = THREADPROCESSORID_DEFAULT_MASK;
u32 allowed_thread_priority_mask = 0xFFFFFFFF;
u32 is_virtual_address_memory_enabled = 0;
- /// Current status of the process
- ProcessStatus status;
-
- /// The ID of this process
- u32 process_id = 0;
-
- /**
- * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
- * to this process.
- */
- void ParseKernelCaps(const u32* kernel_caps, size_t len);
-
- /**
- * Applies address space changes and launches the process main thread.
- */
- void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size);
-
- void LoadModule(SharedPtr<CodeSet> module_, VAddr base_addr);
-
- ///////////////////////////////////////////////////////////////////////////////////////////////
- // Memory Management
-
- VMManager vm_manager;
// Memory used to back the allocations in the regular heap. A single vector is used to cover
// the entire virtual address space extents that bound the allocations, including any holes.
@@ -196,18 +292,12 @@ public:
/// This vector will grow as more pages are allocated for new threads.
std::vector<std::bitset<8>> tls_slots;
- std::string name;
-
- ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
- ResultCode HeapFree(VAddr target, u32 size);
-
- ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size);
+ /// Whether this process is AArch64 or AArch32.
+ /// By default, we currently assume this is true, unless otherwise
+ /// specified by metadata provided to the process during loading.
+ bool is_64bit_process = true;
- ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size);
-
-private:
- explicit Process(KernelCore& kernel);
- ~Process() override;
+ std::string name;
};
} // namespace Kernel
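With the members now private, callers go through the new accessors; a fragment mirroring the call sites updated elsewhere in this diff:

    // Previously: direct access to Core::CurrentProcess()->vm_manager.
    auto& vm_manager = Core::CurrentProcess()->VMManager();
    const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress();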
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 69c812f16..1e82cfffb 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -17,7 +17,7 @@ namespace Kernel {
std::mutex Scheduler::scheduler_mutex;
-Scheduler::Scheduler(Core::ARM_Interface* cpu_core) : cpu_core(cpu_core) {}
+Scheduler::Scheduler(Core::ARM_Interface& cpu_core) : cpu_core(cpu_core) {}
Scheduler::~Scheduler() {
for (auto& thread : thread_list) {
@@ -59,9 +59,9 @@ void Scheduler::SwitchContext(Thread* new_thread) {
// Save context for previous thread
if (previous_thread) {
previous_thread->last_running_ticks = CoreTiming::GetTicks();
- cpu_core->SaveContext(previous_thread->context);
+ cpu_core.SaveContext(previous_thread->context);
// Save the TPIDR_EL0 system register in case it was modified.
- previous_thread->tpidr_el0 = cpu_core->GetTPIDR_EL0();
+ previous_thread->tpidr_el0 = cpu_core.GetTPIDR_EL0();
if (previous_thread->status == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
@@ -88,13 +88,13 @@ void Scheduler::SwitchContext(Thread* new_thread) {
if (previous_process != current_thread->owner_process) {
Core::CurrentProcess() = current_thread->owner_process;
- SetCurrentPageTable(&Core::CurrentProcess()->vm_manager.page_table);
+ SetCurrentPageTable(&Core::CurrentProcess()->VMManager().page_table);
}
- cpu_core->LoadContext(new_thread->context);
- cpu_core->SetTlsAddress(new_thread->GetTLSAddress());
- cpu_core->SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
- cpu_core->ClearExclusiveState();
+ cpu_core.LoadContext(new_thread->context);
+ cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
} else {
current_thread = nullptr;
// Note: We do not reset the current process and current page table when idling because
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 744990c9b..2c94641ec 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -19,7 +19,7 @@ namespace Kernel {
class Scheduler final {
public:
- explicit Scheduler(Core::ARM_Interface* cpu_core);
+ explicit Scheduler(Core::ARM_Interface& cpu_core);
~Scheduler();
/// Returns whether there are any threads that are ready to run.
@@ -72,7 +72,7 @@ private:
SharedPtr<Thread> current_thread = nullptr;
- Core::ARM_Interface* cpu_core;
+ Core::ARM_Interface& cpu_core;
static std::mutex scheduler_mutex;
};
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index abb1d09cd..d061e6155 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -8,6 +8,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/memory.h"
@@ -34,11 +35,11 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, SharedPtr<Proce
// Refresh the address mappings for the current process.
if (Core::CurrentProcess() != nullptr) {
- Core::CurrentProcess()->vm_manager.RefreshMemoryBlockMappings(
+ Core::CurrentProcess()->VMManager().RefreshMemoryBlockMappings(
shared_memory->backing_block.get());
}
} else {
- auto& vm_manager = shared_memory->owner_process->vm_manager;
+ auto& vm_manager = shared_memory->owner_process->VMManager();
// The memory is already available and mapped in the owner process.
auto vma = vm_manager.FindVMA(address);
@@ -71,7 +72,8 @@ SharedPtr<SharedMemory> SharedMemory::CreateForApplet(
shared_memory->other_permissions = other_permissions;
shared_memory->backing_block = std::move(heap_block);
shared_memory->backing_block_offset = offset;
- shared_memory->base_address = Memory::HEAP_VADDR + offset;
+ shared_memory->base_address =
+ kernel.CurrentProcess()->VMManager().GetHeapRegionBaseAddress() + offset;
return shared_memory;
}
@@ -105,7 +107,7 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
VAddr target_address = address;
// Map the memory block into the target process
- auto result = target_process->vm_manager.MapMemoryBlock(
+ auto result = target_process->VMManager().MapMemoryBlock(
target_address, backing_block, backing_block_offset, size, MemoryState::Shared);
if (result.Failed()) {
LOG_ERROR(
@@ -115,14 +117,14 @@ ResultCode SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi
return result.Code();
}
- return target_process->vm_manager.ReprotectRange(target_address, size,
- ConvertPermissions(permissions));
+ return target_process->VMManager().ReprotectRange(target_address, size,
+ ConvertPermissions(permissions));
}
ResultCode SharedMemory::Unmap(Process* target_process, VAddr address) {
// TODO(Subv): Verify what happens if the application tries to unmap an address that is not
// mapped to a SharedMemory.
- return target_process->vm_manager.UnmapRange(address, size);
+ return target_process->VMManager().UnmapRange(address, size);
}
VMAPermission SharedMemory::ConvertPermissions(MemoryPermission permission) {
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index 2c729afe3..2c06bb7ce 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -119,7 +119,7 @@ public:
/// Backing memory for this shared memory block.
std::shared_ptr<std::vector<u8>> backing_block;
/// Offset into the backing block for this shared memory.
- size_t backing_block_offset;
+ std::size_t backing_block_offset;
/// Size of the memory block. Page-aligned.
u64 size;
/// Permission restrictions applied to the process which created the block.
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index f500fd2e7..1cdaa740a 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -35,13 +35,25 @@
#include "core/hle/service/service.h"
namespace Kernel {
+namespace {
+constexpr bool Is4KBAligned(VAddr address) {
+ return (address & 0xFFF) == 0;
+}
+} // Anonymous namespace
/// Set the process heap to a given Size. It can both extend and shrink the heap.
static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
+
+ // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 4GB.
+ if ((heap_size & 0xFFFFFFFE001FFFFF) != 0) {
+ return ERR_INVALID_SIZE;
+ }
+
auto& process = *Core::CurrentProcess();
+ const VAddr heap_base = process.VMManager().GetHeapRegionBaseAddress();
CASCADE_RESULT(*heap_addr,
- process.HeapAllocate(Memory::HEAP_VADDR, heap_size, VMAPermission::ReadWrite));
+ process.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite));
return RESULT_SUCCESS;
}
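The bitmask test above is equivalent to requiring a 2 MiB multiple below 2^33; the mask 0xFFFFFFFE001FFFFF leaves only bits 21-32 free, so spelled out:

    #include <cstdint>

    // Equivalent predicate: multiple of 2 MiB, and no bits at or above bit 33.
    constexpr bool IsValidHeapSize(std::uint64_t size) {
        return (size % 0x200000) == 0 && size < (1ull << 33);
    }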
@@ -56,6 +68,15 @@ static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state
static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
+
+ if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
return Core::CurrentProcess()->MirrorMemory(dst_addr, src_addr, size);
}
@@ -63,6 +84,15 @@ static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
+
+ if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
return Core::CurrentProcess()->UnmapMemory(dst_addr, src_addr, size);
}
@@ -140,13 +170,13 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
return ERR_INVALID_HANDLE;
}
- *process_id = process->process_id;
+ *process_id = process->GetProcessID();
return RESULT_SUCCESS;
}
/// Default thread wakeup callback for WaitSynchronization
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index) {
+ SharedPtr<WaitObject> object, std::size_t index) {
ASSERT(thread->status == ThreadStatus::WaitSynchAny);
if (reason == ThreadWakeupReason::Timeout) {
@@ -251,6 +281,10 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
"requesting_current_thread_handle=0x{:08X}",
holding_thread_handle, mutex_addr, requesting_thread_handle);
+ if (Memory::IsKernelVirtualAddress(mutex_addr)) {
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
auto& handle_table = Core::System::GetInstance().Kernel().HandleTable();
return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle,
requesting_thread_handle);
@@ -260,6 +294,10 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
+ if (Memory::IsKernelVirtualAddress(mutex_addr)) {
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
return Mutex::Release(mutex_addr);
}
@@ -288,26 +326,27 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
info_sub_id, handle);
- const auto& vm_manager = Core::CurrentProcess()->vm_manager;
+ const auto& current_process = Core::CurrentProcess();
+ const auto& vm_manager = current_process->VMManager();
switch (static_cast<GetInfoType>(info_id)) {
case GetInfoType::AllowedCpuIdBitmask:
- *result = Core::CurrentProcess()->allowed_processor_mask;
+ *result = current_process->GetAllowedProcessorMask();
break;
case GetInfoType::AllowedThreadPrioBitmask:
- *result = Core::CurrentProcess()->allowed_thread_priority_mask;
+ *result = current_process->GetAllowedThreadPriorityMask();
break;
case GetInfoType::MapRegionBaseAddr:
- *result = Memory::MAP_REGION_VADDR;
+ *result = vm_manager.GetMapRegionBaseAddress();
break;
case GetInfoType::MapRegionSize:
- *result = Memory::MAP_REGION_SIZE;
+ *result = vm_manager.GetMapRegionSize();
break;
case GetInfoType::HeapRegionBaseAddr:
- *result = Memory::HEAP_VADDR;
+ *result = vm_manager.GetHeapRegionBaseAddress();
break;
case GetInfoType::HeapRegionSize:
- *result = Memory::HEAP_SIZE;
+ *result = vm_manager.GetHeapRegionSize();
break;
case GetInfoType::TotalMemoryUsage:
*result = vm_manager.GetTotalMemoryUsage();
@@ -322,22 +361,35 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
*result = 0;
break;
case GetInfoType::AddressSpaceBaseAddr:
- *result = vm_manager.GetAddressSpaceBaseAddr();
+ *result = vm_manager.GetCodeRegionBaseAddress();
break;
- case GetInfoType::AddressSpaceSize:
- *result = vm_manager.GetAddressSpaceSize();
+ case GetInfoType::AddressSpaceSize: {
+ const u64 width = vm_manager.GetAddressSpaceWidth();
+
+ switch (width) {
+ case 32:
+ *result = 0xFFE00000;
+ break;
+ case 36:
+ *result = 0xFF8000000;
+ break;
+ case 39:
+ *result = 0x7FF8000000;
+ break;
+ }
break;
+ }
case GetInfoType::NewMapRegionBaseAddr:
- *result = Memory::NEW_MAP_REGION_VADDR;
+ *result = vm_manager.GetNewMapRegionBaseAddress();
break;
case GetInfoType::NewMapRegionSize:
- *result = Memory::NEW_MAP_REGION_SIZE;
+ *result = vm_manager.GetNewMapRegionSize();
break;
case GetInfoType::IsVirtualAddressMemoryEnabled:
- *result = Core::CurrentProcess()->is_virtual_address_memory_enabled;
+ *result = current_process->IsVirtualMemoryEnabled();
break;
case GetInfoType::TitleId:
- *result = Core::CurrentProcess()->program_id;
+ *result = current_process->GetTitleID();
break;
case GetInfoType::PrivilegedProcessId:
LOG_WARNING(Kernel_SVC,
@@ -363,8 +415,36 @@ static ResultCode SetThreadActivity(Handle handle, u32 unknown) {
}
/// Gets the thread context
-static ResultCode GetThreadContext(Handle handle, VAddr addr) {
- LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x{:08X}, addr=0x{:X}", handle, addr);
+static ResultCode GetThreadContext(VAddr thread_context, Handle handle) {
+ LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle);
+
+ auto& kernel = Core::System::GetInstance().Kernel();
+ const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(handle);
+ if (!thread) {
+ return ERR_INVALID_HANDLE;
+ }
+
+ const auto current_process = Core::CurrentProcess();
+ if (thread->owner_process != current_process) {
+ return ERR_INVALID_HANDLE;
+ }
+
+ if (thread == GetCurrentThread()) {
+ return ERR_ALREADY_REGISTERED;
+ }
+
+ Core::ARM_Interface::ThreadContext ctx = thread->context;
+ // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
+ ctx.pstate &= 0xFF0FFE20;
+
+ // If 64-bit, we can just write the context registers directly and we're good.
+ // However, if 32-bit, we have to ensure some registers are zeroed out.
+ if (!current_process->Is64BitProcess()) {
+ std::fill(ctx.cpu_registers.begin() + 15, ctx.cpu_registers.end(), 0);
+ std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(), u128{});
+ }
+
+ Memory::WriteBlock(thread_context, &ctx, sizeof(ctx));
return RESULT_SUCCESS;
}
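The PSTATE mask applied above strips mode, interrupt-mask, IL, and reserved bits before the saved context is handed back to userland; as a standalone step:

    #include <cstdint>

    // Keep only the PSTATE bits a userland debugger may observe.
    constexpr std::uint32_t SanitizePstate(std::uint32_t pstate) {
        return pstate & 0xFF0FFE20;
    }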
@@ -392,8 +472,8 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {
// Note: The kernel uses the current process's resource limit instead of
// the one from the thread owner's resource limit.
- SharedPtr<ResourceLimit>& resource_limit = Core::CurrentProcess()->resource_limit;
- if (resource_limit->GetMaxResourceValue(ResourceType::Priority) > priority) {
+ const ResourceLimit& resource_limit = Core::CurrentProcess()->GetResourceLimit();
+ if (resource_limit.GetMaxResourceValue(ResourceType::Priority) > priority) {
return ERR_NOT_AUTHORIZED;
}
@@ -415,35 +495,43 @@ static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 s
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
shared_memory_handle, addr, size, permissions);
+ if (!Is4KBAligned(addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
+ const auto permissions_type = static_cast<MemoryPermission>(permissions);
+ if (permissions_type != MemoryPermission::Read &&
+ permissions_type != MemoryPermission::ReadWrite) {
+ LOG_ERROR(Kernel_SVC, "Invalid permissions=0x{:08X}", permissions);
+ return ERR_INVALID_MEMORY_PERMISSIONS;
+ }
+
auto& kernel = Core::System::GetInstance().Kernel();
auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);
if (!shared_memory) {
return ERR_INVALID_HANDLE;
}
- MemoryPermission permissions_type = static_cast<MemoryPermission>(permissions);
- switch (permissions_type) {
- case MemoryPermission::Read:
- case MemoryPermission::Write:
- case MemoryPermission::ReadWrite:
- case MemoryPermission::Execute:
- case MemoryPermission::ReadExecute:
- case MemoryPermission::WriteExecute:
- case MemoryPermission::ReadWriteExecute:
- case MemoryPermission::DontCare:
- return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
- MemoryPermission::DontCare);
- default:
- LOG_ERROR(Kernel_SVC, "unknown permissions=0x{:08X}", permissions);
- }
-
- return RESULT_SUCCESS;
+ return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
+ MemoryPermission::DontCare);
}
static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size) {
LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}",
shared_memory_handle, addr, size);
+ if (!Is4KBAligned(addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
auto& kernel = Core::System::GetInstance().Kernel();
auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);
@@ -459,9 +547,9 @@ static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_i
if (!process) {
return ERR_INVALID_HANDLE;
}
- auto vma = process->vm_manager.FindVMA(addr);
+ auto vma = process->VMManager().FindVMA(addr);
memory_info->attributes = 0;
- if (vma == Core::CurrentProcess()->vm_manager.vma_map.end()) {
+ if (vma == Core::CurrentProcess()->VMManager().vma_map.end()) {
memory_info->base_address = 0;
memory_info->permission = static_cast<u32>(VMAPermission::None);
memory_info->size = 0;
@@ -485,35 +573,13 @@ static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, VAdd
/// Exits the current process
static void ExitProcess() {
- LOG_INFO(Kernel_SVC, "Process {} exiting", Core::CurrentProcess()->process_id);
+ auto& current_process = Core::CurrentProcess();
- ASSERT_MSG(Core::CurrentProcess()->status == ProcessStatus::Running,
+ LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
+ ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
"Process has already exited");
- Core::CurrentProcess()->status = ProcessStatus::Exited;
-
- auto stop_threads = [](const std::vector<SharedPtr<Thread>>& thread_list) {
- for (auto& thread : thread_list) {
- if (thread->owner_process != Core::CurrentProcess())
- continue;
-
- if (thread == GetCurrentThread())
- continue;
-
- // TODO(Subv): When are the other running/ready threads terminated?
- ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny ||
- thread->status == ThreadStatus::WaitSynchAll,
- "Exiting processes with non-waiting threads is currently unimplemented");
-
- thread->Stop();
- }
- };
-
- auto& system = Core::System::GetInstance();
- stop_threads(system.Scheduler(0)->GetThreadList());
- stop_threads(system.Scheduler(1)->GetThreadList());
- stop_threads(system.Scheduler(2)->GetThreadList());
- stop_threads(system.Scheduler(3)->GetThreadList());
+ current_process->PrepareForTermination();
// Kill the current thread
GetCurrentThread()->Stop();
@@ -524,20 +590,20 @@ static void ExitProcess() {
/// Creates a new thread
static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, VAddr stack_top,
u32 priority, s32 processor_id) {
- std::string name = fmt::format("unknown-{:X}", entry_point);
+ std::string name = fmt::format("thread-{:X}", entry_point);
if (priority > THREADPRIO_LOWEST) {
return ERR_INVALID_THREAD_PRIORITY;
}
- SharedPtr<ResourceLimit>& resource_limit = Core::CurrentProcess()->resource_limit;
- if (resource_limit->GetMaxResourceValue(ResourceType::Priority) > priority) {
+ const ResourceLimit& resource_limit = Core::CurrentProcess()->GetResourceLimit();
+ if (resource_limit.GetMaxResourceValue(ResourceType::Priority) > priority) {
return ERR_NOT_AUTHORIZED;
}
if (processor_id == THREADPROCESSORID_DEFAULT) {
// Set the target CPU to the one specified in the process' exheader.
- processor_id = Core::CurrentProcess()->ideal_processor;
+ processor_id = Core::CurrentProcess()->GetDefaultProcessorID();
ASSERT(processor_id != THREADPROCESSORID_DEFAULT);
}
@@ -647,16 +713,17 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
condition_variable_addr, target);
- auto RetrieveWaitingThreads =
- [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr condvar_addr) {
- const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
+ auto RetrieveWaitingThreads = [](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr condvar_addr) {
+ const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+ auto& thread_list = scheduler->GetThreadList();
- for (auto& thread : thread_list) {
- if (thread->condvar_wait_address == condvar_addr)
- waiting_threads.push_back(thread);
- }
- };
+ for (auto& thread : thread_list) {
+ if (thread->condvar_wait_address == condvar_addr)
+ waiting_threads.push_back(thread);
+ }
+ };
// Retrieve a list of all threads that are waiting for this condition variable.
std::vector<SharedPtr<Thread>> waiting_threads;
@@ -672,7 +739,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// Only process up to 'target' threads, unless 'target' is -1, in which case process
// them all.
- size_t last = waiting_threads.size();
+ std::size_t last = waiting_threads.size();
if (target != -1)
last = target;
@@ -680,12 +747,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
if (last > waiting_threads.size())
return RESULT_SUCCESS;
- for (size_t index = 0; index < last; ++index) {
+ for (std::size_t index = 0; index < last; ++index) {
auto& thread = waiting_threads[index];
ASSERT(thread->condvar_wait_address == condition_variable_addr);
- size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
+ std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
auto& monitor = Core::System::GetInstance().Monitor();
@@ -863,10 +930,10 @@ static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
}
if (core == static_cast<u32>(THREADPROCESSORID_DEFAULT)) {
- ASSERT(thread->owner_process->ideal_processor !=
+ ASSERT(thread->owner_process->GetDefaultProcessorID() !=
static_cast<u8>(THREADPROCESSORID_DEFAULT));
// Set the target CPU to the one specified in the process' exheader.
- core = thread->owner_process->ideal_processor;
+ core = thread->owner_process->GetDefaultProcessorID();
mask = 1ull << core;
}
@@ -898,12 +965,28 @@ static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permiss
LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size,
local_permissions, remote_permissions);
+ // Size must be a multiple of 4KB and be less than or equal to
+ // approx. 8 GB (actually (1GB - 512B) * 8)
+ if (size == 0 || (size & 0xFFFFFFFE00000FFF) != 0) {
+ return ERR_INVALID_SIZE;
+ }
+
+ const auto local_perms = static_cast<MemoryPermission>(local_permissions);
+ if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) {
+ return ERR_INVALID_MEMORY_PERMISSIONS;
+ }
+
+ const auto remote_perms = static_cast<MemoryPermission>(remote_permissions);
+ if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite &&
+ remote_perms != MemoryPermission::DontCare) {
+ return ERR_INVALID_MEMORY_PERMISSIONS;
+ }
+
auto& kernel = Core::System::GetInstance().Kernel();
auto& handle_table = kernel.HandleTable();
auto shared_mem_handle =
SharedMemory::Create(kernel, handle_table.Get<Process>(KernelHandle::CurrentProcess), size,
- static_cast<MemoryPermission>(local_permissions),
- static_cast<MemoryPermission>(remote_permissions));
+ local_perms, remote_perms);
CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
return RESULT_SUCCESS;
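CreateSharedMemory's mask 0xFFFFFFFE00000FFF admits only 4 KiB multiples below 2^33, making 0x1FFFFF000 bytes, i.e. (1 GiB - 512 B) * 8, the largest accepted size. An equivalent sketch:

    #include <cstdint>

    // Equivalent predicate: non-zero, 4 KiB-aligned, below bit 33.
    constexpr bool IsValidSharedMemorySize(std::uint64_t size) {
        return size != 0 && (size % 0x1000) == 0 && size < (1ull << 33);
    }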
@@ -977,7 +1060,7 @@ static const FunctionDef SVC_Table[] = {
{0x2B, nullptr, "FlushDataCache"},
{0x2C, nullptr, "MapPhysicalMemory"},
{0x2D, nullptr, "UnmapPhysicalMemory"},
- {0x2E, nullptr, "GetNextThreadInfo"},
+ {0x2E, nullptr, "GetFutureThreadInfo"},
{0x2F, nullptr, "GetLastThreadInfo"},
{0x30, nullptr, "GetResourceLimitLimitValue"},
{0x31, nullptr, "GetResourceLimitCurrentValue"},
@@ -1003,11 +1086,11 @@ static const FunctionDef SVC_Table[] = {
{0x45, nullptr, "CreateEvent"},
{0x46, nullptr, "Unknown"},
{0x47, nullptr, "Unknown"},
- {0x48, nullptr, "AllocateUnsafeMemory"},
- {0x49, nullptr, "FreeUnsafeMemory"},
- {0x4A, nullptr, "SetUnsafeAllocationLimit"},
- {0x4B, nullptr, "CreateJitMemory"},
- {0x4C, nullptr, "MapJitMemory"},
+ {0x48, nullptr, "MapPhysicalMemoryUnsafe"},
+ {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
+ {0x4A, nullptr, "SetUnsafeLimit"},
+ {0x4B, nullptr, "CreateCodeMemory"},
+ {0x4C, nullptr, "ControlCodeMemory"},
{0x4D, nullptr, "SleepSystem"},
{0x4E, nullptr, "ReadWriteRegister"},
{0x4F, nullptr, "SetProcessActivity"},
@@ -1042,7 +1125,7 @@ static const FunctionDef SVC_Table[] = {
{0x6C, nullptr, "SetHardwareBreakPoint"},
{0x6D, nullptr, "GetDebugThreadParam"},
{0x6E, nullptr, "Unknown"},
- {0x6F, nullptr, "GetMemoryInfo"},
+ {0x6F, nullptr, "GetSystemInfo"},
{0x70, nullptr, "CreatePort"},
{0x71, nullptr, "ManageNamedPort"},
{0x72, nullptr, "ConnectToPort"},
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 1eda5f879..22712e64f 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -13,7 +13,9 @@
namespace Kernel {
-#define PARAM(n) Core::CurrentArmInterface().GetReg(n)
+static inline u64 Param(int n) {
+ return Core::CurrentArmInterface().GetReg(n);
+}
/**
* HLE a function return from the current ARM userland process
@@ -28,23 +30,23 @@ static inline void FuncReturn(u64 res) {
template <ResultCode func(u64)>
void SvcWrap() {
- FuncReturn(func(PARAM(0)).raw);
+ FuncReturn(func(Param(0)).raw);
}
template <ResultCode func(u32)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0)).raw);
+ FuncReturn(func((u32)Param(0)).raw);
}
template <ResultCode func(u32, u32)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0), (u32)PARAM(1)).raw);
+ FuncReturn(func((u32)Param(0), (u32)Param(1)).raw);
}
template <ResultCode func(u32*, u32)>
void SvcWrap() {
u32 param_1 = 0;
- u32 retval = func(&param_1, (u32)PARAM(1)).raw;
+ u32 retval = func(&param_1, (u32)Param(1)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@@ -52,39 +54,44 @@ void SvcWrap() {
template <ResultCode func(u32*, u64)>
void SvcWrap() {
u32 param_1 = 0;
- u32 retval = func(&param_1, PARAM(1)).raw;
+ u32 retval = func(&param_1, Param(1)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
template <ResultCode func(u64, s32)>
void SvcWrap() {
- FuncReturn(func(PARAM(0), (s32)PARAM(1)).raw);
+ FuncReturn(func(Param(0), (s32)Param(1)).raw);
+}
+
+template <ResultCode func(u64, u32)>
+void SvcWrap() {
+ FuncReturn(func(Param(0), static_cast<u32>(Param(1))).raw);
}
template <ResultCode func(u64*, u64)>
void SvcWrap() {
u64 param_1 = 0;
- u32 retval = func(&param_1, PARAM(1)).raw;
+ u32 retval = func(&param_1, Param(1)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
template <ResultCode func(u32, u64)>
void SvcWrap() {
- FuncReturn(func((u32)(PARAM(0) & 0xFFFFFFFF), PARAM(1)).raw);
+ FuncReturn(func((u32)(Param(0) & 0xFFFFFFFF), Param(1)).raw);
}
template <ResultCode func(u32, u32, u64)>
void SvcWrap() {
- FuncReturn(func((u32)(PARAM(0) & 0xFFFFFFFF), (u32)(PARAM(1) & 0xFFFFFFFF), PARAM(2)).raw);
+ FuncReturn(func((u32)(Param(0) & 0xFFFFFFFF), (u32)(Param(1) & 0xFFFFFFFF), Param(2)).raw);
}
template <ResultCode func(u32, u32*, u64*)>
void SvcWrap() {
u32 param_1 = 0;
u64 param_2 = 0;
- ResultCode retval = func((u32)(PARAM(2) & 0xFFFFFFFF), &param_1, &param_2);
+ ResultCode retval = func((u32)(Param(2) & 0xFFFFFFFF), &param_1, &param_2);
Core::CurrentArmInterface().SetReg(1, param_1);
Core::CurrentArmInterface().SetReg(2, param_2);
FuncReturn(retval.raw);
@@ -93,46 +100,46 @@ void SvcWrap() {
template <ResultCode func(u64, u64, u32, u32)>
void SvcWrap() {
FuncReturn(
- func(PARAM(0), PARAM(1), (u32)(PARAM(3) & 0xFFFFFFFF), (u32)(PARAM(3) & 0xFFFFFFFF)).raw);
+ func(Param(0), Param(1), (u32)(Param(3) & 0xFFFFFFFF), (u32)(Param(3) & 0xFFFFFFFF)).raw);
}
template <ResultCode func(u32, u64, u32)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0), PARAM(1), (u32)PARAM(2)).raw);
+ FuncReturn(func((u32)Param(0), Param(1), (u32)Param(2)).raw);
}
template <ResultCode func(u64, u64, u64)>
void SvcWrap() {
- FuncReturn(func(PARAM(0), PARAM(1), PARAM(2)).raw);
+ FuncReturn(func(Param(0), Param(1), Param(2)).raw);
}
template <ResultCode func(u32, u64, u64, u32)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0), PARAM(1), PARAM(2), (u32)PARAM(3)).raw);
+ FuncReturn(func((u32)Param(0), Param(1), Param(2), (u32)Param(3)).raw);
}
template <ResultCode func(u32, u64, u64)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0), PARAM(1), PARAM(2)).raw);
+ FuncReturn(func((u32)Param(0), Param(1), Param(2)).raw);
}
template <ResultCode func(u32*, u64, u64, s64)>
void SvcWrap() {
u32 param_1 = 0;
- ResultCode retval = func(&param_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (s64)PARAM(3));
+ ResultCode retval = func(&param_1, Param(1), (u32)(Param(2) & 0xFFFFFFFF), (s64)Param(3));
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval.raw);
}
template <ResultCode func(u64, u64, u32, s64)>
void SvcWrap() {
- FuncReturn(func(PARAM(0), PARAM(1), (u32)PARAM(2), (s64)PARAM(3)).raw);
+ FuncReturn(func(Param(0), Param(1), (u32)Param(2), (s64)Param(3)).raw);
}
template <ResultCode func(u64*, u64, u64, u64)>
void SvcWrap() {
u64 param_1 = 0;
- u32 retval = func(&param_1, PARAM(1), PARAM(2), PARAM(3)).raw;
+ u32 retval = func(&param_1, Param(1), Param(2), Param(3)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@@ -141,7 +148,7 @@ template <ResultCode func(u32*, u64, u64, u64, u32, s32)>
void SvcWrap() {
u32 param_1 = 0;
u32 retval =
- func(&param_1, PARAM(1), PARAM(2), PARAM(3), (u32)PARAM(4), (s32)(PARAM(5) & 0xFFFFFFFF))
+ func(&param_1, Param(1), Param(2), Param(3), (u32)Param(4), (s32)(Param(5) & 0xFFFFFFFF))
.raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
@@ -151,13 +158,13 @@ template <ResultCode func(MemoryInfo*, PageInfo*, u64)>
void SvcWrap() {
MemoryInfo memory_info = {};
PageInfo page_info = {};
- u32 retval = func(&memory_info, &page_info, PARAM(2)).raw;
+ u32 retval = func(&memory_info, &page_info, Param(2)).raw;
- Memory::Write64(PARAM(0), memory_info.base_address);
- Memory::Write64(PARAM(0) + 8, memory_info.size);
- Memory::Write32(PARAM(0) + 16, memory_info.type);
- Memory::Write32(PARAM(0) + 20, memory_info.attributes);
- Memory::Write32(PARAM(0) + 24, memory_info.permission);
+ Memory::Write64(Param(0), memory_info.base_address);
+ Memory::Write64(Param(0) + 8, memory_info.size);
+ Memory::Write32(Param(0) + 16, memory_info.type);
+ Memory::Write32(Param(0) + 20, memory_info.attributes);
+ Memory::Write32(Param(0) + 24, memory_info.permission);
FuncReturn(retval);
}
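
The Write64/Write32 calls above pin down the guest-visible layout of MemoryInfo at fixed offsets from Param(0). A minimal reconstruction of the layout those writes imply (field names are taken from the writes; the authoritative struct is declared elsewhere in the kernel HLE code and may carry additional trailing fields):

#include <cstddef>
#include <cstdint>

// Hypothetical reconstruction of the layout implied by the writes above;
// not the authoritative definition.
struct GuestMemoryInfo {
    std::uint64_t base_address; // offset 0,  Memory::Write64
    std::uint64_t size;         // offset 8,  Memory::Write64
    std::uint32_t type;         // offset 16, Memory::Write32
    std::uint32_t attributes;   // offset 20, Memory::Write32
    std::uint32_t permission;   // offset 24, Memory::Write32
};
static_assert(offsetof(GuestMemoryInfo, permission) == 24,
              "Layout must match the offsets written by SvcWrap");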
@@ -165,7 +172,7 @@ void SvcWrap() {
template <ResultCode func(u32*, u64, u64, u32)>
void SvcWrap() {
u32 param_1 = 0;
- u32 retval = func(&param_1, PARAM(1), PARAM(2), (u32)(PARAM(3) & 0xFFFFFFFF)).raw;
+ u32 retval = func(&param_1, Param(1), Param(2), (u32)(Param(3) & 0xFFFFFFFF)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@@ -174,7 +181,7 @@ template <ResultCode func(Handle*, u64, u32, u32)>
void SvcWrap() {
u32 param_1 = 0;
u32 retval =
- func(&param_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (u32)(PARAM(3) & 0xFFFFFFFF)).raw;
+ func(&param_1, Param(1), (u32)(Param(2) & 0xFFFFFFFF), (u32)(Param(3) & 0xFFFFFFFF)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@@ -182,14 +189,14 @@ void SvcWrap() {
template <ResultCode func(u64, u32, s32, s64)>
void SvcWrap() {
FuncReturn(
- func(PARAM(0), (u32)(PARAM(1) & 0xFFFFFFFF), (s32)(PARAM(2) & 0xFFFFFFFF), (s64)PARAM(3))
+ func(Param(0), (u32)(Param(1) & 0xFFFFFFFF), (s32)(Param(2) & 0xFFFFFFFF), (s64)Param(3))
.raw);
}
template <ResultCode func(u64, u32, s32, s32)>
void SvcWrap() {
- FuncReturn(func(PARAM(0), (u32)(PARAM(1) & 0xFFFFFFFF), (s32)(PARAM(2) & 0xFFFFFFFF),
- (s32)(PARAM(3) & 0xFFFFFFFF))
+ FuncReturn(func(Param(0), (u32)(Param(1) & 0xFFFFFFFF), (s32)(Param(2) & 0xFFFFFFFF),
+ (s32)(Param(3) & 0xFFFFFFFF))
.raw);
}
@@ -219,20 +226,17 @@ void SvcWrap() {
template <void func(s64)>
void SvcWrap() {
- func((s64)PARAM(0));
+ func((s64)Param(0));
}
template <void func(u64, u64 len)>
void SvcWrap() {
- func(PARAM(0), PARAM(1));
+ func(Param(0), Param(1));
}
template <void func(u64, u64, u64)>
void SvcWrap() {
- func(PARAM(0), PARAM(1), PARAM(2));
+ func(Param(0), Param(1), Param(2));
}
-#undef PARAM
-#undef FuncReturn
-
} // namespace Kernel
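
Taken together, these templates bridge the guest's register-based SVC ABI and typed C++ handlers: Param(i) reads a guest register, each wrapper casts the registers to its handler's parameter types, and FuncReturn stores the result back for the guest, with SetReg(1)/SetReg(2) doubling as output slots. A standalone sketch of the pattern, with a toy register file standing in for the real Param/FuncReturn helpers defined earlier in svc_wrap.h:

#include <array>
#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

struct ResultCode { u32 raw; };

// Toy register file standing in for the guest CPU context.
static std::array<u64, 8> regs{};

static u64 Param(int i) { return regs[i]; }
static void FuncReturn(u64 value) { regs[0] = value; }

// One specialization per handler signature, selected by template argument.
template <ResultCode func(u32, u64)>
void SvcWrap() {
    FuncReturn(func(static_cast<u32>(Param(0)), Param(1)).raw);
}

ResultCode SampleHandler(u32 handle, u64 address) {
    return ResultCode{0}; // success
}

int main() {
    regs = {42, 0x1234};              // guest places arguments in X0/X1
    SvcWrap<SampleHandler>();         // wrapper unpacks registers, stores X0
    return static_cast<int>(regs[0]); // 0 on success
}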
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 3f12a84dc..b5c16cfbb 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -65,10 +65,7 @@ void Thread::Stop() {
wait_objects.clear();
// Mark the TLS slot in the thread's page as free.
- const u64 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
- const u64 tls_slot =
- ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
- Core::CurrentProcess()->tls_slots[tls_page].reset(tls_slot);
+ owner_process->FreeTLSSlot(tls_address);
}
void WaitCurrentThread_Sleep() {
@@ -178,32 +175,6 @@ void Thread::ResumeFromWait() {
}
/**
- * Finds a free location for the TLS section of a thread.
- * @param tls_slots The TLS page array of the thread's owner process.
- * Returns a tuple of (page, slot, alloc_needed) where:
- * page: The index of the first allocated TLS page that has free slots.
- * slot: The index of the first free slot in the indicated page.
- * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
- */
-static std::tuple<std::size_t, std::size_t, bool> GetFreeThreadLocalSlot(
- const std::vector<std::bitset<8>>& tls_slots) {
- // Iterate over all the allocated pages, and try to find one where not all slots are used.
- for (std::size_t page = 0; page < tls_slots.size(); ++page) {
- const auto& page_tls_slots = tls_slots[page];
- if (!page_tls_slots.all()) {
- // We found a page with at least one free slot, find which slot it is
- for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
- if (!page_tls_slots.test(slot)) {
- return std::make_tuple(page, slot, false);
- }
- }
- }
- }
-
- return std::make_tuple(0, 0, true);
-}
-
-/**
* Resets a thread context, making it ready to be scheduled and run by the CPU
* @param context Thread context to reset
* @param stack_top Address of the top of the stack
@@ -217,8 +188,8 @@ static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, VAdd
context.cpu_registers[0] = arg;
context.pc = entry_point;
context.sp = stack_top;
- context.cpsr = 0;
- context.fpscr = 0;
+ context.pstate = 0;
+ context.fpcr = 0;
}
ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name, VAddr entry_point,
@@ -264,32 +235,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->owner_process = owner_process;
thread->scheduler = Core::System::GetInstance().Scheduler(processor_id);
thread->scheduler->AddThread(thread, priority);
-
- // Find the next available TLS index, and mark it as used
- auto& tls_slots = owner_process->tls_slots;
-
- auto [available_page, available_slot, needs_allocation] = GetFreeThreadLocalSlot(tls_slots);
- if (needs_allocation) {
- tls_slots.emplace_back(0); // The page is completely available at the start
- available_page = tls_slots.size() - 1;
- available_slot = 0; // Use the first slot in the new page
-
- // Allocate some memory from the end of the linear heap for this region.
- const size_t offset = thread->tls_memory->size();
- thread->tls_memory->insert(thread->tls_memory->end(), Memory::PAGE_SIZE, 0);
-
- auto& vm_manager = owner_process->vm_manager;
- vm_manager.RefreshMemoryBlockMappings(thread->tls_memory.get());
-
- vm_manager.MapMemoryBlock(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
- thread->tls_memory, 0, Memory::PAGE_SIZE,
- MemoryState::ThreadLocal);
- }
-
- // Mark the slot as used
- tls_slots[available_page].set(available_slot);
- thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE +
- available_slot * Memory::TLS_ENTRY_SIZE;
+ thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
// TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
// to initialize the context
@@ -311,13 +257,14 @@ void Thread::BoostPriority(u32 priority) {
}
SharedPtr<Thread> SetupMainThread(KernelCore& kernel, VAddr entry_point, u32 priority,
- SharedPtr<Process> owner_process) {
+ Process& owner_process) {
// Setup page table so we can write to memory
- SetCurrentPageTable(&Core::CurrentProcess()->vm_manager.page_table);
+ SetCurrentPageTable(&owner_process.VMManager().page_table);
// Initialize new "main" thread
+ const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress();
auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, THREADPROCESSORID_0,
- Memory::STACK_AREA_VADDR_END, std::move(owner_process));
+ stack_top, &owner_process);
SharedPtr<Thread> thread = std::move(thread_res).Unwrap();
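
The TLS bookkeeping removed from Thread::Stop and Thread::Create above moves behind Process::FreeTLSSlot and Process::MarkNextAvailableTLSSlotAsUsed. Whatever their exact shape in process.cpp, they must preserve the address arithmetic of the removed code: each TLS page holds PAGE_SIZE / TLS_ENTRY_SIZE entries, and a slot's address and its (page, slot) indices are inverses of one another. A sketch of that relationship, with illustrative constants standing in for Memory::TLS_AREA_VADDR, Memory::PAGE_SIZE and Memory::TLS_ENTRY_SIZE:

#include <cstdint>

using VAddr = std::uint64_t;

// Illustrative stand-ins; the real values live in core/memory.h.
constexpr VAddr TLS_AREA_VADDR = 0x1000000;
constexpr std::uint64_t PAGE_SIZE = 0x1000;
constexpr std::uint64_t TLS_ENTRY_SIZE = 0x200;

// Forward mapping used when a slot is handed out (see the removed code above).
constexpr VAddr TLSAddressFor(std::uint64_t page, std::uint64_t slot) {
    return TLS_AREA_VADDR + page * PAGE_SIZE + slot * TLS_ENTRY_SIZE;
}

// Inverse mapping FreeTLSSlot needs in order to clear the right bit again.
constexpr std::uint64_t TLSPageOf(VAddr tls_address) {
    return (tls_address - TLS_AREA_VADDR) / PAGE_SIZE;
}
constexpr std::uint64_t TLSSlotOf(VAddr tls_address) {
    return ((tls_address - TLS_AREA_VADDR) % PAGE_SIZE) / TLS_ENTRY_SIZE;
}

static_assert(TLSPageOf(TLSAddressFor(3, 5)) == 3);
static_assert(TLSSlotOf(TLSAddressFor(3, 5)) == 5);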
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index cb57ee78a..4250144c3 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -62,6 +62,9 @@ enum class ThreadWakeupReason {
class Thread final : public WaitObject {
public:
+ using TLSMemory = std::vector<u8>;
+ using TLSMemoryPtr = std::shared_ptr<TLSMemory>;
+
/**
* Creates and returns a new thread. The new thread is immediately scheduled
* @param kernel The kernel instance this thread will be created under.
@@ -134,6 +137,14 @@ public:
return thread_id;
}
+ TLSMemoryPtr& GetTLSMemory() {
+ return tls_memory;
+ }
+
+ const TLSMemoryPtr& GetTLSMemory() const {
+ return tls_memory;
+ }
+
/**
* Resumes a thread from waiting
*/
@@ -254,7 +265,7 @@ public:
Handle callback_handle;
using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index);
+ SharedPtr<WaitObject> object, std::size_t index);
// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
// was waiting via WaitSynchronizationN then the object will be the last object that became
// available. In case of a timeout, the object will be nullptr.
@@ -269,7 +280,7 @@ private:
explicit Thread(KernelCore& kernel);
~Thread() override;
- std::shared_ptr<std::vector<u8>> tls_memory = std::make_shared<std::vector<u8>>();
+ TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>();
};
/**
@@ -281,7 +292,7 @@ private:
* @return A shared pointer to the main thread
*/
SharedPtr<Thread> SetupMainThread(KernelCore& kernel, VAddr entry_point, u32 priority,
- SharedPtr<Process> owner_process);
+ Process& owner_process);
/**
* Gets the current thread
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 479cacb62..e412309fd 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -9,6 +9,7 @@
#include "common/logging/log.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
+#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
@@ -54,30 +55,32 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
}
VMManager::VMManager() {
- Reset();
+ // Default to assuming a 39-bit address space. This way we have a sane
+ // starting point with executables that don't provide metadata.
+ Reset(FileSys::ProgramAddressSpaceType::Is39Bit);
}
VMManager::~VMManager() {
- Reset();
+ Reset(FileSys::ProgramAddressSpaceType::Is39Bit);
}
-void VMManager::Reset() {
- vma_map.clear();
+void VMManager::Reset(FileSys::ProgramAddressSpaceType type) {
+ Clear();
+
+ InitializeMemoryRegionRanges(type);
+
+ page_table.Resize(address_space_width);
// Initialize the map with a single free region covering the entire managed space.
VirtualMemoryArea initial_vma;
- initial_vma.size = MAX_ADDRESS;
+ initial_vma.size = address_space_end;
vma_map.emplace(initial_vma.base, initial_vma);
- page_table.pointers.fill(nullptr);
- page_table.special_regions.clear();
- page_table.attributes.fill(Memory::PageType::Unmapped);
-
UpdatePageTableForVMA(initial_vma);
}
VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
- if (target >= MAX_ADDRESS) {
+ if (target >= address_space_end) {
return vma_map.end();
} else {
return std::prev(vma_map.upper_bound(target));
@@ -86,7 +89,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
std::shared_ptr<std::vector<u8>> block,
- size_t offset, u64 size,
+ std::size_t offset, u64 size,
MemoryState state) {
ASSERT(block != nullptr);
ASSERT(offset + size <= block->size());
@@ -291,7 +294,7 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
const VAddr target_end = target + size;
ASSERT(target_end >= target);
- ASSERT(target_end <= MAX_ADDRESS);
+ ASSERT(target_end <= address_space_end);
ASSERT(size > 0);
VMAIter begin_vma = StripIterConstness(FindVMA(target));
@@ -382,6 +385,85 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
}
}
+void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type) {
+ u64 map_region_size = 0;
+ u64 heap_region_size = 0;
+ u64 new_map_region_size = 0;
+ u64 tls_io_region_size = 0;
+
+ switch (type) {
+ case FileSys::ProgramAddressSpaceType::Is32Bit:
+ address_space_width = 32;
+ code_region_base = 0x200000;
+ code_region_end = code_region_base + 0x3FE00000;
+ map_region_size = 0x40000000;
+ heap_region_size = 0x40000000;
+ break;
+ case FileSys::ProgramAddressSpaceType::Is36Bit:
+ address_space_width = 36;
+ code_region_base = 0x8000000;
+ code_region_end = code_region_base + 0x78000000;
+ map_region_size = 0x180000000;
+ heap_region_size = 0x180000000;
+ break;
+ case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
+ address_space_width = 32;
+ code_region_base = 0x200000;
+ code_region_end = code_region_base + 0x3FE00000;
+ map_region_size = 0;
+ heap_region_size = 0x80000000;
+ break;
+ case FileSys::ProgramAddressSpaceType::Is39Bit:
+ address_space_width = 39;
+ code_region_base = 0x8000000;
+ code_region_end = code_region_base + 0x80000000;
+ map_region_size = 0x1000000000;
+ heap_region_size = 0x180000000;
+ new_map_region_size = 0x80000000;
+ tls_io_region_size = 0x1000000000;
+ break;
+ default:
+ UNREACHABLE_MSG("Invalid address space type specified: {}", static_cast<u32>(type));
+ return;
+ }
+
+ address_space_base = 0;
+ address_space_end = 1ULL << address_space_width;
+
+ map_region_base = code_region_end;
+ map_region_end = map_region_base + map_region_size;
+
+ heap_region_base = map_region_end;
+ heap_region_end = heap_region_base + heap_region_size;
+
+ new_map_region_base = heap_region_end;
+ new_map_region_end = new_map_region_base + new_map_region_size;
+
+ tls_io_region_base = new_map_region_end;
+ tls_io_region_end = tls_io_region_base + tls_io_region_size;
+
+ if (new_map_region_size == 0) {
+ new_map_region_base = address_space_base;
+ new_map_region_end = address_space_end;
+ }
+}
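
For the common Is39Bit case, the constants above fix the entire layout. A worked check of the resulting ranges, computed exactly as InitializeMemoryRegionRanges does:

#include <cstdint>

// Worked example of the Is39Bit branch; every value follows directly from
// the constants in InitializeMemoryRegionRanges above.
constexpr std::uint64_t address_space_end = 1ULL << 39;          // 0x80'0000'0000
constexpr std::uint64_t code_base = 0x8000000;
constexpr std::uint64_t code_end = code_base + 0x80000000;       // 0x8800'0000
constexpr std::uint64_t map_base = code_end;
constexpr std::uint64_t map_end = map_base + 0x1000000000;       // 0x10'8800'0000
constexpr std::uint64_t heap_base = map_end;
constexpr std::uint64_t heap_end = heap_base + 0x180000000;      // 0x12'0800'0000
constexpr std::uint64_t new_map_base = heap_end;
constexpr std::uint64_t new_map_end = new_map_base + 0x80000000; // 0x12'8800'0000
constexpr std::uint64_t tls_io_base = new_map_end;
constexpr std::uint64_t tls_io_end = tls_io_base + 0x1000000000; // 0x22'8800'0000

// The regions are laid out back to back and all fit inside the address space.
static_assert(tls_io_end < address_space_end);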
+
+void VMManager::Clear() {
+ ClearVMAMap();
+ ClearPageTable();
+}
+
+void VMManager::ClearVMAMap() {
+ vma_map.clear();
+}
+
+void VMManager::ClearPageTable() {
+ std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
+ page_table.special_regions.clear();
+ std::fill(page_table.attributes.begin(), page_table.attributes.end(),
+ Memory::PageType::Unmapped);
+}
+
u64 VMManager::GetTotalMemoryUsage() const {
LOG_WARNING(Kernel, "(STUBBED) called");
return 0xF8000000;
@@ -392,14 +474,80 @@ u64 VMManager::GetTotalHeapUsage() const {
return 0x0;
}
-VAddr VMManager::GetAddressSpaceBaseAddr() const {
- LOG_WARNING(Kernel, "(STUBBED) called");
- return 0x8000000;
+VAddr VMManager::GetAddressSpaceBaseAddress() const {
+ return address_space_base;
+}
+
+VAddr VMManager::GetAddressSpaceEndAddress() const {
+ return address_space_end;
}
u64 VMManager::GetAddressSpaceSize() const {
- LOG_WARNING(Kernel, "(STUBBED) called");
- return MAX_ADDRESS;
+ return address_space_end - address_space_base;
+}
+
+u64 VMManager::GetAddressSpaceWidth() const {
+ return address_space_width;
+}
+
+VAddr VMManager::GetCodeRegionBaseAddress() const {
+ return code_region_base;
+}
+
+VAddr VMManager::GetCodeRegionEndAddress() const {
+ return code_region_end;
+}
+
+u64 VMManager::GetCodeRegionSize() const {
+ return code_region_end - code_region_base;
+}
+
+VAddr VMManager::GetHeapRegionBaseAddress() const {
+ return heap_region_base;
+}
+
+VAddr VMManager::GetHeapRegionEndAddress() const {
+ return heap_region_end;
+}
+
+u64 VMManager::GetHeapRegionSize() const {
+ return heap_region_end - heap_region_base;
+}
+
+VAddr VMManager::GetMapRegionBaseAddress() const {
+ return map_region_base;
+}
+
+VAddr VMManager::GetMapRegionEndAddress() const {
+ return map_region_end;
+}
+
+u64 VMManager::GetMapRegionSize() const {
+ return map_region_end - map_region_base;
+}
+
+VAddr VMManager::GetNewMapRegionBaseAddress() const {
+ return new_map_region_base;
+}
+
+VAddr VMManager::GetNewMapRegionEndAddress() const {
+ return new_map_region_end;
+}
+
+u64 VMManager::GetNewMapRegionSize() const {
+ return new_map_region_end - new_map_region_base;
+}
+
+VAddr VMManager::GetTLSIORegionBaseAddress() const {
+ return tls_io_region_base;
+}
+
+VAddr VMManager::GetTLSIORegionEndAddress() const {
+ return tls_io_region_end;
+}
+
+u64 VMManager::GetTLSIORegionSize() const {
+ return tls_io_region_end - tls_io_region_base;
}
} // namespace Kernel
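
With the ranges tracked per VMManager, callers can query the layout instead of hard-coding globals such as the old MAX_ADDRESS or STACK_AREA_VADDR_END; the SetupMainThread change above does exactly this for the initial stack top. The pattern, as a sketch built only from accessors introduced in this change:

// Sketch of the call pattern used by SetupMainThread above.
VAddr GetInitialStackTop(Kernel::Process& owner_process) {
    return owner_process.VMManager().GetTLSIORegionEndAddress();
}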
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 98bd04bea..015559a64 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -12,6 +12,10 @@
#include "core/memory.h"
#include "core/memory_hook.h"
+namespace FileSys {
+enum class ProgramAddressSpaceType : u8;
+}
+
namespace Kernel {
enum class VMAType : u8 {
@@ -81,7 +85,7 @@ struct VirtualMemoryArea {
/// Memory block backing this VMA.
std::shared_ptr<std::vector<u8>> backing_block = nullptr;
/// Offset into the backing_memory the mapping starts from.
- size_t offset = 0;
+ std::size_t offset = 0;
// Settings for type = BackingMemory
/// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
@@ -111,12 +115,6 @@ struct VirtualMemoryArea {
class VMManager final {
public:
/**
- * The maximum amount of address space managed by the kernel.
- * @todo This was selected arbitrarily, and should be verified for Switch OS.
- */
- static constexpr VAddr MAX_ADDRESS{0x1000000000ULL};
-
- /**
* A map covering the entirety of the managed address space, keyed by the `base` field of each
* VMA. It must always be modified by splitting or merging VMAs, so that the invariant
* `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
@@ -130,7 +128,7 @@ public:
~VMManager();
/// Clears the address space map, re-initializing with a single free area.
- void Reset();
+ void Reset(FileSys::ProgramAddressSpaceType type);
/// Finds the VMA in which the given address is included in, or `vma_map.end()`.
VMAHandle FindVMA(VAddr target) const;
@@ -147,7 +145,7 @@ public:
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
- size_t offset, u64 size, MemoryState state);
+ std::size_t offset, u64 size, MemoryState state);
/**
* Maps an unmanaged host memory pointer at a given address.
@@ -195,12 +193,63 @@ public:
/// Gets the total heap usage, used by svcGetInfo
u64 GetTotalHeapUsage() const;
- /// Gets the total address space base address, used by svcGetInfo
- VAddr GetAddressSpaceBaseAddr() const;
+    /// Gets the address space base address.
+    VAddr GetAddressSpaceBaseAddress() const;
-    /// Gets the total address space address size, used by svcGetInfo
+    /// Gets the address space end address.
+    VAddr GetAddressSpaceEndAddress() const;
+
+    /// Gets the total size of the address space in bytes.
u64 GetAddressSpaceSize() const;
+ /// Gets the address space width in bits.
+ u64 GetAddressSpaceWidth() const;
+
+ /// Gets the base address of the code region.
+ VAddr GetCodeRegionBaseAddress() const;
+
+ /// Gets the end address of the code region.
+ VAddr GetCodeRegionEndAddress() const;
+
+ /// Gets the total size of the code region in bytes.
+ u64 GetCodeRegionSize() const;
+
+ /// Gets the base address of the heap region.
+ VAddr GetHeapRegionBaseAddress() const;
+
+    /// Gets the end address of the heap region.
+ VAddr GetHeapRegionEndAddress() const;
+
+ /// Gets the total size of the heap region in bytes.
+ u64 GetHeapRegionSize() const;
+
+ /// Gets the base address of the map region.
+ VAddr GetMapRegionBaseAddress() const;
+
+ /// Gets the end address of the map region.
+ VAddr GetMapRegionEndAddress() const;
+
+ /// Gets the total size of the map region in bytes.
+ u64 GetMapRegionSize() const;
+
+ /// Gets the base address of the new map region.
+ VAddr GetNewMapRegionBaseAddress() const;
+
+ /// Gets the end address of the new map region.
+ VAddr GetNewMapRegionEndAddress() const;
+
+ /// Gets the total size of the new map region in bytes.
+ u64 GetNewMapRegionSize() const;
+
+ /// Gets the base address of the TLS IO region.
+ VAddr GetTLSIORegionBaseAddress() const;
+
+ /// Gets the end address of the TLS IO region.
+ VAddr GetTLSIORegionEndAddress() const;
+
+ /// Gets the total size of the TLS IO region in bytes.
+ u64 GetTLSIORegionSize() const;
+
/// Each VMManager has its own page table, which is set as the main one when the owning process
/// is scheduled.
Memory::PageTable page_table;
@@ -240,5 +289,36 @@ private:
/// Updates the pages corresponding to this VMA so they match the VMA's attributes.
void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
+
+ /// Initializes memory region ranges to adhere to a given address space type.
+ void InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type);
+
+ /// Clears the underlying map and page table.
+ void Clear();
+
+ /// Clears out the VMA map, unmapping any previously mapped ranges.
+ void ClearVMAMap();
+
+    /// Clears out the page table.
+ void ClearPageTable();
+
+ u32 address_space_width = 0;
+ VAddr address_space_base = 0;
+ VAddr address_space_end = 0;
+
+ VAddr code_region_base = 0;
+ VAddr code_region_end = 0;
+
+ VAddr heap_region_base = 0;
+ VAddr heap_region_end = 0;
+
+ VAddr map_region_base = 0;
+ VAddr map_region_end = 0;
+
+ VAddr new_map_region_base = 0;
+ VAddr new_map_region_end = 0;
+
+ VAddr tls_io_region_base = 0;
+ VAddr tls_io_region_end = 0;
};
} // namespace Kernel
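
Reset now needs the address space type up front, so whoever loads a program is expected to forward the value from its metadata. A hypothetical call site (ProgramMetadata::GetAddressSpaceType() is an assumption about how the loader exposes the type, not a confirmed signature):

// Hypothetical call site; GetAddressSpaceType() stands in for however
// FileSys::ProgramMetadata actually exposes the address space type.
void SetupAddressSpace(Kernel::VMManager& vm_manager,
                       const FileSys::ProgramMetadata& metadata) {
    vm_manager.Reset(metadata.GetAddressSpaceType());
}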
diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp
index eef00b729..b190ceb98 100644
--- a/src/core/hle/kernel/wait_object.cpp
+++ b/src/core/hle/kernel/wait_object.cpp
@@ -81,7 +81,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
}
}
- size_t index = thread->GetWaitObjectIndex(this);
+ std::size_t index = thread->GetWaitObjectIndex(this);
for (auto& object : thread->wait_objects)
object->RemoveWaitingThread(thread.get());
diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h
index 0bd97133c..f4367ee28 100644
--- a/src/core/hle/kernel/wait_object.h
+++ b/src/core/hle/kernel/wait_object.h
@@ -69,7 +69,7 @@ private:
template <>
inline SharedPtr<WaitObject> DynamicObjectCast<WaitObject>(SharedPtr<Object> object) {
if (object != nullptr && object->IsWaitable()) {
- return boost::static_pointer_cast<WaitObject>(std::move(object));
+ return boost::static_pointer_cast<WaitObject>(object);
}
return nullptr;
}