Diffstat (limited to 'src/core/hle/kernel/svc.cpp')
-rw-r--r--   src/core/hle/kernel/svc.cpp   156
1 file changed, 131 insertions(+), 25 deletions(-)
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index a6a17efe7..76a8b0191 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -32,6 +32,7 @@
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
@@ -174,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
return ERR_INVALID_SIZE;
}
- auto& vm_manager = Core::CurrentProcess()->VMManager();
- const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress();
- const auto alloc_result =
- vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
-
+ auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager();
+ const auto alloc_result = vm_manager.SetHeapSize(heap_size);
if (alloc_result.Failed()) {
return alloc_result.Code();
}
@@ -711,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
HeapRegionBaseAddr = 4,
HeapRegionSize = 5,
TotalMemoryUsage = 6,
- TotalHeapUsage = 7,
+ TotalPhysicalMemoryUsed = 7,
IsCurrentProcessBeingDebugged = 8,
RegisterResourceLimit = 9,
IdleTickCount = 10,
@@ -747,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
case GetInfoType::NewMapRegionBaseAddr:
case GetInfoType::NewMapRegionSize:
case GetInfoType::TotalMemoryUsage:
- case GetInfoType::TotalHeapUsage:
+ case GetInfoType::TotalPhysicalMemoryUsed:
case GetInfoType::IsVirtualAddressMemoryEnabled:
case GetInfoType::PersonalMmHeapUsage:
case GetInfoType::TitleId:
@@ -807,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
*result = process->VMManager().GetTotalMemoryUsage();
return RESULT_SUCCESS;
- case GetInfoType::TotalHeapUsage:
- *result = process->VMManager().GetTotalHeapUsage();
+ case GetInfoType::TotalPhysicalMemoryUsed:
+ *result = process->GetTotalPhysicalMemoryUsed();
return RESULT_SUCCESS;
case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -1355,7 +1353,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
current_thread->SetCondVarWaitAddress(condition_variable_addr);
current_thread->SetMutexWaitAddress(mutex_addr);
current_thread->SetWaitHandle(thread_handle);
- current_thread->SetStatus(ThreadStatus::WaitMutex);
+ current_thread->SetStatus(ThreadStatus::WaitCondVar);
current_thread->InvalidateWakeupCallback();
current_thread->WakeAfterDelay(nano_seconds);
@@ -1399,10 +1397,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// them all.
std::size_t last = waiting_threads.size();
if (target != -1)
- last = target;
+ last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
// If there are no threads waiting on this condition variable, just exit
- if (last > waiting_threads.size())
+ if (last == 0)
return RESULT_SUCCESS;
for (std::size_t index = 0; index < last; ++index) {
@@ -1410,6 +1408,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
+ // The thread is no longer waiting on the condition variable; clear its wait address.
+ thread->SetCondVarWaitAddress(0);
+
std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
auto& monitor = Core::System::GetInstance().Monitor();
@@ -1428,10 +1429,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
}
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
thread->GetWaitHandle()));
-
if (mutex_val == 0) {
// We were able to acquire the mutex, resume this thread.
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->ResumeFromWait();
auto* const lock_owner = thread->GetLockOwner();
@@ -1441,8 +1441,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
thread->SetLockOwner(nullptr);
thread->SetMutexWaitAddress(0);
- thread->SetCondVarWaitAddress(0);
thread->SetWaitHandle(0);
+ Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
} else {
// Atomically signal that the mutex now has a waiting thread.
do {
@@ -1461,12 +1461,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
auto owner = handle_table.Get<Thread>(owner_handle);
ASSERT(owner);
- ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+ ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
thread->InvalidateWakeupCallback();
+ thread->SetStatus(ThreadStatus::WaitMutex);
owner->AddMutexWaiter(thread);
-
- Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
}
}
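
Taken together, the condition-variable hunks above change which state a waiter carries at each point: a thread blocked in WaitProcessWideKeyAtomic now sits in ThreadStatus::WaitCondVar, and it only moves to ThreadStatus::WaitMutex once it has been signalled but loses the race for the associated mutex. A hypothetical condensed view of the per-thread wake-up path follows (the function name and parameters are illustrative; the real code operates on SharedPtr<Thread> and the exclusive monitor inline in SignalProcessWideKey):

    // Illustrative summary only; not part of the patch.
    void WakeConditionVariableWaiter(Thread& thread, bool acquired_mutex, Thread& mutex_owner) {
        ASSERT(thread.GetStatus() == ThreadStatus::WaitCondVar);
        thread.SetCondVarWaitAddress(0); // no longer associated with the condition variable
        if (acquired_mutex) {
            // The mutex was free: the thread owns it now and can run immediately.
            thread.ResumeFromWait();
        } else {
            // The mutex is held: queue the thread behind the current owner and keep it
            // asleep, now as a mutex waiter rather than a condition-variable waiter.
            thread.SetStatus(ThreadStatus::WaitMutex);
            mutex_owner.AddMutexWaiter(&thread);
        }
    }
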
@@ -1586,14 +1585,121 @@ static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32
}
auto& kernel = Core::System::GetInstance().Kernel();
- auto process = kernel.CurrentProcess();
- auto& handle_table = process->GetHandleTable();
- const auto shared_mem_handle = SharedMemory::Create(kernel, process, size, perms, perms, addr);
+ auto transfer_mem_handle = TransferMemory::Create(kernel, addr, size, perms);
- CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
+ auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ const auto result = handle_table.Create(std::move(transfer_mem_handle));
+ if (result.Failed()) {
+ return result.Code();
+ }
+
+ *handle = *result;
return RESULT_SUCCESS;
}
+static ResultCode MapTransferMemory(Handle handle, VAddr address, u64 size, u32 permission_raw) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}",
+ handle, address, size, permission_raw);
+
+ if (!Common::Is4KBAligned(address)) {
+ LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).",
+ address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
+ size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size overflows the 64-bit range (address=0x{:016X}, "
+ "size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto permissions = static_cast<MemoryPermission>(permission_raw);
+ if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read &&
+ permissions != MemoryPermission::ReadWrite) {
+ LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).",
+ permission_raw);
+ return ERR_INVALID_STATE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto* const current_process = kernel.CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
+
+ auto transfer_memory = handle_table.Get<TransferMemory>(handle);
+ if (!transfer_memory) {
+ LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
+ handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size don't fully fit within the ASLR region "
+ "(address=0x{:016X}, size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return transfer_memory->MapMemory(address, size, permissions);
+}
+
+static ResultCode UnmapTransferMemory(Handle handle, VAddr address, u64 size) {
+ LOG_DEBUG(Kernel_SVC, "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle,
+ address, size);
+
+ if (!Common::Is4KBAligned(address)) {
+ LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).",
+ address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
+ size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size overflows the 64-bit range (address=0x{:016X}, "
+ "size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& kernel = Core::System::GetInstance().Kernel();
+ const auto* const current_process = kernel.CurrentProcess();
+ const auto& handle_table = current_process->GetHandleTable();
+
+ auto transfer_memory = handle_table.Get<TransferMemory>(handle);
+ if (!transfer_memory) {
+ LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
+ handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Given address and size don't fully fit within the ASLR region "
+ "(address=0x{:016X}, size=0x{:016X}).",
+ address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return transfer_memory->UnmapMemory(address, size);
+}
+
static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
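
The two new handlers above only exercise a small part of the TransferMemory object introduced by this change. Read off their call sites, the interface they assume looks roughly like the sketch below; the actual declaration lives in core/hle/kernel/transfer_memory.h, and the member shapes here are inferred, not copied:

    // Sketch of the interface implied by the call sites above; not the real header.
    class TransferMemory final : public Object {
    public:
        // Backs the transfer memory with the caller's pages at base_address.
        static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, u64 size,
                                                MemoryPermission permissions);

        // Maps the memory into the caller's address space with the given permissions.
        ResultCode MapMemory(VAddr address, u64 size, MemoryPermission permissions);

        // Unmaps a region previously mapped with MapMemory.
        ResultCode UnmapMemory(VAddr address, u64 size);
    };
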
@@ -1969,8 +2075,8 @@ static const FunctionDef SVC_Table[] = {
{0x4E, nullptr, "ReadWriteRegister"},
{0x4F, nullptr, "SetProcessActivity"},
{0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"},
- {0x51, nullptr, "MapTransferMemory"},
- {0x52, nullptr, "UnmapTransferMemory"},
+ {0x51, SvcWrap<MapTransferMemory>, "MapTransferMemory"},
+ {0x52, SvcWrap<UnmapTransferMemory>, "UnmapTransferMemory"},
{0x53, nullptr, "CreateInterruptEvent"},
{0x54, nullptr, "QueryPhysicalAddress"},
{0x55, nullptr, "QueryIoMapping"},
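
Hooking the handlers into SVC_Table relies on svc_wrap.h providing SvcWrap instantiations that match the (Handle, u64, u64, u32) and (Handle, u64, u64) signatures used above. As a rough illustration of the marshalling such a wrapper performs (simplified; Param and FuncReturn stand in for however svc_wrap.h actually reads and writes guest registers):

    // Illustration only: adapts a typed SVC handler to the raw register-based
    // calling convention. X0-X3 carry the arguments; X0 receives the result.
    template <ResultCode func(Handle, u64, u64, u32)>
    void SvcWrap() {
        const ResultCode result = func(static_cast<Handle>(Param(0)), Param(1), Param(2),
                                       static_cast<u32>(Param(3)));
        FuncReturn(result.raw); // write the raw error code back to the guest
    }
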
@@ -2032,7 +2138,7 @@ void CallSVC(u32 immediate) {
MICROPROFILE_SCOPE(Kernel_SVC);
// Lock the global kernel mutex when we enter the kernel HLE.
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+ std::lock_guard lock{HLE::g_hle_lock};
const FunctionDef* info = GetSVCInfo(immediate);
if (info) {