Diffstat (limited to 'src/core/hle/kernel')
36 files changed, 2062 insertions, 585 deletions
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp index c114eaf99..704e82824 100644 --- a/src/core/hle/kernel/client_session.cpp +++ b/src/core/hle/kernel/client_session.cpp @@ -8,6 +8,7 @@ #include "core/hle/kernel/server_session.h" #include "core/hle/kernel/session.h" #include "core/hle/kernel/thread.h" +#include "core/hle/result.h" namespace Kernel { diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h index 439fbdb35..4c18de69c 100644 --- a/src/core/hle/kernel/client_session.h +++ b/src/core/hle/kernel/client_session.h @@ -6,9 +6,9 @@ #include <memory> #include <string> -#include "common/common_types.h" #include "core/hle/kernel/object.h" -#include "core/hle/result.h" + +union ResultCode; namespace Kernel { diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h index 8b58d701d..d17eb0cb6 100644 --- a/src/core/hle/kernel/errors.h +++ b/src/core/hle/kernel/errors.h @@ -11,6 +11,7 @@ namespace Kernel { // Confirmed Switch kernel error codes constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; +constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105}; @@ -27,9 +28,10 @@ constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118}; constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119}; constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120}; constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121}; -constexpr ResultCode ERR_ALREADY_REGISTERED{ErrorModule::Kernel, 122}; +constexpr ResultCode ERR_BUSY{ErrorModule::Kernel, 122}; constexpr ResultCode ERR_SESSION_CLOSED_BY_REMOTE{ErrorModule::Kernel, 123}; constexpr ResultCode ERR_INVALID_STATE{ErrorModule::Kernel, 125}; +constexpr ResultCode ERR_RESERVED_VALUE{ErrorModule::Kernel, 126}; constexpr ResultCode ERR_RESOURCE_LIMIT_EXCEEDED{ErrorModule::Kernel, 132}; } // namespace Kernel diff --git a/src/core/hle/kernel/event.cpp b/src/core/hle/kernel/event.cpp deleted file mode 100644 index 8967e602e..000000000 --- a/src/core/hle/kernel/event.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#include <algorithm> -#include "common/assert.h" -#include "core/hle/kernel/event.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/thread.h" - -namespace Kernel { - -Event::Event(KernelCore& kernel) : WaitObject{kernel} {} -Event::~Event() = default; - -SharedPtr<Event> Event::Create(KernelCore& kernel, ResetType reset_type, std::string name) { - SharedPtr<Event> evt(new Event(kernel)); - - evt->signaled = false; - evt->reset_type = reset_type; - evt->name = std::move(name); - - return evt; -} - -bool Event::ShouldWait(Thread* thread) const { - return !signaled; -} - -void Event::Acquire(Thread* thread) { - ASSERT_MSG(!ShouldWait(thread), "object unavailable!"); - - if (reset_type == ResetType::OneShot) - signaled = false; -} - -void Event::Signal() { - signaled = true; - WakeupAllWaitingThreads(); -} - -void Event::Clear() { - signaled = false; -} - -void Event::WakeupAllWaitingThreads() { - WaitObject::WakeupAllWaitingThreads(); - - if (reset_type == ResetType::Pulse) - signaled = false; -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp index 1bf79b692..c8acde5b1 100644 --- a/src/core/hle/kernel/handle_table.cpp +++ b/src/core/hle/kernel/handle_table.cpp @@ -42,9 +42,10 @@ ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) { u16 generation = next_generation++; // Overflow count so it fits in the 15 bits dedicated to the generation in the handle. - // CTR-OS doesn't use generation 0, so skip straight to 1. - if (next_generation >= (1 << 15)) + // Horizon OS uses zero to represent an invalid handle, so skip to 1. + if (next_generation >= (1 << 15)) { next_generation = 1; + } generations[slot] = generation; objects[slot] = std::move(obj); diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h index e3f3e3fb8..89a3bc740 100644 --- a/src/core/hle/kernel/handle_table.h +++ b/src/core/hle/kernel/handle_table.h @@ -13,6 +13,7 @@ namespace Kernel { enum KernelHandle : Handle { + InvalidHandle = 0, CurrentThread = 0xFFFF8000, CurrentProcess = 0xFFFF8001, }; @@ -42,6 +43,9 @@ enum KernelHandle : Handle { */ class HandleTable final : NonCopyable { public: + /// This is the maximum limit of handles allowed per process in Horizon + static constexpr std::size_t MAX_COUNT = 1024; + HandleTable(); ~HandleTable(); @@ -90,9 +94,6 @@ public: void Clear(); private: - /// This is the maximum limit of handles allowed per process in Horizon - static constexpr std::size_t MAX_COUNT = 1024; - /// Stores the Object referenced by the handle or null if the slot is empty. 
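For context on the generation wrap above, a minimal sketch of a plausible handle encoding. The exact bit packing is an assumption here (only the 15-bit generation is stated in the change); it illustrates why the generation counter has to skip 0 once the raw value 0 is reserved as InvalidHandle:

#include <cstdint>

using Handle = std::uint32_t;

// Assumed layout: table slot in the upper bits, 15-bit generation in the lower bits.
constexpr Handle MakeHandle(std::uint32_t slot, std::uint32_t generation) {
    return (slot << 15) | (generation & 0x7FFF);
}

// Slot 0 with generation 0 would collide with the reserved InvalidHandle value (0),
// which is why next_generation wraps to 1 instead of 0.
static_assert(MakeHandle(0, 0) == 0);
static_assert(MakeHandle(0, 1) != 0);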
std::array<SharedPtr<Object>, MAX_COUNT> objects; diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 68d5376cb..5dd855db8 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -15,17 +15,23 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/hle/ipc_helpers.h" -#include "core/hle/kernel/event.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" +#include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/server_session.h" +#include "core/hle/kernel/thread.h" +#include "core/hle/kernel/writable_event.h" #include "core/memory.h" namespace Kernel { +SessionRequestHandler::SessionRequestHandler() = default; + +SessionRequestHandler::~SessionRequestHandler() = default; + void SessionRequestHandler::ClientConnected(SharedPtr<ServerSession> server_session) { server_session->SetHleHandler(shared_from_this()); connected_sessions.push_back(std::move(server_session)); @@ -36,11 +42,9 @@ void SessionRequestHandler::ClientDisconnected(const SharedPtr<ServerSession>& s boost::range::remove_erase(connected_sessions, server_session); } -SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread, - const std::string& reason, u64 timeout, - WakeupCallback&& callback, - Kernel::SharedPtr<Kernel::Event> event) { - +SharedPtr<WritableEvent> HLERequestContext::SleepClientThread( + SharedPtr<Thread> thread, const std::string& reason, u64 timeout, WakeupCallback&& callback, + SharedPtr<WritableEvent> writable_event) { // Put the client thread to sleep until the wait event is signaled or the timeout expires. thread->SetWakeupCallback([context = *this, callback]( ThreadWakeupReason reason, SharedPtr<Thread> thread, @@ -51,23 +55,25 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread, return true; }); - if (!event) { + auto& kernel = Core::System::GetInstance().Kernel(); + if (!writable_event) { // Create event if not provided - auto& kernel = Core::System::GetInstance().Kernel(); - event = - Kernel::Event::Create(kernel, Kernel::ResetType::OneShot, "HLE Pause Event: " + reason); + const auto pair = WritableEvent::CreateEventPair(kernel, Kernel::ResetType::OneShot, + "HLE Pause Event: " + reason); + writable_event = pair.writable; } - event->Clear(); + const auto readable_event{writable_event->GetReadableEvent()}; + writable_event->Clear(); thread->SetStatus(ThreadStatus::WaitHLEEvent); - thread->SetWaitObjects({event}); - event->AddWaitingThread(thread); + thread->SetWaitObjects({readable_event}); + readable_event->AddWaitingThread(thread); if (timeout > 0) { thread->WakeAfterDelay(timeout); } - return event; + return writable_event; } HLERequestContext::HLERequestContext(SharedPtr<Kernel::ServerSession> server_session) diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index a38e34b74..cb1c5aff3 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -14,8 +14,6 @@ #include "common/swap.h" #include "core/hle/ipc.h" #include "core/hle/kernel/object.h" -#include "core/hle/kernel/server_session.h" -#include "core/hle/kernel/thread.h" namespace Service { class ServiceFrameworkBase; @@ -24,10 +22,15 @@ class ServiceFrameworkBase; namespace Kernel { class Domain; -class Event; class HandleTable; class HLERequestContext; class Process; +class ServerSession; +class Thread; +class 
ReadableEvent; +class WritableEvent; + +enum class ThreadWakeupReason; /** * Interface implemented by HLE Session handlers. @@ -36,7 +39,8 @@ class Process; */ class SessionRequestHandler : public std::enable_shared_from_this<SessionRequestHandler> { public: - virtual ~SessionRequestHandler() = default; + SessionRequestHandler(); + virtual ~SessionRequestHandler(); /** * Handles a sync request from the emulated application. @@ -119,12 +123,13 @@ public: * @param callback Callback to be invoked when the thread is resumed. This callback must write * the entire command response once again, regardless of the state of it before this function * was called. - * @param event Event to use to wake up the thread. If unspecified, an event will be created. + * @param writable_event Event to use to wake up the thread. If unspecified, an event will be + * created. * @returns Event that when signaled will resume the thread and call the callback function. */ - SharedPtr<Event> SleepClientThread(SharedPtr<Thread> thread, const std::string& reason, - u64 timeout, WakeupCallback&& callback, - Kernel::SharedPtr<Kernel::Event> event = nullptr); + SharedPtr<WritableEvent> SleepClientThread(SharedPtr<Thread> thread, const std::string& reason, + u64 timeout, WakeupCallback&& callback, + SharedPtr<WritableEvent> writable_event = nullptr); /// Populates this context with data from the requesting process/thread. ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table, diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index e441c5bc6..67674cd47 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -2,7 +2,6 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#include <array> #include <atomic> #include <memory> #include <mutex> @@ -112,7 +111,7 @@ struct KernelCore::Impl { void Shutdown() { next_object_id = 0; - next_process_id = 10; + next_process_id = Process::ProcessIDMin; next_thread_id = 1; process_list.clear(); @@ -153,10 +152,8 @@ struct KernelCore::Impl { } std::atomic<u32> next_object_id{0}; - // TODO(Subv): Start the process ids from 10 for now, as lower PIDs are - // reserved for low-level services - std::atomic<u32> next_process_id{10}; - std::atomic<u32> next_thread_id{1}; + std::atomic<u64> next_process_id{Process::ProcessIDMin}; + std::atomic<u64> next_thread_id{1}; // Lists all processes that exist in the current session. std::vector<SharedPtr<Process>> process_list; @@ -242,11 +239,11 @@ u32 KernelCore::CreateNewObjectID() { return impl->next_object_id++; } -u32 KernelCore::CreateNewThreadID() { +u64 KernelCore::CreateNewThreadID() { return impl->next_thread_id++; } -u32 KernelCore::CreateNewProcessID() { +u64 KernelCore::CreateNewProcessID() { return impl->next_process_id++; } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index ea00c89f5..58c9d108b 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -88,10 +88,10 @@ private: u32 CreateNewObjectID(); /// Creates a new process ID, incrementing the internal process ID counter; - u32 CreateNewProcessID(); + u64 CreateNewProcessID(); /// Creates a new thread ID, incrementing the internal thread ID counter. - u32 CreateNewThreadID(); + u64 CreateNewThreadID(); /// Creates a timer callback handle for the given timer. 
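A hedged usage sketch of the event pair that SleepClientThread now builds inside the Kernel namespace; it sticks to the calls visible in this change (CreateEventPair, GetReadableEvent, Clear, AddWaitingThread), and `thread` stands in for the guest thread being put to sleep:

auto& kernel = Core::System::GetInstance().Kernel();
const auto pair = WritableEvent::CreateEventPair(kernel, ResetType::OneShot, "Example Event");

SharedPtr<WritableEvent> writable = pair.writable;                // kept by the HLE service
SharedPtr<ReadableEvent> readable = writable->GetReadableEvent(); // what guest code waits on

writable->Clear();                  // start the pair in an unsignaled state
readable->AddWaitingThread(thread); // the sleeping thread parks on the readable end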
ResultVal<Handle> CreateTimerCallbackHandle(const SharedPtr<Timer>& timer); diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp index d87a62bb9..806078638 100644 --- a/src/core/hle/kernel/object.cpp +++ b/src/core/hle/kernel/object.cpp @@ -13,16 +13,17 @@ Object::~Object() = default; bool Object::IsWaitable() const { switch (GetHandleType()) { - case HandleType::Event: + case HandleType::ReadableEvent: case HandleType::Thread: + case HandleType::Process: case HandleType::Timer: case HandleType::ServerPort: case HandleType::ServerSession: return true; case HandleType::Unknown: + case HandleType::WritableEvent: case HandleType::SharedMemory: - case HandleType::Process: case HandleType::AddressArbiter: case HandleType::ResourceLimit: case HandleType::ClientPort: @@ -31,6 +32,7 @@ bool Object::IsWaitable() const { } UNREACHABLE(); + return false; } } // namespace Kernel diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h index c9f4d0bb3..1541b6e3c 100644 --- a/src/core/hle/kernel/object.h +++ b/src/core/hle/kernel/object.h @@ -19,7 +19,8 @@ using Handle = u32; enum class HandleType : u32 { Unknown, - Event, + WritableEvent, + ReadableEvent, SharedMemory, Thread, Process, @@ -33,9 +34,8 @@ enum class HandleType : u32 { }; enum class ResetType { - OneShot, - Sticky, - Pulse, + OneShot, ///< Reset automatically on object acquisition + Sticky, ///< Never reset automatically }; class Object : NonCopyable { diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 7ca538401..c5aa19afa 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -9,6 +9,7 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/file_sys/program_metadata.h" +#include "core/hle/kernel/errors.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" @@ -19,6 +20,35 @@ #include "core/settings.h" namespace Kernel { +namespace { +/** + * Sets up the primary application thread + * + * @param owner_process The parent process for the main thread + * @param kernel The kernel instance to create the main thread under. 
+ * @param entry_point The address at which the thread should start execution + * @param priority The priority to give the main thread + */ +void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) { + // Setup page table so we can write to memory + SetCurrentPageTable(&owner_process.VMManager().page_table); + + // Initialize new "main" thread + const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress(); + auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, + owner_process.GetIdealCore(), stack_top, owner_process); + + SharedPtr<Thread> thread = std::move(thread_res).Unwrap(); + + // Register 1 must be a handle to the main thread + const Handle guest_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); + thread->SetGuestHandle(guest_handle); + thread->GetContext().cpu_registers[1] = guest_handle; + + // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires + thread->ResumeFromWait(); +} +} // Anonymous namespace CodeSet::CodeSet() = default; CodeSet::~CodeSet() = default; @@ -27,13 +57,11 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) { SharedPtr<Process> process(new Process(kernel)); process->name = std::move(name); - process->flags.raw = 0; - process->flags.memory_region.Assign(MemoryRegion::APPLICATION); process->resource_limit = kernel.GetSystemResourceLimit(); process->status = ProcessStatus::Created; process->program_id = 0; process->process_id = kernel.CreateNewProcessID(); - process->svc_access_mask.set(); + process->capabilities.InitializeForMetadatalessProcess(); std::mt19937 rng(Settings::values.rng_seed.value_or(0)); std::uniform_int_distribution<u64> distribution; @@ -44,82 +72,34 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) { return process; } -void Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { - program_id = metadata.GetTitleID(); - is_64bit_process = metadata.Is64BitProgram(); - vm_manager.Reset(metadata.GetAddressSpaceType()); +SharedPtr<ResourceLimit> Process::GetResourceLimit() const { + return resource_limit; } -void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) { - for (std::size_t i = 0; i < len; ++i) { - u32 descriptor = kernel_caps[i]; - u32 type = descriptor >> 20; - - if (descriptor == 0xFFFFFFFF) { - // Unused descriptor entry - continue; - } else if ((type & 0xF00) == 0xE00) { // 0x0FFF - // Allowed interrupts list - LOG_WARNING(Loader, "ExHeader allowed interrupts list ignored"); - } else if ((type & 0xF80) == 0xF00) { // 0x07FF - // Allowed syscalls mask - unsigned int index = ((descriptor >> 24) & 7) * 24; - u32 bits = descriptor & 0xFFFFFF; - - while (bits && index < svc_access_mask.size()) { - svc_access_mask.set(index, bits & 1); - ++index; - bits >>= 1; - } - } else if ((type & 0xFF0) == 0xFE0) { // 0x00FF - // Handle table size - handle_table_size = descriptor & 0x3FF; - } else if ((type & 0xFF8) == 0xFF0) { // 0x007F - // Misc. 
flags - flags.raw = descriptor & 0xFFFF; - } else if ((type & 0xFFE) == 0xFF8) { // 0x001F - // Mapped memory range - if (i + 1 >= len || ((kernel_caps[i + 1] >> 20) & 0xFFE) != 0xFF8) { - LOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored."); - continue; - } - u32 end_desc = kernel_caps[i + 1]; - ++i; // Skip over the second descriptor on the next iteration +ResultCode Process::ClearSignalState() { + if (status == ProcessStatus::Exited) { + LOG_ERROR(Kernel, "called on a terminated process instance."); + return ERR_INVALID_STATE; + } - AddressMapping mapping; - mapping.address = descriptor << 12; - VAddr end_address = end_desc << 12; + if (!is_signaled) { + LOG_ERROR(Kernel, "called on a process instance that isn't signaled."); + return ERR_INVALID_STATE; + } - if (mapping.address < end_address) { - mapping.size = end_address - mapping.address; - } else { - mapping.size = 0; - } + is_signaled = false; + return RESULT_SUCCESS; +} - mapping.read_only = (descriptor & (1 << 20)) != 0; - mapping.unk_flag = (end_desc & (1 << 20)) != 0; - - address_mappings.push_back(mapping); - } else if ((type & 0xFFF) == 0xFFE) { // 0x000F - // Mapped memory page - AddressMapping mapping; - mapping.address = descriptor << 12; - mapping.size = Memory::PAGE_SIZE; - mapping.read_only = false; - mapping.unk_flag = false; - - address_mappings.push_back(mapping); - } else if ((type & 0xFE0) == 0xFC0) { // 0x01FF - // Kernel version - kernel_version = descriptor & 0xFFFF; - - int minor = kernel_version & 0xFF; - int major = (kernel_version >> 8) & 0xFF; - LOG_INFO(Loader, "ExHeader kernel version: {}.{}", major, minor); - } else { - LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x{:08X}", descriptor); - } - } +ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { + program_id = metadata.GetTitleID(); + ideal_core = metadata.GetMainThreadCore(); + is_64bit_process = metadata.Is64BitProgram(); + + vm_manager.Reset(metadata.GetAddressSpaceType()); + + const auto& caps = metadata.GetKernelCapabilities(); + return capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager); } void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { @@ -129,17 +109,17 @@ void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { vm_manager .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size, std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, - MemoryState::Mapped) + MemoryState::Stack) .Unwrap(); vm_manager.LogLayout(); - status = ProcessStatus::Running; + ChangeStatus(ProcessStatus::Running); - Kernel::SetupMainThread(kernel, entry_point, main_thread_priority, *this); + SetupMainThread(*this, kernel, entry_point, main_thread_priority); } void Process::PrepareForTermination() { - status = ProcessStatus::Exited; + ChangeStatus(ProcessStatus::Exiting); const auto stop_threads = [this](const std::vector<SharedPtr<Thread>>& thread_list) { for (auto& thread : thread_list) { @@ -163,6 +143,8 @@ void Process::PrepareForTermination() { stop_threads(system.Scheduler(1).GetThreadList()); stop_threads(system.Scheduler(2).GetThreadList()); stop_threads(system.Scheduler(3).GetThreadList()); + + ChangeStatus(ProcessStatus::Exited); } /** @@ -245,23 +227,25 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) { Core::System::GetInstance().ArmInterface(3).ClearInstructionCache(); } -ResultVal<VAddr> Process::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { - return 
vm_manager.HeapAllocate(target, size, perms); -} +Kernel::Process::Process(KernelCore& kernel) : WaitObject{kernel} {} +Kernel::Process::~Process() {} -ResultCode Process::HeapFree(VAddr target, u32 size) { - return vm_manager.HeapFree(target, size); +void Process::Acquire(Thread* thread) { + ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); } -ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) { - return vm_manager.MirrorMemory(dst_addr, src_addr, size, state); +bool Process::ShouldWait(Thread* thread) const { + return !is_signaled; } -ResultCode Process::UnmapMemory(VAddr dst_addr, VAddr /*src_addr*/, u64 size) { - return vm_manager.UnmapRange(dst_addr, size); -} +void Process::ChangeStatus(ProcessStatus new_status) { + if (status == new_status) { + return; + } -Kernel::Process::Process(KernelCore& kernel) : Object{kernel} {} -Kernel::Process::~Process() {} + status = new_status; + is_signaled = true; + WakeupAllWaitingThreads(); +} } // namespace Kernel diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index ada845c7f..dcc57ae9f 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -11,12 +11,12 @@ #include <string> #include <vector> #include <boost/container/static_vector.hpp> -#include "common/bit_field.h" #include "common/common_types.h" #include "core/hle/kernel/handle_table.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/thread.h" +#include "core/hle/kernel/process_capability.h" #include "core/hle/kernel/vm_manager.h" +#include "core/hle/kernel/wait_object.h" +#include "core/hle/result.h" namespace FileSys { class ProgramMetadata; @@ -26,6 +26,7 @@ namespace Kernel { class KernelCore; class ResourceLimit; +class Thread; struct AddressMapping { // Address and size must be page-aligned @@ -41,24 +42,6 @@ enum class MemoryRegion : u16 { BASE = 3, }; -union ProcessFlags { - u16 raw; - - BitField<0, 1, u16> - allow_debug; ///< Allows other processes to attach to and debug this process. - BitField<1, 1, u16> force_debug; ///< Allows this process to attach to processes even if they - /// don't have allow_debug set. - BitField<2, 1, u16> allow_nonalphanum; - BitField<3, 1, u16> shared_page_writable; ///< Shared page is mapped with write permissions. - BitField<4, 1, u16> privileged_priority; ///< Can use priority levels higher than 24. - BitField<5, 1, u16> allow_main_args; - BitField<6, 1, u16> shared_device_mem; - BitField<7, 1, u16> runnable_on_sleep; - BitField<8, 4, MemoryRegion> - memory_region; ///< Default region for memory allocations for this process - BitField<12, 1, u16> loaded_high; ///< Application loaded high (not at 0x00100000). -}; - /** * Indicates the status of a Process instance. * @@ -117,8 +100,20 @@ struct CodeSet final { VAddr entrypoint = 0; }; -class Process final : public Object { +class Process final : public WaitObject { public: + enum : u64 { + /// Lowest allowed process ID for a kernel initial process. + InitialKIPIDMin = 1, + /// Highest allowed process ID for a kernel initial process. + InitialKIPIDMax = 80, + + /// Lowest allowed process ID for a userland process. + ProcessIDMin = 81, + /// Highest allowed process ID for a userland process. + ProcessIDMax = 0xFFFFFFFFFFFFFFFF, + }; + static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; static SharedPtr<Process> Create(KernelCore& kernel, std::string&& name); @@ -161,7 +156,7 @@ public: } /// Gets the unique ID that identifies this particular process. 
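A small sketch using the new 64-bit process ID ranges declared above to tell kernel initial processes (KIPs) apart from userland processes; the helper names are illustrative only:

bool IsKernelInitialProcessID(u64 process_id) {
    return process_id >= Process::InitialKIPIDMin && process_id <= Process::InitialKIPIDMax;
}

bool IsUserlandProcessID(u64 process_id) {
    // ProcessIDMax spans the rest of the u64 range, so only the lower bound matters here.
    return process_id >= Process::ProcessIDMin;
}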
- u32 GetProcessID() const { + u64 GetProcessID() const { return process_id; } @@ -171,28 +166,21 @@ public: } /// Gets the resource limit descriptor for this process - ResourceLimit& GetResourceLimit() { - return *resource_limit; - } + SharedPtr<ResourceLimit> GetResourceLimit() const; - /// Gets the resource limit descriptor for this process - const ResourceLimit& GetResourceLimit() const { - return *resource_limit; + /// Gets the ideal CPU core ID for this process + u8 GetIdealCore() const { + return ideal_core; } - /// Gets the default CPU ID for this process - u8 GetDefaultProcessorID() const { - return ideal_processor; - } - - /// Gets the bitmask of allowed CPUs that this process' threads can run on. - u32 GetAllowedProcessorMask() const { - return allowed_processor_mask; + /// Gets the bitmask of allowed cores that this process' threads can run on. + u64 GetCoreMask() const { + return capabilities.GetCoreMask(); } /// Gets the bitmask of allowed thread priorities. - u32 GetAllowedThreadPriorityMask() const { - return allowed_thread_priority_mask; + u64 GetPriorityMask() const { + return capabilities.GetPriorityMask(); } u32 IsVirtualMemoryEnabled() const { @@ -219,19 +207,26 @@ public: return random_entropy.at(index); } + /// Clears the signaled state of the process if and only if it's signaled. + /// + /// @pre The process must not be already terminated. If this is called on a + /// terminated process, then ERR_INVALID_STATE will be returned. + /// + /// @pre The process must be in a signaled state. If this is called on a + /// process instance that is not signaled, ERR_INVALID_STATE will be + /// returned. + ResultCode ClearSignalState(); + /** * Loads process-specifics configuration info with metadata provided * by an executable. * - * @param metadata The provided metadata to load process specific info. - */ - void LoadFromMetadata(const FileSys::ProgramMetadata& metadata); - - /** - * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them - * to this process. + * @param metadata The provided metadata to load process specific info from. + * + * @returns RESULT_SUCCESS if all relevant metadata was able to be + * loaded and parsed. Otherwise, an error code is returned. */ - void ParseKernelCaps(const u32* kernel_caps, std::size_t len); + ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata); /** * Applies address space changes and launches the process main thread. @@ -247,7 +242,7 @@ public: void LoadModule(CodeSet module_, VAddr base_addr); /////////////////////////////////////////////////////////////////////////////////////////////// - // Memory Management + // Thread-local storage management // Marks the next available region as used and returns the address of the slot. VAddr MarkNextAvailableTLSSlotAsUsed(Thread& thread); @@ -255,18 +250,21 @@ public: // Frees a used TLS slot identified by the given address void FreeTLSSlot(VAddr tls_address); - ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms); - ResultCode HeapFree(VAddr target, u32 size); - - ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, - MemoryState state = MemoryState::Mapped); - - ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size); - private: explicit Process(KernelCore& kernel); ~Process() override; + /// Checks if the specified thread should wait until this process is available. + bool ShouldWait(Thread* thread) const override; + + /// Acquires/locks this process for the specified thread if it's available. 
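A hedged sketch of how the capability-derived masks above could be consulted when validating a requested core and priority for a new thread; the helper is illustrative and not part of this change:

bool AreThreadParametersAllowed(const Process& process, u32 core, u32 priority) {
    // Each mask has one bit per allowed core / priority level.
    const bool core_allowed = ((process.GetCoreMask() >> core) & 1) != 0;
    const bool priority_allowed = ((process.GetPriorityMask() >> priority) & 1) != 0;
    return core_allowed && priority_allowed;
}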
+ void Acquire(Thread* thread) override; + + /// Changes the process status. If the status is different + /// from the current process status, then this will trigger + /// a process signal. + void ChangeStatus(ProcessStatus new_status); + /// Memory manager for this process. Kernel::VMManager vm_manager; @@ -274,30 +272,16 @@ private: ProcessStatus status; /// The ID of this process - u32 process_id = 0; + u64 process_id = 0; /// Title ID corresponding to the process - u64 program_id; + u64 program_id = 0; /// Resource limit descriptor for this process SharedPtr<ResourceLimit> resource_limit; - /// The process may only call SVCs which have the corresponding bit set. - std::bitset<0x80> svc_access_mask; - /// Maximum size of the handle table for the process. - u32 handle_table_size = 0x200; - /// Special memory ranges mapped into this processes address space. This is used to give - /// processes access to specific I/O regions and device memory. - boost::container::static_vector<AddressMapping, 8> address_mappings; - ProcessFlags flags; - /// Kernel compatibility version for this process - u16 kernel_version = 0; - /// The default CPU for this process, threads are scheduled on this cpu by default. - u8 ideal_processor = 0; - /// Bitmask of allowed CPUs that this process' threads can run on. TODO(Subv): Actually parse - /// this value from the process header. - u32 allowed_processor_mask = THREADPROCESSORID_DEFAULT_MASK; - u32 allowed_thread_priority_mask = 0xFFFFFFFF; + /// The ideal CPU core for this process, threads are scheduled on this core by default. + u8 ideal_core = 0; u32 is_virtual_address_memory_enabled = 0; /// The Thread Local Storage area is allocated as processes create threads, @@ -307,11 +291,18 @@ private: /// This vector will grow as more pages are allocated for new threads. std::vector<std::bitset<8>> tls_slots; + /// Contains the parsed process capability descriptors. + ProcessCapabilities capabilities; + /// Whether or not this process is AArch64, or AArch32. /// By default, we currently assume this is true, unless otherwise /// specified by metadata provided to the process during loading. bool is_64bit_process = true; + /// Whether or not this process is signaled. This occurs + /// upon the process changing to a different state. + bool is_signaled = false; + /// Total running time for the process in ticks. u64 total_process_running_time_ticks = 0; diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp new file mode 100644 index 000000000..3a2164b25 --- /dev/null +++ b/src/core/hle/kernel/process_capability.cpp @@ -0,0 +1,355 @@ +// Copyright 2018 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/bit_util.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/process_capability.h" +#include "core/hle/kernel/vm_manager.h" + +namespace Kernel { +namespace { + +// clang-format off + +// Shift offsets for kernel capability types. +enum : u32 { + CapabilityOffset_PriorityAndCoreNum = 3, + CapabilityOffset_Syscall = 4, + CapabilityOffset_MapPhysical = 6, + CapabilityOffset_MapIO = 7, + CapabilityOffset_Interrupt = 11, + CapabilityOffset_ProgramType = 13, + CapabilityOffset_KernelVersion = 14, + CapabilityOffset_HandleTableSize = 15, + CapabilityOffset_Debug = 16, +}; + +// Combined mask of all parameters that may be initialized only once. 
+constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) | + (1U << CapabilityOffset_ProgramType) | + (1U << CapabilityOffset_KernelVersion) | + (1U << CapabilityOffset_HandleTableSize) | + (1U << CapabilityOffset_Debug); + +// Packed kernel version indicating 10.4.0 +constexpr u32 PackedKernelVersion = 0x520000; + +// Indicates possible types of capabilities that can be specified. +enum class CapabilityType : u32 { + Unset = 0U, + PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1, + Syscall = (1U << CapabilityOffset_Syscall) - 1, + MapPhysical = (1U << CapabilityOffset_MapPhysical) - 1, + MapIO = (1U << CapabilityOffset_MapIO) - 1, + Interrupt = (1U << CapabilityOffset_Interrupt) - 1, + ProgramType = (1U << CapabilityOffset_ProgramType) - 1, + KernelVersion = (1U << CapabilityOffset_KernelVersion) - 1, + HandleTableSize = (1U << CapabilityOffset_HandleTableSize) - 1, + Debug = (1U << CapabilityOffset_Debug) - 1, + Ignorable = 0xFFFFFFFFU, +}; + +// clang-format on + +constexpr CapabilityType GetCapabilityType(u32 value) { + return static_cast<CapabilityType>((~value & (value + 1)) - 1); +} + +u32 GetFlagBitOffset(CapabilityType type) { + const auto value = static_cast<u32>(type); + return static_cast<u32>(Common::BitSize<u32>() - Common::CountLeadingZeroes32(value)); +} + +} // Anonymous namespace + +ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities, + std::size_t num_capabilities, + VMManager& vm_manager) { + Clear(); + + // Allow all cores and priorities. + core_mask = 0xF; + priority_mask = 0xFFFFFFFFFFFFFFFF; + kernel_version = PackedKernelVersion; + + return ParseCapabilities(capabilities, num_capabilities, vm_manager); +} + +ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities, + std::size_t num_capabilities, + VMManager& vm_manager) { + Clear(); + + return ParseCapabilities(capabilities, num_capabilities, vm_manager); +} + +void ProcessCapabilities::InitializeForMetadatalessProcess() { + // Allow all cores and priorities + core_mask = 0xF; + priority_mask = 0xFFFFFFFFFFFFFFFF; + kernel_version = PackedKernelVersion; + + // Allow all system calls and interrupts. + svc_capabilities.set(); + interrupt_capabilities.set(); + + // Allow using the maximum possible amount of handles + handle_table_size = static_cast<u32>(HandleTable::MAX_COUNT); + + // Allow all debugging capabilities. + is_debuggable = true; + can_force_debug = true; +} + +ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities, + std::size_t num_capabilities, + VMManager& vm_manager) { + u32 set_flags = 0; + u32 set_svc_bits = 0; + + for (std::size_t i = 0; i < num_capabilities; ++i) { + const u32 descriptor = capabilities[i]; + const auto type = GetCapabilityType(descriptor); + + if (type == CapabilityType::MapPhysical) { + i++; + + // The MapPhysical type uses two descriptor flags for its parameters. + // If there's only one, then there's a problem. 
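A standalone illustration of the low-set-bits encoding that GetCapabilityType above decodes: (~value & (value + 1)) isolates the lowest cleared bit, so subtracting one recovers the run of low set bits that names the descriptor type.

#include <cstdint>

constexpr std::uint32_t CapabilityTypeOf(std::uint32_t value) {
    return (~value & (value + 1)) - 1;
}

static_assert(CapabilityTypeOf(0x0003'0007u) == 0b0111);       // PriorityAndCoreNum (payload bits ignored)
static_assert(CapabilityTypeOf(0b0'1111) == 0b0'1111);         // Syscall
static_assert(CapabilityTypeOf(0xFFFF'FFFFu) == 0xFFFF'FFFFu); // Ignorable padding entry
static_assert(CapabilityTypeOf(0u) == 0u);                     // Unset (rejected by the parser)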
+ if (i >= num_capabilities) { + return ERR_INVALID_COMBINATION; + } + + const auto size_flags = capabilities[i]; + if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) { + return ERR_INVALID_COMBINATION; + } + + const auto result = HandleMapPhysicalFlags(descriptor, size_flags, vm_manager); + if (result.IsError()) { + return result; + } + } else { + const auto result = + ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, vm_manager); + if (result.IsError()) { + return result; + } + } + } + + return RESULT_SUCCESS; +} + +ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, + u32 flag, VMManager& vm_manager) { + const auto type = GetCapabilityType(flag); + + if (type == CapabilityType::Unset) { + return ERR_INVALID_CAPABILITY_DESCRIPTOR; + } + + // Bail early on ignorable entries, as one would expect, + // ignorable descriptors can be ignored. + if (type == CapabilityType::Ignorable) { + return RESULT_SUCCESS; + } + + // Ensure that the give flag hasn't already been initialized before. + // If it has been, then bail. + const u32 flag_length = GetFlagBitOffset(type); + const u32 set_flag = 1U << flag_length; + if ((set_flag & set_flags & InitializeOnceMask) != 0) { + return ERR_INVALID_COMBINATION; + } + set_flags |= set_flag; + + switch (type) { + case CapabilityType::PriorityAndCoreNum: + return HandlePriorityCoreNumFlags(flag); + case CapabilityType::Syscall: + return HandleSyscallFlags(set_svc_bits, flag); + case CapabilityType::MapIO: + return HandleMapIOFlags(flag, vm_manager); + case CapabilityType::Interrupt: + return HandleInterruptFlags(flag); + case CapabilityType::ProgramType: + return HandleProgramTypeFlags(flag); + case CapabilityType::KernelVersion: + return HandleKernelVersionFlags(flag); + case CapabilityType::HandleTableSize: + return HandleHandleTableFlags(flag); + case CapabilityType::Debug: + return HandleDebugFlags(flag); + default: + break; + } + + return ERR_INVALID_CAPABILITY_DESCRIPTOR; +} + +void ProcessCapabilities::Clear() { + svc_capabilities.reset(); + interrupt_capabilities.reset(); + + core_mask = 0; + priority_mask = 0; + + handle_table_size = 0; + kernel_version = 0; + + program_type = ProgramType::SysModule; + + is_debuggable = false; + can_force_debug = false; +} + +ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) { + if (priority_mask != 0 || core_mask != 0) { + return ERR_INVALID_CAPABILITY_DESCRIPTOR; + } + + const u32 core_num_min = (flags >> 16) & 0xFF; + const u32 core_num_max = (flags >> 24) & 0xFF; + if (core_num_min > core_num_max) { + return ERR_INVALID_COMBINATION; + } + + const u32 priority_min = (flags >> 10) & 0x3F; + const u32 priority_max = (flags >> 4) & 0x3F; + if (priority_min > priority_max) { + return ERR_INVALID_COMBINATION; + } + + // The switch only has 4 usable cores. + if (core_num_max >= 4) { + return ERR_INVALID_PROCESSOR_ID; + } + + const auto make_mask = [](u64 min, u64 max) { + const u64 range = max - min + 1; + const u64 mask = (1ULL << range) - 1; + + return mask << min; + }; + + core_mask = make_mask(core_num_min, core_num_max); + priority_mask = make_mask(priority_min, priority_max); + return RESULT_SUCCESS; +} + +ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) { + const u32 index = flags >> 29; + const u32 svc_bit = 1U << index; + + // If we've already set this svc before, bail. 
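The make_mask lambda above builds the contiguous core and priority masks; a standalone restatement with a couple of checked examples:

#include <cstdint>

constexpr std::uint64_t MakeMask(std::uint64_t min, std::uint64_t max) {
    const std::uint64_t range = max - min + 1;
    return ((std::uint64_t{1} << range) - 1) << min;
}

static_assert(MakeMask(0, 3) == 0xF);  // core_num_min=0, core_num_max=3 -> all four usable cores
static_assert(MakeMask(4, 6) == 0x70); // priorities 4 through 6 -> bits 4, 5 and 6 set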
+ if ((set_svc_bits & svc_bit) != 0) { + return ERR_INVALID_COMBINATION; + } + set_svc_bits |= svc_bit; + + const u32 svc_mask = (flags >> 5) & 0xFFFFFF; + for (u32 i = 0; i < 24; ++i) { + const u32 svc_number = index * 24 + i; + + if ((svc_mask & (1U << i)) == 0) { + continue; + } + + if (svc_number >= svc_capabilities.size()) { + return ERR_OUT_OF_RANGE; + } + + svc_capabilities[svc_number] = true; + } + + return RESULT_SUCCESS; +} + +ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags, + VMManager& vm_manager) { + // TODO(Lioncache): Implement once the memory manager can handle this. + return RESULT_SUCCESS; +} + +ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, VMManager& vm_manager) { + // TODO(Lioncache): Implement once the memory manager can handle this. + return RESULT_SUCCESS; +} + +ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) { + constexpr u32 interrupt_ignore_value = 0x3FF; + const u32 interrupt0 = (flags >> 12) & 0x3FF; + const u32 interrupt1 = (flags >> 22) & 0x3FF; + + for (u32 interrupt : {interrupt0, interrupt1}) { + if (interrupt == interrupt_ignore_value) { + continue; + } + + // NOTE: + // This should be checking a generic interrupt controller value + // as part of the calculation, however, given we don't currently + // emulate that, it's sufficient to mark every interrupt as defined. + + if (interrupt >= interrupt_capabilities.size()) { + return ERR_OUT_OF_RANGE; + } + + interrupt_capabilities[interrupt] = true; + } + + return RESULT_SUCCESS; +} + +ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) { + const u32 reserved = flags >> 17; + if (reserved != 0) { + return ERR_RESERVED_VALUE; + } + + program_type = static_cast<ProgramType>((flags >> 14) & 0b111); + return RESULT_SUCCESS; +} + +ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) { + // Yes, the internal member variable is checked in the actual kernel here. + // This might look odd for options that are only allowed to be initialized + // just once, however the kernel has a separate initialization function for + // kernel processes and userland processes. The kernel variant sets this + // member variable ahead of time. + + const u32 major_version = kernel_version >> 19; + + if (major_version != 0 || flags < 0x80000) { + return ERR_INVALID_CAPABILITY_DESCRIPTOR; + } + + kernel_version = flags; + return RESULT_SUCCESS; +} + +ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) { + const u32 reserved = flags >> 26; + if (reserved != 0) { + return ERR_RESERVED_VALUE; + } + + handle_table_size = (flags >> 16) & 0x3FF; + return RESULT_SUCCESS; +} + +ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) { + const u32 reserved = flags >> 19; + if (reserved != 0) { + return ERR_RESERVED_VALUE; + } + + is_debuggable = (flags & 0x20000) != 0; + can_force_debug = (flags & 0x40000) != 0; + return RESULT_SUCCESS; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h new file mode 100644 index 000000000..fbc8812a3 --- /dev/null +++ b/src/core/hle/kernel/process_capability.h @@ -0,0 +1,264 @@ +// Copyright 2018 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
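A standalone sketch of the syscall descriptor layout that HandleSyscallFlags above consumes: bits 31-29 select one of the 24-SVC groups, and bits 28-5 form a bitmask within that group, so each set bit enables SVC number index * 24 + bit.

#include <cstdint>

constexpr std::uint32_t MakeSyscallDescriptor(std::uint32_t group_index, std::uint32_t group_mask) {
    constexpr std::uint32_t syscall_type_bits = 0b0'1111; // low-bit pattern for the Syscall type
    return (group_index << 29) | (group_mask << 5) | syscall_type_bits;
}

// Enables SVC numbers 24 and 26 (bits 0 and 2 of group 1).
constexpr std::uint32_t descriptor = MakeSyscallDescriptor(1, 0b101);
static_assert(descriptor == 0x200000AF);
static_assert((descriptor >> 29) == 1);                 // group index
static_assert(((descriptor >> 5) & 0xFFFFFF) == 0b101); // per-group SVC mask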
+ +#pragma once + +#include <bitset> + +#include "common/common_types.h" + +union ResultCode; + +namespace Kernel { + +class VMManager; + +/// The possible types of programs that may be indicated +/// by the program type capability descriptor. +enum class ProgramType { + SysModule, + Application, + Applet, +}; + +/// Handles kernel capability descriptors that are provided by +/// application metadata. These descriptors provide information +/// that alters certain parameters for kernel process instance +/// that will run said application (or applet). +/// +/// Capabilities are a sequence of flag descriptors, that indicate various +/// configurations and constraints for a particular process. +/// +/// Flag types are indicated by a sequence of set low bits. E.g. the +/// types are indicated with the low bits as follows (where x indicates "don't care"): +/// +/// - Priority and core mask : 0bxxxxxxxxxxxx0111 +/// - Allowed service call mask: 0bxxxxxxxxxxx01111 +/// - Map physical memory : 0bxxxxxxxxx0111111 +/// - Map IO memory : 0bxxxxxxxx01111111 +/// - Interrupts : 0bxxxx011111111111 +/// - Application type : 0bxx01111111111111 +/// - Kernel version : 0bx011111111111111 +/// - Handle table size : 0b0111111111111111 +/// - Debugger flags : 0b1111111111111111 +/// +/// These are essentially a bit offset subtracted by 1 to create a mask. +/// e.g. The first entry in the above list is simply bit 3 (value 8 -> 0b1000) +/// subtracted by one (7 -> 0b0111) +/// +/// An example of a bit layout (using the map physical layout): +/// <example> +/// The MapPhysical type indicates a sequence entry pair of: +/// +/// [initial, memory_flags], where: +/// +/// initial: +/// bits: +/// 7-24: Starting page to map memory at. +/// 25 : Indicates if the memory should be mapped as read only. +/// +/// memory_flags: +/// bits: +/// 7-20 : Number of pages to map +/// 21-25: Seems to be reserved (still checked against though) +/// 26 : Whether or not the memory being mapped is IO memory, or physical memory +/// </example> +/// +class ProcessCapabilities { +public: + using InterruptCapabilities = std::bitset<1024>; + using SyscallCapabilities = std::bitset<128>; + + ProcessCapabilities() = default; + ProcessCapabilities(const ProcessCapabilities&) = delete; + ProcessCapabilities(ProcessCapabilities&&) = default; + + ProcessCapabilities& operator=(const ProcessCapabilities&) = delete; + ProcessCapabilities& operator=(ProcessCapabilities&&) = default; + + /// Initializes this process capabilities instance for a kernel process. + /// + /// @param capabilities The capabilities to parse + /// @param num_capabilities The number of capabilities to parse. + /// @param vm_manager The memory manager to use for handling any mapping-related + /// operations (such as mapping IO memory, etc). + /// + /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized, + /// otherwise, an error code upon failure. + /// + ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities, + VMManager& vm_manager); + + /// Initializes this process capabilities instance for a userland process. + /// + /// @param capabilities The capabilities to parse. + /// @param num_capabilities The total number of capabilities to parse. + /// @param vm_manager The memory manager to use for handling any mapping-related + /// operations (such as mapping IO memory, etc). + /// + /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized, + /// otherwise, an error code upon failure. 
+ /// + ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities, + VMManager& vm_manager); + + /// Initializes this process capabilities instance for a process that does not + /// have any metadata to parse. + /// + /// This is necessary, as we allow running raw executables, and the internal + /// kernel process capabilities also determine what CPU cores the process is + /// allowed to run on, and what priorities are allowed for threads. It also + /// determines the max handle table size, what the program type is, whether or + /// not the process can be debugged, or whether it's possible for a process to + /// forcibly debug another process. + /// + /// Given the above, this essentially enables all capabilities across the board + /// for the process. It allows the process to: + /// + /// - Run on any core + /// - Use any thread priority + /// - Use the maximum amount of handles a process is allowed to. + /// - Be debuggable + /// - Forcibly debug other processes. + /// + /// Note that this is not a behavior that the kernel allows a process to do via + /// a single function like this. This is yuzu-specific behavior to handle + /// executables with no capability descriptors whatsoever to derive behavior from. + /// It being yuzu-specific is why this is also not the default behavior and not + /// done by default in the constructor. + /// + void InitializeForMetadatalessProcess(); + + /// Gets the allowable core mask + u64 GetCoreMask() const { + return core_mask; + } + + /// Gets the allowable priority mask + u64 GetPriorityMask() const { + return priority_mask; + } + + /// Gets the SVC access permission bits + const SyscallCapabilities& GetServiceCapabilities() const { + return svc_capabilities; + } + + /// Gets the valid interrupt bits. + const InterruptCapabilities& GetInterruptCapabilities() const { + return interrupt_capabilities; + } + + /// Gets the program type for this process. + ProgramType GetProgramType() const { + return program_type; + } + + /// Gets the number of total allowable handles for the process' handle table. + u32 GetHandleTableSize() const { + return handle_table_size; + } + + /// Gets the kernel version value. + u32 GetKernelVersion() const { + return kernel_version; + } + + /// Whether or not this process can be debugged. + bool IsDebuggable() const { + return is_debuggable; + } + + /// Whether or not this process can forcibly debug another + /// process, even if that process is not considered debuggable. + bool CanForceDebug() const { + return can_force_debug; + } + +private: + /// Attempts to parse a given sequence of capability descriptors. + /// + /// @param capabilities The sequence of capability descriptors to parse. + /// @param num_capabilities The number of descriptors within the given sequence. + /// @param vm_manager The memory manager that will perform any memory + /// mapping if necessary. + /// + /// @return RESULT_SUCCESS if no errors occur, otherwise an error code. + /// + ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities, + VMManager& vm_manager); + + /// Attempts to parse a capability descriptor that is only represented by a + /// single flag set. + /// + /// @param set_flags Running set of flags that are used to catch + /// flags being initialized more than once when they shouldn't be. + /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask. + /// @param flag The flag to attempt to parse. 
+ /// @param vm_manager The memory manager that will perform any memory + /// mapping if necessary. + /// + /// @return RESULT_SUCCESS if no errors occurred, otherwise an error code. + /// + ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag, + VMManager& vm_manager); + + /// Clears the internal state of this process capability instance. Necessary, + /// to have a sane starting point due to us allowing running executables without + /// configuration metadata. We assume a process is not going to have metadata, + /// and if it turns out that the process does, in fact, have metadata, then + /// we attempt to parse it. Thus, we need this to reset data members back to + /// a good state. + /// + /// DO NOT ever make this a public member function. This isn't an invariant + /// anything external should depend upon (and if anything comes to rely on it, + /// you should immediately be questioning the design of that thing, not this + /// class. If the kernel itself can run without depending on behavior like that, + /// then so can yuzu). + /// + void Clear(); + + /// Handles flags related to the priority and core number capability flags. + ResultCode HandlePriorityCoreNumFlags(u32 flags); + + /// Handles flags related to determining the allowable SVC mask. + ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags); + + /// Handles flags related to mapping physical memory pages. + ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, VMManager& vm_manager); + + /// Handles flags related to mapping IO pages. + ResultCode HandleMapIOFlags(u32 flags, VMManager& vm_manager); + + /// Handles flags related to the interrupt capability flags. + ResultCode HandleInterruptFlags(u32 flags); + + /// Handles flags related to the program type. + ResultCode HandleProgramTypeFlags(u32 flags); + + /// Handles flags related to the handle table size. + ResultCode HandleHandleTableFlags(u32 flags); + + /// Handles flags related to the kernel version capability flags. + ResultCode HandleKernelVersionFlags(u32 flags); + + /// Handles flags related to debug-specific capabilities. + ResultCode HandleDebugFlags(u32 flags); + + SyscallCapabilities svc_capabilities; + InterruptCapabilities interrupt_capabilities; + + u64 core_mask = 0; + u64 priority_mask = 0; + + u32 handle_table_size = 0; + u32 kernel_version = 0; + + ProgramType program_type = ProgramType::SysModule; + + bool is_debuggable = false; + bool can_force_debug = false; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp new file mode 100644 index 000000000..6973e580c --- /dev/null +++ b/src/core/hle/kernel/readable_event.cpp @@ -0,0 +1,51 @@ +// Copyright 2014 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
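A standalone sketch of the debugger-flags descriptor decoded by HandleDebugFlags (declared above, implemented earlier in process_capability.cpp): bits 0-15 are the type pattern, bit 17 requests debuggability, bit 18 requests force-debug, and bits 19 and up must stay clear.

#include <cstdint>

constexpr std::uint32_t debug_descriptor = 0x0002'FFFF;

static_assert((debug_descriptor & 0x20000) != 0); // is_debuggable would be set
static_assert((debug_descriptor & 0x40000) == 0); // can_force_debug stays false
static_assert((debug_descriptor >> 19) == 0);     // reserved bits clear, so no ERR_RESERVED_VALUE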
+ +#include <algorithm> +#include "common/assert.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/object.h" +#include "core/hle/kernel/readable_event.h" +#include "core/hle/kernel/thread.h" + +namespace Kernel { + +ReadableEvent::ReadableEvent(KernelCore& kernel) : WaitObject{kernel} {} +ReadableEvent::~ReadableEvent() = default; + +bool ReadableEvent::ShouldWait(Thread* thread) const { + return !signaled; +} + +void ReadableEvent::Acquire(Thread* thread) { + ASSERT_MSG(!ShouldWait(thread), "object unavailable!"); + + if (reset_type == ResetType::OneShot) + signaled = false; +} + +void ReadableEvent::Signal() { + signaled = true; + WakeupAllWaitingThreads(); +} + +void ReadableEvent::Clear() { + signaled = false; +} + +ResultCode ReadableEvent::Reset() { + if (!signaled) { + return ERR_INVALID_STATE; + } + + Clear(); + + return RESULT_SUCCESS; +} + +void ReadableEvent::WakeupAllWaitingThreads() { + WaitObject::WakeupAllWaitingThreads(); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/event.h b/src/core/hle/kernel/readable_event.h index 27d6126b0..80b3b0aba 100644 --- a/src/core/hle/kernel/event.h +++ b/src/core/hle/kernel/readable_event.h @@ -4,56 +4,62 @@ #pragma once -#include "common/common_types.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/wait_object.h" +union ResultCode; + namespace Kernel { class KernelCore; +class WritableEvent; + +class ReadableEvent final : public WaitObject { + friend class WritableEvent; -class Event final : public WaitObject { public: - /** - * Creates an event - * @param kernel The kernel instance to create this event under. - * @param reset_type ResetType describing how to create event - * @param name Optional name of event - */ - static SharedPtr<Event> Create(KernelCore& kernel, ResetType reset_type, - std::string name = "Unknown"); + ~ReadableEvent() override; std::string GetTypeName() const override { - return "Event"; + return "ReadableEvent"; } std::string GetName() const override { return name; } - static const HandleType HANDLE_TYPE = HandleType::Event; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } - ResetType GetResetType() const { return reset_type; } + static const HandleType HANDLE_TYPE = HandleType::ReadableEvent; + HandleType GetHandleType() const override { + return HANDLE_TYPE; + } + bool ShouldWait(Thread* thread) const override; void Acquire(Thread* thread) override; void WakeupAllWaitingThreads() override; - void Signal(); + /// Unconditionally clears the readable event's state. void Clear(); + /// Clears the readable event's state if and only if it + /// has already been signaled. + /// + /// @pre The event must be in a signaled state. If this event + /// is in an unsignaled state and this function is called, + /// then ERR_INVALID_STATE will be returned. 
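A hedged sketch contrasting the two clearing paths on the new ReadableEvent shown above; `readable` stands for any SharedPtr<ReadableEvent>, and the error check uses IsError() since ERR_INVALID_STATE is the only failure Reset() produces here:

readable->Clear();                        // unconditional, succeeds even if never signaled

const ResultCode reset_result = readable->Reset();
if (reset_result.IsError()) {
    // The event was not signaled; Reset() refused to clear it (ERR_INVALID_STATE).
}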
+ ResultCode Reset(); + private: - explicit Event(KernelCore& kernel); - ~Event() override; + explicit ReadableEvent(KernelCore& kernel); + + void Signal(); - ResetType reset_type; ///< Current ResetType + ResetType reset_type; + bool signaled; - bool signaled; ///< Whether the event has already been signaled std::string name; ///< Name of event (optional) }; diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 5a5f4cef1..df4d6cf0a 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -9,6 +9,7 @@ #include "common/logging/log.h" #include "core/arm/arm_interface.h" #include "core/core.h" +#include "core/core_cpu.h" #include "core/core_timing.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" @@ -179,4 +180,69 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { ready_queue.prepare(priority); } +Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { + std::lock_guard<std::mutex> lock(scheduler_mutex); + + const u32 mask = 1U << core; + return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) { + return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority; + }); +} + +void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { + ASSERT(thread != nullptr); + // Avoid yielding if the thread isn't even running. + ASSERT(thread->GetStatus() == ThreadStatus::Running); + + // Sanity check that the priority is valid + ASSERT(thread->GetPriority() < THREADPRIO_COUNT); + + // Yield this thread -- sleep for zero time and force reschedule to different thread + WaitCurrentThread_Sleep(); + GetCurrentThread()->WakeAfterDelay(0); +} + +void Scheduler::YieldWithLoadBalancing(Thread* thread) { + ASSERT(thread != nullptr); + const auto priority = thread->GetPriority(); + const auto core = static_cast<u32>(thread->GetProcessorID()); + + // Avoid yielding if the thread isn't even running. + ASSERT(thread->GetStatus() == ThreadStatus::Running); + + // Sanity check that the priority is valid + ASSERT(priority < THREADPRIO_COUNT); + + // Sleep for zero time to be able to force reschedule to different thread + WaitCurrentThread_Sleep(); + GetCurrentThread()->WakeAfterDelay(0); + + Thread* suggested_thread = nullptr; + + // Search through all of the cpu cores (except this one) for a suggested thread. 
+ // Take the first non-nullptr one + for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) { + const auto res = + Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread( + core, priority); + + // If scheduler provides a suggested thread + if (res != nullptr) { + // And its better than the current suggested thread (or is the first valid one) + if (suggested_thread == nullptr || + suggested_thread->GetPriority() > res->GetPriority()) { + suggested_thread = res; + } + } + } + + // If a suggested thread was found, queue that for this core + if (suggested_thread != nullptr) + suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask()); +} + +void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) { + UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!"); +} + } // namespace Kernel diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index c63032b7d..97ced4dfc 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h @@ -51,6 +51,75 @@ public: /// Sets the priority of a thread in the scheduler void SetThreadPriority(Thread* thread, u32 priority); + /// Gets the next suggested thread for load balancing + Thread* GetNextSuggestedThread(u32 core, u32 minimum_priority) const; + + /** + * YieldWithoutLoadBalancing -- analogous to normal yield on a system + * Moves the thread to the end of the ready queue for its priority, and then reschedules the + * system to the new head of the queue. + * + * Example (Single Core -- but can be extrapolated to multi): + * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC (->exec order->) + * Currently Running: ThreadR + * + * ThreadR calls YieldWithoutLoadBalancing + * + * ThreadR is moved to the end of ready_queue[prio=0]: + * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC, ThreadR (->exec order->) + * Currently Running: Nothing + * + * System is rescheduled (ThreadA is popped off of queue): + * ready_queue[prio=0]: ThreadB, ThreadC, ThreadR (->exec order->) + * Currently Running: ThreadA + * + * If the queue is empty at time of call, no yielding occurs. This does not cross between cores + * or priorities at all. + */ + void YieldWithoutLoadBalancing(Thread* thread); + + /** + * YieldWithLoadBalancing -- yield but with better selection of the new running thread + * Moves the current thread to the end of the ready queue for its priority, then selects a + * 'suggested thread' (a thread on a different core that could run on this core) from the + * scheduler, changes its core, and reschedules the current core to that thread. + * + * Example (Dual Core -- can be extrapolated to Quad Core, this is just normal yield if it were + * single core): + * ready_queue[core=0][prio=0]: ThreadA, ThreadB (affinities not pictured as irrelevant + * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only] + * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1 + * + * ThreadQ calls YieldWithLoadBalancing + * + * ThreadQ is moved to the end of ready_queue[core=0][prio=0]: + * ready_queue[core=0][prio=0]: ThreadA, ThreadB + * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only] + * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1 + * + * A list of suggested threads for each core is compiled + * Suggested Threads: {ThreadC on Core 1} + * If this were quad core (as the switch is), there could be between 0 and 3 threads in this + * list. 
If there are more than one, the thread is selected by highest prio. + * + * ThreadC is core changed to Core 0: + * ready_queue[core=0][prio=0]: ThreadC, ThreadA, ThreadB, ThreadQ + * ready_queue[core=1][prio=0]: ThreadD + * Currently Running: None on Core 0 || ThreadP on Core 1 + * + * System is rescheduled (ThreadC is popped off of queue): + * ready_queue[core=0][prio=0]: ThreadA, ThreadB, ThreadQ + * ready_queue[core=1][prio=0]: ThreadD + * Currently Running: ThreadC on Core 0 || ThreadP on Core 1 + * + * If no suggested threads can be found this will behave just as normal yield. If there are + * multiple candidates for the suggested thread on a core, the highest prio is taken. + */ + void YieldWithLoadBalancing(Thread* thread); + + /// Currently unknown -- asserts as unimplemented on call + void YieldAndWaitForLoadBalancing(Thread* thread); + /// Returns a list of all threads managed by the scheduler const std::vector<SharedPtr<Thread>>& GetThreadList() const { return thread_list; diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 80897f3a4..027434f92 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -6,6 +6,7 @@ #include <utility> #include "common/assert.h" +#include "common/common_types.h" #include "common/logging/log.h" #include "core/core.h" #include "core/hle/ipc_helpers.h" diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h index e068db2bf..e0e9d64c8 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/server_session.h @@ -8,7 +8,6 @@ #include <string> #include <vector> -#include "common/common_types.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/wait_object.h" #include "core/hle/result.h" diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp index 0494581f5..22d0c1dd5 100644 --- a/src/core/hle/kernel/shared_memory.cpp +++ b/src/core/hle/kernel/shared_memory.cpp @@ -17,13 +17,13 @@ namespace Kernel { SharedMemory::SharedMemory(KernelCore& kernel) : Object{kernel} {} SharedMemory::~SharedMemory() = default; -SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, SharedPtr<Process> owner_process, - u64 size, MemoryPermission permissions, +SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_process, u64 size, + MemoryPermission permissions, MemoryPermission other_permissions, VAddr address, MemoryRegion region, std::string name) { SharedPtr<SharedMemory> shared_memory(new SharedMemory(kernel)); - shared_memory->owner_process = std::move(owner_process); + shared_memory->owner_process = owner_process; shared_memory->name = std::move(name); shared_memory->size = size; shared_memory->permissions = permissions; @@ -39,15 +39,15 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, SharedPtr<Proce shared_memory->backing_block.get()); } } else { - auto& vm_manager = shared_memory->owner_process->VMManager(); + const auto& vm_manager = shared_memory->owner_process->VMManager(); // The memory is already available and mapped in the owner process. - auto vma = vm_manager.FindVMA(address); - ASSERT_MSG(vma != vm_manager.vma_map.end(), "Invalid memory address"); + const auto vma = vm_manager.FindVMA(address); + ASSERT_MSG(vm_manager.IsValidHandle(vma), "Invalid memory address"); ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address"); // The returned VMA might be a bigger one encompassing the desired address. 
- auto vma_offset = address - vma->first; + const auto vma_offset = address - vma->first; ASSERT_MSG(vma_offset + size <= vma->second.size, "Shared memory exceeds bounds of mapped block"); diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h index 0b48db699..dab2a6bea 100644 --- a/src/core/hle/kernel/shared_memory.h +++ b/src/core/hle/kernel/shared_memory.h @@ -45,8 +45,8 @@ public: * linear heap. * @param name Optional object name, used for debugging purposes. */ - static SharedPtr<SharedMemory> Create(KernelCore& kernel, SharedPtr<Process> owner_process, - u64 size, MemoryPermission permissions, + static SharedPtr<SharedMemory> Create(KernelCore& kernel, Process* owner_process, u64 size, + MemoryPermission permissions, MemoryPermission other_permissions, VAddr address = 0, MemoryRegion region = MemoryRegion::BASE, std::string name = "Unknown"); @@ -139,7 +139,7 @@ private: /// Permission restrictions applied to other processes mapping the block. MemoryPermission other_permissions{}; /// Process that created this shared memory block. - SharedPtr<Process> owner_process; + Process* owner_process; /// Address of shared memory block in the owner process if specified. VAddr base_address = 0; /// Name of shared memory object. diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 3339777c1..6588bd3b8 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -20,21 +20,22 @@ #include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/client_session.h" -#include "core/hle/kernel/event.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/mutex.h" #include "core/hle/kernel/process.h" +#include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/svc.h" #include "core/hle/kernel/svc_wrap.h" #include "core/hle/kernel/thread.h" +#include "core/hle/kernel/writable_event.h" #include "core/hle/lock.h" #include "core/hle/result.h" #include "core/hle/service/service.h" -#include "core/settings.h" +#include "core/memory.h" namespace Kernel { namespace { @@ -189,10 +190,16 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) { return ERR_INVALID_SIZE; } - auto& process = *Core::CurrentProcess(); - const VAddr heap_base = process.VMManager().GetHeapRegionBaseAddress(); - CASCADE_RESULT(*heap_addr, - process.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite)); + auto& vm_manager = Core::CurrentProcess()->VMManager(); + const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress(); + const auto alloc_result = + vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite); + + if (alloc_result.Failed()) { + return alloc_result.Code(); + } + + *heap_addr = *alloc_result; return RESULT_SUCCESS; } @@ -239,7 +246,7 @@ static ResultCode SetMemoryPermission(VAddr addr, u64 size, u32 prot) { } const VMManager::VMAHandle iter = vm_manager.FindVMA(addr); - if (iter == vm_manager.vma_map.end()) { + if (!vm_manager.IsValidHandle(iter)) { LOG_ERROR(Kernel_SVC, "Unable to find VMA for address=0x{:016X}", addr); return ERR_INVALID_ADDRESS_STATE; } @@ -253,11 +260,52 @@ static ResultCode SetMemoryPermission(VAddr addr, u64 size, u32 prot) { return vm_manager.ReprotectRange(addr, size, converted_permissions); } -static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, 
u32 state1) { - LOG_WARNING(Kernel_SVC, - "(STUBBED) called, addr=0x{:X}, size=0x{:X}, state0=0x{:X}, state1=0x{:X}", addr, - size, state0, state1); - return RESULT_SUCCESS; +static ResultCode SetMemoryAttribute(VAddr address, u64 size, u32 mask, u32 attribute) { + LOG_DEBUG(Kernel_SVC, + "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address, + size, mask, attribute); + + if (!Common::Is4KBAligned(address)) { + LOG_ERROR(Kernel_SVC, "Address not page aligned (0x{:016X})", address); + return ERR_INVALID_ADDRESS; + } + + if (size == 0 || !Common::Is4KBAligned(size)) { + LOG_ERROR(Kernel_SVC, "Invalid size (0x{:X}). Size must be non-zero and page aligned.", + size); + return ERR_INVALID_ADDRESS; + } + + if (!IsValidAddressRange(address, size)) { + LOG_ERROR(Kernel_SVC, "Address range overflowed (Address: 0x{:016X}, Size: 0x{:016X})", + address, size); + return ERR_INVALID_ADDRESS_STATE; + } + + const auto mem_attribute = static_cast<MemoryAttribute>(attribute); + const auto mem_mask = static_cast<MemoryAttribute>(mask); + const auto attribute_with_mask = mem_attribute | mem_mask; + + if (attribute_with_mask != mem_mask) { + LOG_ERROR(Kernel_SVC, + "Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: 0x{:X})", + attribute, mask); + return ERR_INVALID_COMBINATION; + } + + if ((attribute_with_mask | MemoryAttribute::Uncached) != MemoryAttribute::Uncached) { + LOG_ERROR(Kernel_SVC, "Specified attribute isn't equal to MemoryAttributeUncached (8)."); + return ERR_INVALID_COMBINATION; + } + + auto& vm_manager = Core::CurrentProcess()->VMManager(); + if (!IsInsideAddressSpace(vm_manager, address, size)) { + LOG_ERROR(Kernel_SVC, + "Given address (0x{:016X}) is outside the bounds of the address space.", address); + return ERR_INVALID_ADDRESS_STATE; + } + + return vm_manager.SetMemoryAttribute(address, size, mem_mask, mem_attribute); } /// Maps a memory range into a different range.
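The two bitmask tests in the new svcSetMemoryAttribute above are its core validation: the attribute bits must be a subset of the supplied mask, and only the Uncached attribute (8) may currently be changed. A minimal standalone sketch of the same checks, using plain u32 values instead of the MemoryAttribute enum (the helper name is illustrative, not part of this change):

#include <cstdint>

constexpr std::uint32_t MemoryAttributeUncached = 8;

// Mirrors the two rejection paths above: attribute | mask must equal mask,
// and the combined value may only contain the Uncached bit.
constexpr bool IsValidSetMemoryAttributeCombination(std::uint32_t mask, std::uint32_t attribute) {
    const std::uint32_t attribute_with_mask = attribute | mask;
    if (attribute_with_mask != mask) {
        return false; // attribute selects bits the mask does not cover
    }
    return (attribute_with_mask | MemoryAttributeUncached) == MemoryAttributeUncached;
}

static_assert(IsValidSetMemoryAttributeCombination(8, 8));  // set Uncached
static_assert(IsValidSetMemoryAttributeCombination(8, 0));  // clear Uncached
static_assert(!IsValidSetMemoryAttributeCombination(0, 8)); // attribute outside mask
static_assert(!IsValidSetMemoryAttributeCombination(4, 4)); // DeviceMapped cannot be changed here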
@@ -265,15 +313,14 @@ static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) { LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); - auto* const current_process = Core::CurrentProcess(); - const auto& vm_manager = current_process->VMManager(); - + auto& vm_manager = Core::CurrentProcess()->VMManager(); const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size); - if (result != RESULT_SUCCESS) { + + if (result.IsError()) { return result; } - return current_process->MirrorMemory(dst_addr, src_addr, size); + return vm_manager.MirrorMemory(dst_addr, src_addr, size, MemoryState::Stack); } /// Unmaps a region that was previously mapped with svcMapMemory @@ -281,15 +328,14 @@ static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) { LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); - auto* const current_process = Core::CurrentProcess(); - const auto& vm_manager = current_process->VMManager(); - + auto& vm_manager = Core::CurrentProcess()->VMManager(); const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size); - if (result != RESULT_SUCCESS) { + + if (result.IsError()) { return result; } - return current_process->UnmapMemory(dst_addr, src_addr, size); + return vm_manager.UnmapRange(dst_addr, size); } /// Connect to an OS service given the port name, returns the handle to the port to out @@ -349,7 +395,7 @@ static ResultCode SendSyncRequest(Handle handle) { } /// Get the ID for the specified thread. -static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) { +static ResultCode GetThreadId(u64* thread_id, Handle thread_handle) { LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle); const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); @@ -363,20 +409,33 @@ static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) { return RESULT_SUCCESS; } -/// Get the ID of the specified process -static ResultCode GetProcessId(u32* process_id, Handle process_handle) { - LOG_TRACE(Kernel_SVC, "called process=0x{:08X}", process_handle); +/// Gets the ID of the specified process or a specified thread's owning process. +static ResultCode GetProcessId(u64* process_id, Handle handle) { + LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle); const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); - const SharedPtr<Process> process = handle_table.Get<Process>(process_handle); - if (!process) { - LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", - process_handle); - return ERR_INVALID_HANDLE; + const SharedPtr<Process> process = handle_table.Get<Process>(handle); + if (process) { + *process_id = process->GetProcessID(); + return RESULT_SUCCESS; } - *process_id = process->GetProcessID(); - return RESULT_SUCCESS; + const SharedPtr<Thread> thread = handle_table.Get<Thread>(handle); + if (thread) { + const Process* const owner_process = thread->GetOwnerProcess(); + if (!owner_process) { + LOG_ERROR(Kernel_SVC, "Non-existent owning process encountered."); + return ERR_INVALID_HANDLE; + } + + *process_id = owner_process->GetProcessID(); + return RESULT_SUCCESS; + } + + // NOTE: This should also handle debug objects before returning. 
+ + LOG_ERROR(Kernel_SVC, "Handle does not exist, handle=0x{:08X}", handle); + return ERR_INVALID_HANDLE; } /// Default thread wakeup callback for WaitSynchronization @@ -625,6 +684,9 @@ static void Break(u32 reason, u64 info1, u64 info2) { "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", reason, info1, info2); handle_debug_buffer(info1, info2); + Core::System::GetInstance() + .ArmInterface(static_cast<std::size_t>(GetCurrentThread()->GetProcessorID())) + .LogBacktrace(); ASSERT(false); Core::CurrentProcess()->PrepareForTermination(); @@ -653,8 +715,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) enum class GetInfoType : u64 { // 1.0.0+ - AllowedCpuIdBitmask = 0, - AllowedThreadPrioBitmask = 1, + AllowedCPUCoreMask = 0, + AllowedThreadPriorityMask = 1, MapRegionBaseAddr = 2, MapRegionSize = 3, HeapRegionBaseAddr = 4, @@ -662,7 +724,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) TotalMemoryUsage = 6, TotalHeapUsage = 7, IsCurrentProcessBeingDebugged = 8, - ResourceHandleLimit = 9, + RegisterResourceLimit = 9, IdleTickCount = 10, RandomEntropy = 11, PerformanceCounter = 0xF0000002, @@ -682,37 +744,137 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) ThreadTickCount = 0xF0000002, }; - const auto* current_process = Core::CurrentProcess(); - const auto& vm_manager = current_process->VMManager(); + const auto info_id_type = static_cast<GetInfoType>(info_id); - switch (static_cast<GetInfoType>(info_id)) { - case GetInfoType::AllowedCpuIdBitmask: - *result = current_process->GetAllowedProcessorMask(); - break; - case GetInfoType::AllowedThreadPrioBitmask: - *result = current_process->GetAllowedThreadPriorityMask(); - break; + switch (info_id_type) { + case GetInfoType::AllowedCPUCoreMask: + case GetInfoType::AllowedThreadPriorityMask: case GetInfoType::MapRegionBaseAddr: - *result = vm_manager.GetMapRegionBaseAddress(); - break; case GetInfoType::MapRegionSize: - *result = vm_manager.GetMapRegionSize(); - break; case GetInfoType::HeapRegionBaseAddr: - *result = vm_manager.GetHeapRegionBaseAddress(); - break; case GetInfoType::HeapRegionSize: - *result = vm_manager.GetHeapRegionSize(); - break; + case GetInfoType::ASLRRegionBaseAddr: + case GetInfoType::ASLRRegionSize: + case GetInfoType::NewMapRegionBaseAddr: + case GetInfoType::NewMapRegionSize: case GetInfoType::TotalMemoryUsage: - *result = vm_manager.GetTotalMemoryUsage(); - break; case GetInfoType::TotalHeapUsage: - *result = vm_manager.GetTotalHeapUsage(); - break; + case GetInfoType::IsVirtualAddressMemoryEnabled: + case GetInfoType::PersonalMmHeapUsage: + case GetInfoType::TitleId: + case GetInfoType::UserExceptionContextAddr: { + if (info_sub_id != 0) { + return ERR_INVALID_ENUM_VALUE; + } + + const auto& current_process_handle_table = Core::CurrentProcess()->GetHandleTable(); + const auto process = current_process_handle_table.Get<Process>(static_cast<Handle>(handle)); + if (!process) { + return ERR_INVALID_HANDLE; + } + + switch (info_id_type) { + case GetInfoType::AllowedCPUCoreMask: + *result = process->GetCoreMask(); + return RESULT_SUCCESS; + + case GetInfoType::AllowedThreadPriorityMask: + *result = process->GetPriorityMask(); + return RESULT_SUCCESS; + + case GetInfoType::MapRegionBaseAddr: + *result = process->VMManager().GetMapRegionBaseAddress(); + return RESULT_SUCCESS; + + case GetInfoType::MapRegionSize: + *result = process->VMManager().GetMapRegionSize(); + return 
RESULT_SUCCESS; + + case GetInfoType::HeapRegionBaseAddr: + *result = process->VMManager().GetHeapRegionBaseAddress(); + return RESULT_SUCCESS; + + case GetInfoType::HeapRegionSize: + *result = process->VMManager().GetHeapRegionSize(); + return RESULT_SUCCESS; + + case GetInfoType::ASLRRegionBaseAddr: + *result = process->VMManager().GetASLRRegionBaseAddress(); + return RESULT_SUCCESS; + + case GetInfoType::ASLRRegionSize: + *result = process->VMManager().GetASLRRegionSize(); + return RESULT_SUCCESS; + + case GetInfoType::NewMapRegionBaseAddr: + *result = process->VMManager().GetNewMapRegionBaseAddress(); + return RESULT_SUCCESS; + + case GetInfoType::NewMapRegionSize: + *result = process->VMManager().GetNewMapRegionSize(); + return RESULT_SUCCESS; + + case GetInfoType::TotalMemoryUsage: + *result = process->VMManager().GetTotalMemoryUsage(); + return RESULT_SUCCESS; + + case GetInfoType::TotalHeapUsage: + *result = process->VMManager().GetTotalHeapUsage(); + return RESULT_SUCCESS; + + case GetInfoType::IsVirtualAddressMemoryEnabled: + *result = process->IsVirtualMemoryEnabled(); + return RESULT_SUCCESS; + + case GetInfoType::TitleId: + *result = process->GetTitleID(); + return RESULT_SUCCESS; + + case GetInfoType::UserExceptionContextAddr: + LOG_WARNING(Kernel_SVC, + "(STUBBED) Attempted to query user exception context address, returned 0"); + *result = 0; + return RESULT_SUCCESS; + + default: + break; + } + + LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id); + return ERR_INVALID_ENUM_VALUE; + } + case GetInfoType::IsCurrentProcessBeingDebugged: *result = 0; - break; + return RESULT_SUCCESS; + + case GetInfoType::RegisterResourceLimit: { + if (handle != 0) { + return ERR_INVALID_HANDLE; + } + + if (info_sub_id != 0) { + return ERR_INVALID_COMBINATION; + } + + Process* const current_process = Core::CurrentProcess(); + HandleTable& handle_table = current_process->GetHandleTable(); + const auto resource_limit = current_process->GetResourceLimit(); + if (!resource_limit) { + *result = KernelHandle::InvalidHandle; + // Yes, the kernel considers this a successful operation. 
+ return RESULT_SUCCESS; + } + + const auto table_result = handle_table.Create(resource_limit); + if (table_result.Failed()) { + return table_result.Code(); + } + + *result = *table_result; + return RESULT_SUCCESS; + } + case GetInfoType::RandomEntropy: if (handle != 0) { LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}", @@ -726,37 +888,15 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) return ERR_INVALID_COMBINATION; } - *result = current_process->GetRandomEntropy(info_sub_id); + *result = Core::CurrentProcess()->GetRandomEntropy(info_sub_id); return RESULT_SUCCESS; - break; - case GetInfoType::ASLRRegionBaseAddr: - *result = vm_manager.GetASLRRegionBaseAddress(); - break; - case GetInfoType::ASLRRegionSize: - *result = vm_manager.GetASLRRegionSize(); - break; - case GetInfoType::NewMapRegionBaseAddr: - *result = vm_manager.GetNewMapRegionBaseAddress(); - break; - case GetInfoType::NewMapRegionSize: - *result = vm_manager.GetNewMapRegionSize(); - break; - case GetInfoType::IsVirtualAddressMemoryEnabled: - *result = current_process->IsVirtualMemoryEnabled(); - break; - case GetInfoType::TitleId: - *result = current_process->GetTitleID(); - break; + case GetInfoType::PrivilegedProcessId: LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query privileged process id bounds, returned 0"); *result = 0; - break; - case GetInfoType::UserExceptionContextAddr: - LOG_WARNING(Kernel_SVC, - "(STUBBED) Attempted to query user exception context address, returned 0"); - *result = 0; - break; + return RESULT_SUCCESS; + case GetInfoType::ThreadTickCount: { constexpr u64 num_cpus = 4; if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) { @@ -766,7 +906,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) } const auto thread = - current_process->GetHandleTable().Get<Thread>(static_cast<Handle>(handle)); + Core::CurrentProcess()->GetHandleTable().Get<Thread>(static_cast<Handle>(handle)); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", static_cast<Handle>(handle)); @@ -789,19 +929,45 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) } *result = out_ticks; - break; + return RESULT_SUCCESS; } + default: LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id); return ERR_INVALID_ENUM_VALUE; } - - return RESULT_SUCCESS; } /// Sets the thread activity -static ResultCode SetThreadActivity(Handle handle, u32 unknown) { - LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x{:08X}, unknown=0x{:08X}", handle, unknown); +static ResultCode SetThreadActivity(Handle handle, u32 activity) { + LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity); + if (activity > static_cast<u32>(ThreadActivity::Paused)) { + return ERR_INVALID_ENUM_VALUE; + } + + const auto* current_process = Core::CurrentProcess(); + const SharedPtr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle); + if (!thread) { + LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); + return ERR_INVALID_HANDLE; + } + + if (thread->GetOwnerProcess() != current_process) { + LOG_ERROR(Kernel_SVC, + "The current process does not own the current thread, thread_handle={:08X} " + "thread_pid={}, " + "current_process_pid={}", + handle, thread->GetOwnerProcess()->GetProcessID(), + current_process->GetProcessID()); + return ERR_INVALID_HANDLE; + } + + if (thread == GetCurrentThread()) { 
+ LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); + return ERR_BUSY; + } + + thread->SetActivity(static_cast<ThreadActivity>(activity)); return RESULT_SUCCESS; } @@ -828,7 +994,7 @@ static ResultCode GetThreadContext(VAddr thread_context, Handle handle) { if (thread == GetCurrentThread()) { LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); - return ERR_ALREADY_REGISTERED; + return ERR_BUSY; } Core::ARM_Interface::ThreadContext ctx = thread->GetContext(); @@ -989,10 +1155,9 @@ static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 return shared_memory->Unmap(*current_process, addr); } -/// Query process memory -static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_info*/, - Handle process_handle, u64 addr) { - LOG_TRACE(Kernel_SVC, "called process=0x{:08X} addr={:X}", process_handle, addr); +static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address, + Handle process_handle, VAddr address) { + LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address); const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); SharedPtr<Process> process = handle_table.Get<Process>(process_handle); if (!process) { @@ -1000,26 +1165,34 @@ static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_i process_handle); return ERR_INVALID_HANDLE; } - auto vma = process->VMManager().FindVMA(addr); - memory_info->attributes = 0; - if (vma == process->VMManager().vma_map.end()) { - memory_info->base_address = 0; - memory_info->permission = static_cast<u32>(VMAPermission::None); - memory_info->size = 0; - memory_info->type = static_cast<u32>(MemoryState::Unmapped); - } else { - memory_info->base_address = vma->second.base; - memory_info->permission = static_cast<u32>(vma->second.permissions); - memory_info->size = vma->second.size; - memory_info->type = static_cast<u32>(vma->second.meminfo_state); - } + + const auto& vm_manager = process->VMManager(); + const MemoryInfo memory_info = vm_manager.QueryMemory(address); + + Memory::Write64(memory_info_address, memory_info.base_address); + Memory::Write64(memory_info_address + 8, memory_info.size); + Memory::Write32(memory_info_address + 16, memory_info.state); + Memory::Write32(memory_info_address + 20, memory_info.attributes); + Memory::Write32(memory_info_address + 24, memory_info.permission); + Memory::Write32(memory_info_address + 32, memory_info.ipc_ref_count); + Memory::Write32(memory_info_address + 28, memory_info.device_ref_count); + Memory::Write32(memory_info_address + 36, 0); + + // Page info appears to be currently unused by the kernel and is always set to zero. 
+ Memory::Write32(page_info_address, 0); + return RESULT_SUCCESS; } -/// Query memory -static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, VAddr addr) { - LOG_TRACE(Kernel_SVC, "called, addr={:X}", addr); - return QueryProcessMemory(memory_info, page_info, CurrentProcess, addr); +static ResultCode QueryMemory(VAddr memory_info_address, VAddr page_info_address, + VAddr query_address) { + LOG_TRACE(Kernel_SVC, + "called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, " + "query_address=0x{:016X}", + memory_info_address, page_info_address, query_address); + + return QueryProcessMemory(memory_info_address, page_info_address, CurrentProcess, + query_address); } /// Exits the current process @@ -1042,35 +1215,41 @@ static void ExitProcess() { static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, VAddr stack_top, u32 priority, s32 processor_id) { LOG_TRACE(Kernel_SVC, - "called entrypoint=0x{:08X} ({}), arg=0x{:08X}, stacktop=0x{:08X}, " + "called entrypoint=0x{:08X}, arg=0x{:08X}, stacktop=0x{:08X}, " "threadpriority=0x{:08X}, processorid=0x{:08X} : created handle=0x{:08X}", entry_point, arg, stack_top, priority, processor_id, *out_handle); - if (priority > THREADPRIO_LOWEST) { - LOG_ERROR(Kernel_SVC, "An invalid priority was specified, expected {} but got {}", - THREADPRIO_LOWEST, priority); - return ERR_INVALID_THREAD_PRIORITY; - } - auto* const current_process = Core::CurrentProcess(); - if (processor_id == THREADPROCESSORID_DEFAULT) { - // Set the target CPU to the one specified in the process' exheader. - processor_id = current_process->GetDefaultProcessorID(); - ASSERT(processor_id != THREADPROCESSORID_DEFAULT); + if (processor_id == THREADPROCESSORID_IDEAL) { + // Set the target CPU to the one specified by the process. + processor_id = current_process->GetIdealCore(); + ASSERT(processor_id != THREADPROCESSORID_IDEAL); } - switch (processor_id) { - case THREADPROCESSORID_0: - case THREADPROCESSORID_1: - case THREADPROCESSORID_2: - case THREADPROCESSORID_3: - break; - default: + if (processor_id < THREADPROCESSORID_0 || processor_id > THREADPROCESSORID_3) { LOG_ERROR(Kernel_SVC, "Invalid thread processor ID: {}", processor_id); return ERR_INVALID_PROCESSOR_ID; } + const u64 core_mask = current_process->GetCoreMask(); + if ((core_mask | (1ULL << processor_id)) != core_mask) { + LOG_ERROR(Kernel_SVC, "Invalid thread core specified ({})", processor_id); + return ERR_INVALID_PROCESSOR_ID; + } + + if (priority > THREADPRIO_LOWEST) { + LOG_ERROR(Kernel_SVC, + "Invalid thread priority specified ({}). 
Must be within the range 0-63", + priority); + return ERR_INVALID_THREAD_PRIORITY; + } + + if (((1ULL << priority) & current_process->GetPriorityMask()) == 0) { + LOG_ERROR(Kernel_SVC, "Invalid thread priority specified ({})", priority); + return ERR_INVALID_THREAD_PRIORITY; + } + const std::string name = fmt::format("thread-{:X}", entry_point); auto& kernel = Core::System::GetInstance().Kernel(); CASCADE_RESULT(SharedPtr<Thread> thread, @@ -1106,7 +1285,10 @@ static ResultCode StartThread(Handle thread_handle) { ASSERT(thread->GetStatus() == ThreadStatus::Dormant); thread->ResumeFromWait(); - Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule(); + + if (thread->GetStatus() == ThreadStatus::Ready) { + Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule(); + } return RESULT_SUCCESS; } @@ -1123,18 +1305,38 @@ static void ExitThread() { static void SleepThread(s64 nanoseconds) { LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds); - // Don't attempt to yield execution if there are no available threads to run, - // this way we avoid a useless reschedule to the idle thread. - if (nanoseconds == 0 && !Core::System::GetInstance().CurrentScheduler().HaveReadyThreads()) - return; + enum class SleepType : s64 { + YieldWithoutLoadBalancing = 0, + YieldWithLoadBalancing = -1, + YieldAndWaitForLoadBalancing = -2, + }; - // Sleep current thread and check for next thread to schedule - WaitCurrentThread_Sleep(); + if (nanoseconds <= 0) { + auto& scheduler{Core::System::GetInstance().CurrentScheduler()}; + switch (static_cast<SleepType>(nanoseconds)) { + case SleepType::YieldWithoutLoadBalancing: + scheduler.YieldWithoutLoadBalancing(GetCurrentThread()); + break; + case SleepType::YieldWithLoadBalancing: + scheduler.YieldWithLoadBalancing(GetCurrentThread()); + break; + case SleepType::YieldAndWaitForLoadBalancing: + scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread()); + break; + default: + UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); + } + } else { + // Sleep current thread and check for next thread to schedule + WaitCurrentThread_Sleep(); - // Create an event to wake the thread up after the specified nanosecond delay has passed - GetCurrentThread()->WakeAfterDelay(nanoseconds); + // Create an event to wake the thread up after the specified nanosecond delay has passed + GetCurrentThread()->WakeAfterDelay(nanoseconds); + } - Core::System::GetInstance().PrepareReschedule(); + // Reschedule all CPU cores + for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) + Core::System::GetInstance().CpuCore(i).PrepareReschedule(); } /// Wait process wide key atomic @@ -1356,17 +1558,24 @@ static ResultCode CloseHandle(Handle handle) { return handle_table.Close(handle); } -/// Reset an event +/// Clears the signaled state of an event or process.
static ResultCode ResetSignal(Handle handle) { LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle); const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); - auto event = handle_table.Get<Event>(handle); - ASSERT(event != nullptr); + auto event = handle_table.Get<ReadableEvent>(handle); + if (event) { + return event->Reset(); + } - event->Clear(); - return RESULT_SUCCESS; + auto process = handle_table.Get<Process>(handle); + if (process) { + return process->ClearSignalState(); + } + + LOG_ERROR(Kernel_SVC, "Invalid handle (0x{:08X})", handle); + return ERR_INVALID_HANDLE; } /// Creates a TransferMemory object @@ -1399,9 +1608,9 @@ static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32 } auto& kernel = Core::System::GetInstance().Kernel(); - auto& handle_table = Core::CurrentProcess()->GetHandleTable(); - const auto shared_mem_handle = SharedMemory::Create( - kernel, handle_table.Get<Process>(CurrentProcess), size, perms, perms, addr); + auto process = kernel.CurrentProcess(); + auto& handle_table = process->GetHandleTable(); + const auto shared_mem_handle = SharedMemory::Create(kernel, process, size, perms, perms, addr); CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle)); return RESULT_SUCCESS; @@ -1436,13 +1645,13 @@ static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) { return ERR_INVALID_HANDLE; } - if (core == static_cast<u32>(THREADPROCESSORID_DEFAULT)) { - const u8 default_processor_id = thread->GetOwnerProcess()->GetDefaultProcessorID(); + if (core == static_cast<u32>(THREADPROCESSORID_IDEAL)) { + const u8 ideal_cpu_core = thread->GetOwnerProcess()->GetIdealCore(); - ASSERT(default_processor_id != static_cast<u8>(THREADPROCESSORID_DEFAULT)); + ASSERT(ideal_cpu_core != static_cast<u8>(THREADPROCESSORID_IDEAL)); - // Set the target CPU to the one specified in the process' exheader. - core = default_processor_id; + // Set the target CPU to the ideal core specified by the process. + core = ideal_cpu_core; mask = 1ULL << core; } @@ -1511,26 +1720,75 @@ static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permiss } auto& kernel = Core::System::GetInstance().Kernel(); - auto& handle_table = Core::CurrentProcess()->GetHandleTable(); - auto shared_mem_handle = - SharedMemory::Create(kernel, handle_table.Get<Process>(KernelHandle::CurrentProcess), size, - local_perms, remote_perms); + auto process = kernel.CurrentProcess(); + auto& handle_table = process->GetHandleTable(); + auto shared_mem_handle = SharedMemory::Create(kernel, process, size, local_perms, remote_perms); CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle)); return RESULT_SUCCESS; } +static ResultCode CreateEvent(Handle* write_handle, Handle* read_handle) { + LOG_DEBUG(Kernel_SVC, "called"); + + auto& kernel = Core::System::GetInstance().Kernel(); + const auto [readable_event, writable_event] = + WritableEvent::CreateEventPair(kernel, ResetType::Sticky, "CreateEvent"); + + HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable(); + + const auto write_create_result = handle_table.Create(writable_event); + if (write_create_result.Failed()) { + return write_create_result.Code(); + } + *write_handle = *write_create_result; + + const auto read_create_result = handle_table.Create(readable_event); + if (read_create_result.Failed()) { + handle_table.Close(*write_create_result); + return read_create_result.Code(); + } + *read_handle = *read_create_result; + + LOG_DEBUG(Kernel_SVC, + "successful. 
Writable event handle=0x{:08X}, Readable event handle=0x{:08X}", + *write_create_result, *read_create_result); + return RESULT_SUCCESS; +} + static ResultCode ClearEvent(Handle handle) { LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle); const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); - SharedPtr<Event> evt = handle_table.Get<Event>(handle); - if (evt == nullptr) { - LOG_ERROR(Kernel_SVC, "Event handle does not exist, handle=0x{:08X}", handle); + + auto writable_event = handle_table.Get<WritableEvent>(handle); + if (writable_event) { + writable_event->Clear(); + return RESULT_SUCCESS; + } + + auto readable_event = handle_table.Get<ReadableEvent>(handle); + if (readable_event) { + readable_event->Clear(); + return RESULT_SUCCESS; + } + + LOG_ERROR(Kernel_SVC, "Event handle does not exist, handle=0x{:08X}", handle); + return ERR_INVALID_HANDLE; +} + +static ResultCode SignalEvent(Handle handle) { + LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle); + + HandleTable& handle_table = Core::CurrentProcess()->GetHandleTable(); + auto writable_event = handle_table.Get<WritableEvent>(handle); + + if (!writable_event) { + LOG_ERROR(Kernel_SVC, "Non-existent writable event handle used (0x{:08X})", handle); return ERR_INVALID_HANDLE; } - evt->Clear(); + writable_event->Signal(); return RESULT_SUCCESS; } @@ -1669,7 +1927,7 @@ static const FunctionDef SVC_Table[] = { {0x0E, SvcWrap<GetThreadCoreMask>, "GetThreadCoreMask"}, {0x0F, SvcWrap<SetThreadCoreMask>, "SetThreadCoreMask"}, {0x10, SvcWrap<GetCurrentProcessorNumber>, "GetCurrentProcessorNumber"}, - {0x11, nullptr, "SignalEvent"}, + {0x11, SvcWrap<SignalEvent>, "SignalEvent"}, {0x12, SvcWrap<ClearEvent>, "ClearEvent"}, {0x13, SvcWrap<MapSharedMemory>, "MapSharedMemory"}, {0x14, SvcWrap<UnmapSharedMemory>, "UnmapSharedMemory"}, @@ -1721,7 +1979,7 @@ static const FunctionDef SVC_Table[] = { {0x42, nullptr, "ReplyAndReceiveLight"}, {0x43, nullptr, "ReplyAndReceive"}, {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"}, - {0x45, nullptr, "CreateEvent"}, + {0x45, SvcWrap<CreateEvent>, "CreateEvent"}, {0x46, nullptr, "Unknown"}, {0x47, nullptr, "Unknown"}, {0x48, nullptr, "MapPhysicalMemoryUnsafe"}, @@ -1770,7 +2028,7 @@ static const FunctionDef SVC_Table[] = { {0x73, nullptr, "SetProcessMemoryPermission"}, {0x74, nullptr, "MapProcessMemory"}, {0x75, nullptr, "UnmapProcessMemory"}, - {0x76, nullptr, "QueryProcessMemory"}, + {0x76, SvcWrap<QueryProcessMemory>, "QueryProcessMemory"}, {0x77, nullptr, "MapProcessCodeMemory"}, {0x78, nullptr, "UnmapProcessCodeMemory"}, {0x79, nullptr, "CreateProcess"}, diff --git a/src/core/hle/kernel/svc.h b/src/core/hle/kernel/svc.h index b06aac4ec..c37ae0f98 100644 --- a/src/core/hle/kernel/svc.h +++ b/src/core/hle/kernel/svc.h @@ -8,22 +8,6 @@ namespace Kernel { -struct MemoryInfo { - u64 base_address; - u64 size; - u32 type; - u32 attributes; - u32 permission; - u32 device_refcount; - u32 ipc_refcount; - INSERT_PADDING_WORDS(1); -}; -static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size."); - -struct PageInfo { - u64 flags; -}; - void CallSVC(u32 immediate); } // namespace Kernel diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index fa1116624..2a2c2c5ea 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -7,9 +7,7 @@ #include "common/common_types.h" #include "core/arm/arm_interface.h" #include "core/core.h" -#include "core/hle/kernel/svc.h" #include "core/hle/result.h" -#include "core/memory.h" namespace Kernel { 
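The new svcCreateEvent above returns a result code plus two handles, which is why the svc_wrap.h hunk below adds a wrapper for ResultCode(u32*, u32*) that copies both outputs into guest registers. A minimal standalone sketch of that marshalling pattern, assuming a plain array as a stand-in for the guest register file (Wrap, FakeCreateEvent and regs are illustrative names, not emulator APIs):

#include <array>
#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

std::array<u64, 31> regs{}; // stand-in for the guest's general-purpose registers

// Stand-in for an SVC with two output parameters, like CreateEvent.
u32 FakeCreateEvent(u32* write_handle, u32* read_handle) {
    *write_handle = 0xAB01; // hypothetical handle values
    *read_handle = 0xAB02;
    return 0; // RESULT_SUCCESS raw value
}

template <u32 func(u32*, u32*)>
void Wrap() {
    u32 param_1 = 0;
    u32 param_2 = 0;
    const u32 retval = func(&param_1, &param_2);
    regs[1] = param_1; // first output surfaces in W1
    regs[2] = param_2; // second output surfaces in W2
    regs[0] = retval;  // result code goes back through W0 (FuncReturn in the real wrapper)
}

int main() {
    Wrap<FakeCreateEvent>();
    return (regs[0] == 0 && regs[1] == 0xAB01 && regs[2] == 0xAB02) ? 0 : 1;
}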
@@ -59,10 +57,31 @@ void SvcWrap() { FuncReturn(retval); } +template <ResultCode func(u32*, u32*)> +void SvcWrap() { + u32 param_1 = 0; + u32 param_2 = 0; + const u32 retval = func(&param_1, &param_2).raw; + + auto& arm_interface = Core::CurrentArmInterface(); + arm_interface.SetReg(1, param_1); + arm_interface.SetReg(2, param_2); + + FuncReturn(retval); +} + template <ResultCode func(u32*, u64)> void SvcWrap() { u32 param_1 = 0; - u32 retval = func(&param_1, Param(1)).raw; + const u32 retval = func(&param_1, Param(1)).raw; + Core::CurrentArmInterface().SetReg(1, param_1); + FuncReturn(retval); +} + +template <ResultCode func(u64*, u32)> +void SvcWrap() { + u64 param_1 = 0; + const u32 retval = func(&param_1, static_cast<u32>(Param(1))).raw; Core::CurrentArmInterface().SetReg(1, param_1); FuncReturn(retval); } @@ -116,7 +135,12 @@ void SvcWrap() { template <ResultCode func(u64, u64, u32, u32)> void SvcWrap() { FuncReturn( - func(Param(0), Param(1), static_cast<u32>(Param(3)), static_cast<u32>(Param(3))).raw); + func(Param(0), Param(1), static_cast<u32>(Param(2)), static_cast<u32>(Param(3))).raw); +} + +template <ResultCode func(u64, u64, u32, u64)> +void SvcWrap() { + FuncReturn(func(Param(0), Param(1), static_cast<u32>(Param(2)), Param(3)).raw); } template <ResultCode func(u32, u64, u32)> @@ -178,21 +202,6 @@ void SvcWrap() { FuncReturn(retval); } -template <ResultCode func(MemoryInfo*, PageInfo*, u64)> -void SvcWrap() { - MemoryInfo memory_info = {}; - PageInfo page_info = {}; - u32 retval = func(&memory_info, &page_info, Param(2)).raw; - - Memory::Write64(Param(0), memory_info.base_address); - Memory::Write64(Param(0) + 8, memory_info.size); - Memory::Write32(Param(0) + 16, memory_info.type); - Memory::Write32(Param(0) + 20, memory_info.attributes); - Memory::Write32(Param(0) + 24, memory_info.permission); - - FuncReturn(retval); -} - template <ResultCode func(u32*, u64, u64, u32)> void SvcWrap() { u32 param_1 = 0; diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 4ffb76818..d3984dfc4 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -12,7 +12,6 @@ #include "common/assert.h" #include "common/common_types.h" #include "common/logging/log.h" -#include "common/math_util.h" #include "common/thread_queue_list.h" #include "core/arm/arm_interface.h" #include "core/core.h" @@ -50,7 +49,7 @@ void Thread::Stop() { // Clean up thread from ready queue // This is only needed when the thread is terminated forcefully (SVC TerminateProcess) - if (status == ThreadStatus::Ready) { + if (status == ThreadStatus::Ready || status == ThreadStatus::Paused) { scheduler->UnscheduleThread(this, current_priority); } @@ -140,6 +139,11 @@ void Thread::ResumeFromWait() { wakeup_callback = nullptr; + if (activity == ThreadActivity::Paused) { + status = ThreadStatus::Paused; + return; + } + status = ThreadStatus::Ready; ChangeScheduler(); @@ -158,6 +162,9 @@ static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, VAdd context.cpu_registers[0] = arg; context.pc = entry_point; context.sp = stack_top; + // TODO(merry): Perform a hardware test to determine the below value.
+ // AHP = 0, DN = 1, FTZ = 1, RMode = Round towards zero + context.fpcr = 0x03C00000; } ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name, VAddr entry_point, @@ -224,29 +231,6 @@ void Thread::BoostPriority(u32 priority) { current_priority = priority; } -SharedPtr<Thread> SetupMainThread(KernelCore& kernel, VAddr entry_point, u32 priority, - Process& owner_process) { - // Setup page table so we can write to memory - SetCurrentPageTable(&owner_process.VMManager().page_table); - - // Initialize new "main" thread - const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress(); - auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, THREADPROCESSORID_0, - stack_top, owner_process); - - SharedPtr<Thread> thread = std::move(thread_res).Unwrap(); - - // Register 1 must be a handle to the main thread - const Handle guest_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); - thread->SetGuestHandle(guest_handle); - thread->GetContext().cpu_registers[1] = guest_handle; - - // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires - thread->ResumeFromWait(); - - return thread; -} - void Thread::SetWaitSynchronizationResult(ResultCode result) { context.cpu_registers[0] = result.raw; } @@ -388,6 +372,23 @@ bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> t return wakeup_callback(reason, std::move(thread), std::move(object), index); } +void Thread::SetActivity(ThreadActivity value) { + activity = value; + + if (value == ThreadActivity::Paused) { + // Set status if not waiting + if (status == ThreadStatus::Ready) { + status = ThreadStatus::Paused; + } else if (status == ThreadStatus::Running) { + status = ThreadStatus::Paused; + Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule(); + } + } else if (status == ThreadStatus::Paused) { + // Ready to reschedule + ResumeFromWait(); + } +} + //////////////////////////////////////////////////////////////////////////////////////////////////// /** diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index d384d50db..c48b21aba 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -26,15 +26,16 @@ enum ThreadPriority : u32 { THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps THREADPRIO_LOWEST = 63, ///< Lowest thread priority + THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities. }; enum ThreadProcessorId : s32 { - THREADPROCESSORID_DEFAULT = -2, ///< Run thread on default core specified by exheader - THREADPROCESSORID_0 = 0, ///< Run thread on core 0 - THREADPROCESSORID_1 = 1, ///< Run thread on core 1 - THREADPROCESSORID_2 = 2, ///< Run thread on core 2 - THREADPROCESSORID_3 = 3, ///< Run thread on core 3 - THREADPROCESSORID_MAX = 4, ///< Processor ID must be less than this + THREADPROCESSORID_IDEAL = -2, ///< Run thread on the ideal core specified by the process. 
+ THREADPROCESSORID_0 = 0, ///< Run thread on core 0 + THREADPROCESSORID_1 = 1, ///< Run thread on core 1 + THREADPROCESSORID_2 = 2, ///< Run thread on core 2 + THREADPROCESSORID_3 = 3, ///< Run thread on core 3 + THREADPROCESSORID_MAX = 4, ///< Processor ID must be less than this /// Allowed CPU mask THREADPROCESSORID_DEFAULT_MASK = (1 << THREADPROCESSORID_0) | (1 << THREADPROCESSORID_1) | @@ -44,6 +45,7 @@ enum ThreadProcessorId : s32 { enum class ThreadStatus { Running, ///< Currently running Ready, ///< Ready to run + Paused, ///< Paused by SetThreadActivity or debug WaitHLEEvent, ///< Waiting for hle event to finish WaitSleep, ///< Waiting due to a SleepThread SVC WaitIPC, ///< Waiting for the reply from an IPC request @@ -60,6 +62,11 @@ enum class ThreadWakeupReason { Timeout // The thread was woken up due to a wait timeout. }; +enum class ThreadActivity : u32 { + Normal = 0, + Paused = 1, +}; + class Thread final : public WaitObject { public: using TLSMemory = std::vector<u8>; @@ -150,7 +157,7 @@ public: * Gets the thread's thread ID * @return The thread's ID */ - u32 GetThreadID() const { + u64 GetThreadID() const { return thread_id; } @@ -370,6 +377,12 @@ public: return affinity_mask; } + ThreadActivity GetActivity() const { + return activity; + } + + void SetActivity(ThreadActivity value); + private: explicit Thread(KernelCore& kernel); ~Thread() override; @@ -378,7 +391,7 @@ private: Core::ARM_Interface::ThreadContext context{}; - u32 thread_id = 0; + u64 thread_id = 0; ThreadStatus status = ThreadStatus::Dormant; @@ -438,18 +451,9 @@ private: TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>(); std::string name; -}; -/** - * Sets up the primary application thread - * @param kernel The kernel instance to create the main thread under. 
- * @param entry_point The address at which the thread should start execution - * @param priority The priority to give the main thread - * @param owner_process The parent process for the main thread - * @return A shared pointer to the main thread - */ -SharedPtr<Thread> SetupMainThread(KernelCore& kernel, VAddr entry_point, u32 priority, - Process& owner_process); + ThreadActivity activity = ThreadActivity::Normal; +}; /** * Gets the current thread diff --git a/src/core/hle/kernel/timer.cpp b/src/core/hle/kernel/timer.cpp index 6957b16e0..2c4f50e2b 100644 --- a/src/core/hle/kernel/timer.cpp +++ b/src/core/hle/kernel/timer.cpp @@ -68,9 +68,6 @@ void Timer::Clear() { void Timer::WakeupAllWaitingThreads() { WaitObject::WakeupAllWaitingThreads(); - - if (reset_type == ResetType::Pulse) - signaled = false; } void Timer::Signal(int cycles_late) { diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 100f8f6bf..10ad94aa6 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp @@ -25,19 +25,19 @@ static const char* GetMemoryStateName(MemoryState state) { "CodeMutable", "Heap", "Shared", "Unknown1", "ModuleCodeStatic", "ModuleCodeMutable", - "IpcBuffer0", "Mapped", + "IpcBuffer0", "Stack", "ThreadLocal", "TransferMemoryIsolated", "TransferMemory", "ProcessMemory", - "Unknown2", "IpcBuffer1", + "Inaccessible", "IpcBuffer1", "IpcBuffer3", "KernelStack", }; - return names[static_cast<int>(state)]; + return names[ToSvcMemoryState(state)]; } bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { ASSERT(base + size == next.base); - if (permissions != next.permissions || meminfo_state != next.meminfo_state || + if (permissions != next.permissions || state != next.state || attribute != next.attribute || type != next.type) { return false; } @@ -87,6 +87,10 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const { } } +bool VMManager::IsValidHandle(VMAHandle handle) const { + return handle != vma_map.cend(); +} + ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block, std::size_t offset, u64 size, @@ -111,7 +115,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, final_vma.type = VMAType::AllocatedMemoryBlock; final_vma.permissions = VMAPermission::ReadWrite; - final_vma.meminfo_state = state; + final_vma.state = state; final_vma.backing_block = std::move(block); final_vma.offset = offset; UpdatePageTableForVMA(final_vma); @@ -136,7 +140,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me final_vma.type = VMAType::BackingMemory; final_vma.permissions = VMAPermission::ReadWrite; - final_vma.meminfo_state = state; + final_vma.state = state; final_vma.backing_memory = memory; UpdatePageTableForVMA(final_vma); @@ -173,7 +177,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u6 final_vma.type = VMAType::MMIO; final_vma.permissions = VMAPermission::ReadWrite; - final_vma.meminfo_state = state; + final_vma.state = state; final_vma.paddr = paddr; final_vma.mmio_handler = std::move(mmio_handler); UpdatePageTableForVMA(final_vma); @@ -185,7 +189,8 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) { VirtualMemoryArea& vma = vma_handle->second; vma.type = VMAType::Free; vma.permissions = VMAPermission::None; - vma.meminfo_state = MemoryState::Unmapped; + vma.state = MemoryState::Unmapped; + vma.attribute = MemoryAttribute::None; vma.backing_block = nullptr; 
vma.offset = 0; @@ -298,6 +303,54 @@ ResultCode VMManager::HeapFree(VAddr target, u64 size) { return RESULT_SUCCESS; } +MemoryInfo VMManager::QueryMemory(VAddr address) const { + const auto vma = FindVMA(address); + MemoryInfo memory_info{}; + + if (IsValidHandle(vma)) { + memory_info.base_address = vma->second.base; + memory_info.attributes = ToSvcMemoryAttribute(vma->second.attribute); + memory_info.permission = static_cast<u32>(vma->second.permissions); + memory_info.size = vma->second.size; + memory_info.state = ToSvcMemoryState(vma->second.state); + } else { + memory_info.base_address = address_space_end; + memory_info.permission = static_cast<u32>(VMAPermission::None); + memory_info.size = 0 - address_space_end; + memory_info.state = static_cast<u32>(MemoryState::Inaccessible); + } + + return memory_info; +} + +ResultCode VMManager::SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask, + MemoryAttribute attribute) { + constexpr auto ignore_mask = MemoryAttribute::Uncached | MemoryAttribute::DeviceMapped; + constexpr auto attribute_mask = ~ignore_mask; + + const auto result = CheckRangeState( + address, size, MemoryState::FlagUncached, MemoryState::FlagUncached, VMAPermission::None, + VMAPermission::None, attribute_mask, MemoryAttribute::None, ignore_mask); + + if (result.Failed()) { + return result.Code(); + } + + const auto [prev_state, prev_permissions, prev_attributes] = *result; + const auto new_attribute = (prev_attributes & ~mask) | (mask & attribute); + + const auto carve_result = CarveVMARange(address, size); + if (carve_result.Failed()) { + return carve_result.Code(); + } + + auto vma_iter = *carve_result; + vma_iter->second.attribute = new_attribute; + + MergeAdjacent(vma_iter); + return RESULT_SUCCESS; +} + ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) { const auto vma = FindVMA(src_addr); @@ -341,7 +394,7 @@ void VMManager::LogLayout() const { (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-', (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-', (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-', - GetMemoryStateName(vma.meminfo_state)); + GetMemoryStateName(vma.state)); } } @@ -568,6 +621,66 @@ void VMManager::ClearPageTable() { Memory::PageType::Unmapped); } +VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask, + MemoryState state, VMAPermission permission_mask, + VMAPermission permissions, + MemoryAttribute attribute_mask, + MemoryAttribute attribute, + MemoryAttribute ignore_mask) const { + auto iter = FindVMA(address); + + // If we don't have a valid VMA handle at this point, then it means this is + // being called with an address outside of the address space, which is definitely + // indicative of a bug, as this function only operates on mapped memory regions. + DEBUG_ASSERT(IsValidHandle(iter)); + + const VAddr end_address = address + size - 1; + const MemoryAttribute initial_attributes = iter->second.attribute; + const VMAPermission initial_permissions = iter->second.permissions; + const MemoryState initial_state = iter->second.state; + + while (true) { + // The iterator should be valid throughout the traversal. Hitting the end of + // the mapped VMA regions is unquestionably indicative of a bug. 
+ DEBUG_ASSERT(IsValidHandle(iter)); + + const auto& vma = iter->second; + + if (vma.state != initial_state) { + return ERR_INVALID_ADDRESS_STATE; + } + + if ((vma.state & state_mask) != state) { + return ERR_INVALID_ADDRESS_STATE; + } + + if (vma.permissions != initial_permissions) { + return ERR_INVALID_ADDRESS_STATE; + } + + if ((vma.permissions & permission_mask) != permissions) { + return ERR_INVALID_ADDRESS_STATE; + } + + if ((vma.attribute | ignore_mask) != (initial_attributes | ignore_mask)) { + return ERR_INVALID_ADDRESS_STATE; + } + + if ((vma.attribute & attribute_mask) != attribute) { + return ERR_INVALID_ADDRESS_STATE; + } + + if (end_address <= vma.EndAddress()) { + break; + } + + ++iter; + } + + return MakeResult( + std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask)); +} + u64 VMManager::GetTotalMemoryUsage() const { LOG_WARNING(Kernel, "(STUBBED) called"); return 0xF8000000; diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index d522404fe..6091533bc 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h @@ -6,6 +6,7 @@ #include <map> #include <memory> +#include <tuple> #include <vector> #include "common/common_types.h" #include "core/hle/result.h" @@ -43,26 +44,211 @@ enum class VMAPermission : u8 { ReadWriteExecute = Read | Write | Execute, }; -/// Set of values returned in MemoryInfo.state by svcQueryMemory. +constexpr VMAPermission operator|(VMAPermission lhs, VMAPermission rhs) { + return static_cast<VMAPermission>(u32(lhs) | u32(rhs)); +} + +constexpr VMAPermission operator&(VMAPermission lhs, VMAPermission rhs) { + return static_cast<VMAPermission>(u32(lhs) & u32(rhs)); +} + +constexpr VMAPermission operator^(VMAPermission lhs, VMAPermission rhs) { + return static_cast<VMAPermission>(u32(lhs) ^ u32(rhs)); +} + +constexpr VMAPermission operator~(VMAPermission permission) { + return static_cast<VMAPermission>(~u32(permission)); +} + +constexpr VMAPermission& operator|=(VMAPermission& lhs, VMAPermission rhs) { + lhs = lhs | rhs; + return lhs; +} + +constexpr VMAPermission& operator&=(VMAPermission& lhs, VMAPermission rhs) { + lhs = lhs & rhs; + return lhs; +} + +constexpr VMAPermission& operator^=(VMAPermission& lhs, VMAPermission rhs) { + lhs = lhs ^ rhs; + return lhs; +} + +/// Attribute flags that can be applied to a VMA +enum class MemoryAttribute : u32 { + Mask = 0xFF, + + /// No particular qualities + None = 0, + /// Memory locked/borrowed for use. e.g. This would be used by transfer memory. + Locked = 1, + /// Memory locked for use by IPC-related internals. + LockedForIPC = 2, + /// Mapped as part of the device address space. 
+ DeviceMapped = 4, + /// Uncached memory + Uncached = 8, +}; + +constexpr MemoryAttribute operator|(MemoryAttribute lhs, MemoryAttribute rhs) { + return static_cast<MemoryAttribute>(u32(lhs) | u32(rhs)); +} + +constexpr MemoryAttribute operator&(MemoryAttribute lhs, MemoryAttribute rhs) { + return static_cast<MemoryAttribute>(u32(lhs) & u32(rhs)); +} + +constexpr MemoryAttribute operator^(MemoryAttribute lhs, MemoryAttribute rhs) { + return static_cast<MemoryAttribute>(u32(lhs) ^ u32(rhs)); +} + +constexpr MemoryAttribute operator~(MemoryAttribute attribute) { + return static_cast<MemoryAttribute>(~u32(attribute)); +} + +constexpr MemoryAttribute& operator|=(MemoryAttribute& lhs, MemoryAttribute rhs) { + lhs = lhs | rhs; + return lhs; +} + +constexpr MemoryAttribute& operator&=(MemoryAttribute& lhs, MemoryAttribute rhs) { + lhs = lhs & rhs; + return lhs; +} + +constexpr MemoryAttribute& operator^=(MemoryAttribute& lhs, MemoryAttribute rhs) { + lhs = lhs ^ rhs; + return lhs; +} + +constexpr u32 ToSvcMemoryAttribute(MemoryAttribute attribute) { + return static_cast<u32>(attribute & MemoryAttribute::Mask); +} + +// clang-format off +/// Represents memory states and any relevant flags, as used by the kernel. +/// svcQueryMemory interprets these by masking away all but the first eight +/// bits when storing memory state into a MemoryInfo instance. enum class MemoryState : u32 { - Unmapped = 0x0, - Io = 0x1, - Normal = 0x2, - CodeStatic = 0x3, - CodeMutable = 0x4, - Heap = 0x5, - Shared = 0x6, - ModuleCodeStatic = 0x8, - ModuleCodeMutable = 0x9, - IpcBuffer0 = 0xA, - Mapped = 0xB, - ThreadLocal = 0xC, - TransferMemoryIsolated = 0xD, - TransferMemory = 0xE, - ProcessMemory = 0xF, - IpcBuffer1 = 0x11, - IpcBuffer3 = 0x12, - KernelStack = 0x13, + Mask = 0xFF, + FlagProtect = 1U << 8, + FlagDebug = 1U << 9, + FlagIPC0 = 1U << 10, + FlagIPC3 = 1U << 11, + FlagIPC1 = 1U << 12, + FlagMapped = 1U << 13, + FlagCode = 1U << 14, + FlagAlias = 1U << 15, + FlagModule = 1U << 16, + FlagTransfer = 1U << 17, + FlagQueryPhysicalAddressAllowed = 1U << 18, + FlagSharedDevice = 1U << 19, + FlagSharedDeviceAligned = 1U << 20, + FlagIPCBuffer = 1U << 21, + FlagMemoryPoolAllocated = 1U << 22, + FlagMapProcess = 1U << 23, + FlagUncached = 1U << 24, + FlagCodeMemory = 1U << 25, + + // Convenience flag sets to reduce repetition + IPCFlags = FlagIPC0 | FlagIPC3 | FlagIPC1, + + CodeFlags = FlagDebug | IPCFlags | FlagMapped | FlagCode | FlagQueryPhysicalAddressAllowed | + FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, + + DataFlags = FlagProtect | IPCFlags | FlagMapped | FlagAlias | FlagTransfer | + FlagQueryPhysicalAddressAllowed | FlagSharedDevice | FlagSharedDeviceAligned | + FlagMemoryPoolAllocated | FlagIPCBuffer | FlagUncached, + + Unmapped = 0x00, + Io = 0x01 | FlagMapped, + Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed, + CodeStatic = 0x03 | CodeFlags | FlagMapProcess, + CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory, + Heap = 0x05 | DataFlags | FlagCodeMemory, + Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated, + ModuleCodeStatic = 0x08 | CodeFlags | FlagModule | FlagMapProcess, + ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory, + + IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated | + IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned, + + Stack = 0x0B | FlagMapped | IPCFlags | FlagQueryPhysicalAddressAllowed | + FlagSharedDevice | FlagSharedDeviceAligned | 
FlagMemoryPoolAllocated, + + ThreadLocal = 0x0C | FlagMapped | FlagMemoryPoolAllocated, + + TransferMemoryIsolated = 0x0D | IPCFlags | FlagMapped | FlagQueryPhysicalAddressAllowed | + FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated | + FlagUncached, + + TransferMemory = 0x0E | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed | + FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, + + ProcessMemory = 0x0F | FlagIPC3 | FlagIPC1 | FlagMapped | FlagMemoryPoolAllocated, + + // Used to signify an inaccessible or invalid memory region with memory queries + Inaccessible = 0x10, + + IpcBuffer1 = 0x11 | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed | + FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, + + IpcBuffer3 = 0x12 | FlagIPC3 | FlagMapped | FlagQueryPhysicalAddressAllowed | + FlagSharedDeviceAligned | FlagMemoryPoolAllocated, + + KernelStack = 0x13 | FlagMapped, +}; +// clang-format on + +constexpr MemoryState operator|(MemoryState lhs, MemoryState rhs) { + return static_cast<MemoryState>(u32(lhs) | u32(rhs)); +} + +constexpr MemoryState operator&(MemoryState lhs, MemoryState rhs) { + return static_cast<MemoryState>(u32(lhs) & u32(rhs)); +} + +constexpr MemoryState operator^(MemoryState lhs, MemoryState rhs) { + return static_cast<MemoryState>(u32(lhs) ^ u32(rhs)); +} + +constexpr MemoryState operator~(MemoryState lhs) { + return static_cast<MemoryState>(~u32(lhs)); +} + +constexpr MemoryState& operator|=(MemoryState& lhs, MemoryState rhs) { + lhs = lhs | rhs; + return lhs; +} + +constexpr MemoryState& operator&=(MemoryState& lhs, MemoryState rhs) { + lhs = lhs & rhs; + return lhs; +} + +constexpr MemoryState& operator^=(MemoryState& lhs, MemoryState rhs) { + lhs = lhs ^ rhs; + return lhs; +} + +constexpr u32 ToSvcMemoryState(MemoryState state) { + return static_cast<u32>(state & MemoryState::Mask); +} + +struct MemoryInfo { + u64 base_address; + u64 size; + u32 state; + u32 attributes; + u32 permission; + u32 ipc_ref_count; + u32 device_ref_count; +}; +static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size."); + +struct PageInfo { + u32 flags; }; /** @@ -71,6 +257,16 @@ enum class MemoryState : u32 { * also backed by a single host memory allocation. */ struct VirtualMemoryArea { + /// Gets the starting (base) address of this VMA. + VAddr StartAddress() const { + return base; + } + + /// Gets the ending address of this VMA. + VAddr EndAddress() const { + return base + size - 1; + } + /// Virtual base address of the region. VAddr base = 0; /// Size of the region. @@ -78,8 +274,8 @@ struct VirtualMemoryArea { VMAType type = VMAType::Free; VMAPermission permissions = VMAPermission::None; - /// Tag returned by svcQueryMemory. Not otherwise used. - MemoryState meminfo_state = MemoryState::Unmapped; + MemoryState state = MemoryState::Unmapped; + MemoryAttribute attribute = MemoryAttribute::None; // Settings for type = AllocatedMemoryBlock /// Memory block backing this VMA. @@ -113,16 +309,10 @@ struct VirtualMemoryArea { * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/ */ class VMManager final { + using VMAMap = std::map<VAddr, VirtualMemoryArea>; + public: - /** - * A map covering the entirety of the managed address space, keyed by the `base` field of each - * VMA. 
It must always be modified by splitting or merging VMAs, so that the invariant - * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be - * merged when possible so that no two similar and adjacent regions exist that have not been - * merged. - */ - std::map<VAddr, VirtualMemoryArea> vma_map; - using VMAHandle = decltype(vma_map)::const_iterator; + using VMAHandle = VMAMap::const_iterator; VMManager(); ~VMManager(); @@ -133,6 +323,9 @@ public: /// Finds the VMA in which the given address is included in, or `vma_map.end()`. VMAHandle FindVMA(VAddr target) const; + /// Indicates whether or not the given handle is within the VMA map. + bool IsValidHandle(VMAHandle handle) const; + // TODO(yuriks): Should these functions actually return the handle? /** @@ -189,8 +382,28 @@ public: ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms); ResultCode HeapFree(VAddr target, u64 size); - ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, - MemoryState state = MemoryState::Mapped); + ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); + + /// Queries the memory manager for information about the given address. + /// + /// @param address The address to query the memory manager about for information. + /// + /// @return A MemoryInfo instance containing information about the given address. + /// + MemoryInfo QueryMemory(VAddr address) const; + + /// Sets an attribute across the given address range. + /// + /// @param address The starting address + /// @param size The size of the range to set the attribute on. + /// @param mask The attribute mask + /// @param attribute The attribute to set across the given address range + /// + /// @returns RESULT_SUCCESS if successful + /// @returns ERR_INVALID_ADDRESS_STATE if the attribute could not be set. + /// + ResultCode SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask, + MemoryAttribute attribute); /** * Scans all VMAs and updates the page table range of any that use the given vector as backing @@ -281,7 +494,7 @@ public: Memory::PageTable page_table; private: - using VMAIter = decltype(vma_map)::iterator; + using VMAIter = VMAMap::iterator; /// Converts a VMAHandle to a mutable VMAIter. VMAIter StripIterConstness(const VMAHandle& iter); @@ -328,6 +541,44 @@ private: /// Clears out the page table void ClearPageTable(); + using CheckResults = ResultVal<std::tuple<MemoryState, VMAPermission, MemoryAttribute>>; + + /// Checks if an address range adheres to the specified states provided. + /// + /// @param address The starting address of the address range. + /// @param size The size of the address range. + /// @param state_mask The memory state mask. + /// @param state The state to compare the individual VMA states against, + /// which is done in the form of: (vma.state & state_mask) != state. + /// @param permission_mask The memory permissions mask. + /// @param permissions The permission to compare the individual VMA permissions against, + /// which is done in the form of: + /// (vma.permission & permission_mask) != permission. + /// @param attribute_mask The memory attribute mask. + /// @param attribute The memory attributes to compare the individual VMA attributes + /// against, which is done in the form of: + /// (vma.attributes & attribute_mask) != attribute. + /// @param ignore_mask The memory attributes to ignore during the check. 
+ /// + /// @returns If successful, returns a tuple containing the memory attributes + /// (with ignored bits specified by ignore_mask unset), memory permissions, and + /// memory state across the memory range. + /// @returns If not successful, returns ERR_INVALID_ADDRESS_STATE. + /// + CheckResults CheckRangeState(VAddr address, u64 size, MemoryState state_mask, MemoryState state, + VMAPermission permission_mask, VMAPermission permissions, + MemoryAttribute attribute_mask, MemoryAttribute attribute, + MemoryAttribute ignore_mask) const; + + /** + * A map covering the entirety of the managed address space, keyed by the `base` field of each + * VMA. It must always be modified by splitting or merging VMAs, so that the invariant + * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be + * merged when possible so that no two similar and adjacent regions exist that have not been + * merged. + */ + VMAMap vma_map; + u32 address_space_width = 0; VAddr address_space_base = 0; VAddr address_space_end = 0; diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp index 530ee6af7..90580ed93 100644 --- a/src/core/hle/kernel/wait_object.cpp +++ b/src/core/hle/kernel/wait_object.cpp @@ -4,11 +4,11 @@ #include <algorithm> #include "common/assert.h" +#include "common/common_types.h" #include "common/logging/log.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/thread.h" -#include "core/hle/kernel/timer.h" namespace Kernel { diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h index f4367ee28..d70b67893 100644 --- a/src/core/hle/kernel/wait_object.h +++ b/src/core/hle/kernel/wait_object.h @@ -6,7 +6,6 @@ #include <vector> #include <boost/smart_ptr/intrusive_ptr.hpp> -#include "common/common_types.h" #include "core/hle/kernel/object.h" namespace Kernel { diff --git a/src/core/hle/kernel/writable_event.cpp b/src/core/hle/kernel/writable_event.cpp new file mode 100644 index 000000000..a58ea6ec8 --- /dev/null +++ b/src/core/hle/kernel/writable_event.cpp @@ -0,0 +1,52 @@ +// Copyright 2014 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <algorithm> +#include "common/assert.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/object.h" +#include "core/hle/kernel/readable_event.h" +#include "core/hle/kernel/thread.h" +#include "core/hle/kernel/writable_event.h" + +namespace Kernel { + +WritableEvent::WritableEvent(KernelCore& kernel) : Object{kernel} {} +WritableEvent::~WritableEvent() = default; + +EventPair WritableEvent::CreateEventPair(KernelCore& kernel, ResetType reset_type, + std::string name) { + SharedPtr<WritableEvent> writable_event(new WritableEvent(kernel)); + SharedPtr<ReadableEvent> readable_event(new ReadableEvent(kernel)); + + writable_event->name = name + ":Writable"; + writable_event->readable = readable_event; + readable_event->name = name + ":Readable"; + readable_event->signaled = false; + readable_event->reset_type = reset_type; + + return {std::move(readable_event), std::move(writable_event)}; +} + +SharedPtr<ReadableEvent> WritableEvent::GetReadableEvent() const { + return readable; +} + +ResetType WritableEvent::GetResetType() const { + return readable->reset_type; +} + +void WritableEvent::Signal() { + readable->Signal(); +} + +void WritableEvent::Clear() { + readable->Clear(); +} + +bool WritableEvent::IsSignaled() const { + return readable->signaled; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/writable_event.h b/src/core/hle/kernel/writable_event.h new file mode 100644 index 000000000..c9068dd3d --- /dev/null +++ b/src/core/hle/kernel/writable_event.h @@ -0,0 +1,61 @@ +// Copyright 2014 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "core/hle/kernel/object.h" + +namespace Kernel { + +class KernelCore; +class ReadableEvent; +class WritableEvent; + +struct EventPair { + SharedPtr<ReadableEvent> readable; + SharedPtr<WritableEvent> writable; +}; + +class WritableEvent final : public Object { +public: + ~WritableEvent() override; + + /** + * Creates an event + * @param kernel The kernel instance to create this event under. + * @param reset_type ResetType describing how to create event + * @param name Optional name of event + */ + static EventPair CreateEventPair(KernelCore& kernel, ResetType reset_type, + std::string name = "Unknown"); + + std::string GetTypeName() const override { + return "WritableEvent"; + } + std::string GetName() const override { + return name; + } + + static const HandleType HANDLE_TYPE = HandleType::WritableEvent; + HandleType GetHandleType() const override { + return HANDLE_TYPE; + } + + SharedPtr<ReadableEvent> GetReadableEvent() const; + + ResetType GetResetType() const; + + void Signal(); + void Clear(); + bool IsSignaled() const; + +private: + explicit WritableEvent(KernelCore& kernel); + + SharedPtr<ReadableEvent> readable; + + std::string name; ///< Name of event (optional) +}; + +} // namespace Kernel |
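The vm_manager.h hunk above replaces the plain MemoryState ids with flag-composed values and adds ToSvcMemoryState()/ToSvcMemoryAttribute() to strip the kernel-internal bits before they are reported to the guest. A minimal standalone sketch of that pattern follows; MiniState is a stand-in for Kernel::MemoryState and only the low-byte masking is illustrated, so this is not the real yuzu type.

// Minimal standalone sketch of the scoped-enum flag pattern used above.
#include <cstdint>
#include <cstdio>

enum class MiniState : std::uint32_t {
    Mask = 0xFF,            // low byte: the id reported back to the guest
    FlagMapped = 1U << 13,  // bits above 7 are kernel-internal bookkeeping
    FlagCode = 1U << 14,

    // Inside the enum-specifier the enumerators still have the fixed
    // underlying type, so plain integer | composes them without operators.
    Unmapped = 0x00,
    CodeStatic = 0x03 | FlagMapped | FlagCode,
};

constexpr MiniState operator&(MiniState lhs, MiniState rhs) {
    return static_cast<MiniState>(static_cast<std::uint32_t>(lhs) &
                                  static_cast<std::uint32_t>(rhs));
}

// Mirrors ToSvcMemoryState(): strip the internal flag bits, keep the low byte.
constexpr std::uint32_t ToSvcState(MiniState state) {
    return static_cast<std::uint32_t>(state & MiniState::Mask);
}

int main() {
    static_assert(ToSvcState(MiniState::CodeStatic) == 0x03,
                  "the guest only ever sees the low byte");
    std::printf("CodeStatic as seen by svcQueryMemory: 0x%02X\n",
                static_cast<unsigned>(ToSvcState(MiniState::CodeStatic)));
    return 0;
}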
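CheckRangeState() in the vm_manager.cpp hunk walks every VMA overlapping the requested range and fails with ERR_INVALID_ADDRESS_STATE if any of them misses the masked state/permission/attribute comparisons. The sketch below reproduces only the range-walk control flow with stand-in Region/RegionMap types and a single state mask; it is an illustration of the shape of the check, not the yuzu implementation.

// Standalone sketch of the range-walk used by CheckRangeState(): visit each
// region overlapping [address, address + size) in a map keyed by base address
// and require a masked match against the first region's state.
#include <cstdint>
#include <map>
#include <optional>

struct Region {
    std::uint64_t base = 0;
    std::uint64_t size = 0;
    std::uint32_t state = 0;

    std::uint64_t EndAddress() const {
        return base + size - 1;
    }
};

using RegionMap = std::map<std::uint64_t, Region>;

// Returns the common state of the range, or std::nullopt on any mismatch.
std::optional<std::uint32_t> CheckRange(const RegionMap& regions, std::uint64_t address,
                                        std::uint64_t size, std::uint32_t mask,
                                        std::uint32_t wanted) {
    if (size == 0) {
        return std::nullopt;
    }
    const std::uint64_t end_address = address + size - 1;

    // Find the region containing `address` (last region whose base <= address).
    auto iter = regions.upper_bound(address);
    if (iter == regions.begin()) {
        return std::nullopt;
    }
    --iter;
    if (address > iter->second.EndAddress()) {
        return std::nullopt; // address falls in a gap between regions
    }

    const std::uint32_t initial_state = iter->second.state;

    while (true) {
        const Region& region = iter->second;

        // Every region in the range must share the first region's state and
        // satisfy the caller-provided masked comparison.
        if (region.state != initial_state || (region.state & mask) != wanted) {
            return std::nullopt;
        }
        if (end_address <= region.EndAddress()) {
            return initial_state;
        }
        if (++iter == regions.end()) {
            return std::nullopt; // range runs past the last mapped region
        }
    }
}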
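writable_event.h/.cpp split the old Event object into a writable end kept by the emulating service and a readable end handed out to the guest. A hypothetical caller might wire the pair up roughly as below; ExampleService and its method names are invented for illustration, `kernel` is assumed to be the KernelCore owned by the running System, and ResetType/SharedPtr are assumed to come from the existing kernel object headers.

// Hypothetical service-side use of the readable/writable event split.
#include <string>

#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/writable_event.h"

namespace Service::Example {

class ExampleService {
public:
    explicit ExampleService(Kernel::KernelCore& kernel)
        : event{Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::OneShot,
                                                       "ExampleService:Event")} {}

    // The readable end is what gets copied out to the guest over IPC; the
    // writable end never leaves the service.
    Kernel::SharedPtr<Kernel::ReadableEvent> GetGuestEndpoint() const {
        return event.readable;
    }

    // Wakes any guest thread currently waiting on the readable end.
    void NotifyGuest() {
        event.writable->Signal();
    }

private:
    Kernel::EventPair event;
};

} // namespace Service::Example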
