Diffstat (limited to 'src/core/hle')
33 files changed, 831 insertions, 342 deletions
diff --git a/src/core/hle/ipc.h b/src/core/hle/ipc.h index 455d1f346..fae54bcc7 100644 --- a/src/core/hle/ipc.h +++ b/src/core/hle/ipc.h @@ -39,10 +39,10 @@ struct CommandHeader { union { u32_le raw_low; BitField<0, 16, CommandType> type; - BitField<16, 4, u32_le> num_buf_x_descriptors; - BitField<20, 4, u32_le> num_buf_a_descriptors; - BitField<24, 4, u32_le> num_buf_b_descriptors; - BitField<28, 4, u32_le> num_buf_w_descriptors; + BitField<16, 4, u32> num_buf_x_descriptors; + BitField<20, 4, u32> num_buf_a_descriptors; + BitField<24, 4, u32> num_buf_b_descriptors; + BitField<28, 4, u32> num_buf_w_descriptors; }; enum class BufferDescriptorCFlag : u32 { @@ -53,28 +53,28 @@ struct CommandHeader { union { u32_le raw_high; - BitField<0, 10, u32_le> data_size; + BitField<0, 10, u32> data_size; BitField<10, 4, BufferDescriptorCFlag> buf_c_descriptor_flags; - BitField<31, 1, u32_le> enable_handle_descriptor; + BitField<31, 1, u32> enable_handle_descriptor; }; }; static_assert(sizeof(CommandHeader) == 8, "CommandHeader size is incorrect"); union HandleDescriptorHeader { u32_le raw_high; - BitField<0, 1, u32_le> send_current_pid; - BitField<1, 4, u32_le> num_handles_to_copy; - BitField<5, 4, u32_le> num_handles_to_move; + BitField<0, 1, u32> send_current_pid; + BitField<1, 4, u32> num_handles_to_copy; + BitField<5, 4, u32> num_handles_to_move; }; static_assert(sizeof(HandleDescriptorHeader) == 4, "HandleDescriptorHeader size is incorrect"); struct BufferDescriptorX { union { - BitField<0, 6, u32_le> counter_bits_0_5; - BitField<6, 3, u32_le> address_bits_36_38; - BitField<9, 3, u32_le> counter_bits_9_11; - BitField<12, 4, u32_le> address_bits_32_35; - BitField<16, 16, u32_le> size; + BitField<0, 6, u32> counter_bits_0_5; + BitField<6, 3, u32> address_bits_36_38; + BitField<9, 3, u32> counter_bits_9_11; + BitField<12, 4, u32> address_bits_32_35; + BitField<16, 16, u32> size; }; u32_le address_bits_0_31; @@ -103,10 +103,10 @@ struct BufferDescriptorABW { u32_le address_bits_0_31; union { - BitField<0, 2, u32_le> flags; - BitField<2, 3, u32_le> address_bits_36_38; - BitField<24, 4, u32_le> size_bits_32_35; - BitField<28, 4, u32_le> address_bits_32_35; + BitField<0, 2, u32> flags; + BitField<2, 3, u32> address_bits_36_38; + BitField<24, 4, u32> size_bits_32_35; + BitField<28, 4, u32> address_bits_32_35; }; VAddr Address() const { @@ -128,8 +128,8 @@ struct BufferDescriptorC { u32_le address_bits_0_31; union { - BitField<0, 16, u32_le> address_bits_32_47; - BitField<16, 16, u32_le> size; + BitField<0, 16, u32> address_bits_32_47; + BitField<16, 16, u32> size; }; VAddr Address() const { @@ -167,8 +167,8 @@ struct DomainMessageHeader { struct { union { BitField<0, 8, CommandType> command; - BitField<8, 8, u32_le> input_object_count; - BitField<16, 16, u32_le> size; + BitField<8, 8, u32> input_object_count; + BitField<16, 16, u32> size; }; u32_le object_id; INSERT_PADDING_WORDS(2); diff --git a/src/core/hle/kernel/code_set.cpp b/src/core/hle/kernel/code_set.cpp new file mode 100644 index 000000000..1f434e9af --- /dev/null +++ b/src/core/hle/kernel/code_set.cpp @@ -0,0 +1,12 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
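A sketch (not part of the diff) of what the BitField members in CommandHeader above encode: BitField<Position, Bits, T> aliases a bit range of the union's raw word, so reading a field is equivalent to a plain shift-and-mask. The helper name below is hypothetical; u32 is assumed to be the codebase's alias from common/common_types.h.

// Plain-C++ equivalent of BitField<16, 4, u32> num_buf_x_descriptors on raw_low:
constexpr u32 NumBufXDescriptors(u32 raw_low) {
    return (raw_low >> 16) & 0xF; // 4 bits starting at bit 16
}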
+ +#include "core/hle/kernel/code_set.h" + +namespace Kernel { + +CodeSet::CodeSet() = default; +CodeSet::~CodeSet() = default; + +} // namespace Kernel diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h new file mode 100644 index 000000000..834fd23d2 --- /dev/null +++ b/src/core/hle/kernel/code_set.h @@ -0,0 +1,90 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <cstddef> +#include <memory> +#include <vector> + +#include "common/common_types.h" + +namespace Kernel { + +/** + * Represents executable data that may be loaded into a kernel process. + * + * A code set consists of three basic segments: + * - A code (AKA text) segment, + * - A read-only data segment (rodata) + * - A data segment + * + * The code segment is the portion of the object file that contains + * executable instructions. + * + * The read-only data segment in the portion of the object file that + * contains (as one would expect) read-only data, such as fixed constant + * values and data structures. + * + * The data segment is similar to the read-only data segment -- it contains + * variables and data structures that have predefined values, however, + * entities within this segment can be modified. + */ +struct CodeSet final { + /// A single segment within a code set. + struct Segment final { + /// The byte offset that this segment is located at. + std::size_t offset = 0; + + /// The address to map this segment to. + VAddr addr = 0; + + /// The size of this segment in bytes. + u32 size = 0; + }; + + explicit CodeSet(); + ~CodeSet(); + + CodeSet(const CodeSet&) = delete; + CodeSet& operator=(const CodeSet&) = delete; + + CodeSet(CodeSet&&) = default; + CodeSet& operator=(CodeSet&&) = default; + + Segment& CodeSegment() { + return segments[0]; + } + + const Segment& CodeSegment() const { + return segments[0]; + } + + Segment& RODataSegment() { + return segments[1]; + } + + const Segment& RODataSegment() const { + return segments[1]; + } + + Segment& DataSegment() { + return segments[2]; + } + + const Segment& DataSegment() const { + return segments[2]; + } + + /// The overall data that backs this code set. + std::shared_ptr<std::vector<u8>> memory; + + /// The segments that comprise this code set. + std::array<Segment, 3> segments; + + /// The entry point address for this code set. 
+ VAddr entrypoint = 0; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 8de62d073..3b73be67b 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -29,7 +29,7 @@ namespace Kernel { * @param thread_handle The handle of the thread that's been awoken * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time */ -static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_late) { +static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) { const auto proper_handle = static_cast<Handle>(thread_handle); const auto& system = Core::System::GetInstance(); diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index ff17ff865..03ea5b659 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -8,9 +8,6 @@ #include <unordered_map> #include "core/hle/kernel/object.h" -template <typename T> -class ResultVal; - namespace Core { class System; } diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 0743670ad..98e87313b 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -2,7 +2,6 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#include <map> #include <utility> #include <vector> @@ -10,8 +9,11 @@ #include "core/core.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/mutex.h" #include "core/hle/kernel/object.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/result.h" #include "core/memory.h" @@ -57,41 +59,47 @@ static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_t } } -ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle holding_thread_handle, +Mutex::Mutex(Core::System& system) : system{system} {} +Mutex::~Mutex() = default; + +ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, Handle requesting_thread_handle) { // The mutex address must be 4-byte aligned if ((address % sizeof(u32)) != 0) { return ERR_INVALID_ADDRESS; } + const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); + Thread* const current_thread = system.CurrentScheduler().GetCurrentThread(); SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle); // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another // thread. - ASSERT(requesting_thread == GetCurrentThread()); + ASSERT(requesting_thread == current_thread); - u32 addr_value = Memory::Read32(address); + const u32 addr_value = Memory::Read32(address); // If the mutex isn't being held, just return success. 
if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) { return RESULT_SUCCESS; } - if (holding_thread == nullptr) + if (holding_thread == nullptr) { return ERR_INVALID_HANDLE; + } // Wait until the mutex is released - GetCurrentThread()->SetMutexWaitAddress(address); - GetCurrentThread()->SetWaitHandle(requesting_thread_handle); + current_thread->SetMutexWaitAddress(address); + current_thread->SetWaitHandle(requesting_thread_handle); - GetCurrentThread()->SetStatus(ThreadStatus::WaitMutex); - GetCurrentThread()->InvalidateWakeupCallback(); + current_thread->SetStatus(ThreadStatus::WaitMutex); + current_thread->InvalidateWakeupCallback(); // Update the lock holder thread's priority to prevent priority inversion. - holding_thread->AddMutexWaiter(GetCurrentThread()); + holding_thread->AddMutexWaiter(current_thread); - Core::System::GetInstance().PrepareReschedule(); + system.PrepareReschedule(); return RESULT_SUCCESS; } @@ -102,7 +110,8 @@ ResultCode Mutex::Release(VAddr address) { return ERR_INVALID_ADDRESS; } - auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address); + auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); + auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address); // There are no more threads waiting for the mutex, release it completely. if (thread == nullptr) { @@ -111,7 +120,7 @@ ResultCode Mutex::Release(VAddr address) { } // Transfer the ownership of the mutex from the previous owner to the new one. - TransferMutexOwnership(address, GetCurrentThread(), thread); + TransferMutexOwnership(address, current_thread, thread); u32 mutex_value = thread->GetWaitHandle(); diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h index 81e62d497..b904de2e8 100644 --- a/src/core/hle/kernel/mutex.h +++ b/src/core/hle/kernel/mutex.h @@ -5,32 +5,34 @@ #pragma once #include "common/common_types.h" -#include "core/hle/kernel/object.h" union ResultCode; -namespace Kernel { +namespace Core { +class System; +} -class HandleTable; -class Thread; +namespace Kernel { class Mutex final { public: + explicit Mutex(Core::System& system); + ~Mutex(); + /// Flag that indicates that a mutex still has threads waiting for it. static constexpr u32 MutexHasWaitersFlag = 0x40000000; /// Mask of the bits in a mutex address value that contain the mutex owner. static constexpr u32 MutexOwnerMask = 0xBFFFFFFF; /// Attempts to acquire a mutex at the specified address. - static ResultCode TryAcquire(HandleTable& handle_table, VAddr address, - Handle holding_thread_handle, Handle requesting_thread_handle); + ResultCode TryAcquire(VAddr address, Handle holding_thread_handle, + Handle requesting_thread_handle); /// Releases the mutex at the specified address. 
- static ResultCode Release(VAddr address); + ResultCode Release(VAddr address); private: - Mutex() = default; - ~Mutex() = default; + Core::System& system; }; } // namespace Kernel diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp index 8870463d0..217144efc 100644 --- a/src/core/hle/kernel/object.cpp +++ b/src/core/hle/kernel/object.cpp @@ -23,6 +23,7 @@ bool Object::IsWaitable() const { case HandleType::Unknown: case HandleType::WritableEvent: case HandleType::SharedMemory: + case HandleType::TransferMemory: case HandleType::AddressArbiter: case HandleType::ResourceLimit: case HandleType::ClientPort: diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h index 4c2505908..3f6baa094 100644 --- a/src/core/hle/kernel/object.h +++ b/src/core/hle/kernel/object.h @@ -22,6 +22,7 @@ enum class HandleType : u32 { WritableEvent, ReadableEvent, SharedMemory, + TransferMemory, Thread, Process, AddressArbiter, diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 65c51003d..0d782e4ba 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -9,6 +9,7 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/file_sys/program_metadata.h" +#include "core/hle/kernel/code_set.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" @@ -50,9 +51,6 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_poi } } // Anonymous namespace -CodeSet::CodeSet() = default; -CodeSet::~CodeSet() = default; - SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) { auto& kernel = system.Kernel(); @@ -212,7 +210,7 @@ void Process::FreeTLSSlot(VAddr tls_address) { } void Process::LoadModule(CodeSet module_, VAddr base_addr) { - const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, + const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) { const auto vma = vm_manager .MapMemoryBlock(segment.addr + base_addr, module_.memory, @@ -222,16 +220,17 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) { }; // Map CodeSet segments - MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::CodeStatic); - MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeMutable); - MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable); + MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code); + MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData); + MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData); // Clear instruction cache in CPU JIT system.InvalidateCpuInstructionCaches(); } Process::Process(Core::System& system) - : WaitObject{system.Kernel()}, address_arbiter{system}, system{system} {} + : WaitObject{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {} + Process::~Process() = default; void Process::Acquire(Thread* thread) { diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 47ffd4ad3..a0217d3d8 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -7,13 +7,13 @@ #include <array> #include <bitset> #include <cstddef> -#include <memory> #include <string> #include <vector> #include <boost/container/static_vector.hpp> #include "common/common_types.h" #include "core/hle/kernel/address_arbiter.h" 
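A sketch (not part of the diff) of how the guest mutex word read in Mutex::TryAcquire above is interpreted, based on the MutexHasWaitersFlag and MutexOwnerMask constants declared in mutex.h:

// addr_value is the u32 read from the mutex address via Memory::Read32 above.
const bool has_waiters = (addr_value & Mutex::MutexHasWaitersFlag) != 0; // bit 30 set while threads wait
const u32 owner_handle = addr_value & Mutex::MutexOwnerMask;             // handle of the holding thread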
#include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/mutex.h" #include "core/hle/kernel/process_capability.h" #include "core/hle/kernel/vm_manager.h" #include "core/hle/kernel/wait_object.h" @@ -33,13 +33,7 @@ class KernelCore; class ResourceLimit; class Thread; -struct AddressMapping { - // Address and size must be page-aligned - VAddr address; - u64 size; - bool read_only; - bool unk_flag; -}; +struct CodeSet; enum class MemoryRegion : u16 { APPLICATION = 1, @@ -65,46 +59,6 @@ enum class ProcessStatus { DebugBreak, }; -struct CodeSet final { - struct Segment { - std::size_t offset = 0; - VAddr addr = 0; - u32 size = 0; - }; - - explicit CodeSet(); - ~CodeSet(); - - Segment& CodeSegment() { - return segments[0]; - } - - const Segment& CodeSegment() const { - return segments[0]; - } - - Segment& RODataSegment() { - return segments[1]; - } - - const Segment& RODataSegment() const { - return segments[1]; - } - - Segment& DataSegment() { - return segments[2]; - } - - const Segment& DataSegment() const { - return segments[2]; - } - - std::shared_ptr<std::vector<u8>> memory; - - std::array<Segment, 3> segments; - VAddr entrypoint = 0; -}; - class Process final : public WaitObject { public: enum : u64 { @@ -165,6 +119,16 @@ public: return address_arbiter; } + /// Gets a reference to the process' mutex lock. + Mutex& GetMutex() { + return mutex; + } + + /// Gets a const reference to the process' mutex lock + const Mutex& GetMutex() const { + return mutex; + } + /// Gets the current status of the process ProcessStatus GetStatus() const { return status; @@ -327,6 +291,11 @@ private: /// Per-process address arbiter. AddressArbiter address_arbiter; + /// The per-process mutex lock instance used for handling various + /// forms of services, such as lock arbitration, and condition + /// variable related facilities. + Mutex mutex; + /// Random values for svcGetInfo RandomEntropy std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy; diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index cc189cc64..6d0f13ecf 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -30,7 +30,7 @@ Scheduler::~Scheduler() { bool Scheduler::HaveReadyThreads() const { std::lock_guard<std::mutex> lock(scheduler_mutex); - return ready_queue.get_first() != nullptr; + return !ready_queue.empty(); } Thread* Scheduler::GetCurrentThread() const { @@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() { Thread* thread = GetCurrentThread(); if (thread && thread->GetStatus() == ThreadStatus::Running) { + if (ready_queue.empty()) { + return thread; + } // We have to do better than the current thread. // This call returns null when that's not possible. 
- next = ready_queue.pop_first_better(thread->GetPriority()); - if (!next) { - // Otherwise just keep going with the current thread + next = ready_queue.front(); + if (next == nullptr || next->GetPriority() >= thread->GetPriority()) { next = thread; } } else { - next = ready_queue.pop_first(); + if (ready_queue.empty()) { + return nullptr; + } + next = ready_queue.front(); } return next; } void Scheduler::SwitchContext(Thread* new_thread) { - Thread* const previous_thread = GetCurrentThread(); + Thread* previous_thread = GetCurrentThread(); Process* const previous_process = system.Kernel().CurrentProcess(); UpdateLastContextSwitchTime(previous_thread, previous_process); @@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { if (previous_thread->GetStatus() == ThreadStatus::Running) { // This is only the case when a reschedule is triggered without the current thread // yielding execution (i.e. an event triggered, system core time-sliced, etc) - ready_queue.push_front(previous_thread->GetPriority(), previous_thread); + ready_queue.add(previous_thread, previous_thread->GetPriority(), false); previous_thread->SetStatus(ThreadStatus::Ready); } } @@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { current_thread = new_thread; - ready_queue.remove(new_thread->GetPriority(), new_thread); + ready_queue.remove(new_thread, new_thread->GetPriority()); new_thread->SetStatus(ThreadStatus::Running); auto* const thread_owner_process = current_thread->GetOwnerProcess(); @@ -147,7 +152,6 @@ void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) { std::lock_guard<std::mutex> lock(scheduler_mutex); thread_list.push_back(std::move(thread)); - ready_queue.prepare(priority); } void Scheduler::RemoveThread(Thread* thread) { @@ -161,33 +165,37 @@ void Scheduler::ScheduleThread(Thread* thread, u32 priority) { std::lock_guard<std::mutex> lock(scheduler_mutex); ASSERT(thread->GetStatus() == ThreadStatus::Ready); - ready_queue.push_back(priority, thread); + ready_queue.add(thread, priority); } void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { std::lock_guard<std::mutex> lock(scheduler_mutex); ASSERT(thread->GetStatus() == ThreadStatus::Ready); - ready_queue.remove(priority, thread); + ready_queue.remove(thread, priority); } void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { std::lock_guard<std::mutex> lock(scheduler_mutex); + if (thread->GetPriority() == priority) { + return; + } // If thread was ready, adjust queues if (thread->GetStatus() == ThreadStatus::Ready) - ready_queue.move(thread, thread->GetPriority(), priority); - else - ready_queue.prepare(priority); + ready_queue.adjust(thread, thread->GetPriority(), priority); } Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { std::lock_guard<std::mutex> lock(scheduler_mutex); const u32 mask = 1U << core; - return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) { - return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority; - }); + for (auto* thread : ready_queue) { + if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) { + return thread; + } + } + return nullptr; } void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index 1c5bf57d9..44baeb713 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h @@ -7,7 +7,7 @@ #include <mutex> #include <vector> #include 
"common/common_types.h" -#include "common/thread_queue_list.h" +#include "common/multi_level_queue.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/thread.h" @@ -156,7 +156,7 @@ private: std::vector<SharedPtr<Thread>> thread_list; /// Lists only ready thread ids. - Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue; + Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue; SharedPtr<Thread> current_thread = nullptr; diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index d40a2226b..11796e5e5 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -32,6 +32,7 @@ #include "core/hle/kernel/svc.h" #include "core/hle/kernel/svc_wrap.h" #include "core/hle/kernel/thread.h" +#include "core/hle/kernel/transfer_memory.h" #include "core/hle/kernel/writable_event.h" #include "core/hle/lock.h" #include "core/hle/result.h" @@ -174,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) { return ERR_INVALID_SIZE; } - auto& vm_manager = Core::CurrentProcess()->VMManager(); - const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress(); - const auto alloc_result = - vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite); - + auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager(); + const auto alloc_result = vm_manager.SetHeapSize(heap_size); if (alloc_result.Failed()) { return alloc_result.Code(); } @@ -551,9 +549,9 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr, return ERR_INVALID_ADDRESS; } - auto& handle_table = Core::CurrentProcess()->GetHandleTable(); - return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle, - requesting_thread_handle); + auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess(); + return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle, + requesting_thread_handle); } /// Unlock a mutex @@ -571,7 +569,8 @@ static ResultCode ArbitrateUnlock(VAddr mutex_addr) { return ERR_INVALID_ADDRESS; } - return Mutex::Release(mutex_addr); + auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess(); + return current_process->GetMutex().Release(mutex_addr); } enum class BreakType : u32 { @@ -807,7 +806,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) return RESULT_SUCCESS; case GetInfoType::TotalHeapUsage: - *result = process->VMManager().GetTotalHeapUsage(); + *result = process->VMManager().GetCurrentHeapSize(); return RESULT_SUCCESS; case GetInfoType::IsVirtualAddressMemoryEnabled: @@ -1340,11 +1339,15 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", mutex_addr, condition_variable_addr, thread_handle, nano_seconds); - const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); + auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess(); + const auto& handle_table = current_process->GetHandleTable(); SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); ASSERT(thread); - CASCADE_CODE(Mutex::Release(mutex_addr)); + const auto release_result = current_process->GetMutex().Release(mutex_addr); + if (release_result.IsError()) { + return release_result; + } SharedPtr<Thread> current_thread = GetCurrentThread(); current_thread->SetCondVarWaitAddress(condition_variable_addr); @@ -1582,14 +1585,121 @@ static 
ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32 } auto& kernel = Core::System::GetInstance().Kernel(); - auto process = kernel.CurrentProcess(); - auto& handle_table = process->GetHandleTable(); - const auto shared_mem_handle = SharedMemory::Create(kernel, process, size, perms, perms, addr); + auto transfer_mem_handle = TransferMemory::Create(kernel, addr, size, perms); - CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle)); + auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); + const auto result = handle_table.Create(std::move(transfer_mem_handle)); + if (result.Failed()) { + return result.Code(); + } + + *handle = *result; return RESULT_SUCCESS; } +static ResultCode MapTransferMemory(Handle handle, VAddr address, u64 size, u32 permission_raw) { + LOG_DEBUG(Kernel_SVC, + "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}", + handle, address, size, permission_raw); + + if (!Common::Is4KBAligned(address)) { + LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).", + address); + return ERR_INVALID_ADDRESS; + } + + if (size == 0 || !Common::Is4KBAligned(size)) { + LOG_ERROR(Kernel_SVC, + "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).", + size); + return ERR_INVALID_SIZE; + } + + if (!IsValidAddressRange(address, size)) { + LOG_ERROR(Kernel_SVC, + "Given address and size overflows the 64-bit range (address=0x{:016X}, " + "size=0x{:016X}).", + address, size); + return ERR_INVALID_ADDRESS_STATE; + } + + const auto permissions = static_cast<MemoryPermission>(permission_raw); + if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read && + permissions != MemoryPermission::ReadWrite) { + LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).", + permission_raw); + return ERR_INVALID_STATE; + } + + const auto& kernel = Core::System::GetInstance().Kernel(); + const auto* const current_process = kernel.CurrentProcess(); + const auto& handle_table = current_process->GetHandleTable(); + + auto transfer_memory = handle_table.Get<TransferMemory>(handle); + if (!transfer_memory) { + LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).", + handle); + return ERR_INVALID_HANDLE; + } + + if (!current_process->VMManager().IsWithinASLRRegion(address, size)) { + LOG_ERROR(Kernel_SVC, + "Given address and size don't fully fit within the ASLR region " + "(address=0x{:016X}, size=0x{:016X}).", + address, size); + return ERR_INVALID_MEMORY_RANGE; + } + + return transfer_memory->MapMemory(address, size, permissions); +} + +static ResultCode UnmapTransferMemory(Handle handle, VAddr address, u64 size) { + LOG_DEBUG(Kernel_SVC, "called. 
handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle, + address, size); + + if (!Common::Is4KBAligned(address)) { + LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).", + address); + return ERR_INVALID_ADDRESS; + } + + if (size == 0 || !Common::Is4KBAligned(size)) { + LOG_ERROR(Kernel_SVC, + "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).", + size); + return ERR_INVALID_SIZE; + } + + if (!IsValidAddressRange(address, size)) { + LOG_ERROR(Kernel_SVC, + "Given address and size overflows the 64-bit range (address=0x{:016X}, " + "size=0x{:016X}).", + address, size); + return ERR_INVALID_ADDRESS_STATE; + } + + const auto& kernel = Core::System::GetInstance().Kernel(); + const auto* const current_process = kernel.CurrentProcess(); + const auto& handle_table = current_process->GetHandleTable(); + + auto transfer_memory = handle_table.Get<TransferMemory>(handle); + if (!transfer_memory) { + LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).", + handle); + return ERR_INVALID_HANDLE; + } + + if (!current_process->VMManager().IsWithinASLRRegion(address, size)) { + LOG_ERROR(Kernel_SVC, + "Given address and size don't fully fit within the ASLR region " + "(address=0x{:016X}, size=0x{:016X}).", + address, size); + return ERR_INVALID_MEMORY_RANGE; + } + + return transfer_memory->UnmapMemory(address, size); +} + static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) { LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); @@ -1965,8 +2075,8 @@ static const FunctionDef SVC_Table[] = { {0x4E, nullptr, "ReadWriteRegister"}, {0x4F, nullptr, "SetProcessActivity"}, {0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"}, - {0x51, nullptr, "MapTransferMemory"}, - {0x52, nullptr, "UnmapTransferMemory"}, + {0x51, SvcWrap<MapTransferMemory>, "MapTransferMemory"}, + {0x52, SvcWrap<UnmapTransferMemory>, "UnmapTransferMemory"}, {0x53, nullptr, "CreateInterruptEvent"}, {0x54, nullptr, "QueryPhysicalAddress"}, {0x55, nullptr, "QueryIoMapping"}, diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 79c80bb01..e5853c46f 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -315,8 +315,9 @@ void Thread::UpdatePriority() { } // Ensure that the thread is within the correct location in the waiting list. + auto old_owner = lock_owner; lock_owner->RemoveMutexWaiter(this); - lock_owner->AddMutexWaiter(this); + old_owner->AddMutexWaiter(this); // Recursively update the priority of the thread that depends on the priority of this one. lock_owner->UpdatePriority(); diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp new file mode 100644 index 000000000..23228e1b5 --- /dev/null +++ b/src/core/hle/kernel/transfer_memory.cpp @@ -0,0 +1,73 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
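The transfer-memory SVCs above validate that address and size are 4 KiB aligned, that size is non-zero, that the range does not wrap the 64-bit address space, and that it lies inside the ASLR region. As a rough sketch (not part of the diff), the alignment predicate reduces to a low-bits test; Common::Is4KBAligned is assumed to behave like this:

constexpr bool Is4KiBAligned(u64 value) {
    return (value & 0xFFF) == 0; // 4 KiB == 0x1000 bytes, so the low 12 bits must be clear
}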
+ +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/shared_memory.h" +#include "core/hle/kernel/transfer_memory.h" +#include "core/hle/result.h" + +namespace Kernel { + +TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {} +TransferMemory::~TransferMemory() = default; + +SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address, + size_t size, MemoryPermission permissions) { + SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)}; + + transfer_memory->base_address = base_address; + transfer_memory->memory_size = size; + transfer_memory->owner_permissions = permissions; + transfer_memory->owner_process = kernel.CurrentProcess(); + + return transfer_memory; +} + +ResultCode TransferMemory::MapMemory(VAddr address, size_t size, MemoryPermission permissions) { + if (memory_size != size) { + return ERR_INVALID_SIZE; + } + + if (owner_permissions != permissions) { + return ERR_INVALID_STATE; + } + + if (is_mapped) { + return ERR_INVALID_STATE; + } + + const auto map_state = owner_permissions == MemoryPermission::None + ? MemoryState::TransferMemoryIsolated + : MemoryState::TransferMemory; + auto& vm_manager = owner_process->VMManager(); + const auto map_result = vm_manager.MapMemoryBlock( + address, std::make_shared<std::vector<u8>>(size), 0, size, map_state); + + if (map_result.Failed()) { + return map_result.Code(); + } + + is_mapped = true; + return RESULT_SUCCESS; +} + +ResultCode TransferMemory::UnmapMemory(VAddr address, size_t size) { + if (memory_size != size) { + return ERR_INVALID_SIZE; + } + + auto& vm_manager = owner_process->VMManager(); + const auto result = vm_manager.UnmapRange(address, size); + + if (result.IsError()) { + return result; + } + + is_mapped = false; + return RESULT_SUCCESS; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h new file mode 100644 index 000000000..ec294951e --- /dev/null +++ b/src/core/hle/kernel/transfer_memory.h @@ -0,0 +1,91 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "core/hle/kernel/object.h" + +union ResultCode; + +namespace Kernel { + +class KernelCore; +class Process; + +enum class MemoryPermission : u32; + +/// Defines the interface for transfer memory objects. +/// +/// Transfer memory is typically used for the purpose of +/// transferring memory between separate process instances, +/// thus the name. +/// +class TransferMemory final : public Object { +public: + static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory; + + static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, size_t size, + MemoryPermission permissions); + + TransferMemory(const TransferMemory&) = delete; + TransferMemory& operator=(const TransferMemory&) = delete; + + TransferMemory(TransferMemory&&) = delete; + TransferMemory& operator=(TransferMemory&&) = delete; + + std::string GetTypeName() const override { + return "TransferMemory"; + } + + std::string GetName() const override { + return GetTypeName(); + } + + HandleType GetHandleType() const override { + return HANDLE_TYPE; + } + + /// Attempts to map transfer memory with the given range and memory permissions. + /// + /// @param address The base address to being mapping memory at. + /// @param size The size of the memory to map, in bytes. 
+ /// @param permissions The memory permissions to check against when mapping memory. + /// + /// @pre The given address, size, and memory permissions must all match + /// the same values that were given when creating the transfer memory + /// instance. + /// + ResultCode MapMemory(VAddr address, size_t size, MemoryPermission permissions); + + /// Unmaps the transfer memory with the given range + /// + /// @param address The base address to begin unmapping memory at. + /// @param size The size of the memory to unmap, in bytes. + /// + /// @pre The given address and size must be the same as the ones used + /// to create the transfer memory instance. + /// + ResultCode UnmapMemory(VAddr address, size_t size); + +private: + explicit TransferMemory(KernelCore& kernel); + ~TransferMemory() override; + + /// The base address for the memory managed by this instance. + VAddr base_address = 0; + + /// Size of the memory, in bytes, that this instance manages. + size_t memory_size = 0; + + /// The memory permissions that are applied to this instance. + MemoryPermission owner_permissions{}; + + /// The process that this transfer memory instance was created under. + Process* owner_process = nullptr; + + /// Whether or not this transfer memory instance has mapped memory. + bool is_mapped = false; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 3def3e52c..ec0a480ce 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp @@ -20,16 +20,16 @@ namespace Kernel { namespace { const char* GetMemoryStateName(MemoryState state) { static constexpr const char* names[] = { - "Unmapped", "Io", - "Normal", "CodeStatic", - "CodeMutable", "Heap", - "Shared", "Unknown1", - "ModuleCodeStatic", "ModuleCodeMutable", - "IpcBuffer0", "Stack", - "ThreadLocal", "TransferMemoryIsolated", - "TransferMemory", "ProcessMemory", - "Inaccessible", "IpcBuffer1", - "IpcBuffer3", "KernelStack", + "Unmapped", "Io", + "Normal", "Code", + "CodeData", "Heap", + "Shared", "Unknown1", + "ModuleCode", "ModuleCodeData", + "IpcBuffer0", "Stack", + "ThreadLocal", "TransferMemoryIsolated", + "TransferMemory", "ProcessMemory", + "Inaccessible", "IpcBuffer1", + "IpcBuffer3", "KernelStack", }; return names[ToSvcMemoryState(state)]; @@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p return RESULT_SUCCESS; } -ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { - if (!IsWithinHeapRegion(target, size)) { - return ERR_INVALID_ADDRESS; +ResultVal<VAddr> VMManager::SetHeapSize(u64 size) { + if (size > GetHeapRegionSize()) { + return ERR_OUT_OF_MEMORY; + } + + // No need to do any additional work if the heap is already the given size. + if (size == GetCurrentHeapSize()) { + return MakeResult(heap_region_base); } if (heap_memory == nullptr) { // Initialize heap - heap_memory = std::make_shared<std::vector<u8>>(); - heap_start = heap_end = target; + heap_memory = std::make_shared<std::vector<u8>>(size); + heap_end = heap_region_base + size; } else { - UnmapRange(heap_start, heap_end - heap_start); - } - - // If necessary, expand backing vector to cover new heap extents. 
- if (target < heap_start) { - heap_memory->insert(begin(*heap_memory), heap_start - target, 0); - heap_start = target; - RefreshMemoryBlockMappings(heap_memory.get()); - } - if (target + size > heap_end) { - heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0); - heap_end = target + size; - RefreshMemoryBlockMappings(heap_memory.get()); + UnmapRange(heap_region_base, GetCurrentHeapSize()); } - ASSERT(heap_end - heap_start == heap_memory->size()); - CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size, - MemoryState::Heap)); - Reprotect(vma, perms); + // If necessary, expand backing vector to cover new heap extents in + // the case of allocating. Otherwise, shrink the backing memory, + // if a smaller heap has been requested. + const u64 old_heap_size = GetCurrentHeapSize(); + if (size > old_heap_size) { + const u64 alloc_size = size - old_heap_size; - heap_used = size; - - return MakeResult<VAddr>(heap_end - size); -} + heap_memory->insert(heap_memory->end(), alloc_size, 0); + RefreshMemoryBlockMappings(heap_memory.get()); + } else if (size < old_heap_size) { + heap_memory->resize(size); + heap_memory->shrink_to_fit(); -ResultCode VMManager::HeapFree(VAddr target, u64 size) { - if (!IsWithinHeapRegion(target, size)) { - return ERR_INVALID_ADDRESS; + RefreshMemoryBlockMappings(heap_memory.get()); } - if (size == 0) { - return RESULT_SUCCESS; - } + heap_end = heap_region_base + size; + ASSERT(GetCurrentHeapSize() == heap_memory->size()); - const ResultCode result = UnmapRange(target, size); - if (result.IsError()) { - return result; + const auto mapping_result = + MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap); + if (mapping_result.Failed()) { + return mapping_result.Code(); } - heap_used -= size; - return RESULT_SUCCESS; + return MakeResult<VAddr>(heap_region_base); } MemoryInfo VMManager::QueryMemory(VAddr address) const { @@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty heap_region_base = map_region_end; heap_region_end = heap_region_base + heap_region_size; + heap_end = heap_region_base; new_map_region_base = heap_region_end; new_map_region_end = new_map_region_base + new_map_region_size; @@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const { return 0xF8000000; } -u64 VMManager::GetTotalHeapUsage() const { - return heap_used; -} - VAddr VMManager::GetAddressSpaceBaseAddress() const { return address_space_base; } @@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const { return heap_region_end - heap_region_base; } +u64 VMManager::GetCurrentHeapSize() const { + return heap_end - heap_region_base; +} + bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), GetHeapRegionEndAddress()); diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index b96980f8f..6f484b7bf 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h @@ -165,12 +165,12 @@ enum class MemoryState : u32 { Unmapped = 0x00, Io = 0x01 | FlagMapped, Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed, - CodeStatic = 0x03 | CodeFlags | FlagMapProcess, - CodeMutable = 0x04 | CodeFlags | FlagMapProcess | FlagCodeMemory, + Code = 0x03 | CodeFlags | FlagMapProcess, + CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory, Heap = 0x05 | DataFlags | FlagCodeMemory, Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated, - ModuleCodeStatic = 
0x08 | CodeFlags | FlagModule | FlagMapProcess, - ModuleCodeMutable = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory, + ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess, + ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory, IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated | IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned, @@ -380,11 +380,41 @@ public: /// Changes the permissions of a range of addresses, splitting VMAs as necessary. ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); - ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms); - ResultCode HeapFree(VAddr target, u64 size); - ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); + /// Attempts to allocate a heap with the given size. + /// + /// @param size The size of the heap to allocate in bytes. + /// + /// @note If a heap is currently allocated, and this is called + /// with a size that is equal to the size of the current heap, + /// then this function will do nothing and return the current + /// heap's starting address, as there's no need to perform + /// any additional heap allocation work. + /// + /// @note If a heap is currently allocated, and this is called + /// with a size less than the current heap's size, then + /// this function will attempt to shrink the heap. + /// + /// @note If a heap is currently allocated, and this is called + /// with a size larger than the current heap's size, then + /// this function will attempt to extend the size of the heap. + /// + /// @returns A result indicating either success or failure. + /// <p> + /// If successful, this function will return a result + /// containing the starting address to the allocated heap. + /// <p> + /// If unsuccessful, this function will return a result + /// containing an error code. + /// + /// @pre The given size must lie within the allowable heap + /// memory region managed by this VMManager instance. + /// Failure to abide by this will result in ERR_OUT_OF_MEMORY + /// being returned as the result. + /// + ResultVal<VAddr> SetHeapSize(u64 size); + /// Queries the memory manager for information about the given address. /// /// @param address The address to query the memory manager about for information. @@ -418,9 +448,6 @@ public: /// Gets the total memory usage, used by svcGetInfo u64 GetTotalMemoryUsage() const; - /// Gets the total heap usage, used by svcGetInfo - u64 GetTotalHeapUsage() const; - /// Gets the address space base address VAddr GetAddressSpaceBaseAddress() const; @@ -469,6 +496,13 @@ public: /// Gets the total size of the heap region in bytes. u64 GetHeapRegionSize() const; + /// Gets the total size of the current heap in bytes. + /// + /// @note This is the current allocated heap size, not the size + /// of the region it's allowed to exist within. + /// + u64 GetCurrentHeapSize() const; + /// Determines whether or not the specified range is within the heap region. bool IsWithinHeapRegion(VAddr address, u64 size) const; @@ -625,9 +659,9 @@ private: // This makes deallocation and reallocation of holes fast and keeps process memory contiguous // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. std::shared_ptr<std::vector<u8>> heap_memory; - // The left/right bounds of the address space covered by heap_memory. - VAddr heap_start = 0; + + // The end of the currently allocated heap. This is not an inclusive + // end of the range. 
This is essentially 'base_address + current_size'. VAddr heap_end = 0; - u64 heap_used = 0; }; } // namespace Kernel diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index c750d70ac..9c44e27c6 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp @@ -215,7 +215,21 @@ IDisplayController::IDisplayController() : ServiceFramework("IDisplayController" IDisplayController::~IDisplayController() = default; -IDebugFunctions::IDebugFunctions() : ServiceFramework("IDebugFunctions") {} +IDebugFunctions::IDebugFunctions() : ServiceFramework{"IDebugFunctions"} { + // clang-format off + static const FunctionInfo functions[] = { + {0, nullptr, "NotifyMessageToHomeMenuForDebug"}, + {1, nullptr, "OpenMainApplication"}, + {10, nullptr, "EmulateButtonEvent"}, + {20, nullptr, "InvalidateTransitionLayer"}, + {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"}, + {40, nullptr, "GetAppletResourceUsageInfo"}, + }; + // clang-format on + + RegisterHandlers(functions); +} + IDebugFunctions::~IDebugFunctions() = default; ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger) diff --git a/src/core/hle/service/audio/hwopus.cpp b/src/core/hle/service/audio/hwopus.cpp index 377e12cfa..cb4a1160d 100644 --- a/src/core/hle/service/audio/hwopus.cpp +++ b/src/core/hle/service/audio/hwopus.cpp @@ -8,6 +8,7 @@ #include <vector> #include <opus.h> +#include <opus_multistream.h> #include "common/assert.h" #include "common/logging/log.h" @@ -18,12 +19,12 @@ namespace Service::Audio { namespace { struct OpusDeleter { - void operator()(void* ptr) const { - operator delete(ptr); + void operator()(OpusMSDecoder* ptr) const { + opus_multistream_decoder_destroy(ptr); } }; -using OpusDecoderPtr = std::unique_ptr<OpusDecoder, OpusDeleter>; +using OpusDecoderPtr = std::unique_ptr<OpusMSDecoder, OpusDeleter>; struct OpusPacketHeader { // Packet size in bytes. @@ -33,7 +34,7 @@ struct OpusPacketHeader { }; static_assert(sizeof(OpusPacketHeader) == 0x8, "OpusHeader is an invalid size"); -class OpusDecoderStateBase { +class OpusDecoderState { public: /// Describes extra behavior that may be asked of the decoding context. enum class ExtraBehavior { @@ -49,22 +50,13 @@ public: Enabled, }; - virtual ~OpusDecoderStateBase() = default; - - // Decodes interleaved Opus packets. Optionally allows reporting time taken to - // perform the decoding, as well as any relevant extra behavior. - virtual void DecodeInterleaved(Kernel::HLERequestContext& ctx, PerfTime perf_time, - ExtraBehavior extra_behavior) = 0; -}; - -// Represents the decoder state for a non-multistream decoder. -class OpusDecoderState final : public OpusDecoderStateBase { -public: explicit OpusDecoderState(OpusDecoderPtr decoder, u32 sample_rate, u32 channel_count) : decoder{std::move(decoder)}, sample_rate{sample_rate}, channel_count{channel_count} {} + // Decodes interleaved Opus packets. Optionally allows reporting time taken to + // perform the decoding, as well as any relevant extra behavior. 
void DecodeInterleaved(Kernel::HLERequestContext& ctx, PerfTime perf_time, - ExtraBehavior extra_behavior) override { + ExtraBehavior extra_behavior) { if (perf_time == PerfTime::Disabled) { DecodeInterleavedHelper(ctx, nullptr, extra_behavior); } else { @@ -135,7 +127,7 @@ private: const int frame_size = (static_cast<int>(raw_output_sz / sizeof(s16) / channel_count)); const auto out_sample_count = - opus_decode(decoder.get(), frame, hdr.size, output.data(), frame_size, 0); + opus_multistream_decode(decoder.get(), frame, hdr.size, output.data(), frame_size, 0); if (out_sample_count < 0) { LOG_ERROR(Audio, "Incorrect sample count received from opus_decode, " @@ -158,7 +150,7 @@ private: void ResetDecoderContext() { ASSERT(decoder != nullptr); - opus_decoder_ctl(decoder.get(), OPUS_RESET_STATE); + opus_multistream_decoder_ctl(decoder.get(), OPUS_RESET_STATE); } OpusDecoderPtr decoder; @@ -168,7 +160,7 @@ private: class IHardwareOpusDecoderManager final : public ServiceFramework<IHardwareOpusDecoderManager> { public: - explicit IHardwareOpusDecoderManager(std::unique_ptr<OpusDecoderStateBase> decoder_state) + explicit IHardwareOpusDecoderManager(OpusDecoderState decoder_state) : ServiceFramework("IHardwareOpusDecoderManager"), decoder_state{std::move(decoder_state)} { // clang-format off static const FunctionInfo functions[] = { @@ -190,35 +182,51 @@ private: void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Audio, "called"); - decoder_state->DecodeInterleaved(ctx, OpusDecoderStateBase::PerfTime::Disabled, - OpusDecoderStateBase::ExtraBehavior::None); + decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Disabled, + OpusDecoderState::ExtraBehavior::None); } void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Audio, "called"); - decoder_state->DecodeInterleaved(ctx, OpusDecoderStateBase::PerfTime::Enabled, - OpusDecoderStateBase::ExtraBehavior::None); + decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Enabled, + OpusDecoderState::ExtraBehavior::None); } void DecodeInterleaved(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Audio, "called"); IPC::RequestParser rp{ctx}; - const auto extra_behavior = rp.Pop<bool>() - ? OpusDecoderStateBase::ExtraBehavior::ResetContext - : OpusDecoderStateBase::ExtraBehavior::None; + const auto extra_behavior = rp.Pop<bool>() ? OpusDecoderState::ExtraBehavior::ResetContext + : OpusDecoderState::ExtraBehavior::None; - decoder_state->DecodeInterleaved(ctx, OpusDecoderStateBase::PerfTime::Enabled, - extra_behavior); + decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Enabled, extra_behavior); } - std::unique_ptr<OpusDecoderStateBase> decoder_state; + OpusDecoderState decoder_state; }; std::size_t WorkerBufferSize(u32 channel_count) { ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); - return opus_decoder_get_size(static_cast<int>(channel_count)); + constexpr int num_streams = 1; + const int num_stereo_streams = channel_count == 2 ? 1 : 0; + return opus_multistream_decoder_get_size(num_streams, num_stereo_streams); +} + +// Creates the mapping table that maps the input channels to the particular +// output channels. In the stereo case, we map the left and right input channels +// to the left and right output channels respectively. +// +// However, in the monophonic case, we only map the one available channel +// to the sole output channel. 
We specify 255 for the would-be right channel +// as this is a special value defined by Opus to indicate to the decoder to +// ignore that channel. +std::array<u8, 2> CreateMappingTable(u32 channel_count) { + if (channel_count == 2) { + return {{0, 1}}; + } + + return {{0, 255}}; } } // Anonymous namespace @@ -259,9 +267,15 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { const std::size_t worker_sz = WorkerBufferSize(channel_count); ASSERT_MSG(buffer_sz >= worker_sz, "Worker buffer too large"); - OpusDecoderPtr decoder{static_cast<OpusDecoder*>(operator new(worker_sz))}; - if (const int err = opus_decoder_init(decoder.get(), sample_rate, channel_count)) { - LOG_ERROR(Audio, "Failed to init opus decoder with error={}", err); + const int num_stereo_streams = channel_count == 2 ? 1 : 0; + const auto mapping_table = CreateMappingTable(channel_count); + + int error = 0; + OpusDecoderPtr decoder{ + opus_multistream_decoder_create(sample_rate, static_cast<int>(channel_count), 1, + num_stereo_streams, mapping_table.data(), &error)}; + if (error != OPUS_OK || decoder == nullptr) { + LOG_ERROR(Audio, "Failed to create Opus decoder (error={}).", error); IPC::ResponseBuilder rb{ctx, 2}; // TODO(ogniK): Use correct error code rb.Push(ResultCode(-1)); @@ -271,7 +285,7 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); rb.PushIpcInterface<IHardwareOpusDecoderManager>( - std::make_unique<OpusDecoderState>(std::move(decoder), sample_rate, channel_count)); + OpusDecoderState{std::move(decoder), sample_rate, channel_count}); } HwOpus::HwOpus() : ServiceFramework("hwopus") { diff --git a/src/core/hle/service/hid/controllers/debug_pad.h b/src/core/hle/service/hid/controllers/debug_pad.h index 929035034..e584b92ec 100644 --- a/src/core/hle/service/hid/controllers/debug_pad.h +++ b/src/core/hle/service/hid/controllers/debug_pad.h @@ -41,20 +41,20 @@ private: struct PadState { union { u32_le raw{}; - BitField<0, 1, u32_le> a; - BitField<1, 1, u32_le> b; - BitField<2, 1, u32_le> x; - BitField<3, 1, u32_le> y; - BitField<4, 1, u32_le> l; - BitField<5, 1, u32_le> r; - BitField<6, 1, u32_le> zl; - BitField<7, 1, u32_le> zr; - BitField<8, 1, u32_le> plus; - BitField<9, 1, u32_le> minus; - BitField<10, 1, u32_le> d_left; - BitField<11, 1, u32_le> d_up; - BitField<12, 1, u32_le> d_right; - BitField<13, 1, u32_le> d_down; + BitField<0, 1, u32> a; + BitField<1, 1, u32> b; + BitField<2, 1, u32> x; + BitField<3, 1, u32> y; + BitField<4, 1, u32> l; + BitField<5, 1, u32> r; + BitField<6, 1, u32> zl; + BitField<7, 1, u32> zr; + BitField<8, 1, u32> plus; + BitField<9, 1, u32> minus; + BitField<10, 1, u32> d_left; + BitField<11, 1, u32> d_up; + BitField<12, 1, u32> d_right; + BitField<13, 1, u32> d_down; }; }; static_assert(sizeof(PadState) == 0x4, "PadState is an invalid size"); @@ -62,7 +62,7 @@ private: struct Attributes { union { u32_le raw{}; - BitField<0, 1, u32_le> connected; + BitField<0, 1, u32> connected; }; }; static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size"); diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h index 18c7a94e6..4ff50b3cd 100644 --- a/src/core/hle/service/hid/controllers/npad.h +++ b/src/core/hle/service/hid/controllers/npad.h @@ -39,13 +39,13 @@ public: union { u32_le raw{}; - BitField<0, 1, u32_le> pro_controller; - BitField<1, 1, u32_le> handheld; - BitField<2, 1, u32_le> joycon_dual; - BitField<3, 1, u32_le> joycon_left; - 
BitField<4, 1, u32_le> joycon_right; + BitField<0, 1, u32> pro_controller; + BitField<1, 1, u32> handheld; + BitField<2, 1, u32> joycon_dual; + BitField<3, 1, u32> joycon_left; + BitField<4, 1, u32> joycon_right; - BitField<6, 1, u32_le> pokeball; // TODO(ogniK): Confirm when possible + BitField<6, 1, u32> pokeball; // TODO(ogniK): Confirm when possible }; }; static_assert(sizeof(NPadType) == 4, "NPadType is an invalid size"); @@ -150,43 +150,43 @@ private: union { u64_le raw{}; // Button states - BitField<0, 1, u64_le> a; - BitField<1, 1, u64_le> b; - BitField<2, 1, u64_le> x; - BitField<3, 1, u64_le> y; - BitField<4, 1, u64_le> l_stick; - BitField<5, 1, u64_le> r_stick; - BitField<6, 1, u64_le> l; - BitField<7, 1, u64_le> r; - BitField<8, 1, u64_le> zl; - BitField<9, 1, u64_le> zr; - BitField<10, 1, u64_le> plus; - BitField<11, 1, u64_le> minus; + BitField<0, 1, u64> a; + BitField<1, 1, u64> b; + BitField<2, 1, u64> x; + BitField<3, 1, u64> y; + BitField<4, 1, u64> l_stick; + BitField<5, 1, u64> r_stick; + BitField<6, 1, u64> l; + BitField<7, 1, u64> r; + BitField<8, 1, u64> zl; + BitField<9, 1, u64> zr; + BitField<10, 1, u64> plus; + BitField<11, 1, u64> minus; // D-Pad - BitField<12, 1, u64_le> d_left; - BitField<13, 1, u64_le> d_up; - BitField<14, 1, u64_le> d_right; - BitField<15, 1, u64_le> d_down; + BitField<12, 1, u64> d_left; + BitField<13, 1, u64> d_up; + BitField<14, 1, u64> d_right; + BitField<15, 1, u64> d_down; // Left JoyStick - BitField<16, 1, u64_le> l_stick_left; - BitField<17, 1, u64_le> l_stick_up; - BitField<18, 1, u64_le> l_stick_right; - BitField<19, 1, u64_le> l_stick_down; + BitField<16, 1, u64> l_stick_left; + BitField<17, 1, u64> l_stick_up; + BitField<18, 1, u64> l_stick_right; + BitField<19, 1, u64> l_stick_down; // Right JoyStick - BitField<20, 1, u64_le> r_stick_left; - BitField<21, 1, u64_le> r_stick_up; - BitField<22, 1, u64_le> r_stick_right; - BitField<23, 1, u64_le> r_stick_down; + BitField<20, 1, u64> r_stick_left; + BitField<21, 1, u64> r_stick_up; + BitField<22, 1, u64> r_stick_right; + BitField<23, 1, u64> r_stick_down; // Not always active? 
- BitField<24, 1, u64_le> left_sl; - BitField<25, 1, u64_le> left_sr; + BitField<24, 1, u64> left_sl; + BitField<25, 1, u64> left_sr; - BitField<26, 1, u64_le> right_sl; - BitField<27, 1, u64_le> right_sr; + BitField<26, 1, u64> right_sl; + BitField<27, 1, u64> right_sr; }; }; static_assert(sizeof(ControllerPadState) == 8, "ControllerPadState is an invalid size"); @@ -200,12 +200,12 @@ private: struct ConnectionState { union { u32_le raw{}; - BitField<0, 1, u32_le> IsConnected; - BitField<1, 1, u32_le> IsWired; - BitField<2, 1, u32_le> IsLeftJoyConnected; - BitField<3, 1, u32_le> IsLeftJoyWired; - BitField<4, 1, u32_le> IsRightJoyConnected; - BitField<5, 1, u32_le> IsRightJoyWired; + BitField<0, 1, u32> IsConnected; + BitField<1, 1, u32> IsWired; + BitField<2, 1, u32> IsLeftJoyConnected; + BitField<3, 1, u32> IsLeftJoyWired; + BitField<4, 1, u32> IsRightJoyConnected; + BitField<5, 1, u32> IsRightJoyWired; }; }; static_assert(sizeof(ConnectionState) == 4, "ConnectionState is an invalid size"); @@ -240,23 +240,23 @@ private: struct NPadProperties { union { s64_le raw{}; - BitField<11, 1, s64_le> is_vertical; - BitField<12, 1, s64_le> is_horizontal; - BitField<13, 1, s64_le> use_plus; - BitField<14, 1, s64_le> use_minus; + BitField<11, 1, s64> is_vertical; + BitField<12, 1, s64> is_horizontal; + BitField<13, 1, s64> use_plus; + BitField<14, 1, s64> use_minus; }; }; struct NPadDevice { union { u32_le raw{}; - BitField<0, 1, s32_le> pro_controller; - BitField<1, 1, s32_le> handheld; - BitField<2, 1, s32_le> handheld_left; - BitField<3, 1, s32_le> handheld_right; - BitField<4, 1, s32_le> joycon_left; - BitField<5, 1, s32_le> joycon_right; - BitField<6, 1, s32_le> pokeball; + BitField<0, 1, s32> pro_controller; + BitField<1, 1, s32> handheld; + BitField<2, 1, s32> handheld_left; + BitField<3, 1, s32> handheld_right; + BitField<4, 1, s32> joycon_left; + BitField<5, 1, s32> joycon_right; + BitField<6, 1, s32> pokeball; }; }; diff --git a/src/core/hle/service/hid/controllers/touchscreen.h b/src/core/hle/service/hid/controllers/touchscreen.h index 012b6e0dd..76fc340e9 100644 --- a/src/core/hle/service/hid/controllers/touchscreen.h +++ b/src/core/hle/service/hid/controllers/touchscreen.h @@ -33,8 +33,8 @@ private: struct Attributes { union { u32 raw{}; - BitField<0, 1, u32_le> start_touch; - BitField<1, 1, u32_le> end_touch; + BitField<0, 1, u32> start_touch; + BitField<1, 1, u32> end_touch; }; }; static_assert(sizeof(Attributes) == 0x4, "Attributes is an invalid size"); diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index 8a6de83a2..63b55758b 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp @@ -36,9 +36,9 @@ namespace Service::HID { // Updating period for each HID device. 
// TODO(ogniK): Find actual polling rate of hid
-constexpr u64 pad_update_ticks = Core::Timing::BASE_CLOCK_RATE / 66;
-constexpr u64 accelerometer_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100;
-constexpr u64 gyroscope_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100;
+constexpr s64 pad_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 66);
+constexpr s64 accelerometer_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100);
+constexpr s64 gyroscope_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100);
constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;
IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") {
@@ -75,7 +75,7 @@ IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") {
// Register update callbacks
auto& core_timing = Core::System::GetInstance().CoreTiming();
pad_update_event =
- core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, int cycles_late) {
+ core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) {
UpdateControllers(userdata, cycles_late);
});
@@ -106,7 +106,7 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
rb.PushCopyObjects(shared_mem);
}
-void IAppletResource::UpdateControllers(u64 userdata, int cycles_late) {
+void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) {
auto& core_timing = Core::System::GetInstance().CoreTiming();
const bool should_reload = Settings::values.is_device_reload_pending.exchange(false);
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index 7cc58db4c..d3660cad2 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -4,6 +4,9 @@
#pragma once
+#include "core/hle/service/hid/controllers/controller_base.h"
+#include "core/hle/service/service.h"
+
#include "controllers/controller_base.h"
#include "core/hle/service/service.h"
@@ -62,7 +65,7 @@ private:
}
void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx);
- void UpdateControllers(u64 userdata, int cycles_late);
+ void UpdateControllers(u64 userdata, s64 cycles_late);
Kernel::SharedPtr<Kernel::SharedMemory> shared_mem;
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 9df7ac50f..d65693fc7 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -319,15 +319,14 @@ public:
}
ASSERT(vm_manager
- .MirrorMemory(*map_address, nro_addr, nro_size,
- Kernel::MemoryState::ModuleCodeStatic)
+ .MirrorMemory(*map_address, nro_addr, nro_size, Kernel::MemoryState::ModuleCode)
.IsSuccess());
ASSERT(vm_manager.UnmapRange(nro_addr, nro_size).IsSuccess());
if (bss_size > 0) {
ASSERT(vm_manager
.MirrorMemory(*map_address + nro_size, bss_addr, bss_size,
- Kernel::MemoryState::ModuleCodeStatic)
+ Kernel::MemoryState::ModuleCode)
.IsSuccess());
ASSERT(vm_manager.UnmapRange(bss_addr, bss_size).IsSuccess());
}
@@ -388,8 +387,7 @@ public:
const auto& nro_size = iter->second.size;
ASSERT(vm_manager
- .MirrorMemory(heap_addr, mapped_addr, nro_size,
- Kernel::MemoryState::ModuleCodeStatic)
+ .MirrorMemory(heap_addr, mapped_addr, nro_size, Kernel::MemoryState::ModuleCode)
.IsSuccess());
ASSERT(vm_manager.UnmapRange(mapped_addr, nro_size).IsSuccess());
diff --git a/src/core/hle/service/lm/lm.cpp b/src/core/hle/service/lm/lm.cpp
index 1f462e087..2a61593e2 100644
--- a/src/core/hle/service/lm/lm.cpp
+++ b/src/core/hle/service/lm/lm.cpp
@@ -42,7 +42,7 @@ private:
union {
BitField<0, 16, Flags> flags;
BitField<16, 8, Severity> severity;
- BitField<24, 8, u32_le> verbosity;
+ BitField<24, 8, u32> verbosity;
};
u32_le payload_size;
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h
index 0f02a1a18..4f6042b00 100644
--- a/src/core/hle/service/nvdrv/devices/nvdevice.h
+++ b/src/core/hle/service/nvdrv/devices/nvdevice.h
@@ -19,11 +19,11 @@ public:
virtual ~nvdevice() = default;
union Ioctl {
u32_le raw;
- BitField<0, 8, u32_le> cmd;
- BitField<8, 8, u32_le> group;
- BitField<16, 14, u32_le> length;
- BitField<30, 1, u32_le> is_in;
- BitField<31, 1, u32_le> is_out;
+ BitField<0, 8, u32> cmd;
+ BitField<8, 8, u32> group;
+ BitField<16, 14, u32> length;
+ BitField<30, 1, u32> is_in;
+ BitField<31, 1, u32> is_out;
};
/**
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index b031ebc66..af62d33d2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -89,7 +89,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output)
for (const auto& entry : entries) {
LOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
entry.offset, entry.nvmap_handle, entry.pages);
- Tegra::GPUVAddr offset = static_cast<Tegra::GPUVAddr>(entry.offset) << 0x10;
+ GPUVAddr offset = static_cast<GPUVAddr>(entry.offset) << 0x10;
auto object = nvmap_dev->GetObject(entry.nvmap_handle);
if (!object) {
LOG_CRITICAL(Service_NVDRV, "nvmap {} is an invalid handle!", entry.nvmap_handle);
@@ -102,7 +102,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output)
u64 size = static_cast<u64>(entry.pages) << 0x10;
ASSERT(size <= object->size);
- Tegra::GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size);
+ GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size);
ASSERT(returned == offset);
}
std::memcpy(output.data(), entries.data(), output.size());
@@ -173,16 +173,8 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
return 0;
}
- auto& system_instance = Core::System::GetInstance();
-
- // Remove this memory region from the rasterizer cache.
- auto& gpu = system_instance.GPU();
- auto cpu_addr = gpu.MemoryManager().GpuToCpuAddress(params.offset);
- ASSERT(cpu_addr);
- gpu.FlushAndInvalidateRegion(ToCacheAddr(Memory::GetPointer(*cpu_addr)), itr->second.size);
-
- params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size);
-
+ params.offset = Core::System::GetInstance().GPU().MemoryManager().UnmapBuffer(params.offset,
+ itr->second.size);
buffer_mappings.erase(itr->second.offset);
std::memcpy(output.data(), &params, output.size());
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index fc496b654..c7f5bbf28 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -26,7 +26,7 @@ namespace Service::NVFlinger {
constexpr std::size_t SCREEN_REFRESH_RATE = 60;
-constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
+constexpr s64 frame_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} {
displays.emplace_back(0, "Default");
@@ -37,7 +37,7 @@ NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_t
// Schedule the screen composition events
composition_event =
- core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) {
+ core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) {
Compose();
this->core_timing.ScheduleEvent(frame_ticks - cycles_late, composition_event);
});
diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp
index c9b4da5b0..ecee554bf 100644
--- a/src/core/hle/service/set/set_sys.cpp
+++ b/src/core/hle/service/set/set_sys.cpp
@@ -2,13 +2,88 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/assert.h"
#include "common/logging/log.h"
+#include "core/file_sys/errors.h"
+#include "core/file_sys/system_archive/system_version.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/client_port.h"
+#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/set/set_sys.h"
namespace Service::Set {
+namespace {
+constexpr u64 SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET = 0x05;
+
+enum class GetFirmwareVersionType {
+ Version1,
+ Version2,
+};
+
+void GetFirmwareVersionImpl(Kernel::HLERequestContext& ctx, GetFirmwareVersionType type) {
+ LOG_WARNING(Service_SET, "called - Using hardcoded firmware version '{}'",
+ FileSys::SystemArchive::GetLongDisplayVersion());
+
+ ASSERT_MSG(ctx.GetWriteBufferSize() == 0x100,
+ "FirmwareVersion output buffer must be 0x100 bytes in size!");
+
+ // Instead of using the normal procedure of checking for the real system archive and, if it
+ // doesn't exist, synthesizing one, I feel that that would lead to strange bugs because a
+ // user is using a really old or really new SystemVersion title. The synthesized one ensures
+ // consistency (currently reports as 5.1.0-0.0)
+ const auto archive = FileSys::SystemArchive::SystemVersion();
+
+ const auto early_exit_failure = [&ctx](const std::string& desc, ResultCode code) {
+ LOG_ERROR(Service_SET, "General failure while attempting to resolve firmware version ({}).",
+ desc.c_str());
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(code);
+ };
+
+ if (archive == nullptr) {
+ early_exit_failure("The system version archive couldn't be synthesized.",
+ FileSys::ERROR_FAILED_MOUNT_ARCHIVE);
+ return;
+ }
+
+ const auto ver_file = archive->GetFile("file");
+ if (ver_file == nullptr) {
+ early_exit_failure("The system version archive didn't contain the file 'file'.",
+ FileSys::ERROR_INVALID_ARGUMENT);
+ return;
+ }
+
+ auto data = ver_file->ReadAllBytes();
+ if (data.size() != 0x100) {
+ early_exit_failure("The system version file 'file' was not the correct size.",
+ FileSys::ERROR_OUT_OF_BOUNDS);
+ return;
+ }
+
+ // If the command is GetFirmwareVersion (as opposed to GetFirmwareVersion2), hardware will
+ // zero out the REVISION_MINOR field.
+ if (type == GetFirmwareVersionType::Version1) {
+ data[SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET] = 0;
+ }
+
+ ctx.WriteBuffer(data);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+}
+} // Anonymous namespace
+
+void SET_SYS::GetFirmwareVersion(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_SET, "called");
+ GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version1);
+}
+
+void SET_SYS::GetFirmwareVersion2(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_SET, "called");
+ GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version2);
+}
+
void SET_SYS::GetColorSetId(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_SET, "called");
@@ -33,8 +108,8 @@ SET_SYS::SET_SYS() : ServiceFramework("set:sys") {
{0, nullptr, "SetLanguageCode"},
{1, nullptr, "SetNetworkSettings"},
{2, nullptr, "GetNetworkSettings"},
- {3, nullptr, "GetFirmwareVersion"},
- {4, nullptr, "GetFirmwareVersion2"},
+ {3, &SET_SYS::GetFirmwareVersion, "GetFirmwareVersion"},
+ {4, &SET_SYS::GetFirmwareVersion2, "GetFirmwareVersion2"},
{5, nullptr, "GetFirmwareVersionDigest"},
{7, nullptr, "GetLockScreenFlag"},
{8, nullptr, "SetLockScreenFlag"},
diff --git a/src/core/hle/service/set/set_sys.h b/src/core/hle/service/set/set_sys.h
index f602f3c77..13ee2cf46 100644
--- a/src/core/hle/service/set/set_sys.h
+++ b/src/core/hle/service/set/set_sys.h
@@ -20,6 +20,8 @@ private:
BasicBlack = 1,
};
+ void GetFirmwareVersion(Kernel::HLERequestContext& ctx);
+ void GetFirmwareVersion2(Kernel::HLERequestContext& ctx);
void GetColorSetId(Kernel::HLERequestContext& ctx);
void SetColorSetId(Kernel::HLERequestContext& ctx);
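
A note on the BitField hunks above: they only swap the BitField storage parameter from the endian-wrapped aliases (u32_le, u64_le, s64_le) to plain integer types; the raw union member keeps its little-endian wrapper and the bit positions are unchanged. As a rough illustration of the layout ControllerPadState declares, using manual masks rather than the BitField template the real code relies on, and a made-up button combination:

// Illustrative only: mirrors a few of the ControllerPadState bit positions shown above.
#include <cstdint>

namespace pad_bits {
constexpr std::uint64_t a = 1ULL << 0;
constexpr std::uint64_t b = 1ULL << 1;
constexpr std::uint64_t d_left = 1ULL << 12;
constexpr std::uint64_t l_stick_up = 1ULL << 17;
} // namespace pad_bits

constexpr bool IsPressed(std::uint64_t raw, std::uint64_t mask) {
    return (raw & mask) != 0;
}

// Hypothetical pad word with A and D-Pad left held down.
static_assert(IsPressed(pad_bits::a | pad_bits::d_left, pad_bits::d_left));
static_assert(!IsPressed(pad_bits::a | pad_bits::d_left, pad_bits::b));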
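
The u64 to s64 change for pad_update_ticks, frame_ticks, and the cycles_late callback parameter matters for the reschedule expression visible in the nvflinger hunk, frame_ticks - cycles_late: when a callback fires more than one interval late, that difference goes negative, and unsigned arithmetic would wrap it into an enormous delay. A minimal standalone sketch of that failure mode (the clock rate below is a placeholder, not yuzu's BASE_CLOCK_RATE):

#include <cstdint>
#include <iostream>

int main() {
    // Placeholder numbers: some base clock divided by a 60 Hz refresh rate.
    constexpr std::int64_t frame_ticks = 19'200'000 / 60;
    const std::int64_t cycles_late = frame_ticks + 5'000; // callback fired over a frame late

    // Signed: a small negative delta the scheduler can clamp to "run immediately".
    std::cout << (frame_ticks - cycles_late) << '\n'; // prints -5000

    // Unsigned: the same subtraction wraps to a delay of roughly 1.8e19 ticks.
    std::cout << (static_cast<std::uint64_t>(frame_ticks) -
                  static_cast<std::uint64_t>(cycles_late))
              << '\n';
}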
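
For the nvdevice change, only the BitField storage type differs; the field widths the Ioctl union declares (cmd in bits 0-7, group in bits 8-15, length in bits 16-29, is_in at bit 30, is_out at bit 31) stay the same. That layout can be sketched with plain shifts; the sample raw word below is hypothetical, not taken from a real ioctl trace:

#include <cstdint>
#include <cstdio>

struct DecodedIoctl {
    std::uint32_t cmd;    // bits 0-7
    std::uint32_t group;  // bits 8-15
    std::uint32_t length; // bits 16-29
    bool is_in;           // bit 30
    bool is_out;          // bit 31
};

constexpr DecodedIoctl Decode(std::uint32_t raw) {
    return {raw & 0xFF, (raw >> 8) & 0xFF, (raw >> 16) & 0x3FFF,
            ((raw >> 30) & 1) != 0, ((raw >> 31) & 1) != 0};
}

int main() {
    const DecodedIoctl ioctl = Decode(0xC0184101); // hypothetical value
    std::printf("cmd=0x%X group=0x%X length=0x%X in=%d out=%d\n",
                ioctl.cmd, ioctl.group, ioctl.length, ioctl.is_in, ioctl.is_out);
}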
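
Finally, the set:sys addition wires commands 3 and 4 to a shared GetFirmwareVersionImpl: both copy the 0x100-byte SystemVersion file into the output buffer, and the only difference is that GetFirmwareVersion zeroes the minor-revision byte at offset 0x05. A self-contained sketch of just that distinction (buffer contents made up):

#include <array>
#include <cstdint>

constexpr std::size_t kMinorRevisionOffset = 0x05;

enum class FirmwareVersionType { Version1, Version2 };

// Passes the version blob through unchanged for Version2; clears the minor revision for
// Version1, matching the behaviour described in GetFirmwareVersionImpl above.
std::array<std::uint8_t, 0x100> BuildResponse(std::array<std::uint8_t, 0x100> file_data,
                                              FirmwareVersionType type) {
    if (type == FirmwareVersionType::Version1) {
        file_data[kMinorRevisionOffset] = 0;
    }
    return file_data;
}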
