From c0d3aef28c0a0c68c18de30228f29e30f0e52533 Mon Sep 17 00:00:00 2001 From: bunnei Date: Wed, 30 Dec 2020 23:01:08 -0800 Subject: core: hle: kernel: Rename Thread to KThread. --- src/core/hle/kernel/k_thread.cpp | 460 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 460 insertions(+) create mode 100644 src/core/hle/kernel/k_thread.cpp (limited to 'src/core/hle/kernel/k_thread.cpp') diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp new file mode 100644 index 000000000..1ec29636c --- /dev/null +++ b/src/core/hle/kernel/k_thread.cpp @@ -0,0 +1,460 @@ +// Copyright 2014 Citra Emulator Project / PPSSPP Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include +#include +#include +#include + +#include "common/assert.h" +#include "common/common_types.h" +#include "common/fiber.h" +#include "common/logging/log.h" +#include "common/thread_queue_list.h" +#include "core/core.h" +#include "core/cpu_manager.h" +#include "core/hardware_properties.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_condition_variable.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" +#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/memory_layout.h" +#include "core/hle/kernel/object.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/time_manager.h" +#include "core/hle/result.h" +#include "core/memory.h" + +#ifdef ARCHITECTURE_x86_64 +#include "core/arm/dynarmic/arm_dynarmic_32.h" +#include "core/arm/dynarmic/arm_dynarmic_64.h" +#endif + +namespace Kernel { + +bool KThread::IsSignaled() const { + return signaled; +} + +KThread::KThread(KernelCore& kernel) : KSynchronizationObject{kernel} {} +KThread::~KThread() = default; + +void KThread::Stop() { + { + KScopedSchedulerLock lock(kernel); + SetState(ThreadState::Terminated); + signaled = true; + NotifyAvailable(); + kernel.GlobalHandleTable().Close(global_handle); + + if (owner_process) { + owner_process->UnregisterThread(this); + + // Mark the TLS slot in the thread's page as free. + owner_process->FreeTLSRegion(tls_address); + } + has_exited = true; + } + global_handle = 0; +} + +void KThread::Wakeup() { + KScopedSchedulerLock lock(kernel); + SetState(ThreadState::Runnable); +} + +ResultCode KThread::Start() { + KScopedSchedulerLock lock(kernel); + SetState(ThreadState::Runnable); + return RESULT_SUCCESS; +} + +void KThread::CancelWait() { + KScopedSchedulerLock lock(kernel); + if (GetState() != ThreadState::Waiting || !is_cancellable) { + is_sync_cancelled = true; + return; + } + // TODO(Blinkhawk): Implement cancel of server session + is_sync_cancelled = false; + SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED); + SetState(ThreadState::Runnable); +} + +static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, + u32 entry_point, u32 arg) { + context = {}; + context.cpu_registers[0] = arg; + context.cpu_registers[15] = entry_point; + context.cpu_registers[13] = stack_top; +} + +static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top, + VAddr entry_point, u64 arg) { + context = {}; + context.cpu_registers[0] = arg; + context.pc = entry_point; + context.sp = stack_top; + // TODO(merry): Perform a hardware test to determine the below value. 
+ context.fpcr = 0; +} + +std::shared_ptr& KThread::GetHostContext() { + return host_context; +} + +ResultVal> KThread::Create(Core::System& system, ThreadType type_flags, + std::string name, VAddr entry_point, + u32 priority, u64 arg, s32 processor_id, + VAddr stack_top, Process* owner_process) { + std::function init_func = Core::CpuManager::GetGuestThreadStartFunc(); + void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); + return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top, + owner_process, std::move(init_func), init_func_parameter); +} + +ResultVal> KThread::Create(Core::System& system, ThreadType type_flags, + std::string name, VAddr entry_point, + u32 priority, u64 arg, s32 processor_id, + VAddr stack_top, Process* owner_process, + std::function&& thread_start_func, + void* thread_start_parameter) { + auto& kernel = system.Kernel(); + // Check if priority is in ranged. Lowest priority -> highest priority id. + if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) { + LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority); + return ERR_INVALID_THREAD_PRIORITY; + } + + if (processor_id > THREADPROCESSORID_MAX) { + LOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id); + return ERR_INVALID_PROCESSOR_ID; + } + + if (owner_process) { + if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) { + LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); + // TODO (bunnei): Find the correct error code to use here + return RESULT_UNKNOWN; + } + } + + std::shared_ptr thread = std::make_shared(kernel); + + thread->thread_id = kernel.CreateNewThreadID(); + thread->thread_state = ThreadState::Initialized; + thread->entry_point = entry_point; + thread->stack_top = stack_top; + thread->disable_count = 1; + thread->tpidr_el0 = 0; + thread->current_priority = priority; + thread->base_priority = priority; + thread->lock_owner = nullptr; + thread->schedule_count = -1; + thread->last_scheduled_tick = 0; + thread->processor_id = processor_id; + thread->ideal_core = processor_id; + thread->affinity_mask.SetAffinity(processor_id, true); + thread->name = std::move(name); + thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); + thread->owner_process = owner_process; + thread->type = type_flags; + thread->signaled = false; + if ((type_flags & THREADTYPE_IDLE) == 0) { + auto& scheduler = kernel.GlobalSchedulerContext(); + scheduler.AddThread(thread); + } + if (owner_process) { + thread->tls_address = thread->owner_process->CreateTLSRegion(); + thread->owner_process->RegisterThread(thread.get()); + } else { + thread->tls_address = 0; + } + + // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used + // to initialize the context + if ((type_flags & THREADTYPE_HLE) == 0) { + ResetThreadContext32(thread->context_32, static_cast(stack_top), + static_cast(entry_point), static_cast(arg)); + ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); + } + thread->host_context = + std::make_shared(std::move(thread_start_func), thread_start_parameter); + + return MakeResult>(std::move(thread)); +} + +void KThread::SetBasePriority(u32 priority) { + ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, + "Invalid priority value."); + + KScopedSchedulerLock lock(kernel); + + // Change our base priority. + base_priority = priority; + + // Perform a priority restoration. 
+ RestorePriority(kernel, this); +} + +void KThread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) { + signaling_object = object; + signaling_result = result; +} + +VAddr KThread::GetCommandBufferAddress() const { + // Offset from the start of TLS at which the IPC command buffer begins. + constexpr u64 command_header_offset = 0x80; + return GetTLSAddress() + command_header_offset; +} + +void KThread::SetState(ThreadState state) { + KScopedSchedulerLock sl(kernel); + + // Clear debugging state + SetMutexWaitAddressForDebugging({}); + SetWaitReasonForDebugging({}); + + const ThreadState old_state = thread_state; + thread_state = + static_cast((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)); + if (thread_state != old_state) { + KScheduler::OnThreadStateChanged(kernel, this, old_state); + } +} + +void KThread::AddWaiterImpl(KThread* thread) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // Find the right spot to insert the waiter. + auto it = waiter_list.begin(); + while (it != waiter_list.end()) { + if (it->GetPriority() > thread->GetPriority()) { + break; + } + it++; + } + + // Keep track of how many kernel waiters we have. + if (Memory::IsKernelAddressKey(thread->GetAddressKey())) { + ASSERT((num_kernel_waiters++) >= 0); + } + + // Insert the waiter. + waiter_list.insert(it, *thread); + thread->SetLockOwner(this); +} + +void KThread::RemoveWaiterImpl(KThread* thread) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // Keep track of how many kernel waiters we have. + if (Memory::IsKernelAddressKey(thread->GetAddressKey())) { + ASSERT((num_kernel_waiters--) > 0); + } + + // Remove the waiter. + waiter_list.erase(waiter_list.iterator_to(*thread)); + thread->SetLockOwner(nullptr); +} + +void KThread::RestorePriority(KernelCore& kernel, KThread* thread) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + while (true) { + // We want to inherit priority where possible. + s32 new_priority = thread->GetBasePriority(); + if (thread->HasWaiters()) { + new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority()); + } + + // If the priority we would inherit is not different from ours, don't do anything. + if (new_priority == thread->GetPriority()) { + return; + } + + // Ensure we don't violate condition variable red black tree invariants. + if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { + BeforeUpdatePriority(kernel, cv_tree, thread); + } + + // Change the priority. + const s32 old_priority = thread->GetPriority(); + thread->SetPriority(new_priority); + + // Restore the condition variable, if relevant. + if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { + AfterUpdatePriority(kernel, cv_tree, thread); + } + + // Update the scheduler. + KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority); + + // Keep the lock owner up to date. + KThread* lock_owner = thread->GetLockOwner(); + if (lock_owner == nullptr) { + return; + } + + // Update the thread in the lock owner's sorted list, and continue inheriting. 
+ lock_owner->RemoveWaiterImpl(thread); + lock_owner->AddWaiterImpl(thread); + thread = lock_owner; + } +} + +void KThread::AddWaiter(KThread* thread) { + AddWaiterImpl(thread); + RestorePriority(kernel, this); +} + +void KThread::RemoveWaiter(KThread* thread) { + RemoveWaiterImpl(thread); + RestorePriority(kernel, this); +} + +KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + s32 num_waiters{}; + KThread* next_lock_owner{}; + auto it = waiter_list.begin(); + while (it != waiter_list.end()) { + if (it->GetAddressKey() == key) { + KThread* thread = std::addressof(*it); + + // Keep track of how many kernel waiters we have. + if (Memory::IsKernelAddressKey(thread->GetAddressKey())) { + ASSERT((num_kernel_waiters--) > 0); + } + it = waiter_list.erase(it); + + // Update the next lock owner. + if (next_lock_owner == nullptr) { + next_lock_owner = thread; + next_lock_owner->SetLockOwner(nullptr); + } else { + next_lock_owner->AddWaiterImpl(thread); + } + num_waiters++; + } else { + it++; + } + } + + // Do priority updates, if we have a next owner. + if (next_lock_owner) { + RestorePriority(kernel, this); + RestorePriority(kernel, next_lock_owner); + } + + // Return output. + *out_num_waiters = num_waiters; + return next_lock_owner; +} + +ResultCode KThread::SetActivity(ThreadActivity value) { + KScopedSchedulerLock lock(kernel); + + auto sched_status = GetState(); + + if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) { + return ERR_INVALID_STATE; + } + + if (IsTerminationRequested()) { + return RESULT_SUCCESS; + } + + if (value == ThreadActivity::Paused) { + if ((pausing_state & static_cast(ThreadSchedFlags::ThreadPauseFlag)) != 0) { + return ERR_INVALID_STATE; + } + AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag); + } else { + if ((pausing_state & static_cast(ThreadSchedFlags::ThreadPauseFlag)) == 0) { + return ERR_INVALID_STATE; + } + RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag); + } + return RESULT_SUCCESS; +} + +ResultCode KThread::Sleep(s64 nanoseconds) { + Handle event_handle{}; + { + KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); + SetState(ThreadState::Waiting); + SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); + } + + if (event_handle != InvalidHandle) { + auto& time_manager = kernel.TimeManager(); + time_manager.UnscheduleTimeEvent(event_handle); + } + return RESULT_SUCCESS; +} + +void KThread::AddSchedulingFlag(ThreadSchedFlags flag) { + const auto old_state = GetRawState(); + pausing_state |= static_cast(flag); + const auto base_scheduling = GetState(); + thread_state = base_scheduling | static_cast(pausing_state); + KScheduler::OnThreadStateChanged(kernel, this, old_state); +} + +void KThread::RemoveSchedulingFlag(ThreadSchedFlags flag) { + const auto old_state = GetRawState(); + pausing_state &= ~static_cast(flag); + const auto base_scheduling = GetState(); + thread_state = base_scheduling | static_cast(pausing_state); + KScheduler::OnThreadStateChanged(kernel, this, old_state); +} + +ResultCode KThread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { + KScopedSchedulerLock lock(kernel); + const auto HighestSetCore = [](u64 mask, u32 max_cores) { + for (s32 core = static_cast(max_cores - 1); core >= 0; core--) { + if (((mask >> core) & 1) != 0) { + return core; + } + } + return -1; + }; + + const bool use_override = affinity_override_count != 0; + if (new_core == THREADPROCESSORID_DONT_UPDATE) { + 
new_core = use_override ? ideal_core_override : ideal_core; + if ((new_affinity_mask & (1ULL << new_core)) == 0) { + LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}", + new_core, new_affinity_mask); + return ERR_INVALID_COMBINATION; + } + } + if (use_override) { + ideal_core_override = new_core; + } else { + const auto old_affinity_mask = affinity_mask; + affinity_mask.SetAffinityMask(new_affinity_mask); + ideal_core = new_core; + if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) { + const s32 old_core = processor_id; + if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) { + if (static_cast(ideal_core) < 0) { + processor_id = HighestSetCore(affinity_mask.GetAffinityMask(), + Core::Hardware::NUM_CPU_CORES); + } else { + processor_id = ideal_core; + } + } + KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core); + } + } + return RESULT_SUCCESS; +} + +} // namespace Kernel -- cgit v1.2.3 From eea346ba8eed49111d34e2fb1eee8a1ad53c4614 Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 31 Dec 2020 00:46:09 -0800 Subject: hle: kernel: KThread: Remove thread types that do not exist. --- src/core/hle/kernel/k_thread.cpp | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'src/core/hle/kernel/k_thread.cpp') diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 1ec29636c..0f349dad2 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -125,7 +125,7 @@ ResultVal> KThread::Create(Core::System& system, Thread void* thread_start_parameter) { auto& kernel = system.Kernel(); // Check if priority is in ranged. Lowest priority -> highest priority id. - if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) { + if (priority > THREADPRIO_LOWEST) { LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority); return ERR_INVALID_THREAD_PRIORITY; } @@ -164,10 +164,10 @@ ResultVal> KThread::Create(Core::System& system, Thread thread->owner_process = owner_process; thread->type = type_flags; thread->signaled = false; - if ((type_flags & THREADTYPE_IDLE) == 0) { - auto& scheduler = kernel.GlobalSchedulerContext(); - scheduler.AddThread(thread); - } + + auto& scheduler = kernel.GlobalSchedulerContext(); + scheduler.AddThread(thread); + if (owner_process) { thread->tls_address = thread->owner_process->CreateTLSRegion(); thread->owner_process->RegisterThread(thread.get()); @@ -175,13 +175,10 @@ ResultVal> KThread::Create(Core::System& system, Thread thread->tls_address = 0; } - // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used - // to initialize the context - if ((type_flags & THREADTYPE_HLE) == 0) { - ResetThreadContext32(thread->context_32, static_cast(stack_top), - static_cast(entry_point), static_cast(arg)); - ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); - } + ResetThreadContext32(thread->context_32, static_cast(stack_top), + static_cast(entry_point), static_cast(arg)); + ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); + thread->host_context = std::make_shared(std::move(thread_start_func), thread_start_parameter); -- cgit v1.2.3 From 1e55498110800623c63e3ef03bfbff6b6de1c522 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 1 Jan 2021 02:06:06 -0800 Subject: hle: kernel: KThread: Reorganize thread priority defaults. 
--- src/core/hle/kernel/k_thread.cpp | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'src/core/hle/kernel/k_thread.cpp') diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 0f349dad2..518c5d5df 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -8,6 +8,7 @@ #include #include "common/assert.h" +#include "common/common_funcs.h" #include "common/common_types.h" #include "common/fiber.h" #include "common/logging/log.h" @@ -25,6 +26,7 @@ #include "core/hle/kernel/memory/memory_layout.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" +#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/time_manager.h" #include "core/hle/result.h" #include "core/memory.h" @@ -124,11 +126,9 @@ ResultVal> KThread::Create(Core::System& system, Thread std::function&& thread_start_func, void* thread_start_parameter) { auto& kernel = system.Kernel(); - // Check if priority is in ranged. Lowest priority -> highest priority id. - if (priority > THREADPRIO_LOWEST) { - LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority); - return ERR_INVALID_THREAD_PRIORITY; - } + + R_UNLESS(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority, + Svc::ResultInvalidPriority); if (processor_id > THREADPROCESSORID_MAX) { LOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id); @@ -186,8 +186,7 @@ ResultVal> KThread::Create(Core::System& system, Thread } void KThread::SetBasePriority(u32 priority) { - ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, - "Invalid priority value."); + ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority); KScopedSchedulerLock lock(kernel); -- cgit v1.2.3 From 4dbf3f4880cac69db21cc8f18582814dc986c854 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 3 Jan 2021 01:49:18 -0800 Subject: hle: kernel: KThread: Clean up thread priorities. --- src/core/hle/kernel/k_thread.cpp | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'src/core/hle/kernel/k_thread.cpp') diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 518c5d5df..e5be849bb 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -127,14 +127,6 @@ ResultVal> KThread::Create(Core::System& system, Thread void* thread_start_parameter) { auto& kernel = system.Kernel(); - R_UNLESS(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority, - Svc::ResultInvalidPriority); - - if (processor_id > THREADPROCESSORID_MAX) { - LOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id); - return ERR_INVALID_PROCESSOR_ID; - } - if (owner_process) { if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) { LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); @@ -423,7 +415,7 @@ ResultCode KThread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) }; const bool use_override = affinity_override_count != 0; - if (new_core == THREADPROCESSORID_DONT_UPDATE) { + if (new_core == Svc::IdealCoreNoUpdate) { new_core = use_override ? ideal_core_override : ideal_core; if ((new_affinity_mask & (1ULL << new_core)) == 0) { LOG_ERROR(Kernel, "New affinity mask is incorrect! 
new_core={}, new_affinity_mask={}", -- cgit v1.2.3 From cdd14b03e5c8e29bc6cd11bbde0ef726d2f166ce Mon Sep 17 00:00:00 2001 From: bunnei Date: Wed, 20 Jan 2021 13:42:27 -0800 Subject: hle: kernel: Recode implementation of KThread to be more accurate. --- src/core/hle/kernel/k_thread.cpp | 993 +++++++++++++++++++++++++++++++-------- 1 file changed, 796 insertions(+), 197 deletions(-) (limited to 'src/core/hle/kernel/k_thread.cpp') diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index e5be849bb..f021b0550 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -1,4 +1,4 @@ -// Copyright 2014 Citra Emulator Project / PPSSPP Project +// Copyright 2021 yuzu Emulator Project // Licensed under GPLv2 or any later version // Refer to the license.txt file included. @@ -8,10 +8,12 @@ #include #include "common/assert.h" +#include "common/bit_util.h" #include "common/common_funcs.h" #include "common/common_types.h" #include "common/fiber.h" #include "common/logging/log.h" +#include "common/scope_exit.h" #include "common/thread_queue_list.h" #include "core/core.h" #include "core/cpu_manager.h" @@ -22,10 +24,12 @@ #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/memory/memory_layout.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" +#include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/time_manager.h" #include "core/hle/result.h" @@ -36,185 +40,734 @@ #include "core/arm/dynarmic/arm_dynarmic_64.h" #endif -namespace Kernel { +namespace { +static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, + u32 entry_point, u32 arg) { + context = {}; + context.cpu_registers[0] = arg; + context.cpu_registers[15] = entry_point; + context.cpu_registers[13] = stack_top; +} -bool KThread::IsSignaled() const { - return signaled; +static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top, + VAddr entry_point, u64 arg) { + context = {}; + context.cpu_registers[0] = arg; + context.pc = entry_point; + context.sp = stack_top; + // TODO(merry): Perform a hardware test to determine the below value. + context.fpcr = 0; } +} // namespace + +namespace Kernel { -KThread::KThread(KernelCore& kernel) : KSynchronizationObject{kernel} {} +KThread::KThread(KernelCore& kernel) + : KSynchronizationObject{kernel}, activity_pause_lock{kernel} {} KThread::~KThread() = default; -void KThread::Stop() { - { - KScopedSchedulerLock lock(kernel); - SetState(ThreadState::Terminated); - signaled = true; - NotifyAvailable(); - kernel.GlobalHandleTable().Close(global_handle); +ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, + s32 virt_core, Process* owner, ThreadType type) { + // Assert parameters are valid. + ASSERT((type == ThreadType::Main) || + (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority)); + ASSERT((owner != nullptr) || (type != ThreadType::User)); + ASSERT(0 <= virt_core && virt_core < static_cast(Common::BitSize())); + + // Convert the virtual core to a physical core. 
+ const s32 phys_core = Core::Hardware::VirtualToPhysicalCoreMap[virt_core]; + ASSERT(0 <= phys_core && phys_core < static_cast(Core::Hardware::NUM_CPU_CORES)); + + // First, clear the TLS address. + tls_address = {}; + + // Next, assert things based on the type. + switch (type) { + case ThreadType::Main: + ASSERT(arg == 0); + [[fallthrough]]; + case ThreadType::HighPriority: + [[fallthrough]]; + case ThreadType::User: + ASSERT(((owner == nullptr) || + (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); + ASSERT(((owner == nullptr) || + (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask())); + break; + case ThreadType::Kernel: + UNIMPLEMENTED(); + break; + default: + UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast(type)); + break; + } + + // Set the ideal core ID and affinity mask. + virtual_ideal_core_id = virt_core; + physical_ideal_core_id = phys_core; + virtual_affinity_mask = (static_cast(1) << virt_core); + physical_affinity_mask.SetAffinity(phys_core, true); + + // Set the thread state. + thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized; + + // Set TLS address. + tls_address = 0; + + // Set parent and condvar tree. + parent = nullptr; + condvar_tree = nullptr; + + // Set sync booleans. + signaled = false; + termination_requested = false; + wait_cancelled = false; + cancellable = false; + + // Set core ID and wait result. + core_id = phys_core; + wait_result = Svc::ResultNoSynchronizationObject; + + // Set priorities. + priority = prio; + base_priority = prio; + + // Set sync object and waiting lock to null. + synced_object = nullptr; + + // Initialize sleeping queue. + sleeping_queue = nullptr; + + // Set suspend flags. + suspend_request_flags = 0; + suspend_allowed_flags = static_cast(ThreadState::SuspendFlagMask); + + // We're neither debug attached, nor are we nesting our priority inheritance. + debug_attached = false; + priority_inheritance_count = 0; + + // We haven't been scheduled, and we have done no light IPC. + schedule_count = -1; + last_scheduled_tick = 0; + light_ipc_data = nullptr; + + // We're not waiting for a lock, and we haven't disabled migration. + lock_owner = nullptr; + num_core_migration_disables = 0; + + // We have no waiters, but we do have an entrypoint. + num_kernel_waiters = 0; + + // Set our current core id. + current_core_id = phys_core; + + // We haven't released our resource limit hint, and we've spent no time on the cpu. + resource_limit_release_hint = false; + cpu_time = 0; + + // Clear our stack parameters. + std::memset(static_cast(std::addressof(GetStackParameters())), 0, + sizeof(StackParameters)); + + // Setup the TLS, if needed. + if (type == ThreadType::User) { + tls_address = owner->CreateTLSRegion(); + } + + // Set parent, if relevant. + if (owner != nullptr) { + parent = owner; + parent->IncrementThreadCount(); + } - if (owner_process) { - owner_process->UnregisterThread(this); + // Initialize thread context. + ResetThreadContext64(thread_context_64, user_stack_top, func, arg); + ResetThreadContext32(thread_context_32, static_cast(user_stack_top), + static_cast(func), static_cast(arg)); - // Mark the TLS slot in the thread's page as free. - owner_process->FreeTLSRegion(tls_address); + // Setup the stack parameters. + StackParameters& sp = GetStackParameters(); + sp.cur_thread = this; + sp.disable_count = 1; + SetInExceptionHandler(); + + // Set thread ID. + thread_id = kernel.CreateNewThreadID(); + + // We initialized! 
+ initialized = true; + + // Register ourselves with our parent process. + if (parent != nullptr) { + parent->RegisterThread(this); + if (parent->IsSuspended()) { + RequestSuspend(SuspendType::Process); } - has_exited = true; } - global_handle = 0; -} -void KThread::Wakeup() { - KScopedSchedulerLock lock(kernel); - SetState(ThreadState::Runnable); + return RESULT_SUCCESS; } -ResultCode KThread::Start() { - KScopedSchedulerLock lock(kernel); - SetState(ThreadState::Runnable); +ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, + VAddr user_stack_top, s32 prio, s32 core, Process* owner, + ThreadType type) { + // Initialize the thread. + R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); + return RESULT_SUCCESS; } -void KThread::CancelWait() { - KScopedSchedulerLock lock(kernel); - if (GetState() != ThreadState::Waiting || !is_cancellable) { - is_sync_cancelled = true; - return; +void KThread::Finalize() { + // If the thread has an owner process, unregister it. + if (parent != nullptr) { + parent->UnregisterThread(this); + } + + // If the thread has a local region, delete it. + if (tls_address != 0) { + parent->FreeTLSRegion(tls_address); + } + + // Release any waiters. + { + ASSERT(lock_owner == nullptr); + KScopedSchedulerLock sl{kernel}; + + auto it = waiter_list.begin(); + while (it != waiter_list.end()) { + // The thread shouldn't be a kernel waiter. + it->SetLockOwner(nullptr); + it->SetSyncedObject(nullptr, Svc::ResultInvalidState); + it->Wakeup(); + it = waiter_list.erase(it); + } + } + + // Decrement the parent process's thread count. + if (parent != nullptr) { + parent->DecrementThreadCount(); } - // TODO(Blinkhawk): Implement cancel of server session - is_sync_cancelled = false; - SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED); - SetState(ThreadState::Runnable); } -static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, - u32 entry_point, u32 arg) { - context = {}; - context.cpu_registers[0] = arg; - context.cpu_registers[15] = entry_point; - context.cpu_registers[13] = stack_top; +bool KThread::IsSignaled() const { + return signaled; } -static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top, - VAddr entry_point, u64 arg) { - context = {}; - context.cpu_registers[0] = arg; - context.pc = entry_point; - context.sp = stack_top; - // TODO(merry): Perform a hardware test to determine the below value. - context.fpcr = 0; +void KThread::Wakeup() { + KScopedSchedulerLock sl{kernel}; + + if (GetState() == ThreadState::Waiting) { + if (sleeping_queue != nullptr) { + sleeping_queue->WakeupThread(this); + } else { + SetState(ThreadState::Runnable); + } + } } -std::shared_ptr& KThread::GetHostContext() { - return host_context; +void KThread::StartTermination() { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // Release user exception and unpin, if relevant. + if (parent != nullptr) { + parent->ReleaseUserException(this); + if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) { + parent->UnpinCurrentThread(); + } + } + + // Set state to terminated. + SetState(ThreadState::Terminated); + + // Clear the thread's status as running in parent. + if (parent != nullptr) { + parent->ClearRunningThread(this); + } + + // Signal. + signaled = true; + NotifyAvailable(); + + // Clear previous thread in KScheduler. + KScheduler::ClearPreviousThread(kernel, this); + + // Register terminated dpc flag. 
+ RegisterDpc(DpcFlag::Terminated); } -ResultVal> KThread::Create(Core::System& system, ThreadType type_flags, - std::string name, VAddr entry_point, - u32 priority, u64 arg, s32 processor_id, - VAddr stack_top, Process* owner_process) { - std::function init_func = Core::CpuManager::GetGuestThreadStartFunc(); - void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); - return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top, - owner_process, std::move(init_func), init_func_parameter); +void KThread::Pin() { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // Set ourselves as pinned. + GetStackParameters().is_pinned = true; + + // Disable core migration. + ASSERT(num_core_migration_disables == 0); + { + ++num_core_migration_disables; + + // Save our ideal state to restore when we're unpinned. + original_physical_ideal_core_id = physical_ideal_core_id; + original_physical_affinity_mask = physical_affinity_mask; + + // Bind ourselves to this core. + const s32 active_core = GetActiveCore(); + const s32 current_core = GetCurrentCoreId(kernel); + + SetActiveCore(current_core); + physical_ideal_core_id = current_core; + physical_affinity_mask.SetAffinityMask(1ULL << current_core); + + if (active_core != current_core || physical_affinity_mask.GetAffinityMask() != + original_physical_affinity_mask.GetAffinityMask()) { + KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask, + active_core); + } + } + + // Disallow performing thread suspension. + { + // Update our allow flags. + suspend_allowed_flags &= ~(1 << (static_cast(SuspendType::Thread) + + static_cast(ThreadState::SuspendShift))); + + // Update our state. + const ThreadState old_state = thread_state; + thread_state = static_cast(GetSuspendFlags() | + static_cast(old_state & ThreadState::Mask)); + if (thread_state != old_state) { + KScheduler::OnThreadStateChanged(kernel, this, old_state); + } + } + + // TODO(bunnei): Update our SVC access permissions. + ASSERT(parent != nullptr); } -ResultVal> KThread::Create(Core::System& system, ThreadType type_flags, - std::string name, VAddr entry_point, - u32 priority, u64 arg, s32 processor_id, - VAddr stack_top, Process* owner_process, - std::function&& thread_start_func, - void* thread_start_parameter) { - auto& kernel = system.Kernel(); +void KThread::Unpin() { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // Set ourselves as unpinned. + GetStackParameters().is_pinned = false; + + // Enable core migration. + ASSERT(num_core_migration_disables == 1); + { + --num_core_migration_disables; + + // Restore our original state. 
+ const KAffinityMask old_mask = physical_affinity_mask; - if (owner_process) { - if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) { - LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); - // TODO (bunnei): Find the correct error code to use here - return RESULT_UNKNOWN; + physical_ideal_core_id = original_physical_ideal_core_id; + physical_affinity_mask = original_physical_affinity_mask; + + if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { + const s32 active_core = GetActiveCore(); + + if (!physical_affinity_mask.GetAffinity(active_core)) { + if (physical_ideal_core_id >= 0) { + SetActiveCore(physical_ideal_core_id); + } else { + SetActiveCore(static_cast( + Common::BitSize() - 1 - + std::countl_zero(physical_affinity_mask.GetAffinityMask()))); + } + } + KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); } } - std::shared_ptr thread = std::make_shared(kernel); + // Allow performing thread suspension (if termination hasn't been requested). + { + // Update our allow flags. + if (!IsTerminationRequested()) { + suspend_allowed_flags |= (1 << (static_cast(SuspendType::Thread) + + static_cast(ThreadState::SuspendShift))); + } - thread->thread_id = kernel.CreateNewThreadID(); - thread->thread_state = ThreadState::Initialized; - thread->entry_point = entry_point; - thread->stack_top = stack_top; - thread->disable_count = 1; - thread->tpidr_el0 = 0; - thread->current_priority = priority; - thread->base_priority = priority; - thread->lock_owner = nullptr; - thread->schedule_count = -1; - thread->last_scheduled_tick = 0; - thread->processor_id = processor_id; - thread->ideal_core = processor_id; - thread->affinity_mask.SetAffinity(processor_id, true); - thread->name = std::move(name); - thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); - thread->owner_process = owner_process; - thread->type = type_flags; - thread->signaled = false; + // Update our state. + const ThreadState old_state = thread_state; + thread_state = static_cast(GetSuspendFlags() | + static_cast(old_state & ThreadState::Mask)); + if (thread_state != old_state) { + KScheduler::OnThreadStateChanged(kernel, this, old_state); + } + } - auto& scheduler = kernel.GlobalSchedulerContext(); - scheduler.AddThread(thread); + // TODO(bunnei): Update our SVC access permissions. + ASSERT(parent != nullptr); + + // Resume any threads that began waiting on us while we were pinned. + for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) { + if (it->GetState() == ThreadState::Waiting) { + it->SetState(ThreadState::Runnable); + } + } +} + +ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { + KScopedSchedulerLock sl{kernel}; + + // Get the virtual mask. + *out_ideal_core = virtual_ideal_core_id; + *out_affinity_mask = virtual_affinity_mask; + + return RESULT_SUCCESS; +} - if (owner_process) { - thread->tls_address = thread->owner_process->CreateTLSRegion(); - thread->owner_process->RegisterThread(thread.get()); +ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { + KScopedSchedulerLock sl{kernel}; + ASSERT(num_core_migration_disables >= 0); + + // Select between core mask and original core mask. 
+ if (num_core_migration_disables == 0) { + *out_ideal_core = physical_ideal_core_id; + *out_affinity_mask = physical_affinity_mask.GetAffinityMask(); } else { - thread->tls_address = 0; + *out_ideal_core = original_physical_ideal_core_id; + *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); } - ResetThreadContext32(thread->context_32, static_cast(stack_top), - static_cast(entry_point), static_cast(arg)); - ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); + return RESULT_SUCCESS; +} - thread->host_context = - std::make_shared(std::move(thread_start_func), thread_start_parameter); +ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) { + ASSERT(parent != nullptr); + ASSERT(v_affinity_mask != 0); + KScopedLightLock lk{activity_pause_lock}; - return MakeResult>(std::move(thread)); + // Set the core mask. + u64 p_affinity_mask = 0; + { + KScopedSchedulerLock sl{kernel}; + ASSERT(num_core_migration_disables >= 0); + + // If the core id is no-update magic, preserve the ideal core id. + if (core_id == Svc::IdealCoreNoUpdate) { + core_id = virtual_ideal_core_id; + R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, Svc::ResultInvalidCombination); + } + + // Set the virtual core/affinity mask. + virtual_ideal_core_id = core_id; + virtual_affinity_mask = v_affinity_mask; + + // Translate the virtual core to a physical core. + if (core_id >= 0) { + core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id]; + } + + // Translate the virtual affinity mask to a physical one. + while (v_affinity_mask != 0) { + const u64 next = std::countr_zero(v_affinity_mask); + v_affinity_mask &= ~(1ULL << next); + p_affinity_mask |= (1ULL << Core::Hardware::VirtualToPhysicalCoreMap[next]); + } + + // If we haven't disabled migration, perform an affinity change. + if (num_core_migration_disables == 0) { + const KAffinityMask old_mask = physical_affinity_mask; + + // Set our new ideals. + physical_ideal_core_id = core_id; + physical_affinity_mask.SetAffinityMask(p_affinity_mask); + + if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { + const s32 active_core = GetActiveCore(); + + if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) { + const s32 new_core = static_cast( + physical_ideal_core_id >= 0 + ? physical_ideal_core_id + : Common::BitSize() - 1 - + std::countl_zero(physical_affinity_mask.GetAffinityMask())); + SetActiveCore(new_core); + } + KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); + } + } else { + // Otherwise, we edit the original affinity for restoration later. + original_physical_ideal_core_id = core_id; + original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); + } + } + + // Update the pinned waiter list. + { + bool retry_update = false; + bool thread_is_pinned = false; + do { + // Lock the scheduler. + KScopedSchedulerLock sl{kernel}; + + // Don't do any further management if our termination has been requested. + R_SUCCEED_IF(IsTerminationRequested()); + + // By default, we won't need to retry. + retry_update = false; + + // Check if the thread is currently running. + bool thread_is_current = false; + s32 thread_core; + for (thread_core = 0; thread_core < static_cast(Core::Hardware::NUM_CPU_CORES); + ++thread_core) { + if (kernel.Scheduler(thread_core).GetCurrentThread() == this) { + thread_is_current = true; + break; + } + } + + // If the thread is currently running, check whether it's no longer allowed under the + // new mask. 
+ if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) { + // If the thread is pinned, we want to wait until it's not pinned. + if (GetStackParameters().is_pinned) { + // Verify that the current thread isn't terminating. + R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), + Svc::ResultTerminationRequested); + + // Note that the thread was pinned. + thread_is_pinned = true; + + // Wait until the thread isn't pinned any more. + pinned_waiter_list.push_back(GetCurrentThread(kernel)); + GetCurrentThread(kernel).SetState(ThreadState::Waiting); + } else { + // If the thread isn't pinned, release the scheduler lock and retry until it's + // not current. + retry_update = true; + } + } + } while (retry_update); + + // If the thread was pinned, it no longer is, and we should remove the current thread from + // our waiter list. + if (thread_is_pinned) { + // Lock the scheduler. + KScopedSchedulerLock sl{kernel}; + + // Remove from the list. + pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel))); + } + } + + return RESULT_SUCCESS; } -void KThread::SetBasePriority(u32 priority) { - ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority); +void KThread::SetBasePriority(s32 value) { + ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority); - KScopedSchedulerLock lock(kernel); + KScopedSchedulerLock sl{kernel}; // Change our base priority. - base_priority = priority; + base_priority = value; // Perform a priority restoration. RestorePriority(kernel, this); } -void KThread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) { - signaling_object = object; - signaling_result = result; -} +void KThread::RequestSuspend(SuspendType type) { + KScopedSchedulerLock sl{kernel}; -VAddr KThread::GetCommandBufferAddress() const { - // Offset from the start of TLS at which the IPC command buffer begins. - constexpr u64 command_header_offset = 0x80; - return GetTLSAddress() + command_header_offset; + // Note the request in our flags. + suspend_request_flags |= + (1u << (static_cast(ThreadState::SuspendShift) + static_cast(type))); + + // Try to perform the suspend. + TrySuspend(); } -void KThread::SetState(ThreadState state) { - KScopedSchedulerLock sl(kernel); +void KThread::Resume(SuspendType type) { + KScopedSchedulerLock sl{kernel}; - // Clear debugging state - SetMutexWaitAddressForDebugging({}); - SetWaitReasonForDebugging({}); + // Clear the request in our flags. + suspend_request_flags &= + ~(1u << (static_cast(ThreadState::SuspendShift) + static_cast(type))); + // Update our state. const ThreadState old_state = thread_state; - thread_state = - static_cast((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)); + thread_state = static_cast(GetSuspendFlags() | + static_cast(old_state & ThreadState::Mask)); if (thread_state != old_state) { KScheduler::OnThreadStateChanged(kernel, this, old_state); } } +void KThread::WaitCancel() { + KScopedSchedulerLock sl{kernel}; + + // Check if we're waiting and cancellable. + if (GetState() == ThreadState::Waiting && cancellable) { + if (sleeping_queue != nullptr) { + sleeping_queue->WakeupThread(this); + wait_cancelled = true; + } else { + SetSyncedObject(nullptr, Svc::ResultCancelled); + SetState(ThreadState::Runnable); + wait_cancelled = false; + } + } else { + // Otherwise, note that we cancelled a wait. 
+ wait_cancelled = true; + } +} + +void KThread::TrySuspend() { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(IsSuspendRequested()); + + // Ensure that we have no waiters. + if (GetNumKernelWaiters() > 0) { + return; + } + ASSERT(GetNumKernelWaiters() == 0); + + // Perform the suspend. + Suspend(); +} + +void KThread::Suspend() { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(IsSuspendRequested()); + + // Set our suspend flags in state. + const auto old_state = thread_state; + thread_state = static_cast(GetSuspendFlags()) | (old_state & ThreadState::Mask); + + // Note the state change in scheduler. + KScheduler::OnThreadStateChanged(kernel, this, old_state); +} + +void KThread::Continue() { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // Clear our suspend flags in state. + const auto old_state = thread_state; + thread_state = old_state & ThreadState::Mask; + + // Note the state change in scheduler. + KScheduler::OnThreadStateChanged(kernel, this, old_state); +} + +ResultCode KThread::SetActivity(Svc::ThreadActivity activity) { + // Lock ourselves. + KScopedLightLock lk(activity_pause_lock); + + // Set the activity. + { + // Lock the scheduler. + KScopedSchedulerLock sl{kernel}; + + // Verify our state. + const auto cur_state = GetState(); + R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable), + Svc::ResultInvalidState); + + // Either pause or resume. + if (activity == Svc::ThreadActivity::Paused) { + // Verify that we're not suspended. + R_UNLESS(!IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState); + + // Suspend. + RequestSuspend(SuspendType::Thread); + } else { + ASSERT(activity == Svc::ThreadActivity::Runnable); + + // Verify that we're suspended. + R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState); + + // Resume. + Resume(SuspendType::Thread); + } + } + + // If the thread is now paused, update the pinned waiter list. + if (activity == Svc::ThreadActivity::Paused) { + bool thread_is_pinned = false; + bool thread_is_current; + do { + // Lock the scheduler. + KScopedSchedulerLock sl{kernel}; + + // Don't do any further management if our termination has been requested. + R_SUCCEED_IF(IsTerminationRequested()); + + // Check whether the thread is pinned. + if (GetStackParameters().is_pinned) { + // Verify that the current thread isn't terminating. + R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), + Svc::ResultTerminationRequested); + + // Note that the thread was pinned and not current. + thread_is_pinned = true; + thread_is_current = false; + + // Wait until the thread isn't pinned any more. + pinned_waiter_list.push_back(GetCurrentThread(kernel)); + GetCurrentThread(kernel).SetState(ThreadState::Waiting); + } else { + // Check if the thread is currently running. + // If it is, we'll need to retry. + thread_is_current = false; + + for (auto i = 0; i < static_cast(Core::Hardware::NUM_CPU_CORES); ++i) { + if (kernel.Scheduler(i).GetCurrentThread() == this) { + thread_is_current = true; + break; + } + } + } + } while (thread_is_current); + + // If the thread was pinned, it no longer is, and we should remove the current thread from + // our waiter list. + if (thread_is_pinned) { + // Lock the scheduler. + KScopedSchedulerLock sl{kernel}; + + // Remove from the list. 
+ pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel))); + } + } + + return RESULT_SUCCESS; +} + +ResultCode KThread::GetThreadContext3(std::vector& out) { + // Lock ourselves. + KScopedLightLock lk{activity_pause_lock}; + + // Get the context. + { + // Lock the scheduler. + KScopedSchedulerLock sl{kernel}; + + // Verify that we're suspended. + R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState); + + // If we're not terminating, get the thread's user context. + if (!IsTerminationRequested()) { + if (parent->Is64BitProcess()) { + // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. + auto context = GetContext64(); + context.pstate &= 0xFF0FFE20; + + out.resize(sizeof(context)); + std::memcpy(out.data(), &context, sizeof(context)); + } else { + // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. + auto context = GetContext32(); + context.cpsr &= 0xFF0FFE20; + + out.resize(sizeof(context)); + std::memcpy(out.data(), &context, sizeof(context)); + } + } + } + + return RESULT_SUCCESS; +} + void KThread::AddWaiterImpl(KThread* thread) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); @@ -345,104 +898,150 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) { return next_lock_owner; } -ResultCode KThread::SetActivity(ThreadActivity value) { - KScopedSchedulerLock lock(kernel); +ResultCode KThread::Run() { + while (true) { + KScopedSchedulerLock lk{kernel}; - auto sched_status = GetState(); + // If either this thread or the current thread are requesting termination, note it. + R_UNLESS(!IsTerminationRequested(), Svc::ResultTerminationRequested); + R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), + Svc::ResultTerminationRequested); - if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) { - return ERR_INVALID_STATE; - } + // Ensure our thread state is correct. + R_UNLESS(GetState() == ThreadState::Initialized, Svc::ResultInvalidState); - if (IsTerminationRequested()) { + // If the current thread has been asked to suspend, suspend it and retry. + if (GetCurrentThread(kernel).IsSuspended()) { + GetCurrentThread(kernel).Suspend(); + continue; + } + + // If we're not a kernel thread and we've been asked to suspend, suspend ourselves. + if (IsUserThread() && IsSuspended()) { + Suspend(); + } + + // Set our state and finish. + SetState(ThreadState::Runnable); return RESULT_SUCCESS; } +} - if (value == ThreadActivity::Paused) { - if ((pausing_state & static_cast(ThreadSchedFlags::ThreadPauseFlag)) != 0) { - return ERR_INVALID_STATE; - } - AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag); - } else { - if ((pausing_state & static_cast(ThreadSchedFlags::ThreadPauseFlag)) == 0) { - return ERR_INVALID_STATE; - } - RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag); +void KThread::Exit() { + ASSERT(this == GetCurrentThreadPointer(kernel)); + + // Release the thread resource hint from parent. + if (parent != nullptr) { + // TODO(bunnei): Hint that the resource is about to be released. + resource_limit_release_hint = true; + } + + // Perform termination. + { + KScopedSchedulerLock sl{kernel}; + + // Disallow all suspension. + suspend_allowed_flags = 0; + + // Start termination. 
+ StartTermination(); } - return RESULT_SUCCESS; } -ResultCode KThread::Sleep(s64 nanoseconds) { - Handle event_handle{}; +ResultCode KThread::Sleep(s64 timeout) { + ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(this == GetCurrentThreadPointer(kernel)); + ASSERT(timeout > 0); + { - KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); + // Setup the scheduling lock and sleep. + KScopedSchedulerLockAndSleep slp{kernel, this, timeout}; + + // Check if the thread should terminate. + if (IsTerminationRequested()) { + slp.CancelSleep(); + return Svc::ResultTerminationRequested; + } + + // Mark the thread as waiting. SetState(ThreadState::Waiting); SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); } - if (event_handle != InvalidHandle) { - auto& time_manager = kernel.TimeManager(); - time_manager.UnscheduleTimeEvent(event_handle); - } + // The lock/sleep is done. + + // Cancel the timer. + kernel.TimeManager().UnscheduleTimeEvent(this); + return RESULT_SUCCESS; } -void KThread::AddSchedulingFlag(ThreadSchedFlags flag) { - const auto old_state = GetRawState(); - pausing_state |= static_cast(flag); - const auto base_scheduling = GetState(); - thread_state = base_scheduling | static_cast(pausing_state); - KScheduler::OnThreadStateChanged(kernel, this, old_state); +void KThread::SetState(ThreadState state) { + KScopedSchedulerLock sl{kernel}; + + // Clear debugging state + SetMutexWaitAddressForDebugging({}); + SetWaitReasonForDebugging({}); + + const ThreadState old_state = thread_state; + thread_state = + static_cast((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)); + if (thread_state != old_state) { + KScheduler::OnThreadStateChanged(kernel, this, old_state); + } } -void KThread::RemoveSchedulingFlag(ThreadSchedFlags flag) { - const auto old_state = GetRawState(); - pausing_state &= ~static_cast(flag); - const auto base_scheduling = GetState(); - thread_state = base_scheduling | static_cast(pausing_state); - KScheduler::OnThreadStateChanged(kernel, this, old_state); +std::shared_ptr& KThread::GetHostContext() { + return host_context; } -ResultCode KThread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { - KScopedSchedulerLock lock(kernel); - const auto HighestSetCore = [](u64 mask, u32 max_cores) { - for (s32 core = static_cast(max_cores - 1); core >= 0; core--) { - if (((mask >> core) & 1) != 0) { - return core; - } - } - return -1; - }; - - const bool use_override = affinity_override_count != 0; - if (new_core == Svc::IdealCoreNoUpdate) { - new_core = use_override ? ideal_core_override : ideal_core; - if ((new_affinity_mask & (1ULL << new_core)) == 0) { - LOG_ERROR(Kernel, "New affinity mask is incorrect! 
new_core={}, new_affinity_mask={}", - new_core, new_affinity_mask); - return ERR_INVALID_COMBINATION; - } - } - if (use_override) { - ideal_core_override = new_core; - } else { - const auto old_affinity_mask = affinity_mask; - affinity_mask.SetAffinityMask(new_affinity_mask); - ideal_core = new_core; - if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) { - const s32 old_core = processor_id; - if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) { - if (static_cast(ideal_core) < 0) { - processor_id = HighestSetCore(affinity_mask.GetAffinityMask(), - Core::Hardware::NUM_CPU_CORES); - } else { - processor_id = ideal_core; - } - } - KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core); - } +ResultVal> KThread::Create(Core::System& system, ThreadType type_flags, + std::string name, VAddr entry_point, + u32 priority, u64 arg, s32 processor_id, + VAddr stack_top, Process* owner_process) { + std::function init_func = Core::CpuManager::GetGuestThreadStartFunc(); + void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); + return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top, + owner_process, std::move(init_func), init_func_parameter); +} + +ResultVal> KThread::Create(Core::System& system, ThreadType type_flags, + std::string name, VAddr entry_point, + u32 priority, u64 arg, s32 processor_id, + VAddr stack_top, Process* owner_process, + std::function&& thread_start_func, + void* thread_start_parameter) { + auto& kernel = system.Kernel(); + + std::shared_ptr thread = std::make_shared(kernel); + + thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority, processor_id, + owner_process, type_flags); + thread->name = name; + + auto& scheduler = kernel.GlobalSchedulerContext(); + scheduler.AddThread(thread); + + thread->host_context = + std::make_shared(std::move(thread_start_func), thread_start_parameter); + + return MakeResult>(std::move(thread)); +} + +KThread* GetCurrentThreadPointer(KernelCore& kernel) { + if (!kernel.CurrentScheduler()) { + // We are not called from a core thread + return {}; } - return RESULT_SUCCESS; + return kernel.CurrentScheduler()->GetCurrentThread(); +} + +KThread& GetCurrentThread(KernelCore& kernel) { + return *GetCurrentThreadPointer(kernel); +} + +s32 GetCurrentCoreId(KernelCore& kernel) { + return GetCurrentThread(kernel).GetCurrentCore(); } } // namespace Kernel -- cgit v1.2.3 From 0a1449e04b99b2fe49b047f50174a60c2abb568c Mon Sep 17 00:00:00 2001 From: bunnei Date: Wed, 20 Jan 2021 18:10:07 -0800 Subject: kernel: Fix build errors. 
--- src/core/hle/kernel/k_thread.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'src/core/hle/kernel/k_thread.cpp') diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index f021b0550..62ce2fbd5 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -1015,8 +1015,13 @@ ResultVal> KThread::Create(Core::System& system, Thread std::shared_ptr thread = std::make_shared(kernel); - thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority, processor_id, - owner_process, type_flags); + if (const auto result = + thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority, + processor_id, owner_process, type_flags); + result.IsError()) { + return result; + } + thread->name = name; auto& scheduler = kernel.GlobalSchedulerContext(); -- cgit v1.2.3 From 6e953f7f0294d945ba9d6f08350d5dccb0d76075 Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 21 Jan 2021 13:00:16 -0800 Subject: hle: kernel: Allocate a dummy KThread for each host thread, and use it for scheduling. --- src/core/hle/kernel/k_thread.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'src/core/hle/kernel/k_thread.cpp') diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 62ce2fbd5..f57e98047 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -1034,11 +1034,7 @@ ResultVal> KThread::Create(Core::System& system, Thread } KThread* GetCurrentThreadPointer(KernelCore& kernel) { - if (!kernel.CurrentScheduler()) { - // We are not called from a core thread - return {}; - } - return kernel.CurrentScheduler()->GetCurrentThread(); + return kernel.GetCurrentEmuThread(); } KThread& GetCurrentThread(KernelCore& kernel) { -- cgit v1.2.3 From 10738839ad7b9abbcf8ac64c6e58de63a9fbae76 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 24 Jan 2021 22:55:08 -0800 Subject: yuzu: debugger: Ignore HLE threads. --- src/core/hle/kernel/k_thread.cpp | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'src/core/hle/kernel/k_thread.cpp') diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index f57e98047..45ad589d9 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -101,11 +101,12 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast(type)); break; } + thread_type_for_debugging = type; // Set the ideal core ID and affinity mask. virtual_ideal_core_id = virt_core; physical_ideal_core_id = phys_core; - virtual_affinity_mask = (static_cast(1) << virt_core); + virtual_affinity_mask = 1ULL << virt_core; physical_affinity_mask.SetAffinity(phys_core, true); // Set the thread state. @@ -353,7 +354,7 @@ void KThread::Unpin() { // Enable core migration. ASSERT(num_core_migration_disables == 1); { - --num_core_migration_disables; + num_core_migration_disables--; // Restore our original state. const KAffinityMask old_mask = physical_affinity_mask; @@ -494,8 +495,8 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) { // Update the pinned waiter list. { - bool retry_update = false; - bool thread_is_pinned = false; + bool retry_update{}; + bool thread_is_pinned{}; do { // Lock the scheduler. 
             KScopedSchedulerLock sl{kernel};
@@ -507,7 +508,7 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
             retry_update = false;
 
             // Check if the thread is currently running.
-            bool thread_is_current = false;
+            bool thread_is_current{};
             s32 thread_core;
             for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
                  ++thread_core) {
@@ -683,8 +684,8 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
 
     // If the thread is now paused, update the pinned waiter list.
     if (activity == Svc::ThreadActivity::Paused) {
-        bool thread_is_pinned = false;
-        bool thread_is_current;
+        bool thread_is_pinned{};
+        bool thread_is_current{};
         do {
             // Lock the scheduler.
             KScopedSchedulerLock sl{kernel};
-- 
cgit v1.2.3


From e24c6dab93d00676dfb22743534825bb6aa90d7c Mon Sep 17 00:00:00 2001
From: bunnei
Date: Thu, 28 Jan 2021 21:49:47 -0800
Subject: hle: kernel: KThread: Release thread resource on thread exit.

---
 src/core/hle/kernel/k_thread.cpp | 1 +
 1 file changed, 1 insertion(+)

(limited to 'src/core/hle/kernel/k_thread.cpp')

diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 45ad589d9..aa100e139 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -247,6 +247,7 @@ void KThread::Finalize() {
     // Decrement the parent process's thread count.
     if (parent != nullptr) {
         parent->DecrementThreadCount();
+        parent->GetResourceLimit()->Release(ResourceType::Threads, 1);
     }
 }
 
-- 
cgit v1.2.3
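
For readers working through the KThread rewrite above, the most intricate piece is the priority-inheritance walk in KThread::RestorePriority(), which AddWaiter(), RemoveWaiter() and RemoveWaiterByKey() all funnel into. What follows is a minimal standalone sketch of that walk, not yuzu code: it drops the scheduler lock, the intrusive sorted waiter list and the condition-variable tree fix-ups, and the MiniThread and RestorePriority names are invented purely for illustration.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Lower value = higher priority, as in the HOS kernel.
    struct MiniThread {
        int base_priority;
        int priority;
        MiniThread* lock_owner = nullptr;  // thread whose lock we are waiting on
        std::vector<MiniThread*> waiters;  // threads waiting on a lock we hold
    };

    // Re-derive a thread's effective priority from its waiters and walk up the
    // chain of lock owners, mirroring the loop in KThread::RestorePriority().
    void RestorePriority(MiniThread* thread) {
        while (thread != nullptr) {
            // Inherit the best (numerically lowest) waiter priority, if any.
            int new_priority = thread->base_priority;
            for (const MiniThread* waiter : thread->waiters) {
                new_priority = std::min(new_priority, waiter->priority);
            }

            // Nothing changed: the chain above us is already consistent.
            if (new_priority == thread->priority) {
                return;
            }

            // Apply the change and continue with the thread we are blocked on.
            thread->priority = new_priority;
            thread = thread->lock_owner;
        }
    }

    int main() {
        MiniThread low{30, 30};  // lock holder
        MiniThread mid{20, 20};  // waits on low's lock
        MiniThread high{5, 5};   // waits on mid's lock

        mid.lock_owner = &low;
        low.waiters.push_back(&mid);
        high.lock_owner = &mid;
        mid.waiters.push_back(&high);

        // A high-priority waiter appeared on mid's lock; restore the chain.
        RestorePriority(&mid);
        std::printf("mid=%d low=%d\n", mid.priority, low.priority);  // mid=5 low=5
        return 0;
    }

The design point mirrored here is that inheritance only propagates while the effective priority actually changes; as soon as a lock owner already holds an equal or better priority, the loop stops, which bounds the work done under the scheduler lock.

A second recurring pattern in the rewrite is the packing of suspend requests into the thread state word: Pin(), Unpin(), RequestSuspend(), Resume(), Suspend() and Continue() all rebuild thread_state as the active suspend flags OR'd with (old state AND ThreadState::Mask). The sketch below shows only that encoding; the bit positions (SuspendShift = 4 and the mask derived from it) are assumptions made for the demo, since the real definitions live in k_thread.h.

    #include <cstdint>
    #include <cstdio>

    // Illustrative layout only: low bits hold the scheduling state, the bits at
    // SuspendShift and above hold one pending-suspend bit per SuspendType.
    enum class ThreadState : std::uint16_t {
        Initialized = 0,
        Waiting = 1,
        Runnable = 2,
        Terminated = 3,

        SuspendShift = 4,
        Mask = (1 << SuspendShift) - 1,
    };

    enum class SuspendType : std::uint32_t { Process = 0, Thread = 1, Debug = 2 };

    struct MiniThread {
        std::uint16_t raw_state = static_cast<std::uint16_t>(ThreadState::Runnable);
        std::uint32_t suspend_request_flags = 0;
        // All suspend bits are allowed until termination is requested.
        std::uint32_t suspend_allowed_flags =
            0x7u << static_cast<std::uint32_t>(ThreadState::SuspendShift);

        std::uint32_t GetSuspendFlags() const {
            return suspend_allowed_flags & suspend_request_flags;
        }

        void RequestSuspend(SuspendType type) {
            suspend_request_flags |= 1u << (static_cast<std::uint32_t>(ThreadState::SuspendShift) +
                                            static_cast<std::uint32_t>(type));
            UpdateState();
        }

        void Resume(SuspendType type) {
            suspend_request_flags &= ~(1u << (static_cast<std::uint32_t>(ThreadState::SuspendShift) +
                                              static_cast<std::uint32_t>(type)));
            UpdateState();
        }

        void UpdateState() {
            // Keep the low scheduling-state bits, replace the high bits with the
            // currently active suspend flags.
            raw_state = static_cast<std::uint16_t>(
                GetSuspendFlags() | (raw_state & static_cast<std::uint16_t>(ThreadState::Mask)));
        }
    };

    int main() {
        MiniThread t;
        t.RequestSuspend(SuspendType::Thread);
        std::printf("suspended raw_state = 0x%X\n", t.raw_state);  // 0x22
        t.Resume(SuspendType::Thread);
        std::printf("resumed raw_state   = 0x%X\n", t.raw_state);  // 0x2
        return 0;
    }

Keeping the pending-suspend bits above the scheduling state lets the code detect with a single comparison whether anything the scheduler cares about changed, which is how Resume(), Pin() and Unpin() in the patches above decide whether to call KScheduler::OnThreadStateChanged().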