| author | TSR Berry <20988865+TSRBerry@users.noreply.github.com> | 2023-04-08 01:22:00 +0200 |
|---|---|---|
| committer | Mary <thog@protonmail.com> | 2023-04-27 23:51:14 +0200 |
| commit | cee712105850ac3385cd0091a923438167433f9f (patch) | |
| tree | 4a5274b21d8b7f938c0d0ce18736d3f2993b11b1 /src/Ryujinx.HLE/HOS/Kernel/Threading | |
| parent | cd124bda587ef09668a971fa1cac1c3f0cfc9f21 (diff) | |
Move solution and projects to src
Diffstat (limited to 'src/Ryujinx.HLE/HOS/Kernel/Threading')
15 files changed, 3427 insertions, 0 deletions
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/ArbitrationType.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/ArbitrationType.cs
new file mode 100644
index 00000000..89c1bf1f
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/ArbitrationType.cs
@@ -0,0 +1,9 @@
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+    enum ArbitrationType
+    {
+        WaitIfLessThan = 0,
+        DecrementAndWaitIfLessThan = 1,
+        WaitIfEqual = 2
+    }
+}
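As a sketch of how these values are consumed: a WaitForAddress-style supervisor call would dispatch on ArbitrationType roughly as below. The dispatch is illustrative only; the KAddressArbiter methods referenced are added later in this diff, and the InvalidEnumValue error name is an assumption, not something shown here.

    // Illustrative dispatch, not part of this commit.
    static Result WaitForAddress(KAddressArbiter arbiter, ulong address, ArbitrationType type, int value, long timeout)
    {
        return type switch
        {
            ArbitrationType.WaitIfLessThan             => arbiter.WaitForAddressIfLessThan(address, value, false, timeout),
            ArbitrationType.DecrementAndWaitIfLessThan => arbiter.WaitForAddressIfLessThan(address, value, true, timeout),
            ArbitrationType.WaitIfEqual                => arbiter.WaitForAddressIfEqual(address, value, timeout),
            _ => KernelResult.InvalidEnumValue, // assumed error code
        };
    }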
\ No newline at end of file diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KAddressArbiter.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KAddressArbiter.cs new file mode 100644 index 00000000..74867b44 --- /dev/null +++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KAddressArbiter.cs @@ -0,0 +1,581 @@ +using Ryujinx.HLE.HOS.Kernel.Common; +using Ryujinx.HLE.HOS.Kernel.Process; +using Ryujinx.Horizon.Common; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; + +namespace Ryujinx.HLE.HOS.Kernel.Threading +{ + class KAddressArbiter + { + private const int HasListenersMask = 0x40000000; + + private readonly KernelContext _context; + + private readonly List<KThread> _condVarThreads; + private readonly List<KThread> _arbiterThreads; + + public KAddressArbiter(KernelContext context) + { + _context = context; + + _condVarThreads = new List<KThread>(); + _arbiterThreads = new List<KThread>(); + } + + public Result ArbitrateLock(int ownerHandle, ulong mutexAddress, int requesterHandle) + { + KThread currentThread = KernelStatic.GetCurrentThread(); + + _context.CriticalSection.Enter(); + + if (currentThread.TerminationRequested) + { + _context.CriticalSection.Leave(); + + return KernelResult.ThreadTerminating; + } + + currentThread.SignaledObj = null; + currentThread.ObjSyncResult = Result.Success; + + KProcess currentProcess = KernelStatic.GetCurrentProcess(); + + if (!KernelTransfer.UserToKernel(out int mutexValue, mutexAddress)) + { + _context.CriticalSection.Leave(); + + return KernelResult.InvalidMemState; + } + + if (mutexValue != (ownerHandle | HasListenersMask)) + { + _context.CriticalSection.Leave(); + + return Result.Success; + } + + KThread mutexOwner = currentProcess.HandleTable.GetObject<KThread>(ownerHandle); + + if (mutexOwner == null) + { + _context.CriticalSection.Leave(); + + return KernelResult.InvalidHandle; + } + + currentThread.MutexAddress = mutexAddress; + currentThread.ThreadHandleForUserMutex = requesterHandle; + + mutexOwner.AddMutexWaiter(currentThread); + + currentThread.Reschedule(ThreadSchedState.Paused); + + _context.CriticalSection.Leave(); + _context.CriticalSection.Enter(); + + if (currentThread.MutexOwner != null) + { + currentThread.MutexOwner.RemoveMutexWaiter(currentThread); + } + + _context.CriticalSection.Leave(); + + return currentThread.ObjSyncResult; + } + + public Result ArbitrateUnlock(ulong mutexAddress) + { + _context.CriticalSection.Enter(); + + KThread currentThread = KernelStatic.GetCurrentThread(); + + (int mutexValue, KThread newOwnerThread) = MutexUnlock(currentThread, mutexAddress); + + Result result = Result.Success; + + if (!KernelTransfer.KernelToUser(mutexAddress, mutexValue)) + { + result = KernelResult.InvalidMemState; + } + + if (result != Result.Success && newOwnerThread != null) + { + newOwnerThread.SignaledObj = null; + newOwnerThread.ObjSyncResult = result; + } + + _context.CriticalSection.Leave(); + + return result; + } + + public Result WaitProcessWideKeyAtomic(ulong mutexAddress, ulong condVarAddress, int threadHandle, long timeout) + { + _context.CriticalSection.Enter(); + + KThread currentThread = KernelStatic.GetCurrentThread(); + + currentThread.SignaledObj = null; + currentThread.ObjSyncResult = KernelResult.TimedOut; + + if (currentThread.TerminationRequested) + { + _context.CriticalSection.Leave(); + + return KernelResult.ThreadTerminating; + } + + (int mutexValue, _) = MutexUnlock(currentThread, mutexAddress); + + KernelTransfer.KernelToUser(condVarAddress, 1); + + if 
(!KernelTransfer.KernelToUser(mutexAddress, mutexValue)) + { + _context.CriticalSection.Leave(); + + return KernelResult.InvalidMemState; + } + + currentThread.MutexAddress = mutexAddress; + currentThread.ThreadHandleForUserMutex = threadHandle; + currentThread.CondVarAddress = condVarAddress; + + _condVarThreads.Add(currentThread); + + if (timeout != 0) + { + currentThread.Reschedule(ThreadSchedState.Paused); + + if (timeout > 0) + { + _context.TimeManager.ScheduleFutureInvocation(currentThread, timeout); + } + } + + _context.CriticalSection.Leave(); + + if (timeout > 0) + { + _context.TimeManager.UnscheduleFutureInvocation(currentThread); + } + + _context.CriticalSection.Enter(); + + if (currentThread.MutexOwner != null) + { + currentThread.MutexOwner.RemoveMutexWaiter(currentThread); + } + + _condVarThreads.Remove(currentThread); + + _context.CriticalSection.Leave(); + + return currentThread.ObjSyncResult; + } + + private (int, KThread) MutexUnlock(KThread currentThread, ulong mutexAddress) + { + KThread newOwnerThread = currentThread.RelinquishMutex(mutexAddress, out int count); + + int mutexValue = 0; + + if (newOwnerThread != null) + { + mutexValue = newOwnerThread.ThreadHandleForUserMutex; + + if (count >= 2) + { + mutexValue |= HasListenersMask; + } + + newOwnerThread.SignaledObj = null; + newOwnerThread.ObjSyncResult = Result.Success; + + newOwnerThread.ReleaseAndResume(); + } + + return (mutexValue, newOwnerThread); + } + + public void SignalProcessWideKey(ulong address, int count) + { + _context.CriticalSection.Enter(); + + WakeThreads(_condVarThreads, count, TryAcquireMutex, x => x.CondVarAddress == address); + + if (!_condVarThreads.Any(x => x.CondVarAddress == address)) + { + KernelTransfer.KernelToUser(address, 0); + } + + _context.CriticalSection.Leave(); + } + + private static void TryAcquireMutex(KThread requester) + { + ulong address = requester.MutexAddress; + + KProcess currentProcess = KernelStatic.GetCurrentProcess(); + + if (!currentProcess.CpuMemory.IsMapped(address)) + { + // Invalid address. + requester.SignaledObj = null; + requester.ObjSyncResult = KernelResult.InvalidMemState; + + return; + } + + ref int mutexRef = ref currentProcess.CpuMemory.GetRef<int>(address); + + int mutexValue, newMutexValue; + + do + { + mutexValue = mutexRef; + + if (mutexValue != 0) + { + // Update value to indicate there is a mutex waiter now. + newMutexValue = mutexValue | HasListenersMask; + } + else + { + // No thread owning the mutex, assign to requesting thread. + newMutexValue = requester.ThreadHandleForUserMutex; + } + } + while (Interlocked.CompareExchange(ref mutexRef, newMutexValue, mutexValue) != mutexValue); + + if (mutexValue == 0) + { + // We now own the mutex. + requester.SignaledObj = null; + requester.ObjSyncResult = Result.Success; + + requester.ReleaseAndResume(); + + return; + } + + mutexValue &= ~HasListenersMask; + + KThread mutexOwner = currentProcess.HandleTable.GetObject<KThread>(mutexValue); + + if (mutexOwner != null) + { + // Mutex already belongs to another thread, wait for it. + mutexOwner.AddMutexWaiter(requester); + } + else + { + // Invalid mutex owner. 
+ requester.SignaledObj = null; + requester.ObjSyncResult = KernelResult.InvalidHandle; + + requester.ReleaseAndResume(); + } + } + + public Result WaitForAddressIfEqual(ulong address, int value, long timeout) + { + KThread currentThread = KernelStatic.GetCurrentThread(); + + _context.CriticalSection.Enter(); + + if (currentThread.TerminationRequested) + { + _context.CriticalSection.Leave(); + + return KernelResult.ThreadTerminating; + } + + currentThread.SignaledObj = null; + currentThread.ObjSyncResult = KernelResult.TimedOut; + + if (!KernelTransfer.UserToKernel(out int currentValue, address)) + { + _context.CriticalSection.Leave(); + + return KernelResult.InvalidMemState; + } + + if (currentValue == value) + { + if (timeout == 0) + { + _context.CriticalSection.Leave(); + + return KernelResult.TimedOut; + } + + currentThread.MutexAddress = address; + currentThread.WaitingInArbitration = true; + + _arbiterThreads.Add(currentThread); + + currentThread.Reschedule(ThreadSchedState.Paused); + + if (timeout > 0) + { + _context.TimeManager.ScheduleFutureInvocation(currentThread, timeout); + } + + _context.CriticalSection.Leave(); + + if (timeout > 0) + { + _context.TimeManager.UnscheduleFutureInvocation(currentThread); + } + + _context.CriticalSection.Enter(); + + if (currentThread.WaitingInArbitration) + { + _arbiterThreads.Remove(currentThread); + + currentThread.WaitingInArbitration = false; + } + + _context.CriticalSection.Leave(); + + return currentThread.ObjSyncResult; + } + + _context.CriticalSection.Leave(); + + return KernelResult.InvalidState; + } + + public Result WaitForAddressIfLessThan(ulong address, int value, bool shouldDecrement, long timeout) + { + KThread currentThread = KernelStatic.GetCurrentThread(); + + _context.CriticalSection.Enter(); + + if (currentThread.TerminationRequested) + { + _context.CriticalSection.Leave(); + + return KernelResult.ThreadTerminating; + } + + currentThread.SignaledObj = null; + currentThread.ObjSyncResult = KernelResult.TimedOut; + + KProcess currentProcess = KernelStatic.GetCurrentProcess(); + + if (!KernelTransfer.UserToKernel(out int currentValue, address)) + { + _context.CriticalSection.Leave(); + + return KernelResult.InvalidMemState; + } + + if (shouldDecrement) + { + currentValue = Interlocked.Decrement(ref currentProcess.CpuMemory.GetRef<int>(address)) + 1; + } + + if (currentValue < value) + { + if (timeout == 0) + { + _context.CriticalSection.Leave(); + + return KernelResult.TimedOut; + } + + currentThread.MutexAddress = address; + currentThread.WaitingInArbitration = true; + + _arbiterThreads.Add(currentThread); + + currentThread.Reschedule(ThreadSchedState.Paused); + + if (timeout > 0) + { + _context.TimeManager.ScheduleFutureInvocation(currentThread, timeout); + } + + _context.CriticalSection.Leave(); + + if (timeout > 0) + { + _context.TimeManager.UnscheduleFutureInvocation(currentThread); + } + + _context.CriticalSection.Enter(); + + if (currentThread.WaitingInArbitration) + { + _arbiterThreads.Remove(currentThread); + + currentThread.WaitingInArbitration = false; + } + + _context.CriticalSection.Leave(); + + return currentThread.ObjSyncResult; + } + + _context.CriticalSection.Leave(); + + return KernelResult.InvalidState; + } + + public Result Signal(ulong address, int count) + { + _context.CriticalSection.Enter(); + + WakeArbiterThreads(address, count); + + _context.CriticalSection.Leave(); + + return Result.Success; + } + + public Result SignalAndIncrementIfEqual(ulong address, int value, int count) + { + 
_context.CriticalSection.Enter(); + + KProcess currentProcess = KernelStatic.GetCurrentProcess(); + + if (!currentProcess.CpuMemory.IsMapped(address)) + { + _context.CriticalSection.Leave(); + + return KernelResult.InvalidMemState; + } + + ref int valueRef = ref currentProcess.CpuMemory.GetRef<int>(address); + + int currentValue; + + do + { + currentValue = valueRef; + + if (currentValue != value) + { + _context.CriticalSection.Leave(); + + return KernelResult.InvalidState; + } + } + while (Interlocked.CompareExchange(ref valueRef, currentValue + 1, currentValue) != currentValue); + + WakeArbiterThreads(address, count); + + _context.CriticalSection.Leave(); + + return Result.Success; + } + + public Result SignalAndModifyIfEqual(ulong address, int value, int count) + { + _context.CriticalSection.Enter(); + + int addend; + + // The value is decremented if the number of threads waiting is less + // or equal to the Count of threads to be signaled, or Count is zero + // or negative. It is incremented if there are no threads waiting. + int waitingCount = 0; + + foreach (KThread thread in _arbiterThreads.Where(x => x.MutexAddress == address)) + { + if (++waitingCount >= count) + { + break; + } + } + + if (waitingCount > 0) + { + if (count <= 0) + { + addend = -2; + } + else if (waitingCount < count) + { + addend = -1; + } + else + { + addend = 0; + } + } + else + { + addend = 1; + } + + KProcess currentProcess = KernelStatic.GetCurrentProcess(); + + if (!currentProcess.CpuMemory.IsMapped(address)) + { + _context.CriticalSection.Leave(); + + return KernelResult.InvalidMemState; + } + + ref int valueRef = ref currentProcess.CpuMemory.GetRef<int>(address); + + int currentValue; + + do + { + currentValue = valueRef; + + if (currentValue != value) + { + _context.CriticalSection.Leave(); + + return KernelResult.InvalidState; + } + } + while (Interlocked.CompareExchange(ref valueRef, currentValue + addend, currentValue) != currentValue); + + WakeArbiterThreads(address, count); + + _context.CriticalSection.Leave(); + + return Result.Success; + } + + private void WakeArbiterThreads(ulong address, int count) + { + static void RemoveArbiterThread(KThread thread) + { + thread.SignaledObj = null; + thread.ObjSyncResult = Result.Success; + + thread.ReleaseAndResume(); + + thread.WaitingInArbitration = false; + } + + WakeThreads(_arbiterThreads, count, RemoveArbiterThread, x => x.MutexAddress == address); + } + + private static void WakeThreads( + List<KThread> threads, + int count, + Action<KThread> removeCallback, + Func<KThread, bool> predicate) + { + var candidates = threads.Where(predicate).OrderBy(x => x.DynamicPriority); + var toSignal = (count > 0 ? 
candidates.Take(count) : candidates).ToArray();
+
+            foreach (KThread thread in toSignal)
+            {
+                removeCallback(thread);
+                threads.Remove(thread);
+            }
+        }
+    }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KConditionVariable.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KConditionVariable.cs
new file mode 100644
index 00000000..891e632f
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KConditionVariable.cs
@@ -0,0 +1,70 @@
+using System.Collections.Generic;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+    static class KConditionVariable
+    {
+        public static void Wait(KernelContext context, LinkedList<KThread> threadList, object mutex, long timeout)
+        {
+            KThread currentThread = KernelStatic.GetCurrentThread();
+
+            context.CriticalSection.Enter();
+
+            Monitor.Exit(mutex);
+
+            currentThread.Withholder = threadList;
+
+            currentThread.Reschedule(ThreadSchedState.Paused);
+
+            currentThread.WithholderNode = threadList.AddLast(currentThread);
+
+            if (currentThread.TerminationRequested)
+            {
+                threadList.Remove(currentThread.WithholderNode);
+
+                currentThread.Reschedule(ThreadSchedState.Running);
+
+                currentThread.Withholder = null;
+
+                context.CriticalSection.Leave();
+            }
+            else
+            {
+                if (timeout > 0)
+                {
+                    context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
+                }
+
+                context.CriticalSection.Leave();
+
+                if (timeout > 0)
+                {
+                    context.TimeManager.UnscheduleFutureInvocation(currentThread);
+                }
+            }
+
+            Monitor.Enter(mutex);
+        }
+
+        public static void NotifyAll(KernelContext context, LinkedList<KThread> threadList)
+        {
+            context.CriticalSection.Enter();
+
+            LinkedListNode<KThread> node = threadList.First;
+
+            for (; node != null; node = threadList.First)
+            {
+                KThread thread = node.Value;
+
+                threadList.Remove(thread.WithholderNode);
+
+                thread.Withholder = null;
+
+                thread.Reschedule(ThreadSchedState.Running);
+            }
+
+            context.CriticalSection.Leave();
+        }
+    }
+}
\ No newline at end of file
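The user-space mutex word that KAddressArbiter manipulates above packs two things into 32 bits: the owner thread's handle in the low bits, and bit 30 (HasListenersMask = 0x40000000) marking that at least one thread is waiting. A minimal decoder, for illustration only:

    // Not part of this commit: mirrors the checks in ArbitrateLock and TryAcquireMutex.
    const int HasListenersMask = 0x40000000;

    static (int OwnerHandle, bool HasWaiters) DecodeMutexValue(int mutexValue)
    {
        return (mutexValue & ~HasListenersMask, (mutexValue & HasListenersMask) != 0);
    }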
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KCriticalSection.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KCriticalSection.cs
new file mode 100644
index 00000000..1d61f2f0
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KCriticalSection.cs
@@ -0,0 +1,64 @@
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+    class KCriticalSection
+    {
+        private readonly KernelContext _context;
+        private readonly object _lock;
+        private int _recursionCount;
+
+        public object Lock => _lock;
+
+        public KCriticalSection(KernelContext context)
+        {
+            _context = context;
+            _lock = new object();
+        }
+
+        public void Enter()
+        {
+            Monitor.Enter(_lock);
+
+            _recursionCount++;
+        }
+
+        public void Leave()
+        {
+            if (_recursionCount == 0)
+            {
+                return;
+            }
+
+            if (--_recursionCount == 0)
+            {
+                ulong scheduledCoresMask = KScheduler.SelectThreads(_context);
+
+                Monitor.Exit(_lock);
+
+                KThread currentThread = KernelStatic.GetCurrentThread();
+                bool isCurrentThreadSchedulable = currentThread != null && currentThread.IsSchedulable;
+                if (isCurrentThreadSchedulable)
+                {
+                    KScheduler.EnableScheduling(_context, scheduledCoresMask);
+                }
+                else
+                {
+                    KScheduler.EnableSchedulingFromForeignThread(_context, scheduledCoresMask);
+
+                    // If the thread exists but is not schedulable, we still want to suspend
+                    // it if it's not runnable. That allows the kernel to still block HLE threads
+                    // even if they are not scheduled on guest cores.
+                    if (currentThread != null && !currentThread.IsSchedulable && currentThread.Context.Running)
+                    {
+                        currentThread.SchedulerWaitEvent.WaitOne();
+                    }
+                }
+            }
+            else
+            {
+                Monitor.Exit(_lock);
+            }
+        }
+    }
+}
\ No newline at end of file
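Since the recursion count only returns to zero on the outermost Leave, nested Enter/Leave pairs are cheap and thread reselection runs once. A hypothetical caller, for illustration:

    // Only the outermost Leave() runs KScheduler.SelectThreads and may context switch.
    context.CriticalSection.Enter();
    context.CriticalSection.Enter(); // nested: just increments _recursionCount
    context.CriticalSection.Leave(); // nested: no scheduling yet
    context.CriticalSection.Leave(); // outermost: cores are rescheduled here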
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KEvent.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KEvent.cs
new file mode 100644
index 00000000..da3e217b
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KEvent.cs
@@ -0,0 +1,14 @@
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+    class KEvent
+    {
+        public KReadableEvent ReadableEvent { get; private set; }
+        public KWritableEvent WritableEvent { get; private set; }
+
+        public KEvent(KernelContext context)
+        {
+            ReadableEvent = new KReadableEvent(context, this);
+            WritableEvent = new KWritableEvent(context, this);
+        }
+    }
+}
\ No newline at end of file
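KEvent just bundles the two halves of a Horizon event object. The intended flow, sketched as comments because KWritableEvent is not part of this diff (its Signal is assumed to forward to ReadableEvent.Signal()):

    // KEvent evnt = new KEvent(kernelContext);
    // evnt.WritableEvent.Signal();          // service side raises the event
    // evnt.ReadableEvent.ClearIfSignaled(); // guest side consumes and resets it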
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KPriorityQueue.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KPriorityQueue.cs
new file mode 100644
index 00000000..14fba704
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KPriorityQueue.cs
@@ -0,0 +1,286 @@
+using System.Collections.Generic;
+using System.Numerics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+    class KPriorityQueue
+    {
+        private readonly LinkedList<KThread>[][] _scheduledThreadsPerPrioPerCore;
+        private readonly LinkedList<KThread>[][] _suggestedThreadsPerPrioPerCore;
+
+        private readonly long[] _scheduledPrioritiesPerCore;
+        private readonly long[] _suggestedPrioritiesPerCore;
+
+        public KPriorityQueue()
+        {
+            _suggestedThreadsPerPrioPerCore = new LinkedList<KThread>[KScheduler.PrioritiesCount][];
+            _scheduledThreadsPerPrioPerCore = new LinkedList<KThread>[KScheduler.PrioritiesCount][];
+
+            for (int prio = 0; prio < KScheduler.PrioritiesCount; prio++)
+            {
+                _suggestedThreadsPerPrioPerCore[prio] = new LinkedList<KThread>[KScheduler.CpuCoresCount];
+                _scheduledThreadsPerPrioPerCore[prio] = new LinkedList<KThread>[KScheduler.CpuCoresCount];
+
+                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
+                {
+                    _suggestedThreadsPerPrioPerCore[prio][core] = new LinkedList<KThread>();
+                    _scheduledThreadsPerPrioPerCore[prio][core] = new LinkedList<KThread>();
+                }
+            }
+
+            _scheduledPrioritiesPerCore = new long[KScheduler.CpuCoresCount];
+            _suggestedPrioritiesPerCore = new long[KScheduler.CpuCoresCount];
+        }
+
+        public readonly ref struct KThreadEnumerable
+        {
+            readonly LinkedList<KThread>[][] _listPerPrioPerCore;
+            readonly long[] _prios;
+            readonly int _core;
+
+            public KThreadEnumerable(LinkedList<KThread>[][] listPerPrioPerCore, long[] prios, int core)
+            {
+                _listPerPrioPerCore = listPerPrioPerCore;
+                _prios = prios;
+                _core = core;
+            }
+
+            public Enumerator GetEnumerator()
+            {
+                return new Enumerator(_listPerPrioPerCore, _prios, _core);
+            }
+
+            public ref struct Enumerator
+            {
+                private readonly LinkedList<KThread>[][] _listPerPrioPerCore;
+                private readonly int _core;
+                private long _prioMask;
+                private int _prio;
+                private LinkedList<KThread> _list;
+                private LinkedListNode<KThread> _node;
+
+                public Enumerator(LinkedList<KThread>[][] listPerPrioPerCore, long[] prios, int core)
+                {
+                    _listPerPrioPerCore = listPerPrioPerCore;
+                    _core = core;
+                    _prioMask = prios[core];
+                    _prio = BitOperations.TrailingZeroCount(_prioMask);
+                    _prioMask &= ~(1L << _prio);
+                }
+
+                public KThread Current => _node?.Value;
+
+                public bool MoveNext()
+                {
+                    _node = _node?.Next;
+
+                    if (_node == null)
+                    {
+                        if (!MoveNextListAndFirstNode())
+                        {
+                            return false;
+                        }
+                    }
+
+                    return _node != null;
+                }
+
+                private bool MoveNextListAndFirstNode()
+                {
+                    if (_prio < KScheduler.PrioritiesCount)
+                    {
+                        _list = _listPerPrioPerCore[_prio][_core];
+
+                        _node = _list.First;
+
+                        _prio = BitOperations.TrailingZeroCount(_prioMask);
+
+                        _prioMask &= ~(1L << _prio);
+
+                        return true;
+                    }
+                    else
+                    {
+                        _list = null;
+                        _node = null;
+                        return false;
+                    }
+                }
+            }
+        }
+
+        public KThreadEnumerable ScheduledThreads(int core)
+        {
+            return new KThreadEnumerable(_scheduledThreadsPerPrioPerCore, _scheduledPrioritiesPerCore, core);
+        }
+
+        public KThreadEnumerable SuggestedThreads(int core)
+        {
+            return new KThreadEnumerable(_suggestedThreadsPerPrioPerCore, _suggestedPrioritiesPerCore, core);
+        }
+
+        public KThread ScheduledThreadsFirstOrDefault(int core)
+        {
+            return ScheduledThreadsElementAtOrDefault(core, 0);
+        }
+
+        public KThread ScheduledThreadsElementAtOrDefault(int core, int index)
+        {
+            int currentIndex = 0;
+            foreach (var scheduledThread in ScheduledThreads(core))
+            {
+                if (currentIndex == index)
+                {
+                    return scheduledThread;
+                }
+                else
+                {
+                    currentIndex++;
+                }
+            }
+
+            return null;
+        }
+
+        public KThread ScheduledThreadsWithDynamicPriorityFirstOrDefault(int core, int dynamicPriority)
+        {
+            foreach (var scheduledThread in ScheduledThreads(core))
+            {
+                if (scheduledThread.DynamicPriority == dynamicPriority)
+                {
+                    return scheduledThread;
+                }
+            }
+
+            return null;
+        }
+
+        public bool HasScheduledThreads(int core)
+        {
+            return ScheduledThreadsFirstOrDefault(core) != null;
+        }
+
+        public void TransferToCore(int prio, int dstCore, KThread thread)
+        {
+            int srcCore = thread.ActiveCore;
+            if (srcCore == dstCore)
+            {
+                return;
+            }
+
+            thread.ActiveCore = dstCore;
+
+            if (srcCore >= 0)
+            {
+                Unschedule(prio, srcCore, thread);
+            }
+
+            if (dstCore >= 0)
+            {
+                Unsuggest(prio, dstCore, thread);
+                Schedule(prio, dstCore, thread);
+            }
+
+            if (srcCore >= 0)
+            {
+                Suggest(prio, srcCore, thread);
+            }
+        }
+
+        public void Suggest(int prio, int core, KThread thread)
+        {
+            if (prio >= KScheduler.PrioritiesCount)
+            {
+                return;
+            }
+
+            thread.SiblingsPerCore[core] = SuggestedQueue(prio, core).AddFirst(thread);
+
+            _suggestedPrioritiesPerCore[core] |= 1L << prio;
+        }
+
+        public void Unsuggest(int prio, int core, KThread thread)
+        {
+            if (prio >= KScheduler.PrioritiesCount)
+            {
+                return;
+            }
+
+            LinkedList<KThread> queue = SuggestedQueue(prio, core);
+
+            queue.Remove(thread.SiblingsPerCore[core]);
+
+            if (queue.First == null)
+            {
+                _suggestedPrioritiesPerCore[core] &= ~(1L << prio);
+            }
+        }
+
+        public void Schedule(int prio, int core, KThread thread)
+        {
+            if (prio >= KScheduler.PrioritiesCount)
+            {
+                return;
+            }
+
+            thread.SiblingsPerCore[core] = ScheduledQueue(prio, core).AddLast(thread);
+
+            _scheduledPrioritiesPerCore[core] |= 1L << prio;
+        }
+
+        public void SchedulePrepend(int prio, int core, KThread thread)
+        {
+            if (prio >= KScheduler.PrioritiesCount)
+            {
+                return;
+            }
+
+            thread.SiblingsPerCore[core] = ScheduledQueue(prio, core).AddFirst(thread);
+
+            _scheduledPrioritiesPerCore[core] |= 1L << prio;
+        }
+
+        public KThread Reschedule(int prio, int core, KThread thread)
+        {
+            if (prio >= KScheduler.PrioritiesCount)
+            {
+                return null;
+            }
+
+            LinkedList<KThread> queue = ScheduledQueue(prio, core);
+
+            queue.Remove(thread.SiblingsPerCore[core]);
+
+            thread.SiblingsPerCore[core] = queue.AddLast(thread);
+
+            return queue.First.Value;
+        }
+
+        public void Unschedule(int prio, int core, KThread thread)
+        {
+            if (prio >= KScheduler.PrioritiesCount)
+            {
+                return;
+            }
+
+            LinkedList<KThread> queue = ScheduledQueue(prio, core);
+
+            queue.Remove(thread.SiblingsPerCore[core]);
+
+            if (queue.First == null)
+            {
+                _scheduledPrioritiesPerCore[core] &= ~(1L << prio);
+            }
+        }
+
+        private LinkedList<KThread> SuggestedQueue(int prio, int core)
+        {
+            return _suggestedThreadsPerPrioPerCore[prio][core];
+        }
+
+        private LinkedList<KThread> ScheduledQueue(int prio, int core)
+        {
+            return _scheduledThreadsPerPrioPerCore[prio][core];
+        }
+    }
+}
\ No newline at end of file
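The enumerator above avoids scanning all 64 priority lists by keeping a per-core bitmask of non-empty levels; the lowest set bit is the highest (numerically smallest) populated priority. A standalone sketch of that walk, assuming using System.Collections.Generic and System.Numerics:

    // Yields populated priority levels in dispatch order, as KThreadEnumerable does.
    static IEnumerable<int> PopulatedPriorities(long prioMask)
    {
        while (prioMask != 0)
        {
            int prio = BitOperations.TrailingZeroCount(prioMask);
            prioMask &= ~(1L << prio);
            yield return prio;
        }
    }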
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KReadableEvent.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KReadableEvent.cs
new file mode 100644
index 00000000..d9e7befa
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KReadableEvent.cs
@@ -0,0 +1,65 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.Horizon.Common;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+    class KReadableEvent : KSynchronizationObject
+    {
+        private readonly KEvent _parent;
+
+        private bool _signaled;
+
+        public KReadableEvent(KernelContext context, KEvent parent) : base(context)
+        {
+            _parent = parent;
+        }
+
+        public override void Signal()
+        {
+            KernelContext.CriticalSection.Enter();
+
+            if (!_signaled)
+            {
+                _signaled = true;
+
+                base.Signal();
+            }
+
+            KernelContext.CriticalSection.Leave();
+        }
+
+        public Result Clear()
+        {
+            _signaled = false;
+
+            return Result.Success;
+        }
+
+        public Result ClearIfSignaled()
+        {
+            Result result;
+
+            KernelContext.CriticalSection.Enter();
+
+            if (_signaled)
+            {
+                _signaled = false;
+
+                result = Result.Success;
+            }
+            else
+            {
+                result = KernelResult.InvalidState;
+            }
+
+            KernelContext.CriticalSection.Leave();
+
+            return result;
+        }
+
+        public override bool IsSignaled()
+        {
+            return _signaled;
+        }
+    }
+}
\ No newline at end of file
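The two clear operations differ only in their failure mode, which callers use to detect whether a signal was actually pending:

    // Clear() always resets and succeeds; ClearIfSignaled() returns
    // KernelResult.InvalidState when the event was not signaled.
    // Result result = evnt.ReadableEvent.ClearIfSignaled(); // evnt is hypothetical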
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs
new file mode 100644
index 00000000..b9de7d9c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs
@@ -0,0 +1,661 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using System;
+using System.Numerics;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+    partial class KScheduler : IDisposable
+    {
+        public const int PrioritiesCount = 64;
+        public const int CpuCoresCount = 4;
+
+        private const int RoundRobinTimeQuantumMs = 10;
+
+        private static readonly int[] PreemptionPriorities = new int[] { 59, 59, 59, 63 };
+
+        private static readonly int[] _srcCoresHighestPrioThreads = new int[CpuCoresCount];
+
+        private readonly KernelContext _context;
+        private readonly int _coreId;
+
+        private struct SchedulingState
+        {
+            public volatile bool NeedsScheduling;
+            public volatile KThread SelectedThread;
+        }
+
+        private SchedulingState _state;
+
+        private AutoResetEvent _idleInterruptEvent;
+        private readonly object _idleInterruptEventLock;
+
+        private KThread _previousThread;
+        private KThread _currentThread;
+        private readonly KThread _idleThread;
+
+        public KThread PreviousThread => _previousThread;
+        public KThread CurrentThread => _currentThread;
+        public long LastContextSwitchTime { get; private set; }
+        public long TotalIdleTimeTicks => _idleThread.TotalTimeRunning;
+
+        public KScheduler(KernelContext context, int coreId)
+        {
+            _context = context;
+            _coreId = coreId;
+
+            _idleInterruptEvent = new AutoResetEvent(false);
+            _idleInterruptEventLock = new object();
+
+            KThread idleThread = CreateIdleThread(context, coreId);
+
+            _currentThread = idleThread;
+            _idleThread = idleThread;
+
+            idleThread.StartHostThread();
+            idleThread.SchedulerWaitEvent.Set();
+        }
+
+        private KThread CreateIdleThread(KernelContext context, int cpuCore)
+        {
+            KThread idleThread = new KThread(context);
+
+            idleThread.Initialize(0UL, 0UL, 0UL, PrioritiesCount, cpuCore, null, ThreadType.Dummy, IdleThreadLoop);
+
+            return idleThread;
+        }
+
+        public static ulong SelectThreads(KernelContext context)
+        {
+            if (context.ThreadReselectionRequested)
+            {
+                return SelectThreadsImpl(context);
+            }
+            else
+            {
+                return 0UL;
+            }
+        }
+
+        private static ulong SelectThreadsImpl(KernelContext context)
+        {
+            context.ThreadReselectionRequested = false;
+
+            ulong scheduledCoresMask = 0UL;
+
+            for (int core = 0; core < CpuCoresCount; core++)
+            {
+                KThread thread = context.PriorityQueue.ScheduledThreadsFirstOrDefault(core);
+
+                if (thread != null &&
+                    thread.Owner != null &&
+                    thread.Owner.PinnedThreads[core] != null &&
+                    thread.Owner.PinnedThreads[core] != thread)
+                {
+                    KThread candidate = thread.Owner.PinnedThreads[core];
+
+                    if (candidate.KernelWaitersCount == 0 && !thread.Owner.IsExceptionUserThread(candidate))
+                    {
+                        if (candidate.SchedFlags == ThreadSchedState.Running)
+                        {
+                            thread = candidate;
+                        }
+                        else
+                        {
+                            thread = null;
+                        }
+                    }
+                }
+
+                scheduledCoresMask |= context.Schedulers[core].SelectThread(thread);
+            }
+
+            for (int core = 0; core < CpuCoresCount; core++)
+            {
+                // If the core is not idle (there's already a thread running on it),
+                // then we don't need to attempt load balancing.
+                if (context.PriorityQueue.HasScheduledThreads(core))
+                {
+                    continue;
+                }
+
+                Array.Fill(_srcCoresHighestPrioThreads, 0);
+
+                int srcCoresHighestPrioThreadsCount = 0;
+
+                KThread dst = null;
+
+                // Select candidate threads that could run on this core.
+                // Give preference to threads that are not yet selected.
+                foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
+                {
+                    if (suggested.ActiveCore < 0 || suggested != context.Schedulers[suggested.ActiveCore]._state.SelectedThread)
+                    {
+                        dst = suggested;
+                        break;
+                    }
+
+                    _srcCoresHighestPrioThreads[srcCoresHighestPrioThreadsCount++] = suggested.ActiveCore;
+                }
+
+                // Not yet selected candidate found.
+                if (dst != null)
+                {
+                    // Priorities < 2 are used for the kernel message dispatching
+                    // threads, we should skip load balancing entirely.
+                    if (dst.DynamicPriority >= 2)
+                    {
+                        context.PriorityQueue.TransferToCore(dst.DynamicPriority, core, dst);
+
+                        scheduledCoresMask |= context.Schedulers[core].SelectThread(dst);
+                    }
+
+                    continue;
+                }
+
+                // All candidates are already selected, choose the best one
+                // (the first one that doesn't make the source core idle if moved).
+                for (int index = 0; index < srcCoresHighestPrioThreadsCount; index++)
+                {
+                    int srcCore = _srcCoresHighestPrioThreads[index];
+
+                    KThread src = context.PriorityQueue.ScheduledThreadsElementAtOrDefault(srcCore, 1);
+
+                    if (src != null)
+                    {
+                        // Run the second thread on the queue on the source core,
+                        // move the first one to the current core.
+                        KThread origSelectedCoreSrc = context.Schedulers[srcCore]._state.SelectedThread;
+
+                        scheduledCoresMask |= context.Schedulers[srcCore].SelectThread(src);
+
+                        context.PriorityQueue.TransferToCore(origSelectedCoreSrc.DynamicPriority, core, origSelectedCoreSrc);
+
+                        scheduledCoresMask |= context.Schedulers[core].SelectThread(origSelectedCoreSrc);
+                    }
+                }
+            }
+
+            return scheduledCoresMask;
+        }
+
+        private ulong SelectThread(KThread nextThread)
+        {
+            KThread previousThread = _state.SelectedThread;
+
+            if (previousThread != nextThread)
+            {
+                if (previousThread != null)
+                {
+                    previousThread.LastScheduledTime = PerformanceCounter.ElapsedTicks;
+                }
+
+                _state.SelectedThread = nextThread;
+                _state.NeedsScheduling = true;
+                return 1UL << _coreId;
+            }
+            else
+            {
+                return 0UL;
+            }
+        }
+
+        public static void EnableScheduling(KernelContext context, ulong scheduledCoresMask)
+        {
+            KScheduler currentScheduler = context.Schedulers[KernelStatic.GetCurrentThread().CurrentCore];
+
+            // Note that "RescheduleCurrentCore" will block, so "RescheduleOtherCores" must be done first.
+            currentScheduler.RescheduleOtherCores(scheduledCoresMask);
+            currentScheduler.RescheduleCurrentCore();
+        }
+
+        public static void EnableSchedulingFromForeignThread(KernelContext context, ulong scheduledCoresMask)
+        {
+            RescheduleOtherCores(context, scheduledCoresMask);
+        }
+
+        private void RescheduleCurrentCore()
+        {
+            if (_state.NeedsScheduling)
+            {
+                Schedule();
+            }
+        }
+
+        private void RescheduleOtherCores(ulong scheduledCoresMask)
+        {
+            RescheduleOtherCores(_context, scheduledCoresMask & ~(1UL << _coreId));
+        }
+
+        private static void RescheduleOtherCores(KernelContext context, ulong scheduledCoresMask)
+        {
+            while (scheduledCoresMask != 0)
+            {
+                int coreToSignal = BitOperations.TrailingZeroCount(scheduledCoresMask);
+
+                KThread threadToSignal = context.Schedulers[coreToSignal]._currentThread;
+
+                // Request the thread running on that core to stop and reschedule, if we have one.
+                if (threadToSignal != context.Schedulers[coreToSignal]._idleThread)
+                {
+                    threadToSignal.Context.RequestInterrupt();
+                }
+
+                // If the core is idle, ensure that the idle thread is woken up.
+                context.Schedulers[coreToSignal]._idleInterruptEvent.Set();
+
+                scheduledCoresMask &= ~(1UL << coreToSignal);
+            }
+        }
+
+        private void IdleThreadLoop()
+        {
+            while (_context.Running)
+            {
+                _state.NeedsScheduling = false;
+                Thread.MemoryBarrier();
+                KThread nextThread = PickNextThread(_state.SelectedThread);
+
+                if (_idleThread != nextThread)
+                {
+                    _idleThread.SchedulerWaitEvent.Reset();
+                    WaitHandle.SignalAndWait(nextThread.SchedulerWaitEvent, _idleThread.SchedulerWaitEvent);
+                }
+
+                _idleInterruptEvent.WaitOne();
+            }
+
+            lock (_idleInterruptEventLock)
+            {
+                _idleInterruptEvent.Dispose();
+                _idleInterruptEvent = null;
+            }
+        }
+
+        public void Schedule()
+        {
+            _state.NeedsScheduling = false;
+            Thread.MemoryBarrier();
+            KThread currentThread = KernelStatic.GetCurrentThread();
+            KThread selectedThread = _state.SelectedThread;
+
+            // If the thread is already scheduled and running on the core, we have nothing to do.
+            if (currentThread == selectedThread)
+            {
+                return;
+            }
+
+            currentThread.SchedulerWaitEvent.Reset();
+            currentThread.ThreadContext.Unlock();
+
+            // Wake all the threads that might be waiting until this thread context is unlocked.
+            for (int core = 0; core < CpuCoresCount; core++)
+            {
+                _context.Schedulers[core]._idleInterruptEvent.Set();
+            }
+
+            KThread nextThread = PickNextThread(selectedThread);
+
+            if (currentThread.Context.Running)
+            {
+                // Wait until this thread is scheduled again, and allow the next thread to run.
+                WaitHandle.SignalAndWait(nextThread.SchedulerWaitEvent, currentThread.SchedulerWaitEvent);
+            }
+            else
+            {
+                // Allow the next thread to run.
+                nextThread.SchedulerWaitEvent.Set();
+
+                // We don't need to wait since the thread is exiting, however we need to
+                // make sure this thread will never call the scheduler again, since it is
+                // no longer assigned to a core.
+                currentThread.MakeUnschedulable();
+
+                // Just to be sure, set the core to an invalid value.
+                // This will trigger an exception if it attempts to call schedule again,
+                // rather than leaving the scheduler in an invalid state.
+                currentThread.CurrentCore = -1;
+            }
+        }
+
+        private KThread PickNextThread(KThread selectedThread)
+        {
+            while (true)
+            {
+                if (selectedThread != null)
+                {
+                    // Try to run the selected thread.
+                    // We need to acquire the context lock to be sure the thread is not
+                    // already running on another core. If it is, then we return here
+                    // and the caller should try again once there is something available for scheduling.
+                    // The thread currently running on the core should have been requested to
+                    // interrupt so this is not expected to take long.
+                    // The idle thread must also be paused if we are scheduling a thread
+                    // on the core, as the scheduled thread will handle the next switch.
+                    if (selectedThread.ThreadContext.Lock())
+                    {
+                        SwitchTo(selectedThread);
+
+                        if (!_state.NeedsScheduling)
+                        {
+                            return selectedThread;
+                        }
+
+                        selectedThread.ThreadContext.Unlock();
+                    }
+                    else
+                    {
+                        return _idleThread;
+                    }
+                }
+                else
+                {
+                    // The core is idle now, make sure that the idle thread can run
+                    // and switch the core when a thread is available.
+                    SwitchTo(null);
+                    return _idleThread;
+                }
+
+                _state.NeedsScheduling = false;
+                Thread.MemoryBarrier();
+                selectedThread = _state.SelectedThread;
+            }
+        }
+
+        private void SwitchTo(KThread nextThread)
+        {
+            KProcess currentProcess = KernelStatic.GetCurrentProcess();
+            KThread currentThread = KernelStatic.GetCurrentThread();
+
+            nextThread ??= _idleThread;
+
+            if (currentThread != nextThread)
+            {
+                long previousTicks = LastContextSwitchTime;
+                long currentTicks = PerformanceCounter.ElapsedTicks;
+                long ticksDelta = currentTicks - previousTicks;
+
+                currentThread.AddCpuTime(ticksDelta);
+
+                if (currentProcess != null)
+                {
+                    currentProcess.AddCpuTime(ticksDelta);
+                }
+
+                LastContextSwitchTime = currentTicks;
+
+                if (currentProcess != null)
+                {
+                    _previousThread = !currentThread.TerminationRequested && currentThread.ActiveCore == _coreId ? currentThread : null;
+                }
+                else if (currentThread == _idleThread)
+                {
+                    _previousThread = null;
+                }
+            }
+
+            if (nextThread.CurrentCore != _coreId)
+            {
+                nextThread.CurrentCore = _coreId;
+            }
+
+            _currentThread = nextThread;
+        }
+
+        public static void PreemptionThreadLoop(KernelContext context)
+        {
+            while (context.Running)
+            {
+                context.CriticalSection.Enter();
+
+                for (int core = 0; core < CpuCoresCount; core++)
+                {
+                    RotateScheduledQueue(context, core, PreemptionPriorities[core]);
+                }
+
+                context.CriticalSection.Leave();
+
+                Thread.Sleep(RoundRobinTimeQuantumMs);
+            }
+        }
+
+        private static void RotateScheduledQueue(KernelContext context, int core, int prio)
+        {
+            KThread selectedThread = context.PriorityQueue.ScheduledThreadsWithDynamicPriorityFirstOrDefault(core, prio);
+            KThread nextThread = null;
+
+            // Yield priority queue.
+            if (selectedThread != null)
+            {
+                nextThread = context.PriorityQueue.Reschedule(prio, core, selectedThread);
+            }
+
+            static KThread FirstSuitableCandidateOrDefault(KernelContext context, int core, KThread selectedThread, KThread nextThread, Predicate<KThread> predicate)
+            {
+                foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
+                {
+                    int suggestedCore = suggested.ActiveCore;
+                    if (suggestedCore >= 0)
+                    {
+                        KThread selectedSuggestedCore = context.PriorityQueue.ScheduledThreadsFirstOrDefault(suggestedCore);
+
+                        if (selectedSuggestedCore == suggested || (selectedSuggestedCore != null && selectedSuggestedCore.DynamicPriority < 2))
+                        {
+                            continue;
+                        }
+                    }
+
+                    // If the candidate was scheduled after the current thread, then it's not worth it.
+                    if (nextThread == selectedThread ||
+                        nextThread == null ||
+                        nextThread.LastScheduledTime >= suggested.LastScheduledTime)
+                    {
+                        if (predicate(suggested))
+                        {
+                            return suggested;
+                        }
+                    }
+                }
+
+                return null;
+            }
+
+            // Select candidate threads that could run on this core.
+            // Only take into account threads that are not yet selected.
+            KThread dst = FirstSuitableCandidateOrDefault(context, core, selectedThread, nextThread, x => x.DynamicPriority == prio);
+
+            if (dst != null)
+            {
+                context.PriorityQueue.TransferToCore(prio, core, dst);
+            }
+
+            // If the priority of the currently selected thread is lower than or equal to the preemption priority,
+            // then try to migrate a thread with lower priority.
+            KThread bestCandidate = context.PriorityQueue.ScheduledThreadsFirstOrDefault(core);
+
+            if (bestCandidate != null && bestCandidate.DynamicPriority >= prio)
+            {
+                dst = FirstSuitableCandidateOrDefault(context, core, selectedThread, nextThread, x => x.DynamicPriority < bestCandidate.DynamicPriority);
+
+                if (dst != null)
+                {
+                    context.PriorityQueue.TransferToCore(dst.DynamicPriority, core, dst);
+                }
+            }
+
+            context.ThreadReselectionRequested = true;
+        }
+
+        public static void Yield(KernelContext context)
+        {
+            KThread currentThread = KernelStatic.GetCurrentThread();
+
+            if (!currentThread.IsSchedulable)
+            {
+                return;
+            }
+
+            context.CriticalSection.Enter();
+
+            if (currentThread.SchedFlags != ThreadSchedState.Running)
+            {
+                context.CriticalSection.Leave();
+                return;
+            }
+
+            KThread nextThread = context.PriorityQueue.Reschedule(currentThread.DynamicPriority, currentThread.ActiveCore, currentThread);
+
+            if (nextThread != currentThread)
+            {
+                context.ThreadReselectionRequested = true;
+            }
+
+            context.CriticalSection.Leave();
+        }
+
+        public static void YieldWithLoadBalancing(KernelContext context)
+        {
+            KThread currentThread = KernelStatic.GetCurrentThread();
+
+            if (!currentThread.IsSchedulable)
+            {
+                return;
+            }
+
+            context.CriticalSection.Enter();
+
+            if (currentThread.SchedFlags != ThreadSchedState.Running)
+            {
+                context.CriticalSection.Leave();
+                return;
+            }
+
+            int prio = currentThread.DynamicPriority;
+            int core = currentThread.ActiveCore;
+
+            // Move current thread to the end of the queue.
+            KThread nextThread = context.PriorityQueue.Reschedule(prio, core, currentThread);
+
+            static KThread FirstSuitableCandidateOrDefault(KernelContext context, int core, KThread nextThread, int lessThanOrEqualPriority)
+            {
+                foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
+                {
+                    int suggestedCore = suggested.ActiveCore;
+                    if (suggestedCore >= 0)
+                    {
+                        KThread selectedSuggestedCore = context.Schedulers[suggestedCore]._state.SelectedThread;
+
+                        if (selectedSuggestedCore == suggested || (selectedSuggestedCore != null && selectedSuggestedCore.DynamicPriority < 2))
+                        {
+                            continue;
+                        }
+                    }
+
+                    // If the candidate was scheduled after the current thread, then it's not worth it,
+                    // unless the priority is higher than the current one.
+                    if (suggested.LastScheduledTime <= nextThread.LastScheduledTime ||
+                        suggested.DynamicPriority < nextThread.DynamicPriority)
+                    {
+                        if (suggested.DynamicPriority <= lessThanOrEqualPriority)
+                        {
+                            return suggested;
+                        }
+                    }
+                }
+
+                return null;
+            }
+
+            KThread dst = FirstSuitableCandidateOrDefault(context, core, nextThread, prio);
+
+            if (dst != null)
+            {
+                context.PriorityQueue.TransferToCore(dst.DynamicPriority, core, dst);
+
+                context.ThreadReselectionRequested = true;
+            }
+            else if (currentThread != nextThread)
+            {
+                context.ThreadReselectionRequested = true;
+            }
+
+            context.CriticalSection.Leave();
+        }
+
+        public static void YieldToAnyThread(KernelContext context)
+        {
+            KThread currentThread = KernelStatic.GetCurrentThread();
+
+            if (!currentThread.IsSchedulable)
+            {
+                return;
+            }
+
+            context.CriticalSection.Enter();
+
+            if (currentThread.SchedFlags != ThreadSchedState.Running)
+            {
+                context.CriticalSection.Leave();
+                return;
+            }
+
+            int core = currentThread.ActiveCore;
+
+            context.PriorityQueue.TransferToCore(currentThread.DynamicPriority, -1, currentThread);
+
+            if (!context.PriorityQueue.HasScheduledThreads(core))
+            {
+                KThread selectedThread = null;
+
+                foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
+                {
+                    int suggestedCore = suggested.ActiveCore;
+
+                    if (suggestedCore < 0)
+                    {
+                        continue;
+                    }
+
+                    KThread firstCandidate = context.PriorityQueue.ScheduledThreadsFirstOrDefault(suggestedCore);
+
+                    if (firstCandidate == suggested)
+                    {
+                        continue;
+                    }
+
+                    if (firstCandidate == null || firstCandidate.DynamicPriority >= 2)
+                    {
+                        context.PriorityQueue.TransferToCore(suggested.DynamicPriority, core, suggested);
+                    }
+
+                    selectedThread = suggested;
+                    break;
+                }
+
+                if (currentThread != selectedThread)
+                {
+                    context.ThreadReselectionRequested = true;
+                }
+            }
+            else
+            {
+                context.ThreadReselectionRequested = true;
+            }
+
+            context.CriticalSection.Leave();
+        }
+
+        public void Dispose()
+        {
+            // Ensure that the idle thread is not blocked and can exit.
+            lock (_idleInterruptEventLock)
+            {
+                if (_idleInterruptEvent != null)
+                {
+                    _idleInterruptEvent.Set();
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KSynchronization.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KSynchronization.cs
new file mode 100644
index 00000000..9c196810
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KSynchronization.cs
@@ -0,0 +1,142 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.Horizon.Common;
+using System;
+using System.Buffers;
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+    class KSynchronization
+    {
+        private KernelContext _context;
+
+        public KSynchronization(KernelContext context)
+        {
+            _context = context;
+        }
+
+        public Result WaitFor(Span<KSynchronizationObject> syncObjs, long timeout, out int handleIndex)
+        {
+            handleIndex = 0;
+
+            Result result = KernelResult.TimedOut;
+
+            _context.CriticalSection.Enter();
+
+            // Check if objects are already signaled before waiting.
+            for (int index = 0; index < syncObjs.Length; index++)
+            {
+                if (!syncObjs[index].IsSignaled())
+                {
+                    continue;
+                }
+
+                handleIndex = index;
+
+                _context.CriticalSection.Leave();
+
+                return Result.Success;
+            }
+
+            if (timeout == 0)
+            {
+                _context.CriticalSection.Leave();
+
+                return result;
+            }
+
+            KThread currentThread = KernelStatic.GetCurrentThread();
+
+            if (currentThread.TerminationRequested)
+            {
+                result = KernelResult.ThreadTerminating;
+            }
+            else if (currentThread.SyncCancelled)
+            {
+                currentThread.SyncCancelled = false;
+
+                result = KernelResult.Cancelled;
+            }
+            else
+            {
+                LinkedListNode<KThread>[] syncNodesArray = ArrayPool<LinkedListNode<KThread>>.Shared.Rent(syncObjs.Length);
+
+                Span<LinkedListNode<KThread>> syncNodes = syncNodesArray.AsSpan(0, syncObjs.Length);
+
+                for (int index = 0; index < syncObjs.Length; index++)
+                {
+                    syncNodes[index] = syncObjs[index].AddWaitingThread(currentThread);
+                }
+
+                currentThread.WaitingSync = true;
+                currentThread.SignaledObj = null;
+                currentThread.ObjSyncResult = result;
+
+                currentThread.Reschedule(ThreadSchedState.Paused);
+
+                if (timeout > 0)
+                {
+                    _context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
+                }
+
+                _context.CriticalSection.Leave();
+
+                currentThread.WaitingSync = false;
+
+                if (timeout > 0)
+                {
+                    _context.TimeManager.UnscheduleFutureInvocation(currentThread);
+                }
+
+                _context.CriticalSection.Enter();
+
+                result = currentThread.ObjSyncResult;
+
+                handleIndex = -1;
+
+                for (int index = 0; index < syncObjs.Length; index++)
+                {
+                    syncObjs[index].RemoveWaitingThread(syncNodes[index]);
+
+                    if (syncObjs[index] == currentThread.SignaledObj)
+                    {
+                        handleIndex = index;
+                    }
+                }
+
+                ArrayPool<LinkedListNode<KThread>>.Shared.Return(syncNodesArray);
+            }
+
+            _context.CriticalSection.Leave();
+
+            return result;
+        }
+
+        public void SignalObject(KSynchronizationObject syncObj)
+        {
+            _context.CriticalSection.Enter();
+
+            if (syncObj.IsSignaled())
+            {
+                LinkedListNode<KThread> node = syncObj.WaitingThreads.First;
+
+                while (node != null)
+                {
+                    KThread thread = node.Value;
+
+                    if ((thread.SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
+                    {
+                        thread.SignaledObj = syncObj;
+                        thread.ObjSyncResult = Result.Success;
+
+                        thread.Reschedule(ThreadSchedState.Running);
+                    }
+
+                    node = node.Next;
+                }
+            }
+
+            _context.CriticalSection.Leave();
+        }
+    }
+}
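A sketch of how WaitFor is driven (the surrounding plumbing is illustrative; WaitFor itself is shown above and is also what KThread.Terminate uses to join a dying thread):

    // Wait on a set of objects until one signals or the timeout elapses;
    // handleIndex reports which object satisfied the wait.
    // Result result = context.Synchronization.WaitFor(
    //     new KSynchronizationObject[] { evnt.ReadableEvent }, timeout, out int handleIndex);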
\ No newline at end of file diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs new file mode 100644 index 00000000..63396468 --- /dev/null +++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs @@ -0,0 +1,1438 @@ +using Ryujinx.Common.Logging; +using Ryujinx.Cpu; +using Ryujinx.HLE.HOS.Kernel.Common; +using Ryujinx.HLE.HOS.Kernel.Process; +using Ryujinx.HLE.HOS.Kernel.SupervisorCall; +using Ryujinx.Horizon.Common; +using System; +using System.Collections.Generic; +using System.Numerics; +using System.Threading; + +namespace Ryujinx.HLE.HOS.Kernel.Threading +{ + class KThread : KSynchronizationObject, IKFutureSchedulerObject + { + private const int TlsUserDisableCountOffset = 0x100; + private const int TlsUserInterruptFlagOffset = 0x102; + + public const int MaxWaitSyncObjects = 64; + + private ManualResetEvent _schedulerWaitEvent; + + public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent; + + public Thread HostThread { get; private set; } + + public IExecutionContext Context { get; private set; } + + public KThreadContext ThreadContext { get; private set; } + + public int DynamicPriority { get; set; } + public ulong AffinityMask { get; set; } + + public ulong ThreadUid { get; private set; } + + private long _totalTimeRunning; + + public long TotalTimeRunning => _totalTimeRunning; + + public KSynchronizationObject SignaledObj { get; set; } + + public ulong CondVarAddress { get; set; } + + private ulong _entrypoint; + private ThreadStart _customThreadStart; + private bool _forcedUnschedulable; + + public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable; + + public ulong MutexAddress { get; set; } + public int KernelWaitersCount { get; private set; } + + public KProcess Owner { get; private set; } + + private ulong _tlsAddress; + + public ulong TlsAddress => _tlsAddress; + + public KSynchronizationObject[] WaitSyncObjects { get; } + public int[] WaitSyncHandles { get; } + + public long LastScheduledTime { get; set; } + + public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; } + + public LinkedList<KThread> Withholder { get; set; } + public LinkedListNode<KThread> WithholderNode { get; set; } + + public LinkedListNode<KThread> ProcessListNode { get; set; } + + private LinkedList<KThread> _mutexWaiters; + private LinkedListNode<KThread> _mutexWaiterNode; + + private LinkedList<KThread> _pinnedWaiters; + + public KThread MutexOwner { get; private set; } + + public int ThreadHandleForUserMutex { get; set; } + + private ThreadSchedState _forcePauseFlags; + private ThreadSchedState _forcePausePermissionFlags; + + public Result ObjSyncResult { get; set; } + + public int BasePriority { get; set; } + public int PreferredCore { get; set; } + + public int CurrentCore { get; set; } + public int ActiveCore { get; set; } + + public bool IsPinned { get; private set; } + + private ulong _originalAffinityMask; + private int _originalPreferredCore; + private int _originalBasePriority; + private int _coreMigrationDisableCount; + + public ThreadSchedState SchedFlags { get; private set; } + + private int _shallBeTerminated; + + private bool ShallBeTerminated => _shallBeTerminated != 0; + + public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending; + + public bool SyncCancelled { get; set; } + public bool WaitingSync { get; set; } + + private int _hasExited; + private bool _hasBeenInitialized; + private bool _hasBeenReleased; + + public bool 
WaitingInArbitration { get; set; } + + private object _activityOperationLock; + + public KThread(KernelContext context) : base(context) + { + WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects]; + WaitSyncHandles = new int[MaxWaitSyncObjects]; + + SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount]; + + _mutexWaiters = new LinkedList<KThread>(); + _pinnedWaiters = new LinkedList<KThread>(); + + _activityOperationLock = new object(); + } + + public Result Initialize( + ulong entrypoint, + ulong argsPtr, + ulong stackTop, + int priority, + int cpuCore, + KProcess owner, + ThreadType type, + ThreadStart customThreadStart = null) + { + if ((uint)type > 3) + { + throw new ArgumentException($"Invalid thread type \"{type}\"."); + } + + PreferredCore = cpuCore; + AffinityMask |= 1UL << cpuCore; + + SchedFlags = type == ThreadType.Dummy + ? ThreadSchedState.Running + : ThreadSchedState.None; + + ActiveCore = cpuCore; + ObjSyncResult = KernelResult.ThreadNotStarted; + DynamicPriority = priority; + BasePriority = priority; + CurrentCore = cpuCore; + IsPinned = false; + + _entrypoint = entrypoint; + _customThreadStart = customThreadStart; + + if (type == ThreadType.User) + { + if (owner.AllocateThreadLocalStorage(out _tlsAddress) != Result.Success) + { + return KernelResult.OutOfMemory; + } + + MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize); + } + + bool is64Bits; + + if (owner != null) + { + Owner = owner; + + owner.IncrementReferenceCount(); + owner.IncrementThreadCount(); + + is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit); + } + else + { + is64Bits = true; + } + + HostThread = new Thread(ThreadStart); + + Context = owner?.CreateExecutionContext() ?? new ProcessExecutionContext(); + + ThreadContext = new KThreadContext(Context); + + Context.IsAarch32 = !is64Bits; + + Context.SetX(0, argsPtr); + + if (is64Bits) + { + Context.SetX(18, KSystemControl.GenerateRandom() | 1); + Context.SetX(31, stackTop); + } + else + { + Context.SetX(13, (uint)stackTop); + } + + Context.TpidrroEl0 = (long)_tlsAddress; + + ThreadUid = KernelContext.NewThreadUid(); + + HostThread.Name = customThreadStart != null ? 
$"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}"; + + _hasBeenInitialized = true; + + _forcePausePermissionFlags = ThreadSchedState.ForcePauseMask; + + if (owner != null) + { + owner.AddThread(this); + + if (owner.IsPaused) + { + KernelContext.CriticalSection.Enter(); + + if (TerminationRequested) + { + KernelContext.CriticalSection.Leave(); + + return Result.Success; + } + + _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag; + + CombineForcePauseFlags(); + + KernelContext.CriticalSection.Leave(); + } + } + + return Result.Success; + } + + public Result Start() + { + if (!KernelContext.KernelInitialized) + { + KernelContext.CriticalSection.Enter(); + + if (!TerminationRequested) + { + _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag; + + CombineForcePauseFlags(); + } + + KernelContext.CriticalSection.Leave(); + } + + Result result = KernelResult.ThreadTerminating; + + KernelContext.CriticalSection.Enter(); + + if (!ShallBeTerminated) + { + KThread currentThread = KernelStatic.GetCurrentThread(); + + while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested)) + { + if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None) + { + result = KernelResult.InvalidState; + break; + } + + if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None) + { + if (Owner != null && _forcePauseFlags != ThreadSchedState.None) + { + CombineForcePauseFlags(); + } + + SetNewSchedFlags(ThreadSchedState.Running); + + StartHostThread(); + + result = Result.Success; + break; + } + else + { + currentThread.CombineForcePauseFlags(); + + KernelContext.CriticalSection.Leave(); + KernelContext.CriticalSection.Enter(); + + if (currentThread.ShallBeTerminated) + { + break; + } + } + } + } + + KernelContext.CriticalSection.Leave(); + + return result; + } + + public ThreadSchedState PrepareForTermination() + { + KernelContext.CriticalSection.Enter(); + + if (Owner != null && Owner.PinnedThreads[KernelStatic.GetCurrentThread().CurrentCore] == this) + { + Owner.UnpinThread(this); + } + + ThreadSchedState result; + + if (Interlocked.Exchange(ref _shallBeTerminated, 1) == 0) + { + if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None) + { + SchedFlags = ThreadSchedState.TerminationPending; + } + else + { + if (_forcePauseFlags != ThreadSchedState.None) + { + _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag; + + ThreadSchedState oldSchedFlags = SchedFlags; + + SchedFlags &= ThreadSchedState.LowMask; + + AdjustScheduling(oldSchedFlags); + } + + if (BasePriority >= 0x10) + { + SetPriority(0xF); + } + + if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running) + { + // TODO: GIC distributor stuffs (sgir changes ect) + Context.RequestInterrupt(); + } + + SignaledObj = null; + ObjSyncResult = KernelResult.ThreadTerminating; + + ReleaseAndResume(); + } + } + + result = SchedFlags; + + KernelContext.CriticalSection.Leave(); + + return result & ThreadSchedState.LowMask; + } + + public void Terminate() + { + ThreadSchedState state = PrepareForTermination(); + + if (state != ThreadSchedState.TerminationPending) + { + KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _); + } + } + + public void HandlePostSyscall() + { + ThreadSchedState state; + + do + { + if (TerminationRequested) + { + Exit(); + + // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here. 
+ break; + } + + KernelContext.CriticalSection.Enter(); + + if (TerminationRequested) + { + state = ThreadSchedState.TerminationPending; + } + else + { + if (_forcePauseFlags != ThreadSchedState.None) + { + CombineForcePauseFlags(); + } + + state = ThreadSchedState.Running; + } + + KernelContext.CriticalSection.Leave(); + } while (state == ThreadSchedState.TerminationPending); + } + + public void Exit() + { + // TODO: Debug event. + + if (Owner != null) + { + Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1); + + _hasBeenReleased = true; + } + + KernelContext.CriticalSection.Enter(); + + _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask; + _forcePausePermissionFlags = 0; + + bool decRef = ExitImpl(); + + Context.StopRunning(); + + KernelContext.CriticalSection.Leave(); + + if (decRef) + { + DecrementReferenceCount(); + } + } + + private bool ExitImpl() + { + KernelContext.CriticalSection.Enter(); + + SetNewSchedFlags(ThreadSchedState.TerminationPending); + + bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0; + + Signal(); + + KernelContext.CriticalSection.Leave(); + + return decRef; + } + + private int GetEffectiveRunningCore() + { + for (int coreNumber = 0; coreNumber < KScheduler.CpuCoresCount; coreNumber++) + { + if (KernelContext.Schedulers[coreNumber].CurrentThread == this) + { + return coreNumber; + } + } + + return -1; + } + + public Result Sleep(long timeout) + { + KernelContext.CriticalSection.Enter(); + + if (TerminationRequested) + { + KernelContext.CriticalSection.Leave(); + + return KernelResult.ThreadTerminating; + } + + SetNewSchedFlags(ThreadSchedState.Paused); + + if (timeout > 0) + { + KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout); + } + + KernelContext.CriticalSection.Leave(); + + if (timeout > 0) + { + KernelContext.TimeManager.UnscheduleFutureInvocation(this); + } + + return Result.Success; + } + + public void SetPriority(int priority) + { + KernelContext.CriticalSection.Enter(); + + if (IsPinned) + { + _originalBasePriority = priority; + } + else + { + BasePriority = priority; + } + + UpdatePriorityInheritance(); + + KernelContext.CriticalSection.Leave(); + } + + public void Suspend(ThreadSchedState type) + { + _forcePauseFlags |= type; + + CombineForcePauseFlags(); + } + + public void Resume(ThreadSchedState type) + { + ThreadSchedState oldForcePauseFlags = _forcePauseFlags; + + _forcePauseFlags &= ~type; + + if ((oldForcePauseFlags & ~type) == ThreadSchedState.None) + { + ThreadSchedState oldSchedFlags = SchedFlags; + + SchedFlags &= ThreadSchedState.LowMask; + + AdjustScheduling(oldSchedFlags); + } + } + + public Result SetActivity(bool pause) + { + lock (_activityOperationLock) + { + Result result = Result.Success; + + KernelContext.CriticalSection.Enter(); + + ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask; + + if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running) + { + KernelContext.CriticalSection.Leave(); + + return KernelResult.InvalidState; + } + + if (!TerminationRequested) + { + if (pause) + { + // Pause, the force pause flag should be clear (thread is NOT paused). + if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0) + { + Suspend(ThreadSchedState.ThreadPauseFlag); + } + else + { + result = KernelResult.InvalidState; + } + } + else + { + // Unpause, the force pause flag should be set (thread is paused). 
+ if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0) + { + Resume(ThreadSchedState.ThreadPauseFlag); + } + else + { + result = KernelResult.InvalidState; + } + } + } + + KernelContext.CriticalSection.Leave(); + + if (result == Result.Success && pause) + { + bool isThreadRunning = true; + + while (isThreadRunning) + { + KernelContext.CriticalSection.Enter(); + + if (TerminationRequested) + { + KernelContext.CriticalSection.Leave(); + + break; + } + + isThreadRunning = false; + + if (IsPinned) + { + KThread currentThread = KernelStatic.GetCurrentThread(); + + if (currentThread.TerminationRequested) + { + KernelContext.CriticalSection.Leave(); + + result = KernelResult.ThreadTerminating; + + break; + } + + _pinnedWaiters.AddLast(currentThread); + + currentThread.Reschedule(ThreadSchedState.Paused); + } + else + { + isThreadRunning = GetEffectiveRunningCore() >= 0; + } + + KernelContext.CriticalSection.Leave(); + } + } + + return result; + } + } + + public Result GetThreadContext3(out ThreadContext context) + { + context = default; + + lock (_activityOperationLock) + { + KernelContext.CriticalSection.Enter(); + + if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0) + { + KernelContext.CriticalSection.Leave(); + + return KernelResult.InvalidState; + } + + if (!TerminationRequested) + { + context = GetCurrentContext(); + } + + KernelContext.CriticalSection.Leave(); + } + + return Result.Success; + } + + private static uint GetPsr(IExecutionContext context) + { + return context.Pstate & 0xFF0FFE20; + } + + private ThreadContext GetCurrentContext() + { + const int MaxRegistersAArch32 = 15; + const int MaxFpuRegistersAArch32 = 16; + + ThreadContext context = new ThreadContext(); + + if (Owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit)) + { + for (int i = 0; i < context.Registers.Length; i++) + { + context.Registers[i] = Context.GetX(i); + } + + for (int i = 0; i < context.FpuRegisters.Length; i++) + { + context.FpuRegisters[i] = Context.GetV(i); + } + + context.Fp = Context.GetX(29); + context.Lr = Context.GetX(30); + context.Sp = Context.GetX(31); + context.Pc = Context.Pc; + context.Pstate = GetPsr(Context); + context.Tpidr = (ulong)Context.TpidrroEl0; + } + else + { + for (int i = 0; i < MaxRegistersAArch32; i++) + { + context.Registers[i] = (uint)Context.GetX(i); + } + + for (int i = 0; i < MaxFpuRegistersAArch32; i++) + { + context.FpuRegisters[i] = Context.GetV(i); + } + + context.Pc = (uint)Context.Pc; + context.Pstate = GetPsr(Context); + context.Tpidr = (uint)Context.TpidrroEl0; + } + + context.Fpcr = (uint)Context.Fpcr; + context.Fpsr = (uint)Context.Fpsr; + + return context; + } + + public void CancelSynchronization() + { + KernelContext.CriticalSection.Enter(); + + if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync) + { + SyncCancelled = true; + } + else if (Withholder != null) + { + Withholder.Remove(WithholderNode); + + SetNewSchedFlags(ThreadSchedState.Running); + + Withholder = null; + + SyncCancelled = true; + } + else + { + SignaledObj = null; + ObjSyncResult = KernelResult.Cancelled; + + SetNewSchedFlags(ThreadSchedState.Running); + + SyncCancelled = false; + } + + KernelContext.CriticalSection.Leave(); + } + + public Result SetCoreAndAffinityMask(int newCore, ulong newAffinityMask) + { + lock (_activityOperationLock) + { + KernelContext.CriticalSection.Enter(); + + bool isCoreMigrationDisabled = _coreMigrationDisableCount != 0; + + // The value -3 is "do not change the preferred core". 
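+ // In that case the current preferred core (or, while core migration is disabled, the saved original one) is kept, and it must still be present in the new affinity mask.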
+ if (newCore == -3) + { + newCore = isCoreMigrationDisabled ? _originalPreferredCore : PreferredCore; + + if ((newAffinityMask & (1UL << newCore)) == 0) + { + KernelContext.CriticalSection.Leave(); + + return KernelResult.InvalidCombination; + } + } + + if (isCoreMigrationDisabled) + { + _originalPreferredCore = newCore; + _originalAffinityMask = newAffinityMask; + } + else + { + ulong oldAffinityMask = AffinityMask; + + PreferredCore = newCore; + AffinityMask = newAffinityMask; + + if (oldAffinityMask != newAffinityMask) + { + int oldCore = ActiveCore; + + if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0) + { + if (PreferredCore < 0) + { + ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount(AffinityMask); + } + else + { + ActiveCore = PreferredCore; + } + } + + AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore); + } + } + + KernelContext.CriticalSection.Leave(); + + bool targetThreadPinned = true; + + while (targetThreadPinned) + { + KernelContext.CriticalSection.Enter(); + + if (TerminationRequested) + { + KernelContext.CriticalSection.Leave(); + + break; + } + + targetThreadPinned = false; + + int coreNumber = GetEffectiveRunningCore(); + bool isPinnedThreadCurrentlyRunning = coreNumber >= 0; + + if (isPinnedThreadCurrentlyRunning && ((1UL << coreNumber) & AffinityMask) == 0) + { + if (IsPinned) + { + KThread currentThread = KernelStatic.GetCurrentThread(); + + if (currentThread.TerminationRequested) + { + KernelContext.CriticalSection.Leave(); + + return KernelResult.ThreadTerminating; + } + + _pinnedWaiters.AddLast(currentThread); + + currentThread.Reschedule(ThreadSchedState.Paused); + } + else + { + targetThreadPinned = true; + } + } + + KernelContext.CriticalSection.Leave(); + } + + return Result.Success; + } + } + + private void CombineForcePauseFlags() + { + ThreadSchedState oldFlags = SchedFlags; + ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask; + + SchedFlags = lowNibble | (_forcePauseFlags & _forcePausePermissionFlags); + + AdjustScheduling(oldFlags); + } + + private void SetNewSchedFlags(ThreadSchedState newFlags) + { + KernelContext.CriticalSection.Enter(); + + ThreadSchedState oldFlags = SchedFlags; + + SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags; + + if ((oldFlags & ThreadSchedState.LowMask) != newFlags) + { + AdjustScheduling(oldFlags); + } + + KernelContext.CriticalSection.Leave(); + } + + public void ReleaseAndResume() + { + KernelContext.CriticalSection.Enter(); + + if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused) + { + if (Withholder != null) + { + Withholder.Remove(WithholderNode); + + SetNewSchedFlags(ThreadSchedState.Running); + + Withholder = null; + } + else + { + SetNewSchedFlags(ThreadSchedState.Running); + } + } + + KernelContext.CriticalSection.Leave(); + } + + public void Reschedule(ThreadSchedState newFlags) + { + KernelContext.CriticalSection.Enter(); + + ThreadSchedState oldFlags = SchedFlags; + + SchedFlags = (oldFlags & ThreadSchedState.HighMask) | + (newFlags & ThreadSchedState.LowMask); + + AdjustScheduling(oldFlags); + + KernelContext.CriticalSection.Leave(); + } + + public void AddMutexWaiter(KThread requester) + { + AddToMutexWaitersList(requester); + + requester.MutexOwner = this; + + UpdatePriorityInheritance(); + } + + public void RemoveMutexWaiter(KThread thread) + { + if (thread._mutexWaiterNode?.List != null) + { + _mutexWaiters.Remove(thread._mutexWaiterNode); + } + + thread.MutexOwner = null; + + UpdatePriorityInheritance(); + } + + public KThread 
RelinquishMutex(ulong mutexAddress, out int count) + { + count = 0; + + if (_mutexWaiters.First == null) + { + return null; + } + + KThread newMutexOwner = null; + + LinkedListNode<KThread> currentNode = _mutexWaiters.First; + + do + { + // Skip all threads that are not waiting for this mutex. + while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress) + { + currentNode = currentNode.Next; + } + + if (currentNode == null) + { + break; + } + + LinkedListNode<KThread> nextNode = currentNode.Next; + + _mutexWaiters.Remove(currentNode); + + currentNode.Value.MutexOwner = newMutexOwner; + + if (newMutexOwner != null) + { + // New owner was already selected, re-insert on new owner list. + newMutexOwner.AddToMutexWaitersList(currentNode.Value); + } + else + { + // New owner not selected yet, use current thread. + newMutexOwner = currentNode.Value; + } + + count++; + + currentNode = nextNode; + } + while (currentNode != null); + + if (newMutexOwner != null) + { + UpdatePriorityInheritance(); + + newMutexOwner.UpdatePriorityInheritance(); + } + + return newMutexOwner; + } + + private void UpdatePriorityInheritance() + { + // If any of the threads waiting for the mutex has + // higher priority than the current thread, then + // the current thread inherits that priority. + int highestPriority = BasePriority; + + if (_mutexWaiters.First != null) + { + int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority; + + if (waitingDynamicPriority < highestPriority) + { + highestPriority = waitingDynamicPriority; + } + } + + if (highestPriority != DynamicPriority) + { + int oldPriority = DynamicPriority; + + DynamicPriority = highestPriority; + + AdjustSchedulingForNewPriority(oldPriority); + + if (MutexOwner != null) + { + // Remove and re-insert to ensure proper sorting based on new priority. + MutexOwner._mutexWaiters.Remove(_mutexWaiterNode); + + MutexOwner.AddToMutexWaitersList(this); + + MutexOwner.UpdatePriorityInheritance(); + } + } + } + + private void AddToMutexWaitersList(KThread thread) + { + LinkedListNode<KThread> nextPrio = _mutexWaiters.First; + + int currentPriority = thread.DynamicPriority; + + while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority) + { + nextPrio = nextPrio.Next; + } + + if (nextPrio != null) + { + thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread); + } + else + { + thread._mutexWaiterNode = _mutexWaiters.AddLast(thread); + } + } + + private void AdjustScheduling(ThreadSchedState oldFlags) + { + if (oldFlags == SchedFlags) + { + return; + } + + if (!IsSchedulable) + { + if (!_forcedUnschedulable) + { + // Ensure our thread is running and we have an event. + StartHostThread(); + + // If the thread is not schedulable, we want to just run or pause + // it directly as we don't care about priority or the core it is + // running on in this case. + if (SchedFlags == ThreadSchedState.Running) + { + _schedulerWaitEvent.Set(); + } + else + { + _schedulerWaitEvent.Reset(); + } + } + + return; + } + + if (oldFlags == ThreadSchedState.Running) + { + // Was running, now it's stopped. + if (ActiveCore >= 0) + { + KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this); + } + + for (int core = 0; core < KScheduler.CpuCoresCount; core++) + { + if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0) + { + KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this); + } + } + } + else if (SchedFlags == ThreadSchedState.Running) + { + // Was stopped, now it's running. 
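+ // The thread is queued ("scheduled") only on its active core; every other core in the affinity mask just gets a suggestion entry, which lets an idle core pick the thread up.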
+ if (ActiveCore >= 0) + { + KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this); + } + + for (int core = 0; core < KScheduler.CpuCoresCount; core++) + { + if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0) + { + KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this); + } + } + } + + KernelContext.ThreadReselectionRequested = true; + } + + private void AdjustSchedulingForNewPriority(int oldPriority) + { + if (SchedFlags != ThreadSchedState.Running || !IsSchedulable) + { + return; + } + + // Remove thread from the old priority queues. + if (ActiveCore >= 0) + { + KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this); + } + + for (int core = 0; core < KScheduler.CpuCoresCount; core++) + { + if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0) + { + KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this); + } + } + + // Add thread to the new priority queues. + KThread currentThread = KernelStatic.GetCurrentThread(); + + if (ActiveCore >= 0) + { + if (currentThread == this) + { + KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this); + } + else + { + KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this); + } + } + + for (int core = 0; core < KScheduler.CpuCoresCount; core++) + { + if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0) + { + KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this); + } + } + + KernelContext.ThreadReselectionRequested = true; + } + + private void AdjustSchedulingForNewAffinity(ulong oldAffinityMask, int oldCore) + { + if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable) + { + return; + } + + // Remove thread from the old priority queues. + for (int core = 0; core < KScheduler.CpuCoresCount; core++) + { + if (((oldAffinityMask >> core) & 1) != 0) + { + if (core == oldCore) + { + KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this); + } + else + { + KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this); + } + } + } + + // Add thread to the new priority queues. 
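+ // Mirror of the removal above: schedule on the new active core, suggest on the remaining cores of the new mask.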
+ for (int core = 0; core < KScheduler.CpuCoresCount; core++) + { + if (((AffinityMask >> core) & 1) != 0) + { + if (core == ActiveCore) + { + KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this); + } + else + { + KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this); + } + } + } + + KernelContext.ThreadReselectionRequested = true; + } + + public void SetEntryArguments(long argsPtr, int threadHandle) + { + Context.SetX(0, (ulong)argsPtr); + Context.SetX(1, (ulong)threadHandle); + } + + public void TimeUp() + { + ReleaseAndResume(); + } + + public string GetGuestStackTrace() + { + return Owner.Debugger.GetGuestStackTrace(this); + } + + public string GetGuestRegisterPrintout() + { + return Owner.Debugger.GetCpuRegisterPrintout(this); + } + + public void PrintGuestStackTrace() + { + Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n"); + } + + public void PrintGuestRegisterPrintout() + { + Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{GetGuestRegisterPrintout()}\n"); + } + + public void AddCpuTime(long ticks) + { + Interlocked.Add(ref _totalTimeRunning, ticks); + } + + public void StartHostThread() + { + if (_schedulerWaitEvent == null) + { + var schedulerWaitEvent = new ManualResetEvent(false); + + if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null) + { + HostThread.Start(); + } + else + { + schedulerWaitEvent.Dispose(); + } + } + } + + private void ThreadStart() + { + _schedulerWaitEvent.WaitOne(); + KernelStatic.SetKernelContext(KernelContext, this); + + if (_customThreadStart != null) + { + _customThreadStart(); + + // Ensure that anything trying to join the HLE thread is unblocked. + Exit(); + HandlePostSyscall(); + } + else + { + Owner.Context.Execute(Context, _entrypoint); + } + + Context.Dispose(); + _schedulerWaitEvent.Dispose(); + } + + public void MakeUnschedulable() + { + _forcedUnschedulable = true; + } + + public override bool IsSignaled() + { + return _hasExited != 0; + } + + protected override void Destroy() + { + if (_hasBeenInitialized) + { + FreeResources(); + + bool released = Owner != null || _hasBeenReleased; + + if (Owner != null) + { + Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1); + + Owner.DecrementReferenceCount(); + } + else + { + KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1); + } + } + } + + private void FreeResources() + { + Owner?.RemoveThread(this); + + if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != Result.Success) + { + throw new InvalidOperationException("Unexpected failure freeing thread local storage."); + } + + KernelContext.CriticalSection.Enter(); + + // Wake up all threads that may be waiting for a mutex being held by this thread. 
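+ // They resume with an error result, since the owner is exiting without ever releasing the mutex.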
+ foreach (KThread thread in _mutexWaiters) + { + thread.MutexOwner = null; + thread._originalPreferredCore = 0; + thread.ObjSyncResult = KernelResult.InvalidState; + + thread.ReleaseAndResume(); + } + + KernelContext.CriticalSection.Leave(); + + Owner?.DecrementThreadCountAndTerminateIfZero(); + } + + public void Pin() + { + IsPinned = true; + _coreMigrationDisableCount++; + + int activeCore = ActiveCore; + + _originalPreferredCore = PreferredCore; + _originalAffinityMask = AffinityMask; + + ActiveCore = CurrentCore; + PreferredCore = CurrentCore; + AffinityMask = 1UL << CurrentCore; + + if (activeCore != CurrentCore || _originalAffinityMask != AffinityMask) + { + AdjustSchedulingForNewAffinity(_originalAffinityMask, activeCore); + } + + _originalBasePriority = BasePriority; + BasePriority = Math.Min(_originalBasePriority, BitOperations.TrailingZeroCount(Owner.Capabilities.AllowedThreadPriosMask) - 1); + UpdatePriorityInheritance(); + + // Disallows thread pausing + _forcePausePermissionFlags &= ~ThreadSchedState.ThreadPauseFlag; + CombineForcePauseFlags(); + + // TODO: Assign reduced SVC permissions + } + + public void Unpin() + { + IsPinned = false; + _coreMigrationDisableCount--; + + ulong affinityMask = AffinityMask; + int activeCore = ActiveCore; + + PreferredCore = _originalPreferredCore; + AffinityMask = _originalAffinityMask; + + if (AffinityMask != affinityMask) + { + if ((AffinityMask & 1UL << ActiveCore) != 0) + { + if (PreferredCore >= 0) + { + ActiveCore = PreferredCore; + } + else + { + ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask); + } + + AdjustSchedulingForNewAffinity(affinityMask, activeCore); + } + } + + BasePriority = _originalBasePriority; + UpdatePriorityInheritance(); + + if (!TerminationRequested) + { + // Allows thread pausing + _forcePausePermissionFlags |= ThreadSchedState.ThreadPauseFlag; + CombineForcePauseFlags(); + + // TODO: Restore SVC permissions + } + + // Wake up waiters + foreach (KThread waiter in _pinnedWaiters) + { + waiter.ReleaseAndResume(); + } + + _pinnedWaiters.Clear(); + } + + public void SynchronizePreemptionState() + { + KernelContext.CriticalSection.Enter(); + + if (Owner != null && Owner.PinnedThreads[CurrentCore] == this) + { + ClearUserInterruptFlag(); + + Owner.UnpinThread(this); + } + + KernelContext.CriticalSection.Leave(); + } + + public ushort GetUserDisableCount() + { + return Owner.CpuMemory.Read<ushort>(_tlsAddress + TlsUserDisableCountOffset); + } + + public void SetUserInterruptFlag() + { + Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 1); + } + + public void ClearUserInterruptFlag() + { + Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 0); + } + } +}
\ No newline at end of file diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KThreadContext.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KThreadContext.cs new file mode 100644 index 00000000..e8ad53c2 --- /dev/null +++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KThreadContext.cs @@ -0,0 +1,33 @@ +using Ryujinx.Cpu; +using Ryujinx.Horizon.Common; +using System.Threading; + +namespace Ryujinx.HLE.HOS.Kernel.Threading +{ + class KThreadContext : IThreadContext + { + private readonly IExecutionContext _context; + + public bool Running => _context.Running; + public ulong TlsAddress => (ulong)_context.TpidrroEl0; + + public ulong GetX(int index) => _context.GetX(index); + + private int _locked; + + public KThreadContext(IExecutionContext context) + { + _context = context; + } + + public bool Lock() + { + return Interlocked.Exchange(ref _locked, 1) == 0; + } + + public void Unlock() + { + Interlocked.Exchange(ref _locked, 0); + } + } +} diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KWritableEvent.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KWritableEvent.cs new file mode 100644 index 00000000..b46122be --- /dev/null +++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KWritableEvent.cs @@ -0,0 +1,25 @@ +using Ryujinx.HLE.HOS.Kernel.Common; +using Ryujinx.Horizon.Common; + +namespace Ryujinx.HLE.HOS.Kernel.Threading +{ + class KWritableEvent : KAutoObject + { + private readonly KEvent _parent; + + public KWritableEvent(KernelContext context, KEvent parent) : base(context) + { + _parent = parent; + } + + public void Signal() + { + _parent.ReadableEvent.Signal(); + } + + public Result Clear() + { + return _parent.ReadableEvent.Clear(); + } + } +}
\ No newline at end of file diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/SignalType.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/SignalType.cs new file mode 100644 index 00000000..e72b719b --- /dev/null +++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/SignalType.cs @@ -0,0 +1,9 @@ +namespace Ryujinx.HLE.HOS.Kernel.Threading +{ + enum SignalType + { + Signal = 0, + SignalAndIncrementIfEqual = 1, + SignalAndModifyIfEqual = 2 + } +} diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs new file mode 100644 index 00000000..9577075c --- /dev/null +++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs @@ -0,0 +1,20 @@ +namespace Ryujinx.HLE.HOS.Kernel.Threading +{ + enum ThreadSchedState : ushort + { + LowMask = 0xf, + HighMask = 0xfff0, + ForcePauseMask = 0x1f0, + + ProcessPauseFlag = 1 << 4, + ThreadPauseFlag = 1 << 5, + ProcessDebugPauseFlag = 1 << 6, + BacktracePauseFlag = 1 << 7, + KernelInitPauseFlag = 1 << 8, + + None = 0, + Paused = 1, + Running = 2, + TerminationPending = 3 + } +}
\ No newline at end of file diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadType.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadType.cs new file mode 100644 index 00000000..0b44b57f --- /dev/null +++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadType.cs @@ -0,0 +1,10 @@ +namespace Ryujinx.HLE.HOS.Kernel.Threading +{ + enum ThreadType + { + Dummy, + Kernel, + Kernel2, + User + } +}
\ No newline at end of file
