aboutsummaryrefslogtreecommitdiff
path: root/src/Ryujinx.HLE/HOS/Kernel
diff options
context:
space:
mode:
authorTSR Berry <20988865+TSRBerry@users.noreply.github.com>2023-04-08 01:22:00 +0200
committerMary <thog@protonmail.com>2023-04-27 23:51:14 +0200
commitcee712105850ac3385cd0091a923438167433f9f (patch)
tree4a5274b21d8b7f938c0d0ce18736d3f2993b11b1 /src/Ryujinx.HLE/HOS/Kernel
parentcd124bda587ef09668a971fa1cac1c3f0cfc9f21 (diff)
Move solution and projects to src
Diffstat (limited to 'src/Ryujinx.HLE/HOS/Kernel')
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/IKFutureSchedulerObject.cs7
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/KAutoObject.cs73
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/KResourceLimit.cs188
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/KSynchronizationObject.cs35
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/KSystemControl.cs78
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/KTimeManager.cs218
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/KernelInit.cs89
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/KernelTransfer.cs73
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/LimitableResource.cs13
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/MemoryArrange.cs12
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/MemroySize.cs9
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Common/MersenneTwister.cs128
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/ChannelState.cs10
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptor.cs20
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptorTable.cs217
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KClientPort.cs144
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KClientSession.cs84
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightClientSession.cs14
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightServerSession.cs14
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightSession.cs16
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KPort.cs72
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KServerPort.cs87
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KServerSession.cs1246
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KSession.cs54
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Ipc/KSessionRequest.cs33
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/KernelConstants.cs20
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/KernelContext.cs160
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/KernelStatic.cs73
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/AddressSpaceType.cs10
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs18
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KCodeMemory.cs169
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs156
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs288
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs19
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryInfo.cs36
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs65
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs242
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KPageBitmap.cs298
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KPageHeap.cs283
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs97
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KPageNode.cs14
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs229
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs3043
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs27
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs75
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KSlabHeap.cs50
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs130
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryAttribute.cs22
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryFillValue.cs10
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryPermission.cs20
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryRegion.cs10
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryState.cs50
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs49
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/CapabilityExtensions.cs22
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/CapabilityType.cs19
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/HleProcessDebugger.cs465
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/IProcessContext.cs15
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/IProcessContextFactory.cs9
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/KContextIdManager.cs83
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/KHandleEntry.cs19
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/KHandleTable.cs285
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs1196
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/KProcessCapabilities.cs328
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageInfo.cs77
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageManager.cs61
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/ProcessContext.cs34
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/ProcessContextFactory.cs12
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/ProcessCreationFlags.cs41
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/ProcessCreationInfo.cs37
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/ProcessExecutionContext.cs46
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/ProcessState.cs14
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Process/ProcessTamperInfo.cs24
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/CodeMemoryOperation.cs10
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/InfoType.cs34
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/MemoryInfo.cs37
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/PointerSizedAttribute.cs9
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SvcAttribute.cs15
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SvcImplAttribute.cs9
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs3010
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallHandler.cs44
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/ThreadContext.cs22
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/ArbitrationType.cs9
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KAddressArbiter.cs581
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KConditionVariable.cs70
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KCriticalSection.cs64
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KEvent.cs14
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KPriorityQueue.cs286
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KReadableEvent.cs65
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs661
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KSynchronization.cs142
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs1438
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KThreadContext.cs33
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/KWritableEvent.cs25
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/SignalType.cs9
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs20
-rw-r--r--src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadType.cs10
96 files changed, 18001 insertions, 0 deletions
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/IKFutureSchedulerObject.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/IKFutureSchedulerObject.cs
new file mode 100644
index 00000000..473683ff
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/IKFutureSchedulerObject.cs
@@ -0,0 +1,7 @@
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ interface IKFutureSchedulerObject
+ {
+ void TimeUp();
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/KAutoObject.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/KAutoObject.cs
new file mode 100644
index 00000000..424bf788
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/KAutoObject.cs
@@ -0,0 +1,73 @@
+using Ryujinx.Horizon.Common;
+using System.Diagnostics;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ class KAutoObject
+ {
+ protected KernelContext KernelContext;
+
+ private int _referenceCount;
+
+ public KAutoObject(KernelContext context)
+ {
+ KernelContext = context;
+
+ _referenceCount = 1;
+ }
+
+ public virtual Result SetName(string name)
+ {
+ if (!KernelContext.AutoObjectNames.TryAdd(name, this))
+ {
+ return KernelResult.InvalidState;
+ }
+
+ return Result.Success;
+ }
+
+ public static Result RemoveName(KernelContext context, string name)
+ {
+ if (!context.AutoObjectNames.TryRemove(name, out _))
+ {
+ return KernelResult.NotFound;
+ }
+
+ return Result.Success;
+ }
+
+ public static KAutoObject FindNamedObject(KernelContext context, string name)
+ {
+ if (context.AutoObjectNames.TryGetValue(name, out KAutoObject obj))
+ {
+ return obj;
+ }
+
+ return null;
+ }
+
+ public void IncrementReferenceCount()
+ {
+ int newRefCount = Interlocked.Increment(ref _referenceCount);
+
+ Debug.Assert(newRefCount >= 2);
+ }
+
+ public void DecrementReferenceCount()
+ {
+ int newRefCount = Interlocked.Decrement(ref _referenceCount);
+
+ Debug.Assert(newRefCount >= 0);
+
+ if (newRefCount == 0)
+ {
+ Destroy();
+ }
+ }
+
+ protected virtual void Destroy()
+ {
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/KResourceLimit.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/KResourceLimit.cs
new file mode 100644
index 00000000..b1a602f1
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/KResourceLimit.cs
@@ -0,0 +1,188 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using Ryujinx.Horizon.Common;
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ class KResourceLimit : KAutoObject
+ {
+ private const int DefaultTimeoutMs = 10000; // 10s
+
+ private readonly long[] _current;
+ private readonly long[] _limit;
+ private readonly long[] _current2;
+ private readonly long[] _peak;
+
+ private readonly object _lock;
+
+ private readonly LinkedList<KThread> _waitingThreads;
+
+ private int _waitingThreadsCount;
+
+ public KResourceLimit(KernelContext context) : base(context)
+ {
+ _current = new long[(int)LimitableResource.Count];
+ _limit = new long[(int)LimitableResource.Count];
+ _current2 = new long[(int)LimitableResource.Count];
+ _peak = new long[(int)LimitableResource.Count];
+
+ _lock = new object();
+
+ _waitingThreads = new LinkedList<KThread>();
+ }
+
+ public bool Reserve(LimitableResource resource, ulong amount)
+ {
+ return Reserve(resource, (long)amount);
+ }
+
+ public bool Reserve(LimitableResource resource, long amount)
+ {
+ return Reserve(resource, amount, KTimeManager.ConvertMillisecondsToNanoseconds(DefaultTimeoutMs));
+ }
+
+ public bool Reserve(LimitableResource resource, long amount, long timeout)
+ {
+ long endTimePoint = KTimeManager.ConvertNanosecondsToMilliseconds(timeout);
+
+ endTimePoint += PerformanceCounter.ElapsedMilliseconds;
+
+ bool success = false;
+
+ int index = GetIndex(resource);
+
+ lock (_lock)
+ {
+ if (_current2[index] >= _limit[index])
+ {
+ return false;
+ }
+
+ long newCurrent = _current[index] + amount;
+
+ while (newCurrent > _limit[index] && _current2[index] + amount <= _limit[index])
+ {
+ _waitingThreadsCount++;
+
+ KConditionVariable.Wait(KernelContext, _waitingThreads, _lock, timeout);
+
+ _waitingThreadsCount--;
+
+ newCurrent = _current[index] + amount;
+
+ if (timeout >= 0 && PerformanceCounter.ElapsedMilliseconds > endTimePoint)
+ {
+ break;
+ }
+ }
+
+ if (newCurrent <= _limit[index])
+ {
+ _current[index] = newCurrent;
+ _current2[index] += amount;
+
+ if (_current[index] > _peak[index])
+ {
+ _peak[index] = _current[index];
+ }
+
+ success = true;
+ }
+ }
+
+ return success;
+ }
+
+ public void Release(LimitableResource resource, ulong amount)
+ {
+ Release(resource, (long)amount);
+ }
+
+ public void Release(LimitableResource resource, long amount)
+ {
+ Release(resource, amount, amount);
+ }
+
+ public void Release(LimitableResource resource, long amount, long amount2)
+ {
+ int index = GetIndex(resource);
+
+ lock (_lock)
+ {
+ _current[index] -= amount;
+ _current2[index] -= amount2;
+
+ if (_waitingThreadsCount > 0)
+ {
+ KConditionVariable.NotifyAll(KernelContext, _waitingThreads);
+ }
+ }
+ }
+
+ public long GetRemainingValue(LimitableResource resource)
+ {
+ int index = GetIndex(resource);
+
+ lock (_lock)
+ {
+ return _limit[index] - _current[index];
+ }
+ }
+
+ public long GetCurrentValue(LimitableResource resource)
+ {
+ int index = GetIndex(resource);
+
+ lock (_lock)
+ {
+ return _current[index];
+ }
+ }
+
+ public long GetLimitValue(LimitableResource resource)
+ {
+ int index = GetIndex(resource);
+
+ lock (_lock)
+ {
+ return _limit[index];
+ }
+ }
+
+ public long GetPeakValue(LimitableResource resource)
+ {
+ int index = GetIndex(resource);
+
+ lock (_lock)
+ {
+ return _peak[index];
+ }
+ }
+
+ public Result SetLimitValue(LimitableResource resource, long limit)
+ {
+ int index = GetIndex(resource);
+
+ lock (_lock)
+ {
+ if (_current[index] <= limit)
+ {
+ _limit[index] = limit;
+ _peak[index] = _current[index];
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidState;
+ }
+ }
+ }
+
+ private static int GetIndex(LimitableResource resource)
+ {
+ return (int)resource;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/KSynchronizationObject.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/KSynchronizationObject.cs
new file mode 100644
index 00000000..ddc0069d
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/KSynchronizationObject.cs
@@ -0,0 +1,35 @@
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ class KSynchronizationObject : KAutoObject
+ {
+ public LinkedList<KThread> WaitingThreads { get; }
+
+ public KSynchronizationObject(KernelContext context) : base(context)
+ {
+ WaitingThreads = new LinkedList<KThread>();
+ }
+
+ public LinkedListNode<KThread> AddWaitingThread(KThread thread)
+ {
+ return WaitingThreads.AddLast(thread);
+ }
+
+ public void RemoveWaitingThread(LinkedListNode<KThread> node)
+ {
+ WaitingThreads.Remove(node);
+ }
+
+ public virtual void Signal()
+ {
+ KernelContext.Synchronization.SignalObject(this);
+ }
+
+ public virtual bool IsSignaled()
+ {
+ return false;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/KSystemControl.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/KSystemControl.cs
new file mode 100644
index 00000000..8a727c30
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/KSystemControl.cs
@@ -0,0 +1,78 @@
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ static class KSystemControl
+ {
+ private const ulong KiB = 1024;
+ private const ulong MiB = 1024 * KiB;
+ private const ulong GiB = 1024 * MiB;
+
+ private const ulong PageSize = 4 * KiB;
+
+ private const ulong RequiredNonSecureSystemPoolSizeVi = 0x2238 * PageSize;
+ private const ulong RequiredNonSecureSystemPoolSizeNvservices = 0x710 * PageSize;
+ private const ulong RequiredNonSecureSystemPoolSizeOther = 0x80 * PageSize;
+
+ private const ulong RequiredNonSecureSystemPoolSize =
+ RequiredNonSecureSystemPoolSizeVi +
+ RequiredNonSecureSystemPoolSizeNvservices +
+ RequiredNonSecureSystemPoolSizeOther;
+
+ public static ulong GetApplicationPoolSize(MemoryArrange arrange)
+ {
+ return arrange switch
+ {
+ MemoryArrange.MemoryArrange4GiB or
+ MemoryArrange.MemoryArrange4GiBSystemDev or
+ MemoryArrange.MemoryArrange6GiBAppletDev => 3285 * MiB,
+ MemoryArrange.MemoryArrange4GiBAppletDev => 2048 * MiB,
+ MemoryArrange.MemoryArrange6GiB or
+ MemoryArrange.MemoryArrange8GiB => 4916 * MiB,
+ _ => throw new ArgumentException($"Invalid memory arrange \"{arrange}\".")
+ };
+ }
+
+ public static ulong GetAppletPoolSize(MemoryArrange arrange)
+ {
+ return arrange switch
+ {
+ MemoryArrange.MemoryArrange4GiB => 507 * MiB,
+ MemoryArrange.MemoryArrange4GiBAppletDev => 1554 * MiB,
+ MemoryArrange.MemoryArrange4GiBSystemDev => 448 * MiB,
+ MemoryArrange.MemoryArrange6GiB => 562 * MiB,
+ MemoryArrange.MemoryArrange6GiBAppletDev or
+ MemoryArrange.MemoryArrange8GiB => 2193 * MiB,
+ _ => throw new ArgumentException($"Invalid memory arrange \"{arrange}\".")
+ };
+ }
+
+ public static ulong GetMinimumNonSecureSystemPoolSize()
+ {
+ return RequiredNonSecureSystemPoolSize;
+ }
+
+ public static ulong GetDramEndAddress(MemorySize size)
+ {
+ return DramMemoryMap.DramBase + GetDramSize(size);
+ }
+
+ public static ulong GenerateRandom()
+ {
+ // TODO
+ return 0;
+ }
+
+ public static ulong GetDramSize(MemorySize size)
+ {
+ return size switch
+ {
+ MemorySize.MemorySize4GiB => 4 * GiB,
+ MemorySize.MemorySize6GiB => 6 * GiB,
+ MemorySize.MemorySize8GiB => 8 * GiB,
+ _ => throw new ArgumentException($"Invalid memory size \"{size}\".")
+ };
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/KTimeManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/KTimeManager.cs
new file mode 100644
index 00000000..c0cd9ce9
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/KTimeManager.cs
@@ -0,0 +1,218 @@
+using Ryujinx.Common;
+using System;
+using System.Collections.Generic;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ class KTimeManager : IDisposable
+ {
+ public static readonly long DefaultTimeIncrementNanoseconds = ConvertGuestTicksToNanoseconds(2);
+
+ private class WaitingObject
+ {
+ public IKFutureSchedulerObject Object { get; }
+ public long TimePoint { get; }
+
+ public WaitingObject(IKFutureSchedulerObject schedulerObj, long timePoint)
+ {
+ Object = schedulerObj;
+ TimePoint = timePoint;
+ }
+ }
+
+ private readonly KernelContext _context;
+ private readonly List<WaitingObject> _waitingObjects;
+ private AutoResetEvent _waitEvent;
+ private bool _keepRunning;
+ private long _enforceWakeupFromSpinWait;
+
+ private const long NanosecondsPerSecond = 1000000000L;
+ private const long NanosecondsPerMillisecond = 1000000L;
+
+ public KTimeManager(KernelContext context)
+ {
+ _context = context;
+ _waitingObjects = new List<WaitingObject>();
+ _keepRunning = true;
+
+ Thread work = new Thread(WaitAndCheckScheduledObjects)
+ {
+ Name = "HLE.TimeManager"
+ };
+
+ work.Start();
+ }
+
+ public void ScheduleFutureInvocation(IKFutureSchedulerObject schedulerObj, long timeout)
+ {
+ long startTime = PerformanceCounter.ElapsedTicks;
+ long timePoint = startTime + ConvertNanosecondsToHostTicks(timeout);
+
+ if (timePoint < startTime)
+ {
+ timePoint = long.MaxValue;
+ }
+
+ lock (_context.CriticalSection.Lock)
+ {
+ _waitingObjects.Add(new WaitingObject(schedulerObj, timePoint));
+
+ if (timeout < NanosecondsPerMillisecond)
+ {
+ Interlocked.Exchange(ref _enforceWakeupFromSpinWait, 1);
+ }
+ }
+
+ _waitEvent.Set();
+ }
+
+ public void UnscheduleFutureInvocation(IKFutureSchedulerObject schedulerObj)
+ {
+ lock (_context.CriticalSection.Lock)
+ {
+ for (int index = _waitingObjects.Count - 1; index >= 0; index--)
+ {
+ if (_waitingObjects[index].Object == schedulerObj)
+ {
+ _waitingObjects.RemoveAt(index);
+ }
+ }
+ }
+ }
+
+ private void WaitAndCheckScheduledObjects()
+ {
+ SpinWait spinWait = new SpinWait();
+ WaitingObject next;
+
+ using (_waitEvent = new AutoResetEvent(false))
+ {
+ while (_keepRunning)
+ {
+ lock (_context.CriticalSection.Lock)
+ {
+ Interlocked.Exchange(ref _enforceWakeupFromSpinWait, 0);
+
+ next = GetNextWaitingObject();
+ }
+
+ if (next != null)
+ {
+ long timePoint = PerformanceCounter.ElapsedTicks;
+
+ if (next.TimePoint > timePoint)
+ {
+ long ms = Math.Min((next.TimePoint - timePoint) / PerformanceCounter.TicksPerMillisecond, int.MaxValue);
+
+ if (ms > 0)
+ {
+ _waitEvent.WaitOne((int)ms);
+ }
+ else
+ {
+ while (Interlocked.Read(ref _enforceWakeupFromSpinWait) != 1 && PerformanceCounter.ElapsedTicks < next.TimePoint)
+ {
+ // Our time is close - don't let SpinWait go off and potentially Thread.Sleep().
+ if (spinWait.NextSpinWillYield)
+ {
+ Thread.Yield();
+
+ spinWait.Reset();
+ }
+ else
+ {
+ spinWait.SpinOnce();
+ }
+ }
+
+ spinWait.Reset();
+ }
+ }
+
+ bool timeUp = PerformanceCounter.ElapsedTicks >= next.TimePoint;
+
+ if (timeUp)
+ {
+ lock (_context.CriticalSection.Lock)
+ {
+ if (_waitingObjects.Remove(next))
+ {
+ next.Object.TimeUp();
+ }
+ }
+ }
+ }
+ else
+ {
+ _waitEvent.WaitOne();
+ }
+ }
+ }
+ }
+
+ private WaitingObject GetNextWaitingObject()
+ {
+ WaitingObject selected = null;
+
+ long lowestTimePoint = long.MaxValue;
+
+ for (int index = _waitingObjects.Count - 1; index >= 0; index--)
+ {
+ WaitingObject current = _waitingObjects[index];
+
+ if (current.TimePoint <= lowestTimePoint)
+ {
+ selected = current;
+ lowestTimePoint = current.TimePoint;
+ }
+ }
+
+ return selected;
+ }
+
+ public static long ConvertNanosecondsToMilliseconds(long time)
+ {
+ time /= NanosecondsPerMillisecond;
+
+ if ((ulong)time > int.MaxValue)
+ {
+ return int.MaxValue;
+ }
+
+ return time;
+ }
+
+ public static long ConvertMillisecondsToNanoseconds(long time)
+ {
+ return time * NanosecondsPerMillisecond;
+ }
+
+ public static long ConvertNanosecondsToHostTicks(long ns)
+ {
+ long nsDiv = ns / NanosecondsPerSecond;
+ long nsMod = ns % NanosecondsPerSecond;
+ long tickDiv = PerformanceCounter.TicksPerSecond / NanosecondsPerSecond;
+ long tickMod = PerformanceCounter.TicksPerSecond % NanosecondsPerSecond;
+
+ long baseTicks = (nsMod * tickMod + PerformanceCounter.TicksPerSecond - 1) / NanosecondsPerSecond;
+ return (nsDiv * tickDiv) * NanosecondsPerSecond + nsDiv * tickMod + nsMod * tickDiv + baseTicks;
+ }
+
+ public static long ConvertGuestTicksToNanoseconds(long ticks)
+ {
+ return (long)Math.Ceiling(ticks * (1000000000.0 / 19200000.0));
+ }
+
+ public static long ConvertHostTicksToTicks(long time)
+ {
+ return (long)((time / (double)PerformanceCounter.TicksPerSecond) * 19200000.0);
+ }
+
+ public void Dispose()
+ {
+ _keepRunning = false;
+ _waitEvent?.Set();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/KernelInit.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/KernelInit.cs
new file mode 100644
index 00000000..efa2a480
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/KernelInit.cs
@@ -0,0 +1,89 @@
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using Ryujinx.Horizon.Common;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ static class KernelInit
+ {
+ private readonly struct MemoryRegion
+ {
+ public ulong Address { get; }
+ public ulong Size { get; }
+
+ public ulong EndAddress => Address + Size;
+
+ public MemoryRegion(ulong address, ulong size)
+ {
+ Address = address;
+ Size = size;
+ }
+ }
+
+ public static void InitializeResourceLimit(KResourceLimit resourceLimit, MemorySize size)
+ {
+ void EnsureSuccess(Result result)
+ {
+ if (result != Result.Success)
+ {
+ throw new InvalidOperationException($"Unexpected result \"{result}\".");
+ }
+ }
+
+ ulong ramSize = KSystemControl.GetDramSize(size);
+
+ EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.Memory, (long)ramSize));
+ EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.Thread, 800));
+ EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.Event, 700));
+ EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.TransferMemory, 200));
+ EnsureSuccess(resourceLimit.SetLimitValue(LimitableResource.Session, 900));
+
+ if (!resourceLimit.Reserve(LimitableResource.Memory, 0) ||
+ !resourceLimit.Reserve(LimitableResource.Memory, 0x60000))
+ {
+ throw new InvalidOperationException("Unexpected failure reserving memory on resource limit.");
+ }
+ }
+
+ public static KMemoryRegionManager[] GetMemoryRegions(MemorySize size, MemoryArrange arrange)
+ {
+ ulong poolEnd = KSystemControl.GetDramEndAddress(size);
+ ulong applicationPoolSize = KSystemControl.GetApplicationPoolSize(arrange);
+ ulong appletPoolSize = KSystemControl.GetAppletPoolSize(arrange);
+
+ MemoryRegion servicePool;
+ MemoryRegion nvServicesPool;
+ MemoryRegion appletPool;
+ MemoryRegion applicationPool;
+
+ ulong nvServicesPoolSize = KSystemControl.GetMinimumNonSecureSystemPoolSize();
+
+ applicationPool = new MemoryRegion(poolEnd - applicationPoolSize, applicationPoolSize);
+
+ ulong nvServicesPoolEnd = applicationPool.Address - appletPoolSize;
+
+ nvServicesPool = new MemoryRegion(nvServicesPoolEnd - nvServicesPoolSize, nvServicesPoolSize);
+ appletPool = new MemoryRegion(nvServicesPoolEnd, appletPoolSize);
+
+ // Note: There is an extra region used by the kernel, however
+ // since we are doing HLE we are not going to use that memory, so give all
+ // the remaining memory space to services.
+ ulong servicePoolSize = nvServicesPool.Address - DramMemoryMap.SlabHeapEnd;
+
+ servicePool = new MemoryRegion(DramMemoryMap.SlabHeapEnd, servicePoolSize);
+
+ return new KMemoryRegionManager[]
+ {
+ GetMemoryRegion(applicationPool),
+ GetMemoryRegion(appletPool),
+ GetMemoryRegion(servicePool),
+ GetMemoryRegion(nvServicesPool)
+ };
+ }
+
+ private static KMemoryRegionManager GetMemoryRegion(MemoryRegion region)
+ {
+ return new KMemoryRegionManager(region.Address, region.Size, region.EndAddress);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/KernelTransfer.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/KernelTransfer.cs
new file mode 100644
index 00000000..cbc276c5
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/KernelTransfer.cs
@@ -0,0 +1,73 @@
+using Ryujinx.Cpu;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ static class KernelTransfer
+ {
+ public static bool UserToKernel<T>(out T value, ulong address) where T : unmanaged
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (currentProcess.CpuMemory.IsRangeMapped(address, (ulong)Unsafe.SizeOf<T>()))
+ {
+ value = currentProcess.CpuMemory.Read<T>(address);
+
+ return true;
+ }
+
+ value = default;
+
+ return false;
+ }
+
+ public static bool UserToKernelArray<T>(ulong address, Span<T> values) where T : unmanaged
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ Span<byte> data = MemoryMarshal.Cast<T, byte>(values);
+
+ if (currentProcess.CpuMemory.IsRangeMapped(address, (ulong)data.Length))
+ {
+ currentProcess.CpuMemory.Read(address, data);
+
+ return true;
+ }
+
+ return false;
+ }
+
+ public static bool UserToKernelString(out string value, ulong address, uint size)
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (currentProcess.CpuMemory.IsRangeMapped(address, size))
+ {
+ value = MemoryHelper.ReadAsciiString(currentProcess.CpuMemory, address, size);
+
+ return true;
+ }
+
+ value = null;
+
+ return false;
+ }
+
+ public static bool KernelToUser<T>(ulong address, T value) where T: unmanaged
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (currentProcess.CpuMemory.IsRangeMapped(address, (ulong)Unsafe.SizeOf<T>()))
+ {
+ currentProcess.CpuMemory.Write(address, value);
+
+ return true;
+ }
+
+ return false;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/LimitableResource.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/LimitableResource.cs
new file mode 100644
index 00000000..2e6a3e45
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/LimitableResource.cs
@@ -0,0 +1,13 @@
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ enum LimitableResource : byte
+ {
+ Memory = 0,
+ Thread = 1,
+ Event = 2,
+ TransferMemory = 3,
+ Session = 4,
+
+ Count = 5
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/MemoryArrange.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/MemoryArrange.cs
new file mode 100644
index 00000000..d2bcfd62
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/MemoryArrange.cs
@@ -0,0 +1,12 @@
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ enum MemoryArrange : byte
+ {
+ MemoryArrange4GiB,
+ MemoryArrange4GiBAppletDev,
+ MemoryArrange4GiBSystemDev,
+ MemoryArrange6GiB,
+ MemoryArrange6GiBAppletDev,
+ MemoryArrange8GiB
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/MemroySize.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/MemroySize.cs
new file mode 100644
index 00000000..159385b6
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/MemroySize.cs
@@ -0,0 +1,9 @@
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ enum MemorySize : byte
+ {
+ MemorySize4GiB = 0,
+ MemorySize6GiB = 1,
+ MemorySize8GiB = 2
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Common/MersenneTwister.cs b/src/Ryujinx.HLE/HOS/Kernel/Common/MersenneTwister.cs
new file mode 100644
index 00000000..4c99f425
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Common/MersenneTwister.cs
@@ -0,0 +1,128 @@
+using System.Numerics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Common
+{
+ class MersenneTwister
+ {
+ private int _index;
+ private uint[] _mt;
+
+ public MersenneTwister(uint seed)
+ {
+ _mt = new uint[624];
+
+ _mt[0] = seed;
+
+ for (int mtIdx = 1; mtIdx < _mt.Length; mtIdx++)
+ {
+ uint prev = _mt[mtIdx - 1];
+
+ _mt[mtIdx] = (uint)(0x6c078965 * (prev ^ (prev >> 30)) + mtIdx);
+ }
+
+ _index = _mt.Length;
+ }
+
+ public long GenRandomNumber(long min, long max)
+ {
+ long range = max - min;
+
+ if (min == max)
+ {
+ return min;
+ }
+
+ if (range == -1)
+ {
+ // Increment would cause a overflow, special case.
+ return GenRandomNumber(2, 2, 32, 0xffffffffu, 0xffffffffu);
+ }
+
+ range++;
+
+ // This is log2(Range) plus one.
+ int nextRangeLog2 = 64 - BitOperations.LeadingZeroCount((ulong)range);
+
+ // If Range is already power of 2, subtract one to use log2(Range) directly.
+ int rangeLog2 = nextRangeLog2 - (BitOperations.IsPow2(range) ? 1 : 0);
+
+ int parts = rangeLog2 > 32 ? 2 : 1;
+ int bitsPerPart = rangeLog2 / parts;
+
+ int fullParts = parts - (rangeLog2 - parts * bitsPerPart);
+
+ uint mask = 0xffffffffu >> (32 - bitsPerPart);
+ uint maskPlus1 = 0xffffffffu >> (31 - bitsPerPart);
+
+ long randomNumber;
+
+ do
+ {
+ randomNumber = GenRandomNumber(parts, fullParts, bitsPerPart, mask, maskPlus1);
+ }
+ while ((ulong)randomNumber >= (ulong)range);
+
+ return min + randomNumber;
+ }
+
+ private long GenRandomNumber(
+ int parts,
+ int fullParts,
+ int bitsPerPart,
+ uint mask,
+ uint maskPlus1)
+ {
+ long randomNumber = 0;
+
+ int part = 0;
+
+ for (; part < fullParts; part++)
+ {
+ randomNumber <<= bitsPerPart;
+ randomNumber |= GenRandomNumber() & mask;
+ }
+
+ for (; part < parts; part++)
+ {
+ randomNumber <<= bitsPerPart + 1;
+ randomNumber |= GenRandomNumber() & maskPlus1;
+ }
+
+ return randomNumber;
+ }
+
+ private uint GenRandomNumber()
+ {
+ if (_index >= _mt.Length)
+ {
+ Twist();
+ }
+
+ uint value = _mt[_index++];
+
+ value ^= value >> 11;
+ value ^= (value << 7) & 0x9d2c5680;
+ value ^= (value << 15) & 0xefc60000;
+ value ^= value >> 18;
+
+ return value;
+ }
+
+ private void Twist()
+ {
+ for (int mtIdx = 0; mtIdx < _mt.Length; mtIdx++)
+ {
+ uint value = (_mt[mtIdx] & 0x80000000) + (_mt[(mtIdx + 1) % _mt.Length] & 0x7fffffff);
+
+ _mt[mtIdx] = _mt[(mtIdx + 397) % _mt.Length] ^ (value >> 1);
+
+ if ((value & 1) != 0)
+ {
+ _mt[mtIdx] ^= 0x9908b0df;
+ }
+ }
+
+ _index = 0;
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/ChannelState.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/ChannelState.cs
new file mode 100644
index 00000000..4827384e
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/ChannelState.cs
@@ -0,0 +1,10 @@
+namespace Ryujinx.HLE.HOS.Kernel.Ipc
+{
+ enum ChannelState
+ {
+ NotInitialized,
+ Open,
+ ClientDisconnected,
+ ServerDisconnected
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptor.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptor.cs
new file mode 100644
index 00000000..e28244d4
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptor.cs
@@ -0,0 +1,20 @@
+using Ryujinx.HLE.HOS.Kernel.Memory;
+
+namespace Ryujinx.HLE.HOS.Kernel.Ipc
+{
+ class KBufferDescriptor
+ {
+ public ulong ClientAddress { get; }
+ public ulong ServerAddress { get; }
+ public ulong Size { get; }
+ public MemoryState State { get; }
+
+ public KBufferDescriptor(ulong src, ulong dst, ulong size, MemoryState state)
+ {
+ ClientAddress = src;
+ ServerAddress = dst;
+ Size = size;
+ State = state;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptorTable.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptorTable.cs
new file mode 100644
index 00000000..593d2c9d
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptorTable.cs
@@ -0,0 +1,217 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Memory;
using Ryujinx.Horizon.Common;
using System.Collections.Generic;

namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
    /// <summary>
    /// Tracks the send, receive and exchange buffer descriptors of an in-flight
    /// IPC request, so the server side mappings can be undone (and modified data
    /// copied back to the client) when the request completes.
    /// </summary>
    class KBufferDescriptorTable
    {
        private const int MaxInternalBuffersCount = 8;

        // Assigned only in the constructor, so the lists can be read-only.
        private readonly List<KBufferDescriptor> _sendBufferDescriptors;
        private readonly List<KBufferDescriptor> _receiveBufferDescriptors;
        private readonly List<KBufferDescriptor> _exchangeBufferDescriptors;

        public KBufferDescriptorTable()
        {
            _sendBufferDescriptors = new List<KBufferDescriptor>(MaxInternalBuffersCount);
            _receiveBufferDescriptors = new List<KBufferDescriptor>(MaxInternalBuffersCount);
            _exchangeBufferDescriptors = new List<KBufferDescriptor>(MaxInternalBuffersCount);
        }

        /// <summary>Registers a send (input) buffer mapping.</summary>
        public Result AddSendBuffer(ulong src, ulong dst, ulong size, MemoryState state)
        {
            return Add(_sendBufferDescriptors, src, dst, size, state);
        }

        /// <summary>Registers a receive (output) buffer mapping.</summary>
        public Result AddReceiveBuffer(ulong src, ulong dst, ulong size, MemoryState state)
        {
            return Add(_receiveBufferDescriptors, src, dst, size, state);
        }

        /// <summary>Registers an exchange (input/output) buffer mapping.</summary>
        public Result AddExchangeBuffer(ulong src, ulong dst, ulong size, MemoryState state)
        {
            return Add(_exchangeBufferDescriptors, src, dst, size, state);
        }

        // Appends a descriptor, enforcing the per-category capacity limit.
        private Result Add(List<KBufferDescriptor> list, ulong src, ulong dst, ulong size, MemoryState state)
        {
            if (list.Count < MaxInternalBuffersCount)
            {
                list.Add(new KBufferDescriptor(src, dst, size, state));

                return Result.Success;
            }

            return KernelResult.OutOfMemory;
        }

        /// <summary>
        /// Copies data written by the server back to the client for all receive
        /// and exchange buffers. Send buffers are input-only and are not copied.
        /// </summary>
        public Result CopyBuffersToClient(KPageTableBase memoryManager)
        {
            Result result = CopyToClient(memoryManager, _receiveBufferDescriptors);

            if (result != Result.Success)
            {
                return result;
            }

            return CopyToClient(memoryManager, _exchangeBufferDescriptors);
        }

        private Result CopyToClient(KPageTableBase memoryManager, List<KBufferDescriptor> list)
        {
            foreach (KBufferDescriptor desc in list)
            {
                MemoryState stateMask;

                switch (desc.State)
                {
                    case MemoryState.IpcBuffer0:
                        stateMask = MemoryState.IpcSendAllowedType0;
                        break;
                    case MemoryState.IpcBuffer1:
                        stateMask = MemoryState.IpcSendAllowedType1;
                        break;
                    case MemoryState.IpcBuffer3:
                        stateMask = MemoryState.IpcSendAllowedType3;
                        break;
                    default:
                        return KernelResult.InvalidCombination;
                }

                MemoryAttribute attributeMask = MemoryAttribute.Borrowed | MemoryAttribute.Uncached;

                if (desc.State == MemoryState.IpcBuffer0)
                {
                    attributeMask |= MemoryAttribute.DeviceMapped;
                }

                ulong clientAddrTruncated = BitUtils.AlignDown<ulong>(desc.ClientAddress, KPageTableBase.PageSize);
                ulong clientAddrRounded = BitUtils.AlignUp<ulong>(desc.ClientAddress, KPageTableBase.PageSize);

                // Check if address is not aligned, in this case we need to perform 2 copies:
                // the misaligned head chunk here, up to the next page boundary.
                if (clientAddrTruncated != clientAddrRounded)
                {
                    ulong copySize = clientAddrRounded - desc.ClientAddress;

                    if (copySize > desc.Size)
                    {
                        copySize = desc.Size;
                    }

                    Result result = memoryManager.CopyDataFromCurrentProcess(
                        desc.ClientAddress,
                        copySize,
                        stateMask,
                        stateMask,
                        KMemoryPermission.ReadAndWrite,
                        attributeMask,
                        MemoryAttribute.None,
                        desc.ServerAddress);

                    if (result != Result.Success)
                    {
                        return result;
                    }
                }

                ulong clientEndAddr = desc.ClientAddress + desc.Size;
                ulong serverEndAddr = desc.ServerAddress + desc.Size;

                ulong clientEndAddrTruncated = BitUtils.AlignDown<ulong>(clientEndAddr, KPageTableBase.PageSize);
                ulong clientEndAddrRounded = BitUtils.AlignUp<ulong>(clientEndAddr, KPageTableBase.PageSize);
                ulong serverEndAddrTruncated = BitUtils.AlignDown<ulong>(serverEndAddr, KPageTableBase.PageSize);

                // Copy the misaligned tail chunk (anything past the last full page),
                // when the buffer actually extends past its first page boundary.
                if (clientEndAddrTruncated < clientEndAddrRounded &&
                    (clientAddrTruncated == clientAddrRounded || clientAddrTruncated < clientEndAddrTruncated))
                {
                    Result result = memoryManager.CopyDataFromCurrentProcess(
                        clientEndAddrTruncated,
                        clientEndAddr - clientEndAddrTruncated,
                        stateMask,
                        stateMask,
                        KMemoryPermission.ReadAndWrite,
                        attributeMask,
                        MemoryAttribute.None,
                        serverEndAddrTruncated);

                    if (result != Result.Success)
                    {
                        return result;
                    }
                }
            }

            return Result.Success;
        }

        /// <summary>
        /// Unmaps all buffers previously mapped on the server process.
        /// Stops and returns the error on the first failed unmap.
        /// </summary>
        public Result UnmapServerBuffers(KPageTableBase memoryManager)
        {
            Result result = UnmapServer(memoryManager, _sendBufferDescriptors);

            if (result != Result.Success)
            {
                return result;
            }

            result = UnmapServer(memoryManager, _receiveBufferDescriptors);

            if (result != Result.Success)
            {
                return result;
            }

            return UnmapServer(memoryManager, _exchangeBufferDescriptors);
        }

        private Result UnmapServer(KPageTableBase memoryManager, List<KBufferDescriptor> list)
        {
            foreach (KBufferDescriptor descriptor in list)
            {
                Result result = memoryManager.UnmapNoAttributeIfStateEquals(
                    descriptor.ServerAddress,
                    descriptor.Size,
                    descriptor.State);

                if (result != Result.Success)
                {
                    return result;
                }
            }

            return Result.Success;
        }

        /// <summary>
        /// Restores the original permissions of all client buffers after the
        /// server mappings were removed.
        /// </summary>
        public Result RestoreClientBuffers(KPageTableBase memoryManager)
        {
            Result result = RestoreClient(memoryManager, _sendBufferDescriptors);

            if (result != Result.Success)
            {
                return result;
            }

            result = RestoreClient(memoryManager, _receiveBufferDescriptors);

            if (result != Result.Success)
            {
                return result;
            }

            return RestoreClient(memoryManager, _exchangeBufferDescriptors);
        }

        private Result RestoreClient(KPageTableBase memoryManager, List<KBufferDescriptor> list)
        {
            foreach (KBufferDescriptor descriptor in list)
            {
                Result result = memoryManager.UnmapIpcRestorePermission(
                    descriptor.ClientAddress,
                    descriptor.Size,
                    descriptor.State);

                if (result != Result.Success)
                {
                    return result;
                }
            }

            return Result.Success;
        }
    }
}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KClientPort.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KClientPort.cs
new file mode 100644
index 00000000..eb7c5a41
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KClientPort.cs
@@ -0,0 +1,144 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Horizon.Common;
using System.Threading;

namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
    /// <summary>
    /// Client endpoint of a kernel IPC port. Processes connect through it to
    /// create sessions, subject to the port's maximum session count and the
    /// process session resource limit.
    /// </summary>
    class KClientPort : KSynchronizationObject
    {
        private int _sessionsCount;
        private readonly int _maxSessions;

        private readonly KPort _parent;

        public bool IsLight => _parent.IsLight;

        public KClientPort(KernelContext context, KPort parent, int maxSessions) : base(context)
        {
            _maxSessions = maxSessions;
            _parent = parent;
        }

        /// <summary>
        /// Creates a new session and enqueues its server side on the parent port.
        /// </summary>
        /// <param name="clientSession">The client endpoint of the new session, or null on failure.</param>
        /// <returns>
        /// <see cref="Result.Success"/>, <see cref="KernelResult.ResLimitExceeded"/>,
        /// <see cref="KernelResult.SessionCountExceeded"/>, or the enqueue error.
        /// </returns>
        public Result Connect(out KClientSession clientSession)
        {
            clientSession = null;

            KProcess currentProcess = KernelStatic.GetCurrentProcess();

            Result result = ReserveSession(currentProcess);

            if (result != Result.Success)
            {
                return result;
            }

            KSession session = new KSession(KernelContext, this);

            result = _parent.EnqueueIncomingSession(session.ServerSession);

            if (result != Result.Success)
            {
                // Undo the creation reference on both endpoints so the session is destroyed.
                session.ClientSession.DecrementReferenceCount();
                session.ServerSession.DecrementReferenceCount();

                return result;
            }

            clientSession = session.ClientSession;

            return result;
        }

        /// <summary>
        /// Creates a new light session and enqueues its server side on the parent port.
        /// </summary>
        /// <param name="clientSession">The light client endpoint of the new session, or null on failure.</param>
        public Result ConnectLight(out KLightClientSession clientSession)
        {
            clientSession = null;

            KProcess currentProcess = KernelStatic.GetCurrentProcess();

            Result result = ReserveSession(currentProcess);

            if (result != Result.Success)
            {
                return result;
            }

            KLightSession session = new KLightSession(KernelContext);

            result = _parent.EnqueueIncomingLightSession(session.ServerSession);

            if (result != Result.Success)
            {
                // Undo the creation reference on both endpoints so the session is destroyed.
                session.ClientSession.DecrementReferenceCount();
                session.ServerSession.DecrementReferenceCount();

                return result;
            }

            clientSession = session.ClientSession;

            return result;
        }

        // Common prologue of Connect and ConnectLight: reserves one session on the
        // process resource limit (if any), then on this port, rolling the resource
        // limit reservation back if the port is already full.
        private Result ReserveSession(KProcess currentProcess)
        {
            if (currentProcess.ResourceLimit != null &&
                !currentProcess.ResourceLimit.Reserve(LimitableResource.Session, 1))
            {
                return KernelResult.ResLimitExceeded;
            }

            if (!IncrementSessionsCount())
            {
                currentProcess.ResourceLimit?.Release(LimitableResource.Session, 1);

                return KernelResult.SessionCountExceeded;
            }

            return Result.Success;
        }

        // Lock-free increment of the session count, bounded by _maxSessions.
        // Returns false when the port is already at capacity.
        private bool IncrementSessionsCount()
        {
            while (true)
            {
                int currentCount = _sessionsCount;

                if (currentCount < _maxSessions)
                {
                    if (Interlocked.CompareExchange(ref _sessionsCount, currentCount + 1, currentCount) == currentCount)
                    {
                        return true;
                    }
                }
                else
                {
                    return false;
                }
            }
        }

        /// <summary>
        /// Releases one session slot, signaling the port when a slot just became available.
        /// </summary>
        public void Disconnect()
        {
            KernelContext.CriticalSection.Enter();

            SignalIfMaximumReached(Interlocked.Decrement(ref _sessionsCount));

            KernelContext.CriticalSection.Leave();
        }

        // Note: called with the post-decrement value, so this signals when the
        // count dropped from _maxSessions + 1... in practice it fires when the
        // count equals the maximum — TODO(review): confirm intended semantics.
        private void SignalIfMaximumReached(int value)
        {
            if (value == _maxSessions)
            {
                Signal();
            }
        }

        /// <summary>
        /// Removes a named object, but only if it is a client port.
        /// </summary>
        public new static Result RemoveName(KernelContext context, string name)
        {
            KAutoObject foundObj = FindNamedObject(context, name);

            if (foundObj is not KClientPort)
            {
                return KernelResult.NotFound;
            }

            return KAutoObject.RemoveName(context, name);
        }
    }
}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KClientSession.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KClientSession.cs
new file mode 100644
index 00000000..a24bcc31
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KClientSession.cs
@@ -0,0 +1,84 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.HLE.HOS.Kernel.Threading;
using Ryujinx.Horizon.Common;

namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
    /// <summary>
    /// Client endpoint of an IPC session. Requests sent here are queued on the
    /// server endpoint of the parent <see cref="KSession"/>.
    /// </summary>
    class KClientSession : KSynchronizationObject
    {
        public KProcess CreatorProcess { get; }

        // Assigned only in the constructor, so it can be read-only.
        private readonly KSession _parent;

        public ChannelState State { get; set; }

        public KClientPort ParentPort { get; }

        public KClientSession(KernelContext context, KSession parent, KClientPort parentPort) : base(context)
        {
            _parent = parent;
            ParentPort = parentPort;

            // Sessions created from a port keep the port alive until destroyed.
            parentPort?.IncrementReferenceCount();

            State = ChannelState.Open;

            CreatorProcess = KernelStatic.GetCurrentProcess();
            CreatorProcess.IncrementReferenceCount();
        }

        /// <summary>
        /// Sends a synchronous IPC request to the server endpoint.
        /// </summary>
        /// <param name="customCmdBuffAddr">Optional custom command buffer address (0 = use the thread TLS).</param>
        /// <param name="customCmdBuffSize">Size of the custom command buffer.</param>
        /// <returns>The enqueue result, or the thread sync result once the request completes.</returns>
        public Result SendSyncRequest(ulong customCmdBuffAddr = 0, ulong customCmdBuffSize = 0)
        {
            KThread currentThread = KernelStatic.GetCurrentThread();

            KSessionRequest request = new KSessionRequest(currentThread, customCmdBuffAddr, customCmdBuffSize);

            KernelContext.CriticalSection.Enter();

            // Reset the thread sync state before enqueueing; EnqueueRequest pauses
            // the thread, and ObjSyncResult carries the server's reply status.
            currentThread.SignaledObj = null;
            currentThread.ObjSyncResult = Result.Success;

            Result result = _parent.ServerSession.EnqueueRequest(request);

            KernelContext.CriticalSection.Leave();

            if (result == Result.Success)
            {
                result = currentThread.ObjSyncResult;
            }

            return result;
        }

        /// <summary>
        /// Sends an asynchronous IPC request; <paramref name="asyncEvent"/> is
        /// attached to the request instead of pausing the calling thread.
        /// </summary>
        public Result SendAsyncRequest(KWritableEvent asyncEvent, ulong customCmdBuffAddr = 0, ulong customCmdBuffSize = 0)
        {
            KThread currentThread = KernelStatic.GetCurrentThread();

            KSessionRequest request = new KSessionRequest(currentThread, customCmdBuffAddr, customCmdBuffSize, asyncEvent);

            KernelContext.CriticalSection.Enter();

            Result result = _parent.ServerSession.EnqueueRequest(request);

            KernelContext.CriticalSection.Leave();

            return result;
        }

        /// <summary>
        /// Releases this session's slot and reference on the parent port, if any.
        /// </summary>
        public void DisconnectFromPort()
        {
            if (ParentPort != null)
            {
                ParentPort.Disconnect();
                ParentPort.DecrementReferenceCount();
            }
        }

        protected override void Destroy()
        {
            _parent.DisconnectClient();
            _parent.DecrementReferenceCount();
        }
    }
}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightClientSession.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightClientSession.cs
new file mode 100644
index 00000000..27a9732b
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightClientSession.cs
@@ -0,0 +1,14 @@
using Ryujinx.HLE.HOS.Kernel.Common;

namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
    // Client endpoint of a light IPC session. Currently only keeps a reference
    // to its parent KLightSession; no messaging behavior is implemented here.
    class KLightClientSession : KAutoObject
    {
        private readonly KLightSession _parent;

        public KLightClientSession(KernelContext context, KLightSession parent) : base(context)
        {
            _parent = parent;
        }
    }
}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightServerSession.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightServerSession.cs
new file mode 100644
index 00000000..0edbba6c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightServerSession.cs
@@ -0,0 +1,14 @@
using Ryujinx.HLE.HOS.Kernel.Common;

namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
    // Server endpoint of a light IPC session. Currently only keeps a reference
    // to its parent KLightSession; no messaging behavior is implemented here.
    class KLightServerSession : KAutoObject
    {
        private readonly KLightSession _parent;

        public KLightServerSession(KernelContext context, KLightSession parent) : base(context)
        {
            _parent = parent;
        }
    }
}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightSession.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightSession.cs
new file mode 100644
index 00000000..3abb1ab0
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KLightSession.cs
@@ -0,0 +1,16 @@
using Ryujinx.HLE.HOS.Kernel.Common;

namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
    // A light IPC session: a pair of client and server endpoints created
    // together, each holding a back-reference to this parent object.
    class KLightSession : KAutoObject
    {
        public KLightServerSession ServerSession { get; }
        public KLightClientSession ClientSession { get; }

        public KLightSession(KernelContext context) : base(context)
        {
            ServerSession = new KLightServerSession(context, this);
            ClientSession = new KLightClientSession(context, this);
        }
    }
}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KPort.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KPort.cs
new file mode 100644
index 00000000..93f0f34c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KPort.cs
@@ -0,0 +1,72 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Horizon.Common;

namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
    /// <summary>
    /// A kernel IPC port: a paired <see cref="KServerPort"/> and
    /// <see cref="KClientPort"/> created together. Incoming sessions from the
    /// client side are forwarded to the server side while the port is open.
    /// </summary>
    class KPort : KAutoObject
    {
        public KServerPort ServerPort { get; }
        public KClientPort ClientPort { get; }

        // Assigned only in the constructor; _name is not read here but is kept
        // for identification (e.g. when inspecting named ports).
        private readonly string _name;

        private readonly ChannelState _state;

        // Set only in the constructor, so no private setter is needed.
        public bool IsLight { get; }

        public KPort(KernelContext context, int maxSessions, bool isLight, string name) : base(context)
        {
            ServerPort = new KServerPort(context, this);
            ClientPort = new KClientPort(context, this, maxSessions);

            IsLight = isLight;
            _name = name;

            _state = ChannelState.Open;
        }

        /// <summary>
        /// Forwards a new server session to the server port, failing with
        /// <see cref="KernelResult.PortClosed"/> if the port is not open.
        /// </summary>
        public Result EnqueueIncomingSession(KServerSession session)
        {
            Result result;

            KernelContext.CriticalSection.Enter();

            if (_state == ChannelState.Open)
            {
                ServerPort.EnqueueIncomingSession(session);

                result = Result.Success;
            }
            else
            {
                result = KernelResult.PortClosed;
            }

            KernelContext.CriticalSection.Leave();

            return result;
        }

        /// <summary>
        /// Forwards a new light server session to the server port, failing with
        /// <see cref="KernelResult.PortClosed"/> if the port is not open.
        /// </summary>
        public Result EnqueueIncomingLightSession(KLightServerSession session)
        {
            Result result;

            KernelContext.CriticalSection.Enter();

            if (_state == ChannelState.Open)
            {
                ServerPort.EnqueueIncomingLightSession(session);

                result = Result.Success;
            }
            else
            {
                result = KernelResult.PortClosed;
            }

            KernelContext.CriticalSection.Leave();

            return result;
        }
    }
}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KServerPort.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KServerPort.cs
new file mode 100644
index 00000000..21a3919c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KServerPort.cs
@@ -0,0 +1,87 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using System.Collections.Generic;

namespace Ryujinx.HLE.HOS.Kernel.Ipc
{
    /// <summary>
    /// Server endpoint of a kernel IPC port. Holds the queues of sessions
    /// (normal and light) waiting to be accepted by a server process, and is
    /// signaled whenever a connection arrives on an empty queue.
    /// </summary>
    class KServerPort : KSynchronizationObject
    {
        private readonly LinkedList<KServerSession> _incomingConnections;
        private readonly LinkedList<KLightServerSession> _lightIncomingConnections;

        private readonly KPort _parent;

        public bool IsLight => _parent.IsLight;

        public KServerPort(KernelContext context, KPort parent) : base(context)
        {
            _parent = parent;

            _incomingConnections = new LinkedList<KServerSession>();
            _lightIncomingConnections = new LinkedList<KLightServerSession>();
        }

        /// <summary>Queues a normal session for later acceptance.</summary>
        public void EnqueueIncomingSession(KServerSession session)
        {
            AcceptIncomingConnection(_incomingConnections, session);
        }

        /// <summary>Queues a light session for later acceptance.</summary>
        public void EnqueueIncomingLightSession(KLightServerSession session)
        {
            AcceptIncomingConnection(_lightIncomingConnections, session);
        }

        // Appends the session under the kernel critical section, signaling the
        // port when the queue transitions from empty to non-empty.
        private void AcceptIncomingConnection<T>(LinkedList<T> list, T session)
        {
            KernelContext.CriticalSection.Enter();

            bool wasEmpty = list.Count == 0;

            list.AddLast(session);

            if (wasEmpty)
            {
                Signal();
            }

            KernelContext.CriticalSection.Leave();
        }

        /// <summary>Dequeues the oldest pending session, or null if none is waiting.</summary>
        public KServerSession AcceptIncomingConnection()
        {
            return AcceptIncomingConnection(_incomingConnections);
        }

        /// <summary>Dequeues the oldest pending light session, or null if none is waiting.</summary>
        public KLightServerSession AcceptIncomingLightConnection()
        {
            return AcceptIncomingConnection(_lightIncomingConnections);
        }

        // Removes and returns the queue head under the kernel critical section;
        // returns default (null for the reference types used here) when empty.
        private T AcceptIncomingConnection<T>(LinkedList<T> list)
        {
            KernelContext.CriticalSection.Enter();

            T session = default;

            if (list.First != null)
            {
                session = list.First.Value;

                list.RemoveFirst();
            }

            KernelContext.CriticalSection.Leave();

            return session;
        }

        public override bool IsSignaled()
        {
            // A light port is signaled by pending light sessions, a normal
            // port by pending normal sessions.
            return _parent.IsLight
                ? _lightIncomingConnections.Count != 0
                : _incomingConnections.Count != 0;
        }
    }
}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KServerSession.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KServerSession.cs
new file mode 100644
index 00000000..86469c03
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KServerSession.cs
@@ -0,0 +1,1246 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using Ryujinx.Horizon.Common;
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Ipc
+{
+ class KServerSession : KSynchronizationObject
+ {
+ private static readonly MemoryState[] IpcMemoryStates = new MemoryState[]
+ {
+ MemoryState.IpcBuffer3,
+ MemoryState.IpcBuffer0,
+ MemoryState.IpcBuffer1,
+ (MemoryState)0xfffce5d4 //This is invalid, shouldn't be accessed.
+ };
+
+ private readonly struct Message
+ {
+ public ulong Address { get; }
+ public ulong Size { get; }
+ public bool IsCustom { get; }
+
+ public Message(KThread thread, ulong customCmdBuffAddress, ulong customCmdBuffSize)
+ {
+ IsCustom = customCmdBuffAddress != 0;
+
+ if (IsCustom)
+ {
+ Address = customCmdBuffAddress;
+ Size = customCmdBuffSize;
+ }
+ else
+ {
+ Address = thread.TlsAddress;
+ Size = 0x100;
+ }
+ }
+
+ public Message(KSessionRequest request) : this(
+ request.ClientThread,
+ request.CustomCmdBuffAddr,
+ request.CustomCmdBuffSize) { }
+ }
+
+ private readonly struct MessageHeader
+ {
+ public uint Word0 { get; }
+ public uint Word1 { get; }
+ public uint Word2 { get; }
+
+ public uint PointerBuffersCount { get; }
+ public uint SendBuffersCount { get; }
+ public uint ReceiveBuffersCount { get; }
+ public uint ExchangeBuffersCount { get; }
+
+ public uint RawDataSizeInWords { get; }
+
+ public uint ReceiveListType { get; }
+
+ public uint MessageSizeInWords { get; }
+ public uint ReceiveListOffsetInWords { get; }
+ public uint ReceiveListOffset { get; }
+
+ public bool HasHandles { get; }
+
+ public bool HasPid { get; }
+
+ public uint CopyHandlesCount { get; }
+ public uint MoveHandlesCount { get; }
+
+ public MessageHeader(uint word0, uint word1, uint word2)
+ {
+ Word0 = word0;
+ Word1 = word1;
+ Word2 = word2;
+
+ HasHandles = word1 >> 31 != 0;
+
+ uint handleDescSizeInWords = 0;
+
+ if (HasHandles)
+ {
+ uint pidSize = (word2 & 1) * 8;
+
+ HasPid = pidSize != 0;
+
+ CopyHandlesCount = (word2 >> 1) & 0xf;
+ MoveHandlesCount = (word2 >> 5) & 0xf;
+
+ handleDescSizeInWords = (pidSize + CopyHandlesCount * 4 + MoveHandlesCount * 4) / 4;
+ }
+ else
+ {
+ HasPid = false;
+
+ CopyHandlesCount = 0;
+ MoveHandlesCount = 0;
+ }
+
+ PointerBuffersCount = (word0 >> 16) & 0xf;
+ SendBuffersCount = (word0 >> 20) & 0xf;
+ ReceiveBuffersCount = (word0 >> 24) & 0xf;
+ ExchangeBuffersCount = word0 >> 28;
+
+ uint pointerDescSizeInWords = PointerBuffersCount * 2;
+ uint sendDescSizeInWords = SendBuffersCount * 3;
+ uint receiveDescSizeInWords = ReceiveBuffersCount * 3;
+ uint exchangeDescSizeInWords = ExchangeBuffersCount * 3;
+
+ RawDataSizeInWords = word1 & 0x3ff;
+
+ ReceiveListType = (word1 >> 10) & 0xf;
+
+ ReceiveListOffsetInWords = (word1 >> 20) & 0x7ff;
+
+ uint paddingSizeInWords = HasHandles ? 3u : 2u;
+
+ MessageSizeInWords = pointerDescSizeInWords +
+ sendDescSizeInWords +
+ receiveDescSizeInWords +
+ exchangeDescSizeInWords +
+ RawDataSizeInWords +
+ paddingSizeInWords +
+ handleDescSizeInWords;
+
+ if (ReceiveListOffsetInWords == 0)
+ {
+ ReceiveListOffsetInWords = MessageSizeInWords;
+ }
+
+ ReceiveListOffset = ReceiveListOffsetInWords * 4;
+ }
+ }
+
+ private struct PointerBufferDesc
+ {
+ public uint ReceiveIndex { get; }
+
+ public uint BufferSize { get; }
+ public ulong BufferAddress { get; set; }
+
+ public PointerBufferDesc(ulong dword)
+ {
+ ReceiveIndex = (uint)dword & 0xf;
+ BufferSize = (uint)dword >> 16;
+
+ BufferAddress = (dword >> 2) & 0x70;
+ BufferAddress |= (dword >> 12) & 0xf;
+
+ BufferAddress = (BufferAddress << 32) | (dword >> 32);
+ }
+
+ public ulong Pack()
+ {
+ ulong dword = (ReceiveIndex & 0xf) | ((BufferSize & 0xffff) << 16);
+
+ dword |= BufferAddress << 32;
+ dword |= (BufferAddress >> 20) & 0xf000;
+ dword |= (BufferAddress >> 30) & 0xffc0;
+
+ return dword;
+ }
+ }
+
+ private KSession _parent;
+
+ private LinkedList<KSessionRequest> _requests;
+
+ private KSessionRequest _activeRequest;
+
+ public KServerSession(KernelContext context, KSession parent) : base(context)
+ {
+ _parent = parent;
+
+ _requests = new LinkedList<KSessionRequest>();
+ }
+
+ public Result EnqueueRequest(KSessionRequest request)
+ {
+ if (_parent.ClientSession.State != ChannelState.Open)
+ {
+ return KernelResult.PortRemoteClosed;
+ }
+
+ if (request.AsyncEvent == null)
+ {
+ if (request.ClientThread.TerminationRequested)
+ {
+ return KernelResult.ThreadTerminating;
+ }
+
+ request.ClientThread.Reschedule(ThreadSchedState.Paused);
+ }
+
+ _requests.AddLast(request);
+
+ if (_requests.Count == 1)
+ {
+ Signal();
+ }
+
+ return Result.Success;
+ }
+
+ public Result Receive(ulong customCmdBuffAddr = 0, ulong customCmdBuffSize = 0)
+ {
+ KThread serverThread = KernelStatic.GetCurrentThread();
+ KProcess serverProcess = serverThread.Owner;
+
+ KernelContext.CriticalSection.Enter();
+
+ if (_parent.ClientSession.State != ChannelState.Open)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.PortRemoteClosed;
+ }
+
+ if (_activeRequest != null || !DequeueRequest(out KSessionRequest request))
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.NotFound;
+ }
+
+ if (request.ClientThread == null)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.PortRemoteClosed;
+ }
+
+ KThread clientThread = request.ClientThread;
+ KProcess clientProcess = clientThread.Owner;
+
+ KernelContext.CriticalSection.Leave();
+
+ _activeRequest = request;
+
+ request.ServerProcess = serverProcess;
+
+ Message clientMsg = new Message(request);
+ Message serverMsg = new Message(serverThread, customCmdBuffAddr, customCmdBuffSize);
+
+ MessageHeader clientHeader = GetClientMessageHeader(clientProcess, clientMsg);
+ MessageHeader serverHeader = GetServerMessageHeader(serverMsg);
+
+ Result serverResult = KernelResult.NotFound;
+ Result clientResult = Result.Success;
+
+ void CleanUpForError()
+ {
+ if (request.BufferDescriptorTable.UnmapServerBuffers(serverProcess.MemoryManager) == Result.Success)
+ {
+ request.BufferDescriptorTable.RestoreClientBuffers(clientProcess.MemoryManager);
+ }
+
+ CloseAllHandles(serverMsg, clientHeader, serverProcess);
+
+ KernelContext.CriticalSection.Enter();
+
+ _activeRequest = null;
+
+ if (_requests.Count != 0)
+ {
+ Signal();
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ WakeClientThread(request, clientResult);
+ }
+
+ if (clientHeader.ReceiveListType < 2 &&
+ clientHeader.ReceiveListOffset > clientMsg.Size)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+ else if (clientHeader.ReceiveListType == 2 &&
+ clientHeader.ReceiveListOffset + 8 > clientMsg.Size)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+ else if (clientHeader.ReceiveListType > 2 &&
+ clientHeader.ReceiveListType * 8 - 0x10 + clientHeader.ReceiveListOffset > clientMsg.Size)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+
+ if (clientHeader.ReceiveListOffsetInWords < clientHeader.MessageSizeInWords)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+
+ if (clientHeader.MessageSizeInWords * 4 > clientMsg.Size)
+ {
+ CleanUpForError();
+
+ return KernelResult.CmdBufferTooSmall;
+ }
+
+ ulong[] receiveList = GetReceiveList(
+ serverProcess,
+ serverMsg,
+ serverHeader.ReceiveListType,
+ serverHeader.ReceiveListOffset);
+
+ serverProcess.CpuMemory.Write(serverMsg.Address + 0, clientHeader.Word0);
+ serverProcess.CpuMemory.Write(serverMsg.Address + 4, clientHeader.Word1);
+
+ uint offset;
+
+ // Copy handles.
+ if (clientHeader.HasHandles)
+ {
+ if (clientHeader.MoveHandlesCount != 0)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+
+ serverProcess.CpuMemory.Write(serverMsg.Address + 8, clientHeader.Word2);
+
+ offset = 3;
+
+ if (clientHeader.HasPid)
+ {
+ serverProcess.CpuMemory.Write(serverMsg.Address + offset * 4, clientProcess.Pid);
+
+ offset += 2;
+ }
+
+ for (int index = 0; index < clientHeader.CopyHandlesCount; index++)
+ {
+ int newHandle = 0;
+ int handle = clientProcess.CpuMemory.Read<int>(clientMsg.Address + offset * 4);
+
+ if (clientResult == Result.Success && handle != 0)
+ {
+ clientResult = GetCopyObjectHandle(clientThread, serverProcess, handle, out newHandle);
+ }
+
+ serverProcess.CpuMemory.Write(serverMsg.Address + offset * 4, newHandle);
+
+ offset++;
+ }
+
+ for (int index = 0; index < clientHeader.MoveHandlesCount; index++)
+ {
+ int newHandle = 0;
+ int handle = clientProcess.CpuMemory.Read<int>(clientMsg.Address + offset * 4);
+
+ if (handle != 0)
+ {
+ if (clientResult == Result.Success)
+ {
+ clientResult = GetMoveObjectHandle(clientProcess, serverProcess, handle, out newHandle);
+ }
+ else
+ {
+ clientProcess.HandleTable.CloseHandle(handle);
+ }
+ }
+
+ serverProcess.CpuMemory.Write(serverMsg.Address + offset * 4, newHandle);
+
+ offset++;
+ }
+
+ if (clientResult != Result.Success)
+ {
+ CleanUpForError();
+
+ return serverResult;
+ }
+ }
+ else
+ {
+ offset = 2;
+ }
+
+ // Copy pointer/receive list buffers.
+ uint recvListDstOffset = 0;
+
+ for (int index = 0; index < clientHeader.PointerBuffersCount; index++)
+ {
+ ulong pointerDesc = clientProcess.CpuMemory.Read<ulong>(clientMsg.Address + offset * 4);
+
+ PointerBufferDesc descriptor = new PointerBufferDesc(pointerDesc);
+
+ if (descriptor.BufferSize != 0)
+ {
+ clientResult = GetReceiveListAddress(
+ descriptor,
+ serverMsg,
+ serverHeader.ReceiveListType,
+ clientHeader.MessageSizeInWords,
+ receiveList,
+ ref recvListDstOffset,
+ out ulong recvListBufferAddress);
+
+ if (clientResult != Result.Success)
+ {
+ CleanUpForError();
+
+ return serverResult;
+ }
+
+ clientResult = clientProcess.MemoryManager.CopyDataToCurrentProcess(
+ recvListBufferAddress,
+ descriptor.BufferSize,
+ descriptor.BufferAddress,
+ MemoryState.IsPoolAllocated,
+ MemoryState.IsPoolAllocated,
+ KMemoryPermission.Read,
+ MemoryAttribute.Uncached,
+ MemoryAttribute.None);
+
+ if (clientResult != Result.Success)
+ {
+ CleanUpForError();
+
+ return serverResult;
+ }
+
+ descriptor.BufferAddress = recvListBufferAddress;
+ }
+ else
+ {
+ descriptor.BufferAddress = 0;
+ }
+
+ serverProcess.CpuMemory.Write(serverMsg.Address + offset * 4, descriptor.Pack());
+
+ offset += 2;
+ }
+
+ // Copy send, receive and exchange buffers.
+ uint totalBuffersCount =
+ clientHeader.SendBuffersCount +
+ clientHeader.ReceiveBuffersCount +
+ clientHeader.ExchangeBuffersCount;
+
+ for (int index = 0; index < totalBuffersCount; index++)
+ {
+ ulong clientDescAddress = clientMsg.Address + offset * 4;
+
+ uint descWord0 = clientProcess.CpuMemory.Read<uint>(clientDescAddress + 0);
+ uint descWord1 = clientProcess.CpuMemory.Read<uint>(clientDescAddress + 4);
+ uint descWord2 = clientProcess.CpuMemory.Read<uint>(clientDescAddress + 8);
+
+ bool isSendDesc = index < clientHeader.SendBuffersCount;
+ bool isExchangeDesc = index >= clientHeader.SendBuffersCount + clientHeader.ReceiveBuffersCount;
+
+ bool notReceiveDesc = isSendDesc || isExchangeDesc;
+ bool isReceiveDesc = !notReceiveDesc;
+
+ KMemoryPermission permission = index >= clientHeader.SendBuffersCount
+ ? KMemoryPermission.ReadAndWrite
+ : KMemoryPermission.Read;
+
+ uint sizeHigh4 = (descWord2 >> 24) & 0xf;
+
+ ulong bufferSize = descWord0 | (ulong)sizeHigh4 << 32;
+
+ ulong dstAddress = 0;
+
+ if (bufferSize != 0)
+ {
+ ulong bufferAddress;
+
+ bufferAddress = descWord2 >> 28;
+ bufferAddress |= ((descWord2 >> 2) & 7) << 4;
+
+ bufferAddress = (bufferAddress << 32) | descWord1;
+
+ MemoryState state = IpcMemoryStates[(descWord2 + 1) & 3];
+
+ clientResult = serverProcess.MemoryManager.MapBufferFromClientProcess(
+ bufferSize,
+ bufferAddress,
+ clientProcess.MemoryManager,
+ permission,
+ state,
+ notReceiveDesc,
+ out dstAddress);
+
+ if (clientResult != Result.Success)
+ {
+ CleanUpForError();
+
+ return serverResult;
+ }
+
+ if (isSendDesc)
+ {
+ clientResult = request.BufferDescriptorTable.AddSendBuffer(bufferAddress, dstAddress, bufferSize, state);
+ }
+ else if (isReceiveDesc)
+ {
+ clientResult = request.BufferDescriptorTable.AddReceiveBuffer(bufferAddress, dstAddress, bufferSize, state);
+ }
+ else /* if (isExchangeDesc) */
+ {
+ clientResult = request.BufferDescriptorTable.AddExchangeBuffer(bufferAddress, dstAddress, bufferSize, state);
+ }
+
+ if (clientResult != Result.Success)
+ {
+ CleanUpForError();
+
+ return serverResult;
+ }
+ }
+
+ descWord1 = (uint)dstAddress;
+
+ descWord2 &= 3;
+
+ descWord2 |= sizeHigh4 << 24;
+
+ descWord2 |= (uint)(dstAddress >> 34) & 0x3ffffffc;
+ descWord2 |= (uint)(dstAddress >> 4) & 0xf0000000;
+
+ ulong serverDescAddress = serverMsg.Address + offset * 4;
+
+ serverProcess.CpuMemory.Write(serverDescAddress + 0, descWord0);
+ serverProcess.CpuMemory.Write(serverDescAddress + 4, descWord1);
+ serverProcess.CpuMemory.Write(serverDescAddress + 8, descWord2);
+
+ offset += 3;
+ }
+
+ // Copy raw data.
+ if (clientHeader.RawDataSizeInWords != 0)
+ {
+ ulong copySrc = clientMsg.Address + offset * 4;
+ ulong copyDst = serverMsg.Address + offset * 4;
+
+ ulong copySize = clientHeader.RawDataSizeInWords * 4;
+
+ if (serverMsg.IsCustom || clientMsg.IsCustom)
+ {
+ KMemoryPermission permission = clientMsg.IsCustom
+ ? KMemoryPermission.None
+ : KMemoryPermission.Read;
+
+ clientResult = clientProcess.MemoryManager.CopyDataToCurrentProcess(
+ copyDst,
+ copySize,
+ copySrc,
+ MemoryState.IsPoolAllocated,
+ MemoryState.IsPoolAllocated,
+ permission,
+ MemoryAttribute.Uncached,
+ MemoryAttribute.None);
+ }
+ else
+ {
+ serverProcess.CpuMemory.Write(copyDst, clientProcess.CpuMemory.GetSpan(copySrc, (int)copySize));
+ }
+
+ if (clientResult != Result.Success)
+ {
+ CleanUpForError();
+
+ return serverResult;
+ }
+ }
+
+ return Result.Success;
+ }
+
+ /// <summary>
+ /// Sends the reply for the currently active request back to the client:
+ /// validates the client's receive list, then copies the reply header,
+ /// handles, pointer buffers and raw data from the server message into the
+ /// client message, and finally finishes the request (unmapping any
+ /// server-side buffer mappings).
+ /// </summary>
+ /// <param name="customCmdBuffAddr">Optional custom command buffer address (0 means the thread's default message buffer is used)</param>
+ /// <param name="customCmdBuffSize">Size in bytes of the custom command buffer</param>
+ /// <returns>Result returned to the server; client-side failures are delivered to the client via <c>CleanUpForError</c>/<c>FinishRequest</c></returns>
+ public Result Reply(ulong customCmdBuffAddr = 0, ulong customCmdBuffSize = 0)
+ {
+ KThread serverThread = KernelStatic.GetCurrentThread();
+ KProcess serverProcess = serverThread.Owner;
+
+ KernelContext.CriticalSection.Enter();
+
+ // Replying is only valid while a request is being processed.
+ if (_activeRequest == null)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+
+ KSessionRequest request = _activeRequest;
+
+ _activeRequest = null;
+
+ // More requests are queued: signal so another server thread can pick one up.
+ if (_requests.Count != 0)
+ {
+ Signal();
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ KThread clientThread = request.ClientThread;
+ KProcess clientProcess = clientThread.Owner;
+
+ Message clientMsg = new Message(request);
+ Message serverMsg = new Message(serverThread, customCmdBuffAddr, customCmdBuffSize);
+
+ MessageHeader clientHeader = GetClientMessageHeader(clientProcess, clientMsg);
+ MessageHeader serverHeader = GetServerMessageHeader(serverMsg);
+
+ Result clientResult = Result.Success;
+ Result serverResult = Result.Success;
+
+ // On failure: close any handles already written into the client message
+ // and finish the request, reporting clientResult to the client.
+ void CleanUpForError()
+ {
+ CloseAllHandles(clientMsg, serverHeader, clientProcess);
+
+ FinishRequest(request, clientResult);
+ }
+
+ // Validate that the client's receive list lies within the client message.
+ if (clientHeader.ReceiveListType < 2 &&
+ clientHeader.ReceiveListOffset > clientMsg.Size)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+ else if (clientHeader.ReceiveListType == 2 &&
+ clientHeader.ReceiveListOffset + 8 > clientMsg.Size)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+ else if (clientHeader.ReceiveListType > 2 &&
+ clientHeader.ReceiveListType * 8 - 0x10 + clientHeader.ReceiveListOffset > clientMsg.Size)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+
+ // The receive list must be placed after the message payload.
+ if (clientHeader.ReceiveListOffsetInWords < clientHeader.MessageSizeInWords)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+
+ // The reply must fit in the client's message buffer.
+ if (serverHeader.MessageSizeInWords * 4 > clientMsg.Size)
+ {
+ CleanUpForError();
+
+ return KernelResult.CmdBufferTooSmall;
+ }
+
+ // A reply message cannot transfer send/receive/exchange buffers back.
+ if (serverHeader.SendBuffersCount != 0 ||
+ serverHeader.ReceiveBuffersCount != 0 ||
+ serverHeader.ExchangeBuffersCount != 0)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidCombination;
+ }
+
+ // Read receive list.
+ ulong[] receiveList = GetReceiveList(
+ clientProcess,
+ clientMsg,
+ clientHeader.ReceiveListType,
+ clientHeader.ReceiveListOffset);
+
+ // Copy receive and exchange buffers.
+ clientResult = request.BufferDescriptorTable.CopyBuffersToClient(clientProcess.MemoryManager);
+
+ // NOTE: on client-side failure the client is notified of clientResult
+ // through CleanUpForError; the server itself gets serverResult, which is
+ // still Success at this point.
+ if (clientResult != Result.Success)
+ {
+ CleanUpForError();
+
+ return serverResult;
+ }
+
+ // Copy header.
+ clientProcess.CpuMemory.Write(clientMsg.Address + 0, serverHeader.Word0);
+ clientProcess.CpuMemory.Write(clientMsg.Address + 4, serverHeader.Word1);
+
+ // Copy handles.
+ uint offset;
+
+ if (serverHeader.HasHandles)
+ {
+ offset = 3;
+
+ clientProcess.CpuMemory.Write(clientMsg.Address + 8, serverHeader.Word2);
+
+ // PID field occupies two words when present.
+ if (serverHeader.HasPid)
+ {
+ clientProcess.CpuMemory.Write(clientMsg.Address + offset * 4, serverProcess.Pid);
+
+ offset += 2;
+ }
+
+ // Copy handles are duplicated into the client handle table; a zero
+ // handle in the message is passed through as zero.
+ for (int index = 0; index < serverHeader.CopyHandlesCount; index++)
+ {
+ int newHandle = 0;
+
+ int handle = serverProcess.CpuMemory.Read<int>(serverMsg.Address + offset * 4);
+
+ if (handle != 0)
+ {
+ GetCopyObjectHandle(serverThread, clientProcess, handle, out newHandle);
+ }
+
+ clientProcess.CpuMemory.Write(clientMsg.Address + offset * 4, newHandle);
+
+ offset++;
+ }
+
+ // Move handles are transferred; once an error occurred, remaining
+ // handles are just closed on the server side instead of moved.
+ for (int index = 0; index < serverHeader.MoveHandlesCount; index++)
+ {
+ int newHandle = 0;
+
+ int handle = serverProcess.CpuMemory.Read<int>(serverMsg.Address + offset * 4);
+
+ if (handle != 0)
+ {
+ if (clientResult == Result.Success)
+ {
+ clientResult = GetMoveObjectHandle(serverProcess, clientProcess, handle, out newHandle);
+ }
+ else
+ {
+ serverProcess.HandleTable.CloseHandle(handle);
+ }
+ }
+
+ clientProcess.CpuMemory.Write(clientMsg.Address + offset * 4, newHandle);
+
+ offset++;
+ }
+ }
+ else
+ {
+ offset = 2;
+ }
+
+ // Copy pointer/receive list buffers.
+ uint recvListDstOffset = 0;
+
+ for (int index = 0; index < serverHeader.PointerBuffersCount; index++)
+ {
+ ulong pointerDesc = serverProcess.CpuMemory.Read<ulong>(serverMsg.Address + offset * 4);
+
+ PointerBufferDesc descriptor = new PointerBufferDesc(pointerDesc);
+
+ ulong recvListBufferAddress = 0;
+
+ if (descriptor.BufferSize != 0)
+ {
+ clientResult = GetReceiveListAddress(
+ descriptor,
+ clientMsg,
+ clientHeader.ReceiveListType,
+ serverHeader.MessageSizeInWords,
+ receiveList,
+ ref recvListDstOffset,
+ out recvListBufferAddress);
+
+ if (clientResult != Result.Success)
+ {
+ CleanUpForError();
+
+ return serverResult;
+ }
+
+ clientResult = clientProcess.MemoryManager.CopyDataFromCurrentProcess(
+ recvListBufferAddress,
+ descriptor.BufferSize,
+ MemoryState.IsPoolAllocated,
+ MemoryState.IsPoolAllocated,
+ KMemoryPermission.Read,
+ MemoryAttribute.Uncached,
+ MemoryAttribute.None,
+ descriptor.BufferAddress);
+
+ if (clientResult != Result.Success)
+ {
+ CleanUpForError();
+
+ return serverResult;
+ }
+ }
+
+ ulong dstDescAddress = clientMsg.Address + offset * 4;
+
+ // Repack the client-side buffer address into the pointer descriptor
+ // layout: low 32 bits in the upper word, the remaining high bits
+ // folded into the descriptor's address-extension fields.
+ ulong clientPointerDesc =
+ (recvListBufferAddress << 32) |
+ ((recvListBufferAddress >> 20) & 0xf000) |
+ ((recvListBufferAddress >> 30) & 0xffc0);
+
+ clientPointerDesc |= pointerDesc & 0xffff000f;
+
+ clientProcess.CpuMemory.Write(dstDescAddress + 0, clientPointerDesc);
+
+ offset += 2;
+ }
+
+ // Set send, receive and exchange buffer descriptors to zero.
+ uint totalBuffersCount =
+ serverHeader.SendBuffersCount +
+ serverHeader.ReceiveBuffersCount +
+ serverHeader.ExchangeBuffersCount;
+
+ for (int index = 0; index < totalBuffersCount; index++)
+ {
+ ulong dstDescAddress = clientMsg.Address + offset * 4;
+
+ clientProcess.CpuMemory.Write(dstDescAddress + 0, 0);
+ clientProcess.CpuMemory.Write(dstDescAddress + 4, 0);
+ clientProcess.CpuMemory.Write(dstDescAddress + 8, 0);
+
+ offset += 3;
+ }
+
+ // Copy raw data.
+ if (serverHeader.RawDataSizeInWords != 0)
+ {
+ ulong copyDst = clientMsg.Address + offset * 4;
+ ulong copySrc = serverMsg.Address + offset * 4;
+
+ ulong copySize = serverHeader.RawDataSizeInWords * 4;
+
+ // Custom (non-default) message buffers require a checked
+ // cross-process copy instead of a direct memory write.
+ if (serverMsg.IsCustom || clientMsg.IsCustom)
+ {
+ KMemoryPermission permission = clientMsg.IsCustom
+ ? KMemoryPermission.None
+ : KMemoryPermission.Read;
+
+ clientResult = clientProcess.MemoryManager.CopyDataFromCurrentProcess(
+ copyDst,
+ copySize,
+ MemoryState.IsPoolAllocated,
+ MemoryState.IsPoolAllocated,
+ permission,
+ MemoryAttribute.Uncached,
+ MemoryAttribute.None,
+ copySrc);
+ }
+ else
+ {
+ clientProcess.CpuMemory.Write(copyDst, serverProcess.CpuMemory.GetSpan(copySrc, (int)copySize));
+ }
+ }
+
+ // Unmap buffers from server.
+ FinishRequest(request, clientResult);
+
+ return serverResult;
+ }
+
+ // Reads the three-word IPC message header from the client's message buffer.
+ private MessageHeader GetClientMessageHeader(KProcess clientProcess, Message clientMsg)
+ {
+ ulong headerAddress = clientMsg.Address;
+
+ return new MessageHeader(
+ clientProcess.CpuMemory.Read<uint>(headerAddress),
+ clientProcess.CpuMemory.Read<uint>(headerAddress + 4),
+ clientProcess.CpuMemory.Read<uint>(headerAddress + 8));
+ }
+
+ // Reads the three-word IPC message header from the server (current
+ // process) message buffer.
+ private MessageHeader GetServerMessageHeader(Message serverMsg)
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+ ulong headerAddress = serverMsg.Address;
+
+ return new MessageHeader(
+ currentProcess.CpuMemory.Read<uint>(headerAddress),
+ currentProcess.CpuMemory.Read<uint>(headerAddress + 4),
+ currentProcess.CpuMemory.Read<uint>(headerAddress + 8));
+ }
+
+ // Resolves a handle in the source thread's context (honoring the
+ // pseudo-handles for the current process and thread) and duplicates it
+ // into the destination process handle table.
+ private Result GetCopyObjectHandle(KThread srcThread, KProcess dstProcess, int srcHandle, out int dstHandle)
+ {
+ dstHandle = 0;
+
+ KProcess srcProcess = srcThread.Owner;
+
+ KAutoObject sourceObject;
+
+ if (srcHandle == KHandleTable.SelfProcessHandle)
+ {
+ sourceObject = srcProcess;
+ }
+ else if (srcHandle == KHandleTable.SelfThreadHandle)
+ {
+ sourceObject = srcThread;
+ }
+ else
+ {
+ sourceObject = srcProcess.HandleTable.GetObject<KAutoObject>(srcHandle);
+ }
+
+ if (sourceObject == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ return dstProcess.HandleTable.GenerateHandle(sourceObject, out dstHandle);
+ }
+
+ // Moves a handle between processes: generates a new handle in the
+ // destination table and closes the original in the source table.
+ private Result GetMoveObjectHandle(KProcess srcProcess, KProcess dstProcess, int srcHandle, out int dstHandle)
+ {
+ dstHandle = 0;
+
+ KAutoObject sourceObject = srcProcess.HandleTable.GetObject<KAutoObject>(srcHandle);
+
+ if (sourceObject == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ Result result = dstProcess.HandleTable.GenerateHandle(sourceObject, out dstHandle);
+
+ // The source handle is closed even if generating the destination
+ // handle failed, since the transfer consumes it either way.
+ srcProcess.HandleTable.CloseHandle(srcHandle);
+
+ return result;
+ }
+
+ // Reads the receive list entries from the message buffer. Types 0 and 1
+ // carry no explicit entries, type 2 carries a single entry, and types
+ // above 2 carry (type - 2) entries.
+ private ulong[] GetReceiveList(KProcess ownerProcess, Message message, uint recvListType, uint recvListOffset)
+ {
+ int entryCount;
+
+ if (recvListType >= 3)
+ {
+ entryCount = (int)recvListType - 2;
+ }
+ else if (recvListType == 2)
+ {
+ entryCount = 1;
+ }
+ else
+ {
+ entryCount = 0;
+ }
+
+ ulong[] entries = new ulong[entryCount];
+
+ ulong entryAddress = message.Address + recvListOffset;
+
+ for (int index = 0; index < entryCount; index++, entryAddress += 8)
+ {
+ entries[index] = ownerProcess.CpuMemory.Read<ulong>(entryAddress);
+ }
+
+ return entries;
+ }
+
+ /// <summary>
+ /// Computes the client-side address where a pointer buffer should be
+ /// written, based on the receive list type: type 0 forbids pointer
+ /// buffers, types 1/2 allocate sequentially from a single region, and
+ /// types above 2 index into explicit per-buffer entries.
+ /// </summary>
+ /// <param name="descriptor">Server pointer buffer descriptor being placed</param>
+ /// <param name="message">Client message the receive list belongs to</param>
+ /// <param name="recvListType">Receive list type from the client header</param>
+ /// <param name="messageSizeInWords">Server message size, in words</param>
+ /// <param name="receiveList">Raw receive list entries read from the client</param>
+ /// <param name="dstOffset">Running allocation offset within the receive region (types 1/2)</param>
+ /// <param name="address">Resulting client address, or 0 on failure</param>
+ private Result GetReceiveListAddress(
+ PointerBufferDesc descriptor,
+ Message message,
+ uint recvListType,
+ uint messageSizeInWords,
+ ulong[] receiveList,
+ ref uint dstOffset,
+ out ulong address)
+ {
+ ulong recvListBufferAddress = address = 0;
+
+ if (recvListType == 0)
+ {
+ return KernelResult.OutOfResource;
+ }
+ else if (recvListType == 1 || recvListType == 2)
+ {
+ ulong recvListBaseAddr;
+ ulong recvListEndAddr;
+
+ if (recvListType == 1)
+ {
+ // Type 1: region is the tail of the message buffer itself.
+ recvListBaseAddr = message.Address + messageSizeInWords * 4;
+ recvListEndAddr = message.Address + message.Size;
+ }
+ else /* if (recvListType == 2) */
+ {
+ // Type 2: a single packed entry; the address is in the low
+ // bits (39-bit mask) and the size in the top 16 bits.
+ ulong packed = receiveList[0];
+
+ recvListBaseAddr = packed & 0x7fffffffff;
+
+ uint size = (uint)(packed >> 48);
+
+ if (size == 0)
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ recvListEndAddr = recvListBaseAddr + size;
+ }
+
+ // Sequential bump allocation, 16-byte aligned.
+ recvListBufferAddress = BitUtils.AlignUp<ulong>(recvListBaseAddr + dstOffset, 0x10);
+
+ ulong endAddress = recvListBufferAddress + descriptor.BufferSize;
+
+ dstOffset = (uint)endAddress - (uint)recvListBaseAddr;
+
+ // The first check also catches unsigned overflow of the end address.
+ if (recvListBufferAddress + descriptor.BufferSize <= recvListBufferAddress ||
+ recvListBufferAddress + descriptor.BufferSize > recvListEndAddr)
+ {
+ return KernelResult.OutOfResource;
+ }
+ }
+ else /* if (recvListType > 2) */
+ {
+ if (descriptor.ReceiveIndex >= receiveList.Length)
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ // Each entry packs address (low bits) and size (top 16 bits);
+ // the entry must exist and be large enough for the buffer.
+ ulong packed = receiveList[descriptor.ReceiveIndex];
+
+ recvListBufferAddress = packed & 0x7fffffffff;
+
+ uint size = (uint)(packed >> 48);
+
+ if (recvListBufferAddress == 0 || size == 0 || size < descriptor.BufferSize)
+ {
+ return KernelResult.OutOfResource;
+ }
+ }
+
+ address = recvListBufferAddress;
+
+ return Result.Success;
+ }
+
+ /// <summary>
+ /// Closes and zeroes every copy/move handle embedded in a message.
+ /// Used to roll back handle transfers when message processing fails.
+ /// </summary>
+ /// <param name="message">Message containing the handles</param>
+ /// <param name="header">Parsed header describing the handle layout</param>
+ /// <param name="process">Process owning the message buffer and the handles</param>
+ private void CloseAllHandles(Message message, MessageHeader header, KProcess process)
+ {
+ if (header.HasHandles)
+ {
+ // Fixed typo: local was previously named "totalHandeslCount".
+ uint totalHandlesCount = header.CopyHandlesCount + header.MoveHandlesCount;
+
+ uint offset = 3;
+
+ if (header.HasPid)
+ {
+ // Zero the PID field, which occupies two words.
+ process.CpuMemory.Write(message.Address + offset * 4, 0L);
+
+ offset += 2;
+ }
+
+ for (int index = 0; index < totalHandlesCount; index++)
+ {
+ int handle = process.CpuMemory.Read<int>(message.Address + offset * 4);
+
+ if (handle != 0)
+ {
+ process.HandleTable.CloseHandle(handle);
+
+ process.CpuMemory.Write(message.Address + offset * 4, 0);
+ }
+
+ offset++;
+ }
+ }
+ }
+
+ /// <summary>
+ /// The server session is signaled when the client endpoint is no longer
+ /// open, or when at least one request is pending and none is active.
+ /// </summary>
+ public override bool IsSignaled()
+ {
+ bool clientClosed = _parent.ClientSession.State != ChannelState.Open;
+
+ return clientClosed || (_requests.Count != 0 && _activeRequest == null);
+ }
+
+ /// <summary>
+ /// Invoked when the last reference is released: disconnects the server
+ /// endpoint, fails all outstanding requests with PortRemoteClosed, and
+ /// drops the reference held on the parent session.
+ /// </summary>
+ protected override void Destroy()
+ {
+ _parent.DisconnectServer();
+
+ CancelAllRequestsServerDisconnected();
+
+ _parent.DecrementReferenceCount();
+ }
+
+ // Fails every outstanding request with PortRemoteClosed when the server
+ // endpoint goes away.
+ private void CancelAllRequestsServerDisconnected()
+ {
+ foreach (KSessionRequest pending in IterateWithRemovalOfAllRequests())
+ {
+ FinishRequest(pending, KernelResult.PortRemoteClosed);
+ }
+ }
+
+ // Cancels all outstanding requests after the client endpoint disconnects,
+ // then wakes server threads blocked on this session.
+ public void CancelAllRequestsClientDisconnected()
+ {
+ foreach (KSessionRequest pending in IterateWithRemovalOfAllRequests())
+ {
+ // Client sessions can only be disconnected on async requests (a
+ // synchronous client would still be blocked waiting for the
+ // response), so only async requests from threads that are not
+ // terminating need a result delivered here.
+ if (!pending.ClientThread.TerminationRequested && pending.AsyncEvent != null)
+ {
+ SendResultToAsyncRequestClient(pending, KernelResult.PortRemoteClosed);
+ }
+ }
+
+ WakeServerThreads(KernelResult.PortRemoteClosed);
+ }
+
+ /// <summary>
+ /// Lazily removes and yields every pending request: the active one first
+ /// (if any), then each queued request in FIFO order. The kernel critical
+ /// section is never held across a yield, so consumers may safely call
+ /// back into the kernel while iterating.
+ /// </summary>
+ private IEnumerable<KSessionRequest> IterateWithRemovalOfAllRequests()
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (_activeRequest != null)
+ {
+ KSessionRequest request = _activeRequest;
+
+ _activeRequest = null;
+
+ // Leave the critical section BEFORE yielding: iterator execution
+ // is suspended at yield, and the caller must not inherit the lock.
+ KernelContext.CriticalSection.Leave();
+
+ yield return request;
+ }
+ else
+ {
+ KernelContext.CriticalSection.Leave();
+ }
+
+ // DequeueRequest re-acquires the critical section per element.
+ while (DequeueRequest(out KSessionRequest request))
+ {
+ yield return request;
+ }
+ }
+
+ // Pops the oldest pending request, if any, under the kernel critical section.
+ private bool DequeueRequest(out KSessionRequest request)
+ {
+ request = null;
+
+ bool dequeued = false;
+
+ KernelContext.CriticalSection.Enter();
+
+ if (_requests.First != null)
+ {
+ request = _requests.First.Value;
+
+ _requests.RemoveFirst();
+
+ dequeued = true;
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ return dequeued;
+ }
+
+ // Completes a request: unmaps any buffers the request mapped into the
+ // server, restores the client-side buffer state (only if the unmap
+ // succeeded), then wakes the waiting client with the given result.
+ private void FinishRequest(KSessionRequest request, Result result)
+ {
+ KProcess clientProcess = request.ClientThread.Owner;
+ KProcess serverProcess = request.ServerProcess;
+
+ // ServerProcess is null when the request never reached a server, in
+ // which case there is nothing to unmap.
+ bool canRestore = serverProcess == null ||
+ request.BufferDescriptorTable.UnmapServerBuffers(serverProcess.MemoryManager) == Result.Success;
+
+ if (canRestore)
+ {
+ request.BufferDescriptorTable.RestoreClientBuffers(clientProcess.MemoryManager);
+ }
+
+ WakeClientThread(request, result);
+ }
+
+ // Wakes the client thread waiting for a response to the given request,
+ // delivering the result either via the async completion event or by
+ // rescheduling the blocked synchronous client.
+ private void WakeClientThread(KSessionRequest request, Result result)
+ {
+ if (request.AsyncEvent == null)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ WakeAndSetResult(request.ClientThread, result);
+
+ KernelContext.CriticalSection.Leave();
+ }
+ else
+ {
+ SendResultToAsyncRequestClient(request, result);
+ }
+ }
+
+ // Completes an async request: on failure, writes the error into the
+ // client's command buffer; always returns the borrowed IPC buffer and
+ // signals the completion event.
+ private void SendResultToAsyncRequestClient(KSessionRequest request, Result result)
+ {
+ KProcess clientProcess = request.ClientThread.Owner;
+
+ if (result != Result.Success)
+ {
+ ulong cmdBuffAddress = request.CustomCmdBuffAddr;
+
+ clientProcess.CpuMemory.Write<ulong>(cmdBuffAddress, 0);
+ clientProcess.CpuMemory.Write(cmdBuffAddress + 8, result.ErrorCode);
+ }
+
+ clientProcess.MemoryManager.UnborrowIpcBuffer(request.CustomCmdBuffAddr, request.CustomCmdBuffSize);
+
+ request.AsyncEvent.Signal();
+ }
+
+ // Wakes every server thread currently blocked waiting on this session,
+ // setting the given result on each.
+ private void WakeServerThreads(Result result)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ foreach (KThread waiter in WaitingThreads)
+ {
+ WakeAndSetResult(waiter, result, this);
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ // Reschedules a paused thread with the given sync result; threads in any
+ // other scheduling state are left untouched.
+ private void WakeAndSetResult(KThread thread, Result result, KSynchronizationObject signaledObj = null)
+ {
+ bool isPaused = (thread.SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused;
+
+ if (!isPaused)
+ {
+ return;
+ }
+
+ thread.SignaledObj = signaledObj;
+ thread.ObjSyncResult = result;
+
+ thread.Reschedule(ThreadSchedState.Running);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KSession.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KSession.cs
new file mode 100644
index 00000000..13cf4b51
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KSession.cs
@@ -0,0 +1,54 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+
+namespace Ryujinx.HLE.HOS.Kernel.Ipc
+{
+ /// <summary>
+ /// Pairs a client and a server endpoint for IPC communication over a
+ /// single session channel.
+ /// </summary>
+ class KSession : KAutoObject
+ {
+ public KServerSession ServerSession { get; }
+ public KClientSession ClientSession { get; }
+
+ // Tracks whether both endpoints were constructed, so Destroy knows
+ // whether client-side cleanup is required.
+ private readonly bool _hasBeenInitialized;
+
+ public KSession(KernelContext context, KClientPort parentPort = null) : base(context)
+ {
+ IncrementReferenceCount();
+
+ ServerSession = new KServerSession(context, this);
+ ClientSession = new KClientSession(context, this, parentPort);
+
+ _hasBeenInitialized = true;
+ }
+
+ public void DisconnectClient()
+ {
+ if (ClientSession.State != ChannelState.Open)
+ {
+ return;
+ }
+
+ ClientSession.State = ChannelState.ClientDisconnected;
+
+ // Outstanding requests can no longer be answered to the client.
+ ServerSession.CancelAllRequestsClientDisconnected();
+ }
+
+ public void DisconnectServer()
+ {
+ if (ClientSession.State == ChannelState.Open)
+ {
+ ClientSession.State = ChannelState.ServerDisconnected;
+ }
+ }
+
+ protected override void Destroy()
+ {
+ if (!_hasBeenInitialized)
+ {
+ return;
+ }
+
+ ClientSession.DisconnectFromPort();
+
+ KProcess creatorProcess = ClientSession.CreatorProcess;
+
+ creatorProcess.ResourceLimit?.Release(LimitableResource.Session, 1);
+ creatorProcess.DecrementReferenceCount();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Ipc/KSessionRequest.cs b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KSessionRequest.cs
new file mode 100644
index 00000000..31ddfc9c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Ipc/KSessionRequest.cs
@@ -0,0 +1,33 @@
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Ipc
+{
+ /// <summary>
+ /// State for a single in-flight IPC request: the client thread that made
+ /// it, the optional custom command buffer, the optional async completion
+ /// event, and the buffer descriptors exchanged with the server.
+ /// </summary>
+ class KSessionRequest
+ {
+ // Tracks send/receive/exchange buffers mapped for this request.
+ public KBufferDescriptorTable BufferDescriptorTable { get; }
+
+ public KThread ClientThread { get; }
+
+ // Set when the request is picked up by a server process; null until then.
+ public KProcess ServerProcess { get; set; }
+
+ // Non-null for async requests; signaled when the reply is delivered.
+ public KWritableEvent AsyncEvent { get; }
+
+ // Custom command buffer location (0/0 when the TLS buffer is used).
+ public ulong CustomCmdBuffAddr { get; }
+ public ulong CustomCmdBuffSize { get; }
+
+ public KSessionRequest(
+ KThread clientThread,
+ ulong customCmdBuffAddr,
+ ulong customCmdBuffSize,
+ KWritableEvent asyncEvent = null)
+ {
+ ClientThread = clientThread;
+ CustomCmdBuffAddr = customCmdBuffAddr;
+ CustomCmdBuffSize = customCmdBuffSize;
+ AsyncEvent = asyncEvent;
+
+ BufferDescriptorTable = new KBufferDescriptorTable();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/KernelConstants.cs b/src/Ryujinx.HLE/HOS/Kernel/KernelConstants.cs
new file mode 100644
index 00000000..28db750c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/KernelConstants.cs
@@ -0,0 +1,20 @@
+using Ryujinx.HLE.HOS.Kernel.Memory;
+
+namespace Ryujinx.HLE.HOS.Kernel
+{
+ /// <summary>
+ /// Fixed constants used across the HLE kernel implementation.
+ /// </summary>
+ static class KernelConstants
+ {
+ // First IDs handed out to built-in KIPs and to regular processes.
+ public const int InitialKipId = 1;
+ public const int InitialProcessId = 0x51;
+
+ // Number of entries in the supervisor call (SVC) table.
+ public const int SupervisorCallCount = 0xC0;
+
+ // Capacity used to size the kernel memory block slab managers.
+ public const int MemoryBlockAllocatorSize = 0x2710;
+
+ // User-mode slab heap region: base, per-item size (one page) and total size.
+ public const ulong UserSlabHeapBase = DramMemoryMap.SlabHeapBase;
+ public const ulong UserSlabHeapItemSize = KPageTableBase.PageSize;
+ public const ulong UserSlabHeapSize = 0x3de000;
+
+ // System counter frequency (19,200,000 ticks per second).
+ public const ulong CounterFrequency = 19200000;
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/KernelContext.cs b/src/Ryujinx.HLE/HOS/Kernel/KernelContext.cs
new file mode 100644
index 00000000..ccc5c0f0
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/KernelContext.cs
@@ -0,0 +1,160 @@
+using Ryujinx.Cpu;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.HLE.HOS.Kernel.SupervisorCall;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using Ryujinx.Memory;
+using System;
+using System.Collections.Concurrent;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel
+{
+ /// <summary>
+ /// Root object of the HLE kernel: owns the schedulers, memory managers,
+ /// resource limits, synchronization primitives and the process table,
+ /// and hands out kernel-wide unique IDs. Construction order of the
+ /// members in the constructor is significant.
+ /// </summary>
+ class KernelContext : IDisposable
+ {
+ public long PrivilegedProcessLowestId { get; set; } = 1;
+ public long PrivilegedProcessHighestId { get; set; } = 8;
+
+ public bool EnableVersionChecks { get; set; }
+
+ public bool KernelInitialized { get; }
+
+ // False once Dispose has run; checked by kernel loops to shut down.
+ public bool Running { get; private set; }
+
+ public Switch Device { get; }
+ public MemoryBlock Memory { get; }
+ public ITickSource TickSource { get; }
+ public Syscall Syscall { get; }
+ public SyscallHandler SyscallHandler { get; }
+
+ public KResourceLimit ResourceLimit { get; }
+
+ public KMemoryManager MemoryManager { get; }
+
+ public KMemoryBlockSlabManager LargeMemoryBlockSlabManager { get; }
+ public KMemoryBlockSlabManager SmallMemoryBlockSlabManager { get; }
+
+ public KSlabHeap UserSlabHeapPages { get; }
+
+ public KCriticalSection CriticalSection { get; }
+ public KScheduler[] Schedulers { get; }
+ public KPriorityQueue PriorityQueue { get; }
+ public KTimeManager TimeManager { get; }
+ public KSynchronization Synchronization { get; }
+ public KContextIdManager ContextIdManager { get; }
+
+ public ConcurrentDictionary<ulong, KProcess> Processes { get; }
+ public ConcurrentDictionary<string, KAutoObject> AutoObjectNames { get; }
+
+ public bool ThreadReselectionRequested { get; set; }
+
+ // Monotonic ID counters; consumed via the New*Id methods below.
+ private ulong _kipId;
+ private ulong _processId;
+ private ulong _threadUid;
+
+ public KernelContext(
+ ITickSource tickSource,
+ Switch device,
+ MemoryBlock memory,
+ MemorySize memorySize,
+ MemoryArrange memoryArrange)
+ {
+ TickSource = tickSource;
+ Device = device;
+ Memory = memory;
+
+ Running = true;
+
+ Syscall = new Syscall(this);
+
+ SyscallHandler = new SyscallHandler(this);
+
+ ResourceLimit = new KResourceLimit(this);
+
+ KernelInit.InitializeResourceLimit(ResourceLimit, memorySize);
+
+ MemoryManager = new KMemoryManager(memorySize, memoryArrange);
+
+ LargeMemoryBlockSlabManager = new KMemoryBlockSlabManager(KernelConstants.MemoryBlockAllocatorSize * 2);
+ SmallMemoryBlockSlabManager = new KMemoryBlockSlabManager(KernelConstants.MemoryBlockAllocatorSize);
+
+ UserSlabHeapPages = new KSlabHeap(
+ KernelConstants.UserSlabHeapBase,
+ KernelConstants.UserSlabHeapItemSize,
+ KernelConstants.UserSlabHeapSize);
+
+ // Commit backing memory for the user slab heap up front.
+ CommitMemory(KernelConstants.UserSlabHeapBase - DramMemoryMap.DramBase, KernelConstants.UserSlabHeapSize);
+
+ CriticalSection = new KCriticalSection(this);
+ Schedulers = new KScheduler[KScheduler.CpuCoresCount];
+ PriorityQueue = new KPriorityQueue();
+ TimeManager = new KTimeManager(this);
+ Synchronization = new KSynchronization(this);
+ ContextIdManager = new KContextIdManager();
+
+ for (int core = 0; core < KScheduler.CpuCoresCount; core++)
+ {
+ Schedulers[core] = new KScheduler(this, core);
+ }
+
+ StartPreemptionThread();
+
+ KernelInitialized = true;
+
+ Processes = new ConcurrentDictionary<ulong, KProcess>();
+ AutoObjectNames = new ConcurrentDictionary<string, KAutoObject>();
+
+ _kipId = KernelConstants.InitialKipId;
+ _processId = KernelConstants.InitialProcessId;
+ }
+
+ // Runs the scheduler preemption loop on a dedicated host thread.
+ private void StartPreemptionThread()
+ {
+ void PreemptionThreadStart()
+ {
+ KScheduler.PreemptionThreadLoop(this);
+ }
+
+ new Thread(PreemptionThreadStart) { Name = "HLE.PreemptionThread" }.Start();
+ }
+
+ // Commits host backing memory for the given DRAM-relative range,
+ // expanding it to host page granularity on both ends.
+ public void CommitMemory(ulong address, ulong size)
+ {
+ ulong alignment = MemoryBlock.GetPageSize();
+ ulong endAddress = address + size;
+
+ address &= ~(alignment - 1);
+ endAddress = (endAddress + (alignment - 1)) & ~(alignment - 1);
+
+ Memory.Commit(address, endAddress - address);
+ }
+
+ // The New*Id methods return the pre-increment value (first call returns
+ // the initial counter value), atomically.
+ public ulong NewThreadUid()
+ {
+ return Interlocked.Increment(ref _threadUid) - 1;
+ }
+
+ public ulong NewKipId()
+ {
+ return Interlocked.Increment(ref _kipId) - 1;
+ }
+
+ public ulong NewProcessId()
+ {
+ return Interlocked.Increment(ref _processId) - 1;
+ }
+
+ public void Dispose()
+ {
+ // Stop kernel loops first, then tear down schedulers and timers.
+ Running = false;
+
+ for (int i = 0; i < KScheduler.CpuCoresCount; i++)
+ {
+ Schedulers[i].Dispose();
+ }
+
+ TimeManager.Dispose();
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/KernelStatic.cs b/src/Ryujinx.HLE/HOS/Kernel/KernelStatic.cs
new file mode 100644
index 00000000..c66f4b57
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/KernelStatic.cs
@@ -0,0 +1,73 @@
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using Ryujinx.Horizon.Common;
+using System;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel
+{
+ /// <summary>
+ /// Per-host-thread ambient kernel state: tracks which kernel context and
+ /// emulated thread the current host thread is executing, and hosts the
+ /// initial-process bootstrap helper.
+ /// </summary>
+ static class KernelStatic
+ {
+ // Thread-static: each host thread sees its own context/thread pair.
+ [ThreadStatic]
+ private static KernelContext _context;
+
+ [ThreadStatic]
+ private static KThread _currentThread;
+
+ /// <summary>
+ /// Creates, registers and starts the initial process on core 3.
+ /// </summary>
+ public static Result StartInitialProcess(
+ KernelContext context,
+ ProcessCreationInfo creationInfo,
+ ReadOnlySpan<uint> capabilities,
+ int mainThreadPriority,
+ ThreadStart customThreadStart)
+ {
+ KProcess process = new KProcess(context);
+
+ Result result = process.Initialize(
+ creationInfo,
+ capabilities,
+ context.ResourceLimit,
+ MemoryRegion.Service,
+ null,
+ customThreadStart);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ process.DefaultCpuCore = 3;
+
+ context.Processes.TryAdd(process.Pid, process);
+
+ return process.Start(mainThreadPriority, 0x1000UL);
+ }
+
+ internal static void SetKernelContext(KernelContext context, KThread thread)
+ {
+ _context = context;
+ _currentThread = thread;
+ }
+
+ internal static KThread GetCurrentThread()
+ {
+ return _currentThread;
+ }
+
+ internal static KProcess GetCurrentProcess()
+ {
+ return GetCurrentThread().Owner;
+ }
+
+ internal static KProcess GetProcessByPid(ulong pid)
+ {
+ return _context.Processes.TryGetValue(pid, out KProcess process) ? process : null;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/AddressSpaceType.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/AddressSpaceType.cs
new file mode 100644
index 00000000..8395c577
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/AddressSpaceType.cs
@@ -0,0 +1,10 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Process address space layouts supported by the kernel.
+ /// </summary>
+ enum AddressSpaceType
+ {
+ Addr32Bits = 0,
+ Addr36Bits = 1,
+ Addr32BitsNoMap = 2,
+ Addr39Bits = 3
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
new file mode 100644
index 00000000..4941d5b7
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
@@ -0,0 +1,18 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Fixed physical layout of DRAM regions reserved by the kernel.
+ /// </summary>
+ static class DramMemoryMap
+ {
+ // Physical base address of DRAM.
+ public const ulong DramBase = 0x80000000;
+
+ public const ulong KernelReserveBase = DramBase + 0x60000;
+
+ public const ulong SlabHeapBase = KernelReserveBase + 0x85000;
+ // NOTE(review): "SlapHeapSize" is misspelled (should be "Slab");
+ // renaming this public constant would break external references,
+ // so it is only flagged here.
+ public const ulong SlapHeapSize = 0xa21000;
+ public const ulong SlabHeapEnd = SlabHeapBase + SlapHeapSize;
+
+ // Heap-managed physical addresses start right after the slab heap.
+ public static bool IsHeapPhysicalAddress(ulong address)
+ {
+ return address >= SlabHeapEnd;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KCodeMemory.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KCodeMemory.cs
new file mode 100644
index 00000000..11474e49
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KCodeMemory.cs
@@ -0,0 +1,169 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Horizon.Common;
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Code memory object: borrows a page range from its owner process and
+ /// allows it to be mapped into the current process as writable code
+ /// (Map/Unmap) and back into the owner with a read or read+execute
+ /// permission (MapToOwner/UnmapFromOwner). All map-state changes are
+ /// serialized by a private lock.
+ /// </summary>
+ class KCodeMemory : KAutoObject
+ {
+ public KProcess Owner { get; private set; }
+ private readonly KPageList _pageList;
+ private readonly object _lock;
+ private ulong _address;
+ private bool _isOwnerMapped;
+ private bool _isMapped;
+
+ public KCodeMemory(KernelContext context) : base(context)
+ {
+ _pageList = new KPageList();
+ _lock = new object();
+ }
+
+ /// <summary>
+ /// Borrows the given range from the current process, fills it with 0xff
+ /// and takes a reference on the owner. Must succeed before any mapping.
+ /// </summary>
+ public Result Initialize(ulong address, ulong size)
+ {
+ Owner = KernelStatic.GetCurrentProcess();
+
+ Result result = Owner.MemoryManager.BorrowCodeMemory(_pageList, address, size);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ // Poison the borrowed range so stale code is never executed.
+ Owner.CpuMemory.Fill(address, size, 0xff);
+ Owner.IncrementReferenceCount();
+
+ _address = address;
+ _isMapped = false;
+ _isOwnerMapped = false;
+
+ return Result.Success;
+ }
+
+ /// <summary>
+ /// Maps the borrowed pages into the current process as writable code.
+ /// Fails if the size does not match the page list or already mapped.
+ /// </summary>
+ public Result Map(ulong address, ulong size, KMemoryPermission perm)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ lock (_lock)
+ {
+ if (_isMapped)
+ {
+ return KernelResult.InvalidState;
+ }
+
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ Result result = process.MemoryManager.MapPages(address, _pageList, MemoryState.CodeWritable, KMemoryPermission.ReadAndWrite);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _isMapped = true;
+ }
+
+ return Result.Success;
+ }
+
+ /// <summary>
+ /// Maps the borrowed pages back into the owner as read-only code with
+ /// the requested permission (Read or ReadAndExecute only).
+ /// </summary>
+ public Result MapToOwner(ulong address, ulong size, KMemoryPermission permission)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ lock (_lock)
+ {
+ if (_isOwnerMapped)
+ {
+ return KernelResult.InvalidState;
+ }
+
+ Debug.Assert(permission == KMemoryPermission.Read || permission == KMemoryPermission.ReadAndExecute);
+
+ Result result = Owner.MemoryManager.MapPages(address, _pageList, MemoryState.CodeReadOnly, permission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _isOwnerMapped = true;
+ }
+
+ return Result.Success;
+ }
+
+ /// <summary>
+ /// Unmaps the pages from the current process (reverse of Map).
+ /// </summary>
+ public Result Unmap(ulong address, ulong size)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ lock (_lock)
+ {
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ Result result = process.MemoryManager.UnmapPages(address, _pageList, MemoryState.CodeWritable);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ Debug.Assert(_isMapped);
+
+ _isMapped = false;
+ }
+
+ return Result.Success;
+ }
+
+ /// <summary>
+ /// Unmaps the pages from the owner process (reverse of MapToOwner).
+ /// </summary>
+ public Result UnmapFromOwner(ulong address, ulong size)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ lock (_lock)
+ {
+ Result result = Owner.MemoryManager.UnmapPages(address, _pageList, MemoryState.CodeReadOnly);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ Debug.Assert(_isOwnerMapped);
+
+ _isOwnerMapped = false;
+ }
+
+ return Result.Success;
+ }
+
+ /// <summary>
+ /// Returns the borrowed pages to the owner (only if no mapping remains)
+ /// and releases the owner reference taken in Initialize.
+ /// </summary>
+ protected override void Destroy()
+ {
+ if (!_isMapped && !_isOwnerMapped)
+ {
+ ulong size = _pageList.GetPagesCount() * KPageTableBase.PageSize;
+
+ if (Owner.MemoryManager.UnborrowCodeMemory(_address, size, _pageList) != Result.Success)
+ {
+ throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes.");
+ }
+ }
+
+ Owner.DecrementReferenceCount();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
new file mode 100644
index 00000000..e082105b
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
@@ -0,0 +1,156 @@
+using Ryujinx.Common.Collections;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// A contiguous run of pages sharing the same state, permission and
+ /// attributes, stored as a node of the memory block red-black tree.
+ /// Comparable both to other blocks and to raw addresses for tree lookup.
+ /// </summary>
+ class KMemoryBlock : IntrusiveRedBlackTreeNode<KMemoryBlock>, IComparable<KMemoryBlock>, IComparable<ulong>
+ {
+ public ulong BaseAddress { get; private set; }
+ public ulong PagesCount { get; private set; }
+
+ public MemoryState State { get; private set; }
+ public KMemoryPermission Permission { get; private set; }
+ public MemoryAttribute Attribute { get; private set; }
+ // Permission the block had before an IPC mapping borrowed it.
+ public KMemoryPermission SourcePermission { get; private set; }
+
+ public int IpcRefCount { get; private set; }
+ public int DeviceRefCount { get; private set; }
+
+ public KMemoryBlock(
+ ulong baseAddress,
+ ulong pagesCount,
+ MemoryState state,
+ KMemoryPermission permission,
+ MemoryAttribute attribute,
+ int ipcRefCount = 0,
+ int deviceRefCount = 0)
+ {
+ BaseAddress = baseAddress;
+ PagesCount = pagesCount;
+ State = state;
+ Attribute = attribute;
+ Permission = permission;
+ IpcRefCount = ipcRefCount;
+ DeviceRefCount = deviceRefCount;
+ }
+
+ // Replaces state/permission and all attributes except the
+ // IPC/device-mapped tracking bits, which are preserved.
+ public void SetState(KMemoryPermission permission, MemoryState state, MemoryAttribute attribute)
+ {
+ Permission = permission;
+ State = state;
+ Attribute &= MemoryAttribute.IpcAndDeviceMapped;
+ Attribute |= attribute;
+ }
+
+ // Takes an IPC mapping on the block; the first mapping saves the
+ // current permission and narrows it to the requested R/W subset.
+ public void SetIpcMappingPermission(KMemoryPermission newPermission)
+ {
+ int oldIpcRefCount = IpcRefCount++;
+
+ // The cast detects wrap-around of the low 16 bits of the counter.
+ if ((ushort)IpcRefCount == 0)
+ {
+ throw new InvalidOperationException("IPC reference count increment overflowed.");
+ }
+
+ if (oldIpcRefCount == 0)
+ {
+ SourcePermission = Permission;
+
+ Permission &= ~KMemoryPermission.ReadAndWrite;
+ Permission |= KMemoryPermission.ReadAndWrite & newPermission;
+ }
+
+ Attribute |= MemoryAttribute.IpcMapped;
+ }
+
+ // Releases an IPC mapping; the last release restores the saved
+ // permission and clears the IpcMapped attribute.
+ public void RestoreIpcMappingPermission()
+ {
+ int oldIpcRefCount = IpcRefCount--;
+
+ if (oldIpcRefCount == 0)
+ {
+ throw new InvalidOperationException("IPC reference count decrement underflowed.");
+ }
+
+ if (oldIpcRefCount == 1)
+ {
+ Permission = SourcePermission;
+
+ SourcePermission = KMemoryPermission.None;
+
+ Attribute &= ~MemoryAttribute.IpcMapped;
+ }
+ }
+
+ // Splits the block at the given address: this block keeps the right
+ // half (starting at address), and a new block covering the left half
+ // is returned.
+ public KMemoryBlock SplitRightAtAddress(ulong address)
+ {
+ ulong leftAddress = BaseAddress;
+
+ ulong leftPagesCount = (address - leftAddress) / KPageTableBase.PageSize;
+
+ BaseAddress = address;
+
+ PagesCount -= leftPagesCount;
+
+ return new KMemoryBlock(
+ leftAddress,
+ leftPagesCount,
+ State,
+ Permission,
+ Attribute,
+ IpcRefCount,
+ DeviceRefCount);
+ }
+
+ public void AddPages(ulong pagesCount)
+ {
+ PagesCount += pagesCount;
+ }
+
+ // Snapshots the block into an immutable info record.
+ public KMemoryInfo GetInfo()
+ {
+ ulong size = PagesCount * KPageTableBase.PageSize;
+
+ return new KMemoryInfo(
+ BaseAddress,
+ size,
+ State,
+ Permission,
+ Attribute,
+ SourcePermission,
+ IpcRefCount,
+ DeviceRefCount);
+ }
+
+ // Overlap-aware ordering: returns 0 when this block's base falls
+ // inside the other block's range, so tree searches find overlaps.
+ public int CompareTo(KMemoryBlock other)
+ {
+ if (BaseAddress < other.BaseAddress)
+ {
+ return -1;
+ }
+ else if (BaseAddress <= other.BaseAddress + other.PagesCount * KPageTableBase.PageSize - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+
+ // Address lookup: 0 when the address lies within this block. Note the
+ // sign convention is inverted relative to the block comparison above,
+ // matching how the tree search navigates toward the containing block.
+ public int CompareTo(ulong address)
+ {
+ if (address < BaseAddress)
+ {
+ return 1;
+ }
+ else if (address <= BaseAddress + PagesCount * KPageTableBase.PageSize - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs
new file mode 100644
index 00000000..e9146aeb
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs
@@ -0,0 +1,288 @@
+using Ryujinx.Common.Collections;
+using Ryujinx.Horizon.Common;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
/// <summary>
/// Tracks the state of a process address space as a sorted, non-overlapping
/// sequence of <see cref="KMemoryBlock"/> entries that together cover the
/// whole space, stored in an intrusive red-black tree keyed by base address.
/// </summary>
class KMemoryBlockManager
{
    private const int PageSize = KPageTableBase.PageSize;

    private readonly IntrusiveRedBlackTree<KMemoryBlock> _blockTree;

    // Number of blocks currently tracked.
    public int BlocksCount => _blockTree.Count;

    // Accounts block usage against the process' block slab capacity.
    private KMemoryBlockSlabManager _slabManager;

    private ulong _addrSpaceStart;
    private ulong _addrSpaceEnd;

    public KMemoryBlockManager()
    {
        _blockTree = new IntrusiveRedBlackTree<KMemoryBlock>();
    }

    /// <summary>
    /// Initializes the tree with a single unmapped block spanning the whole
    /// address space.
    /// </summary>
    /// <returns>
    /// <see cref="KernelResult.OutOfResource"/> when the slab manager cannot
    /// back even one block, <see cref="Result.Success"/> otherwise.
    /// </returns>
    public Result Initialize(ulong addrSpaceStart, ulong addrSpaceEnd, KMemoryBlockSlabManager slabManager)
    {
        _slabManager = slabManager;
        _addrSpaceStart = addrSpaceStart;
        _addrSpaceEnd = addrSpaceEnd;

        // First insertion will always need only a single block, because there's nothing to split.
        if (!slabManager.CanAllocate(1))
        {
            return KernelResult.OutOfResource;
        }

        ulong addrSpacePagesCount = (addrSpaceEnd - addrSpaceStart) / PageSize;

        _blockTree.Add(new KMemoryBlock(
            addrSpaceStart,
            addrSpacePagesCount,
            MemoryState.Unmapped,
            KMemoryPermission.None,
            MemoryAttribute.None));

        return Result.Success;
    }

    /// <summary>
    /// Conditionally transitions the state of the range [baseAddress,
    /// baseAddress + pagesCount * PageSize): only portions whose current
    /// state, permission and attribute match the old* arguments are changed
    /// to the new* values; everything else is left untouched. Blocks are
    /// split at the range boundaries and equal-state neighbors re-merged.
    /// </summary>
    public void InsertBlock(
        ulong baseAddress,
        ulong pagesCount,
        MemoryState oldState,
        KMemoryPermission oldPermission,
        MemoryAttribute oldAttribute,
        MemoryState newState,
        KMemoryPermission newPermission,
        MemoryAttribute newAttribute)
    {
        // Insert new block on the list only on areas where the state
        // of the block matches the state specified on the old* state
        // arguments, otherwise leave it as is.

        int oldCount = _blockTree.Count;

        // IPC/device mapped bits are ignored for the purpose of the match.
        oldAttribute |= MemoryAttribute.IpcAndDeviceMapped;

        ulong endAddr = baseAddress + pagesCount * PageSize;

        KMemoryBlock currBlock = FindBlock(baseAddress);

        while (currBlock != null)
        {
            ulong currBaseAddr = currBlock.BaseAddress;
            ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;

            if (baseAddress < currEndAddr && currBaseAddr < endAddr)
            {
                MemoryAttribute currBlockAttr = currBlock.Attribute | MemoryAttribute.IpcAndDeviceMapped;

                if (currBlock.State != oldState ||
                    currBlock.Permission != oldPermission ||
                    currBlockAttr != oldAttribute)
                {
                    // State mismatch: skip this block. Note the end-of-range
                    // check below is intentionally bypassed here; the loop
                    // condition handles running off the end of the tree.
                    currBlock = currBlock.Successor;

                    continue;
                }

                // Split so the transition applies exactly to the overlap.
                if (baseAddress > currBaseAddr)
                {
                    KMemoryBlock newBlock = currBlock.SplitRightAtAddress(baseAddress);
                    _blockTree.Add(newBlock);
                }

                if (endAddr < currEndAddr)
                {
                    KMemoryBlock newBlock = currBlock.SplitRightAtAddress(endAddr);
                    _blockTree.Add(newBlock);
                    currBlock = newBlock;
                }

                currBlock.SetState(newPermission, newState, newAttribute);

                currBlock = MergeEqualStateNeighbors(currBlock);
            }

            // currEndAddr is the pre-split/pre-merge end address; the -1 on
            // both sides avoids overflow when the range ends at address 0
            // (i.e. wraps the top of the address space).
            if (currEndAddr - 1 >= endAddr - 1)
            {
                break;
            }

            currBlock = currBlock.Successor;
        }

        // Charge (or refund) the slab manager for the net block count change.
        _slabManager.Count += _blockTree.Count - oldCount;

        ValidateInternalState();
    }

    /// <summary>
    /// Unconditionally sets the state of the range [baseAddress,
    /// baseAddress + pagesCount * PageSize), splitting existing blocks at the
    /// range boundaries and re-merging equal-state neighbors afterwards.
    /// </summary>
    public void InsertBlock(
        ulong baseAddress,
        ulong pagesCount,
        MemoryState state,
        KMemoryPermission permission = KMemoryPermission.None,
        MemoryAttribute attribute = MemoryAttribute.None)
    {
        // Inserts new block at the list, replacing and splitting
        // existing blocks as needed.

        int oldCount = _blockTree.Count;

        ulong endAddr = baseAddress + pagesCount * PageSize;

        KMemoryBlock currBlock = FindBlock(baseAddress);

        while (currBlock != null)
        {
            ulong currBaseAddr = currBlock.BaseAddress;
            ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;

            if (baseAddress < currEndAddr && currBaseAddr < endAddr)
            {
                if (baseAddress > currBaseAddr)
                {
                    KMemoryBlock newBlock = currBlock.SplitRightAtAddress(baseAddress);
                    _blockTree.Add(newBlock);
                }

                if (endAddr < currEndAddr)
                {
                    KMemoryBlock newBlock = currBlock.SplitRightAtAddress(endAddr);
                    _blockTree.Add(newBlock);
                    currBlock = newBlock;
                }

                currBlock.SetState(permission, state, attribute);

                currBlock = MergeEqualStateNeighbors(currBlock);
            }

            // -1 on both sides avoids overflow for ranges ending at address 0.
            if (currEndAddr - 1 >= endAddr - 1)
            {
                break;
            }

            currBlock = currBlock.Successor;
        }

        _slabManager.Count += _blockTree.Count - oldCount;

        ValidateInternalState();
    }

    // Callback applied to each block covering the inserted range.
    public delegate void BlockMutator(KMemoryBlock block, KMemoryPermission newPerm);

    /// <summary>
    /// Like the unconditional insert, but instead of setting a fixed state it
    /// invokes <paramref name="blockMutate"/> on every block covering the
    /// range, passing <paramref name="permission"/> through.
    /// </summary>
    public void InsertBlock(
        ulong baseAddress,
        ulong pagesCount,
        BlockMutator blockMutate,
        KMemoryPermission permission = KMemoryPermission.None)
    {
        // Inserts new block at the list, replacing and splitting
        // existing blocks as needed, then calling the callback
        // function on the new block.

        int oldCount = _blockTree.Count;

        ulong endAddr = baseAddress + pagesCount * PageSize;

        KMemoryBlock currBlock = FindBlock(baseAddress);

        while (currBlock != null)
        {
            ulong currBaseAddr = currBlock.BaseAddress;
            ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;

            if (baseAddress < currEndAddr && currBaseAddr < endAddr)
            {
                if (baseAddress > currBaseAddr)
                {
                    KMemoryBlock newBlock = currBlock.SplitRightAtAddress(baseAddress);
                    _blockTree.Add(newBlock);
                }

                if (endAddr < currEndAddr)
                {
                    KMemoryBlock newBlock = currBlock.SplitRightAtAddress(endAddr);
                    _blockTree.Add(newBlock);
                    currBlock = newBlock;
                }

                blockMutate(currBlock, permission);

                currBlock = MergeEqualStateNeighbors(currBlock);
            }

            // -1 on both sides avoids overflow for ranges ending at address 0.
            if (currEndAddr - 1 >= endAddr - 1)
            {
                break;
            }

            currBlock = currBlock.Successor;
        }

        _slabManager.Count += _blockTree.Count - oldCount;

        ValidateInternalState();
    }

    // Debug-only invariant check: the blocks must exactly tile
    // [_addrSpaceStart, _addrSpaceEnd) with no gaps or overlaps.
    // NOTE(review): expectedAddress starts at 0, so the first assert relies on
    // FindBlock(_addrSpaceStart) returning the block whose base is
    // _addrSpaceStart; holds when _addrSpaceStart is 0 or blocks start there.
    [Conditional("DEBUG")]
    private void ValidateInternalState()
    {
        ulong expectedAddress = 0;

        KMemoryBlock currBlock = FindBlock(_addrSpaceStart);

        while (currBlock != null)
        {
            Debug.Assert(currBlock.BaseAddress == expectedAddress);

            expectedAddress = currBlock.BaseAddress + currBlock.PagesCount * PageSize;

            currBlock = currBlock.Successor;
        }

        Debug.Assert(expectedAddress == _addrSpaceEnd);
    }

    /// <summary>
    /// Merges <paramref name="block"/> with its predecessor and/or successor
    /// when they have identical state, removing the absorbed nodes from the
    /// tree. Returns the surviving (possibly different) block.
    /// </summary>
    private KMemoryBlock MergeEqualStateNeighbors(KMemoryBlock block)
    {
        KMemoryBlock previousBlock = block.Predecessor;
        KMemoryBlock nextBlock = block.Successor;

        if (previousBlock != null && BlockStateEquals(block, previousBlock))
        {
            _blockTree.Remove(block);

            previousBlock.AddPages(block.PagesCount);

            block = previousBlock;
        }

        if (nextBlock != null && BlockStateEquals(block, nextBlock))
        {
            _blockTree.Remove(nextBlock);

            block.AddPages(nextBlock.PagesCount);
        }

        return block;
    }

    // Two blocks may merge only when every piece of tracked state matches,
    // including IPC/device reference counts and the saved source permission.
    private static bool BlockStateEquals(KMemoryBlock lhs, KMemoryBlock rhs)
    {
        return lhs.State == rhs.State &&
               lhs.Permission == rhs.Permission &&
               lhs.Attribute == rhs.Attribute &&
               lhs.SourcePermission == rhs.SourcePermission &&
               lhs.DeviceRefCount == rhs.DeviceRefCount &&
               lhs.IpcRefCount == rhs.IpcRefCount;
    }

    /// <summary>Finds the block containing <paramref name="address"/>, or null.</summary>
    public KMemoryBlock FindBlock(ulong address)
    {
        return _blockTree.GetNodeByKey(address);
    }
}
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs
new file mode 100644
index 00000000..8732b507
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs
@@ -0,0 +1,19 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
/// <summary>
/// Models the capacity of the kernel's memory block slab heap: a fixed number
/// of blocks a process may have, against which allocations are counted.
/// </summary>
class KMemoryBlockSlabManager
{
    // Fixed at construction time, so mark it readonly.
    private readonly ulong _capacityElements;

    /// <summary>Number of blocks currently charged against the capacity.</summary>
    public int Count { get; set; }

    /// <summary>
    /// Creates a slab manager with the given maximum number of blocks.
    /// </summary>
    /// <param name="capacityElements">Maximum number of blocks that can be allocated.</param>
    public KMemoryBlockSlabManager(ulong capacityElements)
    {
        _capacityElements = capacityElements;
    }

    /// <summary>
    /// Checks whether <paramref name="count"/> additional blocks would still
    /// fit within the configured capacity.
    /// </summary>
    public bool CanAllocate(int count)
    {
        return (ulong)(Count + count) <= _capacityElements;
    }
}
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryInfo.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryInfo.cs
new file mode 100644
index 00000000..af070ac2
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryInfo.cs
@@ -0,0 +1,36 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
/// <summary>
/// Immutable snapshot of a memory range's state, produced by
/// KMemoryBlock.GetInfo and surfaced to memory query callers.
/// </summary>
class KMemoryInfo
{
    // Base address of the range.
    public ulong Address { get; }
    // Size of the range in bytes.
    public ulong Size { get; }

    public MemoryState State { get; }
    public KMemoryPermission Permission { get; }
    public MemoryAttribute Attribute { get; }
    // Permission the range had before being remapped for IPC.
    public KMemoryPermission SourcePermission { get; }

    public int IpcRefCount { get; }
    public int DeviceRefCount { get; }

    public KMemoryInfo(
        ulong address,
        ulong size,
        MemoryState state,
        KMemoryPermission permission,
        MemoryAttribute attribute,
        KMemoryPermission sourcePermission,
        int ipcRefCount,
        int deviceRefCount)
    {
        Address = address;
        Size = size;
        State = state;
        Permission = permission;
        Attribute = attribute;
        SourcePermission = sourcePermission;
        IpcRefCount = ipcRefCount;
        DeviceRefCount = deviceRefCount;
    }
}
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
new file mode 100644
index 00000000..6d0a1658
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
@@ -0,0 +1,65 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
/// <summary>
/// Top-level physical memory manager: aggregates the per-region managers and
/// routes page reference count updates to the region owning each address,
/// splitting ranges that span region boundaries.
/// </summary>
class KMemoryManager
{
    public KMemoryRegionManager[] MemoryRegions { get; }

    public KMemoryManager(MemorySize size, MemoryArrange arrange)
    {
        MemoryRegions = KernelInit.GetMemoryRegions(size, arrange);
    }

    /// <summary>
    /// Finds the region containing <paramref name="address"/>, or null when
    /// the address lies outside every managed region.
    /// </summary>
    private KMemoryRegionManager GetMemoryRegion(ulong address)
    {
        foreach (var region in MemoryRegions)
        {
            if (address >= region.Address && address < region.EndAddr)
            {
                return region;
            }
        }

        return null;
    }

    public void IncrementPagesReferenceCount(ulong address, ulong pagesCount)
    {
        IncrementOrDecrementPagesReferenceCount(address, pagesCount, true);
    }

    public void DecrementPagesReferenceCount(ulong address, ulong pagesCount)
    {
        IncrementOrDecrementPagesReferenceCount(address, pagesCount, false);
    }

    /// <summary>
    /// Applies a reference count update across all regions the range touches,
    /// taking each region's lock while updating it.
    /// </summary>
    /// <exception cref="InvalidOperationException">
    /// Part of the range falls outside every managed memory region.
    /// </exception>
    private void IncrementOrDecrementPagesReferenceCount(ulong address, ulong pagesCount, bool increment)
    {
        while (pagesCount != 0)
        {
            var region = GetMemoryRegion(address);

            // Previously a null region caused a NullReferenceException below;
            // fail with a descriptive exception instead.
            if (region == null)
            {
                throw new InvalidOperationException($"Address 0x{address:X} does not belong to any managed memory region.");
            }

            // Only process the part of the range inside this region.
            ulong countToProcess = Math.Min(pagesCount, region.GetPageOffsetFromEnd(address));

            lock (region)
            {
                if (increment)
                {
                    region.IncrementPagesReferenceCount(address, countToProcess);
                }
                else
                {
                    region.DecrementPagesReferenceCount(address, countToProcess);
                }
            }

            pagesCount -= countToProcess;
            address += countToProcess * KPageTableBase.PageSize;
        }
    }
}
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
new file mode 100644
index 00000000..4596b15d
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
@@ -0,0 +1,242 @@
+using Ryujinx.Horizon.Common;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
/// <summary>
/// Manages a single region of physical memory: page allocation through a
/// buddy-style page heap plus per-page reference counting. The page heap
/// object doubles as the allocation lock.
/// </summary>
class KMemoryRegionManager
{
    private readonly KPageHeap _pageHeap;

    public ulong Address { get; }
    public ulong Size { get; }
    public ulong EndAddr => Address + Size;

    // One reference count slot per page of the region.
    private readonly ushort[] _pageReferenceCounts;

    // NOTE(review): the endAddr parameter is unused; EndAddr is derived from
    // Address + Size. Presumably kept for signature compatibility — confirm.
    public KMemoryRegionManager(ulong address, ulong size, ulong endAddr)
    {
        Address = address;
        Size = size;

        _pageReferenceCounts = new ushort[size / KPageTableBase.PageSize];

        // Start with the entire region free on the page heap.
        _pageHeap = new KPageHeap(address, size);
        _pageHeap.Free(address, size / KPageTableBase.PageSize);
        _pageHeap.UpdateUsedSize();
    }

    /// <summary>
    /// Allocates <paramref name="pagesCount"/> pages (not necessarily
    /// contiguous) and increments their reference counts on success.
    /// </summary>
    public Result AllocatePages(out KPageList pageList, ulong pagesCount)
    {
        if (pagesCount == 0)
        {
            pageList = new KPageList();

            return Result.Success;
        }

        lock (_pageHeap)
        {
            Result result = AllocatePagesImpl(out pageList, pagesCount, false);

            if (result == Result.Success)
            {
                foreach (var node in pageList)
                {
                    IncrementPagesReferenceCount(node.Address, node.PagesCount);
                }
            }

            return result;
        }
    }

    /// <summary>
    /// Allocates a physically contiguous run of pages, increments its
    /// reference counts and commits the backing host memory.
    /// </summary>
    /// <returns>Base address of the allocation, or 0 on failure.</returns>
    public ulong AllocatePagesContiguous(KernelContext context, ulong pagesCount, bool backwards)
    {
        if (pagesCount == 0)
        {
            return 0;
        }

        lock (_pageHeap)
        {
            ulong address = AllocatePagesContiguousImpl(pagesCount, 1, backwards);

            if (address != 0)
            {
                IncrementPagesReferenceCount(address, pagesCount);
                // Commit backing host memory for the allocated DRAM range.
                context.CommitMemory(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize);
            }

            return address;
        }
    }

    // Allocates pages by popping the largest heap blocks that still fit,
    // stepping down to smaller block sizes until the request is satisfied.
    // Caller must hold the _pageHeap lock.
    private Result AllocatePagesImpl(out KPageList pageList, ulong pagesCount, bool random)
    {
        pageList = new KPageList();

        int heapIndex = KPageHeap.GetBlockIndex(pagesCount);

        if (heapIndex < 0)
        {
            return KernelResult.OutOfMemory;
        }

        for (int index = heapIndex; index >= 0; index--)
        {
            ulong pagesPerAlloc = KPageHeap.GetBlockPagesCount(index);

            while (pagesCount >= pagesPerAlloc)
            {
                ulong allocatedBlock = _pageHeap.AllocateBlock(index, random);

                if (allocatedBlock == 0)
                {
                    // No block of this size left; fall back to smaller ones.
                    break;
                }

                Result result = pageList.AddRange(allocatedBlock, pagesPerAlloc);

                if (result != Result.Success)
                {
                    // Roll back everything allocated so far.
                    FreePages(pageList);
                    _pageHeap.Free(allocatedBlock, pagesPerAlloc);

                    return result;
                }

                pagesCount -= pagesPerAlloc;
            }
        }

        if (pagesCount != 0)
        {
            // Not enough free memory to fully satisfy the request.
            FreePages(pageList);

            return KernelResult.OutOfMemory;
        }

        return Result.Success;
    }

    // Caller must hold the _pageHeap lock.
    // NOTE(review): assumes the request maps to a valid heap index;
    // GetAlignedBlockIndex returns -1 for oversized requests, which would
    // fault inside AllocateBlock. Verify callers bound pagesCount.
    private ulong AllocatePagesContiguousImpl(ulong pagesCount, ulong alignPages, bool random)
    {
        int heapIndex = KPageHeap.GetAlignedBlockIndex(pagesCount, alignPages);

        ulong allocatedBlock = _pageHeap.AllocateBlock(heapIndex, random);

        if (allocatedBlock == 0)
        {
            return 0;
        }

        ulong allocatedPages = KPageHeap.GetBlockPagesCount(heapIndex);

        if (allocatedPages > pagesCount)
        {
            // Return the unused tail of the block to the heap.
            _pageHeap.Free(allocatedBlock + pagesCount * KPageTableBase.PageSize, allocatedPages - pagesCount);
        }

        return allocatedBlock;
    }

    /// <summary>Returns a single page to the heap.</summary>
    public void FreePage(ulong address)
    {
        lock (_pageHeap)
        {
            _pageHeap.Free(address, 1);
        }
    }

    /// <summary>Returns every range of <paramref name="pageList"/> to the heap.</summary>
    public void FreePages(KPageList pageList)
    {
        lock (_pageHeap)
        {
            foreach (KPageNode pageNode in pageList)
            {
                _pageHeap.Free(pageNode.Address, pageNode.PagesCount);
            }
        }
    }

    /// <summary>Returns a contiguous range of pages to the heap.</summary>
    public void FreePages(ulong address, ulong pagesCount)
    {
        lock (_pageHeap)
        {
            _pageHeap.Free(address, pagesCount);
        }
    }

    /// <summary>Number of pages currently free on the heap.</summary>
    public ulong GetFreePages()
    {
        lock (_pageHeap)
        {
            return _pageHeap.GetFreePagesCount();
        }
    }

    /// <summary>
    /// Increments the reference count of every page in the range.
    /// Synchronization is the caller's responsibility.
    /// </summary>
    public void IncrementPagesReferenceCount(ulong address, ulong pagesCount)
    {
        ulong index = GetPageOffset(address);
        ulong endIndex = index + pagesCount;

        while (index < endIndex)
        {
            // Post-increment asserts against ushort wrap-around to 0.
            ushort referenceCount = ++_pageReferenceCounts[index];
            Debug.Assert(referenceCount >= 1);

            index++;
        }
    }

    /// <summary>
    /// Decrements the reference count of every page in the range, freeing
    /// contiguous runs of pages whose count drops to zero.
    /// </summary>
    public void DecrementPagesReferenceCount(ulong address, ulong pagesCount)
    {
        ulong index = GetPageOffset(address);
        ulong endIndex = index + pagesCount;

        // Batch contiguous zero-count pages so they are freed in one call.
        ulong freeBaseIndex = 0;
        ulong freePagesCount = 0;

        while (index < endIndex)
        {
            Debug.Assert(_pageReferenceCounts[index] > 0);
            ushort referenceCount = --_pageReferenceCounts[index];

            if (referenceCount == 0)
            {
                if (freePagesCount != 0)
                {
                    // Extend the current run of newly-free pages.
                    freePagesCount++;
                }
                else
                {
                    // Start a new run of newly-free pages.
                    freeBaseIndex = index;
                    freePagesCount = 1;
                }
            }
            else if (freePagesCount != 0)
            {
                // Run interrupted by a still-referenced page; flush it.
                FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
                freePagesCount = 0;
            }

            index++;
        }

        // Flush the trailing run, if any.
        if (freePagesCount != 0)
        {
            FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
        }
    }

    /// <summary>Page index of an address relative to the region start.</summary>
    public ulong GetPageOffset(ulong address)
    {
        return (address - Address) / KPageTableBase.PageSize;
    }

    /// <summary>Number of pages between an address and the end of the region.</summary>
    public ulong GetPageOffsetFromEnd(ulong address)
    {
        return (EndAddr - address) / KPageTableBase.PageSize;
    }
}
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageBitmap.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageBitmap.cs
new file mode 100644
index 00000000..fa090b02
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageBitmap.cs
@@ -0,0 +1,298 @@
+using Ryujinx.Common;
+using System;
+using System.Numerics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
/// <summary>
/// Hierarchical bitmap tracking free blocks for the page heap. Each level
/// summarizes 64 bits of the level below, so a free block can be found with
/// at most <see cref="MaxDepth"/> word reads.
/// </summary>
class KPageBitmap
{
    private struct RandomNumberGenerator
    {
        private uint _entropy;
        private uint _bitsAvailable;

        // TODO(review): no entropy source is wired up — _entropy is always
        // reset to 0, so every generated "random" bit is false and
        // SelectRandomBit resolves ties deterministically.
        private void RefreshEntropy()
        {
            _entropy = 0;
            _bitsAvailable = sizeof(uint) * 8;
        }

        private bool GenerateRandomBit()
        {
            if (_bitsAvailable == 0)
            {
                RefreshEntropy();
            }

            bool bit = (_entropy & 1) != 0;

            _entropy >>= 1;
            _bitsAvailable--;

            return bit;
        }

        /// <summary>
        /// Picks the index of a set bit in <paramref name="bitmap"/> by a
        /// binary search, using a random bit to break ties when both halves
        /// contain set bits. <paramref name="bitmap"/> must be non-zero.
        /// </summary>
        public int SelectRandomBit(ulong bitmap)
        {
            int selected = 0;

            int bitsCount = UInt64BitSize / 2;
            ulong mask = (1UL << bitsCount) - 1;

            while (bitsCount != 0)
            {
                ulong low = bitmap & mask;
                ulong high = (bitmap >> bitsCount) & mask;

                bool chooseLow;

                if (high == 0)
                {
                    chooseLow = true;
                }
                else if (low == 0)
                {
                    chooseLow = false;
                }
                else
                {
                    chooseLow = GenerateRandomBit();
                }

                if (chooseLow)
                {
                    bitmap = low;
                }
                else
                {
                    bitmap = high;
                    selected += bitsCount;
                }

                bitsCount /= 2;
                mask >>= bitsCount;
            }

            return selected;
        }
    }

    private const int UInt64BitSize = sizeof(ulong) * 8;
    private const int MaxDepth = 4;

    // FIX: must NOT be readonly. RandomNumberGenerator is a mutable struct;
    // calling its methods through a readonly field operates on a defensive
    // copy, silently discarding the generator state after every call.
    private RandomNumberGenerator _rng;
    private readonly ArraySegment<ulong>[] _bitStorages;
    private int _usedDepths;

    /// <summary>Total number of set bits (free blocks) at the deepest level.</summary>
    public int BitsCount { get; private set; }

    public int HighestDepthIndex => _usedDepths - 1;

    public KPageBitmap()
    {
        _rng = new RandomNumberGenerator();
        _bitStorages = new ArraySegment<ulong>[MaxDepth];
    }

    /// <summary>
    /// Carves per-depth bitmap storage out of <paramref name="storage"/> for a
    /// bitmap of <paramref name="size"/> bits, returning the unused remainder.
    /// </summary>
    public ArraySegment<ulong> Initialize(ArraySegment<ulong> storage, ulong size)
    {
        _usedDepths = GetRequiredDepth(size);

        // Deepest level first; each shallower level needs 1/64 of the words.
        for (int depth = HighestDepthIndex; depth >= 0; depth--)
        {
            _bitStorages[depth] = storage;
            size = BitUtils.DivRoundUp<ulong>(size, (ulong)UInt64BitSize);
            storage = storage.Slice((int)size);
        }

        return storage;
    }

    /// <summary>
    /// Walks the hierarchy from the root to find a set bit, i.e. a free
    /// block. Returns its deepest-level offset, or ulong.MaxValue when none.
    /// </summary>
    public ulong FindFreeBlock(bool random)
    {
        ulong offset = 0;
        int depth = 0;

        if (random)
        {
            do
            {
                ulong v = _bitStorages[depth][(int)offset];

                if (v == 0)
                {
                    return ulong.MaxValue;
                }

                offset = offset * UInt64BitSize + (ulong)_rng.SelectRandomBit(v);
            }
            while (++depth < _usedDepths);
        }
        else
        {
            do
            {
                ulong v = _bitStorages[depth][(int)offset];

                if (v == 0)
                {
                    return ulong.MaxValue;
                }

                offset = offset * UInt64BitSize + (ulong)BitOperations.TrailingZeroCount(v);
            }
            while (++depth < _usedDepths);
        }

        return offset;
    }

    /// <summary>Marks one deepest-level bit set, updating summary levels.</summary>
    public void SetBit(ulong offset)
    {
        SetBit(HighestDepthIndex, offset);
        BitsCount++;
    }

    /// <summary>Clears one deepest-level bit, updating summary levels.</summary>
    public void ClearBit(ulong offset)
    {
        ClearBit(HighestDepthIndex, offset);
        BitsCount--;
    }

    /// <summary>
    /// Atomically clears <paramref name="count"/> consecutive deepest-level
    /// bits, but only if all of them are currently set. Returns false (and
    /// changes nothing) otherwise.
    /// </summary>
    public bool ClearRange(ulong offset, int count)
    {
        int depth = HighestDepthIndex;
        var bits = _bitStorages[depth];

        int bitInd = (int)(offset / UInt64BitSize);

        if (count < UInt64BitSize)
        {
            // Range fits in a single word.
            int shift = (int)(offset % UInt64BitSize);

            ulong mask = ((1UL << count) - 1) << shift;

            ulong v = bits[bitInd];

            if ((v & mask) != mask)
            {
                return false;
            }

            v &= ~mask;
            bits[bitInd] = v;

            if (v == 0)
            {
                ClearBit(depth - 1, (ulong)bitInd);
            }
        }
        else
        {
            // Multi-word range: verify all words are fully set, then clear.
            int remaining = count;
            int i = 0;

            do
            {
                if (bits[bitInd + i++] != ulong.MaxValue)
                {
                    return false;
                }

                remaining -= UInt64BitSize;
            }
            while (remaining > 0);

            remaining = count;
            i = 0;

            do
            {
                bits[bitInd + i] = 0;
                ClearBit(depth - 1, (ulong)(bitInd + i));
                i++;
                remaining -= UInt64BitSize;
            }
            while (remaining > 0);
        }

        BitsCount -= count;
        return true;
    }

    // Sets a bit at the given depth, propagating upward while the word
    // transitions from all-zero to non-zero.
    private void SetBit(int depth, ulong offset)
    {
        while (depth >= 0)
        {
            int ind = (int)(offset / UInt64BitSize);
            int which = (int)(offset % UInt64BitSize);

            ulong mask = 1UL << which;

            ulong v = _bitStorages[depth][ind];

            _bitStorages[depth][ind] = v | mask;

            if (v != 0)
            {
                // Parent summary bit was already set; stop propagating.
                break;
            }

            offset = (ulong)ind;
            depth--;
        }
    }

    // Clears a bit at the given depth, propagating upward while the word
    // becomes all-zero.
    private void ClearBit(int depth, ulong offset)
    {
        while (depth >= 0)
        {
            int ind = (int)(offset / UInt64BitSize);
            int which = (int)(offset % UInt64BitSize);

            ulong mask = 1UL << which;

            ulong v = _bitStorages[depth][ind];

            v &= ~mask;

            _bitStorages[depth][ind] = v;

            if (v != 0)
            {
                // Word still has set bits; parent summary bit stays set.
                break;
            }

            offset = (ulong)ind;
            depth--;
        }
    }

    // Number of summary levels needed to cover regionSize bits.
    private static int GetRequiredDepth(ulong regionSize)
    {
        int depth = 0;

        do
        {
            regionSize /= UInt64BitSize;
            depth++;
        }
        while (regionSize != 0);

        return depth;
    }

    /// <summary>
    /// Bytes of backing storage needed for a bitmap covering
    /// <paramref name="regionSize"/> bits across all levels.
    /// </summary>
    public static int CalculateManagementOverheadSize(ulong regionSize)
    {
        int overheadBits = 0;

        for (int depth = GetRequiredDepth(regionSize) - 1; depth >= 0; depth--)
        {
            regionSize = BitUtils.DivRoundUp<ulong>(regionSize, UInt64BitSize);
            overheadBits += (int)regionSize;
        }

        return overheadBits * sizeof(ulong);
    }
}
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageHeap.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageHeap.cs
new file mode 100644
index 00000000..c3586ed7
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageHeap.cs
@@ -0,0 +1,283 @@
+using Ryujinx.Common;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
/// <summary>
/// Buddy-style page heap: free memory is tracked per block size (one
/// <see cref="Block"/> per size class), with adjacent free blocks coalesced
/// into the next larger size on free.
/// </summary>
class KPageHeap
{
    // Tracks the free blocks of one size class via a hierarchical bitmap.
    private class Block
    {
        private KPageBitmap _bitmap = new KPageBitmap();
        private ulong _heapAddress;
        private ulong _endOffset;

        // log2 of this block size in bytes.
        public int Shift { get; private set; }
        // log2 of the next larger block size, or 0 for the largest class.
        public int NextShift { get; private set; }
        public ulong Size => 1UL << Shift;
        public int PagesCount => (int)(Size / KPageTableBase.PageSize);
        public int FreeBlocksCount => _bitmap.BitsCount;
        public int FreePagesCount => FreeBlocksCount * PagesCount;

        // Sets up the bitmap over the given storage; returns the unused rest.
        public ArraySegment<ulong> Initialize(ulong address, ulong size, int blockShift, int nextBlockShift, ArraySegment<ulong> bitStorage)
        {
            Shift = blockShift;
            NextShift = nextBlockShift;

            ulong endAddress = address + size;

            // Align the covered range to the next larger block size so that
            // coalescing groups always fall within the bitmap.
            ulong align = nextBlockShift != 0
                ? 1UL << nextBlockShift
                : 1UL << blockShift;

            address = BitUtils.AlignDown(address, align);
            endAddress = BitUtils.AlignUp(endAddress, align);

            _heapAddress = address;
            _endOffset = (endAddress - address) / (1UL << blockShift);

            return _bitmap.Initialize(bitStorage, _endOffset);
        }

        // Marks a block free. If this completes a full group of blocks
        // aligned to the next size class, the group is consumed (cleared)
        // and the address of the resulting larger block is returned so the
        // caller can push it to the next class; returns 0 otherwise.
        public ulong PushBlock(ulong address)
        {
            ulong offset = (address - _heapAddress) >> Shift;

            _bitmap.SetBit(offset);

            if (NextShift != 0)
            {
                int diff = 1 << (NextShift - Shift);

                offset = BitUtils.AlignDown(offset, (ulong)diff);

                if (_bitmap.ClearRange(offset, diff))
                {
                    return _heapAddress + (offset << Shift);
                }
            }

            return 0;
        }

        // Takes one free block of this size class; returns 0 when empty.
        public ulong PopBlock(bool random)
        {
            // FindFreeBlock returns ulong.MaxValue on failure, which is
            // negative when reinterpreted as signed.
            long sOffset = (long)_bitmap.FindFreeBlock(random);

            if (sOffset < 0L)
            {
                return 0;
            }

            ulong offset = (ulong)sOffset;

            _bitmap.ClearBit(offset);

            return _heapAddress + (offset << Shift);
        }

        // Bitmap storage bytes needed for this size class over regionSize.
        public static int CalculateManagementOverheadSize(ulong regionSize, int currBlockShift, int nextBlockShift)
        {
            ulong currBlockSize = 1UL << currBlockShift;
            ulong nextBlockSize = 1UL << nextBlockShift;
            ulong align = nextBlockShift != 0 ? nextBlockSize : currBlockSize;
            return KPageBitmap.CalculateManagementOverheadSize((align * 2 + BitUtils.AlignUp(regionSize, align)) / currBlockSize);
        }
    }

    // Default size classes: 4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB, 1 GiB.
    private static readonly int[] _memoryBlockPageShifts = new int[] { 12, 16, 21, 22, 25, 29, 30 };

    private readonly ulong _heapAddress;
    private readonly ulong _heapSize;
    // NOTE(review): _usedSize is written by UpdateUsedSize but never read here.
    private ulong _usedSize;
    private readonly int _blocksCount;
    private readonly Block[] _blocks;

    public KPageHeap(ulong address, ulong size) : this(address, size, _memoryBlockPageShifts)
    {
    }

    // NOTE(review): _blocks is sized from the static _memoryBlockPageShifts
    // rather than blockShifts — inconsistent if a caller ever passes a longer
    // custom shift array (currently only the default is used).
    public KPageHeap(ulong address, ulong size, int[] blockShifts)
    {
        _heapAddress = address;
        _heapSize = size;
        _blocksCount = blockShifts.Length;
        _blocks = new Block[_memoryBlockPageShifts.Length];

        var currBitmapStorage = new ArraySegment<ulong>(new ulong[CalculateManagementOverheadSize(size, blockShifts)]);

        for (int i = 0; i < blockShifts.Length; i++)
        {
            int currBlockShift = blockShifts[i];
            int nextBlockShift = i != blockShifts.Length - 1 ? blockShifts[i + 1] : 0;

            _blocks[i] = new Block();

            currBitmapStorage = _blocks[i].Initialize(address, size, currBlockShift, nextBlockShift, currBitmapStorage);
        }
    }

    /// <summary>Recomputes the used byte count from the current free pages.</summary>
    public void UpdateUsedSize()
    {
        _usedSize = _heapSize - (GetFreePagesCount() * KPageTableBase.PageSize);
    }

    /// <summary>Total free pages across all size classes.</summary>
    public ulong GetFreePagesCount()
    {
        ulong freeCount = 0;

        for (int i = 0; i < _blocksCount; i++)
        {
            freeCount += (ulong)_blocks[i].FreePagesCount;
        }

        return freeCount;
    }

    /// <summary>
    /// Allocates one block of the size class at <paramref name="index"/>,
    /// splitting a larger block if necessary and returning the unused
    /// remainder to the heap. Returns 0 when no block is available.
    /// </summary>
    public ulong AllocateBlock(int index, bool random)
    {
        ulong neededSize = _blocks[index].Size;

        for (int i = index; i < _blocksCount; i++)
        {
            ulong address = _blocks[i].PopBlock(random);

            if (address != 0)
            {
                ulong allocatedSize = _blocks[i].Size;

                if (allocatedSize > neededSize)
                {
                    // Split: free the tail beyond the requested size.
                    Free(address + neededSize, (allocatedSize - neededSize) / KPageTableBase.PageSize);
                }

                return address;
            }
        }

        return 0;
    }

    // Pushes a block into its size class and keeps promoting it while
    // PushBlock reports a completed group of the next larger class.
    private void FreeBlock(ulong block, int index)
    {
        do
        {
            block = _blocks[index++].PushBlock(block);
        }
        while (block != 0);
    }

    /// <summary>
    /// Frees a page range, carving it into the largest possible aligned
    /// blocks: a maximal middle section of the biggest fitting class, then
    /// progressively smaller blocks for the unaligned head and tail.
    /// </summary>
    public void Free(ulong address, ulong pagesCount)
    {
        if (pagesCount == 0)
        {
            return;
        }

        int bigIndex = _blocksCount - 1;

        ulong start = address;
        ulong end = address + pagesCount * KPageTableBase.PageSize;
        ulong beforeStart = start;
        ulong beforeEnd = start;
        ulong afterStart = end;
        ulong afterEnd = end;

        // Find the largest class with at least one aligned block inside the
        // range and free that middle section.
        while (bigIndex >= 0)
        {
            ulong blockSize = _blocks[bigIndex].Size;

            ulong bigStart = BitUtils.AlignUp(start, blockSize);
            ulong bigEnd = BitUtils.AlignDown(end, blockSize);

            if (bigStart < bigEnd)
            {
                for (ulong block = bigStart; block < bigEnd; block += blockSize)
                {
                    FreeBlock(block, bigIndex);
                }

                beforeEnd = bigStart;
                afterStart = bigEnd;

                break;
            }

            bigIndex--;
        }

        // Free the unaligned head [beforeStart, beforeEnd) with smaller
        // blocks, working backwards from the middle section.
        for (int i = bigIndex - 1; i >= 0; i--)
        {
            ulong blockSize = _blocks[i].Size;

            while (beforeStart + blockSize <= beforeEnd)
            {
                beforeEnd -= blockSize;
                FreeBlock(beforeEnd, i);
            }
        }

        // Free the unaligned tail [afterStart, afterEnd) with smaller
        // blocks, working forwards from the middle section.
        for (int i = bigIndex - 1; i >= 0; i--)
        {
            ulong blockSize = _blocks[i].Size;

            while (afterStart + blockSize <= afterEnd)
            {
                FreeBlock(afterStart, i);
                afterStart += blockSize;
            }
        }
    }

    /// <summary>
    /// Smallest size class index that can hold pagesCount pages at the given
    /// alignment, or -1 when the request exceeds the largest class.
    /// </summary>
    public static int GetAlignedBlockIndex(ulong pagesCount, ulong alignPages)
    {
        ulong targetPages = Math.Max(pagesCount, alignPages);

        for (int i = 0; i < _memoryBlockPageShifts.Length; i++)
        {
            if (targetPages <= GetBlockPagesCount(i))
            {
                return i;
            }
        }

        return -1;
    }

    /// <summary>
    /// Largest size class index not exceeding pagesCount pages, or -1 when
    /// pagesCount is below the smallest class.
    /// </summary>
    public static int GetBlockIndex(ulong pagesCount)
    {
        for (int i = _memoryBlockPageShifts.Length - 1; i >= 0; i--)
        {
            if (pagesCount >= GetBlockPagesCount(i))
            {
                return i;
            }
        }

        return -1;
    }

    /// <summary>Block size in bytes of the given size class.</summary>
    public static ulong GetBlockSize(int index)
    {
        return 1UL << _memoryBlockPageShifts[index];
    }

    /// <summary>Block size in pages of the given size class.</summary>
    public static ulong GetBlockPagesCount(int index)
    {
        return GetBlockSize(index) / KPageTableBase.PageSize;
    }

    // Total bitmap storage (page-aligned, in bytes) for all size classes.
    private static int CalculateManagementOverheadSize(ulong regionSize, int[] blockShifts)
    {
        int overheadSize = 0;

        for (int i = 0; i < blockShifts.Length; i++)
        {
            int currBlockShift = blockShifts[i];
            int nextBlockShift = i != blockShifts.Length - 1 ? blockShifts[i + 1] : 0;
            overheadSize += Block.CalculateManagementOverheadSize(regionSize, currBlockShift, nextBlockShift);
        }

        return BitUtils.AlignUp(overheadSize, KPageTableBase.PageSize);
    }
}
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs
new file mode 100644
index 00000000..3149faa9
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs
@@ -0,0 +1,97 @@
+using Ryujinx.Horizon.Common;
+using System.Collections;
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
/// <summary>
/// Ordered list of physical page ranges. Appending a range contiguous with
/// the tail coalesces the two into a single node.
/// </summary>
class KPageList : IEnumerable<KPageNode>
{
    public LinkedList<KPageNode> Nodes { get; }

    public KPageList()
    {
        Nodes = new LinkedList<KPageNode>();
    }

    /// <summary>
    /// Appends a page range, merging it with the last node when the two
    /// ranges are physically contiguous. Empty ranges are ignored.
    /// </summary>
    public Result AddRange(ulong address, ulong pagesCount)
    {
        if (pagesCount == 0)
        {
            return Result.Success;
        }

        LinkedListNode<KPageNode> tail = Nodes.Last;

        if (tail != null)
        {
            KPageNode tailValue = tail.Value;

            // Coalesce when the new range starts exactly where the tail ends.
            if (tailValue.Address + tailValue.PagesCount * KPageTableBase.PageSize == address)
            {
                address = tailValue.Address;
                pagesCount += tailValue.PagesCount;

                Nodes.RemoveLast();
            }
        }

        Nodes.AddLast(new KPageNode(address, pagesCount));

        return Result.Success;
    }

    /// <summary>Total number of pages across all ranges.</summary>
    public ulong GetPagesCount()
    {
        ulong total = 0;

        foreach (KPageNode range in this)
        {
            total += range.PagesCount;
        }

        return total;
    }

    /// <summary>
    /// Checks whether both lists describe exactly the same sequence of ranges.
    /// </summary>
    public bool IsEqual(KPageList other)
    {
        using IEnumerator<KPageNode> left = GetEnumerator();
        using IEnumerator<KPageNode> right = other.GetEnumerator();

        while (true)
        {
            bool leftHasNext = left.MoveNext();
            bool rightHasNext = right.MoveNext();

            if (leftHasNext != rightHasNext)
            {
                // One list is longer than the other.
                return false;
            }

            if (!leftHasNext)
            {
                // Both exhausted without a mismatch.
                return true;
            }

            if (left.Current.Address != right.Current.Address ||
                left.Current.PagesCount != right.Current.PagesCount)
            {
                return false;
            }
        }
    }

    /// <summary>Increments the reference count of every page in the list.</summary>
    public void IncrementPagesReferenceCount(KMemoryManager manager)
    {
        foreach (KPageNode range in this)
        {
            manager.IncrementPagesReferenceCount(range.Address, range.PagesCount);
        }
    }

    /// <summary>Decrements the reference count of every page in the list.</summary>
    public void DecrementPagesReferenceCount(KMemoryManager manager)
    {
        foreach (KPageNode range in this)
        {
            manager.DecrementPagesReferenceCount(range.Address, range.PagesCount);
        }
    }

    public IEnumerator<KPageNode> GetEnumerator()
    {
        return Nodes.GetEnumerator();
    }

    IEnumerator IEnumerable.GetEnumerator()
    {
        return GetEnumerator();
    }
}
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageNode.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageNode.cs
new file mode 100644
index 00000000..ada41687
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageNode.cs
@@ -0,0 +1,14 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
/// <summary>
/// A contiguous range of physical memory pages; the element type of KPageList.
/// </summary>
struct KPageNode
{
    // Physical base address of the range.
    public ulong Address;

    // Number of pages in the range.
    public ulong PagesCount;

    public KPageNode(ulong address, ulong pagesCount)
    {
        Address = address;
        PagesCount = pagesCount;
    }
}
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
new file mode 100644
index 00000000..28e9f90a
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
@@ -0,0 +1,229 @@
+using Ryujinx.Horizon.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+    /// <summary>
+    /// Page table implementation that mirrors guest mappings onto the host
+    /// process address space through an <see cref="IVirtualMemoryManager"/>.
+    /// Physical addresses handled here are absolute; the backing CPU memory
+    /// works with DRAM-relative offsets, hence the DramBase adjustments below.
+    /// </summary>
+    class KPageTable : KPageTableBase
+    {
+        private readonly IVirtualMemoryManager _cpuMemory;
+
+        protected override bool Supports4KBPages => _cpuMemory.Supports4KBPages;
+
+        public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
+        {
+            _cpuMemory = cpuMemory;
+        }
+
+        /// <inheritdoc/>
+        protected override IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+        {
+            return _cpuMemory.GetHostRegions(va, size);
+        }
+
+        /// <inheritdoc/>
+        protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
+        {
+            var ranges = _cpuMemory.GetPhysicalRegions(va, size);
+            foreach (var range in ranges)
+            {
+                // The CPU memory manager returns DRAM-relative addresses;
+                // convert to absolute physical addresses for the page list.
+                pageList.AddRange(range.Address + DramMemoryMap.DramBase, range.Size / PageSize);
+            }
+        }
+
+        /// <inheritdoc/>
+        protected override ReadOnlySpan<byte> GetSpan(ulong va, int size)
+        {
+            return _cpuMemory.GetSpan(va, size);
+        }
+
+        /// <inheritdoc/>
+        protected override Result MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
+        {
+            KPageList pageList = new KPageList();
+            GetPhysicalRegions(src, pagesCount * PageSize, pageList);
+
+            // Revoke access to the source range before aliasing it at dst.
+            Result result = Reprotect(src, pagesCount, KMemoryPermission.None);
+
+            if (result != Result.Success)
+            {
+                return result;
+            }
+
+            result = MapPages(dst, pageList, newDstPermission, MemoryMapFlags.Private, false, 0);
+
+            if (result != Result.Success)
+            {
+                // Mapping failed; restore the source range's original permission.
+                Result reprotectResult = Reprotect(src, pagesCount, oldSrcPermission);
+                Debug.Assert(reprotectResult == Result.Success);
+            }
+
+            return result;
+        }
+
+        /// <inheritdoc/>
+        protected override Result UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
+        {
+            ulong size = pagesCount * PageSize;
+
+            KPageList srcPageList = new KPageList();
+            KPageList dstPageList = new KPageList();
+
+            GetPhysicalRegions(src, size, srcPageList);
+            GetPhysicalRegions(dst, size, dstPageList);
+
+            // dst must be an exact alias of src, otherwise the unmap is invalid.
+            if (!dstPageList.IsEqual(srcPageList))
+            {
+                return KernelResult.InvalidMemRange;
+            }
+
+            Result result = Unmap(dst, pagesCount);
+
+            if (result != Result.Success)
+            {
+                return result;
+            }
+
+            result = Reprotect(src, pagesCount, newSrcPermission);
+
+            if (result != Result.Success)
+            {
+                // Restoring src access failed; re-establish the dst alias to roll back.
+                Result mapResult = MapPages(dst, dstPageList, oldDstPermission, MemoryMapFlags.Private, false, 0);
+                Debug.Assert(mapResult == Result.Success);
+            }
+
+            return result;
+        }
+
+        /// <inheritdoc/>
+        protected override Result MapPages(
+            ulong dstVa,
+            ulong pagesCount,
+            ulong srcPa,
+            KMemoryPermission permission,
+            MemoryMapFlags flags,
+            bool shouldFillPages,
+            byte fillValue)
+        {
+            ulong size = pagesCount * PageSize;
+
+            // Ensure the backing host memory is committed before mapping.
+            Context.CommitMemory(srcPa - DramMemoryMap.DramBase, size);
+
+            _cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size, flags);
+
+            // Only heap pages are reference counted.
+            if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
+            {
+                Context.MemoryManager.IncrementPagesReferenceCount(srcPa, pagesCount);
+            }
+
+            if (shouldFillPages)
+            {
+                _cpuMemory.Fill(dstVa, size, fillValue);
+            }
+
+            return Result.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override Result MapPages(
+            ulong address,
+            KPageList pageList,
+            KMemoryPermission permission,
+            MemoryMapFlags flags,
+            bool shouldFillPages,
+            byte fillValue)
+        {
+            // The scoped list releases the page references unless SignalSuccess is called.
+            using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
+
+            ulong currentVa = address;
+
+            foreach (var pageNode in pageList)
+            {
+                ulong addr = pageNode.Address - DramMemoryMap.DramBase;
+                ulong size = pageNode.PagesCount * PageSize;
+
+                Context.CommitMemory(addr, size);
+
+                _cpuMemory.Map(currentVa, addr, size, flags);
+
+                if (shouldFillPages)
+                {
+                    _cpuMemory.Fill(currentVa, size, fillValue);
+                }
+
+                currentVa += size;
+            }
+
+            scopedPageList.SignalSuccess();
+
+            return Result.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size)
+        {
+            ulong offset = 0;
+
+            // Map the host ranges back to back starting at va.
+            foreach (var region in regions)
+            {
+                _cpuMemory.MapForeign(va + offset, region.Address, region.Size);
+
+                offset += region.Size;
+            }
+
+            return Result.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override Result Unmap(ulong address, ulong pagesCount)
+        {
+            KPageList pagesToClose = new KPageList();
+
+            var regions = _cpuMemory.GetPhysicalRegions(address, pagesCount * PageSize);
+
+            // Collect the heap pages backing the range so their reference
+            // counts can be dropped after the unmap.
+            foreach (var region in regions)
+            {
+                ulong pa = region.Address + DramMemoryMap.DramBase;
+                if (DramMemoryMap.IsHeapPhysicalAddress(pa))
+                {
+                    pagesToClose.AddRange(pa, region.Size / PageSize);
+                }
+            }
+
+            _cpuMemory.Unmap(address, pagesCount * PageSize);
+
+            pagesToClose.DecrementPagesReferenceCount(Context.MemoryManager);
+
+            return Result.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override Result Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission)
+        {
+            // TODO.
+            return Result.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override Result ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission)
+        {
+            // TODO.
+            return Result.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override void SignalMemoryTracking(ulong va, ulong size, bool write)
+        {
+            _cpuMemory.SignalMemoryTracking(va, size, write);
+        }
+
+        /// <inheritdoc/>
+        protected override void Write(ulong va, ReadOnlySpan<byte> data)
+        {
+            _cpuMemory.Write(va, data);
+        }
+    }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
new file mode 100644
index 00000000..614eb527
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
@@ -0,0 +1,3043 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Horizon.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ abstract class KPageTableBase
+ {
+ private static readonly int[] MappingUnitSizes = new int[]
+ {
+ 0x1000,
+ 0x10000,
+ 0x200000,
+ 0x400000,
+ 0x2000000,
+ 0x40000000
+ };
+
+ public const int PageSize = 0x1000;
+
+ private const int KMemoryBlockSize = 0x40;
+
+ // We need 2 blocks for the case where a big block
+ // needs to be split in 2, plus one block that will be the new one inserted.
+ private const int MaxBlocksNeededForInsertion = 2;
+
+ protected readonly KernelContext Context;
+ protected virtual bool Supports4KBPages => true;
+
+ public ulong AddrSpaceStart { get; private set; }
+ public ulong AddrSpaceEnd { get; private set; }
+
+ public ulong CodeRegionStart { get; private set; }
+ public ulong CodeRegionEnd { get; private set; }
+
+ public ulong HeapRegionStart { get; private set; }
+ public ulong HeapRegionEnd { get; private set; }
+
+ private ulong _currentHeapAddr;
+
+ public ulong AliasRegionStart { get; private set; }
+ public ulong AliasRegionEnd { get; private set; }
+
+ public ulong StackRegionStart { get; private set; }
+ public ulong StackRegionEnd { get; private set; }
+
+ public ulong TlsIoRegionStart { get; private set; }
+ public ulong TlsIoRegionEnd { get; private set; }
+
+ private ulong _heapCapacity;
+
+ public ulong PhysicalMemoryUsage { get; private set; }
+
+ private readonly KMemoryBlockManager _blockManager;
+
+ private MemoryRegion _memRegion;
+
+ private bool _aslrDisabled;
+
+ public int AddrSpaceWidth { get; private set; }
+
+ private bool _isKernel;
+
+ private bool _aslrEnabled;
+
+ private KMemoryBlockSlabManager _slabManager;
+
+ private int _contextId;
+
+ private MersenneTwister _randomNumberGenerator;
+
+ private MemoryFillValue _heapFillValue;
+ private MemoryFillValue _ipcFillValue;
+
+        /// <summary>
+        /// Creates a new page table tied to the given kernel context.
+        /// Address space regions are set up later by <c>InitializeForProcess</c>.
+        /// </summary>
+        public KPageTableBase(KernelContext context)
+        {
+            Context = context;
+
+            _blockManager = new KMemoryBlockManager();
+
+            _isKernel = false;
+
+            _heapFillValue = MemoryFillValue.Zero;
+            _ipcFillValue = MemoryFillValue.Zero;
+        }
+
+ private static readonly int[] AddrSpaceSizes = new int[] { 32, 36, 32, 39 };
+
+        /// <summary>
+        /// Initializes the address space layout for a userland process.
+        /// </summary>
+        /// <returns><see cref="Result.Success"/> on success, or an error result otherwise.</returns>
+        public Result InitializeForProcess(
+            AddressSpaceType addrSpaceType,
+            bool aslrEnabled,
+            bool aslrDisabled,
+            MemoryRegion memRegion,
+            ulong address,
+            ulong size,
+            KMemoryBlockSlabManager slabManager)
+        {
+            if ((uint)addrSpaceType > (uint)AddressSpaceType.Addr39Bits)
+            {
+                // Fix: the parameter name was previously passed as the exception
+                // *message*; pass it as paramName with a descriptive message (CA2208).
+                throw new ArgumentException($"Invalid address space type \"{addrSpaceType}\".", nameof(addrSpaceType));
+            }
+
+            _contextId = Context.ContextIdManager.GetId();
+
+            ulong addrSpaceBase = 0;
+            ulong addrSpaceSize = 1UL << AddrSpaceSizes[(int)addrSpaceType];
+
+            Result result = CreateUserAddressSpace(
+                addrSpaceType,
+                aslrEnabled,
+                aslrDisabled,
+                addrSpaceBase,
+                addrSpaceSize,
+                memRegion,
+                address,
+                size,
+                slabManager);
+
+            if (result != Result.Success)
+            {
+                // Address space creation failed; return the context id we reserved.
+                Context.ContextIdManager.PutId(_contextId);
+            }
+
+            return result;
+        }
+
+        /// <summary>
+        /// Mutable helper describing one address space region (alias, heap,
+        /// stack or TLS/IO) while the user address space layout is computed.
+        /// </summary>
+        private class Region
+        {
+            public ulong Start;
+            public ulong End;
+            public ulong Size;
+            public ulong AslrOffset; // Randomized placement offset when ASLR is enabled.
+        }
+
+        /// <summary>
+        /// Computes and stores the layout of all user address space regions
+        /// (code, alias, heap, stack, TLS/IO) for the given address space type,
+        /// then initializes the memory block manager over the full range.
+        /// </summary>
+        private Result CreateUserAddressSpace(
+            AddressSpaceType addrSpaceType,
+            bool aslrEnabled,
+            bool aslrDisabled,
+            ulong addrSpaceStart,
+            ulong addrSpaceEnd,
+            MemoryRegion memRegion,
+            ulong address,
+            ulong size,
+            KMemoryBlockSlabManager slabManager)
+        {
+            ulong endAddr = address + size;
+
+            Region aliasRegion = new Region();
+            Region heapRegion = new Region();
+            Region stackRegion = new Region();
+            Region tlsIoRegion = new Region();
+
+            ulong codeRegionSize;
+            ulong stackAndTlsIoStart;
+            ulong stackAndTlsIoEnd;
+            ulong baseAddress;
+
+            // Region sizes and code placement are fixed per address space type.
+            switch (addrSpaceType)
+            {
+                case AddressSpaceType.Addr32Bits:
+                    aliasRegion.Size = 0x40000000;
+                    heapRegion.Size = 0x40000000;
+                    stackRegion.Size = 0;
+                    tlsIoRegion.Size = 0;
+                    CodeRegionStart = 0x200000;
+                    codeRegionSize = 0x3fe00000;
+                    stackAndTlsIoStart = 0x200000;
+                    stackAndTlsIoEnd = 0x40000000;
+                    baseAddress = 0x200000;
+                    AddrSpaceWidth = 32;
+                    break;
+
+                case AddressSpaceType.Addr36Bits:
+                    aliasRegion.Size = 0x180000000;
+                    heapRegion.Size = 0x180000000;
+                    stackRegion.Size = 0;
+                    tlsIoRegion.Size = 0;
+                    CodeRegionStart = 0x8000000;
+                    codeRegionSize = 0x78000000;
+                    stackAndTlsIoStart = 0x8000000;
+                    stackAndTlsIoEnd = 0x80000000;
+                    baseAddress = 0x8000000;
+                    AddrSpaceWidth = 36;
+                    break;
+
+                case AddressSpaceType.Addr32BitsNoMap:
+                    aliasRegion.Size = 0;
+                    heapRegion.Size = 0x80000000;
+                    stackRegion.Size = 0;
+                    tlsIoRegion.Size = 0;
+                    CodeRegionStart = 0x200000;
+                    codeRegionSize = 0x3fe00000;
+                    stackAndTlsIoStart = 0x200000;
+                    stackAndTlsIoEnd = 0x40000000;
+                    baseAddress = 0x200000;
+                    AddrSpaceWidth = 32;
+                    break;
+
+                case AddressSpaceType.Addr39Bits:
+                    aliasRegion.Size = 0x1000000000;
+                    heapRegion.Size = 0x180000000;
+                    stackRegion.Size = 0x80000000;
+                    tlsIoRegion.Size = 0x1000000000;
+                    // On 39-bit processes the code region is derived from the
+                    // requested address range, aligned to 2 MB.
+                    CodeRegionStart = BitUtils.AlignDown<ulong>(address, 0x200000);
+                    codeRegionSize = BitUtils.AlignUp<ulong>(endAddr, 0x200000) - CodeRegionStart;
+                    stackAndTlsIoStart = 0;
+                    stackAndTlsIoEnd = 0;
+                    baseAddress = 0x8000000;
+                    AddrSpaceWidth = 39;
+                    break;
+
+                default:
+                    // Fix: pass the parameter name as paramName, not as the message (CA2208).
+                    throw new ArgumentException($"Invalid address space type \"{addrSpaceType}\".", nameof(addrSpaceType));
+            }
+
+            CodeRegionEnd = CodeRegionStart + codeRegionSize;
+
+            ulong mapBaseAddress;
+            ulong mapAvailableSize;
+
+            // Place the map area on whichever side of the code region is larger.
+            if (CodeRegionStart - baseAddress >= addrSpaceEnd - CodeRegionEnd)
+            {
+                // Has more space before the start of the code region.
+                mapBaseAddress = baseAddress;
+                mapAvailableSize = CodeRegionStart - baseAddress;
+            }
+            else
+            {
+                // Has more space after the end of the code region.
+                mapBaseAddress = CodeRegionEnd;
+                mapAvailableSize = addrSpaceEnd - CodeRegionEnd;
+            }
+
+            ulong mapTotalSize = aliasRegion.Size + heapRegion.Size + stackRegion.Size + tlsIoRegion.Size;
+
+            _aslrEnabled = aslrEnabled;
+
+            AddrSpaceStart = addrSpaceStart;
+            AddrSpaceEnd = addrSpaceEnd;
+
+            _slabManager = slabManager;
+
+            if (mapAvailableSize < mapTotalSize)
+            {
+                return KernelResult.OutOfMemory;
+            }
+
+            // Fix: compute the ASLR headroom only after the size check above;
+            // previously the subtraction could underflow (harmlessly, but confusingly)
+            // when the available size was insufficient.
+            ulong aslrMaxOffset = mapAvailableSize - mapTotalSize;
+
+            if (aslrEnabled)
+            {
+                // Offsets are randomized with 2 MB (1 << 21) granularity.
+                aliasRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+                heapRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+                stackRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+                tlsIoRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+            }
+
+            // Regions are sorted based on ASLR offset.
+            // When ASLR is disabled, the order is Map, Heap, NewMap and TlsIo.
+            aliasRegion.Start = mapBaseAddress + aliasRegion.AslrOffset;
+            aliasRegion.End = aliasRegion.Start + aliasRegion.Size;
+            heapRegion.Start = mapBaseAddress + heapRegion.AslrOffset;
+            heapRegion.End = heapRegion.Start + heapRegion.Size;
+            stackRegion.Start = mapBaseAddress + stackRegion.AslrOffset;
+            stackRegion.End = stackRegion.Start + stackRegion.Size;
+            tlsIoRegion.Start = mapBaseAddress + tlsIoRegion.AslrOffset;
+            tlsIoRegion.End = tlsIoRegion.Start + tlsIoRegion.Size;
+
+            SortRegion(heapRegion, aliasRegion);
+
+            if (stackRegion.Size != 0)
+            {
+                SortRegion(stackRegion, aliasRegion);
+                SortRegion(stackRegion, heapRegion);
+            }
+            else
+            {
+                stackRegion.Start = stackAndTlsIoStart;
+                stackRegion.End = stackAndTlsIoEnd;
+            }
+
+            if (tlsIoRegion.Size != 0)
+            {
+                SortRegion(tlsIoRegion, aliasRegion);
+                SortRegion(tlsIoRegion, heapRegion);
+                SortRegion(tlsIoRegion, stackRegion);
+            }
+            else
+            {
+                tlsIoRegion.Start = stackAndTlsIoStart;
+                tlsIoRegion.End = stackAndTlsIoEnd;
+            }
+
+            AliasRegionStart = aliasRegion.Start;
+            AliasRegionEnd = aliasRegion.End;
+            HeapRegionStart = heapRegion.Start;
+            HeapRegionEnd = heapRegion.End;
+            StackRegionStart = stackRegion.Start;
+            StackRegionEnd = stackRegion.End;
+            TlsIoRegionStart = tlsIoRegion.Start;
+            TlsIoRegionEnd = tlsIoRegion.End;
+
+            // TODO: Check kernel configuration via secure monitor call when implemented to set memory fill values.
+
+            _currentHeapAddr = HeapRegionStart;
+            _heapCapacity = 0;
+            PhysicalMemoryUsage = 0;
+
+            _memRegion = memRegion;
+            _aslrDisabled = aslrDisabled;
+
+            return _blockManager.Initialize(addrSpaceStart, addrSpaceEnd, slabManager);
+        }
+
+        // Unsigned convenience wrapper over the signed random number generator.
+        private ulong GetRandomValue(ulong min, ulong max) => (ulong)GetRandomValue((long)min, (long)max);
+
+        // Returns a pseudo-random value in [min, max), creating the
+        // generator lazily on first use (seeded with 0).
+        private long GetRandomValue(long min, long max)
+        {
+            _randomNumberGenerator ??= new MersenneTwister(0);
+
+            return _randomNumberGenerator.GenRandomNumber(min, max);
+        }
+
+        /// <summary>
+        /// Orders two regions by ASLR offset: the region with the larger
+        /// offset is pushed up past the other region's size.
+        /// </summary>
+        private static void SortRegion(Region lhs, Region rhs)
+        {
+            if (rhs.AslrOffset <= lhs.AslrOffset)
+            {
+                lhs.Start += rhs.Size;
+                lhs.End += rhs.Size;
+            }
+            else
+            {
+                rhs.Start += lhs.Size;
+                rhs.End += lhs.Size;
+            }
+        }
+
+        /// <summary>
+        /// Maps the pages of <paramref name="pageList"/> at a fixed virtual address,
+        /// recording the new state and permission in the block manager.
+        /// </summary>
+        public Result MapPages(ulong address, KPageList pageList, MemoryState state, KMemoryPermission permission)
+        {
+            ulong pagesCount = pageList.GetPagesCount();
+
+            ulong size = pagesCount * PageSize;
+
+            // The requested state must be valid for the region containing the address.
+            if (!CanContain(address, size, state))
+            {
+                return KernelResult.InvalidMemState;
+            }
+
+            lock (_blockManager)
+            {
+                if (!IsUnmapped(address, pagesCount * PageSize))
+                {
+                    return KernelResult.InvalidMemState;
+                }
+
+                if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                {
+                    return KernelResult.OutOfResource;
+                }
+
+                Result result = MapPages(address, pageList, permission, MemoryMapFlags.None);
+
+                if (result == Result.Success)
+                {
+                    _blockManager.InsertBlock(address, pagesCount, state, permission);
+                }
+
+                return result;
+            }
+        }
+
+        /// <summary>
+        /// Unmaps a range previously mapped from <paramref name="pageList"/>,
+        /// verifying the supplied pages are exactly the ones currently mapped.
+        /// </summary>
+        public Result UnmapPages(ulong address, KPageList pageList, MemoryState stateExpected)
+        {
+            ulong pagesCount = pageList.GetPagesCount();
+            ulong size = pagesCount * PageSize;
+
+            ulong endAddr = address + size;
+
+            ulong addrSpacePagesCount = (AddrSpaceEnd - AddrSpaceStart) / PageSize;
+
+            if (AddrSpaceStart > address)
+            {
+                return KernelResult.InvalidMemState;
+            }
+
+            if (addrSpacePagesCount < pagesCount)
+            {
+                return KernelResult.InvalidMemState;
+            }
+
+            // The -1 form keeps the comparison valid when the range ends
+            // exactly at the top of the address space.
+            if (endAddr - 1 > AddrSpaceEnd - 1)
+            {
+                return KernelResult.InvalidMemState;
+            }
+
+            lock (_blockManager)
+            {
+                KPageList currentPageList = new KPageList();
+
+                GetPhysicalRegions(address, size, currentPageList);
+
+                // The caller-provided pages must match what is actually mapped.
+                if (!currentPageList.IsEqual(pageList))
+                {
+                    return KernelResult.InvalidMemRange;
+                }
+
+                if (CheckRange(
+                    address,
+                    size,
+                    MemoryState.Mask,
+                    stateExpected,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState state,
+                    out _,
+                    out _))
+                {
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    Result result = Unmap(address, pagesCount);
+
+                    if (result == Result.Success)
+                    {
+                        _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+                    }
+
+                    return result;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Maps normal memory. Not implemented yet; currently a success stub.
+        /// </summary>
+        public Result MapNormalMemory(long address, long size, KMemoryPermission permission)
+        {
+            // TODO.
+            return Result.Success;
+        }
+
+        /// <summary>
+        /// Maps IO memory. Not implemented yet; currently a success stub.
+        /// </summary>
+        public Result MapIoMemory(long address, long size, KMemoryPermission permission)
+        {
+            // TODO.
+            return Result.Success;
+        }
+
+        /// <summary>
+        /// Allocates a virtual range inside the given region and maps pages there,
+        /// either from a caller-provided physical address or freshly allocated memory.
+        /// </summary>
+        /// <param name="address">Receives the chosen virtual address, or 0 on failure.</param>
+        public Result MapPages(
+            ulong pagesCount,
+            int alignment,
+            ulong srcPa,
+            bool paIsValid,
+            ulong regionStart,
+            ulong regionPagesCount,
+            MemoryState state,
+            KMemoryPermission permission,
+            out ulong address)
+        {
+            address = 0;
+
+            ulong regionSize = regionPagesCount * PageSize;
+
+            if (!CanContain(regionStart, regionSize, state))
+            {
+                return KernelResult.InvalidMemState;
+            }
+
+            // NOTE(review): this also rejects the case where the region is exactly
+            // as large as the request (<= rather than <) — confirm this is intended.
+            if (regionPagesCount <= pagesCount)
+            {
+                return KernelResult.OutOfMemory;
+            }
+
+            lock (_blockManager)
+            {
+                address = AllocateVa(regionStart, regionPagesCount, pagesCount, alignment);
+
+                if (address == 0)
+                {
+                    return KernelResult.OutOfMemory;
+                }
+
+                if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                {
+                    return KernelResult.OutOfResource;
+                }
+
+                Result result;
+
+                if (paIsValid)
+                {
+                    result = MapPages(address, pagesCount, srcPa, permission, MemoryMapFlags.Private);
+                }
+                else
+                {
+                    result = AllocateAndMapPages(address, pagesCount, permission);
+                }
+
+                if (result != Result.Success)
+                {
+                    return result;
+                }
+
+                _blockManager.InsertBlock(address, pagesCount, state, permission);
+            }
+
+            return Result.Success;
+        }
+
+        /// <summary>
+        /// Allocates memory and maps it at a fixed, currently unmapped virtual address.
+        /// </summary>
+        public Result MapPages(ulong address, ulong pagesCount, MemoryState state, KMemoryPermission permission)
+        {
+            ulong size = pagesCount * PageSize;
+
+            if (!CanContain(address, size, state))
+            {
+                return KernelResult.InvalidMemState;
+            }
+
+            lock (_blockManager)
+            {
+                if (!IsUnmapped(address, size))
+                {
+                    return KernelResult.InvalidMemState;
+                }
+
+                if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                {
+                    return KernelResult.OutOfResource;
+                }
+
+                Result result = AllocateAndMapPages(address, pagesCount, permission);
+
+                if (result == Result.Success)
+                {
+                    _blockManager.InsertBlock(address, pagesCount, state, permission);
+                }
+
+                return result;
+            }
+        }
+
+        /// <summary>
+        /// Allocates pages from the current memory region and maps them at
+        /// <paramref name="address"/>.
+        /// </summary>
+        private Result AllocateAndMapPages(ulong address, ulong pagesCount, KMemoryPermission permission)
+        {
+            KMemoryRegionManager region = GetMemoryRegionManager();
+
+            Result result = region.AllocatePages(out KPageList pageList, pagesCount);
+
+            if (result != Result.Success)
+            {
+                return result;
+            }
+
+            // Release the allocation reference on exit; the map operation
+            // presumably takes its own reference on success — see KScopedPageList.
+            using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
+
+            return MapPages(address, pageList, permission, MemoryMapFlags.Private);
+        }
+
+        /// <summary>
+        /// Borrows a read/write heap range at <paramref name="src"/> and maps it
+        /// at <paramref name="dst"/> as static module code memory.
+        /// </summary>
+        public Result MapProcessCodeMemory(ulong dst, ulong src, ulong size)
+        {
+            lock (_blockManager)
+            {
+                bool success = CheckRange(
+                    src,
+                    size,
+                    MemoryState.Mask,
+                    MemoryState.Heap,
+                    KMemoryPermission.Mask,
+                    KMemoryPermission.ReadAndWrite,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState state,
+                    out KMemoryPermission permission,
+                    out _);
+
+                success &= IsUnmapped(dst, size);
+
+                if (success)
+                {
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    ulong pagesCount = size / PageSize;
+
+                    Result result = MapMemory(src, dst, pagesCount, permission, KMemoryPermission.None);
+
+                    // Fix: the map result was previously ignored, so a failed mapping
+                    // would still update the block trackers and report success.
+                    if (result != Result.Success)
+                    {
+                        return result;
+                    }
+
+                    _blockManager.InsertBlock(src, pagesCount, state, KMemoryPermission.None, MemoryAttribute.Borrowed);
+                    _blockManager.InsertBlock(dst, pagesCount, MemoryState.ModCodeStatic);
+
+                    return Result.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Undoes <c>MapProcessCodeMemory</c>: unmaps the code alias at
+        /// <paramref name="dst"/> and returns the borrowed heap at
+        /// <paramref name="src"/> to read/write state.
+        /// </summary>
+        public Result UnmapProcessCodeMemory(ulong dst, ulong src, ulong size)
+        {
+            lock (_blockManager)
+            {
+                // src must be borrowed heap memory.
+                bool success = CheckRange(
+                    src,
+                    size,
+                    MemoryState.Mask,
+                    MemoryState.Heap,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.Borrowed,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out _,
+                    out _,
+                    out _);
+
+                // Probe the state of the first dst page...
+                success &= CheckRange(
+                    dst,
+                    PageSize,
+                    MemoryState.UnmapProcessCodeMemoryAllowed,
+                    MemoryState.UnmapProcessCodeMemoryAllowed,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState state,
+                    out _,
+                    out _);
+
+                // ...then require the whole dst range to share that state.
+                success &= CheckRange(
+                    dst,
+                    size,
+                    MemoryState.Mask,
+                    state,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None);
+
+                if (success)
+                {
+                    ulong pagesCount = size / PageSize;
+
+                    Result result = Unmap(dst, pagesCount);
+
+                    if (result != Result.Success)
+                    {
+                        return result;
+                    }
+
+                    // TODO: Missing some checks here.
+
+                    // NOTE(review): this resource check happens after the unmap has
+                    // already been performed — confirm whether it should come first.
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
+                    _blockManager.InsertBlock(src, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite);
+
+                    return Result.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Grows or shrinks the process heap to <paramref name="size"/> bytes,
+        /// reserving/releasing the memory resource limit accordingly.
+        /// </summary>
+        /// <param name="address">Receives the heap region start address on success.</param>
+        public Result SetHeapSize(ulong size, out ulong address)
+        {
+            address = 0;
+
+            if (size > HeapRegionEnd - HeapRegionStart)
+            {
+                return KernelResult.OutOfMemory;
+            }
+
+            KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+            lock (_blockManager)
+            {
+                ulong currentHeapSize = GetHeapSize();
+
+                if (currentHeapSize <= size)
+                {
+                    // Expand.
+                    ulong sizeDelta = size - currentHeapSize;
+
+                    // Reserve the additional memory against the process resource limit.
+                    if (currentProcess.ResourceLimit != null && sizeDelta != 0 &&
+                        !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, sizeDelta))
+                    {
+                        return KernelResult.ResLimitExceeded;
+                    }
+
+                    ulong pagesCount = sizeDelta / PageSize;
+
+                    KMemoryRegionManager region = GetMemoryRegionManager();
+
+                    Result result = region.AllocatePages(out KPageList pageList, pagesCount);
+
+                    // Drop the allocation reference when leaving this scope.
+                    using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
+
+                    // Releases the reservation made above on any failure path.
+                    void CleanUpForError()
+                    {
+                        if (currentProcess.ResourceLimit != null && sizeDelta != 0)
+                        {
+                            currentProcess.ResourceLimit.Release(LimitableResource.Memory, sizeDelta);
+                        }
+                    }
+
+                    if (result != Result.Success)
+                    {
+                        CleanUpForError();
+
+                        return result;
+                    }
+
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                    {
+                        CleanUpForError();
+
+                        return KernelResult.OutOfResource;
+                    }
+
+                    if (!IsUnmapped(_currentHeapAddr, sizeDelta))
+                    {
+                        CleanUpForError();
+
+                        return KernelResult.InvalidMemState;
+                    }
+
+                    // New heap pages are filled with the configured heap fill value.
+                    result = MapPages(_currentHeapAddr, pageList, KMemoryPermission.ReadAndWrite, MemoryMapFlags.Private, true, (byte)_heapFillValue);
+
+                    if (result != Result.Success)
+                    {
+                        CleanUpForError();
+
+                        return result;
+                    }
+
+                    _blockManager.InsertBlock(_currentHeapAddr, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite);
+                }
+                else
+                {
+                    // Shrink.
+                    ulong freeAddr = HeapRegionStart + size;
+                    ulong sizeDelta = currentHeapSize - size;
+
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    // The range being freed must be plain read/write heap memory.
+                    if (!CheckRange(
+                        freeAddr,
+                        sizeDelta,
+                        MemoryState.Mask,
+                        MemoryState.Heap,
+                        KMemoryPermission.Mask,
+                        KMemoryPermission.ReadAndWrite,
+                        MemoryAttribute.Mask,
+                        MemoryAttribute.None,
+                        MemoryAttribute.IpcAndDeviceMapped,
+                        out _,
+                        out _,
+                        out _))
+                    {
+                        return KernelResult.InvalidMemState;
+                    }
+
+                    ulong pagesCount = sizeDelta / PageSize;
+
+                    Result result = Unmap(freeAddr, pagesCount);
+
+                    if (result != Result.Success)
+                    {
+                        return result;
+                    }
+
+                    currentProcess.ResourceLimit?.Release(LimitableResource.Memory, sizeDelta);
+
+                    _blockManager.InsertBlock(freeAddr, pagesCount, MemoryState.Unmapped);
+                }
+
+                _currentHeapAddr = HeapRegionStart + size;
+            }
+
+            address = HeapRegionStart;
+
+            return Result.Success;
+        }
+
+        /// <summary>
+        /// Changes the permission of a range whose state allows permission changes.
+        /// No-op when the permission is already the requested one.
+        /// </summary>
+        public Result SetMemoryPermission(ulong address, ulong size, KMemoryPermission permission)
+        {
+            lock (_blockManager)
+            {
+                if (CheckRange(
+                    address,
+                    size,
+                    MemoryState.PermissionChangeAllowed,
+                    MemoryState.PermissionChangeAllowed,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState oldState,
+                    out KMemoryPermission oldPermission,
+                    out _))
+                {
+                    if (permission != oldPermission)
+                    {
+                        if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                        {
+                            return KernelResult.OutOfResource;
+                        }
+
+                        ulong pagesCount = size / PageSize;
+
+                        Result result = Reprotect(address, pagesCount, permission);
+
+                        if (result != Result.Success)
+                        {
+                            return result;
+                        }
+
+                        _blockManager.InsertBlock(address, pagesCount, oldState, permission);
+                    }
+
+                    return Result.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Gets the heap size plus the tracked physical memory usage.
+        /// </summary>
+        public ulong GetTotalHeapSize()
+        {
+            lock (_blockManager)
+            {
+                ulong heapSize = GetHeapSize();
+
+                return heapSize + PhysicalMemoryUsage;
+            }
+        }
+
+        // Current heap size: distance from the heap region start to the heap break.
+        private ulong GetHeapSize() => _currentHeapAddr - HeapRegionStart;
+
+        /// <summary>
+        /// Updates the stored heap capacity value.
+        /// </summary>
+        public Result SetHeapCapacity(ulong capacity)
+        {
+            // Take the block manager lock for consistency with other heap state updates.
+            lock (_blockManager)
+            {
+                _heapCapacity = capacity;
+            }
+
+            return Result.Success;
+        }
+
+        /// <summary>
+        /// Updates the attribute bits selected by <paramref name="attributeMask"/>
+        /// on a range whose state allows attribute changes.
+        /// </summary>
+        public Result SetMemoryAttribute(
+            ulong address,
+            ulong size,
+            MemoryAttribute attributeMask,
+            MemoryAttribute attributeValue)
+        {
+            lock (_blockManager)
+            {
+                if (CheckRange(
+                    address,
+                    size,
+                    MemoryState.AttributeChangeAllowed,
+                    MemoryState.AttributeChangeAllowed,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.BorrowedAndIpcMapped,
+                    MemoryAttribute.None,
+                    MemoryAttribute.DeviceMappedAndUncached,
+                    out MemoryState state,
+                    out KMemoryPermission permission,
+                    out MemoryAttribute attribute))
+                {
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    ulong pagesCount = size / PageSize;
+
+                    // Clear the masked bits, then set them to the requested value.
+                    attribute &= ~attributeMask;
+                    attribute |= attributeMask & attributeValue;
+
+                    _blockManager.InsertBlock(address, pagesCount, state, permission, attribute);
+
+                    return Result.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Queries information about the memory block containing <paramref name="address"/>.
+        /// Addresses outside the address space report a reserved region covering
+        /// everything above it.
+        /// </summary>
+        public KMemoryInfo QueryMemory(ulong address)
+        {
+            bool outsideAddrSpace = address < AddrSpaceStart || address >= AddrSpaceEnd;
+
+            if (outsideAddrSpace)
+            {
+                return new KMemoryInfo(
+                    AddrSpaceEnd,
+                    ~AddrSpaceEnd + 1,
+                    MemoryState.Reserved,
+                    KMemoryPermission.None,
+                    MemoryAttribute.None,
+                    KMemoryPermission.None,
+                    0,
+                    0);
+            }
+
+            lock (_blockManager)
+            {
+                return _blockManager.FindBlock(address).GetInfo();
+            }
+        }
+
+        /// <summary>
+        /// Borrows a read/write mappable range at <paramref name="src"/> and maps
+        /// it at <paramref name="dst"/> as stack memory.
+        /// </summary>
+        public Result Map(ulong dst, ulong src, ulong size)
+        {
+            bool success;
+
+            lock (_blockManager)
+            {
+                success = CheckRange(
+                    src,
+                    size,
+                    MemoryState.MapAllowed,
+                    MemoryState.MapAllowed,
+                    KMemoryPermission.Mask,
+                    KMemoryPermission.ReadAndWrite,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState srcState,
+                    out _,
+                    out _);
+
+                success &= IsUnmapped(dst, size);
+
+                if (success)
+                {
+                    // Two ranges change, so twice the usual block budget is needed.
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    ulong pagesCount = size / PageSize;
+
+                    Result result = MapMemory(src, dst, pagesCount, KMemoryPermission.ReadAndWrite, KMemoryPermission.ReadAndWrite);
+
+                    if (result != Result.Success)
+                    {
+                        return result;
+                    }
+
+                    _blockManager.InsertBlock(src, pagesCount, srcState, KMemoryPermission.None, MemoryAttribute.Borrowed);
+                    _blockManager.InsertBlock(dst, pagesCount, MemoryState.Stack, KMemoryPermission.ReadAndWrite);
+
+                    return Result.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Unmaps a range on behalf of the kernel, requiring it to be in the
+        /// expected state.
+        /// </summary>
+        public Result UnmapForKernel(ulong address, ulong pagesCount, MemoryState stateExpected)
+        {
+            ulong size = pagesCount * PageSize;
+
+            lock (_blockManager)
+            {
+                if (CheckRange(
+                    address,
+                    size,
+                    MemoryState.Mask,
+                    stateExpected,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out _,
+                    out _,
+                    out _))
+                {
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    Result result = Unmap(address, pagesCount);
+
+                    if (result == Result.Success)
+                    {
+                        _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+                    }
+
+                    // Fix: previously returned Result.Success unconditionally,
+                    // silently discarding an Unmap failure.
+                    return result;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Undoes <c>Map</c>: unmaps the stack alias at <paramref name="dst"/> and
+        /// returns the borrowed range at <paramref name="src"/> to read/write state.
+        /// </summary>
+        public Result Unmap(ulong dst, ulong src, ulong size)
+        {
+            bool success;
+
+            lock (_blockManager)
+            {
+                // src must be a borrowed, mappable range.
+                success = CheckRange(
+                    src,
+                    size,
+                    MemoryState.MapAllowed,
+                    MemoryState.MapAllowed,
+                    KMemoryPermission.Mask,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.Borrowed,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState srcState,
+                    out _,
+                    out _);
+
+                // dst must be stack memory.
+                success &= CheckRange(
+                    dst,
+                    size,
+                    MemoryState.Mask,
+                    MemoryState.Stack,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out _,
+                    out KMemoryPermission dstPermission,
+                    out _);
+
+                if (success)
+                {
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    ulong pagesCount = size / PageSize;
+
+                    Result result = UnmapMemory(dst, src, pagesCount, dstPermission, KMemoryPermission.ReadAndWrite);
+
+                    if (result != Result.Success)
+                    {
+                        return result;
+                    }
+
+                    _blockManager.InsertBlock(src, pagesCount, srcState, KMemoryPermission.ReadAndWrite);
+                    _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
+
+                    return Result.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Unmaps a range of this page table that mirrors memory of another
+        /// process, after verifying both sides still refer to the same pages.
+        /// </summary>
+        public Result UnmapProcessMemory(ulong dst, ulong size, KPageTableBase srcPageTable, ulong src)
+        {
+            lock (_blockManager)
+            {
+                // NOTE(review): nested lock on another page table's block manager;
+                // presumably lock ordering is guaranteed by the callers — confirm.
+                lock (srcPageTable._blockManager)
+                {
+                    bool success = CheckRange(
+                        dst,
+                        size,
+                        MemoryState.Mask,
+                        MemoryState.ProcessMemory,
+                        KMemoryPermission.ReadAndWrite,
+                        KMemoryPermission.ReadAndWrite,
+                        MemoryAttribute.Mask,
+                        MemoryAttribute.None,
+                        MemoryAttribute.IpcAndDeviceMapped,
+                        out _,
+                        out _,
+                        out _);
+
+                    success &= srcPageTable.CheckRange(
+                        src,
+                        size,
+                        MemoryState.MapProcessAllowed,
+                        MemoryState.MapProcessAllowed,
+                        KMemoryPermission.None,
+                        KMemoryPermission.None,
+                        MemoryAttribute.Mask,
+                        MemoryAttribute.None,
+                        MemoryAttribute.IpcAndDeviceMapped,
+                        out _,
+                        out _,
+                        out _);
+
+                    if (!success)
+                    {
+                        return KernelResult.InvalidMemState;
+                    }
+
+                    KPageList srcPageList = new KPageList();
+                    KPageList dstPageList = new KPageList();
+
+                    srcPageTable.GetPhysicalRegions(src, size, srcPageList);
+                    GetPhysicalRegions(dst, size, dstPageList);
+
+                    // Both mappings must still point at the same physical pages.
+                    if (!dstPageList.IsEqual(srcPageList))
+                    {
+                        return KernelResult.InvalidMemRange;
+                    }
+                }
+
+                if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                {
+                    return KernelResult.OutOfResource;
+                }
+
+                ulong pagesCount = size / PageSize;
+
+                Result result = Unmap(dst, pagesCount);
+
+                if (result != Result.Success)
+                {
+                    return result;
+                }
+
+                _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
+
+                return Result.Success;
+            }
+        }
+
+        /// <summary>
+        /// Changes the permission of process code memory, switching static code
+        /// states to their mutable counterparts when write access is granted.
+        /// </summary>
+        public Result SetProcessMemoryPermission(ulong address, ulong size, KMemoryPermission permission)
+        {
+            lock (_blockManager)
+            {
+                if (CheckRange(
+                    address,
+                    size,
+                    MemoryState.ProcessPermissionChangeAllowed,
+                    MemoryState.ProcessPermissionChangeAllowed,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState oldState,
+                    out KMemoryPermission oldPermission,
+                    out _))
+                {
+                    MemoryState newState = oldState;
+
+                    // If writing into the code region is allowed, then we need
+                    // to change it to mutable.
+                    if ((permission & KMemoryPermission.Write) != 0)
+                    {
+                        if (oldState == MemoryState.CodeStatic)
+                        {
+                            newState = MemoryState.CodeMutable;
+                        }
+                        else if (oldState == MemoryState.ModCodeStatic)
+                        {
+                            newState = MemoryState.ModCodeMutable;
+                        }
+                        else
+                        {
+                            throw new InvalidOperationException($"Memory state \"{oldState}\" not valid for this operation.");
+                        }
+                    }
+
+                    if (newState != oldState || permission != oldPermission)
+                    {
+                        if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                        {
+                            return KernelResult.OutOfResource;
+                        }
+
+                        ulong pagesCount = size / PageSize;
+
+                        Result result;
+
+                        // Previously executable memory needs the attribute-aware reprotect.
+                        if ((oldPermission & KMemoryPermission.Execute) != 0)
+                        {
+                            result = ReprotectWithAttributes(address, pagesCount, permission);
+                        }
+                        else
+                        {
+                            result = Reprotect(address, pagesCount, permission);
+                        }
+
+                        if (result != Result.Success)
+                        {
+                            return result;
+                        }
+
+                        _blockManager.InsertBlock(address, pagesCount, newState, permission);
+                    }
+
+                    return Result.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+ // Backs every currently-unmapped page in [address, address + size) with newly allocated
+ // physical heap pages, charging the process memory resource limit for the newly mapped
+ // portion only. Pages already mapped in the range are left untouched.
+ public Result MapPhysicalMemory(ulong address, ulong size)
+ {
+ ulong endAddr = address + size;
+
+ lock (_blockManager)
+ {
+ // First pass: measure how much of the range is already mapped.
+ ulong mappedSize = 0;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State != MemoryState.Unmapped)
+ {
+ mappedSize += GetSizeInRange(info, address, endAddr);
+ }
+ }
+
+ // Fully mapped already; nothing to do.
+ if (mappedSize == size)
+ {
+ return Result.Success;
+ }
+
+ ulong remainingSize = size - mappedSize;
+
+ ulong remainingPages = remainingSize / PageSize;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (currentProcess.ResourceLimit != null &&
+ !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, remainingSize))
+ {
+ return KernelResult.ResLimitExceeded;
+ }
+
+ KMemoryRegionManager region = GetMemoryRegionManager();
+
+ Result result = region.AllocatePages(out KPageList pageList, remainingPages);
+
+ // Drop the extra page references taken by AllocatePages when we leave this scope;
+ // the mappings created below hold their own references.
+ using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
+
+ void CleanUpForError()
+ {
+ currentProcess.ResourceLimit?.Release(LimitableResource.Memory, remainingSize);
+ }
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfResource;
+ }
+
+ // Second pass: walk the unmapped holes and consume the allocated page list,
+ // splitting physical runs as needed to fill each hole.
+ LinkedListNode<KPageNode> pageListNode = pageList.Nodes.First;
+
+ KPageNode pageNode = pageListNode.Value;
+
+ ulong srcPa = pageNode.Address;
+ ulong srcPaPages = pageNode.PagesCount;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State != MemoryState.Unmapped)
+ {
+ continue;
+ }
+
+ ulong blockSize = GetSizeInRange(info, address, endAddr);
+
+ ulong dstVaPages = blockSize / PageSize;
+
+ ulong dstVa = GetAddrInRange(info, address);
+
+ while (dstVaPages > 0)
+ {
+ if (srcPaPages == 0)
+ {
+ pageListNode = pageListNode.Next;
+
+ pageNode = pageListNode.Value;
+
+ srcPa = pageNode.Address;
+ srcPaPages = pageNode.PagesCount;
+ }
+
+ ulong currentPagesCount = Math.Min(srcPaPages, dstVaPages);
+
+ MapPages(dstVa, currentPagesCount, srcPa, KMemoryPermission.ReadAndWrite, MemoryMapFlags.Private);
+
+ dstVa += currentPagesCount * PageSize;
+ srcPa += currentPagesCount * PageSize;
+ srcPaPages -= currentPagesCount;
+ dstVaPages -= currentPagesCount;
+ }
+ }
+
+ PhysicalMemoryUsage += remainingSize;
+
+ ulong pagesCount = size / PageSize;
+
+ // Mark only the previously-unmapped blocks in range as heap (filtered insert).
+ _blockManager.InsertBlock(
+ address,
+ pagesCount,
+ MemoryState.Unmapped,
+ KMemoryPermission.None,
+ MemoryAttribute.None,
+ MemoryState.Heap,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.None);
+ }
+
+ return Result.Success;
+ }
+
+ // Unmaps all heap-state memory inside [address, address + size) and returns the released
+ // amount to the process memory resource limit. The range may only contain attribute-free
+ // heap blocks and already-unmapped blocks; anything else fails with InvalidMemState.
+ public Result UnmapPhysicalMemory(ulong address, ulong size)
+ {
+ ulong endAddr = address + size;
+
+ lock (_blockManager)
+ {
+ // Scan, ensure that the region can be unmapped (all blocks are heap or
+ // already unmapped), fill pages list for freeing memory.
+ ulong heapMappedSize = 0;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State == MemoryState.Heap)
+ {
+ if (info.Attribute != MemoryAttribute.None)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong blockSize = GetSizeInRange(info, address, endAddr);
+
+ heapMappedSize += blockSize;
+ }
+ else if (info.State != MemoryState.Unmapped)
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+
+ if (heapMappedSize == 0)
+ {
+ return Result.Success;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ // Try to unmap all the heap mapped memory inside range.
+ Result result = Result.Success;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State == MemoryState.Heap)
+ {
+ ulong blockSize = GetSizeInRange(info, address, endAddr);
+ ulong blockAddress = GetAddrInRange(info, address);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ result = Unmap(blockAddress, blockPagesCount);
+
+ // The kernel would attempt to remap if this fails, but we don't because:
+ // - The implementation may not support remapping if memory aliasing is not supported on the platform.
+ // - Unmap can't ever fail here anyway.
+ Debug.Assert(result == Result.Success);
+ }
+ }
+
+ if (result == Result.Success)
+ {
+ PhysicalMemoryUsage -= heapMappedSize;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ currentProcess.ResourceLimit?.Release(LimitableResource.Memory, heapMappedSize);
+
+ ulong pagesCount = size / PageSize;
+
+ _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+ }
+
+ return result;
+ }
+ }
+
+ // Copies data from this (client) page table into the current (server) process, after
+ // validating the client range against the given state/permission/attribute expectations.
+ public Result CopyDataToCurrentProcess(
+ ulong dst,
+ ulong size,
+ ulong src,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permission,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected)
+ {
+ // Client -> server.
+ return CopyDataFromOrToCurrentProcess(
+ size,
+ src,
+ dst,
+ stateMask,
+ stateExpected,
+ permission,
+ attributeMask,
+ attributeExpected,
+ toServer: true);
+ }
+
+ // Copies data from the current (server) process into this (client) page table, after
+ // validating the client range against the given state/permission/attribute expectations.
+ public Result CopyDataFromCurrentProcess(
+ ulong dst,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permission,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ ulong src)
+ {
+ // Server -> client.
+ return CopyDataFromOrToCurrentProcess(
+ size,
+ dst,
+ src,
+ stateMask,
+ stateExpected,
+ permission,
+ attributeMask,
+ attributeExpected,
+ toServer: false);
+ }
+
+ // Shared implementation for IPC data copies between this (client) page table and the
+ // current (server) process, in the direction selected by toServer. The client range is
+ // bounds-checked against the address space and validated with CheckRange; the copy is
+ // done in chunks while holding the block manager lock.
+ private Result CopyDataFromOrToCurrentProcess(
+ ulong size,
+ ulong clientAddress,
+ ulong serverAddress,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permission,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ bool toServer)
+ {
+ if (AddrSpaceStart > clientAddress)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong srcEndAddr = clientAddress + size;
+
+ // Rejects zero size and overflow; the -1 comparison avoids wrapping at the space end.
+ if (srcEndAddr <= clientAddress || srcEndAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ clientAddress,
+ size,
+ stateMask,
+ stateExpected,
+ permission,
+ permission,
+ attributeMask | MemoryAttribute.Uncached,
+ attributeExpected))
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ while (size > 0)
+ {
+ ulong copySize = 0x100000; // Copy chunk size. Any value will do, moderate sizes are recommended.
+
+ if (copySize > size)
+ {
+ copySize = size;
+ }
+
+ if (toServer)
+ {
+ currentProcess.CpuMemory.Write(serverAddress, GetSpan(clientAddress, (int)copySize));
+ }
+ else
+ {
+ Write(clientAddress, currentProcess.CpuMemory.GetSpan(serverAddress, (int)copySize));
+ }
+
+ serverAddress += copySize;
+ clientAddress += copySize;
+ size -= copySize;
+ }
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ // Maps an IPC buffer from the client process (srcPageTable) into this process, returning
+ // the destination address in dst. The client's aligned pages are first reprotected so the
+ // client can't touch them while mapped, then marked with the IPC mapping permissions.
+ // NOTE(review): locks are taken as srcPageTable._blockManager then this._blockManager,
+ // the reverse of UnmapProcessMemory - see the lock-order note there.
+ public Result MapBufferFromClientProcess(
+ ulong size,
+ ulong src,
+ KPageTableBase srcPageTable,
+ KMemoryPermission permission,
+ MemoryState state,
+ bool send,
+ out ulong dst)
+ {
+ dst = 0;
+
+ lock (srcPageTable._blockManager)
+ {
+ lock (_blockManager)
+ {
+ Result result = srcPageTable.ReprotectClientProcess(
+ src,
+ size,
+ permission,
+ state,
+ out int blocksNeeded);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ if (!srcPageTable._slabManager.CanAllocate(blocksNeeded))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong srcMapAddress = BitUtils.AlignUp<ulong>(src, PageSize);
+ ulong srcMapEndAddr = BitUtils.AlignDown<ulong>(src + size, PageSize);
+ ulong srcMapSize = srcMapEndAddr - srcMapAddress;
+
+ result = MapPagesFromClientProcess(size, src, permission, state, srcPageTable, send, out ulong va);
+
+ if (result != Result.Success)
+ {
+ // Roll back the client-side reprotection done above.
+ if (srcMapEndAddr > srcMapAddress)
+ {
+ srcPageTable.UnmapIpcRestorePermission(src, size, state);
+ }
+
+ return result;
+ }
+
+ if (srcMapAddress < srcMapEndAddr)
+ {
+ KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite
+ ? KMemoryPermission.None
+ : KMemoryPermission.Read;
+
+ srcPageTable._blockManager.InsertBlock(srcMapAddress, srcMapSize / PageSize, SetIpcMappingPermissions, permissionMask);
+ }
+
+ dst = va;
+ }
+ }
+
+ return Result.Success;
+ }
+
+ // Validates and reprotects the client-side pages of an IPC buffer so they become
+ // inaccessible (or read-only) to the client while the server has them mapped. Also counts
+ // the extra memory blocks needed when the aligned range splits the first/last block
+ // (returned via blocksNeeded). Rolls the reprotection back on failure.
+ private Result ReprotectClientProcess(
+ ulong address,
+ ulong size,
+ KMemoryPermission permission,
+ MemoryState state,
+ out int blocksNeeded)
+ {
+ blocksNeeded = 0;
+
+ if (AddrSpaceStart > address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong endAddr = address + size;
+
+ if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ // The IPC buffer type selects which source states are allowed to be sent.
+ MemoryState stateMask;
+
+ switch (state)
+ {
+ case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break;
+ case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break;
+ case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break;
+
+ default: return KernelResult.InvalidCombination;
+ }
+
+ // RW buffers are made inaccessible on the client; read-only buffers stay readable.
+ KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite
+ ? KMemoryPermission.None
+ : KMemoryPermission.Read;
+
+ MemoryAttribute attributeMask = MemoryAttribute.Borrowed | MemoryAttribute.Uncached;
+
+ if (state == MemoryState.IpcBuffer0)
+ {
+ attributeMask |= MemoryAttribute.DeviceMapped;
+ }
+
+ ulong addressRounded = BitUtils.AlignUp<ulong>(address, PageSize);
+ ulong addressTruncated = BitUtils.AlignDown<ulong>(address, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp<ulong>(endAddr, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown<ulong>(endAddr, PageSize);
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong visitedSize = 0;
+
+ // Undo any reprotection already applied if we bail out partway through.
+ void CleanUpForError()
+ {
+ if (visitedSize == 0)
+ {
+ return;
+ }
+
+ ulong endAddrVisited = address + visitedSize;
+
+ foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrVisited))
+ {
+ if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0)
+ {
+ ulong blockAddress = GetAddrInRange(info, addressRounded);
+ ulong blockSize = GetSizeInRange(info, addressRounded, endAddrVisited);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ Result reprotectResult = Reprotect(blockAddress, blockPagesCount, info.Permission);
+ Debug.Assert(reprotectResult == Result.Success);
+ }
+ }
+ }
+
+ // Signal a read for any resources tracking reads in the region, as the other process is likely to use their data.
+ SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, false);
+
+ // Reprotect the aligned pages range on the client to make them inaccessible from the client process.
+ Result result;
+
+ if (addressRounded < endAddrTruncated)
+ {
+ foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated))
+ {
+ // Check if the block state matches what we expect.
+ if ((info.State & stateMask) != stateMask ||
+ (info.Permission & permission) != permission ||
+ (info.Attribute & attributeMask) != MemoryAttribute.None)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong blockAddress = GetAddrInRange(info, addressRounded);
+ ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ // If the first block starts before the aligned range, it will need to be split.
+ if (info.Address < addressRounded)
+ {
+ blocksNeeded++;
+ }
+
+ // If the last block ends after the aligned range, it will need to be split.
+ if (endAddrTruncated - 1 < info.Address + info.Size - 1)
+ {
+ blocksNeeded++;
+ }
+
+ if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0)
+ {
+ result = Reprotect(blockAddress, blockPagesCount, permissionMask);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+ }
+
+ visitedSize += blockSize;
+ }
+ }
+
+ return Result.Success;
+ }
+
+ // Maps an IPC buffer's pages from the client (srcPageTable) into this process's alias
+ // region. Unaligned head/tail portions are backed by freshly allocated pages: for "send"
+ // buffers the client data is copied in and padding filled with the IPC fill value; for
+ // receive buffers the whole page is filled. The fully aligned middle is shared directly.
+ // On success dst holds the server-side address corresponding to the client address.
+ private Result MapPagesFromClientProcess(
+ ulong size,
+ ulong address,
+ KMemoryPermission permission,
+ MemoryState state,
+ KPageTableBase srcPageTable,
+ bool send,
+ out ulong dst)
+ {
+ dst = 0;
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong endAddr = address + size;
+
+ ulong addressTruncated = BitUtils.AlignDown<ulong>(address, PageSize);
+ ulong addressRounded = BitUtils.AlignUp<ulong>(address, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown<ulong>(endAddr, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp<ulong>(endAddr, PageSize);
+
+ ulong neededSize = endAddrRounded - addressTruncated;
+
+ ulong neededPagesCount = neededSize / PageSize;
+
+ ulong regionPagesCount = (AliasRegionEnd - AliasRegionStart) / PageSize;
+
+ ulong va = 0;
+
+ // Try the largest mapping unit alignment first, falling back to smaller ones.
+ for (int unit = MappingUnitSizes.Length - 1; unit >= 0 && va == 0; unit--)
+ {
+ int alignment = MappingUnitSizes[unit];
+
+ va = AllocateVa(AliasRegionStart, regionPagesCount, neededPagesCount, alignment);
+ }
+
+ if (va == 0)
+ {
+ return KernelResult.OutOfVaSpace;
+ }
+
+ ulong dstFirstPagePa = 0;
+ ulong dstLastPagePa = 0;
+ ulong currentVa = va;
+
+ // Release our references to the head/tail pages on exit; successful mappings hold
+ // their own references.
+ using var _ = new OnScopeExit(() =>
+ {
+ if (dstFirstPagePa != 0)
+ {
+ Context.MemoryManager.DecrementPagesReferenceCount(dstFirstPagePa, 1);
+ }
+
+ if (dstLastPagePa != 0)
+ {
+ Context.MemoryManager.DecrementPagesReferenceCount(dstLastPagePa, 1);
+ }
+ });
+
+ // Unmap whatever was mapped so far if a later step fails.
+ void CleanUpForError()
+ {
+ if (currentVa != va)
+ {
+ Unmap(va, (currentVa - va) / PageSize);
+ }
+ }
+
+ // Is the first page address aligned?
+ // If not, allocate a new page and copy the unaligned chunk.
+ if (addressTruncated < addressRounded)
+ {
+ dstFirstPagePa = GetMemoryRegionManager().AllocatePagesContiguous(Context, 1, _aslrDisabled);
+
+ if (dstFirstPagePa == 0)
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfMemory;
+ }
+ }
+
+ // Is the last page end address aligned?
+ // If not, allocate a new page and copy the unaligned chunk.
+ if (endAddrTruncated < endAddrRounded && (addressTruncated == addressRounded || addressTruncated < endAddrTruncated))
+ {
+ dstLastPagePa = GetMemoryRegionManager().AllocatePagesContiguous(Context, 1, _aslrDisabled);
+
+ if (dstLastPagePa == 0)
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfMemory;
+ }
+ }
+
+ if (dstFirstPagePa != 0)
+ {
+ ulong firstPageFillAddress = dstFirstPagePa;
+ ulong unusedSizeAfter;
+
+ if (send)
+ {
+ ulong unusedSizeBefore = address - addressTruncated;
+
+ Context.Memory.Fill(GetDramAddressFromPa(dstFirstPagePa), unusedSizeBefore, (byte)_ipcFillValue);
+
+ // Buffer may fit entirely inside the first page.
+ ulong copySize = addressRounded <= endAddr ? addressRounded - address : size;
+ var data = srcPageTable.GetSpan(addressTruncated + unusedSizeBefore, (int)copySize);
+
+ Context.Memory.Write(GetDramAddressFromPa(dstFirstPagePa + unusedSizeBefore), data);
+
+ firstPageFillAddress += unusedSizeBefore + copySize;
+
+ unusedSizeAfter = addressRounded > endAddr ? addressRounded - endAddr : 0;
+ }
+ else
+ {
+ unusedSizeAfter = PageSize;
+ }
+
+ if (unusedSizeAfter != 0)
+ {
+ Context.Memory.Fill(GetDramAddressFromPa(firstPageFillAddress), unusedSizeAfter, (byte)_ipcFillValue);
+ }
+
+ Result result = MapPages(currentVa, 1, dstFirstPagePa, permission, MemoryMapFlags.Private);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ currentVa += PageSize;
+ }
+
+ // Map the fully page-aligned middle of the buffer directly from the client.
+ if (endAddrTruncated > addressRounded)
+ {
+ ulong alignedSize = endAddrTruncated - addressRounded;
+
+ Result result;
+
+ if (srcPageTable.Supports4KBPages)
+ {
+ KPageList pageList = new KPageList();
+ srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
+
+ result = MapPages(currentVa, pageList, permission, MemoryMapFlags.None);
+ }
+ else
+ {
+ result = MapForeign(srcPageTable.GetHostRegions(addressRounded, alignedSize), currentVa, alignedSize);
+ }
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ currentVa += alignedSize;
+ }
+
+ if (dstLastPagePa != 0)
+ {
+ ulong lastPageFillAddr = dstLastPagePa;
+ ulong unusedSizeAfter;
+
+ if (send)
+ {
+ ulong copySize = endAddr - endAddrTruncated;
+ var data = srcPageTable.GetSpan(endAddrTruncated, (int)copySize);
+
+ Context.Memory.Write(GetDramAddressFromPa(dstLastPagePa), data);
+
+ lastPageFillAddr += copySize;
+
+ unusedSizeAfter = PageSize - copySize;
+ }
+ else
+ {
+ unusedSizeAfter = PageSize;
+ }
+
+ Context.Memory.Fill(GetDramAddressFromPa(lastPageFillAddr), unusedSizeAfter, (byte)_ipcFillValue);
+
+ Result result = MapPages(currentVa, 1, dstLastPagePa, permission, MemoryMapFlags.Private);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+ }
+
+ _blockManager.InsertBlock(va, neededPagesCount, state, permission);
+
+ // Preserve the client address's sub-page offset on the server side.
+ dst = va + (address - addressTruncated);
+
+ return Result.Success;
+ }
+
+ // Unmaps the page-aligned extent of [address, address + size) if the whole range has the
+ // given state, at least Read permission, and no attributes set.
+ public Result UnmapNoAttributeIfStateEquals(ulong address, ulong size, MemoryState state)
+ {
+ if (AddrSpaceStart > address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong endAddr = address + size;
+
+ if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ state,
+ KMemoryPermission.Read,
+ KMemoryPermission.Read,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _))
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong addressTruncated = BitUtils.AlignDown<ulong>(address, PageSize);
+ ulong addressRounded = BitUtils.AlignUp<ulong>(address, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown<ulong>(endAddr, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp<ulong>(endAddr, PageSize);
+
+ // Unmap the widened (page-aligned outward) range.
+ ulong pagesCount = (endAddrRounded - addressTruncated) / PageSize;
+
+ Result result = Unmap(addressTruncated, pagesCount);
+
+ if (result == Result.Success)
+ {
+ _blockManager.InsertBlock(addressTruncated, pagesCount, MemoryState.Unmapped);
+ }
+
+ return result;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ // Restores the client-side permissions of an IPC buffer after the server unmaps it.
+ // Only the fully contained aligned pages are touched; permissions are restored when the
+ // last IPC reference is about to go away (IpcRefCount == 1).
+ public Result UnmapIpcRestorePermission(ulong address, ulong size, MemoryState state)
+ {
+ ulong endAddr = address + size;
+
+ ulong addressRounded = BitUtils.AlignUp<ulong>(address, PageSize);
+ ulong addressTruncated = BitUtils.AlignDown<ulong>(address, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp<ulong>(endAddr, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown<ulong>(endAddr, PageSize);
+
+ ulong pagesCount = addressRounded < endAddrTruncated ? (endAddrTruncated - addressRounded) / PageSize : 0;
+
+ // No whole page is contained in the range; nothing was reprotected on map.
+ if (pagesCount == 0)
+ {
+ return Result.Success;
+ }
+
+ MemoryState stateMask;
+
+ switch (state)
+ {
+ case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break;
+ case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break;
+ case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break;
+
+ default: return KernelResult.InvalidCombination;
+ }
+
+ MemoryAttribute attributeMask =
+ MemoryAttribute.Borrowed |
+ MemoryAttribute.IpcMapped |
+ MemoryAttribute.Uncached;
+
+ if (state == MemoryState.IpcBuffer0)
+ {
+ attributeMask |= MemoryAttribute.DeviceMapped;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ // Anything on the client side should see this memory as modified.
+ SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, true);
+
+ lock (_blockManager)
+ {
+ foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated))
+ {
+ // Check if the block state matches what we expect.
+ if ((info.State & stateMask) != stateMask ||
+ (info.Attribute & attributeMask) != MemoryAttribute.IpcMapped)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (info.Permission != info.SourcePermission && info.IpcRefCount == 1)
+ {
+ ulong blockAddress = GetAddrInRange(info, addressRounded);
+ ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ Result result = Reprotect(blockAddress, blockPagesCount, info.SourcePermission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+ }
+ }
+
+ _blockManager.InsertBlock(addressRounded, pagesCount, RestoreIpcMappingPermissions);
+
+ return Result.Success;
+ }
+ }
+
+ // Block-update callback: records the IPC mapping permission on a memory block.
+ private static void SetIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission)
+ {
+ block.SetIpcMappingPermission(permission);
+ }
+
+ // Block-update callback: reverts a block's IPC mapping permission. The permission
+ // parameter is unused; presumably kept to match the callback signature - verify.
+ private static void RestoreIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission)
+ {
+ block.RestoreIpcMappingPermission();
+ }
+
+ // Fills pageList with the physical regions backing [address, address + size), but only
+ // if the whole range matches the given state/permission/attribute expectations (and is
+ // pool allocated).
+ public Result GetPagesIfStateEquals(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ KPageList pageList)
+ {
+ if (!InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ stateMask | MemoryState.IsPoolAllocated,
+ stateExpected | MemoryState.IsPoolAllocated,
+ permissionMask,
+ permissionExpected,
+ attributeMask,
+ attributeExpected,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _))
+ {
+ GetPhysicalRegions(address, size, pageList);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ // Marks an IPC buffer range as borrowed (attribute set, no permission change).
+ public Result BorrowIpcBuffer(ulong address, ulong size)
+ {
+ return SetAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.IpcBufferAllowed,
+ MemoryState.IpcBufferAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Borrowed);
+ }
+
+ // Marks a transfer memory range as borrowed with the given new permission, recording its
+ // backing physical pages in pageList for the later unborrow check.
+ public Result BorrowTransferMemory(KPageList pageList, ulong address, ulong size, KMemoryPermission permission)
+ {
+ return SetAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.TransferMemoryAllowed,
+ MemoryState.TransferMemoryAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ permission,
+ MemoryAttribute.Borrowed,
+ pageList);
+ }
+
+ // Marks a code memory range as borrowed, recording its backing physical pages in
+ // pageList for the later unborrow check.
+ public Result BorrowCodeMemory(KPageList pageList, ulong address, ulong size)
+ {
+ return SetAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.CodeMemoryAllowed,
+ MemoryState.CodeMemoryAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Borrowed,
+ pageList);
+ }
+
+ // Shared implementation for the Borrow* operations: validates the range, optionally
+ // records its physical pages into pageList, reprotects it to newPermission (None keeps
+ // the old permission), and ORs attributeSetMask into the range's attributes.
+ private Result SetAttributesAndChangePermission(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ KMemoryPermission newPermission,
+ MemoryAttribute attributeSetMask,
+ KPageList pageList = null)
+ {
+ if (address + size <= address || !InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ stateMask | MemoryState.IsPoolAllocated,
+ stateExpected | MemoryState.IsPoolAllocated,
+ permissionMask,
+ permissionExpected,
+ attributeMask,
+ attributeExpected,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState oldState,
+ out KMemoryPermission oldPermission,
+ out MemoryAttribute oldAttribute))
+ {
+ ulong pagesCount = size / PageSize;
+
+ if (pageList != null)
+ {
+ GetPhysicalRegions(address, pagesCount * PageSize, pageList);
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ // None means "keep the current permission".
+ if (newPermission == KMemoryPermission.None)
+ {
+ newPermission = oldPermission;
+ }
+
+ if (newPermission != oldPermission)
+ {
+ Result result = Reprotect(address, pagesCount, newPermission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+ }
+
+ MemoryAttribute newAttribute = oldAttribute | attributeSetMask;
+
+ _blockManager.InsertBlock(address, pagesCount, oldState, newPermission, newAttribute);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ // Clears the borrowed attribute on an IPC buffer range and restores RW permission.
+ public Result UnborrowIpcBuffer(ulong address, ulong size)
+ {
+ return ClearAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.IpcBufferAllowed,
+ MemoryState.IpcBufferAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Borrowed);
+ }
+
+ // Clears the borrowed attribute on a transfer memory range and restores RW permission,
+ // verifying the backing physical pages still match pageList from the borrow.
+ public Result UnborrowTransferMemory(ulong address, ulong size, KPageList pageList)
+ {
+ return ClearAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.TransferMemoryAllowed,
+ MemoryState.TransferMemoryAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Borrowed,
+ pageList);
+ }
+
+ // Clears the borrowed attribute on a code memory range and restores RW permission,
+ // verifying the backing physical pages still match pageList from the borrow.
+ public Result UnborrowCodeMemory(ulong address, ulong size, KPageList pageList)
+ {
+ return ClearAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.CodeMemoryAllowed,
+ MemoryState.CodeMemoryAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Borrowed,
+ pageList);
+ }
+
+ // Shared implementation for the Unborrow* operations: validates the range, optionally
+ // checks that its physical pages still equal pageList, reprotects it to newPermission
+ // (None keeps the old permission), and clears attributeClearMask from its attributes.
+ private Result ClearAttributesAndChangePermission(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ KMemoryPermission newPermission,
+ MemoryAttribute attributeClearMask,
+ KPageList pageList = null)
+ {
+ if (address + size <= address || !InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ stateMask | MemoryState.IsPoolAllocated,
+ stateExpected | MemoryState.IsPoolAllocated,
+ permissionMask,
+ permissionExpected,
+ attributeMask,
+ attributeExpected,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState oldState,
+ out KMemoryPermission oldPermission,
+ out MemoryAttribute oldAttribute))
+ {
+ ulong pagesCount = size / PageSize;
+
+ // The range must still be backed by the same pages recorded at borrow time.
+ if (pageList != null)
+ {
+ KPageList currentPageList = new KPageList();
+
+ GetPhysicalRegions(address, pagesCount * PageSize, currentPageList);
+
+ if (!currentPageList.IsEqual(pageList))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ // None means "keep the current permission".
+ if (newPermission == KMemoryPermission.None)
+ {
+ newPermission = oldPermission;
+ }
+
+ if (newPermission != oldPermission)
+ {
+ Result result = Reprotect(address, pagesCount, newPermission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+ }
+
+ MemoryAttribute newAttribute = oldAttribute & ~attributeClearMask;
+
+ _blockManager.InsertBlock(address, pagesCount, oldState, newPermission, newAttribute);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ // Returns the block's start address clamped to the range start.
+ private static ulong GetAddrInRange(KMemoryInfo info, ulong start)
+ {
+ if (info.Address < start)
+ {
+ return start;
+ }
+
+ return info.Address;
+ }
+
+ // Returns how many bytes of the block fall inside [start, end): the block size minus
+ // any portion before start and any portion past end.
+ private static ulong GetSizeInRange(KMemoryInfo info, ulong start, ulong end)
+ {
+ ulong endAddr = info.Size + info.Address;
+ ulong size = info.Size;
+
+ if (info.Address < start)
+ {
+ size -= start - info.Address;
+ }
+
+ if (endAddr > end)
+ {
+ size -= endAddr - end;
+ }
+
+ return size;
+ }
+
+ // Returns true when the whole range is unmapped (no state, permission, or attributes).
+ private bool IsUnmapped(ulong address, ulong size)
+ {
+ return CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ MemoryState.Unmapped,
+ KMemoryPermission.Mask,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _);
+ }
+
+ // Validates that the entire range shares a single state, permission, and attribute set
+ // (ignoring attributeIgnoreMask bits) and that those values match the expected masks.
+ // On success the common values are returned through the out parameters; on failure the
+ // outs are zeroed. Caller must hold the _blockManager lock.
+ private bool CheckRange(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ MemoryAttribute attributeIgnoreMask,
+ out MemoryState outState,
+ out KMemoryPermission outPermission,
+ out MemoryAttribute outAttribute)
+ {
+ ulong endAddr = address + size;
+
+ KMemoryBlock currBlock = _blockManager.FindBlock(address);
+
+ KMemoryInfo info = currBlock.GetInfo();
+
+ // Every block in the range must agree with the first one.
+ MemoryState firstState = info.State;
+ KMemoryPermission firstPermission = info.Permission;
+ MemoryAttribute firstAttribute = info.Attribute;
+
+ do
+ {
+ info = currBlock.GetInfo();
+
+ // Check if the block state matches what we expect.
+ if (firstState != info.State ||
+ firstPermission != info.Permission ||
+ (info.Attribute & attributeMask) != attributeExpected ||
+ (firstAttribute | attributeIgnoreMask) != (info.Attribute | attributeIgnoreMask) ||
+ (firstState & stateMask) != stateExpected ||
+ (firstPermission & permissionMask) != permissionExpected)
+ {
+ outState = MemoryState.Unmapped;
+ outPermission = KMemoryPermission.None;
+ outAttribute = MemoryAttribute.None;
+
+ return false;
+ }
+ }
+ while (info.Address + info.Size - 1 < endAddr - 1 && (currBlock = currBlock.Successor) != null);
+
+ outState = firstState;
+ outPermission = firstPermission;
+ outAttribute = firstAttribute & ~attributeIgnoreMask;
+
+ return true;
+ }
+
+ // Lighter overload: each block in the range only needs to individually match the
+ // expected masks; blocks are not required to agree with one another.
+ // Caller must hold the _blockManager lock.
+ private bool CheckRange(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected)
+ {
+ foreach (KMemoryInfo info in IterateOverRange(address, address + size))
+ {
+ // Check if the block state matches what we expect.
+ if ((info.State & stateMask) != stateExpected ||
+ (info.Permission & permissionMask) != permissionExpected ||
+ (info.Attribute & attributeMask) != attributeExpected)
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Lazily yields the info of every memory block overlapping [start, end).
+ // Caller must hold the _blockManager lock for the whole enumeration.
+ private IEnumerable<KMemoryInfo> IterateOverRange(ulong start, ulong end)
+ {
+ KMemoryBlock currBlock = _blockManager.FindBlock(start);
+
+ KMemoryInfo info;
+
+ do
+ {
+ info = currBlock.GetInfo();
+
+ yield return info;
+ }
+ while (info.Address + info.Size - 1 < end - 1 && (currBlock = currBlock.Successor) != null);
+ }
+
+ // Picks a free virtual address for neededPagesCount pages inside the given region,
+ // honoring the alignment. When ASLR is enabled, up to 8 random candidates are tried
+ // before falling back to a first-fit search from a random offset; the final fallback is
+ // a plain first-fit from the region start. Returns 0 if no space was found.
+ private ulong AllocateVa(ulong regionStart, ulong regionPagesCount, ulong neededPagesCount, int alignment)
+ {
+ ulong address = 0;
+
+ ulong regionEndAddr = regionStart + regionPagesCount * PageSize;
+
+ // Pages kept as a guard gap before the allocation.
+ ulong reservedPagesCount = _isKernel ? 1UL : 4UL;
+
+ if (_aslrEnabled)
+ {
+ ulong totalNeededSize = (reservedPagesCount + neededPagesCount) * PageSize;
+
+ ulong remainingPages = regionPagesCount - neededPagesCount;
+
+ ulong aslrMaxOffset = ((remainingPages + reservedPagesCount) * PageSize) / (ulong)alignment;
+
+ for (int attempt = 0; attempt < 8; attempt++)
+ {
+ ulong aslrAddress = BitUtils.AlignDown(regionStart + GetRandomValue(0, aslrMaxOffset) * (ulong)alignment, (ulong)alignment);
+ ulong aslrEndAddr = aslrAddress + totalNeededSize;
+
+ KMemoryInfo info = _blockManager.FindBlock(aslrAddress).GetInfo();
+
+ if (info.State != MemoryState.Unmapped)
+ {
+ continue;
+ }
+
+ ulong currBaseAddr = info.Address + reservedPagesCount * PageSize;
+ ulong currEndAddr = info.Address + info.Size;
+
+ // The candidate plus its guard pages must fit inside this unmapped block
+ // and inside the region.
+ if (aslrAddress >= regionStart &&
+ aslrAddress >= currBaseAddr &&
+ aslrEndAddr - 1 <= regionEndAddr - 1 &&
+ aslrEndAddr - 1 <= currEndAddr - 1)
+ {
+ address = aslrAddress;
+ break;
+ }
+ }
+
+ if (address == 0)
+ {
+ ulong aslrPage = GetRandomValue(0, aslrMaxOffset);
+
+ address = FindFirstFit(
+ regionStart + aslrPage * PageSize,
+ regionPagesCount - aslrPage,
+ neededPagesCount,
+ alignment,
+ 0,
+ reservedPagesCount);
+ }
+ }
+
+ if (address == 0)
+ {
+ address = FindFirstFit(
+ regionStart,
+ regionPagesCount,
+ neededPagesCount,
+ alignment,
+ 0,
+ reservedPagesCount);
+ }
+
+ return address;
+ }
+
+ // Scans the region's block list for the first unmapped range able to hold
+ // neededPagesCount pages at the requested alignment, preceded by
+ // reservedPagesCount guard pages. reservedStart is added to the aligned base
+ // candidate. Returns the usable (post-guard) address, or 0 when nothing fits.
+ private ulong FindFirstFit(
+ ulong regionStart,
+ ulong regionPagesCount,
+ ulong neededPagesCount,
+ int alignment,
+ ulong reservedStart,
+ ulong reservedPagesCount)
+ {
+ ulong reservedSize = reservedPagesCount * PageSize;
+
+ ulong totalNeededSize = reservedSize + neededPagesCount * PageSize;
+
+ // Inclusive last byte of the region (avoids overflow at the address space top).
+ ulong regionEndAddr = (regionStart + regionPagesCount * PageSize) - 1;
+
+ KMemoryBlock currBlock = _blockManager.FindBlock(regionStart);
+
+ KMemoryInfo info = currBlock.GetInfo();
+
+ // Walk the ordered block list until we run past the region or off the list.
+ while (regionEndAddr >= info.Address)
+ {
+ if (info.State == MemoryState.Unmapped)
+ {
+ // Clamp the candidate base to the region start for the first block.
+ ulong currBaseAddr = info.Address <= regionStart ? regionStart : info.Address;
+ ulong currEndAddr = info.Address + info.Size - 1;
+
+ // Skip over the reserved guard pages.
+ currBaseAddr += reservedSize;
+
+ ulong address = BitUtils.AlignDown<ulong>(currBaseAddr, (ulong)alignment) + reservedStart;
+
+ // Aligning down may fall below the candidate base; bump up by one alignment unit.
+ if (currBaseAddr > address)
+ {
+ address += (ulong)alignment;
+ }
+
+ ulong allocationEndAddr = address + totalNeededSize - 1;
+
+ // Accept only if the allocation lies entirely within both this free
+ // block and the requested region (inclusive-end comparisons).
+ if (info.Address <= address &&
+ address < allocationEndAddr &&
+ allocationEndAddr <= regionEndAddr &&
+ allocationEndAddr <= currEndAddr)
+ {
+ return address;
+ }
+ }
+
+ currBlock = currBlock.Successor;
+
+ if (currBlock == null)
+ {
+ break;
+ }
+
+ info = currBlock.GetInfo();
+ }
+
+ // No suitable free range found.
+ return 0;
+ }
+
+ /// <summary>
+ /// Checks if a mapping with the given state may be placed at [address, address + size):
+ /// the range must lie inside the region assigned to that state, and most states are
+ /// additionally forbidden from overlapping the heap and/or alias regions.
+ /// </summary>
+ /// <param name="address">Base address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <param name="state">Memory state the range would be mapped with</param>
+ /// <returns>True if the range is acceptable for the state, false otherwise</returns>
+ /// <exception cref="ArgumentException">Thrown for an unrecognized state value</exception>
+ public bool CanContain(ulong address, ulong size, MemoryState state)
+ {
+ ulong endAddr = address + size;
+
+ ulong regionBaseAddr = GetBaseAddress(state);
+ ulong regionEndAddr = regionBaseAddr + GetSize(state);
+
+ // Range fully inside the state's assigned region (inclusive-end comparison avoids overflow).
+ bool InsideRegion()
+ {
+ return regionBaseAddr <= address &&
+ endAddr > address &&
+ endAddr - 1 <= regionEndAddr - 1;
+ }
+
+ // Range does not overlap the heap region.
+ bool OutsideHeapRegion()
+ {
+ return endAddr <= HeapRegionStart || address >= HeapRegionEnd;
+ }
+
+ // Range does not overlap the alias region.
+ bool OutsideAliasRegion()
+ {
+ return endAddr <= AliasRegionStart || address >= AliasRegionEnd;
+ }
+
+ switch (state)
+ {
+ case MemoryState.Io:
+ case MemoryState.Normal:
+ case MemoryState.CodeStatic:
+ case MemoryState.CodeMutable:
+ case MemoryState.SharedMemory:
+ case MemoryState.ModCodeStatic:
+ case MemoryState.ModCodeMutable:
+ case MemoryState.Stack:
+ case MemoryState.ThreadLocal:
+ case MemoryState.TransferMemoryIsolated:
+ case MemoryState.TransferMemory:
+ case MemoryState.ProcessMemory:
+ case MemoryState.CodeReadOnly:
+ case MemoryState.CodeWritable:
+ return InsideRegion() && OutsideHeapRegion() && OutsideAliasRegion();
+
+ case MemoryState.Heap:
+ return InsideRegion() && OutsideAliasRegion();
+
+ case MemoryState.IpcBuffer0:
+ case MemoryState.IpcBuffer1:
+ case MemoryState.IpcBuffer3:
+ return InsideRegion() && OutsideHeapRegion();
+
+ case MemoryState.KernelStack:
+ return InsideRegion();
+ }
+
+ throw new ArgumentException($"Invalid state value \"{state}\".");
+ }
+
+ // Returns the base address of the region that mappings of the given state are placed in.
+ // Throws ArgumentException for unrecognized state values.
+ private ulong GetBaseAddress(MemoryState state)
+ {
+ return state switch
+ {
+ MemoryState.Io or
+ MemoryState.Normal or
+ MemoryState.ThreadLocal => TlsIoRegionStart,
+
+ MemoryState.CodeStatic or
+ MemoryState.CodeMutable or
+ MemoryState.SharedMemory or
+ MemoryState.ModCodeStatic or
+ MemoryState.ModCodeMutable or
+ MemoryState.TransferMemoryIsolated or
+ MemoryState.TransferMemory or
+ MemoryState.ProcessMemory or
+ MemoryState.CodeReadOnly or
+ MemoryState.CodeWritable => GetAddrSpaceBaseAddr(),
+
+ MemoryState.Heap => HeapRegionStart,
+
+ MemoryState.IpcBuffer0 or
+ MemoryState.IpcBuffer1 or
+ MemoryState.IpcBuffer3 => AliasRegionStart,
+
+ MemoryState.Stack => StackRegionStart,
+
+ MemoryState.KernelStack => AddrSpaceStart,
+
+ _ => throw new ArgumentException($"Invalid state value \"{state}\"."),
+ };
+ }
+
+ // Returns the size of the region that mappings of the given state are placed in.
+ // Throws ArgumentException for unrecognized state values.
+ private ulong GetSize(MemoryState state)
+ {
+ return state switch
+ {
+ MemoryState.Io or
+ MemoryState.Normal or
+ MemoryState.ThreadLocal => TlsIoRegionEnd - TlsIoRegionStart,
+
+ MemoryState.CodeStatic or
+ MemoryState.CodeMutable or
+ MemoryState.SharedMemory or
+ MemoryState.ModCodeStatic or
+ MemoryState.ModCodeMutable or
+ MemoryState.TransferMemoryIsolated or
+ MemoryState.TransferMemory or
+ MemoryState.ProcessMemory or
+ MemoryState.CodeReadOnly or
+ MemoryState.CodeWritable => GetAddrSpaceSize(),
+
+ MemoryState.Heap => HeapRegionEnd - HeapRegionStart,
+
+ MemoryState.IpcBuffer0 or
+ MemoryState.IpcBuffer1 or
+ MemoryState.IpcBuffer3 => AliasRegionEnd - AliasRegionStart,
+
+ MemoryState.Stack => StackRegionEnd - StackRegionStart,
+
+ MemoryState.KernelStack => AddrSpaceEnd - AddrSpaceStart,
+
+ _ => throw new ArgumentException($"Invalid state value \"{state}\"."),
+ };
+ }
+
+ // Returns the lowest usable address of the process address space for the
+ // configured address space width (32, 36 or 39 bits).
+ public ulong GetAddrSpaceBaseAddr()
+ {
+ return AddrSpaceWidth switch
+ {
+ 36 or 39 => 0x8000000UL,
+ 32 => 0x200000UL,
+ _ => throw new InvalidOperationException("Invalid address space width!"),
+ };
+ }
+
+ // Returns the usable size of the process address space for the configured
+ // address space width (32, 36 or 39 bits).
+ public ulong GetAddrSpaceSize()
+ {
+ return AddrSpaceWidth switch
+ {
+ 36 => 0xff8000000UL,
+ 39 => 0x7ff8000000UL,
+ 32 => 0xffe00000UL,
+ _ => throw new InvalidOperationException("Invalid address space width!"),
+ };
+ }
+
+ // Converts a physical address into an offset from the DRAM base.
+ private static ulong GetDramAddressFromPa(ulong pa) => pa - DramMemoryMap.DramBase;
+
+ // Returns the physical memory region manager this page table allocates from.
+ protected KMemoryRegionManager GetMemoryRegionManager() => Context.MemoryManager.MemoryRegions[(int)_memRegion];
+
+ // Returns the number of pages consumed by the memory manager's own block bookkeeping.
+ public ulong GetMmUsedPages()
+ {
+ lock (_blockManager)
+ {
+ ulong usedSize = GetMmUsedSize();
+
+ return BitUtils.DivRoundUp<ulong>(usedSize, PageSize);
+ }
+ }
+
+ // Total size in bytes of all allocated memory block structures.
+ private ulong GetMmUsedSize() => (ulong)(_blockManager.BlocksCount * KMemoryBlockSize);
+
+ // True when the range extends past the last byte of the usable address space.
+ public bool IsInvalidRegion(ulong address, ulong size)
+ {
+ ulong addrSpaceLastByte = GetAddrSpaceBaseAddr() + GetAddrSpaceSize() - 1;
+
+ return address + size - 1 > addrSpaceLastByte;
+ }
+
+ // True when the range lies entirely within the address space bounds.
+ public bool InsideAddrSpace(ulong address, ulong size)
+ {
+ return address >= AddrSpaceStart && address + size - 1 <= AddrSpaceEnd - 1;
+ }
+
+ // True when the range overlaps the alias region at any point.
+ public bool InsideAliasRegion(ulong address, ulong size)
+ {
+ return AliasRegionStart < address + size && address < AliasRegionEnd;
+ }
+
+ // True when the range overlaps the heap region at any point.
+ public bool InsideHeapRegion(ulong address, ulong size)
+ {
+ return HeapRegionStart < address + size && address < HeapRegionEnd;
+ }
+
+ // True when the range overlaps the stack region at any point.
+ public bool InsideStackRegion(ulong address, ulong size)
+ {
+ return StackRegionStart < address + size && address < StackRegionEnd;
+ }
+
+ // True when any part of the range falls outside the alias region bounds.
+ public bool OutsideAliasRegion(ulong address, ulong size)
+ {
+ return address < AliasRegionStart || AliasRegionEnd - 1 < address + size - 1;
+ }
+
+ // True when any part of the range falls outside the address space bounds.
+ public bool OutsideAddrSpace(ulong address, ulong size)
+ {
+ return address < AddrSpaceStart || AddrSpaceEnd - 1 < address + size - 1;
+ }
+
+ // True when any part of the range falls outside the stack region bounds.
+ public bool OutsideStackRegion(ulong address, ulong size)
+ {
+ return address < StackRegionStart || StackRegionEnd - 1 < address + size - 1;
+ }
+
+ /// <summary>
+ /// Gets the host regions that make up the given virtual address region.
+ /// If any part of the virtual region is unmapped, null is returned.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <returns>The host regions</returns>
+ /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
+ protected abstract IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size);
+
+ /// <summary>
+ /// Gets the physical regions that make up the given virtual address region.
+ /// If any part of the virtual region is unmapped, null is returned.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <param name="pageList">Page list where the ranges will be added</param>
+ protected abstract void GetPhysicalRegions(ulong va, ulong size, KPageList pageList);
+
+ /// <summary>
+ /// Gets a read-only span of data from CPU mapped memory.
+ /// </summary>
+ /// <remarks>
+ /// This may perform an allocation if the data is not contiguous in memory.
+ /// For this reason, the span is read-only, you can't modify the data.
+ /// </remarks>
+ /// <param name="va">Virtual address of the data</param>
+ /// <param name="size">Size of the data</param>
+ /// <returns>A read-only span of the data</returns>
+ /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
+ protected abstract ReadOnlySpan<byte> GetSpan(ulong va, int size);
+
+ /// <summary>
+ /// Maps a new memory region with the contents of a existing memory region.
+ /// </summary>
+ /// <param name="src">Source memory region where the data will be taken from</param>
+ /// <param name="dst">Destination memory region to map</param>
+ /// <param name="pagesCount">Number of pages to map</param>
+ /// <param name="oldSrcPermission">Current protection of the source memory region</param>
+ /// <param name="newDstPermission">Desired protection for the destination memory region</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract Result MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission);
+
+ /// <summary>
+ /// Unmaps a region of memory that was previously mapped with <see cref="MapMemory"/>.
+ /// </summary>
+ /// <param name="dst">Destination memory region to be unmapped</param>
+ /// <param name="src">Source memory region that was originally remapped</param>
+ /// <param name="pagesCount">Number of pages to unmap</param>
+ /// <param name="oldDstPermission">Current protection of the destination memory region</param>
+ /// <param name="newSrcPermission">Desired protection of the source memory region</param>
+ /// <returns>Result of the unmapping operation</returns>
+ protected abstract Result UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission);
+
+ /// <summary>
+ /// Maps a region of memory into the specified physical memory region.
+ /// </summary>
+ /// <param name="dstVa">Destination virtual address that should be mapped</param>
+ /// <param name="pagesCount">Number of pages to map</param>
+ /// <param name="srcPa">Physical address where the pages should be mapped. May be ignored if aliasing is not supported</param>
+ /// <param name="permission">Permission of the region to be mapped</param>
+ /// <param name="flags">Flags controlling the memory map operation</param>
+ /// <param name="shouldFillPages">Indicate if the pages should be filled with the <paramref name="fillValue"/> value</param>
+ /// <param name="fillValue">The value used to fill pages when <paramref name="shouldFillPages"/> is set to true</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract Result MapPages(
+ ulong dstVa,
+ ulong pagesCount,
+ ulong srcPa,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages = false,
+ byte fillValue = 0);
+
+ /// <summary>
+ /// Maps a region of memory into the specified physical memory region.
+ /// </summary>
+ /// <param name="address">Destination virtual address that should be mapped</param>
+ /// <param name="pageList">List of physical memory pages where the pages should be mapped. May be ignored if aliasing is not supported</param>
+ /// <param name="permission">Permission of the region to be mapped</param>
+ /// <param name="flags">Flags controlling the memory map operation</param>
+ /// <param name="shouldFillPages">Indicate if the pages should be filled with the <paramref name="fillValue"/> value</param>
+ /// <param name="fillValue">The value used to fill pages when <paramref name="shouldFillPages"/> is set to true</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract Result MapPages(
+ ulong address,
+ KPageList pageList,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages = false,
+ byte fillValue = 0);
+
+ /// <summary>
+ /// Maps pages into an arbitrary host memory location.
+ /// </summary>
+ /// <param name="regions">Host regions to be mapped into the specified virtual memory region</param>
+ /// <param name="va">Destination virtual address of the range on this page table</param>
+ /// <param name="size">Size of the range</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size);
+
+ /// <summary>
+ /// Unmaps a region of memory that was previously mapped with one of the page mapping methods.
+ /// </summary>
+ /// <param name="address">Virtual address of the region to unmap</param>
+ /// <param name="pagesCount">Number of pages to unmap</param>
+ /// <returns>Result of the unmapping operation</returns>
+ protected abstract Result Unmap(ulong address, ulong pagesCount);
+
+ /// <summary>
+ /// Changes the permissions of a given virtual memory region.
+ /// </summary>
+ /// <param name="address">Virtual address of the region to have the permission changes</param>
+ /// <param name="pagesCount">Number of pages to have their permissions changed</param>
+ /// <param name="permission">New permission</param>
+ /// <returns>Result of the permission change operation</returns>
+ protected abstract Result Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission);
+
+ /// <summary>
+ /// Changes the permissions of a given virtual memory region.
+ /// NOTE(review): this doc was a verbatim copy of <see cref="Reprotect"/>'s; the name
+ /// suggests this variant also handles memory tracking attributes — confirm against
+ /// the concrete implementations.
+ /// </summary>
+ /// <param name="address">Virtual address of the region to have the permission changes</param>
+ /// <param name="pagesCount">Number of pages to have their permissions changed</param>
+ /// <param name="permission">New permission</param>
+ /// <returns>Result of the permission change operation</returns>
+ protected abstract Result ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission);
+
+ /// <summary>
+ /// Alerts the memory tracking that a given region has been read from or written to.
+ /// This should be called before read/write is performed.
+ /// </summary>
+ /// <param name="va">Virtual address of the region</param>
+ /// <param name="size">Size of the region</param>
+ /// <param name="write">True if the region is about to be written, false if it is about to be read</param>
+ protected abstract void SignalMemoryTracking(ulong va, ulong size, bool write);
+
+ /// <summary>
+ /// Writes data to CPU mapped memory, with write tracking.
+ /// </summary>
+ /// <param name="va">Virtual address to write the data into</param>
+ /// <param name="data">Data to be written</param>
+ /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
+ protected abstract void Write(ulong va, ReadOnlySpan<byte> data);
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs
new file mode 100644
index 00000000..a0c19f9c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs
@@ -0,0 +1,27 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Scope guard that takes a reference on every page of a page list, releasing
+ /// those references on dispose unless <see cref="SignalSuccess"/> was called first.
+ /// </summary>
+ struct KScopedPageList : IDisposable
+ {
+ private readonly KMemoryManager _manager;
+ // Cleared by SignalSuccess so that Dispose becomes a no-op.
+ private KPageList _pageList;
+
+ /// <summary>
+ /// Takes a reference on every page of <paramref name="pageList"/>.
+ /// </summary>
+ public KScopedPageList(KMemoryManager manager, KPageList pageList)
+ {
+ _manager = manager;
+ _pageList = pageList;
+ pageList.IncrementPagesReferenceCount(manager);
+ }
+
+ /// <summary>
+ /// Marks the guarded operation as successful, keeping the page references alive.
+ /// </summary>
+ public void SignalSuccess()
+ {
+ _pageList = null;
+ }
+
+ /// <summary>
+ /// Releases the page references unless success was signalled.
+ /// </summary>
+ public void Dispose()
+ {
+ _pageList?.DecrementPagesReferenceCount(_manager);
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
new file mode 100644
index 00000000..5ec3cd72
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
@@ -0,0 +1,75 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Horizon.Common;
+using Ryujinx.Memory;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Kernel shared memory object: a fixed set of physical pages that can be mapped
+ /// into processes, with separate required permissions for the owner and other users.
+ /// </summary>
+ class KSharedMemory : KAutoObject
+ {
+ private readonly KPageList _pageList;
+
+ // Pid of the process that created the shared memory.
+ private readonly ulong _ownerPid;
+
+ // Permission required when the owner maps it, and when any other process maps it.
+ private readonly KMemoryPermission _ownerPermission;
+ private readonly KMemoryPermission _userPermission;
+
+ public KSharedMemory(
+ KernelContext context,
+ SharedMemoryStorage storage,
+ ulong ownerPid,
+ KMemoryPermission ownerPermission,
+ KMemoryPermission userPermission) : base(context)
+ {
+ _pageList = storage.GetPageList();
+ _ownerPid = ownerPid;
+ _ownerPermission = ownerPermission;
+ _userPermission = userPermission;
+ }
+
+ /// <summary>
+ /// Maps the shared memory pages into a process address space.
+ /// </summary>
+ /// <param name="memoryManager">Page table of the target process</param>
+ /// <param name="address">Virtual address where the memory should be mapped</param>
+ /// <param name="size">Size of the mapping; must match the shared memory size</param>
+ /// <param name="process">Process the memory is mapped into</param>
+ /// <param name="permission">Requested permission; must equal the owner/user permission as appropriate</param>
+ /// <returns>Result of the mapping operation</returns>
+ public Result MapIntoProcess(
+ KPageTableBase memoryManager,
+ ulong address,
+ ulong size,
+ KProcess process,
+ KMemoryPermission permission)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ // The permission the caller must request depends on whether it is the owner.
+ KMemoryPermission expectedPermission = process.Pid == _ownerPid
+ ? _ownerPermission
+ : _userPermission;
+
+ if (permission != expectedPermission)
+ {
+ return KernelResult.InvalidPermission;
+ }
+
+ // On platforms with page size > 4 KB, this can fail due to the address not being page aligned,
+ // we can return an error to force the application to retry with a different address.
+
+ try
+ {
+ return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+
+ /// <summary>
+ /// Unmaps the shared memory pages from a process address space.
+ /// </summary>
+ /// <param name="memoryManager">Page table of the target process</param>
+ /// <param name="address">Virtual address of the existing mapping</param>
+ /// <param name="size">Size of the mapping; must match the shared memory size</param>
+ /// <param name="process">Process the memory is unmapped from</param>
+ /// <returns>Result of the unmapping operation</returns>
+ public Result UnmapFromProcess(KPageTableBase memoryManager, ulong address, ulong size, KProcess process)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ return memoryManager.UnmapPages(address, _pageList, MemoryState.SharedMemory);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KSlabHeap.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KSlabHeap.cs
new file mode 100644
index 00000000..9051e84c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KSlabHeap.cs
@@ -0,0 +1,50 @@
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Simple slab allocator: a fixed pool of equally sized items carved from a
+ /// contiguous physical range, handed out and returned in LIFO order.
+ /// </summary>
+ class KSlabHeap
+ {
+ // Free list of item physical addresses; also serves as the lock object.
+ // Made readonly: it is only assigned in the constructor.
+ private readonly LinkedList<ulong> _items;
+
+ /// <summary>
+ /// Carves the range starting at <paramref name="pa"/> into size / itemSize free items.
+ /// </summary>
+ /// <param name="pa">Physical address of the backing range</param>
+ /// <param name="itemSize">Size of each item in bytes</param>
+ /// <param name="size">Total size of the backing range in bytes</param>
+ public KSlabHeap(ulong pa, ulong itemSize, ulong size)
+ {
+ _items = new LinkedList<ulong>();
+
+ int itemsCount = (int)(size / itemSize);
+
+ for (int index = 0; index < itemsCount; index++)
+ {
+ _items.AddLast(pa);
+
+ pa += itemSize;
+ }
+ }
+
+ /// <summary>
+ /// Takes a free item from the pool.
+ /// </summary>
+ /// <param name="pa">Physical address of the allocated item, or 0 when the pool is empty</param>
+ /// <returns>True if an item was available, false otherwise</returns>
+ public bool TryGetItem(out ulong pa)
+ {
+ lock (_items)
+ {
+ if (_items.First != null)
+ {
+ pa = _items.First.Value;
+
+ _items.RemoveFirst();
+
+ return true;
+ }
+ }
+
+ pa = 0;
+
+ return false;
+ }
+
+ /// <summary>
+ /// Returns an item to the pool.
+ /// </summary>
+ /// <param name="pa">Physical address previously obtained from <see cref="TryGetItem"/></param>
+ public void Free(ulong pa)
+ {
+ lock (_items)
+ {
+ _items.AddFirst(pa);
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs
new file mode 100644
index 00000000..b2449598
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs
@@ -0,0 +1,130 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Horizon.Common;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Kernel transfer memory object: a range of the creator's address space that is
+ /// lent out ("borrowed") and may be mapped into another process until unmapped
+ /// and returned on destruction.
+ /// </summary>
+ class KTransferMemory : KAutoObject
+ {
+ private KProcess _creator;
+
+ // TODO: Remove when we no longer need to read it from the owner directly.
+ public KProcess Creator => _creator;
+
+ private readonly KPageList _pageList;
+
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+
+ public KMemoryPermission Permission { get; private set; }
+
+ private bool _hasBeenInitialized;
+ private bool _isMapped;
+
+ public KTransferMemory(KernelContext context) : base(context)
+ {
+ _pageList = new KPageList();
+ }
+
+ public KTransferMemory(KernelContext context, SharedMemoryStorage storage) : base(context)
+ {
+ _pageList = storage.GetPageList();
+ Permission = KMemoryPermission.ReadAndWrite;
+
+ _hasBeenInitialized = true;
+ _isMapped = false;
+ }
+
+ /// <summary>
+ /// Borrows the given range from the current process, initializing the transfer memory.
+ /// </summary>
+ /// <param name="address">Virtual address of the range on the creator</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <param name="permission">Permission the creator keeps while the memory is borrowed</param>
+ /// <returns>Result of the borrow operation</returns>
+ public Result Initialize(ulong address, ulong size, KMemoryPermission permission)
+ {
+ KProcess creator = KernelStatic.GetCurrentProcess();
+
+ _creator = creator;
+
+ Result result = creator.MemoryManager.BorrowTransferMemory(_pageList, address, size, permission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ // Keep the creator alive while it has pages lent out to this object.
+ creator.IncrementReferenceCount();
+
+ Permission = permission;
+ Address = address;
+ Size = size;
+ _hasBeenInitialized = true;
+ _isMapped = false;
+
+ return result;
+ }
+
+ /// <summary>
+ /// Maps the transfer memory pages into a process address space.
+ /// </summary>
+ /// <param name="memoryManager">Page table of the target process</param>
+ /// <param name="address">Virtual address where the memory should be mapped</param>
+ /// <param name="size">Size of the mapping; must match the transfer memory size</param>
+ /// <param name="process">Process the memory is mapped into</param>
+ /// <param name="permission">Requested permission; must equal the permission set at initialization</param>
+ /// <returns>Result of the mapping operation</returns>
+ public Result MapIntoProcess(
+ KPageTableBase memoryManager,
+ ulong address,
+ ulong size,
+ KProcess process,
+ KMemoryPermission permission)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (permission != Permission || _isMapped)
+ {
+ return KernelResult.InvalidState;
+ }
+
+ // A None creator permission means nobody else may access the pages: isolated state.
+ MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
+
+ Result result = memoryManager.MapPages(address, _pageList, state, KMemoryPermission.ReadAndWrite);
+
+ if (result == Result.Success)
+ {
+ _isMapped = true;
+ }
+
+ return result;
+ }
+
+ /// <summary>
+ /// Unmaps the transfer memory pages from a process address space.
+ /// </summary>
+ /// <param name="memoryManager">Page table of the target process</param>
+ /// <param name="address">Virtual address of the existing mapping</param>
+ /// <param name="size">Size of the mapping; must match the transfer memory size</param>
+ /// <param name="process">Process the memory is unmapped from</param>
+ /// <returns>Result of the unmapping operation</returns>
+ public Result UnmapFromProcess(
+ KPageTableBase memoryManager,
+ ulong address,
+ ulong size,
+ KProcess process)
+ {
+ // Redundant (ulong) cast on PageSize removed for consistency with MapIntoProcess.
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
+
+ Result result = memoryManager.UnmapPages(address, _pageList, state);
+
+ if (result == Result.Success)
+ {
+ _isMapped = false;
+ }
+
+ return result;
+ }
+
+ /// <summary>
+ /// Returns the borrowed pages to the creator and releases its resources
+ /// when the object is destroyed.
+ /// </summary>
+ protected override void Destroy()
+ {
+ if (_hasBeenInitialized)
+ {
+ if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _pageList) != Result.Success)
+ {
+ throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes.");
+ }
+
+ _creator.ResourceLimit?.Release(LimitableResource.TransferMemory, 1);
+ _creator.DecrementReferenceCount();
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryAttribute.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryAttribute.cs
new file mode 100644
index 00000000..42407ffe
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryAttribute.cs
@@ -0,0 +1,22 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Attribute flags tracked per memory block, describing transient conditions
+ /// layered on top of the block's <see cref="MemoryState"/>.
+ /// </summary>
+ [Flags]
+ enum MemoryAttribute : byte
+ {
+ None = 0,
+ Mask = 0xff,
+
+ Borrowed = 1 << 0, // Pages lent out (e.g. via transfer memory borrow) — confirm against borrow/unborrow paths.
+ IpcMapped = 1 << 1, // Pages currently mapped for an IPC transfer.
+ DeviceMapped = 1 << 2, // Pages currently mapped to a device.
+ Uncached = 1 << 3, // Caching disabled for these pages.
+
+ // Convenience combinations used by attribute mask checks.
+ IpcAndDeviceMapped = IpcMapped | DeviceMapped,
+
+ BorrowedAndIpcMapped = Borrowed | IpcMapped,
+
+ DeviceMappedAndUncached = DeviceMapped | Uncached
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryFillValue.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryFillValue.cs
new file mode 100644
index 00000000..cdc892fc
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryFillValue.cs
@@ -0,0 +1,10 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ // Byte patterns used when filling pages on map operations (see the
+ // fillValue parameter of MapPages). Names suggest the region type the
+ // pattern is used for — confirm against callers.
+ enum MemoryFillValue : byte
+ {
+ Zero = 0,
+ Stack = 0x58,
+ Ipc = 0x59,
+ Heap = 0x5A,
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryPermission.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryPermission.cs
new file mode 100644
index 00000000..563b817d
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryPermission.cs
@@ -0,0 +1,20 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Access permissions of a memory mapping.
+ /// </summary>
+ [Flags]
+ enum KMemoryPermission : uint
+ {
+ None = 0,
+ // Permission bits a user process may request directly.
+ UserMask = Read | Write | Execute,
+ Mask = uint.MaxValue,
+
+ Read = 1 << 0,
+ Write = 1 << 1,
+ Execute = 1 << 2,
+ DontCare = 1 << 28,
+
+ ReadAndWrite = Read | Write,
+ ReadAndExecute = Read | Execute
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryRegion.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryRegion.cs
new file mode 100644
index 00000000..ad719bde
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryRegion.cs
@@ -0,0 +1,10 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ // Physical memory pool regions that allocations are served from
+ // (indexes into KMemoryManager.MemoryRegions).
+ enum MemoryRegion
+ {
+ Application = 0,
+ Applet = 1,
+ Service = 2,
+ NvServices = 3
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryState.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryState.cs
new file mode 100644
index 00000000..d3b61780
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryState.cs
@@ -0,0 +1,50 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Kernel memory block states. The low byte (see <see cref="UserMask"/>) is the
+ /// state type id; the higher bits are per-state capability flags (defined below).
+ /// Each named state constant already embeds the flag bits that apply to it.
+ /// </summary>
+ [Flags]
+ enum MemoryState : uint
+ {
+ Unmapped = 0x00000000,
+ Io = 0x00002001,
+ Normal = 0x00042002,
+ CodeStatic = 0x00DC7E03,
+ CodeMutable = 0x03FEBD04,
+ Heap = 0x037EBD05,
+ SharedMemory = 0x00402006,
+ ModCodeStatic = 0x00DD7E08,
+ ModCodeMutable = 0x03FFBD09,
+ IpcBuffer0 = 0x005C3C0A,
+ Stack = 0x005C3C0B,
+ ThreadLocal = 0x0040200C,
+ TransferMemoryIsolated = 0x015C3C0D,
+ TransferMemory = 0x005C380E,
+ ProcessMemory = 0x0040380F,
+ Reserved = 0x00000010,
+ IpcBuffer1 = 0x005C3811,
+ IpcBuffer3 = 0x004C2812,
+ KernelStack = 0x00002013,
+ CodeReadOnly = 0x00402214,
+ CodeWritable = 0x00402015,
+ UserMask = 0xff,
+ Mask = 0xffffffff,
+
+ // Capability flags: which operations are permitted on blocks in a given state.
+ PermissionChangeAllowed = 1 << 8,
+ ForceReadWritableByDebugSyscalls = 1 << 9,
+ IpcSendAllowedType0 = 1 << 10,
+ IpcSendAllowedType3 = 1 << 11,
+ IpcSendAllowedType1 = 1 << 12,
+ ProcessPermissionChangeAllowed = 1 << 14,
+ MapAllowed = 1 << 15,
+ UnmapProcessCodeMemoryAllowed = 1 << 16,
+ TransferMemoryAllowed = 1 << 17,
+ QueryPhysicalAddressAllowed = 1 << 18,
+ MapDeviceAllowed = 1 << 19,
+ MapDeviceAlignedAllowed = 1 << 20,
+ IpcBufferAllowed = 1 << 21,
+ IsPoolAllocated = 1 << 22,
+ MapProcessAllowed = 1 << 23,
+ AttributeChangeAllowed = 1 << 24,
+ CodeMemoryAllowed = 1 << 25
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
new file mode 100644
index 00000000..c68b7369
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
@@ -0,0 +1,49 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ /// <summary>
+ /// Backing storage of a shared memory object, described by a list of
+ /// physical pages on the emulated DRAM.
+ /// </summary>
+ class SharedMemoryStorage
+ {
+ private readonly KernelContext _context;
+ private readonly KPageList _pageList;
+ // Total storage size in bytes (pages count * page size).
+ private readonly ulong _size;
+
+ public SharedMemoryStorage(KernelContext context, KPageList pageList)
+ {
+ _context = context;
+ _pageList = pageList;
+ _size = pageList.GetPagesCount() * KPageTableBase.PageSize;
+
+ // Commit the host memory backing every physical page range up front.
+ foreach (KPageNode pageNode in pageList)
+ {
+ ulong address = pageNode.Address - DramMemoryMap.DramBase;
+ ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
+ context.CommitMemory(address, size);
+ }
+ }
+
+ /// <summary>
+ /// Fills the whole storage with zeros, one ulong at a time.
+ /// </summary>
+ public void ZeroFill()
+ {
+ for (ulong offset = 0; offset < _size; offset += sizeof(ulong))
+ {
+ GetRef<ulong>(offset) = 0;
+ }
+ }
+
+ /// <summary>
+ /// Gets a reference to a value at the given byte offset into the storage.
+ /// Only supported while the backing page list is contiguous (a single node).
+ /// </summary>
+ public ref T GetRef<T>(ulong offset) where T : unmanaged
+ {
+ if (_pageList.Nodes.Count == 1)
+ {
+ ulong address = _pageList.Nodes.First.Value.Address - DramMemoryMap.DramBase;
+ return ref _context.Memory.GetRef<T>(address + offset);
+ }
+
+ throw new NotImplementedException("Non-contiguous shared memory is not yet supported.");
+ }
+
+ /// <summary>
+ /// Gets the list of physical pages backing this storage.
+ /// </summary>
+ public KPageList GetPageList()
+ {
+ return _pageList;
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/CapabilityExtensions.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/CapabilityExtensions.cs
new file mode 100644
index 00000000..66d56fe3
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/CapabilityExtensions.cs
@@ -0,0 +1,22 @@
+using System.Numerics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ /// <summary>
+ /// Helpers for decoding kernel capability descriptors. A descriptor's type is
+ /// encoded by the number of trailing 1 bits in the raw value (see the
+ /// (2^n - 1) values of <see cref="CapabilityType"/>).
+ /// </summary>
+ static class CapabilityExtensions
+ {
+ /// <summary>
+ /// Extracts the capability type from a raw descriptor: (cap + 1) carries into the
+ /// first 0 bit, masking with ~cap leaves only that bit, and subtracting 1 yields
+ /// the run of trailing 1 bits.
+ /// </summary>
+ public static CapabilityType GetCapabilityType(this uint cap)
+ {
+ return (CapabilityType)(((cap + 1) & ~cap) - 1);
+ }
+
+ /// <summary>
+ /// Gets the flag value of a capability type: the single bit just above its trailing 1 run.
+ /// </summary>
+ public static uint GetFlag(this CapabilityType type)
+ {
+ return (uint)type + 1;
+ }
+
+ /// <summary>
+ /// Gets the numeric id of a capability type: the count of its trailing 1 bits.
+ /// </summary>
+ public static uint GetId(this CapabilityType type)
+ {
+ return (uint)BitOperations.TrailingZeroCount(type.GetFlag());
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/CapabilityType.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/CapabilityType.cs
new file mode 100644
index 00000000..51d92316
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/CapabilityType.cs
@@ -0,0 +1,19 @@
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Kernel capability descriptor types. Each type is identified by the number of
+ // trailing 1 bits in the raw descriptor, hence the (2^n - 1) values; see
+ // CapabilityExtensions for the decoding helpers.
+ enum CapabilityType : uint
+ {
+ CorePriority = (1u << 3) - 1,
+ SyscallMask = (1u << 4) - 1,
+ MapRange = (1u << 6) - 1,
+ MapIoPage = (1u << 7) - 1,
+ MapRegion = (1u << 10) - 1,
+ InterruptPair = (1u << 11) - 1,
+ ProgramType = (1u << 13) - 1,
+ KernelVersion = (1u << 14) - 1,
+ HandleTable = (1u << 15) - 1,
+ DebugFlags = (1u << 16) - 1,
+
+ Invalid = 0u,
+ Padding = ~0u // All bits set: a padding entry carrying no capability.
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/HleProcessDebugger.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/HleProcessDebugger.cs
new file mode 100644
index 00000000..8fee5c0d
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/HleProcessDebugger.cs
@@ -0,0 +1,465 @@
+using Ryujinx.HLE.HOS.Diagnostics.Demangler;
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using Ryujinx.HLE.Loaders.Elf;
+using Ryujinx.Memory;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ class HleProcessDebugger
+ {
+ private const int Mod0 = 'M' << 0 | 'O' << 8 | 'D' << 16 | '0' << 24;
+
+ private KProcess _owner;
+
+ private class Image
+ {
+ public ulong BaseAddress { get; }
+ public ulong Size { get; }
+ public ulong EndAddress => BaseAddress + Size;
+
+ public ElfSymbol[] Symbols { get; }
+
+ public Image(ulong baseAddress, ulong size, ElfSymbol[] symbols)
+ {
+ BaseAddress = baseAddress;
+ Size = size;
+ Symbols = symbols;
+ }
+ }
+
+ private List<Image> _images;
+
+ private int _loaded;
+
+ public HleProcessDebugger(KProcess owner)
+ {
+ _owner = owner;
+
+ _images = new List<Image>();
+ }
+
+ public string GetGuestStackTrace(KThread thread)
+ {
+ EnsureLoaded();
+
+ var context = thread.Context;
+
+ StringBuilder trace = new StringBuilder();
+
+ trace.AppendLine($"Process: {_owner.Name}, PID: {_owner.Pid}");
+
+ void AppendTrace(ulong address)
+ {
+ if (AnalyzePointer(out PointerInfo info, address, thread))
+ {
+ trace.AppendLine($" 0x{address:x16}\t{info.ImageDisplay}\t{info.SubDisplay}");
+ }
+ else
+ {
+ trace.AppendLine($" 0x{address:x16}");
+ }
+ }
+
+ if (context.IsAarch32)
+ {
+ ulong framePointer = context.GetX(11);
+
+ while (framePointer != 0)
+ {
+ if ((framePointer & 3) != 0 ||
+ !_owner.CpuMemory.IsMapped(framePointer) ||
+ !_owner.CpuMemory.IsMapped(framePointer + 4))
+ {
+ break;
+ }
+
+ AppendTrace(_owner.CpuMemory.Read<uint>(framePointer + 4));
+
+ framePointer = _owner.CpuMemory.Read<uint>(framePointer);
+ }
+ }
+ else
+ {
+ ulong framePointer = context.GetX(29);
+
+ while (framePointer != 0)
+ {
+ if ((framePointer & 7) != 0 ||
+ !_owner.CpuMemory.IsMapped(framePointer) ||
+ !_owner.CpuMemory.IsMapped(framePointer + 8))
+ {
+ break;
+ }
+
+ AppendTrace(_owner.CpuMemory.Read<ulong>(framePointer + 8));
+
+ framePointer = _owner.CpuMemory.Read<ulong>(framePointer);
+ }
+ }
+
+ return trace.ToString();
+ }
+
+ public string GetCpuRegisterPrintout(KThread thread)
+ {
+ EnsureLoaded();
+
+ var context = thread.Context;
+
+ StringBuilder sb = new StringBuilder();
+
+ string GetReg(int x)
+ {
+ var v = x == 32 ? context.Pc : context.GetX(x);
+ if (!AnalyzePointer(out PointerInfo info, v, thread))
+ {
+ return $"0x{v:x16}";
+ }
+ else
+ {
+ if (!string.IsNullOrEmpty(info.ImageName))
+ {
+ return $"0x{v:x16} ({info.ImageDisplay})\t=> {info.SubDisplay}";
+ }
+ else
+ {
+ return $"0x{v:x16} ({info.SpDisplay})";
+ }
+ }
+ }
+
+ for (int i = 0; i <= 28; i++)
+ {
+ sb.AppendLine($"\tX[{i:d2}]:\t{GetReg(i)}");
+ }
+ sb.AppendLine($"\tFP:\t{GetReg(29)}");
+ sb.AppendLine($"\tLR:\t{GetReg(30)}");
+ sb.AppendLine($"\tSP:\t{GetReg(31)}");
+ sb.AppendLine($"\tPC:\t{GetReg(32)}");
+
+ return sb.ToString();
+ }
+
+ private bool TryGetSubName(Image image, ulong address, out ElfSymbol symbol)
+ {
+ address -= image.BaseAddress;
+
+ int left = 0;
+ int right = image.Symbols.Length - 1;
+
+ while (left <= right)
+ {
+ int size = right - left;
+
+ int middle = left + (size >> 1);
+
+ symbol = image.Symbols[middle];
+
+ ulong endAddr = symbol.Value + symbol.Size;
+
+ if (address >= symbol.Value && address < endAddr)
+ {
+ return true;
+ }
+
+ if (address < symbol.Value)
+ {
+ right = middle - 1;
+ }
+ else
+ {
+ left = middle + 1;
+ }
+ }
+
+ symbol = default;
+
+ return false;
+ }
+
+ struct PointerInfo
+ {
+ public string ImageName;
+ public string SubName;
+
+ public ulong Offset;
+ public ulong SubOffset;
+
+ public string ImageDisplay => $"{ImageName}:0x{Offset:x4}";
+ public string SubDisplay => SubOffset == 0 ? SubName : $"{SubName}:0x{SubOffset:x4}";
+ public string SpDisplay => SubOffset == 0 ? "SP" : $"SP:-0x{SubOffset:x4}";
+ }
+
+ private bool AnalyzePointer(out PointerInfo info, ulong address, KThread thread)
+ {
+ if (AnalyzePointerFromImages(out info, address))
+ {
+ return true;
+ }
+
+ if (AnalyzePointerFromStack(out info, address, thread))
+ {
+ return true;
+ }
+
+ return false;
+ }
+
+ private bool AnalyzePointerFromImages(out PointerInfo info, ulong address)
+ {
+ info = default;
+
+ Image image = GetImage(address, out int imageIndex);
+
+ if (image == null)
+ {
+ // Value isn't a pointer to a known image...
+ return false;
+ }
+
+ info.Offset = address - image.BaseAddress;
+
+ // Try to find what this pointer is referring to
+ if (TryGetSubName(image, address, out ElfSymbol symbol))
+ {
+ info.SubName = symbol.Name;
+
+ // Demangle string if possible
+ if (info.SubName.StartsWith("_Z"))
+ {
+ info.SubName = Demangler.Parse(info.SubName);
+ }
+ info.SubOffset = info.Offset - symbol.Value;
+ }
+ else
+ {
+ info.SubName = "";
+ }
+
+ info.ImageName = GetGuessedNsoNameFromIndex(imageIndex);
+
+ return true;
+ }
+
+ private bool AnalyzePointerFromStack(out PointerInfo info, ulong address, KThread thread)
+ {
+ info = default;
+
+ ulong sp = thread.Context.GetX(31);
+ var memoryInfo = _owner.MemoryManager.QueryMemory(address);
+ MemoryState memoryState = memoryInfo.State;
+
+ if (!memoryState.HasFlag(MemoryState.Stack)) // Is this pointer within the stack?
+ {
+ return false;
+ }
+
+ info.SubOffset = address - sp;
+
+ return true;
+ }
+
+ private Image GetImage(ulong address, out int index)
+ {
+ lock (_images)
+ {
+ for (index = _images.Count - 1; index >= 0; index--)
+ {
+ if (address >= _images[index].BaseAddress && address < _images[index].EndAddress)
+ {
+ return _images[index];
+ }
+ }
+ }
+
+ return null;
+ }
+
+ private string GetGuessedNsoNameFromIndex(int index)
+ {
+ if ((uint)index > 11)
+ {
+ return "???";
+ }
+
+ if (index == 0)
+ {
+ return "rtld";
+ }
+ else if (index == 1)
+ {
+ return "main";
+ }
+ else if (index == GetImagesCount() - 1)
+ {
+ return "sdk";
+ }
+ else
+ {
+ return "subsdk" + (index - 2);
+ }
+ }
+
+ private int GetImagesCount()
+ {
+ lock (_images)
+ {
+ return _images.Count;
+ }
+ }
+
+ private void EnsureLoaded()
+ {
+ if (Interlocked.CompareExchange(ref _loaded, 1, 0) == 0)
+ {
+ ScanMemoryForTextSegments();
+ }
+ }
+
+ private void ScanMemoryForTextSegments()
+ {
+ ulong oldAddress = 0;
+ ulong address = 0;
+
+ while (address >= oldAddress)
+ {
+ KMemoryInfo info = _owner.MemoryManager.QueryMemory(address);
+
+ if (info.State == MemoryState.Reserved)
+ {
+ break;
+ }
+
+ if (info.State == MemoryState.CodeStatic && info.Permission == KMemoryPermission.ReadAndExecute)
+ {
+ LoadMod0Symbols(_owner.CpuMemory, info.Address, info.Size);
+ }
+
+ oldAddress = address;
+
+ address = info.Address + info.Size;
+ }
+ }
+
+ private void LoadMod0Symbols(IVirtualMemoryManager memory, ulong textOffset, ulong textSize)
+ {
+ ulong mod0Offset = textOffset + memory.Read<uint>(textOffset + 4);
+
+ if (mod0Offset < textOffset || !memory.IsMapped(mod0Offset) || (mod0Offset & 3) != 0)
+ {
+ return;
+ }
+
+ Dictionary<ElfDynamicTag, ulong> dynamic = new Dictionary<ElfDynamicTag, ulong>();
+
+ int mod0Magic = memory.Read<int>(mod0Offset + 0x0);
+
+ if (mod0Magic != Mod0)
+ {
+ return;
+ }
+
+ ulong dynamicOffset = memory.Read<uint>(mod0Offset + 0x4) + mod0Offset;
+ ulong bssStartOffset = memory.Read<uint>(mod0Offset + 0x8) + mod0Offset;
+ ulong bssEndOffset = memory.Read<uint>(mod0Offset + 0xc) + mod0Offset;
+ ulong ehHdrStartOffset = memory.Read<uint>(mod0Offset + 0x10) + mod0Offset;
+ ulong ehHdrEndOffset = memory.Read<uint>(mod0Offset + 0x14) + mod0Offset;
+ ulong modObjOffset = memory.Read<uint>(mod0Offset + 0x18) + mod0Offset;
+
+ bool isAArch32 = memory.Read<ulong>(dynamicOffset) > 0xFFFFFFFF || memory.Read<ulong>(dynamicOffset + 0x10) > 0xFFFFFFFF;
+
+ while (true)
+ {
+ ulong tagVal;
+ ulong value;
+
+ if (isAArch32)
+ {
+ tagVal = memory.Read<uint>(dynamicOffset + 0);
+ value = memory.Read<uint>(dynamicOffset + 4);
+
+ dynamicOffset += 0x8;
+ }
+ else
+ {
+ tagVal = memory.Read<ulong>(dynamicOffset + 0);
+ value = memory.Read<ulong>(dynamicOffset + 8);
+
+ dynamicOffset += 0x10;
+ }
+
+ ElfDynamicTag tag = (ElfDynamicTag)tagVal;
+
+ if (tag == ElfDynamicTag.DT_NULL)
+ {
+ break;
+ }
+
+ dynamic[tag] = value;
+ }
+
+ if (!dynamic.TryGetValue(ElfDynamicTag.DT_STRTAB, out ulong strTab) ||
+ !dynamic.TryGetValue(ElfDynamicTag.DT_SYMTAB, out ulong symTab) ||
+ !dynamic.TryGetValue(ElfDynamicTag.DT_SYMENT, out ulong symEntSize))
+ {
+ return;
+ }
+
+ ulong strTblAddr = textOffset + strTab;
+ ulong symTblAddr = textOffset + symTab;
+
+ List<ElfSymbol> symbols = new List<ElfSymbol>();
+
+ while (symTblAddr < strTblAddr)
+ {
+ ElfSymbol sym = isAArch32 ? GetSymbol32(memory, symTblAddr, strTblAddr) : GetSymbol64(memory, symTblAddr, strTblAddr);
+
+ symbols.Add(sym);
+
+ symTblAddr += symEntSize;
+ }
+
+ lock (_images)
+ {
+ _images.Add(new Image(textOffset, textSize, symbols.OrderBy(x => x.Value).ToArray()));
+ }
+ }
+
+ private ElfSymbol GetSymbol64(IVirtualMemoryManager memory, ulong address, ulong strTblAddr)
+ {
+ ElfSymbol64 sym = memory.Read<ElfSymbol64>(address);
+
+ uint nameIndex = sym.NameOffset;
+
+ string name = string.Empty;
+
+ for (int chr; (chr = memory.Read<byte>(strTblAddr + nameIndex++)) != 0;)
+ {
+ name += (char)chr;
+ }
+
+ return new ElfSymbol(name, sym.Info, sym.Other, sym.SectionIndex, sym.ValueAddress, sym.Size);
+ }
+
+ private ElfSymbol GetSymbol32(IVirtualMemoryManager memory, ulong address, ulong strTblAddr)
+ {
+ ElfSymbol32 sym = memory.Read<ElfSymbol32>(address);
+
+ uint nameIndex = sym.NameOffset;
+
+ string name = string.Empty;
+
+ for (int chr; (chr = memory.Read<byte>(strTblAddr + nameIndex++)) != 0;)
+ {
+ name += (char)chr;
+ }
+
+ return new ElfSymbol(name, sym.Info, sym.Other, sym.SectionIndex, sym.ValueAddress, sym.Size);
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/IProcessContext.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/IProcessContext.cs
new file mode 100644
index 00000000..c8063a62
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/IProcessContext.cs
@@ -0,0 +1,15 @@
+using Ryujinx.Cpu;
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ interface IProcessContext : IDisposable
+ {
+ IVirtualMemoryManager AddressSpace { get; }
+
+ IExecutionContext CreateExecutionContext(ExceptionCallbacks exceptionCallbacks);
+ void Execute(IExecutionContext context, ulong codeAddress);
+ void InvalidateCacheRegion(ulong address, ulong size);
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/IProcessContextFactory.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/IProcessContextFactory.cs
new file mode 100644
index 00000000..0a24a524
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/IProcessContextFactory.cs
@@ -0,0 +1,9 @@
+using Ryujinx.Memory;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ interface IProcessContextFactory
+ {
+ IProcessContext Create(KernelContext context, ulong pid, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler, bool for64Bit);
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/KContextIdManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/KContextIdManager.cs
new file mode 100644
index 00000000..104fe578
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/KContextIdManager.cs
@@ -0,0 +1,83 @@
+using System;
+using System.Numerics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ class KContextIdManager
+ {
+ private const int IdMasksCount = 8;
+
+ private int[] _idMasks;
+
+ private int _nextFreeBitHint;
+
+ public KContextIdManager()
+ {
+ _idMasks = new int[IdMasksCount];
+ }
+
+ public int GetId()
+ {
+ lock (_idMasks)
+ {
+ int id = 0;
+
+ if (!TestBit(_nextFreeBitHint))
+ {
+ id = _nextFreeBitHint;
+ }
+ else
+ {
+ for (int index = 0; index < IdMasksCount; index++)
+ {
+ int mask = _idMasks[index];
+
+ int firstFreeBit = BitOperations.LeadingZeroCount((uint)((mask + 1) & ~mask));
+
+ if (firstFreeBit < 32)
+ {
+ int baseBit = index * 32 + 31;
+
+ id = baseBit - firstFreeBit;
+
+ break;
+ }
+ else if (index == IdMasksCount - 1)
+ {
+ throw new InvalidOperationException("Maximum number of Ids reached!");
+ }
+ }
+ }
+
+ _nextFreeBitHint = id + 1;
+
+ SetBit(id);
+
+ return id;
+ }
+ }
+
+ public void PutId(int id)
+ {
+ lock (_idMasks)
+ {
+ ClearBit(id);
+ }
+ }
+
+ private bool TestBit(int bit)
+ {
+ return (_idMasks[bit / 32] & (1 << (bit & 31))) != 0;
+ }
+
+ private void SetBit(int bit)
+ {
+ _idMasks[bit / 32] |= (1 << (bit & 31));
+ }
+
+ private void ClearBit(int bit)
+ {
+ _idMasks[bit / 32] &= ~(1 << (bit & 31));
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/KHandleEntry.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/KHandleEntry.cs
new file mode 100644
index 00000000..b5ca9b5e
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/KHandleEntry.cs
@@ -0,0 +1,19 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ class KHandleEntry
+ {
+ public KHandleEntry Next { get; set; }
+
+ public int Index { get; private set; }
+
+ public ushort HandleId { get; set; }
+ public KAutoObject Obj { get; set; }
+
+ public KHandleEntry(int index)
+ {
+ Index = index;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/KHandleTable.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/KHandleTable.cs
new file mode 100644
index 00000000..50f04e90
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/KHandleTable.cs
@@ -0,0 +1,285 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using Ryujinx.Horizon.Common;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ class KHandleTable
+ {
+ public const int SelfThreadHandle = (0x1ffff << 15) | 0;
+ public const int SelfProcessHandle = (0x1ffff << 15) | 1;
+
+ private readonly KernelContext _context;
+
+ private KHandleEntry[] _table;
+
+ private KHandleEntry _tableHead;
+ private KHandleEntry _nextFreeEntry;
+
+ private int _activeSlotsCount;
+
+ private uint _size;
+
+ private ushort _idCounter;
+
+ public KHandleTable(KernelContext context)
+ {
+ _context = context;
+ }
+
+ public Result Initialize(uint size)
+ {
+ if (size > 1024)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ if (size < 1)
+ {
+ size = 1024;
+ }
+
+ _size = size;
+
+ _idCounter = 1;
+
+ _table = new KHandleEntry[size];
+
+ _tableHead = new KHandleEntry(0);
+
+ KHandleEntry entry = _tableHead;
+
+ for (int index = 0; index < size; index++)
+ {
+ _table[index] = entry;
+
+ entry.Next = new KHandleEntry(index + 1);
+
+ entry = entry.Next;
+ }
+
+ _table[size - 1].Next = null;
+
+ _nextFreeEntry = _tableHead;
+
+ return Result.Success;
+ }
+
+ public Result GenerateHandle(KAutoObject obj, out int handle)
+ {
+ handle = 0;
+
+ lock (_table)
+ {
+ if (_activeSlotsCount >= _size)
+ {
+ return KernelResult.HandleTableFull;
+ }
+
+ KHandleEntry entry = _nextFreeEntry;
+
+ _nextFreeEntry = entry.Next;
+
+ entry.Obj = obj;
+ entry.HandleId = _idCounter;
+
+ _activeSlotsCount++;
+
+ handle = (_idCounter << 15) | entry.Index;
+
+ obj.IncrementReferenceCount();
+
+ if ((short)(_idCounter + 1) >= 0)
+ {
+ _idCounter++;
+ }
+ else
+ {
+ _idCounter = 1;
+ }
+ }
+
+ return Result.Success;
+ }
+
+ public Result ReserveHandle(out int handle)
+ {
+ handle = 0;
+
+ lock (_table)
+ {
+ if (_activeSlotsCount >= _size)
+ {
+ return KernelResult.HandleTableFull;
+ }
+
+ KHandleEntry entry = _nextFreeEntry;
+
+ _nextFreeEntry = entry.Next;
+
+ _activeSlotsCount++;
+
+ handle = (_idCounter << 15) | entry.Index;
+
+ if ((short)(_idCounter + 1) >= 0)
+ {
+ _idCounter++;
+ }
+ else
+ {
+ _idCounter = 1;
+ }
+ }
+
+ return Result.Success;
+ }
+
+ public void CancelHandleReservation(int handle)
+ {
+ int index = (handle >> 0) & 0x7fff;
+
+ lock (_table)
+ {
+ KHandleEntry entry = _table[index];
+
+ entry.Obj = null;
+ entry.Next = _nextFreeEntry;
+
+ _nextFreeEntry = entry;
+
+ _activeSlotsCount--;
+ }
+ }
+
+ public void SetReservedHandleObj(int handle, KAutoObject obj)
+ {
+ int index = (handle >> 0) & 0x7fff;
+ int handleId = (handle >> 15);
+
+ lock (_table)
+ {
+ KHandleEntry entry = _table[index];
+
+ entry.Obj = obj;
+ entry.HandleId = (ushort)handleId;
+
+ obj.IncrementReferenceCount();
+ }
+ }
+
+ public bool CloseHandle(int handle)
+ {
+ if ((handle >> 30) != 0 ||
+ handle == SelfThreadHandle ||
+ handle == SelfProcessHandle)
+ {
+ return false;
+ }
+
+ int index = (handle >> 0) & 0x7fff;
+ int handleId = (handle >> 15);
+
+ KAutoObject obj = null;
+
+ bool result = false;
+
+ lock (_table)
+ {
+ if (handleId != 0 && index < _size)
+ {
+ KHandleEntry entry = _table[index];
+
+ if ((obj = entry.Obj) != null && entry.HandleId == handleId)
+ {
+ entry.Obj = null;
+ entry.Next = _nextFreeEntry;
+
+ _nextFreeEntry = entry;
+
+ _activeSlotsCount--;
+
+ result = true;
+ }
+ }
+ }
+
+ if (result)
+ {
+ obj.DecrementReferenceCount();
+ }
+
+ return result;
+ }
+
+ public T GetObject<T>(int handle) where T : KAutoObject
+ {
+ int index = (handle >> 0) & 0x7fff;
+ int handleId = (handle >> 15);
+
+ lock (_table)
+ {
+ if ((handle >> 30) == 0 && handleId != 0 && index < _size)
+ {
+ KHandleEntry entry = _table[index];
+
+ if (entry.HandleId == handleId && entry.Obj is T obj)
+ {
+ return obj;
+ }
+ }
+ }
+
+ return default;
+ }
+
+ public KThread GetKThread(int handle)
+ {
+ if (handle == SelfThreadHandle)
+ {
+ return KernelStatic.GetCurrentThread();
+ }
+ else
+ {
+ return GetObject<KThread>(handle);
+ }
+ }
+
+ public KProcess GetKProcess(int handle)
+ {
+ if (handle == SelfProcessHandle)
+ {
+ return KernelStatic.GetCurrentProcess();
+ }
+ else
+ {
+ return GetObject<KProcess>(handle);
+ }
+ }
+
+ public void Destroy()
+ {
+ lock (_table)
+ {
+ for (int index = 0; index < _size; index++)
+ {
+ KHandleEntry entry = _table[index];
+
+ if (entry.Obj != null)
+ {
+ if (entry.Obj is IDisposable disposableObj)
+ {
+ disposableObj.Dispose();
+ }
+
+ entry.Obj.DecrementReferenceCount();
+ entry.Obj = null;
+ entry.Next = _nextFreeEntry;
+
+ _nextFreeEntry = entry;
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs
new file mode 100644
index 00000000..21e89944
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs
@@ -0,0 +1,1196 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Logging;
+using Ryujinx.Cpu;
+using Ryujinx.HLE.Exceptions;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using Ryujinx.Horizon.Common;
+using Ryujinx.Memory;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ class KProcess : KSynchronizationObject
+ {
+ public const uint KernelVersionMajor = 10;
+ public const uint KernelVersionMinor = 4;
+ public const uint KernelVersionRevision = 0;
+
+ public const uint KernelVersionPacked =
+ (KernelVersionMajor << 19) |
+ (KernelVersionMinor << 15) |
+ (KernelVersionRevision << 0);
+
+ public KPageTableBase MemoryManager { get; private set; }
+
+ private SortedDictionary<ulong, KTlsPageInfo> _fullTlsPages;
+ private SortedDictionary<ulong, KTlsPageInfo> _freeTlsPages;
+
+ public int DefaultCpuCore { get; set; }
+
+ public bool Debug { get; private set; }
+
+ public KResourceLimit ResourceLimit { get; private set; }
+
+ public ulong PersonalMmHeapPagesCount { get; private set; }
+
+ public ProcessState State { get; private set; }
+
+ private object _processLock;
+ private object _threadingLock;
+
+ public KAddressArbiter AddressArbiter { get; private set; }
+
+ public ulong[] RandomEntropy { get; private set; }
+ public KThread[] PinnedThreads { get; private set; }
+
+ private bool _signaled;
+
+ public string Name { get; private set; }
+
+ private int _threadCount;
+
+ public ProcessCreationFlags Flags { get; private set; }
+
+ private MemoryRegion _memRegion;
+
+ public KProcessCapabilities Capabilities { get; private set; }
+
+ public bool AllowCodeMemoryForJit { get; private set; }
+
+ public ulong TitleId { get; private set; }
+ public bool IsApplication { get; private set; }
+ public ulong Pid { get; private set; }
+
+ private long _creationTimestamp;
+ private ulong _entrypoint;
+ private ThreadStart _customThreadStart;
+ private ulong _imageSize;
+ private ulong _mainThreadStackSize;
+ private ulong _memoryUsageCapacity;
+ private int _version;
+
+ public KHandleTable HandleTable { get; private set; }
+
+ public ulong UserExceptionContextAddress { get; private set; }
+
+ private LinkedList<KThread> _threads;
+
+ public bool IsPaused { get; private set; }
+
+ private long _totalTimeRunning;
+
+ public long TotalTimeRunning => _totalTimeRunning;
+
+ private IProcessContextFactory _contextFactory;
+ public IProcessContext Context { get; private set; }
+ public IVirtualMemoryManager CpuMemory => Context.AddressSpace;
+
+ public HleProcessDebugger Debugger { get; private set; }
+
+ public KProcess(KernelContext context, bool allowCodeMemoryForJit = false) : base(context)
+ {
+ _processLock = new object();
+ _threadingLock = new object();
+
+ AddressArbiter = new KAddressArbiter(context);
+
+ _fullTlsPages = new SortedDictionary<ulong, KTlsPageInfo>();
+ _freeTlsPages = new SortedDictionary<ulong, KTlsPageInfo>();
+
+ Capabilities = new KProcessCapabilities();
+
+ AllowCodeMemoryForJit = allowCodeMemoryForJit;
+
+ RandomEntropy = new ulong[KScheduler.CpuCoresCount];
+ PinnedThreads = new KThread[KScheduler.CpuCoresCount];
+
+ // TODO: Remove once we no longer need to initialize it externally.
+ HandleTable = new KHandleTable(context);
+
+ _threads = new LinkedList<KThread>();
+
+ Debugger = new HleProcessDebugger(this);
+ }
+
+ public Result InitializeKip(
+ ProcessCreationInfo creationInfo,
+ ReadOnlySpan<uint> capabilities,
+ KPageList pageList,
+ KResourceLimit resourceLimit,
+ MemoryRegion memRegion,
+ IProcessContextFactory contextFactory,
+ ThreadStart customThreadStart = null)
+ {
+ ResourceLimit = resourceLimit;
+ _memRegion = memRegion;
+ _contextFactory = contextFactory ?? new ProcessContextFactory();
+ _customThreadStart = customThreadStart;
+
+ AddressSpaceType addrSpaceType = (AddressSpaceType)((int)(creationInfo.Flags & ProcessCreationFlags.AddressSpaceMask) >> (int)ProcessCreationFlags.AddressSpaceShift);
+
+ Pid = KernelContext.NewKipId();
+
+ if (Pid == 0 || Pid >= KernelConstants.InitialProcessId)
+ {
+ throw new InvalidOperationException($"Invalid KIP Id {Pid}.");
+ }
+
+ InitializeMemoryManager(creationInfo.Flags);
+
+ bool aslrEnabled = creationInfo.Flags.HasFlag(ProcessCreationFlags.EnableAslr);
+
+ ulong codeAddress = creationInfo.CodeAddress;
+
+ ulong codeSize = (ulong)creationInfo.CodePagesCount * KPageTableBase.PageSize;
+
+ KMemoryBlockSlabManager slabManager = creationInfo.Flags.HasFlag(ProcessCreationFlags.IsApplication)
+ ? KernelContext.LargeMemoryBlockSlabManager
+ : KernelContext.SmallMemoryBlockSlabManager;
+
+ Result result = MemoryManager.InitializeForProcess(
+ addrSpaceType,
+ aslrEnabled,
+ !aslrEnabled,
+ memRegion,
+ codeAddress,
+ codeSize,
+ slabManager);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ if (!MemoryManager.CanContain(codeAddress, codeSize, MemoryState.CodeStatic))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ result = MemoryManager.MapPages(codeAddress, pageList, MemoryState.CodeStatic, KMemoryPermission.None);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ result = Capabilities.InitializeForKernel(capabilities, MemoryManager);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ return ParseProcessInfo(creationInfo);
+ }
+
+ public Result Initialize(
+ ProcessCreationInfo creationInfo,
+ ReadOnlySpan<uint> capabilities,
+ KResourceLimit resourceLimit,
+ MemoryRegion memRegion,
+ IProcessContextFactory contextFactory,
+ ThreadStart customThreadStart = null)
+ {
+ ResourceLimit = resourceLimit;
+ _memRegion = memRegion;
+ _contextFactory = contextFactory ?? new ProcessContextFactory();
+ _customThreadStart = customThreadStart;
+ IsApplication = creationInfo.Flags.HasFlag(ProcessCreationFlags.IsApplication);
+
+ ulong personalMmHeapSize = GetPersonalMmHeapSize((ulong)creationInfo.SystemResourcePagesCount, memRegion);
+
+ ulong codePagesCount = (ulong)creationInfo.CodePagesCount;
+
+ ulong neededSizeForProcess = personalMmHeapSize + codePagesCount * KPageTableBase.PageSize;
+
+ if (neededSizeForProcess != 0 && resourceLimit != null)
+ {
+ if (!resourceLimit.Reserve(LimitableResource.Memory, neededSizeForProcess))
+ {
+ return KernelResult.ResLimitExceeded;
+ }
+ }
+
+ void CleanUpForError()
+ {
+ if (neededSizeForProcess != 0 && resourceLimit != null)
+ {
+ resourceLimit.Release(LimitableResource.Memory, neededSizeForProcess);
+ }
+ }
+
+ PersonalMmHeapPagesCount = (ulong)creationInfo.SystemResourcePagesCount;
+
+ KMemoryBlockSlabManager slabManager;
+
+ if (PersonalMmHeapPagesCount != 0)
+ {
+ slabManager = new KMemoryBlockSlabManager(PersonalMmHeapPagesCount * KPageTableBase.PageSize);
+ }
+ else
+ {
+ slabManager = creationInfo.Flags.HasFlag(ProcessCreationFlags.IsApplication)
+ ? KernelContext.LargeMemoryBlockSlabManager
+ : KernelContext.SmallMemoryBlockSlabManager;
+ }
+
+ AddressSpaceType addrSpaceType = (AddressSpaceType)((int)(creationInfo.Flags & ProcessCreationFlags.AddressSpaceMask) >> (int)ProcessCreationFlags.AddressSpaceShift);
+
+ Pid = KernelContext.NewProcessId();
+
+ if (Pid == ulong.MaxValue || Pid < KernelConstants.InitialProcessId)
+ {
+ throw new InvalidOperationException($"Invalid Process Id {Pid}.");
+ }
+
+ InitializeMemoryManager(creationInfo.Flags);
+
+ bool aslrEnabled = creationInfo.Flags.HasFlag(ProcessCreationFlags.EnableAslr);
+
+ ulong codeAddress = creationInfo.CodeAddress;
+
+ ulong codeSize = codePagesCount * KPageTableBase.PageSize;
+
+ Result result = MemoryManager.InitializeForProcess(
+ addrSpaceType,
+ aslrEnabled,
+ !aslrEnabled,
+ memRegion,
+ codeAddress,
+ codeSize,
+ slabManager);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ if (!MemoryManager.CanContain(codeAddress, codeSize, MemoryState.CodeStatic))
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidMemRange;
+ }
+
+ result = MemoryManager.MapPages(
+ codeAddress,
+ codePagesCount,
+ MemoryState.CodeStatic,
+ KMemoryPermission.None);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ result = Capabilities.InitializeForUser(capabilities, MemoryManager);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ result = ParseProcessInfo(creationInfo);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+ }
+
+ return result;
+ }
+
+ private Result ParseProcessInfo(ProcessCreationInfo creationInfo)
+ {
+ // Ensure that the current kernel version is equal or above to the minimum required.
+ uint requiredKernelVersionMajor = (uint)Capabilities.KernelReleaseVersion >> 19;
+ uint requiredKernelVersionMinor = ((uint)Capabilities.KernelReleaseVersion >> 15) & 0xf;
+
+ if (KernelContext.EnableVersionChecks)
+ {
+ if (requiredKernelVersionMajor > KernelVersionMajor)
+ {
+ return KernelResult.InvalidCombination;
+ }
+
+ if (requiredKernelVersionMajor != KernelVersionMajor && requiredKernelVersionMajor < 3)
+ {
+ return KernelResult.InvalidCombination;
+ }
+
+ if (requiredKernelVersionMinor > KernelVersionMinor)
+ {
+ return KernelResult.InvalidCombination;
+ }
+ }
+
+ Result result = AllocateThreadLocalStorage(out ulong userExceptionContextAddress);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ UserExceptionContextAddress = userExceptionContextAddress;
+
+ MemoryHelper.FillWithZeros(CpuMemory, userExceptionContextAddress, KTlsPageInfo.TlsEntrySize);
+
+ Name = creationInfo.Name;
+
+ State = ProcessState.Created;
+
+ _creationTimestamp = PerformanceCounter.ElapsedMilliseconds;
+
+ Flags = creationInfo.Flags;
+ _version = creationInfo.Version;
+ TitleId = creationInfo.TitleId;
+ _entrypoint = creationInfo.CodeAddress;
+ _imageSize = (ulong)creationInfo.CodePagesCount * KPageTableBase.PageSize;
+
+ switch (Flags & ProcessCreationFlags.AddressSpaceMask)
+ {
+ case ProcessCreationFlags.AddressSpace32Bit:
+ case ProcessCreationFlags.AddressSpace64BitDeprecated:
+ case ProcessCreationFlags.AddressSpace64Bit:
+ _memoryUsageCapacity = MemoryManager.HeapRegionEnd -
+ MemoryManager.HeapRegionStart;
+ break;
+
+ case ProcessCreationFlags.AddressSpace32BitWithoutAlias:
+ _memoryUsageCapacity = MemoryManager.HeapRegionEnd -
+ MemoryManager.HeapRegionStart +
+ MemoryManager.AliasRegionEnd -
+ MemoryManager.AliasRegionStart;
+ break;
+
+ default: throw new InvalidOperationException($"Invalid MMU flags value 0x{Flags:x2}.");
+ }
+
+ GenerateRandomEntropy();
+
+ return Result.Success;
+ }
+
+ public Result AllocateThreadLocalStorage(out ulong address)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ Result result;
+
+ if (_freeTlsPages.Count > 0)
+ {
+ // If we have free TLS pages available, just use the first one.
+ KTlsPageInfo pageInfo = _freeTlsPages.Values.First();
+
+ if (!pageInfo.TryGetFreePage(out address))
+ {
+ throw new InvalidOperationException("Unexpected failure getting free TLS page!");
+ }
+
+ if (pageInfo.IsFull())
+ {
+ _freeTlsPages.Remove(pageInfo.PageVirtualAddress);
+
+ _fullTlsPages.Add(pageInfo.PageVirtualAddress, pageInfo);
+ }
+
+ result = Result.Success;
+ }
+ else
+ {
+ // Otherwise, we need to create a new one.
+ result = AllocateTlsPage(out KTlsPageInfo pageInfo);
+
+ if (result == Result.Success)
+ {
+ if (!pageInfo.TryGetFreePage(out address))
+ {
+ throw new InvalidOperationException("Unexpected failure getting free TLS page!");
+ }
+
+ _freeTlsPages.Add(pageInfo.PageVirtualAddress, pageInfo);
+ }
+ else
+ {
+ address = 0;
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ return result;
+ }
+
+ private Result AllocateTlsPage(out KTlsPageInfo pageInfo)
+ {
+ pageInfo = default;
+
+ if (!KernelContext.UserSlabHeapPages.TryGetItem(out ulong tlsPagePa))
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ ulong regionStart = MemoryManager.TlsIoRegionStart;
+ ulong regionSize = MemoryManager.TlsIoRegionEnd - regionStart;
+
+ ulong regionPagesCount = regionSize / KPageTableBase.PageSize;
+
+ Result result = MemoryManager.MapPages(
+ 1,
+ KPageTableBase.PageSize,
+ tlsPagePa,
+ true,
+ regionStart,
+ regionPagesCount,
+ MemoryState.ThreadLocal,
+ KMemoryPermission.ReadAndWrite,
+ out ulong tlsPageVa);
+
+ if (result != Result.Success)
+ {
+ KernelContext.UserSlabHeapPages.Free(tlsPagePa);
+ }
+ else
+ {
+ pageInfo = new KTlsPageInfo(tlsPageVa, tlsPagePa);
+
+ MemoryHelper.FillWithZeros(CpuMemory, tlsPageVa, KPageTableBase.PageSize);
+ }
+
+ return result;
+ }
+
+ public Result FreeThreadLocalStorage(ulong tlsSlotAddr)
+ {
+ ulong tlsPageAddr = BitUtils.AlignDown<ulong>(tlsSlotAddr, KPageTableBase.PageSize);
+
+ KernelContext.CriticalSection.Enter();
+
+ Result result = Result.Success;
+
+ KTlsPageInfo pageInfo;
+
+ if (_fullTlsPages.TryGetValue(tlsPageAddr, out pageInfo))
+ {
+ // TLS page was full, free slot and move to free pages tree.
+ _fullTlsPages.Remove(tlsPageAddr);
+
+ _freeTlsPages.Add(tlsPageAddr, pageInfo);
+ }
+ else if (!_freeTlsPages.TryGetValue(tlsPageAddr, out pageInfo))
+ {
+ result = KernelResult.InvalidAddress;
+ }
+
+ if (pageInfo != null)
+ {
+ pageInfo.FreeTlsSlot(tlsSlotAddr);
+
+ if (pageInfo.IsEmpty())
+ {
+ // TLS page is now empty, we should ensure it is removed
+ // from all trees, and free the memory it was using.
+ _freeTlsPages.Remove(tlsPageAddr);
+
+ KernelContext.CriticalSection.Leave();
+
+ FreeTlsPage(pageInfo);
+
+ return Result.Success;
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ return result;
+ }
+
+ // Unmaps an empty TLS page from the process address space and, on success,
+ // returns its backing physical page to the user slab heap.
+ private Result FreeTlsPage(KTlsPageInfo pageInfo)
+ {
+ Result result = MemoryManager.UnmapForKernel(pageInfo.PageVirtualAddress, 1, MemoryState.ThreadLocal);
+
+ if (result == Result.Success)
+ {
+ KernelContext.UserSlabHeapPages.Free(pageInfo.PagePhysicalAddress);
+ }
+
+ return result;
+ }
+
+ // Placeholder for per-process random entropy generation (queried via the
+ // RandomEntropy info type); not implemented yet.
+ private void GenerateRandomEntropy()
+ {
+ // TODO.
+ }
+
+ // Starts the process: reserves thread and stack-memory resources, maps the
+ // main thread stack, sets the heap capacity, creates the handle table and
+ // the main thread, and transitions the process to Started (or Attached when
+ // it was created attached to a debugger). Every failure path rolls back all
+ // work done so far via the local CleanUpForError helper.
+ public Result Start(int mainThreadPriority, ulong stackSize)
+ {
+ lock (_processLock)
+ {
+ if (State > ProcessState.CreatedAttached)
+ {
+ return KernelResult.InvalidState;
+ }
+
+ if (ResourceLimit != null && !ResourceLimit.Reserve(LimitableResource.Thread, 1))
+ {
+ return KernelResult.ResLimitExceeded;
+ }
+
+ KResourceLimit threadResourceLimit = ResourceLimit;
+ KResourceLimit memoryResourceLimit = null;
+
+ // A non-zero main thread stack size means Start already ran before.
+ if (_mainThreadStackSize != 0)
+ {
+ throw new InvalidOperationException("Trying to start a process with a invalid state!");
+ }
+
+ ulong stackSizeRounded = BitUtils.AlignUp<ulong>(stackSize, KPageTableBase.PageSize);
+
+ ulong neededSize = stackSizeRounded + _imageSize;
+
+ // Check if the needed size for the code and the stack will fit on the
+ // memory usage capacity of this Process. Also check for possible overflow
+ // on the above addition.
+ if (neededSize > _memoryUsageCapacity || neededSize < stackSizeRounded)
+ {
+ threadResourceLimit?.Release(LimitableResource.Thread, 1);
+
+ return KernelResult.OutOfMemory;
+ }
+
+ if (stackSizeRounded != 0 && ResourceLimit != null)
+ {
+ memoryResourceLimit = ResourceLimit;
+
+ if (!memoryResourceLimit.Reserve(LimitableResource.Memory, stackSizeRounded))
+ {
+ threadResourceLimit?.Release(LimitableResource.Thread, 1);
+
+ return KernelResult.ResLimitExceeded;
+ }
+ }
+
+ Result result;
+
+ KThread mainThread = null;
+
+ ulong stackTop = 0;
+
+ // Rolls back everything done so far on a failure path: destroys the
+ // handle table, drops the main thread reference, unmaps the stack and
+ // releases the reserved resource limits.
+ void CleanUpForError()
+ {
+ HandleTable.Destroy();
+
+ mainThread?.DecrementReferenceCount();
+
+ if (_mainThreadStackSize != 0)
+ {
+ ulong stackBottom = stackTop - _mainThreadStackSize;
+
+ ulong stackPagesCount = _mainThreadStackSize / KPageTableBase.PageSize;
+
+ MemoryManager.UnmapForKernel(stackBottom, stackPagesCount, MemoryState.Stack);
+
+ _mainThreadStackSize = 0;
+ }
+
+ memoryResourceLimit?.Release(LimitableResource.Memory, stackSizeRounded);
+ threadResourceLimit?.Release(LimitableResource.Thread, 1);
+ }
+
+ if (stackSizeRounded != 0)
+ {
+ ulong stackPagesCount = stackSizeRounded / KPageTableBase.PageSize;
+
+ ulong regionStart = MemoryManager.StackRegionStart;
+ ulong regionSize = MemoryManager.StackRegionEnd - regionStart;
+
+ ulong regionPagesCount = regionSize / KPageTableBase.PageSize;
+
+ // Map the main thread stack anywhere inside the stack region.
+ result = MemoryManager.MapPages(
+ stackPagesCount,
+ KPageTableBase.PageSize,
+ 0,
+ false,
+ regionStart,
+ regionPagesCount,
+ MemoryState.Stack,
+ KMemoryPermission.ReadAndWrite,
+ out ulong stackBottom);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ _mainThreadStackSize += stackSizeRounded;
+
+ stackTop = stackBottom + stackSizeRounded;
+ }
+
+ // Whatever capacity is not used by the image and stack goes to the heap.
+ ulong heapCapacity = _memoryUsageCapacity - _mainThreadStackSize - _imageSize;
+
+ result = MemoryManager.SetHeapCapacity(heapCapacity);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ HandleTable = new KHandleTable(KernelContext);
+
+ result = HandleTable.Initialize(Capabilities.HandleTableSize);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ mainThread = new KThread(KernelContext);
+
+ result = mainThread.Initialize(
+ _entrypoint,
+ 0,
+ stackTop,
+ mainThreadPriority,
+ DefaultCpuCore,
+ this,
+ ThreadType.User,
+ _customThreadStart);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ result = HandleTable.GenerateHandle(mainThread, out int mainThreadHandle);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ // The main thread receives its own handle as second entry argument.
+ mainThread.SetEntryArguments(0, mainThreadHandle);
+
+ ProcessState oldState = State;
+ ProcessState newState = State != ProcessState.Created
+ ? ProcessState.Attached
+ : ProcessState.Started;
+
+ SetState(newState);
+
+ result = mainThread.Start();
+
+ if (result != Result.Success)
+ {
+ SetState(oldState);
+
+ CleanUpForError();
+ }
+
+ // On success, take an extra reference for the now-running thread before
+ // the creation reference is unconditionally dropped below.
+ if (result == Result.Success)
+ {
+ mainThread.IncrementReferenceCount();
+ }
+
+ mainThread.DecrementReferenceCount();
+
+ return result;
+ }
+ }
+
+ // Transitions the process to a new state and, if it actually changed,
+ // signals the process as a synchronization object so waiters wake up.
+ private void SetState(ProcessState newState)
+ {
+ if (State != newState)
+ {
+ State = newState;
+ _signaled = true;
+
+ Signal();
+ }
+ }
+
+ // Initializes a user thread belonging to this process, under the process
+ // lock so it cannot race with Start/Terminate.
+ public Result InitializeThread(
+ KThread thread,
+ ulong entrypoint,
+ ulong argsPtr,
+ ulong stackTop,
+ int priority,
+ int cpuCore,
+ ThreadStart customThreadStart = null)
+ {
+ lock (_processLock)
+ {
+ return thread.Initialize(entrypoint, argsPtr, stackTop, priority, cpuCore, this, ThreadType.User, customThreadStart);
+ }
+ }
+
+ // Creates a CPU execution context for this process, wiring up the interrupt,
+ // SVC and undefined-instruction handlers. Returns null when the process has
+ // no context (not yet initialized).
+ public IExecutionContext CreateExecutionContext()
+ {
+ return Context?.CreateExecutionContext(new ExceptionCallbacks(
+ InterruptHandler,
+ null,
+ KernelContext.SyscallHandler.SvcCall,
+ UndefinedInstructionHandler));
+ }
+
+ // Called when the emulated CPU takes an interrupt on a thread of this
+ // process. Pins the current thread if it is running with interrupts
+ // "disabled" from user code (non-zero user disable count) and no thread is
+ // pinned on its core yet, then reschedules and runs post-syscall work.
+ private void InterruptHandler(IExecutionContext context)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ if (currentThread.Context.Running &&
+ currentThread.Owner != null &&
+ currentThread.GetUserDisableCount() != 0 &&
+ currentThread.Owner.PinnedThreads[currentThread.CurrentCore] == null)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ currentThread.Owner.PinThread(currentThread);
+
+ currentThread.SetUserInterruptFlag();
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ if (currentThread.IsSchedulable)
+ {
+ KernelContext.Schedulers[currentThread.CurrentCore].Schedule();
+ }
+
+ currentThread.HandlePostSyscall();
+ }
+
+ // Increments the live thread count of this process.
+ public void IncrementThreadCount()
+ {
+ Interlocked.Increment(ref _threadCount);
+ }
+
+ // Decrements the live thread count and terminates the whole process when
+ // the last thread exits.
+ public void DecrementThreadCountAndTerminateIfZero()
+ {
+ if (Interlocked.Decrement(ref _threadCount) == 0)
+ {
+ Terminate();
+ }
+ }
+
+ // Drains the thread count to zero while repeatedly tearing down the current
+ // process; used on the process-exit path.
+ public void DecrementToZeroWhileTerminatingCurrent()
+ {
+ while (Interlocked.Decrement(ref _threadCount) != 0)
+ {
+ Destroy();
+ TerminateCurrentProcess();
+ }
+
+ // Nintendo panic here because if it reaches this point, the current thread should be already dead.
+ // As we handle the death of the thread in the post SVC handler and inside the CPU emulator, we don't panic here.
+ }
+
+ // Total memory capacity of the process: remaining resource-limit memory plus
+ // what is already in use (heap, personal mm heap, image and main stack),
+ // clamped to the process memory usage capacity.
+ public ulong GetMemoryCapacity()
+ {
+ ulong totalCapacity = (ulong)ResourceLimit.GetRemainingValue(LimitableResource.Memory);
+
+ totalCapacity += MemoryManager.GetTotalHeapSize();
+
+ totalCapacity += GetPersonalMmHeapSize();
+
+ totalCapacity += _imageSize + _mainThreadStackSize;
+
+ if (totalCapacity <= _memoryUsageCapacity)
+ {
+ return totalCapacity;
+ }
+
+ return _memoryUsageCapacity;
+ }
+
+ // Memory currently in use: image + main stack + heap + personal mm heap.
+ public ulong GetMemoryUsage()
+ {
+ return _imageSize + _mainThreadStackSize + MemoryManager.GetTotalHeapSize() + GetPersonalMmHeapSize();
+ }
+
+ // Same as GetMemoryCapacity, excluding the personal mm heap.
+ public ulong GetMemoryCapacityWithoutPersonalMmHeap()
+ {
+ return GetMemoryCapacity() - GetPersonalMmHeapSize();
+ }
+
+ // Same as GetMemoryUsage, excluding the personal mm heap.
+ public ulong GetMemoryUsageWithoutPersonalMmHeap()
+ {
+ return GetMemoryUsage() - GetPersonalMmHeapSize();
+ }
+
+ private ulong GetPersonalMmHeapSize()
+ {
+ return GetPersonalMmHeapSize(PersonalMmHeapPagesCount, _memRegion);
+ }
+
+ // Personal mm heap size in bytes; applets never have a personal mm heap.
+ private static ulong GetPersonalMmHeapSize(ulong personalMmHeapPagesCount, MemoryRegion memRegion)
+ {
+ if (memRegion == MemoryRegion.Applet)
+ {
+ return 0;
+ }
+
+ return personalMmHeapPagesCount * KPageTableBase.PageSize;
+ }
+
+ // Accumulates CPU time (in ticks) consumed by threads of this process.
+ public void AddCpuTime(long ticks)
+ {
+ Interlocked.Add(ref _totalTimeRunning, ticks);
+ }
+
+ // Registers a thread with this process, remembering its list node so
+ // removal is O(1).
+ public void AddThread(KThread thread)
+ {
+ lock (_threadingLock)
+ {
+ thread.ProcessListNode = _threads.AddLast(thread);
+ }
+ }
+
+ // Unregisters a thread previously added with AddThread.
+ public void RemoveThread(KThread thread)
+ {
+ lock (_threadingLock)
+ {
+ _threads.Remove(thread.ProcessListNode);
+ }
+ }
+
+ // True when the capability mask permits scheduling on the given core.
+ public bool IsCpuCoreAllowed(int core)
+ {
+ return (Capabilities.AllowedCpuCoresMask & (1UL << core)) != 0;
+ }
+
+ // True when the capability mask permits the given thread priority.
+ public bool IsPriorityAllowed(int priority)
+ {
+ return (Capabilities.AllowedThreadPriosMask & (1UL << priority)) != 0;
+ }
+
+ // Synchronization object contract: signaled on every state change until
+ // cleared by ClearIfNotExited.
+ public override bool IsSignaled()
+ {
+ return _signaled;
+ }
+
+ // Terminates the process from the outside: moves it to Exiting (when it is
+ // in a running-like state), then terminates all of its threads except the
+ // caller, destroys the handle table and signals exit.
+ // Returns InvalidState when the process was never started.
+ public Result Terminate()
+ {
+ Result result;
+
+ bool shallTerminate = false;
+
+ KernelContext.CriticalSection.Enter();
+
+ lock (_processLock)
+ {
+ if (State >= ProcessState.Started)
+ {
+ if (State == ProcessState.Started ||
+ State == ProcessState.Crashed ||
+ State == ProcessState.Attached ||
+ State == ProcessState.DebugSuspended)
+ {
+ SetState(ProcessState.Exiting);
+
+ shallTerminate = true;
+ }
+
+ result = Result.Success;
+ }
+ else
+ {
+ result = KernelResult.InvalidState;
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ if (shallTerminate)
+ {
+ UnpauseAndTerminateAllThreadsExcept(KernelStatic.GetCurrentThread());
+
+ HandleTable.Destroy();
+
+ SignalExitToDebugTerminated();
+ SignalExit();
+ }
+
+ return result;
+ }
+
+ // Terminates the process from one of its own threads. Unlike Terminate,
+ // Crashed is not an eligible state here, and the calling thread exits at
+ // the end regardless of whether the process state changed.
+ public void TerminateCurrentProcess()
+ {
+ bool shallTerminate = false;
+
+ KernelContext.CriticalSection.Enter();
+
+ lock (_processLock)
+ {
+ if (State >= ProcessState.Started)
+ {
+ if (State == ProcessState.Started ||
+ State == ProcessState.Attached ||
+ State == ProcessState.DebugSuspended)
+ {
+ SetState(ProcessState.Exiting);
+
+ shallTerminate = true;
+ }
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ if (shallTerminate)
+ {
+ UnpauseAndTerminateAllThreadsExcept(KernelStatic.GetCurrentThread());
+
+ HandleTable.Destroy();
+
+ // NOTE: this is supposed to be called in receiving of the mailbox.
+ SignalExitToDebugExited();
+ SignalExit();
+ }
+
+ KernelStatic.GetCurrentThread().Exit();
+ }
+
+ // Requests termination of every thread of the process except currentThread,
+ // then waits for them to finish. The thread list lock is released between
+ // iterations because Terminate on a thread can block; a reference is taken
+ // on each candidate so it stays valid while we are not holding the lock.
+ private void UnpauseAndTerminateAllThreadsExcept(KThread currentThread)
+ {
+ lock (_threadingLock)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (currentThread != null && PinnedThreads[currentThread.CurrentCore] == currentThread)
+ {
+ UnpinThread(currentThread);
+ }
+
+ // First pass: flag every other thread for termination.
+ foreach (KThread thread in _threads)
+ {
+ if (thread != currentThread && (thread.SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.TerminationPending)
+ {
+ thread.PrepareForTermination();
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ // Second pass: terminate threads one at a time until none remain that are
+ // not already pending termination.
+ while (true)
+ {
+ KThread blockedThread = null;
+
+ lock (_threadingLock)
+ {
+ foreach (KThread thread in _threads)
+ {
+ if (thread != currentThread && (thread.SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.TerminationPending)
+ {
+ thread.IncrementReferenceCount();
+
+ blockedThread = thread;
+ break;
+ }
+ }
+ }
+
+ if (blockedThread == null)
+ {
+ break;
+ }
+
+ blockedThread.Terminate();
+ blockedThread.DecrementReferenceCount();
+ }
+ }
+
+ // Debugger notification for external termination; not implemented yet.
+ private void SignalExitToDebugTerminated()
+ {
+ // TODO: Debug events.
+ }
+
+ // Debugger notification for self-initiated exit; not implemented yet.
+ private void SignalExitToDebugExited()
+ {
+ // TODO: Debug events.
+ }
+
+ // Final exit step: releases the process memory back to the resource limit
+ // and moves the process to the Exited state (which signals waiters).
+ private void SignalExit()
+ {
+ if (ResourceLimit != null)
+ {
+ ResourceLimit.Release(LimitableResource.Memory, GetMemoryUsage());
+ }
+
+ KernelContext.CriticalSection.Enter();
+
+ SetState(ProcessState.Exited);
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ // Clears the signaled flag (synchronization object reset) unless the
+ // process already exited or was not signaled; returns InvalidState then.
+ public Result ClearIfNotExited()
+ {
+ Result result;
+
+ KernelContext.CriticalSection.Enter();
+
+ lock (_processLock)
+ {
+ if (State != ProcessState.Exited && _signaled)
+ {
+ _signaled = false;
+
+ result = Result.Success;
+ }
+ else
+ {
+ result = KernelResult.InvalidState;
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ return result;
+ }
+
+ // Creates the CPU context and page table for the process. The address space
+ // width is derived from the creation flags (32, 36 or 39 bits, defaulting
+ // to 39 for unknown values).
+ private void InitializeMemoryManager(ProcessCreationFlags flags)
+ {
+ int addrSpaceBits = (flags & ProcessCreationFlags.AddressSpaceMask) switch
+ {
+ ProcessCreationFlags.AddressSpace32Bit => 32,
+ ProcessCreationFlags.AddressSpace64BitDeprecated => 36,
+ ProcessCreationFlags.AddressSpace32BitWithoutAlias => 32,
+ ProcessCreationFlags.AddressSpace64Bit => 39,
+ _ => 39
+ };
+
+ bool for64Bit = flags.HasFlag(ProcessCreationFlags.Is64Bit);
+
+ Context = _contextFactory.Create(KernelContext, Pid, 1UL << addrSpaceBits, InvalidAccessHandler, for64Bit);
+
+ MemoryManager = new KPageTable(KernelContext, CpuMemory);
+ }
+
+ // Invoked by the CPU emulator on an invalid guest memory access. Logs the
+ // fault with guest diagnostics; always returns false (access not handled).
+ private bool InvalidAccessHandler(ulong va)
+ {
+ KernelStatic.GetCurrentThread()?.PrintGuestStackTrace();
+ KernelStatic.GetCurrentThread()?.PrintGuestRegisterPrintout();
+
+ Logger.Error?.Print(LogClass.Cpu, $"Invalid memory access at virtual address 0x{va:X16}.");
+
+ return false;
+ }
+
+ // Invoked by the CPU emulator on an undefined instruction: dumps guest
+ // diagnostics then aborts emulation of the thread by throwing.
+ private void UndefinedInstructionHandler(IExecutionContext context, ulong address, int opCode)
+ {
+ KernelStatic.GetCurrentThread().PrintGuestStackTrace();
+ KernelStatic.GetCurrentThread()?.PrintGuestRegisterPrintout();
+
+ throw new UndefinedInstructionException(address, opCode);
+ }
+
+ // Releases the CPU context when the kernel object is destroyed.
+ protected override void Destroy() => Context.Dispose();
+
+ // Pauses (pause == true) or resumes all threads of the process using the
+ // process-pause suspend flag. Fails with InvalidState when the process is
+ // exiting/exited, or when the requested pause state is already in effect.
+ public Result SetActivity(bool pause)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (State != ProcessState.Exiting && State != ProcessState.Exited)
+ {
+ if (pause)
+ {
+ if (IsPaused)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+
+ lock (_threadingLock)
+ {
+ foreach (KThread thread in _threads)
+ {
+ thread.Suspend(ThreadSchedState.ProcessPauseFlag);
+ }
+ }
+
+ IsPaused = true;
+ }
+ else
+ {
+ if (!IsPaused)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+
+ lock (_threadingLock)
+ {
+ foreach (KThread thread in _threads)
+ {
+ thread.Resume(ThreadSchedState.ProcessPauseFlag);
+ }
+ }
+
+ IsPaused = false;
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ return Result.Success;
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+
+ // Pins a thread to its current core (one pinned thread per core) and
+ // requests a reschedule. No-op for threads already flagged for termination.
+ public void PinThread(KThread thread)
+ {
+ if (!thread.TerminationRequested)
+ {
+ PinnedThreads[thread.CurrentCore] = thread;
+
+ thread.Pin();
+
+ KernelContext.ThreadReselectionRequested = true;
+ }
+ }
+
+ // Reverses PinThread for the given thread and requests a reschedule.
+ public void UnpinThread(KThread thread)
+ {
+ if (!thread.TerminationRequested)
+ {
+ thread.Unpin();
+
+ PinnedThreads[thread.CurrentCore] = null;
+
+ KernelContext.ThreadReselectionRequested = true;
+ }
+ }
+
+ // Whether the thread is a user exception handler thread; not implemented,
+ // always false for now.
+ public bool IsExceptionUserThread(KThread thread)
+ {
+ // TODO
+ return false;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/KProcessCapabilities.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/KProcessCapabilities.cs
new file mode 100644
index 00000000..c99e3112
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/KProcessCapabilities.cs
@@ -0,0 +1,328 @@
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using Ryujinx.Horizon.Common;
+using System;
+using System.Numerics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Parses and stores the kernel capability descriptors of a process
+ // (allowed cores/priorities, SVC and IRQ access masks, memory mappings,
+ // debug flags, handle table size, kernel version, application type).
+ class KProcessCapabilities
+ {
+ // Bitmaps: one bit per SVC id / per IRQ number.
+ public byte[] SvcAccessMask { get; }
+ public byte[] IrqAccessMask { get; }
+
+ public ulong AllowedCpuCoresMask { get; private set; }
+ public ulong AllowedThreadPriosMask { get; private set; }
+
+ public uint DebuggingFlags { get; private set; }
+ public uint HandleTableSize { get; private set; }
+ public uint KernelReleaseVersion { get; private set; }
+ public uint ApplicationType { get; private set; }
+
+ public KProcessCapabilities()
+ {
+ // length / number of bits of the underlying type
+ SvcAccessMask = new byte[KernelConstants.SupervisorCallCount / 8];
+ IrqAccessMask = new byte[0x80];
+ }
+
+ // Kernel-internal processes get all cores/priorities before parsing.
+ public Result InitializeForKernel(ReadOnlySpan<uint> capabilities, KPageTableBase memoryManager)
+ {
+ AllowedCpuCoresMask = 0xf;
+ AllowedThreadPriosMask = ulong.MaxValue;
+ DebuggingFlags &= ~3u;
+ KernelReleaseVersion = KProcess.KernelVersionPacked;
+
+ return Parse(capabilities, memoryManager);
+ }
+
+ public Result InitializeForUser(ReadOnlySpan<uint> capabilities, KPageTableBase memoryManager)
+ {
+ return Parse(capabilities, memoryManager);
+ }
+
+ // Walks the capability words. MapRange capabilities consume two words
+ // (address descriptor followed by size descriptor); everything else is a
+ // single word handled by ParseCapability. mask0/mask1 track which
+ // one-shot capability kinds / SVC mask slots were already seen.
+ private Result Parse(ReadOnlySpan<uint> capabilities, KPageTableBase memoryManager)
+ {
+ int mask0 = 0;
+ int mask1 = 0;
+
+ for (int index = 0; index < capabilities.Length; index++)
+ {
+ uint cap = capabilities[index];
+
+ if (cap.GetCapabilityType() != CapabilityType.MapRange)
+ {
+ Result result = ParseCapability(cap, ref mask0, ref mask1, memoryManager);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+ }
+ else
+ {
+ // MapRange needs a second word with the size descriptor.
+ if ((uint)index + 1 >= capabilities.Length)
+ {
+ return KernelResult.InvalidCombination;
+ }
+
+ uint prevCap = cap;
+
+ cap = capabilities[++index];
+
+ // The second word must also carry the MapRange type pattern.
+ if (((cap + 1) & ~cap) != 0x40)
+ {
+ return KernelResult.InvalidCombination;
+ }
+
+ if ((cap & 0x78000000) != 0)
+ {
+ return KernelResult.MaximumExceeded;
+ }
+
+ if ((cap & 0x7ffff80) == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ // Address and size are stored in 32-byte units in the descriptor.
+ long address = ((long)prevCap << 5) & 0xffffff000;
+ long size = ((long)cap << 5) & 0xfffff000;
+
+ // The end of the range must fit in the 36-bit physical space.
+ if (((ulong)(address + size - 1) >> 36) != 0)
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ KMemoryPermission perm = (prevCap >> 31) != 0
+ ? KMemoryPermission.Read
+ : KMemoryPermission.ReadAndWrite;
+
+ Result result;
+
+ // Top bit of the size word selects normal vs IO memory.
+ if ((cap >> 31) != 0)
+ {
+ result = memoryManager.MapNormalMemory(address, size, perm);
+ }
+ else
+ {
+ result = memoryManager.MapIoMemory(address, size, perm);
+ }
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+ }
+ }
+
+ return Result.Success;
+ }
+
+ // Parses one single-word capability descriptor. The capability kind is
+ // encoded in the count of trailing set bits of the word.
+ private Result ParseCapability(uint cap, ref int mask0, ref int mask1, KPageTableBase memoryManager)
+ {
+ CapabilityType code = cap.GetCapabilityType();
+
+ if (code == CapabilityType.Invalid)
+ {
+ return KernelResult.InvalidCapability;
+ }
+ else if (code == CapabilityType.Padding)
+ {
+ return Result.Success;
+ }
+
+ int codeMask = 1 << (32 - BitOperations.LeadingZeroCount(code.GetFlag() + 1));
+
+ // Check if the property was already set.
+ if (((mask0 & codeMask) & 0x1e008) != 0)
+ {
+ return KernelResult.InvalidCombination;
+ }
+
+ mask0 |= codeMask;
+
+ switch (code)
+ {
+ case CapabilityType.CorePriority:
+ {
+ // May only appear once, and only before any core/prio was set.
+ if (AllowedCpuCoresMask != 0 || AllowedThreadPriosMask != 0)
+ {
+ return KernelResult.InvalidCapability;
+ }
+
+ uint lowestCpuCore = (cap >> 16) & 0xff;
+ uint highestCpuCore = (cap >> 24) & 0xff;
+
+ if (lowestCpuCore > highestCpuCore)
+ {
+ return KernelResult.InvalidCombination;
+ }
+
+ // Note: lower numeric value = higher priority, hence the bit layout.
+ uint highestThreadPrio = (cap >> 4) & 0x3f;
+ uint lowestThreadPrio = (cap >> 10) & 0x3f;
+
+ if (lowestThreadPrio > highestThreadPrio)
+ {
+ return KernelResult.InvalidCombination;
+ }
+
+ if (highestCpuCore >= KScheduler.CpuCoresCount)
+ {
+ return KernelResult.InvalidCpuCore;
+ }
+
+ AllowedCpuCoresMask = GetMaskFromMinMax(lowestCpuCore, highestCpuCore);
+ AllowedThreadPriosMask = GetMaskFromMinMax(lowestThreadPrio, highestThreadPrio);
+
+ break;
+ }
+
+ case CapabilityType.SyscallMask:
+ {
+ // Each word grants up to 24 SVCs; the slot selects which group of 24.
+ int slot = ((int)cap >> 29) & 7;
+
+ int svcSlotMask = 1 << slot;
+
+ // Each slot may only appear once (tracked in mask1).
+ if ((mask1 & svcSlotMask) != 0)
+ {
+ return KernelResult.InvalidCombination;
+ }
+
+ mask1 |= svcSlotMask;
+
+ uint svcMask = (cap >> 5) & 0xffffff;
+
+ int baseSvc = slot * 24;
+
+ for (int index = 0; index < 24; index++)
+ {
+ if (((svcMask >> index) & 1) == 0)
+ {
+ continue;
+ }
+
+ int svcId = baseSvc + index;
+
+ if (svcId >= KernelConstants.SupervisorCallCount)
+ {
+ return KernelResult.MaximumExceeded;
+ }
+
+ SvcAccessMask[svcId / 8] |= (byte)(1 << (svcId & 7));
+ }
+
+ break;
+ }
+
+ case CapabilityType.MapIoPage:
+ {
+ // Address stored in 16-byte units; maps one IO page read/write.
+ long address = ((long)cap << 4) & 0xffffff000;
+
+ memoryManager.MapIoMemory(address, KPageTableBase.PageSize, KMemoryPermission.ReadAndWrite);
+
+ break;
+ }
+
+ case CapabilityType.MapRegion:
+ {
+ // TODO: Implement capabilities for MapRegion
+
+ break;
+ }
+
+ case CapabilityType.InterruptPair:
+ {
+ // TODO: GIC distributor check.
+ // Two 10-bit IRQ numbers per word; 0x3ff means "no IRQ".
+ int irq0 = ((int)cap >> 12) & 0x3ff;
+ int irq1 = ((int)cap >> 22) & 0x3ff;
+
+ if (irq0 != 0x3ff)
+ {
+ IrqAccessMask[irq0 / 8] |= (byte)(1 << (irq0 & 7));
+ }
+
+ if (irq1 != 0x3ff)
+ {
+ IrqAccessMask[irq1 / 8] |= (byte)(1 << (irq1 & 7));
+ }
+
+ break;
+ }
+
+ case CapabilityType.ProgramType:
+ {
+ uint applicationType = (cap >> 14);
+
+ if (applicationType > 7)
+ {
+ return KernelResult.ReservedValue;
+ }
+
+ ApplicationType = applicationType;
+
+ break;
+ }
+
+ case CapabilityType.KernelVersion:
+ {
+ // Note: This check is bugged on kernel too, we are just replicating the bug here.
+ if ((KernelReleaseVersion >> 17) != 0 || cap < 0x80000)
+ {
+ return KernelResult.ReservedValue;
+ }
+
+ KernelReleaseVersion = cap;
+
+ break;
+ }
+
+ case CapabilityType.HandleTable:
+ {
+ uint handleTableSize = cap >> 26;
+
+ if (handleTableSize > 0x3ff)
+ {
+ return KernelResult.ReservedValue;
+ }
+
+ HandleTableSize = handleTableSize;
+
+ break;
+ }
+
+ case CapabilityType.DebugFlags:
+ {
+ uint debuggingFlags = cap >> 19;
+
+ if (debuggingFlags > 3)
+ {
+ return KernelResult.ReservedValue;
+ }
+
+ DebuggingFlags &= ~3u;
+ DebuggingFlags |= debuggingFlags;
+
+ break;
+ }
+
+ default: return KernelResult.InvalidCapability;
+ }
+
+ return Result.Success;
+ }
+
+ // Builds a bitmask with bits [min, max] set. The range == 64 case is
+ // special-cased because 1UL << 64 is undefined in C# (shift count wraps).
+ private static ulong GetMaskFromMinMax(uint min, uint max)
+ {
+ uint range = max - min + 1;
+
+ if (range == 64)
+ {
+ return ulong.MaxValue;
+ }
+
+ ulong mask = (1UL << (int)range) - 1;
+
+ return mask << (int)min;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageInfo.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageInfo.cs
new file mode 100644
index 00000000..f55e3c10
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageInfo.cs
@@ -0,0 +1,77 @@
+using Ryujinx.HLE.HOS.Kernel.Memory;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Tracks which fixed-size TLS entries of a single mapped page are in use.
+ class KTlsPageInfo
+ {
+ public const int TlsEntrySize = 0x200;
+
+ public ulong PageVirtualAddress { get; }
+ public ulong PagePhysicalAddress { get; }
+
+ // One flag per TLS entry; true while the entry is available.
+ private readonly bool[] _isSlotFree;
+
+ public KTlsPageInfo(ulong pageVirtualAddress, ulong pagePhysicalAddress)
+ {
+ PageVirtualAddress = pageVirtualAddress;
+ PagePhysicalAddress = pagePhysicalAddress;
+
+ int slotCount = (int)(KPageTableBase.PageSize / TlsEntrySize);
+
+ _isSlotFree = new bool[slotCount];
+
+ // All entries start out free.
+ for (int slot = 0; slot < slotCount; slot++)
+ {
+ _isSlotFree[slot] = true;
+ }
+ }
+
+ // Claims the first free entry and returns its virtual address.
+ // Returns false with address == 0 when the page is fully occupied.
+ public bool TryGetFreePage(out ulong address)
+ {
+ ulong candidate = PageVirtualAddress;
+
+ for (int slot = 0; slot < _isSlotFree.Length; slot++)
+ {
+ if (_isSlotFree[slot])
+ {
+ _isSlotFree[slot] = false;
+
+ address = candidate;
+
+ return true;
+ }
+
+ candidate += TlsEntrySize;
+ }
+
+ address = 0;
+
+ return false;
+ }
+
+ // True when no entry of the page is free.
+ public bool IsFull()
+ {
+ foreach (bool free in _isSlotFree)
+ {
+ if (free)
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // True when every entry of the page is free.
+ public bool IsEmpty()
+ {
+ foreach (bool free in _isSlotFree)
+ {
+ if (!free)
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Marks the entry containing the given address as free again.
+ public void FreeTlsSlot(ulong address)
+ {
+ ulong slot = (address - PageVirtualAddress) / TlsEntrySize;
+
+ _isSlotFree[slot] = true;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageManager.cs
new file mode 100644
index 00000000..0fde495c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageManager.cs
@@ -0,0 +1,61 @@
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Tracks allocation state of the fixed-size TLS slots inside a single page.
+ class KTlsPageManager
+ {
+ private const int TlsEntrySize = 0x200;
+
+ // Base address of the managed page; never changes after construction.
+ private readonly long _pagePosition;
+
+ private int _usedSlots;
+
+ // One flag per slot; true while the slot is in use.
+ private readonly bool[] _slots;
+
+ public bool IsEmpty => _usedSlots == 0;
+ public bool IsFull => _usedSlots == _slots.Length;
+
+ public KTlsPageManager(long pagePosition)
+ {
+ _pagePosition = pagePosition;
+
+ _slots = new bool[KPageTableBase.PageSize / TlsEntrySize];
+ }
+
+ // Claims the first free slot and returns its address within the page.
+ // Returns false with position == 0 when the page is fully occupied.
+ public bool TryGetFreeTlsAddr(out long position)
+ {
+ position = _pagePosition;
+
+ for (int index = 0; index < _slots.Length; index++)
+ {
+ if (!_slots[index])
+ {
+ _slots[index] = true;
+
+ _usedSlots++;
+
+ return true;
+ }
+
+ position += TlsEntrySize;
+ }
+
+ position = 0;
+
+ return false;
+ }
+
+ // Releases a slot by index.
+ // Throws ArgumentOutOfRangeException for an invalid slot index.
+ public void FreeTlsSlot(int slot)
+ {
+ // Fix: valid indices are 0.._slots.Length - 1, so reject with ">=".
+ // The previous "> _slots.Length" check let slot == _slots.Length slip
+ // through and throw IndexOutOfRangeException on the array access below
+ // instead of the intended ArgumentOutOfRangeException (the unsigned
+ // cast already catches negative values).
+ if ((uint)slot >= (uint)_slots.Length)
+ {
+ throw new ArgumentOutOfRangeException(nameof(slot));
+ }
+
+ _slots[slot] = false;
+
+ _usedSlots--;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessContext.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessContext.cs
new file mode 100644
index 00000000..87296830
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessContext.cs
@@ -0,0 +1,34 @@
+using Ryujinx.Cpu;
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Non-executing process context: holds an address space but cannot run
+ // guest code (Execute throws). Used for processes that are never scheduled
+ // on the emulated CPU.
+ class ProcessContext : IProcessContext
+ {
+ public IVirtualMemoryManager AddressSpace { get; }
+
+ public ProcessContext(IVirtualMemoryManager asManager)
+ {
+ AddressSpace = asManager;
+ }
+
+ // Returns a stub context; the exception callbacks are ignored because
+ // this context never executes code.
+ public IExecutionContext CreateExecutionContext(ExceptionCallbacks exceptionCallbacks)
+ {
+ return new ProcessExecutionContext();
+ }
+
+ public void Execute(IExecutionContext context, ulong codeAddress)
+ {
+ throw new NotSupportedException();
+ }
+
+ // No code is ever translated for this context, so there is nothing to invalidate.
+ public void InvalidateCacheRegion(ulong address, ulong size)
+ {
+ }
+
+ public void Dispose()
+ {
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessContextFactory.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessContextFactory.cs
new file mode 100644
index 00000000..1c5798b4
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessContextFactory.cs
@@ -0,0 +1,12 @@
+using Ryujinx.Memory;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Default factory: creates a non-executing ProcessContext backed by a plain
+ // AddressSpaceManager (the invalidAccessHandler and for64Bit arguments are
+ // not used by this implementation).
+ class ProcessContextFactory : IProcessContextFactory
+ {
+ public IProcessContext Create(KernelContext context, ulong pid, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler, bool for64Bit)
+ {
+ return new ProcessContext(new AddressSpaceManager(context.Memory, addressSpaceSize));
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessCreationFlags.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessCreationFlags.cs
new file mode 100644
index 00000000..a79978ac
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessCreationFlags.cs
@@ -0,0 +1,41 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Flags passed on process creation (svcCreateProcess). Bits 1-3 select the
+ // address space type and bits 7-10 the memory pool partition.
+ [Flags]
+ enum ProcessCreationFlags
+ {
+ Is64Bit = 1 << 0,
+
+ AddressSpaceShift = 1,
+ AddressSpace32Bit = 0 << AddressSpaceShift,
+ AddressSpace64BitDeprecated = 1 << AddressSpaceShift,
+ AddressSpace32BitWithoutAlias = 2 << AddressSpaceShift,
+ AddressSpace64Bit = 3 << AddressSpaceShift,
+ AddressSpaceMask = 7 << AddressSpaceShift,
+
+ EnableDebug = 1 << 4,
+ EnableAslr = 1 << 5,
+ IsApplication = 1 << 6,
+ DeprecatedUseSecureMemory = 1 << 7,
+
+ // NOTE(review): bit 7 is shared with DeprecatedUseSecureMemory above;
+ // this overlap mirrors the HOS flag layout — confirm against switchbrew
+ // before relying on both simultaneously.
+ PoolPartitionShift = 7,
+ PoolPartitionApplication = 0 << PoolPartitionShift,
+ PoolPartitionApplet = 1 << PoolPartitionShift,
+ PoolPartitionSystem = 2 << PoolPartitionShift,
+ PoolPartitionSystemNonSecure = 3 << PoolPartitionShift,
+ PoolPartitionMask = 0xf << PoolPartitionShift,
+
+ OptimizeMemoryAllocation = 1 << 11,
+
+ All =
+ Is64Bit |
+ AddressSpaceMask |
+ EnableDebug |
+ EnableAslr |
+ IsApplication |
+ DeprecatedUseSecureMemory |
+ PoolPartitionMask |
+ OptimizeMemoryAllocation
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessCreationInfo.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessCreationInfo.cs
new file mode 100644
index 00000000..c05bb574
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessCreationInfo.cs
@@ -0,0 +1,37 @@
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Immutable bundle of the parameters used to create a process
+ // (name, title id, code location and size, creation flags and resources).
+ readonly struct ProcessCreationInfo
+ {
+ public string Name { get; }
+
+ public int Version { get; }
+ public ulong TitleId { get; }
+
+ public ulong CodeAddress { get; }
+ public int CodePagesCount { get; }
+
+ public ProcessCreationFlags Flags { get; }
+ public int ResourceLimitHandle { get; }
+ public int SystemResourcePagesCount { get; }
+
+ public ProcessCreationInfo(
+ string name,
+ int version,
+ ulong titleId,
+ ulong codeAddress,
+ int codePagesCount,
+ ProcessCreationFlags flags,
+ int resourceLimitHandle,
+ int systemResourcePagesCount)
+ {
+ Name = name;
+ Version = version;
+ TitleId = titleId;
+ CodeAddress = codeAddress;
+ CodePagesCount = codePagesCount;
+ Flags = flags;
+ ResourceLimitHandle = resourceLimitHandle;
+ SystemResourcePagesCount = systemResourcePagesCount;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessExecutionContext.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessExecutionContext.cs
new file mode 100644
index 00000000..77fcdf33
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessExecutionContext.cs
@@ -0,0 +1,46 @@
+using ARMeilleure.State;
+using Ryujinx.Cpu;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Stub execution context for processes that never run guest code: integer
+ // registers are stored, vector registers and PC/counter reads are inert.
+ class ProcessExecutionContext : IExecutionContext
+ {
+ public ulong Pc => 0UL;
+
+ public ulong CntfrqEl0 { get; set; }
+ public ulong CntpctEl0 => 0UL;
+
+ public long TpidrEl0 { get; set; }
+ public long TpidrroEl0 { get; set; }
+
+ public uint Pstate { get; set; }
+
+ public uint Fpcr { get; set; }
+ public uint Fpsr { get; set; }
+
+ // Always reports AArch64; the setter is intentionally a no-op.
+ public bool IsAarch32 { get => false; set { } }
+
+ public bool Running { get; private set; } = true;
+
+ private readonly ulong[] _x = new ulong[32];
+
+ public ulong GetX(int index) => _x[index];
+ public void SetX(int index, ulong value) => _x[index] = value;
+
+ // Vector registers are not backed by storage; reads return zero.
+ public V128 GetV(int index) => default;
+ public void SetV(int index, V128 value) { }
+
+ // Nothing to interrupt: this context never executes.
+ public void RequestInterrupt()
+ {
+ }
+
+ public void StopRunning()
+ {
+ Running = false;
+ }
+
+ public void Dispose()
+ {
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessState.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessState.cs
new file mode 100644
index 00000000..5ef3077e
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessState.cs
@@ -0,0 +1,14 @@
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Lifecycle states of a kernel process. The numeric order matters: code
+ // elsewhere compares with >= Started to mean "has been started".
+ enum ProcessState : byte
+ {
+ Created = 0,
+ CreatedAttached = 1,
+ Started = 2,
+ Crashed = 3,
+ Attached = 4,
+ Exiting = 5,
+ Exited = 6,
+ DebugSuspended = 7
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessTamperInfo.cs b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessTamperInfo.cs
new file mode 100644
index 00000000..4cf67172
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Process/ProcessTamperInfo.cs
@@ -0,0 +1,24 @@
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Process
+{
+ // Immutable snapshot of the information a tamper (cheat) engine needs about
+ // a process: its build ids, code region addresses and key memory regions.
+ class ProcessTamperInfo
+ {
+ public KProcess Process { get; }
+ public IEnumerable<string> BuildIds { get; }
+ public IEnumerable<ulong> CodeAddresses { get; }
+ public ulong HeapAddress { get; }
+ public ulong AliasAddress { get; }
+ public ulong AslrAddress { get; }
+
+ public ProcessTamperInfo(KProcess process, IEnumerable<string> buildIds, IEnumerable<ulong> codeAddresses, ulong heapAddress, ulong aliasAddress, ulong aslrAddress)
+ {
+ Process = process;
+ BuildIds = buildIds;
+ CodeAddresses = codeAddresses;
+ HeapAddress = heapAddress;
+ AliasAddress = aliasAddress;
+ AslrAddress = aslrAddress;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/CodeMemoryOperation.cs b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/CodeMemoryOperation.cs
new file mode 100644
index 00000000..511ee99a
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/CodeMemoryOperation.cs
@@ -0,0 +1,10 @@
+namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
+{
+ // Operations accepted by the ControlCodeMemory supervisor call.
+ // Values are implicit and sequential starting at 0.
+ enum CodeMemoryOperation : uint
+ {
+ Map,
+ MapToOwner,
+ Unmap,
+ UnmapFromOwner
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/InfoType.cs b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/InfoType.cs
new file mode 100644
index 00000000..3cf7ba74
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/InfoType.cs
@@ -0,0 +1,34 @@
+namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
+{
+ // Query ids accepted by the GetInfo supervisor call. Values are implicit
+ // and sequential starting at 0, except the Mesosphere extension at 65001.
+ enum InfoType : uint
+ {
+ CoreMask,
+ PriorityMask,
+ AliasRegionAddress,
+ AliasRegionSize,
+ HeapRegionAddress,
+ HeapRegionSize,
+ TotalMemorySize,
+ UsedMemorySize,
+ DebuggerAttached,
+ ResourceLimit,
+ IdleTickCount,
+ RandomEntropy,
+ AslrRegionAddress,
+ AslrRegionSize,
+ StackRegionAddress,
+ StackRegionSize,
+ SystemResourceSizeTotal,
+ SystemResourceSizeUsed,
+ ProgramId,
+ // NOTE: Added in 4.0.0, removed in 5.0.0.
+ InitialProcessIdRange,
+ UserExceptionContextAddress,
+ TotalNonSystemMemorySize,
+ UsedNonSystemMemorySize,
+ IsApplication,
+ FreeThreadCount,
+ ThreadTickCount,
+ // Mesosphere (custom kernel) extension id.
+ MesosphereCurrentProcess = 65001
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/MemoryInfo.cs b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/MemoryInfo.cs
new file mode 100644
index 00000000..a71cce1f
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/MemoryInfo.cs
@@ -0,0 +1,37 @@
+using Ryujinx.HLE.HOS.Kernel.Memory;
+
+namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
+{
+ struct MemoryInfo
+ {
+ public ulong Address;
+ public ulong Size;
+ public MemoryState State;
+ public MemoryAttribute Attribute;
+ public KMemoryPermission Permission;
+ public int IpcRefCount;
+ public int DeviceRefCount;
+#pragma warning disable CS0414
+ private int _padding;
+#pragma warning restore CS0414
+
+ public MemoryInfo(
+ ulong address,
+ ulong size,
+ MemoryState state,
+ MemoryAttribute attribute,
+ KMemoryPermission permission,
+ int ipcRefCount,
+ int deviceRefCount)
+ {
+ Address = address;
+ Size = size;
+ State = state;
+ Attribute = attribute;
+ Permission = permission;
+ IpcRefCount = ipcRefCount;
+ DeviceRefCount = deviceRefCount;
+ _padding = 0;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/PointerSizedAttribute.cs b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/PointerSizedAttribute.cs
new file mode 100644
index 00000000..154164fb
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/PointerSizedAttribute.cs
@@ -0,0 +1,9 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
+{
+ [AttributeUsage(AttributeTargets.Parameter, AllowMultiple = false, Inherited = true)]
+ class PointerSizedAttribute : Attribute
+ {
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SvcAttribute.cs b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SvcAttribute.cs
new file mode 100644
index 00000000..b8839d1d
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SvcAttribute.cs
@@ -0,0 +1,15 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
+{
+ [AttributeUsage(AttributeTargets.Method, AllowMultiple = false, Inherited = true)]
+ class SvcAttribute : Attribute
+ {
+ public int Id { get; }
+
+ public SvcAttribute(int id)
+ {
+ Id = id;
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SvcImplAttribute.cs b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SvcImplAttribute.cs
new file mode 100644
index 00000000..a32d851f
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SvcImplAttribute.cs
@@ -0,0 +1,9 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
+{
+ [AttributeUsage(AttributeTargets.Class, AllowMultiple = false, Inherited = true)]
+ class SvcImplAttribute : Attribute
+ {
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs
new file mode 100644
index 00000000..3163c348
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs
@@ -0,0 +1,3010 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Logging;
+using Ryujinx.Cpu;
+using Ryujinx.HLE.Exceptions;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Ipc;
+using Ryujinx.HLE.HOS.Kernel.Memory;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+using Ryujinx.Horizon.Common;
+using System;
+using System.Buffers;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
+{
+ [SvcImpl]
+ class Syscall : ISyscallApi
+ {
+ private readonly KernelContext _context;
+
+ public Syscall(KernelContext context)
+ {
+ _context = context;
+ }
+
+ // Process
+
+ [Svc(0x24)]
+ public Result GetProcessId(out ulong pid, int handle)
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ KProcess process = currentProcess.HandleTable.GetKProcess(handle);
+
+ if (process == null)
+ {
+ KThread thread = currentProcess.HandleTable.GetKThread(handle);
+
+ if (thread != null)
+ {
+ process = thread.Owner;
+ }
+
+ // TODO: KDebugEvent.
+ }
+
+ pid = process?.Pid ?? 0;
+
+ return process != null
+ ? Result.Success
+ : KernelResult.InvalidHandle;
+ }
+
+ public Result CreateProcess(
+ out int handle,
+ ProcessCreationInfo info,
+ ReadOnlySpan<uint> capabilities,
+ IProcessContextFactory contextFactory,
+ ThreadStart customThreadStart = null)
+ {
+ handle = 0;
+
+ if ((info.Flags & ~ProcessCreationFlags.All) != 0)
+ {
+ return KernelResult.InvalidEnumValue;
+ }
+
+ // TODO: Address space check.
+
+ if ((info.Flags & ProcessCreationFlags.PoolPartitionMask) > ProcessCreationFlags.PoolPartitionSystemNonSecure)
+ {
+ return KernelResult.InvalidEnumValue;
+ }
+
+ if ((info.CodeAddress & 0x1fffff) != 0)
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (info.CodePagesCount < 0 || info.SystemResourcePagesCount < 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (info.Flags.HasFlag(ProcessCreationFlags.OptimizeMemoryAllocation) &&
+ !info.Flags.HasFlag(ProcessCreationFlags.IsApplication))
+ {
+ return KernelResult.InvalidThread;
+ }
+
+ KHandleTable handleTable = KernelStatic.GetCurrentProcess().HandleTable;
+
+ KProcess process = new KProcess(_context);
+
+ using var _ = new OnScopeExit(process.DecrementReferenceCount);
+
+ KResourceLimit resourceLimit;
+
+ if (info.ResourceLimitHandle != 0)
+ {
+ resourceLimit = handleTable.GetObject<KResourceLimit>(info.ResourceLimitHandle);
+
+ if (resourceLimit == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+ }
+ else
+ {
+ resourceLimit = _context.ResourceLimit;
+ }
+
+ MemoryRegion memRegion = (info.Flags & ProcessCreationFlags.PoolPartitionMask) switch
+ {
+ ProcessCreationFlags.PoolPartitionApplication => MemoryRegion.Application,
+ ProcessCreationFlags.PoolPartitionApplet => MemoryRegion.Applet,
+ ProcessCreationFlags.PoolPartitionSystem => MemoryRegion.Service,
+ ProcessCreationFlags.PoolPartitionSystemNonSecure => MemoryRegion.NvServices,
+ _ => MemoryRegion.NvServices
+ };
+
+ Result result = process.Initialize(
+ info,
+ capabilities,
+ resourceLimit,
+ memRegion,
+ contextFactory,
+ customThreadStart);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _context.Processes.TryAdd(process.Pid, process);
+
+ return handleTable.GenerateHandle(process, out handle);
+ }
+
+ public Result StartProcess(int handle, int priority, int cpuCore, ulong mainThreadStackSize)
+ {
+ KProcess process = KernelStatic.GetCurrentProcess().HandleTable.GetObject<KProcess>(handle);
+
+ if (process == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ if ((uint)cpuCore >= KScheduler.CpuCoresCount || !process.IsCpuCoreAllowed(cpuCore))
+ {
+ return KernelResult.InvalidCpuCore;
+ }
+
+ if ((uint)priority >= KScheduler.PrioritiesCount || !process.IsPriorityAllowed(priority))
+ {
+ return KernelResult.InvalidPriority;
+ }
+
+ process.DefaultCpuCore = cpuCore;
+
+ Result result = process.Start(priority, mainThreadStackSize);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ process.IncrementReferenceCount();
+
+ return Result.Success;
+ }
+
+ [Svc(0x5f)]
+ public Result FlushProcessDataCache(int processHandle, ulong address, ulong size)
+ {
+ // FIXME: This needs to be implemented, as ARMv7 doesn't provide any way to perform cache maintenance operations from EL0.
+ // Since we don't support (and don't actually need) flushing the cache, this is stubbed.
+ return Result.Success;
+ }
+
+ // IPC
+
+ [Svc(0x1f)]
+ public Result ConnectToNamedPort(out int handle, [PointerSized] ulong namePtr)
+ {
+ handle = 0;
+
+ if (!KernelTransfer.UserToKernelString(out string name, namePtr, 12))
+ {
+ return KernelResult.UserCopyFailed;
+ }
+
+ return ConnectToNamedPort(out handle, name);
+ }
+
+ public Result ConnectToNamedPort(out int handle, string name)
+ {
+ handle = 0;
+
+ if (name.Length > 11)
+ {
+ return KernelResult.MaximumExceeded;
+ }
+
+ KAutoObject autoObj = KAutoObject.FindNamedObject(_context, name);
+
+ if (autoObj is not KClientPort clientPort)
+ {
+ return KernelResult.NotFound;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ Result result = currentProcess.HandleTable.ReserveHandle(out handle);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ result = clientPort.Connect(out KClientSession clientSession);
+
+ if (result != Result.Success)
+ {
+ currentProcess.HandleTable.CancelHandleReservation(handle);
+
+ return result;
+ }
+
+ currentProcess.HandleTable.SetReservedHandleObj(handle, clientSession);
+
+ clientSession.DecrementReferenceCount();
+
+ return result;
+ }
+
+ [Svc(0x21)]
+ public Result SendSyncRequest(int handle)
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ KClientSession session = currentProcess.HandleTable.GetObject<KClientSession>(handle);
+
+ if (session == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ return session.SendSyncRequest();
+ }
+
+ [Svc(0x22)]
+ public Result SendSyncRequestWithUserBuffer(
+ [PointerSized] ulong messagePtr,
+ [PointerSized] ulong messageSize,
+ int handle)
+ {
+ if (!PageAligned(messagePtr))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(messageSize) || messageSize == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (messagePtr + messageSize <= messagePtr)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ Result result = currentProcess.MemoryManager.BorrowIpcBuffer(messagePtr, messageSize);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ KClientSession session = currentProcess.HandleTable.GetObject<KClientSession>(handle);
+
+ if (session == null)
+ {
+ result = KernelResult.InvalidHandle;
+ }
+ else
+ {
+ result = session.SendSyncRequest(messagePtr, messageSize);
+ }
+
+ Result result2 = currentProcess.MemoryManager.UnborrowIpcBuffer(messagePtr, messageSize);
+
+ if (result == Result.Success)
+ {
+ result = result2;
+ }
+
+ return result;
+ }
+
+ [Svc(0x23)]
+ public Result SendAsyncRequestWithUserBuffer(
+ out int doneEventHandle,
+ [PointerSized] ulong messagePtr,
+ [PointerSized] ulong messageSize,
+ int handle)
+ {
+ doneEventHandle = 0;
+
+ if (!PageAligned(messagePtr))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(messageSize) || messageSize == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (messagePtr + messageSize <= messagePtr)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ Result result = currentProcess.MemoryManager.BorrowIpcBuffer(messagePtr, messageSize);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ KResourceLimit resourceLimit = currentProcess.ResourceLimit;
+
+ if (resourceLimit != null && !resourceLimit.Reserve(LimitableResource.Event, 1))
+ {
+ currentProcess.MemoryManager.UnborrowIpcBuffer(messagePtr, messageSize);
+
+ return KernelResult.ResLimitExceeded;
+ }
+
+ KClientSession session = currentProcess.HandleTable.GetObject<KClientSession>(handle);
+
+ if (session == null)
+ {
+ result = KernelResult.InvalidHandle;
+ }
+ else
+ {
+ KEvent doneEvent = new KEvent(_context);
+
+ result = currentProcess.HandleTable.GenerateHandle(doneEvent.ReadableEvent, out doneEventHandle);
+
+ if (result == Result.Success)
+ {
+ result = session.SendAsyncRequest(doneEvent.WritableEvent, messagePtr, messageSize);
+
+ if (result != Result.Success)
+ {
+ currentProcess.HandleTable.CloseHandle(doneEventHandle);
+ }
+ }
+ }
+
+ if (result != Result.Success)
+ {
+ resourceLimit?.Release(LimitableResource.Event, 1);
+
+ currentProcess.MemoryManager.UnborrowIpcBuffer(messagePtr, messageSize);
+ }
+
+ return result;
+ }
+
+ [Svc(0x40)]
+ public Result CreateSession(
+ out int serverSessionHandle,
+ out int clientSessionHandle,
+ bool isLight,
+ [PointerSized] ulong namePtr)
+ {
+ return CreateSession(out serverSessionHandle, out clientSessionHandle, isLight, null);
+ }
+
+ public Result CreateSession(
+ out int serverSessionHandle,
+ out int clientSessionHandle,
+ bool isLight,
+ string name)
+ {
+ serverSessionHandle = 0;
+ clientSessionHandle = 0;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ KResourceLimit resourceLimit = currentProcess.ResourceLimit;
+
+ if (resourceLimit != null && !resourceLimit.Reserve(LimitableResource.Session, 1))
+ {
+ return KernelResult.ResLimitExceeded;
+ }
+
+ Result result;
+
+ if (isLight)
+ {
+ KLightSession session = new KLightSession(_context);
+
+ result = currentProcess.HandleTable.GenerateHandle(session.ServerSession, out serverSessionHandle);
+
+ if (result == Result.Success)
+ {
+ result = currentProcess.HandleTable.GenerateHandle(session.ClientSession, out clientSessionHandle);
+
+ if (result != Result.Success)
+ {
+ currentProcess.HandleTable.CloseHandle(serverSessionHandle);
+
+ serverSessionHandle = 0;
+ }
+ }
+
+ session.ServerSession.DecrementReferenceCount();
+ session.ClientSession.DecrementReferenceCount();
+ }
+ else
+ {
+ KSession session = new KSession(_context);
+
+ result = currentProcess.HandleTable.GenerateHandle(session.ServerSession, out serverSessionHandle);
+
+ if (result == Result.Success)
+ {
+ result = currentProcess.HandleTable.GenerateHandle(session.ClientSession, out clientSessionHandle);
+
+ if (result != Result.Success)
+ {
+ currentProcess.HandleTable.CloseHandle(serverSessionHandle);
+
+ serverSessionHandle = 0;
+ }
+ }
+
+ session.ServerSession.DecrementReferenceCount();
+ session.ClientSession.DecrementReferenceCount();
+ }
+
+ return result;
+ }
+
+ [Svc(0x41)]
+ public Result AcceptSession(out int sessionHandle, int portHandle)
+ {
+ sessionHandle = 0;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ KServerPort serverPort = currentProcess.HandleTable.GetObject<KServerPort>(portHandle);
+
+ if (serverPort == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ Result result = currentProcess.HandleTable.ReserveHandle(out int handle);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ KAutoObject session;
+
+ if (serverPort.IsLight)
+ {
+ session = serverPort.AcceptIncomingLightConnection();
+ }
+ else
+ {
+ session = serverPort.AcceptIncomingConnection();
+ }
+
+ if (session != null)
+ {
+ currentProcess.HandleTable.SetReservedHandleObj(handle, session);
+
+ session.DecrementReferenceCount();
+
+ sessionHandle = handle;
+
+ result = Result.Success;
+ }
+ else
+ {
+ currentProcess.HandleTable.CancelHandleReservation(handle);
+
+ result = KernelResult.NotFound;
+ }
+
+ return result;
+ }
+
+ [Svc(0x43)]
+ public Result ReplyAndReceive(
+ out int handleIndex,
+ [PointerSized] ulong handlesPtr,
+ int handlesCount,
+ int replyTargetHandle,
+ long timeout)
+ {
+ handleIndex = 0;
+
+ if ((uint)handlesCount > 0x40)
+ {
+ return KernelResult.MaximumExceeded;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ ulong copySize = (ulong)((long)handlesCount * 4);
+
+ if (!currentProcess.MemoryManager.InsideAddrSpace(handlesPtr, copySize))
+ {
+ return KernelResult.UserCopyFailed;
+ }
+
+ if (handlesPtr + copySize < handlesPtr)
+ {
+ return KernelResult.UserCopyFailed;
+ }
+
+ int[] handles = new int[handlesCount];
+
+ if (!KernelTransfer.UserToKernelArray<int>(handlesPtr, handles))
+ {
+ return KernelResult.UserCopyFailed;
+ }
+
+ if (timeout > 0)
+ {
+ timeout += KTimeManager.DefaultTimeIncrementNanoseconds;
+ }
+
+ return ReplyAndReceive(out handleIndex, handles, replyTargetHandle, timeout);
+ }
+
+ public Result ReplyAndReceive(out int handleIndex, ReadOnlySpan<int> handles, int replyTargetHandle, long timeout)
+ {
+ handleIndex = 0;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ KSynchronizationObject[] syncObjsArray = ArrayPool<KSynchronizationObject>.Shared.Rent(handles.Length);
+
+ Span<KSynchronizationObject> syncObjs = syncObjsArray.AsSpan(0, handles.Length);
+
+ for (int index = 0; index < handles.Length; index++)
+ {
+ KSynchronizationObject obj = currentProcess.HandleTable.GetObject<KSynchronizationObject>(handles[index]);
+
+ if (obj == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ syncObjs[index] = obj;
+ }
+
+ Result result = Result.Success;
+
+ if (replyTargetHandle != 0)
+ {
+ KServerSession replyTarget = currentProcess.HandleTable.GetObject<KServerSession>(replyTargetHandle);
+
+ if (replyTarget == null)
+ {
+ result = KernelResult.InvalidHandle;
+ }
+ else
+ {
+ result = replyTarget.Reply();
+ }
+ }
+
+ if (result == Result.Success)
+ {
+ if (timeout > 0)
+ {
+ timeout += KTimeManager.DefaultTimeIncrementNanoseconds;
+ }
+
+ while ((result = _context.Synchronization.WaitFor(syncObjs, timeout, out handleIndex)) == Result.Success)
+ {
+ KServerSession session = currentProcess.HandleTable.GetObject<KServerSession>(handles[handleIndex]);
+
+ if (session == null)
+ {
+ break;
+ }
+
+ if ((result = session.Receive()) != KernelResult.NotFound)
+ {
+ break;
+ }
+ }
+ }
+
+ ArrayPool<KSynchronizationObject>.Shared.Return(syncObjsArray);
+
+ return result;
+ }
+
+ [Svc(0x44)]
+ public Result ReplyAndReceiveWithUserBuffer(
+ out int handleIndex,
+ [PointerSized] ulong messagePtr,
+ [PointerSized] ulong messageSize,
+ [PointerSized] ulong handlesPtr,
+ int handlesCount,
+ int replyTargetHandle,
+ long timeout)
+ {
+ handleIndex = 0;
+
+ if ((uint)handlesCount > 0x40)
+ {
+ return KernelResult.MaximumExceeded;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ ulong copySize = (ulong)((long)handlesCount * 4);
+
+ if (!currentProcess.MemoryManager.InsideAddrSpace(handlesPtr, copySize))
+ {
+ return KernelResult.UserCopyFailed;
+ }
+
+ if (handlesPtr + copySize < handlesPtr)
+ {
+ return KernelResult.UserCopyFailed;
+ }
+
+ Result result = currentProcess.MemoryManager.BorrowIpcBuffer(messagePtr, messageSize);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ int[] handles = new int[handlesCount];
+
+ if (!KernelTransfer.UserToKernelArray<int>(handlesPtr, handles))
+ {
+ currentProcess.MemoryManager.UnborrowIpcBuffer(messagePtr, messageSize);
+
+ return KernelResult.UserCopyFailed;
+ }
+
+ KSynchronizationObject[] syncObjs = new KSynchronizationObject[handlesCount];
+
+ for (int index = 0; index < handlesCount; index++)
+ {
+ KSynchronizationObject obj = currentProcess.HandleTable.GetObject<KSynchronizationObject>(handles[index]);
+
+ if (obj == null)
+ {
+ currentProcess.MemoryManager.UnborrowIpcBuffer(messagePtr, messageSize);
+
+ return KernelResult.InvalidHandle;
+ }
+
+ syncObjs[index] = obj;
+ }
+
+ if (replyTargetHandle != 0)
+ {
+ KServerSession replyTarget = currentProcess.HandleTable.GetObject<KServerSession>(replyTargetHandle);
+
+ if (replyTarget == null)
+ {
+ result = KernelResult.InvalidHandle;
+ }
+ else
+ {
+ result = replyTarget.Reply(messagePtr, messageSize);
+ }
+ }
+
+ if (result == Result.Success)
+ {
+ if (timeout > 0)
+ {
+ timeout += KTimeManager.DefaultTimeIncrementNanoseconds;
+ }
+
+ while ((result = _context.Synchronization.WaitFor(syncObjs, timeout, out handleIndex)) == Result.Success)
+ {
+ KServerSession session = currentProcess.HandleTable.GetObject<KServerSession>(handles[handleIndex]);
+
+ if (session == null)
+ {
+ break;
+ }
+
+ if ((result = session.Receive(messagePtr, messageSize)) != KernelResult.NotFound)
+ {
+ break;
+ }
+ }
+ }
+
+ currentProcess.MemoryManager.UnborrowIpcBuffer(messagePtr, messageSize);
+
+ return result;
+ }
+
+ [Svc(0x70)]
+ public Result CreatePort(
+ out int serverPortHandle,
+ out int clientPortHandle,
+ int maxSessions,
+ bool isLight,
+ [PointerSized] ulong namePtr)
+ {
+ // The kernel doesn't use the name pointer, so we can just pass null as the name.
+ return CreatePort(out serverPortHandle, out clientPortHandle, maxSessions, isLight, null);
+ }
+
+ public Result CreatePort(
+ out int serverPortHandle,
+ out int clientPortHandle,
+ int maxSessions,
+ bool isLight,
+ string name)
+ {
+ serverPortHandle = clientPortHandle = 0;
+
+ if (maxSessions < 1)
+ {
+ return KernelResult.MaximumExceeded;
+ }
+
+ KPort port = new KPort(_context, maxSessions, isLight, name);
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ Result result = currentProcess.HandleTable.GenerateHandle(port.ClientPort, out clientPortHandle);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ result = currentProcess.HandleTable.GenerateHandle(port.ServerPort, out serverPortHandle);
+
+ if (result != Result.Success)
+ {
+ currentProcess.HandleTable.CloseHandle(clientPortHandle);
+ }
+
+ return result;
+ }
+
+ [Svc(0x71)]
+ public Result ManageNamedPort(out int handle, [PointerSized] ulong namePtr, int maxSessions)
+ {
+ handle = 0;
+
+ if (!KernelTransfer.UserToKernelString(out string name, namePtr, 12))
+ {
+ return KernelResult.UserCopyFailed;
+ }
+
+ if (name.Length > 11)
+ {
+ return KernelResult.MaximumExceeded;
+ }
+
+ return ManageNamedPort(out handle, name, maxSessions);
+ }
+
+ public Result ManageNamedPort(out int handle, string name, int maxSessions)
+ {
+ handle = 0;
+
+ if (maxSessions < 0)
+ {
+ return KernelResult.MaximumExceeded;
+ }
+
+ if (maxSessions == 0)
+ {
+ return KAutoObject.RemoveName(_context, name);
+ }
+
+ KPort port = new KPort(_context, maxSessions, false, null);
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ Result result = currentProcess.HandleTable.GenerateHandle(port.ServerPort, out handle);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ result = port.ClientPort.SetName(name);
+
+ if (result != Result.Success)
+ {
+ currentProcess.HandleTable.CloseHandle(handle);
+ }
+
+ return result;
+ }
+
+ [Svc(0x72)]
+ public Result ConnectToPort(out int clientSessionHandle, int clientPortHandle)
+ {
+ clientSessionHandle = 0;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ KClientPort clientPort = currentProcess.HandleTable.GetObject<KClientPort>(clientPortHandle);
+
+ if (clientPort == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ Result result = currentProcess.HandleTable.ReserveHandle(out int handle);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ KAutoObject session;
+
+ if (clientPort.IsLight)
+ {
+ result = clientPort.ConnectLight(out KLightClientSession clientSession);
+
+ session = clientSession;
+ }
+ else
+ {
+ result = clientPort.Connect(out KClientSession clientSession);
+
+ session = clientSession;
+ }
+
+ if (result != Result.Success)
+ {
+ currentProcess.HandleTable.CancelHandleReservation(handle);
+
+ return result;
+ }
+
+ currentProcess.HandleTable.SetReservedHandleObj(handle, session);
+
+ session.DecrementReferenceCount();
+
+ clientSessionHandle = handle;
+
+ return result;
+ }
+
+ // Memory
+
+ [Svc(1)]
+ public Result SetHeapSize([PointerSized] out ulong address, [PointerSized] ulong size)
+ {
+ if ((size & 0xfffffffe001fffff) != 0)
+ {
+ address = 0;
+
+ return KernelResult.InvalidSize;
+ }
+
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ return process.MemoryManager.SetHeapSize(size, out address);
+ }
+
+ [Svc(2)]
+ public Result SetMemoryPermission([PointerSized] ulong address, [PointerSized] ulong size, KMemoryPermission permission)
+ {
+ if (!PageAligned(address))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(size) || size == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (address + size <= address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (permission != KMemoryPermission.None && (permission | KMemoryPermission.Write) != KMemoryPermission.ReadAndWrite)
+ {
+ return KernelResult.InvalidPermission;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (!currentProcess.MemoryManager.InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ return currentProcess.MemoryManager.SetMemoryPermission(address, size, permission);
+ }
+
+ [Svc(3)]
+ public Result SetMemoryAttribute(
+ [PointerSized] ulong address,
+ [PointerSized] ulong size,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeValue)
+ {
+ if (!PageAligned(address))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(size) || size == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ MemoryAttribute attributes = attributeMask | attributeValue;
+
+ if (attributes != attributeMask ||
+ (attributes | MemoryAttribute.Uncached) != MemoryAttribute.Uncached)
+ {
+ return KernelResult.InvalidCombination;
+ }
+
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ if (!process.MemoryManager.InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ Result result = process.MemoryManager.SetMemoryAttribute(
+ address,
+ size,
+ attributeMask,
+ attributeValue);
+
+ return result;
+ }
+
+ [Svc(4)]
+ public Result MapMemory([PointerSized] ulong dst, [PointerSized] ulong src, [PointerSized] ulong size)
+ {
+ if (!PageAligned(src | dst))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(size) || size == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (src + size <= src || dst + size <= dst)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (!currentProcess.MemoryManager.InsideAddrSpace(src, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (currentProcess.MemoryManager.OutsideStackRegion(dst, size) ||
+ currentProcess.MemoryManager.InsideHeapRegion(dst, size) ||
+ currentProcess.MemoryManager.InsideAliasRegion(dst, size))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ return process.MemoryManager.Map(dst, src, size);
+ }
+
+ [Svc(5)]
+ public Result UnmapMemory([PointerSized] ulong dst, [PointerSized] ulong src, [PointerSized] ulong size)
+ {
+ if (!PageAligned(src | dst))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(size) || size == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (src + size <= src || dst + size <= dst)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (!currentProcess.MemoryManager.InsideAddrSpace(src, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (currentProcess.MemoryManager.OutsideStackRegion(dst, size) ||
+ currentProcess.MemoryManager.InsideHeapRegion(dst, size) ||
+ currentProcess.MemoryManager.InsideAliasRegion(dst, size))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ return process.MemoryManager.Unmap(dst, src, size);
+ }
+
+ [Svc(6)]
+ public Result QueryMemory([PointerSized] ulong infoPtr, [PointerSized] out ulong pageInfo, [PointerSized] ulong address)
+ {
+ Result result = QueryMemory(out MemoryInfo info, out pageInfo, address);
+
+ if (result == Result.Success)
+ {
+ return KernelTransfer.KernelToUser(infoPtr, info)
+ ? Result.Success
+ : KernelResult.InvalidMemState;
+ }
+
+ return result;
+ }
+
+ public Result QueryMemory(out MemoryInfo info, out ulong pageInfo, ulong address)
+ {
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ KMemoryInfo blockInfo = process.MemoryManager.QueryMemory(address);
+
+ info = new MemoryInfo(
+ blockInfo.Address,
+ blockInfo.Size,
+ blockInfo.State & MemoryState.UserMask,
+ blockInfo.Attribute,
+ blockInfo.Permission & KMemoryPermission.UserMask,
+ blockInfo.IpcRefCount,
+ blockInfo.DeviceRefCount);
+
+ pageInfo = 0;
+
+ return Result.Success;
+ }
+
+ [Svc(0x13)]
+ public Result MapSharedMemory(int handle, [PointerSized] ulong address, [PointerSized] ulong size, KMemoryPermission permission)
+ {
+ if (!PageAligned(address))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(size) || size == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (address + size <= address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if ((permission | KMemoryPermission.Write) != KMemoryPermission.ReadAndWrite)
+ {
+ return KernelResult.InvalidPermission;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ KSharedMemory sharedMemory = currentProcess.HandleTable.GetObject<KSharedMemory>(handle);
+
+ if (sharedMemory == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ if (currentProcess.MemoryManager.IsInvalidRegion(address, size) ||
+ currentProcess.MemoryManager.InsideHeapRegion(address, size) ||
+ currentProcess.MemoryManager.InsideAliasRegion(address, size))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ return sharedMemory.MapIntoProcess(
+ currentProcess.MemoryManager,
+ address,
+ size,
+ currentProcess,
+ permission);
+ }
+
+ [Svc(0x14)]
+ public Result UnmapSharedMemory(int handle, [PointerSized] ulong address, [PointerSized] ulong size)
+ {
+ if (!PageAligned(address))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(size) || size == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (address + size <= address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ KSharedMemory sharedMemory = currentProcess.HandleTable.GetObject<KSharedMemory>(handle);
+
+ if (sharedMemory == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ if (currentProcess.MemoryManager.IsInvalidRegion(address, size) ||
+ currentProcess.MemoryManager.InsideHeapRegion(address, size) ||
+ currentProcess.MemoryManager.InsideAliasRegion(address, size))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ return sharedMemory.UnmapFromProcess(
+ currentProcess.MemoryManager,
+ address,
+ size,
+ currentProcess);
+ }
+
+ [Svc(0x15)]
+ public Result CreateTransferMemory(out int handle, [PointerSized] ulong address, [PointerSized] ulong size, KMemoryPermission permission)
+ {
+ handle = 0;
+
+ if (!PageAligned(address))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(size) || size == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (address + size <= address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (permission > KMemoryPermission.ReadAndWrite || permission == KMemoryPermission.Write)
+ {
+ return KernelResult.InvalidPermission;
+ }
+
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ KResourceLimit resourceLimit = process.ResourceLimit;
+
+ if (resourceLimit != null && !resourceLimit.Reserve(LimitableResource.TransferMemory, 1))
+ {
+ return KernelResult.ResLimitExceeded;
+ }
+
+ void CleanUpForError()
+ {
+ resourceLimit?.Release(LimitableResource.TransferMemory, 1);
+ }
+
+ if (!process.MemoryManager.InsideAddrSpace(address, size))
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ KTransferMemory transferMemory = new KTransferMemory(_context);
+
+ Result result = transferMemory.Initialize(address, size, permission);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ result = process.HandleTable.GenerateHandle(transferMemory, out handle);
+
+ transferMemory.DecrementReferenceCount();
+
+ return result;
+ }
+
+ [Svc(0x51)]
+ public Result MapTransferMemory(int handle, [PointerSized] ulong address, [PointerSized] ulong size, KMemoryPermission permission)
+ {
+ if (!PageAligned(address))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ if (!PageAligned(size) || size == 0)
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (address + size <= address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (permission > KMemoryPermission.ReadAndWrite || permission == KMemoryPermission.Write)
+ {
+ return KernelResult.InvalidPermission;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ KTransferMemory transferMemory = currentProcess.HandleTable.GetObject<KTransferMemory>(handle);
+
+ if (transferMemory == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ if (currentProcess.MemoryManager.IsInvalidRegion(address, size) ||
+ currentProcess.MemoryManager.InsideHeapRegion(address, size) ||
+ currentProcess.MemoryManager.InsideAliasRegion(address, size))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ return transferMemory.MapIntoProcess(
+ currentProcess.MemoryManager,
+ address,
+ size,
+ currentProcess,
+ permission);
+ }
+
// Unmaps a previously mapped transfer memory object from the current process.
[Svc(0x52)]
public Result UnmapTransferMemory(int handle, [PointerSized] ulong address, [PointerSized] ulong size)
{
    // Validate alignment and range before looking anything up.
    if (!PageAligned(address))
    {
        return KernelResult.InvalidAddress;
    }

    if (size == 0 || !PageAligned(size))
    {
        return KernelResult.InvalidSize;
    }

    if (address + size <= address)
    {
        return KernelResult.InvalidMemState;
    }

    KProcess process = KernelStatic.GetCurrentProcess();

    KTransferMemory transferMemory = process.HandleTable.GetObject<KTransferMemory>(handle);

    if (transferMemory == null)
    {
        return KernelResult.InvalidHandle;
    }

    // Same region constraints as the map operation.
    bool badRange = process.MemoryManager.IsInvalidRegion(address, size) ||
                    process.MemoryManager.InsideHeapRegion(address, size) ||
                    process.MemoryManager.InsideAliasRegion(address, size);

    if (badRange)
    {
        return KernelResult.InvalidMemRange;
    }

    return transferMemory.UnmapFromProcess(process.MemoryManager, address, size, process);
}
+
// Maps physical memory backed by the process personal heap into the alias region.
// Fix: the original fetched the current process a second time at the end even
// though it was already held in a local; the duplicate lookup was removed.
[Svc(0x2c)]
public Result MapPhysicalMemory([PointerSized] ulong address, [PointerSized] ulong size)
{
    if (!PageAligned(address))
    {
        return KernelResult.InvalidAddress;
    }

    if (!PageAligned(size) || size == 0)
    {
        return KernelResult.InvalidSize;
    }

    if (address + size <= address)
    {
        return KernelResult.InvalidMemRange;
    }

    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    // The process must have a system resource (personal mm heap) to draw from.
    if ((currentProcess.PersonalMmHeapPagesCount & 0xfffffffffffff) == 0)
    {
        return KernelResult.InvalidState;
    }

    if (!currentProcess.MemoryManager.InsideAddrSpace(address, size) ||
        currentProcess.MemoryManager.OutsideAliasRegion(address, size))
    {
        return KernelResult.InvalidMemRange;
    }

    return currentProcess.MemoryManager.MapPhysicalMemory(address, size);
}
+
// Unmaps physical memory previously mapped with MapPhysicalMemory.
// Fix: removed the redundant second KernelStatic.GetCurrentProcess() call at
// the end; the process is already held in currentProcess.
[Svc(0x2d)]
public Result UnmapPhysicalMemory([PointerSized] ulong address, [PointerSized] ulong size)
{
    if (!PageAligned(address))
    {
        return KernelResult.InvalidAddress;
    }

    if (!PageAligned(size) || size == 0)
    {
        return KernelResult.InvalidSize;
    }

    if (address + size <= address)
    {
        return KernelResult.InvalidMemRange;
    }

    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    // The process must have a system resource (personal mm heap) to return pages to.
    if ((currentProcess.PersonalMmHeapPagesCount & 0xfffffffffffff) == 0)
    {
        return KernelResult.InvalidState;
    }

    if (!currentProcess.MemoryManager.InsideAddrSpace(address, size) ||
        currentProcess.MemoryManager.OutsideAliasRegion(address, size))
    {
        return KernelResult.InvalidMemRange;
    }

    return currentProcess.MemoryManager.UnmapPhysicalMemory(address, size);
}
+
// Creates a code memory object over an existing range of the caller's address
// space and returns a handle to it.
[Svc(0x4b)]
public Result CreateCodeMemory(out int handle, [PointerSized] ulong address, [PointerSized] ulong size)
{
    handle = 0;

    if (!PageAligned(address))
    {
        return KernelResult.InvalidAddress;
    }

    if (size == 0 || !PageAligned(size))
    {
        return KernelResult.InvalidSize;
    }

    if (size + address <= address)
    {
        return KernelResult.InvalidMemState;
    }

    KCodeMemory codeMemory = new KCodeMemory(_context);

    // Drop the creation reference on every exit path; the handle table holds
    // its own reference once GenerateHandle succeeds.
    using var releaseRef = new OnScopeExit(codeMemory.DecrementReferenceCount);

    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    if (!currentProcess.MemoryManager.InsideAddrSpace(address, size))
    {
        return KernelResult.InvalidMemState;
    }

    Result result = codeMemory.Initialize(address, size);

    if (result == Result.Success)
    {
        result = currentProcess.HandleTable.GenerateHandle(codeMemory, out handle);
    }

    return result;
}
+
// Performs a map/unmap operation on a code memory object. The validation is
// split in two phases: first the target memory state for the operation is
// resolved and the range checked, then the permission is validated and the
// operation dispatched.
[Svc(0x4c)]
public Result ControlCodeMemory(
    int handle,
    CodeMemoryOperation op,
    ulong address,
    ulong size,
    KMemoryPermission permission)
{
    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    KCodeMemory codeMemory = currentProcess.HandleTable.GetObject<KCodeMemory>(handle);

    // Newer versions of the kernel also returns an error here if the owner and process
    // where the operation will happen are the same. We do not return an error here
    // for homebrew because some of them requires this to be patched out to work (for JIT).
    if (codeMemory == null || (!currentProcess.AllowCodeMemoryForJit && codeMemory.Owner == currentProcess))
    {
        return KernelResult.InvalidHandle;
    }

    // Phase 1: resolve the memory state targeted by this operation.
    MemoryState targetState;

    switch (op)
    {
        case CodeMemoryOperation.Map:
        case CodeMemoryOperation.Unmap:
            targetState = MemoryState.CodeWritable;
            break;
        case CodeMemoryOperation.MapToOwner:
        case CodeMemoryOperation.UnmapFromOwner:
            targetState = MemoryState.CodeReadOnly;
            break;
        default:
            return KernelResult.InvalidEnumValue;
    }

    if (!currentProcess.MemoryManager.CanContain(address, size, targetState))
    {
        return KernelResult.InvalidMemRange;
    }

    // Phase 2: validate the permission for the operation and dispatch it.
    switch (op)
    {
        case CodeMemoryOperation.Map:
            if (permission != KMemoryPermission.ReadAndWrite)
            {
                return KernelResult.InvalidPermission;
            }

            return codeMemory.Map(address, size, permission);

        case CodeMemoryOperation.MapToOwner:
            if (permission != KMemoryPermission.Read && permission != KMemoryPermission.ReadAndExecute)
            {
                return KernelResult.InvalidPermission;
            }

            return codeMemory.MapToOwner(address, size, permission);

        case CodeMemoryOperation.Unmap:
            if (permission != KMemoryPermission.None)
            {
                return KernelResult.InvalidPermission;
            }

            return codeMemory.Unmap(address, size);

        default: // CodeMemoryOperation.UnmapFromOwner, already filtered above.
            if (permission != KMemoryPermission.None)
            {
                return KernelResult.InvalidPermission;
            }

            return codeMemory.UnmapFromOwner(address, size);
    }
}
+
// Changes the memory permission of a range inside another process (by handle).
[Svc(0x73)]
public Result SetProcessMemoryPermission(
    int handle,
    [PointerSized] ulong src,
    [PointerSized] ulong size,
    KMemoryPermission permission)
{
    if (!PageAligned(src))
    {
        return KernelResult.InvalidAddress;
    }

    if (size == 0 || !PageAligned(size))
    {
        return KernelResult.InvalidSize;
    }

    // Only None, R, RW and RX are acceptable here.
    bool permissionIsValid =
        permission == KMemoryPermission.None ||
        permission == KMemoryPermission.Read ||
        permission == KMemoryPermission.ReadAndWrite ||
        permission == KMemoryPermission.ReadAndExecute;

    if (!permissionIsValid)
    {
        return KernelResult.InvalidPermission;
    }

    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    KProcess targetProcess = currentProcess.HandleTable.GetObject<KProcess>(handle);

    if (targetProcess == null)
    {
        return KernelResult.InvalidHandle;
    }

    if (targetProcess.MemoryManager.OutsideAddrSpace(src, size))
    {
        return KernelResult.InvalidMemState;
    }

    return targetProcess.MemoryManager.SetProcessMemoryPermission(src, size, permission);
}
+
// Maps memory from another process (src, in the process referenced by handle)
// into the current process at dst, as ProcessMemory with RW permission.
[Svc(0x74)]
public Result MapProcessMemory(
    [PointerSized] ulong dst,
    int handle,
    ulong src,
    [PointerSized] ulong size)
{
    if (!PageAligned(src) || !PageAligned(dst))
    {
        return KernelResult.InvalidAddress;
    }

    if (!PageAligned(size) || size == 0)
    {
        return KernelResult.InvalidSize;
    }

    // Reject ranges that wrap around the address space on either side.
    if (dst + size <= dst || src + size <= src)
    {
        return KernelResult.InvalidMemRange;
    }

    KProcess dstProcess = KernelStatic.GetCurrentProcess();
    KProcess srcProcess = dstProcess.HandleTable.GetObject<KProcess>(handle);

    if (srcProcess == null)
    {
        return KernelResult.InvalidHandle;
    }

    if (!srcProcess.MemoryManager.InsideAddrSpace(src, size) ||
        !dstProcess.MemoryManager.CanContain(dst, size, MemoryState.ProcessMemory))
    {
        return KernelResult.InvalidMemRange;
    }

    KPageList pageList = new KPageList();

    // Collect the physical pages of the source range; it must be entirely in
    // the MapProcessAllowed state with no special attributes set.
    Result result = srcProcess.MemoryManager.GetPagesIfStateEquals(
        src,
        size,
        MemoryState.MapProcessAllowed,
        MemoryState.MapProcessAllowed,
        KMemoryPermission.None,
        KMemoryPermission.None,
        MemoryAttribute.Mask,
        MemoryAttribute.None,
        pageList);

    if (result != Result.Success)
    {
        return result;
    }

    // Map those same physical pages into the destination process.
    return dstProcess.MemoryManager.MapPages(dst, pageList, MemoryState.ProcessMemory, KMemoryPermission.ReadAndWrite);
}
+
// Unmaps memory previously mapped from another process with MapProcessMemory.
// Fix: the original stored the result, compared it against Result.Success and
// then returned Result.Success again — a redundant round-trip. The result of
// UnmapProcessMemory is now returned directly (identical observable behavior).
[Svc(0x75)]
public Result UnmapProcessMemory(
    [PointerSized] ulong dst,
    int handle,
    ulong src,
    [PointerSized] ulong size)
{
    if (!PageAligned(src) || !PageAligned(dst))
    {
        return KernelResult.InvalidAddress;
    }

    if (!PageAligned(size) || size == 0)
    {
        return KernelResult.InvalidSize;
    }

    // Reject ranges that wrap around the address space on either side.
    if (dst + size <= dst || src + size <= src)
    {
        return KernelResult.InvalidMemRange;
    }

    KProcess dstProcess = KernelStatic.GetCurrentProcess();
    KProcess srcProcess = dstProcess.HandleTable.GetObject<KProcess>(handle);

    if (srcProcess == null)
    {
        return KernelResult.InvalidHandle;
    }

    if (!srcProcess.MemoryManager.InsideAddrSpace(src, size) ||
        !dstProcess.MemoryManager.CanContain(dst, size, MemoryState.ProcessMemory))
    {
        return KernelResult.InvalidMemRange;
    }

    return dstProcess.MemoryManager.UnmapProcessMemory(dst, size, srcProcess.MemoryManager, src);
}
+
// Mirrors a code range inside the target process (handle): maps the pages at
// src to dst within that same process's address space.
[Svc(0x77)]
public Result MapProcessCodeMemory(int handle, ulong dst, ulong src, ulong size)
{
    if (!PageAligned(dst) || !PageAligned(src))
    {
        return KernelResult.InvalidAddress;
    }

    if (!PageAligned(size) || size == 0)
    {
        return KernelResult.InvalidSize;
    }

    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    KProcess targetProcess = currentProcess.HandleTable.GetObject<KProcess>(handle);

    if (targetProcess == null)
    {
        return KernelResult.InvalidHandle;
    }

    // Both ranges must be inside the target address space, and the destination
    // must not overlap the alias or heap regions.
    if (targetProcess.MemoryManager.OutsideAddrSpace(dst, size) ||
        targetProcess.MemoryManager.OutsideAddrSpace(src, size) ||
        targetProcess.MemoryManager.InsideAliasRegion(dst, size) ||
        targetProcess.MemoryManager.InsideHeapRegion(dst, size))
    {
        return KernelResult.InvalidMemRange;
    }

    // Overflow check happens after the range checks, mirroring the error
    // precedence of the real kernel (NOTE(review): order looks deliberate —
    // keep it when modifying).
    if (size + dst <= dst || size + src <= src)
    {
        return KernelResult.InvalidMemState;
    }

    return targetProcess.MemoryManager.MapProcessCodeMemory(dst, src, size);
}
+
// Reverses MapProcessCodeMemory: removes the dst mirror of the src code range
// inside the target process (handle).
[Svc(0x78)]
public Result UnmapProcessCodeMemory(int handle, ulong dst, ulong src, ulong size)
{
    if (!PageAligned(dst) || !PageAligned(src))
    {
        return KernelResult.InvalidAddress;
    }

    if (size == 0 || !PageAligned(size))
    {
        return KernelResult.InvalidSize;
    }

    KProcess targetProcess = KernelStatic.GetCurrentProcess().HandleTable.GetObject<KProcess>(handle);

    if (targetProcess == null)
    {
        return KernelResult.InvalidHandle;
    }

    var memoryManager = targetProcess.MemoryManager;

    // Both ranges must be inside the target address space, and the destination
    // must not overlap the alias or heap regions.
    bool rangeIsValid = !memoryManager.OutsideAddrSpace(dst, size) &&
                        !memoryManager.OutsideAddrSpace(src, size) &&
                        !memoryManager.InsideAliasRegion(dst, size) &&
                        !memoryManager.InsideHeapRegion(dst, size);

    if (!rangeIsValid)
    {
        return KernelResult.InvalidMemRange;
    }

    // Overflow check intentionally comes after the range checks (error precedence).
    if (size + dst <= dst || size + src <= src)
    {
        return KernelResult.InvalidMemState;
    }

    return memoryManager.UnmapProcessCodeMemory(dst, src, size);
}
+
// True when the address sits on a page boundary (no page-offset bits set).
private static bool PageAligned(ulong address)
{
    ulong pageOffsetMask = KPageTableBase.PageSize - 1;

    return (address & pageOffsetMask) == 0;
}
+
+ // System
+
// System

// Terminates the process referenced by handle. Terminating the calling
// process itself takes a dedicated path.
[Svc(0x7b)]
public Result TerminateProcess(int handle)
{
    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    KProcess process = currentProcess.HandleTable.GetObject<KProcess>(handle);

    if (process == null)
    {
        return KernelResult.InvalidHandle;
    }

    if (process == currentProcess)
    {
        // Self-termination: drop the lookup reference while tearing down.
        process.DecrementToZeroWhileTerminatingCurrent();

        return Result.Success;
    }

    Result result = process.Terminate();

    process.DecrementReferenceCount();

    return result;
}
+
// Exits the calling process.
[Svc(7)]
public void ExitProcess()
{
    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    currentProcess.TerminateCurrentProcess();
}
+
// Signals the writable side of an event, waking its waiters.
[Svc(0x11)]
public Result SignalEvent(int handle)
{
    KProcess process = KernelStatic.GetCurrentProcess();

    KWritableEvent writableEvent = process.HandleTable.GetObject<KWritableEvent>(handle);

    if (writableEvent == null)
    {
        return KernelResult.InvalidHandle;
    }

    writableEvent.Signal();

    return Result.Success;
}
+
// Clears an event. The handle may reference either the writable or the
// readable side; the writable side is tried first.
[Svc(0x12)]
public Result ClearEvent(int handle)
{
    KProcess process = KernelStatic.GetCurrentProcess();

    KWritableEvent writableEvent = process.HandleTable.GetObject<KWritableEvent>(handle);

    if (writableEvent != null)
    {
        return writableEvent.Clear();
    }

    KReadableEvent readableEvent = process.HandleTable.GetObject<KReadableEvent>(handle);

    if (readableEvent != null)
    {
        return readableEvent.Clear();
    }

    return KernelResult.InvalidHandle;
}
+
// Closes a handle in the calling process's handle table.
[Svc(0x16)]
public Result CloseHandle(int handle)
{
    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    if (currentProcess.HandleTable.CloseHandle(handle))
    {
        return Result.Success;
    }

    return KernelResult.InvalidHandle;
}
+
// Resets the signaled state of a readable event or, failing that, of a
// process referenced by the handle.
[Svc(0x17)]
public Result ResetSignal(int handle)
{
    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    KReadableEvent readableEvent = currentProcess.HandleTable.GetObject<KReadableEvent>(handle);

    if (readableEvent != null)
    {
        return readableEvent.ClearIfSignaled();
    }

    KProcess process = currentProcess.HandleTable.GetKProcess(handle);

    if (process != null)
    {
        return process.ClearIfNotExited();
    }

    return KernelResult.InvalidHandle;
}
+
// Returns the current value of the emulated system tick counter.
[Svc(0x1e)]
public ulong GetSystemTick() => _context.TickSource.Counter;
+
// Handles svcBreak. A fatal break dumps guest debugging info and terminates
// the process; otherwise the break is only logged.
[Svc(0x26)]
public void Break(ulong reason)
{
    KThread currentThread = KernelStatic.GetCurrentThread();

    // Bit 31 clear -> fatal break. NOTE(review): bit 31 presumably corresponds
    // to the "notification only" flag of the break reason — confirm against
    // the BreakReason definition.
    if ((reason & (1UL << 31)) == 0)
    {
        currentThread.PrintGuestStackTrace();
        currentThread.PrintGuestRegisterPrintout();

        // As the process is exiting, this is probably caused by emulation termination.
        if (currentThread.Owner.State == ProcessState.Exiting)
        {
            return;
        }

        // TODO: Debug events.
        currentThread.Owner.TerminateCurrentProcess();

        // Unwind out of the guest; execution of this thread does not continue.
        throw new GuestBrokeExecutionException();
    }
    else
    {
        Logger.Debug?.Print(LogClass.KernelSvc, "Debugger triggered.");
    }
}
+
// Reads an ASCII string from guest memory and forwards it to the host log.
[Svc(0x27)]
public void OutputDebugString([PointerSized] ulong strPtr, [PointerSized] ulong size)
{
    KProcess process = KernelStatic.GetCurrentProcess();

    string message = MemoryHelper.ReadAsciiString(process.CpuMemory, strPtr, (long)size);

    Logger.Warning?.Print(LogClass.KernelSvc, message);
}
+
// svcGetInfo: returns a kernel or process information value selected by id.
// Most ids are process scoped (handle references a process, subId must be 0);
// the remainder are thread/core or global queries with their own handle/subId
// validation, each documented at its case below.
[Svc(0x29)]
public Result GetInfo(out ulong value, InfoType id, int handle, long subId)
{
    value = 0;

    switch (id)
    {
        // Process scoped queries: handle must reference a process, subId must be 0.
        case InfoType.CoreMask:
        case InfoType.PriorityMask:
        case InfoType.AliasRegionAddress:
        case InfoType.AliasRegionSize:
        case InfoType.HeapRegionAddress:
        case InfoType.HeapRegionSize:
        case InfoType.TotalMemorySize:
        case InfoType.UsedMemorySize:
        case InfoType.AslrRegionAddress:
        case InfoType.AslrRegionSize:
        case InfoType.StackRegionAddress:
        case InfoType.StackRegionSize:
        case InfoType.SystemResourceSizeTotal:
        case InfoType.SystemResourceSizeUsed:
        case InfoType.ProgramId:
        case InfoType.UserExceptionContextAddress:
        case InfoType.TotalNonSystemMemorySize:
        case InfoType.UsedNonSystemMemorySize:
        case InfoType.IsApplication:
        case InfoType.FreeThreadCount:
        {
            if (subId != 0)
            {
                return KernelResult.InvalidCombination;
            }

            KProcess currentProcess = KernelStatic.GetCurrentProcess();

            KProcess process = currentProcess.HandleTable.GetKProcess(handle);

            if (process == null)
            {
                return KernelResult.InvalidHandle;
            }

            switch (id)
            {
                case InfoType.CoreMask: value = process.Capabilities.AllowedCpuCoresMask; break;
                case InfoType.PriorityMask: value = process.Capabilities.AllowedThreadPriosMask; break;

                case InfoType.AliasRegionAddress: value = process.MemoryManager.AliasRegionStart; break;
                case InfoType.AliasRegionSize:
                    value = (process.MemoryManager.AliasRegionEnd -
                        process.MemoryManager.AliasRegionStart); break;

                case InfoType.HeapRegionAddress: value = process.MemoryManager.HeapRegionStart; break;
                case InfoType.HeapRegionSize:
                    value = (process.MemoryManager.HeapRegionEnd -
                        process.MemoryManager.HeapRegionStart); break;

                case InfoType.TotalMemorySize: value = process.GetMemoryCapacity(); break;

                case InfoType.UsedMemorySize: value = process.GetMemoryUsage(); break;

                case InfoType.AslrRegionAddress: value = process.MemoryManager.GetAddrSpaceBaseAddr(); break;

                case InfoType.AslrRegionSize: value = process.MemoryManager.GetAddrSpaceSize(); break;

                case InfoType.StackRegionAddress: value = process.MemoryManager.StackRegionStart; break;
                case InfoType.StackRegionSize:
                    value = (process.MemoryManager.StackRegionEnd -
                        process.MemoryManager.StackRegionStart); break;

                case InfoType.SystemResourceSizeTotal: value = process.PersonalMmHeapPagesCount * KPageTableBase.PageSize; break;

                case InfoType.SystemResourceSizeUsed:
                    // Reported as 0 when the process has no personal mm heap.
                    if (process.PersonalMmHeapPagesCount != 0)
                    {
                        value = process.MemoryManager.GetMmUsedPages() * KPageTableBase.PageSize;
                    }

                    break;

                case InfoType.ProgramId: value = process.TitleId; break;

                case InfoType.UserExceptionContextAddress: value = process.UserExceptionContextAddress; break;

                case InfoType.TotalNonSystemMemorySize: value = process.GetMemoryCapacityWithoutPersonalMmHeap(); break;

                case InfoType.UsedNonSystemMemorySize: value = process.GetMemoryUsageWithoutPersonalMmHeap(); break;

                case InfoType.IsApplication: value = process.IsApplication ? 1UL : 0UL; break;

                case InfoType.FreeThreadCount:
                    // Remaining thread budget under the process resource limit.
                    if (process.ResourceLimit != null)
                    {
                        value = (ulong)(process.ResourceLimit.GetLimitValue(LimitableResource.Thread) -
                            process.ResourceLimit.GetCurrentValue(LimitableResource.Thread));
                    }
                    else
                    {
                        value = 0;
                    }

                    break;
            }

            break;
        }

        // Global query: no handle, no subId.
        case InfoType.DebuggerAttached:
        {
            if (handle != 0)
            {
                return KernelResult.InvalidHandle;
            }

            if (subId != 0)
            {
                return KernelResult.InvalidCombination;
            }

            value = KernelStatic.GetCurrentProcess().Debug ? 1UL : 0UL;

            break;
        }

        // Creates and returns a handle to the caller's resource limit.
        case InfoType.ResourceLimit:
        {
            if (handle != 0)
            {
                return KernelResult.InvalidHandle;
            }

            if (subId != 0)
            {
                return KernelResult.InvalidCombination;
            }

            KProcess currentProcess = KernelStatic.GetCurrentProcess();

            if (currentProcess.ResourceLimit != null)
            {
                KHandleTable handleTable = currentProcess.HandleTable;
                KResourceLimit resourceLimit = currentProcess.ResourceLimit;

                Result result = handleTable.GenerateHandle(resourceLimit, out int resLimHandle);

                if (result != Result.Success)
                {
                    return result;
                }

                value = (uint)resLimHandle;
            }

            break;
        }

        // Idle time of the core the caller is running on (subId -1 or current core).
        case InfoType.IdleTickCount:
        {
            if (handle != 0)
            {
                return KernelResult.InvalidHandle;
            }

            int currentCore = KernelStatic.GetCurrentThread().CurrentCore;

            if (subId != -1 && subId != currentCore)
            {
                return KernelResult.InvalidCombination;
            }

            value = (ulong)KTimeManager.ConvertHostTicksToTicks(_context.Schedulers[currentCore].TotalIdleTimeTicks);

            break;
        }

        // One of four per-process entropy words, selected by subId.
        case InfoType.RandomEntropy:
        {
            if (handle != 0)
            {
                return KernelResult.InvalidHandle;
            }

            if ((ulong)subId > 3)
            {
                return KernelResult.InvalidCombination;
            }

            KProcess currentProcess = KernelStatic.GetCurrentProcess();

            value = currentProcess.RandomEntropy[subId];

            break;
        }

        // Run time of a thread (handle), either on a specific core or in total.
        case InfoType.ThreadTickCount:
        {
            if (subId < -1 || subId > 3)
            {
                return KernelResult.InvalidCombination;
            }

            KThread thread = KernelStatic.GetCurrentProcess().HandleTable.GetKThread(handle);

            if (thread == null)
            {
                return KernelResult.InvalidHandle;
            }

            KThread currentThread = KernelStatic.GetCurrentThread();

            int currentCore = currentThread.CurrentCore;

            // Querying a core other than the current one reports success
            // without writing a tick value.
            if (subId != -1 && subId != currentCore)
            {
                return Result.Success;
            }

            KScheduler scheduler = _context.Schedulers[currentCore];

            // Time elapsed since the last context switch on this core.
            long timeDelta = PerformanceCounter.ElapsedTicks - scheduler.LastContextSwitchTime;

            if (subId != -1)
            {
                value = (ulong)KTimeManager.ConvertHostTicksToTicks(timeDelta);
            }
            else
            {
                long totalTimeRunning = thread.TotalTimeRunning;

                // Include the in-progress time slice if the thread is the caller.
                if (thread == currentThread)
                {
                    totalTimeRunning += timeDelta;
                }

                value = (ulong)KTimeManager.ConvertHostTicksToTicks(totalTimeRunning);
            }

            break;
        }

        // Mesosphere extension: returns a fresh handle to the current process.
        case InfoType.MesosphereCurrentProcess:
        {
            if (handle != 0)
            {
                return KernelResult.InvalidHandle;
            }

            if ((ulong)subId != 0)
            {
                return KernelResult.InvalidCombination;
            }

            KProcess currentProcess = KernelStatic.GetCurrentProcess();
            KHandleTable handleTable = currentProcess.HandleTable;

            Result result = handleTable.GenerateHandle(currentProcess, out int outHandle);

            if (result != Result.Success)
            {
                return result;
            }

            value = (ulong)outHandle;

            break;
        }

        default: return KernelResult.InvalidEnumValue;
    }

    return Result.Success;
}
+
// Creates an event and returns handles to its writable and readable sides.
// On failure of the second handle, the first one is closed again.
[Svc(0x45)]
public Result CreateEvent(out int wEventHandle, out int rEventHandle)
{
    KEvent evnt = new KEvent(_context);

    KProcess process = KernelStatic.GetCurrentProcess();

    Result result = process.HandleTable.GenerateHandle(evnt.WritableEvent, out wEventHandle);

    if (result != Result.Success)
    {
        rEventHandle = 0;

        return result;
    }

    result = process.HandleTable.GenerateHandle(evnt.ReadableEvent, out rEventHandle);

    if (result != Result.Success)
    {
        // Don't leak the writable handle when the readable one could not be made.
        process.HandleTable.CloseHandle(wEventHandle);
    }

    return result;
}
+
// Copies up to maxCount process IDs to guest memory at address and reports
// the total number of processes in count.
[Svc(0x65)]
public Result GetProcessList(out int count, [PointerSized] ulong address, int maxCount)
{
    count = 0;

    if ((maxCount >> 28) != 0)
    {
        return KernelResult.MaximumExceeded;
    }

    if (maxCount != 0)
    {
        KProcess currentProcess = KernelStatic.GetCurrentProcess();

        ulong copySize = (ulong)maxCount * 8;

        // The destination buffer must not wrap and must be inside the address space.
        if (address + copySize <= address ||
            currentProcess.MemoryManager.OutsideAddrSpace(address, copySize))
        {
            return KernelResult.InvalidMemState;
        }
    }

    int copyCount = 0;

    lock (_context.Processes)
    {
        foreach (KProcess process in _context.Processes.Values)
        {
            // Only the first maxCount PIDs are written; the rest are just counted.
            if (copyCount < maxCount &&
                !KernelTransfer.KernelToUser(address + (ulong)copyCount * 8, process.Pid))
            {
                return KernelResult.UserCopyFailed;
            }

            copyCount++;
        }
    }

    count = copyCount;

    return Result.Success;
}
+
// Returns global system information: memory region capacity/free space
// (id 0/1, subId selects the region) or the privileged process ID bounds (id 2).
[Svc(0x6f)]
public Result GetSystemInfo(out long value, uint id, int handle, long subId)
{
    value = 0;

    if (id > 2)
    {
        return KernelResult.InvalidEnumValue;
    }

    if (handle != 0)
    {
        return KernelResult.InvalidHandle;
    }

    if (id == 2)
    {
        // Privileged process ID range.
        if ((ulong)subId > 1)
        {
            return KernelResult.InvalidCombination;
        }

        value = subId == 0
            ? _context.PrivilegedProcessLowestId
            : _context.PrivilegedProcessHighestId;

        return Result.Success;
    }

    // ids 0 and 1: per memory region queries; subId selects one of 4 regions.
    if ((ulong)subId > 3)
    {
        return KernelResult.InvalidCombination;
    }

    KMemoryRegionManager region = _context.MemoryManager.MemoryRegions[subId];

    if (id == 0)
    {
        // Memory region capacity.
        value = (long)region.Size;
    }
    else
    {
        // Memory region free space.
        ulong freePagesCount = region.GetFreePages();

        value = (long)(freePagesCount * KPageTableBase.PageSize);
    }

    return Result.Success;
}
+
// Returns the configured limit of a resource on a resource limit object.
[Svc(0x30)]
public Result GetResourceLimitLimitValue(out long limitValue, int handle, LimitableResource resource)
{
    limitValue = 0;

    if (resource >= LimitableResource.Count)
    {
        return KernelResult.InvalidEnumValue;
    }

    KProcess process = KernelStatic.GetCurrentProcess();

    KResourceLimit resourceLimit = process.HandleTable.GetObject<KResourceLimit>(handle);

    if (resourceLimit != null)
    {
        limitValue = resourceLimit.GetLimitValue(resource);

        return Result.Success;
    }

    return KernelResult.InvalidHandle;
}
+
// Returns the current usage of a resource on a resource limit object.
[Svc(0x31)]
public Result GetResourceLimitCurrentValue(out long limitValue, int handle, LimitableResource resource)
{
    limitValue = 0;

    if (resource >= LimitableResource.Count)
    {
        return KernelResult.InvalidEnumValue;
    }

    KProcess process = KernelStatic.GetCurrentProcess();

    KResourceLimit resourceLimit = process.HandleTable.GetObject<KResourceLimit>(handle);

    if (resourceLimit != null)
    {
        limitValue = resourceLimit.GetCurrentValue(resource);

        return Result.Success;
    }

    return KernelResult.InvalidHandle;
}
+
// Returns the peak (high-water mark) usage of a resource on a resource limit object.
[Svc(0x37)]
public Result GetResourceLimitPeakValue(out long peak, int handle, LimitableResource resource)
{
    peak = 0;

    if (resource >= LimitableResource.Count)
    {
        return KernelResult.InvalidEnumValue;
    }

    KProcess process = KernelStatic.GetCurrentProcess();

    KResourceLimit resourceLimit = process.HandleTable.GetObject<KResourceLimit>(handle);

    if (resourceLimit != null)
    {
        peak = resourceLimit.GetPeakValue(resource);

        return Result.Success;
    }

    return KernelResult.InvalidHandle;
}
+
// Creates a fresh resource limit object and returns a handle to it.
[Svc(0x7d)]
public Result CreateResourceLimit(out int handle)
{
    KProcess process = KernelStatic.GetCurrentProcess();

    return process.HandleTable.GenerateHandle(new KResourceLimit(_context), out handle);
}
+
// Sets the limit of a resource on a resource limit object.
[Svc(0x7e)]
public Result SetResourceLimitLimitValue(int handle, LimitableResource resource, long limitValue)
{
    if (resource >= LimitableResource.Count)
    {
        return KernelResult.InvalidEnumValue;
    }

    KResourceLimit resourceLimit = KernelStatic.GetCurrentProcess()
        .HandleTable.GetObject<KResourceLimit>(handle);

    if (resourceLimit == null)
    {
        return KernelResult.InvalidHandle;
    }

    return resourceLimit.SetLimitValue(resource, limitValue);
}
+
+ // Thread
+
// Thread

// svcCreateThread entry point; forwards to the overload below with no custom
// host thread start routine.
[Svc(8)]
public Result CreateThread(
    out int handle,
    [PointerSized] ulong entrypoint,
    [PointerSized] ulong argsPtr,
    [PointerSized] ulong stackTop,
    int priority,
    int cpuCore)
{
    return CreateThread(out handle, entrypoint, argsPtr, stackTop, priority, cpuCore, null);
}
+
// Creates a guest thread in the current process, reserving one entry on the
// thread resource limit and returning a handle to the new thread.
// Fix: the success branch re-fetched the current process through
// KernelStatic.GetCurrentProcess() although it was already held in
// currentProcess; the redundant lookup was removed.
public Result CreateThread(
    out int handle,
    ulong entrypoint,
    ulong argsPtr,
    ulong stackTop,
    int priority,
    int cpuCore,
    ThreadStart customThreadStart)
{
    handle = 0;

    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    // -2 selects the process default core.
    if (cpuCore == -2)
    {
        cpuCore = currentProcess.DefaultCpuCore;
    }

    if ((uint)cpuCore >= KScheduler.CpuCoresCount || !currentProcess.IsCpuCoreAllowed(cpuCore))
    {
        return KernelResult.InvalidCpuCore;
    }

    if ((uint)priority >= KScheduler.PrioritiesCount || !currentProcess.IsPriorityAllowed(priority))
    {
        return KernelResult.InvalidPriority;
    }

    // Wait up to 100 ms for a thread slot on the resource limit.
    long timeout = KTimeManager.ConvertMillisecondsToNanoseconds(100);

    if (currentProcess.ResourceLimit != null &&
        !currentProcess.ResourceLimit.Reserve(LimitableResource.Thread, 1, timeout))
    {
        return KernelResult.ResLimitExceeded;
    }

    KThread thread = new KThread(_context);

    Result result = currentProcess.InitializeThread(
        thread,
        entrypoint,
        argsPtr,
        stackTop,
        priority,
        cpuCore,
        customThreadStart);

    if (result == Result.Success)
    {
        result = currentProcess.HandleTable.GenerateHandle(thread, out handle);
    }
    else
    {
        // Initialization failed: give the reserved thread slot back.
        currentProcess.ResourceLimit?.Release(LimitableResource.Thread, 1);
    }

    // Drop the creation reference; the handle table keeps its own on success.
    thread.DecrementReferenceCount();

    return result;
}
+
// Starts execution of a previously created thread.
[Svc(9)]
public Result StartThread(int handle)
{
    KProcess process = KernelStatic.GetCurrentProcess();

    KThread thread = process.HandleTable.GetKThread(handle);

    if (thread != null)
    {
        // Keep the thread alive across the start call.
        thread.IncrementReferenceCount();

        Result result = thread.Start();

        if (result == Result.Success)
        {
            // NOTE(review): the extra reference taken on success presumably
            // represents the running thread itself and is released when it
            // exits — confirm against KThread.
            thread.IncrementReferenceCount();
        }

        thread.DecrementReferenceCount();

        return result;
    }
    else
    {
        return KernelResult.InvalidHandle;
    }
}
+
// Exits the calling thread.
[Svc(0xa)]
public void ExitThread()
{
    KernelStatic.GetCurrentThread().Exit();
}
+
// Sleeps the calling thread. Non-positive timeouts are yield hints:
// 0 = plain yield, -1 = yield with load balancing, -2 = yield to any thread;
// other negative values do nothing.
[Svc(0xb)]
public void SleepThread(long timeout)
{
    if (timeout >= 1)
    {
        KernelStatic.GetCurrentThread().Sleep(timeout + KTimeManager.DefaultTimeIncrementNanoseconds);

        return;
    }

    if (timeout == 0)
    {
        KScheduler.Yield(_context);
    }
    else if (timeout == -1)
    {
        KScheduler.YieldWithLoadBalancing(_context);
    }
    else if (timeout == -2)
    {
        KScheduler.YieldToAnyThread(_context);
    }
}
+
// Returns the dynamic priority of the thread referenced by handle.
[Svc(0xc)]
public Result GetThreadPriority(out int priority, int handle)
{
    KProcess process = KernelStatic.GetCurrentProcess();

    KThread thread = process.HandleTable.GetKThread(handle);

    if (thread == null)
    {
        priority = 0;

        return KernelResult.InvalidHandle;
    }

    priority = thread.DynamicPriority;

    return Result.Success;
}
+
// Sets the base priority of the thread referenced by handle.
[Svc(0xd)]
public Result SetThreadPriority(int handle, int priority)
{
    // TODO: NPDM check.

    KThread thread = KernelStatic.GetCurrentProcess().HandleTable.GetKThread(handle);

    if (thread == null)
    {
        return KernelResult.InvalidHandle;
    }

    thread.SetPriority(priority);

    return Result.Success;
}
+
// Returns the preferred core and the core affinity mask of a thread.
[Svc(0xe)]
public Result GetThreadCoreMask(out int preferredCore, out ulong affinityMask, int handle)
{
    KProcess process = KernelStatic.GetCurrentProcess();

    KThread thread = process.HandleTable.GetKThread(handle);

    if (thread == null)
    {
        preferredCore = 0;
        affinityMask = 0;

        return KernelResult.InvalidHandle;
    }

    preferredCore = thread.PreferredCore;
    affinityMask = thread.AffinityMask;

    return Result.Success;
}
+
// Sets a thread's preferred core and affinity mask. A preferred core of -2
// selects the process default core with a single-core mask.
// Fix: the original fetched the current process a second time before the
// handle lookup even though it was already held in currentProcess; the
// redundant lookup was removed.
[Svc(0xf)]
public Result SetThreadCoreMask(int handle, int preferredCore, ulong affinityMask)
{
    KProcess currentProcess = KernelStatic.GetCurrentProcess();

    if (preferredCore == -2)
    {
        preferredCore = currentProcess.DefaultCpuCore;

        affinityMask = 1UL << preferredCore;
    }
    else
    {
        // The mask may only contain cores the process is allowed to use.
        if ((currentProcess.Capabilities.AllowedCpuCoresMask | affinityMask) !=
            currentProcess.Capabilities.AllowedCpuCoresMask)
        {
            return KernelResult.InvalidCpuCore;
        }

        if (affinityMask == 0)
        {
            return KernelResult.InvalidCombination;
        }

        if ((uint)preferredCore > 3)
        {
            // Outside 0-3 only the special "don't care" values are accepted
            // (-1 and -3, i.e. value | 2 == -1).
            if ((preferredCore | 2) != -1)
            {
                return KernelResult.InvalidCpuCore;
            }
        }
        else if ((affinityMask & (1UL << preferredCore)) == 0)
        {
            // The preferred core must be part of the affinity mask.
            return KernelResult.InvalidCombination;
        }
    }

    KThread thread = currentProcess.HandleTable.GetKThread(handle);

    if (thread == null)
    {
        return KernelResult.InvalidHandle;
    }

    return thread.SetCoreAndAffinityMask(preferredCore, affinityMask);
}
+
// Returns the core the calling thread is currently running on.
[Svc(0x10)]
public int GetCurrentProcessorNumber() => KernelStatic.GetCurrentThread().CurrentCore;
+
// Returns the unique thread ID of the thread referenced by handle.
[Svc(0x25)]
public Result GetThreadId(out ulong threadUid, int handle)
{
    KProcess process = KernelStatic.GetCurrentProcess();

    KThread thread = process.HandleTable.GetKThread(handle);

    if (thread == null)
    {
        threadUid = 0;

        return KernelResult.InvalidHandle;
    }

    threadUid = thread.ThreadUid;

    return Result.Success;
}
+
// Pauses or resumes a thread owned by the calling process. The calling
// thread itself cannot be targeted.
[Svc(0x32)]
public Result SetThreadActivity(int handle, bool pause)
{
    KProcess process = KernelStatic.GetCurrentProcess();

    KThread thread = process.HandleTable.GetObject<KThread>(handle);

    // The handle must resolve to a thread belonging to this process.
    if (thread == null || thread.Owner != process)
    {
        return KernelResult.InvalidHandle;
    }

    if (thread == KernelStatic.GetCurrentThread())
    {
        return KernelResult.InvalidThread;
    }

    return thread.SetActivity(pause);
}
+
// Copies the register context of a paused thread (owned by the calling
// process) to guest memory at address. The calling thread cannot be targeted.
[Svc(0x33)]
public Result GetThreadContext3([PointerSized] ulong address, int handle)
{
    KProcess currentProcess = KernelStatic.GetCurrentProcess();
    KThread currentThread = KernelStatic.GetCurrentThread();

    KThread thread = currentProcess.HandleTable.GetObject<KThread>(handle);

    // The handle must resolve to a thread belonging to this process.
    if (thread == null || thread.Owner != currentProcess)
    {
        return KernelResult.InvalidHandle;
    }

    if (currentThread == thread)
    {
        return KernelResult.InvalidThread;
    }

    Result result = thread.GetThreadContext3(out ThreadContext context);

    if (result != Result.Success)
    {
        return result;
    }

    return KernelTransfer.KernelToUser(address, context)
        ? Result.Success
        : KernelResult.InvalidMemState;
}
+
+ // Thread synchronization
+
// Thread synchronization

// svcWaitSynchronization: reads up to handlesCount handles from guest memory
// and waits until one of the referenced objects is signaled or the timeout
// elapses; the index of the signaled handle is reported in handleIndex.
[Svc(0x18)]
public Result WaitSynchronization(out int handleIndex, [PointerSized] ulong handlesPtr, int handlesCount, long timeout)
{
    handleIndex = 0;

    if ((uint)handlesCount > KThread.MaxWaitSyncObjects)
    {
        return KernelResult.MaximumExceeded;
    }

    KThread currentThread = KernelStatic.GetCurrentThread();

    if (handlesCount != 0)
    {
        KProcess currentProcess = KernelStatic.GetCurrentProcess();

        // The handle array must lie entirely inside the process address space
        // and must not wrap around.
        if (currentProcess.MemoryManager.AddrSpaceStart > handlesPtr)
        {
            return KernelResult.UserCopyFailed;
        }

        long handlesSize = handlesCount * 4;

        if (handlesPtr + (ulong)handlesSize <= handlesPtr)
        {
            return KernelResult.UserCopyFailed;
        }

        if (handlesPtr + (ulong)handlesSize - 1 > currentProcess.MemoryManager.AddrSpaceEnd - 1)
        {
            return KernelResult.UserCopyFailed;
        }

        // Read the guest handle array into the thread's preallocated buffer.
        Span<int> handles = new Span<int>(currentThread.WaitSyncHandles).Slice(0, handlesCount);

        if (!KernelTransfer.UserToKernelArray(handlesPtr, handles))
        {
            return KernelResult.UserCopyFailed;
        }

        return WaitSynchronization(out handleIndex, handles, timeout);
    }

    return WaitSynchronization(out handleIndex, ReadOnlySpan<int>.Empty, timeout);
}
+
+ public Result WaitSynchronization(out int handleIndex, ReadOnlySpan<int> handles, long timeout)
+ {
+ handleIndex = 0;
+
+ if ((uint)handles.Length > KThread.MaxWaitSyncObjects)
+ {
+ return KernelResult.MaximumExceeded;
+ }
+
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ var syncObjs = new Span<KSynchronizationObject>(currentThread.WaitSyncObjects).Slice(0, handles.Length);
+
+ if (handles.Length != 0)
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ int processedHandles = 0;
+
+ for (; processedHandles < handles.Length; processedHandles++)
+ {
+ KSynchronizationObject syncObj = currentProcess.HandleTable.GetObject<KSynchronizationObject>(handles[processedHandles]);
+
+ if (syncObj == null)
+ {
+ break;
+ }
+
+ syncObjs[processedHandles] = syncObj;
+
+ syncObj.IncrementReferenceCount();
+ }
+
+ if (processedHandles != handles.Length)
+ {
+ // One or more handles are invalid.
+ for (int index = 0; index < processedHandles; index++)
+ {
+ currentThread.WaitSyncObjects[index].DecrementReferenceCount();
+ }
+
+ return KernelResult.InvalidHandle;
+ }
+ }
+
+ if (timeout > 0)
+ {
+ timeout += KTimeManager.DefaultTimeIncrementNanoseconds;
+ }
+
+ Result result = _context.Synchronization.WaitFor(syncObjs, timeout, out handleIndex);
+
+ if (result == KernelResult.PortRemoteClosed)
+ {
+ result = Result.Success;
+ }
+
+ for (int index = 0; index < handles.Length; index++)
+ {
+ currentThread.WaitSyncObjects[index].DecrementReferenceCount();
+ }
+
+ return result;
+ }
+
+ [Svc(0x19)]
+ public Result CancelSynchronization(int handle)
+ {
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ KThread thread = process.HandleTable.GetKThread(handle);
+
+ if (thread == null)
+ {
+ return KernelResult.InvalidHandle;
+ }
+
+ thread.CancelSynchronization();
+
+ return Result.Success;
+ }
+
+ [Svc(0x1a)]
+ public Result ArbitrateLock(int ownerHandle, [PointerSized] ulong mutexAddress, int requesterHandle)
+ {
+ if (IsPointingInsideKernel(mutexAddress))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (IsAddressNotWordAligned(mutexAddress))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ return currentProcess.AddressArbiter.ArbitrateLock(ownerHandle, mutexAddress, requesterHandle);
+ }
+
+ [Svc(0x1b)]
+ public Result ArbitrateUnlock([PointerSized] ulong mutexAddress)
+ {
+ if (IsPointingInsideKernel(mutexAddress))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (IsAddressNotWordAligned(mutexAddress))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ return currentProcess.AddressArbiter.ArbitrateUnlock(mutexAddress);
+ }
+
+ [Svc(0x1c)]
+ public Result WaitProcessWideKeyAtomic(
+ [PointerSized] ulong mutexAddress,
+ [PointerSized] ulong condVarAddress,
+ int handle,
+ long timeout)
+ {
+ if (IsPointingInsideKernel(mutexAddress))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (IsAddressNotWordAligned(mutexAddress))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (timeout > 0)
+ {
+ timeout += KTimeManager.DefaultTimeIncrementNanoseconds;
+ }
+
+ return currentProcess.AddressArbiter.WaitProcessWideKeyAtomic(
+ mutexAddress,
+ condVarAddress,
+ handle,
+ timeout);
+ }
+
+ [Svc(0x1d)]
+ public Result SignalProcessWideKey([PointerSized] ulong address, int count)
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ currentProcess.AddressArbiter.SignalProcessWideKey(address, count);
+
+ return Result.Success;
+ }
+
+ [Svc(0x34)]
+ public Result WaitForAddress([PointerSized] ulong address, ArbitrationType type, int value, long timeout)
+ {
+ if (IsPointingInsideKernel(address))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (IsAddressNotWordAligned(address))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (timeout > 0)
+ {
+ timeout += KTimeManager.DefaultTimeIncrementNanoseconds;
+ }
+
+ return type switch
+ {
+ ArbitrationType.WaitIfLessThan
+ => currentProcess.AddressArbiter.WaitForAddressIfLessThan(address, value, false, timeout),
+ ArbitrationType.DecrementAndWaitIfLessThan
+ => currentProcess.AddressArbiter.WaitForAddressIfLessThan(address, value, true, timeout),
+ ArbitrationType.WaitIfEqual
+ => currentProcess.AddressArbiter.WaitForAddressIfEqual(address, value, timeout),
+ _ => KernelResult.InvalidEnumValue,
+ };
+ }
+
+ [Svc(0x35)]
+ public Result SignalToAddress([PointerSized] ulong address, SignalType type, int value, int count)
+ {
+ if (IsPointingInsideKernel(address))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (IsAddressNotWordAligned(address))
+ {
+ return KernelResult.InvalidAddress;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ return type switch
+ {
+ SignalType.Signal
+ => currentProcess.AddressArbiter.Signal(address, count),
+ SignalType.SignalAndIncrementIfEqual
+ => currentProcess.AddressArbiter.SignalAndIncrementIfEqual(address, value, count),
+ SignalType.SignalAndModifyIfEqual
+ => currentProcess.AddressArbiter.SignalAndModifyIfEqual(address, value, count),
+ _ => KernelResult.InvalidEnumValue
+ };
+ }
+
+ [Svc(0x36)]
+ public Result SynchronizePreemptionState()
+ {
+ KernelStatic.GetCurrentThread().SynchronizePreemptionState();
+
+ return Result.Success;
+ }
+
+ private static bool IsPointingInsideKernel(ulong address)
+ {
+ return (address + 0x1000000000) < 0xffffff000;
+ }
+
+ private static bool IsAddressNotWordAligned(ulong address)
+ {
+ return (address & 3) != 0;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallHandler.cs b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallHandler.cs
new file mode 100644
index 00000000..710bac94
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/SyscallHandler.cs
@@ -0,0 +1,44 @@
+using Ryujinx.Cpu;
+using Ryujinx.HLE.HOS.Kernel.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
+{
+ partial class SyscallHandler
+ {
+ private readonly KernelContext _context;
+
+ public SyscallHandler(KernelContext context)
+ {
+ _context = context;
+ }
+
+ public void SvcCall(IExecutionContext context, ulong address, int id)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ if (currentThread.Owner != null &&
+ currentThread.GetUserDisableCount() != 0 &&
+ currentThread.Owner.PinnedThreads[currentThread.CurrentCore] == null)
+ {
+ _context.CriticalSection.Enter();
+
+ currentThread.Owner.PinThread(currentThread);
+
+ currentThread.SetUserInterruptFlag();
+
+ _context.CriticalSection.Leave();
+ }
+
+ if (context.IsAarch32)
+ {
+ SyscallDispatch.Dispatch32(_context.Syscall, context, id);
+ }
+ else
+ {
+ SyscallDispatch.Dispatch64(_context.Syscall, context, id);
+ }
+
+ currentThread.HandlePostSyscall();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/ThreadContext.cs b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/ThreadContext.cs
new file mode 100644
index 00000000..b524406a
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/SupervisorCall/ThreadContext.cs
@@ -0,0 +1,22 @@
+using ARMeilleure.State;
+using Ryujinx.Common.Memory;
+
+namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall
+{
+ struct ThreadContext
+ {
+ public Array29<ulong> Registers;
+ public ulong Fp;
+ public ulong Lr;
+ public ulong Sp;
+ public ulong Pc;
+ public uint Pstate;
+#pragma warning disable CS0169
+ private uint _padding;
+#pragma warning restore CS0169
+ public Array32<V128> FpuRegisters;
+ public uint Fpcr;
+ public uint Fpsr;
+ public ulong Tpidr;
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/ArbitrationType.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/ArbitrationType.cs
new file mode 100644
index 00000000..89c1bf1f
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/ArbitrationType.cs
@@ -0,0 +1,9 @@
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ enum ArbitrationType
+ {
+ WaitIfLessThan = 0,
+ DecrementAndWaitIfLessThan = 1,
+ WaitIfEqual = 2
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KAddressArbiter.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KAddressArbiter.cs
new file mode 100644
index 00000000..74867b44
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KAddressArbiter.cs
@@ -0,0 +1,581 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Horizon.Common;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ class KAddressArbiter
+ {
+ private const int HasListenersMask = 0x40000000;
+
+ private readonly KernelContext _context;
+
+ private readonly List<KThread> _condVarThreads;
+ private readonly List<KThread> _arbiterThreads;
+
+ public KAddressArbiter(KernelContext context)
+ {
+ _context = context;
+
+ _condVarThreads = new List<KThread>();
+ _arbiterThreads = new List<KThread>();
+ }
+
+ public Result ArbitrateLock(int ownerHandle, ulong mutexAddress, int requesterHandle)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ _context.CriticalSection.Enter();
+
+ if (currentThread.TerminationRequested)
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.ThreadTerminating;
+ }
+
+ currentThread.SignaledObj = null;
+ currentThread.ObjSyncResult = Result.Success;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (!KernelTransfer.UserToKernel(out int mutexValue, mutexAddress))
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ if (mutexValue != (ownerHandle | HasListenersMask))
+ {
+ _context.CriticalSection.Leave();
+
+ return Result.Success;
+ }
+
+ KThread mutexOwner = currentProcess.HandleTable.GetObject<KThread>(ownerHandle);
+
+ if (mutexOwner == null)
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidHandle;
+ }
+
+ currentThread.MutexAddress = mutexAddress;
+ currentThread.ThreadHandleForUserMutex = requesterHandle;
+
+ mutexOwner.AddMutexWaiter(currentThread);
+
+ currentThread.Reschedule(ThreadSchedState.Paused);
+
+ _context.CriticalSection.Leave();
+ _context.CriticalSection.Enter();
+
+ if (currentThread.MutexOwner != null)
+ {
+ currentThread.MutexOwner.RemoveMutexWaiter(currentThread);
+ }
+
+ _context.CriticalSection.Leave();
+
+ return currentThread.ObjSyncResult;
+ }
+
+ public Result ArbitrateUnlock(ulong mutexAddress)
+ {
+ _context.CriticalSection.Enter();
+
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ (int mutexValue, KThread newOwnerThread) = MutexUnlock(currentThread, mutexAddress);
+
+ Result result = Result.Success;
+
+ if (!KernelTransfer.KernelToUser(mutexAddress, mutexValue))
+ {
+ result = KernelResult.InvalidMemState;
+ }
+
+ if (result != Result.Success && newOwnerThread != null)
+ {
+ newOwnerThread.SignaledObj = null;
+ newOwnerThread.ObjSyncResult = result;
+ }
+
+ _context.CriticalSection.Leave();
+
+ return result;
+ }
+
+ public Result WaitProcessWideKeyAtomic(ulong mutexAddress, ulong condVarAddress, int threadHandle, long timeout)
+ {
+ _context.CriticalSection.Enter();
+
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ currentThread.SignaledObj = null;
+ currentThread.ObjSyncResult = KernelResult.TimedOut;
+
+ if (currentThread.TerminationRequested)
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.ThreadTerminating;
+ }
+
+ (int mutexValue, _) = MutexUnlock(currentThread, mutexAddress);
+
+ KernelTransfer.KernelToUser(condVarAddress, 1);
+
+ if (!KernelTransfer.KernelToUser(mutexAddress, mutexValue))
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ currentThread.MutexAddress = mutexAddress;
+ currentThread.ThreadHandleForUserMutex = threadHandle;
+ currentThread.CondVarAddress = condVarAddress;
+
+ _condVarThreads.Add(currentThread);
+
+ if (timeout != 0)
+ {
+ currentThread.Reschedule(ThreadSchedState.Paused);
+
+ if (timeout > 0)
+ {
+ _context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
+ }
+ }
+
+ _context.CriticalSection.Leave();
+
+ if (timeout > 0)
+ {
+ _context.TimeManager.UnscheduleFutureInvocation(currentThread);
+ }
+
+ _context.CriticalSection.Enter();
+
+ if (currentThread.MutexOwner != null)
+ {
+ currentThread.MutexOwner.RemoveMutexWaiter(currentThread);
+ }
+
+ _condVarThreads.Remove(currentThread);
+
+ _context.CriticalSection.Leave();
+
+ return currentThread.ObjSyncResult;
+ }
+
+ private (int, KThread) MutexUnlock(KThread currentThread, ulong mutexAddress)
+ {
+ KThread newOwnerThread = currentThread.RelinquishMutex(mutexAddress, out int count);
+
+ int mutexValue = 0;
+
+ if (newOwnerThread != null)
+ {
+ mutexValue = newOwnerThread.ThreadHandleForUserMutex;
+
+ if (count >= 2)
+ {
+ mutexValue |= HasListenersMask;
+ }
+
+ newOwnerThread.SignaledObj = null;
+ newOwnerThread.ObjSyncResult = Result.Success;
+
+ newOwnerThread.ReleaseAndResume();
+ }
+
+ return (mutexValue, newOwnerThread);
+ }
+
+ public void SignalProcessWideKey(ulong address, int count)
+ {
+ _context.CriticalSection.Enter();
+
+ WakeThreads(_condVarThreads, count, TryAcquireMutex, x => x.CondVarAddress == address);
+
+ if (!_condVarThreads.Any(x => x.CondVarAddress == address))
+ {
+ KernelTransfer.KernelToUser(address, 0);
+ }
+
+ _context.CriticalSection.Leave();
+ }
+
+ private static void TryAcquireMutex(KThread requester)
+ {
+ ulong address = requester.MutexAddress;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (!currentProcess.CpuMemory.IsMapped(address))
+ {
+ // Invalid address.
+ requester.SignaledObj = null;
+ requester.ObjSyncResult = KernelResult.InvalidMemState;
+
+ return;
+ }
+
+ ref int mutexRef = ref currentProcess.CpuMemory.GetRef<int>(address);
+
+ int mutexValue, newMutexValue;
+
+ do
+ {
+ mutexValue = mutexRef;
+
+ if (mutexValue != 0)
+ {
+ // Update value to indicate there is a mutex waiter now.
+ newMutexValue = mutexValue | HasListenersMask;
+ }
+ else
+ {
+ // No thread owning the mutex, assign to requesting thread.
+ newMutexValue = requester.ThreadHandleForUserMutex;
+ }
+ }
+ while (Interlocked.CompareExchange(ref mutexRef, newMutexValue, mutexValue) != mutexValue);
+
+ if (mutexValue == 0)
+ {
+ // We now own the mutex.
+ requester.SignaledObj = null;
+ requester.ObjSyncResult = Result.Success;
+
+ requester.ReleaseAndResume();
+
+ return;
+ }
+
+ mutexValue &= ~HasListenersMask;
+
+ KThread mutexOwner = currentProcess.HandleTable.GetObject<KThread>(mutexValue);
+
+ if (mutexOwner != null)
+ {
+ // Mutex already belongs to another thread, wait for it.
+ mutexOwner.AddMutexWaiter(requester);
+ }
+ else
+ {
+ // Invalid mutex owner.
+ requester.SignaledObj = null;
+ requester.ObjSyncResult = KernelResult.InvalidHandle;
+
+ requester.ReleaseAndResume();
+ }
+ }
+
+ public Result WaitForAddressIfEqual(ulong address, int value, long timeout)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ _context.CriticalSection.Enter();
+
+ if (currentThread.TerminationRequested)
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.ThreadTerminating;
+ }
+
+ currentThread.SignaledObj = null;
+ currentThread.ObjSyncResult = KernelResult.TimedOut;
+
+ if (!KernelTransfer.UserToKernel(out int currentValue, address))
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ if (currentValue == value)
+ {
+ if (timeout == 0)
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.TimedOut;
+ }
+
+ currentThread.MutexAddress = address;
+ currentThread.WaitingInArbitration = true;
+
+ _arbiterThreads.Add(currentThread);
+
+ currentThread.Reschedule(ThreadSchedState.Paused);
+
+ if (timeout > 0)
+ {
+ _context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
+ }
+
+ _context.CriticalSection.Leave();
+
+ if (timeout > 0)
+ {
+ _context.TimeManager.UnscheduleFutureInvocation(currentThread);
+ }
+
+ _context.CriticalSection.Enter();
+
+ if (currentThread.WaitingInArbitration)
+ {
+ _arbiterThreads.Remove(currentThread);
+
+ currentThread.WaitingInArbitration = false;
+ }
+
+ _context.CriticalSection.Leave();
+
+ return currentThread.ObjSyncResult;
+ }
+
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+
+ public Result WaitForAddressIfLessThan(ulong address, int value, bool shouldDecrement, long timeout)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ _context.CriticalSection.Enter();
+
+ if (currentThread.TerminationRequested)
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.ThreadTerminating;
+ }
+
+ currentThread.SignaledObj = null;
+ currentThread.ObjSyncResult = KernelResult.TimedOut;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (!KernelTransfer.UserToKernel(out int currentValue, address))
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ if (shouldDecrement)
+ {
+ currentValue = Interlocked.Decrement(ref currentProcess.CpuMemory.GetRef<int>(address)) + 1;
+ }
+
+ if (currentValue < value)
+ {
+ if (timeout == 0)
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.TimedOut;
+ }
+
+ currentThread.MutexAddress = address;
+ currentThread.WaitingInArbitration = true;
+
+ _arbiterThreads.Add(currentThread);
+
+ currentThread.Reschedule(ThreadSchedState.Paused);
+
+ if (timeout > 0)
+ {
+ _context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
+ }
+
+ _context.CriticalSection.Leave();
+
+ if (timeout > 0)
+ {
+ _context.TimeManager.UnscheduleFutureInvocation(currentThread);
+ }
+
+ _context.CriticalSection.Enter();
+
+ if (currentThread.WaitingInArbitration)
+ {
+ _arbiterThreads.Remove(currentThread);
+
+ currentThread.WaitingInArbitration = false;
+ }
+
+ _context.CriticalSection.Leave();
+
+ return currentThread.ObjSyncResult;
+ }
+
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+
+ public Result Signal(ulong address, int count)
+ {
+ _context.CriticalSection.Enter();
+
+ WakeArbiterThreads(address, count);
+
+ _context.CriticalSection.Leave();
+
+ return Result.Success;
+ }
+
+ public Result SignalAndIncrementIfEqual(ulong address, int value, int count)
+ {
+ _context.CriticalSection.Enter();
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (!currentProcess.CpuMemory.IsMapped(address))
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ ref int valueRef = ref currentProcess.CpuMemory.GetRef<int>(address);
+
+ int currentValue;
+
+ do
+ {
+ currentValue = valueRef;
+
+ if (currentValue != value)
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+ }
+ while (Interlocked.CompareExchange(ref valueRef, currentValue + 1, currentValue) != currentValue);
+
+ WakeArbiterThreads(address, count);
+
+ _context.CriticalSection.Leave();
+
+ return Result.Success;
+ }
+
+ public Result SignalAndModifyIfEqual(ulong address, int value, int count)
+ {
+ _context.CriticalSection.Enter();
+
+ int addend;
+
+ // The value is decremented if the number of threads waiting is less
+ // or equal to the Count of threads to be signaled, or Count is zero
+ // or negative. It is incremented if there are no threads waiting.
+ int waitingCount = 0;
+
+ foreach (KThread thread in _arbiterThreads.Where(x => x.MutexAddress == address))
+ {
+ if (++waitingCount >= count)
+ {
+ break;
+ }
+ }
+
+ if (waitingCount > 0)
+ {
+ if (count <= 0)
+ {
+ addend = -2;
+ }
+ else if (waitingCount < count)
+ {
+ addend = -1;
+ }
+ else
+ {
+ addend = 0;
+ }
+ }
+ else
+ {
+ addend = 1;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (!currentProcess.CpuMemory.IsMapped(address))
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ ref int valueRef = ref currentProcess.CpuMemory.GetRef<int>(address);
+
+ int currentValue;
+
+ do
+ {
+ currentValue = valueRef;
+
+ if (currentValue != value)
+ {
+ _context.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+ }
+ while (Interlocked.CompareExchange(ref valueRef, currentValue + addend, currentValue) != currentValue);
+
+ WakeArbiterThreads(address, count);
+
+ _context.CriticalSection.Leave();
+
+ return Result.Success;
+ }
+
+ private void WakeArbiterThreads(ulong address, int count)
+ {
+ static void RemoveArbiterThread(KThread thread)
+ {
+ thread.SignaledObj = null;
+ thread.ObjSyncResult = Result.Success;
+
+ thread.ReleaseAndResume();
+
+ thread.WaitingInArbitration = false;
+ }
+
+ WakeThreads(_arbiterThreads, count, RemoveArbiterThread, x => x.MutexAddress == address);
+ }
+
+ private static void WakeThreads(
+ List<KThread> threads,
+ int count,
+ Action<KThread> removeCallback,
+ Func<KThread, bool> predicate)
+ {
+ var candidates = threads.Where(predicate).OrderBy(x => x.DynamicPriority);
+ var toSignal = (count > 0 ? candidates.Take(count) : candidates).ToArray();
+
+ foreach (KThread thread in toSignal)
+ {
+ removeCallback(thread);
+ threads.Remove(thread);
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KConditionVariable.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KConditionVariable.cs
new file mode 100644
index 00000000..891e632f
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KConditionVariable.cs
@@ -0,0 +1,70 @@
+using System.Collections.Generic;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ static class KConditionVariable
+ {
+ public static void Wait(KernelContext context, LinkedList<KThread> threadList, object mutex, long timeout)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ context.CriticalSection.Enter();
+
+ Monitor.Exit(mutex);
+
+ currentThread.Withholder = threadList;
+
+ currentThread.Reschedule(ThreadSchedState.Paused);
+
+ currentThread.WithholderNode = threadList.AddLast(currentThread);
+
+ if (currentThread.TerminationRequested)
+ {
+ threadList.Remove(currentThread.WithholderNode);
+
+ currentThread.Reschedule(ThreadSchedState.Running);
+
+ currentThread.Withholder = null;
+
+ context.CriticalSection.Leave();
+ }
+ else
+ {
+ if (timeout > 0)
+ {
+ context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
+ }
+
+ context.CriticalSection.Leave();
+
+ if (timeout > 0)
+ {
+ context.TimeManager.UnscheduleFutureInvocation(currentThread);
+ }
+ }
+
+ Monitor.Enter(mutex);
+ }
+
+ public static void NotifyAll(KernelContext context, LinkedList<KThread> threadList)
+ {
+ context.CriticalSection.Enter();
+
+ LinkedListNode<KThread> node = threadList.First;
+
+ for (; node != null; node = threadList.First)
+ {
+ KThread thread = node.Value;
+
+ threadList.Remove(thread.WithholderNode);
+
+ thread.Withholder = null;
+
+ thread.Reschedule(ThreadSchedState.Running);
+ }
+
+ context.CriticalSection.Leave();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KCriticalSection.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KCriticalSection.cs
new file mode 100644
index 00000000..1d61f2f0
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KCriticalSection.cs
@@ -0,0 +1,64 @@
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ class KCriticalSection
+ {
+ private readonly KernelContext _context;
+ private readonly object _lock;
+ private int _recursionCount;
+
+ public object Lock => _lock;
+
+ public KCriticalSection(KernelContext context)
+ {
+ _context = context;
+ _lock = new object();
+ }
+
+ public void Enter()
+ {
+ Monitor.Enter(_lock);
+
+ _recursionCount++;
+ }
+
+ public void Leave()
+ {
+ if (_recursionCount == 0)
+ {
+ return;
+ }
+
+ if (--_recursionCount == 0)
+ {
+ ulong scheduledCoresMask = KScheduler.SelectThreads(_context);
+
+ Monitor.Exit(_lock);
+
+ KThread currentThread = KernelStatic.GetCurrentThread();
+ bool isCurrentThreadSchedulable = currentThread != null && currentThread.IsSchedulable;
+ if (isCurrentThreadSchedulable)
+ {
+ KScheduler.EnableScheduling(_context, scheduledCoresMask);
+ }
+ else
+ {
+ KScheduler.EnableSchedulingFromForeignThread(_context, scheduledCoresMask);
+
+ // If the thread exists but is not schedulable, we still want to suspend
+ // it if it's not runnable. That allows the kernel to still block HLE threads
+ // even if they are not scheduled on guest cores.
+ if (currentThread != null && !currentThread.IsSchedulable && currentThread.Context.Running)
+ {
+ currentThread.SchedulerWaitEvent.WaitOne();
+ }
+ }
+ }
+ else
+ {
+ Monitor.Exit(_lock);
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KEvent.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KEvent.cs
new file mode 100644
index 00000000..da3e217b
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KEvent.cs
@@ -0,0 +1,14 @@
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ class KEvent
+ {
+ public KReadableEvent ReadableEvent { get; private set; }
+ public KWritableEvent WritableEvent { get; private set; }
+
+ public KEvent(KernelContext context)
+ {
+ ReadableEvent = new KReadableEvent(context, this);
+ WritableEvent = new KWritableEvent(context, this);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KPriorityQueue.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KPriorityQueue.cs
new file mode 100644
index 00000000..14fba704
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KPriorityQueue.cs
@@ -0,0 +1,286 @@
+using System.Collections.Generic;
+using System.Numerics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ class KPriorityQueue
+ {
+ private readonly LinkedList<KThread>[][] _scheduledThreadsPerPrioPerCore;
+ private readonly LinkedList<KThread>[][] _suggestedThreadsPerPrioPerCore;
+
+ private readonly long[] _scheduledPrioritiesPerCore;
+ private readonly long[] _suggestedPrioritiesPerCore;
+
+ public KPriorityQueue()
+ {
+ _suggestedThreadsPerPrioPerCore = new LinkedList<KThread>[KScheduler.PrioritiesCount][];
+ _scheduledThreadsPerPrioPerCore = new LinkedList<KThread>[KScheduler.PrioritiesCount][];
+
+ for (int prio = 0; prio < KScheduler.PrioritiesCount; prio++)
+ {
+ _suggestedThreadsPerPrioPerCore[prio] = new LinkedList<KThread>[KScheduler.CpuCoresCount];
+ _scheduledThreadsPerPrioPerCore[prio] = new LinkedList<KThread>[KScheduler.CpuCoresCount];
+
+ for (int core = 0; core < KScheduler.CpuCoresCount; core++)
+ {
+ _suggestedThreadsPerPrioPerCore[prio][core] = new LinkedList<KThread>();
+ _scheduledThreadsPerPrioPerCore[prio][core] = new LinkedList<KThread>();
+ }
+ }
+
+ _scheduledPrioritiesPerCore = new long[KScheduler.CpuCoresCount];
+ _suggestedPrioritiesPerCore = new long[KScheduler.CpuCoresCount];
+ }
+
+ public readonly ref struct KThreadEnumerable
+ {
+ readonly LinkedList<KThread>[][] _listPerPrioPerCore;
+ readonly long[] _prios;
+ readonly int _core;
+
+ public KThreadEnumerable(LinkedList<KThread>[][] listPerPrioPerCore, long[] prios, int core)
+ {
+ _listPerPrioPerCore = listPerPrioPerCore;
+ _prios = prios;
+ _core = core;
+ }
+
+ public Enumerator GetEnumerator()
+ {
+ return new Enumerator(_listPerPrioPerCore, _prios, _core);
+ }
+
+ public ref struct Enumerator
+ {
+ private readonly LinkedList<KThread>[][] _listPerPrioPerCore;
+ private readonly int _core;
+ private long _prioMask;
+ private int _prio;
+ private LinkedList<KThread> _list;
+ private LinkedListNode<KThread> _node;
+
+ public Enumerator(LinkedList<KThread>[][] listPerPrioPerCore, long[] prios, int core)
+ {
+ _listPerPrioPerCore = listPerPrioPerCore;
+ _core = core;
+ _prioMask = prios[core];
+ _prio = BitOperations.TrailingZeroCount(_prioMask);
+ _prioMask &= ~(1L << _prio);
+ }
+
+ public KThread Current => _node?.Value;
+
+ public bool MoveNext()
+ {
+ _node = _node?.Next;
+
+ if (_node == null)
+ {
+ if (!MoveNextListAndFirstNode())
+ {
+ return false;
+ }
+ }
+
+ return _node != null;
+ }
+
+ private bool MoveNextListAndFirstNode()
+ {
+ if (_prio < KScheduler.PrioritiesCount)
+ {
+ _list = _listPerPrioPerCore[_prio][_core];
+
+ _node = _list.First;
+
+ _prio = BitOperations.TrailingZeroCount(_prioMask);
+
+ _prioMask &= ~(1L << _prio);
+
+ return true;
+ }
+ else
+ {
+ _list = null;
+ _node = null;
+ return false;
+ }
+ }
+ }
+ }
+
+ public KThreadEnumerable ScheduledThreads(int core)
+ {
+ return new KThreadEnumerable(_scheduledThreadsPerPrioPerCore, _scheduledPrioritiesPerCore, core);
+ }
+
+ public KThreadEnumerable SuggestedThreads(int core)
+ {
+ return new KThreadEnumerable(_suggestedThreadsPerPrioPerCore, _suggestedPrioritiesPerCore, core);
+ }
+
+ public KThread ScheduledThreadsFirstOrDefault(int core)
+ {
+ return ScheduledThreadsElementAtOrDefault(core, 0);
+ }
+
+ public KThread ScheduledThreadsElementAtOrDefault(int core, int index)
+ {
+ int currentIndex = 0;
+ foreach (var scheduledThread in ScheduledThreads(core))
+ {
+ if (currentIndex == index)
+ {
+ return scheduledThread;
+ }
+ else
+ {
+ currentIndex++;
+ }
+ }
+
+ return null;
+ }
+
+ public KThread ScheduledThreadsWithDynamicPriorityFirstOrDefault(int core, int dynamicPriority)
+ {
+ foreach (var scheduledThread in ScheduledThreads(core))
+ {
+ if (scheduledThread.DynamicPriority == dynamicPriority)
+ {
+ return scheduledThread;
+ }
+ }
+
+ return null;
+ }
+
+ public bool HasScheduledThreads(int core)
+ {
+ return ScheduledThreadsFirstOrDefault(core) != null;
+ }
+
+ public void TransferToCore(int prio, int dstCore, KThread thread)
+ {
+ int srcCore = thread.ActiveCore;
+ if (srcCore == dstCore)
+ {
+ return;
+ }
+
+ thread.ActiveCore = dstCore;
+
+ if (srcCore >= 0)
+ {
+ Unschedule(prio, srcCore, thread);
+ }
+
+ if (dstCore >= 0)
+ {
+ Unsuggest(prio, dstCore, thread);
+ Schedule(prio, dstCore, thread);
+ }
+
+ if (srcCore >= 0)
+ {
+ Suggest(prio, srcCore, thread);
+ }
+ }
+
+ public void Suggest(int prio, int core, KThread thread)
+ {
+ if (prio >= KScheduler.PrioritiesCount)
+ {
+ return;
+ }
+
+ thread.SiblingsPerCore[core] = SuggestedQueue(prio, core).AddFirst(thread);
+
+ _suggestedPrioritiesPerCore[core] |= 1L << prio;
+ }
+
+ public void Unsuggest(int prio, int core, KThread thread)
+ {
+ if (prio >= KScheduler.PrioritiesCount)
+ {
+ return;
+ }
+
+ LinkedList<KThread> queue = SuggestedQueue(prio, core);
+
+ queue.Remove(thread.SiblingsPerCore[core]);
+
+ if (queue.First == null)
+ {
+ _suggestedPrioritiesPerCore[core] &= ~(1L << prio);
+ }
+ }
+
+ public void Schedule(int prio, int core, KThread thread)
+ {
+ if (prio >= KScheduler.PrioritiesCount)
+ {
+ return;
+ }
+
+ thread.SiblingsPerCore[core] = ScheduledQueue(prio, core).AddLast(thread);
+
+ _scheduledPrioritiesPerCore[core] |= 1L << prio;
+ }
+
+ public void SchedulePrepend(int prio, int core, KThread thread)
+ {
+ if (prio >= KScheduler.PrioritiesCount)
+ {
+ return;
+ }
+
+ thread.SiblingsPerCore[core] = ScheduledQueue(prio, core).AddFirst(thread);
+
+ _scheduledPrioritiesPerCore[core] |= 1L << prio;
+ }
+
+ public KThread Reschedule(int prio, int core, KThread thread)
+ {
+ if (prio >= KScheduler.PrioritiesCount)
+ {
+ return null;
+ }
+
+ LinkedList<KThread> queue = ScheduledQueue(prio, core);
+
+ queue.Remove(thread.SiblingsPerCore[core]);
+
+ thread.SiblingsPerCore[core] = queue.AddLast(thread);
+
+ return queue.First.Value;
+ }
+
+ public void Unschedule(int prio, int core, KThread thread)
+ {
+ if (prio >= KScheduler.PrioritiesCount)
+ {
+ return;
+ }
+
+ LinkedList<KThread> queue = ScheduledQueue(prio, core);
+
+ queue.Remove(thread.SiblingsPerCore[core]);
+
+ if (queue.First == null)
+ {
+ _scheduledPrioritiesPerCore[core] &= ~(1L << prio);
+ }
+ }
+
+ private LinkedList<KThread> SuggestedQueue(int prio, int core)
+ {
+ return _suggestedThreadsPerPrioPerCore[prio][core];
+ }
+
+ private LinkedList<KThread> ScheduledQueue(int prio, int core)
+ {
+ return _scheduledThreadsPerPrioPerCore[prio][core];
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KReadableEvent.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KReadableEvent.cs
new file mode 100644
index 00000000..d9e7befa
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KReadableEvent.cs
@@ -0,0 +1,65 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.Horizon.Common;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ // Readable half of a KEvent: a one-shot signalable synchronization object
+ // that guest code can wait on and clear.
+ class KReadableEvent : KSynchronizationObject
+ {
+ private readonly KEvent _parent;
+
+ // Current signaled state; reported to waiters through IsSignaled().
+ private bool _signaled;
+
+ public KReadableEvent(KernelContext context, KEvent parent) : base(context)
+ {
+ _parent = parent;
+ }
+
+ // Signals the event once. The transition (and waking of waiters via
+ // base.Signal) is done under the kernel critical section; re-signaling
+ // an already-signaled event is a no-op.
+ public override void Signal()
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (!_signaled)
+ {
+ _signaled = true;
+
+ base.Signal();
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ // Unconditionally clears the signaled state. Always succeeds.
+ // NOTE(review): unlike ClearIfSignaled, this does not take the critical
+ // section — presumably safe because it is a plain bool store; confirm.
+ public Result Clear()
+ {
+ _signaled = false;
+
+ return Result.Success;
+ }
+
+ // Clears the signaled state only if currently signaled; returns
+ // InvalidState otherwise. Check-and-clear is atomic with respect to
+ // Signal because both run under the kernel critical section.
+ public Result ClearIfSignaled()
+ {
+ Result result;
+
+ KernelContext.CriticalSection.Enter();
+
+ if (_signaled)
+ {
+ _signaled = false;
+
+ result = Result.Success;
+ }
+ else
+ {
+ result = KernelResult.InvalidState;
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ return result;
+ }
+
+ public override bool IsSignaled()
+ {
+ return _signaled;
+ }
+ }
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs
new file mode 100644
index 00000000..b9de7d9c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KScheduler.cs
@@ -0,0 +1,661 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using System;
+using System.Numerics;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ partial class KScheduler : IDisposable
+ {
+ public const int PrioritiesCount = 64;
+ public const int CpuCoresCount = 4;
+
+ private const int RoundRobinTimeQuantumMs = 10;
+
+ private static readonly int[] PreemptionPriorities = new int[] { 59, 59, 59, 63 };
+
+ private static readonly int[] _srcCoresHighestPrioThreads = new int[CpuCoresCount];
+
+ private readonly KernelContext _context;
+ private readonly int _coreId;
+
+ private struct SchedulingState
+ {
+ public volatile bool NeedsScheduling;
+ public volatile KThread SelectedThread;
+ }
+
+ private SchedulingState _state;
+
+ private AutoResetEvent _idleInterruptEvent;
+ private readonly object _idleInterruptEventLock;
+
+ private KThread _previousThread;
+ private KThread _currentThread;
+ private readonly KThread _idleThread;
+
+ public KThread PreviousThread => _previousThread;
+ public KThread CurrentThread => _currentThread;
+ public long LastContextSwitchTime { get; private set; }
+ public long TotalIdleTimeTicks => _idleThread.TotalTimeRunning;
+
+ // Creates the scheduler for one emulated CPU core. An idle thread is
+ // created immediately and becomes the initial "current" thread; its wait
+ // event is set so the idle loop can start running right away.
+ public KScheduler(KernelContext context, int coreId)
+ {
+ _context = context;
+ _coreId = coreId;
+
+ _idleInterruptEvent = new AutoResetEvent(false);
+ _idleInterruptEventLock = new object();
+
+ KThread idleThread = CreateIdleThread(context, coreId);
+
+ _currentThread = idleThread;
+ _idleThread = idleThread;
+
+ idleThread.StartHostThread();
+ idleThread.SchedulerWaitEvent.Set();
+ }
+
+ // Builds the per-core idle thread: a Dummy-type thread with priority
+ // PrioritiesCount (outside the valid 0..63 range, so it is never queued)
+ // whose entry point is IdleThreadLoop.
+ private KThread CreateIdleThread(KernelContext context, int cpuCore)
+ {
+ KThread idleThread = new KThread(context);
+
+ idleThread.Initialize(0UL, 0UL, 0UL, PrioritiesCount, cpuCore, null, ThreadType.Dummy, IdleThreadLoop);
+
+ return idleThread;
+ }
+
+ // Re-runs thread selection for all cores, but only when a reselection was
+ // requested. Returns a bitmask of cores whose selected thread changed.
+ public static ulong SelectThreads(KernelContext context)
+ {
+ if (context.ThreadReselectionRequested)
+ {
+ return SelectThreadsImpl(context);
+ }
+ else
+ {
+ return 0UL;
+ }
+ }
+
+ // Core of thread selection: picks the highest priority runnable thread for
+ // each core (honoring per-core pinned threads), then load-balances idle
+ // cores by pulling in suggested threads from other cores. Returns a
+ // bitmask of cores that need rescheduling.
+ // NOTE(review): uses the static scratch buffer _srcCoresHighestPrioThreads
+ // without synchronization — assumes callers hold the kernel critical
+ // section; confirm against call sites.
+ private static ulong SelectThreadsImpl(KernelContext context)
+ {
+ context.ThreadReselectionRequested = false;
+
+ ulong scheduledCoresMask = 0UL;
+
+ for (int core = 0; core < CpuCoresCount; core++)
+ {
+ KThread thread = context.PriorityQueue.ScheduledThreadsFirstOrDefault(core);
+
+ // If the process has a pinned thread for this core and it is not the
+ // one we picked, the pinned thread takes precedence (or the core is
+ // left without a selection if the pinned thread cannot run).
+ if (thread != null &&
+ thread.Owner != null &&
+ thread.Owner.PinnedThreads[core] != null &&
+ thread.Owner.PinnedThreads[core] != thread)
+ {
+ KThread candidate = thread.Owner.PinnedThreads[core];
+
+ if (candidate.KernelWaitersCount == 0 && !thread.Owner.IsExceptionUserThread(candidate))
+ {
+ if (candidate.SchedFlags == ThreadSchedState.Running)
+ {
+ thread = candidate;
+ }
+ else
+ {
+ thread = null;
+ }
+ }
+ }
+
+ scheduledCoresMask |= context.Schedulers[core].SelectThread(thread);
+ }
+
+ for (int core = 0; core < CpuCoresCount; core++)
+ {
+ // If the core is not idle (there's already a thread running on it),
+ // then we don't need to attempt load balancing.
+ if (context.PriorityQueue.HasScheduledThreads(core))
+ {
+ continue;
+ }
+
+ Array.Fill(_srcCoresHighestPrioThreads, 0);
+
+ int srcCoresHighestPrioThreadsCount = 0;
+
+ KThread dst = null;
+
+ // Select candidate threads that could run on this core.
+ // Give preference to threads that are not yet selected.
+ foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
+ {
+ if (suggested.ActiveCore < 0 || suggested != context.Schedulers[suggested.ActiveCore]._state.SelectedThread)
+ {
+ dst = suggested;
+ break;
+ }
+
+ _srcCoresHighestPrioThreads[srcCoresHighestPrioThreadsCount++] = suggested.ActiveCore;
+ }
+
+ // Not yet selected candidate found.
+ if (dst != null)
+ {
+ // Priorities < 2 are used for the kernel message dispatching
+ // threads, we should skip load balancing entirely.
+ if (dst.DynamicPriority >= 2)
+ {
+ context.PriorityQueue.TransferToCore(dst.DynamicPriority, core, dst);
+
+ scheduledCoresMask |= context.Schedulers[core].SelectThread(dst);
+ }
+
+ continue;
+ }
+
+ // All candidates are already selected, choose the best one
+ // (the first one that doesn't make the source core idle if moved).
+ for (int index = 0; index < srcCoresHighestPrioThreadsCount; index++)
+ {
+ int srcCore = _srcCoresHighestPrioThreads[index];
+
+ KThread src = context.PriorityQueue.ScheduledThreadsElementAtOrDefault(srcCore, 1);
+
+ if (src != null)
+ {
+ // Run the second thread on the queue on the source core,
+ // move the first one to the current core.
+ KThread origSelectedCoreSrc = context.Schedulers[srcCore]._state.SelectedThread;
+
+ scheduledCoresMask |= context.Schedulers[srcCore].SelectThread(src);
+
+ context.PriorityQueue.TransferToCore(origSelectedCoreSrc.DynamicPriority, core, origSelectedCoreSrc);
+
+ scheduledCoresMask |= context.Schedulers[core].SelectThread(origSelectedCoreSrc);
+ }
+ }
+ }
+
+ return scheduledCoresMask;
+ }
+
+ // Records nextThread as this core's selected thread. Returns this core's
+ // bit if the selection changed (so the caller can signal a reschedule),
+ // or 0 if it is unchanged. Also stamps the previously selected thread's
+ // LastScheduledTime, which the load balancing heuristics compare.
+ private ulong SelectThread(KThread nextThread)
+ {
+ KThread previousThread = _state.SelectedThread;
+
+ if (previousThread != nextThread)
+ {
+ if (previousThread != null)
+ {
+ previousThread.LastScheduledTime = PerformanceCounter.ElapsedTicks;
+ }
+
+ _state.SelectedThread = nextThread;
+ _state.NeedsScheduling = true;
+ return 1UL << _coreId;
+ }
+ else
+ {
+ return 0UL;
+ }
+ }
+
+ // Applies a reschedule request mask from the current (guest) thread:
+ // interrupts other cores first, then reschedules the calling core.
+ public static void EnableScheduling(KernelContext context, ulong scheduledCoresMask)
+ {
+ KScheduler currentScheduler = context.Schedulers[KernelStatic.GetCurrentThread().CurrentCore];
+
+ // Note that "RescheduleCurrentCore" will block, so "RescheduleOtherCores" must be done first.
+ currentScheduler.RescheduleOtherCores(scheduledCoresMask);
+ currentScheduler.RescheduleCurrentCore();
+ }
+
+ // Variant of EnableScheduling for callers that are not running as a guest
+ // thread on any core: only other cores can be signaled, nothing blocks.
+ public static void EnableSchedulingFromForeignThread(KernelContext context, ulong scheduledCoresMask)
+ {
+ RescheduleOtherCores(context, scheduledCoresMask);
+ }
+
+ // Performs a (potentially blocking) context switch on this core if a new
+ // thread selection is pending.
+ private void RescheduleCurrentCore()
+ {
+ if (_state.NeedsScheduling)
+ {
+ Schedule();
+ }
+ }
+
+ // Signals every core in the mask except this one to reschedule.
+ private void RescheduleOtherCores(ulong scheduledCoresMask)
+ {
+ RescheduleOtherCores(_context, scheduledCoresMask & ~(1UL << _coreId));
+ }
+
+ // For each core in the mask: interrupts the guest thread currently running
+ // there (if any) and wakes the core's idle loop, so the core re-runs its
+ // scheduling decision. Clears bits as cores are processed.
+ private static void RescheduleOtherCores(KernelContext context, ulong scheduledCoresMask)
+ {
+ while (scheduledCoresMask != 0)
+ {
+ int coreToSignal = BitOperations.TrailingZeroCount(scheduledCoresMask);
+
+ KThread threadToSignal = context.Schedulers[coreToSignal]._currentThread;
+
+ // Request the thread running on that core to stop and reschedule, if we have one.
+ if (threadToSignal != context.Schedulers[coreToSignal]._idleThread)
+ {
+ threadToSignal.Context.RequestInterrupt();
+ }
+
+ // If the core is idle, ensure that the idle thread is awakened.
+ context.Schedulers[coreToSignal]._idleInterruptEvent.Set();
+
+ scheduledCoresMask &= ~(1UL << coreToSignal);
+ }
+ }
+
+ // Body of the per-core idle thread. Repeatedly: picks the next selected
+ // thread, hands the core over to it (SignalAndWait pairs the wake of the
+ // next thread with this thread going to sleep), then blocks until the
+ // core is poked via _idleInterruptEvent. On kernel shutdown the interrupt
+ // event is disposed under its lock so Dispose() can safely null-check it.
+ private void IdleThreadLoop()
+ {
+ while (_context.Running)
+ {
+ _state.NeedsScheduling = false;
+ Thread.MemoryBarrier();
+ KThread nextThread = PickNextThread(_state.SelectedThread);
+
+ if (_idleThread != nextThread)
+ {
+ _idleThread.SchedulerWaitEvent.Reset();
+ WaitHandle.SignalAndWait(nextThread.SchedulerWaitEvent, _idleThread.SchedulerWaitEvent);
+ }
+
+ _idleInterruptEvent.WaitOne();
+ }
+
+ lock (_idleInterruptEventLock)
+ {
+ _idleInterruptEvent.Dispose();
+ _idleInterruptEvent = null;
+ }
+ }
+
+ // Performs a context switch away from the calling guest thread to this
+ // core's selected thread. Blocks the caller until it is scheduled again,
+ // unless the caller is exiting, in which case it is made unschedulable.
+ public void Schedule()
+ {
+ _state.NeedsScheduling = false;
+ Thread.MemoryBarrier();
+ KThread currentThread = KernelStatic.GetCurrentThread();
+ KThread selectedThread = _state.SelectedThread;
+
+ // If the thread is already scheduled and running on the core, we have nothing to do.
+ if (currentThread == selectedThread)
+ {
+ return;
+ }
+
+ currentThread.SchedulerWaitEvent.Reset();
+ currentThread.ThreadContext.Unlock();
+
+ // Wake all the threads that might be waiting until this thread context is unlocked.
+ for (int core = 0; core < CpuCoresCount; core++)
+ {
+ _context.Schedulers[core]._idleInterruptEvent.Set();
+ }
+
+ KThread nextThread = PickNextThread(selectedThread);
+
+ if (currentThread.Context.Running)
+ {
+ // Wait until this thread is scheduled again, and allow the next thread to run.
+ WaitHandle.SignalAndWait(nextThread.SchedulerWaitEvent, currentThread.SchedulerWaitEvent);
+ }
+ else
+ {
+ // Allow the next thread to run.
+ nextThread.SchedulerWaitEvent.Set();
+
+ // We don't need to wait since the thread is exiting, however we need to
+ // make sure this thread will never call the scheduler again, since it is
+ // no longer assigned to a core.
+ currentThread.MakeUnschedulable();
+
+ // Just to be sure, set the core to an invalid value.
+ // This will trigger an exception if it attempts to call schedule again,
+ // rather than leaving the scheduler in an invalid state.
+ currentThread.CurrentCore = -1;
+ }
+ }
+
+ // Resolves which thread should actually take the core: tries to lock the
+ // selected thread's context (it may still be running elsewhere), falling
+ // back to the idle thread when the lock cannot be taken or no thread is
+ // selected. Loops while new selections arrive during the attempt.
+ private KThread PickNextThread(KThread selectedThread)
+ {
+ while (true)
+ {
+ if (selectedThread != null)
+ {
+ // Try to run the selected thread.
+ // We need to acquire the context lock to be sure the thread is not
+ // already running on another core. If it is, then we return here
+ // and the caller should try again once there is something available for scheduling.
+ // The thread currently running on the core should have been requested to
+ // interrupt so this is not expected to take long.
+ // The idle thread must also be paused if we are scheduling a thread
+ // on the core, as the scheduled thread will handle the next switch.
+ if (selectedThread.ThreadContext.Lock())
+ {
+ SwitchTo(selectedThread);
+
+ if (!_state.NeedsScheduling)
+ {
+ return selectedThread;
+ }
+
+ selectedThread.ThreadContext.Unlock();
+ }
+ else
+ {
+ return _idleThread;
+ }
+ }
+ else
+ {
+ // The core is idle now, make sure that the idle thread can run
+ // and switch the core when a thread is available.
+ SwitchTo(null);
+ return _idleThread;
+ }
+
+ // A new selection was made while we were switching; retry with it.
+ _state.NeedsScheduling = false;
+ Thread.MemoryBarrier();
+ selectedThread = _state.SelectedThread;
+ }
+ }
+
+ // Bookkeeping for a context switch on this core: charges elapsed CPU time
+ // to the outgoing thread and its process, updates the "previous thread"
+ // used for debugging/last-thread queries, and installs nextThread (or the
+ // idle thread when null) as the current thread.
+ private void SwitchTo(KThread nextThread)
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ nextThread ??= _idleThread;
+
+ if (currentThread != nextThread)
+ {
+ long previousTicks = LastContextSwitchTime;
+ long currentTicks = PerformanceCounter.ElapsedTicks;
+ long ticksDelta = currentTicks - previousTicks;
+
+ currentThread.AddCpuTime(ticksDelta);
+
+ if (currentProcess != null)
+ {
+ currentProcess.AddCpuTime(ticksDelta);
+ }
+
+ LastContextSwitchTime = currentTicks;
+
+ // Only remember the outgoing thread if it still belongs to this core
+ // and is not on its way out.
+ if (currentProcess != null)
+ {
+ _previousThread = !currentThread.TerminationRequested && currentThread.ActiveCore == _coreId ? currentThread : null;
+ }
+ else if (currentThread == _idleThread)
+ {
+ _previousThread = null;
+ }
+ }
+
+ if (nextThread.CurrentCore != _coreId)
+ {
+ nextThread.CurrentCore = _coreId;
+ }
+
+ _currentThread = nextThread;
+ }
+
+ // Host-side loop emulating timer preemption: every RoundRobinTimeQuantumMs
+ // (10 ms) it rotates each core's scheduled queue at that core's preemption
+ // priority, under the kernel critical section.
+ public static void PreemptionThreadLoop(KernelContext context)
+ {
+ while (context.Running)
+ {
+ context.CriticalSection.Enter();
+
+ for (int core = 0; core < CpuCoresCount; core++)
+ {
+ RotateScheduledQueue(context, core, PreemptionPriorities[core]);
+ }
+
+ context.CriticalSection.Leave();
+
+ Thread.Sleep(RoundRobinTimeQuantumMs);
+ }
+ }
+
+ // Preemption step for one core at one priority level: rotates the queue
+ // (round-robin), then tries to migrate a suitable suggested thread onto
+ // the core — first one of equal priority, then one of higher (numerically
+ // lower) priority if the core's best thread is at or below the preemption
+ // priority. Always requests a thread reselection afterwards.
+ private static void RotateScheduledQueue(KernelContext context, int core, int prio)
+ {
+ KThread selectedThread = context.PriorityQueue.ScheduledThreadsWithDynamicPriorityFirstOrDefault(core, prio);
+ KThread nextThread = null;
+
+ // Yield priority queue.
+ if (selectedThread != null)
+ {
+ nextThread = context.PriorityQueue.Reschedule(prio, core, selectedThread);
+ }
+
+ static KThread FirstSuitableCandidateOrDefault(KernelContext context, int core, KThread selectedThread, KThread nextThread, Predicate< KThread> predicate)
+ {
+ foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
+ {
+ int suggestedCore = suggested.ActiveCore;
+ if (suggestedCore >= 0)
+ {
+ KThread selectedSuggestedCore = context.PriorityQueue.ScheduledThreadsFirstOrDefault(suggestedCore);
+
+ // Skip candidates that are already first on their own core, or
+ // whose core is running a kernel dispatch thread (prio < 2).
+ if (selectedSuggestedCore == suggested || (selectedSuggestedCore != null && selectedSuggestedCore.DynamicPriority < 2))
+ {
+ continue;
+ }
+ }
+
+ // If the candidate was scheduled after the current thread, then it's not worth it.
+ if (nextThread == selectedThread ||
+ nextThread == null ||
+ nextThread.LastScheduledTime >= suggested.LastScheduledTime)
+ {
+ if (predicate(suggested))
+ {
+ return suggested;
+ }
+ }
+ }
+
+ return null;
+ }
+
+ // Select candidate threads that could run on this core.
+ // Only take into account threads that are not yet selected.
+ KThread dst = FirstSuitableCandidateOrDefault(context, core, selectedThread, nextThread, x => x.DynamicPriority == prio);
+
+ if (dst != null)
+ {
+ context.PriorityQueue.TransferToCore(prio, core, dst);
+ }
+
+ // If the priority of the currently selected thread is lower or same as the preemption priority,
+ // then try to migrate a thread with lower priority.
+ KThread bestCandidate = context.PriorityQueue.ScheduledThreadsFirstOrDefault(core);
+
+ if (bestCandidate != null && bestCandidate.DynamicPriority >= prio)
+ {
+ dst = FirstSuitableCandidateOrDefault(context, core, selectedThread, nextThread, x => x.DynamicPriority < bestCandidate.DynamicPriority);
+
+ if (dst != null)
+ {
+ context.PriorityQueue.TransferToCore(dst.DynamicPriority, core, dst);
+ }
+ }
+
+ context.ThreadReselectionRequested = true;
+ }
+
+ // svcSleepThread(0) semantics: moves the calling thread to the back of its
+ // priority queue on its current core. Requests a reselection only when
+ // that actually changed which thread is first. No-op for unschedulable
+ // threads or threads no longer in the Running state.
+ public static void Yield(KernelContext context)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ if (!currentThread.IsSchedulable)
+ {
+ return;
+ }
+
+ context.CriticalSection.Enter();
+
+ if (currentThread.SchedFlags != ThreadSchedState.Running)
+ {
+ context.CriticalSection.Leave();
+ return;
+ }
+
+ KThread nextThread = context.PriorityQueue.Reschedule(currentThread.DynamicPriority, currentThread.ActiveCore, currentThread);
+
+ if (nextThread != currentThread)
+ {
+ context.ThreadReselectionRequested = true;
+ }
+
+ context.CriticalSection.Leave();
+ }
+
+ // Yield variant that additionally tries to pull a suggested thread from
+ // another core onto the caller's core (priority less than or equal to the
+ // caller's). Requests a reselection when a migration happened or the
+ // rotation changed the front of the queue.
+ public static void YieldWithLoadBalancing(KernelContext context)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ if (!currentThread.IsSchedulable)
+ {
+ return;
+ }
+
+ context.CriticalSection.Enter();
+
+ if (currentThread.SchedFlags != ThreadSchedState.Running)
+ {
+ context.CriticalSection.Leave();
+ return;
+ }
+
+ int prio = currentThread.DynamicPriority;
+ int core = currentThread.ActiveCore;
+
+ // Move current thread to the end of the queue.
+ KThread nextThread = context.PriorityQueue.Reschedule(prio, core, currentThread);
+
+ static KThread FirstSuitableCandidateOrDefault(KernelContext context, int core, KThread nextThread, int lessThanOrEqualPriority)
+ {
+ foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
+ {
+ int suggestedCore = suggested.ActiveCore;
+ if (suggestedCore >= 0)
+ {
+ KThread selectedSuggestedCore = context.Schedulers[suggestedCore]._state.SelectedThread;
+
+ // Skip candidates already selected on their own core, or whose
+ // core is running a kernel dispatch thread (prio < 2).
+ if (selectedSuggestedCore == suggested || (selectedSuggestedCore != null && selectedSuggestedCore.DynamicPriority < 2))
+ {
+ continue;
+ }
+ }
+
+ // If the candidate was scheduled after the current thread, then it's not worth it,
+ // unless the priority is higher than the current one.
+ if (suggested.LastScheduledTime <= nextThread.LastScheduledTime ||
+ suggested.DynamicPriority < nextThread.DynamicPriority)
+ {
+ if (suggested.DynamicPriority <= lessThanOrEqualPriority)
+ {
+ return suggested;
+ }
+ }
+ }
+
+ return null;
+ }
+
+ KThread dst = FirstSuitableCandidateOrDefault(context, core, nextThread, prio);
+
+ if (dst != null)
+ {
+ context.PriorityQueue.TransferToCore(dst.DynamicPriority, core, dst);
+
+ context.ThreadReselectionRequested = true;
+ }
+ else if (currentThread != nextThread)
+ {
+ context.ThreadReselectionRequested = true;
+ }
+
+ context.CriticalSection.Leave();
+ }
+
+ // Strongest yield: removes the calling thread from its core entirely
+ // (TransferToCore with core -1). If the core then has no scheduled
+ // threads, tries to migrate one suggested thread from another core onto
+ // it; otherwise just requests a reselection.
+ public static void YieldToAnyThread(KernelContext context)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ if (!currentThread.IsSchedulable)
+ {
+ return;
+ }
+
+ context.CriticalSection.Enter();
+
+ if (currentThread.SchedFlags != ThreadSchedState.Running)
+ {
+ context.CriticalSection.Leave();
+ return;
+ }
+
+ int core = currentThread.ActiveCore;
+
+ context.PriorityQueue.TransferToCore(currentThread.DynamicPriority, -1, currentThread);
+
+ if (!context.PriorityQueue.HasScheduledThreads(core))
+ {
+ KThread selectedThread = null;
+
+ foreach (KThread suggested in context.PriorityQueue.SuggestedThreads(core))
+ {
+ int suggestedCore = suggested.ActiveCore;
+
+ if (suggestedCore < 0)
+ {
+ continue;
+ }
+
+ KThread firstCandidate = context.PriorityQueue.ScheduledThreadsFirstOrDefault(suggestedCore);
+
+ if (firstCandidate == suggested)
+ {
+ continue;
+ }
+
+ // Only steal the thread if its core would keep a kernel dispatch
+ // thread (prio < 2) running, or would be left empty.
+ if (firstCandidate == null || firstCandidate.DynamicPriority >= 2)
+ {
+ context.PriorityQueue.TransferToCore(suggested.DynamicPriority, core, suggested);
+ }
+
+ selectedThread = suggested;
+ break;
+ }
+
+ if (currentThread != selectedThread)
+ {
+ context.ThreadReselectionRequested = true;
+ }
+ }
+ else
+ {
+ context.ThreadReselectionRequested = true;
+ }
+
+ context.CriticalSection.Leave();
+ }
+
+ // Unblocks the idle thread so IdleThreadLoop can observe shutdown and
+ // exit; the idle loop itself disposes the event under the same lock, which
+ // is why a null check is needed here.
+ public void Dispose()
+ {
+ // Ensure that the idle thread is not blocked and can exit.
+ lock (_idleInterruptEventLock)
+ {
+ if (_idleInterruptEvent != null)
+ {
+ _idleInterruptEvent.Set();
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KSynchronization.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KSynchronization.cs
new file mode 100644
index 00000000..9c196810
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KSynchronization.cs
@@ -0,0 +1,142 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.Horizon.Common;
+using System;
+using System.Buffers;
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ // Implements the kernel's multi-object wait primitive (svcWaitSynchronization)
+ // and the waker side that signals waiting threads.
+ class KSynchronization
+ {
+ private KernelContext _context;
+
+ public KSynchronization(KernelContext context)
+ {
+ _context = context;
+ }
+
+ // Blocks the calling thread until one of syncObjs is signaled, the
+ // timeout (in nanoseconds; 0 = poll, negative = infinite) expires, the
+ // thread is terminated, or the wait is cancelled. On success handleIndex
+ // is the index of the signaled object; it stays -1 when the wait ended
+ // without any object being signaled.
+ public Result WaitFor(Span<KSynchronizationObject> syncObjs, long timeout, out int handleIndex)
+ {
+ handleIndex = 0;
+
+ Result result = KernelResult.TimedOut;
+
+ _context.CriticalSection.Enter();
+
+ // Check if objects are already signaled before waiting.
+ for (int index = 0; index < syncObjs.Length; index++)
+ {
+ if (!syncObjs[index].IsSignaled())
+ {
+ continue;
+ }
+
+ handleIndex = index;
+
+ _context.CriticalSection.Leave();
+
+ return Result.Success;
+ }
+
+ // Zero timeout means poll only: report TimedOut without sleeping.
+ if (timeout == 0)
+ {
+ _context.CriticalSection.Leave();
+
+ return result;
+ }
+
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ if (currentThread.TerminationRequested)
+ {
+ result = KernelResult.ThreadTerminating;
+ }
+ else if (currentThread.SyncCancelled)
+ {
+ currentThread.SyncCancelled = false;
+
+ result = KernelResult.Cancelled;
+ }
+ else
+ {
+ // Pooled buffer for the per-object wait list nodes, so they can be
+ // unlinked after the wait completes.
+ LinkedListNode<KThread>[] syncNodesArray = ArrayPool<LinkedListNode<KThread>>.Shared.Rent(syncObjs.Length);
+
+ Span<LinkedListNode<KThread>> syncNodes = syncNodesArray.AsSpan(0, syncObjs.Length);
+
+ for (int index = 0; index < syncObjs.Length; index++)
+ {
+ syncNodes[index] = syncObjs[index].AddWaitingThread(currentThread);
+ }
+
+ currentThread.WaitingSync = true;
+ currentThread.SignaledObj = null;
+ currentThread.ObjSyncResult = result;
+
+ // The thread actually sleeps when the critical section is left below;
+ // SignalObject or the time manager resumes it.
+ currentThread.Reschedule(ThreadSchedState.Paused);
+
+ if (timeout > 0)
+ {
+ _context.TimeManager.ScheduleFutureInvocation(currentThread, timeout);
+ }
+
+ _context.CriticalSection.Leave();
+
+ currentThread.WaitingSync = false;
+
+ if (timeout > 0)
+ {
+ _context.TimeManager.UnscheduleFutureInvocation(currentThread);
+ }
+
+ _context.CriticalSection.Enter();
+
+ result = currentThread.ObjSyncResult;
+
+ handleIndex = -1;
+
+ // Unlink from every object's wait list and find which one (if any)
+ // woke us.
+ for (int index = 0; index < syncObjs.Length; index++)
+ {
+ syncObjs[index].RemoveWaitingThread(syncNodes[index]);
+
+ if (syncObjs[index] == currentThread.SignaledObj)
+ {
+ handleIndex = index;
+ }
+ }
+
+ ArrayPool<LinkedListNode<KThread>>.Shared.Return(syncNodesArray);
+ }
+
+ _context.CriticalSection.Leave();
+
+ return result;
+ }
+
+ // Wakes every thread paused on syncObj, provided the object reports
+ // itself signaled. Each woken thread records syncObj as the object that
+ // satisfied its wait.
+ public void SignalObject(KSynchronizationObject syncObj)
+ {
+ _context.CriticalSection.Enter();
+
+ if (syncObj.IsSignaled())
+ {
+ LinkedListNode<KThread> node = syncObj.WaitingThreads.First;
+
+ while (node != null)
+ {
+ KThread thread = node.Value;
+
+ if ((thread.SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
+ {
+ thread.SignaledObj = syncObj;
+ thread.ObjSyncResult = Result.Success;
+
+ thread.Reschedule(ThreadSchedState.Running);
+ }
+
+ node = node.Next;
+ }
+ }
+
+ _context.CriticalSection.Leave();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs
new file mode 100644
index 00000000..63396468
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs
@@ -0,0 +1,1438 @@
+using Ryujinx.Common.Logging;
+using Ryujinx.Cpu;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.HLE.HOS.Kernel.SupervisorCall;
+using Ryujinx.Horizon.Common;
+using System;
+using System.Collections.Generic;
+using System.Numerics;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ class KThread : KSynchronizationObject, IKFutureSchedulerObject
+ {
+ private const int TlsUserDisableCountOffset = 0x100;
+ private const int TlsUserInterruptFlagOffset = 0x102;
+
+ public const int MaxWaitSyncObjects = 64;
+
+ private ManualResetEvent _schedulerWaitEvent;
+
+ public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;
+
+ public Thread HostThread { get; private set; }
+
+ public IExecutionContext Context { get; private set; }
+
+ public KThreadContext ThreadContext { get; private set; }
+
+ public int DynamicPriority { get; set; }
+ public ulong AffinityMask { get; set; }
+
+ public ulong ThreadUid { get; private set; }
+
+ private long _totalTimeRunning;
+
+ public long TotalTimeRunning => _totalTimeRunning;
+
+ public KSynchronizationObject SignaledObj { get; set; }
+
+ public ulong CondVarAddress { get; set; }
+
+ private ulong _entrypoint;
+ private ThreadStart _customThreadStart;
+ private bool _forcedUnschedulable;
+
+ public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;
+
+ public ulong MutexAddress { get; set; }
+ public int KernelWaitersCount { get; private set; }
+
+ public KProcess Owner { get; private set; }
+
+ private ulong _tlsAddress;
+
+ public ulong TlsAddress => _tlsAddress;
+
+ public KSynchronizationObject[] WaitSyncObjects { get; }
+ public int[] WaitSyncHandles { get; }
+
+ public long LastScheduledTime { get; set; }
+
+ public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }
+
+ public LinkedList<KThread> Withholder { get; set; }
+ public LinkedListNode<KThread> WithholderNode { get; set; }
+
+ public LinkedListNode<KThread> ProcessListNode { get; set; }
+
+ private LinkedList<KThread> _mutexWaiters;
+ private LinkedListNode<KThread> _mutexWaiterNode;
+
+ private LinkedList<KThread> _pinnedWaiters;
+
+ public KThread MutexOwner { get; private set; }
+
+ public int ThreadHandleForUserMutex { get; set; }
+
+ private ThreadSchedState _forcePauseFlags;
+ private ThreadSchedState _forcePausePermissionFlags;
+
+ public Result ObjSyncResult { get; set; }
+
+ public int BasePriority { get; set; }
+ public int PreferredCore { get; set; }
+
+ public int CurrentCore { get; set; }
+ public int ActiveCore { get; set; }
+
+ public bool IsPinned { get; private set; }
+
+ private ulong _originalAffinityMask;
+ private int _originalPreferredCore;
+ private int _originalBasePriority;
+ private int _coreMigrationDisableCount;
+
+ public ThreadSchedState SchedFlags { get; private set; }
+
+ private int _shallBeTerminated;
+
+ private bool ShallBeTerminated => _shallBeTerminated != 0;
+
+ public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;
+
+ public bool SyncCancelled { get; set; }
+ public bool WaitingSync { get; set; }
+
+ private int _hasExited;
+ private bool _hasBeenInitialized;
+ private bool _hasBeenReleased;
+
+ public bool WaitingInArbitration { get; set; }
+
+ private object _activityOperationLock;
+
+ // Allocates the per-thread collections; actual thread setup happens in
+ // Initialize.
+ public KThread(KernelContext context) : base(context)
+ {
+ WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
+ WaitSyncHandles = new int[MaxWaitSyncObjects];
+
+ // One queue node per core, used by KPriorityQueue for O(1) removal.
+ SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
+
+ _mutexWaiters = new LinkedList<KThread>();
+ _pinnedWaiters = new LinkedList<KThread>();
+
+ _activityOperationLock = new object();
+ }
+
+ // Sets up the thread: scheduling state, TLS (user threads only), host
+ // thread, and the emulated CPU context (entry registers, stack pointer,
+ // TPIDRRO_EL0). Returns OutOfMemory if TLS allocation fails. If the
+ // owner process is paused, the new thread starts force-paused too.
+ public Result Initialize(
+ ulong entrypoint,
+ ulong argsPtr,
+ ulong stackTop,
+ int priority,
+ int cpuCore,
+ KProcess owner,
+ ThreadType type,
+ ThreadStart customThreadStart = null)
+ {
+ if ((uint)type > 3)
+ {
+ throw new ArgumentException($"Invalid thread type \"{type}\".");
+ }
+
+ PreferredCore = cpuCore;
+ AffinityMask |= 1UL << cpuCore;
+
+ // Dummy threads (e.g. the idle thread) are born Running and never go
+ // through Start().
+ SchedFlags = type == ThreadType.Dummy
+ ? ThreadSchedState.Running
+ : ThreadSchedState.None;
+
+ ActiveCore = cpuCore;
+ ObjSyncResult = KernelResult.ThreadNotStarted;
+ DynamicPriority = priority;
+ BasePriority = priority;
+ CurrentCore = cpuCore;
+ IsPinned = false;
+
+ _entrypoint = entrypoint;
+ _customThreadStart = customThreadStart;
+
+ if (type == ThreadType.User)
+ {
+ if (owner.AllocateThreadLocalStorage(out _tlsAddress) != Result.Success)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize);
+ }
+
+ bool is64Bits;
+
+ if (owner != null)
+ {
+ Owner = owner;
+
+ owner.IncrementReferenceCount();
+ owner.IncrementThreadCount();
+
+ is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
+ }
+ else
+ {
+ // Ownerless (kernel-internal) threads are treated as 64-bit.
+ is64Bits = true;
+ }
+
+ HostThread = new Thread(ThreadStart);
+
+ Context = owner?.CreateExecutionContext() ?? new ProcessExecutionContext();
+
+ ThreadContext = new KThreadContext(Context);
+
+ Context.IsAarch32 = !is64Bits;
+
+ Context.SetX(0, argsPtr);
+
+ if (is64Bits)
+ {
+ // X18 is a platform register on AArch64; seeded with a random odd value.
+ Context.SetX(18, KSystemControl.GenerateRandom() | 1);
+ Context.SetX(31, stackTop);
+ }
+ else
+ {
+ // AArch32 stack pointer is R13.
+ Context.SetX(13, (uint)stackTop);
+ }
+
+ Context.TpidrroEl0 = (long)_tlsAddress;
+
+ ThreadUid = KernelContext.NewThreadUid();
+
+ HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";
+
+ _hasBeenInitialized = true;
+
+ _forcePausePermissionFlags = ThreadSchedState.ForcePauseMask;
+
+ if (owner != null)
+ {
+ owner.AddThread(this);
+
+ // Inherit the process pause state so the thread does not run while
+ // its process is suspended.
+ if (owner.IsPaused)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (TerminationRequested)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return Result.Success;
+ }
+
+ _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;
+
+ CombineForcePauseFlags();
+
+ KernelContext.CriticalSection.Leave();
+ }
+ }
+
+ return Result.Success;
+ }
+
+ // Transitions the thread from None to Running and starts its host thread.
+ // Before kernel init completes, the thread is force-paused instead of run.
+ // Returns InvalidState if the thread already left the None state, and
+ // ThreadTerminating if it (or the caller) is being terminated.
+ public Result Start()
+ {
+ if (!KernelContext.KernelInitialized)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (!TerminationRequested)
+ {
+ _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;
+
+ CombineForcePauseFlags();
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ Result result = KernelResult.ThreadTerminating;
+
+ KernelContext.CriticalSection.Enter();
+
+ if (!ShallBeTerminated)
+ {
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
+ {
+ if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
+ {
+ result = KernelResult.InvalidState;
+ break;
+ }
+
+ if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
+ {
+ if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
+ {
+ CombineForcePauseFlags();
+ }
+
+ SetNewSchedFlags(ThreadSchedState.Running);
+
+ StartHostThread();
+
+ result = Result.Success;
+ break;
+ }
+ else
+ {
+ // The caller itself is force-paused: apply its pause and retry
+ // (leave/enter lets the pause take effect before looping).
+ currentThread.CombineForcePauseFlags();
+
+ KernelContext.CriticalSection.Leave();
+ KernelContext.CriticalSection.Enter();
+
+ if (currentThread.ShallBeTerminated)
+ {
+ break;
+ }
+ }
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ return result;
+ }
+
+ // Marks the thread for termination (idempotent via the _shallBeTerminated
+ // exchange): unpins it, clears pause flags, boosts priority if needed,
+ // interrupts it if running, fails any pending sync, and resumes it so it
+ // can observe the termination request. Returns the low scheduling state
+ // after the request.
+ public ThreadSchedState PrepareForTermination()
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (Owner != null && Owner.PinnedThreads[KernelStatic.GetCurrentThread().CurrentCore] == this)
+ {
+ Owner.UnpinThread(this);
+ }
+
+ ThreadSchedState result;
+
+ // Only the first caller performs the termination work.
+ if (Interlocked.Exchange(ref _shallBeTerminated, 1) == 0)
+ {
+ if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
+ {
+ // Never started: it can go straight to TerminationPending.
+ SchedFlags = ThreadSchedState.TerminationPending;
+ }
+ else
+ {
+ if (_forcePauseFlags != ThreadSchedState.None)
+ {
+ _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;
+
+ ThreadSchedState oldSchedFlags = SchedFlags;
+
+ SchedFlags &= ThreadSchedState.LowMask;
+
+ AdjustScheduling(oldSchedFlags);
+ }
+
+ // Boost priority so the dying thread gets CPU time to exit.
+ if (BasePriority >= 0x10)
+ {
+ SetPriority(0xF);
+ }
+
+ if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
+ {
+ // TODO: GIC distributor stuffs (sgir changes ect)
+ Context.RequestInterrupt();
+ }
+
+ SignaledObj = null;
+ ObjSyncResult = KernelResult.ThreadTerminating;
+
+ ReleaseAndResume();
+ }
+ }
+
+ result = SchedFlags;
+
+ KernelContext.CriticalSection.Leave();
+
+ return result & ThreadSchedState.LowMask;
+ }
+
+ // Requests termination and, unless the thread is already in the
+ // TerminationPending state, blocks until the thread object is signaled
+ // (which happens in ExitImpl).
+ public void Terminate()
+ {
+ ThreadSchedState state = PrepareForTermination();
+
+ if (state != ThreadSchedState.TerminationPending)
+ {
+ KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
+ }
+ }
+
+ // Runs after every syscall: exits the thread if termination was requested,
+ // otherwise re-applies any pending force-pause flags. Loops because
+ // applying a pause can itself reveal a termination request.
+ public void HandlePostSyscall()
+ {
+ ThreadSchedState state;
+
+ do
+ {
+ if (TerminationRequested)
+ {
+ Exit();
+
+ // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
+ break;
+ }
+
+ KernelContext.CriticalSection.Enter();
+
+ if (TerminationRequested)
+ {
+ state = ThreadSchedState.TerminationPending;
+ }
+ else
+ {
+ if (_forcePauseFlags != ThreadSchedState.None)
+ {
+ CombineForcePauseFlags();
+ }
+
+ state = ThreadSchedState.Running;
+ }
+
+ KernelContext.CriticalSection.Leave();
+ } while (state == ThreadSchedState.TerminationPending);
+ }
+
+ // Tears the thread down on its own termination path: releases the thread
+ // slot back to the owner's resource limit, clears pause state, signals
+ // waiters (via ExitImpl), stops the emulated CPU context, and drops the
+ // reference taken for the running thread exactly once.
+ public void Exit()
+ {
+ // TODO: Debug event.
+
+ if (Owner != null)
+ {
+ Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);
+
+ _hasBeenReleased = true;
+ }
+
+ KernelContext.CriticalSection.Enter();
+
+ _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
+ _forcePausePermissionFlags = 0;
+
+ bool decRef = ExitImpl();
+
+ Context.StopRunning();
+
+ KernelContext.CriticalSection.Leave();
+
+ if (decRef)
+ {
+ DecrementReferenceCount();
+ }
+ }
+
+ /// <summary>
+ /// Moves the thread to the termination pending state and signals it as a
+ /// synchronization object. Returns true only on the first invocation
+ /// (guarded by an interlocked exchange on _hasExited).
+ /// </summary>
+ private bool ExitImpl()
+ {
+ KernelContext.CriticalSection.Enter();
+
+ SetNewSchedFlags(ThreadSchedState.TerminationPending);
+
+ bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;
+
+ Signal();
+
+ KernelContext.CriticalSection.Leave();
+
+ return decRef;
+ }
+
+ /// <summary>
+ /// Finds the core this thread is currently running on by scanning every
+ /// scheduler's current thread. Returns -1 if the thread is not running.
+ /// </summary>
+ private int GetEffectiveRunningCore()
+ {
+ int core = 0;
+
+ while (core < KScheduler.CpuCoresCount)
+ {
+ if (KernelContext.Schedulers[core].CurrentThread == this)
+ {
+ return core;
+ }
+
+ core++;
+ }
+
+ return -1;
+ }
+
+ /// <summary>
+ /// Puts the thread to sleep, optionally scheduling a wake-up after
+ /// <paramref name="timeout"/>. Fails with ThreadTerminating if termination
+ /// was already requested.
+ /// </summary>
+ public Result Sleep(long timeout)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (TerminationRequested)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.ThreadTerminating;
+ }
+
+ SetNewSchedFlags(ThreadSchedState.Paused);
+
+ if (timeout > 0)
+ {
+ KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
+ }
+
+ // Leaving the critical section lets the scheduler switch away; execution
+ // resumes here after the thread is released (e.g. by TimeUp).
+ KernelContext.CriticalSection.Leave();
+
+ if (timeout > 0)
+ {
+ KernelContext.TimeManager.UnscheduleFutureInvocation(this);
+ }
+
+ return Result.Success;
+ }
+
+ /// <summary>
+ /// Sets the thread's base priority. While the thread is pinned the value is
+ /// only stored, to be applied on Unpin; otherwise it takes effect immediately
+ /// through priority inheritance recalculation.
+ /// </summary>
+ public void SetPriority(int priority)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (IsPinned)
+ {
+ _originalBasePriority = priority;
+ }
+ else
+ {
+ BasePriority = priority;
+ }
+
+ UpdatePriorityInheritance();
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ /// <summary>
+ /// Adds a force-pause flag of the given type and folds the combined flags
+ /// into the scheduling state. NOTE(review): callers appear responsible for
+ /// holding the kernel critical section — confirm against call sites.
+ /// </summary>
+ public void Suspend(ThreadSchedState type)
+ {
+ _forcePauseFlags |= type;
+
+ CombineForcePauseFlags();
+ }
+
+ /// <summary>
+ /// Clears a force-pause flag of the given type; if no other force-pause
+ /// flags remain, drops the high (pause) bits from SchedFlags and reschedules.
+ /// </summary>
+ public void Resume(ThreadSchedState type)
+ {
+ ThreadSchedState oldForcePauseFlags = _forcePauseFlags;
+
+ _forcePauseFlags &= ~type;
+
+ // Only resume when the removed flag was the last remaining pause reason.
+ if ((oldForcePauseFlags & ~type) == ThreadSchedState.None)
+ {
+ ThreadSchedState oldSchedFlags = SchedFlags;
+
+ SchedFlags &= ThreadSchedState.LowMask;
+
+ AdjustScheduling(oldSchedFlags);
+ }
+ }
+
+ /// <summary>
+ /// Pauses or unpauses the thread (SVC SetThreadActivity semantics). When
+ /// pausing, additionally waits until the target thread has actually stopped
+ /// running on any core.
+ /// </summary>
+ /// <param name="pause">True to pause, false to unpause.</param>
+ /// <returns>InvalidState if the thread is in the wrong state or already in
+ /// the requested pause state; ThreadTerminating if the calling thread is
+ /// being terminated while waiting; Success otherwise.</returns>
+ public Result SetActivity(bool pause)
+ {
+ lock (_activityOperationLock)
+ {
+ Result result = Result.Success;
+
+ KernelContext.CriticalSection.Enter();
+
+ ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
+
+ // Only paused or running threads may change activity.
+ if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+
+ if (!TerminationRequested)
+ {
+ if (pause)
+ {
+ // Pause, the force pause flag should be clear (thread is NOT paused).
+ if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
+ {
+ Suspend(ThreadSchedState.ThreadPauseFlag);
+ }
+ else
+ {
+ result = KernelResult.InvalidState;
+ }
+ }
+ else
+ {
+ // Unpause, the force pause flag should be set (thread is paused).
+ if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
+ {
+ Resume(ThreadSchedState.ThreadPauseFlag);
+ }
+ else
+ {
+ result = KernelResult.InvalidState;
+ }
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ // When pausing, spin (re-entering the critical section each iteration)
+ // until the target thread is no longer running on any core.
+ if (result == Result.Success && pause)
+ {
+ bool isThreadRunning = true;
+
+ while (isThreadRunning)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (TerminationRequested)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ break;
+ }
+
+ isThreadRunning = false;
+
+ if (IsPinned)
+ {
+ // A pinned target cannot be preempted; block the calling
+ // thread on the pinned-waiters list until Unpin wakes it.
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ if (currentThread.TerminationRequested)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ result = KernelResult.ThreadTerminating;
+
+ break;
+ }
+
+ _pinnedWaiters.AddLast(currentThread);
+
+ currentThread.Reschedule(ThreadSchedState.Paused);
+ }
+ else
+ {
+ isThreadRunning = GetEffectiveRunningCore() >= 0;
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+ }
+
+ return result;
+ }
+ }
+
+ /// <summary>
+ /// Copies the thread's CPU context (SVC GetThreadContext3). The thread must
+ /// currently be force-paused via the thread pause flag; otherwise returns
+ /// InvalidState. If the thread is terminating the output stays defaulted.
+ /// </summary>
+ public Result GetThreadContext3(out ThreadContext context)
+ {
+ context = default;
+
+ lock (_activityOperationLock)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.InvalidState;
+ }
+
+ if (!TerminationRequested)
+ {
+ context = GetCurrentContext();
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ return Result.Success;
+ }
+
+ // Returns the processor state with reserved/unexposed bits masked off.
+ // NOTE(review): the exact meaning of mask 0xFF0FFE20 (which PSTATE fields
+ // survive) is assumed from the ARM SPSR layout — confirm against the ARM ARM.
+ private static uint GetPsr(IExecutionContext context)
+ {
+ return context.Pstate & 0xFF0FFE20;
+ }
+
+ /// <summary>
+ /// Snapshots the guest CPU context into a ThreadContext, using the 64-bit
+ /// register layout when the owner process is 64-bit and the AArch32 subset
+ /// (15 GPRs, 16 FPU registers) otherwise.
+ /// </summary>
+ private ThreadContext GetCurrentContext()
+ {
+ const int MaxRegistersAArch32 = 15;
+ const int MaxFpuRegistersAArch32 = 16;
+
+ ThreadContext context = new ThreadContext();
+
+ if (Owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit))
+ {
+ for (int i = 0; i < context.Registers.Length; i++)
+ {
+ context.Registers[i] = Context.GetX(i);
+ }
+
+ for (int i = 0; i < context.FpuRegisters.Length; i++)
+ {
+ context.FpuRegisters[i] = Context.GetV(i);
+ }
+
+ context.Fp = Context.GetX(29);
+ context.Lr = Context.GetX(30);
+ context.Sp = Context.GetX(31);
+ context.Pc = Context.Pc;
+ context.Pstate = GetPsr(Context);
+ context.Tpidr = (ulong)Context.TpidrroEl0;
+ }
+ else
+ {
+ // AArch32: only the low words of the first 15 GPRs are meaningful.
+ for (int i = 0; i < MaxRegistersAArch32; i++)
+ {
+ context.Registers[i] = (uint)Context.GetX(i);
+ }
+
+ for (int i = 0; i < MaxFpuRegistersAArch32; i++)
+ {
+ context.FpuRegisters[i] = Context.GetV(i);
+ }
+
+ context.Pc = (uint)Context.Pc;
+ context.Pstate = GetPsr(Context);
+ context.Tpidr = (uint)Context.TpidrroEl0;
+ }
+
+ context.Fpcr = (uint)Context.Fpcr;
+ context.Fpsr = (uint)Context.Fpsr;
+
+ return context;
+ }
+
+ /// <summary>
+ /// Cancels an in-progress synchronization wait. If the thread is not
+ /// actually paused waiting on sync, only marks SyncCancelled; otherwise
+ /// releases it (from a withholder list if present) back to running.
+ /// </summary>
+ public void CancelSynchronization()
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
+ {
+ SyncCancelled = true;
+ }
+ else if (Withholder != null)
+ {
+ Withholder.Remove(WithholderNode);
+
+ SetNewSchedFlags(ThreadSchedState.Running);
+
+ Withholder = null;
+
+ SyncCancelled = true;
+ }
+ else
+ {
+ // Actively waiting: deliver the Cancelled result to the wait itself.
+ SignaledObj = null;
+ ObjSyncResult = KernelResult.Cancelled;
+
+ SetNewSchedFlags(ThreadSchedState.Running);
+
+ SyncCancelled = false;
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ /// <summary>
+ /// Changes the thread's preferred core and affinity mask. While core
+ /// migration is disabled (thread pinned) only the original values are
+ /// updated, to be applied later. After the change, waits until the thread
+ /// is no longer running on a core outside the new mask.
+ /// </summary>
+ /// <param name="newCore">New preferred core, or -3 to keep the current one.</param>
+ /// <param name="newAffinityMask">Bit mask of allowed cores.</param>
+ public Result SetCoreAndAffinityMask(int newCore, ulong newAffinityMask)
+ {
+ lock (_activityOperationLock)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ bool isCoreMigrationDisabled = _coreMigrationDisableCount != 0;
+
+ // The value -3 is "do not change the preferred core".
+ if (newCore == -3)
+ {
+ newCore = isCoreMigrationDisabled ? _originalPreferredCore : PreferredCore;
+
+ // The kept core must still be allowed by the new mask.
+ if ((newAffinityMask & (1UL << newCore)) == 0)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.InvalidCombination;
+ }
+ }
+
+ if (isCoreMigrationDisabled)
+ {
+ _originalPreferredCore = newCore;
+ _originalAffinityMask = newAffinityMask;
+ }
+ else
+ {
+ ulong oldAffinityMask = AffinityMask;
+
+ PreferredCore = newCore;
+ AffinityMask = newAffinityMask;
+
+ if (oldAffinityMask != newAffinityMask)
+ {
+ int oldCore = ActiveCore;
+
+ // If the active core is no longer allowed, pick a new one:
+ // the preferred core, or the highest set bit of the mask.
+ if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
+ {
+ if (PreferredCore < 0)
+ {
+ ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount(AffinityMask);
+ }
+ else
+ {
+ ActiveCore = PreferredCore;
+ }
+ }
+
+ AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ // Wait for the thread to stop running on any core excluded by the mask.
+ bool targetThreadPinned = true;
+
+ while (targetThreadPinned)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (TerminationRequested)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ break;
+ }
+
+ targetThreadPinned = false;
+
+ int coreNumber = GetEffectiveRunningCore();
+ bool isPinnedThreadCurrentlyRunning = coreNumber >= 0;
+
+ if (isPinnedThreadCurrentlyRunning && ((1UL << coreNumber) & AffinityMask) == 0)
+ {
+ if (IsPinned)
+ {
+ // Pinned target: block the caller until Unpin wakes it.
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ if (currentThread.TerminationRequested)
+ {
+ KernelContext.CriticalSection.Leave();
+
+ return KernelResult.ThreadTerminating;
+ }
+
+ _pinnedWaiters.AddLast(currentThread);
+
+ currentThread.Reschedule(ThreadSchedState.Paused);
+ }
+ else
+ {
+ targetThreadPinned = true;
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ return Result.Success;
+ }
+ }
+
+ /// <summary>
+ /// Merges the permitted force-pause flags into the high bits of SchedFlags,
+ /// keeping the low-nibble state, then reschedules accordingly.
+ /// </summary>
+ private void CombineForcePauseFlags()
+ {
+ ThreadSchedState oldFlags = SchedFlags;
+ ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
+
+ SchedFlags = lowNibble | (_forcePauseFlags & _forcePausePermissionFlags);
+
+ AdjustScheduling(oldFlags);
+ }
+
+ /// <summary>
+ /// Replaces the low-nibble scheduling state with <paramref name="newFlags"/>,
+ /// preserving the high (force-pause) bits, and reschedules if it changed.
+ /// </summary>
+ private void SetNewSchedFlags(ThreadSchedState newFlags)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ ThreadSchedState oldFlags = SchedFlags;
+
+ SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;
+
+ if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
+ {
+ AdjustScheduling(oldFlags);
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ /// <summary>
+ /// If the thread is currently paused, removes it from its withholder list
+ /// (when present) and puts it back in the running state.
+ /// </summary>
+ public void ReleaseAndResume()
+ {
+ KernelContext.CriticalSection.Enter();
+
+ bool isPaused = (SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused;
+
+ if (isPaused)
+ {
+ if (Withholder == null)
+ {
+ SetNewSchedFlags(ThreadSchedState.Running);
+ }
+ else
+ {
+ Withholder.Remove(WithholderNode);
+
+ SetNewSchedFlags(ThreadSchedState.Running);
+
+ Withholder = null;
+ }
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ /// <summary>
+ /// Sets the low-nibble scheduling state to <paramref name="newFlags"/>
+ /// (keeping force-pause bits) and unconditionally adjusts scheduling.
+ /// </summary>
+ public void Reschedule(ThreadSchedState newFlags)
+ {
+ KernelContext.CriticalSection.Enter();
+
+ ThreadSchedState oldFlags = SchedFlags;
+
+ SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
+ (newFlags & ThreadSchedState.LowMask);
+
+ AdjustScheduling(oldFlags);
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ /// <summary>
+ /// Registers <paramref name="requester"/> as waiting on a mutex held by this
+ /// thread and recomputes priority inheritance.
+ /// </summary>
+ public void AddMutexWaiter(KThread requester)
+ {
+ AddToMutexWaitersList(requester);
+
+ requester.MutexOwner = this;
+
+ UpdatePriorityInheritance();
+ }
+
+ /// <summary>
+ /// Removes <paramref name="thread"/> from this thread's mutex waiters (if it
+ /// is still linked into a list) and recomputes priority inheritance.
+ /// </summary>
+ public void RemoveMutexWaiter(KThread thread)
+ {
+ if (thread._mutexWaiterNode?.List != null)
+ {
+ _mutexWaiters.Remove(thread._mutexWaiterNode);
+ }
+
+ thread.MutexOwner = null;
+
+ UpdatePriorityInheritance();
+ }
+
+ /// <summary>
+ /// Transfers ownership of the mutex at <paramref name="mutexAddress"/>:
+ /// the first waiter on that address becomes the new owner and all remaining
+ /// waiters on the same address are moved to the new owner's waiters list.
+ /// </summary>
+ /// <param name="count">Receives the number of threads that were waiting on the mutex.</param>
+ /// <returns>The new owner thread, or null if no thread was waiting.</returns>
+ public KThread RelinquishMutex(ulong mutexAddress, out int count)
+ {
+ count = 0;
+
+ if (_mutexWaiters.First == null)
+ {
+ return null;
+ }
+
+ KThread newMutexOwner = null;
+
+ LinkedListNode<KThread> currentNode = _mutexWaiters.First;
+
+ do
+ {
+ // Skip all threads that are not waiting for this mutex.
+ while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
+ {
+ currentNode = currentNode.Next;
+ }
+
+ if (currentNode == null)
+ {
+ break;
+ }
+
+ // Capture the successor before unlinking the node.
+ LinkedListNode<KThread> nextNode = currentNode.Next;
+
+ _mutexWaiters.Remove(currentNode);
+
+ currentNode.Value.MutexOwner = newMutexOwner;
+
+ if (newMutexOwner != null)
+ {
+ // New owner was already selected, re-insert on new owner list.
+ newMutexOwner.AddToMutexWaitersList(currentNode.Value);
+ }
+ else
+ {
+ // New owner not selected yet, use current thread.
+ newMutexOwner = currentNode.Value;
+ }
+
+ count++;
+
+ currentNode = nextNode;
+ }
+ while (currentNode != null);
+
+ // Priorities may change on both sides of the ownership transfer.
+ if (newMutexOwner != null)
+ {
+ UpdatePriorityInheritance();
+
+ newMutexOwner.UpdatePriorityInheritance();
+ }
+
+ return newMutexOwner;
+ }
+
+ /// <summary>
+ /// Recomputes this thread's dynamic priority: the minimum of its base
+ /// priority and the best (lowest-number) priority among its mutex waiters.
+ /// Propagates the change up the chain of mutex owners.
+ /// </summary>
+ private void UpdatePriorityInheritance()
+ {
+ // If any of the threads waiting for the mutex has
+ // higher priority than the current thread, then
+ // the current thread inherits that priority.
+ int highestPriority = BasePriority;
+
+ // The waiters list is kept sorted, so only the head needs checking.
+ if (_mutexWaiters.First != null)
+ {
+ int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;
+
+ if (waitingDynamicPriority < highestPriority)
+ {
+ highestPriority = waitingDynamicPriority;
+ }
+ }
+
+ if (highestPriority != DynamicPriority)
+ {
+ int oldPriority = DynamicPriority;
+
+ DynamicPriority = highestPriority;
+
+ AdjustSchedulingForNewPriority(oldPriority);
+
+ if (MutexOwner != null)
+ {
+ // Remove and re-insert to ensure proper sorting based on new priority.
+ MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);
+
+ MutexOwner.AddToMutexWaitersList(this);
+
+ MutexOwner.UpdatePriorityInheritance();
+ }
+ }
+ }
+
+ /// <summary>
+ /// Inserts <paramref name="thread"/> into the mutex waiters list, keeping it
+ /// sorted by dynamic priority (lowest number, i.e. highest priority, first).
+ /// </summary>
+ private void AddToMutexWaitersList(KThread thread)
+ {
+ LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
+
+ int currentPriority = thread.DynamicPriority;
+
+ // Walk past all waiters with equal or better priority.
+ while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
+ {
+ nextPrio = nextPrio.Next;
+ }
+
+ if (nextPrio != null)
+ {
+ thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
+ }
+ else
+ {
+ thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
+ }
+ }
+
+ /// <summary>
+ /// Reacts to a SchedFlags transition: inserts the thread into or removes it
+ /// from the priority queues (scheduled on its active core, suggested on the
+ /// other affinity cores). Non-schedulable threads are driven directly via
+ /// the host scheduler wait event instead.
+ /// </summary>
+ private void AdjustScheduling(ThreadSchedState oldFlags)
+ {
+ if (oldFlags == SchedFlags)
+ {
+ return;
+ }
+
+ if (!IsSchedulable)
+ {
+ if (!_forcedUnschedulable)
+ {
+ // Ensure our thread is running and we have an event.
+ StartHostThread();
+
+ // If the thread is not schedulable, we want to just run or pause
+ // it directly as we don't care about priority or the core it is
+ // running on in this case.
+ if (SchedFlags == ThreadSchedState.Running)
+ {
+ _schedulerWaitEvent.Set();
+ }
+ else
+ {
+ _schedulerWaitEvent.Reset();
+ }
+ }
+
+ return;
+ }
+
+ if (oldFlags == ThreadSchedState.Running)
+ {
+ // Was running, now it's stopped.
+ if (ActiveCore >= 0)
+ {
+ KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
+ }
+
+ for (int core = 0; core < KScheduler.CpuCoresCount; core++)
+ {
+ if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
+ {
+ KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
+ }
+ }
+ }
+ else if (SchedFlags == ThreadSchedState.Running)
+ {
+ // Was stopped, now it's running.
+ if (ActiveCore >= 0)
+ {
+ KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
+ }
+
+ for (int core = 0; core < KScheduler.CpuCoresCount; core++)
+ {
+ if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
+ {
+ KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
+ }
+ }
+ }
+
+ KernelContext.ThreadReselectionRequested = true;
+ }
+
+ /// <summary>
+ /// Moves a running, schedulable thread from the priority queues of
+ /// <paramref name="oldPriority"/> to those of its new DynamicPriority.
+ /// The current thread is prepended so it keeps running on its core.
+ /// </summary>
+ private void AdjustSchedulingForNewPriority(int oldPriority)
+ {
+ if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
+ {
+ return;
+ }
+
+ // Remove thread from the old priority queues.
+ if (ActiveCore >= 0)
+ {
+ KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
+ }
+
+ for (int core = 0; core < KScheduler.CpuCoresCount; core++)
+ {
+ if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
+ {
+ KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
+ }
+ }
+
+ // Add thread to the new priority queues.
+ KThread currentThread = KernelStatic.GetCurrentThread();
+
+ if (ActiveCore >= 0)
+ {
+ if (currentThread == this)
+ {
+ KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
+ }
+ else
+ {
+ KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
+ }
+ }
+
+ for (int core = 0; core < KScheduler.CpuCoresCount; core++)
+ {
+ if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
+ {
+ KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
+ }
+ }
+
+ KernelContext.ThreadReselectionRequested = true;
+ }
+
+ /// <summary>
+ /// Moves a running, schedulable thread between priority queues after its
+ /// affinity mask changed: unschedules/unsuggests from cores in the old mask
+ /// and schedules/suggests on cores in the new mask.
+ /// </summary>
+ private void AdjustSchedulingForNewAffinity(ulong oldAffinityMask, int oldCore)
+ {
+ if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
+ {
+ return;
+ }
+
+ // Remove thread from the old priority queues.
+ for (int core = 0; core < KScheduler.CpuCoresCount; core++)
+ {
+ if (((oldAffinityMask >> core) & 1) != 0)
+ {
+ if (core == oldCore)
+ {
+ KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
+ }
+ else
+ {
+ KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
+ }
+ }
+ }
+
+ // Add thread to the new priority queues.
+ for (int core = 0; core < KScheduler.CpuCoresCount; core++)
+ {
+ if (((AffinityMask >> core) & 1) != 0)
+ {
+ if (core == ActiveCore)
+ {
+ KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
+ }
+ else
+ {
+ KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
+ }
+ }
+ }
+
+ KernelContext.ThreadReselectionRequested = true;
+ }
+
+ /// <summary>
+ /// Loads the thread entry point arguments into the guest registers:
+ /// X0 = arguments pointer, X1 = thread handle.
+ /// </summary>
+ public void SetEntryArguments(long argsPtr, int threadHandle)
+ {
+ ulong arg0 = (ulong)argsPtr;
+ ulong arg1 = (ulong)threadHandle;
+
+ Context.SetX(0, arg0);
+ Context.SetX(1, arg1);
+ }
+
+ // Callback from the time manager when a scheduled timeout expires:
+ // wakes the thread back up.
+ public void TimeUp()
+ {
+ ReleaseAndResume();
+ }
+
+ // Returns the guest stack trace for this thread via the owner's debugger.
+ public string GetGuestStackTrace()
+ {
+ return Owner.Debugger.GetGuestStackTrace(this);
+ }
+
+ // Returns a formatted dump of the guest CPU registers via the owner's debugger.
+ public string GetGuestRegisterPrintout()
+ {
+ return Owner.Debugger.GetCpuRegisterPrintout(this);
+ }
+
+ /// <summary>
+ /// Logs the guest stack trace at Info level on the CPU log class.
+ /// </summary>
+ public void PrintGuestStackTrace()
+ {
+ string trace = GetGuestStackTrace();
+
+ Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{trace}\n");
+ }
+
+ /// <summary>
+ /// Logs the guest CPU register dump at Info level on the CPU log class.
+ /// </summary>
+ public void PrintGuestRegisterPrintout()
+ {
+ string registers = GetGuestRegisterPrintout();
+
+ Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{registers}\n");
+ }
+
+ // Atomically accumulates CPU time consumed by this thread, in ticks.
+ public void AddCpuTime(long ticks)
+ {
+ Interlocked.Add(ref _totalTimeRunning, ticks);
+ }
+
+ /// <summary>
+ /// Lazily starts the backing host thread, exactly once. The interlocked
+ /// exchange on the wait event acts as the "first caller" gate; losers
+ /// dispose their event instead of starting the thread again.
+ /// </summary>
+ public void StartHostThread()
+ {
+ if (_schedulerWaitEvent == null)
+ {
+ var schedulerWaitEvent = new ManualResetEvent(false);
+
+ if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
+ {
+ HostThread.Start();
+ }
+ else
+ {
+ // Another caller won the race; discard the redundant event.
+ schedulerWaitEvent.Dispose();
+ }
+ }
+ }
+
+ /// <summary>
+ /// Host thread entry point: waits until the scheduler allows this thread to
+ /// run, then either invokes the custom (HLE) start routine or executes the
+ /// guest entry point, and finally disposes the context and wait event.
+ /// </summary>
+ private void ThreadStart()
+ {
+ _schedulerWaitEvent.WaitOne();
+ KernelStatic.SetKernelContext(KernelContext, this);
+
+ if (_customThreadStart != null)
+ {
+ _customThreadStart();
+
+ // Ensure that anything trying to join the HLE thread is unblocked.
+ Exit();
+ HandlePostSyscall();
+ }
+ else
+ {
+ Owner.Context.Execute(Context, _entrypoint);
+ }
+
+ Context.Dispose();
+ _schedulerWaitEvent.Dispose();
+ }
+
+ // Permanently opts this thread out of the direct run/pause handling that
+ // AdjustScheduling applies to non-schedulable threads.
+ public void MakeUnschedulable()
+ {
+ _forcedUnschedulable = true;
+ }
+
+ // As a synchronization object, a thread is signaled once it has exited.
+ public override bool IsSignaled()
+ {
+ return _hasExited != 0;
+ }
+
+ /// <summary>
+ /// Final cleanup when the last reference is dropped: frees thread resources
+ /// and returns the thread slot to the owner's (or kernel's) resource limit.
+ /// </summary>
+ protected override void Destroy()
+ {
+ if (_hasBeenInitialized)
+ {
+ FreeResources();
+
+ // NOTE(review): 'released' is true whenever Owner != null, regardless
+ // of _hasBeenReleased — confirm this matches the intended resource
+ // limit accounting (second Release argument becomes 0 in that case).
+ bool released = Owner != null || _hasBeenReleased;
+
+ if (Owner != null)
+ {
+ Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);
+
+ Owner.DecrementReferenceCount();
+ }
+ else
+ {
+ KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
+ }
+ }
+ }
+
+ /// <summary>
+ /// Detaches the thread from its owner, frees its TLS slot and releases any
+ /// threads still waiting on mutexes held by this thread.
+ /// </summary>
+ private void FreeResources()
+ {
+ Owner?.RemoveThread(this);
+
+ // NOTE(review): Owner is dereferenced without the null-conditional here;
+ // presumably _tlsAddress is only non-zero when Owner exists — confirm.
+ if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != Result.Success)
+ {
+ throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
+ }
+
+ KernelContext.CriticalSection.Enter();
+
+ // Wake up all threads that may be waiting for a mutex being held by this thread.
+ foreach (KThread thread in _mutexWaiters)
+ {
+ thread.MutexOwner = null;
+ thread._originalPreferredCore = 0;
+ thread.ObjSyncResult = KernelResult.InvalidState;
+
+ thread.ReleaseAndResume();
+ }
+
+ KernelContext.CriticalSection.Leave();
+
+ Owner?.DecrementThreadCountAndTerminateIfZero();
+ }
+
+ /// <summary>
+ /// Pins the thread to the current core: saves the original core/affinity/
+ /// priority settings, restricts affinity to the current core, caps the base
+ /// priority from the process capability mask, and disallows thread pausing.
+ /// </summary>
+ public void Pin()
+ {
+ IsPinned = true;
+ _coreMigrationDisableCount++;
+
+ int activeCore = ActiveCore;
+
+ _originalPreferredCore = PreferredCore;
+ _originalAffinityMask = AffinityMask;
+
+ ActiveCore = CurrentCore;
+ PreferredCore = CurrentCore;
+ AffinityMask = 1UL << CurrentCore;
+
+ if (activeCore != CurrentCore || _originalAffinityMask != AffinityMask)
+ {
+ AdjustSchedulingForNewAffinity(_originalAffinityMask, activeCore);
+ }
+
+ _originalBasePriority = BasePriority;
+ // Highest allowed priority = lowest set bit of the allowed-priority mask.
+ BasePriority = Math.Min(_originalBasePriority, BitOperations.TrailingZeroCount(Owner.Capabilities.AllowedThreadPriosMask) - 1);
+ UpdatePriorityInheritance();
+
+ // Disallows thread pausing
+ _forcePausePermissionFlags &= ~ThreadSchedState.ThreadPauseFlag;
+ CombineForcePauseFlags();
+
+ // TODO: Assign reduced SVC permissions
+ }
+
+ /// <summary>
+ /// Undoes Pin: restores the saved core/affinity/priority, re-allows thread
+ /// pausing (unless terminating) and wakes everything blocked on the
+ /// pinned-waiters list.
+ /// </summary>
+ public void Unpin()
+ {
+ IsPinned = false;
+ _coreMigrationDisableCount--;
+
+ ulong affinityMask = AffinityMask;
+ int activeCore = ActiveCore;
+
+ PreferredCore = _originalPreferredCore;
+ AffinityMask = _originalAffinityMask;
+
+ if (AffinityMask != affinityMask)
+ {
+ // NOTE(review): this re-picks the active core when the restored mask
+ // still CONTAINS the current active core (!= 0); the analogous check
+ // in SetCoreAndAffinityMask fires when the core is NOT in the mask —
+ // confirm the inversion here is intentional.
+ if ((AffinityMask & 1UL << ActiveCore) != 0)
+ {
+ if (PreferredCore >= 0)
+ {
+ ActiveCore = PreferredCore;
+ }
+ else
+ {
+ // Fall back to the highest core allowed by the mask.
+ ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
+ }
+
+ AdjustSchedulingForNewAffinity(affinityMask, activeCore);
+ }
+ }
+
+ BasePriority = _originalBasePriority;
+ UpdatePriorityInheritance();
+
+ if (!TerminationRequested)
+ {
+ // Allows thread pausing
+ _forcePausePermissionFlags |= ThreadSchedState.ThreadPauseFlag;
+ CombineForcePauseFlags();
+
+ // TODO: Restore SVC permissions
+ }
+
+ // Wake up waiters
+ foreach (KThread waiter in _pinnedWaiters)
+ {
+ waiter.ReleaseAndResume();
+ }
+
+ _pinnedWaiters.Clear();
+ }
+
+ /// <summary>
+ /// If this thread is the owner process's pinned thread for the current core,
+ /// clears its user interrupt flag and unpins it.
+ /// </summary>
+ public void SynchronizePreemptionState()
+ {
+ KernelContext.CriticalSection.Enter();
+
+ if (Owner != null && Owner.PinnedThreads[CurrentCore] == this)
+ {
+ ClearUserInterruptFlag();
+
+ Owner.UnpinThread(this);
+ }
+
+ KernelContext.CriticalSection.Leave();
+ }
+
+ // Reads the user-mode disable count from this thread's TLS block.
+ public ushort GetUserDisableCount()
+ {
+ return Owner.CpuMemory.Read<ushort>(_tlsAddress + TlsUserDisableCountOffset);
+ }
+
+ // Writes 1 to the user interrupt flag in this thread's TLS block.
+ public void SetUserInterruptFlag()
+ {
+ Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 1);
+ }
+
+ // Writes 0 to the user interrupt flag in this thread's TLS block.
+ public void ClearUserInterruptFlag()
+ {
+ Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 0);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KThreadContext.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KThreadContext.cs
new file mode 100644
index 00000000..e8ad53c2
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KThreadContext.cs
@@ -0,0 +1,33 @@
+using Ryujinx.Cpu;
+using Ryujinx.Horizon.Common;
+using System.Threading;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ /// <summary>
+ /// Thin IThreadContext adapter over an IExecutionContext, with a simple
+ /// interlocked single-holder lock.
+ /// </summary>
+ class KThreadContext : IThreadContext
+ {
+ private readonly IExecutionContext _context;
+
+ public bool Running => _context.Running;
+ public ulong TlsAddress => (ulong)_context.TpidrroEl0;
+
+ public ulong GetX(int index) => _context.GetX(index);
+
+ // 0 = unlocked, 1 = locked.
+ private int _locked;
+
+ public KThreadContext(IExecutionContext context)
+ {
+ _context = context;
+ }
+
+ // Attempts to take the lock; returns false if it was already held.
+ public bool Lock()
+ {
+ return Interlocked.Exchange(ref _locked, 1) == 0;
+ }
+
+ public void Unlock()
+ {
+ Interlocked.Exchange(ref _locked, 0);
+ }
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/KWritableEvent.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/KWritableEvent.cs
new file mode 100644
index 00000000..b46122be
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/KWritableEvent.cs
@@ -0,0 +1,25 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.Horizon.Common;
+
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ /// <summary>
+ /// Writable side of a kernel event pair; signal and clear operations are
+ /// forwarded to the parent event's readable side.
+ /// </summary>
+ class KWritableEvent : KAutoObject
+ {
+ private readonly KEvent _parent;
+
+ public KWritableEvent(KernelContext context, KEvent parent) : base(context)
+ {
+ _parent = parent;
+ }
+
+ /// <summary>Signals the parent event's readable side.</summary>
+ public void Signal() => _parent.ReadableEvent.Signal();
+
+ /// <summary>Clears the parent event's readable side.</summary>
+ public Result Clear() => _parent.ReadableEvent.Clear();
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/SignalType.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/SignalType.cs
new file mode 100644
index 00000000..e72b719b
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/SignalType.cs
@@ -0,0 +1,9 @@
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ // Signal modes used by the address arbiter / condition variable SVCs.
+ enum SignalType
+ {
+ Signal = 0,
+ SignalAndIncrementIfEqual = 1,
+ SignalAndModifyIfEqual = 2
+ }
+}
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs
new file mode 100644
index 00000000..9577075c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadSchedState.cs
@@ -0,0 +1,20 @@
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ // Thread scheduling state: the low nibble holds the basic state
+ // (None/Paused/Running/TerminationPending), the high bits hold
+ // force-pause reason flags.
+ enum ThreadSchedState : ushort
+ {
+ LowMask = 0xf,
+ HighMask = 0xfff0,
+ // Union of all force-pause flags below.
+ ForcePauseMask = 0x1f0,
+
+ ProcessPauseFlag = 1 << 4,
+ ThreadPauseFlag = 1 << 5,
+ ProcessDebugPauseFlag = 1 << 6,
+ BacktracePauseFlag = 1 << 7,
+ KernelInitPauseFlag = 1 << 8,
+
+ // Low-nibble states.
+ None = 0,
+ Paused = 1,
+ Running = 2,
+ TerminationPending = 3
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadType.cs b/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadType.cs
new file mode 100644
index 00000000..0b44b57f
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Threading/ThreadType.cs
@@ -0,0 +1,10 @@
+namespace Ryujinx.HLE.HOS.Kernel.Threading
+{
+ // Kind of kernel thread being created.
+ enum ThreadType
+ {
+ Dummy,
+ Kernel,
+ Kernel2,
+ User
+ }
+} \ No newline at end of file