path: root/src/Ryujinx.Cpu
Diffstat (limited to 'src/Ryujinx.Cpu')
-rw-r--r--  src/Ryujinx.Cpu/AddressSpace.cs | 470
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/Arm/ApFlags.cs | 27
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/Arm/ExceptionClass.cs | 47
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/DummyDiskCacheLoadState.cs | 17
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvAddressSpace.cs | 129
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvAddressSpaceRange.cs | 370
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvApi.cs | 320
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvCpuContext.cs | 47
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvEngine.cs | 20
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvExecutionContext.cs | 284
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvExecutionContextShadow.cs | 59
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvExecutionContextVcpu.cs | 196
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvIpaAllocator.cs | 34
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocation.cs | 34
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs | 59
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvMemoryManager.cs | 947
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvVcpu.cs | 25
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvVcpuPool.cs | 103
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/HvVm.cs | 68
-rw-r--r--  src/Ryujinx.Cpu/AppleHv/IHvExecutionContext.cs | 46
-rw-r--r--  src/Ryujinx.Cpu/ExceptionCallbacks.cs | 64
-rw-r--r--  src/Ryujinx.Cpu/ICpuContext.cs | 61
-rw-r--r--  src/Ryujinx.Cpu/ICpuEngine.cs | 18
-rw-r--r--  src/Ryujinx.Cpu/IDiskCacheState.cs | 20
-rw-r--r--  src/Ryujinx.Cpu/IExecutionContext.cs | 112
-rw-r--r--  src/Ryujinx.Cpu/ITickSource.cs | 31
-rw-r--r--  src/Ryujinx.Cpu/IVirtualMemoryManagerTracked.cs | 56
-rw-r--r--  src/Ryujinx.Cpu/Jit/JitCpuContext.cs | 53
-rw-r--r--  src/Ryujinx.Cpu/Jit/JitDiskCacheLoadState.cs | 38
-rw-r--r--  src/Ryujinx.Cpu/Jit/JitEngine.cs | 20
-rw-r--r--  src/Ryujinx.Cpu/Jit/JitExecutionContext.cs | 123
-rw-r--r--  src/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs | 13
-rw-r--r--  src/Ryujinx.Cpu/Jit/JitMemoryBlock.cs | 24
-rw-r--r--  src/Ryujinx.Cpu/Jit/MemoryManager.cs | 704
-rw-r--r--  src/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs | 817
-rw-r--r--  src/Ryujinx.Cpu/LoadState.cs | 12
-rw-r--r--  src/Ryujinx.Cpu/MemoryEhMeilleure.cs | 62
-rw-r--r--  src/Ryujinx.Cpu/MemoryHelper.cs | 63
-rw-r--r--  src/Ryujinx.Cpu/MemoryManagerBase.cs | 32
-rw-r--r--  src/Ryujinx.Cpu/PrivateMemoryAllocation.cs | 41
-rw-r--r--  src/Ryujinx.Cpu/PrivateMemoryAllocator.cs | 268
-rw-r--r--  src/Ryujinx.Cpu/Ryujinx.Cpu.csproj | 13
-rw-r--r--  src/Ryujinx.Cpu/TickSource.cs | 45
-rw-r--r--  src/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs | 28
-rw-r--r--  src/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs | 37
-rw-r--r--  src/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs | 26
46 files changed, 6083 insertions, 0 deletions
diff --git a/src/Ryujinx.Cpu/AddressSpace.cs b/src/Ryujinx.Cpu/AddressSpace.cs
new file mode 100644
index 00000000..9dc32426
--- /dev/null
+++ b/src/Ryujinx.Cpu/AddressSpace.cs
@@ -0,0 +1,470 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Collections;
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu
+{
+ class AddressSpace : IDisposable
+ {
+ private const ulong PageSize = 0x1000;
+
+ private const int DefaultBlockAlignment = 1 << 20;
+
+ private enum MappingType : byte
+ {
+ None,
+ Private,
+ Shared
+ }
+
+ private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>
+ {
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+ public ulong EndAddress => Address + Size;
+ public MappingType Type { get; private set; }
+
+ public Mapping(ulong address, ulong size, MappingType type)
+ {
+ Address = address;
+ Size = size;
+ Type = type;
+ }
+
+ public Mapping Split(ulong splitAddress)
+ {
+ ulong leftSize = splitAddress - Address;
+ ulong rightSize = EndAddress - splitAddress;
+
+ Mapping left = new Mapping(Address, leftSize, Type);
+
+ Address = splitAddress;
+ Size = rightSize;
+
+ return left;
+ }
+
+ public void UpdateState(MappingType newType)
+ {
+ Type = newType;
+ }
+
+ public void Extend(ulong sizeDelta)
+ {
+ Size += sizeDelta;
+ }
+
+ public int CompareTo(Mapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>
+ {
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+ public ulong EndAddress => Address + Size;
+ public PrivateMemoryAllocation PrivateAllocation { get; private set; }
+
+ public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
+ {
+ Address = address;
+ Size = size;
+ PrivateAllocation = privateAllocation;
+ }
+
+ public PrivateMapping Split(ulong splitAddress)
+ {
+ ulong leftSize = splitAddress - Address;
+ ulong rightSize = EndAddress - splitAddress;
+
+ (var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);
+
+ PrivateMapping left = new PrivateMapping(Address, leftSize, leftAllocation);
+
+ Address = splitAddress;
+ Size = rightSize;
+
+ return left;
+ }
+
+ public void Map(MemoryBlock baseBlock, MemoryBlock mirrorBlock, PrivateMemoryAllocation newAllocation)
+ {
+ baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address, Size);
+ mirrorBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address, Size);
+ PrivateAllocation = newAllocation;
+ }
+
+ public void Unmap(MemoryBlock baseBlock, MemoryBlock mirrorBlock)
+ {
+ if (PrivateAllocation.IsValid)
+ {
+ baseBlock.UnmapView(PrivateAllocation.Memory, Address, Size);
+ mirrorBlock.UnmapView(PrivateAllocation.Memory, Address, Size);
+ PrivateAllocation.Dispose();
+ }
+
+ PrivateAllocation = default;
+ }
+
+ public void Extend(ulong sizeDelta)
+ {
+ Size += sizeDelta;
+ }
+
+ public int CompareTo(PrivateMapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private readonly MemoryBlock _backingMemory;
+ private readonly PrivateMemoryAllocator _privateMemoryAllocator;
+ private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
+ private readonly IntrusiveRedBlackTree<PrivateMapping> _privateTree;
+
+ private readonly object _treeLock;
+
+ private readonly bool _supports4KBPages;
+
+ public MemoryBlock Base { get; }
+ public MemoryBlock Mirror { get; }
+
+ public AddressSpace(MemoryBlock backingMemory, ulong asSize, bool supports4KBPages)
+ {
+ if (!supports4KBPages)
+ {
+ _privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable | MemoryAllocationFlags.NoMap);
+ _mappingTree = new IntrusiveRedBlackTree<Mapping>();
+ _privateTree = new IntrusiveRedBlackTree<PrivateMapping>();
+ _treeLock = new object();
+
+ _mappingTree.Add(new Mapping(0UL, asSize, MappingType.None));
+ _privateTree.Add(new PrivateMapping(0UL, asSize, default));
+ }
+
+ _backingMemory = backingMemory;
+ _supports4KBPages = supports4KBPages;
+
+ MemoryAllocationFlags asFlags = MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible;
+
+ Base = new MemoryBlock(asSize, asFlags);
+ Mirror = new MemoryBlock(asSize, asFlags);
+ }
+
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+ {
+ if (_supports4KBPages)
+ {
+ Base.MapView(_backingMemory, pa, va, size);
+ Mirror.MapView(_backingMemory, pa, va, size);
+
+ return;
+ }
+
+ lock (_treeLock)
+ {
+ ulong alignment = MemoryBlock.GetPageSize();
+ bool isAligned = ((va | pa | size) & (alignment - 1)) == 0;
+
+ if (flags.HasFlag(MemoryMapFlags.Private) && !isAligned)
+ {
+ Update(va, pa, size, MappingType.Private);
+ }
+ else
+ {
+ // The update method assumes that shared mappings are already aligned.
+
+ if (!flags.HasFlag(MemoryMapFlags.Private))
+ {
+ if ((va & (alignment - 1)) != (pa & (alignment - 1)))
+ {
+ throw new InvalidMemoryRegionException($"Virtual address 0x{va:X} and physical address 0x{pa:X} are misaligned and can't be aligned.");
+ }
+
+ ulong endAddress = va + size;
+ va = BitUtils.AlignDown(va, alignment);
+ pa = BitUtils.AlignDown(pa, alignment);
+ size = BitUtils.AlignUp(endAddress, alignment) - va;
+ }
+
+ Update(va, pa, size, MappingType.Shared);
+ }
+ }
+ }
+
+ public void Unmap(ulong va, ulong size)
+ {
+ if (_supports4KBPages)
+ {
+ Base.UnmapView(_backingMemory, va, size);
+ Mirror.UnmapView(_backingMemory, va, size);
+
+ return;
+ }
+
+ lock (_treeLock)
+ {
+ Update(va, 0UL, size, MappingType.None);
+ }
+ }
+
+ private void Update(ulong va, ulong pa, ulong size, MappingType type)
+ {
+ Mapping map = _mappingTree.GetNode(new Mapping(va, 1UL, MappingType.None));
+
+ Update(map, va, pa, size, type);
+ }
+
+ private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
+ {
+ ulong endAddress = va + size;
+
+ for (; map != null; map = map.Successor)
+ {
+ if (map.Address < va)
+ {
+ _mappingTree.Add(map.Split(va));
+ }
+
+ if (map.EndAddress > endAddress)
+ {
+ Mapping newMap = map.Split(endAddress);
+ _mappingTree.Add(newMap);
+ map = newMap;
+ }
+
+ switch (type)
+ {
+ case MappingType.None:
+ if (map.Type == MappingType.Shared)
+ {
+ ulong startOffset = map.Address - va;
+ ulong mapVa = va + startOffset;
+ ulong mapSize = Math.Min(size - startOffset, map.Size);
+ ulong mapEndAddress = mapVa + mapSize;
+ ulong alignment = MemoryBlock.GetPageSize();
+
+ mapVa = BitUtils.AlignDown(mapVa, alignment);
+ mapEndAddress = BitUtils.AlignUp(mapEndAddress, alignment);
+
+ mapSize = mapEndAddress - mapVa;
+
+ Base.UnmapView(_backingMemory, mapVa, mapSize);
+ Mirror.UnmapView(_backingMemory, mapVa, mapSize);
+ }
+ else
+ {
+ UnmapPrivate(va, size);
+ }
+ break;
+ case MappingType.Private:
+ if (map.Type == MappingType.Shared)
+ {
+ throw new InvalidMemoryRegionException($"Private mapping request at 0x{va:X} with size 0x{size:X} overlaps shared mapping at 0x{map.Address:X} with size 0x{map.Size:X}.");
+ }
+ else
+ {
+ MapPrivate(va, size);
+ }
+ break;
+ case MappingType.Shared:
+ if (map.Type != MappingType.None)
+ {
+ throw new InvalidMemoryRegionException($"Shared mapping request at 0x{va:X} with size 0x{size:X} overlaps mapping at 0x{map.Address:X} with size 0x{map.Size:X}.");
+ }
+ else
+ {
+ ulong startOffset = map.Address - va;
+ ulong mapPa = pa + startOffset;
+ ulong mapVa = va + startOffset;
+ ulong mapSize = Math.Min(size - startOffset, map.Size);
+
+ Base.MapView(_backingMemory, mapPa, mapVa, mapSize);
+ Mirror.MapView(_backingMemory, mapPa, mapVa, mapSize);
+ }
+ break;
+ }
+
+ map.UpdateState(type);
+ map = TryCoalesce(map);
+
+ if (map.EndAddress >= endAddress)
+ {
+ break;
+ }
+ }
+
+ return map;
+ }
+
+ private Mapping TryCoalesce(Mapping map)
+ {
+ Mapping previousMap = map.Predecessor;
+ Mapping nextMap = map.Successor;
+
+ if (previousMap != null && CanCoalesce(previousMap, map))
+ {
+ previousMap.Extend(map.Size);
+ _mappingTree.Remove(map);
+ map = previousMap;
+ }
+
+ if (nextMap != null && CanCoalesce(map, nextMap))
+ {
+ map.Extend(nextMap.Size);
+ _mappingTree.Remove(nextMap);
+ }
+
+ return map;
+ }
+
+ private static bool CanCoalesce(Mapping left, Mapping right)
+ {
+ return left.Type == right.Type;
+ }
+
+ private void MapPrivate(ulong va, ulong size)
+ {
+ ulong endAddress = va + size;
+
+ ulong alignment = MemoryBlock.GetPageSize();
+
+ // Expand the range outwards based on page size to ensure that at least the requested region is mapped.
+ ulong vaAligned = BitUtils.AlignDown(va, alignment);
+ ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);
+
+ ulong sizeAligned = endAddressAligned - vaAligned;
+
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
+
+ for (; map != null; map = map.Successor)
+ {
+ if (!map.PrivateAllocation.IsValid)
+ {
+ if (map.Address < vaAligned)
+ {
+ _privateTree.Add(map.Split(vaAligned));
+ }
+
+ if (map.EndAddress > endAddressAligned)
+ {
+ PrivateMapping newMap = map.Split(endAddressAligned);
+ _privateTree.Add(newMap);
+ map = newMap;
+ }
+
+ map.Map(Base, Mirror, _privateMemoryAllocator.Allocate(map.Size, MemoryBlock.GetPageSize()));
+ }
+
+ if (map.EndAddress >= endAddressAligned)
+ {
+ break;
+ }
+ }
+ }
+
+ private void UnmapPrivate(ulong va, ulong size)
+ {
+ ulong endAddress = va + size;
+
+ ulong alignment = MemoryBlock.GetPageSize();
+
+ // Shrink the range inwards based on page size to ensure we won't unmap memory that might be still in use.
+ ulong vaAligned = BitUtils.AlignUp(va, alignment);
+ ulong endAddressAligned = BitUtils.AlignDown(endAddress, alignment);
+
+ if (endAddressAligned <= vaAligned)
+ {
+ return;
+ }
+
+ ulong alignedSize = endAddressAligned - vaAligned;
+
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
+
+ for (; map != null; map = map.Successor)
+ {
+ if (map.PrivateAllocation.IsValid)
+ {
+ if (map.Address < vaAligned)
+ {
+ _privateTree.Add(map.Split(vaAligned));
+ }
+
+ if (map.EndAddress > endAddressAligned)
+ {
+ PrivateMapping newMap = map.Split(endAddressAligned);
+ _privateTree.Add(newMap);
+ map = newMap;
+ }
+
+ map.Unmap(Base, Mirror);
+ map = TryCoalesce(map);
+ }
+
+ if (map.EndAddress >= endAddressAligned)
+ {
+ break;
+ }
+ }
+ }
+
+ private PrivateMapping TryCoalesce(PrivateMapping map)
+ {
+ PrivateMapping previousMap = map.Predecessor;
+ PrivateMapping nextMap = map.Successor;
+
+ if (previousMap != null && CanCoalesce(previousMap, map))
+ {
+ previousMap.Extend(map.Size);
+ _privateTree.Remove(map);
+ map = previousMap;
+ }
+
+ if (nextMap != null && CanCoalesce(map, nextMap))
+ {
+ map.Extend(nextMap.Size);
+ _privateTree.Remove(nextMap);
+ }
+
+ return map;
+ }
+
+ private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
+ {
+ return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
+ }
+
+ public void Dispose()
+ {
+ _privateMemoryAllocator?.Dispose();
+ Base.Dispose();
+ Mirror.Dispose();
+ }
+ }
+}
\ No newline at end of file
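
A note on AddressSpace.Map above: when the host page size is larger than the guest's 4 KiB pages, shared mappings are expanded outwards to host-page alignment, which only works when VA and PA have the same misalignment relative to the host page size. The snippet below is a minimal standalone sketch of that alignment step, assuming a 16 KiB host page and arbitrary example addresses (it is not part of the commit):

using System;

class AlignmentSketch
{
    static void Main()
    {
        ulong alignment = 0x4000;                      // assumed host page size (16 KiB)
        ulong va = 0x10_1000, pa = 0x20_1000, size = 0x2000;

        // Shared mappings can only be expanded if VA and PA share the same
        // misalignment relative to the host page size.
        if ((va & (alignment - 1)) != (pa & (alignment - 1)))
        {
            throw new InvalidOperationException("VA and PA cannot be aligned together.");
        }

        ulong endAddress = va + size;
        ulong alignedVa = va & ~(alignment - 1);                              // 0x100000
        ulong alignedPa = pa & ~(alignment - 1);                              // 0x200000
        ulong alignedEnd = (endAddress + alignment - 1) & ~(alignment - 1);   // 0x104000
        ulong alignedSize = alignedEnd - alignedVa;                           // 0x4000

        Console.WriteLine($"Map 0x{alignedVa:X} -> 0x{alignedPa:X}, size 0x{alignedSize:X}");
    }
}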
diff --git a/src/Ryujinx.Cpu/AppleHv/Arm/ApFlags.cs b/src/Ryujinx.Cpu/AppleHv/Arm/ApFlags.cs
new file mode 100644
index 00000000..95e67432
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/Arm/ApFlags.cs
@@ -0,0 +1,27 @@
+namespace Ryujinx.Cpu.AppleHv.Arm
+{
+ enum ApFlags : ulong
+ {
+ ApShift = 6,
+ PxnShift = 53,
+ UxnShift = 54,
+
+ UserExecuteKernelReadWriteExecute = (0UL << (int)ApShift),
+ UserReadWriteExecuteKernelReadWrite = (1UL << (int)ApShift),
+ UserExecuteKernelReadExecute = (2UL << (int)ApShift),
+ UserReadExecuteKernelReadExecute = (3UL << (int)ApShift),
+
+ UserExecuteKernelReadWrite = (1UL << (int)PxnShift) | (0UL << (int)ApShift),
+ UserExecuteKernelRead = (1UL << (int)PxnShift) | (2UL << (int)ApShift),
+ UserReadExecuteKernelRead = (1UL << (int)PxnShift) | (3UL << (int)ApShift),
+
+ UserNoneKernelReadWriteExecute = (1UL << (int)UxnShift) | (0UL << (int)ApShift),
+ UserReadWriteKernelReadWrite = (1UL << (int)UxnShift) | (1UL << (int)ApShift),
+ UserNoneKernelReadExecute = (1UL << (int)UxnShift) | (2UL << (int)ApShift),
+ UserReadKernelReadExecute = (1UL << (int)UxnShift) | (3UL << (int)ApShift),
+
+ UserNoneKernelReadWrite = (1UL << (int)PxnShift) | (1UL << (int)UxnShift) | (0UL << (int)ApShift),
+ UserNoneKernelRead = (1UL << (int)PxnShift) | (1UL << (int)UxnShift) | (2UL << (int)ApShift),
+ UserReadKernelRead = (1UL << (int)PxnShift) | (1UL << (int)UxnShift) | (3UL << (int)ApShift)
+ }
+}
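
Each ApFlags value packs the stage-1 descriptor's AP[2:1] field (bits 7:6) together with the PXN (bit 53) and UXN (bit 54) execute-never bits, per the AArch64 VMSA descriptor layout. A short sketch (not part of the commit) showing how two of the enum values are composed:

using System;

class ApFlagsSketch
{
    const int ApShift = 6, PxnShift = 53, UxnShift = 54;

    static void Main()
    {
        // AP[2:1] = 0b01 (read/write at EL1 and EL0) plus UXN: no EL0 execute.
        ulong userRwKernelRw = (1UL << UxnShift) | (1UL << ApShift);
        Console.WriteLine($"UserReadWriteKernelReadWrite = 0x{userRwKernelRw:X16}"); // 0x0040000000000040

        // AP[2:1] = 0b11 (read-only at EL1 and EL0) plus PXN and UXN: a data-only page.
        ulong userRoKernelRo = (1UL << PxnShift) | (1UL << UxnShift) | (3UL << ApShift);
        Console.WriteLine($"UserReadKernelRead = 0x{userRoKernelRo:X16}");           // 0x00600000000000C0
    }
}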
diff --git a/src/Ryujinx.Cpu/AppleHv/Arm/ExceptionClass.cs b/src/Ryujinx.Cpu/AppleHv/Arm/ExceptionClass.cs
new file mode 100644
index 00000000..18152f25
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/Arm/ExceptionClass.cs
@@ -0,0 +1,47 @@
+namespace Ryujinx.Cpu.AppleHv.Arm
+{
+ enum ExceptionClass
+ {
+ Unknown = 0b000000,
+ TrappedWfeWfiWfetWfit = 0b000001,
+ TrappedMcrMrcCp15 = 0b000011,
+ TrappedMcrrMrrcCp15 = 0b000100,
+ TrappedMcrMrcCp14 = 0b000101,
+ TrappedLdcStc = 0b000110,
+ TrappedSveFpSimd = 0b000111,
+ TrappedVmrs = 0b001000,
+ TrappedPAuth = 0b001001,
+ TrappedLd64bSt64bSt64bvSt64bv0 = 0b001010,
+ TrappedMrrcCp14 = 0b001100,
+ IllegalExecutionState = 0b001110,
+ SvcAarch32 = 0b010001,
+ HvcAarch32 = 0b010010,
+ SmcAarch32 = 0b010011,
+ SvcAarch64 = 0b010101,
+ HvcAarch64 = 0b010110,
+ SmcAarch64 = 0b010111,
+ TrappedMsrMrsSystem = 0b011000,
+ TrappedSve = 0b011001,
+ TrappedEretEretaaEretab = 0b011010,
+ PointerAuthenticationFailure = 0b011100,
+ ImplementationDefinedEl3 = 0b011111,
+ InstructionAbortLowerEl = 0b100000,
+ InstructionAbortSameEl = 0b100001,
+ PcAlignmentFault = 0b100010,
+ DataAbortLowerEl = 0b100100,
+ DataAbortSameEl = 0b100101,
+ SpAlignmentFault = 0b100110,
+ TrappedFpExceptionAarch32 = 0b101000,
+ TrappedFpExceptionAarch64 = 0b101100,
+ SErrorInterrupt = 0b101111,
+ BreakpointLowerEl = 0b110000,
+ BreakpointSameEl = 0b110001,
+ SoftwareStepLowerEl = 0b110010,
+ SoftwareStepSameEl = 0b110011,
+ WatchpointLowerEl = 0b110100,
+ WatchpointSameEl = 0b110101,
+ BkptAarch32 = 0b111000,
+ VectorCatchAarch32 = 0b111010,
+ BrkAarch64 = 0b111100
+ }
+}
\ No newline at end of file
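
These values correspond to ESR_ELx.EC, bits [31:26] of the syndrome register; HvExecutionContext later shifts the raw syndrome right by 26 to recover the class and reads the low 16 bits as the SVC immediate. A minimal decode sketch with a constructed example syndrome (not part of the commit):

using System;

class EsrDecodeSketch
{
    static void Main()
    {
        // Example syndrome for "SVC #0x41" taken from AArch64: EC = 0b010101, IL = 1, ISS = 0x41.
        uint esr = (0b010101u << 26) | (1u << 25) | 0x41u;

        uint exceptionClass = esr >> 26;    // 0b010101 -> SvcAarch64
        ushort svcImmediate = (ushort)esr;  // 0x41, read the same way in SynchronousException

        Console.WriteLine($"EC = 0x{exceptionClass:X}, SVC imm = 0x{svcImmediate:X}");
    }
}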
diff --git a/src/Ryujinx.Cpu/AppleHv/DummyDiskCacheLoadState.cs b/src/Ryujinx.Cpu/AppleHv/DummyDiskCacheLoadState.cs
new file mode 100644
index 00000000..6a692e74
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/DummyDiskCacheLoadState.cs
@@ -0,0 +1,17 @@
+using System;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ public class DummyDiskCacheLoadState : IDiskCacheLoadState
+ {
+#pragma warning disable CS0067
+ /// <inheritdoc/>
+ public event Action<LoadState, int, int> StateChanged;
+#pragma warning restore CS0067
+
+ /// <inheritdoc/>
+ public void Cancel()
+ {
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/HvAddressSpace.cs b/src/Ryujinx.Cpu/AppleHv/HvAddressSpace.cs
new file mode 100644
index 00000000..78f4c464
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvAddressSpace.cs
@@ -0,0 +1,129 @@
+using Ryujinx.Cpu.AppleHv.Arm;
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ class HvAddressSpace : IDisposable
+ {
+ private const ulong KernelRegionBase = unchecked((ulong)-(1L << 39));
+ private const ulong KernelRegionCodeOffset = 0UL;
+ private const ulong KernelRegionCodeSize = 0x2000UL;
+ private const ulong KernelRegionTlbiEretOffset = KernelRegionCodeOffset + 0x1000UL;
+ private const ulong KernelRegionEretOffset = KernelRegionTlbiEretOffset + 4UL;
+
+ public const ulong KernelRegionEretAddress = KernelRegionBase + KernelRegionEretOffset;
+ public const ulong KernelRegionTlbiEretAddress = KernelRegionBase + KernelRegionTlbiEretOffset;
+
+ private const ulong AllocationGranule = 1UL << 14;
+
+ private readonly ulong _asBase;
+ private readonly ulong _asSize;
+ private readonly ulong _backingSize;
+
+ private readonly HvAddressSpaceRange _userRange;
+ private readonly HvAddressSpaceRange _kernelRange;
+
+ private MemoryBlock _kernelCodeBlock;
+
+ public HvAddressSpace(MemoryBlock backingMemory, ulong asSize)
+ {
+ (_asBase, var ipaAllocator) = HvVm.CreateAddressSpace(backingMemory);
+ _asSize = asSize;
+ _backingSize = backingMemory.Size;
+
+ _userRange = new HvAddressSpaceRange(ipaAllocator);
+ _kernelRange = new HvAddressSpaceRange(ipaAllocator);
+
+ _kernelCodeBlock = new MemoryBlock(AllocationGranule);
+
+ InitializeKernelCode(ipaAllocator);
+ }
+
+ private void InitializeKernelCode(HvIpaAllocator ipaAllocator)
+ {
+ // Write exception handlers.
+ for (ulong offset = 0; offset < 0x800; offset += 0x80)
+ {
+ // Offsets:
+ // 0x0: Synchronous
+ // 0x80: IRQ
+ // 0x100: FIQ
+ // 0x180: SError
+ _kernelCodeBlock.Write(KernelRegionCodeOffset + offset, 0xD41FFFE2u); // HVC #0xFFFF
+ _kernelCodeBlock.Write(KernelRegionCodeOffset + offset + 4, 0xD69F03E0u); // ERET
+ }
+
+ _kernelCodeBlock.Write(KernelRegionTlbiEretOffset, 0xD508831Fu); // TLBI VMALLE1IS
+ _kernelCodeBlock.Write(KernelRegionEretOffset, 0xD69F03E0u); // ERET
+
+ ulong kernelCodePa = ipaAllocator.Allocate(AllocationGranule);
+ HvApi.hv_vm_map((ulong)_kernelCodeBlock.Pointer, kernelCodePa, AllocationGranule, hv_memory_flags_t.HV_MEMORY_READ | hv_memory_flags_t.HV_MEMORY_EXEC).ThrowOnError();
+
+ _kernelRange.Map(KernelRegionCodeOffset, kernelCodePa, KernelRegionCodeSize, ApFlags.UserNoneKernelReadExecute);
+ }
+
+ public void InitializeMmu(ulong vcpu)
+ {
+ HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_VBAR_EL1, KernelRegionBase + KernelRegionCodeOffset);
+
+ HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_TTBR0_EL1, _userRange.GetIpaBase());
+ HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_TTBR1_EL1, _kernelRange.GetIpaBase());
+ HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_MAIR_EL1, 0xffUL);
+ HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_TCR_EL1, 0x00000011B5193519UL);
+ HvApi.hv_vcpu_set_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_SCTLR_EL1, 0x0000000034D5D925UL);
+ }
+
+ public bool GetAndClearUserTlbInvalidationPending()
+ {
+ return _userRange.GetAndClearTlbInvalidationPending();
+ }
+
+ public void MapUser(ulong va, ulong pa, ulong size, MemoryPermission permission)
+ {
+ pa += _asBase;
+
+ lock (_userRange)
+ {
+ _userRange.Map(va, pa, size, GetApFlags(permission));
+ }
+ }
+
+ public void UnmapUser(ulong va, ulong size)
+ {
+ lock (_userRange)
+ {
+ _userRange.Unmap(va, size);
+ }
+ }
+
+ public void ReprotectUser(ulong va, ulong size, MemoryPermission permission)
+ {
+ lock (_userRange)
+ {
+ _userRange.Reprotect(va, size, GetApFlags(permission));
+ }
+ }
+
+ private static ApFlags GetApFlags(MemoryPermission permission)
+ {
+ return permission switch
+ {
+ MemoryPermission.None => ApFlags.UserNoneKernelRead,
+ MemoryPermission.Execute => ApFlags.UserExecuteKernelRead,
+ MemoryPermission.Read => ApFlags.UserReadKernelRead,
+ MemoryPermission.ReadAndWrite => ApFlags.UserReadWriteKernelReadWrite,
+ MemoryPermission.ReadAndExecute => ApFlags.UserReadExecuteKernelRead,
+ MemoryPermission.ReadWriteExecute => ApFlags.UserReadWriteExecuteKernelReadWrite,
+ _ => throw new ArgumentException($"Permission \"{permission}\" is invalid.")
+ };
+ }
+
+ public void Dispose()
+ {
+ _userRange.Dispose();
+ _kernelRange.Dispose();
+ HvVm.DestroyAddressSpace(_asBase, _backingSize);
+ }
+ }
+}
\ No newline at end of file
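
The TCR_EL1 value written in InitializeMmu configures 39-bit address ranges with 4 KiB granules for both translation regimes (T0SZ = T1SZ = 25), which is why KernelRegionBase is defined as -(1 << 39): the trampoline code sits at the bottom of the TTBR1-covered kernel region. A small decode sketch, with field positions taken from the architectural TCR_EL1 layout (T0SZ in bits [5:0], T1SZ in bits [21:16]); not part of the commit:

using System;

class TcrSketch
{
    static void Main()
    {
        const ulong tcr = 0x00000011B5193519UL;

        ulong t0sz = tcr & 0x3F;          // 25 -> TTBR0 covers 2^(64-25) = 2^39 bytes
        ulong t1sz = (tcr >> 16) & 0x3F;  // 25 -> TTBR1 covers the top 2^39 bytes

        ulong kernelRegionBase = unchecked((ulong)-(1L << (64 - (int)t1sz)));

        Console.WriteLine($"T0SZ = {t0sz}, T1SZ = {t1sz}");
        Console.WriteLine($"Kernel region base = 0x{kernelRegionBase:X}"); // 0xFFFFFF8000000000
    }
}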
diff --git a/src/Ryujinx.Cpu/AppleHv/HvAddressSpaceRange.cs b/src/Ryujinx.Cpu/AppleHv/HvAddressSpaceRange.cs
new file mode 100644
index 00000000..ca30bb68
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvAddressSpaceRange.cs
@@ -0,0 +1,370 @@
+using Ryujinx.Cpu.AppleHv.Arm;
+using System;
+using System.Diagnostics;
+using System.Runtime.InteropServices;
+using System.Threading;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ class HvAddressSpaceRange : IDisposable
+ {
+ private const ulong AllocationGranule = 1UL << 14;
+
+ private const ulong AttributesMask = (0x3ffUL << 2) | (0x3fffUL << 50);
+
+ private const ulong BaseAttributes = (1UL << 10) | (3UL << 8); // Access flag set, inner shareable.
+
+ private const int LevelBits = 9;
+ private const int LevelCount = 1 << LevelBits;
+ private const int LevelMask = LevelCount - 1;
+ private const int PageBits = 12;
+ private const int PageSize = 1 << PageBits;
+ private const int PageMask = PageSize - 1;
+ private const int AllLevelsMask = PageMask | (LevelMask << PageBits) | (LevelMask << (PageBits + LevelBits));
+
+ private class PtLevel
+ {
+ public ulong Address => Allocation.Ipa + Allocation.Offset;
+ public int EntriesCount;
+ public readonly HvMemoryBlockAllocation Allocation;
+ public readonly PtLevel[] Next;
+
+ public PtLevel(HvMemoryBlockAllocator blockAllocator, int count, bool hasNext)
+ {
+ ulong size = (ulong)count * sizeof(ulong);
+ Allocation = blockAllocator.Allocate(size, PageSize);
+
+ AsSpan().Fill(0UL);
+
+ if (hasNext)
+ {
+ Next = new PtLevel[count];
+ }
+ }
+
+ public unsafe Span<ulong> AsSpan()
+ {
+ return MemoryMarshal.Cast<byte, ulong>(Allocation.Memory.GetSpan(Allocation.Offset, (int)Allocation.Size));
+ }
+ }
+
+ private PtLevel _level0;
+
+ private int _tlbInvalidationPending;
+
+ private readonly HvIpaAllocator _ipaAllocator;
+ private readonly HvMemoryBlockAllocator _blockAllocator;
+
+ public HvAddressSpaceRange(HvIpaAllocator ipaAllocator)
+ {
+ _ipaAllocator = ipaAllocator;
+ _blockAllocator = new HvMemoryBlockAllocator(ipaAllocator, (int)AllocationGranule);
+ }
+
+ public ulong GetIpaBase()
+ {
+ return EnsureLevel0().Address;
+ }
+
+ public bool GetAndClearTlbInvalidationPending()
+ {
+ return Interlocked.Exchange(ref _tlbInvalidationPending, 0) != 0;
+ }
+
+ public void Map(ulong va, ulong pa, ulong size, ApFlags accessPermission)
+ {
+ MapImpl(va, pa, size, (ulong)accessPermission | BaseAttributes);
+ }
+
+ public void Unmap(ulong va, ulong size)
+ {
+ UnmapImpl(EnsureLevel0(), 0, va, size);
+ Interlocked.Exchange(ref _tlbInvalidationPending, 1);
+ }
+
+ public void Reprotect(ulong va, ulong size, ApFlags accessPermission)
+ {
+ UpdateAttributes(va, size, (ulong)accessPermission | BaseAttributes);
+ }
+
+ private void MapImpl(ulong va, ulong pa, ulong size, ulong attr)
+ {
+ PtLevel level0 = EnsureLevel0();
+
+ ulong endVa = va + size;
+
+ while (va < endVa)
+ {
+ (ulong mapSize, int depth) = GetMapSizeAndDepth(va, pa, endVa);
+
+ PtLevel currentLevel = level0;
+
+ for (int i = 0; i < depth; i++)
+ {
+ int l = (int)(va >> (PageBits + (2 - i) * LevelBits)) & LevelMask;
+ EnsureTable(currentLevel, l, i == 0);
+ currentLevel = currentLevel.Next[l];
+ }
+
+ (ulong blockSize, int blockShift) = GetBlockSizeAndShift(depth);
+
+ for (ulong i = 0; i < mapSize; i += blockSize)
+ {
+ if ((va >> blockShift) << blockShift != va ||
+ (pa >> blockShift) << blockShift != pa)
+ {
+ Debug.Fail($"Block size 0x{blockSize:X} (log2: {blockShift}) is invalid for VA 0x{va:X} or PA 0x{pa:X}.");
+ }
+
+ WriteBlock(currentLevel, (int)(va >> blockShift) & LevelMask, depth, pa, attr);
+
+ va += blockSize;
+ pa += blockSize;
+ }
+ }
+ }
+
+ private void UnmapImpl(PtLevel level, int depth, ulong va, ulong size)
+ {
+ ulong endVa = (va + size + PageMask) & ~((ulong)PageMask);
+ va &= ~((ulong)PageMask);
+
+ (ulong blockSize, int blockShift) = GetBlockSizeAndShift(depth);
+
+ while (va < endVa)
+ {
+ ulong nextEntryVa = GetNextAddress(va, blockSize);
+ ulong chunckSize = Math.Min(endVa - va, nextEntryVa - va);
+
+ int l = (int)(va >> (PageBits + (2 - depth) * LevelBits)) & LevelMask;
+
+ PtLevel nextTable = level.Next != null ? level.Next[l] : null;
+
+ if (nextTable != null)
+ {
+ // Entry is a table, visit it and update attributes as required.
+ UnmapImpl(nextTable, depth + 1, va, chunckSize);
+ }
+ else if (chunckSize != blockSize)
+ {
+ // Entry is a block but is not aligned, we need to turn it into a table.
+ ref ulong pte = ref level.AsSpan()[l];
+ nextTable = CreateTable(pte, depth + 1);
+ level.Next[l] = nextTable;
+
+ // Now that we have a table, we can handle it like the first case.
+ UnmapImpl(nextTable, depth + 1, va, chunckSize);
+
+ // Update PTE to point to the new table.
+ pte = (nextTable.Address & ~(ulong)PageMask) | 3UL;
+ }
+
+ // If entry is a block, or if entry is a table but it is empty, we can remove it.
+ if (nextTable == null || nextTable.EntriesCount == 0)
+ {
+ // Entry is a block and is fully aligned, so we can just set it to 0.
+ if (nextTable != null)
+ {
+ nextTable.Allocation.Dispose();
+ level.Next[l] = null;
+ }
+
+ level.AsSpan()[l] = 0UL;
+ level.EntriesCount--;
+ ValidateEntriesCount(level.EntriesCount);
+ }
+
+ va += chunckSize;
+ }
+ }
+
+ private void UpdateAttributes(ulong va, ulong size, ulong newAttr)
+ {
+ UpdateAttributes(EnsureLevel0(), 0, va, size, newAttr);
+
+ Interlocked.Exchange(ref _tlbInvalidationPending, 1);
+ }
+
+ private void UpdateAttributes(PtLevel level, int depth, ulong va, ulong size, ulong newAttr)
+ {
+ ulong endVa = (va + size + PageSize - 1) & ~((ulong)PageSize - 1);
+ va &= ~((ulong)PageSize - 1);
+
+ (ulong blockSize, int blockShift) = GetBlockSizeAndShift(depth);
+
+ while (va < endVa)
+ {
+ ulong nextEntryVa = GetNextAddress(va, blockSize);
+ ulong chunckSize = Math.Min(endVa - va, nextEntryVa - va);
+
+ int l = (int)(va >> (PageBits + (2 - depth) * LevelBits)) & LevelMask;
+
+ ref ulong pte = ref level.AsSpan()[l];
+
+ // First check if the region is mapped.
+ if ((pte & 3) != 0)
+ {
+ PtLevel nextTable = level.Next != null ? level.Next[l] : null;
+
+ if (nextTable != null)
+ {
+ // Entry is a table, visit it and update attributes as required.
+ UpdateAttributes(nextTable, depth + 1, va, chunckSize, newAttr);
+ }
+ else if (chunckSize != blockSize)
+ {
+ // Entry is a block but is not aligned, we need to turn it into a table.
+ nextTable = CreateTable(pte, depth + 1);
+ level.Next[l] = nextTable;
+
+ // Now that we have a table, we can handle it like the first case.
+ UpdateAttributes(nextTable, depth + 1, va, chunckSize, newAttr);
+
+ // Update PTE to point to the new table.
+ pte = (nextTable.Address & ~(ulong)PageMask) | 3UL;
+ }
+ else
+ {
+ // Entry is a block and is fully aligned, so we can just update the attributes.
+ // Update PTE with the new attributes.
+ pte = (pte & ~AttributesMask) | newAttr;
+ }
+ }
+
+ va += chunckSize;
+ }
+ }
+
+ private PtLevel CreateTable(ulong pte, int depth)
+ {
+ pte &= ~3UL;
+ pte |= (depth == 2 ? 3UL : 1UL);
+
+ PtLevel level = new PtLevel(_blockAllocator, LevelCount, depth < 2);
+ Span<ulong> currentLevel = level.AsSpan();
+
+ (ulong blockSize, int blockShift) = GetBlockSizeAndShift(depth);
+
+ // Fill in the blocks.
+ for (int i = 0; i < LevelCount; i++)
+ {
+ ulong offset = (ulong)i << blockShift;
+ currentLevel[i] = pte + offset;
+ }
+
+ level.EntriesCount = LevelCount;
+
+ return level;
+ }
+
+ private static (ulong, int) GetBlockSizeAndShift(int depth)
+ {
+ int blockShift = PageBits + (2 - depth) * LevelBits;
+ ulong blockSize = 1UL << blockShift;
+
+ return (blockSize, blockShift);
+ }
+
+ private static (ulong, int) GetMapSizeAndDepth(ulong va, ulong pa, ulong endVa)
+ {
+ // Both virtual and physical addresses must be aligned to the block size.
+ ulong combinedAddress = va | pa;
+
+ ulong l0Alignment = 1UL << (PageBits + LevelBits * 2);
+ ulong l1Alignment = 1UL << (PageBits + LevelBits);
+
+ if ((combinedAddress & (l0Alignment - 1)) == 0 && AlignDown(endVa, l0Alignment) > va)
+ {
+ return (AlignDown(endVa, l0Alignment) - va, 0);
+ }
+ else if ((combinedAddress & (l1Alignment - 1)) == 0 && AlignDown(endVa, l1Alignment) > va)
+ {
+ ulong nextOrderVa = GetNextAddress(va, l0Alignment);
+
+ if (nextOrderVa <= endVa)
+ {
+ return (nextOrderVa - va, 1);
+ }
+ else
+ {
+ return (AlignDown(endVa, l1Alignment) - va, 1);
+ }
+ }
+ else
+ {
+ ulong nextOrderVa = GetNextAddress(va, l1Alignment);
+
+ if (nextOrderVa <= endVa)
+ {
+ return (nextOrderVa - va, 2);
+ }
+ else
+ {
+ return (endVa - va, 2);
+ }
+ }
+ }
+
+ private static ulong AlignDown(ulong va, ulong alignment)
+ {
+ return va & ~(alignment - 1);
+ }
+
+ private static ulong GetNextAddress(ulong va, ulong alignment)
+ {
+ return (va + alignment) & ~(alignment - 1);
+ }
+
+ private PtLevel EnsureLevel0()
+ {
+ PtLevel level0 = _level0;
+
+ if (level0 == null)
+ {
+ level0 = new PtLevel(_blockAllocator, LevelCount, true);
+ _level0 = level0;
+ }
+
+ return level0;
+ }
+
+ private void EnsureTable(PtLevel level, int index, bool hasNext)
+ {
+ Span<ulong> currentTable = level.AsSpan();
+
+ if ((currentTable[index] & 1) == 0)
+ {
+ PtLevel nextLevel = new PtLevel(_blockAllocator, LevelCount, hasNext);
+
+ currentTable[index] = (nextLevel.Address & ~(ulong)PageMask) | 3UL;
+ level.Next[index] = nextLevel;
+ level.EntriesCount++;
+ ValidateEntriesCount(level.EntriesCount);
+ }
+ else if (level.Next[index] == null)
+ {
+ Debug.Fail($"Index {index} is block, expected a table.");
+ }
+ }
+
+ private void WriteBlock(PtLevel level, int index, int depth, ulong pa, ulong attr)
+ {
+ Span<ulong> currentTable = level.AsSpan();
+
+ currentTable[index] = (pa & ~((ulong)AllLevelsMask >> (depth * LevelBits))) | (depth == 2 ? 3UL : 1UL) | attr;
+
+ level.EntriesCount++;
+ ValidateEntriesCount(level.EntriesCount);
+ }
+
+ private static void ValidateEntriesCount(int count)
+ {
+ Debug.Assert(count >= 0 && count <= LevelCount, $"Entries count {count} is invalid.");
+ }
+
+ public void Dispose()
+ {
+ _blockAllocator.Dispose();
+ }
+ }
+}
\ No newline at end of file
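
With 4 KiB pages and 9 bits per translation level, GetBlockSizeAndShift yields 1 GiB, 2 MiB and 4 KiB blocks for depths 0, 1 and 2, which is what GetMapSizeAndDepth relies on when it picks the shallowest depth both addresses are aligned to. A standalone reproduction of that math, using only constants from the file above:

using System;

class BlockSizeSketch
{
    const int PageBits = 12;  // 4 KiB pages
    const int LevelBits = 9;  // 512 entries per table level

    static (ulong Size, int Shift) GetBlockSizeAndShift(int depth)
    {
        int shift = PageBits + (2 - depth) * LevelBits;
        return (1UL << shift, shift);
    }

    static void Main()
    {
        for (int depth = 0; depth <= 2; depth++)
        {
            (ulong size, int shift) = GetBlockSizeAndShift(depth);
            Console.WriteLine($"depth {depth}: block size 0x{size:X} (shift {shift})");
        }
        // depth 0: 0x40000000 (1 GiB), depth 1: 0x200000 (2 MiB), depth 2: 0x1000 (4 KiB)
    }
}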
diff --git a/src/Ryujinx.Cpu/AppleHv/HvApi.cs b/src/Ryujinx.Cpu/AppleHv/HvApi.cs
new file mode 100644
index 00000000..d7628bb5
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvApi.cs
@@ -0,0 +1,320 @@
+using ARMeilleure.State;
+using System;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ struct hv_vcpu_exit_exception_t
+ {
+#pragma warning disable CS0649
+ public ulong syndrome;
+ public ulong virtual_address;
+ public ulong physical_address;
+#pragma warning restore CS0649
+ }
+
+ struct hv_vcpu_exit_t
+ {
+#pragma warning disable CS0649
+ public uint reason;
+ public hv_vcpu_exit_exception_t exception;
+#pragma warning restore CS0649
+ }
+
+ enum hv_reg_t : uint
+ {
+ HV_REG_X0,
+ HV_REG_X1,
+ HV_REG_X2,
+ HV_REG_X3,
+ HV_REG_X4,
+ HV_REG_X5,
+ HV_REG_X6,
+ HV_REG_X7,
+ HV_REG_X8,
+ HV_REG_X9,
+ HV_REG_X10,
+ HV_REG_X11,
+ HV_REG_X12,
+ HV_REG_X13,
+ HV_REG_X14,
+ HV_REG_X15,
+ HV_REG_X16,
+ HV_REG_X17,
+ HV_REG_X18,
+ HV_REG_X19,
+ HV_REG_X20,
+ HV_REG_X21,
+ HV_REG_X22,
+ HV_REG_X23,
+ HV_REG_X24,
+ HV_REG_X25,
+ HV_REG_X26,
+ HV_REG_X27,
+ HV_REG_X28,
+ HV_REG_X29,
+ HV_REG_FP = HV_REG_X29,
+ HV_REG_X30,
+ HV_REG_LR = HV_REG_X30,
+ HV_REG_PC,
+ HV_REG_FPCR,
+ HV_REG_FPSR,
+ HV_REG_CPSR,
+ }
+
+ enum hv_simd_fp_reg_t : uint
+ {
+ HV_SIMD_FP_REG_Q0,
+ HV_SIMD_FP_REG_Q1,
+ HV_SIMD_FP_REG_Q2,
+ HV_SIMD_FP_REG_Q3,
+ HV_SIMD_FP_REG_Q4,
+ HV_SIMD_FP_REG_Q5,
+ HV_SIMD_FP_REG_Q6,
+ HV_SIMD_FP_REG_Q7,
+ HV_SIMD_FP_REG_Q8,
+ HV_SIMD_FP_REG_Q9,
+ HV_SIMD_FP_REG_Q10,
+ HV_SIMD_FP_REG_Q11,
+ HV_SIMD_FP_REG_Q12,
+ HV_SIMD_FP_REG_Q13,
+ HV_SIMD_FP_REG_Q14,
+ HV_SIMD_FP_REG_Q15,
+ HV_SIMD_FP_REG_Q16,
+ HV_SIMD_FP_REG_Q17,
+ HV_SIMD_FP_REG_Q18,
+ HV_SIMD_FP_REG_Q19,
+ HV_SIMD_FP_REG_Q20,
+ HV_SIMD_FP_REG_Q21,
+ HV_SIMD_FP_REG_Q22,
+ HV_SIMD_FP_REG_Q23,
+ HV_SIMD_FP_REG_Q24,
+ HV_SIMD_FP_REG_Q25,
+ HV_SIMD_FP_REG_Q26,
+ HV_SIMD_FP_REG_Q27,
+ HV_SIMD_FP_REG_Q28,
+ HV_SIMD_FP_REG_Q29,
+ HV_SIMD_FP_REG_Q30,
+ HV_SIMD_FP_REG_Q31,
+ }
+
+ enum hv_sys_reg_t : ushort
+ {
+ HV_SYS_REG_DBGBVR0_EL1 = 0x8004,
+ HV_SYS_REG_DBGBCR0_EL1 = 0x8005,
+ HV_SYS_REG_DBGWVR0_EL1 = 0x8006,
+ HV_SYS_REG_DBGWCR0_EL1 = 0x8007,
+ HV_SYS_REG_DBGBVR1_EL1 = 0x800c,
+ HV_SYS_REG_DBGBCR1_EL1 = 0x800d,
+ HV_SYS_REG_DBGWVR1_EL1 = 0x800e,
+ HV_SYS_REG_DBGWCR1_EL1 = 0x800f,
+ HV_SYS_REG_MDCCINT_EL1 = 0x8010,
+ HV_SYS_REG_MDSCR_EL1 = 0x8012,
+ HV_SYS_REG_DBGBVR2_EL1 = 0x8014,
+ HV_SYS_REG_DBGBCR2_EL1 = 0x8015,
+ HV_SYS_REG_DBGWVR2_EL1 = 0x8016,
+ HV_SYS_REG_DBGWCR2_EL1 = 0x8017,
+ HV_SYS_REG_DBGBVR3_EL1 = 0x801c,
+ HV_SYS_REG_DBGBCR3_EL1 = 0x801d,
+ HV_SYS_REG_DBGWVR3_EL1 = 0x801e,
+ HV_SYS_REG_DBGWCR3_EL1 = 0x801f,
+ HV_SYS_REG_DBGBVR4_EL1 = 0x8024,
+ HV_SYS_REG_DBGBCR4_EL1 = 0x8025,
+ HV_SYS_REG_DBGWVR4_EL1 = 0x8026,
+ HV_SYS_REG_DBGWCR4_EL1 = 0x8027,
+ HV_SYS_REG_DBGBVR5_EL1 = 0x802c,
+ HV_SYS_REG_DBGBCR5_EL1 = 0x802d,
+ HV_SYS_REG_DBGWVR5_EL1 = 0x802e,
+ HV_SYS_REG_DBGWCR5_EL1 = 0x802f,
+ HV_SYS_REG_DBGBVR6_EL1 = 0x8034,
+ HV_SYS_REG_DBGBCR6_EL1 = 0x8035,
+ HV_SYS_REG_DBGWVR6_EL1 = 0x8036,
+ HV_SYS_REG_DBGWCR6_EL1 = 0x8037,
+ HV_SYS_REG_DBGBVR7_EL1 = 0x803c,
+ HV_SYS_REG_DBGBCR7_EL1 = 0x803d,
+ HV_SYS_REG_DBGWVR7_EL1 = 0x803e,
+ HV_SYS_REG_DBGWCR7_EL1 = 0x803f,
+ HV_SYS_REG_DBGBVR8_EL1 = 0x8044,
+ HV_SYS_REG_DBGBCR8_EL1 = 0x8045,
+ HV_SYS_REG_DBGWVR8_EL1 = 0x8046,
+ HV_SYS_REG_DBGWCR8_EL1 = 0x8047,
+ HV_SYS_REG_DBGBVR9_EL1 = 0x804c,
+ HV_SYS_REG_DBGBCR9_EL1 = 0x804d,
+ HV_SYS_REG_DBGWVR9_EL1 = 0x804e,
+ HV_SYS_REG_DBGWCR9_EL1 = 0x804f,
+ HV_SYS_REG_DBGBVR10_EL1 = 0x8054,
+ HV_SYS_REG_DBGBCR10_EL1 = 0x8055,
+ HV_SYS_REG_DBGWVR10_EL1 = 0x8056,
+ HV_SYS_REG_DBGWCR10_EL1 = 0x8057,
+ HV_SYS_REG_DBGBVR11_EL1 = 0x805c,
+ HV_SYS_REG_DBGBCR11_EL1 = 0x805d,
+ HV_SYS_REG_DBGWVR11_EL1 = 0x805e,
+ HV_SYS_REG_DBGWCR11_EL1 = 0x805f,
+ HV_SYS_REG_DBGBVR12_EL1 = 0x8064,
+ HV_SYS_REG_DBGBCR12_EL1 = 0x8065,
+ HV_SYS_REG_DBGWVR12_EL1 = 0x8066,
+ HV_SYS_REG_DBGWCR12_EL1 = 0x8067,
+ HV_SYS_REG_DBGBVR13_EL1 = 0x806c,
+ HV_SYS_REG_DBGBCR13_EL1 = 0x806d,
+ HV_SYS_REG_DBGWVR13_EL1 = 0x806e,
+ HV_SYS_REG_DBGWCR13_EL1 = 0x806f,
+ HV_SYS_REG_DBGBVR14_EL1 = 0x8074,
+ HV_SYS_REG_DBGBCR14_EL1 = 0x8075,
+ HV_SYS_REG_DBGWVR14_EL1 = 0x8076,
+ HV_SYS_REG_DBGWCR14_EL1 = 0x8077,
+ HV_SYS_REG_DBGBVR15_EL1 = 0x807c,
+ HV_SYS_REG_DBGBCR15_EL1 = 0x807d,
+ HV_SYS_REG_DBGWVR15_EL1 = 0x807e,
+ HV_SYS_REG_DBGWCR15_EL1 = 0x807f,
+ HV_SYS_REG_MIDR_EL1 = 0xc000,
+ HV_SYS_REG_MPIDR_EL1 = 0xc005,
+ HV_SYS_REG_ID_AA64PFR0_EL1 = 0xc020,
+ HV_SYS_REG_ID_AA64PFR1_EL1 = 0xc021,
+ HV_SYS_REG_ID_AA64DFR0_EL1 = 0xc028,
+ HV_SYS_REG_ID_AA64DFR1_EL1 = 0xc029,
+ HV_SYS_REG_ID_AA64ISAR0_EL1 = 0xc030,
+ HV_SYS_REG_ID_AA64ISAR1_EL1 = 0xc031,
+ HV_SYS_REG_ID_AA64MMFR0_EL1 = 0xc038,
+ HV_SYS_REG_ID_AA64MMFR1_EL1 = 0xc039,
+ HV_SYS_REG_ID_AA64MMFR2_EL1 = 0xc03a,
+ HV_SYS_REG_SCTLR_EL1 = 0xc080,
+ HV_SYS_REG_CPACR_EL1 = 0xc082,
+ HV_SYS_REG_TTBR0_EL1 = 0xc100,
+ HV_SYS_REG_TTBR1_EL1 = 0xc101,
+ HV_SYS_REG_TCR_EL1 = 0xc102,
+ HV_SYS_REG_APIAKEYLO_EL1 = 0xc108,
+ HV_SYS_REG_APIAKEYHI_EL1 = 0xc109,
+ HV_SYS_REG_APIBKEYLO_EL1 = 0xc10a,
+ HV_SYS_REG_APIBKEYHI_EL1 = 0xc10b,
+ HV_SYS_REG_APDAKEYLO_EL1 = 0xc110,
+ HV_SYS_REG_APDAKEYHI_EL1 = 0xc111,
+ HV_SYS_REG_APDBKEYLO_EL1 = 0xc112,
+ HV_SYS_REG_APDBKEYHI_EL1 = 0xc113,
+ HV_SYS_REG_APGAKEYLO_EL1 = 0xc118,
+ HV_SYS_REG_APGAKEYHI_EL1 = 0xc119,
+ HV_SYS_REG_SPSR_EL1 = 0xc200,
+ HV_SYS_REG_ELR_EL1 = 0xc201,
+ HV_SYS_REG_SP_EL0 = 0xc208,
+ HV_SYS_REG_AFSR0_EL1 = 0xc288,
+ HV_SYS_REG_AFSR1_EL1 = 0xc289,
+ HV_SYS_REG_ESR_EL1 = 0xc290,
+ HV_SYS_REG_FAR_EL1 = 0xc300,
+ HV_SYS_REG_PAR_EL1 = 0xc3a0,
+ HV_SYS_REG_MAIR_EL1 = 0xc510,
+ HV_SYS_REG_AMAIR_EL1 = 0xc518,
+ HV_SYS_REG_VBAR_EL1 = 0xc600,
+ HV_SYS_REG_CONTEXTIDR_EL1 = 0xc681,
+ HV_SYS_REG_TPIDR_EL1 = 0xc684,
+ HV_SYS_REG_CNTKCTL_EL1 = 0xc708,
+ HV_SYS_REG_CSSELR_EL1 = 0xd000,
+ HV_SYS_REG_TPIDR_EL0 = 0xde82,
+ HV_SYS_REG_TPIDRRO_EL0 = 0xde83,
+ HV_SYS_REG_CNTV_CTL_EL0 = 0xdf19,
+ HV_SYS_REG_CNTV_CVAL_EL0 = 0xdf1a,
+ HV_SYS_REG_SP_EL1 = 0xe208,
+ }
+
+ enum hv_memory_flags_t : ulong
+ {
+ HV_MEMORY_READ = 1UL << 0,
+ HV_MEMORY_WRITE = 1UL << 1,
+ HV_MEMORY_EXEC = 1UL << 2
+ }
+
+ enum hv_result_t : uint
+ {
+ HV_SUCCESS = 0,
+ HV_ERROR = 0xfae94001,
+ HV_BUSY = 0xfae94002,
+ HV_BAD_ARGUMENT = 0xfae94003,
+ HV_NO_RESOURCES = 0xfae94005,
+ HV_NO_DEVICE = 0xfae94006,
+ HV_DENIED = 0xfae94007,
+ HV_UNSUPPORTED = 0xfae9400f
+ }
+
+ enum hv_interrupt_type_t : uint
+ {
+ HV_INTERRUPT_TYPE_IRQ,
+ HV_INTERRUPT_TYPE_FIQ
+ }
+
+ struct hv_simd_fp_uchar16_t
+ {
+ public ulong Low;
+ public ulong High;
+ }
+
+ static class HvResultExtensions
+ {
+ public static void ThrowOnError(this hv_result_t result)
+ {
+ if (result != hv_result_t.HV_SUCCESS)
+ {
+ throw new Exception($"Unexpected result \"{result}\".");
+ }
+ }
+ }
+
+ static partial class HvApi
+ {
+ public const string LibraryName = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vm_get_max_vcpu_count(out uint max_vcpu_count);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vm_create(IntPtr config);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vm_destroy();
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vm_map(ulong addr, ulong ipa, ulong size, hv_memory_flags_t flags);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vm_unmap(ulong ipa, ulong size);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vm_protect(ulong ipa, ulong size, hv_memory_flags_t flags);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public unsafe static partial hv_result_t hv_vcpu_create(out ulong vcpu, ref hv_vcpu_exit_t* exit, IntPtr config);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public unsafe static partial hv_result_t hv_vcpu_destroy(ulong vcpu);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_run(ulong vcpu);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpus_exit(ref ulong vcpus, uint vcpu_count);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_set_vtimer_mask(ulong vcpu, [MarshalAs(UnmanagedType.Bool)] bool vtimer_is_masked);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_get_reg(ulong vcpu, hv_reg_t reg, out ulong value);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_set_reg(ulong vcpu, hv_reg_t reg, ulong value);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_get_simd_fp_reg(ulong vcpu, hv_simd_fp_reg_t reg, out hv_simd_fp_uchar16_t value);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_set_simd_fp_reg(ulong vcpu, hv_simd_fp_reg_t reg, hv_simd_fp_uchar16_t value); // DO NOT USE DIRECTLY!
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_get_sys_reg(ulong vcpu, hv_sys_reg_t reg, out ulong value);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_set_sys_reg(ulong vcpu, hv_sys_reg_t reg, ulong value);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_get_pending_interrupt(ulong vcpu, hv_interrupt_type_t type, [MarshalAs(UnmanagedType.Bool)] out bool pending);
+
+ [LibraryImport(LibraryName, SetLastError = true)]
+ public static partial hv_result_t hv_vcpu_set_pending_interrupt(ulong vcpu, hv_interrupt_type_t type, [MarshalAs(UnmanagedType.Bool)] bool pending);
+ }
+}
\ No newline at end of file
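
All of the bindings above return an hv_result_t, and the ThrowOnError extension turns anything other than HV_SUCCESS into an exception at the call site. A minimal usage sketch follows; it assumes code living inside Ryujinx.Cpu (the types are internal), an Apple Silicon macOS host whose process has the hypervisor entitlement, and that a null configuration is acceptable for hv_vm_create:

using System;
using Ryujinx.Cpu.AppleHv;

class HvApiUsageSketch
{
    static void QueryVcpuLimit()
    {
        // Any failure (e.g. HV_DENIED without the entitlement, HV_UNSUPPORTED on
        // unsupported hardware) surfaces as an exception via ThrowOnError.
        HvApi.hv_vm_create(IntPtr.Zero).ThrowOnError();

        HvApi.hv_vm_get_max_vcpu_count(out uint maxVcpuCount).ThrowOnError();
        Console.WriteLine($"Hypervisor.framework allows up to {maxVcpuCount} vCPUs.");

        HvApi.hv_vm_destroy().ThrowOnError();
    }
}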
diff --git a/src/Ryujinx.Cpu/AppleHv/HvCpuContext.cs b/src/Ryujinx.Cpu/AppleHv/HvCpuContext.cs
new file mode 100644
index 00000000..de782d54
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvCpuContext.cs
@@ -0,0 +1,47 @@
+using ARMeilleure.Memory;
+using System;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ class HvCpuContext : ICpuContext
+ {
+ private readonly ITickSource _tickSource;
+ private readonly HvMemoryManager _memoryManager;
+
+ public HvCpuContext(ITickSource tickSource, IMemoryManager memory, bool for64Bit)
+ {
+ _tickSource = tickSource;
+ _memoryManager = (HvMemoryManager)memory;
+ }
+
+ private void UnmapHandler(ulong address, ulong size)
+ {
+ }
+
+ /// <inheritdoc/>
+ public IExecutionContext CreateExecutionContext(ExceptionCallbacks exceptionCallbacks)
+ {
+ return new HvExecutionContext(_tickSource, exceptionCallbacks);
+ }
+
+ /// <inheritdoc/>
+ public void Execute(IExecutionContext context, ulong address)
+ {
+ ((HvExecutionContext)context).Execute(_memoryManager, address);
+ }
+
+ /// <inheritdoc/>
+ public void InvalidateCacheRegion(ulong address, ulong size)
+ {
+ }
+
+ public IDiskCacheLoadState LoadDiskCache(string titleIdText, string displayVersion, bool enabled)
+ {
+ return new DummyDiskCacheLoadState();
+ }
+
+ public void PrepareCodeRange(ulong address, ulong size)
+ {
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/HvEngine.cs b/src/Ryujinx.Cpu/AppleHv/HvEngine.cs
new file mode 100644
index 00000000..7ad99cb9
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvEngine.cs
@@ -0,0 +1,20 @@
+using ARMeilleure.Memory;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ public class HvEngine : ICpuEngine
+ {
+ private readonly ITickSource _tickSource;
+
+ public HvEngine(ITickSource tickSource)
+ {
+ _tickSource = tickSource;
+ }
+
+ /// <inheritdoc/>
+ public ICpuContext CreateCpuContext(IMemoryManager memoryManager, bool for64Bit)
+ {
+ return new HvCpuContext(_tickSource, memoryManager, for64Bit);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/HvExecutionContext.cs b/src/Ryujinx.Cpu/AppleHv/HvExecutionContext.cs
new file mode 100644
index 00000000..dc1f6f6d
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvExecutionContext.cs
@@ -0,0 +1,284 @@
+using ARMeilleure.State;
+using Ryujinx.Cpu.AppleHv.Arm;
+using Ryujinx.Memory.Tracking;
+using System;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ class HvExecutionContext : IExecutionContext
+ {
+ /// <inheritdoc/>
+ public ulong Pc => _impl.ElrEl1;
+
+ /// <inheritdoc/>
+ public long TpidrEl0
+ {
+ get => _impl.TpidrEl0;
+ set => _impl.TpidrEl0 = value;
+ }
+
+ /// <inheritdoc/>
+ public long TpidrroEl0
+ {
+ get => _impl.TpidrroEl0;
+ set => _impl.TpidrroEl0 = value;
+ }
+
+ /// <inheritdoc/>
+ public uint Pstate
+ {
+ get => _impl.Pstate;
+ set => _impl.Pstate = value;
+ }
+
+ /// <inheritdoc/>
+ public uint Fpcr
+ {
+ get => _impl.Fpcr;
+ set => _impl.Fpcr = value;
+ }
+
+ /// <inheritdoc/>
+ public uint Fpsr
+ {
+ get => _impl.Fpsr;
+ set => _impl.Fpsr = value;
+ }
+
+ /// <inheritdoc/>
+ public bool IsAarch32
+ {
+ get => false;
+ set
+ {
+ if (value)
+ {
+ throw new NotSupportedException();
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ public bool Running { get; private set; }
+
+ private readonly ICounter _counter;
+ private readonly IHvExecutionContext _shadowContext;
+ private IHvExecutionContext _impl;
+
+ private readonly ExceptionCallbacks _exceptionCallbacks;
+
+ public HvExecutionContext(ICounter counter, ExceptionCallbacks exceptionCallbacks)
+ {
+ _counter = counter;
+ _shadowContext = new HvExecutionContextShadow();
+ _impl = _shadowContext;
+ _exceptionCallbacks = exceptionCallbacks;
+ Running = true;
+ }
+
+ /// <inheritdoc/>
+ public ulong GetX(int index) => _impl.GetX(index);
+
+ /// <inheritdoc/>
+ public void SetX(int index, ulong value) => _impl.SetX(index, value);
+
+ /// <inheritdoc/>
+ public V128 GetV(int index) => _impl.GetV(index);
+
+ /// <inheritdoc/>
+ public void SetV(int index, V128 value) => _impl.SetV(index, value);
+
+ private void InterruptHandler()
+ {
+ _exceptionCallbacks.InterruptCallback?.Invoke(this);
+ }
+
+ private void BreakHandler(ulong address, int imm)
+ {
+ _exceptionCallbacks.BreakCallback?.Invoke(this, address, imm);
+ }
+
+ private void SupervisorCallHandler(ulong address, int imm)
+ {
+ _exceptionCallbacks.SupervisorCallback?.Invoke(this, address, imm);
+ }
+
+ private void UndefinedHandler(ulong address, int opCode)
+ {
+ _exceptionCallbacks.UndefinedCallback?.Invoke(this, address, opCode);
+ }
+
+ /// <inheritdoc/>
+ public void RequestInterrupt()
+ {
+ _impl.RequestInterrupt();
+ }
+
+ /// <inheritdoc/>
+ public void StopRunning()
+ {
+ Running = false;
+ RequestInterrupt();
+ }
+
+ public unsafe void Execute(HvMemoryManager memoryManager, ulong address)
+ {
+ HvVcpu vcpu = HvVcpuPool.Instance.Create(memoryManager.AddressSpace, _shadowContext, SwapContext);
+
+ HvApi.hv_vcpu_set_reg(vcpu.Handle, hv_reg_t.HV_REG_PC, address).ThrowOnError();
+
+ while (Running)
+ {
+ HvApi.hv_vcpu_run(vcpu.Handle).ThrowOnError();
+
+ uint reason = vcpu.ExitInfo->reason;
+
+ if (reason == 1)
+ {
+ uint hvEsr = (uint)vcpu.ExitInfo->exception.syndrome;
+ ExceptionClass hvEc = (ExceptionClass)(hvEsr >> 26);
+
+ if (hvEc != ExceptionClass.HvcAarch64)
+ {
+ throw new Exception($"Unhandled exception from guest kernel with ESR 0x{hvEsr:X} ({hvEc}).");
+ }
+
+ address = SynchronousException(memoryManager, ref vcpu);
+ HvApi.hv_vcpu_set_reg(vcpu.Handle, hv_reg_t.HV_REG_PC, address).ThrowOnError();
+ }
+ else if (reason == 0)
+ {
+ if (_impl.GetAndClearInterruptRequested())
+ {
+ ReturnToPool(vcpu);
+ InterruptHandler();
+ vcpu = RentFromPool(memoryManager.AddressSpace, vcpu);
+ }
+ }
+ else
+ {
+ throw new Exception($"Unhandled exit reason {reason}.");
+ }
+ }
+
+ HvVcpuPool.Instance.Destroy(vcpu, SwapContext);
+ }
+
+ private ulong SynchronousException(HvMemoryManager memoryManager, ref HvVcpu vcpu)
+ {
+ ulong vcpuHandle = vcpu.Handle;
+
+ HvApi.hv_vcpu_get_sys_reg(vcpuHandle, hv_sys_reg_t.HV_SYS_REG_ELR_EL1, out ulong elr).ThrowOnError();
+ HvApi.hv_vcpu_get_sys_reg(vcpuHandle, hv_sys_reg_t.HV_SYS_REG_ESR_EL1, out ulong esr).ThrowOnError();
+
+ ExceptionClass ec = (ExceptionClass)((uint)esr >> 26);
+
+ switch (ec)
+ {
+ case ExceptionClass.DataAbortLowerEl:
+ DataAbort(memoryManager.Tracking, vcpuHandle, (uint)esr);
+ break;
+ case ExceptionClass.TrappedMsrMrsSystem:
+ InstructionTrap((uint)esr);
+ HvApi.hv_vcpu_set_sys_reg(vcpuHandle, hv_sys_reg_t.HV_SYS_REG_ELR_EL1, elr + 4UL).ThrowOnError();
+ break;
+ case ExceptionClass.SvcAarch64:
+ ReturnToPool(vcpu);
+ ushort id = (ushort)esr;
+ SupervisorCallHandler(elr - 4UL, id);
+ vcpu = RentFromPool(memoryManager.AddressSpace, vcpu);
+ break;
+ default:
+ throw new Exception($"Unhandled guest exception {ec}.");
+ }
+
+ // Make sure we will continue running at EL0.
+ if (memoryManager.AddressSpace.GetAndClearUserTlbInvalidationPending())
+ {
+ // TODO: Invalidate only the range that was modified?
+ return HvAddressSpace.KernelRegionTlbiEretAddress;
+ }
+ else
+ {
+ return HvAddressSpace.KernelRegionEretAddress;
+ }
+ }
+
+ private void DataAbort(MemoryTracking tracking, ulong vcpu, uint esr)
+ {
+ bool write = (esr & (1u << 6)) != 0;
+ bool farValid = (esr & (1u << 10)) == 0;
+ int accessSizeLog2 = (int)((esr >> 22) & 3);
+
+ if (farValid)
+ {
+ HvApi.hv_vcpu_get_sys_reg(vcpu, hv_sys_reg_t.HV_SYS_REG_FAR_EL1, out ulong far).ThrowOnError();
+
+ ulong size = 1UL << accessSizeLog2;
+
+ if (!tracking.VirtualMemoryEvent(far, size, write))
+ {
+ string rw = write ? "write" : "read";
+ throw new Exception($"Unhandled invalid memory access at VA 0x{far:X} with size 0x{size:X} ({rw}).");
+ }
+ }
+ else
+ {
+ throw new Exception($"Unhandled invalid memory access at unknown VA with ESR 0x{esr:X}.");
+ }
+ }
+
+ private void InstructionTrap(uint esr)
+ {
+ bool read = (esr & 1) != 0;
+ uint rt = (esr >> 5) & 0x1f;
+
+ if (read)
+ {
+ // Op0 Op2 Op1 CRn 00000 CRm
+ switch ((esr >> 1) & 0x1ffe0f)
+ {
+ case 0b11_000_011_1110_00000_0000: // CNTFRQ_EL0
+ WriteRt(rt, _counter.Frequency);
+ break;
+ case 0b11_001_011_1110_00000_0000: // CNTPCT_EL0
+ WriteRt(rt, _counter.Counter);
+ break;
+ default:
+ throw new Exception($"Unhandled system register read with ESR 0x{esr:X}");
+ }
+ }
+ else
+ {
+ throw new Exception($"Unhandled system register write with ESR 0x{esr:X}");
+ }
+ }
+
+ private void WriteRt(uint rt, ulong value)
+ {
+ if (rt < 31)
+ {
+ SetX((int)rt, value);
+ }
+ }
+
+ private void ReturnToPool(HvVcpu vcpu)
+ {
+ HvVcpuPool.Instance.Return(vcpu, SwapContext);
+ }
+
+ private HvVcpu RentFromPool(HvAddressSpace addressSpace, HvVcpu vcpu)
+ {
+ return HvVcpuPool.Instance.Rent(addressSpace, _shadowContext, vcpu, SwapContext);
+ }
+
+ private void SwapContext(IHvExecutionContext newContext)
+ {
+ _impl = newContext;
+ }
+
+ public void Dispose()
+ {
+ }
+ }
+}
\ No newline at end of file
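
The key compared in InstructionTrap, (esr >> 1) & 0x1ffe0f, repacks the trapped MRS/MSR syndrome into Op0:Op2:Op1:CRn:00000:CRm with the Rt field zeroed, so a single constant matches a system register regardless of which destination register the guest used. A small sketch reproducing that packing for the two counter registers handled above, with encodings taken from the AArch64 system register space (not part of the commit):

using System;

class SysRegTrapKeySketch
{
    // Same layout the switch in InstructionTrap compares against: Op0 Op2 Op1 CRn 00000 CRm.
    static uint MakeKey(uint op0, uint op1, uint crn, uint crm, uint op2)
        => (op0 << 19) | (op2 << 16) | (op1 << 13) | (crn << 9) | crm;

    static void Main()
    {
        // CNTPCT_EL0: op0=3, op1=3, CRn=14, CRm=0, op2=1.
        uint cntpct = MakeKey(3, 3, 14, 0, 1);
        Console.WriteLine(cntpct == 0b11_001_011_1110_00000_0000); // True: matches the CNTPCT_EL0 case.

        // CNTFRQ_EL0: op0=3, op1=3, CRn=14, CRm=0, op2=0.
        uint cntfrq = MakeKey(3, 3, 14, 0, 0);
        Console.WriteLine(cntfrq == 0b11_000_011_1110_00000_0000); // True: matches the CNTFRQ_EL0 case.
    }
}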
diff --git a/src/Ryujinx.Cpu/AppleHv/HvExecutionContextShadow.cs b/src/Ryujinx.Cpu/AppleHv/HvExecutionContextShadow.cs
new file mode 100644
index 00000000..c088ebdc
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvExecutionContextShadow.cs
@@ -0,0 +1,59 @@
+using ARMeilleure.State;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ unsafe class HvExecutionContextShadow : IHvExecutionContext
+ {
+ public ulong Pc { get; set; }
+ public ulong ElrEl1 { get; set; }
+ public ulong EsrEl1 { get; set; }
+
+ public long TpidrEl0 { get; set; }
+ public long TpidrroEl0 { get; set; }
+
+ public uint Pstate { get; set; }
+
+ public uint Fpcr { get; set; }
+ public uint Fpsr { get; set; }
+
+ public bool IsAarch32 { get; set; }
+
+ private readonly ulong[] _x;
+ private readonly V128[] _v;
+
+ public HvExecutionContextShadow()
+ {
+ _x = new ulong[32];
+ _v = new V128[32];
+ }
+
+ public ulong GetX(int index)
+ {
+ return _x[index];
+ }
+
+ public void SetX(int index, ulong value)
+ {
+ _x[index] = value;
+ }
+
+ public V128 GetV(int index)
+ {
+ return _v[index];
+ }
+
+ public void SetV(int index, V128 value)
+ {
+ _v[index] = value;
+ }
+
+ public void RequestInterrupt()
+ {
+ }
+
+ public bool GetAndClearInterruptRequested()
+ {
+ return false;
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/HvExecutionContextVcpu.cs b/src/Ryujinx.Cpu/AppleHv/HvExecutionContextVcpu.cs
new file mode 100644
index 00000000..4f6ebefa
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvExecutionContextVcpu.cs
@@ -0,0 +1,196 @@
+using ARMeilleure.State;
+using Ryujinx.Memory;
+using System;
+using System.Runtime.InteropServices;
+using System.Threading;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ class HvExecutionContextVcpu : IHvExecutionContext
+ {
+ private static MemoryBlock _setSimdFpRegFuncMem;
+ private delegate hv_result_t SetSimdFpReg(ulong vcpu, hv_simd_fp_reg_t reg, in V128 value, IntPtr funcPtr);
+ private static SetSimdFpReg _setSimdFpReg;
+ private static IntPtr _setSimdFpRegNativePtr;
+
+ static HvExecutionContextVcpu()
+ {
+ // .NET does not support passing vectors by value, so we need to pass a pointer and use a native
+ // function to load the value into a vector register.
+ _setSimdFpRegFuncMem = new MemoryBlock(MemoryBlock.GetPageSize());
+ _setSimdFpRegFuncMem.Write(0, 0x3DC00040u); // LDR Q0, [X2]
+ _setSimdFpRegFuncMem.Write(4, 0xD61F0060u); // BR X3
+ _setSimdFpRegFuncMem.Reprotect(0, _setSimdFpRegFuncMem.Size, MemoryPermission.ReadAndExecute);
+
+ _setSimdFpReg = Marshal.GetDelegateForFunctionPointer<SetSimdFpReg>(_setSimdFpRegFuncMem.Pointer);
+
+ if (NativeLibrary.TryLoad(HvApi.LibraryName, out IntPtr hvLibHandle))
+ {
+ _setSimdFpRegNativePtr = NativeLibrary.GetExport(hvLibHandle, nameof(HvApi.hv_vcpu_set_simd_fp_reg));
+ }
+ }
+
+ public ulong Pc
+ {
+ get
+ {
+ HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_PC, out ulong pc).ThrowOnError();
+ return pc;
+ }
+ set
+ {
+ HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_PC, value).ThrowOnError();
+ }
+ }
+
+ public ulong ElrEl1
+ {
+ get
+ {
+ HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_ELR_EL1, out ulong elr).ThrowOnError();
+ return elr;
+ }
+ set
+ {
+ HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_ELR_EL1, value).ThrowOnError();
+ }
+ }
+
+ public ulong EsrEl1
+ {
+ get
+ {
+ HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_ESR_EL1, out ulong esr).ThrowOnError();
+ return esr;
+ }
+ set
+ {
+ HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_ESR_EL1, value).ThrowOnError();
+ }
+ }
+
+ public long TpidrEl0
+ {
+ get
+ {
+ HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_TPIDR_EL0, out ulong tpidrEl0).ThrowOnError();
+ return (long)tpidrEl0;
+ }
+ set
+ {
+ HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_TPIDR_EL0, (ulong)value).ThrowOnError();
+ }
+ }
+
+ public long TpidrroEl0
+ {
+ get
+ {
+ HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_TPIDRRO_EL0, out ulong tpidrroEl0).ThrowOnError();
+ return (long)tpidrroEl0;
+ }
+ set
+ {
+ HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_TPIDRRO_EL0, (ulong)value).ThrowOnError();
+ }
+ }
+
+ public uint Pstate
+ {
+ get
+ {
+ HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_CPSR, out ulong cpsr).ThrowOnError();
+ return (uint)cpsr;
+ }
+ set
+ {
+ HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_CPSR, (ulong)value).ThrowOnError();
+ }
+ }
+
+ public uint Fpcr
+ {
+ get
+ {
+ HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_FPCR, out ulong fpcr).ThrowOnError();
+ return (uint)fpcr;
+ }
+ set
+ {
+ HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_FPCR, (ulong)value).ThrowOnError();
+ }
+ }
+
+ public uint Fpsr
+ {
+ get
+ {
+ HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_FPSR, out ulong fpsr).ThrowOnError();
+ return (uint)fpsr;
+ }
+ set
+ {
+ HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_FPSR, (ulong)value).ThrowOnError();
+ }
+ }
+
+ private ulong _vcpu;
+ private int _interruptRequested;
+
+ public HvExecutionContextVcpu(ulong vcpu)
+ {
+ _vcpu = vcpu;
+ }
+
+ public ulong GetX(int index)
+ {
+ if (index == 31)
+ {
+ HvApi.hv_vcpu_get_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_SP_EL0, out ulong value).ThrowOnError();
+ return value;
+ }
+ else
+ {
+ HvApi.hv_vcpu_get_reg(_vcpu, hv_reg_t.HV_REG_X0 + (uint)index, out ulong value).ThrowOnError();
+ return value;
+ }
+ }
+
+ public void SetX(int index, ulong value)
+ {
+ if (index == 31)
+ {
+ HvApi.hv_vcpu_set_sys_reg(_vcpu, hv_sys_reg_t.HV_SYS_REG_SP_EL0, value).ThrowOnError();
+ }
+ else
+ {
+ HvApi.hv_vcpu_set_reg(_vcpu, hv_reg_t.HV_REG_X0 + (uint)index, value).ThrowOnError();
+ }
+ }
+
+ public V128 GetV(int index)
+ {
+ HvApi.hv_vcpu_get_simd_fp_reg(_vcpu, hv_simd_fp_reg_t.HV_SIMD_FP_REG_Q0 + (uint)index, out hv_simd_fp_uchar16_t value).ThrowOnError();
+ return new V128(value.Low, value.High);
+ }
+
+ public void SetV(int index, V128 value)
+ {
+ _setSimdFpReg(_vcpu, hv_simd_fp_reg_t.HV_SIMD_FP_REG_Q0 + (uint)index, value, _setSimdFpRegNativePtr).ThrowOnError();
+ }
+
+ public void RequestInterrupt()
+ {
+ if (Interlocked.Exchange(ref _interruptRequested, 1) == 0)
+ {
+ ulong vcpu = _vcpu;
+ HvApi.hv_vcpus_exit(ref vcpu, 1);
+ }
+ }
+
+ public bool GetAndClearInterruptRequested()
+ {
+ return Interlocked.Exchange(ref _interruptRequested, 0) != 0;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/HvIpaAllocator.cs b/src/Ryujinx.Cpu/AppleHv/HvIpaAllocator.cs
new file mode 100644
index 00000000..7eefe130
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvIpaAllocator.cs
@@ -0,0 +1,34 @@
+using System;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ class HvIpaAllocator
+ {
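+ // AllocationGranule is 16 KiB (presumably matching the 16 KiB host page size on Apple silicon),
+ // and IpaRegionSize caps the guest intermediate physical address (IPA) space managed here at 32 GiB.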
+ private const ulong AllocationGranule = 1UL << 14;
+ private const ulong IpaRegionSize = 1UL << 35;
+
+ private readonly PrivateMemoryAllocator.Block _block;
+
+ public HvIpaAllocator()
+ {
+ _block = new PrivateMemoryAllocator.Block(null, IpaRegionSize);
+ }
+
+ public ulong Allocate(ulong size, ulong alignment = AllocationGranule)
+ {
+ ulong offset = _block.Allocate(size, alignment);
+
+ if (offset == PrivateMemoryAllocator.InvalidOffset)
+ {
+ throw new InvalidOperationException($"Not enough free IPA memory to allocate 0x{size:X} bytes with alignment 0x{alignment:X}.");
+ }
+
+ return offset;
+ }
+
+ public void Free(ulong offset, ulong size)
+ {
+ _block.Free(offset, size);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocation.cs b/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocation.cs
new file mode 100644
index 00000000..94289d1c
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocation.cs
@@ -0,0 +1,34 @@
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ struct HvMemoryBlockAllocation : IDisposable
+ {
+ private readonly HvMemoryBlockAllocator _owner;
+ private readonly HvMemoryBlockAllocator.Block _block;
+
+ public bool IsValid => _owner != null;
+ public MemoryBlock Memory => _block.Memory;
+ public ulong Ipa => _block.Ipa;
+ public ulong Offset { get; }
+ public ulong Size { get; }
+
+ public HvMemoryBlockAllocation(
+ HvMemoryBlockAllocator owner,
+ HvMemoryBlockAllocator.Block block,
+ ulong offset,
+ ulong size)
+ {
+ _owner = owner;
+ _block = block;
+ Offset = offset;
+ Size = size;
+ }
+
+ public void Dispose()
+ {
+ _owner.Free(_block, Offset, Size);
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs b/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs
new file mode 100644
index 00000000..24c3a969
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs
@@ -0,0 +1,59 @@
+using Ryujinx.Memory;
+using System.Collections.Generic;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ class HvMemoryBlockAllocator : PrivateMemoryAllocatorImpl<HvMemoryBlockAllocator.Block>
+ {
+ private const ulong InvalidOffset = ulong.MaxValue;
+
+ public class Block : PrivateMemoryAllocator.Block
+ {
+ private readonly HvIpaAllocator _ipaAllocator;
+ public ulong Ipa { get; }
+
+ public Block(HvIpaAllocator ipaAllocator, MemoryBlock memory, ulong size) : base(memory, size)
+ {
+ _ipaAllocator = ipaAllocator;
+
+ lock (ipaAllocator)
+ {
+ Ipa = ipaAllocator.Allocate(size);
+ }
+
+ HvApi.hv_vm_map((ulong)Memory.Pointer, Ipa, size, hv_memory_flags_t.HV_MEMORY_READ | hv_memory_flags_t.HV_MEMORY_WRITE).ThrowOnError();
+ }
+
+ public override void Destroy()
+ {
+ HvApi.hv_vm_unmap(Ipa, Size).ThrowOnError();
+
+ lock (_ipaAllocator)
+ {
+ _ipaAllocator.Free(Ipa, Size);
+ }
+
+ base.Destroy();
+ }
+ }
+
+ private readonly HvIpaAllocator _ipaAllocator;
+
+ public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, int blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
+ {
+ _ipaAllocator = ipaAllocator;
+ }
+
+ public unsafe HvMemoryBlockAllocation Allocate(ulong size, ulong alignment)
+ {
+ var allocation = Allocate(size, alignment, CreateBlock);
+
+ return new HvMemoryBlockAllocation(this, allocation.Block, allocation.Offset, allocation.Size);
+ }
+
+ private Block CreateBlock(MemoryBlock memory, ulong size)
+ {
+ return new Block(_ipaAllocator, memory, size);
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/AppleHv/HvMemoryManager.cs b/src/Ryujinx.Cpu/AppleHv/HvMemoryManager.cs
new file mode 100644
index 00000000..437e02ae
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvMemoryManager.cs
@@ -0,0 +1,947 @@
+using ARMeilleure.Memory;
+using Ryujinx.Cpu.Tracking;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Threading;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ /// <summary>
+ /// Represents a CPU memory manager which maps guest virtual memory directly onto the Hypervisor page table.
+ /// </summary>
+ public class HvMemoryManager : MemoryManagerBase, IMemoryManager, IVirtualMemoryManagerTracked, IWritableBlock
+ {
+ public const int PageBits = 12;
+ public const int PageSize = 1 << PageBits;
+ public const int PageMask = PageSize - 1;
+
+ public const int PageToPteShift = 5; // 32 pages (2 bits each) in one ulong page table entry.
+ public const ulong BlockMappedMask = 0x5555555555555555; // First bit of each table entry set.
+
+ private enum HostMappedPtBits : ulong
+ {
+ Unmapped = 0,
+ Mapped,
+ WriteTracked,
+ ReadWriteTracked,
+
+ MappedReplicated = 0x5555555555555555,
+ WriteTrackedReplicated = 0xaaaaaaaaaaaaaaaa,
+ ReadWriteTrackedReplicated = ulong.MaxValue
+ }
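+
+ // Example: for va = 0x5000, page = va >> PageBits = 5, so its 2-bit state lives in
+ // _pageBitmap[5 >> PageToPteShift] = _pageBitmap[0] at bit offset (5 & 31) << 1 = 10.
+ // A value of 0 means unmapped, 1 mapped, 2 write tracked and 3 read/write tracked,
+ // matching the single-entry values of HostMappedPtBits above.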
+
+ private readonly InvalidAccessHandler _invalidAccessHandler;
+
+ private readonly ulong _addressSpaceSize;
+
+ private readonly HvAddressSpace _addressSpace;
+
+ internal HvAddressSpace AddressSpace => _addressSpace;
+
+ private readonly MemoryBlock _backingMemory;
+ private readonly PageTable<ulong> _pageTable;
+
+ private readonly ulong[] _pageBitmap;
+
+ public bool Supports4KBPages => true;
+
+ public int AddressSpaceBits { get; }
+
+ public IntPtr PageTablePointer => IntPtr.Zero;
+
+ public MemoryManagerType Type => MemoryManagerType.SoftwarePageTable;
+
+ public MemoryTracking Tracking { get; }
+
+ public event Action<ulong, ulong> UnmapEvent;
+
+ /// <summary>
+ /// Creates a new instance of the Hypervisor memory manager.
+ /// </summary>
+ /// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
+ /// <param name="addressSpaceSize">Size of the address space</param>
+ /// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
+ public HvMemoryManager(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler = null)
+ {
+ _backingMemory = backingMemory;
+ _pageTable = new PageTable<ulong>();
+ _invalidAccessHandler = invalidAccessHandler;
+ _addressSpaceSize = addressSpaceSize;
+
+ ulong asSize = PageSize;
+ int asBits = PageBits;
+
+ while (asSize < addressSpaceSize)
+ {
+ asSize <<= 1;
+ asBits++;
+ }
+
+ _addressSpace = new HvAddressSpace(backingMemory, asSize);
+
+ AddressSpaceBits = asBits;
+
+ _pageBitmap = new ulong[1 << (AddressSpaceBits - (PageBits + PageToPteShift))];
+ Tracking = new MemoryTracking(this, PageSize, invalidAccessHandler);
+ }
+
+ /// <summary>
+ /// Checks if the virtual address is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address</param>
+ /// <returns>True if the virtual address is part of the addressable space</returns>
+ private bool ValidateAddress(ulong va)
+ {
+ return va < _addressSpaceSize;
+ }
+
+ /// <summary>
+ /// Checks if the combination of virtual address and size is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <returns>True if the combination of virtual address and size is part of the addressable space</returns>
+ private bool ValidateAddressAndSize(ulong va, ulong size)
+ {
+ ulong endVa = va + size;
+ return endVa >= va && endVa >= size && endVa <= _addressSpaceSize;
+ }
+
+ /// <summary>
+ /// Ensures the combination of virtual address and size is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <exception cref="InvalidMemoryRegionException">Thrown when the specified memory region is outside the addressable space</exception>
+ private void AssertValidAddressAndSize(ulong va, ulong size)
+ {
+ if (!ValidateAddressAndSize(va, size))
+ {
+ throw new InvalidMemoryRegionException($"va=0x{va:X16}, size=0x{size:X16}");
+ }
+ }
+
+ /// <summary>
+ /// Ensures the combination of virtual address and size is part of the addressable space and fully mapped.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ private void AssertMapped(ulong va, ulong size)
+ {
+ if (!ValidateAddressAndSize(va, size) || !IsRangeMappedImpl(va, size))
+ {
+ throw new InvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
+ }
+ }
+
+ /// <inheritdoc/>
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ PtMap(va, pa, size);
+ _addressSpace.MapUser(va, pa, size, MemoryPermission.ReadWriteExecute);
+ AddMapping(va, size);
+
+ Tracking.Map(va, size);
+ }
+
+ private void PtMap(ulong va, ulong pa, ulong size)
+ {
+ while (size != 0)
+ {
+ _pageTable.Map(va, pa);
+
+ va += PageSize;
+ pa += PageSize;
+ size -= PageSize;
+ }
+ }
+
+ /// <inheritdoc/>
+ public void MapForeign(ulong va, nuint hostPointer, ulong size)
+ {
+ throw new NotSupportedException();
+ }
+
+ /// <inheritdoc/>
+ public void Unmap(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ UnmapEvent?.Invoke(va, size);
+ Tracking.Unmap(va, size);
+
+ RemoveMapping(va, size);
+ _addressSpace.UnmapUser(va, size);
+ PtUnmap(va, size);
+ }
+
+ private void PtUnmap(ulong va, ulong size)
+ {
+ while (size != 0)
+ {
+ _pageTable.Unmap(va);
+
+ va += PageSize;
+ size -= PageSize;
+ }
+ }
+
+ /// <inheritdoc/>
+ public T Read<T>(ulong va) where T : unmanaged
+ {
+ return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
+ }
+
+ /// <inheritdoc/>
+ public T ReadTracked<T>(ulong va) where T : unmanaged
+ {
+ try
+ {
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), false);
+
+ return Read<T>(va);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+
+ return default;
+ }
+ }
+
+ /// <inheritdoc/>
+ public void Read(ulong va, Span<byte> data)
+ {
+ ReadImpl(va, data);
+ }
+
+ /// <inheritdoc/>
+ public void Write<T>(ulong va, T value) where T : unmanaged
+ {
+ Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
+ }
+
+ /// <inheritdoc/>
+ public void Write(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ SignalMemoryTracking(va, (ulong)data.Length, true);
+
+ WriteImpl(va, data);
+ }
+
+ /// <inheritdoc/>
+ public void WriteUntracked(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ WriteImpl(va, data);
+ }
+
+ /// <inheritdoc/>
+ public bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return false;
+ }
+
+ SignalMemoryTracking(va, (ulong)data.Length, false);
+
+ if (IsContiguousAndMapped(va, data.Length))
+ {
+ var target = _backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length);
+
+ bool changed = !data.SequenceEqual(target);
+
+ if (changed)
+ {
+ data.CopyTo(target);
+ }
+
+ return changed;
+ }
+ else
+ {
+ WriteImpl(va, data);
+
+ return true;
+ }
+ }
+
+ private void WriteImpl(ulong va, ReadOnlySpan<byte> data)
+ {
+ try
+ {
+ AssertValidAddressAndSize(va, (ulong)data.Length);
+
+ if (IsContiguousAndMapped(va, data.Length))
+ {
+ data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
+ }
+ else
+ {
+ int offset = 0, size;
+
+ if ((va & PageMask) != 0)
+ {
+ ulong pa = GetPhysicalAddressChecked(va);
+
+ size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
+
+ data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
+
+ offset += size;
+ }
+
+ for (; offset < data.Length; offset += size)
+ {
+ ulong pa = GetPhysicalAddressChecked(va + (ulong)offset);
+
+ size = Math.Min(data.Length - offset, PageSize);
+
+ data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
+ }
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
+ {
+ if (size == 0)
+ {
+ return ReadOnlySpan<byte>.Empty;
+ }
+
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, false);
+ }
+
+ if (IsContiguousAndMapped(va, size))
+ {
+ return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
+ }
+ else
+ {
+ Span<byte> data = new byte[size];
+
+ ReadImpl(va, data);
+
+ return data;
+ }
+ }
+
+ /// <inheritdoc/>
+ public WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
+ {
+ if (size == 0)
+ {
+ return new WritableRegion(null, va, Memory<byte>.Empty);
+ }
+
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, true);
+ }
+
+ if (IsContiguousAndMapped(va, size))
+ {
+ return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
+ }
+ else
+ {
+ Memory<byte> memory = new byte[size];
+
+ ReadImpl(va, memory.Span);
+
+ return new WritableRegion(this, va, memory);
+ }
+ }
+
+ /// <inheritdoc/>
+ public ref T GetRef<T>(ulong va) where T : unmanaged
+ {
+ if (!IsContiguous(va, Unsafe.SizeOf<T>()))
+ {
+ ThrowMemoryNotContiguous();
+ }
+
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
+
+ return ref _backingMemory.GetRef<T>(GetPhysicalAddressChecked(va));
+ }
+
+ /// <inheritdoc/>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public bool IsMapped(ulong va)
+ {
+ return ValidateAddress(va) && IsMappedImpl(va);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsMappedImpl(ulong va)
+ {
+ ulong page = va >> PageBits;
+
+ int bit = (int)((page & 31) << 1);
+
+ int pageIndex = (int)(page >> PageToPteShift);
+ ref ulong pageRef = ref _pageBitmap[pageIndex];
+
+ ulong pte = Volatile.Read(ref pageRef);
+
+ return ((pte >> bit) & 3) != 0;
+ }
+
+ /// <inheritdoc/>
+ public bool IsRangeMapped(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ return IsRangeMappedImpl(va, size);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void GetPageBlockRange(ulong pageStart, ulong pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex)
+ {
+ startMask = ulong.MaxValue << ((int)(pageStart & 31) << 1);
+ endMask = ulong.MaxValue >> (64 - ((int)(pageEnd & 31) << 1));
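+ // Example: pageStart = 3 and pageEnd = 10 select pages 3..9. startMask clears the low
+ // 6 bits (pages 0..2), endMask keeps the low 20 bits (pages 0..9), and both indices
+ // resolve to word 0, so the two masks together cover exactly the entries for pages 3..9.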
+
+ pageIndex = (int)(pageStart >> PageToPteShift);
+ pageEndIndex = (int)((pageEnd - 1) >> PageToPteShift);
+ }
+
+ private bool IsRangeMappedImpl(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, size, out _);
+
+ if (pages == 1)
+ {
+ return IsMappedImpl(va);
+ }
+
+ ulong pageStart = va >> PageBits;
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ // Check if either bit in each 2 bit page entry is set.
+ // OR the block with itself shifted down by 1, and check the first bit of each entry.
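+ // For example, a WriteTracked entry (0b10) ORed with itself shifted right becomes 0b11,
+ // so its low bit passes the BlockMappedMask test just like a plain Mapped entry (0b01).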
+
+ ulong mask = BlockMappedMask & startMask;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+ ulong pte = Volatile.Read(ref pageRef);
+
+ pte |= pte >> 1;
+ if ((pte & mask) != mask)
+ {
+ return false;
+ }
+
+ mask = BlockMappedMask;
+ }
+
+ return true;
+ }
+
+ private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsContiguousAndMapped(ulong va, int size) => IsContiguous(va, size) && IsMapped(va);
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsContiguous(ulong va, int size)
+ {
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, (ulong)size))
+ {
+ return false;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return false;
+ }
+
+ if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
+ {
+ return false;
+ }
+
+ va += PageSize;
+ }
+
+ return true;
+ }
+
+ /// <inheritdoc/>
+ public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ if (size == 0)
+ {
+ return Enumerable.Empty<HostMemoryRange>();
+ }
+
+ var guestRegions = GetPhysicalRegionsImpl(va, size);
+ if (guestRegions == null)
+ {
+ return null;
+ }
+
+ var regions = new HostMemoryRange[guestRegions.Count];
+
+ for (int i = 0; i < regions.Length; i++)
+ {
+ var guestRegion = guestRegions[i];
+ IntPtr pointer = _backingMemory.GetPointer(guestRegion.Address, guestRegion.Size);
+ regions[i] = new HostMemoryRange((nuint)(ulong)pointer, guestRegion.Size);
+ }
+
+ return regions;
+ }
+
+ /// <inheritdoc/>
+ public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
+ {
+ if (size == 0)
+ {
+ return Enumerable.Empty<MemoryRange>();
+ }
+
+ return GetPhysicalRegionsImpl(va, size);
+ }
+
+ private List<MemoryRange> GetPhysicalRegionsImpl(ulong va, ulong size)
+ {
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
+ {
+ return null;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ var regions = new List<MemoryRange>();
+
+ ulong regionStart = GetPhysicalAddressInternal(va);
+ ulong regionSize = PageSize;
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return null;
+ }
+
+ ulong newPa = GetPhysicalAddressInternal(va + PageSize);
+
+ if (GetPhysicalAddressInternal(va) + PageSize != newPa)
+ {
+ regions.Add(new MemoryRange(regionStart, regionSize));
+ regionStart = newPa;
+ regionSize = 0;
+ }
+
+ va += PageSize;
+ regionSize += PageSize;
+ }
+
+ regions.Add(new MemoryRange(regionStart, regionSize));
+
+ return regions;
+ }
+
+ private void ReadImpl(ulong va, Span<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ try
+ {
+ AssertValidAddressAndSize(va, (ulong)data.Length);
+
+ int offset = 0, size;
+
+ if ((va & PageMask) != 0)
+ {
+ ulong pa = GetPhysicalAddressChecked(va);
+
+ size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
+
+ _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
+
+ offset += size;
+ }
+
+ for (; offset < data.Length; offset += size)
+ {
+ ulong pa = GetPhysicalAddressChecked(va + (ulong)offset);
+
+ size = Math.Min(data.Length - offset, PageSize);
+
+ _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ /// <remarks>
+ /// This function also validates that the given range is both valid and mapped, and will throw if it is not.
+ /// </remarks>
+ public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false, int? exemptId = null)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ if (precise)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: true, exemptId);
+ return;
+ }
+
+ // Software table, used for managed memory tracking.
+
+ int pages = GetPagesCount(va, size, out _);
+ ulong pageStart = va >> PageBits;
+
+ if (pages == 1)
+ {
+ ulong tag = (ulong)(write ? HostMappedPtBits.WriteTracked : HostMappedPtBits.ReadWriteTracked);
+
+ int bit = (int)((pageStart & 31) << 1);
+
+ int pageIndex = (int)(pageStart >> PageToPteShift);
+ ref ulong pageRef = ref _pageBitmap[pageIndex];
+
+ ulong pte = Volatile.Read(ref pageRef);
+ ulong state = ((pte >> bit) & 3);
+
+ if (state >= tag)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: false, exemptId);
+ return;
+ }
+ else if (state == 0)
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
+ }
+ }
+ else
+ {
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ ulong mask = startMask;
+
+ ulong anyTrackingTag = (ulong)HostMappedPtBits.WriteTrackedReplicated;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+
+ ulong pte = Volatile.Read(ref pageRef);
+ ulong mappedMask = mask & BlockMappedMask;
+
+ ulong mappedPte = pte | (pte >> 1);
+ if ((mappedPte & mappedMask) != mappedMask)
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
+ }
+
+ pte &= mask;
+ if ((pte & anyTrackingTag) != 0) // Search for any tracking.
+ {
+ // Writes trigger any tracking.
+ // Only trigger tracking from reads if both bits are set on any page.
+ if (write || (pte & (pte >> 1) & BlockMappedMask) != 0)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: false, exemptId);
+ break;
+ }
+ }
+
+ mask = ulong.MaxValue;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Computes the number of pages in a virtual address range.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <param name="startVa">The virtual address of the beginning of the first page</param>
+ /// <returns>Number of pages spanned by the virtual address range</returns>
+ /// <remarks>This function does not differentiate between allocated and unallocated pages.</remarks>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private int GetPagesCount(ulong va, ulong size, out ulong startVa)
+ {
+ // WARNING: Always ensure that the ulong operations below cannot overflow.
+ startVa = va & ~(ulong)PageMask;
+ ulong vaSpan = (va - startVa + size + PageMask) & ~(ulong)PageMask;
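+ // Example: va = 0x1FF0 and size = 0x20 straddle a page boundary; startVa becomes 0x1000 and
+ // vaSpan = (0xFF0 + 0x20 + 0xFFF) & ~0xFFFUL = 0x2000, so the method reports 2 pages.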
+
+ return (int)(vaSpan / PageSize);
+ }
+
+ /// <inheritdoc/>
+ public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection)
+ {
+ // Protection is inverted on software pages, since the default value is 0.
+ protection = (~protection) & MemoryPermission.ReadAndWrite;
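+ // For example, reprotecting a region to read-only turns into Write here, which marks the
+ // pages as write tracked below and is converted back to MemoryPermission.Read for the
+ // ReprotectUser call at the end of this method.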
+
+ int pages = GetPagesCount(va, size, out va);
+ ulong pageStart = va >> PageBits;
+
+ if (pages == 1)
+ {
+ ulong protTag = protection switch
+ {
+ MemoryPermission.None => (ulong)HostMappedPtBits.Mapped,
+ MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTracked,
+ _ => (ulong)HostMappedPtBits.ReadWriteTracked,
+ };
+
+ int bit = (int)((pageStart & 31) << 1);
+
+ ulong tagMask = 3UL << bit;
+ ulong invTagMask = ~tagMask;
+
+ ulong tag = protTag << bit;
+
+ int pageIndex = (int)(pageStart >> PageToPteShift);
+ ref ulong pageRef = ref _pageBitmap[pageIndex];
+
+ ulong pte;
+
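+ // Retry the compare-exchange until it succeeds; if the entry is unmapped (its tag bits
+ // are zero) the loop exits without writing, so unmapped pages never gain tracking state.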
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+ }
+ while ((pte & tagMask) != 0 && Interlocked.CompareExchange(ref pageRef, (pte & invTagMask) | tag, pte) != pte);
+ }
+ else
+ {
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ ulong mask = startMask;
+
+ ulong protTag = protection switch
+ {
+ MemoryPermission.None => (ulong)HostMappedPtBits.MappedReplicated,
+ MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTrackedReplicated,
+ _ => (ulong)HostMappedPtBits.ReadWriteTrackedReplicated,
+ };
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+
+ ulong pte;
+ ulong mappedMask;
+
+ // Change the protection of all 2 bit entries that are mapped.
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+
+ mappedMask = pte | (pte >> 1);
+ mappedMask |= (mappedMask & BlockMappedMask) << 1;
+ mappedMask &= mask; // Only update mapped pages within the given range.
+ }
+ while (Interlocked.CompareExchange(ref pageRef, (pte & (~mappedMask)) | (protTag & mappedMask), pte) != pte);
+
+ mask = ulong.MaxValue;
+ }
+ }
+
+ protection = protection switch
+ {
+ MemoryPermission.None => MemoryPermission.ReadAndWrite,
+ MemoryPermission.Write => MemoryPermission.Read,
+ _ => MemoryPermission.None
+ };
+
+ _addressSpace.ReprotectUser(va, size, protection);
+ }
+
+ /// <inheritdoc/>
+ public CpuRegionHandle BeginTracking(ulong address, ulong size, int id)
+ {
+ return new CpuRegionHandle(Tracking.BeginTracking(address, size, id));
+ }
+
+ /// <inheritdoc/>
+ public CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity, int id)
+ {
+ return new CpuMultiRegionHandle(Tracking.BeginGranularTracking(address, size, handles, granularity, id));
+ }
+
+ /// <inheritdoc/>
+ public CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity, int id)
+ {
+ return new CpuSmartMultiRegionHandle(Tracking.BeginSmartGranularTracking(address, size, granularity, id));
+ }
+
+ /// <summary>
+ /// Adds the given address mapping to the page table.
+ /// </summary>
+ /// <param name="va">Virtual memory address</param>
+ /// <param name="size">Size to be mapped</param>
+ private void AddMapping(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, size, out _);
+ ulong pageStart = va >> PageBits;
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ ulong mask = startMask;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+
+ ulong pte;
+ ulong mappedMask;
+
+ // Map all 2-bit entries that are unmapped.
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+
+ mappedMask = pte | (pte >> 1);
+ mappedMask |= (mappedMask & BlockMappedMask) << 1;
+ mappedMask |= ~mask; // Treat everything outside the range as mapped, thus unchanged.
+ }
+ while (Interlocked.CompareExchange(ref pageRef, (pte & mappedMask) | (BlockMappedMask & (~mappedMask)), pte) != pte);
+
+ mask = ulong.MaxValue;
+ }
+ }
+
+ /// <summary>
+ /// Removes the given address mapping from the page table.
+ /// </summary>
+ /// <param name="va">Virtual memory address</param>
+ /// <param name="size">Size to be unmapped</param>
+ private void RemoveMapping(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, size, out _);
+ ulong pageStart = va >> PageBits;
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ startMask = ~startMask;
+ endMask = ~endMask;
+
+ ulong mask = startMask;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask |= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+ ulong pte;
+
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+ }
+ while (Interlocked.CompareExchange(ref pageRef, pte & mask, pte) != pte);
+
+ mask = 0;
+ }
+ }
+
+ private ulong GetPhysicalAddressChecked(ulong va)
+ {
+ if (!IsMapped(va))
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}");
+ }
+
+ return GetPhysicalAddressInternal(va);
+ }
+
+ private ulong GetPhysicalAddressInternal(ulong va)
+ {
+ return _pageTable.Read(va) + (va & PageMask);
+ }
+
+ /// <summary>
+ /// Disposes of resources used by the memory manager.
+ /// </summary>
+ protected override void Destroy()
+ {
+ _addressSpace.Dispose();
+ }
+
+ private static void ThrowInvalidMemoryRegionException(string message) => throw new InvalidMemoryRegionException(message);
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/HvVcpu.cs b/src/Ryujinx.Cpu/AppleHv/HvVcpu.cs
new file mode 100644
index 00000000..484a9fe8
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvVcpu.cs
@@ -0,0 +1,25 @@
+namespace Ryujinx.Cpu.AppleHv
+{
+ unsafe class HvVcpu
+ {
+ public readonly ulong Handle;
+ public readonly hv_vcpu_exit_t* ExitInfo;
+ public readonly IHvExecutionContext ShadowContext;
+ public readonly IHvExecutionContext NativeContext;
+ public readonly bool IsEphemeral;
+
+ public HvVcpu(
+ ulong handle,
+ hv_vcpu_exit_t* exitInfo,
+ IHvExecutionContext shadowContext,
+ IHvExecutionContext nativeContext,
+ bool isEphemeral)
+ {
+ Handle = handle;
+ ExitInfo = exitInfo;
+ ShadowContext = shadowContext;
+ NativeContext = nativeContext;
+ IsEphemeral = isEphemeral;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/HvVcpuPool.cs b/src/Ryujinx.Cpu/AppleHv/HvVcpuPool.cs
new file mode 100644
index 00000000..cb1944fe
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvVcpuPool.cs
@@ -0,0 +1,103 @@
+using System;
+using System.Threading;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ class HvVcpuPool
+ {
+ // Since there's a limit on the number of VCPUs we can create,
+ // and we assign one VCPU per guest thread, we need to ensure
+ // there are enough VCPUs available for at least the maximum number of active guest threads.
+ // To do that, we always destroy and re-create VCPUs that are above a given limit.
+ // Those VCPUs are called "ephemeral" here because they are not kept for long.
+ //
+ // In the future, we might want to consider a smarter approach that only makes
+ // VCPUs for threads that are not running frequently "ephemeral", but this is
+ // complicated because VCPUs can only be destroyed by the same thread that created them.
+
+ private const int MaxActiveVcpus = 4;
+
+ public static readonly HvVcpuPool Instance = new HvVcpuPool();
+
+ private int _totalVcpus;
+ private int _maxVcpus;
+
+ public HvVcpuPool()
+ {
+ HvApi.hv_vm_get_max_vcpu_count(out uint maxVcpuCount).ThrowOnError();
+ _maxVcpus = (int)maxVcpuCount;
+ }
+
+ public HvVcpu Create(HvAddressSpace addressSpace, IHvExecutionContext shadowContext, Action<IHvExecutionContext> swapContext)
+ {
+ HvVcpu vcpu = CreateNew(addressSpace, shadowContext);
+ vcpu.NativeContext.Load(shadowContext);
+ swapContext(vcpu.NativeContext);
+ return vcpu;
+ }
+
+ public void Destroy(HvVcpu vcpu, Action<IHvExecutionContext> swapContext)
+ {
+ vcpu.ShadowContext.Load(vcpu.NativeContext);
+ swapContext(vcpu.ShadowContext);
+ DestroyVcpu(vcpu);
+ }
+
+ public void Return(HvVcpu vcpu, Action<IHvExecutionContext> swapContext)
+ {
+ if (vcpu.IsEphemeral)
+ {
+ Destroy(vcpu, swapContext);
+ }
+ }
+
+ public HvVcpu Rent(HvAddressSpace addressSpace, IHvExecutionContext shadowContext, HvVcpu vcpu, Action<IHvExecutionContext> swapContext)
+ {
+ if (vcpu.IsEphemeral)
+ {
+ return Create(addressSpace, shadowContext, swapContext);
+ }
+ else
+ {
+ return vcpu;
+ }
+ }
+
+ private unsafe HvVcpu CreateNew(HvAddressSpace addressSpace, IHvExecutionContext shadowContext)
+ {
+ int newCount = IncrementVcpuCount();
+ bool isEphemeral = newCount > _maxVcpus - MaxActiveVcpus;
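+ // Example: if the host reports a limit of 64 VCPUs, every VCPU created beyond the 60th
+ // (64 - MaxActiveVcpus) is flagged as ephemeral and destroyed again on Return, keeping
+ // slots available for threads that are actively running.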
+
+ // Create VCPU.
+ hv_vcpu_exit_t* exitInfo = null;
+ HvApi.hv_vcpu_create(out ulong vcpuHandle, ref exitInfo, IntPtr.Zero).ThrowOnError();
+
+ // Enable FP and SIMD instructions.
+ HvApi.hv_vcpu_set_sys_reg(vcpuHandle, hv_sys_reg_t.HV_SYS_REG_CPACR_EL1, 0b11 << 20).ThrowOnError();
+
+ addressSpace.InitializeMmu(vcpuHandle);
+
+ HvExecutionContextVcpu nativeContext = new HvExecutionContextVcpu(vcpuHandle);
+
+ HvVcpu vcpu = new HvVcpu(vcpuHandle, exitInfo, shadowContext, nativeContext, isEphemeral);
+
+ return vcpu;
+ }
+
+ private void DestroyVcpu(HvVcpu vcpu)
+ {
+ HvApi.hv_vcpu_destroy(vcpu.Handle).ThrowOnError();
+ DecrementVcpuCount();
+ }
+
+ private int IncrementVcpuCount()
+ {
+ return Interlocked.Increment(ref _totalVcpus);
+ }
+
+ private void DecrementVcpuCount()
+ {
+ Interlocked.Decrement(ref _totalVcpus);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/HvVm.cs b/src/Ryujinx.Cpu/AppleHv/HvVm.cs
new file mode 100644
index 00000000..d91abff9
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/HvVm.cs
@@ -0,0 +1,68 @@
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ static class HvVm
+ {
+ // This alignment allows us to use larger blocks on the page table.
+ private const ulong AsIpaAlignment = 1UL << 30;
+
+ private static int _addressSpaces;
+ private static HvIpaAllocator _ipaAllocator;
+ private static object _lock = new object();
+
+ public static (ulong, HvIpaAllocator) CreateAddressSpace(MemoryBlock block)
+ {
+ HvIpaAllocator ipaAllocator;
+
+ lock (_lock)
+ {
+ if (++_addressSpaces == 1)
+ {
+ HvApi.hv_vm_create(IntPtr.Zero).ThrowOnError();
+ _ipaAllocator = ipaAllocator = new HvIpaAllocator();
+ }
+ else
+ {
+ ipaAllocator = _ipaAllocator;
+ }
+ }
+
+ ulong baseAddress;
+
+ lock (ipaAllocator)
+ {
+ baseAddress = ipaAllocator.Allocate(block.Size, AsIpaAlignment);
+ }
+
+ var rwx = hv_memory_flags_t.HV_MEMORY_READ | hv_memory_flags_t.HV_MEMORY_WRITE | hv_memory_flags_t.HV_MEMORY_EXEC;
+
+ HvApi.hv_vm_map((ulong)block.Pointer, baseAddress, block.Size, rwx).ThrowOnError();
+
+ return (baseAddress, ipaAllocator);
+ }
+
+ public static void DestroyAddressSpace(ulong address, ulong size)
+ {
+ HvApi.hv_vm_unmap(address, size);
+
+ HvIpaAllocator ipaAllocator;
+
+ lock (_lock)
+ {
+ if (--_addressSpaces == 0)
+ {
+ HvApi.hv_vm_destroy().ThrowOnError();
+ }
+
+ ipaAllocator = _ipaAllocator;
+ }
+
+ lock (ipaAllocator)
+ {
+ ipaAllocator.Free(address, size);
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/AppleHv/IHvExecutionContext.cs b/src/Ryujinx.Cpu/AppleHv/IHvExecutionContext.cs
new file mode 100644
index 00000000..adf2dd99
--- /dev/null
+++ b/src/Ryujinx.Cpu/AppleHv/IHvExecutionContext.cs
@@ -0,0 +1,46 @@
+using ARMeilleure.State;
+
+namespace Ryujinx.Cpu.AppleHv
+{
+ public interface IHvExecutionContext
+ {
+ ulong Pc { get; set; }
+ ulong ElrEl1 { get; set; }
+ ulong EsrEl1 { get; set; }
+
+ long TpidrEl0 { get; set; }
+ long TpidrroEl0 { get; set; }
+
+ uint Pstate { get; set; }
+
+ uint Fpcr { get; set; }
+ uint Fpsr { get; set; }
+
+ ulong GetX(int index);
+ void SetX(int index, ulong value);
+
+ V128 GetV(int index);
+ void SetV(int index, V128 value);
+
+ public void Load(IHvExecutionContext context)
+ {
+ Pc = context.Pc;
+ ElrEl1 = context.ElrEl1;
+ EsrEl1 = context.EsrEl1;
+ TpidrEl0 = context.TpidrEl0;
+ TpidrroEl0 = context.TpidrroEl0;
+ Pstate = context.Pstate;
+ Fpcr = context.Fpcr;
+ Fpsr = context.Fpsr;
+
+ for (int i = 0; i < 32; i++)
+ {
+ SetX(i, context.GetX(i));
+ SetV(i, context.GetV(i));
+ }
+ }
+
+ void RequestInterrupt();
+ bool GetAndClearInterruptRequested();
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/ExceptionCallbacks.cs b/src/Ryujinx.Cpu/ExceptionCallbacks.cs
new file mode 100644
index 00000000..d9293302
--- /dev/null
+++ b/src/Ryujinx.Cpu/ExceptionCallbacks.cs
@@ -0,0 +1,64 @@
+namespace Ryujinx.Cpu
+{
+ /// <summary>
+ /// Exception callback without any additional arguments.
+ /// </summary>
+ /// <param name="context">Context for the thread where the exception was triggered</param>
+ public delegate void ExceptionCallbackNoArgs(IExecutionContext context);
+
+ /// <summary>
+ /// Exception callback.
+ /// </summary>
+ /// <param name="context">Context for the thread where the exception was triggered</param>
+ /// <param name="address">Address of the instruction that caused the exception</param>
+ /// <param name="imm">Immediate value of the instruction that caused the exception, or for undefined instruction, the instruction itself</param>
+ public delegate void ExceptionCallback(IExecutionContext context, ulong address, int imm);
+
+ /// <summary>
+ /// Stores handlers for the various CPU exceptions.
+ /// </summary>
+ public readonly struct ExceptionCallbacks
+ {
+ /// <summary>
+ /// Handler for CPU interrupts triggered using <see cref="IExecutionContext.RequestInterrupt"/>.
+ /// </summary>
+ public readonly ExceptionCallbackNoArgs InterruptCallback;
+
+ /// <summary>
+ /// Handler for CPU software interrupts caused by the Arm BRK instruction.
+ /// </summary>
+ public readonly ExceptionCallback BreakCallback;
+
+ /// <summary>
+ /// Handler for CPU software interrupts caused by the Arm SVC instruction.
+ /// </summary>
+ public readonly ExceptionCallback SupervisorCallback;
+
+ /// <summary>
+ /// Handler for CPU software interrupts caused by any undefined Arm instruction.
+ /// </summary>
+ public readonly ExceptionCallback UndefinedCallback;
+
+ /// <summary>
+ /// Creates a new exception callbacks structure.
+ /// </summary>
+ /// <remarks>
+ /// All handlers are optional, and if null, the CPU will just continue executing as if nothing happened.
+ /// </remarks>
+ /// <param name="interruptCallback">Handler for CPU interrupts triggered using <see cref="IExecutionContext.RequestInterrupt"/></param>
+ /// <param name="breakCallback">Handler for CPU software interrupts caused by the Arm BRK instruction</param>
+ /// <param name="supervisorCallback">Handler for CPU software interrupts caused by the Arm SVC instruction</param>
+ /// <param name="undefinedCallback">Handler for CPU software interrupts caused by any undefined Arm instruction</param>
+ public ExceptionCallbacks(
+ ExceptionCallbackNoArgs interruptCallback = null,
+ ExceptionCallback breakCallback = null,
+ ExceptionCallback supervisorCallback = null,
+ ExceptionCallback undefinedCallback = null)
+ {
+ InterruptCallback = interruptCallback;
+ BreakCallback = breakCallback;
+ SupervisorCallback = supervisorCallback;
+ UndefinedCallback = undefinedCallback;
+ }
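+
+ // Usage sketch (handler names are illustrative only):
+ //
+ // var callbacks = new ExceptionCallbacks(supervisorCallback: (context, address, imm) => HandleSvc(context, imm));
+ // var execContext = cpuContext.CreateExecutionContext(callbacks);
+ //
+ // where HandleSvc is a hypothetical guest system call dispatcher and cpuContext is an ICpuContext.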
+ }
+}
diff --git a/src/Ryujinx.Cpu/ICpuContext.cs b/src/Ryujinx.Cpu/ICpuContext.cs
new file mode 100644
index 00000000..80916d1c
--- /dev/null
+++ b/src/Ryujinx.Cpu/ICpuContext.cs
@@ -0,0 +1,61 @@
+namespace Ryujinx.Cpu
+{
+ /// <summary>
+ /// CPU context interface.
+ /// </summary>
+ public interface ICpuContext
+ {
+ /// <summary>
+ /// Creates a new execution context that will store thread CPU register state when executing guest code.
+ /// </summary>
+ /// <param name="exceptionCallbacks">Optional functions to be called when the CPU receives an interrupt</param>
+ /// <returns>Execution context</returns>
+ IExecutionContext CreateExecutionContext(ExceptionCallbacks exceptionCallbacks);
+
+ /// <summary>
+ /// Starts executing code at a specified entry point address.
+ /// </summary>
+ /// <remarks>
+ /// This function only returns when the execution is stopped, by calling <see cref="IExecutionContext.StopRunning"/>.
+ /// </remarks>
+ /// <param name="context">Execution context to be used for this run</param>
+ /// <param name="address">Entry point address</param>
+ void Execute(IExecutionContext context, ulong address);
+
+ /// <summary>
+ /// Invalidates the instruction cache for a given memory region.
+ /// </summary>
+ /// <remarks>
+ /// This should be called if code is modified to make the CPU emulator aware of the modifications,
+ /// otherwise it might run stale code which will lead to errors and crashes.
+ /// Calling this function is not necessary if the code memory was modified by guest code,
+ /// as the guest is expected to do so on its own using the appropriate cache invalidation instructions,
+ /// except on Arm32 where those instructions can't be used in unprivileged mode.
+ /// </remarks>
+ /// <param name="address">Address of the region to be invalidated</param>
+ /// <param name="size">Size of the region to be invalidated</param>
+ void InvalidateCacheRegion(ulong address, ulong size);
+
+ /// <summary>
+ /// Loads cached code from disk for a given application.
+ /// </summary>
+ /// <remarks>
+ /// If the execution engine is recompiling guest code, this can be used to load cached code from disk.
+ /// </remarks>
+ /// <param name="titleIdText">Title ID of the application in padded hex form</param>
+ /// <param name="displayVersion">Version of the application</param>
+ /// <param name="enabled">True if the cache should be loaded from disk if it exists, false otherwise</param>
+ /// <returns>Disk cache load progress reporter and manager</returns>
+ IDiskCacheLoadState LoadDiskCache(string titleIdText, string displayVersion, bool enabled);
+
+ /// <summary>
+ /// Indicates that code has been loaded into guest memory, and that it might be executed in the future.
+ /// </summary>
+ /// <remarks>
+ /// Some execution engines might use this information to cache recompiled code on disk or to ensure it can be executed.
+ /// </remarks>
+ /// <param name="address">CPU virtual address where the code starts</param>
+ /// <param name="size">Size of the code range in bytes</param>
+ void PrepareCodeRange(ulong address, ulong size);
+ }
+}
diff --git a/src/Ryujinx.Cpu/ICpuEngine.cs b/src/Ryujinx.Cpu/ICpuEngine.cs
new file mode 100644
index 00000000..b53b23a8
--- /dev/null
+++ b/src/Ryujinx.Cpu/ICpuEngine.cs
@@ -0,0 +1,18 @@
+using ARMeilleure.Memory;
+
+namespace Ryujinx.Cpu
+{
+ /// <summary>
+ /// CPU execution engine interface.
+ /// </summary>
+ public interface ICpuEngine
+ {
+ /// <summary>
+ /// Creates a new CPU context that can be used to run code for multiple threads sharing an address space.
+ /// </summary>
+ /// <param name="memoryManager">Memory manager for the address space of the context</param>
+ /// <param name="for64Bit">Indicates if the context will be used to run 64-bit or 32-bit Arm code</param>
+ /// <returns>CPU context</returns>
+ ICpuContext CreateCpuContext(IMemoryManager memoryManager, bool for64Bit);
+ }
+}
diff --git a/src/Ryujinx.Cpu/IDiskCacheState.cs b/src/Ryujinx.Cpu/IDiskCacheState.cs
new file mode 100644
index 00000000..61bbdf92
--- /dev/null
+++ b/src/Ryujinx.Cpu/IDiskCacheState.cs
@@ -0,0 +1,20 @@
+using System;
+
+namespace Ryujinx.Cpu
+{
+ /// <summary>
+ /// Disk cache load state report and management interface.
+ /// </summary>
+ public interface IDiskCacheLoadState
+ {
+ /// <summary>
+ /// Event used to report the cache load progress.
+ /// </summary>
+ event Action<LoadState, int, int> StateChanged;
+
+ /// <summary>
+ /// Cancels the disk cache load process.
+ /// </summary>
+ void Cancel();
+ }
+}
diff --git a/src/Ryujinx.Cpu/IExecutionContext.cs b/src/Ryujinx.Cpu/IExecutionContext.cs
new file mode 100644
index 00000000..c3821080
--- /dev/null
+++ b/src/Ryujinx.Cpu/IExecutionContext.cs
@@ -0,0 +1,112 @@
+using ARMeilleure.State;
+using System;
+
+namespace Ryujinx.Cpu
+{
+ /// <summary>
+ /// CPU register state interface.
+ /// </summary>
+ public interface IExecutionContext : IDisposable
+ {
+ /// <summary>
+ /// Current Program Counter.
+ /// </summary>
+ /// <remarks>
+ /// In some implementations, this value might not be accurate and might not point to the last instruction executed.
+ /// </remarks>
+ ulong Pc { get; }
+
+ /// <summary>
+ /// Thread ID Register (EL0).
+ /// </summary>
+ long TpidrEl0 { get; set; }
+
+ /// <summary>
+ /// Thread ID Register (read-only) (EL0).
+ /// </summary>
+ long TpidrroEl0 { get; set; }
+
+ /// <summary>
+ /// Processor State Register.
+ /// </summary>
+ uint Pstate { get; set; }
+
+ /// <summary>
+ /// Floating-point Control Register.
+ /// </summary>
+ uint Fpcr { get; set; }
+
+ /// <summary>
+ /// Floating-point Status Register.
+ /// </summary>
+ uint Fpsr { get; set; }
+
+ /// <summary>
+ /// Indicates whether the CPU is running 64-bit (AArch64 mode) or 32-bit (AArch32 mode) code.
+ /// </summary>
+ bool IsAarch32 { get; set; }
+
+ /// <summary>
+ /// Indicates whether the CPU is still running code.
+ /// </summary>
+ /// <remarks>
+ /// Even if this is false, the guest code might still be exiting.
+ /// One must not assume that the code is no longer running from this property alone.
+ /// </remarks>
+ bool Running { get; }
+
+ /// <summary>
+ /// Gets the value of a general purpose register.
+ /// </summary>
+ /// <remarks>
+ /// The special <paramref name="index"/> of 31 can be used to access the SP (Stack Pointer) register.
+ /// </remarks>
+ /// <param name="index">Index of the register, in the range 0-31 (inclusive)</param>
+ /// <returns>The register value</returns>
+ ulong GetX(int index);
+
+ /// <summary>
+ /// Sets the value of a general purpose register.
+ /// </summary>
+ /// <remarks>
+ /// The special <paramref name="index"/> of 31 can be used to access the SP (Stack Pointer) register.
+ /// </remarks>
+ /// <param name="index">Index of the register, in the range 0-31 (inclusive)</param>
+ /// <param name="value">Value to be set</param>
+ void SetX(int index, ulong value);
+
+ /// <summary>
+ /// Gets the value of a FP/SIMD register.
+ /// </summary>
+ /// <param name="index">Index of the register, in the range 0-31 (inclusive)</param>
+ /// <returns>The register value</returns>
+ V128 GetV(int index);
+
+ /// <summary>
+ /// Sets the value of a FP/SIMD register.
+ /// </summary>
+ /// <param name="index">Index of the register, in the range 0-31 (inclusive)</param>
+ /// <param name="value">Value to be set</param>
+ void SetV(int index, V128 value);
+
+ /// <summary>
+ /// Requests the thread to stop running temporarily and call <see cref="ExceptionCallbacks.InterruptCallback"/>.
+ /// </summary>
+ /// <remarks>
+ /// The thread might not pause immediately.
+ /// One must not assume that guest code is no longer being executed by the thread after calling this function.
+ /// </remarks>
+ void RequestInterrupt();
+
+ /// <summary>
+ /// Requests the thread to stop running guest code and return as soon as possible.
+ /// </summary>
+ /// <remarks>
+ /// The thread might not stop immediately.
+ /// One must not assume that guest code is no longer being executed by the thread after calling this function.
+ /// After a thread has been stopped, it can't be restarted with the same <see cref="IExecutionContext"/>.
+ /// If you only need to pause the thread temporarily, use <see cref="RequestInterrupt"/> instead.
+ /// </remarks>
+ void StopRunning();
+ }
+}
diff --git a/src/Ryujinx.Cpu/ITickSource.cs b/src/Ryujinx.Cpu/ITickSource.cs
new file mode 100644
index 00000000..e65e99e2
--- /dev/null
+++ b/src/Ryujinx.Cpu/ITickSource.cs
@@ -0,0 +1,31 @@
+using ARMeilleure.State;
+using System;
+
+namespace Ryujinx.Cpu
+{
+ /// <summary>
+ /// Tick source interface.
+ /// </summary>
+ public interface ITickSource : ICounter
+ {
+ /// <summary>
+ /// Time elapsed since the counter was created.
+ /// </summary>
+ TimeSpan ElapsedTime { get; }
+
+ /// <summary>
+ /// Time elapsed since the counter was created, in seconds.
+ /// </summary>
+ double ElapsedSeconds { get; }
+
+ /// <summary>
+ /// Stops counting.
+ /// </summary>
+ void Suspend();
+
+ /// <summary>
+ /// Resumes counting after a call to <see cref="Suspend"/>.
+ /// </summary>
+ void Resume();
+ }
+}
diff --git a/src/Ryujinx.Cpu/IVirtualMemoryManagerTracked.cs b/src/Ryujinx.Cpu/IVirtualMemoryManagerTracked.cs
new file mode 100644
index 00000000..92d3c76c
--- /dev/null
+++ b/src/Ryujinx.Cpu/IVirtualMemoryManagerTracked.cs
@@ -0,0 +1,56 @@
+using Ryujinx.Cpu.Tracking;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+
+namespace Ryujinx.Cpu
+{
+ public interface IVirtualMemoryManagerTracked : IVirtualMemoryManager
+ {
+ /// <summary>
+ /// Reads data from CPU mapped memory, with read tracking.
+ /// </summary>
+ /// <typeparam name="T">Type of the data being read</typeparam>
+ /// <param name="va">Virtual address of the data in memory</param>
+ /// <returns>The data</returns>
+ T ReadTracked<T>(ulong va) where T : unmanaged;
+
+ /// <summary>
+ /// Writes data to CPU mapped memory, without write tracking.
+ /// </summary>
+ /// <param name="va">Virtual address to write the data into</param>
+ /// <param name="data">Data to be written</param>
+ void WriteUntracked(ulong va, ReadOnlySpan<byte> data);
+
+ /// <summary>
+ /// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with.
+ /// </summary>
+ /// <param name="address">CPU virtual address of the region</param>
+ /// <param name="size">Size of the region</param>
+ /// <param name="id">Handle ID</param>
+ /// <returns>The memory tracking handle</returns>
+ CpuRegionHandle BeginTracking(ulong address, ulong size, int id);
+
+ /// <summary>
+ /// Obtains a memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with.
+ /// </summary>
+ /// <param name="address">CPU virtual address of the region</param>
+ /// <param name="size">Size of the region</param>
+ /// <param name="handles">Handles to inherit state from or reuse. When none are present, provide null</param>
+ /// <param name="granularity">Desired granularity of write tracking</param>
+ /// <param name="id">Handle ID</param>
+ /// <returns>The memory tracking handle</returns>
+ CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity, int id);
+
+ /// <summary>
+ /// Obtains a smart memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with.
+ /// </summary>
+ /// <param name="address">CPU virtual address of the region</param>
+ /// <param name="size">Size of the region</param>
+ /// <param name="granularity">Desired granularity of write tracking</param>
+ /// <param name="id">Handle ID</param>
+ /// <returns>The memory tracking handle</returns>
+ CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity, int id);
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/JitCpuContext.cs b/src/Ryujinx.Cpu/Jit/JitCpuContext.cs
new file mode 100644
index 00000000..02465a0b
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/JitCpuContext.cs
@@ -0,0 +1,53 @@
+using ARMeilleure.Memory;
+using ARMeilleure.Translation;
+
+namespace Ryujinx.Cpu.Jit
+{
+ class JitCpuContext : ICpuContext
+ {
+ private readonly ITickSource _tickSource;
+ private readonly Translator _translator;
+
+ public JitCpuContext(ITickSource tickSource, IMemoryManager memory, bool for64Bit)
+ {
+ _tickSource = tickSource;
+ _translator = new Translator(new JitMemoryAllocator(), memory, for64Bit);
+ memory.UnmapEvent += UnmapHandler;
+ }
+
+ private void UnmapHandler(ulong address, ulong size)
+ {
+ _translator.InvalidateJitCacheRegion(address, size);
+ }
+
+ /// <inheritdoc/>
+ public IExecutionContext CreateExecutionContext(ExceptionCallbacks exceptionCallbacks)
+ {
+ return new JitExecutionContext(new JitMemoryAllocator(), _tickSource, exceptionCallbacks);
+ }
+
+ /// <inheritdoc/>
+ public void Execute(IExecutionContext context, ulong address)
+ {
+ _translator.Execute(((JitExecutionContext)context).Impl, address);
+ }
+
+ /// <inheritdoc/>
+ public void InvalidateCacheRegion(ulong address, ulong size)
+ {
+ _translator.InvalidateJitCacheRegion(address, size);
+ }
+
+ /// <inheritdoc/>
+ public IDiskCacheLoadState LoadDiskCache(string titleIdText, string displayVersion, bool enabled)
+ {
+ return new JitDiskCacheLoadState(_translator.LoadDiskCache(titleIdText, displayVersion, enabled));
+ }
+
+ /// <inheritdoc/>
+ public void PrepareCodeRange(ulong address, ulong size)
+ {
+ _translator.PrepareCodeRange(address, size);
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/JitDiskCacheLoadState.cs b/src/Ryujinx.Cpu/Jit/JitDiskCacheLoadState.cs
new file mode 100644
index 00000000..7a4b670b
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/JitDiskCacheLoadState.cs
@@ -0,0 +1,38 @@
+using ARMeilleure.Translation.PTC;
+using System;
+
+namespace Ryujinx.Cpu.Jit
+{
+ public class JitDiskCacheLoadState : IDiskCacheLoadState
+ {
+ /// <inheritdoc/>
+ public event Action<LoadState, int, int> StateChanged;
+
+ private readonly IPtcLoadState _loadState;
+
+ public JitDiskCacheLoadState(IPtcLoadState loadState)
+ {
+ loadState.PtcStateChanged += LoadStateChanged;
+ _loadState = loadState;
+ }
+
+ private void LoadStateChanged(PtcLoadingState newState, int current, int total)
+ {
+ LoadState state = newState switch
+ {
+ PtcLoadingState.Start => LoadState.Unloaded,
+ PtcLoadingState.Loading => LoadState.Loading,
+ PtcLoadingState.Loaded => LoadState.Loaded,
+ _ => throw new ArgumentException($"Invalid load state \"{newState}\".")
+ };
+
+ StateChanged?.Invoke(state, current, total);
+ }
+
+ /// <inheritdoc/>
+ public void Cancel()
+ {
+ _loadState.Continue();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/Jit/JitEngine.cs b/src/Ryujinx.Cpu/Jit/JitEngine.cs
new file mode 100644
index 00000000..b158074f
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/JitEngine.cs
@@ -0,0 +1,20 @@
+using ARMeilleure.Memory;
+
+namespace Ryujinx.Cpu.Jit
+{
+ public class JitEngine : ICpuEngine
+ {
+ private readonly ITickSource _tickSource;
+
+ public JitEngine(ITickSource tickSource)
+ {
+ _tickSource = tickSource;
+ }
+
+ /// <inheritdoc/>
+ public ICpuContext CreateCpuContext(IMemoryManager memoryManager, bool for64Bit)
+ {
+ return new JitCpuContext(_tickSource, memoryManager, for64Bit);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/Jit/JitExecutionContext.cs b/src/Ryujinx.Cpu/Jit/JitExecutionContext.cs
new file mode 100644
index 00000000..e1a527b1
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/JitExecutionContext.cs
@@ -0,0 +1,123 @@
+using ARMeilleure.Memory;
+using ARMeilleure.State;
+
+namespace Ryujinx.Cpu.Jit
+{
+ class JitExecutionContext : IExecutionContext
+ {
+ private readonly ExecutionContext _impl;
+ internal ExecutionContext Impl => _impl;
+
+ /// <inheritdoc/>
+ public ulong Pc => _impl.Pc;
+
+ /// <inheritdoc/>
+ public long TpidrEl0
+ {
+ get => _impl.TpidrEl0;
+ set => _impl.TpidrEl0 = value;
+ }
+
+ /// <inheritdoc/>
+ public long TpidrroEl0
+ {
+ get => _impl.TpidrroEl0;
+ set => _impl.TpidrroEl0 = value;
+ }
+
+ /// <inheritdoc/>
+ public uint Pstate
+ {
+ get => _impl.Pstate;
+ set => _impl.Pstate = value;
+ }
+
+ /// <inheritdoc/>
+ public uint Fpcr
+ {
+ get => (uint)_impl.Fpcr;
+ set => _impl.Fpcr = (FPCR)value;
+ }
+
+ /// <inheritdoc/>
+ public uint Fpsr
+ {
+ get => (uint)_impl.Fpsr;
+ set => _impl.Fpsr = (FPSR)value;
+ }
+
+ /// <inheritdoc/>
+ public bool IsAarch32
+ {
+ get => _impl.IsAarch32;
+ set => _impl.IsAarch32 = value;
+ }
+
+ /// <inheritdoc/>
+ public bool Running => _impl.Running;
+
+ private readonly ExceptionCallbacks _exceptionCallbacks;
+
+ public JitExecutionContext(IJitMemoryAllocator allocator, ICounter counter, ExceptionCallbacks exceptionCallbacks)
+ {
+ _impl = new ExecutionContext(
+ allocator,
+ counter,
+ InterruptHandler,
+ BreakHandler,
+ SupervisorCallHandler,
+ UndefinedHandler);
+
+ _exceptionCallbacks = exceptionCallbacks;
+ }
+
+ /// <inheritdoc/>
+ public ulong GetX(int index) => _impl.GetX(index);
+
+ /// <inheritdoc/>
+ public void SetX(int index, ulong value) => _impl.SetX(index, value);
+
+ /// <inheritdoc/>
+ public V128 GetV(int index) => _impl.GetV(index);
+
+ /// <inheritdoc/>
+ public void SetV(int index, V128 value) => _impl.SetV(index, value);
+
+ private void InterruptHandler(ExecutionContext context)
+ {
+ _exceptionCallbacks.InterruptCallback?.Invoke(this);
+ }
+
+ private void BreakHandler(ExecutionContext context, ulong address, int imm)
+ {
+ _exceptionCallbacks.BreakCallback?.Invoke(this, address, imm);
+ }
+
+ private void SupervisorCallHandler(ExecutionContext context, ulong address, int imm)
+ {
+ _exceptionCallbacks.SupervisorCallback?.Invoke(this, address, imm);
+ }
+
+ private void UndefinedHandler(ExecutionContext context, ulong address, int opCode)
+ {
+ _exceptionCallbacks.UndefinedCallback?.Invoke(this, address, opCode);
+ }
+
+ /// <inheritdoc/>
+ public void RequestInterrupt()
+ {
+ _impl.RequestInterrupt();
+ }
+
+ /// <inheritdoc/>
+ public void StopRunning()
+ {
+ _impl.StopRunning();
+ }
+
+ public void Dispose()
+ {
+ _impl.Dispose();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs b/src/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs
new file mode 100644
index 00000000..4aa78d06
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs
@@ -0,0 +1,13 @@
+using ARMeilleure.Memory;
+using Ryujinx.Memory;
+
+namespace Ryujinx.Cpu.Jit
+{
+ public class JitMemoryAllocator : IJitMemoryAllocator
+ {
+ public IJitMemoryBlock Allocate(ulong size) => new JitMemoryBlock(size, MemoryAllocationFlags.None);
+ public IJitMemoryBlock Reserve(ulong size) => new JitMemoryBlock(size, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.Jit);
+
+ public ulong GetPageSize() => MemoryBlock.GetPageSize();
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/JitMemoryBlock.cs b/src/Ryujinx.Cpu/Jit/JitMemoryBlock.cs
new file mode 100644
index 00000000..327fb303
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/JitMemoryBlock.cs
@@ -0,0 +1,24 @@
+using ARMeilleure.Memory;
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu.Jit
+{
+ public class JitMemoryBlock : IJitMemoryBlock
+ {
+ private readonly MemoryBlock _impl;
+
+ public IntPtr Pointer => _impl.Pointer;
+
+ public JitMemoryBlock(ulong size, MemoryAllocationFlags flags)
+ {
+ _impl = new MemoryBlock(size, flags);
+ }
+
+ public bool Commit(ulong offset, ulong size) => _impl.Commit(offset, size);
+ public void MapAsRx(ulong offset, ulong size) => _impl.Reprotect(offset, size, MemoryPermission.ReadAndExecute);
+ public void MapAsRwx(ulong offset, ulong size) => _impl.Reprotect(offset, size, MemoryPermission.ReadWriteExecute);
+
+ public void Dispose() => _impl.Dispose();
+ }
+}
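
The two JIT memory types above are thin wrappers over MemoryBlock; the sketch below is illustrative only (not part of this change) and shows the lifecycle they imply for a translated-code buffer, assuming a hypothetical 64 KiB reservation:

    using ARMeilleure.Memory;
    using Ryujinx.Cpu.Jit;

    class JitBufferSketch
    {
        static void Demo()
        {
            var allocator = new JitMemoryAllocator();

            // Reserve address space up front and commit the first page on demand.
            IJitMemoryBlock block = allocator.Reserve(64 * 1024);
            block.Commit(0, allocator.GetPageSize());

            // Emit code through block.Pointer while the page is RWX, then lock it
            // down to read-and-execute once the translated function is finished.
            block.MapAsRwx(0, allocator.GetPageSize());
            // ... write machine code at block.Pointer ...
            block.MapAsRx(0, allocator.GetPageSize());

            block.Dispose();
        }
    }
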
diff --git a/src/Ryujinx.Cpu/Jit/MemoryManager.cs b/src/Ryujinx.Cpu/Jit/MemoryManager.cs
new file mode 100644
index 00000000..8542d53e
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/MemoryManager.cs
@@ -0,0 +1,704 @@
+using ARMeilleure.Memory;
+using Ryujinx.Cpu.Tracking;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Threading;
+
+namespace Ryujinx.Cpu.Jit
+{
+ /// <summary>
+ /// Represents a CPU memory manager.
+ /// </summary>
+ public sealed class MemoryManager : MemoryManagerBase, IMemoryManager, IVirtualMemoryManagerTracked, IWritableBlock
+ {
+ public const int PageBits = 12;
+ public const int PageSize = 1 << PageBits;
+ public const int PageMask = PageSize - 1;
+
+ private const int PteSize = 8;
+
+ private const int PointerTagBit = 62;
+
+ private readonly MemoryBlock _backingMemory;
+ private readonly InvalidAccessHandler _invalidAccessHandler;
+
+ /// <inheritdoc/>
+ public bool Supports4KBPages => true;
+
+ /// <summary>
+ /// Address space width in bits.
+ /// </summary>
+ public int AddressSpaceBits { get; }
+
+ private readonly ulong _addressSpaceSize;
+
+ private readonly MemoryBlock _pageTable;
+
+ /// <summary>
+ /// Page table base pointer.
+ /// </summary>
+ public IntPtr PageTablePointer => _pageTable.Pointer;
+
+ public MemoryManagerType Type => MemoryManagerType.SoftwarePageTable;
+
+ public MemoryTracking Tracking { get; }
+
+ public event Action<ulong, ulong> UnmapEvent;
+
+ /// <summary>
+ /// Creates a new instance of the memory manager.
+ /// </summary>
+ /// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
+ /// <param name="addressSpaceSize">Size of the address space</param>
+ /// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
+ public MemoryManager(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler = null)
+ {
+ _backingMemory = backingMemory;
+ _invalidAccessHandler = invalidAccessHandler;
+
+ ulong asSize = PageSize;
+ int asBits = PageBits;
+
+ while (asSize < addressSpaceSize)
+ {
+ asSize <<= 1;
+ asBits++;
+ }
+
+ AddressSpaceBits = asBits;
+ _addressSpaceSize = asSize;
+ _pageTable = new MemoryBlock((asSize / PageSize) * PteSize);
+
+ Tracking = new MemoryTracking(this, PageSize);
+ }
+
+ /// <inheritdoc/>
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ ulong remainingSize = size;
+ ulong oVa = va;
+ ulong oPa = pa;
+ while (remainingSize != 0)
+ {
+ _pageTable.Write((va / PageSize) * PteSize, PaToPte(pa));
+
+ va += PageSize;
+ pa += PageSize;
+ remainingSize -= PageSize;
+ }
+
+ Tracking.Map(oVa, size);
+ }
+
+ /// <inheritdoc/>
+ public void MapForeign(ulong va, nuint hostPointer, ulong size)
+ {
+ throw new NotSupportedException();
+ }
+
+ /// <inheritdoc/>
+ public void Unmap(ulong va, ulong size)
+ {
+ // If size is 0, there's nothing to unmap, just exit early.
+ if (size == 0)
+ {
+ return;
+ }
+
+ AssertValidAddressAndSize(va, size);
+
+ UnmapEvent?.Invoke(va, size);
+ Tracking.Unmap(va, size);
+
+ ulong remainingSize = size;
+ while (remainingSize != 0)
+ {
+ _pageTable.Write((va / PageSize) * PteSize, 0UL);
+
+ va += PageSize;
+ remainingSize -= PageSize;
+ }
+ }
+
+ /// <inheritdoc/>
+ public T Read<T>(ulong va) where T : unmanaged
+ {
+ return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
+ }
+
+ /// <inheritdoc/>
+ public T ReadTracked<T>(ulong va) where T : unmanaged
+ {
+ try
+ {
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), false);
+
+ return Read<T>(va);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+
+ return default;
+ }
+ }
+
+ /// <inheritdoc/>
+ public void Read(ulong va, Span<byte> data)
+ {
+ ReadImpl(va, data);
+ }
+
+ /// <inheritdoc/>
+ public void Write<T>(ulong va, T value) where T : unmanaged
+ {
+ Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
+ }
+
+ /// <inheritdoc/>
+ public void Write(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ SignalMemoryTracking(va, (ulong)data.Length, true);
+
+ WriteImpl(va, data);
+ }
+
+ /// <inheritdoc/>
+ public void WriteUntracked(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ WriteImpl(va, data);
+ }
+
+ /// <inheritdoc/>
+ public bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return false;
+ }
+
+ SignalMemoryTracking(va, (ulong)data.Length, false);
+
+ if (IsContiguousAndMapped(va, data.Length))
+ {
+ var target = _backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length);
+
+ bool changed = !data.SequenceEqual(target);
+
+ if (changed)
+ {
+ data.CopyTo(target);
+ }
+
+ return changed;
+ }
+ else
+ {
+ WriteImpl(va, data);
+
+ return true;
+ }
+ }
+
+ /// <summary>
+ /// Writes data to CPU mapped memory.
+ /// </summary>
+ /// <param name="va">Virtual address to write the data into</param>
+ /// <param name="data">Data to be written</param>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void WriteImpl(ulong va, ReadOnlySpan<byte> data)
+ {
+ try
+ {
+ AssertValidAddressAndSize(va, (ulong)data.Length);
+
+ if (IsContiguousAndMapped(va, data.Length))
+ {
+ data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
+ }
+ else
+ {
+ int offset = 0, size;
+
+ if ((va & PageMask) != 0)
+ {
+ ulong pa = GetPhysicalAddressInternal(va);
+
+ size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
+
+ data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
+
+ offset += size;
+ }
+
+ for (; offset < data.Length; offset += size)
+ {
+ ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
+
+ size = Math.Min(data.Length - offset, PageSize);
+
+ data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
+ }
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
+ {
+ if (size == 0)
+ {
+ return ReadOnlySpan<byte>.Empty;
+ }
+
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, false);
+ }
+
+ if (IsContiguousAndMapped(va, size))
+ {
+ return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
+ }
+ else
+ {
+ Span<byte> data = new byte[size];
+
+ ReadImpl(va, data);
+
+ return data;
+ }
+ }
+
+ /// <inheritdoc/>
+ public unsafe WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
+ {
+ if (size == 0)
+ {
+ return new WritableRegion(null, va, Memory<byte>.Empty);
+ }
+
+ if (IsContiguousAndMapped(va, size))
+ {
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, true);
+ }
+
+ return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
+ }
+ else
+ {
+ Memory<byte> memory = new byte[size];
+
+ GetSpan(va, size).CopyTo(memory.Span);
+
+ return new WritableRegion(this, va, memory, tracked);
+ }
+ }
+
+ /// <inheritdoc/>
+ public ref T GetRef<T>(ulong va) where T : unmanaged
+ {
+ if (!IsContiguous(va, Unsafe.SizeOf<T>()))
+ {
+ ThrowMemoryNotContiguous();
+ }
+
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
+
+ return ref _backingMemory.GetRef<T>(GetPhysicalAddressInternal(va));
+ }
+
+ /// <summary>
+ /// Computes the number of pages in a virtual address range.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <param name="startVa">The virtual address of the beginning of the first page</param>
+ /// <remarks>This function does not differentiate between allocated and unallocated pages.</remarks>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private int GetPagesCount(ulong va, uint size, out ulong startVa)
+ {
+            // WARNING: Take care that the ulong operations below cannot overflow.
+ startVa = va & ~(ulong)PageMask;
+ ulong vaSpan = (va - startVa + size + PageMask) & ~(ulong)PageMask;
+
+ return (int)(vaSpan / PageSize);
+ }
+
+ private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsContiguousAndMapped(ulong va, int size) => IsContiguous(va, size) && IsMapped(va);
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsContiguous(ulong va, int size)
+ {
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, (ulong)size))
+ {
+ return false;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return false;
+ }
+
+ if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
+ {
+ return false;
+ }
+
+ va += PageSize;
+ }
+
+ return true;
+ }
+
+ /// <inheritdoc/>
+ public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ if (size == 0)
+ {
+ return Enumerable.Empty<HostMemoryRange>();
+ }
+
+ var guestRegions = GetPhysicalRegionsImpl(va, size);
+ if (guestRegions == null)
+ {
+ return null;
+ }
+
+ var regions = new HostMemoryRange[guestRegions.Count];
+
+ for (int i = 0; i < regions.Length; i++)
+ {
+ var guestRegion = guestRegions[i];
+ IntPtr pointer = _backingMemory.GetPointer(guestRegion.Address, guestRegion.Size);
+ regions[i] = new HostMemoryRange((nuint)(ulong)pointer, guestRegion.Size);
+ }
+
+ return regions;
+ }
+
+ /// <inheritdoc/>
+ public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
+ {
+ if (size == 0)
+ {
+ return Enumerable.Empty<MemoryRange>();
+ }
+
+ return GetPhysicalRegionsImpl(va, size);
+ }
+
+ private List<MemoryRange> GetPhysicalRegionsImpl(ulong va, ulong size)
+ {
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
+ {
+ return null;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ var regions = new List<MemoryRange>();
+
+ ulong regionStart = GetPhysicalAddressInternal(va);
+ ulong regionSize = PageSize;
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return null;
+ }
+
+ ulong newPa = GetPhysicalAddressInternal(va + PageSize);
+
+ if (GetPhysicalAddressInternal(va) + PageSize != newPa)
+ {
+ regions.Add(new MemoryRange(regionStart, regionSize));
+ regionStart = newPa;
+ regionSize = 0;
+ }
+
+ va += PageSize;
+ regionSize += PageSize;
+ }
+
+ regions.Add(new MemoryRange(regionStart, regionSize));
+
+ return regions;
+ }
+
+ private void ReadImpl(ulong va, Span<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ try
+ {
+ AssertValidAddressAndSize(va, (ulong)data.Length);
+
+ int offset = 0, size;
+
+ if ((va & PageMask) != 0)
+ {
+ ulong pa = GetPhysicalAddressInternal(va);
+
+ size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
+
+ _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
+
+ offset += size;
+ }
+
+ for (; offset < data.Length; offset += size)
+ {
+ ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
+
+ size = Math.Min(data.Length - offset, PageSize);
+
+ _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ public bool IsRangeMapped(ulong va, ulong size)
+ {
+ if (size == 0UL)
+ {
+ return true;
+ }
+
+ if (!ValidateAddressAndSize(va, size))
+ {
+ return false;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ for (int page = 0; page < pages; page++)
+ {
+ if (!IsMapped(va))
+ {
+ return false;
+ }
+
+ va += PageSize;
+ }
+
+ return true;
+ }
+
+ /// <inheritdoc/>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public bool IsMapped(ulong va)
+ {
+ if (!ValidateAddress(va))
+ {
+ return false;
+ }
+
+ return _pageTable.Read<ulong>((va / PageSize) * PteSize) != 0;
+ }
+
+ private bool ValidateAddress(ulong va)
+ {
+ return va < _addressSpaceSize;
+ }
+
+ /// <summary>
+ /// Checks if the combination of virtual address and size is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <returns>True if the combination of virtual address and size is part of the addressable space</returns>
+ private bool ValidateAddressAndSize(ulong va, ulong size)
+ {
+ ulong endVa = va + size;
+ return endVa >= va && endVa >= size && endVa <= _addressSpaceSize;
+ }
+
+ /// <summary>
+ /// Ensures the combination of virtual address and size is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+        /// <exception cref="InvalidMemoryRegionException">Thrown when the specified memory region is outside the addressable space</exception>
+ private void AssertValidAddressAndSize(ulong va, ulong size)
+ {
+ if (!ValidateAddressAndSize(va, size))
+ {
+ throw new InvalidMemoryRegionException($"va=0x{va:X16}, size=0x{size:X16}");
+ }
+ }
+
+ private ulong GetPhysicalAddress(ulong va)
+ {
+            // Return ulong.MaxValue (all bits set, i.e. -1 as a signed value) if the virtual address is invalid or unmapped.
+ if (!ValidateAddress(va) || !IsMapped(va))
+ {
+ return ulong.MaxValue;
+ }
+
+ return GetPhysicalAddressInternal(va);
+ }
+
+ private ulong GetPhysicalAddressInternal(ulong va)
+ {
+ return PteToPa(_pageTable.Read<ulong>((va / PageSize) * PteSize) & ~(0xffffUL << 48)) + (va & PageMask);
+ }
+
+ /// <inheritdoc/>
+ public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ // Protection is inverted on software pages, since the default value is 0.
+ protection = (~protection) & MemoryPermission.ReadAndWrite;
+
+ long tag = protection switch
+ {
+ MemoryPermission.None => 0L,
+ MemoryPermission.Write => 2L << PointerTagBit,
+ _ => 3L << PointerTagBit
+ };
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+ ulong pageStart = va >> PageBits;
+ long invTagMask = ~(0xffffL << 48);
+
+ for (int page = 0; page < pages; page++)
+ {
+ ref long pageRef = ref _pageTable.GetRef<long>(pageStart * PteSize);
+
+ long pte;
+
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+ }
+ while (pte != 0 && Interlocked.CompareExchange(ref pageRef, (pte & invTagMask) | tag, pte) != pte);
+
+ pageStart++;
+ }
+ }
+
+ /// <inheritdoc/>
+ public CpuRegionHandle BeginTracking(ulong address, ulong size, int id)
+ {
+ return new CpuRegionHandle(Tracking.BeginTracking(address, size, id));
+ }
+
+ /// <inheritdoc/>
+ public CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity, int id)
+ {
+ return new CpuMultiRegionHandle(Tracking.BeginGranularTracking(address, size, handles, granularity, id));
+ }
+
+ /// <inheritdoc/>
+ public CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity, int id)
+ {
+ return new CpuSmartMultiRegionHandle(Tracking.BeginSmartGranularTracking(address, size, granularity, id));
+ }
+
+ /// <inheritdoc/>
+ public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false, int? exemptId = null)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ if (precise)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: true, exemptId);
+ return;
+ }
+
+            // We emulate guard pages for software memory access. This makes for an easy transition to
+            // tracking using host guard pages in the future, while also supporting platforms where that is not possible.
+
+            // The write tag also implies read protection, since there are no read actions that are not also performed before a write.
+ long tag = (write ? 3L : 1L) << PointerTagBit;
+
+ int pages = GetPagesCount(va, (uint)size, out _);
+ ulong pageStart = va >> PageBits;
+
+ for (int page = 0; page < pages; page++)
+ {
+ ref long pageRef = ref _pageTable.GetRef<long>(pageStart * PteSize);
+
+ long pte;
+
+ pte = Volatile.Read(ref pageRef);
+
+ if ((pte & tag) != 0)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: false, exemptId);
+ break;
+ }
+
+ pageStart++;
+ }
+ }
+
+ private ulong PaToPte(ulong pa)
+ {
+ return (ulong)_backingMemory.GetPointer(pa, PageSize);
+ }
+
+ private ulong PteToPa(ulong pte)
+ {
+ return (ulong)((long)pte - _backingMemory.Pointer.ToInt64());
+ }
+
+ /// <summary>
+ /// Disposes of resources used by the memory manager.
+ /// </summary>
+ protected override void Destroy() => _pageTable.Dispose();
+
+ private void ThrowInvalidMemoryRegionException(string message) => throw new InvalidMemoryRegionException(message);
+ }
+}
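
For reference, here is a worked sketch (not part of the commit) of the software PTE layout that TrackingReprotect and SignalMemoryTracking manipulate above: the low bits hold the host pointer for the page, and bits 62-63 carry the inverted protection tag.

    // Standalone illustration of the tag arithmetic; all values are hypothetical.
    const int PointerTagBit = 62;
    const ulong TagMask = 0xffffUL << 48;

    ulong hostPointer = 0x0000_7F12_3456_7000UL;                       // hypothetical host address
    ulong writeTracked = hostPointer | (2UL << PointerTagBit);         // only writes trigger tracking
    ulong readWriteTracked = hostPointer | (3UL << PointerTagBit);     // reads and writes trigger tracking

    // A write access tests both tag bits, a read access only bit 62.
    bool writeHits = (writeTracked & (3UL << PointerTagBit)) != 0;     // true
    bool readHits = (writeTracked & (1UL << PointerTagBit)) != 0;      // false

    // Address translation masks the tag bits off again before the PTE is used
    // (see GetPhysicalAddressInternal above).
    ulong untagged = readWriteTracked & ~TagMask;                      // == hostPointer
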
diff --git a/src/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs b/src/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
new file mode 100644
index 00000000..090740ab
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
@@ -0,0 +1,817 @@
+using ARMeilleure.Memory;
+using Ryujinx.Cpu.Tracking;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Threading;
+
+namespace Ryujinx.Cpu.Jit
+{
+ /// <summary>
+ /// Represents a CPU memory manager which maps guest virtual memory directly onto a host virtual region.
+ /// </summary>
+ public sealed class MemoryManagerHostMapped : MemoryManagerBase, IMemoryManager, IVirtualMemoryManagerTracked, IWritableBlock
+ {
+ public const int PageBits = 12;
+ public const int PageSize = 1 << PageBits;
+ public const int PageMask = PageSize - 1;
+
+ public const int PageToPteShift = 5; // 32 pages (2 bits each) in one ulong page table entry.
+ public const ulong BlockMappedMask = 0x5555555555555555; // First bit of each table entry set.
+
+ private enum HostMappedPtBits : ulong
+ {
+ Unmapped = 0,
+ Mapped,
+ WriteTracked,
+ ReadWriteTracked,
+
+ MappedReplicated = 0x5555555555555555,
+ WriteTrackedReplicated = 0xaaaaaaaaaaaaaaaa,
+ ReadWriteTrackedReplicated = ulong.MaxValue
+ }
+
+ private readonly InvalidAccessHandler _invalidAccessHandler;
+ private readonly bool _unsafeMode;
+
+ private readonly AddressSpace _addressSpace;
+ private readonly ulong _addressSpaceSize;
+
+ private readonly PageTable<ulong> _pageTable;
+
+ private readonly MemoryEhMeilleure _memoryEh;
+
+ private readonly ulong[] _pageBitmap;
+
+ /// <inheritdoc/>
+ public bool Supports4KBPages => MemoryBlock.GetPageSize() == PageSize;
+
+ public int AddressSpaceBits { get; }
+
+ public IntPtr PageTablePointer => _addressSpace.Base.Pointer;
+
+ public MemoryManagerType Type => _unsafeMode ? MemoryManagerType.HostMappedUnsafe : MemoryManagerType.HostMapped;
+
+ public MemoryTracking Tracking { get; }
+
+ public event Action<ulong, ulong> UnmapEvent;
+
+ /// <summary>
+ /// Creates a new instance of the host mapped memory manager.
+ /// </summary>
+ /// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
+ /// <param name="addressSpaceSize">Size of the address space</param>
+ /// <param name="unsafeMode">True if unmanaged access should not be masked (unsafe), false otherwise.</param>
+ /// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
+ public MemoryManagerHostMapped(MemoryBlock backingMemory, ulong addressSpaceSize, bool unsafeMode, InvalidAccessHandler invalidAccessHandler = null)
+ {
+ _pageTable = new PageTable<ulong>();
+ _invalidAccessHandler = invalidAccessHandler;
+ _unsafeMode = unsafeMode;
+ _addressSpaceSize = addressSpaceSize;
+
+ ulong asSize = PageSize;
+ int asBits = PageBits;
+
+ while (asSize < addressSpaceSize)
+ {
+ asSize <<= 1;
+ asBits++;
+ }
+
+ AddressSpaceBits = asBits;
+
+ _pageBitmap = new ulong[1 << (AddressSpaceBits - (PageBits + PageToPteShift))];
+
+ _addressSpace = new AddressSpace(backingMemory, asSize, Supports4KBPages);
+
+ Tracking = new MemoryTracking(this, (int)MemoryBlock.GetPageSize(), invalidAccessHandler);
+ _memoryEh = new MemoryEhMeilleure(_addressSpace.Base, _addressSpace.Mirror, Tracking);
+ }
+
+ /// <summary>
+ /// Checks if the virtual address is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address</param>
+ /// <returns>True if the virtual address is part of the addressable space</returns>
+ private bool ValidateAddress(ulong va)
+ {
+ return va < _addressSpaceSize;
+ }
+
+ /// <summary>
+ /// Checks if the combination of virtual address and size is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <returns>True if the combination of virtual address and size is part of the addressable space</returns>
+ private bool ValidateAddressAndSize(ulong va, ulong size)
+ {
+ ulong endVa = va + size;
+ return endVa >= va && endVa >= size && endVa <= _addressSpaceSize;
+ }
+
+ /// <summary>
+ /// Ensures the combination of virtual address and size is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+        /// <exception cref="InvalidMemoryRegionException">Thrown when the specified memory region is outside the addressable space</exception>
+ private void AssertValidAddressAndSize(ulong va, ulong size)
+ {
+ if (!ValidateAddressAndSize(va, size))
+ {
+ throw new InvalidMemoryRegionException($"va=0x{va:X16}, size=0x{size:X16}");
+ }
+ }
+
+ /// <summary>
+ /// Ensures the combination of virtual address and size is part of the addressable space and fully mapped.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ private void AssertMapped(ulong va, ulong size)
+ {
+ if (!ValidateAddressAndSize(va, size) || !IsRangeMappedImpl(va, size))
+ {
+ throw new InvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
+ }
+ }
+
+ /// <inheritdoc/>
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ _addressSpace.Map(va, pa, size, flags);
+ AddMapping(va, size);
+ PtMap(va, pa, size);
+
+ Tracking.Map(va, size);
+ }
+
+ /// <inheritdoc/>
+ public void MapForeign(ulong va, nuint hostPointer, ulong size)
+ {
+ throw new NotSupportedException();
+ }
+
+ /// <inheritdoc/>
+ public void Unmap(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ UnmapEvent?.Invoke(va, size);
+ Tracking.Unmap(va, size);
+
+ RemoveMapping(va, size);
+ PtUnmap(va, size);
+ _addressSpace.Unmap(va, size);
+ }
+
+ private void PtMap(ulong va, ulong pa, ulong size)
+ {
+ while (size != 0)
+ {
+ _pageTable.Map(va, pa);
+
+ va += PageSize;
+ pa += PageSize;
+ size -= PageSize;
+ }
+ }
+
+ private void PtUnmap(ulong va, ulong size)
+ {
+ while (size != 0)
+ {
+ _pageTable.Unmap(va);
+
+ va += PageSize;
+ size -= PageSize;
+ }
+ }
+
+ /// <inheritdoc/>
+ public T Read<T>(ulong va) where T : unmanaged
+ {
+ try
+ {
+ AssertMapped(va, (ulong)Unsafe.SizeOf<T>());
+
+ return _addressSpace.Mirror.Read<T>(va);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+
+ return default;
+ }
+ }
+
+ /// <inheritdoc/>
+ public T ReadTracked<T>(ulong va) where T : unmanaged
+ {
+ try
+ {
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), false);
+
+ return Read<T>(va);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+
+ return default;
+ }
+ }
+
+ /// <inheritdoc/>
+ public void Read(ulong va, Span<byte> data)
+ {
+ try
+ {
+ AssertMapped(va, (ulong)data.Length);
+
+ _addressSpace.Mirror.Read(va, data);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ public void Write<T>(ulong va, T value) where T : unmanaged
+ {
+ try
+ {
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), write: true);
+
+ _addressSpace.Mirror.Write(va, value);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ public void Write(ulong va, ReadOnlySpan<byte> data)
+ {
+ try
+ {
+ SignalMemoryTracking(va, (ulong)data.Length, write: true);
+
+ _addressSpace.Mirror.Write(va, data);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ public void WriteUntracked(ulong va, ReadOnlySpan<byte> data)
+ {
+ try
+ {
+ AssertMapped(va, (ulong)data.Length);
+
+ _addressSpace.Mirror.Write(va, data);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ public bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data)
+ {
+ try
+ {
+ SignalMemoryTracking(va, (ulong)data.Length, false);
+
+ Span<byte> target = _addressSpace.Mirror.GetSpan(va, data.Length);
+ bool changed = !data.SequenceEqual(target);
+
+ if (changed)
+ {
+ data.CopyTo(target);
+ }
+
+ return changed;
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+
+ return true;
+ }
+ }
+
+ /// <inheritdoc/>
+ public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
+ {
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, write: false);
+ }
+ else
+ {
+ AssertMapped(va, (ulong)size);
+ }
+
+ return _addressSpace.Mirror.GetSpan(va, size);
+ }
+
+ /// <inheritdoc/>
+ public WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
+ {
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, true);
+ }
+ else
+ {
+ AssertMapped(va, (ulong)size);
+ }
+
+ return _addressSpace.Mirror.GetWritableRegion(va, size);
+ }
+
+ /// <inheritdoc/>
+ public ref T GetRef<T>(ulong va) where T : unmanaged
+ {
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
+
+ return ref _addressSpace.Mirror.GetRef<T>(va);
+ }
+
+ /// <inheritdoc/>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public bool IsMapped(ulong va)
+ {
+ return ValidateAddress(va) && IsMappedImpl(va);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsMappedImpl(ulong va)
+ {
+ ulong page = va >> PageBits;
+
+ int bit = (int)((page & 31) << 1);
+
+ int pageIndex = (int)(page >> PageToPteShift);
+ ref ulong pageRef = ref _pageBitmap[pageIndex];
+
+ ulong pte = Volatile.Read(ref pageRef);
+
+ return ((pte >> bit) & 3) != 0;
+ }
+
+ /// <inheritdoc/>
+ public bool IsRangeMapped(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ return IsRangeMappedImpl(va, size);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void GetPageBlockRange(ulong pageStart, ulong pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex)
+ {
+ startMask = ulong.MaxValue << ((int)(pageStart & 31) << 1);
+ endMask = ulong.MaxValue >> (64 - ((int)(pageEnd & 31) << 1));
+
+ pageIndex = (int)(pageStart >> PageToPteShift);
+ pageEndIndex = (int)((pageEnd - 1) >> PageToPteShift);
+ }
+
+ private bool IsRangeMappedImpl(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, size, out _);
+
+ if (pages == 1)
+ {
+ return IsMappedImpl(va);
+ }
+
+ ulong pageStart = va >> PageBits;
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ // Check if either bit in each 2 bit page entry is set.
+ // OR the block with itself shifted down by 1, and check the first bit of each entry.
+
+ ulong mask = BlockMappedMask & startMask;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+ ulong pte = Volatile.Read(ref pageRef);
+
+ pte |= pte >> 1;
+ if ((pte & mask) != mask)
+ {
+ return false;
+ }
+
+ mask = BlockMappedMask;
+ }
+
+ return true;
+ }
+
+ /// <inheritdoc/>
+ public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ return Enumerable.Repeat(new HostMemoryRange((nuint)(ulong)_addressSpace.Mirror.GetPointer(va, size), size), 1);
+ }
+
+ /// <inheritdoc/>
+ public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ var regions = new List<MemoryRange>();
+
+ ulong regionStart = GetPhysicalAddressChecked(va);
+ ulong regionSize = PageSize;
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return null;
+ }
+
+ ulong newPa = GetPhysicalAddressChecked(va + PageSize);
+
+ if (GetPhysicalAddressChecked(va) + PageSize != newPa)
+ {
+ regions.Add(new MemoryRange(regionStart, regionSize));
+ regionStart = newPa;
+ regionSize = 0;
+ }
+
+ va += PageSize;
+ regionSize += PageSize;
+ }
+
+ regions.Add(new MemoryRange(regionStart, regionSize));
+
+ return regions;
+ }
+
+ private ulong GetPhysicalAddressChecked(ulong va)
+ {
+ if (!IsMapped(va))
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}");
+ }
+
+ return GetPhysicalAddressInternal(va);
+ }
+
+ private ulong GetPhysicalAddressInternal(ulong va)
+ {
+ return _pageTable.Read(va) + (va & PageMask);
+ }
+
+ /// <inheritdoc/>
+ /// <remarks>
+        /// This function also ensures that the given range is both valid and mapped, and will throw if it is not.
+ /// </remarks>
+ public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false, int? exemptId = null)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ if (precise)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: true, exemptId);
+ return;
+ }
+
+ // Software table, used for managed memory tracking.
+
+ int pages = GetPagesCount(va, size, out _);
+ ulong pageStart = va >> PageBits;
+
+ if (pages == 1)
+ {
+ ulong tag = (ulong)(write ? HostMappedPtBits.WriteTracked : HostMappedPtBits.ReadWriteTracked);
+
+ int bit = (int)((pageStart & 31) << 1);
+
+ int pageIndex = (int)(pageStart >> PageToPteShift);
+ ref ulong pageRef = ref _pageBitmap[pageIndex];
+
+ ulong pte = Volatile.Read(ref pageRef);
+ ulong state = ((pte >> bit) & 3);
+
+ if (state >= tag)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: false, exemptId);
+ return;
+ }
+ else if (state == 0)
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
+ }
+ }
+ else
+ {
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ ulong mask = startMask;
+
+ ulong anyTrackingTag = (ulong)HostMappedPtBits.WriteTrackedReplicated;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+
+ ulong pte = Volatile.Read(ref pageRef);
+ ulong mappedMask = mask & BlockMappedMask;
+
+ ulong mappedPte = pte | (pte >> 1);
+ if ((mappedPte & mappedMask) != mappedMask)
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
+ }
+
+ pte &= mask;
+ if ((pte & anyTrackingTag) != 0) // Search for any tracking.
+ {
+ // Writes trigger any tracking.
+ // Only trigger tracking from reads if both bits are set on any page.
+ if (write || (pte & (pte >> 1) & BlockMappedMask) != 0)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: false, exemptId);
+ break;
+ }
+ }
+
+ mask = ulong.MaxValue;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Computes the number of pages in a virtual address range.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <param name="startVa">The virtual address of the beginning of the first page</param>
+ /// <remarks>This function does not differentiate between allocated and unallocated pages.</remarks>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private int GetPagesCount(ulong va, ulong size, out ulong startVa)
+ {
+            // WARNING: Take care that the ulong operations below cannot overflow.
+ startVa = va & ~(ulong)PageMask;
+ ulong vaSpan = (va - startVa + size + PageMask) & ~(ulong)PageMask;
+
+ return (int)(vaSpan / PageSize);
+ }
+
+ /// <inheritdoc/>
+ public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection)
+ {
+ // Protection is inverted on software pages, since the default value is 0.
+ protection = (~protection) & MemoryPermission.ReadAndWrite;
+
+ int pages = GetPagesCount(va, size, out va);
+ ulong pageStart = va >> PageBits;
+
+ if (pages == 1)
+ {
+ ulong protTag = protection switch
+ {
+ MemoryPermission.None => (ulong)HostMappedPtBits.Mapped,
+ MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTracked,
+ _ => (ulong)HostMappedPtBits.ReadWriteTracked,
+ };
+
+ int bit = (int)((pageStart & 31) << 1);
+
+ ulong tagMask = 3UL << bit;
+ ulong invTagMask = ~tagMask;
+
+ ulong tag = protTag << bit;
+
+ int pageIndex = (int)(pageStart >> PageToPteShift);
+ ref ulong pageRef = ref _pageBitmap[pageIndex];
+
+ ulong pte;
+
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+ }
+ while ((pte & tagMask) != 0 && Interlocked.CompareExchange(ref pageRef, (pte & invTagMask) | tag, pte) != pte);
+ }
+ else
+ {
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ ulong mask = startMask;
+
+ ulong protTag = protection switch
+ {
+ MemoryPermission.None => (ulong)HostMappedPtBits.MappedReplicated,
+ MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTrackedReplicated,
+ _ => (ulong)HostMappedPtBits.ReadWriteTrackedReplicated,
+ };
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+
+ ulong pte;
+ ulong mappedMask;
+
+ // Change the protection of all 2 bit entries that are mapped.
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+
+ mappedMask = pte | (pte >> 1);
+ mappedMask |= (mappedMask & BlockMappedMask) << 1;
+ mappedMask &= mask; // Only update mapped pages within the given range.
+ }
+ while (Interlocked.CompareExchange(ref pageRef, (pte & (~mappedMask)) | (protTag & mappedMask), pte) != pte);
+
+ mask = ulong.MaxValue;
+ }
+ }
+
+ protection = protection switch
+ {
+ MemoryPermission.None => MemoryPermission.ReadAndWrite,
+ MemoryPermission.Write => MemoryPermission.Read,
+ _ => MemoryPermission.None
+ };
+
+ _addressSpace.Base.Reprotect(va, size, protection, false);
+ }
+
+ /// <inheritdoc/>
+ public CpuRegionHandle BeginTracking(ulong address, ulong size, int id)
+ {
+ return new CpuRegionHandle(Tracking.BeginTracking(address, size, id));
+ }
+
+ /// <inheritdoc/>
+ public CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity, int id)
+ {
+ return new CpuMultiRegionHandle(Tracking.BeginGranularTracking(address, size, handles, granularity, id));
+ }
+
+ /// <inheritdoc/>
+ public CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity, int id)
+ {
+ return new CpuSmartMultiRegionHandle(Tracking.BeginSmartGranularTracking(address, size, granularity, id));
+ }
+
+ /// <summary>
+ /// Adds the given address mapping to the page table.
+ /// </summary>
+ /// <param name="va">Virtual memory address</param>
+ /// <param name="size">Size to be mapped</param>
+ private void AddMapping(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, size, out _);
+ ulong pageStart = va >> PageBits;
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ ulong mask = startMask;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+
+ ulong pte;
+ ulong mappedMask;
+
+ // Map all 2-bit entries that are unmapped.
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+
+ mappedMask = pte | (pte >> 1);
+ mappedMask |= (mappedMask & BlockMappedMask) << 1;
+ mappedMask |= ~mask; // Treat everything outside the range as mapped, thus unchanged.
+ }
+ while (Interlocked.CompareExchange(ref pageRef, (pte & mappedMask) | (BlockMappedMask & (~mappedMask)), pte) != pte);
+
+ mask = ulong.MaxValue;
+ }
+ }
+
+ /// <summary>
+ /// Removes the given address mapping from the page table.
+ /// </summary>
+ /// <param name="va">Virtual memory address</param>
+ /// <param name="size">Size to be unmapped</param>
+ private void RemoveMapping(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, size, out _);
+ ulong pageStart = va >> PageBits;
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ startMask = ~startMask;
+ endMask = ~endMask;
+
+ ulong mask = startMask;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask |= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+ ulong pte;
+
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+ }
+ while (Interlocked.CompareExchange(ref pageRef, pte & mask, pte) != pte);
+
+ mask = 0;
+ }
+ }
+
+ /// <summary>
+ /// Disposes of resources used by the memory manager.
+ /// </summary>
+ protected override void Destroy()
+ {
+ _addressSpace.Dispose();
+ _memoryEh.Dispose();
+ }
+
+ private static void ThrowInvalidMemoryRegionException(string message) => throw new InvalidMemoryRegionException(message);
+ }
+}
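
A worked sketch (not part of the commit) of the page bitmap encoding used throughout the class above: each ulong packs the state of 32 pages at 2 bits each, and OR-ing a word with itself shifted right by one folds each pair onto BlockMappedMask so mapped-ness can be answered for a whole word at once.

    // 00 = unmapped, 01 = mapped, 10 = write tracked, 11 = read/write tracked.
    const ulong BlockMappedMask = 0x5555555555555555UL;

    ulong word = 0;
    word |= 0b01UL << (0 << 1);   // page 0: mapped
    word |= 0b10UL << (1 << 1);   // page 1: write tracked
    word |= 0b11UL << (2 << 1);   // page 2: read/write tracked

    // "Is every page in this range mapped?" folds each 2-bit entry down to its low bit.
    ulong folded = word | (word >> 1);
    bool pagesZeroToTwoMapped = (folded & 0b010101UL) == 0b010101UL;       // true
    bool wholeWordMapped = (folded & BlockMappedMask) == BlockMappedMask;  // false, pages 3..31 are unmapped
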
diff --git a/src/Ryujinx.Cpu/LoadState.cs b/src/Ryujinx.Cpu/LoadState.cs
new file mode 100644
index 00000000..1f2e1ae8
--- /dev/null
+++ b/src/Ryujinx.Cpu/LoadState.cs
@@ -0,0 +1,12 @@
+namespace Ryujinx.Cpu
+{
+ /// <summary>
+ /// Load state.
+ /// </summary>
+ public enum LoadState
+ {
+ Unloaded,
+ Loading,
+ Loaded
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Cpu/MemoryEhMeilleure.cs b/src/Ryujinx.Cpu/MemoryEhMeilleure.cs
new file mode 100644
index 00000000..0b434ea7
--- /dev/null
+++ b/src/Ryujinx.Cpu/MemoryEhMeilleure.cs
@@ -0,0 +1,62 @@
+using ARMeilleure.Signal;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Cpu
+{
+ public class MemoryEhMeilleure : IDisposable
+ {
+ private delegate bool TrackingEventDelegate(ulong address, ulong size, bool write);
+
+ private readonly MemoryTracking _tracking;
+ private readonly TrackingEventDelegate _trackingEvent;
+
+ private readonly ulong _baseAddress;
+ private readonly ulong _mirrorAddress;
+
+ public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryBlock addressSpaceMirror, MemoryTracking tracking)
+ {
+ _tracking = tracking;
+
+ _baseAddress = (ulong)addressSpace.Pointer;
+ ulong endAddress = _baseAddress + addressSpace.Size;
+
+ _trackingEvent = new TrackingEventDelegate(tracking.VirtualMemoryEvent);
+ bool added = NativeSignalHandler.AddTrackedRegion((nuint)_baseAddress, (nuint)endAddress, Marshal.GetFunctionPointerForDelegate(_trackingEvent));
+
+ if (!added)
+ {
+ throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
+ }
+
+ if (OperatingSystem.IsWindows())
+ {
+ // Add a tracking event with no signal handler for the mirror on Windows.
+ // The native handler has its own code to check for the partial overlap race when regions are protected by accident,
+ // and when there is no signal handler present.
+
+ _mirrorAddress = (ulong)addressSpaceMirror.Pointer;
+ ulong endAddressMirror = _mirrorAddress + addressSpace.Size;
+
+ bool addedMirror = NativeSignalHandler.AddTrackedRegion((nuint)_mirrorAddress, (nuint)endAddressMirror, IntPtr.Zero);
+
+ if (!addedMirror)
+ {
+ throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
+ }
+ }
+ }
+
+ public void Dispose()
+ {
+ NativeSignalHandler.RemoveTrackedRegion((nuint)_baseAddress);
+
+ if (_mirrorAddress != 0)
+ {
+ NativeSignalHandler.RemoveTrackedRegion((nuint)_mirrorAddress);
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/MemoryHelper.cs b/src/Ryujinx.Cpu/MemoryHelper.cs
new file mode 100644
index 00000000..194a0c35
--- /dev/null
+++ b/src/Ryujinx.Cpu/MemoryHelper.cs
@@ -0,0 +1,63 @@
+using Microsoft.IO;
+using Ryujinx.Common.Memory;
+using Ryujinx.Memory;
+using System;
+using System.IO;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Text;
+
+namespace Ryujinx.Cpu
+{
+ public static class MemoryHelper
+ {
+ public static void FillWithZeros(IVirtualMemoryManager memory, ulong position, int size)
+ {
+ int size8 = size & ~(8 - 1);
+
+ for (int offs = 0; offs < size8; offs += 8)
+ {
+ memory.Write<long>(position + (ulong)offs, 0);
+ }
+
+            for (int offs = size8; offs < size; offs++)
+ {
+ memory.Write<byte>(position + (ulong)offs, 0);
+ }
+ }
+
+ public unsafe static T Read<T>(IVirtualMemoryManager memory, ulong position) where T : unmanaged
+ {
+ return MemoryMarshal.Cast<byte, T>(memory.GetSpan(position, Unsafe.SizeOf<T>()))[0];
+ }
+
+ public unsafe static ulong Write<T>(IVirtualMemoryManager memory, ulong position, T value) where T : unmanaged
+ {
+ ReadOnlySpan<byte> data = MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateReadOnlySpan(ref value, 1));
+
+ memory.Write(position, data);
+
+ return (ulong)data.Length;
+ }
+
+ public static string ReadAsciiString(IVirtualMemoryManager memory, ulong position, long maxSize = -1)
+ {
+ using (RecyclableMemoryStream ms = MemoryStreamManager.Shared.GetStream())
+ {
+ for (long offs = 0; offs < maxSize || maxSize == -1; offs++)
+ {
+ byte value = memory.Read<byte>(position + (ulong)offs);
+
+ if (value == 0)
+ {
+ break;
+ }
+
+ ms.WriteByte(value);
+ }
+
+ return Encoding.ASCII.GetString(ms.GetReadOnlySequence());
+ }
+ }
+ }
+} \ No newline at end of file
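
A short usage sketch (not part of the commit) for the helpers above, assuming `memory` is an IVirtualMemoryManager and `address` is a mapped guest address:

    // Hypothetical guest structure at `address`: clear it, write a field, read it back, read a name.
    MemoryHelper.FillWithZeros(memory, address, 0x100);
    ulong bytesWritten = MemoryHelper.Write(memory, address, 0xCAFEu);                // returns 4 (sizeof(uint))
    uint field = MemoryHelper.Read<uint>(memory, address);
    string name = MemoryHelper.ReadAsciiString(memory, address + 0x10, maxSize: 0x20);
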
diff --git a/src/Ryujinx.Cpu/MemoryManagerBase.cs b/src/Ryujinx.Cpu/MemoryManagerBase.cs
new file mode 100644
index 00000000..d2fc7a19
--- /dev/null
+++ b/src/Ryujinx.Cpu/MemoryManagerBase.cs
@@ -0,0 +1,32 @@
+using Ryujinx.Memory;
+using System.Diagnostics;
+using System.Threading;
+
+namespace Ryujinx.Cpu
+{
+ public abstract class MemoryManagerBase : IRefCounted
+ {
+ private int _referenceCount;
+
+ public void IncrementReferenceCount()
+ {
+ int newRefCount = Interlocked.Increment(ref _referenceCount);
+
+ Debug.Assert(newRefCount >= 1);
+ }
+
+ public void DecrementReferenceCount()
+ {
+ int newRefCount = Interlocked.Decrement(ref _referenceCount);
+
+ Debug.Assert(newRefCount >= 0);
+
+ if (newRefCount == 0)
+ {
+ Destroy();
+ }
+ }
+
+ protected abstract void Destroy();
+ }
+}
diff --git a/src/Ryujinx.Cpu/PrivateMemoryAllocation.cs b/src/Ryujinx.Cpu/PrivateMemoryAllocation.cs
new file mode 100644
index 00000000..1327880e
--- /dev/null
+++ b/src/Ryujinx.Cpu/PrivateMemoryAllocation.cs
@@ -0,0 +1,41 @@
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu
+{
+ struct PrivateMemoryAllocation : IDisposable
+ {
+ private readonly PrivateMemoryAllocator _owner;
+ private readonly PrivateMemoryAllocator.Block _block;
+
+ public bool IsValid => _owner != null;
+ public MemoryBlock Memory => _block?.Memory;
+ public ulong Offset { get; }
+ public ulong Size { get; }
+
+ public PrivateMemoryAllocation(
+ PrivateMemoryAllocator owner,
+ PrivateMemoryAllocator.Block block,
+ ulong offset,
+ ulong size)
+ {
+ _owner = owner;
+ _block = block;
+ Offset = offset;
+ Size = size;
+ }
+
+ public (PrivateMemoryAllocation, PrivateMemoryAllocation) Split(ulong splitOffset)
+ {
+ PrivateMemoryAllocation left = new PrivateMemoryAllocation(_owner, _block, Offset, splitOffset);
+ PrivateMemoryAllocation right = new PrivateMemoryAllocation(_owner, _block, Offset + splitOffset, Size - splitOffset);
+
+ return (left, right);
+ }
+
+ public void Dispose()
+ {
+ _owner.Free(_block, Offset, Size);
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs b/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs
new file mode 100644
index 00000000..cbf1f1d9
--- /dev/null
+++ b/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs
@@ -0,0 +1,268 @@
+using Ryujinx.Common;
+using Ryujinx.Memory;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu
+{
+ class PrivateMemoryAllocator : PrivateMemoryAllocatorImpl<PrivateMemoryAllocator.Block>
+ {
+ public const ulong InvalidOffset = ulong.MaxValue;
+
+ public class Block : IComparable<Block>
+ {
+ public MemoryBlock Memory { get; private set; }
+ public ulong Size { get; }
+
+ private struct Range : IComparable<Range>
+ {
+ public ulong Offset { get; }
+ public ulong Size { get; }
+
+ public Range(ulong offset, ulong size)
+ {
+ Offset = offset;
+ Size = size;
+ }
+
+ public int CompareTo(Range other)
+ {
+ return Offset.CompareTo(other.Offset);
+ }
+ }
+
+ private readonly List<Range> _freeRanges;
+
+ public Block(MemoryBlock memory, ulong size)
+ {
+ Memory = memory;
+ Size = size;
+ _freeRanges = new List<Range>
+ {
+ new Range(0, size)
+ };
+ }
+
+ public ulong Allocate(ulong size, ulong alignment)
+ {
+ for (int i = 0; i < _freeRanges.Count; i++)
+ {
+ var range = _freeRanges[i];
+
+ ulong alignedOffset = BitUtils.AlignUp(range.Offset, alignment);
+ ulong sizeDelta = alignedOffset - range.Offset;
+ ulong usableSize = range.Size - sizeDelta;
+
+ if (sizeDelta < range.Size && usableSize >= size)
+ {
+ _freeRanges.RemoveAt(i);
+
+ if (sizeDelta != 0)
+ {
+ InsertFreeRange(range.Offset, sizeDelta);
+ }
+
+ ulong endOffset = range.Offset + range.Size;
+ ulong remainingSize = endOffset - (alignedOffset + size);
+ if (remainingSize != 0)
+ {
+ InsertFreeRange(endOffset - remainingSize, remainingSize);
+ }
+
+ return alignedOffset;
+ }
+ }
+
+ return InvalidOffset;
+ }
+
+ public void Free(ulong offset, ulong size)
+ {
+ InsertFreeRangeComingled(offset, size);
+ }
+
+ private void InsertFreeRange(ulong offset, ulong size)
+ {
+ var range = new Range(offset, size);
+ int index = _freeRanges.BinarySearch(range);
+ if (index < 0)
+ {
+ index = ~index;
+ }
+
+ _freeRanges.Insert(index, range);
+ }
+
+ private void InsertFreeRangeComingled(ulong offset, ulong size)
+ {
+ ulong endOffset = offset + size;
+ var range = new Range(offset, size);
+ int index = _freeRanges.BinarySearch(range);
+ if (index < 0)
+ {
+ index = ~index;
+ }
+
+ if (index < _freeRanges.Count && _freeRanges[index].Offset == endOffset)
+ {
+ endOffset = _freeRanges[index].Offset + _freeRanges[index].Size;
+ _freeRanges.RemoveAt(index);
+ }
+
+ if (index > 0 && _freeRanges[index - 1].Offset + _freeRanges[index - 1].Size == offset)
+ {
+ offset = _freeRanges[index - 1].Offset;
+ _freeRanges.RemoveAt(--index);
+ }
+
+ range = new Range(offset, endOffset - offset);
+
+ _freeRanges.Insert(index, range);
+ }
+
+ public bool IsTotallyFree()
+ {
+ if (_freeRanges.Count == 1 && _freeRanges[0].Size == Size)
+ {
+ Debug.Assert(_freeRanges[0].Offset == 0);
+ return true;
+ }
+
+ return false;
+ }
+
+ public int CompareTo(Block other)
+ {
+ return Size.CompareTo(other.Size);
+ }
+
+ public virtual void Destroy()
+ {
+ Memory.Dispose();
+ }
+ }
+
+ public PrivateMemoryAllocator(int blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
+ {
+ }
+
+ public PrivateMemoryAllocation Allocate(ulong size, ulong alignment)
+ {
+ var allocation = Allocate(size, alignment, CreateBlock);
+
+ return new PrivateMemoryAllocation(this, allocation.Block, allocation.Offset, allocation.Size);
+ }
+
+ private Block CreateBlock(MemoryBlock memory, ulong size)
+ {
+ return new Block(memory, size);
+ }
+ }
+
+ class PrivateMemoryAllocatorImpl<T> : IDisposable where T : PrivateMemoryAllocator.Block
+ {
+ private const ulong InvalidOffset = ulong.MaxValue;
+
+ public struct Allocation
+ {
+ public T Block { get; }
+ public ulong Offset { get; }
+ public ulong Size { get; }
+
+ public Allocation(T block, ulong offset, ulong size)
+ {
+ Block = block;
+ Offset = offset;
+ Size = size;
+ }
+ }
+
+ private readonly List<T> _blocks;
+
+ private readonly int _blockAlignment;
+ private readonly MemoryAllocationFlags _allocationFlags;
+
+ public PrivateMemoryAllocatorImpl(int blockAlignment, MemoryAllocationFlags allocationFlags)
+ {
+ _blocks = new List<T>();
+ _blockAlignment = blockAlignment;
+ _allocationFlags = allocationFlags;
+ }
+
+ protected Allocation Allocate(ulong size, ulong alignment, Func<MemoryBlock, ulong, T> createBlock)
+ {
+ // Ensure we have a sane alignment value.
+ if ((ulong)(int)alignment != alignment || (int)alignment <= 0)
+ {
+ throw new ArgumentOutOfRangeException(nameof(alignment), $"Invalid alignment 0x{alignment:X}.");
+ }
+
+ for (int i = 0; i < _blocks.Count; i++)
+ {
+ var block = _blocks[i];
+
+ if (block.Size >= size)
+ {
+ ulong offset = block.Allocate(size, alignment);
+ if (offset != InvalidOffset)
+ {
+ return new Allocation(block, offset, size);
+ }
+ }
+ }
+
+ ulong blockAlignedSize = BitUtils.AlignUp(size, (ulong)_blockAlignment);
+
+ var memory = new MemoryBlock(blockAlignedSize, _allocationFlags);
+ var newBlock = createBlock(memory, blockAlignedSize);
+
+ InsertBlock(newBlock);
+
+ ulong newBlockOffset = newBlock.Allocate(size, alignment);
+ Debug.Assert(newBlockOffset != InvalidOffset);
+
+ return new Allocation(newBlock, newBlockOffset, size);
+ }
+
+ public void Free(PrivateMemoryAllocator.Block block, ulong offset, ulong size)
+ {
+ block.Free(offset, size);
+
+ if (block.IsTotallyFree())
+ {
+ for (int i = 0; i < _blocks.Count; i++)
+ {
+ if (_blocks[i] == block)
+ {
+ _blocks.RemoveAt(i);
+ break;
+ }
+ }
+
+ block.Destroy();
+ }
+ }
+
+ private void InsertBlock(T block)
+ {
+ int index = _blocks.BinarySearch(block);
+ if (index < 0)
+ {
+ index = ~index;
+ }
+
+ _blocks.Insert(index, block);
+ }
+
+ public void Dispose()
+ {
+ for (int i = 0; i < _blocks.Count; i++)
+ {
+ _blocks[i].Destroy();
+ }
+
+ _blocks.Clear();
+ }
+ }
+} \ No newline at end of file
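
A minimal usage sketch (not part of the commit) of the allocator above, assuming a 1 MiB block alignment for illustration:

    var allocator = new PrivateMemoryAllocator(1 << 20, MemoryAllocationFlags.None);

    // Sub-allocates 0x2000 bytes aligned to 4 KiB; a backing MemoryBlock is created
    // on demand, and freed ranges are coalesced back into the owning block.
    PrivateMemoryAllocation allocation = allocator.Allocate(0x2000, 0x1000);
    MemoryBlock memory = allocation.Memory;
    ulong offset = allocation.Offset;

    allocation.Dispose(); // returns the range; a block that becomes fully free is destroyed
    allocator.Dispose();
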
diff --git a/src/Ryujinx.Cpu/Ryujinx.Cpu.csproj b/src/Ryujinx.Cpu/Ryujinx.Cpu.csproj
new file mode 100644
index 00000000..7da8da25
--- /dev/null
+++ b/src/Ryujinx.Cpu/Ryujinx.Cpu.csproj
@@ -0,0 +1,13 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+ <PropertyGroup>
+ <TargetFramework>net7.0</TargetFramework>
+ <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <ProjectReference Include="..\ARMeilleure\ARMeilleure.csproj" />
+ <ProjectReference Include="..\Ryujinx.Memory\Ryujinx.Memory.csproj" />
+ </ItemGroup>
+
+</Project>
diff --git a/src/Ryujinx.Cpu/TickSource.cs b/src/Ryujinx.Cpu/TickSource.cs
new file mode 100644
index 00000000..dc510bc2
--- /dev/null
+++ b/src/Ryujinx.Cpu/TickSource.cs
@@ -0,0 +1,45 @@
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu
+{
+ public class TickSource : ITickSource
+ {
+ private static Stopwatch _tickCounter;
+
+ private static double _hostTickFreq;
+
+ /// <inheritdoc/>
+ public ulong Frequency { get; }
+
+ /// <inheritdoc/>
+ public ulong Counter => (ulong)(ElapsedSeconds * Frequency);
+
+ /// <inheritdoc/>
+ public TimeSpan ElapsedTime => _tickCounter.Elapsed;
+
+ /// <inheritdoc/>
+ public double ElapsedSeconds => _tickCounter.ElapsedTicks * _hostTickFreq;
+
+ public TickSource(ulong frequency)
+ {
+ Frequency = frequency;
+ _hostTickFreq = 1.0 / Stopwatch.Frequency;
+
+ _tickCounter = new Stopwatch();
+ _tickCounter.Start();
+ }
+
+ /// <inheritdoc/>
+ public void Suspend()
+ {
+ _tickCounter.Stop();
+ }
+
+ /// <inheritdoc/>
+ public void Resume()
+ {
+ _tickCounter.Start();
+ }
+ }
+} \ No newline at end of file
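
A brief sketch (not part of the commit) of how the tick source above maps host time onto a guest counter, assuming a hypothetical 19.2 MHz guest frequency:

    var tickSource = new TickSource(19_200_000);

    // Counter scales elapsed host Stopwatch time by the guest frequency, so after
    // roughly one second of host time it reads close to 19_200_000.
    ulong ticks = tickSource.Counter;

    tickSource.Suspend(); // Counter stops advancing until Resume() is called.
    tickSource.Resume();
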
diff --git a/src/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs b/src/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs
new file mode 100644
index 00000000..0ed8bfc5
--- /dev/null
+++ b/src/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs
@@ -0,0 +1,28 @@
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+
+namespace Ryujinx.Cpu.Tracking
+{
+ public class CpuMultiRegionHandle : IMultiRegionHandle
+ {
+ private readonly MultiRegionHandle _impl;
+
+ public bool Dirty => _impl.Dirty;
+
+ internal CpuMultiRegionHandle(MultiRegionHandle impl)
+ {
+ _impl = impl;
+ }
+
+ public void Dispose() => _impl.Dispose();
+ public void ForceDirty(ulong address, ulong size) => _impl.ForceDirty(address, size);
+ public IEnumerable<IRegionHandle> GetHandles() => _impl.GetHandles();
+ public void QueryModified(Action<ulong, ulong> modifiedAction) => _impl.QueryModified(modifiedAction);
+ public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction) => _impl.QueryModified(address, size, modifiedAction);
+ public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction, int sequenceNumber) => _impl.QueryModified(address, size, modifiedAction, sequenceNumber);
+ public void RegisterAction(ulong address, ulong size, RegionSignal action) => _impl.RegisterAction(address, size, action);
+ public void RegisterPreciseAction(ulong address, ulong size, PreciseRegionSignal action) => _impl.RegisterPreciseAction(address, size, action);
+ public void SignalWrite() => _impl.SignalWrite();
+ }
+}
diff --git a/src/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs b/src/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs
new file mode 100644
index 00000000..e766460f
--- /dev/null
+++ b/src/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs
@@ -0,0 +1,37 @@
+using Ryujinx.Memory.Tracking;
+using System;
+
+namespace Ryujinx.Cpu.Tracking
+{
+ public class CpuRegionHandle : IRegionHandle
+ {
+ private readonly RegionHandle _impl;
+
+ public bool Dirty => _impl.Dirty;
+ public bool Unmapped => _impl.Unmapped;
+ public ulong Address => _impl.Address;
+ public ulong Size => _impl.Size;
+ public ulong EndAddress => _impl.EndAddress;
+
+ internal CpuRegionHandle(RegionHandle impl)
+ {
+ _impl = impl;
+ }
+
+ public void Dispose() => _impl.Dispose();
+ public bool DirtyOrVolatile() => _impl.DirtyOrVolatile();
+ public void ForceDirty() => _impl.ForceDirty();
+ public IRegionHandle GetHandle() => _impl;
+ public void RegisterAction(RegionSignal action) => _impl.RegisterAction(action);
+ public void RegisterPreciseAction(PreciseRegionSignal action) => _impl.RegisterPreciseAction(action);
+ public void RegisterDirtyEvent(Action action) => _impl.RegisterDirtyEvent(action);
+ public void Reprotect(bool asDirty = false) => _impl.Reprotect(asDirty);
+
+ public bool OverlapsWith(ulong address, ulong size) => _impl.OverlapsWith(address, size);
+
+ public bool RangeEquals(CpuRegionHandle other)
+ {
+ return _impl.RealAddress == other._impl.RealAddress && _impl.RealSize == other._impl.RealSize;
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs b/src/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs
new file mode 100644
index 00000000..665271c6
--- /dev/null
+++ b/src/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs
@@ -0,0 +1,26 @@
+using Ryujinx.Memory.Tracking;
+using System;
+
+namespace Ryujinx.Cpu.Tracking
+{
+ public class CpuSmartMultiRegionHandle : IMultiRegionHandle
+ {
+ private readonly SmartMultiRegionHandle _impl;
+
+ public bool Dirty => _impl.Dirty;
+
+ internal CpuSmartMultiRegionHandle(SmartMultiRegionHandle impl)
+ {
+ _impl = impl;
+ }
+
+ public void Dispose() => _impl.Dispose();
+ public void ForceDirty(ulong address, ulong size) => _impl.ForceDirty(address, size);
+ public void RegisterAction(RegionSignal action) => _impl.RegisterAction(action);
+ public void RegisterPreciseAction(PreciseRegionSignal action) => _impl.RegisterPreciseAction(action);
+ public void QueryModified(Action<ulong, ulong> modifiedAction) => _impl.QueryModified(modifiedAction);
+ public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction) => _impl.QueryModified(address, size, modifiedAction);
+ public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction, int sequenceNumber) => _impl.QueryModified(address, size, modifiedAction, sequenceNumber);
+ public void SignalWrite() => _impl.SignalWrite();
+ }
+}