Diffstat (limited to 'src/Ryujinx.Cpu/Jit/HostTracked')
 src/Ryujinx.Cpu/Jit/HostTracked/AddressIntrusiveRedBlackTree.cs         |  35
 src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartition.cs                | 708
 src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionAllocator.cs       | 202
 src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionMultiAllocation.cs | 101
 src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitioned.cs              | 407
 src/Ryujinx.Cpu/Jit/HostTracked/NativePageTable.cs                      | 223
6 files changed, 1676 insertions, 0 deletions
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressIntrusiveRedBlackTree.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressIntrusiveRedBlackTree.cs
new file mode 100644
index 00000000..0e244330
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressIntrusiveRedBlackTree.cs
@@ -0,0 +1,35 @@
+using Ryujinx.Common.Collections;
+using System;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+    internal class AddressIntrusiveRedBlackTree<T> : IntrusiveRedBlackTree<T> where T : IntrusiveRedBlackTreeNode<T>, IComparable<T>, IComparable<ulong>
+    {
+        /// <summary>
+        /// Retrieve the node that is considered equal to the specified address by the comparator.
+        /// </summary>
+        /// <param name="address">Address to compare with</param>
+        /// <returns>Node that is equal to <paramref name="address"/></returns>
+        public T GetNode(ulong address)
+        {
+            T node = Root;
+            while (node != null)
+            {
+                int cmp = node.CompareTo(address);
+                if (cmp < 0)
+                {
+                    node = node.Left;
+                }
+                else if (cmp > 0)
+                {
+                    node = node.Right;
+                }
+                else
+                {
+                    return node;
+                }
+            }
+            return null;
+        }
+    }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartition.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartition.cs
new file mode 100644
index 00000000..224c5edc
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartition.cs
@@ -0,0 +1,708 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Collections;
+using Ryujinx.Memory;
+using System;
+using System.Diagnostics;
+using System.Threading;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+    readonly struct PrivateRange
+    {
+        public readonly MemoryBlock Memory;
+        public readonly ulong Offset;
+        public readonly ulong Size;
+
+        public static PrivateRange Empty => new(null, 0, 0);
+
+        public PrivateRange(MemoryBlock memory, ulong offset, ulong size)
+        {
+            Memory = memory;
+            Offset = offset;
+            Size = size;
+        }
+    }
+
+    class AddressSpacePartition : IDisposable
+    {
+        public const ulong GuestPageSize = 0x1000;
+
+        private const int DefaultBlockAlignment = 1 << 20;
+
+        private enum MappingType : byte
+        {
+            None,
+            Private,
+        }
+
+        private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>, IComparable<ulong>
+        {
+            public ulong Address { get; private set; }
+            public ulong Size { get; private set; }
+            public ulong EndAddress => Address + Size;
+            public MappingType Type { get; private set; }
+
+            public Mapping(ulong address, ulong size, MappingType type)
+            {
+                Address = address;
+                Size = size;
+                Type = type;
+            }
+
+            public Mapping Split(ulong splitAddress)
+            {
+                ulong leftSize = splitAddress - Address;
+                ulong rightSize = EndAddress - splitAddress;
+
+                Mapping left = new(Address, leftSize, Type);
+
+                Address = splitAddress;
+                Size = rightSize;
+
+                return left;
+            }
+
+            public void UpdateState(MappingType newType)
+            {
+                Type = newType;
+            }
+
+            public void Extend(ulong sizeDelta)
+            {
+                Size += sizeDelta;
+            }
+
+            public int CompareTo(Mapping other)
+            {
+                if (Address < other.Address)
+                {
+                    return -1;
+                }
+                else if (Address <= other.EndAddress - 1UL)
+                {
+                    return 0;
+                }
+                else
+                {
+                    return 1;
+                }
+            }
+
+            public int CompareTo(ulong address)
+            {
+                if (address < Address)
+                {
+                    return -1;
+                }
+                else if (address <= EndAddress - 1UL)
+                {
+                    return 0;
+                }
+                else
+                {
+                    return 1;
+                }
+            }
+        }
+
+        private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>, IComparable<ulong>
+        {
+            public ulong Address { get; private set; }
+            public ulong Size { get; private set; }
+            public ulong EndAddress => Address + Size;
+            public PrivateMemoryAllocation PrivateAllocation { get; private set; }
+
+            public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
+            {
+                Address = address;
+                Size = size;
+                PrivateAllocation = privateAllocation;
+            }
+
+            public PrivateMapping Split(ulong splitAddress)
+            {
+                ulong leftSize = splitAddress - Address;
+                ulong rightSize = EndAddress - splitAddress;
+
+                Debug.Assert(leftSize > 0);
+                Debug.Assert(rightSize > 0);
+
+                (var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);
+
+                PrivateMapping left = new(Address, leftSize, leftAllocation);
+
+                Address = splitAddress;
+                Size = rightSize;
+
+                return left;
+            }
+
+            public void Map(AddressSpacePartitionMultiAllocation baseBlock, ulong baseAddress, PrivateMemoryAllocation newAllocation)
+            {
+                baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address - baseAddress, Size);
+                PrivateAllocation = newAllocation;
+            }
+
+            public void Unmap(AddressSpacePartitionMultiAllocation baseBlock, ulong baseAddress)
+            {
+                if (PrivateAllocation.IsValid)
+                {
+                    baseBlock.UnmapView(PrivateAllocation.Memory, Address - baseAddress, Size);
+                    PrivateAllocation.Dispose();
+                }
+
+                PrivateAllocation = default;
+            }
+
+            public void Extend(ulong sizeDelta)
+            {
+                Size += sizeDelta;
+            }
+
+            public int CompareTo(PrivateMapping other)
+            {
+                if (Address < other.Address)
+                {
+                    return -1;
+                }
+                else if (Address <= other.EndAddress - 1UL)
+                {
+                    return 0;
+                }
+                else
+                {
+                    return 1;
+                }
+            }
+
+            public int CompareTo(ulong address)
+            {
+                if (address < Address)
+                {
+                    return -1;
+                }
+                else if (address <= EndAddress - 1UL)
+                {
+                    return 0;
+                }
+                else
+                {
+                    return 1;
+                }
+            }
+        }
+
+        private readonly MemoryBlock _backingMemory;
+        private readonly AddressSpacePartitionMultiAllocation _baseMemory;
+        private readonly PrivateMemoryAllocator _privateMemoryAllocator;
+
+        private readonly AddressIntrusiveRedBlackTree<Mapping> _mappingTree;
+        private readonly AddressIntrusiveRedBlackTree<PrivateMapping> _privateTree;
+
+        private readonly ReaderWriterLockSlim _treeLock;
+
+        private readonly ulong _hostPageSize;
+
+        private ulong? _firstPagePa;
+        private ulong? _lastPagePa;
+        private ulong _cachedFirstPagePa;
+        private ulong _cachedLastPagePa;
+        private MemoryBlock _firstPageMemoryForUnmap;
+        private ulong _firstPageOffsetForLateMap;
+        private MemoryPermission _firstPageMemoryProtection;
+
+        public ulong Address { get; }
+        public ulong Size { get; }
+        public ulong EndAddress => Address + Size;
+
+        public AddressSpacePartition(AddressSpacePartitionAllocation baseMemory, MemoryBlock backingMemory, ulong address, ulong size)
+        {
+            _privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable);
+            _mappingTree = new AddressIntrusiveRedBlackTree<Mapping>();
+            _privateTree = new AddressIntrusiveRedBlackTree<PrivateMapping>();
+            _treeLock = new ReaderWriterLockSlim();
+
+            _mappingTree.Add(new Mapping(address, size, MappingType.None));
+            _privateTree.Add(new PrivateMapping(address, size, default));
+
+            _hostPageSize = MemoryBlock.GetPageSize();
+
+            _backingMemory = backingMemory;
+            _baseMemory = new(baseMemory);
+
+            _cachedFirstPagePa = ulong.MaxValue;
+            _cachedLastPagePa = ulong.MaxValue;
+
+            Address = address;
+            Size = size;
+        }
+
+        public bool IsEmpty()
+        {
+            _treeLock.EnterReadLock();
+
+            try
+            {
+                Mapping map = _mappingTree.GetNode(Address);
+
+                return map != null && map.Address == Address && map.Size == Size && map.Type == MappingType.None;
+            }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
+        }
+
+        public void Map(ulong va, ulong pa, ulong size)
+        {
+            Debug.Assert(va >= Address);
+            Debug.Assert(va + size <= EndAddress);
+
+            if (va == Address)
+            {
+                _firstPagePa = pa;
+            }
+
+            if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
+            {
+                _lastPagePa = pa + ((EndAddress - GuestPageSize) - va);
+            }
+
+            Update(va, pa, size, MappingType.Private);
+        }
+
+        public void Unmap(ulong va, ulong size)
+        {
+            Debug.Assert(va >= Address);
+            Debug.Assert(va + size <= EndAddress);
+
+            if (va == Address)
+            {
+                _firstPagePa = null;
+            }
+
+            if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
+            {
+                _lastPagePa = null;
+            }
+
+            Update(va, 0UL, size, MappingType.None);
+        }
+
+        public void ReprotectAligned(ulong va, ulong size, MemoryPermission protection)
+        {
+            Debug.Assert(va >= Address);
+            Debug.Assert(va + size <= EndAddress);
+
+            _baseMemory.Reprotect(va - Address, size, protection, false);
+
+            if (va == Address)
+            {
+                _firstPageMemoryProtection = protection;
+            }
+        }
+
+        public void Reprotect(
+            ulong va,
+            ulong size,
+            MemoryPermission protection,
+            AddressSpacePartitioned addressSpace,
+            Action<ulong, IntPtr, ulong> updatePtCallback)
+        {
+            if (_baseMemory.LazyInitMirrorForProtection(addressSpace, Address, Size, protection))
+            {
+                LateMap();
+            }
+
+            updatePtCallback(va, _baseMemory.GetPointerForProtection(va - Address, size, protection), size);
+        }
+
+        public IntPtr GetPointer(ulong va, ulong size)
+        {
+            Debug.Assert(va >= Address);
+            Debug.Assert(va + size <= EndAddress);
+
+            return _baseMemory.GetPointer(va - Address, size);
+        }
+
+        public void InsertBridgeAtEnd(AddressSpacePartition partitionAfter, bool useProtectionMirrors)
+        {
+            ulong firstPagePa = partitionAfter?._firstPagePa ?? ulong.MaxValue;
+            ulong lastPagePa = _lastPagePa ?? ulong.MaxValue;
+
+            if (firstPagePa != _cachedFirstPagePa || lastPagePa != _cachedLastPagePa)
+            {
+                if (partitionAfter != null && partitionAfter._firstPagePa.HasValue)
+                {
+                    (MemoryBlock firstPageMemory, ulong firstPageOffset) = partitionAfter.GetFirstPageMemoryAndOffset();
+
+                    _baseMemory.MapView(firstPageMemory, firstPageOffset, Size, _hostPageSize);
+
+                    if (!useProtectionMirrors)
+                    {
+                        _baseMemory.Reprotect(Size, _hostPageSize, partitionAfter._firstPageMemoryProtection, throwOnFail: false);
+                    }
+
+                    _firstPageMemoryForUnmap = firstPageMemory;
+                    _firstPageOffsetForLateMap = firstPageOffset;
+                }
+                else
+                {
+                    MemoryBlock firstPageMemoryForUnmap = _firstPageMemoryForUnmap;
+
+                    if (firstPageMemoryForUnmap != null)
+                    {
+                        _baseMemory.UnmapView(firstPageMemoryForUnmap, Size, _hostPageSize);
+                        _firstPageMemoryForUnmap = null;
+                    }
+                }
+
+                _cachedFirstPagePa = firstPagePa;
+                _cachedLastPagePa = lastPagePa;
+            }
+        }
+
+        public void ReprotectBridge(MemoryPermission protection)
+        {
+            if (_firstPageMemoryForUnmap != null)
+            {
+                _baseMemory.Reprotect(Size, _hostPageSize, protection, throwOnFail: false);
+            }
+        }
+
+        private (MemoryBlock, ulong) GetFirstPageMemoryAndOffset()
+        {
+            _treeLock.EnterReadLock();
+
+            try
+            {
+                PrivateMapping map = _privateTree.GetNode(Address);
+
+                if (map != null && map.PrivateAllocation.IsValid)
+                {
+                    return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (Address - map.Address));
+                }
+            }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
+
+            return (_backingMemory, _firstPagePa.Value);
+        }
+
+        public PrivateRange GetPrivateAllocation(ulong va)
+        {
+            _treeLock.EnterReadLock();
+
+            try
+            {
+                PrivateMapping map = _privateTree.GetNode(va);
+
+                if (map != null && map.PrivateAllocation.IsValid)
+                {
+                    return new(map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (va - map.Address), map.Size - (va - map.Address));
+                }
+            }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
+
+            return PrivateRange.Empty;
+        }
+
+        private void Update(ulong va, ulong pa, ulong size, MappingType type)
+        {
+            _treeLock.EnterWriteLock();
+
+            try
+            {
+                Mapping map = _mappingTree.GetNode(va);
+
+                Update(map, va, pa, size, type);
+            }
+            finally
+            {
+                _treeLock.ExitWriteLock();
+            }
+        }
+
+        private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
+        {
+            ulong endAddress = va + size;
+
+            for (; map != null; map = map.Successor)
+            {
+                if (map.Address < va)
+                {
+                    _mappingTree.Add(map.Split(va));
+                }
+
+                if (map.EndAddress > endAddress)
+                {
+                    Mapping newMap = map.Split(endAddress);
+                    _mappingTree.Add(newMap);
+                    map = newMap;
+                }
+
+                switch (type)
+                {
+                    case MappingType.None:
+                        ulong alignment = _hostPageSize;
+
+                        bool unmappedBefore = map.Predecessor == null ||
+                            (map.Predecessor.Type == MappingType.None && map.Predecessor.Address <= BitUtils.AlignDown(va, alignment));
+
+                        bool unmappedAfter = map.Successor == null ||
+                            (map.Successor.Type == MappingType.None && map.Successor.EndAddress >= BitUtils.AlignUp(endAddress, alignment));
+
+                        UnmapPrivate(va, size, unmappedBefore, unmappedAfter);
+                        break;
+                    case MappingType.Private:
+                        MapPrivate(va, size);
+                        break;
+                }
+
+                map.UpdateState(type);
+                map = TryCoalesce(map);
+
+                if (map.EndAddress >= endAddress)
+                {
+                    break;
+                }
+            }
+
+            return map;
+        }
+
+        private Mapping TryCoalesce(Mapping map)
+        {
+            Mapping previousMap = map.Predecessor;
+            Mapping nextMap = map.Successor;
+
+            if (previousMap != null && CanCoalesce(previousMap, map))
+            {
+                previousMap.Extend(map.Size);
+                _mappingTree.Remove(map);
+                map = previousMap;
+            }
+
+            if (nextMap != null && CanCoalesce(map, nextMap))
+            {
+                map.Extend(nextMap.Size);
+                _mappingTree.Remove(nextMap);
+            }
+
+            return map;
+        }
+
+        private static bool CanCoalesce(Mapping left, Mapping right)
+        {
+            return left.Type == right.Type;
+        }
+
+        private void MapPrivate(ulong va, ulong size)
+        {
+            ulong endAddress = va + size;
+
+            ulong alignment = _hostPageSize;
+
+            // Expand the range outwards based on page size to ensure that at least the requested region is mapped.
+            ulong vaAligned = BitUtils.AlignDown(va, alignment);
+            ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);
+
+            PrivateMapping map = _privateTree.GetNode(va);
+
+            for (; map != null; map = map.Successor)
+            {
+                if (!map.PrivateAllocation.IsValid)
+                {
+                    if (map.Address < vaAligned)
+                    {
+                        _privateTree.Add(map.Split(vaAligned));
+                    }
+
+                    if (map.EndAddress > endAddressAligned)
+                    {
+                        PrivateMapping newMap = map.Split(endAddressAligned);
+                        _privateTree.Add(newMap);
+                        map = newMap;
+                    }
+
+                    map.Map(_baseMemory, Address, _privateMemoryAllocator.Allocate(map.Size, _hostPageSize));
+                }
+
+                if (map.EndAddress >= endAddressAligned)
+                {
+                    break;
+                }
+            }
+        }
+
+        private void UnmapPrivate(ulong va, ulong size, bool unmappedBefore, bool unmappedAfter)
+        {
+            ulong endAddress = va + size;
+
+            ulong alignment = _hostPageSize;
+
+            // If the adjacent mappings are unmapped, expand the range outwards,
+            // otherwise shrink it inwards. We must ensure we won't unmap pages that might still be in use.
+            ulong vaAligned = unmappedBefore ? BitUtils.AlignDown(va, alignment) : BitUtils.AlignUp(va, alignment);
+            ulong endAddressAligned = unmappedAfter ? BitUtils.AlignUp(endAddress, alignment) : BitUtils.AlignDown(endAddress, alignment);
+
+            if (endAddressAligned <= vaAligned)
+            {
+                return;
+            }
+
+            PrivateMapping map = _privateTree.GetNode(vaAligned);
+
+            for (; map != null; map = map.Successor)
+            {
+                if (map.PrivateAllocation.IsValid)
+                {
+                    if (map.Address < vaAligned)
+                    {
+                        _privateTree.Add(map.Split(vaAligned));
+                    }
+
+                    if (map.EndAddress > endAddressAligned)
+                    {
+                        PrivateMapping newMap = map.Split(endAddressAligned);
+                        _privateTree.Add(newMap);
+                        map = newMap;
+                    }
+
+                    map.Unmap(_baseMemory, Address);
+                    map = TryCoalesce(map);
+                }
+
+                if (map.EndAddress >= endAddressAligned)
+                {
+                    break;
+                }
+            }
+        }
+
+        private PrivateMapping TryCoalesce(PrivateMapping map)
+        {
+            PrivateMapping previousMap = map.Predecessor;
+            PrivateMapping nextMap = map.Successor;
+
+            if (previousMap != null && CanCoalesce(previousMap, map))
+            {
+                previousMap.Extend(map.Size);
+                _privateTree.Remove(map);
+                map = previousMap;
+            }
+
+            if (nextMap != null && CanCoalesce(map, nextMap))
+            {
+                map.Extend(nextMap.Size);
+                _privateTree.Remove(nextMap);
+            }
+
+            return map;
+        }
+
+        private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
+        {
+            return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
+        }
+
+        private void LateMap()
+        {
+            // Map all existing private allocations.
+            // This is necessary to ensure mirrors that are lazily created have the same mappings as the main one.
+
+            PrivateMapping map = _privateTree.GetNode(Address);
+
+            for (; map != null; map = map.Successor)
+            {
+                if (map.PrivateAllocation.IsValid)
+                {
+                    _baseMemory.LateMapView(map.PrivateAllocation.Memory, map.PrivateAllocation.Offset, map.Address - Address, map.Size);
+                }
+            }
+
+            MemoryBlock firstPageMemory = _firstPageMemoryForUnmap;
+            ulong firstPageOffset = _firstPageOffsetForLateMap;
+
+            if (firstPageMemory != null)
+            {
+                _baseMemory.LateMapView(firstPageMemory, firstPageOffset, Size, _hostPageSize);
+            }
+        }
+
+        public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
+        {
+            _treeLock.EnterReadLock();
+
+            try
+            {
+                PrivateMapping map = _privateTree.GetNode(va);
+
+                nextVa = map.EndAddress;
+
+                if (map != null && map.PrivateAllocation.IsValid)
+                {
+                    ulong startOffset = va - map.Address;
+
+                    return new(
+                        map.PrivateAllocation.Memory,
+                        map.PrivateAllocation.Offset + startOffset,
+                        Math.Min(map.PrivateAllocation.Size - startOffset, size));
+                }
+            }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
+
+            return PrivateRange.Empty;
+        }
+
+        public bool HasPrivateAllocation(ulong va, ulong size, ulong startVa, ulong startSize, ref PrivateRange range)
+        {
+            ulong endVa = va + size;
+
+            _treeLock.EnterReadLock();
+
+            try
+            {
+                for (PrivateMapping map = _privateTree.GetNode(va); map != null && map.Address < endVa; map = map.Successor)
+                {
+                    if (map.PrivateAllocation.IsValid)
+                    {
+                        if (map.Address <= startVa && map.EndAddress >= startVa + startSize)
+                        {
+                            ulong startOffset = startVa - map.Address;
+
+                            range = new(
+                                map.PrivateAllocation.Memory,
+                                map.PrivateAllocation.Offset + startOffset,
+                                Math.Min(map.PrivateAllocation.Size - startOffset, startSize));
+                        }
+
+                        return true;
+                    }
+                }
+            }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
+
+            return false;
+        }
+
+        public void Dispose()
+        {
+            GC.SuppressFinalize(this);
+
+            _privateMemoryAllocator.Dispose();
+            _baseMemory.Dispose();
+        }
+    }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionAllocator.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionAllocator.cs
new file mode 100644
index 00000000..44dedb64
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionAllocator.cs
@@ -0,0 +1,202 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Collections;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Tracking;
+using System;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+    readonly struct AddressSpacePartitionAllocation : IDisposable
+    {
+        private readonly AddressSpacePartitionAllocator _owner;
+        private readonly PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation _allocation;
+
+        public IntPtr Pointer => (IntPtr)((ulong)_allocation.Block.Memory.Pointer + _allocation.Offset);
+
+        public bool IsValid => _owner != null;
+
+        public AddressSpacePartitionAllocation(
+            AddressSpacePartitionAllocator owner,
+            PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation allocation)
+        {
+            _owner = owner;
+            _allocation = allocation;
+        }
+
+        public void RegisterMapping(ulong va, ulong endVa)
+        {
+            _allocation.Block.AddMapping(_allocation.Offset, _allocation.Size, va, endVa);
+        }
+
+        public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
+        {
+            _allocation.Block.Memory.MapView(srcBlock, srcOffset, _allocation.Offset + dstOffset, size);
+        }
+
+        public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
+        {
+            _allocation.Block.Memory.UnmapView(srcBlock, _allocation.Offset + offset, size);
+        }
+
+        public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail)
+        {
+            _allocation.Block.Memory.Reprotect(_allocation.Offset + offset, size, permission, throwOnFail);
+        }
+
+        public IntPtr GetPointer(ulong offset, ulong size)
+        {
+            return _allocation.Block.Memory.GetPointer(_allocation.Offset + offset, size);
+        }
+
+        public void Dispose()
+        {
+            _allocation.Block.RemoveMapping(_allocation.Offset, _allocation.Size);
+            _owner.Free(_allocation.Block, _allocation.Offset, _allocation.Size);
+        }
+    }
+
+    class AddressSpacePartitionAllocator : PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>
+    {
+        private const ulong DefaultBlockAlignment = 1UL << 32; // 4GB
+
+        public class Block : PrivateMemoryAllocator.Block
+        {
+            private readonly MemoryTracking _tracking;
+            private readonly Func<ulong, ulong> _readPtCallback;
+            private readonly MemoryEhMeilleure _memoryEh;
+
+            private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>, IComparable<ulong>
+            {
+                public ulong Address { get; }
+                public ulong Size { get; }
+                public ulong EndAddress => Address + Size;
+                public ulong Va { get; }
+                public ulong EndVa { get; }
+
+                public Mapping(ulong address, ulong size, ulong va, ulong endVa)
+                {
+                    Address = address;
+                    Size = size;
+                    Va = va;
+                    EndVa = endVa;
+                }
+
+                public int CompareTo(Mapping other)
+                {
+                    if (Address < other.Address)
+                    {
+                        return -1;
+                    }
+                    else if (Address <= other.EndAddress - 1UL)
+                    {
+                        return 0;
+                    }
+                    else
+                    {
+                        return 1;
+                    }
+                }
+
+                public int CompareTo(ulong address)
+                {
+                    if (address < Address)
+                    {
+                        return -1;
+                    }
+                    else if (address <= EndAddress - 1UL)
+                    {
+                        return 0;
+                    }
+                    else
+                    {
+                        return 1;
+                    }
+                }
+            }
+
+            private readonly AddressIntrusiveRedBlackTree<Mapping> _mappingTree;
+            private readonly object _lock;
+
+            public Block(MemoryTracking tracking, Func<ulong, ulong> readPtCallback, MemoryBlock memory, ulong size, object locker) : base(memory, size)
+            {
+                _tracking = tracking;
+                _readPtCallback = readPtCallback;
+                _memoryEh = new(memory, null, tracking, VirtualMemoryEvent);
+                _mappingTree = new();
+                _lock = locker;
+            }
+
+            public void AddMapping(ulong offset, ulong size, ulong va, ulong endVa)
+            {
+                _mappingTree.Add(new(offset, size, va, endVa));
+            }
+
+            public void RemoveMapping(ulong offset, ulong size)
+            {
+                _mappingTree.Remove(_mappingTree.GetNode(offset));
+            }
+
+            private ulong VirtualMemoryEvent(ulong address, ulong size, bool write)
+            {
+                Mapping map;
+
+                lock (_lock)
+                {
+                    map = _mappingTree.GetNode(address);
+                }
+
+                if (map == null)
+                {
+                    return 0;
+                }
+
+                address -= map.Address;
+
+                ulong addressAligned = BitUtils.AlignDown(address, AddressSpacePartition.GuestPageSize);
+                ulong endAddressAligned = BitUtils.AlignUp(address + size, AddressSpacePartition.GuestPageSize);
+                ulong sizeAligned = endAddressAligned - addressAligned;
+
+                if (!_tracking.VirtualMemoryEvent(map.Va + addressAligned, sizeAligned, write))
+                {
+                    return 0;
+                }
+
+                return _readPtCallback(map.Va + address);
+            }
+
+            public override void Destroy()
+            {
+                _memoryEh.Dispose();
+
+                base.Destroy();
+            }
+        }
+
+        private readonly MemoryTracking _tracking;
+        private readonly Func<ulong, ulong> _readPtCallback;
+        private readonly object _lock;
+
+        public AddressSpacePartitionAllocator(
+            MemoryTracking tracking,
+            Func<ulong, ulong> readPtCallback,
+            object locker) : base(DefaultBlockAlignment, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible)
+        {
+            _tracking = tracking;
+            _readPtCallback = readPtCallback;
+            _lock = locker;
+        }
+
+        public AddressSpacePartitionAllocation Allocate(ulong va, ulong size)
+        {
+            AddressSpacePartitionAllocation allocation = new(this, Allocate(size, MemoryBlock.GetPageSize(), CreateBlock));
+            allocation.RegisterMapping(va, va + size);
+
+            return allocation;
+        }
+
+        private Block CreateBlock(MemoryBlock memory, ulong size)
+        {
+            return new Block(_tracking, _readPtCallback, memory, size, _lock);
+        }
+    }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionMultiAllocation.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionMultiAllocation.cs
new file mode 100644
index 00000000..3b065583
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionMultiAllocation.cs
@@ -0,0 +1,101 @@
+using Ryujinx.Memory;
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+    class AddressSpacePartitionMultiAllocation : IDisposable
+    {
+        private readonly AddressSpacePartitionAllocation _baseMemory;
+        private AddressSpacePartitionAllocation _baseMemoryRo;
+        private AddressSpacePartitionAllocation _baseMemoryNone;
+
+        public AddressSpacePartitionMultiAllocation(AddressSpacePartitionAllocation baseMemory)
+        {
+            _baseMemory = baseMemory;
+        }
+
+        public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
+        {
+            _baseMemory.MapView(srcBlock, srcOffset, dstOffset, size);
+
+            if (_baseMemoryRo.IsValid)
+            {
+                _baseMemoryRo.MapView(srcBlock, srcOffset, dstOffset, size);
+                _baseMemoryRo.Reprotect(dstOffset, size, MemoryPermission.Read, false);
+            }
+        }
+
+        public void LateMapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
+        {
+            _baseMemoryRo.MapView(srcBlock, srcOffset, dstOffset, size);
+            _baseMemoryRo.Reprotect(dstOffset, size, MemoryPermission.Read, false);
+        }
+
+        public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
+        {
+            _baseMemory.UnmapView(srcBlock, offset, size);
+
+            if (_baseMemoryRo.IsValid)
+            {
+                _baseMemoryRo.UnmapView(srcBlock, offset, size);
+            }
+        }
+
+        public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail)
+        {
+            _baseMemory.Reprotect(offset, size, permission, throwOnFail);
+        }
+
+        public IntPtr GetPointer(ulong offset, ulong size)
+        {
+            return _baseMemory.GetPointer(offset, size);
+        }
+
+        public bool LazyInitMirrorForProtection(AddressSpacePartitioned addressSpace, ulong blockAddress, ulong blockSize, MemoryPermission permission)
+        {
+            if (permission == MemoryPermission.None && !_baseMemoryNone.IsValid)
+            {
+                _baseMemoryNone = addressSpace.CreateAsPartitionAllocation(blockAddress, blockSize);
+            }
+            else if (permission == MemoryPermission.Read && !_baseMemoryRo.IsValid)
+            {
+                _baseMemoryRo = addressSpace.CreateAsPartitionAllocation(blockAddress, blockSize);
+
+                return true;
+            }
+
+            return false;
+        }
+
+        public IntPtr GetPointerForProtection(ulong offset, ulong size, MemoryPermission permission)
+        {
+            AddressSpacePartitionAllocation allocation = permission switch
+            {
+                MemoryPermission.ReadAndWrite => _baseMemory,
+                MemoryPermission.Read => _baseMemoryRo,
+                MemoryPermission.None => _baseMemoryNone,
+                _ => throw new ArgumentException($"Invalid protection \"{permission}\"."),
+            };
+
+            Debug.Assert(allocation.IsValid);
+
+            return allocation.GetPointer(offset, size);
+        }
+
+        public void Dispose()
+        {
+            _baseMemory.Dispose();
+
+            if (_baseMemoryRo.IsValid)
+            {
+                _baseMemoryRo.Dispose();
+            }
+
+            if (_baseMemoryNone.IsValid)
+            {
+                _baseMemoryNone.Dispose();
+            }
+        }
+    }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitioned.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitioned.cs
new file mode 100644
index 00000000..2cf2c248
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitioned.cs
@@ -0,0 +1,407 @@
+using Ryujinx.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+    class AddressSpacePartitioned : IDisposable
+    {
+        private const int PartitionBits = 25;
+        private const ulong PartitionSize = 1UL << PartitionBits;
+
+        private readonly MemoryBlock _backingMemory;
+        private readonly List<AddressSpacePartition> _partitions;
+        private readonly AddressSpacePartitionAllocator _asAllocator;
+        private readonly Action<ulong, IntPtr, ulong> _updatePtCallback;
+        private readonly bool _useProtectionMirrors;
+
+        public AddressSpacePartitioned(MemoryTracking tracking, MemoryBlock backingMemory, NativePageTable nativePageTable, bool useProtectionMirrors)
+        {
+            _backingMemory = backingMemory;
+            _partitions = new();
+            _asAllocator = new(tracking, nativePageTable.Read, _partitions);
+            _updatePtCallback = nativePageTable.Update;
+            _useProtectionMirrors = useProtectionMirrors;
+        }
+
+        public void Map(ulong va, ulong pa, ulong size)
+        {
+            ulong endVa = va + size;
+
+            lock (_partitions)
+            {
+                EnsurePartitionsLocked(va, size);
+
+                while (va < endVa)
+                {
+                    int partitionIndex = FindPartitionIndexLocked(va);
+                    AddressSpacePartition partition = _partitions[partitionIndex];
+
+                    (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+                    partition.Map(clampedVa, pa, clampedEndVa - clampedVa);
+
+                    ulong currentSize = clampedEndVa - clampedVa;
+
+                    va += currentSize;
+                    pa += currentSize;
+
+                    InsertOrRemoveBridgeIfNeeded(partitionIndex);
+                }
+            }
+        }
+
+        public void Unmap(ulong va, ulong size)
+        {
+            ulong endVa = va + size;
+
+            while (va < endVa)
+            {
+                AddressSpacePartition partition;
+
+                lock (_partitions)
+                {
+                    int partitionIndex = FindPartitionIndexLocked(va);
+                    if (partitionIndex < 0)
+                    {
+                        va += PartitionSize - (va & (PartitionSize - 1));
+
+                        continue;
+                    }
+
+                    partition = _partitions[partitionIndex];
+
+                    (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+                    partition.Unmap(clampedVa, clampedEndVa - clampedVa);
+
+                    va += clampedEndVa - clampedVa;
+
+                    InsertOrRemoveBridgeIfNeeded(partitionIndex);
+
+                    if (partition.IsEmpty())
+                    {
+                        _partitions.Remove(partition);
+                        partition.Dispose();
+                    }
+                }
+            }
+        }
+
+        public void Reprotect(ulong va, ulong size, MemoryPermission protection)
+        {
+            ulong endVa = va + size;
+
+            lock (_partitions)
+            {
+                while (va < endVa)
+                {
+                    AddressSpacePartition partition = FindPartitionWithIndex(va, out int partitionIndex);
+
+                    if (partition == null)
+                    {
+                        va += PartitionSize - (va & (PartitionSize - 1));
+
+                        continue;
+                    }
+
+                    (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+                    if (_useProtectionMirrors)
+                    {
+                        partition.Reprotect(clampedVa, clampedEndVa - clampedVa, protection, this, _updatePtCallback);
+                    }
+                    else
+                    {
+                        partition.ReprotectAligned(clampedVa, clampedEndVa - clampedVa, protection);
+
+                        if (clampedVa == partition.Address &&
+                            partitionIndex > 0 &&
+                            _partitions[partitionIndex - 1].EndAddress == partition.Address)
+                        {
+                            _partitions[partitionIndex - 1].ReprotectBridge(protection);
+                        }
+                    }
+
+                    va += clampedEndVa - clampedVa;
+                }
+            }
+        }
+
+        public PrivateRange GetPrivateAllocation(ulong va)
+        {
+            AddressSpacePartition partition = FindPartition(va);
+
+            if (partition == null)
+            {
+                return PrivateRange.Empty;
+            }
+
+            return partition.GetPrivateAllocation(va);
+        }
+
+        public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
+        {
+            AddressSpacePartition partition = FindPartition(va);
+
+            if (partition == null)
+            {
+                nextVa = (va & ~(PartitionSize - 1)) + PartitionSize;
+
+                return PrivateRange.Empty;
+            }
+
+            return partition.GetFirstPrivateAllocation(va, size, out nextVa);
+        }
+
+        public bool HasAnyPrivateAllocation(ulong va, ulong size, out PrivateRange range)
+        {
+            range = PrivateRange.Empty;
+
+            ulong startVa = va;
+            ulong endVa = va + size;
+
+            while (va < endVa)
+            {
+                AddressSpacePartition partition = FindPartition(va);
+
+                if (partition == null)
+                {
+                    va += PartitionSize - (va & (PartitionSize - 1));
+
+                    continue;
+                }
+
+                (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+                if (partition.HasPrivateAllocation(clampedVa, clampedEndVa - clampedVa, startVa, size, ref range))
+                {
+                    return true;
+                }
+
+                va += clampedEndVa - clampedVa;
+            }
+
+            return false;
+        }
+
+        private void InsertOrRemoveBridgeIfNeeded(int partitionIndex)
+        {
+            if (partitionIndex > 0)
+            {
+                if (_partitions[partitionIndex - 1].EndAddress == _partitions[partitionIndex].Address)
+                {
+                    _partitions[partitionIndex - 1].InsertBridgeAtEnd(_partitions[partitionIndex], _useProtectionMirrors);
+                }
+                else
+                {
+                    _partitions[partitionIndex - 1].InsertBridgeAtEnd(null, _useProtectionMirrors);
+                }
+            }
+
+            if (partitionIndex + 1 < _partitions.Count && _partitions[partitionIndex].EndAddress == _partitions[partitionIndex + 1].Address)
+            {
+                _partitions[partitionIndex].InsertBridgeAtEnd(_partitions[partitionIndex + 1], _useProtectionMirrors);
+            }
+            else
+            {
+                _partitions[partitionIndex].InsertBridgeAtEnd(null, _useProtectionMirrors);
+            }
+        }
+
+        public IntPtr GetPointer(ulong va, ulong size)
+        {
+            AddressSpacePartition partition = FindPartition(va);
+
+            return partition.GetPointer(va, size);
+        }
+
+        private static (ulong, ulong) ClampRange(AddressSpacePartition partition, ulong va, ulong endVa)
+        {
+            if (va < partition.Address)
+            {
+                va = partition.Address;
+            }
+
+            if (endVa > partition.EndAddress)
+            {
+                endVa = partition.EndAddress;
+            }
+
+            return (va, endVa);
+        }
+
+        private AddressSpacePartition FindPartition(ulong va)
+        {
+            lock (_partitions)
+            {
+                int index = FindPartitionIndexLocked(va);
+                if (index >= 0)
+                {
+                    return _partitions[index];
+                }
+            }
+
+            return null;
+        }
+
+        private AddressSpacePartition FindPartitionWithIndex(ulong va, out int index)
+        {
+            lock (_partitions)
+            {
+                index = FindPartitionIndexLocked(va);
+                if (index >= 0)
+                {
+                    return _partitions[index];
+                }
+            }
+
+            return null;
+        }
+
+        private int FindPartitionIndexLocked(ulong va)
+        {
+            int left = 0;
+            int middle;
+            int right = _partitions.Count - 1;
+
+            while (left <= right)
+            {
+                middle = left + ((right - left) >> 1);
+
+                AddressSpacePartition partition = _partitions[middle];
+
+                if (partition.Address <= va && partition.EndAddress > va)
+                {
+                    return middle;
+                }
+
+                if (partition.Address >= va)
+                {
+                    right = middle - 1;
+                }
+                else
+                {
+                    left = middle + 1;
+                }
+            }
+
+            return -1;
+        }
+
+        private void EnsurePartitionsLocked(ulong va, ulong size)
+        {
+            ulong endVa = BitUtils.AlignUp(va + size, PartitionSize);
+            va = BitUtils.AlignDown(va, PartitionSize);
+
+            for (int i = 0; i < _partitions.Count && va < endVa; i++)
+            {
+                AddressSpacePartition partition = _partitions[i];
+
+                if (partition.Address <= va && partition.EndAddress > va)
+                {
+                    if (partition.EndAddress >= endVa)
+                    {
+                        // Fully mapped already.
+                        va = endVa;
+
+                        break;
+                    }
+
+                    ulong gapSize;
+
+                    if (i + 1 < _partitions.Count)
+                    {
+                        AddressSpacePartition nextPartition = _partitions[i + 1];
+
+                        if (partition.EndAddress == nextPartition.Address)
+                        {
+                            va = partition.EndAddress;
+
+                            continue;
+                        }
+
+                        gapSize = Math.Min(endVa, nextPartition.Address) - partition.EndAddress;
+                    }
+                    else
+                    {
+                        gapSize = endVa - partition.EndAddress;
+                    }
+
+                    _partitions.Insert(i + 1, CreateAsPartition(partition.EndAddress, gapSize));
+                    va = partition.EndAddress + gapSize;
+                    i++;
+                }
+                else if (partition.EndAddress > va)
+                {
+                    Debug.Assert(partition.Address > va);
+
+                    ulong gapSize;
+
+                    if (partition.Address < endVa)
+                    {
+                        gapSize = partition.Address - va;
+                    }
+                    else
+                    {
+                        gapSize = endVa - va;
+                    }
+
+                    _partitions.Insert(i, CreateAsPartition(va, gapSize));
+                    va = Math.Min(partition.EndAddress, endVa);
+                    i++;
+                }
+            }
+
+            if (va < endVa)
+            {
+                _partitions.Add(CreateAsPartition(va, endVa - va));
+            }
+
+            ValidatePartitionList();
+        }
+
+        [Conditional("DEBUG")]
+        private void ValidatePartitionList()
+        {
+            for (int i = 1; i < _partitions.Count; i++)
+            {
+                Debug.Assert(_partitions[i].Address > _partitions[i - 1].Address);
+                Debug.Assert(_partitions[i].EndAddress > _partitions[i - 1].EndAddress);
+            }
+        }
+
+        private AddressSpacePartition CreateAsPartition(ulong va, ulong size)
+        {
+            return new(CreateAsPartitionAllocation(va, size), _backingMemory, va, size);
+        }
+
+        public AddressSpacePartitionAllocation CreateAsPartitionAllocation(ulong va, ulong size)
+        {
+            return _asAllocator.Allocate(va, size + MemoryBlock.GetPageSize());
+        }
+
+        protected virtual void Dispose(bool disposing)
+        {
+            if (disposing)
+            {
+                foreach (AddressSpacePartition partition in _partitions)
+                {
+                    partition.Dispose();
+                }
+
+                _partitions.Clear();
+                _asAllocator.Dispose();
+            }
+        }
+
+        public void Dispose()
+        {
+            Dispose(disposing: true);
+            GC.SuppressFinalize(this);
+        }
+    }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/NativePageTable.cs b/src/Ryujinx.Cpu/Jit/HostTracked/NativePageTable.cs
new file mode 100644
index 00000000..e3174e3f
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/NativePageTable.cs
@@ -0,0 +1,223 @@
+using Ryujinx.Cpu.Signal;
+using Ryujinx.Memory;
+using System;
+using System.Diagnostics;
+using System.Numerics;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+    sealed class NativePageTable : IDisposable
+    {
+        private delegate ulong TrackingEventDelegate(ulong address, ulong size, bool write);
+
+        private const int PageBits = 12;
+        private const int PageSize = 1 << PageBits;
+        private const int PageMask = PageSize - 1;
+
+        private const int PteSize = 8;
+
+        private readonly int _bitsPerPtPage;
+        private readonly int _entriesPerPtPage;
+        private readonly int _pageCommitmentBits;
+
+        private readonly PageTable<ulong> _pageTable;
+        private readonly MemoryBlock _nativePageTable;
+        private readonly ulong[] _pageCommitmentBitmap;
+        private readonly ulong _hostPageSize;
+
+        private readonly TrackingEventDelegate _trackingEvent;
+
+        private bool _disposed;
+
+        public IntPtr PageTablePointer => _nativePageTable.Pointer;
+
+        public NativePageTable(ulong asSize)
+        {
+            ulong hostPageSize = MemoryBlock.GetPageSize();
+
+            _entriesPerPtPage = (int)(hostPageSize / sizeof(ulong));
+            _bitsPerPtPage = BitOperations.Log2((uint)_entriesPerPtPage);
+            _pageCommitmentBits = PageBits + _bitsPerPtPage;
+
+            _hostPageSize = hostPageSize;
+            _pageTable = new PageTable<ulong>();
+            _nativePageTable = new MemoryBlock((asSize / PageSize) * PteSize + _hostPageSize, MemoryAllocationFlags.Reserve);
+            _pageCommitmentBitmap = new ulong[(asSize >> _pageCommitmentBits) / (sizeof(ulong) * 8)];
+
+            ulong ptStart = (ulong)_nativePageTable.Pointer;
+            ulong ptEnd = ptStart + _nativePageTable.Size;
+
+            _trackingEvent = VirtualMemoryEvent;
+
+            bool added = NativeSignalHandler.AddTrackedRegion((nuint)ptStart, (nuint)ptEnd, Marshal.GetFunctionPointerForDelegate(_trackingEvent));
+
+            if (!added)
+            {
+                throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
+            }
+        }
+
+        public void Map(ulong va, ulong pa, ulong size, AddressSpacePartitioned addressSpace, MemoryBlock backingMemory, bool privateMap)
+        {
+            while (size != 0)
+            {
+                _pageTable.Map(va, pa);
+
+                EnsureCommitment(va);
+
+                if (privateMap)
+                {
+                    _nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, addressSpace.GetPointer(va, PageSize)));
+                }
+                else
+                {
+                    _nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, backingMemory.GetPointer(pa, PageSize)));
+                }
+
+                va += PageSize;
+                pa += PageSize;
+                size -= PageSize;
+            }
+        }
+
+        public void Unmap(ulong va, ulong size)
+        {
+            IntPtr guardPagePtr = GetGuardPagePointer();
+
+            while (size != 0)
+            {
+                _pageTable.Unmap(va);
+                _nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, guardPagePtr));
+
+                va += PageSize;
+                size -= PageSize;
+            }
+        }
+
+        public ulong Read(ulong va)
+        {
+            ulong pte = _nativePageTable.Read<ulong>((va / PageSize) * PteSize);
+
+            pte += va & ~(ulong)PageMask;
+
+            return pte + (va & PageMask);
+        }
+
+        public void Update(ulong va, IntPtr ptr, ulong size)
+        {
+            ulong remainingSize = size;
+
+            while (remainingSize != 0)
+            {
+                EnsureCommitment(va);
+
+                _nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, ptr));
+
+                va += PageSize;
+                ptr += PageSize;
+                remainingSize -= PageSize;
+            }
+        }
+
+        private void EnsureCommitment(ulong va)
+        {
+            ulong bit = va >> _pageCommitmentBits;
+
+            int index = (int)(bit / (sizeof(ulong) * 8));
+            int shift = (int)(bit % (sizeof(ulong) * 8));
+
+            ulong mask = 1UL << shift;
+
+            ulong oldMask = _pageCommitmentBitmap[index];
+
+            if ((oldMask & mask) == 0)
+            {
+                lock (_pageCommitmentBitmap)
+                {
+                    oldMask = _pageCommitmentBitmap[index];
+
+                    if ((oldMask & mask) != 0)
+                    {
+                        return;
+                    }
+
+                    _nativePageTable.Commit(bit * _hostPageSize, _hostPageSize);
+
+                    Span<ulong> pageSpan = MemoryMarshal.Cast<byte, ulong>(_nativePageTable.GetSpan(bit * _hostPageSize, (int)_hostPageSize));
+
+                    Debug.Assert(pageSpan.Length == _entriesPerPtPage);
+
+                    IntPtr guardPagePtr = GetGuardPagePointer();
+
+                    for (int i = 0; i < pageSpan.Length; i++)
+                    {
+                        pageSpan[i] = GetPte((bit << _pageCommitmentBits) | ((ulong)i * PageSize), guardPagePtr);
+                    }
+
+                    _pageCommitmentBitmap[index] = oldMask | mask;
+                }
+            }
+        }
+
+        private IntPtr GetGuardPagePointer()
+        {
+            return _nativePageTable.GetPointer(_nativePageTable.Size - _hostPageSize, _hostPageSize);
+        }
+
+        private static ulong GetPte(ulong va, IntPtr ptr)
+        {
+            Debug.Assert((va & PageMask) == 0);
+
+            return (ulong)ptr - va;
+        }
+
+        public ulong GetPhysicalAddress(ulong va)
+        {
+            return _pageTable.Read(va) + (va & PageMask);
+        }
+
+        private ulong VirtualMemoryEvent(ulong address, ulong size, bool write)
+        {
+            if (address < _nativePageTable.Size - _hostPageSize)
+            {
+                // Some prefetch instructions do not cause faults with invalid addresses.
+                // Retry if we are hitting a case where the page table is unmapped, the next
+                // run will execute the actual instruction.
+                // The address loaded from the page table will be invalid, and it should hit the else case
+                // if the instruction faults on unmapped or protected memory.
+
+                ulong va = address * (PageSize / sizeof(ulong));
+
+                EnsureCommitment(va);
+
+                return (ulong)_nativePageTable.Pointer + address;
+            }
+            else
+            {
+                throw new InvalidMemoryRegionException();
+            }
+        }
+
+        private void Dispose(bool disposing)
+        {
+            if (!_disposed)
+            {
+                if (disposing)
+                {
+                    NativeSignalHandler.RemoveTrackedRegion((nuint)_nativePageTable.Pointer);
+
+                    _nativePageTable.Dispose();
+                }
+
+                _disposed = true;
+            }
+        }
+
+        public void Dispose()
+        {
+            Dispose(disposing: true);
+            GC.SuppressFinalize(this);
+        }
+    }
+}
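Editor's note (not part of the diff): the PTE encoding used by NativePageTable is easy to miss. GetPte() stores the difference between a page's host pointer and its guest VA, so Read() translates with a single load and two adds, with no branching in the fast path. The sketch below illustrates this under assumed, hypothetical address values; unmapped entries instead hold a delta pointing at the reserved guard page at the end of the table, so a stale access appears to fault there.

// Sketch of NativePageTable's delta-encoded PTEs (addresses are hypothetical).
const ulong PageMask = 0xFFF;

ulong hostPointer = 0x7F80_0000_0000; // assumed host address backing the page
ulong guestPageVa = 0x0000_8000_3000; // guest VA of the same page (page aligned)

ulong pte = hostPointer - guestPageVa; // what GetPte() writes into the table

// What Read(va) computes for an address inside that page:
ulong va = guestPageVa + 0x123;
ulong host = pte + (va & ~PageMask) + (va & PageMask); // == hostPointer + 0x123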
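A second editor's sketch (again not part of the diff) spells out two pieces of arithmetic in AddressSpacePartitioned: the partition-boundary skip used when a lookup misses, and the extra host page each partition allocates. As far as the code shows, CreateAsPartitionAllocation() requests size + MemoryBlock.GetPageSize(), and InsertBridgeAtEnd() maps the next partition's first page into that extra page at offset Size, which appears to keep host-side accesses that straddle a partition boundary contiguous.

// PartitionBits = 25, so one partition covers 1 << 25 = 32 MiB of guest VA.
const ulong PartitionSize = 1UL << 25;

ulong va = 0x0123_4567; // hypothetical guest VA

// Skip-ahead used by Unmap/Reprotect/HasAnyPrivateAllocation when no partition covers va:
ulong nextBoundary = va + (PartitionSize - (va & (PartitionSize - 1))); // 0x0200_0000

// Start of the partition that would contain va:
ulong partitionBase = va & ~(PartitionSize - 1); // 0x0000_0000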
