aboutsummaryrefslogtreecommitdiff
path: root/Ryujinx.Memory
diff options
context:
space:
mode:
Diffstat (limited to 'Ryujinx.Memory')
-rw-r--r--Ryujinx.Memory/AddressSpaceManager.cs259
-rw-r--r--Ryujinx.Memory/IRefCounted.cs8
-rw-r--r--Ryujinx.Memory/IVirtualMemoryManager.cs112
-rw-r--r--Ryujinx.Memory/InvalidAccessHandler.cs9
-rw-r--r--Ryujinx.Memory/MemoryAllocationFlags.cs8
-rw-r--r--Ryujinx.Memory/MemoryBlock.cs117
-rw-r--r--Ryujinx.Memory/MemoryManagement.cs106
-rw-r--r--Ryujinx.Memory/MemoryManagementUnix.cs210
-rw-r--r--Ryujinx.Memory/MemoryManagementWindows.cs261
-rw-r--r--Ryujinx.Memory/MemoryPermission.cs7
-rw-r--r--Ryujinx.Memory/NativeMemoryManager.cs2
-rw-r--r--Ryujinx.Memory/PageTable.cs141
-rw-r--r--Ryujinx.Memory/Range/HostMemoryRange.cs71
-rw-r--r--Ryujinx.Memory/Tracking/IMultiRegionHandle.cs7
-rw-r--r--Ryujinx.Memory/Tracking/IRegionHandle.cs1
-rw-r--r--Ryujinx.Memory/Tracking/MemoryTracking.cs20
-rw-r--r--Ryujinx.Memory/Tracking/MultiRegionHandle.cs16
-rw-r--r--Ryujinx.Memory/Tracking/RegionHandle.cs77
-rw-r--r--Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs11
-rw-r--r--Ryujinx.Memory/Tracking/VirtualRegion.cs22
-rw-r--r--Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs698
-rw-r--r--Ryujinx.Memory/WindowsShared/PlaceholderList.cs291
-rw-r--r--Ryujinx.Memory/WindowsShared/WindowsFlags.cs52
23 files changed, 2251 insertions, 255 deletions
diff --git a/Ryujinx.Memory/AddressSpaceManager.cs b/Ryujinx.Memory/AddressSpaceManager.cs
index 916a3816..d8ee4746 100644
--- a/Ryujinx.Memory/AddressSpaceManager.cs
+++ b/Ryujinx.Memory/AddressSpaceManager.cs
@@ -1,4 +1,7 @@
-using System;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Linq;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
@@ -10,15 +13,9 @@ namespace Ryujinx.Memory
/// </summary>
public sealed class AddressSpaceManager : IVirtualMemoryManager, IWritableBlock
{
- public const int PageBits = 12;
- public const int PageSize = 1 << PageBits;
- public const int PageMask = PageSize - 1;
-
- private const int PtLevelBits = 9; // 9 * 4 + 12 = 48 (max address space size)
- private const int PtLevelSize = 1 << PtLevelBits;
- private const int PtLevelMask = PtLevelSize - 1;
-
- private const ulong Unmapped = ulong.MaxValue;
+ public const int PageBits = PageTable<nuint>.PageBits;
+ public const int PageSize = PageTable<nuint>.PageSize;
+ public const int PageMask = PageTable<nuint>.PageMask;
/// <summary>
/// Address space width in bits.
@@ -27,16 +24,14 @@ namespace Ryujinx.Memory
private readonly ulong _addressSpaceSize;
- private readonly MemoryBlock _backingMemory;
-
- private readonly ulong[][][][] _pageTable;
+ private readonly PageTable<nuint> _pageTable;
/// <summary>
/// Creates a new instance of the memory manager.
/// </summary>
/// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
/// <param name="addressSpaceSize">Size of the address space</param>
- public AddressSpaceManager(MemoryBlock backingMemory, ulong addressSpaceSize)
+ public AddressSpaceManager(ulong addressSpaceSize)
{
ulong asSize = PageSize;
int asBits = PageBits;
@@ -49,8 +44,7 @@ namespace Ryujinx.Memory
AddressSpaceBits = asBits;
_addressSpaceSize = asSize;
- _backingMemory = backingMemory;
- _pageTable = new ulong[PtLevelSize][][][];
+ _pageTable = new PageTable<nuint>();
}
/// <summary>
@@ -60,18 +54,18 @@ namespace Ryujinx.Memory
/// Addresses and size must be page aligned.
/// </remarks>
/// <param name="va">Virtual memory address</param>
- /// <param name="pa">Physical memory address</param>
+ /// <param name="hostAddress">Physical memory address</param>
/// <param name="size">Size to be mapped</param>
- public void Map(ulong va, ulong pa, ulong size)
+ public void Map(ulong va, nuint hostAddress, ulong size)
{
AssertValidAddressAndSize(va, size);
while (size != 0)
{
- PtMap(va, pa);
+ _pageTable.Map(va, hostAddress);
va += PageSize;
- pa += PageSize;
+ hostAddress += PageSize;
size -= PageSize;
}
}
@@ -87,7 +81,7 @@ namespace Ryujinx.Memory
while (size != 0)
{
- PtUnmap(va);
+ _pageTable.Unmap(va);
va += PageSize;
size -= PageSize;
@@ -146,7 +140,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, data.Length))
{
- data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
+ data.CopyTo(GetHostSpanContiguous(va, data.Length));
}
else
{
@@ -154,22 +148,18 @@ namespace Ryujinx.Memory
if ((va & PageMask) != 0)
{
- ulong pa = GetPhysicalAddressInternal(va);
-
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
- data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
+ data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
- ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
-
size = Math.Min(data.Length - offset, PageSize);
- data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
+ data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size));
}
}
}
@@ -195,7 +185,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, size))
{
- return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
+ return GetHostSpanContiguous(va, size);
}
else
{
@@ -219,7 +209,7 @@ namespace Ryujinx.Memory
/// <param name="size">Size of the data</param>
/// <returns>A writable region of memory containing the data</returns>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
- public WritableRegion GetWritableRegion(ulong va, int size)
+ public unsafe WritableRegion GetWritableRegion(ulong va, int size)
{
if (size == 0)
{
@@ -228,7 +218,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, size))
{
- return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
+ return new WritableRegion(null, va, new NativeMemoryManager<byte>((byte*)GetHostAddress(va), size).Memory);
}
else
{
@@ -250,14 +240,14 @@ namespace Ryujinx.Memory
/// <param name="va">Virtual address of the data</param>
/// <returns>A reference to the data in memory</returns>
/// <exception cref="MemoryNotContiguousException">Throw if the specified memory region is not contiguous in physical memory</exception>
- public ref T GetRef<T>(ulong va) where T : unmanaged
+ public unsafe ref T GetRef<T>(ulong va) where T : unmanaged
{
if (!IsContiguous(va, Unsafe.SizeOf<T>()))
{
ThrowMemoryNotContiguous();
}
- return ref _backingMemory.GetRef<T>(GetPhysicalAddressInternal(va));
+ return ref *(T*)GetHostAddress(va);
}
/// <summary>
@@ -299,7 +289,7 @@ namespace Ryujinx.Memory
return false;
}
- if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
+ if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize))
{
return false;
}
@@ -317,9 +307,48 @@ namespace Ryujinx.Memory
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of physical regions</returns>
- public (ulong address, ulong size)[] GetPhysicalRegions(ulong va, ulong size)
+ public IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
- throw new NotImplementedException();
+ if (size == 0)
+ {
+ return Enumerable.Empty<HostMemoryRange>();
+ }
+
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
+ {
+ return null;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ var regions = new List<HostMemoryRange>();
+
+ nuint regionStart = GetHostAddress(va);
+ ulong regionSize = PageSize;
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return null;
+ }
+
+ nuint newHostAddress = GetHostAddress(va + PageSize);
+
+ if (GetHostAddress(va) + PageSize != newHostAddress)
+ {
+ regions.Add(new HostMemoryRange(regionStart, regionSize));
+ regionStart = newHostAddress;
+ regionSize = 0;
+ }
+
+ va += PageSize;
+ regionSize += PageSize;
+ }
+
+ regions.Add(new HostMemoryRange(regionStart, regionSize));
+
+ return regions;
}
private void ReadImpl(ulong va, Span<byte> data)
@@ -335,22 +364,18 @@ namespace Ryujinx.Memory
if ((va & PageMask) != 0)
{
- ulong pa = GetPhysicalAddressInternal(va);
-
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
- _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
+ GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
- ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
-
size = Math.Min(data.Length - offset, PageSize);
- _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
+ GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size));
}
}
@@ -367,7 +392,7 @@ namespace Ryujinx.Memory
return false;
}
- return PtRead(va) != Unmapped;
+ return _pageTable.Read(va) != 0;
}
/// <summary>
@@ -434,28 +459,14 @@ namespace Ryujinx.Memory
}
}
- /// <summary>
- /// Performs address translation of the address inside a mapped memory range.
- /// </summary>
- /// <remarks>
- /// If the address is invalid or unmapped, -1 will be returned.
- /// </remarks>
- /// <param name="va">Virtual address to be translated</param>
- /// <returns>The physical address</returns>
- public ulong GetPhysicalAddress(ulong va)
+ private unsafe Span<byte> GetHostSpanContiguous(ulong va, int size)
{
- // We return -1L if the virtual address is invalid or unmapped.
- if (!ValidateAddress(va) || !IsMapped(va))
- {
- return ulong.MaxValue;
- }
-
- return GetPhysicalAddressInternal(va);
+ return new Span<byte>((void*)GetHostAddress(va), size);
}
- private ulong GetPhysicalAddressInternal(ulong va)
+ private nuint GetHostAddress(ulong va)
{
- return PtRead(va) + (va & PageMask);
+ return _pageTable.Read(va) + (nuint)(va & PageMask);
}
/// <summary>
@@ -469,132 +480,6 @@ namespace Ryujinx.Memory
throw new NotImplementedException();
}
- private ulong PtRead(ulong va)
- {
- int l3 = (int)(va >> PageBits) & PtLevelMask;
- int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
- int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask;
- int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask;
-
- if (_pageTable[l0] == null)
- {
- return Unmapped;
- }
-
- if (_pageTable[l0][l1] == null)
- {
- return Unmapped;
- }
-
- if (_pageTable[l0][l1][l2] == null)
- {
- return Unmapped;
- }
-
- return _pageTable[l0][l1][l2][l3];
- }
-
- private void PtMap(ulong va, ulong value)
- {
- int l3 = (int)(va >> PageBits) & PtLevelMask;
- int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
- int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask;
- int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask;
-
- if (_pageTable[l0] == null)
- {
- _pageTable[l0] = new ulong[PtLevelSize][][];
- }
-
- if (_pageTable[l0][l1] == null)
- {
- _pageTable[l0][l1] = new ulong[PtLevelSize][];
- }
-
- if (_pageTable[l0][l1][l2] == null)
- {
- _pageTable[l0][l1][l2] = new ulong[PtLevelSize];
-
- for (int i = 0; i < _pageTable[l0][l1][l2].Length; i++)
- {
- _pageTable[l0][l1][l2][i] = Unmapped;
- }
- }
-
- _pageTable[l0][l1][l2][l3] = value;
- }
-
- private void PtUnmap(ulong va)
- {
- int l3 = (int)(va >> PageBits) & PtLevelMask;
- int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
- int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask;
- int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask;
-
- if (_pageTable[l0] == null)
- {
- return;
- }
-
- if (_pageTable[l0][l1] == null)
- {
- return;
- }
-
- if (_pageTable[l0][l1][l2] == null)
- {
- return;
- }
-
- _pageTable[l0][l1][l2][l3] = Unmapped;
-
- bool empty = true;
-
- for (int i = 0; i < _pageTable[l0][l1][l2].Length; i++)
- {
- empty &= (_pageTable[l0][l1][l2][i] == Unmapped);
- }
-
- if (empty)
- {
- _pageTable[l0][l1][l2] = null;
-
- RemoveIfAllNull(l0, l1);
- }
- }
-
- private void RemoveIfAllNull(int l0, int l1)
- {
- bool empty = true;
-
- for (int i = 0; i < _pageTable[l0][l1].Length; i++)
- {
- empty &= (_pageTable[l0][l1][i] == null);
- }
-
- if (empty)
- {
- _pageTable[l0][l1] = null;
-
- RemoveIfAllNull(l0);
- }
- }
-
- private void RemoveIfAllNull(int l0)
- {
- bool empty = true;
-
- for (int i = 0; i < _pageTable[l0].Length; i++)
- {
- empty &= (_pageTable[l0][i] == null);
- }
-
- if (empty)
- {
- _pageTable[l0] = null;
- }
- }
-
public void SignalMemoryTracking(ulong va, ulong size, bool write)
{
// Only the ARM Memory Manager has tracking for now.
diff --git a/Ryujinx.Memory/IRefCounted.cs b/Ryujinx.Memory/IRefCounted.cs
new file mode 100644
index 00000000..e0a311d6
--- /dev/null
+++ b/Ryujinx.Memory/IRefCounted.cs
@@ -0,0 +1,8 @@
+namespace Ryujinx.Memory
+{
+ public interface IRefCounted
+ {
+ void IncrementReferenceCount();
+ void DecrementReferenceCount();
+ }
+}
diff --git a/Ryujinx.Memory/IVirtualMemoryManager.cs b/Ryujinx.Memory/IVirtualMemoryManager.cs
index f52c4b22..b5e08019 100644
--- a/Ryujinx.Memory/IVirtualMemoryManager.cs
+++ b/Ryujinx.Memory/IVirtualMemoryManager.cs
@@ -1,16 +1,61 @@
-using System;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
namespace Ryujinx.Memory
{
public interface IVirtualMemoryManager
{
- void Map(ulong va, ulong pa, ulong size);
+ /// <summary>
+ /// Maps a virtual memory range into a physical memory range.
+ /// </summary>
+ /// <remarks>
+ /// Addresses and size must be page aligned.
+ /// </remarks>
+ /// <param name="va">Virtual memory address</param>
+ /// <param name="hostAddress">Pointer where the region should be mapped to</param>
+ /// <param name="size">Size to be mapped</param>
+ void Map(ulong va, nuint hostAddress, ulong size);
+
+ /// <summary>
+ /// Unmaps a previously mapped range of virtual memory.
+ /// </summary>
+ /// <param name="va">Virtual address of the range to be unmapped</param>
+ /// <param name="size">Size of the range to be unmapped</param>
void Unmap(ulong va, ulong size);
+ /// <summary>
+ /// Reads data from CPU mapped memory.
+ /// </summary>
+ /// <typeparam name="T">Type of the data being read</typeparam>
+ /// <param name="va">Virtual address of the data in memory</param>
+ /// <returns>The data</returns>
+ /// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
T Read<T>(ulong va) where T : unmanaged;
+
+ /// <summary>
+ /// Reads data from CPU mapped memory.
+ /// </summary>
+ /// <param name="va">Virtual address of the data in memory</param>
+ /// <param name="data">Span to store the data being read into</param>
+ /// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
void Read(ulong va, Span<byte> data);
+ /// <summary>
+ /// Writes data to CPU mapped memory.
+ /// </summary>
+ /// <typeparam name="T">Type of the data being written</typeparam>
+ /// <param name="va">Virtual address to write the data into</param>
+ /// <param name="value">Data to be written</param>
+ /// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
void Write<T>(ulong va, T value) where T : unmanaged;
+
+ /// <summary>
+ /// Writes data to CPU mapped memory, with write tracking.
+ /// </summary>
+ /// <param name="va">Virtual address to write the data into</param>
+ /// <param name="data">Data to be written</param>
+ /// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
void Write(ulong va, ReadOnlySpan<byte> data);
void Fill(ulong va, ulong size, byte value)
@@ -25,17 +70,76 @@ namespace Ryujinx.Memory
}
}
+ /// <summary>
+ /// Gets a read-only span of data from CPU mapped memory.
+ /// </summary>
+ /// <param name="va">Virtual address of the data</param>
+ /// <param name="size">Size of the data</param>
+ /// <param name="tracked">True if read tracking is triggered on the span</param>
+ /// <returns>A read-only span of the data</returns>
+ /// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false);
+
+ /// <summary>
+ /// Gets a region of memory that can be written to.
+ /// </summary>
+ /// <param name="va">Virtual address of the data</param>
+ /// <param name="size">Size of the data</param>
+ /// <returns>A writable region of memory containing the data</returns>
+ /// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
WritableRegion GetWritableRegion(ulong va, int size);
+
+ /// <summary>
+ /// Gets a reference for the given type at the specified virtual memory address.
+ /// </summary>
+ /// <remarks>
+ /// The data must be located at a contiguous memory region.
+ /// </remarks>
+ /// <typeparam name="T">Type of the data to get the reference</typeparam>
+ /// <param name="va">Virtual address of the data</param>
+ /// <returns>A reference to the data in memory</returns>
+ /// <exception cref="MemoryNotContiguousException">Throw if the specified memory region is not contiguous in physical memory</exception>
ref T GetRef<T>(ulong va) where T : unmanaged;
- (ulong address, ulong size)[] GetPhysicalRegions(ulong va, ulong size);
+ /// <summary>
+ /// Gets the physical regions that make up the given virtual address region.
+ /// If any part of the virtual region is unmapped, null is returned.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <returns>Enumerable of host memory regions, or null if any part of the range is unmapped</returns>
+ IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size);
+ /// <summary>
+ /// Checks if the page at a given CPU virtual address is mapped.
+ /// </summary>
+ /// <param name="va">Virtual address to check</param>
+ /// <returns>True if the address is mapped, false otherwise</returns>
bool IsMapped(ulong va);
+
+ /// <summary>
+ /// Checks if a memory range is mapped.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <returns>True if the entire range is mapped, false otherwise</returns>
bool IsRangeMapped(ulong va, ulong size);
- ulong GetPhysicalAddress(ulong va);
+ /// <summary>
+ /// Alerts the memory tracking that a given region has been read from or written to.
+ /// This should be called before read/write is performed.
+ /// </summary>
+ /// <param name="va">Virtual address of the region</param>
+ /// <param name="size">Size of the region</param>
+ /// <param name="write">True if the region was written, false if read</param>
void SignalMemoryTracking(ulong va, ulong size, bool write);
+
+ /// <summary>
+ /// Reprotect a region of virtual memory for tracking.
+ /// </summary>
+ /// <param name="va">Virtual address base</param>
+ /// <param name="size">Size of the region to protect</param>
+ /// <param name="protection">Memory protection to set</param>
void TrackingReprotect(ulong va, ulong size, MemoryPermission protection);
}
}
diff --git a/Ryujinx.Memory/InvalidAccessHandler.cs b/Ryujinx.Memory/InvalidAccessHandler.cs
new file mode 100644
index 00000000..3dadb766
--- /dev/null
+++ b/Ryujinx.Memory/InvalidAccessHandler.cs
@@ -0,0 +1,9 @@
+namespace Ryujinx.Memory
+{
+ /// <summary>
+ /// Function that handles an invalid memory access from the emulated CPU.
+ /// </summary>
+ /// <param name="va">Virtual address of the invalid region that is being accessed</param>
+ /// <returns>True if the invalid access should be ignored, false otherwise</returns>
+ public delegate bool InvalidAccessHandler(ulong va);
+}
diff --git a/Ryujinx.Memory/MemoryAllocationFlags.cs b/Ryujinx.Memory/MemoryAllocationFlags.cs
index 94025d38..d9420dd3 100644
--- a/Ryujinx.Memory/MemoryAllocationFlags.cs
+++ b/Ryujinx.Memory/MemoryAllocationFlags.cs
@@ -23,6 +23,12 @@ namespace Ryujinx.Memory
/// Enables read and write tracking of the memory block.
/// This currently does nothing and is reserved for future use.
/// </summary>
- Tracked = 1 << 1
+ Tracked = 1 << 1,
+
+ /// <summary>
+ /// Enables mirroring of the memory block through aliasing of memory pages.
+ /// When enabled, this allows creating more memory blocks sharing the same backing storage.
+ /// </summary>
+ Mirrorable = 1 << 2
}
}
diff --git a/Ryujinx.Memory/MemoryBlock.cs b/Ryujinx.Memory/MemoryBlock.cs
index 4e775bba..e331a453 100644
--- a/Ryujinx.Memory/MemoryBlock.cs
+++ b/Ryujinx.Memory/MemoryBlock.cs
@@ -7,8 +7,11 @@ namespace Ryujinx.Memory
/// <summary>
/// Represents a block of contiguous physical guest memory.
/// </summary>
- public sealed class MemoryBlock : IDisposable
+ public sealed class MemoryBlock : IWritableBlock, IDisposable
{
+ private readonly bool _usesSharedMemory;
+ private readonly bool _isMirror;
+ private IntPtr _sharedMemory;
private IntPtr _pointer;
/// <summary>
@@ -22,15 +25,21 @@ namespace Ryujinx.Memory
public ulong Size { get; }
/// <summary>
- /// Initializes a new instance of the memory block class.
+ /// Creates a new instance of the memory block class.
/// </summary>
- /// <param name="size">Size of the memory block</param>
+ /// <param name="size">Size of the memory block in bytes</param>
/// <param name="flags">Flags that controls memory block memory allocation</param>
+ /// <exception cref="OutOfMemoryException">Throw when there's not enough memory to allocate the requested size</exception>
/// <exception cref="PlatformNotSupportedException">Throw when the current platform is not supported</exception>
public MemoryBlock(ulong size, MemoryAllocationFlags flags = MemoryAllocationFlags.None)
{
- if (flags.HasFlag(MemoryAllocationFlags.Reserve))
+ if (flags.HasFlag(MemoryAllocationFlags.Mirrorable))
+ {
+ _sharedMemory = MemoryManagement.CreateSharedMemory(size, flags.HasFlag(MemoryAllocationFlags.Reserve));
+ _pointer = MemoryManagement.MapSharedMemory(_sharedMemory);
+ _usesSharedMemory = true;
+ }
+ else if (flags.HasFlag(MemoryAllocationFlags.Reserve))
{
_pointer = MemoryManagement.Reserve(size);
}
@@ -43,6 +52,39 @@ namespace Ryujinx.Memory
}
/// <summary>
+ /// Creates a new instance of the memory block class, with an existing backing storage.
+ /// </summary>
+ /// <param name="size">Size of the memory block in bytes</param>
+ /// <param name="sharedMemory">Shared memory to use as backing storage for this block</param>
+ /// <exception cref="OutOfMemoryException">Throw when there's not enough address space left to map the shared memory</exception>
+ /// <exception cref="PlatformNotSupportedException">Throw when the current platform is not supported</exception>
+ private MemoryBlock(ulong size, IntPtr sharedMemory)
+ {
+ _pointer = MemoryManagement.MapSharedMemory(sharedMemory);
+ Size = size;
+ _usesSharedMemory = true;
+ _isMirror = true;
+ }
+
+ /// <summary>
+ /// Creates a memory block that shares the backing storage with this block.
+ /// The memory and page commitments will be shared, however memory protections are separate.
+ /// </summary>
+ /// <returns>A new memory block that shares storage with this one</returns>
+ /// <exception cref="NotSupportedException">Throw when the current memory block does not support mirroring</exception>
+ /// <exception cref="OutOfMemoryException">Throw when there's not enough address space left to map the shared memory</exception>
+ /// <exception cref="PlatformNotSupportedException">Throw when the current platform is not supported</exception>
+ public MemoryBlock CreateMirror()
+ {
+ if (_sharedMemory == IntPtr.Zero)
+ {
+ throw new NotSupportedException("Mirroring is not supported on the memory block because the Mirrorable flag was not set.");
+ }
+
+ return new MemoryBlock(Size, _sharedMemory);
+ }
+
+ /// <summary>
/// Commits a region of memory that has previously been reserved.
/// This can be used to allocate memory on demand.
/// </summary>
@@ -57,17 +99,46 @@ namespace Ryujinx.Memory
}
/// <summary>
+ /// Decommits a region of memory that has previously been reserved and optionally committed.
+ /// This can be used to free previously allocated memory on demand.
+ /// </summary>
+ /// <param name="offset">Starting offset of the range to be decommitted</param>
+ /// <param name="size">Size of the range to be decommitted</param>
+ /// <returns>True if the operation was successful, false otherwise</returns>
+ /// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
+ /// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
+ public bool Decommit(ulong offset, ulong size)
+ {
+ return MemoryManagement.Decommit(GetPointerInternal(offset, size), size);
+ }
+
+ /// <summary>
/// Reprotects a region of memory.
/// </summary>
/// <param name="offset">Starting offset of the range to be reprotected</param>
/// <param name="size">Size of the range to be reprotected</param>
/// <param name="permission">New memory permissions</param>
+ /// <param name="throwOnFail">True if a failed reprotect should throw</param>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
/// <exception cref="MemoryProtectionException">Throw when <paramref name="permission"/> is invalid</exception>
- public void Reprotect(ulong offset, ulong size, MemoryPermission permission)
+ public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail = true)
{
- MemoryManagement.Reprotect(GetPointerInternal(offset, size), size, permission);
+ MemoryManagement.Reprotect(GetPointerInternal(offset, size), size, permission, throwOnFail);
+ }
+
+ /// <summary>
+ /// Remaps a region of memory into this memory block.
+ /// </summary>
+ /// <param name="offset">Starting offset of the range to be remapped into</param>
+ /// <param name="sourceAddress">Starting offset of the range to be remapped from</param>
+ /// <param name="size">Size of the range to be remapped</param>
+ /// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
+ /// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
+ /// <exception cref="PlatformNotSupportedException">Throw when the current platform does not support memory remapping</exception>
+ public void Remap(ulong offset, IntPtr sourceAddress, ulong size)
+ {
+ MemoryManagement.Remap(GetPointerInternal(offset, size), sourceAddress, size);
}
/// <summary>
@@ -202,7 +273,7 @@ namespace Ryujinx.Memory
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
- public IntPtr GetPointer(ulong offset, int size) => GetPointerInternal(offset, (ulong)size);
+ public nuint GetPointer(ulong offset, ulong size) => (nuint)(ulong)GetPointerInternal(offset, size);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private IntPtr GetPointerInternal(ulong offset, ulong size)
@@ -235,7 +306,7 @@ namespace Ryujinx.Memory
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe Span<byte> GetSpan(ulong offset, int size)
{
- return new Span<byte>((void*)GetPointer(offset, size), size);
+ return new Span<byte>((void*)GetPointerInternal(offset, (ulong)size), size);
}
/// <summary>
@@ -249,7 +320,20 @@ namespace Ryujinx.Memory
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe Memory<byte> GetMemory(ulong offset, int size)
{
- return new NativeMemoryManager<byte>((byte*)GetPointer(offset, size), size).Memory;
+ return new NativeMemoryManager<byte>((byte*)GetPointerInternal(offset, (ulong)size), size).Memory;
+ }
+
+ /// <summary>
+ /// Gets a writable region of a given memory block region.
+ /// </summary>
+ /// <param name="offset">Start offset of the memory region</param>
+ /// <param name="size">Size in bytes of the region</param>
+ /// <returns>Writable region of the memory region</returns>
+ /// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
+ /// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
+ public WritableRegion GetWritableRegion(ulong offset, int size)
+ {
+ return new WritableRegion(this, offset, GetMemory(offset, size));
}
/// <summary>
@@ -280,7 +364,20 @@ namespace Ryujinx.Memory
// If pointer is null, the memory was already freed or never allocated.
if (ptr != IntPtr.Zero)
{
- MemoryManagement.Free(ptr);
+ if (_usesSharedMemory)
+ {
+ MemoryManagement.UnmapSharedMemory(ptr);
+
+ if (_sharedMemory != IntPtr.Zero && !_isMirror)
+ {
+ MemoryManagement.DestroySharedMemory(_sharedMemory);
+ _sharedMemory = IntPtr.Zero;
+ }
+ }
+ else
+ {
+ MemoryManagement.Free(ptr);
+ }
}
}
diff --git a/Ryujinx.Memory/MemoryManagement.cs b/Ryujinx.Memory/MemoryManagement.cs
index 2525bef7..3e5ec341 100644
--- a/Ryujinx.Memory/MemoryManagement.cs
+++ b/Ryujinx.Memory/MemoryManagement.cs
@@ -62,7 +62,26 @@ namespace Ryujinx.Memory
}
}
- public static void Reprotect(IntPtr address, ulong size, MemoryPermission permission)
+ public static bool Decommit(IntPtr address, ulong size)
+ {
+ if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
+ {
+ IntPtr sizeNint = new IntPtr((long)size);
+
+ return MemoryManagementWindows.Decommit(address, sizeNint);
+ }
+ else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) ||
+ RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
+ {
+ return MemoryManagementUnix.Decommit(address, size);
+ }
+ else
+ {
+ throw new PlatformNotSupportedException();
+ }
+ }
+
+ public static void Reprotect(IntPtr address, ulong size, MemoryPermission permission, bool throwOnFail)
{
bool result;
@@ -82,7 +101,7 @@ namespace Ryujinx.Memory
throw new PlatformNotSupportedException();
}
- if (!result)
+ if (!result && throwOnFail)
{
throw new MemoryProtectionException(permission);
}
@@ -104,5 +123,88 @@ namespace Ryujinx.Memory
throw new PlatformNotSupportedException();
}
}
+
+ public static IntPtr CreateSharedMemory(ulong size, bool reserve)
+ {
+ if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
+ {
+ IntPtr sizeNint = new IntPtr((long)size);
+
+ return MemoryManagementWindows.CreateSharedMemory(sizeNint, reserve);
+ }
+ else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) ||
+ RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
+ {
+ return MemoryManagementUnix.CreateSharedMemory(size, reserve);
+ }
+ else
+ {
+ throw new PlatformNotSupportedException();
+ }
+ }
+
+ public static void DestroySharedMemory(IntPtr handle)
+ {
+ if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
+ {
+ MemoryManagementWindows.DestroySharedMemory(handle);
+ }
+ else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) ||
+ RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
+ {
+ MemoryManagementUnix.DestroySharedMemory(handle);
+ }
+ else
+ {
+ throw new PlatformNotSupportedException();
+ }
+ }
+
+ public static IntPtr MapSharedMemory(IntPtr handle)
+ {
+ if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
+ {
+ return MemoryManagementWindows.MapSharedMemory(handle);
+ }
+ else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) ||
+ RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
+ {
+ return MemoryManagementUnix.MapSharedMemory(handle);
+ }
+ else
+ {
+ throw new PlatformNotSupportedException();
+ }
+ }
+
+ public static void UnmapSharedMemory(IntPtr address)
+ {
+ if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
+ {
+ MemoryManagementWindows.UnmapSharedMemory(address);
+ }
+ else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) ||
+ RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
+ {
+ MemoryManagementUnix.UnmapSharedMemory(address);
+ }
+ else
+ {
+ throw new PlatformNotSupportedException();
+ }
+ }
+
+ public static IntPtr Remap(IntPtr target, IntPtr source, ulong size)
+ {
+ if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) ||
+ RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
+ {
+ return MemoryManagementUnix.Remap(target, source, size);
+ }
+ else
+ {
+ throw new PlatformNotSupportedException();
+ }
+ }
}
} \ No newline at end of file
diff --git a/Ryujinx.Memory/MemoryManagementUnix.cs b/Ryujinx.Memory/MemoryManagementUnix.cs
index 81096867..69852787 100644
--- a/Ryujinx.Memory/MemoryManagementUnix.cs
+++ b/Ryujinx.Memory/MemoryManagementUnix.cs
@@ -1,11 +1,31 @@
using Mono.Unix.Native;
using System;
using System.Collections.Concurrent;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
namespace Ryujinx.Memory
{
static class MemoryManagementUnix
{
+ private struct UnixSharedMemory
+ {
+ public IntPtr Pointer;
+ public ulong Size;
+ public IntPtr SourcePointer;
+ }
+
+ [DllImport("libc", SetLastError = true)]
+ public static extern IntPtr mremap(IntPtr old_address, ulong old_size, ulong new_size, MremapFlags flags, IntPtr new_address);
+
+ [DllImport("libc", SetLastError = true)]
+ public static extern int madvise(IntPtr address, ulong size, int advice);
+
+ private const int MADV_DONTNEED = 4;
+ private const int MADV_REMOVE = 9;
+
+ private static readonly List<UnixSharedMemory> _sharedMemory = new List<UnixSharedMemory>();
+ private static readonly ConcurrentDictionary<IntPtr, ulong> _sharedMemorySource = new ConcurrentDictionary<IntPtr, ulong>();
private static readonly ConcurrentDictionary<IntPtr, ulong> _allocations = new ConcurrentDictionary<IntPtr, ulong>();
public static IntPtr Allocate(ulong size)
@@ -18,9 +38,23 @@ namespace Ryujinx.Memory
return AllocateInternal(size, MmapProts.PROT_NONE);
}
- private static IntPtr AllocateInternal(ulong size, MmapProts prot)
+ private static IntPtr AllocateInternal(ulong size, MmapProts prot, bool shared = false)
{
- const MmapFlags flags = MmapFlags.MAP_PRIVATE | MmapFlags.MAP_ANONYMOUS;
+ MmapFlags flags = MmapFlags.MAP_ANONYMOUS;
+
+ if (shared)
+ {
+ flags |= MmapFlags.MAP_SHARED | (MmapFlags)0x80000;
+ }
+ else
+ {
+ flags |= MmapFlags.MAP_PRIVATE;
+ }
+
+ if (prot == MmapProts.PROT_NONE)
+ {
+ flags |= MmapFlags.MAP_NORESERVE;
+ }
IntPtr ptr = Syscall.mmap(IntPtr.Zero, size, prot, flags, -1, 0);
@@ -40,7 +74,42 @@ namespace Ryujinx.Memory
public static bool Commit(IntPtr address, ulong size)
{
- return Syscall.mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE) == 0;
+ bool success = Syscall.mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE) == 0;
+
+ if (success)
+ {
+ foreach (var shared in _sharedMemory)
+ {
+ if ((ulong)address + size > (ulong)shared.SourcePointer && (ulong)address < (ulong)shared.SourcePointer + shared.Size)
+ {
+ ulong sharedAddress = ((ulong)address - (ulong)shared.SourcePointer) + (ulong)shared.Pointer;
+
+ if (Syscall.mprotect((IntPtr)sharedAddress, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE) != 0)
+ {
+ return false;
+ }
+ }
+ }
+ }
+
+ return success;
+ }
+
+ public static bool Decommit(IntPtr address, ulong size)
+ {
+ bool isShared;
+
+ lock (_sharedMemory)
+ {
+ isShared = _sharedMemory.Exists(x => (ulong)address >= (ulong)x.Pointer && (ulong)address + size <= (ulong)x.Pointer + x.Size);
+ }
+
+ // Must be writable for madvise to work properly.
+ Syscall.mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE);
+
+ madvise(address, size, isShared ? MADV_REMOVE : MADV_DONTNEED);
+
+ return Syscall.mprotect(address, size, MmapProts.PROT_NONE) == 0;
}
public static bool Reprotect(IntPtr address, ulong size, MemoryPermission permission)
@@ -71,5 +140,140 @@ namespace Ryujinx.Memory
return false;
}
+
+ public static IntPtr Remap(IntPtr target, IntPtr source, ulong size)
+ {
+ int flags = (int)MremapFlags.MREMAP_MAYMOVE;
+
+ if (target != IntPtr.Zero)
+ {
+ flags |= 2;
+ }
+
+ IntPtr result = mremap(source, 0, size, (MremapFlags)(flags), target);
+
+ if (result == IntPtr.Zero)
+ {
+ throw new InvalidOperationException();
+ }
+
+ return result;
+ }
+
+ public static IntPtr CreateSharedMemory(ulong size, bool reserve)
+ {
+ IntPtr result = AllocateInternal(
+ size,
+ reserve ? MmapProts.PROT_NONE : MmapProts.PROT_READ | MmapProts.PROT_WRITE,
+ true);
+
+ if (result == IntPtr.Zero)
+ {
+ throw new OutOfMemoryException();
+ }
+
+ _sharedMemorySource[result] = (ulong)size;
+
+ return result;
+ }
+
+ public static void DestroySharedMemory(IntPtr handle)
+ {
+ lock (_sharedMemory)
+ {
+ foreach (var memory in _sharedMemory)
+ {
+ if (memory.SourcePointer == handle)
+ {
+ throw new InvalidOperationException("Shared memory cannot be destroyed unless fully unmapped.");
+ }
+ }
+ }
+
+ _sharedMemorySource.Remove(handle, out ulong _);
+ }
+
+ public static IntPtr MapSharedMemory(IntPtr handle)
+ {
+ // Try to find the handle for this shared memory. If it is mapped, then we want to map
+ // it a second time in another location.
+ // If it is not mapped, then its handle is the mapping.
+
+ ulong size = _sharedMemorySource[handle];
+
+ if (size == 0)
+ {
+ throw new InvalidOperationException("Shared memory cannot be mapped after its source is unmapped.");
+ }
+
+ lock (_sharedMemory)
+ {
+ foreach (var memory in _sharedMemory)
+ {
+ if (memory.Pointer == handle)
+ {
+ IntPtr result = AllocateInternal(
+ memory.Size,
+ MmapProts.PROT_NONE
+ );
+
+ if (result == IntPtr.Zero)
+ {
+ throw new OutOfMemoryException();
+ }
+
+ Remap(result, handle, memory.Size);
+
+ _sharedMemory.Add(new UnixSharedMemory
+ {
+ Pointer = result,
+ Size = memory.Size,
+
+ SourcePointer = handle
+ });
+
+ return result;
+ }
+ }
+
+ _sharedMemory.Add(new UnixSharedMemory
+ {
+ Pointer = handle,
+ Size = size,
+
+ SourcePointer = handle
+ });
+ }
+
+ return handle;
+ }
+
+ public static void UnmapSharedMemory(IntPtr address)
+ {
+ lock (_sharedMemory)
+ {
+ int removed = _sharedMemory.RemoveAll(memory =>
+ {
+ if (memory.Pointer == address)
+ {
+ if (memory.Pointer == memory.SourcePointer)
+ {
+ // After removing the original mapping, it cannot be mapped again.
+ _sharedMemorySource[memory.SourcePointer] = 0;
+ }
+
+ Free(address);
+ return true;
+ }
+
+ return false;
+ });
+
+ if (removed == 0)
+ {
+ throw new InvalidOperationException("Shared memory mapping could not be found.");
+ }
+ }
+ }
}
} \ No newline at end of file
diff --git a/Ryujinx.Memory/MemoryManagementWindows.cs b/Ryujinx.Memory/MemoryManagementWindows.cs
index 9513bb54..b14fb6c1 100644
--- a/Ryujinx.Memory/MemoryManagementWindows.cs
+++ b/Ryujinx.Memory/MemoryManagementWindows.cs
@@ -1,57 +1,69 @@
-using System;
+using Ryujinx.Memory.WindowsShared;
+using System;
+using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace Ryujinx.Memory
{
static class MemoryManagementWindows
{
- [Flags]
- private enum AllocationType : uint
- {
- Commit = 0x1000,
- Reserve = 0x2000,
- Decommit = 0x4000,
- Release = 0x8000,
- Reset = 0x80000,
- Physical = 0x400000,
- TopDown = 0x100000,
- WriteWatch = 0x200000,
- LargePages = 0x20000000
- }
+ private static readonly IntPtr InvalidHandleValue = new IntPtr(-1);
+ private static bool UseWin10Placeholders;
- [Flags]
- private enum MemoryProtection : uint
- {
- NoAccess = 0x01,
- ReadOnly = 0x02,
- ReadWrite = 0x04,
- WriteCopy = 0x08,
- Execute = 0x10,
- ExecuteRead = 0x20,
- ExecuteReadWrite = 0x40,
- ExecuteWriteCopy = 0x80,
- GuardModifierflag = 0x100,
- NoCacheModifierflag = 0x200,
- WriteCombineModifierflag = 0x400
- }
+ private static object _emulatedHandleLock = new object();
+ private static EmulatedSharedMemoryWindows[] _emulatedShared = new EmulatedSharedMemoryWindows[64];
+ private static List<EmulatedSharedMemoryWindows> _emulatedSharedList = new List<EmulatedSharedMemoryWindows>();
- [DllImport("kernel32.dll")]
+ [DllImport("kernel32.dll", SetLastError = true)]
private static extern IntPtr VirtualAlloc(
IntPtr lpAddress,
IntPtr dwSize,
AllocationType flAllocationType,
MemoryProtection flProtect);
- [DllImport("kernel32.dll")]
+ [DllImport("kernel32.dll", SetLastError = true)]
private static extern bool VirtualProtect(
IntPtr lpAddress,
IntPtr dwSize,
MemoryProtection flNewProtect,
out MemoryProtection lpflOldProtect);
- [DllImport("kernel32.dll")]
+ [DllImport("kernel32.dll", SetLastError = true)]
private static extern bool VirtualFree(IntPtr lpAddress, IntPtr dwSize, AllocationType dwFreeType);
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern IntPtr CreateFileMapping(
+ IntPtr hFile,
+ IntPtr lpFileMappingAttributes,
+ FileMapProtection flProtect,
+ uint dwMaximumSizeHigh,
+ uint dwMaximumSizeLow,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpName);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool CloseHandle(IntPtr hObject);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern IntPtr MapViewOfFile(
+ IntPtr hFileMappingObject,
+ uint dwDesiredAccess,
+ uint dwFileOffsetHigh,
+ uint dwFileOffsetLow,
+ IntPtr dwNumberOfBytesToMap);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool UnmapViewOfFile(IntPtr lpBaseAddress);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern uint GetLastError();
+
+ static MemoryManagementWindows()
+ {
+ Version version = Environment.OSVersion.Version;
+
+ UseWin10Placeholders = (version.Major == 10 && version.Build >= 17134) || version.Major > 10;
+ }
+
public static IntPtr Allocate(IntPtr size)
{
return AllocateInternal(size, AllocationType.Reserve | AllocationType.Commit);
@@ -76,12 +88,68 @@ namespace Ryujinx.Memory
public static bool Commit(IntPtr location, IntPtr size)
{
+ if (UseWin10Placeholders)
+ {
+ lock (_emulatedSharedList)
+ {
+ foreach (var shared in _emulatedSharedList)
+ {
+ if (shared.CommitMap(location, size))
+ {
+ return true;
+ }
+ }
+ }
+ }
+
return VirtualAlloc(location, size, AllocationType.Commit, MemoryProtection.ReadWrite) != IntPtr.Zero;
}
+ public static bool Decommit(IntPtr location, IntPtr size)
+ {
+ if (UseWin10Placeholders)
+ {
+ lock (_emulatedSharedList)
+ {
+ foreach (var shared in _emulatedSharedList)
+ {
+ if (shared.DecommitMap(location, size))
+ {
+ return true;
+ }
+ }
+ }
+ }
+
+ return VirtualFree(location, size, AllocationType.Decommit);
+ }
+
public static bool Reprotect(IntPtr address, IntPtr size, MemoryPermission permission)
{
- return VirtualProtect(address, size, GetProtection(permission), out _);
+ if (UseWin10Placeholders)
+ {
+ ulong uaddress = (ulong)address;
+ ulong usize = (ulong)size;
+ while (usize > 0)
+ {
+ ulong nextGranular = (uaddress & ~EmulatedSharedMemoryWindows.MappingMask) + EmulatedSharedMemoryWindows.MappingGranularity;
+ ulong mapSize = Math.Min(usize, nextGranular - uaddress);
+
+ if (!VirtualProtect((IntPtr)uaddress, (IntPtr)mapSize, GetProtection(permission), out _))
+ {
+ return false;
+ }
+
+ uaddress = nextGranular;
+ usize -= mapSize;
+ }
+
+ return true;
+ }
+ else
+ {
+ return VirtualProtect(address, size, GetProtection(permission), out _);
+ }
}
private static MemoryProtection GetProtection(MemoryPermission permission)
@@ -102,5 +170,132 @@ namespace Ryujinx.Memory
{
return VirtualFree(address, IntPtr.Zero, AllocationType.Release);
}
+
+ private static int GetEmulatedHandle()
+ {
+ // Assumes we have the handle lock.
+
+ for (int i = 0; i < _emulatedShared.Length; i++)
+ {
+ if (_emulatedShared[i] == null)
+ {
+ return i + 1;
+ }
+ }
+
+ throw new InvalidProgramException("Too many shared memory handles were created.");
+ }
+
+ public static bool EmulatedHandleValid(ref int handle)
+ {
+ handle--;
+ return handle >= 0 && handle < _emulatedShared.Length && _emulatedShared[handle] != null;
+ }
+
+ public static IntPtr CreateSharedMemory(IntPtr size, bool reserve)
+ {
+ if (UseWin10Placeholders && reserve)
+ {
+ lock (_emulatedHandleLock)
+ {
+ int handle = GetEmulatedHandle();
+ _emulatedShared[handle - 1] = new EmulatedSharedMemoryWindows((ulong)size);
+ _emulatedSharedList.Add(_emulatedShared[handle - 1]);
+
+ return (IntPtr)handle;
+ }
+ }
+ else
+ {
+ var prot = reserve ? FileMapProtection.SectionReserve : FileMapProtection.SectionCommit;
+
+ IntPtr handle = CreateFileMapping(
+ InvalidHandleValue,
+ IntPtr.Zero,
+ FileMapProtection.PageReadWrite | prot,
+ (uint)(size.ToInt64() >> 32),
+ (uint)size.ToInt64(),
+ null);
+
+ if (handle == IntPtr.Zero)
+ {
+ throw new OutOfMemoryException();
+ }
+
+ return handle;
+ }
+ }
+
+ public static void DestroySharedMemory(IntPtr handle)
+ {
+ if (UseWin10Placeholders)
+ {
+ lock (_emulatedHandleLock)
+ {
+ int iHandle = (int)(ulong)handle;
+
+ if (EmulatedHandleValid(ref iHandle))
+ {
+ _emulatedSharedList.Remove(_emulatedShared[iHandle]);
+ _emulatedShared[iHandle].Dispose();
+ _emulatedShared[iHandle] = null;
+
+ return;
+ }
+ }
+ }
+
+ if (!CloseHandle(handle))
+ {
+ throw new ArgumentException("Invalid handle.", nameof(handle));
+ }
+ }
+
+ public static IntPtr MapSharedMemory(IntPtr handle)
+ {
+ if (UseWin10Placeholders)
+ {
+ lock (_emulatedHandleLock)
+ {
+ int iHandle = (int)(ulong)handle;
+
+ if (EmulatedHandleValid(ref iHandle))
+ {
+ return _emulatedShared[iHandle].Map();
+ }
+ }
+ }
+
+ IntPtr ptr = MapViewOfFile(handle, 4 | 2, 0, 0, IntPtr.Zero);
+
+ if (ptr == IntPtr.Zero)
+ {
+ throw new OutOfMemoryException();
+ }
+
+ return ptr;
+ }
+
+ public static void UnmapSharedMemory(IntPtr address)
+ {
+ if (UseWin10Placeholders)
+ {
+ lock (_emulatedHandleLock)
+ {
+ foreach (EmulatedSharedMemoryWindows shared in _emulatedSharedList)
+ {
+ if (shared.Unmap((ulong)address))
+ {
+ return;
+ }
+ }
+ }
+ }
+
+ if (!UnmapViewOfFile(address))
+ {
+ throw new ArgumentException("Invalid address.", nameof(address));
+ }
+ }
}
} \ No newline at end of file
diff --git a/Ryujinx.Memory/MemoryPermission.cs b/Ryujinx.Memory/MemoryPermission.cs
index 38f2d909..8c3e33cf 100644
--- a/Ryujinx.Memory/MemoryPermission.cs
+++ b/Ryujinx.Memory/MemoryPermission.cs
@@ -41,6 +41,11 @@ namespace Ryujinx.Memory
/// <summary>
/// Allow reads, writes, and code execution on the memory region.
/// </summary>
- ReadWriteExecute = Read | Write | Execute
+ ReadWriteExecute = Read | Write | Execute,
+
+ /// <summary>
+ /// Indicates an invalid protection.
+ /// </summary>
+ Invalid = 255
}
}
diff --git a/Ryujinx.Memory/NativeMemoryManager.cs b/Ryujinx.Memory/NativeMemoryManager.cs
index ef599dd4..d1757935 100644
--- a/Ryujinx.Memory/NativeMemoryManager.cs
+++ b/Ryujinx.Memory/NativeMemoryManager.cs
@@ -3,7 +3,7 @@ using System.Buffers;
namespace Ryujinx.Memory
{
- unsafe class NativeMemoryManager<T> : MemoryManager<T> where T : unmanaged
+ public unsafe class NativeMemoryManager<T> : MemoryManager<T> where T : unmanaged
{
private readonly T* _pointer;
private readonly int _length;
diff --git a/Ryujinx.Memory/PageTable.cs b/Ryujinx.Memory/PageTable.cs
new file mode 100644
index 00000000..71db1e76
--- /dev/null
+++ b/Ryujinx.Memory/PageTable.cs
@@ -0,0 +1,141 @@
+namespace Ryujinx.Memory
+{
+ class PageTable<T> where T : unmanaged
+ {
+ public const int PageBits = 12;
+ public const int PageSize = 1 << PageBits;
+ public const int PageMask = PageSize - 1;
+
+ private const int PtLevelBits = 9; // 9 * 4 + 12 = 48 (max address space size)
+ private const int PtLevelSize = 1 << PtLevelBits;
+ private const int PtLevelMask = PtLevelSize - 1;
+
+ private readonly T[][][][] _pageTable;
+
+ public PageTable()
+ {
+ _pageTable = new T[PtLevelSize][][][];
+ }
+
+ public T Read(ulong va)
+ {
+ int l3 = (int)(va >> PageBits) & PtLevelMask;
+ int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
+ int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask;
+ int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask;
+
+ if (_pageTable[l0] == null)
+ {
+ return default;
+ }
+
+ if (_pageTable[l0][l1] == null)
+ {
+ return default;
+ }
+
+ if (_pageTable[l0][l1][l2] == null)
+ {
+ return default;
+ }
+
+ return _pageTable[l0][l1][l2][l3];
+ }
+
+ public void Map(ulong va, T value)
+ {
+ int l3 = (int)(va >> PageBits) & PtLevelMask;
+ int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
+ int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask;
+ int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask;
+
+ if (_pageTable[l0] == null)
+ {
+ _pageTable[l0] = new T[PtLevelSize][][];
+ }
+
+ if (_pageTable[l0][l1] == null)
+ {
+ _pageTable[l0][l1] = new T[PtLevelSize][];
+ }
+
+ if (_pageTable[l0][l1][l2] == null)
+ {
+ _pageTable[l0][l1][l2] = new T[PtLevelSize];
+ }
+
+ _pageTable[l0][l1][l2][l3] = value;
+ }
+
+ public void Unmap(ulong va)
+ {
+ int l3 = (int)(va >> PageBits) & PtLevelMask;
+ int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
+ int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask;
+ int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask;
+
+ if (_pageTable[l0] == null)
+ {
+ return;
+ }
+
+ if (_pageTable[l0][l1] == null)
+ {
+ return;
+ }
+
+ if (_pageTable[l0][l1][l2] == null)
+ {
+ return;
+ }
+
+ _pageTable[l0][l1][l2][l3] = default;
+
+ bool empty = true;
+
+ for (int i = 0; i < _pageTable[l0][l1][l2].Length; i++)
+ {
+ empty &= _pageTable[l0][l1][l2][i].Equals(default);
+ }
+
+ if (empty)
+ {
+ _pageTable[l0][l1][l2] = null;
+
+ RemoveIfAllNull(l0, l1);
+ }
+ }
+
+ private void RemoveIfAllNull(int l0, int l1)
+ {
+ bool empty = true;
+
+ for (int i = 0; i < _pageTable[l0][l1].Length; i++)
+ {
+ empty &= (_pageTable[l0][l1][i] == null);
+ }
+
+ if (empty)
+ {
+ _pageTable[l0][l1] = null;
+
+ RemoveIfAllNull(l0);
+ }
+ }
+
+ private void RemoveIfAllNull(int l0)
+ {
+ bool empty = true;
+
+ for (int i = 0; i < _pageTable[l0].Length; i++)
+ {
+ empty &= (_pageTable[l0][i] == null);
+ }
+
+ if (empty)
+ {
+ _pageTable[l0] = null;
+ }
+ }
+ }
+}
diff --git a/Ryujinx.Memory/Range/HostMemoryRange.cs b/Ryujinx.Memory/Range/HostMemoryRange.cs
new file mode 100644
index 00000000..c6d8689c
--- /dev/null
+++ b/Ryujinx.Memory/Range/HostMemoryRange.cs
@@ -0,0 +1,71 @@
+using System;
+
+namespace Ryujinx.Memory.Range
+{
+ /// <summary>
+ /// Range of memory composed of an address and size.
+ /// </summary>
+ public struct HostMemoryRange : IEquatable<HostMemoryRange>
+ {
+ /// <summary>
+ /// An empty memory range, with a null address and zero size.
+ /// </summary>
+ public static HostMemoryRange Empty => new HostMemoryRange(0, 0);
+
+ /// <summary>
+ /// Start address of the range.
+ /// </summary>
+ public nuint Address { get; }
+
+ /// <summary>
+ /// Size of the range in bytes.
+ /// </summary>
+ public ulong Size { get; }
+
+ /// <summary>
+ /// Address where the range ends (exclusive).
+ /// </summary>
+ public nuint EndAddress => Address + (nuint)Size;
+
+ /// <summary>
+ /// Creates a new memory range with the specified address and size.
+ /// </summary>
+ /// <param name="address">Start address</param>
+ /// <param name="size">Size in bytes</param>
+ public HostMemoryRange(nuint address, ulong size)
+ {
+ Address = address;
+ Size = size;
+ }
+
+ /// <summary>
+ /// Checks if the range overlaps with another.
+ /// </summary>
+ /// <param name="other">The other range to check for overlap</param>
+ /// <returns>True if the ranges overlap, false otherwise</returns>
+ public bool OverlapsWith(HostMemoryRange other)
+ {
+ nuint thisAddress = Address;
+ nuint thisEndAddress = EndAddress;
+ nuint otherAddress = other.Address;
+ nuint otherEndAddress = other.EndAddress;
+
+ return thisAddress < otherEndAddress && otherAddress < thisEndAddress;
+ }
+
+ public override bool Equals(object obj)
+ {
+ return obj is HostMemoryRange other && Equals(other);
+ }
+
+ public bool Equals(HostMemoryRange other)
+ {
+ return Address == other.Address && Size == other.Size;
+ }
+
+ public override int GetHashCode()
+ {
+ return HashCode.Combine(Address, Size);
+ }
+ }
+}
diff --git a/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs b/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs
index 357b8c5c..71bd602f 100644
--- a/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs
+++ b/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs
@@ -10,6 +10,13 @@ namespace Ryujinx.Memory.Tracking
bool Dirty { get; }
/// <summary>
+ /// Force the range of handles to be dirty, without reprotecting.
+ /// </summary>
+ /// <param name="address">Start address of the range</param>
+ /// <param name="size">Size of the range</param>
+ public void ForceDirty(ulong address, ulong size);
+
+ /// <summary>
/// Check if any part of the region has been modified, and perform an action for each.
/// Contiguous modified regions are combined.
/// </summary>
diff --git a/Ryujinx.Memory/Tracking/IRegionHandle.cs b/Ryujinx.Memory/Tracking/IRegionHandle.cs
index cd33e5c8..ec802cb3 100644
--- a/Ryujinx.Memory/Tracking/IRegionHandle.cs
+++ b/Ryujinx.Memory/Tracking/IRegionHandle.cs
@@ -10,6 +10,7 @@ namespace Ryujinx.Memory.Tracking
ulong Size { get; }
ulong EndAddress { get; }
+ void ForceDirty();
void Reprotect(bool asDirty = false);
void RegisterAction(RegionSignal action);
}
diff --git a/Ryujinx.Memory/Tracking/MemoryTracking.cs b/Ryujinx.Memory/Tracking/MemoryTracking.cs
index 425552f8..70951e8c 100644
--- a/Ryujinx.Memory/Tracking/MemoryTracking.cs
+++ b/Ryujinx.Memory/Tracking/MemoryTracking.cs
@@ -9,7 +9,7 @@ namespace Ryujinx.Memory.Tracking
public class MemoryTracking
{
private readonly IVirtualMemoryManager _memoryManager;
- private readonly MemoryBlock _block;
+ private readonly InvalidAccessHandler _invalidAccessHandler;
// Only use these from within the lock.
private readonly NonOverlappingRangeList<VirtualRegion> _virtualRegions;
@@ -25,8 +25,6 @@ namespace Ryujinx.Memory.Tracking
/// </summary>
internal object TrackingLock = new object();
- public bool EnablePhysicalProtection { get; set; }
-
/// <summary>
/// Create a new tracking structure for the given "physical" memory block,
/// with a given "virtual" memory manager that will provide mappings and virtual memory protection.
@@ -34,11 +32,11 @@ namespace Ryujinx.Memory.Tracking
/// <param name="memoryManager">Virtual memory manager</param>
/// <param name="block">Physical memory block</param>
/// <param name="pageSize">Page size of the virtual memory space</param>
- public MemoryTracking(IVirtualMemoryManager memoryManager, MemoryBlock block, int pageSize)
+ public MemoryTracking(IVirtualMemoryManager memoryManager, int pageSize, InvalidAccessHandler invalidAccessHandler = null)
{
_memoryManager = memoryManager;
- _block = block;
_pageSize = pageSize;
+ _invalidAccessHandler = invalidAccessHandler;
_virtualRegions = new NonOverlappingRangeList<VirtualRegion>();
}
@@ -56,9 +54,8 @@ namespace Ryujinx.Memory.Tracking
/// Should be called after the mapping is complete.
/// </summary>
/// <param name="va">Virtual memory address</param>
- /// <param name="pa">Physical memory address</param>
/// <param name="size">Size to be mapped</param>
- public void Map(ulong va, ulong pa, ulong size)
+ public void Map(ulong va, ulong size)
{
// A mapping may mean we need to re-evaluate each VirtualRegion's affected area.
// Find all handles that overlap with the range, we need to recalculate their physical regions
@@ -208,6 +205,15 @@ namespace Ryujinx.Memory.Tracking
if (count == 0)
{
+ if (!_memoryManager.IsMapped(address))
+ {
+ _invalidAccessHandler?.Invoke(address);
+
+ // We can't continue - it's impossible to remove protection from the page.
+ // Even if the access handler wants us to continue, we wouldn't be able to.
+ throw new InvalidMemoryRegionException();
+ }
+
_memoryManager.TrackingReprotect(address & ~(ulong)(_pageSize - 1), (ulong)_pageSize, MemoryPermission.ReadAndWrite);
return false; // We can't handle this - it's probably a real invalid access.
}
diff --git a/Ryujinx.Memory/Tracking/MultiRegionHandle.cs b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs
index df154bc2..1f09807a 100644
--- a/Ryujinx.Memory/Tracking/MultiRegionHandle.cs
+++ b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs
@@ -34,6 +34,20 @@ namespace Ryujinx.Memory.Tracking
Size = size;
}
+ public void ForceDirty(ulong address, ulong size)
+ {
+ Dirty = true;
+
+ int startHandle = (int)((address - Address) / Granularity);
+ int lastHandle = (int)((address + (size - 1) - Address) / Granularity);
+
+ for (int i = startHandle; i <= lastHandle; i++)
+ {
+ _handles[i].SequenceNumber--;
+ _handles[i].ForceDirty();
+ }
+ }
+
public void SignalWrite()
{
Dirty = true;
@@ -98,7 +112,7 @@ namespace Ryujinx.Memory.Tracking
{
RegionHandle handle = _handles[i];
- if (handle.Dirty && sequenceNumber != handle.SequenceNumber)
+ if (sequenceNumber != handle.SequenceNumber && handle.DirtyOrVolatile())
{
rgSize += handle.Size;
handle.Reprotect();
diff --git a/Ryujinx.Memory/Tracking/RegionHandle.cs b/Ryujinx.Memory/Tracking/RegionHandle.cs
index 5c32fba4..69d77977 100644
--- a/Ryujinx.Memory/Tracking/RegionHandle.cs
+++ b/Ryujinx.Memory/Tracking/RegionHandle.cs
@@ -11,6 +11,17 @@ namespace Ryujinx.Memory.Tracking
/// </summary>
public class RegionHandle : IRegionHandle, IRange
{
+ /// <summary>
+ /// If more than this number of checks have been performed on a dirty flag since its last reprotect,
+ /// then it is dirtied infrequently.
+ /// </summary>
+ private static int CheckCountForInfrequent = 3;
+
+ /// <summary>
+ /// Number of frequent dirty/consume in a row to make this handle volatile.
+ /// </summary>
+ private static int VolatileThreshold = 5;
+
public bool Dirty { get; private set; }
public bool Unmapped { get; private set; }
@@ -28,6 +39,10 @@ namespace Ryujinx.Memory.Tracking
private readonly MemoryTracking _tracking;
private bool _disposed;
+ private int _checkCount = 0;
+ private int _volatileCount = 0;
+ private bool _volatile;
+
internal MemoryPermission RequiredPermission => _preAction != null ? MemoryPermission.None : (Dirty ? MemoryPermission.ReadAndWrite : MemoryPermission.Read);
internal RegionSignal PreAction => _preAction;
@@ -56,6 +71,25 @@ namespace Ryujinx.Memory.Tracking
}
/// <summary>
+ /// Clear the volatile state of this handle.
+ /// </summary>
+ private void ClearVolatile()
+ {
+ _volatileCount = 0;
+ _volatile = false;
+ }
+
+ /// <summary>
+ /// Check if this handle is dirty, or if it is volatile (i.e. it changes very often).
+ /// </summary>
+ /// <returns>True if the handle is dirty or volatile, false otherwise</returns>
+ public bool DirtyOrVolatile()
+ {
+ _checkCount++;
+ return Dirty || _volatile;
+ }
+
+ /// <summary>
/// Signal that a memory action occurred within this handle's virtual regions.
/// </summary>
/// <param name="write">Whether the region was written to or read</param>
@@ -77,18 +111,56 @@ namespace Ryujinx.Memory.Tracking
}
/// <summary>
+ /// Force this handle to be dirty, without reprotecting.
+ /// </summary>
+ public void ForceDirty()
+ {
+ Dirty = true;
+ }
+
+ /// <summary>
/// Consume the dirty flag for this handle, and reprotect so it can be set on the next write.
/// </summary>
public void Reprotect(bool asDirty = false)
{
+ if (_volatile) return;
+
Dirty = asDirty;
+
+ bool protectionChanged = false;
+
lock (_tracking.TrackingLock)
{
foreach (VirtualRegion region in _regions)
{
- region.UpdateProtection();
+ protectionChanged |= region.UpdateProtection();
}
}
+
+ if (!protectionChanged)
+ {
+ // Counteract the check count being incremented when this handle was forced dirty.
+ // It doesn't count for protected write tracking.
+
+ _checkCount--;
+ }
+ else if (!asDirty)
+ {
+ if (_checkCount > 0 && _checkCount < CheckCountForInfrequent)
+ {
+ if (++_volatileCount >= VolatileThreshold && _preAction == null)
+ {
+ _volatile = true;
+ return;
+ }
+ }
+ else
+ {
+ _volatileCount = 0;
+ }
+
+ _checkCount = 0;
+ }
}
/// <summary>
@@ -98,6 +170,8 @@ namespace Ryujinx.Memory.Tracking
/// <param name="action">Action to call on read or write</param>
public void RegisterAction(RegionSignal action)
{
+ ClearVolatile();
+
RegionSignal lastAction = Interlocked.Exchange(ref _preAction, action);
if (lastAction == null && action != lastAction)
{
@@ -142,6 +216,7 @@ namespace Ryujinx.Memory.Tracking
if (Unmapped)
{
+ ClearVolatile();
Dirty = false;
}
}
diff --git a/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs b/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs
index 8bc10c41..eabbd723 100644
--- a/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs
+++ b/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs
@@ -41,6 +41,17 @@ namespace Ryujinx.Memory.Tracking
Dirty = true;
}
+ public void ForceDirty(ulong address, ulong size)
+ {
+ foreach (var handle in _handles)
+ {
+ if (handle != null && handle.OverlapsWith(address, size))
+ {
+ handle.ForceDirty();
+ }
+ }
+ }
+
public void RegisterAction(RegionSignal action)
{
foreach (var handle in _handles)
diff --git a/Ryujinx.Memory/Tracking/VirtualRegion.cs b/Ryujinx.Memory/Tracking/VirtualRegion.cs
index 696d3560..e758f38e 100644
--- a/Ryujinx.Memory/Tracking/VirtualRegion.cs
+++ b/Ryujinx.Memory/Tracking/VirtualRegion.cs
@@ -11,9 +11,11 @@ namespace Ryujinx.Memory.Tracking
public List<RegionHandle> Handles = new List<RegionHandle>();
private readonly MemoryTracking _tracking;
+ private MemoryPermission _lastPermission;
- public VirtualRegion(MemoryTracking tracking, ulong address, ulong size) : base(address, size)
+ public VirtualRegion(MemoryTracking tracking, ulong address, ulong size, MemoryPermission lastPermission = MemoryPermission.Invalid) : base(address, size)
{
+ _lastPermission = lastPermission;
_tracking = tracking;
}
@@ -33,6 +35,8 @@ namespace Ryujinx.Memory.Tracking
/// <param name="mapped">True if the region has been mapped, false if unmapped</param>
public void SignalMappingChanged(bool mapped)
{
+ _lastPermission = MemoryPermission.Invalid;
+
foreach (RegionHandle handle in Handles)
{
handle.SignalMappingChanged(mapped);
@@ -61,9 +65,19 @@ namespace Ryujinx.Memory.Tracking
/// <summary>
/// Updates the protection for this virtual region.
/// </summary>
- public void UpdateProtection()
+ public bool UpdateProtection()
{
- _tracking.ProtectVirtualRegion(this, GetRequiredPermission());
+ MemoryPermission permission = GetRequiredPermission();
+
+ if (_lastPermission != permission)
+ {
+ _tracking.ProtectVirtualRegion(this, permission);
+ _lastPermission = permission;
+
+ return true;
+ }
+
+ return false;
}
/// <summary>
@@ -85,7 +99,7 @@ namespace Ryujinx.Memory.Tracking
public override INonOverlappingRange Split(ulong splitAddress)
{
- VirtualRegion newRegion = new VirtualRegion(_tracking, splitAddress, EndAddress - splitAddress);
+ VirtualRegion newRegion = new VirtualRegion(_tracking, splitAddress, EndAddress - splitAddress, _lastPermission);
Size = splitAddress - Address;
// The new region inherits all of our parents.
diff --git a/Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs b/Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs
new file mode 100644
index 00000000..46399504
--- /dev/null
+++ b/Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs
@@ -0,0 +1,698 @@
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Memory.WindowsShared
+{
+ class EmulatedSharedMemoryWindows : IDisposable
+ {
+ private static readonly IntPtr InvalidHandleValue = new IntPtr(-1);
+ private static readonly IntPtr CurrentProcessHandle = new IntPtr(-1);
+
+ public const int MappingBits = 16; // Windows 64kb granularity.
+ public const ulong MappingGranularity = 1 << MappingBits;
+ public const ulong MappingMask = MappingGranularity - 1;
+
+ public const ulong BackingSize32GB = 32UL * 1024UL * 1024UL * 1024UL; // Reasonable max size of 32GB.
+
+ private class SharedMemoryMapping : INonOverlappingRange
+ {
+ public ulong Address { get; }
+
+ public ulong Size { get; private set; }
+
+ public ulong EndAddress { get; private set; }
+
+ public List<int> Blocks;
+
+ public SharedMemoryMapping(ulong address, ulong size, List<int> blocks = null)
+ {
+ Address = address;
+ Size = size;
+ EndAddress = address + size;
+
+ Blocks = blocks ?? new List<int>();
+ }
+
+ public bool OverlapsWith(ulong address, ulong size)
+ {
+ return Address < address + size && address < EndAddress;
+ }
+
+ public void ExtendTo(ulong endAddress)
+ {
+ EndAddress = endAddress;
+ Size = endAddress - Address;
+ }
+
+ public void AddBlocks(IEnumerable<int> blocks)
+ {
+ if (Blocks.Count > 0 && blocks.Count() > 0 && Blocks.Last() == blocks.First())
+ {
+ Blocks.AddRange(blocks.Skip(1));
+ }
+ else
+ {
+ Blocks.AddRange(blocks);
+ }
+ }
+
+ public INonOverlappingRange Split(ulong splitAddress)
+ {
+ SharedMemoryMapping newRegion = new SharedMemoryMapping(splitAddress, EndAddress - splitAddress);
+
+ int end = (int)((EndAddress + MappingMask) >> MappingBits);
+ int start = (int)(Address >> MappingBits);
+
+ Size = splitAddress - Address;
+ EndAddress = splitAddress;
+
+ int splitEndBlock = (int)((splitAddress + MappingMask) >> MappingBits);
+ int splitStartBlock = (int)(splitAddress >> MappingBits);
+
+ newRegion.AddBlocks(Blocks.Skip(splitStartBlock - start));
+ Blocks.RemoveRange(splitEndBlock - start, end - splitEndBlock);
+
+ return newRegion;
+ }
+ }
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern IntPtr CreateFileMapping(
+ IntPtr hFile,
+ IntPtr lpFileMappingAttributes,
+ FileMapProtection flProtect,
+ uint dwMaximumSizeHigh,
+ uint dwMaximumSizeLow,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpName);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool CloseHandle(IntPtr hObject);
+
+ [DllImport("KernelBase.dll", SetLastError = true)]
+ private static extern IntPtr VirtualAlloc2(
+ IntPtr process,
+ IntPtr lpAddress,
+ IntPtr dwSize,
+ AllocationType flAllocationType,
+ MemoryProtection flProtect,
+ IntPtr extendedParameters,
+ ulong parameterCount);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool VirtualFree(IntPtr lpAddress, IntPtr dwSize, AllocationType dwFreeType);
+
+ [DllImport("KernelBase.dll", SetLastError = true)]
+ private static extern IntPtr MapViewOfFile3(
+ IntPtr hFileMappingObject,
+ IntPtr process,
+ IntPtr baseAddress,
+ ulong offset,
+ IntPtr dwNumberOfBytesToMap,
+ ulong allocationType,
+ MemoryProtection dwDesiredAccess,
+ IntPtr extendedParameters,
+ ulong parameterCount);
+
+ [DllImport("KernelBase.dll", SetLastError = true)]
+ private static extern bool UnmapViewOfFile2(IntPtr process, IntPtr lpBaseAddress, ulong unmapFlags);
+
+ private ulong _size;
+
+ private object _lock = new object();
+
+ private ulong _backingSize;
+ private IntPtr _backingMemHandle;
+ private int _backingEnd;
+ private int _backingAllocated;
+ private Queue<int> _backingFreeList;
+
+ private List<ulong> _mappedBases;
+ private RangeList<SharedMemoryMapping> _mappings;
+ private SharedMemoryMapping[] _foundMappings = new SharedMemoryMapping[32];
+ private PlaceholderList _placeholders;
+
+ public EmulatedSharedMemoryWindows(ulong size)
+ {
+ ulong backingSize = BackingSize32GB;
+
+ _size = size;
+ _backingSize = backingSize;
+
+ _backingMemHandle = CreateFileMapping(
+ InvalidHandleValue,
+ IntPtr.Zero,
+ FileMapProtection.PageReadWrite | FileMapProtection.SectionReserve,
+ (uint)(backingSize >> 32),
+ (uint)backingSize,
+ null);
+
+ if (_backingMemHandle == IntPtr.Zero)
+ {
+ throw new OutOfMemoryException();
+ }
+
+ _backingFreeList = new Queue<int>();
+ _mappings = new RangeList<SharedMemoryMapping>();
+ _mappedBases = new List<ulong>();
+ _placeholders = new PlaceholderList(size >> MappingBits);
+ }
+
+ private (ulong granularStart, ulong granularEnd) GetAlignedRange(ulong address, ulong size)
+ {
+ return (address & (~MappingMask), (address + size + MappingMask) & (~MappingMask));
+ }
+
+ private void Commit(ulong address, ulong size)
+ {
+ (ulong granularStart, ulong granularEnd) = GetAlignedRange(address, size);
+
+ ulong endAddress = address + size;
+
+ lock (_lock)
+ {
+ // Search a bit before and after the new mapping.
+ // When adding our new mapping, we may need to join an existing mapping into our new mapping (or in some cases, to the other side!)
+ ulong searchStart = granularStart == 0 ? 0 : (granularStart - 1);
+ int mappingCount = _mappings.FindOverlapsNonOverlapping(searchStart, (granularEnd - searchStart) + 1, ref _foundMappings);
+
+ int first = -1;
+ int last = -1;
+ SharedMemoryMapping startOverlap = null;
+ SharedMemoryMapping endOverlap = null;
+
+ int lastIndex = (int)(address >> MappingBits);
+ int endIndex = (int)((endAddress + MappingMask) >> MappingBits);
+ int firstBlock = -1;
+ int endBlock = -1;
+
+ for (int i = 0; i < mappingCount; i++)
+ {
+ SharedMemoryMapping mapping = _foundMappings[i];
+
+ if (mapping.Address < address)
+ {
+ if (mapping.EndAddress >= address)
+ {
+ startOverlap = mapping;
+ }
+
+ if ((int)((mapping.EndAddress - 1) >> MappingBits) == lastIndex)
+ {
+ lastIndex = (int)((mapping.EndAddress + MappingMask) >> MappingBits);
+ firstBlock = mapping.Blocks.Last();
+ }
+ }
+
+ if (mapping.EndAddress > endAddress)
+ {
+ if (mapping.Address <= endAddress)
+ {
+ endOverlap = mapping;
+ }
+
+ if ((int)((mapping.Address) >> MappingBits) + 1 == endIndex)
+ {
+ endIndex = (int)((mapping.Address) >> MappingBits);
+ endBlock = mapping.Blocks.First();
+ }
+ }
+
+ if (mapping.OverlapsWith(address, size))
+ {
+ if (first == -1)
+ {
+ first = i;
+ }
+
+ last = i;
+ }
+ }
+
+ if (startOverlap == endOverlap && startOverlap != null)
+ {
+ // Already fully committed.
+ return;
+ }
+
+ var blocks = new List<int>();
+ int lastBlock = -1;
+
+ if (firstBlock != -1)
+ {
+ blocks.Add(firstBlock);
+ lastBlock = firstBlock;
+ }
+
+ bool hasMapped = false;
+ Action map = () =>
+ {
+ if (!hasMapped)
+ {
+ _placeholders.EnsurePlaceholders(address >> MappingBits, (granularEnd - granularStart) >> MappingBits, SplitPlaceholder);
+ hasMapped = true;
+ }
+
+ // There's a gap between this index and the last. Allocate blocks to fill it.
+ blocks.Add(MapBackingBlock(MappingGranularity * (ulong)lastIndex++));
+ };
+
+ if (first != -1)
+ {
+ for (int i = first; i <= last; i++)
+ {
+ SharedMemoryMapping mapping = _foundMappings[i];
+ int mapIndex = (int)(mapping.Address >> MappingBits);
+
+ while (lastIndex < mapIndex)
+ {
+ map();
+ }
+
+ if (lastBlock == mapping.Blocks[0])
+ {
+ blocks.AddRange(mapping.Blocks.Skip(1));
+ }
+ else
+ {
+ blocks.AddRange(mapping.Blocks);
+ }
+
+ lastIndex = (int)((mapping.EndAddress - 1) >> MappingBits) + 1;
+ }
+ }
+
+ while (lastIndex < endIndex)
+ {
+ map();
+ }
+
+ if (endBlock != -1 && endBlock != lastBlock)
+ {
+ blocks.Add(endBlock);
+ }
+
+ if (startOverlap != null && endOverlap != null)
+ {
+ // Both sides should be coalesced. Extend the start overlap to contain the end overlap, and add together their blocks.
+
+ _mappings.Remove(endOverlap);
+
+ startOverlap.ExtendTo(endOverlap.EndAddress);
+
+ startOverlap.AddBlocks(blocks);
+ startOverlap.AddBlocks(endOverlap.Blocks);
+ }
+ else if (startOverlap != null)
+ {
+ startOverlap.ExtendTo(endAddress);
+
+ startOverlap.AddBlocks(blocks);
+ }
+ else
+ {
+ var mapping = new SharedMemoryMapping(address, size, blocks);
+
+ if (endOverlap != null)
+ {
+ mapping.ExtendTo(endOverlap.EndAddress);
+
+ mapping.AddBlocks(endOverlap.Blocks);
+
+ _mappings.Remove(endOverlap);
+ }
+
+ _mappings.Add(mapping);
+ }
+ }
+ }
+
+ private void Decommit(ulong address, ulong size)
+ {
+ (ulong granularStart, ulong granularEnd) = GetAlignedRange(address, size);
+ ulong endAddress = address + size;
+
+ lock (_lock)
+ {
+ int mappingCount = _mappings.FindOverlapsNonOverlapping(granularStart, granularEnd - granularStart, ref _foundMappings);
+
+ int first = -1;
+ int last = -1;
+
+ for (int i = 0; i < mappingCount; i++)
+ {
+ SharedMemoryMapping mapping = _foundMappings[i];
+
+ if (mapping.OverlapsWith(address, size))
+ {
+ if (first == -1)
+ {
+ first = i;
+ }
+
+ last = i;
+ }
+ }
+
+ if (first == -1)
+ {
+ return; // Could not find any regions to decommit.
+ }
+
+ int lastReleasedBlock = -1;
+
+ bool releasedFirst = false;
+ bool releasedLast = false;
+
+ for (int i = last; i >= first; i--)
+ {
+ SharedMemoryMapping mapping = _foundMappings[i];
+ bool releaseEnd = true;
+ bool releaseStart = true;
+
+ if (i == last)
+ {
+ // If this is the last region, do not release the block if there is a page ahead of us, or the block continues after us. (it is keeping the block alive)
+ releaseEnd = last == mappingCount - 1;
+
+                        // If the end region starts after the decommit end address, split and re-add it after modifying its base address.
+ if (mapping.EndAddress > endAddress)
+ {
+ var newMapping = (SharedMemoryMapping)mapping.Split(endAddress);
+ _mappings.Add(newMapping);
+
+ if ((endAddress & MappingMask) != 0)
+ {
+ releaseEnd = false;
+ }
+ }
+
+ releasedLast = releaseEnd;
+ }
+
+ if (i == first)
+ {
+ // If this is the first region, do not release the block if there is a region behind us. (it is keeping the block alive)
+ releaseStart = first == 0;
+
+ // If the first region starts before the decommit address, split it by modifying its end address.
+ if (mapping.Address < address)
+ {
+ mapping = (SharedMemoryMapping)mapping.Split(address);
+
+ if ((address & MappingMask) != 0)
+ {
+ releaseStart = false;
+ }
+ }
+
+ releasedFirst = releaseStart;
+ }
+
+ _mappings.Remove(mapping);
+
+ ulong releasePointer = (mapping.EndAddress + MappingMask) & (~MappingMask);
+ for (int j = mapping.Blocks.Count - 1; j >= 0; j--)
+ {
+ int blockId = mapping.Blocks[j];
+
+ releasePointer -= MappingGranularity;
+
+ if (lastReleasedBlock == blockId)
+ {
+ // When committed regions are fragmented, multiple will have the same block id for their start/end granular block.
+ // Avoid releasing these blocks twice.
+ continue;
+ }
+
+ if ((j != 0 || releaseStart) && (j != mapping.Blocks.Count - 1 || releaseEnd))
+ {
+ ReleaseBackingBlock(releasePointer, blockId);
+ }
+
+ lastReleasedBlock = blockId;
+ }
+ }
+
+ ulong placeholderStart = (granularStart >> MappingBits) + (releasedFirst ? 0UL : 1UL);
+ ulong placeholderEnd = (granularEnd >> MappingBits) - (releasedLast ? 0UL : 1UL);
+
+ if (placeholderEnd > placeholderStart)
+ {
+ _placeholders.RemovePlaceholders(placeholderStart, placeholderEnd - placeholderStart, CoalescePlaceholder);
+ }
+ }
+ }
+
+ public bool CommitMap(IntPtr address, IntPtr size)
+ {
+ lock (_lock)
+ {
+ foreach (ulong mapping in _mappedBases)
+ {
+ ulong offset = (ulong)address - mapping;
+
+ if (offset < _size)
+ {
+ Commit(offset, (ulong)size);
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ public bool DecommitMap(IntPtr address, IntPtr size)
+ {
+ lock (_lock)
+ {
+ foreach (ulong mapping in _mappedBases)
+ {
+ ulong offset = (ulong)address - mapping;
+
+ if (offset < _size)
+ {
+ Decommit(offset, (ulong)size);
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ private int MapBackingBlock(ulong offset)
+ {
+ bool allocate = false;
+ int backing;
+
+ if (_backingFreeList.Count > 0)
+ {
+ backing = _backingFreeList.Dequeue();
+ }
+ else
+ {
+ if (_backingAllocated == _backingEnd)
+ {
+ // Allocate the backing.
+ _backingAllocated++;
+ allocate = true;
+ }
+
+ backing = _backingEnd++;
+ }
+
+ ulong backingOffset = MappingGranularity * (ulong)backing;
+
+ foreach (ulong baseAddress in _mappedBases)
+ {
+ CommitToMap(baseAddress, offset, MappingGranularity, backingOffset, allocate);
+ allocate = false;
+ }
+
+ return backing;
+ }
+
+ private void ReleaseBackingBlock(ulong offset, int id)
+ {
+ foreach (ulong baseAddress in _mappedBases)
+ {
+ DecommitFromMap(baseAddress, offset);
+ }
+
+ if (_backingEnd - 1 == id)
+ {
+ _backingEnd = id;
+ }
+ else
+ {
+ _backingFreeList.Enqueue(id);
+ }
+ }
+
+ public IntPtr Map()
+ {
+ IntPtr newMapping = VirtualAlloc2(
+ CurrentProcessHandle,
+ IntPtr.Zero,
+ (IntPtr)_size,
+ AllocationType.Reserve | AllocationType.ReservePlaceholder,
+ MemoryProtection.NoAccess,
+ IntPtr.Zero,
+ 0);
+
+ if (newMapping == IntPtr.Zero)
+ {
+ throw new OutOfMemoryException();
+ }
+
+ // Apply all existing mappings to the new mapping
+ lock (_lock)
+ {
+ int lastBlock = -1;
+ foreach (SharedMemoryMapping mapping in _mappings)
+ {
+ ulong blockAddress = mapping.Address & (~MappingMask);
+ foreach (int block in mapping.Blocks)
+ {
+ if (block != lastBlock)
+ {
+ ulong backingOffset = MappingGranularity * (ulong)block;
+
+ CommitToMap((ulong)newMapping, blockAddress, MappingGranularity, backingOffset, false);
+
+ lastBlock = block;
+ }
+
+ blockAddress += MappingGranularity;
+ }
+ }
+
+ _mappedBases.Add((ulong)newMapping);
+ }
+
+ return newMapping;
+ }
+
+ private void SplitPlaceholder(ulong address, ulong size)
+ {
+ ulong byteAddress = address << MappingBits;
+ IntPtr byteSize = (IntPtr)(size << MappingBits);
+
+ foreach (ulong mapAddress in _mappedBases)
+ {
+ bool result = VirtualFree((IntPtr)(mapAddress + byteAddress), byteSize, AllocationType.PreservePlaceholder | AllocationType.Release);
+
+ if (!result)
+ {
+ throw new InvalidOperationException("Placeholder could not be split.");
+ }
+ }
+ }
+
+ private void CoalescePlaceholder(ulong address, ulong size)
+ {
+ ulong byteAddress = address << MappingBits;
+ IntPtr byteSize = (IntPtr)(size << MappingBits);
+
+ foreach (ulong mapAddress in _mappedBases)
+ {
+ bool result = VirtualFree((IntPtr)(mapAddress + byteAddress), byteSize, AllocationType.CoalescePlaceholders | AllocationType.Release);
+
+ if (!result)
+ {
+ throw new InvalidOperationException("Placeholder could not be coalesced.");
+ }
+ }
+ }
+
+ private void CommitToMap(ulong mapAddress, ulong address, ulong size, ulong backingOffset, bool allocate)
+ {
+ IntPtr targetAddress = (IntPtr)(mapAddress + address);
+
+ // Assume the placeholder worked (or already exists)
+ // Map the backing memory into the mapped location.
+
+ IntPtr mapped = MapViewOfFile3(
+ _backingMemHandle,
+ CurrentProcessHandle,
+ targetAddress,
+ backingOffset,
+ (IntPtr)MappingGranularity,
+ 0x4000, // REPLACE_PLACEHOLDER
+ MemoryProtection.ReadWrite,
+ IntPtr.Zero,
+ 0);
+
+ if (mapped == IntPtr.Zero)
+ {
+ throw new InvalidOperationException($"Could not map view of backing memory. (va=0x{address:X16} size=0x{size:X16}, error code {Marshal.GetLastWin32Error()})");
+ }
+
+ if (allocate)
+ {
+ // Commit this part of the shared memory.
+ VirtualAlloc2(CurrentProcessHandle, targetAddress, (IntPtr)MappingGranularity, AllocationType.Commit, MemoryProtection.ReadWrite, IntPtr.Zero, 0);
+ }
+ }
+
+ private void DecommitFromMap(ulong baseAddress, ulong address)
+ {
+ UnmapViewOfFile2(CurrentProcessHandle, (IntPtr)(baseAddress + address), 2);
+ }
+
+ public bool Unmap(ulong baseAddress)
+ {
+ lock (_lock)
+ {
+ if (_mappedBases.Remove(baseAddress))
+ {
+ int lastBlock = -1;
+
+ foreach (SharedMemoryMapping mapping in _mappings)
+ {
+ ulong blockAddress = mapping.Address & (~MappingMask);
+ foreach (int block in mapping.Blocks)
+ {
+ if (block != lastBlock)
+ {
+ DecommitFromMap(baseAddress, blockAddress);
+
+ lastBlock = block;
+ }
+
+ blockAddress += MappingGranularity;
+ }
+ }
+
+ if (!VirtualFree((IntPtr)baseAddress, (IntPtr)0, AllocationType.Release))
+ {
+ throw new InvalidOperationException("Couldn't free mapping placeholder.");
+ }
+
+ return true;
+ }
+
+ return false;
+ }
+ }
+
+ public void Dispose()
+ {
+ // Remove all file mappings
+ lock (_lock)
+ {
+ foreach (ulong baseAddress in _mappedBases.ToArray())
+ {
+ Unmap(baseAddress);
+ }
+ }
+
+ // Finally, delete the file mapping.
+ CloseHandle(_backingMemHandle);
+ }
+ }
+}
diff --git a/Ryujinx.Memory/WindowsShared/PlaceholderList.cs b/Ryujinx.Memory/WindowsShared/PlaceholderList.cs
new file mode 100644
index 00000000..be8cef9c
--- /dev/null
+++ b/Ryujinx.Memory/WindowsShared/PlaceholderList.cs
@@ -0,0 +1,291 @@
+using Ryujinx.Memory.Range;
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.Memory.WindowsShared
+{
+ /// <summary>
+ /// A specialized list used for keeping track of Windows 10's memory placeholders.
+ /// This is used to make splitting a large placeholder into equally small
+ /// granular chunks much easier, while avoiding slowdown due to a large number of
+ /// placeholders by coalescing adjacent granular placeholders after they are unused.
+ /// </summary>
+ class PlaceholderList
+ {
+ private class PlaceholderBlock : IRange
+ {
+ public ulong Address { get; }
+ public ulong Size { get; private set; }
+ public ulong EndAddress { get; private set; }
+ public bool IsGranular { get; set; }
+
+ public PlaceholderBlock(ulong id, ulong size, bool isGranular)
+ {
+ Address = id;
+ Size = size;
+ EndAddress = id + size;
+ IsGranular = isGranular;
+ }
+
+ public bool OverlapsWith(ulong address, ulong size)
+ {
+ return Address < address + size && address < EndAddress;
+ }
+
+ public void ExtendTo(ulong end)
+ {
+ EndAddress = end;
+ Size = end - Address;
+ }
+ }
+
+ private RangeList<PlaceholderBlock> _placeholders;
+ private PlaceholderBlock[] _foundBlocks = new PlaceholderBlock[32];
+
+ /// <summary>
+ /// Create a new list to manage placeholders.
+ /// Note that a size is measured in granular placeholders.
+        /// If the placeholder granularity is 65536 bytes, then a 65536-byte region will be covered by 1 granular placeholder.
+ /// </summary>
+ /// <param name="size">Size measured in granular placeholders</param>
+ public PlaceholderList(ulong size)
+ {
+ _placeholders = new RangeList<PlaceholderBlock>();
+
+ _placeholders.Add(new PlaceholderBlock(0, size, false));
+ }
+
+ /// <summary>
+ /// Ensure that the given range of placeholders is granular.
+ /// </summary>
+ /// <param name="id">Start of the range, measured in granular placeholders</param>
+ /// <param name="size">Size of the range, measured in granular placeholders</param>
+        /// <param name="splitPlaceholderCallback">Callback function to run when splitting placeholders, calls with (start, size)</param>
+ public void EnsurePlaceholders(ulong id, ulong size, Action<ulong, ulong> splitPlaceholderCallback)
+ {
+ // Search 1 before and after the placeholders, as we may need to expand/join granular regions surrounding the requested area.
+
+ ulong endId = id + size;
+ ulong searchStartId = id == 0 ? 0 : (id - 1);
+ int blockCount = _placeholders.FindOverlapsNonOverlapping(searchStartId, (endId - searchStartId) + 1, ref _foundBlocks);
+
+ PlaceholderBlock first = _foundBlocks[0];
+ PlaceholderBlock last = _foundBlocks[blockCount - 1];
+ bool overlapStart = first.EndAddress >= id && id != 0;
+ bool overlapEnd = last.Address <= endId;
+
+ for (int i = 0; i < blockCount; i++)
+ {
+ // Go through all non-granular blocks in the range and create placeholders.
+ PlaceholderBlock block = _foundBlocks[i];
+
+ if (block.Address <= id && block.EndAddress >= endId && block.IsGranular)
+ {
+ return; // The region we're searching for is already granular.
+ }
+
+ if (!block.IsGranular)
+ {
+ ulong placeholderStart = Math.Max(block.Address, id);
+ ulong placeholderEnd = Math.Min(block.EndAddress - 1, endId);
+
+ if (placeholderStart != block.Address && placeholderStart != block.EndAddress)
+ {
+ splitPlaceholderCallback(block.Address, placeholderStart - block.Address);
+ }
+
+ for (ulong j = placeholderStart; j < placeholderEnd; j++)
+ {
+ splitPlaceholderCallback(j, 1);
+ }
+ }
+
+ if (!((block == first && overlapStart) || (block == last && overlapEnd)))
+ {
+ // Remove blocks that will be replaced
+ _placeholders.Remove(block);
+ }
+ }
+
+ if (overlapEnd)
+ {
+ if (!(first == last && overlapStart))
+ {
+ _placeholders.Remove(last);
+ }
+
+ if (last.IsGranular)
+ {
+ endId = last.EndAddress;
+ }
+ else if (last.EndAddress != endId)
+ {
+ _placeholders.Add(new PlaceholderBlock(endId, last.EndAddress - endId, false));
+ }
+ }
+
+ if (overlapStart && first.IsGranular)
+ {
+ first.ExtendTo(endId);
+ }
+ else
+ {
+ if (overlapStart)
+ {
+ first.ExtendTo(id);
+ }
+
+ _placeholders.Add(new PlaceholderBlock(id, endId - id, true));
+ }
+
+ ValidateList();
+ }
+
+ /// <summary>
+ /// Coalesces placeholders in a given region, as they are not being used.
+ /// This assumes that the region only contains placeholders - all views and allocations must have been replaced with placeholders.
+ /// </summary>
+ /// <param name="id">Start of the range, measured in granular placeholders</param>
+ /// <param name="size">Size of the range, measured in granular placeholders</param>
+        /// <param name="coalescePlaceholderCallback">Callback function to run when coalescing two placeholders, calls with (start, size)</param>
+ public void RemovePlaceholders(ulong id, ulong size, Action<ulong, ulong> coalescePlaceholderCallback)
+ {
+ ulong endId = id + size;
+ int blockCount = _placeholders.FindOverlapsNonOverlapping(id, size, ref _foundBlocks);
+
+ PlaceholderBlock first = _foundBlocks[0];
+ PlaceholderBlock last = _foundBlocks[blockCount - 1];
+
+ // All granular blocks must have non-granular blocks surrounding them, unless they start at 0.
+ // We must extend the non-granular blocks into the granular ones. This does mean that we need to search twice.
+
+ if (first.IsGranular || last.IsGranular)
+ {
+ ulong surroundStart = Math.Max(0, (first.IsGranular && first.Address != 0) ? first.Address - 1 : id);
+ blockCount = _placeholders.FindOverlapsNonOverlapping(
+ surroundStart,
+ (last.IsGranular ? last.EndAddress + 1 : endId) - surroundStart,
+ ref _foundBlocks);
+
+ first = _foundBlocks[0];
+ last = _foundBlocks[blockCount - 1];
+ }
+
+ if (first == last)
+ {
+ return; // Already coalesced.
+ }
+
+ PlaceholderBlock extendBlock = id == 0 ? null : first;
+ bool newBlock = false;
+ for (int i = extendBlock == null ? 0 : 1; i < blockCount; i++)
+ {
+ // Go through all granular blocks in the range and extend placeholders.
+ PlaceholderBlock block = _foundBlocks[i];
+
+ ulong blockEnd = block.EndAddress;
+ ulong extendFrom;
+ ulong extent = Math.Min(blockEnd, endId);
+
+ if (block.Address < id && blockEnd > id)
+ {
+ block.ExtendTo(id);
+ extendBlock = null;
+ }
+ else
+ {
+ _placeholders.Remove(block);
+ }
+
+ if (extendBlock == null)
+ {
+ extendFrom = id;
+ extendBlock = new PlaceholderBlock(id, extent - id, false);
+ _placeholders.Add(extendBlock);
+
+ if (blockEnd > extent)
+ {
+ _placeholders.Add(new PlaceholderBlock(extent, blockEnd - extent, true));
+
+ // Skip the next non-granular block, and extend from that into the granular block afterwards.
+ // (assuming that one is still in the requested range)
+
+ if (i + 1 < blockCount)
+ {
+ extendBlock = _foundBlocks[i + 1];
+ }
+
+ i++;
+ }
+
+ newBlock = true;
+ }
+ else
+ {
+ extendFrom = extendBlock.Address;
+ extendBlock.ExtendTo(block.IsGranular ? extent : block.EndAddress);
+ }
+
+ if (block.IsGranular)
+ {
+ ulong placeholderStart = Math.Max(block.Address, id);
+ ulong placeholderEnd = extent;
+
+ if (newBlock)
+ {
+ placeholderStart++;
+ newBlock = false;
+ }
+
+ for (ulong j = placeholderStart; j < placeholderEnd; j++)
+ {
+ coalescePlaceholderCallback(extendFrom, (j + 1) - extendFrom);
+ }
+
+ if (extent < block.EndAddress)
+ {
+ _placeholders.Add(new PlaceholderBlock(placeholderEnd, block.EndAddress - placeholderEnd, true));
+ ValidateList();
+ return;
+ }
+ }
+ else
+ {
+ coalescePlaceholderCallback(extendFrom, block.EndAddress - extendFrom);
+ }
+ }
+
+ ValidateList();
+ }
+
+ /// <summary>
+ /// Ensure that the placeholder list is valid.
+ /// A valid list should not have any gaps between the placeholders,
+        /// and there may be no placeholders with the same IsGranular value next to each other.
+ /// </summary>
+ [Conditional("DEBUG")]
+ private void ValidateList()
+ {
+ bool isGranular = false;
+ bool first = true;
+ ulong lastAddress = 0;
+
+ foreach (var placeholder in _placeholders)
+ {
+ if (placeholder.Address != lastAddress)
+ {
+ throw new InvalidOperationException("Gap in placeholder list.");
+ }
+
+ if (isGranular == placeholder.IsGranular && !first)
+ {
+ throw new InvalidOperationException("Placeholder list not alternating.");
+ }
+
+ first = false;
+ isGranular = placeholder.IsGranular;
+ lastAddress = placeholder.EndAddress;
+ }
+ }
+ }
+}
diff --git a/Ryujinx.Memory/WindowsShared/WindowsFlags.cs b/Ryujinx.Memory/WindowsShared/WindowsFlags.cs
new file mode 100644
index 00000000..ca69cfe9
--- /dev/null
+++ b/Ryujinx.Memory/WindowsShared/WindowsFlags.cs
@@ -0,0 +1,52 @@
+using System;
+
+namespace Ryujinx.Memory.WindowsShared
+{
+ [Flags]
+ enum AllocationType : uint
+ {
+ CoalescePlaceholders = 0x1,
+ PreservePlaceholder = 0x2,
+ Commit = 0x1000,
+ Reserve = 0x2000,
+ Decommit = 0x4000,
+ ReplacePlaceholder = 0x4000,
+ Release = 0x8000,
+ ReservePlaceholder = 0x40000,
+ Reset = 0x80000,
+ Physical = 0x400000,
+ TopDown = 0x100000,
+ WriteWatch = 0x200000,
+ LargePages = 0x20000000
+ }
+
+ [Flags]
+ enum MemoryProtection : uint
+ {
+ NoAccess = 0x01,
+ ReadOnly = 0x02,
+ ReadWrite = 0x04,
+ WriteCopy = 0x08,
+ Execute = 0x10,
+ ExecuteRead = 0x20,
+ ExecuteReadWrite = 0x40,
+ ExecuteWriteCopy = 0x80,
+ GuardModifierflag = 0x100,
+ NoCacheModifierflag = 0x200,
+ WriteCombineModifierflag = 0x400
+ }
+
+ [Flags]
+ enum FileMapProtection : uint
+ {
+ PageReadonly = 0x02,
+ PageReadWrite = 0x04,
+ PageWriteCopy = 0x08,
+ PageExecuteRead = 0x20,
+ PageExecuteReadWrite = 0x40,
+ SectionCommit = 0x8000000,
+ SectionImage = 0x1000000,
+ SectionNoCache = 0x10000000,
+ SectionReserve = 0x4000000
+ }
+}