aboutsummaryrefslogtreecommitdiff
path: root/Ryujinx.Memory/WindowsShared
diff options
context:
space:
mode:
Diffstat (limited to 'Ryujinx.Memory/WindowsShared')
-rw-r--r--Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs698
-rw-r--r--Ryujinx.Memory/WindowsShared/PlaceholderList.cs291
-rw-r--r--Ryujinx.Memory/WindowsShared/WindowsFlags.cs52
3 files changed, 1041 insertions, 0 deletions
diff --git a/Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs b/Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs
new file mode 100644
index 00000000..46399504
--- /dev/null
+++ b/Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs
@@ -0,0 +1,698 @@
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Memory.WindowsShared
+{
    /// <summary>
    /// Emulates shared memory on Windows by backing it with a single pagefile-backed
    /// file mapping (the "backing") and mirroring views of that mapping into any
    /// number of reserved virtual address ranges (the "maps").
    /// Commit and decommit are tracked in 64KB granular blocks using the Windows 10
    /// placeholder APIs (VirtualAlloc2 / MapViewOfFile3 / UnmapViewOfFile2).
    /// </summary>
    class EmulatedSharedMemoryWindows : IDisposable
    {
        private static readonly IntPtr InvalidHandleValue = new IntPtr(-1);
        // Pseudo-handle (-1) meaning "current process", accepted by the *2/*3 memory APIs.
        private static readonly IntPtr CurrentProcessHandle = new IntPtr(-1);

        public const int MappingBits = 16; // Windows 64kb granularity.
        public const ulong MappingGranularity = 1 << MappingBits;
        public const ulong MappingMask = MappingGranularity - 1;

        public const ulong BackingSize32GB = 32UL * 1024UL * 1024UL * 1024UL; // Reasonable max size of 32GB.

        /// <summary>
        /// A contiguous committed range of the emulated shared memory, together with
        /// the ids of the granular backing blocks that cover it, in address order.
        /// </summary>
        private class SharedMemoryMapping : INonOverlappingRange
        {
            // Byte offset of the range, relative to the start of the shared memory.
            public ulong Address { get; }

            public ulong Size { get; private set; }

            public ulong EndAddress { get; private set; }

            // Backing block ids covering this range. Neighbouring mappings that share a
            // granular block each list that block's id, so ids can repeat across mappings.
            public List<int> Blocks;

            public SharedMemoryMapping(ulong address, ulong size, List<int> blocks = null)
            {
                Address = address;
                Size = size;
                EndAddress = address + size;

                Blocks = blocks ?? new List<int>();
            }

            public bool OverlapsWith(ulong address, ulong size)
            {
                return Address < address + size && address < EndAddress;
            }

            /// <summary>
            /// Extend this mapping forwards so that it ends at the given address.
            /// </summary>
            public void ExtendTo(ulong endAddress)
            {
                EndAddress = endAddress;
                Size = endAddress - Address;
            }

            /// <summary>
            /// Append the given block ids, skipping the first one when it is the same
            /// block this mapping already ends with (a shared granular block).
            /// </summary>
            public void AddBlocks(IEnumerable<int> blocks)
            {
                if (Blocks.Count > 0 && blocks.Count() > 0 && Blocks.Last() == blocks.First())
                {
                    Blocks.AddRange(blocks.Skip(1));
                }
                else
                {
                    Blocks.AddRange(blocks);
                }
            }

            /// <summary>
            /// Split this mapping at the given address. This mapping is truncated to end
            /// at the split point; the returned mapping covers the remainder.
            /// A granular block containing the split point stays listed on both halves.
            /// </summary>
            public INonOverlappingRange Split(ulong splitAddress)
            {
                SharedMemoryMapping newRegion = new SharedMemoryMapping(splitAddress, EndAddress - splitAddress);

                int end = (int)((EndAddress + MappingMask) >> MappingBits);
                int start = (int)(Address >> MappingBits);

                Size = splitAddress - Address;
                EndAddress = splitAddress;

                int splitEndBlock = (int)((splitAddress + MappingMask) >> MappingBits);
                int splitStartBlock = (int)(splitAddress >> MappingBits);

                newRegion.AddBlocks(Blocks.Skip(splitStartBlock - start));
                Blocks.RemoveRange(splitEndBlock - start, end - splitEndBlock);

                return newRegion;
            }
        }

        [DllImport("kernel32.dll", SetLastError = true)]
        private static extern IntPtr CreateFileMapping(
            IntPtr hFile,
            IntPtr lpFileMappingAttributes,
            FileMapProtection flProtect,
            uint dwMaximumSizeHigh,
            uint dwMaximumSizeLow,
            [MarshalAs(UnmanagedType.LPWStr)] string lpName);

        [DllImport("kernel32.dll", SetLastError = true)]
        private static extern bool CloseHandle(IntPtr hObject);

        // Placeholder-aware APIs live in KernelBase.dll (Windows 10+).
        [DllImport("KernelBase.dll", SetLastError = true)]
        private static extern IntPtr VirtualAlloc2(
            IntPtr process,
            IntPtr lpAddress,
            IntPtr dwSize,
            AllocationType flAllocationType,
            MemoryProtection flProtect,
            IntPtr extendedParameters,
            ulong parameterCount);

        [DllImport("kernel32.dll", SetLastError = true)]
        private static extern bool VirtualFree(IntPtr lpAddress, IntPtr dwSize, AllocationType dwFreeType);

        [DllImport("KernelBase.dll", SetLastError = true)]
        private static extern IntPtr MapViewOfFile3(
            IntPtr hFileMappingObject,
            IntPtr process,
            IntPtr baseAddress,
            ulong offset,
            IntPtr dwNumberOfBytesToMap,
            ulong allocationType,
            MemoryProtection dwDesiredAccess,
            IntPtr extendedParameters,
            ulong parameterCount);

        [DllImport("KernelBase.dll", SetLastError = true)]
        private static extern bool UnmapViewOfFile2(IntPtr process, IntPtr lpBaseAddress, ulong unmapFlags);

        // Size of the emulated shared memory, in bytes.
        private ulong _size;

        // Guards all mapping/placeholder state below.
        private object _lock = new object();

        // Size of the reserved backing section, in bytes.
        private ulong _backingSize;
        // Handle of the pagefile-backed section used as backing memory.
        private IntPtr _backingMemHandle;
        // One past the highest backing block id ever handed out.
        private int _backingEnd;
        // Number of backing blocks whose section pages have been committed so far.
        private int _backingAllocated;
        // Ids of released blocks below _backingEnd, available for reuse.
        private Queue<int> _backingFreeList;

        // Base addresses the shared memory is currently mapped at.
        private List<ulong> _mappedBases;
        // Committed ranges, tracked as offsets relative to the shared memory base.
        private RangeList<SharedMemoryMapping> _mappings;
        // Scratch buffer for range queries, grown by FindOverlapsNonOverlapping as needed.
        private SharedMemoryMapping[] _foundMappings = new SharedMemoryMapping[32];
        // Tracks placeholder granularity over the reserved maps.
        private PlaceholderList _placeholders;

        /// <summary>
        /// Create a new emulated shared memory of the given size.
        /// The backing section is only reserved (SEC_RESERVE); individual 64KB blocks
        /// are committed on demand as regions of the shared memory are committed.
        /// </summary>
        /// <param name="size">Size of the shared memory, in bytes</param>
        /// <exception cref="OutOfMemoryException">Thrown when the backing section cannot be created</exception>
        public EmulatedSharedMemoryWindows(ulong size)
        {
            ulong backingSize = BackingSize32GB;

            _size = size;
            _backingSize = backingSize;

            _backingMemHandle = CreateFileMapping(
                InvalidHandleValue,
                IntPtr.Zero,
                FileMapProtection.PageReadWrite | FileMapProtection.SectionReserve,
                (uint)(backingSize >> 32),
                (uint)backingSize,
                null);

            if (_backingMemHandle == IntPtr.Zero)
            {
                throw new OutOfMemoryException();
            }

            _backingFreeList = new Queue<int>();
            _mappings = new RangeList<SharedMemoryMapping>();
            _mappedBases = new List<ulong>();
            _placeholders = new PlaceholderList(size >> MappingBits); // Size in granular (64KB) units.
        }

        /// <summary>
        /// Expand a byte range so that it starts and ends on 64KB boundaries.
        /// </summary>
        private (ulong granularStart, ulong granularEnd) GetAlignedRange(ulong address, ulong size)
        {
            return (address & (~MappingMask), (address + size + MappingMask) & (~MappingMask));
        }

        /// <summary>
        /// Commit a range of the shared memory, allocating backing blocks for any part
        /// not yet committed and mapping them into every current map.
        /// The new range is coalesced with adjacent or overlapping existing mappings.
        /// </summary>
        /// <param name="address">Offset of the range, relative to the shared memory base</param>
        /// <param name="size">Size of the range, in bytes</param>
        private void Commit(ulong address, ulong size)
        {
            (ulong granularStart, ulong granularEnd) = GetAlignedRange(address, size);

            ulong endAddress = address + size;

            lock (_lock)
            {
                // Search a bit before and after the new mapping.
                // When adding our new mapping, we may need to join an existing mapping into our new mapping (or in some cases, to the other side!)
                ulong searchStart = granularStart == 0 ? 0 : (granularStart - 1);
                int mappingCount = _mappings.FindOverlapsNonOverlapping(searchStart, (granularEnd - searchStart) + 1, ref _foundMappings);

                int first = -1;
                int last = -1;
                SharedMemoryMapping startOverlap = null; // Existing mapping touching/overlapping the range start.
                SharedMemoryMapping endOverlap = null;   // Existing mapping touching/overlapping the range end.

                // Next granular index needing a backing block, and end index (exclusive).
                int lastIndex = (int)(address >> MappingBits);
                int endIndex = (int)((endAddress + MappingMask) >> MappingBits);
                int firstBlock = -1; // Block shared with a mapping just before the range, if any.
                int endBlock = -1;   // Block shared with a mapping just after the range, if any.

                for (int i = 0; i < mappingCount; i++)
                {
                    SharedMemoryMapping mapping = _foundMappings[i];

                    if (mapping.Address < address)
                    {
                        if (mapping.EndAddress >= address)
                        {
                            startOverlap = mapping;
                        }

                        // If a preceding mapping already covers our first granular block,
                        // reuse its last backing block instead of allocating a new one.
                        if ((int)((mapping.EndAddress - 1) >> MappingBits) == lastIndex)
                        {
                            lastIndex = (int)((mapping.EndAddress + MappingMask) >> MappingBits);
                            firstBlock = mapping.Blocks.Last();
                        }
                    }

                    if (mapping.EndAddress > endAddress)
                    {
                        if (mapping.Address <= endAddress)
                        {
                            endOverlap = mapping;
                        }

                        // If a following mapping already covers our last granular block,
                        // reuse its first backing block instead of allocating a new one.
                        if ((int)((mapping.Address) >> MappingBits) + 1 == endIndex)
                        {
                            endIndex = (int)((mapping.Address) >> MappingBits);
                            endBlock = mapping.Blocks.First();
                        }
                    }

                    if (mapping.OverlapsWith(address, size))
                    {
                        if (first == -1)
                        {
                            first = i;
                        }

                        last = i;
                    }
                }

                if (startOverlap == endOverlap && startOverlap != null)
                {
                    // Already fully committed.
                    return;
                }

                var blocks = new List<int>();
                int lastBlock = -1;

                if (firstBlock != -1)
                {
                    blocks.Add(firstBlock);
                    lastBlock = firstBlock;
                }

                bool hasMapped = false;
                Action map = () =>
                {
                    // Split the placeholders lazily, only once we actually need to map a new block.
                    if (!hasMapped)
                    {
                        _placeholders.EnsurePlaceholders(address >> MappingBits, (granularEnd - granularStart) >> MappingBits, SplitPlaceholder);
                        hasMapped = true;
                    }

                    // There's a gap between this index and the last. Allocate blocks to fill it.
                    blocks.Add(MapBackingBlock(MappingGranularity * (ulong)lastIndex++));
                };

                if (first != -1)
                {
                    // Fill the gaps between/around the overlapped mappings, reusing their blocks.
                    for (int i = first; i <= last; i++)
                    {
                        SharedMemoryMapping mapping = _foundMappings[i];
                        int mapIndex = (int)(mapping.Address >> MappingBits);

                        while (lastIndex < mapIndex)
                        {
                            map();
                        }

                        if (lastBlock == mapping.Blocks[0])
                        {
                            blocks.AddRange(mapping.Blocks.Skip(1));
                        }
                        else
                        {
                            blocks.AddRange(mapping.Blocks);
                        }

                        lastIndex = (int)((mapping.EndAddress - 1) >> MappingBits) + 1;
                    }
                }

                while (lastIndex < endIndex)
                {
                    map();
                }

                if (endBlock != -1 && endBlock != lastBlock)
                {
                    blocks.Add(endBlock);
                }

                if (startOverlap != null && endOverlap != null)
                {
                    // Both sides should be coalesced. Extend the start overlap to contain the end overlap, and add together their blocks.

                    _mappings.Remove(endOverlap);

                    startOverlap.ExtendTo(endOverlap.EndAddress);

                    startOverlap.AddBlocks(blocks);
                    startOverlap.AddBlocks(endOverlap.Blocks);
                }
                else if (startOverlap != null)
                {
                    startOverlap.ExtendTo(endAddress);

                    startOverlap.AddBlocks(blocks);
                }
                else
                {
                    var mapping = new SharedMemoryMapping(address, size, blocks);

                    if (endOverlap != null)
                    {
                        mapping.ExtendTo(endOverlap.EndAddress);

                        mapping.AddBlocks(endOverlap.Blocks);

                        _mappings.Remove(endOverlap);
                    }

                    _mappings.Add(mapping);
                }
            }
        }

        /// <summary>
        /// Decommit a range of the shared memory, releasing backing blocks that no
        /// remaining mapping still needs and unmapping them from every current map.
        /// Granular blocks shared with mappings outside the range are kept alive.
        /// </summary>
        /// <param name="address">Offset of the range, relative to the shared memory base</param>
        /// <param name="size">Size of the range, in bytes</param>
        private void Decommit(ulong address, ulong size)
        {
            (ulong granularStart, ulong granularEnd) = GetAlignedRange(address, size);
            ulong endAddress = address + size;

            lock (_lock)
            {
                int mappingCount = _mappings.FindOverlapsNonOverlapping(granularStart, granularEnd - granularStart, ref _foundMappings);

                int first = -1;
                int last = -1;

                for (int i = 0; i < mappingCount; i++)
                {
                    SharedMemoryMapping mapping = _foundMappings[i];

                    if (mapping.OverlapsWith(address, size))
                    {
                        if (first == -1)
                        {
                            first = i;
                        }

                        last = i;
                    }
                }

                if (first == -1)
                {
                    return; // Could not find any regions to decommit.
                }

                int lastReleasedBlock = -1;

                bool releasedFirst = false;
                bool releasedLast = false;

                // Iterate backwards so splits and removals do not disturb earlier indices.
                for (int i = last; i >= first; i--)
                {
                    SharedMemoryMapping mapping = _foundMappings[i];
                    bool releaseEnd = true;
                    bool releaseStart = true;

                    if (i == last)
                    {
                        // If this is the last region, do not release the block if there is a page ahead of us, or the block continues after us. (it is keeping the block alive)
                        releaseEnd = last == mappingCount - 1;

                        // If the end region starts after the decommit end address, split and readd it after modifying its base address.
                        if (mapping.EndAddress > endAddress)
                        {
                            var newMapping = (SharedMemoryMapping)mapping.Split(endAddress);
                            _mappings.Add(newMapping);

                            if ((endAddress & MappingMask) != 0)
                            {
                                // Mid-block split: the granular block is shared with the remainder.
                                releaseEnd = false;
                            }
                        }

                        releasedLast = releaseEnd;
                    }

                    if (i == first)
                    {
                        // If this is the first region, do not release the block if there is a region behind us. (it is keeping the block alive)
                        releaseStart = first == 0;

                        // If the first region starts before the decommit address, split it by modifying its end address.
                        if (mapping.Address < address)
                        {
                            mapping = (SharedMemoryMapping)mapping.Split(address);

                            if ((address & MappingMask) != 0)
                            {
                                // Mid-block split: the granular block is shared with the kept part.
                                releaseStart = false;
                            }
                        }

                        releasedFirst = releaseStart;
                    }

                    _mappings.Remove(mapping);

                    // Walk the mapping's blocks backwards, releasing each granular block at most once.
                    ulong releasePointer = (mapping.EndAddress + MappingMask) & (~MappingMask);
                    for (int j = mapping.Blocks.Count - 1; j >= 0; j--)
                    {
                        int blockId = mapping.Blocks[j];

                        releasePointer -= MappingGranularity;

                        if (lastReleasedBlock == blockId)
                        {
                            // When committed regions are fragmented, multiple will have the same block id for their start/end granular block.
                            // Avoid releasing these blocks twice.
                            continue;
                        }

                        if ((j != 0 || releaseStart) && (j != mapping.Blocks.Count - 1 || releaseEnd))
                        {
                            ReleaseBackingBlock(releasePointer, blockId);
                        }

                        lastReleasedBlock = blockId;
                    }
                }

                // Coalesce placeholders over the part of the range that was fully released.
                ulong placeholderStart = (granularStart >> MappingBits) + (releasedFirst ? 0UL : 1UL);
                ulong placeholderEnd = (granularEnd >> MappingBits) - (releasedLast ? 0UL : 1UL);

                if (placeholderEnd > placeholderStart)
                {
                    _placeholders.RemovePlaceholders(placeholderStart, placeholderEnd - placeholderStart, CoalescePlaceholder);
                }
            }
        }

        /// <summary>
        /// Commit a range identified by a virtual address within one of the current maps.
        /// </summary>
        /// <param name="address">Virtual address inside a map</param>
        /// <param name="size">Size of the range, in bytes</param>
        /// <returns>True if the address belonged to one of the maps, false otherwise</returns>
        public bool CommitMap(IntPtr address, IntPtr size)
        {
            lock (_lock)
            {
                foreach (ulong mapping in _mappedBases)
                {
                    // Unsigned subtraction wraps for addresses below the base, so a
                    // single "< _size" comparison tests "address is within this map".
                    ulong offset = (ulong)address - mapping;

                    if (offset < _size)
                    {
                        Commit(offset, (ulong)size);
                        return true;
                    }
                }
            }

            return false;
        }

        /// <summary>
        /// Decommit a range identified by a virtual address within one of the current maps.
        /// </summary>
        /// <param name="address">Virtual address inside a map</param>
        /// <param name="size">Size of the range, in bytes</param>
        /// <returns>True if the address belonged to one of the maps, false otherwise</returns>
        public bool DecommitMap(IntPtr address, IntPtr size)
        {
            lock (_lock)
            {
                foreach (ulong mapping in _mappedBases)
                {
                    // Same wraparound trick as CommitMap.
                    ulong offset = (ulong)address - mapping;

                    if (offset < _size)
                    {
                        Decommit(offset, (ulong)size);
                        return true;
                    }
                }
            }

            return false;
        }

        /// <summary>
        /// Obtain a backing block (reusing a freed id when possible) and map a view of
        /// it into every current map at the given shared memory offset.
        /// </summary>
        /// <param name="offset">Offset of the block, relative to the shared memory base</param>
        /// <returns>The id of the backing block that was mapped</returns>
        private int MapBackingBlock(ulong offset)
        {
            bool allocate = false;
            int backing;

            if (_backingFreeList.Count > 0)
            {
                backing = _backingFreeList.Dequeue();
            }
            else
            {
                if (_backingAllocated == _backingEnd)
                {
                    // Allocate the backing.
                    _backingAllocated++;
                    allocate = true;
                }

                backing = _backingEnd++;
            }

            ulong backingOffset = MappingGranularity * (ulong)backing;

            foreach (ulong baseAddress in _mappedBases)
            {
                // The section pages only need committing once; subsequent maps just get the view.
                CommitToMap(baseAddress, offset, MappingGranularity, backingOffset, allocate);
                allocate = false;
            }

            return backing;
        }

        /// <summary>
        /// Unmap a backing block from every map and return its id for reuse
        /// (or shrink the in-use id range when it was the highest id).
        /// </summary>
        /// <param name="offset">Offset of the block, relative to the shared memory base</param>
        /// <param name="id">Id of the backing block</param>
        private void ReleaseBackingBlock(ulong offset, int id)
        {
            foreach (ulong baseAddress in _mappedBases)
            {
                DecommitFromMap(baseAddress, offset);
            }

            if (_backingEnd - 1 == id)
            {
                _backingEnd = id;
            }
            else
            {
                _backingFreeList.Enqueue(id);
            }
        }

        /// <summary>
        /// Map the shared memory at a new virtual address, replaying every currently
        /// committed region into the new map.
        /// </summary>
        /// <returns>Base address of the new map</returns>
        /// <exception cref="OutOfMemoryException">Thrown when the address space reservation fails</exception>
        public IntPtr Map()
        {
            // Reserve the whole range as a single placeholder so views can replace parts of it.
            // NOTE(review): this relies on MapViewOfFile3 being able to replace part of the
            // large placeholder without a prior split - confirm against placeholder API docs.
            IntPtr newMapping = VirtualAlloc2(
                CurrentProcessHandle,
                IntPtr.Zero,
                (IntPtr)_size,
                AllocationType.Reserve | AllocationType.ReservePlaceholder,
                MemoryProtection.NoAccess,
                IntPtr.Zero,
                0);

            if (newMapping == IntPtr.Zero)
            {
                throw new OutOfMemoryException();
            }

            // Apply all existing mappings to the new mapping
            lock (_lock)
            {
                int lastBlock = -1;
                foreach (SharedMemoryMapping mapping in _mappings)
                {
                    ulong blockAddress = mapping.Address & (~MappingMask);
                    foreach (int block in mapping.Blocks)
                    {
                        // Skip an id repeated by two mappings that share a granular block.
                        if (block != lastBlock)
                        {
                            ulong backingOffset = MappingGranularity * (ulong)block;

                            CommitToMap((ulong)newMapping, blockAddress, MappingGranularity, backingOffset, false);

                            lastBlock = block;
                        }

                        blockAddress += MappingGranularity;
                    }
                }

                _mappedBases.Add((ulong)newMapping);
            }

            return newMapping;
        }

        /// <summary>
        /// Callback for PlaceholderList: split a placeholder in every current map.
        /// Address and size are in granular (64KB) units.
        /// </summary>
        /// <exception cref="InvalidOperationException">Thrown when a split fails</exception>
        private void SplitPlaceholder(ulong address, ulong size)
        {
            ulong byteAddress = address << MappingBits;
            IntPtr byteSize = (IntPtr)(size << MappingBits);

            foreach (ulong mapAddress in _mappedBases)
            {
                // MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER carves this range out as its own placeholder.
                bool result = VirtualFree((IntPtr)(mapAddress + byteAddress), byteSize, AllocationType.PreservePlaceholder | AllocationType.Release);

                if (!result)
                {
                    throw new InvalidOperationException("Placeholder could not be split.");
                }
            }
        }

        /// <summary>
        /// Callback for PlaceholderList: coalesce adjacent placeholders in every current map.
        /// Address and size are in granular (64KB) units.
        /// </summary>
        /// <exception cref="InvalidOperationException">Thrown when coalescing fails</exception>
        private void CoalescePlaceholder(ulong address, ulong size)
        {
            ulong byteAddress = address << MappingBits;
            IntPtr byteSize = (IntPtr)(size << MappingBits);

            foreach (ulong mapAddress in _mappedBases)
            {
                // MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS merges the placeholders covering this range.
                bool result = VirtualFree((IntPtr)(mapAddress + byteAddress), byteSize, AllocationType.CoalescePlaceholders | AllocationType.Release);

                if (!result)
                {
                    throw new InvalidOperationException("Placeholder could not be coalesced.");
                }
            }
        }

        /// <summary>
        /// Map one granular view of the backing section into a map, replacing the
        /// placeholder at the target address, and optionally commit the section pages.
        /// </summary>
        /// <param name="mapAddress">Base address of the map</param>
        /// <param name="address">Offset within the shared memory</param>
        /// <param name="size">Size of the view, in bytes (used for error reporting only)</param>
        /// <param name="backingOffset">Byte offset of the block within the backing section</param>
        /// <param name="allocate">True to also commit the section pages (first use of this block)</param>
        /// <exception cref="InvalidOperationException">Thrown when the view cannot be mapped</exception>
        private void CommitToMap(ulong mapAddress, ulong address, ulong size, ulong backingOffset, bool allocate)
        {
            IntPtr targetAddress = (IntPtr)(mapAddress + address);

            // Assume the placeholder worked (or already exists)
            // Map the backing memory into the mapped location.

            IntPtr mapped = MapViewOfFile3(
                _backingMemHandle,
                CurrentProcessHandle,
                targetAddress,
                backingOffset,
                (IntPtr)MappingGranularity,
                0x4000, // REPLACE_PLACEHOLDER
                MemoryProtection.ReadWrite,
                IntPtr.Zero,
                0);

            if (mapped == IntPtr.Zero)
            {
                throw new InvalidOperationException($"Could not map view of backing memory. (va=0x{address:X16} size=0x{size:X16}, error code {Marshal.GetLastWin32Error()})");
            }

            if (allocate)
            {
                // Commit this part of the shared memory.
                // NOTE(review): the VirtualAlloc2 result is not checked here - a failed
                // commit would only surface later as an access violation.
                VirtualAlloc2(CurrentProcessHandle, targetAddress, (IntPtr)MappingGranularity, AllocationType.Commit, MemoryProtection.ReadWrite, IntPtr.Zero, 0);
            }
        }

        /// <summary>
        /// Unmap a granular view from a map, leaving a placeholder behind.
        /// The unmapFlags value 2 is MEM_PRESERVE_PLACEHOLDER. The result is not
        /// checked; callers only invoke this for views they believe are mapped.
        /// </summary>
        /// <param name="baseAddress">Base address of the map</param>
        /// <param name="address">Offset within the shared memory</param>
        private void DecommitFromMap(ulong baseAddress, ulong address)
        {
            UnmapViewOfFile2(CurrentProcessHandle, (IntPtr)(baseAddress + address), 2);
        }

        /// <summary>
        /// Unmap the shared memory from the given base address, removing all mapped
        /// views and releasing the address space reservation.
        /// </summary>
        /// <param name="baseAddress">Base address the shared memory was mapped at</param>
        /// <returns>True if the address was one of the current maps, false otherwise</returns>
        /// <exception cref="InvalidOperationException">Thrown when the reservation cannot be freed</exception>
        public bool Unmap(ulong baseAddress)
        {
            lock (_lock)
            {
                if (_mappedBases.Remove(baseAddress))
                {
                    int lastBlock = -1;

                    foreach (SharedMemoryMapping mapping in _mappings)
                    {
                        ulong blockAddress = mapping.Address & (~MappingMask);
                        foreach (int block in mapping.Blocks)
                        {
                            // Skip an id repeated by two mappings that share a granular block.
                            if (block != lastBlock)
                            {
                                DecommitFromMap(baseAddress, blockAddress);

                                lastBlock = block;
                            }

                            blockAddress += MappingGranularity;
                        }
                    }

                    if (!VirtualFree((IntPtr)baseAddress, (IntPtr)0, AllocationType.Release))
                    {
                        throw new InvalidOperationException("Couldn't free mapping placeholder.");
                    }

                    return true;
                }

                return false;
            }
        }

        /// <summary>
        /// Unmap all current maps and close the backing section handle.
        /// </summary>
        public void Dispose()
        {
            // Remove all file mappings
            lock (_lock) // Monitor locks are reentrant, so the nested lock in Unmap is fine.
            {
                foreach (ulong baseAddress in _mappedBases.ToArray())
                {
                    Unmap(baseAddress);
                }
            }

            // Finally, delete the file mapping.
            CloseHandle(_backingMemHandle);
        }
    }
+}
diff --git a/Ryujinx.Memory/WindowsShared/PlaceholderList.cs b/Ryujinx.Memory/WindowsShared/PlaceholderList.cs
new file mode 100644
index 00000000..be8cef9c
--- /dev/null
+++ b/Ryujinx.Memory/WindowsShared/PlaceholderList.cs
@@ -0,0 +1,291 @@
+using Ryujinx.Memory.Range;
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.Memory.WindowsShared
+{
    /// <summary>
    /// A specialized list used for keeping track of Windows 10's memory placeholders.
    /// This is used to make splitting a large placeholder into equally small
    /// granular chunks much easier, while avoiding slowdown due to a large number of
    /// placeholders by coalescing adjacent granular placeholders after they are unused.
    /// All addresses and sizes in this class are measured in granular placeholder
    /// units, not bytes; callbacks translate them for the OS.
    /// </summary>
    class PlaceholderList
    {
        /// <summary>
        /// A contiguous run of placeholders that is either one single placeholder
        /// (IsGranular == false) or one placeholder per granular unit (IsGranular == true).
        /// </summary>
        private class PlaceholderBlock : IRange
        {
            public ulong Address { get; }
            public ulong Size { get; private set; }
            public ulong EndAddress { get; private set; }
            public bool IsGranular { get; set; }

            public PlaceholderBlock(ulong id, ulong size, bool isGranular)
            {
                Address = id;
                Size = size;
                EndAddress = id + size;
                IsGranular = isGranular;
            }

            public bool OverlapsWith(ulong address, ulong size)
            {
                return Address < address + size && address < EndAddress;
            }

            // Extend this block forwards so that it ends at the given address.
            public void ExtendTo(ulong end)
            {
                EndAddress = end;
                Size = end - Address;
            }
        }

        // Ordered, gap-free list of blocks covering the whole managed range
        // (see ValidateList for the invariants).
        private RangeList<PlaceholderBlock> _placeholders;
        // Scratch buffer for range queries, grown by FindOverlapsNonOverlapping as needed.
        private PlaceholderBlock[] _foundBlocks = new PlaceholderBlock[32];

        /// <summary>
        /// Create a new list to manage placeholders.
        /// Note that a size is measured in granular placeholders.
        /// If the placeholder granularity is 65536 bytes, then a 65536 region will be covered by 1 placeholder granularity.
        /// </summary>
        /// <param name="size">Size measured in granular placeholders</param>
        public PlaceholderList(ulong size)
        {
            _placeholders = new RangeList<PlaceholderBlock>();

            // Initially the whole range is a single (non-granular) placeholder.
            _placeholders.Add(new PlaceholderBlock(0, size, false));
        }

        /// <summary>
        /// Ensure that the given range of placeholders is granular.
        /// </summary>
        /// <param name="id">Start of the range, measured in granular placeholders</param>
        /// <param name="size">Size of the range, measured in granular placeholders</param>
        /// <param name="splitPlaceholderCallback">Callback function to run when splitting placeholders, called with (start, size)</param>
        public void EnsurePlaceholders(ulong id, ulong size, Action<ulong, ulong> splitPlaceholderCallback)
        {
            // Search 1 before and after the placeholders, as we may need to expand/join granular regions surrounding the requested area.

            ulong endId = id + size;
            ulong searchStartId = id == 0 ? 0 : (id - 1);
            int blockCount = _placeholders.FindOverlapsNonOverlapping(searchStartId, (endId - searchStartId) + 1, ref _foundBlocks);

            PlaceholderBlock first = _foundBlocks[0];
            PlaceholderBlock last = _foundBlocks[blockCount - 1];
            // Whether an existing block touches the start/end of the requested range.
            bool overlapStart = first.EndAddress >= id && id != 0;
            bool overlapEnd = last.Address <= endId;

            for (int i = 0; i < blockCount; i++)
            {
                // Go through all non-granular blocks in the range and create placeholders.
                PlaceholderBlock block = _foundBlocks[i];

                if (block.Address <= id && block.EndAddress >= endId && block.IsGranular)
                {
                    return; // The region we're searching for is already granular.
                }

                if (!block.IsGranular)
                {
                    ulong placeholderStart = Math.Max(block.Address, id);
                    ulong placeholderEnd = Math.Min(block.EndAddress - 1, endId);

                    // Detach the leading part of the block as its own placeholder, if any...
                    if (placeholderStart != block.Address && placeholderStart != block.EndAddress)
                    {
                        splitPlaceholderCallback(block.Address, placeholderStart - block.Address);
                    }

                    // ...then split off one granular placeholder at a time.
                    for (ulong j = placeholderStart; j < placeholderEnd; j++)
                    {
                        splitPlaceholderCallback(j, 1);
                    }
                }

                if (!((block == first && overlapStart) || (block == last && overlapEnd)))
                {
                    // Remove blocks that will be replaced
                    _placeholders.Remove(block);
                }
            }

            if (overlapEnd)
            {
                if (!(first == last && overlapStart))
                {
                    _placeholders.Remove(last);
                }

                if (last.IsGranular)
                {
                    // Absorb the trailing granular block into the new granular block.
                    endId = last.EndAddress;
                }
                else if (last.EndAddress != endId)
                {
                    // Keep the tail of the trailing non-granular block.
                    _placeholders.Add(new PlaceholderBlock(endId, last.EndAddress - endId, false));
                }
            }

            if (overlapStart && first.IsGranular)
            {
                // Grow the leading granular block over the whole new range.
                first.ExtendTo(endId);
            }
            else
            {
                if (overlapStart)
                {
                    // Truncate the leading non-granular block at the range start.
                    first.ExtendTo(id);
                }

                _placeholders.Add(new PlaceholderBlock(id, endId - id, true));
            }

            ValidateList();
        }

        /// <summary>
        /// Coalesces placeholders in a given region, as they are not being used.
        /// This assumes that the region only contains placeholders - all views and allocations must have been replaced with placeholders.
        /// </summary>
        /// <param name="id">Start of the range, measured in granular placeholders</param>
        /// <param name="size">Size of the range, measured in granular placeholders</param>
        /// <param name="coalescePlaceholderCallback">Callback function to run when coalescing placeholders, called with (start, size)</param>
        public void RemovePlaceholders(ulong id, ulong size, Action<ulong, ulong> coalescePlaceholderCallback)
        {
            ulong endId = id + size;
            int blockCount = _placeholders.FindOverlapsNonOverlapping(id, size, ref _foundBlocks);

            PlaceholderBlock first = _foundBlocks[0];
            PlaceholderBlock last = _foundBlocks[blockCount - 1];

            // All granular blocks must have non-granular blocks surrounding them, unless they start at 0.
            // We must extend the non-granular blocks into the granular ones. This does mean that we need to search twice.

            if (first.IsGranular || last.IsGranular)
            {
                ulong surroundStart = Math.Max(0, (first.IsGranular && first.Address != 0) ? first.Address - 1 : id);
                blockCount = _placeholders.FindOverlapsNonOverlapping(
                    surroundStart,
                    (last.IsGranular ? last.EndAddress + 1 : endId) - surroundStart,
                    ref _foundBlocks);

                first = _foundBlocks[0];
                last = _foundBlocks[blockCount - 1];
            }

            if (first == last)
            {
                return; // Already coalesced.
            }

            // The non-granular block that granular placeholders are merged into.
            PlaceholderBlock extendBlock = id == 0 ? null : first;
            bool newBlock = false;
            for (int i = extendBlock == null ? 0 : 1; i < blockCount; i++)
            {
                // Go through all granular blocks in the range and extend placeholders.
                PlaceholderBlock block = _foundBlocks[i];

                ulong blockEnd = block.EndAddress;
                ulong extendFrom;
                ulong extent = Math.Min(blockEnd, endId); // Where the merged block will end.

                if (block.Address < id && blockEnd > id)
                {
                    // Keep the part of this block before the range; merging starts a fresh block.
                    block.ExtendTo(id);
                    extendBlock = null;
                }
                else
                {
                    _placeholders.Remove(block);
                }

                if (extendBlock == null)
                {
                    extendFrom = id;
                    extendBlock = new PlaceholderBlock(id, extent - id, false);
                    _placeholders.Add(extendBlock);

                    if (blockEnd > extent)
                    {
                        // Re-add the part of this granular block that lies beyond the range.
                        _placeholders.Add(new PlaceholderBlock(extent, blockEnd - extent, true));

                        // Skip the next non-granular block, and extend from that into the granular block afterwards.
                        // (assuming that one is still in the requested range)

                        if (i + 1 < blockCount)
                        {
                            extendBlock = _foundBlocks[i + 1];
                        }

                        i++;
                    }

                    newBlock = true;
                }
                else
                {
                    extendFrom = extendBlock.Address;
                    extendBlock.ExtendTo(block.IsGranular ? extent : block.EndAddress);
                }

                if (block.IsGranular)
                {
                    ulong placeholderStart = Math.Max(block.Address, id);
                    ulong placeholderEnd = extent;

                    if (newBlock)
                    {
                        // The first granular placeholder already forms the new block;
                        // start merging from the one after it.
                        placeholderStart++;
                        newBlock = false;
                    }

                    // Coalesce the granular placeholders into the extend block one at a time.
                    for (ulong j = placeholderStart; j < placeholderEnd; j++)
                    {
                        coalescePlaceholderCallback(extendFrom, (j + 1) - extendFrom);
                    }

                    if (extent < block.EndAddress)
                    {
                        // The range ended partway through this granular block; keep the
                        // remainder granular and stop.
                        _placeholders.Add(new PlaceholderBlock(placeholderEnd, block.EndAddress - placeholderEnd, true));
                        ValidateList();
                        return;
                    }
                }
                else
                {
                    // Merge a whole non-granular block into the extend block.
                    coalescePlaceholderCallback(extendFrom, block.EndAddress - extendFrom);
                }
            }

            ValidateList();
        }

        /// <summary>
        /// Ensure that the placeholder list is valid.
        /// A valid list should not have any gaps between the placeholders,
        /// and there may be no placeholders with the same IsGranular value next to each other.
        /// Only compiled into DEBUG builds.
        /// </summary>
        /// <exception cref="InvalidOperationException">Thrown when an invariant is violated</exception>
        [Conditional("DEBUG")]
        private void ValidateList()
        {
            bool isGranular = false;
            bool first = true;
            ulong lastAddress = 0;

            foreach (var placeholder in _placeholders)
            {
                if (placeholder.Address != lastAddress)
                {
                    throw new InvalidOperationException("Gap in placeholder list.");
                }

                if (isGranular == placeholder.IsGranular && !first)
                {
                    throw new InvalidOperationException("Placeholder list not alternating.");
                }

                first = false;
                isGranular = placeholder.IsGranular;
                lastAddress = placeholder.EndAddress;
            }
        }
    }
+}
diff --git a/Ryujinx.Memory/WindowsShared/WindowsFlags.cs b/Ryujinx.Memory/WindowsShared/WindowsFlags.cs
new file mode 100644
index 00000000..ca69cfe9
--- /dev/null
+++ b/Ryujinx.Memory/WindowsShared/WindowsFlags.cs
@@ -0,0 +1,52 @@
+using System;
+
+namespace Ryujinx.Memory.WindowsShared
+{
+ [Flags]
+ enum AllocationType : uint
+ {
+ CoalescePlaceholders = 0x1,
+ PreservePlaceholder = 0x2,
+ Commit = 0x1000,
+ Reserve = 0x2000,
+ Decommit = 0x4000,
+ ReplacePlaceholder = 0x4000,
+ Release = 0x8000,
+ ReservePlaceholder = 0x40000,
+ Reset = 0x80000,
+ Physical = 0x400000,
+ TopDown = 0x100000,
+ WriteWatch = 0x200000,
+ LargePages = 0x20000000
+ }
+
+ [Flags]
+ enum MemoryProtection : uint
+ {
+ NoAccess = 0x01,
+ ReadOnly = 0x02,
+ ReadWrite = 0x04,
+ WriteCopy = 0x08,
+ Execute = 0x10,
+ ExecuteRead = 0x20,
+ ExecuteReadWrite = 0x40,
+ ExecuteWriteCopy = 0x80,
+ GuardModifierflag = 0x100,
+ NoCacheModifierflag = 0x200,
+ WriteCombineModifierflag = 0x400
+ }
+
+ [Flags]
+ enum FileMapProtection : uint
+ {
+ PageReadonly = 0x02,
+ PageReadWrite = 0x04,
+ PageWriteCopy = 0x08,
+ PageExecuteRead = 0x20,
+ PageExecuteReadWrite = 0x40,
+ SectionCommit = 0x8000000,
+ SectionImage = 0x1000000,
+ SectionNoCache = 0x10000000,
+ SectionReserve = 0x4000000
+ }
+}