aboutsummaryrefslogtreecommitdiff
path: root/Ryujinx.HLE/HOS/Kernel/Memory
diff options
context:
space:
mode:
Diffstat (limited to 'Ryujinx.HLE/HOS/Kernel/Memory')
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs5
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs4
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs329
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs (renamed from Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockAllocator.cs)4
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs3322
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs263
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs20
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs221
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs2797
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KPageTableHostMapped.cs125
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs27
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs55
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs13
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/MemoryOperation.cs12
-rw-r--r--Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs103
15 files changed, 3864 insertions, 3436 deletions
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs b/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
index dea2a4ef..4941d5b7 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
@@ -9,5 +9,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
public const ulong SlabHeapBase = KernelReserveBase + 0x85000;
public const ulong SlapHeapSize = 0xa21000;
public const ulong SlabHeapEnd = SlabHeapBase + SlapHeapSize;
+
+ public static bool IsHeapPhysicalAddress(ulong address)
+ {
+ return address >= SlabHeapEnd;
+ }
}
} \ No newline at end of file
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
index b93b68d9..b612022c 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
@@ -84,7 +84,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong leftAddress = BaseAddress;
- ulong leftPagesCount = (address - leftAddress) / KMemoryManager.PageSize;
+ ulong leftPagesCount = (address - leftAddress) / KPageTableBase.PageSize;
BaseAddress = address;
@@ -107,7 +107,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
public KMemoryInfo GetInfo()
{
- ulong size = PagesCount * KMemoryManager.PageSize;
+ ulong size = PagesCount * KPageTableBase.PageSize;
return new KMemoryInfo(
BaseAddress,
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs
new file mode 100644
index 00000000..c0d11a95
--- /dev/null
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs
@@ -0,0 +1,329 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KMemoryBlockManager
+ {
+ private const int PageSize = KPageTableBase.PageSize;
+
+ private readonly LinkedList<KMemoryBlock> _blocks;
+
+ public int BlocksCount => _blocks.Count;
+
+ private KMemoryBlockSlabManager _slabManager;
+
+ private ulong _addrSpaceEnd;
+
+ public KMemoryBlockManager()
+ {
+ _blocks = new LinkedList<KMemoryBlock>();
+ }
+
+ public KernelResult Initialize(ulong addrSpaceStart, ulong addrSpaceEnd, KMemoryBlockSlabManager slabManager)
+ {
+ _slabManager = slabManager;
+ _addrSpaceEnd = addrSpaceEnd;
+
+ // First insertion will always need only a single block,
+ // because there's nothing else to split.
+ if (!slabManager.CanAllocate(1))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong addrSpacePagesCount = (addrSpaceEnd - addrSpaceStart) / PageSize;
+
+ _blocks.AddFirst(new KMemoryBlock(
+ addrSpaceStart,
+ addrSpacePagesCount,
+ MemoryState.Unmapped,
+ KMemoryPermission.None,
+ MemoryAttribute.None));
+
+ return KernelResult.Success;
+ }
+
+ public void InsertBlock(
+ ulong baseAddress,
+ ulong pagesCount,
+ MemoryState oldState,
+ KMemoryPermission oldPermission,
+ MemoryAttribute oldAttribute,
+ MemoryState newState,
+ KMemoryPermission newPermission,
+ MemoryAttribute newAttribute)
+ {
+ // Insert new block on the list only on areas where the state
+ // of the block matches the state specified on the old* state
+ // arguments, otherwise leave it as is.
+ int oldCount = _blocks.Count;
+
+ oldAttribute |= MemoryAttribute.IpcAndDeviceMapped;
+
+ ulong endAddr = baseAddress + pagesCount * PageSize;
+
+ LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+ while (node != null)
+ {
+ LinkedListNode<KMemoryBlock> newNode = node;
+
+ KMemoryBlock currBlock = node.Value;
+
+ ulong currBaseAddr = currBlock.BaseAddress;
+ ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
+
+ if (baseAddress < currEndAddr && currBaseAddr < endAddr)
+ {
+ MemoryAttribute currBlockAttr = currBlock.Attribute | MemoryAttribute.IpcAndDeviceMapped;
+
+ if (currBlock.State != oldState ||
+ currBlock.Permission != oldPermission ||
+ currBlockAttr != oldAttribute)
+ {
+ node = node.Next;
+
+ continue;
+ }
+
+ if (baseAddress > currBaseAddr)
+ {
+ _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress));
+ }
+
+ if (endAddr < currEndAddr)
+ {
+ newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr));
+ }
+
+ newNode.Value.SetState(newPermission, newState, newAttribute);
+
+ newNode = MergeEqualStateNeighbors(newNode);
+ }
+
+ if (currEndAddr - 1 >= endAddr - 1)
+ {
+ break;
+ }
+
+ node = newNode.Next;
+ }
+
+ _slabManager.Count += _blocks.Count - oldCount;
+
+ ValidateInternalState();
+ }
+
+ public void InsertBlock(
+ ulong baseAddress,
+ ulong pagesCount,
+ MemoryState state,
+ KMemoryPermission permission = KMemoryPermission.None,
+ MemoryAttribute attribute = MemoryAttribute.None)
+ {
+ // Inserts new block at the list, replacing and splitting
+ // existing blocks as needed.
+ int oldCount = _blocks.Count;
+
+ ulong endAddr = baseAddress + pagesCount * PageSize;
+
+ LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+ while (node != null)
+ {
+ LinkedListNode<KMemoryBlock> newNode = node;
+
+ KMemoryBlock currBlock = node.Value;
+
+ ulong currBaseAddr = currBlock.BaseAddress;
+ ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
+
+ if (baseAddress < currEndAddr && currBaseAddr < endAddr)
+ {
+ if (baseAddress > currBaseAddr)
+ {
+ _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress));
+ }
+
+ if (endAddr < currEndAddr)
+ {
+ newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr));
+ }
+
+ newNode.Value.SetState(permission, state, attribute);
+
+ newNode = MergeEqualStateNeighbors(newNode);
+ }
+
+ if (currEndAddr - 1 >= endAddr - 1)
+ {
+ break;
+ }
+
+ node = newNode.Next;
+ }
+
+ _slabManager.Count += _blocks.Count - oldCount;
+
+ ValidateInternalState();
+ }
+
+ public delegate void BlockMutator(KMemoryBlock block, KMemoryPermission newPerm);
+
+ public void InsertBlock(
+ ulong baseAddress,
+ ulong pagesCount,
+ BlockMutator blockMutate,
+ KMemoryPermission permission = KMemoryPermission.None)
+ {
+ // Inserts new block at the list, replacing and splitting
+ // existing blocks as needed, then calling the callback
+ // function on the new block.
+ int oldCount = _blocks.Count;
+
+ ulong endAddr = baseAddress + pagesCount * PageSize;
+
+ LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+ while (node != null)
+ {
+ LinkedListNode<KMemoryBlock> newNode = node;
+
+ KMemoryBlock currBlock = node.Value;
+
+ ulong currBaseAddr = currBlock.BaseAddress;
+ ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
+
+ if (baseAddress < currEndAddr && currBaseAddr < endAddr)
+ {
+ if (baseAddress > currBaseAddr)
+ {
+ _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress));
+ }
+
+ if (endAddr < currEndAddr)
+ {
+ newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr));
+ }
+
+ KMemoryBlock newBlock = newNode.Value;
+
+ blockMutate(newBlock, permission);
+
+ newNode = MergeEqualStateNeighbors(newNode);
+ }
+
+ if (currEndAddr - 1 >= endAddr - 1)
+ {
+ break;
+ }
+
+ node = newNode.Next;
+ }
+
+ _slabManager.Count += _blocks.Count - oldCount;
+
+ ValidateInternalState();
+ }
+
+ [Conditional("DEBUG")]
+ private void ValidateInternalState()
+ {
+ ulong expectedAddress = 0;
+
+ LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+ while (node != null)
+ {
+ LinkedListNode<KMemoryBlock> newNode = node;
+
+ KMemoryBlock currBlock = node.Value;
+
+ Debug.Assert(currBlock.BaseAddress == expectedAddress);
+
+ expectedAddress = currBlock.BaseAddress + currBlock.PagesCount * PageSize;
+
+ node = newNode.Next;
+ }
+
+ Debug.Assert(expectedAddress == _addrSpaceEnd);
+ }
+
+ private LinkedListNode<KMemoryBlock> MergeEqualStateNeighbors(LinkedListNode<KMemoryBlock> node)
+ {
+ KMemoryBlock block = node.Value;
+
+ if (node.Previous != null)
+ {
+ KMemoryBlock previousBlock = node.Previous.Value;
+
+ if (BlockStateEquals(block, previousBlock))
+ {
+ LinkedListNode<KMemoryBlock> previousNode = node.Previous;
+
+ _blocks.Remove(node);
+
+ previousBlock.AddPages(block.PagesCount);
+
+ node = previousNode;
+ block = previousBlock;
+ }
+ }
+
+ if (node.Next != null)
+ {
+ KMemoryBlock nextBlock = node.Next.Value;
+
+ if (BlockStateEquals(block, nextBlock))
+ {
+ _blocks.Remove(node.Next);
+
+ block.AddPages(nextBlock.PagesCount);
+ }
+ }
+
+ return node;
+ }
+
+ private static bool BlockStateEquals(KMemoryBlock lhs, KMemoryBlock rhs)
+ {
+ return lhs.State == rhs.State &&
+ lhs.Permission == rhs.Permission &&
+ lhs.Attribute == rhs.Attribute &&
+ lhs.SourcePermission == rhs.SourcePermission &&
+ lhs.DeviceRefCount == rhs.DeviceRefCount &&
+ lhs.IpcRefCount == rhs.IpcRefCount;
+ }
+
+ public KMemoryBlock FindBlock(ulong address)
+ {
+ return FindBlockNode(address)?.Value;
+ }
+
+ public LinkedListNode<KMemoryBlock> FindBlockNode(ulong address)
+ {
+ lock (_blocks)
+ {
+ LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+ while (node != null)
+ {
+ KMemoryBlock block = node.Value;
+
+ ulong currEndAddr = block.PagesCount * PageSize + block.BaseAddress;
+
+ if (block.BaseAddress <= address && currEndAddr - 1 >= address)
+ {
+ return node;
+ }
+
+ node = node.Next;
+ }
+ }
+
+ return null;
+ }
+ }
+}
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockAllocator.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs
index ae68bf39..8732b507 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockAllocator.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs
@@ -1,12 +1,12 @@
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
- class KMemoryBlockAllocator
+ class KMemoryBlockSlabManager
{
private ulong _capacityElements;
public int Count { get; set; }
- public KMemoryBlockAllocator(ulong capacityElements)
+ public KMemoryBlockSlabManager(ulong capacityElements)
{
_capacityElements = capacityElements;
}
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
index 5b6df53b..6d0a1658 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
@@ -1,3335 +1,65 @@
-using Ryujinx.Common;
-using Ryujinx.HLE.HOS.Kernel.Common;
-using Ryujinx.HLE.HOS.Kernel.Process;
-using Ryujinx.Memory;
+using Ryujinx.HLE.HOS.Kernel.Common;
using System;
-using System.Collections.Generic;
-using System.Diagnostics;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KMemoryManager
{
- private static readonly int[] MappingUnitSizes = new int[]
- {
- 0x1000,
- 0x10000,
- 0x200000,
- 0x400000,
- 0x2000000,
- 0x40000000
- };
-
- public const int PageSize = 0x1000;
-
- private const int KMemoryBlockSize = 0x40;
-
- // We need 2 blocks for the case where a big block
- // needs to be split in 2, plus one block that will be the new one inserted.
- private const int MaxBlocksNeededForInsertion = 2;
-
- private readonly LinkedList<KMemoryBlock> _blocks;
-
- private readonly IVirtualMemoryManager _cpuMemory;
-
- private readonly KernelContext _context;
-
- public ulong AddrSpaceStart { get; private set; }
- public ulong AddrSpaceEnd { get; private set; }
-
- public ulong CodeRegionStart { get; private set; }
- public ulong CodeRegionEnd { get; private set; }
-
- public ulong HeapRegionStart { get; private set; }
- public ulong HeapRegionEnd { get; private set; }
-
- private ulong _currentHeapAddr;
-
- public ulong AliasRegionStart { get; private set; }
- public ulong AliasRegionEnd { get; private set; }
-
- public ulong StackRegionStart { get; private set; }
- public ulong StackRegionEnd { get; private set; }
-
- public ulong TlsIoRegionStart { get; private set; }
- public ulong TlsIoRegionEnd { get; private set; }
-
- private ulong _heapCapacity;
-
- public ulong PhysicalMemoryUsage { get; private set; }
-
- private MemoryRegion _memRegion;
-
- private bool _aslrDisabled;
-
- public int AddrSpaceWidth { get; private set; }
-
- private bool _isKernel;
-
- private bool _aslrEnabled;
-
- private KMemoryBlockAllocator _blockAllocator;
-
- private int _contextId;
-
- private MersenneTwister _randomNumberGenerator;
-
- public KMemoryManager(KernelContext context, IVirtualMemoryManager cpuMemory)
- {
- _context = context;
- _cpuMemory = cpuMemory;
-
- _blocks = new LinkedList<KMemoryBlock>();
-
- _isKernel = false;
- }
-
- private static readonly int[] AddrSpaceSizes = new int[] { 32, 36, 32, 39 };
-
- public KernelResult InitializeForProcess(
- AddressSpaceType addrSpaceType,
- bool aslrEnabled,
- bool aslrDisabled,
- MemoryRegion memRegion,
- ulong address,
- ulong size,
- KMemoryBlockAllocator blockAllocator)
- {
- if ((uint)addrSpaceType > (uint)AddressSpaceType.Addr39Bits)
- {
- throw new ArgumentException(nameof(addrSpaceType));
- }
-
- _contextId = _context.ContextIdManager.GetId();
-
- ulong addrSpaceBase = 0;
- ulong addrSpaceSize = 1UL << AddrSpaceSizes[(int)addrSpaceType];
-
- KernelResult result = CreateUserAddressSpace(
- addrSpaceType,
- aslrEnabled,
- aslrDisabled,
- addrSpaceBase,
- addrSpaceSize,
- memRegion,
- address,
- size,
- blockAllocator);
-
- if (result != KernelResult.Success)
- {
- _context.ContextIdManager.PutId(_contextId);
- }
-
- return result;
- }
-
- private class Region
- {
- public ulong Start;
- public ulong End;
- public ulong Size;
- public ulong AslrOffset;
- }
-
- private KernelResult CreateUserAddressSpace(
- AddressSpaceType addrSpaceType,
- bool aslrEnabled,
- bool aslrDisabled,
- ulong addrSpaceStart,
- ulong addrSpaceEnd,
- MemoryRegion memRegion,
- ulong address,
- ulong size,
- KMemoryBlockAllocator blockAllocator)
- {
- ulong endAddr = address + size;
-
- Region aliasRegion = new Region();
- Region heapRegion = new Region();
- Region stackRegion = new Region();
- Region tlsIoRegion = new Region();
-
- ulong codeRegionSize;
- ulong stackAndTlsIoStart;
- ulong stackAndTlsIoEnd;
- ulong baseAddress;
-
- switch (addrSpaceType)
- {
- case AddressSpaceType.Addr32Bits:
- aliasRegion.Size = 0x40000000;
- heapRegion.Size = 0x40000000;
- stackRegion.Size = 0;
- tlsIoRegion.Size = 0;
- CodeRegionStart = 0x200000;
- codeRegionSize = 0x3fe00000;
- stackAndTlsIoStart = 0x200000;
- stackAndTlsIoEnd = 0x40000000;
- baseAddress = 0x200000;
- AddrSpaceWidth = 32;
- break;
-
- case AddressSpaceType.Addr36Bits:
- aliasRegion.Size = 0x180000000;
- heapRegion.Size = 0x180000000;
- stackRegion.Size = 0;
- tlsIoRegion.Size = 0;
- CodeRegionStart = 0x8000000;
- codeRegionSize = 0x78000000;
- stackAndTlsIoStart = 0x8000000;
- stackAndTlsIoEnd = 0x80000000;
- baseAddress = 0x8000000;
- AddrSpaceWidth = 36;
- break;
-
- case AddressSpaceType.Addr32BitsNoMap:
- aliasRegion.Size = 0;
- heapRegion.Size = 0x80000000;
- stackRegion.Size = 0;
- tlsIoRegion.Size = 0;
- CodeRegionStart = 0x200000;
- codeRegionSize = 0x3fe00000;
- stackAndTlsIoStart = 0x200000;
- stackAndTlsIoEnd = 0x40000000;
- baseAddress = 0x200000;
- AddrSpaceWidth = 32;
- break;
-
- case AddressSpaceType.Addr39Bits:
- aliasRegion.Size = 0x1000000000;
- heapRegion.Size = 0x180000000;
- stackRegion.Size = 0x80000000;
- tlsIoRegion.Size = 0x1000000000;
- CodeRegionStart = BitUtils.AlignDown(address, 0x200000);
- codeRegionSize = BitUtils.AlignUp (endAddr, 0x200000) - CodeRegionStart;
- stackAndTlsIoStart = 0;
- stackAndTlsIoEnd = 0;
- baseAddress = 0x8000000;
- AddrSpaceWidth = 39;
- break;
-
- default: throw new ArgumentException(nameof(addrSpaceType));
- }
-
- CodeRegionEnd = CodeRegionStart + codeRegionSize;
-
- ulong mapBaseAddress;
- ulong mapAvailableSize;
-
- if (CodeRegionStart - baseAddress >= addrSpaceEnd - CodeRegionEnd)
- {
- // Has more space before the start of the code region.
- mapBaseAddress = baseAddress;
- mapAvailableSize = CodeRegionStart - baseAddress;
- }
- else
- {
- // Has more space after the end of the code region.
- mapBaseAddress = CodeRegionEnd;
- mapAvailableSize = addrSpaceEnd - CodeRegionEnd;
- }
-
- ulong mapTotalSize = aliasRegion.Size + heapRegion.Size + stackRegion.Size + tlsIoRegion.Size;
-
- ulong aslrMaxOffset = mapAvailableSize - mapTotalSize;
-
- _aslrEnabled = aslrEnabled;
-
- AddrSpaceStart = addrSpaceStart;
- AddrSpaceEnd = addrSpaceEnd;
-
- _blockAllocator = blockAllocator;
-
- if (mapAvailableSize < mapTotalSize)
- {
- return KernelResult.OutOfMemory;
- }
-
- if (aslrEnabled)
- {
- aliasRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
- heapRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
- stackRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
- tlsIoRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
- }
-
- // Regions are sorted based on ASLR offset.
- // When ASLR is disabled, the order is Map, Heap, NewMap and TlsIo.
- aliasRegion.Start = mapBaseAddress + aliasRegion.AslrOffset;
- aliasRegion.End = aliasRegion.Start + aliasRegion.Size;
- heapRegion.Start = mapBaseAddress + heapRegion.AslrOffset;
- heapRegion.End = heapRegion.Start + heapRegion.Size;
- stackRegion.Start = mapBaseAddress + stackRegion.AslrOffset;
- stackRegion.End = stackRegion.Start + stackRegion.Size;
- tlsIoRegion.Start = mapBaseAddress + tlsIoRegion.AslrOffset;
- tlsIoRegion.End = tlsIoRegion.Start + tlsIoRegion.Size;
-
- SortRegion(heapRegion, aliasRegion);
-
- if (stackRegion.Size != 0)
- {
- SortRegion(stackRegion, aliasRegion);
- SortRegion(stackRegion, heapRegion);
- }
- else
- {
- stackRegion.Start = stackAndTlsIoStart;
- stackRegion.End = stackAndTlsIoEnd;
- }
-
- if (tlsIoRegion.Size != 0)
- {
- SortRegion(tlsIoRegion, aliasRegion);
- SortRegion(tlsIoRegion, heapRegion);
- SortRegion(tlsIoRegion, stackRegion);
- }
- else
- {
- tlsIoRegion.Start = stackAndTlsIoStart;
- tlsIoRegion.End = stackAndTlsIoEnd;
- }
-
- AliasRegionStart = aliasRegion.Start;
- AliasRegionEnd = aliasRegion.End;
- HeapRegionStart = heapRegion.Start;
- HeapRegionEnd = heapRegion.End;
- StackRegionStart = stackRegion.Start;
- StackRegionEnd = stackRegion.End;
- TlsIoRegionStart = tlsIoRegion.Start;
- TlsIoRegionEnd = tlsIoRegion.End;
-
- _currentHeapAddr = HeapRegionStart;
- _heapCapacity = 0;
- PhysicalMemoryUsage = 0;
-
- _memRegion = memRegion;
- _aslrDisabled = aslrDisabled;
-
- return InitializeBlocks(addrSpaceStart, addrSpaceEnd);
- }
-
- private ulong GetRandomValue(ulong min, ulong max)
- {
- return (ulong)GetRandomValue((long)min, (long)max);
- }
-
- private long GetRandomValue(long min, long max)
- {
- if (_randomNumberGenerator == null)
- {
- _randomNumberGenerator = new MersenneTwister(0);
- }
-
- return _randomNumberGenerator.GenRandomNumber(min, max);
- }
-
- private static void SortRegion(Region lhs, Region rhs)
- {
- if (lhs.AslrOffset < rhs.AslrOffset)
- {
- rhs.Start += lhs.Size;
- rhs.End += lhs.Size;
- }
- else
- {
- lhs.Start += rhs.Size;
- lhs.End += rhs.Size;
- }
- }
-
- private KernelResult InitializeBlocks(ulong addrSpaceStart, ulong addrSpaceEnd)
- {
- // First insertion will always need only a single block,
- // because there's nothing else to split.
- if (!_blockAllocator.CanAllocate(1))
- {
- return KernelResult.OutOfResource;
- }
-
- ulong addrSpacePagesCount = (addrSpaceEnd - addrSpaceStart) / PageSize;
-
- _blocks.AddFirst(new KMemoryBlock(
- addrSpaceStart,
- addrSpacePagesCount,
- MemoryState.Unmapped,
- KMemoryPermission.None,
- MemoryAttribute.None));
-
- return KernelResult.Success;
- }
-
- public KernelResult MapPages(
- ulong address,
- KPageList pageList,
- MemoryState state,
- KMemoryPermission permission)
- {
- ulong pagesCount = pageList.GetPagesCount();
-
- ulong size = pagesCount * PageSize;
-
- if (!CanContain(address, size, state))
- {
- return KernelResult.InvalidMemState;
- }
-
- lock (_blocks)
- {
- if (!IsUnmapped(address, pagesCount * PageSize))
- {
- return KernelResult.InvalidMemState;
- }
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- KernelResult result = MapPages(address, pageList, permission);
-
- if (result == KernelResult.Success)
- {
- InsertBlock(address, pagesCount, state, permission);
- }
-
- return result;
- }
- }
-
- public KernelResult UnmapPages(ulong address, KPageList pageList, MemoryState stateExpected)
- {
- ulong pagesCount = pageList.GetPagesCount();
-
- ulong size = pagesCount * PageSize;
-
- ulong endAddr = address + size;
-
- ulong addrSpacePagesCount = (AddrSpaceEnd - AddrSpaceStart) / PageSize;
-
- if (AddrSpaceStart > address)
- {
- return KernelResult.InvalidMemState;
- }
-
- if (addrSpacePagesCount < pagesCount)
- {
- return KernelResult.InvalidMemState;
- }
-
- if (endAddr - 1 > AddrSpaceEnd - 1)
- {
- return KernelResult.InvalidMemState;
- }
-
- lock (_blocks)
- {
- KPageList currentPageList = new KPageList();
-
- AddVaRangeToPageList(currentPageList, address, pagesCount);
-
- if (!currentPageList.IsEqual(pageList))
- {
- return KernelResult.InvalidMemRange;
- }
-
- if (CheckRange(
- address,
- size,
- MemoryState.Mask,
- stateExpected,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out MemoryState state,
- out _,
- out _))
- {
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- KernelResult result = MmuUnmap(address, pagesCount);
-
- if (result == KernelResult.Success)
- {
- InsertBlock(address, pagesCount, MemoryState.Unmapped);
- }
-
- return result;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult MapNormalMemory(long address, long size, KMemoryPermission permission)
- {
- // TODO.
- return KernelResult.Success;
- }
-
- public KernelResult MapIoMemory(long address, long size, KMemoryPermission permission)
- {
- // TODO.
- return KernelResult.Success;
- }
-
- public KernelResult AllocateOrMapPa(
- ulong neededPagesCount,
- int alignment,
- ulong srcPa,
- bool map,
- ulong regionStart,
- ulong regionPagesCount,
- MemoryState state,
- KMemoryPermission permission,
- out ulong address)
- {
- address = 0;
-
- ulong regionSize = regionPagesCount * PageSize;
-
- ulong regionEndAddr = regionStart + regionSize;
-
- if (!CanContain(regionStart, regionSize, state))
- {
- return KernelResult.InvalidMemState;
- }
-
- if (regionPagesCount <= neededPagesCount)
- {
- return KernelResult.OutOfMemory;
- }
-
- lock (_blocks)
- {
- address = AllocateVa(regionStart, regionPagesCount, neededPagesCount, alignment);
-
- if (address == 0)
- {
- return KernelResult.OutOfMemory;
- }
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- MemoryOperation operation = map
- ? MemoryOperation.MapPa
- : MemoryOperation.Allocate;
-
- KernelResult result = DoMmuOperation(
- address,
- neededPagesCount,
- srcPa,
- map,
- permission,
- operation);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
-
- InsertBlock(address, neededPagesCount, state, permission);
- }
-
- return KernelResult.Success;
- }
-
- public KernelResult MapNewProcessCode(
- ulong address,
- ulong pagesCount,
- MemoryState state,
- KMemoryPermission permission)
- {
- ulong size = pagesCount * PageSize;
-
- if (!CanContain(address, size, state))
- {
- return KernelResult.InvalidMemState;
- }
-
- lock (_blocks)
- {
- if (!IsUnmapped(address, size))
- {
- return KernelResult.InvalidMemState;
- }
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- KernelResult result = DoMmuOperation(
- address,
- pagesCount,
- 0,
- false,
- permission,
- MemoryOperation.Allocate);
-
- if (result == KernelResult.Success)
- {
- InsertBlock(address, pagesCount, state, permission);
- }
-
- return result;
- }
- }
-
- public KernelResult MapProcessCodeMemory(ulong dst, ulong src, ulong size)
- {
- ulong pagesCount = size / PageSize;
-
- lock (_blocks)
- {
- bool success = CheckRange(
- src,
- size,
- MemoryState.Mask,
- MemoryState.Heap,
- KMemoryPermission.Mask,
- KMemoryPermission.ReadAndWrite,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out MemoryState state,
- out KMemoryPermission permission,
- out _);
-
- success &= IsUnmapped(dst, size);
-
- if (success)
- {
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion * 2))
- {
- return KernelResult.OutOfResource;
- }
-
- KPageList pageList = new KPageList();
-
- AddVaRangeToPageList(pageList, src, pagesCount);
-
- KernelResult result = MmuChangePermission(src, pagesCount, KMemoryPermission.None);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
-
- result = MapPages(dst, pageList, KMemoryPermission.None);
-
- if (result != KernelResult.Success)
- {
- MmuChangePermission(src, pagesCount, permission);
-
- return result;
- }
-
- InsertBlock(src, pagesCount, state, KMemoryPermission.None, MemoryAttribute.Borrowed);
- InsertBlock(dst, pagesCount, MemoryState.ModCodeStatic);
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult UnmapProcessCodeMemory(ulong dst, ulong src, ulong size)
- {
- ulong pagesCount = size / PageSize;
-
- lock (_blocks)
- {
- bool success = CheckRange(
- src,
- size,
- MemoryState.Mask,
- MemoryState.Heap,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.Borrowed,
- MemoryAttribute.IpcAndDeviceMapped,
- out _,
- out _,
- out _);
-
- success &= CheckRange(
- dst,
- PageSize,
- MemoryState.UnmapProcessCodeMemoryAllowed,
- MemoryState.UnmapProcessCodeMemoryAllowed,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out MemoryState state,
- out _,
- out _);
-
- success &= CheckRange(
- dst,
- size,
- MemoryState.Mask,
- state,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.None);
-
- if (success)
- {
- KernelResult result = MmuUnmap(dst, pagesCount);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
-
- // TODO: Missing some checks here.
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion * 2))
- {
- return KernelResult.OutOfResource;
- }
-
- InsertBlock(dst, pagesCount, MemoryState.Unmapped);
- InsertBlock(src, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite);
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult SetHeapSize(ulong size, out ulong address)
- {
- address = 0;
-
- if (size > HeapRegionEnd - HeapRegionStart)
- {
- return KernelResult.OutOfMemory;
- }
-
- KProcess currentProcess = KernelStatic.GetCurrentProcess();
-
- lock (_blocks)
- {
- ulong currentHeapSize = GetHeapSize();
-
- if (currentHeapSize <= size)
- {
- // Expand.
- ulong sizeDelta = size - currentHeapSize;
-
- if (currentProcess.ResourceLimit != null && sizeDelta != 0 &&
- !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, sizeDelta))
- {
- return KernelResult.ResLimitExceeded;
- }
-
- ulong pagesCount = sizeDelta / PageSize;
-
- KMemoryRegionManager region = GetMemoryRegionManager();
-
- KernelResult result = region.AllocatePages(pagesCount, _aslrDisabled, out KPageList pageList);
-
- void CleanUpForError()
- {
- if (pageList != null)
- {
- region.FreePages(pageList);
- }
-
- if (currentProcess.ResourceLimit != null && sizeDelta != 0)
- {
- currentProcess.ResourceLimit.Release(LimitableResource.Memory, sizeDelta);
- }
- }
-
- if (result != KernelResult.Success)
- {
- CleanUpForError();
-
- return result;
- }
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- CleanUpForError();
-
- return KernelResult.OutOfResource;
- }
-
- if (!IsUnmapped(_currentHeapAddr, sizeDelta))
- {
- CleanUpForError();
-
- return KernelResult.InvalidMemState;
- }
-
- result = DoMmuOperation(
- _currentHeapAddr,
- pagesCount,
- pageList,
- KMemoryPermission.ReadAndWrite,
- MemoryOperation.MapVa);
-
- if (result != KernelResult.Success)
- {
- CleanUpForError();
-
- return result;
- }
-
- InsertBlock(_currentHeapAddr, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite);
- }
- else
- {
- // Shrink.
- ulong freeAddr = HeapRegionStart + size;
- ulong sizeDelta = currentHeapSize - size;
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- if (!CheckRange(
- freeAddr,
- sizeDelta,
- MemoryState.Mask,
- MemoryState.Heap,
- KMemoryPermission.Mask,
- KMemoryPermission.ReadAndWrite,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out _,
- out _,
- out _))
- {
- return KernelResult.InvalidMemState;
- }
-
- ulong pagesCount = sizeDelta / PageSize;
-
- KernelResult result = MmuUnmap(freeAddr, pagesCount);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
-
- currentProcess.ResourceLimit?.Release(LimitableResource.Memory, sizeDelta);
-
- InsertBlock(freeAddr, pagesCount, MemoryState.Unmapped);
- }
-
- _currentHeapAddr = HeapRegionStart + size;
- }
-
- address = HeapRegionStart;
-
- return KernelResult.Success;
- }
-
- public ulong GetTotalHeapSize()
- {
- lock (_blocks)
- {
- return GetHeapSize() + PhysicalMemoryUsage;
- }
- }
-
- private ulong GetHeapSize()
- {
- return _currentHeapAddr - HeapRegionStart;
- }
-
- public KernelResult SetHeapCapacity(ulong capacity)
- {
- lock (_blocks)
- {
- _heapCapacity = capacity;
- }
-
- return KernelResult.Success;
- }
-
- public KernelResult SetMemoryAttribute(
- ulong address,
- ulong size,
- MemoryAttribute attributeMask,
- MemoryAttribute attributeValue)
- {
- lock (_blocks)
- {
- if (CheckRange(
- address,
- size,
- MemoryState.AttributeChangeAllowed,
- MemoryState.AttributeChangeAllowed,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.BorrowedAndIpcMapped,
- MemoryAttribute.None,
- MemoryAttribute.DeviceMappedAndUncached,
- out MemoryState state,
- out KMemoryPermission permission,
- out MemoryAttribute attribute))
- {
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- ulong pagesCount = size / PageSize;
-
- attribute &= ~attributeMask;
- attribute |= attributeMask & attributeValue;
-
- InsertBlock(address, pagesCount, state, permission, attribute);
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KMemoryInfo QueryMemory(ulong address)
- {
- if (address >= AddrSpaceStart &&
- address < AddrSpaceEnd)
- {
- lock (_blocks)
- {
- return FindBlock(address).GetInfo();
- }
- }
- else
- {
- return new KMemoryInfo(
- AddrSpaceEnd,
- ~AddrSpaceEnd + 1,
- MemoryState.Reserved,
- KMemoryPermission.None,
- MemoryAttribute.None,
- KMemoryPermission.None,
- 0,
- 0);
- }
- }
-
- public KernelResult Map(ulong dst, ulong src, ulong size)
- {
- bool success;
-
- lock (_blocks)
- {
- success = CheckRange(
- src,
- size,
- MemoryState.MapAllowed,
- MemoryState.MapAllowed,
- KMemoryPermission.Mask,
- KMemoryPermission.ReadAndWrite,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out MemoryState srcState,
- out _,
- out _);
-
- success &= IsUnmapped(dst, size);
-
- if (success)
- {
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion * 2))
- {
- return KernelResult.OutOfResource;
- }
-
- ulong pagesCount = size / PageSize;
-
- KPageList pageList = new KPageList();
-
- AddVaRangeToPageList(pageList, src, pagesCount);
-
- KernelResult result = MmuChangePermission(src, pagesCount, KMemoryPermission.None);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
-
- result = MapPages(dst, pageList, KMemoryPermission.ReadAndWrite);
-
- if (result != KernelResult.Success)
- {
- if (MmuChangePermission(src, pagesCount, KMemoryPermission.ReadAndWrite) != KernelResult.Success)
- {
- throw new InvalidOperationException("Unexpected failure reverting memory permission.");
- }
-
- return result;
- }
-
- InsertBlock(src, pagesCount, srcState, KMemoryPermission.None, MemoryAttribute.Borrowed);
- InsertBlock(dst, pagesCount, MemoryState.Stack, KMemoryPermission.ReadAndWrite);
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult UnmapForKernel(ulong address, ulong pagesCount, MemoryState stateExpected)
- {
- ulong size = pagesCount * PageSize;
-
- lock (_blocks)
- {
- if (CheckRange(
- address,
- size,
- MemoryState.Mask,
- stateExpected,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out _,
- out _,
- out _))
- {
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- KernelResult result = MmuUnmap(address, pagesCount);
-
- if (result == KernelResult.Success)
- {
- InsertBlock(address, pagesCount, MemoryState.Unmapped);
- }
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult Unmap(ulong dst, ulong src, ulong size)
- {
- bool success;
-
- lock (_blocks)
- {
- success = CheckRange(
- src,
- size,
- MemoryState.MapAllowed,
- MemoryState.MapAllowed,
- KMemoryPermission.Mask,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.Borrowed,
- MemoryAttribute.IpcAndDeviceMapped,
- out MemoryState srcState,
- out _,
- out _);
-
- success &= CheckRange(
- dst,
- size,
- MemoryState.Mask,
- MemoryState.Stack,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out _,
- out KMemoryPermission dstPermission,
- out _);
-
- if (success)
- {
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion * 2))
- {
- return KernelResult.OutOfResource;
- }
-
- ulong pagesCount = size / PageSize;
-
- KPageList srcPageList = new KPageList();
- KPageList dstPageList = new KPageList();
-
- AddVaRangeToPageList(srcPageList, src, pagesCount);
- AddVaRangeToPageList(dstPageList, dst, pagesCount);
-
- if (!dstPageList.IsEqual(srcPageList))
- {
- return KernelResult.InvalidMemRange;
- }
-
- KernelResult result = MmuUnmap(dst, pagesCount);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
-
- result = MmuChangePermission(src, pagesCount, KMemoryPermission.ReadAndWrite);
-
- if (result != KernelResult.Success)
- {
- MapPages(dst, dstPageList, dstPermission);
-
- return result;
- }
-
- InsertBlock(src, pagesCount, srcState, KMemoryPermission.ReadAndWrite);
- InsertBlock(dst, pagesCount, MemoryState.Unmapped);
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult SetProcessMemoryPermission(ulong address, ulong size, KMemoryPermission permission)
- {
- lock (_blocks)
- {
- if (CheckRange(
- address,
- size,
- MemoryState.ProcessPermissionChangeAllowed,
- MemoryState.ProcessPermissionChangeAllowed,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out MemoryState oldState,
- out KMemoryPermission oldPermission,
- out _))
- {
- MemoryState newState = oldState;
-
- // If writing into the code region is allowed, then we need
- // to change it to mutable.
- if ((permission & KMemoryPermission.Write) != 0)
- {
- if (oldState == MemoryState.CodeStatic)
- {
- newState = MemoryState.CodeMutable;
- }
- else if (oldState == MemoryState.ModCodeStatic)
- {
- newState = MemoryState.ModCodeMutable;
- }
- else
- {
- throw new InvalidOperationException($"Memory state \"{oldState}\" not valid for this operation.");
- }
- }
-
- if (newState != oldState || permission != oldPermission)
- {
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- ulong pagesCount = size / PageSize;
-
- MemoryOperation operation = (permission & KMemoryPermission.Execute) != 0
- ? MemoryOperation.ChangePermsAndAttributes
- : MemoryOperation.ChangePermRw;
-
- KernelResult result = DoMmuOperation(address, pagesCount, 0, false, permission, operation);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
-
- InsertBlock(address, pagesCount, newState, permission);
- }
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult MapPhysicalMemory(ulong address, ulong size)
- {
- ulong endAddr = address + size;
-
- lock (_blocks)
- {
- ulong mappedSize = 0;
-
- foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
- {
- if (info.State != MemoryState.Unmapped)
- {
- mappedSize += GetSizeInRange(info, address, endAddr);
- }
- }
-
- if (mappedSize == size)
- {
- return KernelResult.Success;
- }
-
- ulong remainingSize = size - mappedSize;
-
- ulong remainingPages = remainingSize / PageSize;
-
- KProcess currentProcess = KernelStatic.GetCurrentProcess();
-
- if (currentProcess.ResourceLimit != null &&
- !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, remainingSize))
- {
- return KernelResult.ResLimitExceeded;
- }
-
- KMemoryRegionManager region = GetMemoryRegionManager();
-
- KernelResult result = region.AllocatePages(remainingPages, _aslrDisabled, out KPageList pageList);
-
- void CleanUpForError()
- {
- if (pageList != null)
- {
- region.FreePages(pageList);
- }
-
- currentProcess.ResourceLimit?.Release(LimitableResource.Memory, remainingSize);
- }
-
- if (result != KernelResult.Success)
- {
- CleanUpForError();
-
- return result;
- }
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- CleanUpForError();
-
- return KernelResult.OutOfResource;
- }
-
- MapPhysicalMemory(pageList, address, endAddr);
-
- PhysicalMemoryUsage += remainingSize;
-
- ulong pagesCount = size / PageSize;
-
- InsertBlock(
- address,
- pagesCount,
- MemoryState.Unmapped,
- KMemoryPermission.None,
- MemoryAttribute.None,
- MemoryState.Heap,
- KMemoryPermission.ReadAndWrite,
- MemoryAttribute.None);
- }
-
- return KernelResult.Success;
- }
-
- public KernelResult UnmapPhysicalMemory(ulong address, ulong size)
- {
- ulong endAddr = address + size;
-
- lock (_blocks)
- {
- // Scan, ensure that the region can be unmapped (all blocks are heap or
- // already unmapped), fill pages list for freeing memory.
- ulong heapMappedSize = 0;
-
- KPageList pageList = new KPageList();
-
- foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
- {
- if (info.State == MemoryState.Heap)
- {
- if (info.Attribute != MemoryAttribute.None)
- {
- return KernelResult.InvalidMemState;
- }
-
- ulong blockSize = GetSizeInRange(info, address, endAddr);
- ulong blockAddress = GetAddrInRange(info, address);
-
- AddVaRangeToPageList(pageList, blockAddress, blockSize / PageSize);
-
- heapMappedSize += blockSize;
- }
- else if (info.State != MemoryState.Unmapped)
- {
- return KernelResult.InvalidMemState;
- }
- }
-
- if (heapMappedSize == 0)
- {
- return KernelResult.Success;
- }
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- // Try to unmap all the heap mapped memory inside range.
- KernelResult result = KernelResult.Success;
-
- foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
- {
- if (info.State == MemoryState.Heap)
- {
- ulong blockSize = GetSizeInRange(info, address, endAddr);
- ulong blockAddress = GetAddrInRange(info, address);
-
- ulong blockPagesCount = blockSize / PageSize;
-
- result = MmuUnmap(blockAddress, blockPagesCount);
-
- if (result != KernelResult.Success)
- {
- // If we failed to unmap, we need to remap everything back again.
- MapPhysicalMemory(pageList, address, blockAddress + blockSize);
-
- break;
- }
- }
- }
-
- if (result == KernelResult.Success)
- {
- GetMemoryRegionManager().FreePages(pageList);
-
- PhysicalMemoryUsage -= heapMappedSize;
-
- KProcess currentProcess = KernelStatic.GetCurrentProcess();
-
- currentProcess.ResourceLimit?.Release(LimitableResource.Memory, heapMappedSize);
-
- ulong pagesCount = size / PageSize;
-
- InsertBlock(address, pagesCount, MemoryState.Unmapped);
- }
-
- return result;
- }
- }
-
- private void MapPhysicalMemory(KPageList pageList, ulong address, ulong endAddr)
- {
- LinkedListNode<KPageNode> pageListNode = pageList.Nodes.First;
-
- KPageNode pageNode = pageListNode.Value;
-
- ulong srcPa = pageNode.Address;
- ulong srcPaPages = pageNode.PagesCount;
-
- foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
- {
- if (info.State == MemoryState.Unmapped)
- {
- ulong blockSize = GetSizeInRange(info, address, endAddr);
-
- ulong dstVaPages = blockSize / PageSize;
-
- ulong dstVa = GetAddrInRange(info, address);
-
- while (dstVaPages > 0)
- {
- if (srcPaPages == 0)
- {
- pageListNode = pageListNode.Next;
-
- pageNode = pageListNode.Value;
-
- srcPa = pageNode.Address;
- srcPaPages = pageNode.PagesCount;
- }
-
- ulong pagesCount = srcPaPages;
-
- if (pagesCount > dstVaPages)
- {
- pagesCount = dstVaPages;
- }
-
- DoMmuOperation(
- dstVa,
- pagesCount,
- srcPa,
- true,
- KMemoryPermission.ReadAndWrite,
- MemoryOperation.MapPa);
-
- dstVa += pagesCount * PageSize;
- srcPa += pagesCount * PageSize;
- srcPaPages -= pagesCount;
- dstVaPages -= pagesCount;
- }
- }
- }
- }
-
- public KernelResult CopyDataToCurrentProcess(
- ulong dst,
- ulong size,
- ulong src,
- MemoryState stateMask,
- MemoryState stateExpected,
- KMemoryPermission permission,
- MemoryAttribute attributeMask,
- MemoryAttribute attributeExpected)
- {
- // Client -> server.
- return CopyDataFromOrToCurrentProcess(
- size,
- src,
- dst,
- stateMask,
- stateExpected,
- permission,
- attributeMask,
- attributeExpected,
- toServer: true);
- }
-
- public KernelResult CopyDataFromCurrentProcess(
- ulong dst,
- ulong size,
- MemoryState stateMask,
- MemoryState stateExpected,
- KMemoryPermission permission,
- MemoryAttribute attributeMask,
- MemoryAttribute attributeExpected,
- ulong src)
- {
- // Server -> client.
- return CopyDataFromOrToCurrentProcess(
- size,
- dst,
- src,
- stateMask,
- stateExpected,
- permission,
- attributeMask,
- attributeExpected,
- toServer: false);
- }
-
- private KernelResult CopyDataFromOrToCurrentProcess(
- ulong size,
- ulong clientAddress,
- ulong serverAddress,
- MemoryState stateMask,
- MemoryState stateExpected,
- KMemoryPermission permission,
- MemoryAttribute attributeMask,
- MemoryAttribute attributeExpected,
- bool toServer)
- {
- if (AddrSpaceStart > clientAddress)
- {
- return KernelResult.InvalidMemState;
- }
-
- ulong srcEndAddr = clientAddress + size;
-
- if (srcEndAddr <= clientAddress || srcEndAddr - 1 > AddrSpaceEnd - 1)
- {
- return KernelResult.InvalidMemState;
- }
-
- lock (_blocks)
- {
- if (CheckRange(
- clientAddress,
- size,
- stateMask,
- stateExpected,
- permission,
- permission,
- attributeMask | MemoryAttribute.Uncached,
- attributeExpected))
- {
- KProcess currentProcess = KernelStatic.GetCurrentProcess();
-
- while (size > 0)
- {
- ulong copySize = Math.Min(PageSize - (serverAddress & (PageSize - 1)), PageSize - (clientAddress & (PageSize - 1)));
-
- if (copySize > size)
- {
- copySize = size;
- }
-
- ulong serverDramAddr = currentProcess.MemoryManager.GetDramAddressFromVa(serverAddress);
- ulong clientDramAddr = GetDramAddressFromVa(clientAddress);
-
- if (serverDramAddr != clientDramAddr)
- {
- if (toServer)
- {
- _context.Memory.Copy(serverDramAddr, clientDramAddr, copySize);
- }
- else
- {
- _context.Memory.Copy(clientDramAddr, serverDramAddr, copySize);
- }
- }
-
- serverAddress += copySize;
- clientAddress += copySize;
- size -= copySize;
- }
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult MapBufferFromClientProcess(
- ulong size,
- ulong src,
- KMemoryManager sourceMemMgr,
- KMemoryPermission permission,
- MemoryState state,
- bool copyData,
- out ulong dst)
- {
- dst = 0;
-
- KernelResult result = sourceMemMgr.GetPagesForMappingIntoAnotherProcess(
- src,
- size,
- permission,
- state,
- copyData,
- _aslrDisabled,
- _memRegion,
- out KPageList pageList);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
-
- result = MapPagesFromAnotherProcess(size, src, permission, state, pageList, out ulong va);
-
- if (result != KernelResult.Success)
- {
- sourceMemMgr.UnmapIpcRestorePermission(src, size, state);
- }
- else
- {
- dst = va;
- }
-
- return result;
- }
-
- private KernelResult GetPagesForMappingIntoAnotherProcess(
- ulong address,
- ulong size,
- KMemoryPermission permission,
- MemoryState state,
- bool copyData,
- bool aslrDisabled,
- MemoryRegion region,
- out KPageList pageList)
- {
- pageList = null;
-
- if (AddrSpaceStart > address)
- {
- return KernelResult.InvalidMemState;
- }
-
- ulong endAddr = address + size;
-
- if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1)
- {
- return KernelResult.InvalidMemState;
- }
-
- MemoryState stateMask;
-
- switch (state)
- {
- case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break;
- case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break;
- case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break;
-
- default: return KernelResult.InvalidCombination;
- }
-
- KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite
- ? KMemoryPermission.None
- : KMemoryPermission.Read;
-
- MemoryAttribute attributeMask = MemoryAttribute.Borrowed | MemoryAttribute.Uncached;
-
- if (state == MemoryState.IpcBuffer0)
- {
- attributeMask |= MemoryAttribute.DeviceMapped;
- }
-
- ulong addressRounded = BitUtils.AlignUp (address, PageSize);
- ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
- ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize);
- ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize);
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- ulong visitedSize = 0;
-
- void CleanUpForError()
- {
- if (visitedSize == 0)
- {
- return;
- }
-
- ulong endAddrVisited = address + visitedSize;
-
- foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrVisited))
- {
- if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0)
- {
- ulong blockAddress = GetAddrInRange(info, addressRounded);
- ulong blockSize = GetSizeInRange(info, addressRounded, endAddrVisited);
-
- ulong blockPagesCount = blockSize / PageSize;
-
- if (DoMmuOperation(
- blockAddress,
- blockPagesCount,
- 0,
- false,
- info.Permission,
- MemoryOperation.ChangePermRw) != KernelResult.Success)
- {
- throw new InvalidOperationException("Unexpected failure trying to restore permission.");
- }
- }
- }
- }
-
- // Signal a read for any resources tracking reads in the region, as the other process is likely to use their data.
- _cpuMemory.SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, false);
-
- lock (_blocks)
- {
- KernelResult result;
-
- if (addressRounded < endAddrTruncated)
- {
- foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated))
- {
- // Check if the block state matches what we expect.
- if ((info.State & stateMask) != stateMask ||
- (info.Permission & permission) != permission ||
- (info.Attribute & attributeMask) != MemoryAttribute.None)
- {
- CleanUpForError();
-
- return KernelResult.InvalidMemState;
- }
-
- ulong blockAddress = GetAddrInRange(info, addressRounded);
- ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated);
-
- ulong blockPagesCount = blockSize / PageSize;
-
- if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0)
- {
- result = DoMmuOperation(
- blockAddress,
- blockPagesCount,
- 0,
- false,
- permissionMask,
- MemoryOperation.ChangePermRw);
-
- if (result != KernelResult.Success)
- {
- CleanUpForError();
-
- return result;
- }
- }
-
- visitedSize += blockSize;
- }
- }
-
- result = GetPagesForIpcTransfer(address, size, copyData, aslrDisabled, region, out pageList);
-
- if (result != KernelResult.Success)
- {
- CleanUpForError();
-
- return result;
- }
-
- if (visitedSize != 0)
- {
- InsertBlock(addressRounded, visitedSize / PageSize, SetIpcMappingPermissions, permissionMask);
- }
- }
-
- return KernelResult.Success;
- }
-
- private KernelResult GetPagesForIpcTransfer(
- ulong address,
- ulong size,
- bool copyData,
- bool aslrDisabled,
- MemoryRegion region,
- out KPageList pageList)
- {
- // When the start address is unaligned, we can't safely map the
- // first page as it would expose other undesirable information on the
- // target process. So, instead we allocate new pages, copy the data
- // inside the range, and then clear the remaining space.
- // The same also holds for the last page, if the end address
- // (address + size) is also not aligned.
-
- pageList = null;
-
- KPageList pages = new KPageList();
-
- ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
- ulong addressRounded = BitUtils.AlignUp (address, PageSize);
-
- ulong endAddr = address + size;
-
- ulong dstFirstPagePa = 0;
- ulong dstLastPagePa = 0;
-
- void CleanUpForError()
- {
- if (dstFirstPagePa != 0)
- {
- FreeSinglePage(region, dstFirstPagePa);
- }
-
- if (dstLastPagePa != 0)
- {
- FreeSinglePage(region, dstLastPagePa);
- }
- }
-
- // Is the first page address aligned?
- // If not, allocate a new page and copy the unaligned chunck.
- if (addressTruncated < addressRounded)
- {
- dstFirstPagePa = AllocateSinglePage(region, aslrDisabled);
-
- if (dstFirstPagePa == 0)
- {
- return KernelResult.OutOfMemory;
- }
-
- ulong firstPageFillAddress = dstFirstPagePa;
-
- if (!TryConvertVaToPa(addressTruncated, out ulong srcFirstPagePa))
- {
- CleanUpForError();
-
- return KernelResult.InvalidMemState;
- }
-
- ulong unusedSizeAfter;
-
- if (copyData)
- {
- ulong unusedSizeBefore = address - addressTruncated;
-
- _context.Memory.ZeroFill(GetDramAddressFromPa(dstFirstPagePa), unusedSizeBefore);
-
- ulong copySize = addressRounded <= endAddr ? addressRounded - address : size;
-
- _context.Memory.Copy(
- GetDramAddressFromPa(dstFirstPagePa + unusedSizeBefore),
- GetDramAddressFromPa(srcFirstPagePa + unusedSizeBefore), copySize);
-
- firstPageFillAddress += unusedSizeBefore + copySize;
-
- unusedSizeAfter = addressRounded > endAddr ? addressRounded - endAddr : 0;
- }
- else
- {
- unusedSizeAfter = PageSize;
- }
-
- if (unusedSizeAfter != 0)
- {
- _context.Memory.ZeroFill(GetDramAddressFromPa(firstPageFillAddress), unusedSizeAfter);
- }
-
- if (pages.AddRange(dstFirstPagePa, 1) != KernelResult.Success)
- {
- CleanUpForError();
-
- return KernelResult.OutOfResource;
- }
- }
-
- ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize);
- ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize);
-
- if (endAddrTruncated > addressRounded)
- {
- ulong alignedPagesCount = (endAddrTruncated - addressRounded) / PageSize;
-
- AddVaRangeToPageList(pages, addressRounded, alignedPagesCount);
- }
-
- // Is the last page end address aligned?
- // If not, allocate a new page and copy the unaligned chunck.
- if (endAddrTruncated < endAddrRounded && (addressTruncated == addressRounded || addressTruncated < endAddrTruncated))
- {
- dstLastPagePa = AllocateSinglePage(region, aslrDisabled);
-
- if (dstLastPagePa == 0)
- {
- CleanUpForError();
-
- return KernelResult.OutOfMemory;
- }
-
- ulong lastPageFillAddr = dstLastPagePa;
-
- if (!TryConvertVaToPa(endAddrTruncated, out ulong srcLastPagePa))
- {
- CleanUpForError();
-
- return KernelResult.InvalidMemState;
- }
-
- ulong unusedSizeAfter;
-
- if (copyData)
- {
- ulong copySize = endAddr - endAddrTruncated;
-
- _context.Memory.Copy(
- GetDramAddressFromPa(dstLastPagePa),
- GetDramAddressFromPa(srcLastPagePa), copySize);
-
- lastPageFillAddr += copySize;
-
- unusedSizeAfter = PageSize - copySize;
- }
- else
- {
- unusedSizeAfter = PageSize;
- }
-
- _context.Memory.ZeroFill(GetDramAddressFromPa(lastPageFillAddr), unusedSizeAfter);
-
- if (pages.AddRange(dstLastPagePa, 1) != KernelResult.Success)
- {
- CleanUpForError();
-
- return KernelResult.OutOfResource;
- }
- }
-
- pageList = pages;
-
- return KernelResult.Success;
- }
-
- private ulong AllocateSinglePage(MemoryRegion region, bool aslrDisabled)
- {
- KMemoryRegionManager regionMgr = _context.MemoryRegions[(int)region];
-
- return regionMgr.AllocatePagesContiguous(1, aslrDisabled);
- }
-
- private void FreeSinglePage(MemoryRegion region, ulong address)
- {
- KMemoryRegionManager regionMgr = _context.MemoryRegions[(int)region];
-
- regionMgr.FreePage(address);
- }
-
- private KernelResult MapPagesFromAnotherProcess(
- ulong size,
- ulong address,
- KMemoryPermission permission,
- MemoryState state,
- KPageList pageList,
- out ulong dst)
- {
- dst = 0;
-
- lock (_blocks)
- {
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- ulong endAddr = address + size;
-
- ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
- ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize);
-
- ulong neededSize = endAddrRounded - addressTruncated;
-
- ulong neededPagesCount = neededSize / PageSize;
-
- ulong regionPagesCount = (AliasRegionEnd - AliasRegionStart) / PageSize;
-
- ulong va = 0;
-
- for (int unit = MappingUnitSizes.Length - 1; unit >= 0 && va == 0; unit--)
- {
- int alignment = MappingUnitSizes[unit];
-
- va = AllocateVa(AliasRegionStart, regionPagesCount, neededPagesCount, alignment);
- }
-
- if (va == 0)
- {
- return KernelResult.OutOfVaSpace;
- }
-
- if (pageList.Nodes.Count != 0)
- {
- KernelResult result = MapPages(va, pageList, permission);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
- }
-
- InsertBlock(va, neededPagesCount, state, permission);
-
- dst = va + (address - addressTruncated);
- }
-
- return KernelResult.Success;
- }
-
- public KernelResult UnmapNoAttributeIfStateEquals(ulong address, ulong size, MemoryState state)
- {
- if (AddrSpaceStart > address)
- {
- return KernelResult.InvalidMemState;
- }
-
- ulong endAddr = address + size;
-
- if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1)
- {
- return KernelResult.InvalidMemState;
- }
-
- lock (_blocks)
- {
- if (CheckRange(
- address,
- size,
- MemoryState.Mask,
- state,
- KMemoryPermission.Read,
- KMemoryPermission.Read,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out _,
- out _,
- out _))
- {
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
- ulong addressRounded = BitUtils.AlignUp (address, PageSize);
- ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize);
- ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize);
-
- ulong pagesCount = (endAddrRounded - addressTruncated) / PageSize;
-
- // Free pages we had to create on-demand, if any of the buffer was not page aligned.
- // Real kernel has page ref counting, so this is done as part of the unmap operation.
- if (addressTruncated != addressRounded)
- {
- FreeSinglePage(_memRegion, ConvertVaToPa(addressTruncated));
- }
-
- if (endAddrTruncated < endAddrRounded && (addressTruncated == addressRounded || addressTruncated < endAddrTruncated))
- {
- FreeSinglePage(_memRegion, ConvertVaToPa(endAddrTruncated));
- }
-
- KernelResult result = DoMmuOperation(
- addressTruncated,
- pagesCount,
- 0,
- false,
- KMemoryPermission.None,
- MemoryOperation.Unmap);
-
- if (result == KernelResult.Success)
- {
- InsertBlock(addressTruncated, pagesCount, MemoryState.Unmapped);
- }
-
- return result;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult UnmapIpcRestorePermission(ulong address, ulong size, MemoryState state)
- {
- ulong endAddr = address + size;
-
- ulong addressRounded = BitUtils.AlignUp (address, PageSize);
- ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
- ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize);
- ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize);
-
- ulong pagesCount = addressRounded < endAddrTruncated ? (endAddrTruncated - addressRounded) / PageSize : 0;
-
- if (pagesCount == 0)
- {
- return KernelResult.Success;
- }
-
- MemoryState stateMask;
-
- switch (state)
- {
- case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break;
- case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break;
- case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break;
-
- default: return KernelResult.InvalidCombination;
- }
-
- MemoryAttribute attributeMask =
- MemoryAttribute.Borrowed |
- MemoryAttribute.IpcMapped |
- MemoryAttribute.Uncached;
-
- if (state == MemoryState.IpcBuffer0)
- {
- attributeMask |= MemoryAttribute.DeviceMapped;
- }
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- // Anything on the client side should see this memory as modified.
- _cpuMemory.SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, true);
-
- lock (_blocks)
- {
- foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated))
- {
- // Check if the block state matches what we expect.
- if ((info.State & stateMask) != stateMask ||
- (info.Attribute & attributeMask) != MemoryAttribute.IpcMapped)
- {
- return KernelResult.InvalidMemState;
- }
-
- if (info.Permission != info.SourcePermission && info.IpcRefCount == 1)
- {
- ulong blockAddress = GetAddrInRange(info, addressRounded);
- ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated);
-
- ulong blockPagesCount = blockSize / PageSize;
-
- KernelResult result = DoMmuOperation(
- blockAddress,
- blockPagesCount,
- 0,
- false,
- info.SourcePermission,
- MemoryOperation.ChangePermRw);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
- }
- }
-
- InsertBlock(addressRounded, pagesCount, RestoreIpcMappingPermissions);
-
- return KernelResult.Success;
- }
- }
-
- public KernelResult BorrowIpcBuffer(ulong address, ulong size)
- {
- return SetAttributesAndChangePermission(
- address,
- size,
- MemoryState.IpcBufferAllowed,
- MemoryState.IpcBufferAllowed,
- KMemoryPermission.Mask,
- KMemoryPermission.ReadAndWrite,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- KMemoryPermission.None,
- MemoryAttribute.Borrowed);
- }
-
- public KernelResult BorrowTransferMemory(KPageList pageList, ulong address, ulong size, KMemoryPermission permission)
- {
- return SetAttributesAndChangePermission(
- address,
- size,
- MemoryState.TransferMemoryAllowed,
- MemoryState.TransferMemoryAllowed,
- KMemoryPermission.Mask,
- KMemoryPermission.ReadAndWrite,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- permission,
- MemoryAttribute.Borrowed,
- pageList);
- }
-
- private KernelResult SetAttributesAndChangePermission(
- ulong address,
- ulong size,
- MemoryState stateMask,
- MemoryState stateExpected,
- KMemoryPermission permissionMask,
- KMemoryPermission permissionExpected,
- MemoryAttribute attributeMask,
- MemoryAttribute attributeExpected,
- KMemoryPermission newPermission,
- MemoryAttribute attributeSetMask,
- KPageList pageList = null)
- {
- if (address + size <= address || !InsideAddrSpace(address, size))
- {
- return KernelResult.InvalidMemState;
- }
-
- lock (_blocks)
- {
- if (CheckRange(
- address,
- size,
- stateMask | MemoryState.IsPoolAllocated,
- stateExpected | MemoryState.IsPoolAllocated,
- permissionMask,
- permissionExpected,
- attributeMask,
- attributeExpected,
- MemoryAttribute.IpcAndDeviceMapped,
- out MemoryState oldState,
- out KMemoryPermission oldPermission,
- out MemoryAttribute oldAttribute))
- {
- ulong pagesCount = size / PageSize;
-
- if (pageList != null)
- {
- AddVaRangeToPageList(pageList, address, pagesCount);
- }
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- if (newPermission == KMemoryPermission.None)
- {
- newPermission = oldPermission;
- }
-
- if (newPermission != oldPermission)
- {
- KernelResult result = DoMmuOperation(
- address,
- pagesCount,
- 0,
- false,
- newPermission,
- MemoryOperation.ChangePermRw);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
- }
-
- MemoryAttribute newAttribute = oldAttribute | attributeSetMask;
-
- InsertBlock(address, pagesCount, oldState, newPermission, newAttribute);
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- public KernelResult UnborrowIpcBuffer(ulong address, ulong size)
- {
- return ClearAttributesAndChangePermission(
- address,
- size,
- MemoryState.IpcBufferAllowed,
- MemoryState.IpcBufferAllowed,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.Borrowed,
- KMemoryPermission.ReadAndWrite,
- MemoryAttribute.Borrowed);
- }
-
- public KernelResult UnborrowTransferMemory(ulong address, ulong size, KPageList pageList)
- {
- return ClearAttributesAndChangePermission(
- address,
- size,
- MemoryState.TransferMemoryAllowed,
- MemoryState.TransferMemoryAllowed,
- KMemoryPermission.None,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.Borrowed,
- KMemoryPermission.ReadAndWrite,
- MemoryAttribute.Borrowed,
- pageList);
- }
-
- private KernelResult ClearAttributesAndChangePermission(
- ulong address,
- ulong size,
- MemoryState stateMask,
- MemoryState stateExpected,
- KMemoryPermission permissionMask,
- KMemoryPermission permissionExpected,
- MemoryAttribute attributeMask,
- MemoryAttribute attributeExpected,
- KMemoryPermission newPermission,
- MemoryAttribute attributeClearMask,
- KPageList pageList = null)
- {
- if (address + size <= address || !InsideAddrSpace(address, size))
- {
- return KernelResult.InvalidMemState;
- }
-
- lock (_blocks)
- {
- if (CheckRange(
- address,
- size,
- stateMask | MemoryState.IsPoolAllocated,
- stateExpected | MemoryState.IsPoolAllocated,
- permissionMask,
- permissionExpected,
- attributeMask,
- attributeExpected,
- MemoryAttribute.IpcAndDeviceMapped,
- out MemoryState oldState,
- out KMemoryPermission oldPermission,
- out MemoryAttribute oldAttribute))
- {
- ulong pagesCount = size / PageSize;
-
- if (pageList != null)
- {
- KPageList currPageList = new KPageList();
-
- AddVaRangeToPageList(currPageList, address, pagesCount);
-
- if (!currPageList.IsEqual(pageList))
- {
- return KernelResult.InvalidMemRange;
- }
- }
-
- if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
- {
- return KernelResult.OutOfResource;
- }
-
- if (newPermission == KMemoryPermission.None)
- {
- newPermission = oldPermission;
- }
-
- if (newPermission != oldPermission)
- {
- KernelResult result = DoMmuOperation(
- address,
- pagesCount,
- 0,
- false,
- newPermission,
- MemoryOperation.ChangePermRw);
-
- if (result != KernelResult.Success)
- {
- return result;
- }
- }
-
- MemoryAttribute newAttribute = oldAttribute & ~attributeClearMask;
-
- InsertBlock(address, pagesCount, oldState, newPermission, newAttribute);
-
- return KernelResult.Success;
- }
- else
- {
- return KernelResult.InvalidMemState;
- }
- }
- }
-
- private void AddVaRangeToPageList(KPageList pageList, ulong start, ulong pagesCount)
- {
- ulong address = start;
-
- while (address < start + pagesCount * PageSize)
- {
- if (!TryConvertVaToPa(address, out ulong pa))
- {
- throw new InvalidOperationException("Unexpected failure translating virtual address.");
- }
-
- pageList.AddRange(pa, 1);
-
- address += PageSize;
- }
- }
-
- private static ulong GetAddrInRange(KMemoryInfo info, ulong start)
- {
- if (info.Address < start)
- {
- return start;
- }
-
- return info.Address;
- }
-
- private static ulong GetSizeInRange(KMemoryInfo info, ulong start, ulong end)
- {
- ulong endAddr = info.Size + info.Address;
- ulong size = info.Size;
-
- if (info.Address < start)
- {
- size -= start - info.Address;
- }
-
- if (endAddr > end)
- {
- size -= endAddr - end;
- }
-
- return size;
- }
-
- private bool IsUnmapped(ulong address, ulong size)
- {
- return CheckRange(
- address,
- size,
- MemoryState.Mask,
- MemoryState.Unmapped,
- KMemoryPermission.Mask,
- KMemoryPermission.None,
- MemoryAttribute.Mask,
- MemoryAttribute.None,
- MemoryAttribute.IpcAndDeviceMapped,
- out _,
- out _,
- out _);
- }
-
- private bool CheckRange(
- ulong address,
- ulong size,
- MemoryState stateMask,
- MemoryState stateExpected,
- KMemoryPermission permissionMask,
- KMemoryPermission permissionExpected,
- MemoryAttribute attributeMask,
- MemoryAttribute attributeExpected,
- MemoryAttribute attributeIgnoreMask,
- out MemoryState outState,
- out KMemoryPermission outPermission,
- out MemoryAttribute outAttribute)
- {
- ulong endAddr = address + size;
-
- LinkedListNode<KMemoryBlock> node = FindBlockNode(address);
-
- KMemoryInfo info = node.Value.GetInfo();
-
- MemoryState firstState = info.State;
- KMemoryPermission firstPermission = info.Permission;
- MemoryAttribute firstAttribute = info.Attribute;
-
- do
- {
- info = node.Value.GetInfo();
-
- // Check if the block state matches what we expect.
- if ( firstState != info.State ||
- firstPermission != info.Permission ||
- (info.Attribute & attributeMask) != attributeExpected ||
- (firstAttribute | attributeIgnoreMask) != (info.Attribute | attributeIgnoreMask) ||
- (firstState & stateMask) != stateExpected ||
- (firstPermission & permissionMask) != permissionExpected)
- {
- outState = MemoryState.Unmapped;
- outPermission = KMemoryPermission.None;
- outAttribute = MemoryAttribute.None;
-
- return false;
- }
- }
- while (info.Address + info.Size - 1 < endAddr - 1 && (node = node.Next) != null);
-
- outState = firstState;
- outPermission = firstPermission;
- outAttribute = firstAttribute & ~attributeIgnoreMask;
-
- return true;
- }
-
- private bool CheckRange(
- ulong address,
- ulong size,
- MemoryState stateMask,
- MemoryState stateExpected,
- KMemoryPermission permissionMask,
- KMemoryPermission permissionExpected,
- MemoryAttribute attributeMask,
- MemoryAttribute attributeExpected)
- {
- foreach (KMemoryInfo info in IterateOverRange(address, address + size))
- {
- // Check if the block state matches what we expect.
- if ((info.State & stateMask) != stateExpected ||
- (info.Permission & permissionMask) != permissionExpected ||
- (info.Attribute & attributeMask) != attributeExpected)
- {
- return false;
- }
- }
-
- return true;
- }
-
- private IEnumerable<KMemoryInfo> IterateOverRange(ulong start, ulong end)
- {
- LinkedListNode<KMemoryBlock> node = FindBlockNode(start);
-
- KMemoryInfo info;
-
- do
- {
- info = node.Value.GetInfo();
-
- yield return info;
- }
- while (info.Address + info.Size - 1 < end - 1 && (node = node.Next) != null);
- }
-
- private void InsertBlock(
- ulong baseAddress,
- ulong pagesCount,
- MemoryState oldState,
- KMemoryPermission oldPermission,
- MemoryAttribute oldAttribute,
- MemoryState newState,
- KMemoryPermission newPermission,
- MemoryAttribute newAttribute)
- {
- // Insert new block on the list only on areas where the state
- // of the block matches the state specified on the old* state
- // arguments, otherwise leave it as is.
- int oldCount = _blocks.Count;
-
- oldAttribute |= MemoryAttribute.IpcAndDeviceMapped;
-
- ulong endAddr = baseAddress + pagesCount * PageSize;
-
- LinkedListNode<KMemoryBlock> node = _blocks.First;
-
- while (node != null)
- {
- LinkedListNode<KMemoryBlock> newNode = node;
-
- KMemoryBlock currBlock = node.Value;
-
- ulong currBaseAddr = currBlock.BaseAddress;
- ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
-
- if (baseAddress < currEndAddr && currBaseAddr < endAddr)
- {
- MemoryAttribute currBlockAttr = currBlock.Attribute | MemoryAttribute.IpcAndDeviceMapped;
+ public KMemoryRegionManager[] MemoryRegions { get; }
- if (currBlock.State != oldState ||
- currBlock.Permission != oldPermission ||
- currBlockAttr != oldAttribute)
- {
- node = node.Next;
-
- continue;
- }
-
- if (baseAddress > currBaseAddr)
- {
- _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress));
- }
-
- if (endAddr < currEndAddr)
- {
- newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr));
- }
-
- newNode.Value.SetState(newPermission, newState, newAttribute);
-
- newNode = MergeEqualStateNeighbors(newNode);
- }
-
- if (currEndAddr - 1 >= endAddr - 1)
- {
- break;
- }
-
- node = newNode.Next;
- }
-
- _blockAllocator.Count += _blocks.Count - oldCount;
-
- ValidateInternalState();
- }
-
- private void InsertBlock(
- ulong baseAddress,
- ulong pagesCount,
- MemoryState state,
- KMemoryPermission permission = KMemoryPermission.None,
- MemoryAttribute attribute = MemoryAttribute.None)
- {
- // Inserts new block at the list, replacing and splitting
- // existing blocks as needed.
- int oldCount = _blocks.Count;
-
- ulong endAddr = baseAddress + pagesCount * PageSize;
-
- LinkedListNode<KMemoryBlock> node = _blocks.First;
-
- while (node != null)
- {
- LinkedListNode<KMemoryBlock> newNode = node;
-
- KMemoryBlock currBlock = node.Value;
-
- ulong currBaseAddr = currBlock.BaseAddress;
- ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
-
- if (baseAddress < currEndAddr && currBaseAddr < endAddr)
- {
- if (baseAddress > currBaseAddr)
- {
- _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress));
- }
-
- if (endAddr < currEndAddr)
- {
- newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr));
- }
-
- newNode.Value.SetState(permission, state, attribute);
-
- newNode = MergeEqualStateNeighbors(newNode);
- }
-
- if (currEndAddr - 1 >= endAddr - 1)
- {
- break;
- }
-
- node = newNode.Next;
- }
-
- _blockAllocator.Count += _blocks.Count - oldCount;
-
- ValidateInternalState();
- }
-
- private static void SetIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission)
+ public KMemoryManager(MemorySize size, MemoryArrange arrange)
{
- block.SetIpcMappingPermission(permission);
+ MemoryRegions = KernelInit.GetMemoryRegions(size, arrange);
}
- private static void RestoreIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission)
+ private KMemoryRegionManager GetMemoryRegion(ulong address)
{
- block.RestoreIpcMappingPermission();
- }
-
- private delegate void BlockMutator(KMemoryBlock block, KMemoryPermission newPerm);
-
- private void InsertBlock(
- ulong baseAddress,
- ulong pagesCount,
- BlockMutator blockMutate,
- KMemoryPermission permission = KMemoryPermission.None)
- {
- // Inserts new block at the list, replacing and splitting
- // existing blocks as needed, then calling the callback
- // function on the new block.
- int oldCount = _blocks.Count;
-
- ulong endAddr = baseAddress + pagesCount * PageSize;
-
- LinkedListNode<KMemoryBlock> node = _blocks.First;
-
- while (node != null)
+ for (int i = 0; i < MemoryRegions.Length; i++)
{
- LinkedListNode<KMemoryBlock> newNode = node;
-
- KMemoryBlock currBlock = node.Value;
-
- ulong currBaseAddr = currBlock.BaseAddress;
- ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
-
- if (baseAddress < currEndAddr && currBaseAddr < endAddr)
- {
- if (baseAddress > currBaseAddr)
- {
- _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress));
- }
-
- if (endAddr < currEndAddr)
- {
- newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr));
- }
+ var region = MemoryRegions[i];
- KMemoryBlock newBlock = newNode.Value;
-
- blockMutate(newBlock, permission);
-
- newNode = MergeEqualStateNeighbors(newNode);
- }
-
- if (currEndAddr - 1 >= endAddr - 1)
+ if (address >= region.Address && address < region.EndAddr)
{
- break;
+ return region;
}
-
- node = newNode.Next;
}
- _blockAllocator.Count += _blocks.Count - oldCount;
-
- ValidateInternalState();
+ return null;
}
- [Conditional("DEBUG")]
- private void ValidateInternalState()
+ public void IncrementPagesReferenceCount(ulong address, ulong pagesCount)
{
- ulong expectedAddress = 0;
-
- LinkedListNode<KMemoryBlock> node = _blocks.First;
-
- while (node != null)
- {
- LinkedListNode<KMemoryBlock> newNode = node;
-
- KMemoryBlock currBlock = node.Value;
-
- Debug.Assert(currBlock.BaseAddress == expectedAddress);
-
- expectedAddress = currBlock.BaseAddress + currBlock.PagesCount * PageSize;
-
- node = newNode.Next;
- }
-
- Debug.Assert(expectedAddress == AddrSpaceEnd);
+ IncrementOrDecrementPagesReferenceCount(address, pagesCount, true);
}
- private LinkedListNode<KMemoryBlock> MergeEqualStateNeighbors(LinkedListNode<KMemoryBlock> node)
+ public void DecrementPagesReferenceCount(ulong address, ulong pagesCount)
{
- KMemoryBlock block = node.Value;
-
- if (node.Previous != null)
- {
- KMemoryBlock previousBlock = node.Previous.Value;
-
- if (BlockStateEquals(block, previousBlock))
- {
- LinkedListNode<KMemoryBlock> previousNode = node.Previous;
-
- _blocks.Remove(node);
-
- previousBlock.AddPages(block.PagesCount);
-
- node = previousNode;
- block = previousBlock;
- }
- }
-
- if (node.Next != null)
- {
- KMemoryBlock nextBlock = node.Next.Value;
-
- if (BlockStateEquals(block, nextBlock))
- {
- _blocks.Remove(node.Next);
-
- block.AddPages(nextBlock.PagesCount);
- }
- }
-
- return node;
+ IncrementOrDecrementPagesReferenceCount(address, pagesCount, false);
}
- private static bool BlockStateEquals(KMemoryBlock lhs, KMemoryBlock rhs)
+ private void IncrementOrDecrementPagesReferenceCount(ulong address, ulong pagesCount, bool increment)
{
- return lhs.State == rhs.State &&
- lhs.Permission == rhs.Permission &&
- lhs.Attribute == rhs.Attribute &&
- lhs.SourcePermission == rhs.SourcePermission &&
- lhs.DeviceRefCount == rhs.DeviceRefCount &&
- lhs.IpcRefCount == rhs.IpcRefCount;
- }
-
- private ulong AllocateVa(
- ulong regionStart,
- ulong regionPagesCount,
- ulong neededPagesCount,
- int alignment)
- {
- ulong address = 0;
-
- ulong regionEndAddr = regionStart + regionPagesCount * PageSize;
-
- ulong reservedPagesCount = _isKernel ? 1UL : 4UL;
-
- if (_aslrEnabled)
+ while (pagesCount != 0)
{
- ulong totalNeededSize = (reservedPagesCount + neededPagesCount) * PageSize;
-
- ulong remainingPages = regionPagesCount - neededPagesCount;
+ var region = GetMemoryRegion(address);
- ulong aslrMaxOffset = ((remainingPages + reservedPagesCount) * PageSize) / (ulong)alignment;
+ ulong countToProcess = Math.Min(pagesCount, region.GetPageOffsetFromEnd(address));
- for (int attempt = 0; attempt < 8; attempt++)
+ lock (region)
{
- address = BitUtils.AlignDown(regionStart + GetRandomValue(0, aslrMaxOffset) * (ulong)alignment, alignment);
-
- ulong endAddr = address + totalNeededSize;
-
- KMemoryInfo info = FindBlock(address).GetInfo();
-
- if (info.State != MemoryState.Unmapped)
+ if (increment)
{
- continue;
+ region.IncrementPagesReferenceCount(address, countToProcess);
}
-
- ulong currBaseAddr = info.Address + reservedPagesCount * PageSize;
- ulong currEndAddr = info.Address + info.Size;
-
- if (address >= regionStart &&
- address >= currBaseAddr &&
- endAddr - 1 <= regionEndAddr - 1 &&
- endAddr - 1 <= currEndAddr - 1)
+ else
{
- break;
+ region.DecrementPagesReferenceCount(address, countToProcess);
}
}
- if (address == 0)
- {
- ulong aslrPage = GetRandomValue(0, aslrMaxOffset);
-
- address = FindFirstFit(
- regionStart + aslrPage * PageSize,
- regionPagesCount - aslrPage,
- neededPagesCount,
- alignment,
- 0,
- reservedPagesCount);
- }
+ pagesCount -= countToProcess;
+ address += countToProcess * KPageTableBase.PageSize;
}
-
- if (address == 0)
- {
- address = FindFirstFit(
- regionStart,
- regionPagesCount,
- neededPagesCount,
- alignment,
- 0,
- reservedPagesCount);
- }
-
- return address;
- }
-
- private ulong FindFirstFit(
- ulong regionStart,
- ulong regionPagesCount,
- ulong neededPagesCount,
- int alignment,
- ulong reservedStart,
- ulong reservedPagesCount)
- {
- ulong reservedSize = reservedPagesCount * PageSize;
-
- ulong totalNeededSize = reservedSize + neededPagesCount * PageSize;
-
- ulong regionEndAddr = regionStart + regionPagesCount * PageSize;
-
- LinkedListNode<KMemoryBlock> node = FindBlockNode(regionStart);
-
- KMemoryInfo info = node.Value.GetInfo();
-
- while (regionEndAddr >= info.Address)
- {
- if (info.State == MemoryState.Unmapped)
- {
- ulong currBaseAddr = info.Address + reservedSize;
- ulong currEndAddr = info.Address + info.Size - 1;
-
- ulong address = BitUtils.AlignDown(currBaseAddr, alignment) + reservedStart;
-
- if (currBaseAddr > address)
- {
- address += (ulong)alignment;
- }
-
- ulong allocationEndAddr = address + totalNeededSize - 1;
-
- if (allocationEndAddr <= regionEndAddr &&
- allocationEndAddr <= currEndAddr &&
- address < allocationEndAddr)
- {
- return address;
- }
- }
-
- node = node.Next;
-
- if (node == null)
- {
- break;
- }
-
- info = node.Value.GetInfo();
- }
-
- return 0;
- }
-
- private KMemoryBlock FindBlock(ulong address)
- {
- return FindBlockNode(address)?.Value;
- }
-
- private LinkedListNode<KMemoryBlock> FindBlockNode(ulong address)
- {
- lock (_blocks)
- {
- LinkedListNode<KMemoryBlock> node = _blocks.First;
-
- while (node != null)
- {
- KMemoryBlock block = node.Value;
-
- ulong currEndAddr = block.PagesCount * PageSize + block.BaseAddress;
-
- if (block.BaseAddress <= address && currEndAddr - 1 >= address)
- {
- return node;
- }
-
- node = node.Next;
- }
- }
-
- return null;
- }
-
- public bool CanContain(ulong address, ulong size, MemoryState state)
- {
- ulong endAddr = address + size;
-
- ulong regionBaseAddr = GetBaseAddress(state);
- ulong regionEndAddr = regionBaseAddr + GetSize(state);
-
- bool InsideRegion()
- {
- return regionBaseAddr <= address &&
- endAddr > address &&
- endAddr - 1 <= regionEndAddr - 1;
- }
-
- bool OutsideHeapRegion() => endAddr <= HeapRegionStart || address >= HeapRegionEnd;
- bool OutsideAliasRegion() => endAddr <= AliasRegionStart || address >= AliasRegionEnd;
-
- switch (state)
- {
- case MemoryState.Io:
- case MemoryState.Normal:
- case MemoryState.CodeStatic:
- case MemoryState.CodeMutable:
- case MemoryState.SharedMemory:
- case MemoryState.ModCodeStatic:
- case MemoryState.ModCodeMutable:
- case MemoryState.Stack:
- case MemoryState.ThreadLocal:
- case MemoryState.TransferMemoryIsolated:
- case MemoryState.TransferMemory:
- case MemoryState.ProcessMemory:
- case MemoryState.CodeReadOnly:
- case MemoryState.CodeWritable:
- return InsideRegion() && OutsideHeapRegion() && OutsideAliasRegion();
-
- case MemoryState.Heap:
- return InsideRegion() && OutsideAliasRegion();
-
- case MemoryState.IpcBuffer0:
- case MemoryState.IpcBuffer1:
- case MemoryState.IpcBuffer3:
- return InsideRegion() && OutsideHeapRegion();
-
- case MemoryState.KernelStack:
- return InsideRegion();
- }
-
- throw new ArgumentException($"Invalid state value \"{state}\".");
- }
-
- private ulong GetBaseAddress(MemoryState state)
- {
- switch (state)
- {
- case MemoryState.Io:
- case MemoryState.Normal:
- case MemoryState.ThreadLocal:
- return TlsIoRegionStart;
-
- case MemoryState.CodeStatic:
- case MemoryState.CodeMutable:
- case MemoryState.SharedMemory:
- case MemoryState.ModCodeStatic:
- case MemoryState.ModCodeMutable:
- case MemoryState.TransferMemoryIsolated:
- case MemoryState.TransferMemory:
- case MemoryState.ProcessMemory:
- case MemoryState.CodeReadOnly:
- case MemoryState.CodeWritable:
- return GetAddrSpaceBaseAddr();
-
- case MemoryState.Heap:
- return HeapRegionStart;
-
- case MemoryState.IpcBuffer0:
- case MemoryState.IpcBuffer1:
- case MemoryState.IpcBuffer3:
- return AliasRegionStart;
-
- case MemoryState.Stack:
- return StackRegionStart;
-
- case MemoryState.KernelStack:
- return AddrSpaceStart;
- }
-
- throw new ArgumentException($"Invalid state value \"{state}\".");
- }
-
- private ulong GetSize(MemoryState state)
- {
- switch (state)
- {
- case MemoryState.Io:
- case MemoryState.Normal:
- case MemoryState.ThreadLocal:
- return TlsIoRegionEnd - TlsIoRegionStart;
-
- case MemoryState.CodeStatic:
- case MemoryState.CodeMutable:
- case MemoryState.SharedMemory:
- case MemoryState.ModCodeStatic:
- case MemoryState.ModCodeMutable:
- case MemoryState.TransferMemoryIsolated:
- case MemoryState.TransferMemory:
- case MemoryState.ProcessMemory:
- case MemoryState.CodeReadOnly:
- case MemoryState.CodeWritable:
- return GetAddrSpaceSize();
-
- case MemoryState.Heap:
- return HeapRegionEnd - HeapRegionStart;
-
- case MemoryState.IpcBuffer0:
- case MemoryState.IpcBuffer1:
- case MemoryState.IpcBuffer3:
- return AliasRegionEnd - AliasRegionStart;
-
- case MemoryState.Stack:
- return StackRegionEnd - StackRegionStart;
-
- case MemoryState.KernelStack:
- return AddrSpaceEnd - AddrSpaceStart;
- }
-
- throw new ArgumentException($"Invalid state value \"{state}\".");
- }
-
- public ulong GetAddrSpaceBaseAddr()
- {
- if (AddrSpaceWidth == 36 || AddrSpaceWidth == 39)
- {
- return 0x8000000;
- }
- else if (AddrSpaceWidth == 32)
- {
- return 0x200000;
- }
- else
- {
- throw new InvalidOperationException("Invalid address space width!");
- }
- }
-
- public ulong GetAddrSpaceSize()
- {
- if (AddrSpaceWidth == 36)
- {
- return 0xff8000000;
- }
- else if (AddrSpaceWidth == 39)
- {
- return 0x7ff8000000;
- }
- else if (AddrSpaceWidth == 32)
- {
- return 0xffe00000;
- }
- else
- {
- throw new InvalidOperationException("Invalid address space width!");
- }
- }
-
- private KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission)
- {
- ulong currAddr = address;
-
- KernelResult result = KernelResult.Success;
-
- foreach (KPageNode pageNode in pageList)
- {
- result = DoMmuOperation(
- currAddr,
- pageNode.PagesCount,
- pageNode.Address,
- true,
- permission,
- MemoryOperation.MapPa);
-
- if (result != KernelResult.Success)
- {
- KMemoryInfo info = FindBlock(currAddr).GetInfo();
-
- ulong pagesCount = (address - currAddr) / PageSize;
-
- result = MmuUnmap(address, pagesCount);
-
- break;
- }
-
- currAddr += pageNode.PagesCount * PageSize;
- }
-
- return result;
- }
-
- private KernelResult MmuUnmap(ulong address, ulong pagesCount)
- {
- return DoMmuOperation(
- address,
- pagesCount,
- 0,
- false,
- KMemoryPermission.None,
- MemoryOperation.Unmap);
- }
-
- private KernelResult MmuChangePermission(ulong address, ulong pagesCount, KMemoryPermission permission)
- {
- return DoMmuOperation(
- address,
- pagesCount,
- 0,
- false,
- permission,
- MemoryOperation.ChangePermRw);
- }
-
- private KernelResult DoMmuOperation(
- ulong dstVa,
- ulong pagesCount,
- ulong srcPa,
- bool map,
- KMemoryPermission permission,
- MemoryOperation operation)
- {
- if (map != (operation == MemoryOperation.MapPa))
- {
- throw new ArgumentException(nameof(map) + " value is invalid for this operation.");
- }
-
- KernelResult result;
-
- switch (operation)
- {
- case MemoryOperation.MapPa:
- {
- ulong size = pagesCount * PageSize;
-
- _cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size);
-
- result = KernelResult.Success;
-
- break;
- }
-
- case MemoryOperation.Allocate:
- {
- KMemoryRegionManager region = GetMemoryRegionManager();
-
- result = region.AllocatePages(pagesCount, _aslrDisabled, out KPageList pageList);
-
- if (result == KernelResult.Success)
- {
- result = MmuMapPages(dstVa, pageList);
- }
-
- break;
- }
-
- case MemoryOperation.Unmap:
- {
- ulong size = pagesCount * PageSize;
-
- _cpuMemory.Unmap(dstVa, size);
-
- result = KernelResult.Success;
-
- break;
- }
-
- case MemoryOperation.ChangePermRw: result = KernelResult.Success; break;
- case MemoryOperation.ChangePermsAndAttributes: result = KernelResult.Success; break;
-
- default: throw new ArgumentException($"Invalid operation \"{operation}\".");
- }
-
- return result;
- }
-
- private KernelResult DoMmuOperation(
- ulong address,
- ulong pagesCount,
- KPageList pageList,
- KMemoryPermission permission,
- MemoryOperation operation)
- {
- if (operation != MemoryOperation.MapVa)
- {
- throw new ArgumentException($"Invalid memory operation \"{operation}\" specified.");
- }
-
- return MmuMapPages(address, pageList);
- }
-
- private KMemoryRegionManager GetMemoryRegionManager()
- {
- return _context.MemoryRegions[(int)_memRegion];
- }
-
- private KernelResult MmuMapPages(ulong address, KPageList pageList)
- {
- foreach (KPageNode pageNode in pageList)
- {
- ulong size = pageNode.PagesCount * PageSize;
-
- _cpuMemory.Map(address, pageNode.Address - DramMemoryMap.DramBase, size);
-
- address += size;
- }
-
- return KernelResult.Success;
- }
-
- public ulong GetDramAddressFromVa(ulong va)
- {
- return _cpuMemory.GetPhysicalAddress(va);
- }
-
- public ulong ConvertVaToPa(ulong va)
- {
- if (!TryConvertVaToPa(va, out ulong pa))
- {
- throw new ArgumentException($"Invalid virtual address 0x{va:X} specified.");
- }
-
- return pa;
- }
-
- public bool TryConvertVaToPa(ulong va, out ulong pa)
- {
- pa = DramMemoryMap.DramBase + _cpuMemory.GetPhysicalAddress(va);
-
- return true;
- }
-
- public static ulong GetDramAddressFromPa(ulong pa)
- {
- return pa - DramMemoryMap.DramBase;
- }
-
- public long GetMmUsedPages()
- {
- lock (_blocks)
- {
- return BitUtils.DivRoundUp(GetMmUsedSize(), PageSize);
- }
- }
-
- private long GetMmUsedSize()
- {
- return _blocks.Count * KMemoryBlockSize;
- }
-
- public bool IsInvalidRegion(ulong address, ulong size)
- {
- return address + size - 1 > GetAddrSpaceBaseAddr() + GetAddrSpaceSize() - 1;
- }
-
- public bool InsideAddrSpace(ulong address, ulong size)
- {
- return AddrSpaceStart <= address && address + size - 1 <= AddrSpaceEnd - 1;
- }
-
- public bool InsideAliasRegion(ulong address, ulong size)
- {
- return address + size > AliasRegionStart && AliasRegionEnd > address;
- }
-
- public bool InsideHeapRegion(ulong address, ulong size)
- {
- return address + size > HeapRegionStart && HeapRegionEnd > address;
- }
-
- public bool InsideStackRegion(ulong address, ulong size)
- {
- return address + size > StackRegionStart && StackRegionEnd > address;
- }
-
- public bool OutsideAliasRegion(ulong address, ulong size)
- {
- return AliasRegionStart > address || address + size - 1 > AliasRegionEnd - 1;
- }
-
- public bool OutsideAddrSpace(ulong address, ulong size)
- {
- return AddrSpaceStart > address || address + size - 1 > AddrSpaceEnd - 1;
- }
-
- public bool OutsideStackRegion(ulong address, ulong size)
- {
- return StackRegionStart > address || address + size - 1 > StackRegionEnd - 1;
}
}
-} \ No newline at end of file
+}
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
index bb4989fc..f35a3c36 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
@@ -1,5 +1,6 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
+using System.Diagnostics;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@@ -13,7 +14,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
private int _blockOrdersCount;
- private KMemoryRegionBlock[] _blocks;
+ private readonly KMemoryRegionBlock[] _blocks;
+
+ private readonly ushort[] _pageReferenceCounts;
public KMemoryRegionManager(ulong address, ulong size, ulong endAddr)
{
@@ -80,9 +83,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
}
+ _pageReferenceCounts = new ushort[size / KPageTableBase.PageSize];
+
if (size != 0)
{
- FreePages(address, size / KMemoryManager.PageSize);
+ FreePages(address, size / KPageTableBase.PageSize);
}
}
@@ -90,15 +95,33 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
lock (_blocks)
{
- return AllocatePagesImpl(pagesCount, backwards, out pageList);
+ KernelResult result = AllocatePagesImpl(pagesCount, backwards, out pageList);
+
+ if (result == KernelResult.Success)
+ {
+ foreach (var node in pageList)
+ {
+ IncrementPagesReferenceCount(node.Address, node.PagesCount);
+ }
+ }
+
+ return result;
}
}
- public ulong AllocatePagesContiguous(ulong pagesCount, bool backwards)
+ public ulong AllocatePagesContiguous(KernelContext context, ulong pagesCount, bool backwards)
{
lock (_blocks)
{
- return AllocatePagesContiguousImpl(pagesCount, backwards);
+ ulong address = AllocatePagesContiguousImpl(pagesCount, backwards);
+
+ if (address != 0)
+ {
+ IncrementPagesReferenceCount(address, pagesCount);
+ context.Memory.Commit(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize);
+ }
+
+ return address;
}
}
@@ -124,7 +147,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
ulong bestFitBlockSize = 1UL << block.Order;
- ulong blockPagesCount = bestFitBlockSize / KMemoryManager.PageSize;
+ ulong blockPagesCount = bestFitBlockSize / KPageTableBase.PageSize;
// Check if this is the best fit for this page size.
// If so, try allocating as much requested pages as possible.
@@ -185,7 +208,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
int blockIndex = 0;
- while ((1UL << _blocks[blockIndex].Order) / KMemoryManager.PageSize < pagesCount)
+ while ((1UL << _blocks[blockIndex].Order) / KPageTableBase.PageSize < pagesCount)
{
if (++blockIndex >= _blocks.Length)
{
@@ -197,11 +220,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
ulong address = AllocatePagesForOrder(blockIndex, backwards, tightestFitBlockSize);
- ulong requiredSize = pagesCount * KMemoryManager.PageSize;
+ ulong requiredSize = pagesCount * KPageTableBase.PageSize;
if (address != 0 && tightestFitBlockSize > requiredSize)
{
- FreePages(address + requiredSize, (tightestFitBlockSize - requiredSize) / KMemoryManager.PageSize);
+ FreePages(address + requiredSize, (tightestFitBlockSize - requiredSize) / KPageTableBase.PageSize);
}
return address;
@@ -327,136 +350,120 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
if (firstFreeBlockSize > bestFitBlockSize)
{
- FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
+ FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KPageTableBase.PageSize);
}
}
return address;
}
- public void FreePage(ulong address)
- {
- lock (_blocks)
- {
- FreePages(address, 1);
- }
- }
-
- public void FreePages(KPageList pageList)
+ private void FreePages(ulong address, ulong pagesCount)
{
lock (_blocks)
{
- foreach (KPageNode pageNode in pageList)
- {
- FreePages(pageNode.Address, pageNode.PagesCount);
- }
- }
- }
-
- private void FreePages(ulong address, ulong pagesCount)
- {
- ulong endAddr = address + pagesCount * KMemoryManager.PageSize;
+ ulong endAddr = address + pagesCount * KPageTableBase.PageSize;
- int blockIndex = _blockOrdersCount - 1;
+ int blockIndex = _blockOrdersCount - 1;
- ulong addressRounded = 0;
- ulong endAddrTruncated = 0;
+ ulong addressRounded = 0;
+ ulong endAddrTruncated = 0;
- for (; blockIndex >= 0; blockIndex--)
- {
- KMemoryRegionBlock allocInfo = _blocks[blockIndex];
+ for (; blockIndex >= 0; blockIndex--)
+ {
+ KMemoryRegionBlock allocInfo = _blocks[blockIndex];
- int blockSize = 1 << allocInfo.Order;
+ int blockSize = 1 << allocInfo.Order;
- addressRounded = BitUtils.AlignUp (address, blockSize);
- endAddrTruncated = BitUtils.AlignDown(endAddr, blockSize);
+ addressRounded = BitUtils.AlignUp (address, blockSize);
+ endAddrTruncated = BitUtils.AlignDown(endAddr, blockSize);
- if (addressRounded < endAddrTruncated)
- {
- break;
+ if (addressRounded < endAddrTruncated)
+ {
+ break;
+ }
}
- }
- void FreeRegion(ulong currAddress)
- {
- for (int currBlockIndex = blockIndex;
- currBlockIndex < _blockOrdersCount && currAddress != 0;
- currBlockIndex++)
+ void FreeRegion(ulong currAddress)
{
- KMemoryRegionBlock block = _blocks[currBlockIndex];
+ for (int currBlockIndex = blockIndex;
+ currBlockIndex < _blockOrdersCount && currAddress != 0;
+ currBlockIndex++)
+ {
+ KMemoryRegionBlock block = _blocks[currBlockIndex];
- block.FreeCount++;
+ block.FreeCount++;
- ulong freedBlocks = (currAddress - block.StartAligned) >> block.Order;
+ ulong freedBlocks = (currAddress - block.StartAligned) >> block.Order;
- int index = (int)freedBlocks;
+ int index = (int)freedBlocks;
- for (int level = block.MaxLevel - 1; level >= 0; level--, index /= 64)
- {
- long mask = block.Masks[level][index / 64];
+ for (int level = block.MaxLevel - 1; level >= 0; level--, index /= 64)
+ {
+ long mask = block.Masks[level][index / 64];
- block.Masks[level][index / 64] = mask | (1L << (index & 63));
+ block.Masks[level][index / 64] = mask | (1L << (index & 63));
- if (mask != 0)
- {
- break;
+ if (mask != 0)
+ {
+ break;
+ }
}
- }
- int blockSizeDelta = 1 << (block.NextOrder - block.Order);
+ int blockSizeDelta = 1 << (block.NextOrder - block.Order);
- int freedBlocksTruncated = BitUtils.AlignDown((int)freedBlocks, blockSizeDelta);
+ int freedBlocksTruncated = BitUtils.AlignDown((int)freedBlocks, blockSizeDelta);
- if (!block.TryCoalesce(freedBlocksTruncated, blockSizeDelta))
- {
- break;
- }
+ if (!block.TryCoalesce(freedBlocksTruncated, blockSizeDelta))
+ {
+ break;
+ }
- currAddress = block.StartAligned + ((ulong)freedBlocksTruncated << block.Order);
+ currAddress = block.StartAligned + ((ulong)freedBlocksTruncated << block.Order);
+ }
}
- }
- // Free inside aligned region.
- ulong baseAddress = addressRounded;
+ // Free inside aligned region.
+ ulong baseAddress = addressRounded;
- while (baseAddress < endAddrTruncated)
- {
- ulong blockSize = 1UL << _blocks[blockIndex].Order;
-
- FreeRegion(baseAddress);
+ while (baseAddress < endAddrTruncated)
+ {
+ ulong blockSize = 1UL << _blocks[blockIndex].Order;
- baseAddress += blockSize;
- }
+ FreeRegion(baseAddress);
- int nextBlockIndex = blockIndex - 1;
+ baseAddress += blockSize;
+ }
- // Free region between Address and aligned region start.
- baseAddress = addressRounded;
+ int nextBlockIndex = blockIndex - 1;
- for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
- {
- ulong blockSize = 1UL << _blocks[blockIndex].Order;
+ // Free region between Address and aligned region start.
+ baseAddress = addressRounded;
- while (baseAddress - blockSize >= address)
+ for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
{
- baseAddress -= blockSize;
+ ulong blockSize = 1UL << _blocks[blockIndex].Order;
- FreeRegion(baseAddress);
- }
- }
+ while (baseAddress - blockSize >= address)
+ {
+ baseAddress -= blockSize;
- // Free region between aligned region end and End Address.
- baseAddress = endAddrTruncated;
+ FreeRegion(baseAddress);
+ }
+ }
- for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
- {
- ulong blockSize = 1UL << _blocks[blockIndex].Order;
+ // Free region between aligned region end and End Address.
+ baseAddress = endAddrTruncated;
- while (baseAddress + blockSize <= endAddr)
+ for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
{
- FreeRegion(baseAddress);
+ ulong blockSize = 1UL << _blocks[blockIndex].Order;
- baseAddress += blockSize;
+ while (baseAddress + blockSize <= endAddr)
+ {
+ FreeRegion(baseAddress);
+
+ baseAddress += blockSize;
+ }
}
}
}
@@ -477,12 +484,76 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
KMemoryRegionBlock block = _blocks[blockIndex];
- ulong blockPagesCount = (1UL << block.Order) / KMemoryManager.PageSize;
+ ulong blockPagesCount = (1UL << block.Order) / KPageTableBase.PageSize;
availablePages += blockPagesCount * block.FreeCount;
}
return availablePages;
}
+
+ public void IncrementPagesReferenceCount(ulong address, ulong pagesCount)
+ {
+ ulong index = GetPageOffset(address);
+ ulong endIndex = index + pagesCount;
+
+ while (index < endIndex)
+ {
+ ushort referenceCount = ++_pageReferenceCounts[index];
+ Debug.Assert(referenceCount >= 1);
+
+ index++;
+ }
+ }
+
+ public void DecrementPagesReferenceCount(ulong address, ulong pagesCount)
+ {
+ ulong index = GetPageOffset(address);
+ ulong endIndex = index + pagesCount;
+
+ ulong freeBaseIndex = 0;
+ ulong freePagesCount = 0;
+
+ while (index < endIndex)
+ {
+ Debug.Assert(_pageReferenceCounts[index] > 0);
+ ushort referenceCount = --_pageReferenceCounts[index];
+
+ if (referenceCount == 0)
+ {
+ if (freePagesCount != 0)
+ {
+ freePagesCount++;
+ }
+ else
+ {
+ freeBaseIndex = index;
+ freePagesCount = 1;
+ }
+ }
+ else if (freePagesCount != 0)
+ {
+ FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
+ freePagesCount = 0;
+ }
+
+ index++;
+ }
+
+ if (freePagesCount != 0)
+ {
+ FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
+ }
+ }
+
+ public ulong GetPageOffset(ulong address)
+ {
+ return (address - Address) / KPageTableBase.PageSize;
+ }
+
+ public ulong GetPageOffsetFromEnd(ulong address)
+ {
+ return (EndAddr - address) / KPageTableBase.PageSize;
+ }
}
} \ No newline at end of file
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs
index f0935dcc..7f2f1ba6 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs
@@ -6,7 +6,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KPageList : IEnumerable<KPageNode>
{
- public LinkedList<KPageNode> Nodes { get; private set; }
+ public LinkedList<KPageNode> Nodes { get; }
public KPageList()
{
@@ -21,7 +21,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
KPageNode lastNode = Nodes.Last.Value;
- if (lastNode.Address + lastNode.PagesCount * KMemoryManager.PageSize == address)
+ if (lastNode.Address + lastNode.PagesCount * KPageTableBase.PageSize == address)
{
address = lastNode.Address;
pagesCount += lastNode.PagesCount;
@@ -68,6 +68,22 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return thisNode == null && otherNode == null;
}
+ public void IncrementPagesReferenceCount(KMemoryManager manager)
+ {
+ foreach (var node in this)
+ {
+ manager.IncrementPagesReferenceCount(node.Address, node.PagesCount);
+ }
+ }
+
+ public void DecrementPagesReferenceCount(KMemoryManager manager)
+ {
+ foreach (var node in this)
+ {
+ manager.DecrementPagesReferenceCount(node.Address, node.PagesCount);
+ }
+ }
+
public IEnumerator<KPageNode> GetEnumerator()
{
return Nodes.GetEnumerator();
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
new file mode 100644
index 00000000..20a13f57
--- /dev/null
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
@@ -0,0 +1,221 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KPageTable : KPageTableBase
+ {
+ private readonly IVirtualMemoryManager _cpuMemory;
+
+ public override bool SupportsMemoryAliasing => true;
+
+ public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
+ {
+ _cpuMemory = cpuMemory;
+ }
+
+ /// <inheritdoc/>
+ protected override IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
+ {
+ return _cpuMemory.GetPhysicalRegions(va, size);
+ }
+
+ /// <inheritdoc/>
+ protected override ReadOnlySpan<byte> GetSpan(ulong va, int size)
+ {
+ return _cpuMemory.GetSpan(va, size);
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
+ {
+ var srcRanges = GetPhysicalRegions(src, pagesCount * PageSize);
+
+ KernelResult result = Reprotect(src, pagesCount, KMemoryPermission.None);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ result = MapPages(dst, srcRanges, newDstPermission);
+
+ if (result != KernelResult.Success)
+ {
+ KernelResult reprotectResult = Reprotect(src, pagesCount, oldSrcPermission);
+ Debug.Assert(reprotectResult == KernelResult.Success);
+ }
+
+ return result;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
+ {
+ ulong size = pagesCount * PageSize;
+
+ var srcRanges = GetPhysicalRegions(src, size);
+ var dstRanges = GetPhysicalRegions(dst, size);
+
+ if (!dstRanges.SequenceEqual(srcRanges))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ KernelResult result = Unmap(dst, pagesCount);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ result = Reprotect(src, pagesCount, newSrcPermission);
+
+ if (result != KernelResult.Success)
+ {
+ KernelResult mapResult = MapPages(dst, dstRanges, oldDstPermission);
+ Debug.Assert(mapResult == KernelResult.Success);
+ }
+
+ return result;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission)
+ {
+ ulong size = pagesCount * PageSize;
+
+ Context.Memory.Commit(srcPa - DramMemoryMap.DramBase, size);
+
+ _cpuMemory.Map(dstVa, Context.Memory.GetPointer(srcPa - DramMemoryMap.DramBase, size), size);
+
+ if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
+ {
+ Context.MemoryManager.IncrementPagesReferenceCount(srcPa, pagesCount);
+ }
+
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission)
+ {
+ using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
+
+ ulong currentVa = address;
+
+ foreach (var pageNode in pageList)
+ {
+ ulong addr = pageNode.Address - DramMemoryMap.DramBase;
+ ulong size = pageNode.PagesCount * PageSize;
+
+ Context.Memory.Commit(addr, size);
+
+ _cpuMemory.Map(currentVa, Context.Memory.GetPointer(addr, size), size);
+
+ currentVa += size;
+ }
+
+ scopedPageList.SignalSuccess();
+
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission)
+ {
+ ulong currentVa = address;
+
+ foreach (var range in ranges)
+ {
+ ulong size = range.Size;
+
+ ulong pa = GetDramAddressFromHostAddress(range.Address);
+ if (pa != ulong.MaxValue)
+ {
+ pa += DramMemoryMap.DramBase;
+ if (DramMemoryMap.IsHeapPhysicalAddress(pa))
+ {
+ Context.MemoryManager.IncrementPagesReferenceCount(pa, size / PageSize);
+ }
+ }
+
+ _cpuMemory.Map(currentVa, range.Address, size);
+
+ currentVa += size;
+ }
+
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult Unmap(ulong address, ulong pagesCount)
+ {
+ KPageList pagesToClose = new KPageList();
+
+ var regions = _cpuMemory.GetPhysicalRegions(address, pagesCount * PageSize);
+
+ foreach (var region in regions)
+ {
+ ulong pa = GetDramAddressFromHostAddress(region.Address);
+ if (pa == ulong.MaxValue)
+ {
+ continue;
+ }
+
+ pa += DramMemoryMap.DramBase;
+ if (DramMemoryMap.IsHeapPhysicalAddress(pa))
+ {
+ pagesToClose.AddRange(pa, region.Size / PageSize);
+ }
+ }
+
+ _cpuMemory.Unmap(address, pagesCount * PageSize);
+
+ pagesToClose.DecrementPagesReferenceCount(Context.MemoryManager);
+
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission)
+ {
+ // TODO.
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission)
+ {
+ // TODO.
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override void SignalMemoryTracking(ulong va, ulong size, bool write)
+ {
+ _cpuMemory.SignalMemoryTracking(va, size, write);
+ }
+
+ /// <inheritdoc/>
+ protected override void Write(ulong va, ReadOnlySpan<byte> data)
+ {
+ _cpuMemory.Write(va, data);
+ }
+
+ private ulong GetDramAddressFromHostAddress(nuint hostAddress)
+ {
+ if (hostAddress < (nuint)(ulong)Context.Memory.Pointer || hostAddress >= (nuint)((ulong)Context.Memory.Pointer + Context.Memory.Size))
+ {
+ return ulong.MaxValue;
+ }
+
+ return hostAddress - (ulong)Context.Memory.Pointer;
+ }
+ }
+}
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
new file mode 100644
index 00000000..a2db8dcc
--- /dev/null
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
@@ -0,0 +1,2797 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ abstract class KPageTableBase
+ {
+ private static readonly int[] MappingUnitSizes = new int[]
+ {
+ 0x1000,
+ 0x10000,
+ 0x200000,
+ 0x400000,
+ 0x2000000,
+ 0x40000000
+ };
+
+ public const int PageSize = 0x1000;
+
+ private const int KMemoryBlockSize = 0x40;
+
+ // We need 2 blocks for the case where a big block
+ // needs to be split in 2, plus one block that will be the new one inserted.
+ private const int MaxBlocksNeededForInsertion = 2;
+
+ protected readonly KernelContext Context;
+
+ public ulong AddrSpaceStart { get; private set; }
+ public ulong AddrSpaceEnd { get; private set; }
+
+ public ulong CodeRegionStart { get; private set; }
+ public ulong CodeRegionEnd { get; private set; }
+
+ public ulong HeapRegionStart { get; private set; }
+ public ulong HeapRegionEnd { get; private set; }
+
+ private ulong _currentHeapAddr;
+
+ public ulong AliasRegionStart { get; private set; }
+ public ulong AliasRegionEnd { get; private set; }
+
+ public ulong StackRegionStart { get; private set; }
+ public ulong StackRegionEnd { get; private set; }
+
+ public ulong TlsIoRegionStart { get; private set; }
+ public ulong TlsIoRegionEnd { get; private set; }
+
+ private ulong _heapCapacity;
+
+ public ulong PhysicalMemoryUsage { get; private set; }
+
+ private readonly KMemoryBlockManager _blockManager;
+
+ private MemoryRegion _memRegion;
+
+ private bool _aslrDisabled;
+
+ public int AddrSpaceWidth { get; private set; }
+
+ private bool _isKernel;
+
+ private bool _aslrEnabled;
+
+ private KMemoryBlockSlabManager _slabManager;
+
+ private int _contextId;
+
+ private MersenneTwister _randomNumberGenerator;
+
+ public abstract bool SupportsMemoryAliasing { get; }
+
+ public KPageTableBase(KernelContext context)
+ {
+ Context = context;
+
+ _blockManager = new KMemoryBlockManager();
+
+ _isKernel = false;
+ }
+
+ private static readonly int[] AddrSpaceSizes = new int[] { 32, 36, 32, 39 };
+
+ public KernelResult InitializeForProcess(
+ AddressSpaceType addrSpaceType,
+ bool aslrEnabled,
+ bool aslrDisabled,
+ MemoryRegion memRegion,
+ ulong address,
+ ulong size,
+ KMemoryBlockSlabManager slabManager)
+ {
+ if ((uint)addrSpaceType > (uint)AddressSpaceType.Addr39Bits)
+ {
+ throw new ArgumentException(nameof(addrSpaceType));
+ }
+
+ _contextId = Context.ContextIdManager.GetId();
+
+ ulong addrSpaceBase = 0;
+ ulong addrSpaceSize = 1UL << AddrSpaceSizes[(int)addrSpaceType];
+
+ KernelResult result = CreateUserAddressSpace(
+ addrSpaceType,
+ aslrEnabled,
+ aslrDisabled,
+ addrSpaceBase,
+ addrSpaceSize,
+ memRegion,
+ address,
+ size,
+ slabManager);
+
+ if (result != KernelResult.Success)
+ {
+ Context.ContextIdManager.PutId(_contextId);
+ }
+
+ return result;
+ }
+
+ private class Region
+ {
+ public ulong Start;
+ public ulong End;
+ public ulong Size;
+ public ulong AslrOffset;
+ }
+
+ private KernelResult CreateUserAddressSpace(
+ AddressSpaceType addrSpaceType,
+ bool aslrEnabled,
+ bool aslrDisabled,
+ ulong addrSpaceStart,
+ ulong addrSpaceEnd,
+ MemoryRegion memRegion,
+ ulong address,
+ ulong size,
+ KMemoryBlockSlabManager slabManager)
+ {
+ ulong endAddr = address + size;
+
+ Region aliasRegion = new Region();
+ Region heapRegion = new Region();
+ Region stackRegion = new Region();
+ Region tlsIoRegion = new Region();
+
+ ulong codeRegionSize;
+ ulong stackAndTlsIoStart;
+ ulong stackAndTlsIoEnd;
+ ulong baseAddress;
+
+ switch (addrSpaceType)
+ {
+ case AddressSpaceType.Addr32Bits:
+ aliasRegion.Size = 0x40000000;
+ heapRegion.Size = 0x40000000;
+ stackRegion.Size = 0;
+ tlsIoRegion.Size = 0;
+ CodeRegionStart = 0x200000;
+ codeRegionSize = 0x3fe00000;
+ stackAndTlsIoStart = 0x200000;
+ stackAndTlsIoEnd = 0x40000000;
+ baseAddress = 0x200000;
+ AddrSpaceWidth = 32;
+ break;
+
+ case AddressSpaceType.Addr36Bits:
+ aliasRegion.Size = 0x180000000;
+ heapRegion.Size = 0x180000000;
+ stackRegion.Size = 0;
+ tlsIoRegion.Size = 0;
+ CodeRegionStart = 0x8000000;
+ codeRegionSize = 0x78000000;
+ stackAndTlsIoStart = 0x8000000;
+ stackAndTlsIoEnd = 0x80000000;
+ baseAddress = 0x8000000;
+ AddrSpaceWidth = 36;
+ break;
+
+ case AddressSpaceType.Addr32BitsNoMap:
+ aliasRegion.Size = 0;
+ heapRegion.Size = 0x80000000;
+ stackRegion.Size = 0;
+ tlsIoRegion.Size = 0;
+ CodeRegionStart = 0x200000;
+ codeRegionSize = 0x3fe00000;
+ stackAndTlsIoStart = 0x200000;
+ stackAndTlsIoEnd = 0x40000000;
+ baseAddress = 0x200000;
+ AddrSpaceWidth = 32;
+ break;
+
+ case AddressSpaceType.Addr39Bits:
+ aliasRegion.Size = 0x1000000000;
+ heapRegion.Size = 0x180000000;
+ stackRegion.Size = 0x80000000;
+ tlsIoRegion.Size = 0x1000000000;
+ CodeRegionStart = BitUtils.AlignDown(address, 0x200000);
+ codeRegionSize = BitUtils.AlignUp(endAddr, 0x200000) - CodeRegionStart;
+ stackAndTlsIoStart = 0;
+ stackAndTlsIoEnd = 0;
+ baseAddress = 0x8000000;
+ AddrSpaceWidth = 39;
+ break;
+
+ default: throw new ArgumentException(nameof(addrSpaceType));
+ }
+
+ CodeRegionEnd = CodeRegionStart + codeRegionSize;
+
+ ulong mapBaseAddress;
+ ulong mapAvailableSize;
+
+ if (CodeRegionStart - baseAddress >= addrSpaceEnd - CodeRegionEnd)
+ {
+ // Has more space before the start of the code region.
+ mapBaseAddress = baseAddress;
+ mapAvailableSize = CodeRegionStart - baseAddress;
+ }
+ else
+ {
+ // Has more space after the end of the code region.
+ mapBaseAddress = CodeRegionEnd;
+ mapAvailableSize = addrSpaceEnd - CodeRegionEnd;
+ }
+
+ ulong mapTotalSize = aliasRegion.Size + heapRegion.Size + stackRegion.Size + tlsIoRegion.Size;
+
+ ulong aslrMaxOffset = mapAvailableSize - mapTotalSize;
+
+ _aslrEnabled = aslrEnabled;
+
+ AddrSpaceStart = addrSpaceStart;
+ AddrSpaceEnd = addrSpaceEnd;
+
+ _slabManager = slabManager;
+
+ if (mapAvailableSize < mapTotalSize)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ if (aslrEnabled)
+ {
+ aliasRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+ heapRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+ stackRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+ tlsIoRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+ }
+
+ // Regions are sorted based on ASLR offset.
+ // When ASLR is disabled, the order is Alias (Map), Heap, Stack (NewMap) and TlsIo.
+ aliasRegion.Start = mapBaseAddress + aliasRegion.AslrOffset;
+ aliasRegion.End = aliasRegion.Start + aliasRegion.Size;
+ heapRegion.Start = mapBaseAddress + heapRegion.AslrOffset;
+ heapRegion.End = heapRegion.Start + heapRegion.Size;
+ stackRegion.Start = mapBaseAddress + stackRegion.AslrOffset;
+ stackRegion.End = stackRegion.Start + stackRegion.Size;
+ tlsIoRegion.Start = mapBaseAddress + tlsIoRegion.AslrOffset;
+ tlsIoRegion.End = tlsIoRegion.Start + tlsIoRegion.Size;
+
+ SortRegion(heapRegion, aliasRegion);
+
+ if (stackRegion.Size != 0)
+ {
+ SortRegion(stackRegion, aliasRegion);
+ SortRegion(stackRegion, heapRegion);
+ }
+ else
+ {
+ stackRegion.Start = stackAndTlsIoStart;
+ stackRegion.End = stackAndTlsIoEnd;
+ }
+
+ if (tlsIoRegion.Size != 0)
+ {
+ SortRegion(tlsIoRegion, aliasRegion);
+ SortRegion(tlsIoRegion, heapRegion);
+ SortRegion(tlsIoRegion, stackRegion);
+ }
+ else
+ {
+ tlsIoRegion.Start = stackAndTlsIoStart;
+ tlsIoRegion.End = stackAndTlsIoEnd;
+ }
+
+ AliasRegionStart = aliasRegion.Start;
+ AliasRegionEnd = aliasRegion.End;
+ HeapRegionStart = heapRegion.Start;
+ HeapRegionEnd = heapRegion.End;
+ StackRegionStart = stackRegion.Start;
+ StackRegionEnd = stackRegion.End;
+ TlsIoRegionStart = tlsIoRegion.Start;
+ TlsIoRegionEnd = tlsIoRegion.End;
+
+ _currentHeapAddr = HeapRegionStart;
+ _heapCapacity = 0;
+ PhysicalMemoryUsage = 0;
+
+ _memRegion = memRegion;
+ _aslrDisabled = aslrDisabled;
+
+ return _blockManager.Initialize(addrSpaceStart, addrSpaceEnd, slabManager);
+ }
+
+ private ulong GetRandomValue(ulong min, ulong max)
+ {
+ return (ulong)GetRandomValue((long)min, (long)max);
+ }
+
+ private long GetRandomValue(long min, long max)
+ {
+ if (_randomNumberGenerator == null)
+ {
+ _randomNumberGenerator = new MersenneTwister(0);
+ }
+
+ return _randomNumberGenerator.GenRandomNumber(min, max);
+ }
+
+ private static void SortRegion(Region lhs, Region rhs)
+ {
+ if (lhs.AslrOffset < rhs.AslrOffset)
+ {
+ rhs.Start += lhs.Size;
+ rhs.End += lhs.Size;
+ }
+ else
+ {
+ lhs.Start += rhs.Size;
+ lhs.End += rhs.Size;
+ }
+ }
+
+ public KernelResult MapPages(ulong address, KPageList pageList, MemoryState state, KMemoryPermission permission)
+ {
+ ulong pagesCount = pageList.GetPagesCount();
+
+ ulong size = pagesCount * PageSize;
+
+ if (!CanContain(address, size, state))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (!IsUnmapped(address, pagesCount * PageSize))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ KernelResult result = MapPages(address, pageList, permission);
+
+ if (result == KernelResult.Success)
+ {
+ _blockManager.InsertBlock(address, pagesCount, state, permission);
+ }
+
+ return result;
+ }
+ }
+
+ public KernelResult UnmapPages(ulong address, ulong pagesCount, IEnumerable<HostMemoryRange> ranges, MemoryState stateExpected)
+ {
+ ulong size = pagesCount * PageSize;
+
+ ulong endAddr = address + size;
+
+ ulong addrSpacePagesCount = (AddrSpaceEnd - AddrSpaceStart) / PageSize;
+
+ if (AddrSpaceStart > address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (addrSpacePagesCount < pagesCount)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (endAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ var currentRanges = GetPhysicalRegions(address, size);
+
+ if (!currentRanges.SequenceEqual(ranges))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ stateExpected,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState state,
+ out _,
+ out _))
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ KernelResult result = Unmap(address, pagesCount);
+
+ if (result == KernelResult.Success)
+ {
+ _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+ }
+
+ return result;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult MapNormalMemory(long address, long size, KMemoryPermission permission)
+ {
+ // TODO.
+ return KernelResult.Success;
+ }
+
+ public KernelResult MapIoMemory(long address, long size, KMemoryPermission permission)
+ {
+ // TODO.
+ return KernelResult.Success;
+ }
+
+ public KernelResult MapPages(
+ ulong pagesCount,
+ int alignment,
+ ulong srcPa,
+ bool paIsValid,
+ ulong regionStart,
+ ulong regionPagesCount,
+ MemoryState state,
+ KMemoryPermission permission,
+ out ulong address)
+ {
+ address = 0;
+
+ ulong regionSize = regionPagesCount * PageSize;
+
+ if (!CanContain(regionStart, regionSize, state))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (regionPagesCount <= pagesCount)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ lock (_blockManager)
+ {
+ address = AllocateVa(regionStart, regionPagesCount, pagesCount, alignment);
+
+ if (address == 0)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ KernelResult result;
+
+ if (paIsValid)
+ {
+ result = MapPages(address, pagesCount, srcPa, permission);
+ }
+ else
+ {
+ result = AllocateAndMapPages(address, pagesCount, permission);
+ }
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(address, pagesCount, state, permission);
+ }
+
+ return KernelResult.Success;
+ }
+
+ public KernelResult MapPages(ulong address, ulong pagesCount, MemoryState state, KMemoryPermission permission)
+ {
+ ulong size = pagesCount * PageSize;
+
+ if (!CanContain(address, size, state))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (!IsUnmapped(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ KernelResult result = AllocateAndMapPages(address, pagesCount, permission);
+
+ if (result == KernelResult.Success)
+ {
+ _blockManager.InsertBlock(address, pagesCount, state, permission);
+ }
+
+ return result;
+ }
+ }
+
+ private KernelResult AllocateAndMapPages(ulong address, ulong pagesCount, KMemoryPermission permission)
+ {
+ KMemoryRegionManager region = GetMemoryRegionManager();
+
+ KernelResult result = region.AllocatePages(pagesCount, _aslrDisabled, out KPageList pageList);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
+
+ return MapPages(address, pageList, permission);
+ }
+
+ public KernelResult MapProcessCodeMemory(ulong dst, ulong src, ulong size)
+ {
+ lock (_blockManager)
+ {
+ bool success = CheckRange(
+ src,
+ size,
+ MemoryState.Mask,
+ MemoryState.Heap,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState state,
+ out KMemoryPermission permission,
+ out _);
+
+ success &= IsUnmapped(dst, size);
+
+ if (success)
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ KernelResult result = MapMemory(src, dst, pagesCount, permission, KMemoryPermission.None);
+
+ _blockManager.InsertBlock(src, pagesCount, state, KMemoryPermission.None, MemoryAttribute.Borrowed);
+ _blockManager.InsertBlock(dst, pagesCount, MemoryState.ModCodeStatic);
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult UnmapProcessCodeMemory(ulong dst, ulong src, ulong size)
+ {
+ lock (_blockManager)
+ {
+ bool success = CheckRange(
+ src,
+ size,
+ MemoryState.Mask,
+ MemoryState.Heap,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _);
+
+ success &= CheckRange(
+ dst,
+ PageSize,
+ MemoryState.UnmapProcessCodeMemoryAllowed,
+ MemoryState.UnmapProcessCodeMemoryAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState state,
+ out _,
+ out _);
+
+ success &= CheckRange(
+ dst,
+ size,
+ MemoryState.Mask,
+ state,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None);
+
+ if (success)
+ {
+ ulong pagesCount = size / PageSize;
+
+ KernelResult result = Unmap(dst, pagesCount);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ // TODO: Missing some checks here.
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
+ _blockManager.InsertBlock(src, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite);
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult SetHeapSize(ulong size, out ulong address)
+ {
+ address = 0;
+
+ if (size > HeapRegionEnd - HeapRegionStart)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ lock (_blockManager)
+ {
+ ulong currentHeapSize = GetHeapSize();
+
+ if (currentHeapSize <= size)
+ {
+ // Expand.
+ ulong sizeDelta = size - currentHeapSize;
+
+ if (currentProcess.ResourceLimit != null && sizeDelta != 0 &&
+ !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, sizeDelta))
+ {
+ return KernelResult.ResLimitExceeded;
+ }
+
+ ulong pagesCount = sizeDelta / PageSize;
+
+ KMemoryRegionManager region = GetMemoryRegionManager();
+
+ KernelResult result = region.AllocatePages(pagesCount, _aslrDisabled, out KPageList pageList);
+
+ using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
+
+ void CleanUpForError()
+ {
+ if (currentProcess.ResourceLimit != null && sizeDelta != 0)
+ {
+ currentProcess.ResourceLimit.Release(LimitableResource.Memory, sizeDelta);
+ }
+ }
+
+ if (result != KernelResult.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfResource;
+ }
+
+ if (!IsUnmapped(_currentHeapAddr, sizeDelta))
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ result = MapPages(_currentHeapAddr, pageList, KMemoryPermission.ReadAndWrite);
+
+ if (result != KernelResult.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ _blockManager.InsertBlock(_currentHeapAddr, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite);
+ }
+ else
+ {
+ // Shrink.
+ ulong freeAddr = HeapRegionStart + size;
+ ulong sizeDelta = currentHeapSize - size;
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ if (!CheckRange(
+ freeAddr,
+ sizeDelta,
+ MemoryState.Mask,
+ MemoryState.Heap,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong pagesCount = sizeDelta / PageSize;
+
+ KernelResult result = Unmap(freeAddr, pagesCount);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ currentProcess.ResourceLimit?.Release(LimitableResource.Memory, sizeDelta);
+
+ _blockManager.InsertBlock(freeAddr, pagesCount, MemoryState.Unmapped);
+ }
+
+ _currentHeapAddr = HeapRegionStart + size;
+ }
+
+ address = HeapRegionStart;
+
+ return KernelResult.Success;
+ }
+
+ public ulong GetTotalHeapSize()
+ {
+ lock (_blockManager)
+ {
+ return GetHeapSize() + PhysicalMemoryUsage;
+ }
+ }
+
+ private ulong GetHeapSize()
+ {
+ return _currentHeapAddr - HeapRegionStart;
+ }
+
+ public KernelResult SetHeapCapacity(ulong capacity)
+ {
+ lock (_blockManager)
+ {
+ _heapCapacity = capacity;
+ }
+
+ return KernelResult.Success;
+ }
+
+ public KernelResult SetMemoryAttribute(
+ ulong address,
+ ulong size,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeValue)
+ {
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.AttributeChangeAllowed,
+ MemoryState.AttributeChangeAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.BorrowedAndIpcMapped,
+ MemoryAttribute.None,
+ MemoryAttribute.DeviceMappedAndUncached,
+ out MemoryState state,
+ out KMemoryPermission permission,
+ out MemoryAttribute attribute))
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ attribute &= ~attributeMask;
+ attribute |= attributeMask & attributeValue;
+
+ _blockManager.InsertBlock(address, pagesCount, state, permission, attribute);
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KMemoryInfo QueryMemory(ulong address)
+ {
+ if (address >= AddrSpaceStart &&
+ address < AddrSpaceEnd)
+ {
+ lock (_blockManager)
+ {
+ return _blockManager.FindBlock(address).GetInfo();
+ }
+ }
+ else
+ {
+ return new KMemoryInfo(
+ AddrSpaceEnd,
+ ~AddrSpaceEnd + 1,
+ MemoryState.Reserved,
+ KMemoryPermission.None,
+ MemoryAttribute.None,
+ KMemoryPermission.None,
+ 0,
+ 0);
+ }
+ }
+
+ public KernelResult Map(ulong dst, ulong src, ulong size)
+ {
+ bool success;
+
+ lock (_blockManager)
+ {
+ success = CheckRange(
+ src,
+ size,
+ MemoryState.MapAllowed,
+ MemoryState.MapAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState srcState,
+ out _,
+ out _);
+
+ success &= IsUnmapped(dst, size);
+
+ if (success)
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ KernelResult result = MapMemory(src, dst, pagesCount, KMemoryPermission.ReadAndWrite, KMemoryPermission.ReadAndWrite);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(src, pagesCount, srcState, KMemoryPermission.None, MemoryAttribute.Borrowed);
+ _blockManager.InsertBlock(dst, pagesCount, MemoryState.Stack, KMemoryPermission.ReadAndWrite);
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult UnmapForKernel(ulong address, ulong pagesCount, MemoryState stateExpected)
+ {
+ ulong size = pagesCount * PageSize;
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ stateExpected,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _))
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ KernelResult result = Unmap(address, pagesCount);
+
+ if (result == KernelResult.Success)
+ {
+ _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+ }
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult Unmap(ulong dst, ulong src, ulong size)
+ {
+ bool success;
+
+ lock (_blockManager)
+ {
+ success = CheckRange(
+ src,
+ size,
+ MemoryState.MapAllowed,
+ MemoryState.MapAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState srcState,
+ out _,
+ out _);
+
+ success &= CheckRange(
+ dst,
+ size,
+ MemoryState.Mask,
+ MemoryState.Stack,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out KMemoryPermission dstPermission,
+ out _);
+
+ if (success)
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ KernelResult result = UnmapMemory(dst, src, pagesCount, dstPermission, KMemoryPermission.ReadAndWrite);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(src, pagesCount, srcState, KMemoryPermission.ReadAndWrite);
+ _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult SetProcessMemoryPermission(ulong address, ulong size, KMemoryPermission permission)
+ {
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.ProcessPermissionChangeAllowed,
+ MemoryState.ProcessPermissionChangeAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState oldState,
+ out KMemoryPermission oldPermission,
+ out _))
+ {
+ MemoryState newState = oldState;
+
+ // If writing into the code region is allowed, then we need
+ // to change it to mutable.
+ if ((permission & KMemoryPermission.Write) != 0)
+ {
+ if (oldState == MemoryState.CodeStatic)
+ {
+ newState = MemoryState.CodeMutable;
+ }
+ else if (oldState == MemoryState.ModCodeStatic)
+ {
+ newState = MemoryState.ModCodeMutable;
+ }
+ else
+ {
+ throw new InvalidOperationException($"Memory state \"{oldState}\" not valid for this operation.");
+ }
+ }
+
+ if (newState != oldState || permission != oldPermission)
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ KernelResult result;
+
+ if ((oldPermission & KMemoryPermission.Execute) != 0)
+ {
+ result = ReprotectWithAttributes(address, pagesCount, permission);
+ }
+ else
+ {
+ result = Reprotect(address, pagesCount, permission);
+ }
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(address, pagesCount, newState, permission);
+ }
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult MapPhysicalMemory(ulong address, ulong size)
+ {
+ ulong endAddr = address + size;
+
+ lock (_blockManager)
+ {
+ ulong mappedSize = 0;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State != MemoryState.Unmapped)
+ {
+ mappedSize += GetSizeInRange(info, address, endAddr);
+ }
+ }
+
+ if (mappedSize == size)
+ {
+ return KernelResult.Success;
+ }
+
+ ulong remainingSize = size - mappedSize;
+
+ ulong remainingPages = remainingSize / PageSize;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (currentProcess.ResourceLimit != null &&
+ !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, remainingSize))
+ {
+ return KernelResult.ResLimitExceeded;
+ }
+
+ KMemoryRegionManager region = GetMemoryRegionManager();
+
+ KernelResult result = region.AllocatePages(remainingPages, _aslrDisabled, out KPageList pageList);
+
+ using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
+
+ void CleanUpForError()
+ {
+ currentProcess.ResourceLimit?.Release(LimitableResource.Memory, remainingSize);
+ }
+
+ if (result != KernelResult.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfResource;
+ }
+
+ LinkedListNode<KPageNode> pageListNode = pageList.Nodes.First;
+
+ KPageNode pageNode = pageListNode.Value;
+
+ ulong srcPa = pageNode.Address;
+ ulong srcPaPages = pageNode.PagesCount;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State != MemoryState.Unmapped)
+ {
+ continue;
+ }
+
+ ulong blockSize = GetSizeInRange(info, address, endAddr);
+
+ ulong dstVaPages = blockSize / PageSize;
+
+ ulong dstVa = GetAddrInRange(info, address);
+
+ while (dstVaPages > 0)
+ {
+ if (srcPaPages == 0)
+ {
+ pageListNode = pageListNode.Next;
+
+ pageNode = pageListNode.Value;
+
+ srcPa = pageNode.Address;
+ srcPaPages = pageNode.PagesCount;
+ }
+
+ ulong currentPagesCount = Math.Min(srcPaPages, dstVaPages);
+
+ MapPages(dstVa, currentPagesCount, srcPa, KMemoryPermission.ReadAndWrite);
+
+ dstVa += currentPagesCount * PageSize;
+ srcPa += currentPagesCount * PageSize;
+ srcPaPages -= currentPagesCount;
+ dstVaPages -= currentPagesCount;
+ }
+ }
+
+ PhysicalMemoryUsage += remainingSize;
+
+ ulong pagesCount = size / PageSize;
+
+ _blockManager.InsertBlock(
+ address,
+ pagesCount,
+ MemoryState.Unmapped,
+ KMemoryPermission.None,
+ MemoryAttribute.None,
+ MemoryState.Heap,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.None);
+ }
+
+ return KernelResult.Success;
+ }
+
+ public KernelResult UnmapPhysicalMemory(ulong address, ulong size)
+ {
+ ulong endAddr = address + size;
+
+ lock (_blockManager)
+ {
+ // Scan, ensure that the region can be unmapped (all blocks are heap or
+ // already unmapped), fill pages list for freeing memory.
+ ulong heapMappedSize = 0;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State == MemoryState.Heap)
+ {
+ if (info.Attribute != MemoryAttribute.None)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong blockSize = GetSizeInRange(info, address, endAddr);
+
+ heapMappedSize += blockSize;
+ }
+ else if (info.State != MemoryState.Unmapped)
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+
+ if (heapMappedSize == 0)
+ {
+ return KernelResult.Success;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ // Try to unmap all the heap mapped memory inside range.
+ KernelResult result = KernelResult.Success;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State == MemoryState.Heap)
+ {
+ ulong blockSize = GetSizeInRange(info, address, endAddr);
+ ulong blockAddress = GetAddrInRange(info, address);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ result = Unmap(blockAddress, blockPagesCount);
+
+ // The kernel would attempt to remap if this fails, but we don't because:
+ // - The implementation may not support remapping if memory aliasing is not supported on the platform.
+ // - Unmap can't ever fail here anyway.
+ Debug.Assert(result == KernelResult.Success);
+ }
+ }
+
+ if (result == KernelResult.Success)
+ {
+ PhysicalMemoryUsage -= heapMappedSize;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ currentProcess.ResourceLimit?.Release(LimitableResource.Memory, heapMappedSize);
+
+ ulong pagesCount = size / PageSize;
+
+ _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+ }
+
+ return result;
+ }
+ }
+
+ public KernelResult CopyDataToCurrentProcess(
+ ulong dst,
+ ulong size,
+ ulong src,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permission,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected)
+ {
+ // Client -> server.
+ return CopyDataFromOrToCurrentProcess(
+ size,
+ src,
+ dst,
+ stateMask,
+ stateExpected,
+ permission,
+ attributeMask,
+ attributeExpected,
+ toServer: true);
+ }
+
+ public KernelResult CopyDataFromCurrentProcess(
+ ulong dst,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permission,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ ulong src)
+ {
+ // Server -> client.
+ return CopyDataFromOrToCurrentProcess(
+ size,
+ dst,
+ src,
+ stateMask,
+ stateExpected,
+ permission,
+ attributeMask,
+ attributeExpected,
+ toServer: false);
+ }
+
+ private KernelResult CopyDataFromOrToCurrentProcess(
+ ulong size,
+ ulong clientAddress,
+ ulong serverAddress,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permission,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ bool toServer)
+ {
+ if (AddrSpaceStart > clientAddress)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong srcEndAddr = clientAddress + size;
+
+ if (srcEndAddr <= clientAddress || srcEndAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ clientAddress,
+ size,
+ stateMask,
+ stateExpected,
+ permission,
+ permission,
+ attributeMask | MemoryAttribute.Uncached,
+ attributeExpected))
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ while (size > 0)
+ {
+ ulong copySize = 0x100000; // Copy chunk size. Any value will do, moderate sizes are recommended.
+
+ if (copySize > size)
+ {
+ copySize = size;
+ }
+
+ if (toServer)
+ {
+ currentProcess.CpuMemory.Write(serverAddress, GetSpan(clientAddress, (int)copySize));
+ }
+ else
+ {
+ Write(clientAddress, currentProcess.CpuMemory.GetSpan(serverAddress, (int)copySize));
+ }
+
+ serverAddress += copySize;
+ clientAddress += copySize;
+ size -= copySize;
+ }
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult MapBufferFromClientProcess(
+ ulong size,
+ ulong src,
+ KPageTableBase srcPageTable,
+ KMemoryPermission permission,
+ MemoryState state,
+ bool send,
+ out ulong dst)
+ {
+ dst = 0;
+
+ lock (srcPageTable._blockManager)
+ {
+ lock (_blockManager)
+ {
+ KernelResult result = srcPageTable.ReprotectClientProcess(
+ src,
+ size,
+ permission,
+ state,
+ out int blocksNeeded);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+
+ if (!srcPageTable._slabManager.CanAllocate(blocksNeeded))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong srcMapAddress = BitUtils.AlignUp(src, PageSize);
+ ulong srcMapEndAddr = BitUtils.AlignDown(src + size, PageSize);
+ ulong srcMapSize = srcMapEndAddr - srcMapAddress;
+
+ result = MapPagesFromClientProcess(size, src, permission, state, srcPageTable, send, out ulong va);
+
+ if (result != KernelResult.Success)
+ {
+ if (srcMapEndAddr > srcMapAddress)
+ {
+ srcPageTable.UnmapIpcRestorePermission(src, size, state);
+ }
+
+ return result;
+ }
+
+ if (srcMapAddress < srcMapEndAddr)
+ {
+ KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite
+ ? KMemoryPermission.None
+ : KMemoryPermission.Read;
+
+ srcPageTable._blockManager.InsertBlock(srcMapAddress, srcMapSize / PageSize, SetIpcMappingPermissions, permissionMask);
+ }
+
+ dst = va;
+ }
+ }
+
+ return KernelResult.Success;
+ }
+
+ private KernelResult ReprotectClientProcess(
+ ulong address,
+ ulong size,
+ KMemoryPermission permission,
+ MemoryState state,
+ out int blocksNeeded)
+ {
+ blocksNeeded = 0;
+
+ if (AddrSpaceStart > address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong endAddr = address + size;
+
+ if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ MemoryState stateMask;
+
+ switch (state)
+ {
+ case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break;
+ case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break;
+ case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break;
+
+ default: return KernelResult.InvalidCombination;
+ }
+
+ KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite
+ ? KMemoryPermission.None
+ : KMemoryPermission.Read;
+
+ MemoryAttribute attributeMask = MemoryAttribute.Borrowed | MemoryAttribute.Uncached;
+
+ if (state == MemoryState.IpcBuffer0)
+ {
+ attributeMask |= MemoryAttribute.DeviceMapped;
+ }
+
+ ulong addressRounded = BitUtils.AlignUp(address, PageSize);
+ ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp(endAddr, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize);
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong visitedSize = 0;
+
+ void CleanUpForError()
+ {
+ if (visitedSize == 0)
+ {
+ return;
+ }
+
+ ulong endAddrVisited = address + visitedSize;
+
+ foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrVisited))
+ {
+ if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0)
+ {
+ ulong blockAddress = GetAddrInRange(info, addressRounded);
+ ulong blockSize = GetSizeInRange(info, addressRounded, endAddrVisited);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ KernelResult reprotectResult = Reprotect(blockAddress, blockPagesCount, info.Permission);
+ Debug.Assert(reprotectResult == KernelResult.Success);
+ }
+ }
+ }
+
+ // Signal a read for any resources tracking reads in the region, as the other process is likely to use their data.
+ SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, false);
+
+ // Reprotect the aligned pages range on the client to make them inaccessible from the client process.
+ KernelResult result;
+
+ if (addressRounded < endAddrTruncated)
+ {
+ foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated))
+ {
+ // Check if the block state matches what we expect.
+ if ((info.State & stateMask) != stateMask ||
+ (info.Permission & permission) != permission ||
+ (info.Attribute & attributeMask) != MemoryAttribute.None)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong blockAddress = GetAddrInRange(info, addressRounded);
+ ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ // If the first block starts before the aligned range, it will need to be split.
+ if (info.Address < addressRounded)
+ {
+ blocksNeeded++;
+ }
+
+ // If the last block ends after the aligned range, it will need to be split.
+ if (endAddrTruncated - 1 < info.Address + info.Size - 1)
+ {
+ blocksNeeded++;
+ }
+
+ if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0)
+ {
+ result = Reprotect(blockAddress, blockPagesCount, permissionMask);
+
+ if (result != KernelResult.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+ }
+
+ visitedSize += blockSize;
+ }
+ }
+
+ return KernelResult.Success;
+ }
+
+ private KernelResult MapPagesFromClientProcess(
+ ulong size,
+ ulong address,
+ KMemoryPermission permission,
+ MemoryState state,
+ KPageTableBase srcPageTable,
+ bool send,
+ out ulong dst)
+ {
+ if (!SupportsMemoryAliasing)
+ {
+ throw new NotSupportedException("Memory aliasing not supported, can't map IPC buffers.");
+ }
+
+ dst = 0;
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong endAddr = address + size;
+
+ ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
+ ulong addressRounded = BitUtils.AlignUp(address, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp(endAddr, PageSize);
+
+ ulong neededSize = endAddrRounded - addressTruncated;
+
+ ulong neededPagesCount = neededSize / PageSize;
+
+ ulong regionPagesCount = (AliasRegionEnd - AliasRegionStart) / PageSize;
+
+ ulong va = 0;
+
+ for (int unit = MappingUnitSizes.Length - 1; unit >= 0 && va == 0; unit--)
+ {
+ int alignment = MappingUnitSizes[unit];
+
+ va = AllocateVa(AliasRegionStart, regionPagesCount, neededPagesCount, alignment);
+ }
+
+ if (va == 0)
+ {
+ return KernelResult.OutOfVaSpace;
+ }
+
+ ulong dstFirstPagePa = 0;
+ ulong dstLastPagePa = 0;
+ ulong currentVa = va;
+
+ using var _ = new OnScopeExit(() =>
+ {
+ if (dstFirstPagePa != 0)
+ {
+ Context.MemoryManager.DecrementPagesReferenceCount(dstFirstPagePa, 1);
+ }
+
+ if (dstLastPagePa != 0)
+ {
+ Context.MemoryManager.DecrementPagesReferenceCount(dstLastPagePa, 1);
+ }
+ });
+
+ void CleanUpForError()
+ {
+ if (currentVa != va)
+ {
+ Unmap(va, (currentVa - va) / PageSize);
+ }
+ }
+
+ // Is the first page address aligned?
+ // If not, allocate a new page and copy the unaligned chunk.
+ if (addressTruncated < addressRounded)
+ {
+ dstFirstPagePa = GetMemoryRegionManager().AllocatePagesContiguous(Context, 1, _aslrDisabled);
+
+ if (dstFirstPagePa == 0)
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfMemory;
+ }
+ }
+
+ // Is the last page end address aligned?
+ // If not, allocate a new page and copy the unaligned chunk.
+ if (endAddrTruncated < endAddrRounded && (addressTruncated == addressRounded || addressTruncated < endAddrTruncated))
+ {
+ dstLastPagePa = GetMemoryRegionManager().AllocatePagesContiguous(Context, 1, _aslrDisabled);
+
+ if (dstLastPagePa == 0)
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfMemory;
+ }
+ }
+
+ if (dstFirstPagePa != 0)
+ {
+ ulong firstPageFillAddress = dstFirstPagePa;
+ ulong unusedSizeAfter;
+
+ if (send)
+ {
+ ulong unusedSizeBefore = address - addressTruncated;
+
+ Context.Memory.ZeroFill(GetDramAddressFromPa(dstFirstPagePa), unusedSizeBefore);
+
+ ulong copySize = addressRounded <= endAddr ? addressRounded - address : size;
+ var data = srcPageTable.GetSpan(addressTruncated + unusedSizeBefore, (int)copySize);
+
+ Context.Memory.Write(GetDramAddressFromPa(dstFirstPagePa + unusedSizeBefore), data);
+
+ firstPageFillAddress += unusedSizeBefore + copySize;
+
+ unusedSizeAfter = addressRounded > endAddr ? addressRounded - endAddr : 0;
+ }
+ else
+ {
+ unusedSizeAfter = PageSize;
+ }
+
+ if (unusedSizeAfter != 0)
+ {
+ Context.Memory.ZeroFill(GetDramAddressFromPa(firstPageFillAddress), unusedSizeAfter);
+ }
+
+ KernelResult result = MapPages(currentVa, 1, dstFirstPagePa, permission);
+
+ if (result != KernelResult.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ currentVa += PageSize;
+ }
+
+ if (endAddrTruncated > addressRounded)
+ {
+ ulong alignedSize = endAddrTruncated - addressRounded;
+
+ KernelResult result = MapPages(currentVa, srcPageTable.GetPhysicalRegions(addressRounded, alignedSize), permission);
+
+ if (result != KernelResult.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ currentVa += alignedSize;
+ }
+
+ if (dstLastPagePa != 0)
+ {
+ ulong lastPageFillAddr = dstLastPagePa;
+ ulong unusedSizeAfter;
+
+ if (send)
+ {
+ ulong copySize = endAddr - endAddrTruncated;
+ var data = srcPageTable.GetSpan(endAddrTruncated, (int)copySize);
+
+ Context.Memory.Write(GetDramAddressFromPa(dstLastPagePa), data);
+
+ lastPageFillAddr += copySize;
+
+ unusedSizeAfter = PageSize - copySize;
+ }
+ else
+ {
+ unusedSizeAfter = PageSize;
+ }
+
+ Context.Memory.ZeroFill(GetDramAddressFromPa(lastPageFillAddr), unusedSizeAfter);
+
+ KernelResult result = MapPages(currentVa, 1, dstLastPagePa, permission);
+
+ if (result != KernelResult.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+ }
+
+ _blockManager.InsertBlock(va, neededPagesCount, state, permission);
+
+ dst = va + (address - addressTruncated);
+
+ return KernelResult.Success;
+ }
+
+ public KernelResult UnmapNoAttributeIfStateEquals(ulong address, ulong size, MemoryState state)
+ {
+ if (AddrSpaceStart > address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong endAddr = address + size;
+
+ if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ state,
+ KMemoryPermission.Read,
+ KMemoryPermission.Read,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _))
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
+ ulong addressRounded = BitUtils.AlignUp(address, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp(endAddr, PageSize);
+
+ ulong pagesCount = (endAddrRounded - addressTruncated) / PageSize;
+
+ KernelResult result = Unmap(addressTruncated, pagesCount);
+
+ if (result == KernelResult.Success)
+ {
+ _blockManager.InsertBlock(addressTruncated, pagesCount, MemoryState.Unmapped);
+ }
+
+ return result;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult UnmapIpcRestorePermission(ulong address, ulong size, MemoryState state)
+ {
+ ulong endAddr = address + size;
+
+ ulong addressRounded = BitUtils.AlignUp(address, PageSize);
+ ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp(endAddr, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize);
+
+ ulong pagesCount = addressRounded < endAddrTruncated ? (endAddrTruncated - addressRounded) / PageSize : 0;
+
+ if (pagesCount == 0)
+ {
+ return KernelResult.Success;
+ }
+
+ MemoryState stateMask;
+
+ switch (state)
+ {
+ case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break;
+ case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break;
+ case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break;
+
+ default: return KernelResult.InvalidCombination;
+ }
+
+ MemoryAttribute attributeMask =
+ MemoryAttribute.Borrowed |
+ MemoryAttribute.IpcMapped |
+ MemoryAttribute.Uncached;
+
+ if (state == MemoryState.IpcBuffer0)
+ {
+ attributeMask |= MemoryAttribute.DeviceMapped;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ // Anything on the client side should see this memory as modified.
+ SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, true);
+
+ lock (_blockManager)
+ {
+ foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated))
+ {
+ // Check if the block state matches what we expect.
+ if ((info.State & stateMask) != stateMask ||
+ (info.Attribute & attributeMask) != MemoryAttribute.IpcMapped)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (info.Permission != info.SourcePermission && info.IpcRefCount == 1)
+ {
+ ulong blockAddress = GetAddrInRange(info, addressRounded);
+ ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ KernelResult result = Reprotect(blockAddress, blockPagesCount, info.SourcePermission);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+ }
+ }
+
+ _blockManager.InsertBlock(addressRounded, pagesCount, RestoreIpcMappingPermissions);
+
+ return KernelResult.Success;
+ }
+ }
+
+ private static void SetIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission)
+ {
+ block.SetIpcMappingPermission(permission);
+ }
+
+ private static void RestoreIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission)
+ {
+ block.RestoreIpcMappingPermission();
+ }
+
+ public KernelResult BorrowIpcBuffer(ulong address, ulong size)
+ {
+ return SetAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.IpcBufferAllowed,
+ MemoryState.IpcBufferAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Borrowed);
+ }
+
+ public KernelResult BorrowTransferMemory(List<HostMemoryRange> ranges, ulong address, ulong size, KMemoryPermission permission)
+ {
+ return SetAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.TransferMemoryAllowed,
+ MemoryState.TransferMemoryAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ permission,
+ MemoryAttribute.Borrowed,
+ ranges);
+ }
+
+ private KernelResult SetAttributesAndChangePermission(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ KMemoryPermission newPermission,
+ MemoryAttribute attributeSetMask,
+ List<HostMemoryRange> ranges = null)
+ {
+ if (address + size <= address || !InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ stateMask | MemoryState.IsPoolAllocated,
+ stateExpected | MemoryState.IsPoolAllocated,
+ permissionMask,
+ permissionExpected,
+ attributeMask,
+ attributeExpected,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState oldState,
+ out KMemoryPermission oldPermission,
+ out MemoryAttribute oldAttribute))
+ {
+ ulong pagesCount = size / PageSize;
+
+ ranges?.AddRange(GetPhysicalRegions(address, size));
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ if (newPermission == KMemoryPermission.None)
+ {
+ newPermission = oldPermission;
+ }
+
+ if (newPermission != oldPermission)
+ {
+ KernelResult result = Reprotect(address, pagesCount, newPermission);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+ }
+
+ MemoryAttribute newAttribute = oldAttribute | attributeSetMask;
+
+ _blockManager.InsertBlock(address, pagesCount, oldState, newPermission, newAttribute);
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KernelResult UnborrowIpcBuffer(ulong address, ulong size)
+ {
+ return ClearAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.IpcBufferAllowed,
+ MemoryState.IpcBufferAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Borrowed);
+ }
+
+ public KernelResult UnborrowTransferMemory(ulong address, ulong size, List<HostMemoryRange> ranges)
+ {
+ return ClearAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.TransferMemoryAllowed,
+ MemoryState.TransferMemoryAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Borrowed,
+ ranges);
+ }
+
+ private KernelResult ClearAttributesAndChangePermission(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ KMemoryPermission newPermission,
+ MemoryAttribute attributeClearMask,
+ List<HostMemoryRange> ranges = null)
+ {
+ if (address + size <= address || !InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ stateMask | MemoryState.IsPoolAllocated,
+ stateExpected | MemoryState.IsPoolAllocated,
+ permissionMask,
+ permissionExpected,
+ attributeMask,
+ attributeExpected,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState oldState,
+ out KMemoryPermission oldPermission,
+ out MemoryAttribute oldAttribute))
+ {
+ ulong pagesCount = size / PageSize;
+
+ if (ranges != null)
+ {
+ var currentRanges = GetPhysicalRegions(address, size);
+
+ if (!currentRanges.SequenceEqual(ranges))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ if (newPermission == KMemoryPermission.None)
+ {
+ newPermission = oldPermission;
+ }
+
+ if (newPermission != oldPermission)
+ {
+ KernelResult result = Reprotect(address, pagesCount, newPermission);
+
+ if (result != KernelResult.Success)
+ {
+ return result;
+ }
+ }
+
+ MemoryAttribute newAttribute = oldAttribute & ~attributeClearMask;
+
+ _blockManager.InsertBlock(address, pagesCount, oldState, newPermission, newAttribute);
+
+ return KernelResult.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ private static ulong GetAddrInRange(KMemoryInfo info, ulong start)
+ {
+ if (info.Address < start)
+ {
+ return start;
+ }
+
+ return info.Address;
+ }
+
+ private static ulong GetSizeInRange(KMemoryInfo info, ulong start, ulong end)
+ {
+ ulong endAddr = info.Size + info.Address;
+ ulong size = info.Size;
+
+ if (info.Address < start)
+ {
+ size -= start - info.Address;
+ }
+
+ if (endAddr > end)
+ {
+ size -= endAddr - end;
+ }
+
+ return size;
+ }
+
+ private bool IsUnmapped(ulong address, ulong size)
+ {
+ return CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ MemoryState.Unmapped,
+ KMemoryPermission.Mask,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _);
+ }
+
+ private bool CheckRange(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ MemoryAttribute attributeIgnoreMask,
+ out MemoryState outState,
+ out KMemoryPermission outPermission,
+ out MemoryAttribute outAttribute)
+ {
+ ulong endAddr = address + size;
+
+ LinkedListNode<KMemoryBlock> node = _blockManager.FindBlockNode(address);
+
+ KMemoryInfo info = node.Value.GetInfo();
+
+ MemoryState firstState = info.State;
+ KMemoryPermission firstPermission = info.Permission;
+ MemoryAttribute firstAttribute = info.Attribute;
+
+ do
+ {
+ info = node.Value.GetInfo();
+
+ // Check if the block state matches what we expect.
+ if (firstState != info.State ||
+ firstPermission != info.Permission ||
+ (info.Attribute & attributeMask) != attributeExpected ||
+ (firstAttribute | attributeIgnoreMask) != (info.Attribute | attributeIgnoreMask) ||
+ (firstState & stateMask) != stateExpected ||
+ (firstPermission & permissionMask) != permissionExpected)
+ {
+ outState = MemoryState.Unmapped;
+ outPermission = KMemoryPermission.None;
+ outAttribute = MemoryAttribute.None;
+
+ return false;
+ }
+ }
+ while (info.Address + info.Size - 1 < endAddr - 1 && (node = node.Next) != null);
+
+ outState = firstState;
+ outPermission = firstPermission;
+ outAttribute = firstAttribute & ~attributeIgnoreMask;
+
+ return true;
+ }
+
+ private bool CheckRange(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected)
+ {
+ foreach (KMemoryInfo info in IterateOverRange(address, address + size))
+ {
+ // Check if the block state matches what we expect.
+ if ((info.State & stateMask) != stateExpected ||
+ (info.Permission & permissionMask) != permissionExpected ||
+ (info.Attribute & attributeMask) != attributeExpected)
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ private IEnumerable<KMemoryInfo> IterateOverRange(ulong start, ulong end)
+ {
+ LinkedListNode<KMemoryBlock> node = _blockManager.FindBlockNode(start);
+
+ KMemoryInfo info;
+
+ do
+ {
+ info = node.Value.GetInfo();
+
+ yield return info;
+ }
+ while (info.Address + info.Size - 1 < end - 1 && (node = node.Next) != null);
+ }
+
+ private ulong AllocateVa(ulong regionStart, ulong regionPagesCount, ulong neededPagesCount, int alignment)
+ {
+ ulong address = 0;
+
+ ulong regionEndAddr = regionStart + regionPagesCount * PageSize;
+
+ ulong reservedPagesCount = _isKernel ? 1UL : 4UL;
+
+ if (_aslrEnabled)
+ {
+ ulong totalNeededSize = (reservedPagesCount + neededPagesCount) * PageSize;
+
+ ulong remainingPages = regionPagesCount - neededPagesCount;
+
+ ulong aslrMaxOffset = ((remainingPages + reservedPagesCount) * PageSize) / (ulong)alignment;
+
+ for (int attempt = 0; attempt < 8; attempt++)
+ {
+ address = BitUtils.AlignDown(regionStart + GetRandomValue(0, aslrMaxOffset) * (ulong)alignment, alignment);
+
+ ulong endAddr = address + totalNeededSize;
+
+ KMemoryInfo info = _blockManager.FindBlock(address).GetInfo();
+
+ if (info.State != MemoryState.Unmapped)
+ {
+ continue;
+ }
+
+ ulong currBaseAddr = info.Address + reservedPagesCount * PageSize;
+ ulong currEndAddr = info.Address + info.Size;
+
+ if (address >= regionStart &&
+ address >= currBaseAddr &&
+ endAddr - 1 <= regionEndAddr - 1 &&
+ endAddr - 1 <= currEndAddr - 1)
+ {
+ break;
+ }
+ }
+
+ if (address == 0)
+ {
+ ulong aslrPage = GetRandomValue(0, aslrMaxOffset);
+
+ address = FindFirstFit(
+ regionStart + aslrPage * PageSize,
+ regionPagesCount - aslrPage,
+ neededPagesCount,
+ alignment,
+ 0,
+ reservedPagesCount);
+ }
+ }
+
+ if (address == 0)
+ {
+ address = FindFirstFit(
+ regionStart,
+ regionPagesCount,
+ neededPagesCount,
+ alignment,
+ 0,
+ reservedPagesCount);
+ }
+
+ return address;
+ }
+
+ private ulong FindFirstFit(
+ ulong regionStart,
+ ulong regionPagesCount,
+ ulong neededPagesCount,
+ int alignment,
+ ulong reservedStart,
+ ulong reservedPagesCount)
+ {
+ ulong reservedSize = reservedPagesCount * PageSize;
+
+ ulong totalNeededSize = reservedSize + neededPagesCount * PageSize;
+
+ ulong regionEndAddr = regionStart + regionPagesCount * PageSize;
+
+ LinkedListNode<KMemoryBlock> node = _blockManager.FindBlockNode(regionStart);
+
+ KMemoryInfo info = node.Value.GetInfo();
+
+ while (regionEndAddr >= info.Address)
+ {
+ if (info.State == MemoryState.Unmapped)
+ {
+ ulong currBaseAddr = info.Address + reservedSize;
+ ulong currEndAddr = info.Address + info.Size - 1;
+
+ ulong address = BitUtils.AlignDown(currBaseAddr, alignment) + reservedStart;
+
+ if (currBaseAddr > address)
+ {
+ address += (ulong)alignment;
+ }
+
+ ulong allocationEndAddr = address + totalNeededSize - 1;
+
+ if (allocationEndAddr <= regionEndAddr &&
+ allocationEndAddr <= currEndAddr &&
+ address < allocationEndAddr)
+ {
+ return address;
+ }
+ }
+
+ node = node.Next;
+
+ if (node == null)
+ {
+ break;
+ }
+
+ info = node.Value.GetInfo();
+ }
+
+ return 0;
+ }
+
+ public bool CanContain(ulong address, ulong size, MemoryState state)
+ {
+ ulong endAddr = address + size;
+
+ ulong regionBaseAddr = GetBaseAddress(state);
+ ulong regionEndAddr = regionBaseAddr + GetSize(state);
+
+ bool InsideRegion()
+ {
+ return regionBaseAddr <= address &&
+ endAddr > address &&
+ endAddr - 1 <= regionEndAddr - 1;
+ }
+
+ bool OutsideHeapRegion()
+ {
+ return endAddr <= HeapRegionStart || address >= HeapRegionEnd;
+ }
+
+ bool OutsideAliasRegion()
+ {
+ return endAddr <= AliasRegionStart || address >= AliasRegionEnd;
+ }
+
+ switch (state)
+ {
+ case MemoryState.Io:
+ case MemoryState.Normal:
+ case MemoryState.CodeStatic:
+ case MemoryState.CodeMutable:
+ case MemoryState.SharedMemory:
+ case MemoryState.ModCodeStatic:
+ case MemoryState.ModCodeMutable:
+ case MemoryState.Stack:
+ case MemoryState.ThreadLocal:
+ case MemoryState.TransferMemoryIsolated:
+ case MemoryState.TransferMemory:
+ case MemoryState.ProcessMemory:
+ case MemoryState.CodeReadOnly:
+ case MemoryState.CodeWritable:
+ return InsideRegion() && OutsideHeapRegion() && OutsideAliasRegion();
+
+ case MemoryState.Heap:
+ return InsideRegion() && OutsideAliasRegion();
+
+ case MemoryState.IpcBuffer0:
+ case MemoryState.IpcBuffer1:
+ case MemoryState.IpcBuffer3:
+ return InsideRegion() && OutsideHeapRegion();
+
+ case MemoryState.KernelStack:
+ return InsideRegion();
+ }
+
+ throw new ArgumentException($"Invalid state value \"{state}\".");
+ }
+
+ private ulong GetBaseAddress(MemoryState state)
+ {
+ switch (state)
+ {
+ case MemoryState.Io:
+ case MemoryState.Normal:
+ case MemoryState.ThreadLocal:
+ return TlsIoRegionStart;
+
+ case MemoryState.CodeStatic:
+ case MemoryState.CodeMutable:
+ case MemoryState.SharedMemory:
+ case MemoryState.ModCodeStatic:
+ case MemoryState.ModCodeMutable:
+ case MemoryState.TransferMemoryIsolated:
+ case MemoryState.TransferMemory:
+ case MemoryState.ProcessMemory:
+ case MemoryState.CodeReadOnly:
+ case MemoryState.CodeWritable:
+ return GetAddrSpaceBaseAddr();
+
+ case MemoryState.Heap:
+ return HeapRegionStart;
+
+ case MemoryState.IpcBuffer0:
+ case MemoryState.IpcBuffer1:
+ case MemoryState.IpcBuffer3:
+ return AliasRegionStart;
+
+ case MemoryState.Stack:
+ return StackRegionStart;
+
+ case MemoryState.KernelStack:
+ return AddrSpaceStart;
+ }
+
+ throw new ArgumentException($"Invalid state value \"{state}\".");
+ }
+
+ private ulong GetSize(MemoryState state)
+ {
+ switch (state)
+ {
+ case MemoryState.Io:
+ case MemoryState.Normal:
+ case MemoryState.ThreadLocal:
+ return TlsIoRegionEnd - TlsIoRegionStart;
+
+ case MemoryState.CodeStatic:
+ case MemoryState.CodeMutable:
+ case MemoryState.SharedMemory:
+ case MemoryState.ModCodeStatic:
+ case MemoryState.ModCodeMutable:
+ case MemoryState.TransferMemoryIsolated:
+ case MemoryState.TransferMemory:
+ case MemoryState.ProcessMemory:
+ case MemoryState.CodeReadOnly:
+ case MemoryState.CodeWritable:
+ return GetAddrSpaceSize();
+
+ case MemoryState.Heap:
+ return HeapRegionEnd - HeapRegionStart;
+
+ case MemoryState.IpcBuffer0:
+ case MemoryState.IpcBuffer1:
+ case MemoryState.IpcBuffer3:
+ return AliasRegionEnd - AliasRegionStart;
+
+ case MemoryState.Stack:
+ return StackRegionEnd - StackRegionStart;
+
+ case MemoryState.KernelStack:
+ return AddrSpaceEnd - AddrSpaceStart;
+ }
+
+ throw new ArgumentException($"Invalid state value \"{state}\".");
+ }
+
+ public ulong GetAddrSpaceBaseAddr()
+ {
+ if (AddrSpaceWidth == 36 || AddrSpaceWidth == 39)
+ {
+ return 0x8000000;
+ }
+ else if (AddrSpaceWidth == 32)
+ {
+ return 0x200000;
+ }
+ else
+ {
+ throw new InvalidOperationException("Invalid address space width!");
+ }
+ }
+
+ public ulong GetAddrSpaceSize()
+ {
+ if (AddrSpaceWidth == 36)
+ {
+ return 0xff8000000;
+ }
+ else if (AddrSpaceWidth == 39)
+ {
+ return 0x7ff8000000;
+ }
+ else if (AddrSpaceWidth == 32)
+ {
+ return 0xffe00000;
+ }
+ else
+ {
+ throw new InvalidOperationException("Invalid address space width!");
+ }
+ }
+
+ private static ulong GetDramAddressFromPa(ulong pa)
+ {
+ return pa - DramMemoryMap.DramBase;
+ }
+
+ protected KMemoryRegionManager GetMemoryRegionManager()
+ {
+ return Context.MemoryManager.MemoryRegions[(int)_memRegion];
+ }
+
+ public long GetMmUsedPages()
+ {
+ lock (_blockManager)
+ {
+ return BitUtils.DivRoundUp(GetMmUsedSize(), PageSize);
+ }
+ }
+
+ private long GetMmUsedSize()
+ {
+ return _blockManager.BlocksCount * KMemoryBlockSize;
+ }
+
+ public bool IsInvalidRegion(ulong address, ulong size)
+ {
+ return address + size - 1 > GetAddrSpaceBaseAddr() + GetAddrSpaceSize() - 1;
+ }
+
+ public bool InsideAddrSpace(ulong address, ulong size)
+ {
+ return AddrSpaceStart <= address && address + size - 1 <= AddrSpaceEnd - 1;
+ }
+
+ public bool InsideAliasRegion(ulong address, ulong size)
+ {
+ return address + size > AliasRegionStart && AliasRegionEnd > address;
+ }
+
+ public bool InsideHeapRegion(ulong address, ulong size)
+ {
+ return address + size > HeapRegionStart && HeapRegionEnd > address;
+ }
+
+ public bool InsideStackRegion(ulong address, ulong size)
+ {
+ return address + size > StackRegionStart && StackRegionEnd > address;
+ }
+
+ public bool OutsideAliasRegion(ulong address, ulong size)
+ {
+ return AliasRegionStart > address || address + size - 1 > AliasRegionEnd - 1;
+ }
+
+ public bool OutsideAddrSpace(ulong address, ulong size)
+ {
+ return AddrSpaceStart > address || address + size - 1 > AddrSpaceEnd - 1;
+ }
+
+ public bool OutsideStackRegion(ulong address, ulong size)
+ {
+ return StackRegionStart > address || address + size - 1 > StackRegionEnd - 1;
+ }
+
+ /// <summary>
+ /// Gets the physical regions that make up the given virtual address region.
+ /// If any part of the virtual region is unmapped, null is returned.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <returns>Array of physical regions</returns>
+ protected abstract IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size);
+
+ /// <summary>
+ /// Gets a read-only span of data from CPU mapped memory.
+ /// </summary>
+ /// <remarks>
+ /// This may perform an allocation if the data is not contiguous in memory.
+ /// For this reason, the span is read-only, you can't modify the data.
+ /// </remarks>
+ /// <param name="va">Virtual address of the data</param>
+ /// <param name="size">Size of the data</param>
+ /// <param name="tracked">True if read tracking is triggered on the span</param>
+ /// <returns>A read-only span of the data</returns>
+ /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
+ protected abstract ReadOnlySpan<byte> GetSpan(ulong va, int size);
+
+ /// <summary>
+ /// Maps a new memory region with the contents of an existing memory region.
+ /// </summary>
+ /// <param name="src">Source memory region where the data will be taken from</param>
+ /// <param name="dst">Destination memory region to map</param>
+ /// <param name="pagesCount">Number of pages to map</param>
+ /// <param name="oldSrcPermission">Current protection of the source memory region</param>
+ /// <param name="newDstPermission">Desired protection for the destination memory region</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission);
+
+ /// <summary>
+ /// Unmaps a region of memory that was previously mapped with <see cref="MapMemory"/>.
+ /// </summary>
+ /// <param name="dst">Destination memory region to be unmapped</param>
+ /// <param name="src">Source memory region that was originally remapped</param>
+ /// <param name="pagesCount">Number of pages to unmap</param>
+ /// <param name="oldDstPermission">Current protection of the destination memory region</param>
+ /// <param name="newSrcPermission">Desired protection of the source memory region</param>
+ /// <returns>Result of the unmapping operation</returns>
+ protected abstract KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission);
+
+ /// <summary>
+ /// Maps a region of memory into the specified physical memory region.
+ /// </summary>
+ /// <param name="dstVa">Destination virtual address that should be mapped</param>
+ /// <param name="pagesCount">Number of pages to map</param>
+ /// <param name="srcPa">Physical address where the pages should be mapped. May be ignored if aliasing is not supported</param>
+ /// <param name="permission">Permission of the region to be mapped</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract KernelResult MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission);
+
+ /// <summary>
+ /// Maps a region of memory into the specified physical memory region.
+ /// </summary>
+ /// <param name="address">Destination virtual address that should be mapped</param>
+ /// <param name="pageList">List of physical memory pages where the pages should be mapped. May be ignored if aliasing is not supported</param>
+ /// <param name="permission">Permission of the region to be mapped</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission);
+
+ /// <summary>
+ /// Maps a region of memory into the specified host memory ranges.
+ /// </summary>
+ /// <param name="address">Destination virtual address that should be mapped</param>
+ /// <param name="ranges">Ranges of host memory that should be mapped</param>
+ /// <param name="permission">Permission of the region to be mapped</param>
+ /// <returns>Result of the mapping operation</returns>
+ /// <exception cref="NotSupportedException">The implementation does not support memory aliasing</exception>
+ protected abstract KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission);
+
+ /// <summary>
+ /// Unmaps a region of memory that was previously mapped with one of the page mapping methods.
+ /// </summary>
+ /// <param name="address">Virtual address of the region to unmap</param>
+ /// <param name="pagesCount">Number of pages to unmap</param>
+ /// <returns>Result of the unmapping operation</returns>
+ protected abstract KernelResult Unmap(ulong address, ulong pagesCount);
+
+ /// <summary>
+ /// Changes the permissions of a given virtual memory region.
+ /// </summary>
+ /// <param name="address">Virtual address of the region to have the permission changes</param>
+ /// <param name="pagesCount">Number of pages to have their permissions changed</param>
+ /// <param name="permission">New permission</param>
+ /// <returns>Result of the permission change operation</returns>
+ protected abstract KernelResult Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission);
+
+ /// <summary>
+ /// Changes the permissions of a given virtual memory region.
+ /// </summary>
+ /// <param name="address">Virtual address of the region to have the permission changes</param>
+ /// <param name="pagesCount">Number of pages to have their permissions changed</param>
+ /// <param name="permission">New permission</param>
+ /// <returns>Result of the permission change operation</returns>
+ protected abstract KernelResult ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission);
+
+ /// <summary>
+ /// Alerts the memory tracking that a given region has been read from or written to.
+ /// This should be called before read/write is performed.
+ /// </summary>
+ /// <param name="va">Virtual address of the region</param>
+ /// <param name="size">Size of the region</param>
+ protected abstract void SignalMemoryTracking(ulong va, ulong size, bool write);
+
+ /// <summary>
+ /// Writes data to CPU mapped memory, with write tracking.
+ /// </summary>
+ /// <param name="va">Virtual address to write the data into</param>
+ /// <param name="data">Data to be written</param>
+ /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
+ protected abstract void Write(ulong va, ReadOnlySpan<byte> data);
+ }
+} \ No newline at end of file
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableHostMapped.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableHostMapped.cs
new file mode 100644
index 00000000..cd51bab7
--- /dev/null
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableHostMapped.cs
@@ -0,0 +1,125 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KPageTableHostMapped : KPageTableBase
+ {
+ private const int CopyChunckSize = 0x100000;
+
+ private readonly IVirtualMemoryManager _cpuMemory;
+
+ public override bool SupportsMemoryAliasing => false;
+
+ public KPageTableHostMapped(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
+ {
+ _cpuMemory = cpuMemory;
+ }
+
+ /// <inheritdoc/>
+ protected override IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
+ {
+ return _cpuMemory.GetPhysicalRegions(va, size);
+ }
+
+ /// <inheritdoc/>
+ protected override ReadOnlySpan<byte> GetSpan(ulong va, int size)
+ {
+ return _cpuMemory.GetSpan(va, size);
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
+ {
+ ulong size = pagesCount * PageSize;
+
+ _cpuMemory.Map(dst, 0, size);
+
+ ulong currentSize = size;
+ while (currentSize > 0)
+ {
+ ulong copySize = Math.Min(currentSize, CopyChunckSize);
+ _cpuMemory.Write(dst, _cpuMemory.GetSpan(src, (int)copySize));
+ currentSize -= copySize;
+ }
+
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
+ {
+ ulong size = pagesCount * PageSize;
+
+ // TODO: Validation.
+
+ ulong currentSize = size;
+ while (currentSize > 0)
+ {
+ ulong copySize = Math.Min(currentSize, CopyChunckSize);
+ _cpuMemory.Write(src, _cpuMemory.GetSpan(dst, (int)copySize));
+ currentSize -= copySize;
+ }
+
+ _cpuMemory.Unmap(dst, size);
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission)
+ {
+ _cpuMemory.Map(dstVa, 0, pagesCount * PageSize);
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission)
+ {
+ _cpuMemory.Map(address, 0, pageList.GetPagesCount() * PageSize);
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission)
+ {
+ throw new NotSupportedException();
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult Unmap(ulong address, ulong pagesCount)
+ {
+ _cpuMemory.Unmap(address, pagesCount * PageSize);
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission)
+ {
+ // TODO.
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override KernelResult ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission)
+ {
+ // TODO.
+ return KernelResult.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override void SignalMemoryTracking(ulong va, ulong size, bool write)
+ {
+ _cpuMemory.SignalMemoryTracking(va, size, write);
+ }
+
+ /// <inheritdoc/>
+ protected override void Write(ulong va, ReadOnlySpan<byte> data)
+ {
+ _cpuMemory.Write(va, data);
+ }
+ }
+}
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs
new file mode 100644
index 00000000..a0c19f9c
--- /dev/null
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs
@@ -0,0 +1,27 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ struct KScopedPageList : IDisposable
+ {
+ private readonly KMemoryManager _manager;
+ private KPageList _pageList;
+
+ public KScopedPageList(KMemoryManager manager, KPageList pageList)
+ {
+ _manager = manager;
+ _pageList = pageList;
+ pageList.IncrementPagesReferenceCount(manager);
+ }
+
+ public void SignalSuccess()
+ {
+ _pageList = null;
+ }
+
+ public void Dispose()
+ {
+ _pageList?.DecrementPagesReferenceCount(_manager);
+ }
+ }
+}
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
index ca0e3421..61c883d8 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
@@ -6,7 +6,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KSharedMemory : KAutoObject
{
- private readonly KPageList _pageList;
+ private readonly SharedMemoryStorage _storage;
private readonly long _ownerPid;
@@ -14,28 +14,29 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
private readonly KMemoryPermission _userPermission;
public KSharedMemory(
- KernelContext context,
- KPageList pageList,
- long ownerPid,
+ KernelContext context,
+ SharedMemoryStorage storage,
+ long ownerPid,
KMemoryPermission ownerPermission,
KMemoryPermission userPermission) : base(context)
{
- _pageList = pageList;
- _ownerPid = ownerPid;
+ _storage = storage;
+ _ownerPid = ownerPid;
_ownerPermission = ownerPermission;
- _userPermission = userPermission;
+ _userPermission = userPermission;
}
public KernelResult MapIntoProcess(
- KMemoryManager memoryManager,
- ulong address,
- ulong size,
- KProcess process,
+ KPageTableBase memoryManager,
+ ulong address,
+ ulong size,
+ KProcess process,
KMemoryPermission permission)
{
- ulong pagesCountRounded = BitUtils.DivRoundUp(size, KMemoryManager.PageSize);
+ ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
- if (_pageList.GetPagesCount() != pagesCountRounded)
+ var pageList = _storage.GetPageList();
+ if (pageList.GetPagesCount() != pagesCountRounded)
{
return KernelResult.InvalidSize;
}
@@ -49,23 +50,35 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.InvalidPermission;
}
- return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
+ KernelResult result = memoryManager.MapPages(address, pageList, MemoryState.SharedMemory, permission);
+
+ if (result == KernelResult.Success && !memoryManager.SupportsMemoryAliasing)
+ {
+ _storage.Borrow(process, address);
+ }
+
+ return result;
}
public KernelResult UnmapFromProcess(
- KMemoryManager memoryManager,
- ulong address,
- ulong size,
- KProcess process)
+ KPageTableBase memoryManager,
+ ulong address,
+ ulong size,
+ KProcess process)
{
- ulong pagesCountRounded = BitUtils.DivRoundUp(size, KMemoryManager.PageSize);
+ ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
+
+ var pageList = _storage.GetPageList();
+ ulong pagesCount = pageList.GetPagesCount();
- if (_pageList.GetPagesCount() != pagesCountRounded)
+ if (pagesCount != pagesCountRounded)
{
return KernelResult.InvalidSize;
}
- return memoryManager.UnmapPages(address, _pageList, MemoryState.SharedMemory);
+ var ranges = _storage.GetRanges();
+
+ return memoryManager.UnmapPages(address, pagesCount, ranges, MemoryState.SharedMemory);
}
}
} \ No newline at end of file
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs
index 7107d497..c75d8e69 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs
@@ -1,6 +1,8 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Memory.Range;
using System;
+using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@@ -11,10 +13,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
// TODO: Remove when we no longer need to read it from the owner directly.
public KProcess Creator => _creator;
- private readonly KPageList _pageList;
+ private readonly List<HostMemoryRange> _ranges;
public ulong Address { get; private set; }
- public ulong Size => _pageList.GetPagesCount() * KMemoryManager.PageSize;
+ public ulong Size { get; private set; }
public KMemoryPermission Permission { get; private set; }
@@ -23,7 +25,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
public KTransferMemory(KernelContext context) : base(context)
{
- _pageList = new KPageList();
+ _ranges = new List<HostMemoryRange>();
}
public KernelResult Initialize(ulong address, ulong size, KMemoryPermission permission)
@@ -32,7 +34,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
_creator = creator;
- KernelResult result = creator.MemoryManager.BorrowTransferMemory(_pageList, address, size, permission);
+ KernelResult result = creator.MemoryManager.BorrowTransferMemory(_ranges, address, size, permission);
if (result != KernelResult.Success)
{
@@ -43,6 +45,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Permission = permission;
Address = address;
+ Size = size;
_hasBeenInitialized = true;
_isMapped = false;
@@ -53,7 +56,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
if (_hasBeenInitialized)
{
- if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _pageList) != KernelResult.Success)
+ if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _ranges) != KernelResult.Success)
{
throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes.");
}
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/MemoryOperation.cs b/Ryujinx.HLE/HOS/Kernel/Memory/MemoryOperation.cs
deleted file mode 100644
index 7f7f29de..00000000
--- a/Ryujinx.HLE/HOS/Kernel/Memory/MemoryOperation.cs
+++ /dev/null
@@ -1,12 +0,0 @@
-namespace Ryujinx.HLE.HOS.Kernel.Memory
-{
- enum MemoryOperation
- {
- MapPa,
- MapVa,
- Allocate,
- Unmap,
- ChangePermRw,
- ChangePermsAndAttributes
- }
-} \ No newline at end of file
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs b/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
new file mode 100644
index 00000000..cd22b65f
--- /dev/null
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
@@ -0,0 +1,103 @@
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class SharedMemoryStorage
+ {
+ private readonly KernelContext _context;
+ private readonly KPageList _pageList;
+ private readonly ulong _size;
+
+ private IVirtualMemoryManager _borrowerMemory;
+ private ulong _borrowerVa;
+
+ public SharedMemoryStorage(KernelContext context, KPageList pageList)
+ {
+ _context = context;
+ _pageList = pageList;
+ _size = pageList.GetPagesCount() * KPageTableBase.PageSize;
+
+ foreach (KPageNode pageNode in pageList)
+ {
+ ulong address = pageNode.Address - DramMemoryMap.DramBase;
+ ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
+ context.Memory.Commit(address, size);
+ }
+ }
+
+ public void Borrow(KProcess dstProcess, ulong va)
+ {
+ ulong currentOffset = 0;
+
+ foreach (KPageNode pageNode in _pageList)
+ {
+ ulong address = pageNode.Address - DramMemoryMap.DramBase;
+ ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
+
+ dstProcess.CpuMemory.Write(va + currentOffset, _context.Memory.GetSpan(address + currentOffset, (int)size));
+
+ currentOffset += size;
+ }
+
+ _borrowerMemory = dstProcess.CpuMemory;
+ _borrowerVa = va;
+ }
+
+ public void ZeroFill()
+ {
+ for (ulong offset = 0; offset < _size; offset += sizeof(ulong))
+ {
+ GetRef<ulong>(offset) = 0;
+ }
+ }
+
+ public ref T GetRef<T>(ulong offset) where T : unmanaged
+ {
+ if (_borrowerMemory == null)
+ {
+ if (_pageList.Nodes.Count == 1)
+ {
+ ulong address = _pageList.Nodes.First.Value.Address - DramMemoryMap.DramBase;
+ return ref _context.Memory.GetRef<T>(address + offset);
+ }
+
+ throw new NotImplementedException("Non-contiguous shared memory is not yet supported.");
+ }
+ else
+ {
+ return ref _borrowerMemory.GetRef<T>(_borrowerVa + offset);
+ }
+ }
+
+ public IEnumerable<HostMemoryRange> GetRanges()
+ {
+ if (_borrowerMemory == null)
+ {
+ var ranges = new List<HostMemoryRange>();
+
+ foreach (KPageNode pageNode in _pageList)
+ {
+ ulong address = pageNode.Address - DramMemoryMap.DramBase;
+ ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
+
+ ranges.Add(new HostMemoryRange(_context.Memory.GetPointer(address, size), size));
+ }
+
+ return ranges;
+ }
+ else
+ {
+ return _borrowerMemory.GetPhysicalRegions(_borrowerVa, _size);
+ }
+ }
+
+ public KPageList GetPageList()
+ {
+ return _pageList;
+ }
+ }
+}