Diffstat (limited to 'src/Ryujinx.HLE/HOS/Kernel/Memory')
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/AddressSpaceType.cs          10
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs              18
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KCodeMemory.cs               169
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs              156
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs       288
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs    19
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryInfo.cs                36
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs             65
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs      242
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KPageBitmap.cs               298
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KPageHeap.cs                 283
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs                  97
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KPageNode.cs                  14
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs                229
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs           3043
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs            27
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs              75
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KSlabHeap.cs                  50
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs           130
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryAttribute.cs            22
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryFillValue.cs            10
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryPermission.cs           20
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryRegion.cs               10
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryState.cs                50
-rw-r--r--  src/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs        49
25 files changed, 5410 insertions, 0 deletions
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/AddressSpaceType.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/AddressSpaceType.cs
new file mode 100644
index 00000000..8395c577
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/AddressSpaceType.cs
@@ -0,0 +1,10 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ enum AddressSpaceType
+ {
+ Addr32Bits = 0,
+ Addr36Bits = 1,
+ Addr32BitsNoMap = 2,
+ Addr39Bits = 3
+ }
+}
\ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
new file mode 100644
index 00000000..4941d5b7
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
@@ -0,0 +1,18 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ static class DramMemoryMap
+ {
+ public const ulong DramBase = 0x80000000;
+
+ public const ulong KernelReserveBase = DramBase + 0x60000;
+
+ public const ulong SlabHeapBase = KernelReserveBase + 0x85000;
+        public const ulong SlabHeapSize = 0xa21000;
+        public const ulong SlabHeapEnd = SlabHeapBase + SlabHeapSize;
+
+ public static bool IsHeapPhysicalAddress(ulong address)
+ {
+ return address >= SlabHeapEnd;
+ }
+ }
+}
\ No newline at end of file
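
The constants above pin a fixed kernel carve-out at the start of DRAM; IsHeapPhysicalAddress treats everything at or above SlabHeapEnd as page-heap-backed memory. A standalone sketch of the layout arithmetic (values copied from the file above; the class name is hypothetical):

using System;

static class DramLayoutCheck
{
    const ulong DramBase = 0x80000000;
    const ulong KernelReserveBase = DramBase + 0x60000;
    const ulong SlabHeapBase = KernelReserveBase + 0x85000;
    const ulong SlabHeapSize = 0xa21000;
    const ulong SlabHeapEnd = SlabHeapBase + SlabHeapSize;

    static bool IsHeapPhysicalAddress(ulong address) => address >= SlabHeapEnd;

    static void Main()
    {
        // 0x80000000 + 0x60000 + 0x85000 + 0xa21000 = 0x81b06000.
        Console.WriteLine($"SlabHeapEnd = 0x{SlabHeapEnd:x}");      // 0x81b06000
        Console.WriteLine(IsHeapPhysicalAddress(SlabHeapEnd));      // True
        Console.WriteLine(IsHeapPhysicalAddress(SlabHeapEnd - 1));  // False
    }
}
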
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KCodeMemory.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KCodeMemory.cs
new file mode 100644
index 00000000..11474e49
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KCodeMemory.cs
@@ -0,0 +1,169 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Horizon.Common;
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KCodeMemory : KAutoObject
+ {
+ public KProcess Owner { get; private set; }
+ private readonly KPageList _pageList;
+ private readonly object _lock;
+ private ulong _address;
+ private bool _isOwnerMapped;
+ private bool _isMapped;
+
+ public KCodeMemory(KernelContext context) : base(context)
+ {
+ _pageList = new KPageList();
+ _lock = new object();
+ }
+
+ public Result Initialize(ulong address, ulong size)
+ {
+ Owner = KernelStatic.GetCurrentProcess();
+
+ Result result = Owner.MemoryManager.BorrowCodeMemory(_pageList, address, size);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ Owner.CpuMemory.Fill(address, size, 0xff);
+ Owner.IncrementReferenceCount();
+
+ _address = address;
+ _isMapped = false;
+ _isOwnerMapped = false;
+
+ return Result.Success;
+ }
+
+ public Result Map(ulong address, ulong size, KMemoryPermission perm)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ lock (_lock)
+ {
+ if (_isMapped)
+ {
+ return KernelResult.InvalidState;
+ }
+
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ Result result = process.MemoryManager.MapPages(address, _pageList, MemoryState.CodeWritable, KMemoryPermission.ReadAndWrite);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _isMapped = true;
+ }
+
+ return Result.Success;
+ }
+
+ public Result MapToOwner(ulong address, ulong size, KMemoryPermission permission)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ lock (_lock)
+ {
+ if (_isOwnerMapped)
+ {
+ return KernelResult.InvalidState;
+ }
+
+ Debug.Assert(permission == KMemoryPermission.Read || permission == KMemoryPermission.ReadAndExecute);
+
+ Result result = Owner.MemoryManager.MapPages(address, _pageList, MemoryState.CodeReadOnly, permission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _isOwnerMapped = true;
+ }
+
+ return Result.Success;
+ }
+
+ public Result Unmap(ulong address, ulong size)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, (ulong)KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ lock (_lock)
+ {
+ KProcess process = KernelStatic.GetCurrentProcess();
+
+ Result result = process.MemoryManager.UnmapPages(address, _pageList, MemoryState.CodeWritable);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ Debug.Assert(_isMapped);
+
+ _isMapped = false;
+ }
+
+ return Result.Success;
+ }
+
+ public Result UnmapFromOwner(ulong address, ulong size)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ lock (_lock)
+ {
+ Result result = Owner.MemoryManager.UnmapPages(address, _pageList, MemoryState.CodeReadOnly);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ Debug.Assert(_isOwnerMapped);
+
+ _isOwnerMapped = false;
+ }
+
+ return Result.Success;
+ }
+
+ protected override void Destroy()
+ {
+ if (!_isMapped && !_isOwnerMapped)
+ {
+ ulong size = _pageList.GetPagesCount() * KPageTableBase.PageSize;
+
+ if (Owner.MemoryManager.UnborrowCodeMemory(_address, size, _pageList) != Result.Success)
+ {
+                throw new InvalidOperationException("Unexpected failure restoring code memory attributes.");
+ }
+ }
+
+ Owner.DecrementReferenceCount();
+ }
+ }
+}
\ No newline at end of file
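
KCodeMemory above tracks two independent views of the same borrowed pages: a CodeWritable mapping in the process that writes the code, and a CodeReadOnly (or executable) mapping back in the owner. A minimal standalone model of the _isMapped/_isOwnerMapped guards, with simplified types that are illustrative rather than the Ryujinx API:

using System;

// Toy model of KCodeMemory's double-mapping guards (illustrative only).
class CodeMemoryModel
{
    bool _isMapped;        // CodeWritable mapping in the borrowing process
    bool _isOwnerMapped;   // CodeReadOnly mapping back in the owner process

    public bool TryMap()            { if (_isMapped) return false;       _isMapped = true;       return true; }
    public bool TryMapToOwner()     { if (_isOwnerMapped) return false;  _isOwnerMapped = true;  return true; }
    public bool TryUnmap()          { if (!_isMapped) return false;      _isMapped = false;      return true; }
    public bool TryUnmapFromOwner() { if (!_isOwnerMapped) return false; _isOwnerMapped = false; return true; }

    // Destroy() only returns the pages to the owner when neither view is mapped.
    public bool CanUnborrow() => !_isMapped && !_isOwnerMapped;

    static void Main()
    {
        var mem = new CodeMemoryModel();
        Console.WriteLine(mem.TryMap());       // True  (first mapping succeeds)
        Console.WriteLine(mem.TryMap());       // False (InvalidState in the real code)
        Console.WriteLine(mem.TryUnmap());     // True
        Console.WriteLine(mem.CanUnborrow());  // True  (safe to restore the pages)
    }
}
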
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
new file mode 100644
index 00000000..e082105b
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
@@ -0,0 +1,156 @@
+using Ryujinx.Common.Collections;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KMemoryBlock : IntrusiveRedBlackTreeNode<KMemoryBlock>, IComparable<KMemoryBlock>, IComparable<ulong>
+ {
+ public ulong BaseAddress { get; private set; }
+ public ulong PagesCount { get; private set; }
+
+ public MemoryState State { get; private set; }
+ public KMemoryPermission Permission { get; private set; }
+ public MemoryAttribute Attribute { get; private set; }
+ public KMemoryPermission SourcePermission { get; private set; }
+
+ public int IpcRefCount { get; private set; }
+ public int DeviceRefCount { get; private set; }
+
+ public KMemoryBlock(
+ ulong baseAddress,
+ ulong pagesCount,
+ MemoryState state,
+ KMemoryPermission permission,
+ MemoryAttribute attribute,
+ int ipcRefCount = 0,
+ int deviceRefCount = 0)
+ {
+ BaseAddress = baseAddress;
+ PagesCount = pagesCount;
+ State = state;
+ Attribute = attribute;
+ Permission = permission;
+ IpcRefCount = ipcRefCount;
+ DeviceRefCount = deviceRefCount;
+ }
+
+ public void SetState(KMemoryPermission permission, MemoryState state, MemoryAttribute attribute)
+ {
+ Permission = permission;
+ State = state;
+ Attribute &= MemoryAttribute.IpcAndDeviceMapped;
+ Attribute |= attribute;
+ }
+
+ public void SetIpcMappingPermission(KMemoryPermission newPermission)
+ {
+ int oldIpcRefCount = IpcRefCount++;
+
+ if ((ushort)IpcRefCount == 0)
+ {
+ throw new InvalidOperationException("IPC reference count increment overflowed.");
+ }
+
+ if (oldIpcRefCount == 0)
+ {
+ SourcePermission = Permission;
+
+ Permission &= ~KMemoryPermission.ReadAndWrite;
+ Permission |= KMemoryPermission.ReadAndWrite & newPermission;
+ }
+
+ Attribute |= MemoryAttribute.IpcMapped;
+ }
+
+ public void RestoreIpcMappingPermission()
+ {
+ int oldIpcRefCount = IpcRefCount--;
+
+ if (oldIpcRefCount == 0)
+ {
+ throw new InvalidOperationException("IPC reference count decrement underflowed.");
+ }
+
+ if (oldIpcRefCount == 1)
+ {
+ Permission = SourcePermission;
+
+ SourcePermission = KMemoryPermission.None;
+
+ Attribute &= ~MemoryAttribute.IpcMapped;
+ }
+ }
+
+ public KMemoryBlock SplitRightAtAddress(ulong address)
+ {
+ ulong leftAddress = BaseAddress;
+
+ ulong leftPagesCount = (address - leftAddress) / KPageTableBase.PageSize;
+
+ BaseAddress = address;
+
+ PagesCount -= leftPagesCount;
+
+ return new KMemoryBlock(
+ leftAddress,
+ leftPagesCount,
+ State,
+ Permission,
+ Attribute,
+ IpcRefCount,
+ DeviceRefCount);
+ }
+
+ public void AddPages(ulong pagesCount)
+ {
+ PagesCount += pagesCount;
+ }
+
+ public KMemoryInfo GetInfo()
+ {
+ ulong size = PagesCount * KPageTableBase.PageSize;
+
+ return new KMemoryInfo(
+ BaseAddress,
+ size,
+ State,
+ Permission,
+ Attribute,
+ SourcePermission,
+ IpcRefCount,
+ DeviceRefCount);
+ }
+
+ public int CompareTo(KMemoryBlock other)
+ {
+ if (BaseAddress < other.BaseAddress)
+ {
+ return -1;
+ }
+ else if (BaseAddress <= other.BaseAddress + other.PagesCount * KPageTableBase.PageSize - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+
+ public int CompareTo(ulong address)
+ {
+ if (address < BaseAddress)
+ {
+ return 1;
+ }
+ else if (address <= BaseAddress + PagesCount * KPageTableBase.PageSize - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+ }
+ }
+}
\ No newline at end of file
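
Note the interval semantics of CompareTo(ulong) above: it returns 0 for any address that falls inside the block, which is what lets the intrusive red-black tree's key lookup double as a range query. A standalone check of that comparison logic (reimplemented here for illustration):

using System;

class BlockCompareDemo
{
    const ulong PageSize = 0x1000;

    // Mirrors KMemoryBlock.CompareTo(ulong): 0 means "address is inside this block".
    static int CompareTo(ulong baseAddress, ulong pagesCount, ulong address)
    {
        if (address < baseAddress)
        {
            return 1;
        }
        else if (address <= baseAddress + pagesCount * PageSize - 1UL)
        {
            return 0;
        }
        else
        {
            return -1;
        }
    }

    static void Main()
    {
        // Block covering [0x10000, 0x14000): 4 pages starting at 0x10000.
        Console.WriteLine(CompareTo(0x10000, 4, 0x0F000)); //  1 (address before the block)
        Console.WriteLine(CompareTo(0x10000, 4, 0x10000)); //  0 (first byte)
        Console.WriteLine(CompareTo(0x10000, 4, 0x13FFF)); //  0 (last byte)
        Console.WriteLine(CompareTo(0x10000, 4, 0x14000)); // -1 (past the end)
    }
}
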
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs
new file mode 100644
index 00000000..e9146aeb
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs
@@ -0,0 +1,288 @@
+using Ryujinx.Common.Collections;
+using Ryujinx.Horizon.Common;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KMemoryBlockManager
+ {
+ private const int PageSize = KPageTableBase.PageSize;
+
+ private readonly IntrusiveRedBlackTree<KMemoryBlock> _blockTree;
+
+ public int BlocksCount => _blockTree.Count;
+
+ private KMemoryBlockSlabManager _slabManager;
+
+ private ulong _addrSpaceStart;
+ private ulong _addrSpaceEnd;
+
+ public KMemoryBlockManager()
+ {
+ _blockTree = new IntrusiveRedBlackTree<KMemoryBlock>();
+ }
+
+ public Result Initialize(ulong addrSpaceStart, ulong addrSpaceEnd, KMemoryBlockSlabManager slabManager)
+ {
+ _slabManager = slabManager;
+ _addrSpaceStart = addrSpaceStart;
+ _addrSpaceEnd = addrSpaceEnd;
+
+ // First insertion will always need only a single block, because there's nothing to split.
+ if (!slabManager.CanAllocate(1))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong addrSpacePagesCount = (addrSpaceEnd - addrSpaceStart) / PageSize;
+
+ _blockTree.Add(new KMemoryBlock(
+ addrSpaceStart,
+ addrSpacePagesCount,
+ MemoryState.Unmapped,
+ KMemoryPermission.None,
+ MemoryAttribute.None));
+
+ return Result.Success;
+ }
+
+ public void InsertBlock(
+ ulong baseAddress,
+ ulong pagesCount,
+ MemoryState oldState,
+ KMemoryPermission oldPermission,
+ MemoryAttribute oldAttribute,
+ MemoryState newState,
+ KMemoryPermission newPermission,
+ MemoryAttribute newAttribute)
+ {
+            // Insert a new block into the list only in areas where the state
+            // of the existing block matches the state specified by the old*
+            // arguments; otherwise, leave it as is.
+
+ int oldCount = _blockTree.Count;
+
+ oldAttribute |= MemoryAttribute.IpcAndDeviceMapped;
+
+ ulong endAddr = baseAddress + pagesCount * PageSize;
+
+ KMemoryBlock currBlock = FindBlock(baseAddress);
+
+ while (currBlock != null)
+ {
+ ulong currBaseAddr = currBlock.BaseAddress;
+ ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
+
+ if (baseAddress < currEndAddr && currBaseAddr < endAddr)
+ {
+ MemoryAttribute currBlockAttr = currBlock.Attribute | MemoryAttribute.IpcAndDeviceMapped;
+
+ if (currBlock.State != oldState ||
+ currBlock.Permission != oldPermission ||
+ currBlockAttr != oldAttribute)
+ {
+ currBlock = currBlock.Successor;
+
+ continue;
+ }
+
+ if (baseAddress > currBaseAddr)
+ {
+ KMemoryBlock newBlock = currBlock.SplitRightAtAddress(baseAddress);
+ _blockTree.Add(newBlock);
+ }
+
+ if (endAddr < currEndAddr)
+ {
+ KMemoryBlock newBlock = currBlock.SplitRightAtAddress(endAddr);
+ _blockTree.Add(newBlock);
+ currBlock = newBlock;
+ }
+
+ currBlock.SetState(newPermission, newState, newAttribute);
+
+ currBlock = MergeEqualStateNeighbors(currBlock);
+ }
+
+ if (currEndAddr - 1 >= endAddr - 1)
+ {
+ break;
+ }
+
+ currBlock = currBlock.Successor;
+ }
+
+ _slabManager.Count += _blockTree.Count - oldCount;
+
+ ValidateInternalState();
+ }
+
+ public void InsertBlock(
+ ulong baseAddress,
+ ulong pagesCount,
+ MemoryState state,
+ KMemoryPermission permission = KMemoryPermission.None,
+ MemoryAttribute attribute = MemoryAttribute.None)
+ {
+            // Inserts a new block into the list, replacing and splitting
+            // existing blocks as needed.
+
+ int oldCount = _blockTree.Count;
+
+ ulong endAddr = baseAddress + pagesCount * PageSize;
+
+ KMemoryBlock currBlock = FindBlock(baseAddress);
+
+ while (currBlock != null)
+ {
+ ulong currBaseAddr = currBlock.BaseAddress;
+ ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
+
+ if (baseAddress < currEndAddr && currBaseAddr < endAddr)
+ {
+ if (baseAddress > currBaseAddr)
+ {
+ KMemoryBlock newBlock = currBlock.SplitRightAtAddress(baseAddress);
+ _blockTree.Add(newBlock);
+ }
+
+ if (endAddr < currEndAddr)
+ {
+ KMemoryBlock newBlock = currBlock.SplitRightAtAddress(endAddr);
+ _blockTree.Add(newBlock);
+ currBlock = newBlock;
+ }
+
+ currBlock.SetState(permission, state, attribute);
+
+ currBlock = MergeEqualStateNeighbors(currBlock);
+ }
+
+ if (currEndAddr - 1 >= endAddr - 1)
+ {
+ break;
+ }
+
+ currBlock = currBlock.Successor;
+ }
+
+ _slabManager.Count += _blockTree.Count - oldCount;
+
+ ValidateInternalState();
+ }
+
+ public delegate void BlockMutator(KMemoryBlock block, KMemoryPermission newPerm);
+
+ public void InsertBlock(
+ ulong baseAddress,
+ ulong pagesCount,
+ BlockMutator blockMutate,
+ KMemoryPermission permission = KMemoryPermission.None)
+ {
+            // Inserts a new block into the list, replacing and splitting
+            // existing blocks as needed, then calls the mutation callback
+            // on the affected blocks.
+
+ int oldCount = _blockTree.Count;
+
+ ulong endAddr = baseAddress + pagesCount * PageSize;
+
+ KMemoryBlock currBlock = FindBlock(baseAddress);
+
+ while (currBlock != null)
+ {
+ ulong currBaseAddr = currBlock.BaseAddress;
+ ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
+
+ if (baseAddress < currEndAddr && currBaseAddr < endAddr)
+ {
+ if (baseAddress > currBaseAddr)
+ {
+ KMemoryBlock newBlock = currBlock.SplitRightAtAddress(baseAddress);
+ _blockTree.Add(newBlock);
+ }
+
+ if (endAddr < currEndAddr)
+ {
+ KMemoryBlock newBlock = currBlock.SplitRightAtAddress(endAddr);
+ _blockTree.Add(newBlock);
+ currBlock = newBlock;
+ }
+
+ blockMutate(currBlock, permission);
+
+ currBlock = MergeEqualStateNeighbors(currBlock);
+ }
+
+ if (currEndAddr - 1 >= endAddr - 1)
+ {
+ break;
+ }
+
+ currBlock = currBlock.Successor;
+ }
+
+ _slabManager.Count += _blockTree.Count - oldCount;
+
+ ValidateInternalState();
+ }
+
+ [Conditional("DEBUG")]
+ private void ValidateInternalState()
+ {
+ ulong expectedAddress = 0;
+
+ KMemoryBlock currBlock = FindBlock(_addrSpaceStart);
+
+ while (currBlock != null)
+ {
+ Debug.Assert(currBlock.BaseAddress == expectedAddress);
+
+ expectedAddress = currBlock.BaseAddress + currBlock.PagesCount * PageSize;
+
+ currBlock = currBlock.Successor;
+ }
+
+ Debug.Assert(expectedAddress == _addrSpaceEnd);
+ }
+
+ private KMemoryBlock MergeEqualStateNeighbors(KMemoryBlock block)
+ {
+ KMemoryBlock previousBlock = block.Predecessor;
+ KMemoryBlock nextBlock = block.Successor;
+
+ if (previousBlock != null && BlockStateEquals(block, previousBlock))
+ {
+ _blockTree.Remove(block);
+
+ previousBlock.AddPages(block.PagesCount);
+
+ block = previousBlock;
+ }
+
+ if (nextBlock != null && BlockStateEquals(block, nextBlock))
+ {
+ _blockTree.Remove(nextBlock);
+
+ block.AddPages(nextBlock.PagesCount);
+ }
+
+ return block;
+ }
+
+ private static bool BlockStateEquals(KMemoryBlock lhs, KMemoryBlock rhs)
+ {
+ return lhs.State == rhs.State &&
+ lhs.Permission == rhs.Permission &&
+ lhs.Attribute == rhs.Attribute &&
+ lhs.SourcePermission == rhs.SourcePermission &&
+ lhs.DeviceRefCount == rhs.DeviceRefCount &&
+ lhs.IpcRefCount == rhs.IpcRefCount;
+ }
+
+ public KMemoryBlock FindBlock(ulong address)
+ {
+ return _blockTree.GetNodeByKey(address);
+ }
+ }
+}
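
The three InsertBlock overloads above share one split-then-merge shape: clip the affected blocks at both ends of the target range, rewrite the state of what falls inside, then merge neighbors whose state became equal. A toy standalone model of that flow, using a sorted list in place of the red-black tree and plain strings for states:

using System;
using System.Collections.Generic;
using System.Linq;

class InsertBlockDemo
{
    const ulong PageSize = 0x1000;

    record Block(ulong Base, ulong Pages, string State);

    static List<Block> Insert(List<Block> blocks, ulong addr, ulong pages, string state)
    {
        ulong end = addr + pages * PageSize;
        var pieces = new List<Block>();

        foreach (var b in blocks)
        {
            ulong bEnd = b.Base + b.Pages * PageSize;
            if (bEnd <= addr || b.Base >= end) { pieces.Add(b); continue; }
            // Overlap: keep the parts outside the inserted range (the splits).
            if (b.Base < addr) pieces.Add(new Block(b.Base, (addr - b.Base) / PageSize, b.State));
            if (bEnd > end) pieces.Add(new Block(end, (bEnd - end) / PageSize, b.State));
        }

        pieces.Add(new Block(addr, pages, state));

        // Merge adjacent equal-state blocks, like MergeEqualStateNeighbors.
        var merged = new List<Block>();
        foreach (var b in pieces.OrderBy(x => x.Base))
        {
            Block last = merged.Count != 0 ? merged[^1] : null;
            if (last != null && last.State == b.State && last.Base + last.Pages * PageSize == b.Base)
            {
                merged[^1] = last with { Pages = last.Pages + b.Pages };
            }
            else
            {
                merged.Add(b);
            }
        }

        return merged;
    }

    static void Main()
    {
        // One Unmapped block covering 64 pages, then map 16 pages in the middle.
        var blocks = new List<Block> { new(0x0, 64, "Unmapped") };
        blocks = Insert(blocks, 0x10000, 16, "Heap");

        foreach (var b in blocks)
        {
            Console.WriteLine($"0x{b.Base:x8}: {b.Pages,2} pages, {b.State}");
        }
        // Output: 16 pages Unmapped, 16 pages Heap, 32 pages Unmapped.
    }
}
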
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs
new file mode 100644
index 00000000..8732b507
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs
@@ -0,0 +1,19 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KMemoryBlockSlabManager
+ {
+ private ulong _capacityElements;
+
+ public int Count { get; set; }
+
+ public KMemoryBlockSlabManager(ulong capacityElements)
+ {
+ _capacityElements = capacityElements;
+ }
+
+ public bool CanAllocate(int count)
+ {
+ return (ulong)(Count + count) <= _capacityElements;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryInfo.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryInfo.cs
new file mode 100644
index 00000000..af070ac2
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryInfo.cs
@@ -0,0 +1,36 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KMemoryInfo
+ {
+ public ulong Address { get; }
+ public ulong Size { get; }
+
+ public MemoryState State { get; }
+ public KMemoryPermission Permission { get; }
+ public MemoryAttribute Attribute { get; }
+ public KMemoryPermission SourcePermission { get; }
+
+ public int IpcRefCount { get; }
+ public int DeviceRefCount { get; }
+
+ public KMemoryInfo(
+ ulong address,
+ ulong size,
+ MemoryState state,
+ KMemoryPermission permission,
+ MemoryAttribute attribute,
+ KMemoryPermission sourcePermission,
+ int ipcRefCount,
+ int deviceRefCount)
+ {
+ Address = address;
+ Size = size;
+ State = state;
+ Permission = permission;
+ Attribute = attribute;
+ SourcePermission = sourcePermission;
+ IpcRefCount = ipcRefCount;
+ DeviceRefCount = deviceRefCount;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
new file mode 100644
index 00000000..6d0a1658
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
@@ -0,0 +1,65 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KMemoryManager
+ {
+ public KMemoryRegionManager[] MemoryRegions { get; }
+
+ public KMemoryManager(MemorySize size, MemoryArrange arrange)
+ {
+ MemoryRegions = KernelInit.GetMemoryRegions(size, arrange);
+ }
+
+ private KMemoryRegionManager GetMemoryRegion(ulong address)
+ {
+ for (int i = 0; i < MemoryRegions.Length; i++)
+ {
+ var region = MemoryRegions[i];
+
+ if (address >= region.Address && address < region.EndAddr)
+ {
+ return region;
+ }
+ }
+
+ return null;
+ }
+
+ public void IncrementPagesReferenceCount(ulong address, ulong pagesCount)
+ {
+ IncrementOrDecrementPagesReferenceCount(address, pagesCount, true);
+ }
+
+ public void DecrementPagesReferenceCount(ulong address, ulong pagesCount)
+ {
+ IncrementOrDecrementPagesReferenceCount(address, pagesCount, false);
+ }
+
+ private void IncrementOrDecrementPagesReferenceCount(ulong address, ulong pagesCount, bool increment)
+ {
+ while (pagesCount != 0)
+ {
+ var region = GetMemoryRegion(address);
+
+ ulong countToProcess = Math.Min(pagesCount, region.GetPageOffsetFromEnd(address));
+
+ lock (region)
+ {
+ if (increment)
+ {
+ region.IncrementPagesReferenceCount(address, countToProcess);
+ }
+ else
+ {
+ region.DecrementPagesReferenceCount(address, countToProcess);
+ }
+ }
+
+ pagesCount -= countToProcess;
+ address += countToProcess * KPageTableBase.PageSize;
+ }
+ }
+ }
+}
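
The walk above clamps each step with GetPageOffsetFromEnd, so a page range that straddles two memory regions is processed in region-sized chunks. A standalone sketch of the clamping arithmetic, with invented region bounds:

using System;

class RegionWalkDemo
{
    const ulong PageSize = 0x1000;

    static void Main()
    {
        // Two invented regions: [0x80000000, 0x80010000) and [0x80010000, 0x80020000).
        (ulong Start, ulong End)[] regions = { (0x80000000, 0x80010000), (0x80010000, 0x80020000) };

        ulong address = 0x8000E000;  // 2 pages before the first region ends
        ulong pagesCount = 6;        // walk crosses into the second region

        while (pagesCount != 0)
        {
            var region = Array.Find(regions, r => address >= r.Start && address < r.End);
            ulong pagesToEnd = (region.End - address) / PageSize;  // GetPageOffsetFromEnd
            ulong countToProcess = Math.Min(pagesCount, pagesToEnd);

            Console.WriteLine($"process {countToProcess} pages at 0x{address:x}");

            pagesCount -= countToProcess;
            address += countToProcess * PageSize;
        }
        // Output: "process 2 pages at 0x8000e000" then "process 4 pages at 0x80010000".
    }
}
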
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
new file mode 100644
index 00000000..4596b15d
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
@@ -0,0 +1,242 @@
+using Ryujinx.Horizon.Common;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KMemoryRegionManager
+ {
+ private readonly KPageHeap _pageHeap;
+
+ public ulong Address { get; }
+ public ulong Size { get; }
+ public ulong EndAddr => Address + Size;
+
+ private readonly ushort[] _pageReferenceCounts;
+
+ public KMemoryRegionManager(ulong address, ulong size, ulong endAddr)
+ {
+ Address = address;
+ Size = size;
+
+ _pageReferenceCounts = new ushort[size / KPageTableBase.PageSize];
+
+ _pageHeap = new KPageHeap(address, size);
+ _pageHeap.Free(address, size / KPageTableBase.PageSize);
+ _pageHeap.UpdateUsedSize();
+ }
+
+ public Result AllocatePages(out KPageList pageList, ulong pagesCount)
+ {
+ if (pagesCount == 0)
+ {
+ pageList = new KPageList();
+
+ return Result.Success;
+ }
+
+ lock (_pageHeap)
+ {
+ Result result = AllocatePagesImpl(out pageList, pagesCount, false);
+
+ if (result == Result.Success)
+ {
+ foreach (var node in pageList)
+ {
+ IncrementPagesReferenceCount(node.Address, node.PagesCount);
+ }
+ }
+
+ return result;
+ }
+ }
+
+ public ulong AllocatePagesContiguous(KernelContext context, ulong pagesCount, bool backwards)
+ {
+ if (pagesCount == 0)
+ {
+ return 0;
+ }
+
+ lock (_pageHeap)
+ {
+ ulong address = AllocatePagesContiguousImpl(pagesCount, 1, backwards);
+
+ if (address != 0)
+ {
+ IncrementPagesReferenceCount(address, pagesCount);
+ context.CommitMemory(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize);
+ }
+
+ return address;
+ }
+ }
+
+ private Result AllocatePagesImpl(out KPageList pageList, ulong pagesCount, bool random)
+ {
+ pageList = new KPageList();
+
+ int heapIndex = KPageHeap.GetBlockIndex(pagesCount);
+
+ if (heapIndex < 0)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ for (int index = heapIndex; index >= 0; index--)
+ {
+ ulong pagesPerAlloc = KPageHeap.GetBlockPagesCount(index);
+
+ while (pagesCount >= pagesPerAlloc)
+ {
+ ulong allocatedBlock = _pageHeap.AllocateBlock(index, random);
+
+ if (allocatedBlock == 0)
+ {
+ break;
+ }
+
+ Result result = pageList.AddRange(allocatedBlock, pagesPerAlloc);
+
+ if (result != Result.Success)
+ {
+ FreePages(pageList);
+ _pageHeap.Free(allocatedBlock, pagesPerAlloc);
+
+ return result;
+ }
+
+ pagesCount -= pagesPerAlloc;
+ }
+ }
+
+ if (pagesCount != 0)
+ {
+ FreePages(pageList);
+
+ return KernelResult.OutOfMemory;
+ }
+
+ return Result.Success;
+ }
+
+ private ulong AllocatePagesContiguousImpl(ulong pagesCount, ulong alignPages, bool random)
+ {
+ int heapIndex = KPageHeap.GetAlignedBlockIndex(pagesCount, alignPages);
+
+ ulong allocatedBlock = _pageHeap.AllocateBlock(heapIndex, random);
+
+ if (allocatedBlock == 0)
+ {
+ return 0;
+ }
+
+ ulong allocatedPages = KPageHeap.GetBlockPagesCount(heapIndex);
+
+ if (allocatedPages > pagesCount)
+ {
+ _pageHeap.Free(allocatedBlock + pagesCount * KPageTableBase.PageSize, allocatedPages - pagesCount);
+ }
+
+ return allocatedBlock;
+ }
+
+ public void FreePage(ulong address)
+ {
+ lock (_pageHeap)
+ {
+ _pageHeap.Free(address, 1);
+ }
+ }
+
+ public void FreePages(KPageList pageList)
+ {
+ lock (_pageHeap)
+ {
+ foreach (KPageNode pageNode in pageList)
+ {
+ _pageHeap.Free(pageNode.Address, pageNode.PagesCount);
+ }
+ }
+ }
+
+ public void FreePages(ulong address, ulong pagesCount)
+ {
+ lock (_pageHeap)
+ {
+ _pageHeap.Free(address, pagesCount);
+ }
+ }
+
+ public ulong GetFreePages()
+ {
+ lock (_pageHeap)
+ {
+ return _pageHeap.GetFreePagesCount();
+ }
+ }
+
+ public void IncrementPagesReferenceCount(ulong address, ulong pagesCount)
+ {
+ ulong index = GetPageOffset(address);
+ ulong endIndex = index + pagesCount;
+
+ while (index < endIndex)
+ {
+ ushort referenceCount = ++_pageReferenceCounts[index];
+ Debug.Assert(referenceCount >= 1);
+
+ index++;
+ }
+ }
+
+ public void DecrementPagesReferenceCount(ulong address, ulong pagesCount)
+ {
+ ulong index = GetPageOffset(address);
+ ulong endIndex = index + pagesCount;
+
+ ulong freeBaseIndex = 0;
+ ulong freePagesCount = 0;
+
+ while (index < endIndex)
+ {
+ Debug.Assert(_pageReferenceCounts[index] > 0);
+ ushort referenceCount = --_pageReferenceCounts[index];
+
+ if (referenceCount == 0)
+ {
+ if (freePagesCount != 0)
+ {
+ freePagesCount++;
+ }
+ else
+ {
+ freeBaseIndex = index;
+ freePagesCount = 1;
+ }
+ }
+ else if (freePagesCount != 0)
+ {
+ FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
+ freePagesCount = 0;
+ }
+
+ index++;
+ }
+
+ if (freePagesCount != 0)
+ {
+ FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
+ }
+ }
+
+ public ulong GetPageOffset(ulong address)
+ {
+ return (address - Address) / KPageTableBase.PageSize;
+ }
+
+ public ulong GetPageOffsetFromEnd(ulong address)
+ {
+ return (EndAddr - address) / KPageTableBase.PageSize;
+ }
+ }
+}
\ No newline at end of file
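
Note how DecrementPagesReferenceCount batches contiguous pages whose count reaches zero into a single FreePages call instead of freeing page by page. A standalone sketch of that run coalescing, with invented reference counts:

using System;
using System.Collections.Generic;

class CoalescedFreeDemo
{
    static void Main()
    {
        // Per-page reference counts for 8 pages; decrement each of them once.
        ushort[] refCounts = { 1, 1, 2, 1, 1, 1, 2, 1 };
        var freedRuns = new List<(int Base, int Count)>();

        int freeBase = 0, freeCount = 0;
        for (int i = 0; i < refCounts.Length; i++)
        {
            if (--refCounts[i] == 0)
            {
                if (freeCount != 0) freeCount++;
                else { freeBase = i; freeCount = 1; }
            }
            else if (freeCount != 0)
            {
                freedRuns.Add((freeBase, freeCount));  // flush the pending run
                freeCount = 0;
            }
        }
        if (freeCount != 0) freedRuns.Add((freeBase, freeCount));

        foreach (var run in freedRuns)
        {
            Console.WriteLine($"free pages [{run.Base}, {run.Base + run.Count})");
        }
        // Output: [0, 2), [3, 6), [7, 8) -- pages 2 and 6 remain referenced.
    }
}
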
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageBitmap.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageBitmap.cs
new file mode 100644
index 00000000..fa090b02
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageBitmap.cs
@@ -0,0 +1,298 @@
+using Ryujinx.Common;
+using System;
+using System.Numerics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KPageBitmap
+ {
+ private struct RandomNumberGenerator
+ {
+ private uint _entropy;
+ private uint _bitsAvailable;
+
+ private void RefreshEntropy()
+ {
+ _entropy = 0;
+ _bitsAvailable = sizeof(uint) * 8;
+ }
+
+ private bool GenerateRandomBit()
+ {
+ if (_bitsAvailable == 0)
+ {
+ RefreshEntropy();
+ }
+
+ bool bit = (_entropy & 1) != 0;
+
+ _entropy >>= 1;
+ _bitsAvailable--;
+
+ return bit;
+ }
+
+ public int SelectRandomBit(ulong bitmap)
+ {
+ int selected = 0;
+
+ int bitsCount = UInt64BitSize / 2;
+ ulong mask = (1UL << bitsCount) - 1;
+
+ while (bitsCount != 0)
+ {
+ ulong low = bitmap & mask;
+ ulong high = (bitmap >> bitsCount) & mask;
+
+ bool chooseLow;
+
+ if (high == 0)
+ {
+ chooseLow = true;
+ }
+ else if (low == 0)
+ {
+ chooseLow = false;
+ }
+ else
+ {
+ chooseLow = GenerateRandomBit();
+ }
+
+ if (chooseLow)
+ {
+ bitmap = low;
+ }
+ else
+ {
+ bitmap = high;
+ selected += bitsCount;
+ }
+
+ bitsCount /= 2;
+ mask >>= bitsCount;
+ }
+
+ return selected;
+ }
+ }
+
+ private const int UInt64BitSize = sizeof(ulong) * 8;
+ private const int MaxDepth = 4;
+
+ private readonly RandomNumberGenerator _rng;
+ private readonly ArraySegment<ulong>[] _bitStorages;
+ private int _usedDepths;
+
+ public int BitsCount { get; private set; }
+
+ public int HighestDepthIndex => _usedDepths - 1;
+
+ public KPageBitmap()
+ {
+ _rng = new RandomNumberGenerator();
+ _bitStorages = new ArraySegment<ulong>[MaxDepth];
+ }
+
+ public ArraySegment<ulong> Initialize(ArraySegment<ulong> storage, ulong size)
+ {
+ _usedDepths = GetRequiredDepth(size);
+
+ for (int depth = HighestDepthIndex; depth >= 0; depth--)
+ {
+ _bitStorages[depth] = storage;
+ size = BitUtils.DivRoundUp<ulong>(size, (ulong)UInt64BitSize);
+ storage = storage.Slice((int)size);
+ }
+
+ return storage;
+ }
+
+ public ulong FindFreeBlock(bool random)
+ {
+ ulong offset = 0;
+ int depth = 0;
+
+ if (random)
+ {
+ do
+ {
+ ulong v = _bitStorages[depth][(int)offset];
+
+ if (v == 0)
+ {
+ return ulong.MaxValue;
+ }
+
+ offset = offset * UInt64BitSize + (ulong)_rng.SelectRandomBit(v);
+ }
+ while (++depth < _usedDepths);
+ }
+ else
+ {
+ do
+ {
+ ulong v = _bitStorages[depth][(int)offset];
+
+ if (v == 0)
+ {
+ return ulong.MaxValue;
+ }
+
+ offset = offset * UInt64BitSize + (ulong)BitOperations.TrailingZeroCount(v);
+ }
+ while (++depth < _usedDepths);
+ }
+
+ return offset;
+ }
+
+ public void SetBit(ulong offset)
+ {
+ SetBit(HighestDepthIndex, offset);
+ BitsCount++;
+ }
+
+ public void ClearBit(ulong offset)
+ {
+ ClearBit(HighestDepthIndex, offset);
+ BitsCount--;
+ }
+
+ public bool ClearRange(ulong offset, int count)
+ {
+ int depth = HighestDepthIndex;
+ var bits = _bitStorages[depth];
+
+ int bitInd = (int)(offset / UInt64BitSize);
+
+ if (count < UInt64BitSize)
+ {
+ int shift = (int)(offset % UInt64BitSize);
+
+ ulong mask = ((1UL << count) - 1) << shift;
+
+ ulong v = bits[bitInd];
+
+ if ((v & mask) != mask)
+ {
+ return false;
+ }
+
+ v &= ~mask;
+ bits[bitInd] = v;
+
+ if (v == 0)
+ {
+ ClearBit(depth - 1, (ulong)bitInd);
+ }
+ }
+ else
+ {
+ int remaining = count;
+ int i = 0;
+
+ do
+ {
+ if (bits[bitInd + i++] != ulong.MaxValue)
+ {
+ return false;
+ }
+
+ remaining -= UInt64BitSize;
+ }
+ while (remaining > 0);
+
+ remaining = count;
+ i = 0;
+
+ do
+ {
+ bits[bitInd + i] = 0;
+ ClearBit(depth - 1, (ulong)(bitInd + i));
+ i++;
+ remaining -= UInt64BitSize;
+ }
+ while (remaining > 0);
+ }
+
+ BitsCount -= count;
+ return true;
+ }
+
+ private void SetBit(int depth, ulong offset)
+ {
+ while (depth >= 0)
+ {
+ int ind = (int)(offset / UInt64BitSize);
+ int which = (int)(offset % UInt64BitSize);
+
+ ulong mask = 1UL << which;
+
+ ulong v = _bitStorages[depth][ind];
+
+ _bitStorages[depth][ind] = v | mask;
+
+ if (v != 0)
+ {
+ break;
+ }
+
+ offset = (ulong)ind;
+ depth--;
+ }
+ }
+
+ private void ClearBit(int depth, ulong offset)
+ {
+ while (depth >= 0)
+ {
+ int ind = (int)(offset / UInt64BitSize);
+ int which = (int)(offset % UInt64BitSize);
+
+ ulong mask = 1UL << which;
+
+ ulong v = _bitStorages[depth][ind];
+
+ v &= ~mask;
+
+ _bitStorages[depth][ind] = v;
+
+ if (v != 0)
+ {
+ break;
+ }
+
+ offset = (ulong)ind;
+ depth--;
+ }
+ }
+
+ private static int GetRequiredDepth(ulong regionSize)
+ {
+ int depth = 0;
+
+ do
+ {
+ regionSize /= UInt64BitSize;
+ depth++;
+ }
+ while (regionSize != 0);
+
+ return depth;
+ }
+
+ public static int CalculateManagementOverheadSize(ulong regionSize)
+ {
+ int overheadBits = 0;
+
+ for (int depth = GetRequiredDepth(regionSize) - 1; depth >= 0; depth--)
+ {
+ regionSize = BitUtils.DivRoundUp<ulong>(regionSize, UInt64BitSize);
+ overheadBits += (int)regionSize;
+ }
+
+ return overheadBits * sizeof(ulong);
+ }
+ }
+}
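
KPageBitmap is a multi-level summary bitmap: each bit at one depth records whether the corresponding 64-bit word one level down has any bit set, so FindFreeBlock can descend in one word read per depth. A two-level standalone sketch of the non-random descent:

using System;
using System.Numerics;

class BitmapWalkDemo
{
    static void Main()
    {
        ulong[] bottom = new ulong[64];
        bottom[3] = 1UL << 17;         // one free block at offset 3 * 64 + 17

        // Build the summary word: bit i is set iff bottom[i] has any bit set.
        ulong top = 0;
        for (int i = 0; i < 64; i++)
        {
            if (bottom[i] != 0)
            {
                top |= 1UL << i;
            }
        }

        // Non-random path: take the lowest set bit at each level.
        ulong offset = (ulong)BitOperations.TrailingZeroCount(top);
        offset = offset * 64 + (ulong)BitOperations.TrailingZeroCount(bottom[(int)offset]);

        Console.WriteLine(offset);     // 209 == 3 * 64 + 17
    }
}
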
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageHeap.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageHeap.cs
new file mode 100644
index 00000000..c3586ed7
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageHeap.cs
@@ -0,0 +1,283 @@
+using Ryujinx.Common;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KPageHeap
+ {
+ private class Block
+ {
+ private KPageBitmap _bitmap = new KPageBitmap();
+ private ulong _heapAddress;
+ private ulong _endOffset;
+
+ public int Shift { get; private set; }
+ public int NextShift { get; private set; }
+ public ulong Size => 1UL << Shift;
+ public int PagesCount => (int)(Size / KPageTableBase.PageSize);
+ public int FreeBlocksCount => _bitmap.BitsCount;
+ public int FreePagesCount => FreeBlocksCount * PagesCount;
+
+ public ArraySegment<ulong> Initialize(ulong address, ulong size, int blockShift, int nextBlockShift, ArraySegment<ulong> bitStorage)
+ {
+ Shift = blockShift;
+ NextShift = nextBlockShift;
+
+ ulong endAddress = address + size;
+
+ ulong align = nextBlockShift != 0
+ ? 1UL << nextBlockShift
+ : 1UL << blockShift;
+
+ address = BitUtils.AlignDown(address, align);
+ endAddress = BitUtils.AlignUp (endAddress, align);
+
+ _heapAddress = address;
+ _endOffset = (endAddress - address) / (1UL << blockShift);
+
+ return _bitmap.Initialize(bitStorage, _endOffset);
+ }
+
+ public ulong PushBlock(ulong address)
+ {
+ ulong offset = (address - _heapAddress) >> Shift;
+
+ _bitmap.SetBit(offset);
+
+ if (NextShift != 0)
+ {
+ int diff = 1 << (NextShift - Shift);
+
+ offset = BitUtils.AlignDown(offset, (ulong)diff);
+
+ if (_bitmap.ClearRange(offset, diff))
+ {
+ return _heapAddress + (offset << Shift);
+ }
+ }
+
+ return 0;
+ }
+
+ public ulong PopBlock(bool random)
+ {
+ long sOffset = (long)_bitmap.FindFreeBlock(random);
+
+ if (sOffset < 0L)
+ {
+ return 0;
+ }
+
+ ulong offset = (ulong)sOffset;
+
+ _bitmap.ClearBit(offset);
+
+ return _heapAddress + (offset << Shift);
+ }
+
+ public static int CalculateManagementOverheadSize(ulong regionSize, int currBlockShift, int nextBlockShift)
+ {
+ ulong currBlockSize = 1UL << currBlockShift;
+ ulong nextBlockSize = 1UL << nextBlockShift;
+ ulong align = nextBlockShift != 0 ? nextBlockSize : currBlockSize;
+ return KPageBitmap.CalculateManagementOverheadSize((align * 2 + BitUtils.AlignUp(regionSize, align)) / currBlockSize);
+ }
+ }
+
+ private static readonly int[] _memoryBlockPageShifts = new int[] { 12, 16, 21, 22, 25, 29, 30 };
+
+ private readonly ulong _heapAddress;
+ private readonly ulong _heapSize;
+ private ulong _usedSize;
+ private readonly int _blocksCount;
+ private readonly Block[] _blocks;
+
+ public KPageHeap(ulong address, ulong size) : this(address, size, _memoryBlockPageShifts)
+ {
+ }
+
+ public KPageHeap(ulong address, ulong size, int[] blockShifts)
+ {
+ _heapAddress = address;
+ _heapSize = size;
+ _blocksCount = blockShifts.Length;
+            _blocks = new Block[blockShifts.Length];
+
+ var currBitmapStorage = new ArraySegment<ulong>(new ulong[CalculateManagementOverheadSize(size, blockShifts)]);
+
+ for (int i = 0; i < blockShifts.Length; i++)
+ {
+ int currBlockShift = blockShifts[i];
+ int nextBlockShift = i != blockShifts.Length - 1 ? blockShifts[i + 1] : 0;
+
+ _blocks[i] = new Block();
+
+ currBitmapStorage = _blocks[i].Initialize(address, size, currBlockShift, nextBlockShift, currBitmapStorage);
+ }
+ }
+
+ public void UpdateUsedSize()
+ {
+ _usedSize = _heapSize - (GetFreePagesCount() * KPageTableBase.PageSize);
+ }
+
+ public ulong GetFreePagesCount()
+ {
+ ulong freeCount = 0;
+
+ for (int i = 0; i < _blocksCount; i++)
+ {
+ freeCount += (ulong)_blocks[i].FreePagesCount;
+ }
+
+ return freeCount;
+ }
+
+ public ulong AllocateBlock(int index, bool random)
+ {
+ ulong neededSize = _blocks[index].Size;
+
+ for (int i = index; i < _blocksCount; i++)
+ {
+ ulong address = _blocks[i].PopBlock(random);
+
+ if (address != 0)
+ {
+ ulong allocatedSize = _blocks[i].Size;
+
+ if (allocatedSize > neededSize)
+ {
+ Free(address + neededSize, (allocatedSize - neededSize) / KPageTableBase.PageSize);
+ }
+
+ return address;
+ }
+ }
+
+ return 0;
+ }
+
+ private void FreeBlock(ulong block, int index)
+ {
+ do
+ {
+ block = _blocks[index++].PushBlock(block);
+ }
+ while (block != 0);
+ }
+
+ public void Free(ulong address, ulong pagesCount)
+ {
+ if (pagesCount == 0)
+ {
+ return;
+ }
+
+ int bigIndex = _blocksCount - 1;
+
+ ulong start = address;
+ ulong end = address + pagesCount * KPageTableBase.PageSize;
+ ulong beforeStart = start;
+ ulong beforeEnd = start;
+ ulong afterStart = end;
+ ulong afterEnd = end;
+
+ while (bigIndex >= 0)
+ {
+ ulong blockSize = _blocks[bigIndex].Size;
+
+ ulong bigStart = BitUtils.AlignUp (start, blockSize);
+ ulong bigEnd = BitUtils.AlignDown(end, blockSize);
+
+ if (bigStart < bigEnd)
+ {
+ for (ulong block = bigStart; block < bigEnd; block += blockSize)
+ {
+ FreeBlock(block, bigIndex);
+ }
+
+ beforeEnd = bigStart;
+ afterStart = bigEnd;
+
+ break;
+ }
+
+ bigIndex--;
+ }
+
+ for (int i = bigIndex - 1; i >= 0; i--)
+ {
+ ulong blockSize = _blocks[i].Size;
+
+ while (beforeStart + blockSize <= beforeEnd)
+ {
+ beforeEnd -= blockSize;
+ FreeBlock(beforeEnd, i);
+ }
+ }
+
+ for (int i = bigIndex - 1; i >= 0; i--)
+ {
+ ulong blockSize = _blocks[i].Size;
+
+ while (afterStart + blockSize <= afterEnd)
+ {
+ FreeBlock(afterStart, i);
+ afterStart += blockSize;
+ }
+ }
+ }
+
+ public static int GetAlignedBlockIndex(ulong pagesCount, ulong alignPages)
+ {
+ ulong targetPages = Math.Max(pagesCount, alignPages);
+
+ for (int i = 0; i < _memoryBlockPageShifts.Length; i++)
+ {
+ if (targetPages <= GetBlockPagesCount(i))
+ {
+ return i;
+ }
+ }
+
+ return -1;
+ }
+
+ public static int GetBlockIndex(ulong pagesCount)
+ {
+ for (int i = _memoryBlockPageShifts.Length - 1; i >= 0; i--)
+ {
+ if (pagesCount >= GetBlockPagesCount(i))
+ {
+ return i;
+ }
+ }
+
+ return -1;
+ }
+
+ public static ulong GetBlockSize(int index)
+ {
+ return 1UL << _memoryBlockPageShifts[index];
+ }
+
+ public static ulong GetBlockPagesCount(int index)
+ {
+ return GetBlockSize(index) / KPageTableBase.PageSize;
+ }
+
+ private static int CalculateManagementOverheadSize(ulong regionSize, int[] blockShifts)
+ {
+ int overheadSize = 0;
+
+ for (int i = 0; i < blockShifts.Length; i++)
+ {
+ int currBlockShift = blockShifts[i];
+ int nextBlockShift = i != blockShifts.Length - 1 ? blockShifts[i + 1] : 0;
+ overheadSize += Block.CalculateManagementOverheadSize(regionSize, currBlockShift, nextBlockShift);
+ }
+
+ return BitUtils.AlignUp(overheadSize, KPageTableBase.PageSize);
+ }
+ }
+}
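
The page heap manages free blocks at the shifts {12, 16, 21, 22, 25, 29, 30}, i.e. block sizes from 4 KiB up to 1 GiB. GetBlockIndex picks the largest block size that still fits inside the request, and AllocatePagesImpl then walks downward filling the remainder with smaller blocks. A standalone sketch of the index selection:

using System;

class BlockIndexDemo
{
    static readonly int[] Shifts = { 12, 16, 21, 22, 25, 29, 30 };
    const ulong PageSize = 0x1000;

    static ulong BlockPages(int i) => (1UL << Shifts[i]) / PageSize;

    // Largest block whose page count does not exceed the request (GetBlockIndex).
    static int GetBlockIndex(ulong pages)
    {
        for (int i = Shifts.Length - 1; i >= 0; i--)
        {
            if (pages >= BlockPages(i)) return i;
        }
        return -1;
    }

    static void Main()
    {
        // 0x11 pages (68 KiB): allocation starts from the 64 KiB block list.
        Console.WriteLine(GetBlockIndex(0x11));  // 1
        // A single-page request maps to the 4 KiB block list.
        Console.WriteLine(GetBlockIndex(1));     // 0
        // A zero-page request cannot be satisfied.
        Console.WriteLine(GetBlockIndex(0));     // -1
    }
}
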
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs
new file mode 100644
index 00000000..3149faa9
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs
@@ -0,0 +1,97 @@
+using Ryujinx.Horizon.Common;
+using System.Collections;
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KPageList : IEnumerable<KPageNode>
+ {
+ public LinkedList<KPageNode> Nodes { get; }
+
+ public KPageList()
+ {
+ Nodes = new LinkedList<KPageNode>();
+ }
+
+ public Result AddRange(ulong address, ulong pagesCount)
+ {
+ if (pagesCount != 0)
+ {
+ if (Nodes.Last != null)
+ {
+ KPageNode lastNode = Nodes.Last.Value;
+
+ if (lastNode.Address + lastNode.PagesCount * KPageTableBase.PageSize == address)
+ {
+ address = lastNode.Address;
+ pagesCount += lastNode.PagesCount;
+
+ Nodes.RemoveLast();
+ }
+ }
+
+ Nodes.AddLast(new KPageNode(address, pagesCount));
+ }
+
+ return Result.Success;
+ }
+
+ public ulong GetPagesCount()
+ {
+ ulong sum = 0;
+
+ foreach (KPageNode node in Nodes)
+ {
+ sum += node.PagesCount;
+ }
+
+ return sum;
+ }
+
+ public bool IsEqual(KPageList other)
+ {
+ LinkedListNode<KPageNode> thisNode = Nodes.First;
+ LinkedListNode<KPageNode> otherNode = other.Nodes.First;
+
+ while (thisNode != null && otherNode != null)
+ {
+ if (thisNode.Value.Address != otherNode.Value.Address ||
+ thisNode.Value.PagesCount != otherNode.Value.PagesCount)
+ {
+ return false;
+ }
+
+ thisNode = thisNode.Next;
+ otherNode = otherNode.Next;
+ }
+
+ return thisNode == null && otherNode == null;
+ }
+
+ public void IncrementPagesReferenceCount(KMemoryManager manager)
+ {
+ foreach (var node in this)
+ {
+ manager.IncrementPagesReferenceCount(node.Address, node.PagesCount);
+ }
+ }
+
+ public void DecrementPagesReferenceCount(KMemoryManager manager)
+ {
+ foreach (var node in this)
+ {
+ manager.DecrementPagesReferenceCount(node.Address, node.PagesCount);
+ }
+ }
+
+ public IEnumerator<KPageNode> GetEnumerator()
+ {
+ return Nodes.GetEnumerator();
+ }
+
+ IEnumerator IEnumerable.GetEnumerator()
+ {
+ return GetEnumerator();
+ }
+ }
+}
\ No newline at end of file
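
AddRange above coalesces with the tail node whenever the new range is physically contiguous with it, keeping the list short. A standalone model of that tail merging (value tuples instead of KPageNode):

using System;
using System.Collections.Generic;

class PageListDemo
{
    const ulong PageSize = 0x1000;

    static void Main()
    {
        var nodes = new LinkedList<(ulong Address, ulong Pages)>();

        void AddRange(ulong address, ulong pages)
        {
            if (pages == 0) return;
            if (nodes.Last != null)
            {
                var last = nodes.Last.Value;
                if (last.Address + last.Pages * PageSize == address)
                {
                    // Contiguous with the tail node: extend it instead of appending.
                    address = last.Address;
                    pages += last.Pages;
                    nodes.RemoveLast();
                }
            }
            nodes.AddLast((address, pages));
        }

        AddRange(0x1000, 2);  // [0x1000, 0x3000)
        AddRange(0x3000, 3);  // contiguous -> merged into [0x1000, 0x6000)
        AddRange(0x8000, 1);  // gap -> new node

        foreach (var n in nodes)
        {
            Console.WriteLine($"0x{n.Address:x}: {n.Pages} pages");
        }
        // Output: 0x1000: 5 pages, then 0x8000: 1 pages.
    }
}
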
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageNode.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageNode.cs
new file mode 100644
index 00000000..ada41687
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageNode.cs
@@ -0,0 +1,14 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ struct KPageNode
+ {
+ public ulong Address;
+ public ulong PagesCount;
+
+ public KPageNode(ulong address, ulong pagesCount)
+ {
+ Address = address;
+ PagesCount = pagesCount;
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
new file mode 100644
index 00000000..28e9f90a
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
@@ -0,0 +1,229 @@
+using Ryujinx.Horizon.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KPageTable : KPageTableBase
+ {
+ private readonly IVirtualMemoryManager _cpuMemory;
+
+ protected override bool Supports4KBPages => _cpuMemory.Supports4KBPages;
+
+ public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
+ {
+ _cpuMemory = cpuMemory;
+ }
+
+ /// <inheritdoc/>
+ protected override IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ return _cpuMemory.GetHostRegions(va, size);
+ }
+
+ /// <inheritdoc/>
+ protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
+ {
+ var ranges = _cpuMemory.GetPhysicalRegions(va, size);
+ foreach (var range in ranges)
+ {
+ pageList.AddRange(range.Address + DramMemoryMap.DramBase, range.Size / PageSize);
+ }
+ }
+
+ /// <inheritdoc/>
+ protected override ReadOnlySpan<byte> GetSpan(ulong va, int size)
+ {
+ return _cpuMemory.GetSpan(va, size);
+ }
+
+ /// <inheritdoc/>
+ protected override Result MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
+ {
+ KPageList pageList = new KPageList();
+ GetPhysicalRegions(src, pagesCount * PageSize, pageList);
+
+ Result result = Reprotect(src, pagesCount, KMemoryPermission.None);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ result = MapPages(dst, pageList, newDstPermission, MemoryMapFlags.Private, false, 0);
+
+ if (result != Result.Success)
+ {
+ Result reprotectResult = Reprotect(src, pagesCount, oldSrcPermission);
+ Debug.Assert(reprotectResult == Result.Success);
+ }
+
+ return result;
+ }
+
+ /// <inheritdoc/>
+ protected override Result UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
+ {
+ ulong size = pagesCount * PageSize;
+
+ KPageList srcPageList = new KPageList();
+ KPageList dstPageList = new KPageList();
+
+ GetPhysicalRegions(src, size, srcPageList);
+ GetPhysicalRegions(dst, size, dstPageList);
+
+ if (!dstPageList.IsEqual(srcPageList))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ Result result = Unmap(dst, pagesCount);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ result = Reprotect(src, pagesCount, newSrcPermission);
+
+ if (result != Result.Success)
+ {
+ Result mapResult = MapPages(dst, dstPageList, oldDstPermission, MemoryMapFlags.Private, false, 0);
+ Debug.Assert(mapResult == Result.Success);
+ }
+
+ return result;
+ }
+
+ /// <inheritdoc/>
+ protected override Result MapPages(
+ ulong dstVa,
+ ulong pagesCount,
+ ulong srcPa,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages,
+ byte fillValue)
+ {
+ ulong size = pagesCount * PageSize;
+
+ Context.CommitMemory(srcPa - DramMemoryMap.DramBase, size);
+
+ _cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size, flags);
+
+ if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
+ {
+ Context.MemoryManager.IncrementPagesReferenceCount(srcPa, pagesCount);
+ }
+
+ if (shouldFillPages)
+ {
+ _cpuMemory.Fill(dstVa, size, fillValue);
+ }
+
+ return Result.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override Result MapPages(
+ ulong address,
+ KPageList pageList,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages,
+ byte fillValue)
+ {
+ using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
+
+ ulong currentVa = address;
+
+ foreach (var pageNode in pageList)
+ {
+ ulong addr = pageNode.Address - DramMemoryMap.DramBase;
+ ulong size = pageNode.PagesCount * PageSize;
+
+ Context.CommitMemory(addr, size);
+
+ _cpuMemory.Map(currentVa, addr, size, flags);
+
+ if (shouldFillPages)
+ {
+ _cpuMemory.Fill(currentVa, size, fillValue);
+ }
+
+ currentVa += size;
+ }
+
+ scopedPageList.SignalSuccess();
+
+ return Result.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size)
+ {
+ ulong offset = 0;
+
+ foreach (var region in regions)
+ {
+ _cpuMemory.MapForeign(va + offset, region.Address, region.Size);
+
+ offset += region.Size;
+ }
+
+ return Result.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override Result Unmap(ulong address, ulong pagesCount)
+ {
+ KPageList pagesToClose = new KPageList();
+
+ var regions = _cpuMemory.GetPhysicalRegions(address, pagesCount * PageSize);
+
+ foreach (var region in regions)
+ {
+ ulong pa = region.Address + DramMemoryMap.DramBase;
+ if (DramMemoryMap.IsHeapPhysicalAddress(pa))
+ {
+ pagesToClose.AddRange(pa, region.Size / PageSize);
+ }
+ }
+
+ _cpuMemory.Unmap(address, pagesCount * PageSize);
+
+ pagesToClose.DecrementPagesReferenceCount(Context.MemoryManager);
+
+ return Result.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override Result Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission)
+ {
+ // TODO.
+ return Result.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override Result ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission)
+ {
+ // TODO.
+ return Result.Success;
+ }
+
+ /// <inheritdoc/>
+ protected override void SignalMemoryTracking(ulong va, ulong size, bool write)
+ {
+ _cpuMemory.SignalMemoryTracking(va, size, write);
+ }
+
+ /// <inheritdoc/>
+ protected override void Write(ulong va, ReadOnlySpan<byte> data)
+ {
+ _cpuMemory.Write(va, data);
+ }
+ }
+}
\ No newline at end of file
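
Throughout KPageTable, guest "physical" addresses are DRAM offsets shifted up by DramMemoryMap.DramBase, and every call into the host-side _cpuMemory subtracts DramBase first (see MapPages and Unmap above). A quick check of that translation, with an invented address:

using System;

class DramTranslationDemo
{
    const ulong DramBase = 0x80000000;  // DramMemoryMap.DramBase

    static void Main()
    {
        // A guest physical address handed to MapPages...
        ulong srcPa = 0x81b10000;
        // ...becomes this offset into the host backing memory.
        ulong hostOffset = srcPa - DramBase;
        Console.WriteLine($"0x{hostOffset:x}");                // 0x1b10000

        // Unmap goes the other way: host physical region -> guest PA.
        ulong regionAddress = 0x01b10000;
        Console.WriteLine($"0x{regionAddress + DramBase:x}");  // 0x81b10000
    }
}
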
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
new file mode 100644
index 00000000..614eb527
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
@@ -0,0 +1,3043 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Horizon.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ abstract class KPageTableBase
+ {
+ private static readonly int[] MappingUnitSizes = new int[]
+ {
+ 0x1000,
+ 0x10000,
+ 0x200000,
+ 0x400000,
+ 0x2000000,
+ 0x40000000
+ };
+
+ public const int PageSize = 0x1000;
+
+ private const int KMemoryBlockSize = 0x40;
+
+        // Inserting a range into the middle of an existing block splits it into
+        // up to three pieces; the original block object is reused for one of
+        // them, so at most two new blocks are needed per insertion.
+ private const int MaxBlocksNeededForInsertion = 2;
+
+ protected readonly KernelContext Context;
+ protected virtual bool Supports4KBPages => true;
+
+ public ulong AddrSpaceStart { get; private set; }
+ public ulong AddrSpaceEnd { get; private set; }
+
+ public ulong CodeRegionStart { get; private set; }
+ public ulong CodeRegionEnd { get; private set; }
+
+ public ulong HeapRegionStart { get; private set; }
+ public ulong HeapRegionEnd { get; private set; }
+
+ private ulong _currentHeapAddr;
+
+ public ulong AliasRegionStart { get; private set; }
+ public ulong AliasRegionEnd { get; private set; }
+
+ public ulong StackRegionStart { get; private set; }
+ public ulong StackRegionEnd { get; private set; }
+
+ public ulong TlsIoRegionStart { get; private set; }
+ public ulong TlsIoRegionEnd { get; private set; }
+
+ private ulong _heapCapacity;
+
+ public ulong PhysicalMemoryUsage { get; private set; }
+
+ private readonly KMemoryBlockManager _blockManager;
+
+ private MemoryRegion _memRegion;
+
+ private bool _aslrDisabled;
+
+ public int AddrSpaceWidth { get; private set; }
+
+ private bool _isKernel;
+
+ private bool _aslrEnabled;
+
+ private KMemoryBlockSlabManager _slabManager;
+
+ private int _contextId;
+
+ private MersenneTwister _randomNumberGenerator;
+
+ private MemoryFillValue _heapFillValue;
+ private MemoryFillValue _ipcFillValue;
+
+ public KPageTableBase(KernelContext context)
+ {
+ Context = context;
+
+ _blockManager = new KMemoryBlockManager();
+
+ _isKernel = false;
+
+ _heapFillValue = MemoryFillValue.Zero;
+ _ipcFillValue = MemoryFillValue.Zero;
+ }
+
+ private static readonly int[] AddrSpaceSizes = new int[] { 32, 36, 32, 39 };
+
+ public Result InitializeForProcess(
+ AddressSpaceType addrSpaceType,
+ bool aslrEnabled,
+ bool aslrDisabled,
+ MemoryRegion memRegion,
+ ulong address,
+ ulong size,
+ KMemoryBlockSlabManager slabManager)
+ {
+ if ((uint)addrSpaceType > (uint)AddressSpaceType.Addr39Bits)
+ {
+                throw new ArgumentException($"Invalid address space type \"{addrSpaceType}\".", nameof(addrSpaceType));
+ }
+
+ _contextId = Context.ContextIdManager.GetId();
+
+ ulong addrSpaceBase = 0;
+ ulong addrSpaceSize = 1UL << AddrSpaceSizes[(int)addrSpaceType];
+
+ Result result = CreateUserAddressSpace(
+ addrSpaceType,
+ aslrEnabled,
+ aslrDisabled,
+ addrSpaceBase,
+ addrSpaceSize,
+ memRegion,
+ address,
+ size,
+ slabManager);
+
+ if (result != Result.Success)
+ {
+ Context.ContextIdManager.PutId(_contextId);
+ }
+
+ return result;
+ }
+
+ private class Region
+ {
+ public ulong Start;
+ public ulong End;
+ public ulong Size;
+ public ulong AslrOffset;
+ }
+
+ private Result CreateUserAddressSpace(
+ AddressSpaceType addrSpaceType,
+ bool aslrEnabled,
+ bool aslrDisabled,
+ ulong addrSpaceStart,
+ ulong addrSpaceEnd,
+ MemoryRegion memRegion,
+ ulong address,
+ ulong size,
+ KMemoryBlockSlabManager slabManager)
+ {
+ ulong endAddr = address + size;
+
+ Region aliasRegion = new Region();
+ Region heapRegion = new Region();
+ Region stackRegion = new Region();
+ Region tlsIoRegion = new Region();
+
+ ulong codeRegionSize;
+ ulong stackAndTlsIoStart;
+ ulong stackAndTlsIoEnd;
+ ulong baseAddress;
+
+ switch (addrSpaceType)
+ {
+ case AddressSpaceType.Addr32Bits:
+ aliasRegion.Size = 0x40000000;
+ heapRegion.Size = 0x40000000;
+ stackRegion.Size = 0;
+ tlsIoRegion.Size = 0;
+ CodeRegionStart = 0x200000;
+ codeRegionSize = 0x3fe00000;
+ stackAndTlsIoStart = 0x200000;
+ stackAndTlsIoEnd = 0x40000000;
+ baseAddress = 0x200000;
+ AddrSpaceWidth = 32;
+ break;
+
+ case AddressSpaceType.Addr36Bits:
+ aliasRegion.Size = 0x180000000;
+ heapRegion.Size = 0x180000000;
+ stackRegion.Size = 0;
+ tlsIoRegion.Size = 0;
+ CodeRegionStart = 0x8000000;
+ codeRegionSize = 0x78000000;
+ stackAndTlsIoStart = 0x8000000;
+ stackAndTlsIoEnd = 0x80000000;
+ baseAddress = 0x8000000;
+ AddrSpaceWidth = 36;
+ break;
+
+ case AddressSpaceType.Addr32BitsNoMap:
+ aliasRegion.Size = 0;
+ heapRegion.Size = 0x80000000;
+ stackRegion.Size = 0;
+ tlsIoRegion.Size = 0;
+ CodeRegionStart = 0x200000;
+ codeRegionSize = 0x3fe00000;
+ stackAndTlsIoStart = 0x200000;
+ stackAndTlsIoEnd = 0x40000000;
+ baseAddress = 0x200000;
+ AddrSpaceWidth = 32;
+ break;
+
+ case AddressSpaceType.Addr39Bits:
+ aliasRegion.Size = 0x1000000000;
+ heapRegion.Size = 0x180000000;
+ stackRegion.Size = 0x80000000;
+ tlsIoRegion.Size = 0x1000000000;
+ CodeRegionStart = BitUtils.AlignDown<ulong>(address, 0x200000);
+ codeRegionSize = BitUtils.AlignUp<ulong>(endAddr, 0x200000) - CodeRegionStart;
+ stackAndTlsIoStart = 0;
+ stackAndTlsIoEnd = 0;
+ baseAddress = 0x8000000;
+ AddrSpaceWidth = 39;
+ break;
+
+                default: throw new ArgumentException($"Invalid address space type \"{addrSpaceType}\".", nameof(addrSpaceType));
+ }
+
+ CodeRegionEnd = CodeRegionStart + codeRegionSize;
+
+ ulong mapBaseAddress;
+ ulong mapAvailableSize;
+
+ if (CodeRegionStart - baseAddress >= addrSpaceEnd - CodeRegionEnd)
+ {
+ // Has more space before the start of the code region.
+ mapBaseAddress = baseAddress;
+ mapAvailableSize = CodeRegionStart - baseAddress;
+ }
+ else
+ {
+ // Has more space after the end of the code region.
+ mapBaseAddress = CodeRegionEnd;
+ mapAvailableSize = addrSpaceEnd - CodeRegionEnd;
+ }
+
+ ulong mapTotalSize = aliasRegion.Size + heapRegion.Size + stackRegion.Size + tlsIoRegion.Size;
+
+ ulong aslrMaxOffset = mapAvailableSize - mapTotalSize;
+
+ _aslrEnabled = aslrEnabled;
+
+ AddrSpaceStart = addrSpaceStart;
+ AddrSpaceEnd = addrSpaceEnd;
+
+ _slabManager = slabManager;
+
+ if (mapAvailableSize < mapTotalSize)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ if (aslrEnabled)
+ {
+ aliasRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+ heapRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+ stackRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+ tlsIoRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21;
+ }
+
+            // Regions are sorted based on ASLR offset.
+            // When ASLR is disabled, the order is Alias, Heap, Stack and TlsIo.
+ aliasRegion.Start = mapBaseAddress + aliasRegion.AslrOffset;
+ aliasRegion.End = aliasRegion.Start + aliasRegion.Size;
+ heapRegion.Start = mapBaseAddress + heapRegion.AslrOffset;
+ heapRegion.End = heapRegion.Start + heapRegion.Size;
+ stackRegion.Start = mapBaseAddress + stackRegion.AslrOffset;
+ stackRegion.End = stackRegion.Start + stackRegion.Size;
+ tlsIoRegion.Start = mapBaseAddress + tlsIoRegion.AslrOffset;
+ tlsIoRegion.End = tlsIoRegion.Start + tlsIoRegion.Size;
+
+ SortRegion(heapRegion, aliasRegion);
+
+ if (stackRegion.Size != 0)
+ {
+ SortRegion(stackRegion, aliasRegion);
+ SortRegion(stackRegion, heapRegion);
+ }
+ else
+ {
+ stackRegion.Start = stackAndTlsIoStart;
+ stackRegion.End = stackAndTlsIoEnd;
+ }
+
+ if (tlsIoRegion.Size != 0)
+ {
+ SortRegion(tlsIoRegion, aliasRegion);
+ SortRegion(tlsIoRegion, heapRegion);
+ SortRegion(tlsIoRegion, stackRegion);
+ }
+ else
+ {
+ tlsIoRegion.Start = stackAndTlsIoStart;
+ tlsIoRegion.End = stackAndTlsIoEnd;
+ }
+
+ AliasRegionStart = aliasRegion.Start;
+ AliasRegionEnd = aliasRegion.End;
+ HeapRegionStart = heapRegion.Start;
+ HeapRegionEnd = heapRegion.End;
+ StackRegionStart = stackRegion.Start;
+ StackRegionEnd = stackRegion.End;
+ TlsIoRegionStart = tlsIoRegion.Start;
+ TlsIoRegionEnd = tlsIoRegion.End;
+
+ // TODO: Check kernel configuration via secure monitor call when implemented to set memory fill values.
+
+ _currentHeapAddr = HeapRegionStart;
+ _heapCapacity = 0;
+ PhysicalMemoryUsage = 0;
+
+ _memRegion = memRegion;
+ _aslrDisabled = aslrDisabled;
+
+ return _blockManager.Initialize(addrSpaceStart, addrSpaceEnd, slabManager);
+ }
+
+ private ulong GetRandomValue(ulong min, ulong max)
+ {
+ return (ulong)GetRandomValue((long)min, (long)max);
+ }
+
+ private long GetRandomValue(long min, long max)
+ {
+ if (_randomNumberGenerator == null)
+ {
+ _randomNumberGenerator = new MersenneTwister(0);
+ }
+
+ return _randomNumberGenerator.GenRandomNumber(min, max);
+ }
+
+ private static void SortRegion(Region lhs, Region rhs)
+ {
+ if (lhs.AslrOffset < rhs.AslrOffset)
+ {
+ rhs.Start += lhs.Size;
+ rhs.End += lhs.Size;
+ }
+ else
+ {
+ lhs.Start += rhs.Size;
+ lhs.End += rhs.Size;
+ }
+ }
+
+ public Result MapPages(ulong address, KPageList pageList, MemoryState state, KMemoryPermission permission)
+ {
+ ulong pagesCount = pageList.GetPagesCount();
+
+ ulong size = pagesCount * PageSize;
+
+ if (!CanContain(address, size, state))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (!IsUnmapped(address, pagesCount * PageSize))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ Result result = MapPages(address, pageList, permission, MemoryMapFlags.None);
+
+ if (result == Result.Success)
+ {
+ _blockManager.InsertBlock(address, pagesCount, state, permission);
+ }
+
+ return result;
+ }
+ }
+
+ public Result UnmapPages(ulong address, KPageList pageList, MemoryState stateExpected)
+ {
+ ulong pagesCount = pageList.GetPagesCount();
+ ulong size = pagesCount * PageSize;
+
+ ulong endAddr = address + size;
+
+ ulong addrSpacePagesCount = (AddrSpaceEnd - AddrSpaceStart) / PageSize;
+
+ if (AddrSpaceStart > address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (addrSpacePagesCount < pagesCount)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (endAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
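+ // The physical pages currently mapped at the given address must match the caller's page list exactly.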
+ KPageList currentPageList = new KPageList();
+
+ GetPhysicalRegions(address, size, currentPageList);
+
+ if (!currentPageList.IsEqual(pageList))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ stateExpected,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState state,
+ out _,
+ out _))
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ Result result = Unmap(address, pagesCount);
+
+ if (result == Result.Success)
+ {
+ _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+ }
+
+ return result;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result MapNormalMemory(long address, long size, KMemoryPermission permission)
+ {
+ // TODO.
+ return Result.Success;
+ }
+
+ public Result MapIoMemory(long address, long size, KMemoryPermission permission)
+ {
+ // TODO.
+ return Result.Success;
+ }
+
+ public Result MapPages(
+ ulong pagesCount,
+ int alignment,
+ ulong srcPa,
+ bool paIsValid,
+ ulong regionStart,
+ ulong regionPagesCount,
+ MemoryState state,
+ KMemoryPermission permission,
+ out ulong address)
+ {
+ address = 0;
+
+ ulong regionSize = regionPagesCount * PageSize;
+
+ if (!CanContain(regionStart, regionSize, state))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (regionPagesCount <= pagesCount)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ lock (_blockManager)
+ {
+ address = AllocateVa(regionStart, regionPagesCount, pagesCount, alignment);
+
+ if (address == 0)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ Result result;
+
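+ // With a valid physical address we map it directly; otherwise the range is backed by newly allocated pages.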
+ if (paIsValid)
+ {
+ result = MapPages(address, pagesCount, srcPa, permission, MemoryMapFlags.Private);
+ }
+ else
+ {
+ result = AllocateAndMapPages(address, pagesCount, permission);
+ }
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(address, pagesCount, state, permission);
+ }
+
+ return Result.Success;
+ }
+
+ public Result MapPages(ulong address, ulong pagesCount, MemoryState state, KMemoryPermission permission)
+ {
+ ulong size = pagesCount * PageSize;
+
+ if (!CanContain(address, size, state))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (!IsUnmapped(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ Result result = AllocateAndMapPages(address, pagesCount, permission);
+
+ if (result == Result.Success)
+ {
+ _blockManager.InsertBlock(address, pagesCount, state, permission);
+ }
+
+ return result;
+ }
+ }
+
+ private Result AllocateAndMapPages(ulong address, ulong pagesCount, KMemoryPermission permission)
+ {
+ KMemoryRegionManager region = GetMemoryRegionManager();
+
+ Result result = region.AllocatePages(out KPageList pageList, pagesCount);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
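+ // The mapping takes its own references to the pages, so the allocation reference is dropped on scope exit regardless of the result.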
+ using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
+
+ return MapPages(address, pageList, permission, MemoryMapFlags.Private);
+ }
+
+ public Result MapProcessCodeMemory(ulong dst, ulong src, ulong size)
+ {
+ lock (_blockManager)
+ {
+ bool success = CheckRange(
+ src,
+ size,
+ MemoryState.Mask,
+ MemoryState.Heap,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState state,
+ out KMemoryPermission permission,
+ out _);
+
+ success &= IsUnmapped(dst, size);
+
+ if (success)
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ Result result = MapMemory(src, dst, pagesCount, permission, KMemoryPermission.None);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(src, pagesCount, state, KMemoryPermission.None, MemoryAttribute.Borrowed);
+ _blockManager.InsertBlock(dst, pagesCount, MemoryState.ModCodeStatic);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result UnmapProcessCodeMemory(ulong dst, ulong src, ulong size)
+ {
+ lock (_blockManager)
+ {
+ bool success = CheckRange(
+ src,
+ size,
+ MemoryState.Mask,
+ MemoryState.Heap,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _);
+
+ success &= CheckRange(
+ dst,
+ PageSize,
+ MemoryState.UnmapProcessCodeMemoryAllowed,
+ MemoryState.UnmapProcessCodeMemoryAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState state,
+ out _,
+ out _);
+
+ success &= CheckRange(
+ dst,
+ size,
+ MemoryState.Mask,
+ state,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None);
+
+ if (success)
+ {
+ ulong pagesCount = size / PageSize;
+
+ Result result = Unmap(dst, pagesCount);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ // TODO: Missing some checks here.
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
+ _blockManager.InsertBlock(src, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result SetHeapSize(ulong size, out ulong address)
+ {
+ address = 0;
+
+ if (size > HeapRegionEnd - HeapRegionStart)
+ {
+ return KernelResult.OutOfMemory;
+ }
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ lock (_blockManager)
+ {
+ ulong currentHeapSize = GetHeapSize();
+
+ if (currentHeapSize <= size)
+ {
+ // Expand.
+ ulong sizeDelta = size - currentHeapSize;
+
+ if (currentProcess.ResourceLimit != null && sizeDelta != 0 &&
+ !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, sizeDelta))
+ {
+ return KernelResult.ResLimitExceeded;
+ }
+
+ ulong pagesCount = sizeDelta / PageSize;
+
+ KMemoryRegionManager region = GetMemoryRegionManager();
+
+ Result result = region.AllocatePages(out KPageList pageList, pagesCount);
+
+ using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
+
+ void CleanUpForError()
+ {
+ if (currentProcess.ResourceLimit != null && sizeDelta != 0)
+ {
+ currentProcess.ResourceLimit.Release(LimitableResource.Memory, sizeDelta);
+ }
+ }
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfResource;
+ }
+
+ if (!IsUnmapped(_currentHeapAddr, sizeDelta))
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ result = MapPages(_currentHeapAddr, pageList, KMemoryPermission.ReadAndWrite, MemoryMapFlags.Private, true, (byte)_heapFillValue);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ _blockManager.InsertBlock(_currentHeapAddr, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite);
+ }
+ else
+ {
+ // Shrink.
+ ulong freeAddr = HeapRegionStart + size;
+ ulong sizeDelta = currentHeapSize - size;
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ if (!CheckRange(
+ freeAddr,
+ sizeDelta,
+ MemoryState.Mask,
+ MemoryState.Heap,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong pagesCount = sizeDelta / PageSize;
+
+ Result result = Unmap(freeAddr, pagesCount);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ currentProcess.ResourceLimit?.Release(LimitableResource.Memory, sizeDelta);
+
+ _blockManager.InsertBlock(freeAddr, pagesCount, MemoryState.Unmapped);
+ }
+
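+ // The heap always occupies [HeapRegionStart, HeapRegionStart + size); track the new end address.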
+ _currentHeapAddr = HeapRegionStart + size;
+ }
+
+ address = HeapRegionStart;
+
+ return Result.Success;
+ }
+
+ public Result SetMemoryPermission(ulong address, ulong size, KMemoryPermission permission)
+ {
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.PermissionChangeAllowed,
+ MemoryState.PermissionChangeAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState oldState,
+ out KMemoryPermission oldPermission,
+ out _))
+ {
+ if (permission != oldPermission)
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ Result result = Reprotect(address, pagesCount, permission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(address, pagesCount, oldState, permission);
+ }
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public ulong GetTotalHeapSize()
+ {
+ lock (_blockManager)
+ {
+ return GetHeapSize() + PhysicalMemoryUsage;
+ }
+ }
+
+ private ulong GetHeapSize()
+ {
+ return _currentHeapAddr - HeapRegionStart;
+ }
+
+ public Result SetHeapCapacity(ulong capacity)
+ {
+ lock (_blockManager)
+ {
+ _heapCapacity = capacity;
+ }
+
+ return Result.Success;
+ }
+
+ public Result SetMemoryAttribute(
+ ulong address,
+ ulong size,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeValue)
+ {
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.AttributeChangeAllowed,
+ MemoryState.AttributeChangeAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.BorrowedAndIpcMapped,
+ MemoryAttribute.None,
+ MemoryAttribute.DeviceMappedAndUncached,
+ out MemoryState state,
+ out KMemoryPermission permission,
+ out MemoryAttribute attribute))
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ attribute &= ~attributeMask;
+ attribute |= attributeMask & attributeValue;
+
+ _blockManager.InsertBlock(address, pagesCount, state, permission, attribute);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public KMemoryInfo QueryMemory(ulong address)
+ {
+ if (address >= AddrSpaceStart &&
+ address < AddrSpaceEnd)
+ {
+ lock (_blockManager)
+ {
+ return _blockManager.FindBlock(address).GetInfo();
+ }
+ }
+ else
+ {
+ return new KMemoryInfo(
+ AddrSpaceEnd,
+ ~AddrSpaceEnd + 1,
+ MemoryState.Reserved,
+ KMemoryPermission.None,
+ MemoryAttribute.None,
+ KMemoryPermission.None,
+ 0,
+ 0);
+ }
+ }
+
+ public Result Map(ulong dst, ulong src, ulong size)
+ {
+ bool success;
+
+ lock (_blockManager)
+ {
+ success = CheckRange(
+ src,
+ size,
+ MemoryState.MapAllowed,
+ MemoryState.MapAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState srcState,
+ out _,
+ out _);
+
+ success &= IsUnmapped(dst, size);
+
+ if (success)
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ Result result = MapMemory(src, dst, pagesCount, KMemoryPermission.ReadAndWrite, KMemoryPermission.ReadAndWrite);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(src, pagesCount, srcState, KMemoryPermission.None, MemoryAttribute.Borrowed);
+ _blockManager.InsertBlock(dst, pagesCount, MemoryState.Stack, KMemoryPermission.ReadAndWrite);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result UnmapForKernel(ulong address, ulong pagesCount, MemoryState stateExpected)
+ {
+ ulong size = pagesCount * PageSize;
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ stateExpected,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _))
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ Result result = Unmap(address, pagesCount);
+
+ if (result == Result.Success)
+ {
+ _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+ }
+
+ return result;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result Unmap(ulong dst, ulong src, ulong size)
+ {
+ bool success;
+
+ lock (_blockManager)
+ {
+ success = CheckRange(
+ src,
+ size,
+ MemoryState.MapAllowed,
+ MemoryState.MapAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState srcState,
+ out _,
+ out _);
+
+ success &= CheckRange(
+ dst,
+ size,
+ MemoryState.Mask,
+ MemoryState.Stack,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out KMemoryPermission dstPermission,
+ out _);
+
+ if (success)
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ Result result = UnmapMemory(dst, src, pagesCount, dstPermission, KMemoryPermission.ReadAndWrite);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(src, pagesCount, srcState, KMemoryPermission.ReadAndWrite);
+ _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result UnmapProcessMemory(ulong dst, ulong size, KPageTableBase srcPageTable, ulong src)
+ {
+ lock (_blockManager)
+ {
+ lock (srcPageTable._blockManager)
+ {
+ bool success = CheckRange(
+ dst,
+ size,
+ MemoryState.Mask,
+ MemoryState.ProcessMemory,
+ KMemoryPermission.ReadAndWrite,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _);
+
+ success &= srcPageTable.CheckRange(
+ src,
+ size,
+ MemoryState.MapProcessAllowed,
+ MemoryState.MapProcessAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _);
+
+ if (!success)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ KPageList srcPageList = new KPageList();
+ KPageList dstPageList = new KPageList();
+
+ srcPageTable.GetPhysicalRegions(src, size, srcPageList);
+ GetPhysicalRegions(dst, size, dstPageList);
+
+ if (!dstPageList.IsEqual(srcPageList))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ Result result = Unmap(dst, pagesCount);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
+
+ return Result.Success;
+ }
+ }
+
+ public Result SetProcessMemoryPermission(ulong address, ulong size, KMemoryPermission permission)
+ {
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.ProcessPermissionChangeAllowed,
+ MemoryState.ProcessPermissionChangeAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState oldState,
+ out KMemoryPermission oldPermission,
+ out _))
+ {
+ MemoryState newState = oldState;
+
+ // If writing into the code region is allowed, then we need
+ // to change it to mutable.
+ if ((permission & KMemoryPermission.Write) != 0)
+ {
+ if (oldState == MemoryState.CodeStatic)
+ {
+ newState = MemoryState.CodeMutable;
+ }
+ else if (oldState == MemoryState.ModCodeStatic)
+ {
+ newState = MemoryState.ModCodeMutable;
+ }
+ else
+ {
+ throw new InvalidOperationException($"Memory state \"{oldState}\" not valid for this operation.");
+ }
+ }
+
+ if (newState != oldState || permission != oldPermission)
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong pagesCount = size / PageSize;
+
+ Result result;
+
+ if ((oldPermission & KMemoryPermission.Execute) != 0)
+ {
+ result = ReprotectWithAttributes(address, pagesCount, permission);
+ }
+ else
+ {
+ result = Reprotect(address, pagesCount, permission);
+ }
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ _blockManager.InsertBlock(address, pagesCount, newState, permission);
+ }
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result MapPhysicalMemory(ulong address, ulong size)
+ {
+ ulong endAddr = address + size;
+
+ lock (_blockManager)
+ {
+ ulong mappedSize = 0;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State != MemoryState.Unmapped)
+ {
+ mappedSize += GetSizeInRange(info, address, endAddr);
+ }
+ }
+
+ if (mappedSize == size)
+ {
+ return Result.Success;
+ }
+
+ ulong remainingSize = size - mappedSize;
+
+ ulong remainingPages = remainingSize / PageSize;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ if (currentProcess.ResourceLimit != null &&
+ !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, remainingSize))
+ {
+ return KernelResult.ResLimitExceeded;
+ }
+
+ KMemoryRegionManager region = GetMemoryRegionManager();
+
+ Result result = region.AllocatePages(out KPageList pageList, remainingPages);
+
+ using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
+
+ void CleanUpForError()
+ {
+ currentProcess.ResourceLimit?.Release(LimitableResource.Memory, remainingSize);
+ }
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfResource;
+ }
+
+ LinkedListNode<KPageNode> pageListNode = pageList.Nodes.First;
+
+ KPageNode pageNode = pageListNode.Value;
+
+ ulong srcPa = pageNode.Address;
+ ulong srcPaPages = pageNode.PagesCount;
+
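+ // Walk the unmapped holes in the range, consuming the allocated page list piece by piece.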
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State != MemoryState.Unmapped)
+ {
+ continue;
+ }
+
+ ulong blockSize = GetSizeInRange(info, address, endAddr);
+
+ ulong dstVaPages = blockSize / PageSize;
+
+ ulong dstVa = GetAddrInRange(info, address);
+
+ while (dstVaPages > 0)
+ {
+ if (srcPaPages == 0)
+ {
+ pageListNode = pageListNode.Next;
+
+ pageNode = pageListNode.Value;
+
+ srcPa = pageNode.Address;
+ srcPaPages = pageNode.PagesCount;
+ }
+
+ ulong currentPagesCount = Math.Min(srcPaPages, dstVaPages);
+
+ MapPages(dstVa, currentPagesCount, srcPa, KMemoryPermission.ReadAndWrite, MemoryMapFlags.Private);
+
+ dstVa += currentPagesCount * PageSize;
+ srcPa += currentPagesCount * PageSize;
+ srcPaPages -= currentPagesCount;
+ dstVaPages -= currentPagesCount;
+ }
+ }
+
+ PhysicalMemoryUsage += remainingSize;
+
+ ulong pagesCount = size / PageSize;
+
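+ // Only blocks that are still unmapped become heap; blocks that were already mapped keep their state.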
+ _blockManager.InsertBlock(
+ address,
+ pagesCount,
+ MemoryState.Unmapped,
+ KMemoryPermission.None,
+ MemoryAttribute.None,
+ MemoryState.Heap,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.None);
+ }
+
+ return Result.Success;
+ }
+
+ public Result UnmapPhysicalMemory(ulong address, ulong size)
+ {
+ ulong endAddr = address + size;
+
+ lock (_blockManager)
+ {
+ // Scan the range to ensure it can be unmapped (every block is either heap or
+ // already unmapped) and measure how much heap memory is currently mapped.
+ ulong heapMappedSize = 0;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State == MemoryState.Heap)
+ {
+ if (info.Attribute != MemoryAttribute.None)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong blockSize = GetSizeInRange(info, address, endAddr);
+
+ heapMappedSize += blockSize;
+ }
+ else if (info.State != MemoryState.Unmapped)
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+
+ if (heapMappedSize == 0)
+ {
+ return Result.Success;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ // Try to unmap all the heap mapped memory inside range.
+ Result result = Result.Success;
+
+ foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
+ {
+ if (info.State == MemoryState.Heap)
+ {
+ ulong blockSize = GetSizeInRange(info, address, endAddr);
+ ulong blockAddress = GetAddrInRange(info, address);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ result = Unmap(blockAddress, blockPagesCount);
+
+ // The kernel would attempt to remap if this fails, but we don't because:
+ // - The implementation may not support remapping if memory aliasing is not supported on the platform.
+ // - Unmap can't ever fail here anyway.
+ Debug.Assert(result == Result.Success);
+ }
+ }
+
+ if (result == Result.Success)
+ {
+ PhysicalMemoryUsage -= heapMappedSize;
+
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ currentProcess.ResourceLimit?.Release(LimitableResource.Memory, heapMappedSize);
+
+ ulong pagesCount = size / PageSize;
+
+ _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+ }
+
+ return result;
+ }
+ }
+
+ public Result CopyDataToCurrentProcess(
+ ulong dst,
+ ulong size,
+ ulong src,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permission,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected)
+ {
+ // Client -> server.
+ return CopyDataFromOrToCurrentProcess(
+ size,
+ src,
+ dst,
+ stateMask,
+ stateExpected,
+ permission,
+ attributeMask,
+ attributeExpected,
+ toServer: true);
+ }
+
+ public Result CopyDataFromCurrentProcess(
+ ulong dst,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permission,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ ulong src)
+ {
+ // Server -> client.
+ return CopyDataFromOrToCurrentProcess(
+ size,
+ dst,
+ src,
+ stateMask,
+ stateExpected,
+ permission,
+ attributeMask,
+ attributeExpected,
+ toServer: false);
+ }
+
+ private Result CopyDataFromOrToCurrentProcess(
+ ulong size,
+ ulong clientAddress,
+ ulong serverAddress,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permission,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ bool toServer)
+ {
+ if (AddrSpaceStart > clientAddress)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong srcEndAddr = clientAddress + size;
+
+ if (srcEndAddr <= clientAddress || srcEndAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ clientAddress,
+ size,
+ stateMask,
+ stateExpected,
+ permission,
+ permission,
+ attributeMask | MemoryAttribute.Uncached,
+ attributeExpected))
+ {
+ KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+ while (size > 0)
+ {
+ ulong copySize = 0x100000; // Copy chunck size. Any value will do, moderate sizes are recommended.
+
+ if (copySize > size)
+ {
+ copySize = size;
+ }
+
+ if (toServer)
+ {
+ currentProcess.CpuMemory.Write(serverAddress, GetSpan(clientAddress, (int)copySize));
+ }
+ else
+ {
+ Write(clientAddress, currentProcess.CpuMemory.GetSpan(serverAddress, (int)copySize));
+ }
+
+ serverAddress += copySize;
+ clientAddress += copySize;
+ size -= copySize;
+ }
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result MapBufferFromClientProcess(
+ ulong size,
+ ulong src,
+ KPageTableBase srcPageTable,
+ KMemoryPermission permission,
+ MemoryState state,
+ bool send,
+ out ulong dst)
+ {
+ dst = 0;
+
+ lock (srcPageTable._blockManager)
+ {
+ lock (_blockManager)
+ {
+ Result result = srcPageTable.ReprotectClientProcess(
+ src,
+ size,
+ permission,
+ state,
+ out int blocksNeeded);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ if (!srcPageTable._slabManager.CanAllocate(blocksNeeded))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong srcMapAddress = BitUtils.AlignUp<ulong>(src, PageSize);
+ ulong srcMapEndAddr = BitUtils.AlignDown<ulong>(src + size, PageSize);
+ ulong srcMapSize = srcMapEndAddr - srcMapAddress;
+
+ result = MapPagesFromClientProcess(size, src, permission, state, srcPageTable, send, out ulong va);
+
+ if (result != Result.Success)
+ {
+ if (srcMapEndAddr > srcMapAddress)
+ {
+ srcPageTable.UnmapIpcRestorePermission(src, size, state);
+ }
+
+ return result;
+ }
+
+ if (srcMapAddress < srcMapEndAddr)
+ {
+ KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite
+ ? KMemoryPermission.None
+ : KMemoryPermission.Read;
+
+ srcPageTable._blockManager.InsertBlock(srcMapAddress, srcMapSize / PageSize, SetIpcMappingPermissions, permissionMask);
+ }
+
+ dst = va;
+ }
+ }
+
+ return Result.Success;
+ }
+
+ private Result ReprotectClientProcess(
+ ulong address,
+ ulong size,
+ KMemoryPermission permission,
+ MemoryState state,
+ out int blocksNeeded)
+ {
+ blocksNeeded = 0;
+
+ if (AddrSpaceStart > address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong endAddr = address + size;
+
+ if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ MemoryState stateMask;
+
+ switch (state)
+ {
+ case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break;
+ case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break;
+ case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break;
+
+ default: return KernelResult.InvalidCombination;
+ }
+
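+ // If the server side gets a read-write mapping, the client loses all access while the buffer is borrowed; otherwise it keeps read access.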
+ KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite
+ ? KMemoryPermission.None
+ : KMemoryPermission.Read;
+
+ MemoryAttribute attributeMask = MemoryAttribute.Borrowed | MemoryAttribute.Uncached;
+
+ if (state == MemoryState.IpcBuffer0)
+ {
+ attributeMask |= MemoryAttribute.DeviceMapped;
+ }
+
+ ulong addressRounded = BitUtils.AlignUp<ulong>(address, PageSize);
+ ulong addressTruncated = BitUtils.AlignDown<ulong>(address, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp<ulong>(endAddr, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown<ulong>(endAddr, PageSize);
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong visitedSize = 0;
+
+ void CleanUpForError()
+ {
+ if (visitedSize == 0)
+ {
+ return;
+ }
+
+ ulong endAddrVisited = address + visitedSize;
+
+ foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrVisited))
+ {
+ if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0)
+ {
+ ulong blockAddress = GetAddrInRange(info, addressRounded);
+ ulong blockSize = GetSizeInRange(info, addressRounded, endAddrVisited);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ Result reprotectResult = Reprotect(blockAddress, blockPagesCount, info.Permission);
+ Debug.Assert(reprotectResult == Result.Success);
+ }
+ }
+ }
+
+ // Signal a read for any resources tracking reads in the region, as the other process is likely to use the data.
+ SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, false);
+
+ // Reprotect the aligned pages range on the client to make them inaccessible from the client process.
+ Result result;
+
+ if (addressRounded < endAddrTruncated)
+ {
+ foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated))
+ {
+ // Check if the block state matches what we expect.
+ if ((info.State & stateMask) != stateMask ||
+ (info.Permission & permission) != permission ||
+ (info.Attribute & attributeMask) != MemoryAttribute.None)
+ {
+ CleanUpForError();
+
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong blockAddress = GetAddrInRange(info, addressRounded);
+ ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ // If the first block starts before the aligned range, it will need to be split.
+ if (info.Address < addressRounded)
+ {
+ blocksNeeded++;
+ }
+
+ // If the last block ends after the aligned range, it will need to be split.
+ if (endAddrTruncated - 1 < info.Address + info.Size - 1)
+ {
+ blocksNeeded++;
+ }
+
+ if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0)
+ {
+ result = Reprotect(blockAddress, blockPagesCount, permissionMask);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+ }
+
+ visitedSize += blockSize;
+ }
+ }
+
+ return Result.Success;
+ }
+
+ private Result MapPagesFromClientProcess(
+ ulong size,
+ ulong address,
+ KMemoryPermission permission,
+ MemoryState state,
+ KPageTableBase srcPageTable,
+ bool send,
+ out ulong dst)
+ {
+ dst = 0;
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong endAddr = address + size;
+
+ ulong addressTruncated = BitUtils.AlignDown<ulong>(address, PageSize);
+ ulong addressRounded = BitUtils.AlignUp<ulong>(address, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown<ulong>(endAddr, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp<ulong>(endAddr, PageSize);
+
+ ulong neededSize = endAddrRounded - addressTruncated;
+
+ ulong neededPagesCount = neededSize / PageSize;
+
+ ulong regionPagesCount = (AliasRegionEnd - AliasRegionStart) / PageSize;
+
+ ulong va = 0;
+
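+ // Try the largest mapping unit first, falling back to smaller alignments until a placement is found.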
+ for (int unit = MappingUnitSizes.Length - 1; unit >= 0 && va == 0; unit--)
+ {
+ int alignment = MappingUnitSizes[unit];
+
+ va = AllocateVa(AliasRegionStart, regionPagesCount, neededPagesCount, alignment);
+ }
+
+ if (va == 0)
+ {
+ return KernelResult.OutOfVaSpace;
+ }
+
+ ulong dstFirstPagePa = 0;
+ ulong dstLastPagePa = 0;
+ ulong currentVa = va;
+
+ using var _ = new OnScopeExit(() =>
+ {
+ if (dstFirstPagePa != 0)
+ {
+ Context.MemoryManager.DecrementPagesReferenceCount(dstFirstPagePa, 1);
+ }
+
+ if (dstLastPagePa != 0)
+ {
+ Context.MemoryManager.DecrementPagesReferenceCount(dstLastPagePa, 1);
+ }
+ });
+
+ void CleanUpForError()
+ {
+ if (currentVa != va)
+ {
+ Unmap(va, (currentVa - va) / PageSize);
+ }
+ }
+
+ // Is the first page address aligned?
+ // If not, allocate a new page and copy the unaligned chunk.
+ if (addressTruncated < addressRounded)
+ {
+ dstFirstPagePa = GetMemoryRegionManager().AllocatePagesContiguous(Context, 1, _aslrDisabled);
+
+ if (dstFirstPagePa == 0)
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfMemory;
+ }
+ }
+
+ // Is the last page end address aligned?
+ // If not, allocate a new page and copy the unaligned chunk.
+ if (endAddrTruncated < endAddrRounded && (addressTruncated == addressRounded || addressTruncated < endAddrTruncated))
+ {
+ dstLastPagePa = GetMemoryRegionManager().AllocatePagesContiguous(Context, 1, _aslrDisabled);
+
+ if (dstLastPagePa == 0)
+ {
+ CleanUpForError();
+
+ return KernelResult.OutOfMemory;
+ }
+ }
+
+ if (dstFirstPagePa != 0)
+ {
+ ulong firstPageFillAddress = dstFirstPagePa;
+ ulong unusedSizeAfter;
+
+ if (send)
+ {
+ ulong unusedSizeBefore = address - addressTruncated;
+
+ Context.Memory.Fill(GetDramAddressFromPa(dstFirstPagePa), unusedSizeBefore, (byte)_ipcFillValue);
+
+ ulong copySize = addressRounded <= endAddr ? addressRounded - address : size;
+ var data = srcPageTable.GetSpan(addressTruncated + unusedSizeBefore, (int)copySize);
+
+ Context.Memory.Write(GetDramAddressFromPa(dstFirstPagePa + unusedSizeBefore), data);
+
+ firstPageFillAddress += unusedSizeBefore + copySize;
+
+ unusedSizeAfter = addressRounded > endAddr ? addressRounded - endAddr : 0;
+ }
+ else
+ {
+ unusedSizeAfter = PageSize;
+ }
+
+ if (unusedSizeAfter != 0)
+ {
+ Context.Memory.Fill(GetDramAddressFromPa(firstPageFillAddress), unusedSizeAfter, (byte)_ipcFillValue);
+ }
+
+ Result result = MapPages(currentVa, 1, dstFirstPagePa, permission, MemoryMapFlags.Private);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ currentVa += PageSize;
+ }
+
+ if (endAddrTruncated > addressRounded)
+ {
+ ulong alignedSize = endAddrTruncated - addressRounded;
+
+ Result result;
+
+ if (srcPageTable.Supports4KBPages)
+ {
+ KPageList pageList = new KPageList();
+ srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
+
+ result = MapPages(currentVa, pageList, permission, MemoryMapFlags.None);
+ }
+ else
+ {
+ result = MapForeign(srcPageTable.GetHostRegions(addressRounded, alignedSize), currentVa, alignedSize);
+ }
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+
+ currentVa += alignedSize;
+ }
+
+ if (dstLastPagePa != 0)
+ {
+ ulong lastPageFillAddr = dstLastPagePa;
+ ulong unusedSizeAfter;
+
+ if (send)
+ {
+ ulong copySize = endAddr - endAddrTruncated;
+ var data = srcPageTable.GetSpan(endAddrTruncated, (int)copySize);
+
+ Context.Memory.Write(GetDramAddressFromPa(dstLastPagePa), data);
+
+ lastPageFillAddr += copySize;
+
+ unusedSizeAfter = PageSize - copySize;
+ }
+ else
+ {
+ unusedSizeAfter = PageSize;
+ }
+
+ Context.Memory.Fill(GetDramAddressFromPa(lastPageFillAddr), unusedSizeAfter, (byte)_ipcFillValue);
+
+ Result result = MapPages(currentVa, 1, dstLastPagePa, permission, MemoryMapFlags.Private);
+
+ if (result != Result.Success)
+ {
+ CleanUpForError();
+
+ return result;
+ }
+ }
+
+ _blockManager.InsertBlock(va, neededPagesCount, state, permission);
+
+ dst = va + (address - addressTruncated);
+
+ return Result.Success;
+ }
+
+ public Result UnmapNoAttributeIfStateEquals(ulong address, ulong size, MemoryState state)
+ {
+ if (AddrSpaceStart > address)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ ulong endAddr = address + size;
+
+ if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ state,
+ KMemoryPermission.Read,
+ KMemoryPermission.Read,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _))
+ {
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ ulong addressTruncated = BitUtils.AlignDown<ulong>(address, PageSize);
+ ulong addressRounded = BitUtils.AlignUp<ulong>(address, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown<ulong>(endAddr, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp<ulong>(endAddr, PageSize);
+
+ ulong pagesCount = (endAddrRounded - addressTruncated) / PageSize;
+
+ Result result = Unmap(addressTruncated, pagesCount);
+
+ if (result == Result.Success)
+ {
+ _blockManager.InsertBlock(addressTruncated, pagesCount, MemoryState.Unmapped);
+ }
+
+ return result;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result UnmapIpcRestorePermission(ulong address, ulong size, MemoryState state)
+ {
+ ulong endAddr = address + size;
+
+ ulong addressRounded = BitUtils.AlignUp<ulong>(address, PageSize);
+ ulong addressTruncated = BitUtils.AlignDown<ulong>(address, PageSize);
+ ulong endAddrRounded = BitUtils.AlignUp<ulong>(endAddr, PageSize);
+ ulong endAddrTruncated = BitUtils.AlignDown<ulong>(endAddr, PageSize);
+
+ ulong pagesCount = addressRounded < endAddrTruncated ? (endAddrTruncated - addressRounded) / PageSize : 0;
+
+ if (pagesCount == 0)
+ {
+ return Result.Success;
+ }
+
+ MemoryState stateMask;
+
+ switch (state)
+ {
+ case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break;
+ case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break;
+ case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break;
+
+ default: return KernelResult.InvalidCombination;
+ }
+
+ MemoryAttribute attributeMask =
+ MemoryAttribute.Borrowed |
+ MemoryAttribute.IpcMapped |
+ MemoryAttribute.Uncached;
+
+ if (state == MemoryState.IpcBuffer0)
+ {
+ attributeMask |= MemoryAttribute.DeviceMapped;
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ // Anything on the client side should see this memory as modified.
+ SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, true);
+
+ lock (_blockManager)
+ {
+ foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated))
+ {
+ // Check if the block state matches what we expect.
+ if ((info.State & stateMask) != stateMask ||
+ (info.Attribute & attributeMask) != MemoryAttribute.IpcMapped)
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ if (info.Permission != info.SourcePermission && info.IpcRefCount == 1)
+ {
+ ulong blockAddress = GetAddrInRange(info, addressRounded);
+ ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated);
+
+ ulong blockPagesCount = blockSize / PageSize;
+
+ Result result = Reprotect(blockAddress, blockPagesCount, info.SourcePermission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+ }
+ }
+
+ _blockManager.InsertBlock(addressRounded, pagesCount, RestoreIpcMappingPermissions);
+
+ return Result.Success;
+ }
+ }
+
+ private static void SetIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission)
+ {
+ block.SetIpcMappingPermission(permission);
+ }
+
+ private static void RestoreIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission)
+ {
+ block.RestoreIpcMappingPermission();
+ }
+
+ public Result GetPagesIfStateEquals(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ KPageList pageList)
+ {
+ if (!InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ stateMask | MemoryState.IsPoolAllocated,
+ stateExpected | MemoryState.IsPoolAllocated,
+ permissionMask,
+ permissionExpected,
+ attributeMask,
+ attributeExpected,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _))
+ {
+ GetPhysicalRegions(address, size, pageList);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result BorrowIpcBuffer(ulong address, ulong size)
+ {
+ return SetAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.IpcBufferAllowed,
+ MemoryState.IpcBufferAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Borrowed);
+ }
+
+ public Result BorrowTransferMemory(KPageList pageList, ulong address, ulong size, KMemoryPermission permission)
+ {
+ return SetAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.TransferMemoryAllowed,
+ MemoryState.TransferMemoryAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ permission,
+ MemoryAttribute.Borrowed,
+ pageList);
+ }
+
+ public Result BorrowCodeMemory(KPageList pageList, ulong address, ulong size)
+ {
+ return SetAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.CodeMemoryAllowed,
+ MemoryState.CodeMemoryAllowed,
+ KMemoryPermission.Mask,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Borrowed,
+ pageList);
+ }
+
+ private Result SetAttributesAndChangePermission(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ KMemoryPermission newPermission,
+ MemoryAttribute attributeSetMask,
+ KPageList pageList = null)
+ {
+ if (address + size <= address || !InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ stateMask | MemoryState.IsPoolAllocated,
+ stateExpected | MemoryState.IsPoolAllocated,
+ permissionMask,
+ permissionExpected,
+ attributeMask,
+ attributeExpected,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState oldState,
+ out KMemoryPermission oldPermission,
+ out MemoryAttribute oldAttribute))
+ {
+ ulong pagesCount = size / PageSize;
+
+ if (pageList != null)
+ {
+ GetPhysicalRegions(address, pagesCount * PageSize, pageList);
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ if (newPermission == KMemoryPermission.None)
+ {
+ newPermission = oldPermission;
+ }
+
+ if (newPermission != oldPermission)
+ {
+ Result result = Reprotect(address, pagesCount, newPermission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+ }
+
+ MemoryAttribute newAttribute = oldAttribute | attributeSetMask;
+
+ _blockManager.InsertBlock(address, pagesCount, oldState, newPermission, newAttribute);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ public Result UnborrowIpcBuffer(ulong address, ulong size)
+ {
+ return ClearAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.IpcBufferAllowed,
+ MemoryState.IpcBufferAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Borrowed);
+ }
+
+ public Result UnborrowTransferMemory(ulong address, ulong size, KPageList pageList)
+ {
+ return ClearAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.TransferMemoryAllowed,
+ MemoryState.TransferMemoryAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Borrowed,
+ pageList);
+ }
+
+ public Result UnborrowCodeMemory(ulong address, ulong size, KPageList pageList)
+ {
+ return ClearAttributesAndChangePermission(
+ address,
+ size,
+ MemoryState.CodeMemoryAllowed,
+ MemoryState.CodeMemoryAllowed,
+ KMemoryPermission.None,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.Borrowed,
+ KMemoryPermission.ReadAndWrite,
+ MemoryAttribute.Borrowed,
+ pageList);
+ }
+
+ private Result ClearAttributesAndChangePermission(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ KMemoryPermission newPermission,
+ MemoryAttribute attributeClearMask,
+ KPageList pageList = null)
+ {
+ if (address + size <= address || !InsideAddrSpace(address, size))
+ {
+ return KernelResult.InvalidMemState;
+ }
+
+ lock (_blockManager)
+ {
+ if (CheckRange(
+ address,
+ size,
+ stateMask | MemoryState.IsPoolAllocated,
+ stateExpected | MemoryState.IsPoolAllocated,
+ permissionMask,
+ permissionExpected,
+ attributeMask,
+ attributeExpected,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out MemoryState oldState,
+ out KMemoryPermission oldPermission,
+ out MemoryAttribute oldAttribute))
+ {
+ ulong pagesCount = size / PageSize;
+
+ if (pageList != null)
+ {
+ KPageList currentPageList = new KPageList();
+
+ GetPhysicalRegions(address, pagesCount * PageSize, currentPageList);
+
+ if (!currentPageList.IsEqual(pageList))
+ {
+ return KernelResult.InvalidMemRange;
+ }
+ }
+
+ if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+ {
+ return KernelResult.OutOfResource;
+ }
+
+ if (newPermission == KMemoryPermission.None)
+ {
+ newPermission = oldPermission;
+ }
+
+ if (newPermission != oldPermission)
+ {
+ Result result = Reprotect(address, pagesCount, newPermission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+ }
+
+ MemoryAttribute newAttribute = oldAttribute & ~attributeClearMask;
+
+ _blockManager.InsertBlock(address, pagesCount, oldState, newPermission, newAttribute);
+
+ return Result.Success;
+ }
+ else
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+ }
+
+ private static ulong GetAddrInRange(KMemoryInfo info, ulong start)
+ {
+ if (info.Address < start)
+ {
+ return start;
+ }
+
+ return info.Address;
+ }
+
+ private static ulong GetSizeInRange(KMemoryInfo info, ulong start, ulong end)
+ {
+ ulong endAddr = info.Size + info.Address;
+ ulong size = info.Size;
+
+ if (info.Address < start)
+ {
+ size -= start - info.Address;
+ }
+
+ if (endAddr > end)
+ {
+ size -= endAddr - end;
+ }
+
+ return size;
+ }
+
+ private bool IsUnmapped(ulong address, ulong size)
+ {
+ return CheckRange(
+ address,
+ size,
+ MemoryState.Mask,
+ MemoryState.Unmapped,
+ KMemoryPermission.Mask,
+ KMemoryPermission.None,
+ MemoryAttribute.Mask,
+ MemoryAttribute.None,
+ MemoryAttribute.IpcAndDeviceMapped,
+ out _,
+ out _,
+ out _);
+ }
+
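+ // Checks that every block in the range shares the first block's state, permission and attribute,
+ // and that those match the expected values under the given masks. On success, outputs the values
+ // of the first block, with the ignored attributes masked out.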
+ private bool CheckRange(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected,
+ MemoryAttribute attributeIgnoreMask,
+ out MemoryState outState,
+ out KMemoryPermission outPermission,
+ out MemoryAttribute outAttribute)
+ {
+ ulong endAddr = address + size;
+
+ KMemoryBlock currBlock = _blockManager.FindBlock(address);
+
+ KMemoryInfo info = currBlock.GetInfo();
+
+ MemoryState firstState = info.State;
+ KMemoryPermission firstPermission = info.Permission;
+ MemoryAttribute firstAttribute = info.Attribute;
+
+ do
+ {
+ info = currBlock.GetInfo();
+
+ // Check if the block state matches what we expect.
+ if (firstState != info.State ||
+ firstPermission != info.Permission ||
+ (info.Attribute & attributeMask) != attributeExpected ||
+ (firstAttribute | attributeIgnoreMask) != (info.Attribute | attributeIgnoreMask) ||
+ (firstState & stateMask) != stateExpected ||
+ (firstPermission & permissionMask) != permissionExpected)
+ {
+ outState = MemoryState.Unmapped;
+ outPermission = KMemoryPermission.None;
+ outAttribute = MemoryAttribute.None;
+
+ return false;
+ }
+ }
+ while (info.Address + info.Size - 1 < endAddr - 1 && (currBlock = currBlock.Successor) != null);
+
+ outState = firstState;
+ outPermission = firstPermission;
+ outAttribute = firstAttribute & ~attributeIgnoreMask;
+
+ return true;
+ }
+
+ private bool CheckRange(
+ ulong address,
+ ulong size,
+ MemoryState stateMask,
+ MemoryState stateExpected,
+ KMemoryPermission permissionMask,
+ KMemoryPermission permissionExpected,
+ MemoryAttribute attributeMask,
+ MemoryAttribute attributeExpected)
+ {
+ foreach (KMemoryInfo info in IterateOverRange(address, address + size))
+ {
+ // Check if the block state matches what we expect.
+ if ((info.State & stateMask) != stateExpected ||
+ (info.Permission & permissionMask) != permissionExpected ||
+ (info.Attribute & attributeMask) != attributeExpected)
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
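+ // Enumerates every memory block that overlaps the range [start, end).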
+ private IEnumerable<KMemoryInfo> IterateOverRange(ulong start, ulong end)
+ {
+ KMemoryBlock currBlock = _blockManager.FindBlock(start);
+
+ KMemoryInfo info;
+
+ do
+ {
+ info = currBlock.GetInfo();
+
+ yield return info;
+ }
+ while (info.Address + info.Size - 1 < end - 1 && (currBlock = currBlock.Successor) != null);
+ }
+
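+ // Reserves a virtual address range, trying up to 8 random ASLR placements before falling back to a first-fit search.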
+ private ulong AllocateVa(ulong regionStart, ulong regionPagesCount, ulong neededPagesCount, int alignment)
+ {
+ ulong address = 0;
+
+ ulong regionEndAddr = regionStart + regionPagesCount * PageSize;
+
+ ulong reservedPagesCount = _isKernel ? 1UL : 4UL;
+
+ if (_aslrEnabled)
+ {
+ ulong totalNeededSize = (reservedPagesCount + neededPagesCount) * PageSize;
+
+ ulong remainingPages = regionPagesCount - neededPagesCount;
+
+ ulong aslrMaxOffset = ((remainingPages + reservedPagesCount) * PageSize) / (ulong)alignment;
+
+ for (int attempt = 0; attempt < 8; attempt++)
+ {
+ ulong aslrAddress = BitUtils.AlignDown(regionStart + GetRandomValue(0, aslrMaxOffset) * (ulong)alignment, (ulong)alignment);
+ ulong aslrEndAddr = aslrAddress + totalNeededSize;
+
+ KMemoryInfo info = _blockManager.FindBlock(aslrAddress).GetInfo();
+
+ if (info.State != MemoryState.Unmapped)
+ {
+ continue;
+ }
+
+ ulong currBaseAddr = info.Address + reservedPagesCount * PageSize;
+ ulong currEndAddr = info.Address + info.Size;
+
+ if (aslrAddress >= regionStart &&
+ aslrAddress >= currBaseAddr &&
+ aslrEndAddr - 1 <= regionEndAddr - 1 &&
+ aslrEndAddr - 1 <= currEndAddr - 1)
+ {
+ address = aslrAddress;
+ break;
+ }
+ }
+
+ if (address == 0)
+ {
+ ulong aslrPage = GetRandomValue(0, aslrMaxOffset);
+
+ address = FindFirstFit(
+ regionStart + aslrPage * PageSize,
+ regionPagesCount - aslrPage,
+ neededPagesCount,
+ alignment,
+ 0,
+ reservedPagesCount);
+ }
+ }
+
+ if (address == 0)
+ {
+ address = FindFirstFit(
+ regionStart,
+ regionPagesCount,
+ neededPagesCount,
+ alignment,
+ 0,
+ reservedPagesCount);
+ }
+
+ return address;
+ }
+
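+ // Scans unmapped blocks for the first aligned address that can hold the requested pages plus the reserved guard pages.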
+ private ulong FindFirstFit(
+ ulong regionStart,
+ ulong regionPagesCount,
+ ulong neededPagesCount,
+ int alignment,
+ ulong reservedStart,
+ ulong reservedPagesCount)
+ {
+ ulong reservedSize = reservedPagesCount * PageSize;
+
+ ulong totalNeededSize = reservedSize + neededPagesCount * PageSize;
+
+ ulong regionEndAddr = (regionStart + regionPagesCount * PageSize) - 1;
+
+ KMemoryBlock currBlock = _blockManager.FindBlock(regionStart);
+
+ KMemoryInfo info = currBlock.GetInfo();
+
+ while (regionEndAddr >= info.Address)
+ {
+ if (info.State == MemoryState.Unmapped)
+ {
+ ulong currBaseAddr = info.Address <= regionStart ? regionStart : info.Address;
+ ulong currEndAddr = info.Address + info.Size - 1;
+
+ currBaseAddr += reservedSize;
+
+ ulong address = BitUtils.AlignDown<ulong>(currBaseAddr, (ulong)alignment) + reservedStart;
+
+ if (currBaseAddr > address)
+ {
+ address += (ulong)alignment;
+ }
+
+ ulong allocationEndAddr = address + totalNeededSize - 1;
+
+ if (info.Address <= address &&
+ address < allocationEndAddr &&
+ allocationEndAddr <= regionEndAddr &&
+ allocationEndAddr <= currEndAddr)
+ {
+ return address;
+ }
+ }
+
+ currBlock = currBlock.Successor;
+
+ if (currBlock == null)
+ {
+ break;
+ }
+
+ info = currBlock.GetInfo();
+ }
+
+ return 0;
+ }
+
+ public bool CanContain(ulong address, ulong size, MemoryState state)
+ {
+ ulong endAddr = address + size;
+
+ ulong regionBaseAddr = GetBaseAddress(state);
+ ulong regionEndAddr = regionBaseAddr + GetSize(state);
+
+ bool InsideRegion()
+ {
+ return regionBaseAddr <= address &&
+ endAddr > address &&
+ endAddr - 1 <= regionEndAddr - 1;
+ }
+
+ bool OutsideHeapRegion()
+ {
+ return endAddr <= HeapRegionStart || address >= HeapRegionEnd;
+ }
+
+ bool OutsideAliasRegion()
+ {
+ return endAddr <= AliasRegionStart || address >= AliasRegionEnd;
+ }
+
+ switch (state)
+ {
+ case MemoryState.Io:
+ case MemoryState.Normal:
+ case MemoryState.CodeStatic:
+ case MemoryState.CodeMutable:
+ case MemoryState.SharedMemory:
+ case MemoryState.ModCodeStatic:
+ case MemoryState.ModCodeMutable:
+ case MemoryState.Stack:
+ case MemoryState.ThreadLocal:
+ case MemoryState.TransferMemoryIsolated:
+ case MemoryState.TransferMemory:
+ case MemoryState.ProcessMemory:
+ case MemoryState.CodeReadOnly:
+ case MemoryState.CodeWritable:
+ return InsideRegion() && OutsideHeapRegion() && OutsideAliasRegion();
+
+ case MemoryState.Heap:
+ return InsideRegion() && OutsideAliasRegion();
+
+ case MemoryState.IpcBuffer0:
+ case MemoryState.IpcBuffer1:
+ case MemoryState.IpcBuffer3:
+ return InsideRegion() && OutsideHeapRegion();
+
+ case MemoryState.KernelStack:
+ return InsideRegion();
+ }
+
+ throw new ArgumentException($"Invalid state value \"{state}\".");
+ }
+
+ private ulong GetBaseAddress(MemoryState state)
+ {
+ switch (state)
+ {
+ case MemoryState.Io:
+ case MemoryState.Normal:
+ case MemoryState.ThreadLocal:
+ return TlsIoRegionStart;
+
+ case MemoryState.CodeStatic:
+ case MemoryState.CodeMutable:
+ case MemoryState.SharedMemory:
+ case MemoryState.ModCodeStatic:
+ case MemoryState.ModCodeMutable:
+ case MemoryState.TransferMemoryIsolated:
+ case MemoryState.TransferMemory:
+ case MemoryState.ProcessMemory:
+ case MemoryState.CodeReadOnly:
+ case MemoryState.CodeWritable:
+ return GetAddrSpaceBaseAddr();
+
+ case MemoryState.Heap:
+ return HeapRegionStart;
+
+ case MemoryState.IpcBuffer0:
+ case MemoryState.IpcBuffer1:
+ case MemoryState.IpcBuffer3:
+ return AliasRegionStart;
+
+ case MemoryState.Stack:
+ return StackRegionStart;
+
+ case MemoryState.KernelStack:
+ return AddrSpaceStart;
+ }
+
+ throw new ArgumentException($"Invalid state value \"{state}\".");
+ }
+
+ private ulong GetSize(MemoryState state)
+ {
+ switch (state)
+ {
+ case MemoryState.Io:
+ case MemoryState.Normal:
+ case MemoryState.ThreadLocal:
+ return TlsIoRegionEnd - TlsIoRegionStart;
+
+ case MemoryState.CodeStatic:
+ case MemoryState.CodeMutable:
+ case MemoryState.SharedMemory:
+ case MemoryState.ModCodeStatic:
+ case MemoryState.ModCodeMutable:
+ case MemoryState.TransferMemoryIsolated:
+ case MemoryState.TransferMemory:
+ case MemoryState.ProcessMemory:
+ case MemoryState.CodeReadOnly:
+ case MemoryState.CodeWritable:
+ return GetAddrSpaceSize();
+
+ case MemoryState.Heap:
+ return HeapRegionEnd - HeapRegionStart;
+
+ case MemoryState.IpcBuffer0:
+ case MemoryState.IpcBuffer1:
+ case MemoryState.IpcBuffer3:
+ return AliasRegionEnd - AliasRegionStart;
+
+ case MemoryState.Stack:
+ return StackRegionEnd - StackRegionStart;
+
+ case MemoryState.KernelStack:
+ return AddrSpaceEnd - AddrSpaceStart;
+ }
+
+ throw new ArgumentException($"Invalid state value \"{state}\".");
+ }
+
+ public ulong GetAddrSpaceBaseAddr()
+ {
+ if (AddrSpaceWidth == 36 || AddrSpaceWidth == 39)
+ {
+ return 0x8000000;
+ }
+ else if (AddrSpaceWidth == 32)
+ {
+ return 0x200000;
+ }
+ else
+ {
+ throw new InvalidOperationException("Invalid address space width!");
+ }
+ }
+
+ public ulong GetAddrSpaceSize()
+ {
+ if (AddrSpaceWidth == 36)
+ {
+ return 0xff8000000;
+ }
+ else if (AddrSpaceWidth == 39)
+ {
+ return 0x7ff8000000;
+ }
+ else if (AddrSpaceWidth == 32)
+ {
+ return 0xffe00000;
+ }
+ else
+ {
+ throw new InvalidOperationException("Invalid address space width!");
+ }
+ }
+
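+ // Context.Memory is addressed relative to the start of DRAM.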
+ private static ulong GetDramAddressFromPa(ulong pa)
+ {
+ return pa - DramMemoryMap.DramBase;
+ }
+
+ protected KMemoryRegionManager GetMemoryRegionManager()
+ {
+ return Context.MemoryManager.MemoryRegions[(int)_memRegion];
+ }
+
+ public ulong GetMmUsedPages()
+ {
+ lock (_blockManager)
+ {
+ return BitUtils.DivRoundUp<ulong>(GetMmUsedSize(), PageSize);
+ }
+ }
+
+ private ulong GetMmUsedSize()
+ {
+ return (ulong)(_blockManager.BlocksCount * KMemoryBlockSize);
+ }
+
+ public bool IsInvalidRegion(ulong address, ulong size)
+ {
+ return address + size - 1 > GetAddrSpaceBaseAddr() + GetAddrSpaceSize() - 1;
+ }
+
+ public bool InsideAddrSpace(ulong address, ulong size)
+ {
+ return AddrSpaceStart <= address && address + size - 1 <= AddrSpaceEnd - 1;
+ }
+
+ public bool InsideAliasRegion(ulong address, ulong size)
+ {
+ return address + size > AliasRegionStart && AliasRegionEnd > address;
+ }
+
+ public bool InsideHeapRegion(ulong address, ulong size)
+ {
+ return address + size > HeapRegionStart && HeapRegionEnd > address;
+ }
+
+ public bool InsideStackRegion(ulong address, ulong size)
+ {
+ return address + size > StackRegionStart && StackRegionEnd > address;
+ }
+
+ public bool OutsideAliasRegion(ulong address, ulong size)
+ {
+ return AliasRegionStart > address || address + size - 1 > AliasRegionEnd - 1;
+ }
+
+ public bool OutsideAddrSpace(ulong address, ulong size)
+ {
+ return AddrSpaceStart > address || address + size - 1 > AddrSpaceEnd - 1;
+ }
+
+ public bool OutsideStackRegion(ulong address, ulong size)
+ {
+ return StackRegionStart > address || address + size - 1 > StackRegionEnd - 1;
+ }
+
+ /// <summary>
+ /// Gets the host regions that make up the given virtual address region.
+ /// If any part of the virtual region is unmapped, null is returned.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <returns>The host regions</returns>
+ /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
+ protected abstract IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size);
+
+ /// <summary>
+ /// Gets the physical regions that make up the given virtual address region,
+ /// adding them to the supplied page list.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <param name="pageList">Page list where the ranges will be added</param>
+ protected abstract void GetPhysicalRegions(ulong va, ulong size, KPageList pageList);
+
+ /// <summary>
+ /// Gets a read-only span of data from CPU mapped memory.
+ /// </summary>
+ /// <remarks>
+ /// This may perform an allocation if the data is not contiguous in memory.
+ /// For this reason the span is read-only; writes to it will not reach the underlying memory.
+ /// </remarks>
+ /// <param name="va">Virtual address of the data</param>
+ /// <param name="size">Size of the data</param>
+ /// <param name="tracked">True if read tracking is triggered on the span</param>
+ /// <returns>A read-only span of the data</returns>
+ /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
+ protected abstract ReadOnlySpan<byte> GetSpan(ulong va, int size);
+
+ /// <summary>
+ /// Maps a new memory region with the contents of an existing memory region.
+ /// </summary>
+ /// <param name="src">Source memory region where the data will be taken from</param>
+ /// <param name="dst">Destination memory region to map</param>
+ /// <param name="pagesCount">Number of pages to map</param>
+ /// <param name="oldSrcPermission">Current protection of the source memory region</param>
+ /// <param name="newDstPermission">Desired protection for the destination memory region</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract Result MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission);
+
+ /// <summary>
+ /// Unmaps a region of memory that was previously mapped with <see cref="MapMemory"/>.
+ /// </summary>
+ /// <param name="dst">Destination memory region to be unmapped</param>
+ /// <param name="src">Source memory region that was originally remapped</param>
+ /// <param name="pagesCount">Number of pages to unmap</param>
+ /// <param name="oldDstPermission">Current protection of the destination memory region</param>
+ /// <param name="newSrcPermission">Desired protection of the source memory region</param>
+ /// <returns>Result of the unmapping operation</returns>
+ protected abstract Result UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission);
+
+ /// <summary>
+ /// Maps a region of memory into the specified physical memory region.
+ /// </summary>
+ /// <param name="dstVa">Destination virtual address that should be mapped</param>
+ /// <param name="pagesCount">Number of pages to map</param>
+ /// <param name="srcPa">Physical address where the pages should be mapped. May be ignored if aliasing is not supported</param>
+ /// <param name="permission">Permission of the region to be mapped</param>
+ /// <param name="flags">Flags controlling the memory map operation</param>
+ /// <param name="shouldFillPages">Indicate if the pages should be filled with the <paramref name="fillValue"/> value</param>
+ /// <param name="fillValue">The value used to fill pages when <paramref name="shouldFillPages"/> is set to true</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract Result MapPages(
+ ulong dstVa,
+ ulong pagesCount,
+ ulong srcPa,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages = false,
+ byte fillValue = 0);
+
+ /// <summary>
+ /// Maps a region of memory into the specified physical memory pages.
+ /// </summary>
+ /// <param name="address">Destination virtual address that should be mapped</param>
+ /// <param name="pageList">List of physical memory pages where the pages should be mapped. May be ignored if aliasing is not supported</param>
+ /// <param name="permission">Permission of the region to be mapped</param>
+ /// <param name="flags">Flags controlling the memory map operation</param>
+ /// <param name="shouldFillPages">Indicate if the pages should be filled with the <paramref name="fillValue"/> value</param>
+ /// <param name="fillValue">The value used to fill pages when <paramref name="shouldFillPages"/> is set to true</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract Result MapPages(
+ ulong address,
+ KPageList pageList,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages = false,
+ byte fillValue = 0);
+
+ /// <summary>
+ /// Maps pages into an arbitrary host memory location.
+ /// </summary>
+ /// <param name="regions">Host regions to be mapped into the specified virtual memory region</param>
+ /// <param name="va">Destination virtual address of the range on this page table</param>
+ /// <param name="size">Size of the range</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size);
+
+ /// <summary>
+ /// Unmaps a region of memory that was previously mapped with one of the page mapping methods.
+ /// </summary>
+ /// <param name="address">Virtual address of the region to unmap</param>
+ /// <param name="pagesCount">Number of pages to unmap</param>
+ /// <returns>Result of the unmapping operation</returns>
+ protected abstract Result Unmap(ulong address, ulong pagesCount);
+
+ /// <summary>
+ /// Changes the permissions of a given virtual memory region.
+ /// </summary>
+ /// <param name="address">Virtual address of the region to have the permission changes</param>
+ /// <param name="pagesCount">Number of pages to have their permissions changed</param>
+ /// <param name="permission">New permission</param>
+ /// <returns>Result of the permission change operation</returns>
+ protected abstract Result Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission);
+
+ /// <summary>
+ /// Changes the permissions of a given virtual memory region.
+ /// </summary>
+ /// <param name="address">Virtual address of the region to have the permission changes</param>
+ /// <param name="pagesCount">Number of pages to have their permissions changed</param>
+ /// <param name="permission">New permission</param>
+ /// <returns>Result of the permission change operation</returns>
+ protected abstract Result ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission);
+
+ /// <summary>
+ /// Alerts the memory tracking that a given region has been read from or written to.
+ /// This should be called before the read or write is performed.
+ /// </summary>
+ /// <param name="va">Virtual address of the region</param>
+ /// <param name="size">Size of the region</param>
+ protected abstract void SignalMemoryTracking(ulong va, ulong size, bool write);
+
+ /// <summary>
+ /// Writes data to CPU mapped memory, with write tracking.
+ /// </summary>
+ /// <param name="va">Virtual address to write the data into</param>
+ /// <param name="data">Data to be written</param>
+ /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
+ protected abstract void Write(ulong va, ReadOnlySpan<byte> data);
+ }
+} \ No newline at end of file
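One subtlety in the region predicates above: they are intentionally asymmetrical. InsideAddrSpace tests full containment of [address, address + size) in the address space, while InsideAliasRegion, InsideHeapRegion and InsideStackRegion only test overlap with their region; the Outside* helpers negate containment, so InsideAliasRegion and OutsideAliasRegion are not logical negations of each other. A minimal standalone sketch of the two predicates, with illustrative names and values that are not part of this patch:

    using System;

    static class RangeChecks
    {
        // Full containment: every byte of [address, address + size) lies in [start, end).
        public static bool IsContained(ulong address, ulong size, ulong start, ulong end)
            => start <= address && address + size - 1 <= end - 1;

        // Overlap: at least one byte of [address, address + size) lies in [start, end).
        public static bool Overlaps(ulong address, ulong size, ulong start, ulong end)
            => address + size > start && end > address;

        static void Main()
        {
            // A range straddling the start of a region overlaps it but is not contained in it.
            Console.WriteLine(IsContained(0xFF000, 0x2000, 0x100000, 0x200000)); // False
            Console.WriteLine(Overlaps(0xFF000, 0x2000, 0x100000, 0x200000));    // True
        }
    }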
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs
new file mode 100644
index 00000000..a0c19f9c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs
@@ -0,0 +1,27 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ struct KScopedPageList : IDisposable
+ {
+ private readonly KMemoryManager _manager;
+ private KPageList _pageList;
+
+ public KScopedPageList(KMemoryManager manager, KPageList pageList)
+ {
+ _manager = manager;
+ _pageList = pageList;
+ pageList.IncrementPagesReferenceCount(manager);
+ }
+
+ public void SignalSuccess()
+ {
+ _pageList = null;
+ }
+
+ public void Dispose()
+ {
+ _pageList?.DecrementPagesReferenceCount(_manager);
+ }
+ }
+}
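KScopedPageList is a scope guard over page reference counts: the constructor increments them, and Dispose decrements them again unless SignalSuccess ran first, handing ownership to the caller. A hedged usage sketch; the surrounding method and the TryMap helper are illustrative, not part of the patch:

    Result MapWithGuard(KMemoryManager manager, KPageList pageList)
    {
        using var scopedPageList = new KScopedPageList(manager, pageList);

        // Hypothetical operation that consumes the pages.
        Result result = TryMap(pageList);

        if (result != Result.Success)
        {
            return result; // Dispose drops the reference counts taken above.
        }

        scopedPageList.SignalSuccess(); // Keep the references alive past this scope.

        return Result.Success;
    }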
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
new file mode 100644
index 00000000..5ec3cd72
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
@@ -0,0 +1,75 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Horizon.Common;
+using Ryujinx.Memory;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KSharedMemory : KAutoObject
+ {
+ private readonly KPageList _pageList;
+
+ private readonly ulong _ownerPid;
+
+ private readonly KMemoryPermission _ownerPermission;
+ private readonly KMemoryPermission _userPermission;
+
+ public KSharedMemory(
+ KernelContext context,
+ SharedMemoryStorage storage,
+ ulong ownerPid,
+ KMemoryPermission ownerPermission,
+ KMemoryPermission userPermission) : base(context)
+ {
+ _pageList = storage.GetPageList();
+ _ownerPid = ownerPid;
+ _ownerPermission = ownerPermission;
+ _userPermission = userPermission;
+ }
+
+ public Result MapIntoProcess(
+ KPageTableBase memoryManager,
+ ulong address,
+ ulong size,
+ KProcess process,
+ KMemoryPermission permission)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ KMemoryPermission expectedPermission = process.Pid == _ownerPid
+ ? _ownerPermission
+ : _userPermission;
+
+ if (permission != expectedPermission)
+ {
+ return KernelResult.InvalidPermission;
+ }
+
+ // On platforms with a page size > 4 KB, this can fail because the address is not page aligned;
+ // in that case we return an error to force the application to retry with a different address.
+
+ try
+ {
+ return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ return KernelResult.InvalidMemState;
+ }
+ }
+
+ public Result UnmapFromProcess(KPageTableBase memoryManager, ulong address, ulong size, KProcess process)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ return memoryManager.UnmapPages(address, _pageList, MemoryState.SharedMemory);
+ }
+ }
+} \ No newline at end of file
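Both methods above gate on the page count matching the requested size exactly, and MapIntoProcess additionally selects the expected permission by PID. A standalone sketch of that validation, assuming a 4 KB page size (all names here are illustrative):

    static class SharedMemoryChecks
    {
        private const ulong PageSize = 0x1000;

        public static bool Validate(
            ulong pagesInList,
            ulong requestedSize,
            ulong callerPid,
            ulong ownerPid,
            KMemoryPermission ownerPermission,
            KMemoryPermission userPermission,
            KMemoryPermission requestedPermission)
        {
            // The mapping must cover the page list exactly (size rounded up to whole pages).
            if (pagesInList != (requestedSize + PageSize - 1) / PageSize)
            {
                return false; // KernelResult.InvalidSize in the patch.
            }

            // The owner process may hold a different permission than everyone else.
            KMemoryPermission expected = callerPid == ownerPid ? ownerPermission : userPermission;

            return requestedPermission == expected; // Else KernelResult.InvalidPermission.
        }
    }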
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KSlabHeap.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KSlabHeap.cs
new file mode 100644
index 00000000..9051e84c
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KSlabHeap.cs
@@ -0,0 +1,50 @@
+using System.Collections.Generic;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KSlabHeap
+ {
+ private readonly LinkedList<ulong> _items;
+
+ public KSlabHeap(ulong pa, ulong itemSize, ulong size)
+ {
+ _items = new LinkedList<ulong>();
+
+ int itemsCount = (int)(size / itemSize);
+
+ for (int index = 0; index < itemsCount; index++)
+ {
+ _items.AddLast(pa);
+
+ pa += itemSize;
+ }
+ }
+
+ public bool TryGetItem(out ulong pa)
+ {
+ lock (_items)
+ {
+ if (_items.First != null)
+ {
+ pa = _items.First.Value;
+
+ _items.RemoveFirst();
+
+ return true;
+ }
+ }
+
+ pa = 0;
+
+ return false;
+ }
+
+ public void Free(ulong pa)
+ {
+ lock (_items)
+ {
+ _items.AddFirst(pa);
+ }
+ }
+ }
+} \ No newline at end of file
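KSlabHeap pre-splits a physical range into equally sized items and serves them LIFO from a linked list under a lock, so freed items are reused first. A hedged usage sketch with illustrative addresses:

    // Carve a 0x10000-byte physical window into sixteen 0x1000-byte items.
    var heap = new KSlabHeap(pa: 0x80060000, itemSize: 0x1000, size: 0x10000);

    if (heap.TryGetItem(out ulong itemPa))
    {
        // ... use the item at physical address itemPa ...

        heap.Free(itemPa); // Returned to the head of the list, so it is handed out next.
    }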
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs
new file mode 100644
index 00000000..b2449598
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs
@@ -0,0 +1,130 @@
+using Ryujinx.Common;
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Horizon.Common;
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class KTransferMemory : KAutoObject
+ {
+ private KProcess _creator;
+
+ // TODO: Remove when we no longer need to read it from the owner directly.
+ public KProcess Creator => _creator;
+
+ private readonly KPageList _pageList;
+
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+
+ public KMemoryPermission Permission { get; private set; }
+
+ private bool _hasBeenInitialized;
+ private bool _isMapped;
+
+ public KTransferMemory(KernelContext context) : base(context)
+ {
+ _pageList = new KPageList();
+ }
+
+ public KTransferMemory(KernelContext context, SharedMemoryStorage storage) : base(context)
+ {
+ _pageList = storage.GetPageList();
+ Permission = KMemoryPermission.ReadAndWrite;
+
+ _hasBeenInitialized = true;
+ _isMapped = false;
+ }
+
+ public Result Initialize(ulong address, ulong size, KMemoryPermission permission)
+ {
+ KProcess creator = KernelStatic.GetCurrentProcess();
+
+ _creator = creator;
+
+ Result result = creator.MemoryManager.BorrowTransferMemory(_pageList, address, size, permission);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ creator.IncrementReferenceCount();
+
+ Permission = permission;
+ Address = address;
+ Size = size;
+ _hasBeenInitialized = true;
+ _isMapped = false;
+
+ return result;
+ }
+
+ public Result MapIntoProcess(
+ KPageTableBase memoryManager,
+ ulong address,
+ ulong size,
+ KProcess process,
+ KMemoryPermission permission)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ if (permission != Permission || _isMapped)
+ {
+ return KernelResult.InvalidState;
+ }
+
+ MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
+
+ Result result = memoryManager.MapPages(address, _pageList, state, KMemoryPermission.ReadAndWrite);
+
+ if (result == Result.Success)
+ {
+ _isMapped = true;
+ }
+
+ return result;
+ }
+
+ public Result UnmapFromProcess(
+ KPageTableBase memoryManager,
+ ulong address,
+ ulong size,
+ KProcess process)
+ {
+ if (_pageList.GetPagesCount() != BitUtils.DivRoundUp<ulong>(size, KPageTableBase.PageSize))
+ {
+ return KernelResult.InvalidSize;
+ }
+
+ MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
+
+ Result result = memoryManager.UnmapPages(address, _pageList, state);
+
+ if (result == Result.Success)
+ {
+ _isMapped = false;
+ }
+
+ return result;
+ }
+
+ protected override void Destroy()
+ {
+ if (_hasBeenInitialized)
+ {
+ if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _pageList) != Result.Success)
+ {
+ throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes.");
+ }
+
+ _creator.ResourceLimit?.Release(LimitableResource.TransferMemory, 1);
+ _creator.DecrementReferenceCount();
+ }
+ }
+ }
+} \ No newline at end of file
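The intended lifecycle reads off the code above: Initialize borrows the pages from the creator's page table, MapIntoProcess maps them into a recipient (under TransferMemoryIsolated when the creator kept no permission), UnmapFromProcess reverses the mapping, and Destroy un-borrows once the object is unmapped and released. A hedged sketch of the happy path; the kernel context, page table, process and addresses are assumed to exist:

    var transferMemory = new KTransferMemory(kernelContext);

    // Borrow `size` bytes at `srcAddress` from the current process, dropping all permissions.
    Result result = transferMemory.Initialize(srcAddress, size, KMemoryPermission.None);

    if (result == Result.Success)
    {
        // Permission.None selects MemoryState.TransferMemoryIsolated for the mapping.
        result = transferMemory.MapIntoProcess(
            targetPageTable, dstAddress, size, targetProcess, KMemoryPermission.None);
    }

    // ... later, tear down in reverse order:
    transferMemory.UnmapFromProcess(targetPageTable, dstAddress, size, targetProcess);
    transferMemory.DecrementReferenceCount(); // Releasing the last reference runs Destroy, which un-borrows.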
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryAttribute.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryAttribute.cs
new file mode 100644
index 00000000..42407ffe
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryAttribute.cs
@@ -0,0 +1,22 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ [Flags]
+ enum MemoryAttribute : byte
+ {
+ None = 0,
+ Mask = 0xff,
+
+ Borrowed = 1 << 0,
+ IpcMapped = 1 << 1,
+ DeviceMapped = 1 << 2,
+ Uncached = 1 << 3,
+
+ IpcAndDeviceMapped = IpcMapped | DeviceMapped,
+
+ BorrowedAndIpcMapped = Borrowed | IpcMapped,
+
+ DeviceMappedAndUncached = DeviceMapped | Uncached
+ }
+} \ No newline at end of file
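Since MemoryAttribute is a [Flags] enum, attributes compose and test with bitwise operators; the pre-combined members are conveniences for common pairs. A quick sketch:

    MemoryAttribute attr = MemoryAttribute.Borrowed | MemoryAttribute.IpcMapped;

    bool isBorrowed = (attr & MemoryAttribute.Borrowed) != 0;     // true
    bool samePair = attr == MemoryAttribute.BorrowedAndIpcMapped; // true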
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryFillValue.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryFillValue.cs
new file mode 100644
index 00000000..cdc892fc
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryFillValue.cs
@@ -0,0 +1,10 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ enum MemoryFillValue : byte
+ {
+ Zero = 0,
+ Stack = 0x58,
+ Ipc = 0x59,
+ Heap = 0x5A,
+ }
+}
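The nonzero fill values are the ASCII codes for 'X', 'Y' and 'Z', which presumably makes uninitialized stack, IPC and heap pages easy to spot in memory dumps:

    // 0x58 = 'X', 0x59 = 'Y', 0x5A = 'Z'.
    char stackTag = (char)(byte)MemoryFillValue.Stack; // 'X'
    char ipcTag = (char)(byte)MemoryFillValue.Ipc;     // 'Y'
    char heapTag = (char)(byte)MemoryFillValue.Heap;   // 'Z'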
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryPermission.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryPermission.cs
new file mode 100644
index 00000000..563b817d
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryPermission.cs
@@ -0,0 +1,20 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ [Flags]
+ enum KMemoryPermission : uint
+ {
+ None = 0,
+ UserMask = Read | Write | Execute,
+ Mask = uint.MaxValue,
+
+ Read = 1 << 0,
+ Write = 1 << 1,
+ Execute = 1 << 2,
+ DontCare = 1 << 28,
+
+ ReadAndWrite = Read | Write,
+ ReadAndExecute = Read | Execute
+ }
+} \ No newline at end of file
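UserMask covers only the Read, Write and Execute bits, so masking with it strips kernel-side bits such as DontCare, presumably before comparing against user-supplied permissions. A quick sketch:

    KMemoryPermission perm = KMemoryPermission.ReadAndWrite | KMemoryPermission.DontCare;

    // Reduce to the user-visible bits before comparison.
    KMemoryPermission userVisible = perm & KMemoryPermission.UserMask; // ReadAndWrite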
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryRegion.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryRegion.cs
new file mode 100644
index 00000000..ad719bde
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryRegion.cs
@@ -0,0 +1,10 @@
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ enum MemoryRegion
+ {
+ Application = 0,
+ Applet = 1,
+ Service = 2,
+ NvServices = 3
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryState.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryState.cs
new file mode 100644
index 00000000..d3b61780
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/MemoryState.cs
@@ -0,0 +1,50 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ [Flags]
+ enum MemoryState : uint
+ {
+ Unmapped = 0x00000000,
+ Io = 0x00002001,
+ Normal = 0x00042002,
+ CodeStatic = 0x00DC7E03,
+ CodeMutable = 0x03FEBD04,
+ Heap = 0x037EBD05,
+ SharedMemory = 0x00402006,
+ ModCodeStatic = 0x00DD7E08,
+ ModCodeMutable = 0x03FFBD09,
+ IpcBuffer0 = 0x005C3C0A,
+ Stack = 0x005C3C0B,
+ ThreadLocal = 0x0040200C,
+ TransferMemoryIsolated = 0x015C3C0D,
+ TransferMemory = 0x005C380E,
+ ProcessMemory = 0x0040380F,
+ Reserved = 0x00000010,
+ IpcBuffer1 = 0x005C3811,
+ IpcBuffer3 = 0x004C2812,
+ KernelStack = 0x00002013,
+ CodeReadOnly = 0x00402214,
+ CodeWritable = 0x00402015,
+ UserMask = 0xff,
+ Mask = 0xffffffff,
+
+ PermissionChangeAllowed = 1 << 8,
+ ForceReadWritableByDebugSyscalls = 1 << 9,
+ IpcSendAllowedType0 = 1 << 10,
+ IpcSendAllowedType3 = 1 << 11,
+ IpcSendAllowedType1 = 1 << 12,
+ ProcessPermissionChangeAllowed = 1 << 14,
+ MapAllowed = 1 << 15,
+ UnmapProcessCodeMemoryAllowed = 1 << 16,
+ TransferMemoryAllowed = 1 << 17,
+ QueryPhysicalAddressAllowed = 1 << 18,
+ MapDeviceAllowed = 1 << 19,
+ MapDeviceAlignedAllowed = 1 << 20,
+ IpcBufferAllowed = 1 << 21,
+ IsPoolAllocated = 1 << 22,
+ MapProcessAllowed = 1 << 23,
+ AttributeChangeAllowed = 1 << 24,
+ CodeMemoryAllowed = 1 << 25
+ }
+} \ No newline at end of file
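Each MemoryState constant packs a state type identifier in its low byte (compare UserMask = 0xff) and capability flags in the bits above it, which is why the enum mixes large hex constants with shifted flags. For example, Heap = 0x037EBD05 decodes as type 0x05 plus a set of *Allowed bits:

    MemoryState state = MemoryState.Heap; // 0x037EBD05

    MemoryState type = state & MemoryState.UserMask; // 0x05, the Heap type id.

    bool canMap = (state & MemoryState.MapAllowed) != 0;                    // true
    bool canReprotect = (state & MemoryState.PermissionChangeAllowed) != 0; // true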
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
new file mode 100644
index 00000000..c68b7369
--- /dev/null
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
@@ -0,0 +1,49 @@
+using System;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+ class SharedMemoryStorage
+ {
+ private readonly KernelContext _context;
+ private readonly KPageList _pageList;
+ private readonly ulong _size;
+
+ public SharedMemoryStorage(KernelContext context, KPageList pageList)
+ {
+ _context = context;
+ _pageList = pageList;
+ _size = pageList.GetPagesCount() * KPageTableBase.PageSize;
+
+ foreach (KPageNode pageNode in pageList)
+ {
+ ulong address = pageNode.Address - DramMemoryMap.DramBase;
+ ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
+ context.CommitMemory(address, size);
+ }
+ }
+
+ public void ZeroFill()
+ {
+ for (ulong offset = 0; offset < _size; offset += sizeof(ulong))
+ {
+ GetRef<ulong>(offset) = 0;
+ }
+ }
+
+ public ref T GetRef<T>(ulong offset) where T : unmanaged
+ {
+ if (_pageList.Nodes.Count == 1)
+ {
+ ulong address = _pageList.Nodes.First.Value.Address - DramMemoryMap.DramBase;
+ return ref _context.Memory.GetRef<T>(address + offset);
+ }
+
+ throw new NotImplementedException("Non-contiguous shared memory is not yet supported.");
+ }
+
+ public KPageList GetPageList()
+ {
+ return _pageList;
+ }
+ }
+}
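GetRef<T> only supports storage backed by a single contiguous page-list node, and ZeroFill is built on it, clearing the backing one ulong at a time (sizes are whole pages, so the stride is safe). A hedged usage sketch, assuming such a contiguous storage instance is in hand:

    // Clear the whole backing storage, then stamp a value at the start of it.
    storage.ZeroFill();
    storage.GetRef<uint>(0) = 1u; // Written straight into the DRAM-backed storage.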