| field | value | date |
|---|---|---|
| author | gdkchan <gab.dark.100@gmail.com> | 2019-01-18 20:26:39 -0200 |
| committer | GitHub <noreply@github.com> | 2019-01-18 20:26:39 -0200 |
| commit | 22bacc618815170c0d186a82e1ea4558e36b7063 (patch) | |
| tree | 79b97959481fea1ac301da6d4e9dea9b991ece6f /Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs | |
| parent | 3731d0ce8412c3c48286c242842bcb4940b4ca6d (diff) | |
Improve kernel IPC implementation (#550)
* Implement some IPC-related kernel SVCs properly
* Fix BLZ decompression when the segment also has an uncompressed chunk
* Set default CPU core on process start from ProgramLoader, remove debug message
* Load process capabilities properly on KIPs
* Fix a copy/paste error in UnmapPhysicalMemory64
* Implement smarter switching between the old and new IPC systems to support the old HLE service implementations without a manual switch
* Implement RegisterService on sm and AcceptSession (partial)
* Misc fixes and improvements on new IPC methods
* Move IPC-related SVCs into a separate file, and add logging on RegisterService (sm)
* Some small fixes related to receive list buffers and error cases
* Load NSOs using the correct pool partition
* Fix corner case in GetMaskFromMinMax where the range is 64; this doesn't happen in practice, however (see the sketch after this list)
* Fix send static buffer copy
* Session release, implement closing requests on client disconnect
* Implement ConnectToPort SVC
* KLightSession init
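The GetMaskFromMinMax fix above is the classic 64-bit shift-wraparound pitfall: in C#, shift counts on a 64-bit operand are taken modulo 64, so a mask built as `(1UL << range) - 1` silently becomes 0 when the range is exactly 64. A minimal sketch of the corner case, assuming the mask is built from a min/max pair with shifts; the method body here is illustrative, not the emulator's actual implementation:

```csharp
static class MaskExample
{
    // Illustrative only: the real GetMaskFromMinMax lives in Ryujinx's kernel
    // capability code; this body is an assumption used to show the corner case.
    public static ulong GetMaskFromMinMax(int min, int max)
    {
        int range = max - min + 1;

        // C# masks 64-bit shift counts to 6 bits, so (1UL << 64) == 1UL and
        // the naive mask would wrongly come out as 0 for a full 64-bit range.
        ulong mask = range == 64
            ? ulong.MaxValue
            : (1UL << range) - 1;

        return mask << min;
    }
}
```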
Diffstat (limited to 'Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs | 285 |
1 file changed, 172 insertions, 113 deletions
```diff
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
index 777e9aa9..92cef559 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
@@ -94,6 +94,14 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
             }
         }
 
+        public ulong AllocatePagesContiguous(ulong pagesCount, bool backwards)
+        {
+            lock (_blocks)
+            {
+                return AllocatePagesContiguousImpl(pagesCount, backwards);
+            }
+        }
+
         private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
         {
             pageList = new KPageList();
@@ -122,165 +130,216 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
                 //If so, try allocating as much requested pages as possible.
                 while (blockPagesCount <= pagesCount)
                 {
-                    ulong address = 0;
+                    ulong address = AllocatePagesForOrder(blockIndex, backwards, bestFitBlockSize);
 
-                    for (int currBlockIndex = blockIndex;
-                             currBlockIndex < _blockOrdersCount && address == 0;
-                             currBlockIndex++)
+                    //The address being zero means that no free space was found on that order,
+                    //just give up and try with the next one.
+                    if (address == 0)
                     {
-                        block = _blocks[currBlockIndex];
+                        break;
+                    }
 
-                        int index = 0;
+                    //Add new allocated page(s) to the pages list.
+                    //If an error occurs, then free all allocated pages and fail.
+                    KernelResult result = pageList.AddRange(address, blockPagesCount);
 
-                        bool zeroMask = false;
+                    if (result != KernelResult.Success)
+                    {
+                        FreePages(address, blockPagesCount);
 
-                        for (int level = 0; level < block.MaxLevel; level++)
+                        foreach (KPageNode pageNode in pageList)
                         {
-                            long mask = block.Masks[level][index];
-
-                            if (mask == 0)
-                            {
-                                zeroMask = true;
-
-                                break;
-                            }
-
-                            if (backwards)
-                            {
-                                index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
-                            }
-                            else
-                            {
-                                index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
-                            }
+                            FreePages(pageNode.Address, pageNode.PagesCount);
                         }
 
-                        if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
-                        {
-                            continue;
-                        }
+                        return result;
+                    }
 
-                        block.FreeCount--;
+                    pagesCount -= blockPagesCount;
+                }
+            }
 
-                        int tempIdx = index;
+            //Success case, all requested pages were allocated successfully.
+            if (pagesCount == 0)
+            {
+                return KernelResult.Success;
+            }
 
-                        for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
-                        {
-                            block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
+            //Error case, free allocated pages and return out of memory.
+            foreach (KPageNode pageNode in pageList)
+            {
+                FreePages(pageNode.Address, pageNode.PagesCount);
+            }
 
-                            if (block.Masks[level][tempIdx / 64] != 0)
-                            {
-                                break;
-                            }
-                        }
+            pageList = null;
 
-                        address = block.StartAligned + ((ulong)index << block.Order);
-                    }
+            return KernelResult.OutOfMemory;
+        }
 
-                    for (int currBlockIndex = blockIndex;
-                             currBlockIndex < _blockOrdersCount && address == 0;
-                             currBlockIndex++)
-                    {
-                        block = _blocks[currBlockIndex];
+        private ulong AllocatePagesContiguousImpl(ulong pagesCount, bool backwards)
+        {
+            if (pagesCount == 0 || _blocks.Length < 1)
+            {
+                return 0;
+            }
 
-                        int index = 0;
+            int blockIndex = 0;
 
-                        bool zeroMask = false;
+            while ((1UL << _blocks[blockIndex].Order) / KMemoryManager.PageSize < pagesCount)
+            {
+                if (++blockIndex >= _blocks.Length)
+                {
+                    return 0;
+                }
+            }
 
-                        for (int level = 0; level < block.MaxLevel; level++)
-                        {
-                            long mask = block.Masks[level][index];
-
-                            if (mask == 0)
-                            {
-                                zeroMask = true;
-
-                                break;
-                            }
-
-                            if (backwards)
-                            {
-                                index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
-                            }
-                            else
-                            {
-                                index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
-                            }
-                        }
+            ulong tightestFitBlockSize = 1UL << _blocks[blockIndex].Order;
 
-                        if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
-                        {
-                            continue;
-                        }
+            ulong address = AllocatePagesForOrder(blockIndex, backwards, tightestFitBlockSize);
 
-                        block.FreeCount--;
+            ulong requiredSize = pagesCount * KMemoryManager.PageSize;
 
-                        int tempIdx = index;
+            if (address != 0 && tightestFitBlockSize > requiredSize)
+            {
+                FreePages(address + requiredSize, (tightestFitBlockSize - requiredSize) / KMemoryManager.PageSize);
+            }
 
-                        for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
-                        {
-                            block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
+            return address;
+        }
 
-                            if (block.Masks[level][tempIdx / 64] != 0)
-                            {
-                                break;
-                            }
-                        }
+        private ulong AllocatePagesForOrder(int blockIndex, bool backwards, ulong bestFitBlockSize)
+        {
+            ulong address = 0;
 
-                        address = block.StartAligned + ((ulong)index << block.Order);
-                    }
+            KMemoryRegionBlock block = null;
 
-                    //The address being zero means that no free space was found on that order,
-                    //just give up and try with the next one.
-                    if (address == 0)
+            for (int currBlockIndex = blockIndex;
+                     currBlockIndex < _blockOrdersCount && address == 0;
+                     currBlockIndex++)
+            {
+                block = _blocks[currBlockIndex];
+
+                int index = 0;
+
+                bool zeroMask = false;
+
+                for (int level = 0; level < block.MaxLevel; level++)
+                {
+                    long mask = block.Masks[level][index];
+
+                    if (mask == 0)
                     {
+                        zeroMask = true;
+
                         break;
                     }
 
-                    //If we are using a larger order than best fit, then we should
-                    //split it into smaller blocks.
-                    ulong firstFreeBlockSize = 1UL << block.Order;
+                    if (backwards)
+                    {
+                        index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
+                    }
+                    else
+                    {
+                        index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
+                    }
+                }
 
-                    if (firstFreeBlockSize > bestFitBlockSize)
+                if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
+                {
+                    continue;
+                }
+
+                block.FreeCount--;
+
+                int tempIdx = index;
+
+                for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
+                {
+                    block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
+
+                    if (block.Masks[level][tempIdx / 64] != 0)
                     {
-                        FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
+                        break;
                     }
+                }
 
-                    //Add new allocated page(s) to the pages list.
-                    //If an error occurs, then free all allocated pages and fail.
-                    KernelResult result = pageList.AddRange(address, blockPagesCount);
+                address = block.StartAligned + ((ulong)index << block.Order);
+            }
 
-                    if (result != KernelResult.Success)
+            for (int currBlockIndex = blockIndex;
+                     currBlockIndex < _blockOrdersCount && address == 0;
+                     currBlockIndex++)
+            {
+                block = _blocks[currBlockIndex];
+
+                int index = 0;
+
+                bool zeroMask = false;
+
+                for (int level = 0; level < block.MaxLevel; level++)
+                {
+                    long mask = block.Masks[level][index];
+
+                    if (mask == 0)
                     {
-                        FreePages(address, blockPagesCount);
+                        zeroMask = true;
 
-                        foreach (KPageNode pageNode in pageList)
-                        {
-                            FreePages(pageNode.Address, pageNode.PagesCount);
-                        }
+                        break;
+                    }
 
-                        return result;
+                    if (backwards)
+                    {
+                        index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
                     }
+                    else
+                    {
+                        index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
+                    }
+                }
 
-                    pagesCount -= blockPagesCount;
+                if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
+                {
+                    continue;
                 }
-            }
 
-            //Success case, all requested pages were allocated successfully.
-            if (pagesCount == 0)
-            {
-                return KernelResult.Success;
+                block.FreeCount--;
+
+                int tempIdx = index;
+
+                for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
+                {
+                    block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
+
+                    if (block.Masks[level][tempIdx / 64] != 0)
+                    {
+                        break;
+                    }
+                }
+
+                address = block.StartAligned + ((ulong)index << block.Order);
             }
 
-            //Error case, free allocated pages and return out of memory.
-            foreach (KPageNode pageNode in pageList)
+            if (address != 0)
             {
-                FreePages(pageNode.Address, pageNode.PagesCount);
+                //If we are using a larger order than best fit, then we should
+                //split it into smaller blocks.
+                ulong firstFreeBlockSize = 1UL << block.Order;
+
+                if (firstFreeBlockSize > bestFitBlockSize)
+                {
+                    FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
+                }
             }
 
-            pageList = null;
+            return address;
+        }
 
-            return KernelResult.OutOfMemory;
+        public void FreePage(ulong address)
+        {
+            lock (_blocks)
+            {
+                FreePages(address, 1);
+            }
         }
 
         public void FreePages(KPageList pageList)
```
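The mask walk in the new AllocatePagesForOrder is a multi-level bitmap search: each set bit at one level summarizes a 64-bit word at the level below, so locating a free block takes one count-zeros step per level, and clearing a bit propagates upward only when a word becomes empty. A simplified standalone sketch of the same idea, assuming just two levels, forward search only, and modern System.Numerics.BitOperations in place of the emulator's BitUtils helpers:

```csharp
// Simplified two-level bitmap in the spirit of AllocatePagesForOrder.
// A set bit means "free". The summary word mirrors level 1: bit i of
// _summary is set when the 64-bit word _detail[i] has at least one free bit.
class TwoLevelBitmap
{
    private ulong _summary;                           // level 0: one bit per detail word
    private readonly ulong[] _detail = new ulong[64]; // level 1: one bit per block

    public int FindFirstFree()
    {
        if (_summary == 0)
        {
            return -1; // nothing free anywhere
        }

        int word = System.Numerics.BitOperations.TrailingZeroCount(_summary);
        int bit  = System.Numerics.BitOperations.TrailingZeroCount(_detail[word]);

        return word * 64 + bit;
    }

    public void Clear(int index) // mark a block as allocated
    {
        int word = index / 64;

        _detail[word] &= ~(1UL << (index & 63));

        if (_detail[word] == 0)
        {
            _summary &= ~(1UL << word); // word exhausted: clear its summary bit
        }
    }

    public void Free(int index) // mark a block as free again
    {
        _detail[index / 64] |= 1UL << (index & 63);
        _summary            |= 1UL << (index / 64);
    }
}
```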
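AllocatePagesContiguousImpl picks the smallest order whose block covers the request, allocates a single block of that order, and immediately returns the unused tail with FreePages. A worked example of that arithmetic, assuming the 4 KiB page size used by KMemoryManager.PageSize; the class and constants below are a standalone illustration, not emulator code:

```csharp
using System;

class ContiguousTrimExample
{
    const ulong PageSize = 0x1000; // 4 KiB, as in KMemoryManager.PageSize

    static void Main()
    {
        ulong pagesCount = 3;  // caller wants 3 contiguous pages (12 KiB)
        int   order      = 14; // smallest covering order: 2^14 = 16 KiB (4 pages)

        ulong blockSize    = 1UL << order;          // 16 KiB block actually allocated
        ulong requiredSize = pagesCount * PageSize; // 12 KiB actually needed

        // The diff frees the surplus right after the requested range:
        // FreePages(address + requiredSize, (blockSize - requiredSize) / PageSize);
        ulong surplusPages = (blockSize - requiredSize) / PageSize;

        Console.WriteLine($"block covers {blockSize / PageSize} pages, " +
                          $"freeing {surplusPages} page(s) back"); // 4 pages, 1 page(s)
    }
}
```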
