| | | |
|---|---|---|
| author | TSR Berry <20988865+TSRBerry@users.noreply.github.com> | 2023-04-26 04:34:16 +0200 |
| committer | Mary <thog@protonmail.com> | 2023-04-27 23:51:14 +0200 |
| commit | 609abc8b9b3c05a63bef94c2133550b3c07f97b0 (patch) | |
| tree | 2d1e27f0256c8f411ad55de9ccbb3d598f6c7237 | /src/Ryujinx.Tests.Memory |
| parent | cee712105850ac3385cd0091a923438167433f9f (diff) | |
Rename Ryujinx.Memory.Tests to Ryujinx.Tests.Memory
Diffstat (limited to 'src/Ryujinx.Tests.Memory')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | src/Ryujinx.Tests.Memory/MockVirtualMemoryManager.cs | 109 |
| -rw-r--r-- | src/Ryujinx.Tests.Memory/MultiRegionTrackingTests.cs | 439 |
| -rw-r--r-- | src/Ryujinx.Tests.Memory/Ryujinx.Tests.Memory.csproj | 18 |
| -rw-r--r-- | src/Ryujinx.Tests.Memory/Tests.cs | 110 |
| -rw-r--r-- | src/Ryujinx.Tests.Memory/TrackingTests.cs | 509 |
5 files changed, 1185 insertions, 0 deletions
diff --git a/src/Ryujinx.Tests.Memory/MockVirtualMemoryManager.cs b/src/Ryujinx.Tests.Memory/MockVirtualMemoryManager.cs new file mode 100644 index 00000000..ef81a461 --- /dev/null +++ b/src/Ryujinx.Tests.Memory/MockVirtualMemoryManager.cs @@ -0,0 +1,109 @@ +using Ryujinx.Memory.Range; +using System; +using System.Collections.Generic; + +namespace Ryujinx.Memory.Tests +{ + public class MockVirtualMemoryManager : IVirtualMemoryManager + { + public bool Supports4KBPages => true; + + public bool NoMappings = false; + + public event Action<ulong, ulong, MemoryPermission> OnProtect; + + public MockVirtualMemoryManager(ulong size, int pageSize) + { + } + + public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags) + { + throw new NotImplementedException(); + } + + public void MapForeign(ulong va, nuint hostAddress, ulong size) + { + throw new NotImplementedException(); + } + + public void Unmap(ulong va, ulong size) + { + throw new NotImplementedException(); + } + + public T Read<T>(ulong va) where T : unmanaged + { + throw new NotImplementedException(); + } + + public void Read(ulong va, Span<byte> data) + { + throw new NotImplementedException(); + } + + public void Write<T>(ulong va, T value) where T : unmanaged + { + throw new NotImplementedException(); + } + + public void Write(ulong va, ReadOnlySpan<byte> data) + { + throw new NotImplementedException(); + } + + public bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data) + { + throw new NotImplementedException(); + } + + public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false) + { + throw new NotImplementedException(); + } + + public WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false) + { + throw new NotImplementedException(); + } + + public ref T GetRef<T>(ulong va) where T : unmanaged + { + throw new NotImplementedException(); + } + + IEnumerable<HostMemoryRange> IVirtualMemoryManager.GetHostRegions(ulong va, ulong size) + { + throw new NotImplementedException(); + } + + IEnumerable<MemoryRange> IVirtualMemoryManager.GetPhysicalRegions(ulong va, ulong size) + { + return NoMappings ? Array.Empty<MemoryRange>() : new MemoryRange[] { new MemoryRange(va, size) }; + } + + public bool IsMapped(ulong va) + { + return true; + } + + public bool IsRangeMapped(ulong va, ulong size) + { + return true; + } + + public ulong GetPhysicalAddress(ulong va) + { + throw new NotImplementedException(); + } + + public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false, int? 
exemptId = null) + { + throw new NotImplementedException(); + } + + public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection) + { + OnProtect?.Invoke(va, size, protection); + } + } +} diff --git a/src/Ryujinx.Tests.Memory/MultiRegionTrackingTests.cs b/src/Ryujinx.Tests.Memory/MultiRegionTrackingTests.cs new file mode 100644 index 00000000..38cb4921 --- /dev/null +++ b/src/Ryujinx.Tests.Memory/MultiRegionTrackingTests.cs @@ -0,0 +1,439 @@ +using NUnit.Framework; +using Ryujinx.Memory.Tracking; +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Ryujinx.Memory.Tests +{ + public class MultiRegionTrackingTests + { + private const int RndCnt = 3; + + private const ulong MemorySize = 0x8000; + private const int PageSize = 4096; + + private MemoryBlock _memoryBlock; + private MemoryTracking _tracking; + private MockVirtualMemoryManager _memoryManager; + + [SetUp] + public void Setup() + { + _memoryBlock = new MemoryBlock(MemorySize); + _memoryManager = new MockVirtualMemoryManager(MemorySize, PageSize); + _tracking = new MemoryTracking(_memoryManager, PageSize); + } + + [TearDown] + public void Teardown() + { + _memoryBlock.Dispose(); + } + + private IMultiRegionHandle GetGranular(bool smart, ulong address, ulong size, ulong granularity) + { + return smart ? + _tracking.BeginSmartGranularTracking(address, size, granularity, 0) : + (IMultiRegionHandle)_tracking.BeginGranularTracking(address, size, null, granularity, 0); + } + + private void RandomOrder(Random random, List<int> indices, Action<int> action) + { + List<int> choices = indices.ToList(); + + while (choices.Count > 0) + { + int choice = random.Next(choices.Count); + action(choices[choice]); + choices.RemoveAt(choice); + } + } + + private int ExpectQueryInOrder(IMultiRegionHandle handle, ulong startAddress, ulong size, Func<ulong, bool> addressPredicate) + { + int regionCount = 0; + ulong lastAddress = startAddress; + + handle.QueryModified(startAddress, size, (address, range) => + { + Assert.IsTrue(addressPredicate(address)); // Written pages must be even. + Assert.GreaterOrEqual(address, lastAddress); // Must be signalled in ascending order, regardless of write order. + lastAddress = address; + regionCount++; + }); + + return regionCount; + } + + private int ExpectQueryInOrder(IMultiRegionHandle handle, ulong startAddress, ulong size, Func<ulong, bool> addressPredicate, int sequenceNumber) + { + int regionCount = 0; + ulong lastAddress = startAddress; + + handle.QueryModified(startAddress, size, (address, range) => + { + Assert.IsTrue(addressPredicate(address)); // Written pages must be even. + Assert.GreaterOrEqual(address, lastAddress); // Must be signalled in ascending order, regardless of write order. 
+ lastAddress = address; + regionCount++; + }, sequenceNumber); + + return regionCount; + } + + private void PreparePages(IMultiRegionHandle handle, int pageCount, ulong address = 0) + { + Random random = new Random(); + + // Make sure the list has minimum granularity (smart region changes granularity based on requested ranges) + RandomOrder(random, Enumerable.Range(0, pageCount).ToList(), (i) => + { + ulong resultAddress = ulong.MaxValue; + handle.QueryModified((ulong)i * PageSize + address, PageSize, (address, range) => + { + resultAddress = address; + }); + Assert.AreEqual(resultAddress, (ulong)i * PageSize + address); + }); + } + + [Test] + public void DirtyRegionOrdering([Values] bool smart) + { + const int pageCount = 32; + IMultiRegionHandle handle = GetGranular(smart, 0, PageSize * pageCount, PageSize); + + Random random = new Random(); + + PreparePages(handle, pageCount); + + IEnumerable<int> halfRange = Enumerable.Range(0, pageCount / 2); + List<int> odd = halfRange.Select(x => x * 2 + 1).ToList(); + List<int> even = halfRange.Select(x => x * 2).ToList(); + + // Write to all the odd pages. + RandomOrder(random, odd, (i) => + { + _tracking.VirtualMemoryEvent((ulong)i * PageSize, PageSize, true); + }); + + int oddRegionCount = ExpectQueryInOrder(handle, 0, PageSize * pageCount, (address) => (address / PageSize) % 2 == 1); + + Assert.AreEqual(oddRegionCount, pageCount / 2); // Must have written to all odd pages. + + // Write to all the even pages. + RandomOrder(random, even, (i) => + { + _tracking.VirtualMemoryEvent((ulong)i * PageSize, PageSize, true); + }); + + int evenRegionCount = ExpectQueryInOrder(handle, 0, PageSize * pageCount, (address) => (address / PageSize) % 2 == 0); + + Assert.AreEqual(evenRegionCount, pageCount / 2); + } + + [Test] + public void SequenceNumber([Values] bool smart) + { + // The sequence number can be used to ignore dirty flags, and defer their consumption until later. + // If a user consumes a dirty flag with sequence number 1, then there is a write to the protected region, + // the dirty flag will not be acknowledged until the sequence number is 2. + + // This is useful for situations where we know that the data was complete when the sequence number was set. + // ...essentially, when that data can only be updated on a future sequence number. + + const int pageCount = 32; + IMultiRegionHandle handle = GetGranular(smart, 0, PageSize * pageCount, PageSize); + + PreparePages(handle, pageCount); + + Random random = new Random(); + + IEnumerable<int> halfRange = Enumerable.Range(0, pageCount / 2); + List<int> odd = halfRange.Select(x => x * 2 + 1).ToList(); + List<int> even = halfRange.Select(x => x * 2).ToList(); + + // Write to all the odd pages. + RandomOrder(random, odd, (i) => + { + _tracking.VirtualMemoryEvent((ulong)i * PageSize, PageSize, true); + }); + + int oddRegionCount = 0; + + // Track with sequence number 1. Future dirty flags should only be consumed with sequence number != 1. + // Only track the odd pages, so the even ones don't have their sequence number set. + + foreach (int index in odd) + { + handle.QueryModified((ulong)index * PageSize, PageSize, (address, range) => + { + oddRegionCount++; + }, 1); + } + + Assert.AreEqual(oddRegionCount, pageCount / 2); // Must have written to all odd pages. + + // Write to all pages. + + _tracking.VirtualMemoryEvent(0, PageSize * pageCount, true); + + // Only the even regions should be reported for sequence number 1. 
+ + int evenRegionCount = ExpectQueryInOrder(handle, 0, PageSize * pageCount, (address) => (address / PageSize) % 2 == 0, 1); + + Assert.AreEqual(evenRegionCount, pageCount / 2); // Must have written to all even pages. + + oddRegionCount = 0; + + handle.QueryModified(0, PageSize * pageCount, (address, range) => { oddRegionCount++; }, 1); + + Assert.AreEqual(oddRegionCount, 0); // Sequence number has not changed, so found no dirty subregions. + + // With sequence number 2, all all pages should be reported as modified. + + oddRegionCount = ExpectQueryInOrder(handle, 0, PageSize * pageCount, (address) => (address / PageSize) % 2 == 1, 2); + + Assert.AreEqual(oddRegionCount, pageCount / 2); // Must have written to all odd pages. + } + + [Test] + public void SmartRegionTracking() + { + // Smart multi region handles dynamically change their tracking granularity based on QueryMemory calls. + // This can save on reprotects on larger resources. + + const int pageCount = 32; + IMultiRegionHandle handle = GetGranular(true, 0, PageSize * pageCount, PageSize); + + // Query some large regions to prep the subdivision of the tracking region. + + int[] regionSizes = new int[] { 6, 4, 3, 2, 6, 1 }; + ulong address = 0; + + for (int i = 0; i < regionSizes.Length; i++) + { + int region = regionSizes[i]; + handle.QueryModified(address, (ulong)(PageSize * region), (address, size) => { }); + + // There should be a gap between regions, + // So that they don't combine and we can see the full effects. + address += (ulong)(PageSize * (region + 1)); + } + + // Clear modified. + handle.QueryModified((address, size) => { }); + + // Trigger each region with a 1 byte write. + address = 0; + + for (int i = 0; i < regionSizes.Length; i++) + { + int region = regionSizes[i]; + _tracking.VirtualMemoryEvent(address, 1, true); + address += (ulong)(PageSize * (region + 1)); + } + + int regionInd = 0; + ulong expectedAddress = 0; + + // Expect each region to trigger in its entirety, in address ascending order. + handle.QueryModified((address, size) => { + int region = regionSizes[regionInd++]; + + Assert.AreEqual(address, expectedAddress); + Assert.AreEqual(size, (ulong)(PageSize * region)); + + expectedAddress += (ulong)(PageSize * (region + 1)); + }); + } + + [Test] + public void DisposeMultiHandles([Values] bool smart) + { + // Create and initialize two overlapping Multi Region Handles, with PageSize granularity. + const int pageCount = 32; + const int overlapStart = 16; + + Assert.AreEqual(0, _tracking.GetRegionCount()); + + IMultiRegionHandle handleLow = GetGranular(smart, 0, PageSize * pageCount, PageSize); + PreparePages(handleLow, pageCount); + + Assert.AreEqual(pageCount, _tracking.GetRegionCount()); + + IMultiRegionHandle handleHigh = GetGranular(smart, PageSize * overlapStart, PageSize * pageCount, PageSize); + PreparePages(handleHigh, pageCount, PageSize * overlapStart); + + // Combined pages (and assuming overlapStart <= pageCount) should be pageCount after overlapStart. + int totalPages = overlapStart + pageCount; + + Assert.AreEqual(totalPages, _tracking.GetRegionCount()); + + handleLow.Dispose(); // After disposing one, the pages for the other remain. + + Assert.AreEqual(pageCount, _tracking.GetRegionCount()); + + handleHigh.Dispose(); // After disposing the other, there are no pages left. 
+ + Assert.AreEqual(0, _tracking.GetRegionCount()); + } + + [Test] + public void InheritHandles() + { + // Test merging the following into a granular region handle: + // - 3x gap (creates new granular handles) + // - 3x from multiregion: not dirty, dirty and with action + // - 2x gap + // - 3x single page: not dirty, dirty and with action + // - 3x two page: not dirty, dirty and with action (handle is not reused, but its state is copied to the granular handles) + // - 1x gap + // For a total of 18 pages. + + bool[] actionsTriggered = new bool[3]; + + MultiRegionHandle granular = _tracking.BeginGranularTracking(PageSize * 3, PageSize * 3, null, PageSize, 0); + PreparePages(granular, 3, PageSize * 3); + + // Write to the second handle in the multiregion. + _tracking.VirtualMemoryEvent(PageSize * 4, PageSize, true); + + // Add an action to the third handle in the multiregion. + granular.RegisterAction(PageSize * 5, PageSize, (_, _) => { actionsTriggered[0] = true; }); + + RegionHandle[] singlePages = new RegionHandle[3]; + + for (int i = 0; i < 3; i++) + { + singlePages[i] = _tracking.BeginTracking(PageSize * (8 + (ulong)i), PageSize, 0); + singlePages[i].Reprotect(); + } + + // Write to the second handle. + _tracking.VirtualMemoryEvent(PageSize * 9, PageSize, true); + + // Add an action to the third handle. + singlePages[2].RegisterAction((_, _) => { actionsTriggered[1] = true; }); + + RegionHandle[] doublePages = new RegionHandle[3]; + + for (int i = 0; i < 3; i++) + { + doublePages[i] = _tracking.BeginTracking(PageSize * (11 + (ulong)i * 2), PageSize * 2, 0); + doublePages[i].Reprotect(); + } + + // Write to the second handle. + _tracking.VirtualMemoryEvent(PageSize * 13, PageSize * 2, true); + + // Add an action to the third handle. + doublePages[2].RegisterAction((_, _) => { actionsTriggered[2] = true; }); + + // Finally, create a granular handle that inherits all these handles. + + IEnumerable<IRegionHandle>[] handleGroups = new IEnumerable<IRegionHandle>[] + { + granular.GetHandles(), + singlePages, + doublePages + }; + + MultiRegionHandle combined = _tracking.BeginGranularTracking(0, PageSize * 18, handleGroups.SelectMany((handles) => handles), PageSize, 0); + + bool[] expectedDirty = new bool[] + { + true, true, true, // Gap. + false, true, false, // Multi-region. + true, true, // Gap. + false, true, false, // Individual handles. + false, false, true, true, false, false, // Double size handles. + true // Gap. + }; + + for (int i = 0; i < 18; i++) + { + bool modified = false; + combined.QueryModified(PageSize * (ulong)i, PageSize, (_, _) => { modified = true; }); + + Assert.AreEqual(expectedDirty[i], modified); + } + + Assert.AreEqual(new bool[3], actionsTriggered); + + _tracking.VirtualMemoryEvent(PageSize * 5, PageSize, false); + Assert.IsTrue(actionsTriggered[0]); + + _tracking.VirtualMemoryEvent(PageSize * 10, PageSize, false); + Assert.IsTrue(actionsTriggered[1]); + + _tracking.VirtualMemoryEvent(PageSize * 15, PageSize, false); + Assert.IsTrue(actionsTriggered[2]); + + // The double page handles should be disposed, as they were split into granular handles. + foreach (RegionHandle doublePage in doublePages) + { + // These should have been disposed. 
+ bool throws = false; + + try + { + doublePage.Dispose(); + } + catch (ObjectDisposedException) + { + throws = true; + } + + Assert.IsTrue(throws); + } + + IEnumerable<IRegionHandle> combinedHandles = combined.GetHandles(); + + Assert.AreEqual(handleGroups[0].ElementAt(0), combinedHandles.ElementAt(3)); + Assert.AreEqual(handleGroups[0].ElementAt(1), combinedHandles.ElementAt(4)); + Assert.AreEqual(handleGroups[0].ElementAt(2), combinedHandles.ElementAt(5)); + + Assert.AreEqual(singlePages[0], combinedHandles.ElementAt(8)); + Assert.AreEqual(singlePages[1], combinedHandles.ElementAt(9)); + Assert.AreEqual(singlePages[2], combinedHandles.ElementAt(10)); + } + + [Test] + public void PreciseAction() + { + bool actionTriggered = false; + + MultiRegionHandle granular = _tracking.BeginGranularTracking(PageSize * 3, PageSize * 3, null, PageSize, 0); + PreparePages(granular, 3, PageSize * 3); + + // Add a precise action to the second and third handle in the multiregion. + granular.RegisterPreciseAction(PageSize * 4, PageSize * 2, (_, _, _) => { actionTriggered = true; return true; }); + + // Precise write to first handle in the multiregion. + _tracking.VirtualMemoryEvent(PageSize * 3, PageSize, true, precise: true); + Assert.IsFalse(actionTriggered); // Action not triggered. + + bool firstPageModified = false; + granular.QueryModified(PageSize * 3, PageSize, (_, _) => { firstPageModified = true; }); + Assert.IsTrue(firstPageModified); // First page is modified. + + // Precise write to all handles in the multiregion. + _tracking.VirtualMemoryEvent(PageSize * 3, PageSize * 3, true, precise: true); + + bool[] pagesModified = new bool[3]; + + for (int i = 3; i < 6; i++) + { + int index = i - 3; + granular.QueryModified(PageSize * (ulong)i, PageSize, (_, _) => { pagesModified[index] = true; }); + } + + Assert.IsTrue(actionTriggered); // Action triggered. + + // Precise writes are ignored on two later handles due to the action returning true. 
+ Assert.AreEqual(pagesModified, new bool[] { true, false, false }); + } + } +} diff --git a/src/Ryujinx.Tests.Memory/Ryujinx.Tests.Memory.csproj b/src/Ryujinx.Tests.Memory/Ryujinx.Tests.Memory.csproj new file mode 100644 index 00000000..4dcb6962 --- /dev/null +++ b/src/Ryujinx.Tests.Memory/Ryujinx.Tests.Memory.csproj @@ -0,0 +1,18 @@ +<Project Sdk="Microsoft.NET.Sdk"> + + <PropertyGroup> + <TargetFramework>net7.0</TargetFramework> + <IsPackable>false</IsPackable> + </PropertyGroup> + + <ItemGroup> + <PackageReference Include="Microsoft.NET.Test.Sdk" /> + <PackageReference Include="NUnit" /> + <PackageReference Include="NUnit3TestAdapter" /> + </ItemGroup> + + <ItemGroup> + <ProjectReference Include="..\Ryujinx.Memory\Ryujinx.Memory.csproj" /> + </ItemGroup> + +</Project> diff --git a/src/Ryujinx.Tests.Memory/Tests.cs b/src/Ryujinx.Tests.Memory/Tests.cs new file mode 100644 index 00000000..2717b76a --- /dev/null +++ b/src/Ryujinx.Tests.Memory/Tests.cs @@ -0,0 +1,110 @@ +using NUnit.Framework; +using System; +using System.Runtime.InteropServices; + +namespace Ryujinx.Memory.Tests +{ + public class Tests + { + private const ulong MemorySize = 0x8000; + + private MemoryBlock _memoryBlock; + + [SetUp] + public void Setup() + { + _memoryBlock = new MemoryBlock(MemorySize); + } + + [TearDown] + public void Teardown() + { + _memoryBlock.Dispose(); + } + + [Test] + public void Test_Read() + { + Marshal.WriteInt32(_memoryBlock.Pointer, 0x2020, 0x1234abcd); + + Assert.AreEqual(_memoryBlock.Read<int>(0x2020), 0x1234abcd); + } + + [Test] + public void Test_Write() + { + _memoryBlock.Write(0x2040, 0xbadc0de); + + Assert.AreEqual(Marshal.ReadInt32(_memoryBlock.Pointer, 0x2040), 0xbadc0de); + } + + [Test] + // Memory aliasing tests fail on CI at the moment. + [Platform(Exclude = "MacOsX")] + public void Test_Alias() + { + using MemoryBlock backing = new MemoryBlock(0x10000, MemoryAllocationFlags.Mirrorable); + using MemoryBlock toAlias = new MemoryBlock(0x10000, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible); + + toAlias.MapView(backing, 0x1000, 0, 0x4000); + toAlias.UnmapView(backing, 0x3000, 0x1000); + + toAlias.Write(0, 0xbadc0de); + Assert.AreEqual(Marshal.ReadInt32(backing.Pointer, 0x1000), 0xbadc0de); + } + + [Test] + // Memory aliasing tests fail on CI at the moment. + [Platform(Exclude = "MacOsX")] + public void Test_AliasRandom() + { + using MemoryBlock backing = new MemoryBlock(0x80000, MemoryAllocationFlags.Mirrorable); + using MemoryBlock toAlias = new MemoryBlock(0x80000, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible); + + Random rng = new Random(123); + + for (int i = 0; i < 20000; i++) + { + int srcPage = rng.Next(0, 64); + int dstPage = rng.Next(0, 64); + int pages = rng.Next(1, 65); + + if ((rng.Next() & 1) != 0) + { + toAlias.MapView(backing, (ulong)srcPage << 12, (ulong)dstPage << 12, (ulong)pages << 12); + + int offset = rng.Next(0, 0x1000 - sizeof(int)); + + toAlias.Write((ulong)((dstPage << 12) + offset), 0xbadc0de); + Assert.AreEqual(Marshal.ReadInt32(backing.Pointer, (srcPage << 12) + offset), 0xbadc0de); + } + else + { + toAlias.UnmapView(backing, (ulong)dstPage << 12, (ulong)pages << 12); + } + } + } + + [Test] + // Memory aliasing tests fail on CI at the moment. + [Platform(Exclude = "MacOsX")] + public void Test_AliasMapLeak() + { + ulong pageSize = 4096; + ulong size = 100000 * pageSize; // The mappings limit on Linux is usually around 65K, so let's make sure we are above that. 
+ + using MemoryBlock backing = new MemoryBlock(pageSize, MemoryAllocationFlags.Mirrorable); + using MemoryBlock toAlias = new MemoryBlock(size, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible); + + for (ulong offset = 0; offset < size; offset += pageSize) + { + toAlias.MapView(backing, 0, offset, pageSize); + + toAlias.Write(offset, 0xbadc0de); + Assert.AreEqual(0xbadc0de, backing.Read<int>(0)); + + toAlias.UnmapView(backing, offset, pageSize); + } + } + } +}
\ No newline at end of file diff --git a/src/Ryujinx.Tests.Memory/TrackingTests.cs b/src/Ryujinx.Tests.Memory/TrackingTests.cs new file mode 100644 index 00000000..eb679804 --- /dev/null +++ b/src/Ryujinx.Tests.Memory/TrackingTests.cs @@ -0,0 +1,509 @@ +using NUnit.Framework; +using Ryujinx.Memory.Tracking; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Threading; + +namespace Ryujinx.Memory.Tests +{ + public class TrackingTests + { + private const int RndCnt = 3; + + private const ulong MemorySize = 0x8000; + private const int PageSize = 4096; + + private MemoryBlock _memoryBlock; + private MemoryTracking _tracking; + private MockVirtualMemoryManager _memoryManager; + + [SetUp] + public void Setup() + { + _memoryBlock = new MemoryBlock(MemorySize); + _memoryManager = new MockVirtualMemoryManager(MemorySize, PageSize); + _tracking = new MemoryTracking(_memoryManager, PageSize); + } + + [TearDown] + public void Teardown() + { + _memoryBlock.Dispose(); + } + + private bool TestSingleWrite(RegionHandle handle, ulong address, ulong size) + { + handle.Reprotect(); + + _tracking.VirtualMemoryEvent(address, size, true); + + return handle.Dirty; + } + + [Test] + public void SingleRegion() + { + RegionHandle handle = _tracking.BeginTracking(0, PageSize, 0); + (ulong address, ulong size)? readTrackingTriggered = null; + handle.RegisterAction((address, size) => + { + readTrackingTriggered = (address, size); + }); + + bool dirtyInitial = handle.Dirty; + Assert.True(dirtyInitial); // Handle starts dirty. + + handle.Reprotect(); + + bool dirtyAfterReprotect = handle.Dirty; + Assert.False(dirtyAfterReprotect); // Handle is no longer dirty. + + _tracking.VirtualMemoryEvent(PageSize * 2, 4, true); + _tracking.VirtualMemoryEvent(PageSize * 2, 4, false); + + bool dirtyAfterUnrelatedReadWrite = handle.Dirty; + Assert.False(dirtyAfterUnrelatedReadWrite); // Not dirtied, as the write was to an unrelated address. + + Assert.IsNull(readTrackingTriggered); // Hasn't been triggered yet + + _tracking.VirtualMemoryEvent(0, 4, false); + + bool dirtyAfterRelatedRead = handle.Dirty; + Assert.False(dirtyAfterRelatedRead); // Only triggers on write. + Assert.AreEqual(readTrackingTriggered, (0UL, 4UL)); // Read action was triggered. + + readTrackingTriggered = null; + _tracking.VirtualMemoryEvent(0, 4, true); + + bool dirtyAfterRelatedWrite = handle.Dirty; + Assert.True(dirtyAfterRelatedWrite); // Dirty flag should now be set. + + _tracking.VirtualMemoryEvent(4, 4, true); + bool dirtyAfterRelatedWrite2 = handle.Dirty; + Assert.True(dirtyAfterRelatedWrite2); // Dirty flag should still be set. + + handle.Reprotect(); + + bool dirtyAfterReprotect2 = handle.Dirty; + Assert.False(dirtyAfterReprotect2); // Handle is no longer dirty. + + handle.Dispose(); + + bool dirtyAfterDispose = TestSingleWrite(handle, 0, 4); + Assert.False(dirtyAfterDispose); // Handle cannot be triggered when disposed + } + + [Test] + public void OverlappingRegions() + { + RegionHandle allHandle = _tracking.BeginTracking(0, PageSize * 16, 0); + allHandle.Reprotect(); + + (ulong address, ulong size)? readTrackingTriggeredAll = null; + Action registerReadAction = () => + { + readTrackingTriggeredAll = null; + allHandle.RegisterAction((address, size) => + { + readTrackingTriggeredAll = (address, size); + }); + }; + registerReadAction(); + + // Create 16 page sized handles contained within the allHandle. 
+ RegionHandle[] containedHandles = new RegionHandle[16]; + + for (int i = 0; i < 16; i++) + { + containedHandles[i] = _tracking.BeginTracking((ulong)i * PageSize, PageSize, 0); + containedHandles[i].Reprotect(); + } + + for (int i = 0; i < 16; i++) + { + // No handles are dirty. + Assert.False(allHandle.Dirty); + Assert.IsNull(readTrackingTriggeredAll); + for (int j = 0; j < 16; j++) + { + Assert.False(containedHandles[j].Dirty); + } + + _tracking.VirtualMemoryEvent((ulong)i * PageSize, 1, true); + + // Only the handle covering the entire range and the relevant contained handle are dirty. + Assert.True(allHandle.Dirty); + Assert.AreEqual(readTrackingTriggeredAll, ((ulong)i * PageSize, 1UL)); // Triggered read tracking + for (int j = 0; j < 16; j++) + { + if (j == i) + { + Assert.True(containedHandles[j].Dirty); + } + else + { + Assert.False(containedHandles[j].Dirty); + } + } + + // Clear flags and reset read action. + registerReadAction(); + allHandle.Reprotect(); + containedHandles[i].Reprotect(); + } + } + + [Test] + public void PageAlignment( + [Values(1ul, 512ul, 2048ul, 4096ul, 65536ul)] [Random(1ul, 65536ul, RndCnt)] ulong address, + [Values(1ul, 4ul, 1024ul, 4096ul, 65536ul)] [Random(1ul, 65536ul, RndCnt)] ulong size) + { + ulong alignedStart = (address / PageSize) * PageSize; + ulong alignedEnd = ((address + size + PageSize - 1) / PageSize) * PageSize; + ulong alignedSize = alignedEnd - alignedStart; + + RegionHandle handle = _tracking.BeginTracking(address, size, 0); + + // Anywhere inside the pages the region is contained on should trigger. + + bool originalRangeTriggers = TestSingleWrite(handle, address, size); + Assert.True(originalRangeTriggers); + + bool alignedRangeTriggers = TestSingleWrite(handle, alignedStart, alignedSize); + Assert.True(alignedRangeTriggers); + + bool alignedStartTriggers = TestSingleWrite(handle, alignedStart, 1); + Assert.True(alignedStartTriggers); + + bool alignedEndTriggers = TestSingleWrite(handle, alignedEnd - 1, 1); + Assert.True(alignedEndTriggers); + + // Outside the tracked range should not trigger. + + bool alignedBeforeTriggers = TestSingleWrite(handle, alignedStart - 1, 1); + Assert.False(alignedBeforeTriggers); + + bool alignedAfterTriggers = TestSingleWrite(handle, alignedEnd, 1); + Assert.False(alignedAfterTriggers); + } + + [Test, Explicit, Timeout(1000)] + public void Multithreading() + { + // Multithreading sanity test + // Multiple threads can easily read/write memory regions from any existing handle. + // Handles can also be owned by different threads, though they should have one owner thread. + // Handles can be created and disposed at any time, by any thread. + + // This test should not throw or deadlock due to invalid state. 
+ + const int threadCount = 1; + const int handlesPerThread = 16; + long finishedTime = 0; + + RegionHandle[] handles = new RegionHandle[threadCount * handlesPerThread]; + Random globalRand = new Random(); + + for (int i = 0; i < handles.Length; i++) + { + handles[i] = _tracking.BeginTracking((ulong)i * PageSize, PageSize, 0); + handles[i].Reprotect(); + } + + List<Thread> testThreads = new List<Thread>(); + + // Dirty flag consumer threads + int dirtyFlagReprotects = 0; + for (int i = 0; i < threadCount; i++) + { + int randSeed = i; + testThreads.Add(new Thread(() => + { + int handleBase = randSeed * handlesPerThread; + while (Stopwatch.GetTimestamp() < finishedTime) + { + Random random = new Random(randSeed); + RegionHandle handle = handles[handleBase + random.Next(handlesPerThread)]; + + if (handle.Dirty) + { + handle.Reprotect(); + Interlocked.Increment(ref dirtyFlagReprotects); + } + } + })); + } + + // Write trigger threads + int writeTriggers = 0; + for (int i = 0; i < threadCount; i++) + { + int randSeed = i; + testThreads.Add(new Thread(() => + { + Random random = new Random(randSeed); + ulong handleBase = (ulong)(randSeed * handlesPerThread * PageSize); + while (Stopwatch.GetTimestamp() < finishedTime) + { + _tracking.VirtualMemoryEvent(handleBase + (ulong)random.Next(PageSize * handlesPerThread), PageSize / 2, true); + Interlocked.Increment(ref writeTriggers); + } + })); + } + + // Handle create/delete threads + int handleLifecycles = 0; + for (int i = 0; i < threadCount; i++) + { + int randSeed = i; + testThreads.Add(new Thread(() => + { + int maxAddress = threadCount * handlesPerThread * PageSize; + Random random = new Random(randSeed + 512); + while (Stopwatch.GetTimestamp() < finishedTime) + { + RegionHandle handle = _tracking.BeginTracking((ulong)random.Next(maxAddress), (ulong)random.Next(65536), 0); + + handle.Dispose(); + + Interlocked.Increment(ref handleLifecycles); + } + })); + } + + finishedTime = Stopwatch.GetTimestamp() + Stopwatch.Frequency / 2; // Run for 500ms; + + foreach (Thread thread in testThreads) + { + thread.Start(); + } + + foreach (Thread thread in testThreads) + { + thread.Join(); + } + + Assert.Greater(dirtyFlagReprotects, 10); + Assert.Greater(writeTriggers, 10); + Assert.Greater(handleLifecycles, 10); + } + + [Test] + public void ReadActionThreadConsumption() + { + // Read actions should only be triggered once for each registration. + // The implementation should use an interlocked exchange to make sure other threads can't get the action. 
+ + RegionHandle handle = _tracking.BeginTracking(0, PageSize, 0); + + int triggeredCount = 0; + int registeredCount = 0; + int signalThreadsDone = 0; + bool isRegistered = false; + + Action registerReadAction = () => + { + registeredCount++; + handle.RegisterAction((address, size) => + { + isRegistered = false; + Interlocked.Increment(ref triggeredCount); + }); + }; + + const int threadCount = 16; + const int iterationCount = 10000; + Thread[] signalThreads = new Thread[threadCount]; + + for (int i = 0; i < threadCount; i++) + { + int randSeed = i; + signalThreads[i] = new Thread(() => + { + Random random = new Random(randSeed); + for (int j = 0; j < iterationCount; j++) + { + _tracking.VirtualMemoryEvent((ulong)random.Next(PageSize), 4, false); + } + Interlocked.Increment(ref signalThreadsDone); + }); + } + + for (int i = 0; i < threadCount; i++) + { + signalThreads[i].Start(); + } + + while (signalThreadsDone != -1) + { + if (signalThreadsDone == threadCount) + { + signalThreadsDone = -1; + } + + if (!isRegistered) + { + isRegistered = true; + registerReadAction(); + } + } + + // The action should trigger exactly once for every registration, + // then we register once after all the threads signalling it cease. + Assert.AreEqual(registeredCount, triggeredCount + 1); + } + + [Test] + public void DisposeHandles() + { + // Ensure that disposed handles correctly remove their virtual and physical regions. + + RegionHandle handle = _tracking.BeginTracking(0, PageSize, 0); + handle.Reprotect(); + + Assert.AreEqual(1, _tracking.GetRegionCount()); + + handle.Dispose(); + + Assert.AreEqual(0, _tracking.GetRegionCount()); + + // Two handles, small entirely contains big. + // We expect there to be three regions after creating both, one for the small region and two covering the big one around it. + // Regions are always split to avoid overlapping, which is why there are three instead of two. + + RegionHandle handleSmall = _tracking.BeginTracking(PageSize, PageSize, 0); + RegionHandle handleBig = _tracking.BeginTracking(0, PageSize * 4, 0); + + Assert.AreEqual(3, _tracking.GetRegionCount()); + + // After disposing the big region, only the small one will remain. + handleBig.Dispose(); + + Assert.AreEqual(1, _tracking.GetRegionCount()); + + handleSmall.Dispose(); + + Assert.AreEqual(0, _tracking.GetRegionCount()); + } + + [Test] + public void ReadAndWriteProtection() + { + MemoryPermission protection = MemoryPermission.ReadAndWrite; + + _memoryManager.OnProtect += (va, size, newProtection) => + { + Assert.AreEqual((0, PageSize), (va, size)); // Should protect the exact region all the operations use. + protection = newProtection; + }; + + RegionHandle handle = _tracking.BeginTracking(0, PageSize, 0); + + // After creating the handle, there is no protection yet. + Assert.AreEqual(MemoryPermission.ReadAndWrite, protection); + + bool dirtyInitial = handle.Dirty; + Assert.True(dirtyInitial); // Handle starts dirty. + + handle.Reprotect(); + + // After a reprotect, there is write protection, which will set a dirty flag when any write happens. + Assert.AreEqual(MemoryPermission.Read, protection); + + (ulong address, ulong size)? readTrackingTriggered = null; + handle.RegisterAction((address, size) => + { + readTrackingTriggered = (address, size); + }); + + // Registering an action adds read/write protection. + Assert.AreEqual(MemoryPermission.None, protection); + + bool dirtyAfterReprotect = handle.Dirty; + Assert.False(dirtyAfterReprotect); // Handle is no longer dirty. 
+ + // First we should read, which will trigger the action. This _should not_ remove write protection on the memory. + + _tracking.VirtualMemoryEvent(0, 4, false); + + bool dirtyAfterRead = handle.Dirty; + Assert.False(dirtyAfterRead); // Not dirtied, as this was a read. + + Assert.AreEqual(readTrackingTriggered, (0UL, 4UL)); // Read action was triggered. + + Assert.AreEqual(MemoryPermission.Read, protection); // Write protection is still present. + + readTrackingTriggered = null; + + // Now, perform a write. + + _tracking.VirtualMemoryEvent(0, 4, true); + + bool dirtyAfterWriteAfterRead = handle.Dirty; + Assert.True(dirtyAfterWriteAfterRead); // Should be dirty. + + Assert.AreEqual(MemoryPermission.ReadAndWrite, protection); // All protection is now be removed from the memory. + + Assert.IsNull(readTrackingTriggered); // Read tracking was removed when the action fired, as it can only fire once. + + handle.Dispose(); + } + + [Test] + public void PreciseAction() + { + RegionHandle handle = _tracking.BeginTracking(0, PageSize, 0); + + (ulong address, ulong size, bool write)? preciseTriggered = null; + handle.RegisterPreciseAction((address, size, write) => + { + preciseTriggered = (address, size, write); + + return true; + }); + + (ulong address, ulong size)? readTrackingTriggered = null; + handle.RegisterAction((address, size) => + { + readTrackingTriggered = (address, size); + }); + + handle.Reprotect(); + + _tracking.VirtualMemoryEvent(0, 4, false, precise: true); + + Assert.IsNull(readTrackingTriggered); // Hasn't been triggered - precise action returned true. + Assert.AreEqual(preciseTriggered, (0UL, 4UL, false)); // Precise action was triggered. + + _tracking.VirtualMemoryEvent(0, 4, true, precise: true); + + Assert.IsNull(readTrackingTriggered); // Still hasn't been triggered. + bool dirtyAfterPreciseActionTrue = handle.Dirty; + Assert.False(dirtyAfterPreciseActionTrue); // Not dirtied - precise action returned true. + Assert.AreEqual(preciseTriggered, (0UL, 4UL, true)); // Precise action was triggered. + + // Handle is now dirty. + handle.Reprotect(true); + preciseTriggered = null; + + _tracking.VirtualMemoryEvent(4, 4, true, precise: true); + Assert.AreEqual(preciseTriggered, (4UL, 4UL, true)); // Precise action was triggered even though handle was dirty. + + handle.Reprotect(); + handle.RegisterPreciseAction((address, size, write) => + { + preciseTriggered = (address, size, write); + + return false; // Now, we return false, which indicates that the regular read/write behaviours should trigger. + }); + + _tracking.VirtualMemoryEvent(8, 4, true, precise: true); + + Assert.AreEqual(readTrackingTriggered, (8UL, 4UL)); // Read action triggered, as precise action returned false. + bool dirtyAfterPreciseActionFalse = handle.Dirty; + Assert.True(dirtyAfterPreciseActionFalse); // Dirtied, as precise action returned false. + Assert.AreEqual(preciseTriggered, (8UL, 4UL, true)); // Precise action was triggered. + } + } +} |
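The tests above all drive the same core surface: a `MemoryTracking` built over a virtual memory manager, and `RegionHandle`s that are reprotected and then dirtied by `VirtualMemoryEvent` calls. A minimal sketch of that single-handle flow, assuming the types behave exactly as the `TrackingTests.cs` fixtures use them (the mock manager and constants mirror the test setup; no API beyond what the diff shows is assumed):

```csharp
using Ryujinx.Memory.Tests;     // MockVirtualMemoryManager, defined in this test project
using Ryujinx.Memory.Tracking;

const int PageSize = 4096;
const ulong MemorySize = 0x8000;

// Mirror the fixture: a mock VMM that only surfaces reprotection callbacks.
var memoryManager = new MockVirtualMemoryManager(MemorySize, PageSize);
var tracking = new MemoryTracking(memoryManager, PageSize);

// Track one page. Handles start out dirty.
RegionHandle handle = tracking.BeginTracking(0, PageSize, 0);
handle.Reprotect();                       // clears Dirty and write-protects the page

tracking.VirtualMemoryEvent(0, 4, true);  // simulate a 4-byte guest write at VA 0
bool dirty = handle.Dirty;                // true again after the write

handle.Dispose();                         // removes the tracked region
```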
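`MultiRegionTrackingTests.cs` layers granular handles on top of the same tracking object and consumes dirty state through `QueryModified` rather than the `Dirty` flag. A sketch of that flow under the same assumptions (the four-page size and the console output are illustrative only):

```csharp
using System;
using Ryujinx.Memory.Tests;
using Ryujinx.Memory.Tracking;

const int PageSize = 4096;
const int PageCount = 4;

var tracking = new MemoryTracking(new MockVirtualMemoryManager(0x8000, PageSize), PageSize);

// One tracked range split into page-sized sub-regions (no inherited handles).
MultiRegionHandle granular = tracking.BeginGranularTracking(0, (ulong)(PageSize * PageCount), null, PageSize, 0);

// Dirty the second page.
tracking.VirtualMemoryEvent(PageSize, 4, true);

// QueryModified reports each dirty sub-region in ascending address order and clears it.
granular.QueryModified(0, (ulong)(PageSize * PageCount), (address, size) =>
{
    Console.WriteLine($"modified: 0x{address:X} + 0x{size:X}");
});

// The sequence-number overload defers re-reporting a sub-region until the number changes,
// which is what the SequenceNumber test relies on.
granular.QueryModified(0, (ulong)(PageSize * PageCount), (address, size) => { /* consume later */ }, 1);
```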
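Separately, `Tests.cs` exercises `MemoryBlock` view aliasing (excluded on macOS CI via the `[Platform]` attributes). A condensed sketch of the `Test_Alias` setup, using the same flags and offsets as the test:

```csharp
using System.Runtime.InteropServices;
using Ryujinx.Memory;

// A mirrorable backing block, and a reserved, view-compatible block to alias it through.
using MemoryBlock backing = new MemoryBlock(0x10000, MemoryAllocationFlags.Mirrorable);
using MemoryBlock toAlias = new MemoryBlock(0x10000, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible);

// Map 0x4000 bytes of the backing block (starting at 0x1000) to offset 0 of the alias,
// then drop the top page of that view again.
toAlias.MapView(backing, 0x1000, 0, 0x4000);
toAlias.UnmapView(backing, 0x3000, 0x1000);

// Writes through the alias land in the backing block at the mapped offset.
toAlias.Write(0, 0xbadc0de);
int readBack = Marshal.ReadInt32(backing.Pointer, 0x1000);   // 0x0badc0de
```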
