Diffstat (limited to 'Ryujinx.Common/Memory')
4 files changed, 352 insertions, 0 deletions
diff --git a/Ryujinx.Common/Memory/PartialUnmaps/NativeReaderWriterLock.cs b/Ryujinx.Common/Memory/PartialUnmaps/NativeReaderWriterLock.cs
new file mode 100644
index 00000000..5419b340
--- /dev/null
+++ b/Ryujinx.Common/Memory/PartialUnmaps/NativeReaderWriterLock.cs
@@ -0,0 +1,80 @@
+using System.Runtime.InteropServices;
+using System.Threading;
+
+using static Ryujinx.Common.Memory.PartialUnmaps.PartialUnmapHelpers;
+
+namespace Ryujinx.Common.Memory.PartialUnmaps
+{
+    /// <summary>
+    /// A simple implementation of a ReaderWriterLock which can be used from native code.
+    /// </summary>
+    [StructLayout(LayoutKind.Sequential, Pack = 1)]
+    public struct NativeReaderWriterLock
+    {
+        public int WriteLock;
+        public int ReaderCount;
+
+        public static int WriteLockOffset;
+        public static int ReaderCountOffset;
+
+        /// <summary>
+        /// Populates the field offsets for use when emitting native code.
+        /// </summary>
+        static NativeReaderWriterLock()
+        {
+            NativeReaderWriterLock instance = new NativeReaderWriterLock();
+
+            WriteLockOffset = OffsetOf(ref instance, ref instance.WriteLock);
+            ReaderCountOffset = OffsetOf(ref instance, ref instance.ReaderCount);
+        }
+
+        /// <summary>
+        /// Acquires the reader lock.
+        /// </summary>
+        public void AcquireReaderLock()
+        {
+            // Must take write lock for a very short time to become a reader.
+
+            while (Interlocked.CompareExchange(ref WriteLock, 1, 0) != 0) { }
+
+            Interlocked.Increment(ref ReaderCount);
+
+            Interlocked.Exchange(ref WriteLock, 0);
+        }
+
+        /// <summary>
+        /// Releases the reader lock.
+        /// </summary>
+        public void ReleaseReaderLock()
+        {
+            Interlocked.Decrement(ref ReaderCount);
+        }
+
+        /// <summary>
+        /// Upgrades to a writer lock. The reader lock is temporarily released while obtaining the writer lock.
+        /// </summary>
+        public void UpgradeToWriterLock()
+        {
+            // Prevent any more threads from entering reader.
+            // If the write lock is already taken, wait for it to not be taken.
+
+            Interlocked.Decrement(ref ReaderCount);
+
+            while (Interlocked.CompareExchange(ref WriteLock, 1, 0) != 0) { }
+
+            // Wait for reader count to drop to 0, then take the lock again as the only reader.
+
+            while (Interlocked.CompareExchange(ref ReaderCount, 1, 0) != 0) { }
+        }
+
+        /// <summary>
+        /// Downgrades from a writer lock, back to a reader one.
+        /// </summary>
+        public void DowngradeFromWriterLock()
+        {
+            // Release the WriteLock.
+
+            Interlocked.Exchange(ref WriteLock, 0);
+        }
+    }
+}
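
For context, a minimal sketch of the intended acquire/upgrade/downgrade sequence follows. The lock type and its methods come from the diff above; the surrounding class and scenario are purely illustrative and not part of the commit.

using Ryujinx.Common.Memory.PartialUnmaps;

static class NativeReaderWriterLockSketch // hypothetical, illustration only
{
    static NativeReaderWriterLock _lock;

    static void Example()
    {
        // Any number of threads can hold the reader lock at once.
        _lock.AcquireReaderLock();

        // A thread that needs exclusive access upgrades; note that the reader
        // lock is briefly released while the writer lock is being obtained.
        _lock.UpgradeToWriterLock();

        // ... exclusive work ...

        // Drop back to reading (the thread keeps its reader slot), then release.
        _lock.DowngradeFromWriterLock();
        _lock.ReleaseReaderLock();
    }
}
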
diff --git a/Ryujinx.Common/Memory/PartialUnmaps/PartialUnmapHelpers.cs b/Ryujinx.Common/Memory/PartialUnmaps/PartialUnmapHelpers.cs
new file mode 100644
index 00000000..e650de06
--- /dev/null
+++ b/Ryujinx.Common/Memory/PartialUnmaps/PartialUnmapHelpers.cs
@@ -0,0 +1,20 @@
+using System.Runtime.CompilerServices;
+
+namespace Ryujinx.Common.Memory.PartialUnmaps
+{
+    static class PartialUnmapHelpers
+    {
+        /// <summary>
+        /// Calculates a byte offset of a given field within a struct.
+        /// </summary>
+        /// <typeparam name="T">Field type</typeparam>
+        /// <typeparam name="T2">Struct type</typeparam>
+        /// <param name="storage">Parent struct</param>
+        /// <param name="target">Field</param>
+        /// <returns>The byte offset of the given field in the given struct</returns>
+        public static int OffsetOf<T, T2>(ref T2 storage, ref T target)
+        {
+            return (int)Unsafe.ByteOffset(ref Unsafe.As<T2, T>(ref storage), ref target);
+        }
+    }
+}
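
OffsetOf is what the static constructors above use to record field offsets for native code emission. As a rough sanity check (a hypothetical snippet, assuming same-assembly access to the internal helper), the offsets of a Pack = 1 sequential struct should equal the cumulative sizes of the preceding fields:

using System.Diagnostics;
using Ryujinx.Common.Memory.PartialUnmaps;
using static Ryujinx.Common.Memory.PartialUnmaps.PartialUnmapHelpers;

static class OffsetOfSketch // hypothetical, not part of the commit
{
    static void Check()
    {
        NativeReaderWriterLock dummy = default;

        // WriteLock is the first field; ReaderCount follows one int later.
        Debug.Assert(OffsetOf(ref dummy, ref dummy.WriteLock) == 0);
        Debug.Assert(OffsetOf(ref dummy, ref dummy.ReaderCount) == sizeof(int));
    }
}
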
diff --git a/Ryujinx.Common/Memory/PartialUnmaps/PartialUnmapState.cs b/Ryujinx.Common/Memory/PartialUnmaps/PartialUnmapState.cs
new file mode 100644
index 00000000..3b42e140
--- /dev/null
+++ b/Ryujinx.Common/Memory/PartialUnmaps/PartialUnmapState.cs
@@ -0,0 +1,160 @@
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Versioning;
+using System.Threading;
+
+using static Ryujinx.Common.Memory.PartialUnmaps.PartialUnmapHelpers;
+
+namespace Ryujinx.Common.Memory.PartialUnmaps
+{
+    /// <summary>
+    /// State for partial unmaps. Intended to be used on Windows.
+    /// </summary>
+    [StructLayout(LayoutKind.Sequential, Pack = 1)]
+    public struct PartialUnmapState
+    {
+        public NativeReaderWriterLock PartialUnmapLock;
+        public int PartialUnmapsCount;
+        public ThreadLocalMap<int> LocalCounts;
+
+        public readonly static int PartialUnmapLockOffset;
+        public readonly static int PartialUnmapsCountOffset;
+        public readonly static int LocalCountsOffset;
+
+        public readonly static IntPtr GlobalState;
+
+        [SupportedOSPlatform("windows")]
+        [DllImport("kernel32.dll")]
+        public static extern int GetCurrentThreadId();
+
+        [SupportedOSPlatform("windows")]
+        [DllImport("kernel32.dll", SetLastError = true)]
+        static extern IntPtr OpenThread(int dwDesiredAccess, bool bInheritHandle, uint dwThreadId);
+
+        [SupportedOSPlatform("windows")]
+        [DllImport("kernel32.dll", SetLastError = true)]
+        public static extern bool CloseHandle(IntPtr hObject);
+
+        [SupportedOSPlatform("windows")]
+        [DllImport("kernel32.dll", SetLastError = true)]
+        static extern bool GetExitCodeThread(IntPtr hThread, out uint lpExitCode);
+
+        /// <summary>
+        /// Creates a global static PartialUnmapState and populates the field offsets.
+        /// </summary>
+        static unsafe PartialUnmapState()
+        {
+            PartialUnmapState instance = new PartialUnmapState();
+
+            PartialUnmapLockOffset = OffsetOf(ref instance, ref instance.PartialUnmapLock);
+            PartialUnmapsCountOffset = OffsetOf(ref instance, ref instance.PartialUnmapsCount);
+            LocalCountsOffset = OffsetOf(ref instance, ref instance.LocalCounts);
+
+            int size = Unsafe.SizeOf<PartialUnmapState>();
+            GlobalState = Marshal.AllocHGlobal(size);
+            Unsafe.InitBlockUnaligned((void*)GlobalState, 0, (uint)size);
+        }
+
+        /// <summary>
+        /// Resets the global state.
+        /// </summary>
+        public static unsafe void Reset()
+        {
+            int size = Unsafe.SizeOf<PartialUnmapState>();
+            Unsafe.InitBlockUnaligned((void*)GlobalState, 0, (uint)size);
+        }
+
+        /// <summary>
+        /// Gets a reference to the global state.
+        /// </summary>
+        /// <returns>A reference to the global state</returns>
+        public static unsafe ref PartialUnmapState GetRef()
+        {
+            return ref Unsafe.AsRef<PartialUnmapState>((void*)GlobalState);
+        }
+
+        /// <summary>
+        /// Checks if an access violation handler should retry execution due to a fault caused by partial unmap.
+        /// </summary>
+        /// <remarks>
+        /// Due to Windows limitations, <see cref="UnmapView"/> might need to unmap more memory than requested.
+        /// The additional memory that was unmapped is later remapped; however, this leaves a time gap where the
+        /// memory might be accessed but is unmapped. Users of the API must compensate for that by catching the
+        /// access violation and retrying if it happened between the unmap and remap operation.
+        /// This method can be used to decide if retrying in such cases is necessary or not.
+        ///
+        /// This version of the function is not used, but serves as a reference for the native
+        /// implementation in ARMeilleure.
+        /// </remarks>
+        /// <returns>True if execution should be retried, false otherwise</returns>
+        [SupportedOSPlatform("windows")]
+        public bool RetryFromAccessViolation()
+        {
+            PartialUnmapLock.AcquireReaderLock();
+
+            int threadID = GetCurrentThreadId();
+            int threadIndex = LocalCounts.GetOrReserve(threadID, 0);
+
+            if (threadIndex == -1)
+            {
+                // Out of thread local space... try again later.
+
+                PartialUnmapLock.ReleaseReaderLock();
+
+                return true;
+            }
+
+            ref int threadLocalPartialUnmapsCount = ref LocalCounts.GetValue(threadIndex);
+
+            bool retry = threadLocalPartialUnmapsCount != PartialUnmapsCount;
+            if (retry)
+            {
+                threadLocalPartialUnmapsCount = PartialUnmapsCount;
+            }
+
+            PartialUnmapLock.ReleaseReaderLock();
+
+            return retry;
+        }
+
+        /// <summary>
+        /// Iterates and trims threads in the thread -> count map that
+        /// are no longer active.
+        /// </summary>
+        [SupportedOSPlatform("windows")]
+        public void TrimThreads()
+        {
+            const uint ExitCodeStillActive = 259;
+            const int ThreadQueryInformation = 0x40;
+
+            Span<int> ids = LocalCounts.ThreadIds.ToSpan();
+
+            for (int i = 0; i < ids.Length; i++)
+            {
+                int id = ids[i];
+
+                if (id != 0)
+                {
+                    IntPtr handle = OpenThread(ThreadQueryInformation, false, (uint)id);
+
+                    if (handle == IntPtr.Zero)
+                    {
+                        Interlocked.CompareExchange(ref ids[i], 0, id);
+                    }
+                    else
+                    {
+                        GetExitCodeThread(handle, out uint exitCode);
+
+                        if (exitCode != ExitCodeStillActive)
+                        {
+                            Interlocked.CompareExchange(ref ids[i], 0, id);
+                        }
+
+                        CloseHandle(handle);
+                    }
+                }
+            }
+        }
+    }
+}
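
For context, the generation-counter protocol that RetryFromAccessViolation checks against looks roughly like the sketch below. The unmapping side is hypothetical here; in the actual code it lives in the Windows memory management path, which is not part of this diff.

using System.Threading;
using Ryujinx.Common.Memory.PartialUnmaps;

static class PartialUnmapSketch // hypothetical, not part of the commit
{
    static void UnmapSide()
    {
        ref PartialUnmapState state = ref PartialUnmapState.GetRef();

        // Writer: bump the global partial-unmap generation while holding the
        // write lock, so readers cannot observe the unmap/remap window.
        state.PartialUnmapLock.AcquireReaderLock();
        state.PartialUnmapLock.UpgradeToWriterLock();

        Interlocked.Increment(ref state.PartialUnmapsCount);
        // ... unmap and remap the extra pages here ...

        state.PartialUnmapLock.DowngradeFromWriterLock();
        state.PartialUnmapLock.ReleaseReaderLock();
    }

    static bool FaultSide()
    {
        // Reader (Windows only): an access violation handler asks whether the
        // fault could have come from a partial unmap that was since repaired.
        return PartialUnmapState.GetRef().RetryFromAccessViolation();
    }
}
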
diff --git a/Ryujinx.Common/Memory/PartialUnmaps/ThreadLocalMap.cs b/Ryujinx.Common/Memory/PartialUnmaps/ThreadLocalMap.cs
new file mode 100644
index 00000000..a3bd5be8
--- /dev/null
+++ b/Ryujinx.Common/Memory/PartialUnmaps/ThreadLocalMap.cs
@@ -0,0 +1,92 @@
+using System.Runtime.InteropServices;
+using System.Threading;
+
+using static Ryujinx.Common.Memory.PartialUnmaps.PartialUnmapHelpers;
+
+namespace Ryujinx.Common.Memory.PartialUnmaps
+{
+    /// <summary>
+    /// A simple fixed-size, thread-safe map that can be used from native code.
+    /// Integer thread IDs map to corresponding structs.
+    /// </summary>
+    /// <typeparam name="T">The value type for the map</typeparam>
+    [StructLayout(LayoutKind.Sequential, Pack = 1)]
+    public struct ThreadLocalMap<T> where T : unmanaged
+    {
+        public const int MapSize = 20;
+
+        public Array20<int> ThreadIds;
+        public Array20<T> Structs;
+
+        public static int ThreadIdsOffset;
+        public static int StructsOffset;
+
+        /// <summary>
+        /// Populates the field offsets for use when emitting native code.
+        /// </summary>
+        static ThreadLocalMap()
+        {
+            ThreadLocalMap<T> instance = new ThreadLocalMap<T>();
+
+            ThreadIdsOffset = OffsetOf(ref instance, ref instance.ThreadIds);
+            StructsOffset = OffsetOf(ref instance, ref instance.Structs);
+        }
+
+        /// <summary>
+        /// Gets the index of a given thread ID in the map, or reserves one.
+        /// When reserving a struct, its value is set to the given initial value.
+        /// Returns -1 when there is no space to reserve a new entry.
+        /// </summary>
+        /// <param name="threadId">Thread ID to use as a key</param>
+        /// <param name="initial">Initial value of the associated struct</param>
+        /// <returns>The index of the entry, or -1 if none</returns>
+        public int GetOrReserve(int threadId, T initial)
+        {
+            // Try to get a match first.
+
+            for (int i = 0; i < MapSize; i++)
+            {
+                int compare = Interlocked.CompareExchange(ref ThreadIds[i], threadId, threadId);
+
+                if (compare == threadId)
+                {
+                    return i;
+                }
+            }
+
+            // Try to get a free entry. Since the id is assumed to be unique to this thread, we know it doesn't exist yet.
+
+            for (int i = 0; i < MapSize; i++)
+            {
+                int compare = Interlocked.CompareExchange(ref ThreadIds[i], threadId, 0);
+
+                if (compare == 0)
+                {
+                    Structs[i] = initial;
+                    return i;
+                }
+            }
+
+            return -1;
+        }
+
+        /// <summary>
+        /// Gets the struct value for a given map entry.
+        /// </summary>
+        /// <param name="index">Index of the entry</param>
+        /// <returns>A reference to the struct value</returns>
+        public ref T GetValue(int index)
+        {
+            return ref Structs[index];
+        }
+
+        /// <summary>
+        /// Releases an entry from the map.
+        /// </summary>
+        /// <param name="index">Index of the entry to release</param>
+        public void Release(int index)
+        {
+            Interlocked.Exchange(ref ThreadIds[index], 0);
+        }
+    }
+}
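
Finally, a short, hypothetical illustration of the map's lifecycle (the thread ID and value are made up); this mirrors how PartialUnmapState uses LocalCounts above.

using Ryujinx.Common.Memory.PartialUnmaps;

static class ThreadLocalMapSketch // hypothetical, not part of the commit
{
    static ThreadLocalMap<int> _map;

    static void Example(int threadId)
    {
        // Reserve (or look up) the slot for this thread, starting its value at 0.
        int index = _map.GetOrReserve(threadId, 0);

        if (index == -1)
        {
            // All MapSize (20) slots are in use; callers must handle this case.
            return;
        }

        ref int count = ref _map.GetValue(index);
        count++; // mutate the per-thread value in place

        _map.Release(index); // free the slot, e.g. when the thread exits
    }
}
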