aboutsummaryrefslogtreecommitdiff
path: root/ARMeilleure/Common
diff options
context:
space:
mode:
Diffstat (limited to 'ARMeilleure/Common')
-rw-r--r--ARMeilleure/Common/AddressTable.cs2
-rw-r--r--ARMeilleure/Common/Allocator.cs24
-rw-r--r--ARMeilleure/Common/ArenaAllocator.cs188
-rw-r--r--ARMeilleure/Common/BitMap.cs153
-rw-r--r--ARMeilleure/Common/BitMapPool.cs32
-rw-r--r--ARMeilleure/Common/EntryTable.cs11
-rw-r--r--ARMeilleure/Common/NativeAllocator.cs27
-rw-r--r--ARMeilleure/Common/ThreadStaticPool.cs219
-rw-r--r--ARMeilleure/Common/ThreadStaticPoolEnums.cs14
9 files changed, 327 insertions, 343 deletions
diff --git a/ARMeilleure/Common/AddressTable.cs b/ARMeilleure/Common/AddressTable.cs
index 4af1dc3a..60586a35 100644
--- a/ARMeilleure/Common/AddressTable.cs
+++ b/ARMeilleure/Common/AddressTable.cs
@@ -211,7 +211,7 @@ namespace ARMeilleure.Common
private IntPtr Allocate<T>(int length, T fill, bool leaf) where T : unmanaged
{
var size = sizeof(T) * length;
- var page = Marshal.AllocHGlobal(size);
+ var page = (IntPtr)NativeAllocator.Instance.Allocate((uint)size);
var span = new Span<T>((void*)page, length);
span.Fill(fill);
diff --git a/ARMeilleure/Common/Allocator.cs b/ARMeilleure/Common/Allocator.cs
new file mode 100644
index 00000000..247a8e8b
--- /dev/null
+++ b/ARMeilleure/Common/Allocator.cs
@@ -0,0 +1,24 @@
+using System;
+
+namespace ARMeilleure.Common
+{
+ unsafe abstract class Allocator : IDisposable
+ {
+ public T* Allocate<T>(ulong count = 1) where T : unmanaged
+ {
+ return (T*)Allocate(count * (uint)sizeof(T));
+ }
+
+ public abstract void* Allocate(ulong size);
+
+ public abstract void Free(void* block);
+
+ protected virtual void Dispose(bool disposing) { }
+
+ public void Dispose()
+ {
+ Dispose(true);
+ GC.SuppressFinalize(this);
+ }
+ }
+}
diff --git a/ARMeilleure/Common/ArenaAllocator.cs b/ARMeilleure/Common/ArenaAllocator.cs
new file mode 100644
index 00000000..4ac7020d
--- /dev/null
+++ b/ARMeilleure/Common/ArenaAllocator.cs
@@ -0,0 +1,188 @@
+using System;
+using System.Collections.Generic;
+using System.Runtime.CompilerServices;
+using System.Threading;
+
+namespace ARMeilleure.Common
+{
+ unsafe sealed class ArenaAllocator : Allocator
+ {
+ private class PageInfo
+ {
+ public byte* Pointer;
+ public byte Unused;
+ public int UnusedCounter;
+ }
+
+ private int _lastReset;
+ private ulong _index;
+ private int _pageIndex;
+ private PageInfo _page;
+ private List<PageInfo> _pages;
+ private readonly ulong _pageSize;
+ private readonly uint _pageCount;
+ private readonly List<IntPtr> _extras;
+
+ public ArenaAllocator(uint pageSize, uint pageCount)
+ {
+ _lastReset = Environment.TickCount;
+
+ // Set _index to pageSize so that the first allocation goes through the slow path.
+ _index = pageSize;
+ _pageIndex = -1;
+
+ _page = null;
+ _pages = new List<PageInfo>();
+ _pageSize = pageSize;
+ _pageCount = pageCount;
+
+ _extras = new List<IntPtr>();
+ }
+
+ public Span<T> AllocateSpan<T>(ulong count) where T : unmanaged
+ {
+ return new Span<T>(Allocate<T>(count), (int)count);
+ }
+
+ public override void* Allocate(ulong size)
+ {
+ if (_index + size <= _pageSize)
+ {
+ byte* result = _page.Pointer + _index;
+
+ _index += size;
+
+ return result;
+ }
+
+ return AllocateSlow(size);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private void* AllocateSlow(ulong size)
+ {
+ if (size > _pageSize)
+ {
+ void* extra = NativeAllocator.Instance.Allocate(size);
+
+ _extras.Add((IntPtr)extra);
+
+ return extra;
+ }
+
+ if (_index + size > _pageSize)
+ {
+ _index = 0;
+ _pageIndex++;
+ }
+
+ if (_pageIndex < _pages.Count)
+ {
+ _page = _pages[_pageIndex];
+ _page.Unused = 0;
+ }
+ else
+ {
+ _page = new PageInfo();
+ _page.Pointer = (byte*)NativeAllocator.Instance.Allocate(_pageSize);
+
+ _pages.Add(_page);
+ }
+
+ byte* result = _page.Pointer + _index;
+
+ _index += size;
+
+ return result;
+ }
+
+ public override void Free(void* block) { }
+
+ public void Reset()
+ {
+ _index = _pageSize;
+ _pageIndex = -1;
+ _page = null;
+
+ // Free excess pages that were allocated.
+ while (_pages.Count > _pageCount)
+ {
+ NativeAllocator.Instance.Free(_pages[_pages.Count - 1].Pointer);
+
+ _pages.RemoveAt(_pages.Count - 1);
+ }
+
+ // Free extra blocks that are not page-sized
+ foreach (IntPtr ptr in _extras)
+ {
+ NativeAllocator.Instance.Free((void*)ptr);
+ }
+
+ _extras.Clear();
+
+ // Free pooled pages that have not been used in a while. Remove pages at the back first, because we try to
+ // keep the pages at the front alive, since they're more likely to be hot and in the d-cache.
+ bool removing = true;
+
+ // If arena is used frequently, keep pages for longer. Otherwise keep pages for a shorter amount of time.
+ int now = Environment.TickCount;
+ int count = (now - _lastReset) switch {
+ >= 5000 => 0,
+ >= 2500 => 50,
+ >= 1000 => 100,
+ >= 10 => 1500,
+ _ => 5000
+ };
+
+ for (int i = _pages.Count - 1; i >= 0; i--)
+ {
+ PageInfo page = _pages[i];
+
+ if (page.Unused == 0)
+ {
+ page.UnusedCounter = 0;
+ }
+
+ page.UnusedCounter += page.Unused;
+ page.Unused = 1;
+
+ // If the page was not used after `count` resets, remove it.
+ if (removing && page.UnusedCounter >= count)
+ {
+ NativeAllocator.Instance.Free(page.Pointer);
+
+ _pages.RemoveAt(i);
+ }
+ else
+ {
+ removing = false;
+ }
+ }
+
+ _lastReset = now;
+ }
+
+ protected override void Dispose(bool disposing)
+ {
+ if (_pages != null)
+ {
+ foreach (PageInfo info in _pages)
+ {
+ NativeAllocator.Instance.Free(info.Pointer);
+ }
+
+ foreach (IntPtr ptr in _extras)
+ {
+ NativeAllocator.Instance.Free((void*)ptr);
+ }
+
+ _pages = null;
+ }
+ }
+
+ ~ArenaAllocator()
+ {
+ Dispose(false);
+ }
+ }
+}
diff --git a/ARMeilleure/Common/BitMap.cs b/ARMeilleure/Common/BitMap.cs
index f782ac8b..4872c442 100644
--- a/ARMeilleure/Common/BitMap.cs
+++ b/ARMeilleure/Common/BitMap.cs
@@ -1,57 +1,27 @@
+using System;
using System.Collections;
using System.Collections.Generic;
using System.Numerics;
namespace ARMeilleure.Common
{
- class BitMap : IEnumerator<int>, IEnumerable<int>
+ unsafe class BitMap : IEnumerable<int>, IDisposable
{
private const int IntSize = 64;
private const int IntMask = IntSize - 1;
- private readonly List<long> _masks;
+ private int _count;
+ private long* _masks;
+ private readonly Allocator _allocator;
- private int _enumIndex;
- private long _enumMask;
- private int _enumBit;
-
- public int Current => _enumIndex * IntSize + _enumBit;
- object IEnumerator.Current => Current;
-
- public BitMap()
- {
- _masks = new List<long>(0);
- }
-
- public BitMap(int initialCapacity)
+ public BitMap(Allocator allocator)
{
- int count = (initialCapacity + IntMask) / IntSize;
-
- _masks = new List<long>(count);
-
- while (count-- > 0)
- {
- _masks.Add(0);
- }
+ _allocator = allocator;
}
- public BitMap Reset(int initialCapacity)
+ public BitMap(Allocator allocator, int capacity) : this(allocator)
{
- int count = (initialCapacity + IntMask) / IntSize;
-
- if (count > _masks.Capacity)
- {
- _masks.Capacity = count;
- }
-
- _masks.Clear();
-
- while (count-- > 0)
- {
- _masks.Add(0);
- }
-
- return this;
+ EnsureCapacity(capacity);
}
public bool Set(int bit)
@@ -97,7 +67,7 @@ namespace ARMeilleure.Common
public int FindFirstUnset()
{
- for (int index = 0; index < _masks.Count; index++)
+ for (int index = 0; index < _count; index++)
{
long mask = _masks[index];
@@ -107,16 +77,16 @@ namespace ARMeilleure.Common
}
}
- return _masks.Count * IntSize;
+ return _count * IntSize;
}
public bool Set(BitMap map)
{
- EnsureCapacity(map._masks.Count * IntSize);
+ EnsureCapacity(map._count * IntSize);
bool modified = false;
- for (int index = 0; index < _masks.Count; index++)
+ for (int index = 0; index < _count; index++)
{
long newValue = _masks[index] | map._masks[index];
@@ -133,11 +103,11 @@ namespace ARMeilleure.Common
public bool Clear(BitMap map)
{
- EnsureCapacity(map._masks.Count * IntSize);
+ EnsureCapacity(map._count * IntSize);
bool modified = false;
- for (int index = 0; index < _masks.Count; index++)
+ for (int index = 0; index < _count; index++)
{
long newValue = _masks[index] & ~map._masks[index];
@@ -152,15 +122,34 @@ namespace ARMeilleure.Common
return modified;
}
- #region IEnumerable<long> Methods
+ private void EnsureCapacity(int size)
+ {
+ int count = (size + IntMask) / IntSize;
+
+ if (count > _count)
+ {
+ var oldMask = _masks;
+ var oldSpan = new Span<long>(_masks, _count);
+
+ _masks = _allocator.Allocate<long>((uint)count);
+ _count = count;
- // Note: The bit enumerator is embedded in this class to avoid creating garbage when enumerating.
+ var newSpan = new Span<long>(_masks, _count);
- private void EnsureCapacity(int size)
+ oldSpan.CopyTo(newSpan);
+ newSpan.Slice(oldSpan.Length).Clear();
+
+ _allocator.Free(oldMask);
+ }
+ }
+
+ public void Dispose()
{
- while (_masks.Count * IntSize < size)
+ if (_masks != null)
{
- _masks.Add(0);
+ _allocator.Free(_masks);
+
+ _masks = null;
}
}
@@ -169,39 +158,59 @@ namespace ARMeilleure.Common
return GetEnumerator();
}
- public IEnumerator<int> GetEnumerator()
+ IEnumerator<int> IEnumerable<int>.GetEnumerator()
+ {
+ return GetEnumerator();
+ }
+
+ public Enumerator GetEnumerator()
{
- Reset();
- return this;
+ return new Enumerator(this);
}
- public bool MoveNext()
+ public struct Enumerator : IEnumerator<int>
{
- if (_enumMask != 0)
+ private int _index;
+ private long _mask;
+ private int _bit;
+ private readonly BitMap _map;
+
+ public int Current => _index * IntSize + _bit;
+ object IEnumerator.Current => Current;
+
+ public Enumerator(BitMap map)
{
- _enumMask &= ~(1L << _enumBit);
+ _index = -1;
+ _mask = 0;
+ _bit = 0;
+ _map = map;
}
- while (_enumMask == 0)
+
+ public bool MoveNext()
{
- if (++_enumIndex >= _masks.Count)
+ if (_mask != 0)
{
- return false;
+ _mask &= ~(1L << _bit);
}
- _enumMask = _masks[_enumIndex];
- }
- _enumBit = BitOperations.TrailingZeroCount(_enumMask);
- return true;
- }
- public void Reset()
- {
- _enumIndex = -1;
- _enumMask = 0;
- _enumBit = 0;
- }
+ while (_mask == 0)
+ {
+ if (++_index >= _map._count)
+ {
+ return false;
+ }
- public void Dispose() { }
+ _mask = _map._masks[_index];
+ }
- #endregion
+ _bit = BitOperations.TrailingZeroCount(_mask);
+
+ return true;
+ }
+
+ public void Reset() { }
+
+ public void Dispose() { }
+ }
}
} \ No newline at end of file
diff --git a/ARMeilleure/Common/BitMapPool.cs b/ARMeilleure/Common/BitMapPool.cs
deleted file mode 100644
index d8d297fa..00000000
--- a/ARMeilleure/Common/BitMapPool.cs
+++ /dev/null
@@ -1,32 +0,0 @@
-namespace ARMeilleure.Common
-{
- static class BitMapPool
- {
- public static BitMap Allocate(int initialCapacity)
- {
- return BitMap().Reset(initialCapacity);
- }
-
- #region "ThreadStaticPool"
- public static void PrepareBitMapPool(int groupId = 0)
- {
- ThreadStaticPool<BitMap>.PreparePool(groupId, ChunkSizeLimit.Small);
- }
-
- private static BitMap BitMap()
- {
- return ThreadStaticPool<BitMap>.Instance.Allocate();
- }
-
- public static void ResetBitMapPool(int groupId = 0)
- {
- ThreadStaticPool<BitMap>.ResetPool(groupId);
- }
-
- public static void DisposeBitMapPools()
- {
- ThreadStaticPool<BitMap>.DisposePools();
- }
- #endregion
- }
-}
diff --git a/ARMeilleure/Common/EntryTable.cs b/ARMeilleure/Common/EntryTable.cs
index b61af8f8..f3f3ce28 100644
--- a/ARMeilleure/Common/EntryTable.cs
+++ b/ARMeilleure/Common/EntryTable.cs
@@ -1,7 +1,6 @@
using System;
using System.Collections.Generic;
using System.Numerics;
-using System.Runtime.InteropServices;
namespace ARMeilleure.Common
{
@@ -41,7 +40,7 @@ namespace ARMeilleure.Common
throw new ArgumentException("Size of TEntry cannot be zero.");
}
- _allocated = new BitMap();
+ _allocated = new BitMap(NativeAllocator.Instance);
_pages = new Dictionary<int, IntPtr>();
_pageLogCapacity = BitOperations.Log2((uint)(pageSize / sizeof(TEntry)));
_pageCapacity = 1 << _pageLogCapacity;
@@ -150,7 +149,7 @@ namespace ARMeilleure.Common
if (!_pages.TryGetValue(pageIndex, out IntPtr page))
{
- page = Marshal.AllocHGlobal(sizeof(TEntry) * _pageCapacity);
+ page = (IntPtr)NativeAllocator.Instance.Allocate((uint)sizeof(TEntry) * (uint)_pageCapacity);
_pages.Add(pageIndex, page);
}
@@ -172,13 +171,15 @@ namespace ARMeilleure.Common
/// instance.
/// </summary>
/// <param name="disposing"><see langword="true"/> to dispose managed resources also; otherwise just unmanaged resources</param>
- protected virtual void Dispose(bool disposing)
+ protected unsafe virtual void Dispose(bool disposing)
{
if (!_disposed)
{
+ _allocated.Dispose();
+
foreach (var page in _pages.Values)
{
- Marshal.FreeHGlobal(page);
+ NativeAllocator.Instance.Free((void*)page);
}
_disposed = true;
diff --git a/ARMeilleure/Common/NativeAllocator.cs b/ARMeilleure/Common/NativeAllocator.cs
new file mode 100644
index 00000000..71c04a9b
--- /dev/null
+++ b/ARMeilleure/Common/NativeAllocator.cs
@@ -0,0 +1,27 @@
+using System;
+using System.Runtime.InteropServices;
+
+namespace ARMeilleure.Common
+{
+ unsafe sealed class NativeAllocator : Allocator
+ {
+ public static NativeAllocator Instance { get; } = new();
+
+ public override void* Allocate(ulong size)
+ {
+ void* result = (void*)Marshal.AllocHGlobal((IntPtr)size);
+
+ if (result == null)
+ {
+ throw new OutOfMemoryException();
+ }
+
+ return result;
+ }
+
+ public override void Free(void* block)
+ {
+ Marshal.FreeHGlobal((IntPtr)block);
+ }
+ }
+}
diff --git a/ARMeilleure/Common/ThreadStaticPool.cs b/ARMeilleure/Common/ThreadStaticPool.cs
deleted file mode 100644
index bbe662f8..00000000
--- a/ARMeilleure/Common/ThreadStaticPool.cs
+++ /dev/null
@@ -1,219 +0,0 @@
-using ARMeilleure.Translation.PTC;
-using System;
-using System.Collections.Concurrent;
-using System.Collections.Generic;
-
-namespace ARMeilleure.Common
-{
- class ThreadStaticPool<T> where T : class, new()
- {
- [ThreadStatic]
- private static ThreadStaticPool<T> _instance;
-
- public static ThreadStaticPool<T> Instance
- {
- get
- {
- if (_instance == null)
- {
- PreparePool(); // So that we can still use a pool when blindly initializing one.
- }
-
- return _instance;
- }
- }
-
- private static readonly ConcurrentDictionary<int, Stack<ThreadStaticPool<T>>> _pools = new();
-
- private static Stack<ThreadStaticPool<T>> GetPools(int groupId)
- {
- return _pools.GetOrAdd(groupId, (groupId) => new());
- }
-
- public static void PreparePool(
- int groupId = 0,
- ChunkSizeLimit chunkSizeLimit = ChunkSizeLimit.Large,
- PoolSizeIncrement poolSizeIncrement = PoolSizeIncrement.Default)
- {
- if (Ptc.State == PtcState.Disabled)
- {
- PreparePoolDefault(groupId, (int)chunkSizeLimit, (int)poolSizeIncrement);
- }
- else
- {
- PreparePoolSlim((int)chunkSizeLimit, (int)poolSizeIncrement);
- }
- }
-
- private static void PreparePoolDefault(int groupId, int chunkSizeLimit, int poolSizeIncrement)
- {
- // Prepare the pool for this thread, ideally using an existing one from the specified group.
-
- if (_instance == null)
- {
- var pools = GetPools(groupId);
- lock (pools)
- {
- _instance = (pools.Count != 0) ? pools.Pop() : new(chunkSizeLimit, poolSizeIncrement);
- }
- }
- }
-
- private static void PreparePoolSlim(int chunkSizeLimit, int poolSizeIncrement)
- {
- // Prepare the pool for this thread.
-
- if (_instance == null)
- {
- _instance = new(chunkSizeLimit, poolSizeIncrement);
- }
- }
-
- public static void ResetPool(int groupId = 0)
- {
- if (Ptc.State == PtcState.Disabled)
- {
- ResetPoolDefault(groupId);
- }
- else
- {
- ResetPoolSlim();
- }
- }
-
- private static void ResetPoolDefault(int groupId)
- {
- // Reset, limit if necessary, and return the pool for this thread to the specified group.
-
- if (_instance != null)
- {
- var pools = GetPools(groupId);
- lock (pools)
- {
- _instance.Clear();
- _instance.ChunkSizeLimiter();
- pools.Push(_instance);
-
- _instance = null;
- }
- }
- }
-
- private static void ResetPoolSlim()
- {
- // Reset, limit if necessary, the pool for this thread.
-
- if (_instance != null)
- {
- _instance.Clear();
- _instance.ChunkSizeLimiter();
- }
- }
-
- public static void DisposePools()
- {
- if (Ptc.State == PtcState.Disabled)
- {
- DisposePoolsDefault();
- }
- else
- {
- DisposePoolSlim();
- }
- }
-
- private static void DisposePoolsDefault()
- {
- // Resets any static references to the pools used by threads for each group, allowing them to be garbage collected.
-
- foreach (var pools in _pools.Values)
- {
- foreach (var instance in pools)
- {
- instance.Dispose();
- }
-
- pools.Clear();
- }
-
- _pools.Clear();
- }
-
- private static void DisposePoolSlim()
- {
- // Dispose the pool for this thread.
-
- if (_instance != null)
- {
- _instance.Dispose();
-
- _instance = null;
- }
- }
-
- private List<T[]> _pool;
- private int _chunkIndex = -1;
- private int _poolIndex = -1;
- private int _chunkSizeLimit;
- private int _poolSizeIncrement;
-
- private ThreadStaticPool(int chunkSizeLimit, int poolSizeIncrement)
- {
- _chunkSizeLimit = chunkSizeLimit;
- _poolSizeIncrement = poolSizeIncrement;
-
- _pool = new(chunkSizeLimit * 2);
-
- AddChunkIfNeeded();
- }
-
- public T Allocate()
- {
- if (++_poolIndex >= _poolSizeIncrement)
- {
- AddChunkIfNeeded();
-
- _poolIndex = 0;
- }
-
- return _pool[_chunkIndex][_poolIndex];
- }
-
- private void AddChunkIfNeeded()
- {
- if (++_chunkIndex >= _pool.Count)
- {
- T[] pool = new T[_poolSizeIncrement];
-
- for (int i = 0; i < _poolSizeIncrement; i++)
- {
- pool[i] = new T();
- }
-
- _pool.Add(pool);
- }
- }
-
- public void Clear()
- {
- _chunkIndex = 0;
- _poolIndex = -1;
- }
-
- private void ChunkSizeLimiter()
- {
- if (_pool.Count >= _chunkSizeLimit)
- {
- int newChunkSize = _chunkSizeLimit / 2;
-
- _pool.RemoveRange(newChunkSize, _pool.Count - newChunkSize);
- _pool.Capacity = _chunkSizeLimit * 2;
- }
- }
-
- private void Dispose()
- {
- _pool = null;
- }
- }
-}
diff --git a/ARMeilleure/Common/ThreadStaticPoolEnums.cs b/ARMeilleure/Common/ThreadStaticPoolEnums.cs
deleted file mode 100644
index 0d1d98d3..00000000
--- a/ARMeilleure/Common/ThreadStaticPoolEnums.cs
+++ /dev/null
@@ -1,14 +0,0 @@
-namespace ARMeilleure.Common
-{
- public enum PoolSizeIncrement
- {
- Default = 200
- }
-
- public enum ChunkSizeLimit
- {
- Large = 200000 / PoolSizeIncrement.Default,
- Medium = 100000 / PoolSizeIncrement.Default,
- Small = 50000 / PoolSizeIncrement.Default
- }
-} \ No newline at end of file