aboutsummaryrefslogtreecommitdiff
path: root/src/Ryujinx.Graphics.Vulkan
diff options
context:
space:
mode:
authorTSR Berry <20988865+TSRBerry@users.noreply.github.com>2023-04-08 01:22:00 +0200
committerMary <thog@protonmail.com>2023-04-27 23:51:14 +0200
commitcee712105850ac3385cd0091a923438167433f9f (patch)
tree4a5274b21d8b7f938c0d0ce18736d3f2993b11b1 /src/Ryujinx.Graphics.Vulkan
parentcd124bda587ef09668a971fa1cac1c3f0cfc9f21 (diff)
Move solution and projects to src
Diffstat (limited to 'src/Ryujinx.Graphics.Vulkan')
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Auto.cs154
-rw-r--r--src/Ryujinx.Graphics.Vulkan/AutoFlushCounter.cs179
-rw-r--r--src/Ryujinx.Graphics.Vulkan/BackgroundResources.cs117
-rw-r--r--src/Ryujinx.Graphics.Vulkan/BitMap.cs157
-rw-r--r--src/Ryujinx.Graphics.Vulkan/BufferAllocationType.cs12
-rw-r--r--src/Ryujinx.Graphics.Vulkan/BufferHolder.cs788
-rw-r--r--src/Ryujinx.Graphics.Vulkan/BufferManager.cs455
-rw-r--r--src/Ryujinx.Graphics.Vulkan/BufferState.cs48
-rw-r--r--src/Ryujinx.Graphics.Vulkan/BufferUsageBitmap.cs77
-rw-r--r--src/Ryujinx.Graphics.Vulkan/CacheByRange.cs398
-rw-r--r--src/Ryujinx.Graphics.Vulkan/CommandBufferPool.cs368
-rw-r--r--src/Ryujinx.Graphics.Vulkan/CommandBufferScoped.cs44
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Constants.cs20
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DescriptorSetCollection.cs246
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DescriptorSetManager.cs201
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DescriptorSetUpdater.cs674
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DisposableBuffer.cs25
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DisposableBufferView.cs25
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DisposableFramebuffer.cs25
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DisposableImage.cs25
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DisposableImageView.cs25
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DisposableMemory.cs24
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DisposablePipeline.cs25
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DisposableRenderPass.cs25
-rw-r--r--src/Ryujinx.Graphics.Vulkan/DisposableSampler.cs25
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/FsrScalingFilter.cs179
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/FxaaPostProcessingEffect.cs111
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/IPostProcessingEffect.cs10
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/IScalingFilter.cs20
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrScaling.glsl3945
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrScaling.spvbin0 -> 44672 bytes
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrSharpening.glsl3904
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrSharpening.spvbin0 -> 20472 bytes
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/Fxaa.glsl1177
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/Fxaa.spvbin0 -> 25012 bytes
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaBlend.glsl1404
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaBlend.spvbin0 -> 33728 bytes
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaEdge.glsl1402
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaEdge.spvbin0 -> 8464 bytes
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaNeighbour.glsl1403
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaNeighbour.spvbin0 -> 8328 bytes
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/SmaaConstants.cs15
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/SmaaPostProcessingEffect.cs289
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaAreaTexture.binbin0 -> 179200 bytes
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaSearchTexture.binbin0 -> 1024 bytes
-rw-r--r--src/Ryujinx.Graphics.Vulkan/EnumConversion.cs374
-rw-r--r--src/Ryujinx.Graphics.Vulkan/FenceHelper.cs30
-rw-r--r--src/Ryujinx.Graphics.Vulkan/FenceHolder.cs79
-rw-r--r--src/Ryujinx.Graphics.Vulkan/FormatCapabilities.cs164
-rw-r--r--src/Ryujinx.Graphics.Vulkan/FormatConverter.cs49
-rw-r--r--src/Ryujinx.Graphics.Vulkan/FormatTable.cs172
-rw-r--r--src/Ryujinx.Graphics.Vulkan/FramebufferParams.cs240
-rw-r--r--src/Ryujinx.Graphics.Vulkan/HardwareCapabilities.cs120
-rw-r--r--src/Ryujinx.Graphics.Vulkan/HashTableSlim.cs112
-rw-r--r--src/Ryujinx.Graphics.Vulkan/HelperShader.cs1683
-rw-r--r--src/Ryujinx.Graphics.Vulkan/IdList.cs123
-rw-r--r--src/Ryujinx.Graphics.Vulkan/IndexBufferPattern.cs139
-rw-r--r--src/Ryujinx.Graphics.Vulkan/IndexBufferState.cs161
-rw-r--r--src/Ryujinx.Graphics.Vulkan/MemoryAllocation.cs37
-rw-r--r--src/Ryujinx.Graphics.Vulkan/MemoryAllocator.cs101
-rw-r--r--src/Ryujinx.Graphics.Vulkan/MemoryAllocatorBlockList.cs282
-rw-r--r--src/Ryujinx.Graphics.Vulkan/MoltenVK/MVKConfiguration.cs104
-rw-r--r--src/Ryujinx.Graphics.Vulkan/MoltenVK/MVKInitialization.cs31
-rw-r--r--src/Ryujinx.Graphics.Vulkan/MultiFenceHolder.cs212
-rw-r--r--src/Ryujinx.Graphics.Vulkan/NativeArray.cs48
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PersistentFlushBuffer.cs89
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineBase.cs1742
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineConverter.cs318
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineDynamicState.cs170
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineFull.cs314
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineHelperShader.cs59
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineLayoutCache.cs58
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineLayoutCacheEntry.cs112
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineLayoutFactory.cs244
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineState.cs621
-rw-r--r--src/Ryujinx.Graphics.Vulkan/PipelineUid.cs129
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Queries/BufferedQuery.cs216
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Queries/CounterQueue.cs245
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Queries/CounterQueueEvent.cs170
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Queries/Counters.cs71
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Ryujinx.Graphics.Vulkan.csproj39
-rw-r--r--src/Ryujinx.Graphics.Vulkan/SamplerHolder.cs118
-rw-r--r--src/Ryujinx.Graphics.Vulkan/SemaphoreHolder.cs60
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shader.cs163
-rw-r--r--src/Ryujinx.Graphics.Vulkan/ShaderCollection.cs427
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ChangeBufferStrideShaderSource.comp64
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitClearAlphaFragmentShaderSource.frag11
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitFragmentShaderSource.frag11
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitMsFragmentShaderSource.frag11
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitVertexShaderSource.vert20
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearFFragmentShaderSource.frag9
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearSIFragmentShaderSource.frag9
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearUIFragmentShaderSource.frag9
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearVertexShaderSource.vert19
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyShorteningComputeShaderSource.comp36
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyToNonMsComputeShaderSource.comp37
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyWideningComputeShaderSource.comp31
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorDrawToMsFragmentShaderSource.frag27
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ColorDrawToMsVertexShaderSource.vert11
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ConvertIndexBufferShaderSource.comp58
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ConvertIndirectDataShaderSource.comp103
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/DepthBlitFragmentShaderSource.frag10
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/DepthBlitMsFragmentShaderSource.frag10
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/DepthDrawToMsFragmentShaderSource.frag25
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/DepthDrawToNonMsFragmentShaderSource.frag28
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/ShaderBinaries.cs2413
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/StencilBlitFragmentShaderSource.frag12
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/StencilBlitMsFragmentShaderSource.frag12
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/StencilDrawToMsFragmentShaderSource.frag27
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Shaders/StencilDrawToNonMsFragmentShaderSource.frag30
-rw-r--r--src/Ryujinx.Graphics.Vulkan/SpecInfo.cs102
-rw-r--r--src/Ryujinx.Graphics.Vulkan/StagingBuffer.cs194
-rw-r--r--src/Ryujinx.Graphics.Vulkan/SyncManager.cs206
-rw-r--r--src/Ryujinx.Graphics.Vulkan/TextureBuffer.cs160
-rw-r--r--src/Ryujinx.Graphics.Vulkan/TextureCopy.cs476
-rw-r--r--src/Ryujinx.Graphics.Vulkan/TextureStorage.cs530
-rw-r--r--src/Ryujinx.Graphics.Vulkan/TextureView.cs885
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Vendor.cs62
-rw-r--r--src/Ryujinx.Graphics.Vulkan/VertexBufferState.cs153
-rw-r--r--src/Ryujinx.Graphics.Vulkan/VulkanConfiguration.cs12
-rw-r--r--src/Ryujinx.Graphics.Vulkan/VulkanDebugMessenger.cs153
-rw-r--r--src/Ryujinx.Graphics.Vulkan/VulkanException.cs41
-rw-r--r--src/Ryujinx.Graphics.Vulkan/VulkanInitialization.cs539
-rw-r--r--src/Ryujinx.Graphics.Vulkan/VulkanInstance.cs127
-rw-r--r--src/Ryujinx.Graphics.Vulkan/VulkanPhysicalDevice.cs70
-rw-r--r--src/Ryujinx.Graphics.Vulkan/VulkanRenderer.cs820
-rw-r--r--src/Ryujinx.Graphics.Vulkan/Window.cs603
-rw-r--r--src/Ryujinx.Graphics.Vulkan/WindowBase.cs18
128 files changed, 35831 insertions, 0 deletions
diff --git a/src/Ryujinx.Graphics.Vulkan/Auto.cs b/src/Ryujinx.Graphics.Vulkan/Auto.cs
new file mode 100644
index 00000000..77261de9
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Auto.cs
@@ -0,0 +1,154 @@
+using System;
+using System.Diagnostics;
+using System.Threading;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Contract for a reference-counted GPU resource whose lifetime can be
+ // extended by in-flight command buffers. Implemented by Auto<T> below.
+ interface IAuto
+ {
+ // Returns true if the given command buffer currently holds a reference
+ // to this object (see Auto<T>._cbOwnership).
+ bool HasCommandBufferDependency(CommandBufferScoped cbs);
+
+ // Manual reference management. The int overload additionally clears the
+ // ownership bit for the command buffer with the given index.
+ void IncrementReferenceCount();
+ void DecrementReferenceCount(int cbIndex);
+ void DecrementReferenceCount();
+ }
+
+ // Internal extension of IAuto that allows an object to be registered as a
+ // dependency of a command buffer, so the command buffer keeps it alive
+ // until execution completes (see Auto<T>.AddCommandBufferDependencies).
+ interface IAutoPrivate : IAuto
+ {
+ void AddCommandBufferDependencies(CommandBufferScoped cbs);
+ }
+
+ // Reference-counted wrapper around a disposable value (buffer, image, etc.)
+ // that tracks which command buffers are currently using it. The wrapped
+ // value is disposed only once the reference count reaches zero, i.e. when
+ // neither the owner nor any queued command buffer still needs it.
+ class Auto<T> : IAutoPrivate, IDisposable where T : IDisposable
+ {
+ private int _referenceCount;
+ private T _value;
+
+ // One bit per command buffer index; set while that command buffer holds
+ // a reference to this object.
+ private readonly BitMap _cbOwnership;
+ private readonly MultiFenceHolder _waitable;
+ private readonly IAutoPrivate[] _referencedObjs;
+
+ // _disposed: Dispose() was called by the owner.
+ // _destroyed: the wrapped value has actually been disposed (refcount hit 0).
+ private bool _disposed;
+ private bool _destroyed;
+
+ // Creates a wrapper with an initial owner reference.
+ public Auto(T value)
+ {
+ _referenceCount = 1;
+ _value = value;
+ _cbOwnership = new BitMap(CommandBufferPool.MaxCommandBuffers);
+ }
+
+ // Creates a wrapper that also keeps the given referenced objects alive
+ // for as long as this object is alive, and records buffer usage on the
+ // given waitable when accessed through Get(cbs, offset, size).
+ public Auto(T value, MultiFenceHolder waitable, params IAutoPrivate[] referencedObjs) : this(value)
+ {
+ _waitable = waitable;
+ _referencedObjs = referencedObjs;
+
+ for (int i = 0; i < referencedObjs.Length; i++)
+ {
+ referencedObjs[i].IncrementReferenceCount();
+ }
+ }
+
+ // Gets the value for use on a command buffer, additionally recording a
+ // buffer use over [offset, offset + size) on the waitable (if any).
+ public T Get(CommandBufferScoped cbs, int offset, int size)
+ {
+ _waitable?.AddBufferUse(cbs.CommandBufferIndex, offset, size);
+ return Get(cbs);
+ }
+
+ // Gets the value without taking any command buffer dependency.
+ // Caller is responsible for ensuring the value is not in use by the GPU.
+ public T GetUnsafe()
+ {
+ return _value;
+ }
+
+ // Gets the value for use on the given command buffer, making the command
+ // buffer a co-owner so the value survives until it finishes executing.
+ // Note: returns default(T) if the value was already destroyed.
+ public T Get(CommandBufferScoped cbs)
+ {
+ if (!_destroyed)
+ {
+ AddCommandBufferDependencies(cbs);
+ }
+
+ return _value;
+ }
+
+ public bool HasCommandBufferDependency(CommandBufferScoped cbs)
+ {
+ return _cbOwnership.IsSet(cbs.CommandBufferIndex);
+ }
+
+ // Returns true if any command buffer currently holds a reference.
+ // NOTE(review): the cbp parameter is unused here — presumably kept for
+ // interface symmetry; confirm before relying on per-pool semantics.
+ public bool HasRentedCommandBufferDependency(CommandBufferPool cbp)
+ {
+ return _cbOwnership.AnySet();
+ }
+
+ public void AddCommandBufferDependencies(CommandBufferScoped cbs)
+ {
+ // We don't want to add a reference to this object to the command buffer
+ // more than once, so if we detect that the command buffer already has ownership
+ // of this object, then we can just return without doing anything else.
+ if (_cbOwnership.Set(cbs.CommandBufferIndex))
+ {
+ if (_waitable != null)
+ {
+ cbs.AddWaitable(_waitable);
+ }
+
+ cbs.AddDependant(this);
+
+ // We need to add a dependency on the command buffer to all objects this object
+ // references as well.
+ if (_referencedObjs != null)
+ {
+ for (int i = 0; i < _referencedObjs.Length; i++)
+ {
+ _referencedObjs[i].AddCommandBufferDependencies(cbs);
+ }
+ }
+ }
+ }
+
+ public void IncrementReferenceCount()
+ {
+ // If the count was 0 before the increment, the value has already been
+ // destroyed; undo the increment and fail loudly rather than handing
+ // out a disposed resource.
+ if (Interlocked.Increment(ref _referenceCount) == 1)
+ {
+ Interlocked.Decrement(ref _referenceCount);
+ throw new InvalidOperationException("Attempted to increment the reference count of an object that was already destroyed.");
+ }
+ }
+
+ // Releases the reference held by the command buffer with the given index,
+ // clearing its ownership bit first.
+ public void DecrementReferenceCount(int cbIndex)
+ {
+ _cbOwnership.Clear(cbIndex);
+ DecrementReferenceCount();
+ }
+
+ public void DecrementReferenceCount()
+ {
+ if (Interlocked.Decrement(ref _referenceCount) == 0)
+ {
+ _value.Dispose();
+ _value = default;
+ _destroyed = true;
+
+ // Value is no longer in use by the GPU, dispose all other
+ // resources that it references.
+ if (_referencedObjs != null)
+ {
+ for (int i = 0; i < _referencedObjs.Length; i++)
+ {
+ _referencedObjs[i].DecrementReferenceCount();
+ }
+ }
+ }
+
+ Debug.Assert(_referenceCount >= 0);
+ }
+
+ // Releases the owner's reference. The value is only destroyed once all
+ // command buffer references have also been released.
+ public void Dispose()
+ {
+ if (!_disposed)
+ {
+ DecrementReferenceCount();
+ _disposed = true;
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/AutoFlushCounter.cs b/src/Ryujinx.Graphics.Vulkan/AutoFlushCounter.cs
new file mode 100644
index 00000000..4e2a9d6b
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/AutoFlushCounter.cs
@@ -0,0 +1,179 @@
+using Ryujinx.Common.Logging;
+using System;
+using System.Diagnostics;
+using System.Linq;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Heuristic that decides when queued GPU commands should be flushed early
+ // (on draws, query submissions and attachment changes), balancing flush
+ // overhead against CPU sync wait time. Also toggles a "fast flush" mode
+ // based on a rolling average of sync wait times per frame.
+ internal class AutoFlushCounter
+ {
+ // How often to flush on framebuffer change.
+ private readonly static long FramebufferFlushTimer = Stopwatch.Frequency / 1000; // (1ms)
+
+ // How often to flush on draw when fast flush mode is enabled.
+ private readonly static long DrawFlushTimer = Stopwatch.Frequency / 666; // (1.5ms)
+
+ // Average wait time that triggers fast flush mode to be entered.
+ private readonly static long FastFlushEnterThreshold = Stopwatch.Frequency / 666; // (1.5ms)
+
+ // Average wait time that triggers fast flush mode to be exited.
+ private readonly static long FastFlushExitThreshold = Stopwatch.Frequency / 10000; // (0.1ms)
+
+ // Number of frames to average waiting times over.
+ private const int SyncWaitAverageCount = 20;
+
+ private const int MinDrawCountForFlush = 10;
+ private const int MinConsecutiveQueryForFlush = 10;
+ private const int InitialQueryCountForFlush = 32;
+
+ private readonly VulkanRenderer _gd;
+
+ // Timestamp (Stopwatch ticks) and draw count at the last registered flush.
+ private long _lastFlush;
+ private ulong _lastDrawCount;
+ private bool _hasPendingQuery;
+ private int _consecutiveQueries;
+ private int _queryCount;
+
+ // Per-frame query counts over the last 3 frames, used to predict how
+ // many queries the next frame will need (see Present/GetRemainingQueries).
+ private int[] _queryCountHistory = new int[3];
+ private int _queryCountHistoryIndex;
+ private int _remainingQueries;
+
+ // Rolling window of per-frame sync wait times driving the fast flush toggle.
+ private long[] _syncWaitHistory = new long[SyncWaitAverageCount];
+ private int _syncWaitHistoryIndex;
+
+ private bool _fastFlushMode;
+
+ public AutoFlushCounter(VulkanRenderer gd)
+ {
+ _gd = gd;
+ }
+
+ // Records that a flush happened at the given draw count, resetting the
+ // pending query state.
+ public void RegisterFlush(ulong drawCount)
+ {
+ _lastFlush = Stopwatch.GetTimestamp();
+ _lastDrawCount = drawCount;
+
+ _hasPendingQuery = false;
+ _consecutiveQueries = 0;
+ }
+
+ // Records a query submission. Returns true when the caller should
+ // interrupt the current render pass to flush queries early.
+ public bool RegisterPendingQuery()
+ {
+ _hasPendingQuery = true;
+ _consecutiveQueries++;
+ _remainingQueries--;
+
+ _queryCountHistory[_queryCountHistoryIndex]++;
+
+ // Interrupt render passes to flush queries, so that early results arrive sooner.
+ if (++_queryCount == InitialQueryCountForFlush)
+ {
+ return true;
+ }
+
+ return false;
+ }
+
+ // Returns how many more queries are expected before the next flush,
+ // based on recent frame history (never returns a non-positive budget).
+ public int GetRemainingQueries()
+ {
+ if (_remainingQueries <= 0)
+ {
+ _remainingQueries = 16;
+ }
+
+ if (_queryCount < InitialQueryCountForFlush)
+ {
+ return Math.Min(InitialQueryCountForFlush - _queryCount, _remainingQueries);
+ }
+
+ return _remainingQueries;
+ }
+
+ // True when a query has been submitted since the last flush.
+ public bool ShouldFlushQuery()
+ {
+ return _hasPendingQuery;
+ }
+
+ // Decides whether to flush on a draw. Only flushes in fast flush mode,
+ // and only after enough draws and enough elapsed time since the last flush.
+ public bool ShouldFlushDraw(ulong drawCount)
+ {
+ if (_fastFlushMode)
+ {
+ long draws = (long)(drawCount - _lastDrawCount);
+
+ if (draws < MinDrawCountForFlush)
+ {
+ if (draws == 0)
+ {
+ // Nothing was drawn since the last flush; push the timer
+ // forward so an idle period doesn't trigger a flush later.
+ _lastFlush = Stopwatch.GetTimestamp();
+ }
+
+ return false;
+ }
+
+ long flushTimeout = DrawFlushTimer;
+
+ long now = Stopwatch.GetTimestamp();
+
+ return now > _lastFlush + flushTimeout;
+ }
+
+ return false;
+ }
+
+ // Decides whether to flush when render targets change. Resets the query
+ // counter; flushes immediately if the change ends a long run of queries,
+ // otherwise applies the same draw-count/elapsed-time gate as draws.
+ public bool ShouldFlushAttachmentChange(ulong drawCount)
+ {
+ _queryCount = 0;
+
+ // Flush when there's an attachment change out of a large block of queries.
+ if (_consecutiveQueries > MinConsecutiveQueryForFlush)
+ {
+ return true;
+ }
+
+ _consecutiveQueries = 0;
+
+ long draws = (long)(drawCount - _lastDrawCount);
+
+ if (draws < MinDrawCountForFlush)
+ {
+ if (draws == 0)
+ {
+ _lastFlush = Stopwatch.GetTimestamp();
+ }
+
+ return false;
+ }
+
+ long flushTimeout = FramebufferFlushTimer;
+
+ long now = Stopwatch.GetTimestamp();
+
+ return now > _lastFlush + flushTimeout;
+ }
+
+ // Called once per presented frame: advances the query prediction window
+ // and re-evaluates the fast flush mode from the average sync wait time.
+ public void Present()
+ {
+ // Query flush prediction.
+
+ _queryCountHistoryIndex = (_queryCountHistoryIndex + 1) % 3;
+
+ // Budget for next frame: the worst recent frame plus some headroom.
+ _remainingQueries = _queryCountHistory.Max() + 10;
+
+ _queryCountHistory[_queryCountHistoryIndex] = 0;
+
+ // Fast flush mode toggle.
+
+ _syncWaitHistory[_syncWaitHistoryIndex] = _gd.SyncManager.GetAndResetWaitTicks();
+
+ _syncWaitHistoryIndex = (_syncWaitHistoryIndex + 1) % SyncWaitAverageCount;
+
+ long averageWait = (long)_syncWaitHistory.Average();
+
+ // Hysteresis: enter fast flush above 1.5ms average wait, leave below 0.1ms.
+ if (_fastFlushMode ? averageWait < FastFlushExitThreshold : averageWait > FastFlushEnterThreshold)
+ {
+ _fastFlushMode = !_fastFlushMode;
+ Logger.Debug?.PrintMsg(LogClass.Gpu, $"Switched fast flush mode: ({_fastFlushMode})");
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/BackgroundResources.cs b/src/Ryujinx.Graphics.Vulkan/BackgroundResources.cs
new file mode 100644
index 00000000..b93b7a25
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/BackgroundResources.cs
@@ -0,0 +1,117 @@
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Threading;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Lazily-created per-thread resources (a light command buffer pool and a
+ // persistent flush buffer) used for GPU work issued off the main thread.
+ class BackgroundResource : IDisposable
+ {
+ private VulkanRenderer _gd;
+ private Device _device;
+
+ private CommandBufferPool _pool;
+ private PersistentFlushBuffer _flushBuffer;
+
+ public BackgroundResource(VulkanRenderer gd, Device device)
+ {
+ _gd = gd;
+ _device = device;
+ }
+
+ // Returns the command buffer pool for this resource, creating it on
+ // first use. Prefers the dedicated background queue when available,
+ // except on AMD where the main queue is used instead.
+ public CommandBufferPool GetPool()
+ {
+ if (_pool == null)
+ {
+ bool useBackground = _gd.BackgroundQueue.Handle != 0 && _gd.Vendor != Vendor.Amd;
+ Queue queue = useBackground ? _gd.BackgroundQueue : _gd.Queue;
+ object queueLock = useBackground ? _gd.BackgroundQueueLock : _gd.QueueLock;
+
+ // Pool creation submits to the queue, so it must hold the queue lock.
+ lock (queueLock)
+ {
+ _pool = new CommandBufferPool(_gd.Api, _device, queue, queueLock, _gd.QueueFamilyIndex, isLight: true);
+ }
+ }
+
+ return _pool;
+ }
+
+ // Returns the persistent flush buffer, creating it on first use.
+ public PersistentFlushBuffer GetFlushBuffer()
+ {
+ if (_flushBuffer == null)
+ {
+ _flushBuffer = new PersistentFlushBuffer(_gd);
+ }
+
+ return _flushBuffer;
+ }
+
+ public void Dispose()
+ {
+ _pool?.Dispose();
+ _flushBuffer?.Dispose();
+ }
+ }
+
+ // Maps each calling thread to its own BackgroundResource, creating entries
+ // on demand and disposing entries whose threads have exited.
+ class BackgroundResources : IDisposable
+ {
+ private VulkanRenderer _gd;
+ private Device _device;
+
+ // Guarded by lock (_resources) for all reads and writes.
+ private Dictionary<Thread, BackgroundResource> _resources;
+
+ public BackgroundResources(VulkanRenderer gd, Device device)
+ {
+ _gd = gd;
+ _device = device;
+
+ _resources = new Dictionary<Thread, BackgroundResource>();
+ }
+
+ // Disposes and removes resources owned by threads that are no longer alive.
+ // Callers must hold the _resources lock.
+ // NOTE(review): removes entries while enumerating the dictionary — this
+ // relies on .NET Core 3.0+ allowing Remove during enumeration; confirm
+ // the project targets a runtime where this is supported.
+ private void Cleanup()
+ {
+ lock (_resources)
+ {
+ foreach (KeyValuePair<Thread, BackgroundResource> tuple in _resources)
+ {
+ if (!tuple.Key.IsAlive)
+ {
+ tuple.Value.Dispose();
+ _resources.Remove(tuple.Key);
+ }
+ }
+ }
+ }
+
+ // Gets (or creates) the BackgroundResource for the current thread.
+ public BackgroundResource Get()
+ {
+ Thread thread = Thread.CurrentThread;
+
+ lock (_resources)
+ {
+ BackgroundResource resource;
+ if (!_resources.TryGetValue(thread, out resource))
+ {
+ // Creating a new entry is a good opportunity to prune dead threads.
+ Cleanup();
+
+ resource = new BackgroundResource(_gd, _device);
+
+ _resources[thread] = resource;
+ }
+
+ return resource;
+ }
+ }
+
+ public void Dispose()
+ {
+ lock (_resources)
+ {
+ foreach (var resource in _resources.Values)
+ {
+ resource.Dispose();
+ }
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/BitMap.cs b/src/Ryujinx.Graphics.Vulkan/BitMap.cs
new file mode 100644
index 00000000..efa71fc7
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/BitMap.cs
@@ -0,0 +1,157 @@
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Fixed-size bit set backed by an array of 64-bit words. The struct itself
+ // is readonly but the backing array is mutable, so Set/Clear work in place.
+ readonly struct BitMap
+ {
+ public const int IntSize = 64;
+
+ // Shift/mask used to split a bit index into (word index, bit-in-word).
+ private const int IntShift = 6;
+ private const int IntMask = IntSize - 1;
+
+ private readonly long[] _masks;
+
+ // Creates a bit map able to hold at least count bits (rounded up to
+ // whole 64-bit words).
+ public BitMap(int count)
+ {
+ _masks = new long[(count + IntMask) / IntSize];
+ }
+
+ // Returns true if any bit is set.
+ public bool AnySet()
+ {
+ for (int i = 0; i < _masks.Length; i++)
+ {
+ if (_masks[i] != 0)
+ {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ // Returns true if the given bit is set.
+ public bool IsSet(int bit)
+ {
+ int wordIndex = bit >> IntShift;
+ int wordBit = bit & IntMask;
+
+ long wordMask = 1L << wordBit;
+
+ return (_masks[wordIndex] & wordMask) != 0;
+ }
+
+ // Returns true if any bit in the inclusive range [start, end] is set.
+ public bool IsSet(int start, int end)
+ {
+ if (start == end)
+ {
+ return IsSet(start);
+ }
+
+ // Masks covering the partial words at the start and end of the range.
+ int startIndex = start >> IntShift;
+ int startBit = start & IntMask;
+ long startMask = -1L << startBit;
+
+ int endIndex = end >> IntShift;
+ int endBit = end & IntMask;
+ long endMask = (long)(ulong.MaxValue >> (IntMask - endBit));
+
+ if (startIndex == endIndex)
+ {
+ return (_masks[startIndex] & startMask & endMask) != 0;
+ }
+
+ if ((_masks[startIndex] & startMask) != 0)
+ {
+ return true;
+ }
+
+ // Full words strictly between the start and end words.
+ for (int i = startIndex + 1; i < endIndex; i++)
+ {
+ if (_masks[i] != 0)
+ {
+ return true;
+ }
+ }
+
+ if ((_masks[endIndex] & endMask) != 0)
+ {
+ return true;
+ }
+
+ return false;
+ }
+
+ // Sets the given bit. Returns true if the bit was previously clear,
+ // false if it was already set (used by Auto<T> to deduplicate ownership).
+ public bool Set(int bit)
+ {
+ int wordIndex = bit >> IntShift;
+ int wordBit = bit & IntMask;
+
+ long wordMask = 1L << wordBit;
+
+ if ((_masks[wordIndex] & wordMask) != 0)
+ {
+ return false;
+ }
+
+ _masks[wordIndex] |= wordMask;
+
+ return true;
+ }
+
+ // Sets all bits in the inclusive range [start, end].
+ public void SetRange(int start, int end)
+ {
+ if (start == end)
+ {
+ Set(start);
+ return;
+ }
+
+ int startIndex = start >> IntShift;
+ int startBit = start & IntMask;
+ long startMask = -1L << startBit;
+
+ int endIndex = end >> IntShift;
+ int endBit = end & IntMask;
+ long endMask = (long)(ulong.MaxValue >> (IntMask - endBit));
+
+ if (startIndex == endIndex)
+ {
+ _masks[startIndex] |= startMask & endMask;
+ }
+ else
+ {
+ _masks[startIndex] |= startMask;
+
+ for (int i = startIndex + 1; i < endIndex; i++)
+ {
+ // -1 has all 64 bits set.
+ _masks[i] |= -1;
+ }
+
+ _masks[endIndex] |= endMask;
+ }
+ }
+
+ // Clears the given bit.
+ public void Clear(int bit)
+ {
+ int wordIndex = bit >> IntShift;
+ int wordBit = bit & IntMask;
+
+ long wordMask = 1L << wordBit;
+
+ _masks[wordIndex] &= ~wordMask;
+ }
+
+ // Clears all bits.
+ public void Clear()
+ {
+ for (int i = 0; i < _masks.Length; i++)
+ {
+ _masks[i] = 0;
+ }
+ }
+
+ // Clears whole 64-bit words in the inclusive WORD index range [start, end].
+ // Note: unlike the other range methods, the arguments here are word
+ // indices, not bit indices.
+ public void ClearInt(int start, int end)
+ {
+ for (int i = start; i <= end; i++)
+ {
+ _masks[i] = 0;
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/BufferAllocationType.cs b/src/Ryujinx.Graphics.Vulkan/BufferAllocationType.cs
new file mode 100644
index 00000000..81489041
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/BufferAllocationType.cs
@@ -0,0 +1,12 @@
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Memory placement strategy for a buffer. Ordering matters elsewhere:
+ // BufferHolder compares the current and desired type to decide on backing
+ // swaps (see BufferHolder.ConsiderBackingSwap).
+ internal enum BufferAllocationType
+ {
+ // Let usage metrics pick the backing type dynamically.
+ Auto = 0,
+
+ HostMappedNoCache,
+ HostMapped,
+ DeviceLocal,
+ DeviceLocalMapped
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/BufferHolder.cs b/src/Ryujinx.Graphics.Vulkan/BufferHolder.cs
new file mode 100644
index 00000000..21b81bdd
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/BufferHolder.cs
@@ -0,0 +1,788 @@
+using Ryujinx.Common.Logging;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Runtime.CompilerServices;
+using System.Threading;
+using VkBuffer = Silk.NET.Vulkan.Buffer;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class BufferHolder : IDisposable
+ {
+ private const int MaxUpdateBufferSize = 0x10000;
+
+ private const int SetCountThreshold = 100;
+ private const int WriteCountThreshold = 50;
+ private const int FlushCountThreshold = 5;
+
+ public const int DeviceLocalSizeThreshold = 256 * 1024; // 256kb
+
+ public const AccessFlags DefaultAccessFlags =
+ AccessFlags.IndirectCommandReadBit |
+ AccessFlags.ShaderReadBit |
+ AccessFlags.ShaderWriteBit |
+ AccessFlags.TransferReadBit |
+ AccessFlags.TransferWriteBit |
+ AccessFlags.UniformReadBit;
+
+ private readonly VulkanRenderer _gd;
+ private readonly Device _device;
+ private MemoryAllocation _allocation;
+ private Auto<DisposableBuffer> _buffer;
+ private Auto<MemoryAllocation> _allocationAuto;
+ private ulong _bufferHandle;
+
+ private CacheByRange<BufferHolder> _cachedConvertedBuffers;
+
+ public int Size { get; }
+
+ private IntPtr _map;
+
+ private MultiFenceHolder _waitable;
+
+ private bool _lastAccessIsWrite;
+
+ private BufferAllocationType _baseType;
+ private BufferAllocationType _currentType;
+ private bool _swapQueued;
+
+ public BufferAllocationType DesiredType { get; private set; }
+
+ private int _setCount;
+ private int _writeCount;
+ private int _flushCount;
+ private int _flushTemp;
+
+ private ReaderWriterLock _flushLock;
+ private FenceHolder _flushFence;
+ private int _flushWaiting;
+
+ private List<Action> _swapActions;
+
+ public BufferHolder(VulkanRenderer gd, Device device, VkBuffer buffer, MemoryAllocation allocation, int size, BufferAllocationType type, BufferAllocationType currentType)
+ {
+ _gd = gd;
+ _device = device;
+ _allocation = allocation;
+ _allocationAuto = new Auto<MemoryAllocation>(allocation);
+ _waitable = new MultiFenceHolder(size);
+ _buffer = new Auto<DisposableBuffer>(new DisposableBuffer(gd.Api, device, buffer), _waitable, _allocationAuto);
+ _bufferHandle = buffer.Handle;
+ Size = size;
+ _map = allocation.HostPointer;
+
+ _baseType = type;
+ _currentType = currentType;
+ DesiredType = currentType;
+
+ _flushLock = new ReaderWriterLock();
+ }
+
+ public bool TryBackingSwap(ref CommandBufferScoped? cbs)
+ {
+ if (_swapQueued && DesiredType != _currentType)
+ {
+ // Only swap if the buffer is not used in any queued command buffer.
+ bool isRented = _buffer.HasRentedCommandBufferDependency(_gd.CommandBufferPool);
+
+ if (!isRented && _gd.CommandBufferPool.OwnedByCurrentThread && !_flushLock.IsReaderLockHeld)
+ {
+ var currentAllocation = _allocationAuto;
+ var currentBuffer = _buffer;
+ IntPtr currentMap = _map;
+
+ (VkBuffer buffer, MemoryAllocation allocation, BufferAllocationType resultType) = _gd.BufferManager.CreateBacking(_gd, Size, DesiredType, false, _currentType);
+
+ if (buffer.Handle != 0)
+ {
+ _flushLock.AcquireWriterLock(Timeout.Infinite);
+
+ ClearFlushFence();
+
+ _waitable = new MultiFenceHolder(Size);
+
+ _allocation = allocation;
+ _allocationAuto = new Auto<MemoryAllocation>(allocation);
+ _buffer = new Auto<DisposableBuffer>(new DisposableBuffer(_gd.Api, _device, buffer), _waitable, _allocationAuto);
+ _bufferHandle = buffer.Handle;
+ _map = allocation.HostPointer;
+
+ if (_map != IntPtr.Zero && currentMap != IntPtr.Zero)
+ {
+ // Copy data directly. Readbacks don't have to wait if this is done.
+
+ unsafe
+ {
+ new Span<byte>((void*)currentMap, Size).CopyTo(new Span<byte>((void*)_map, Size));
+ }
+ }
+ else
+ {
+ if (cbs == null)
+ {
+ cbs = _gd.CommandBufferPool.Rent();
+ }
+
+ CommandBufferScoped cbsV = cbs.Value;
+
+ Copy(_gd, cbsV, currentBuffer, _buffer, 0, 0, Size);
+
+ // Need to wait for the data to reach the new buffer before data can be flushed.
+
+ _flushFence = _gd.CommandBufferPool.GetFence(cbsV.CommandBufferIndex);
+ _flushFence.Get();
+ }
+
+ Logger.Debug?.PrintMsg(LogClass.Gpu, $"Converted {Size} buffer {_currentType} to {resultType}");
+
+ _currentType = resultType;
+
+ if (_swapActions != null)
+ {
+ foreach (var action in _swapActions)
+ {
+ action();
+ }
+
+ _swapActions.Clear();
+ }
+
+ currentBuffer.Dispose();
+ currentAllocation.Dispose();
+
+ _gd.PipelineInternal.SwapBuffer(currentBuffer, _buffer);
+
+ _flushLock.ReleaseWriterLock();
+ }
+
+ _swapQueued = false;
+
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+ else
+ {
+ _swapQueued = false;
+
+ return true;
+ }
+ }
+
+ private void ConsiderBackingSwap()
+ {
+ if (_baseType == BufferAllocationType.Auto)
+ {
+ if (_writeCount >= WriteCountThreshold || _setCount >= SetCountThreshold || _flushCount >= FlushCountThreshold)
+ {
+ if (_flushCount > 0 || _flushTemp-- > 0)
+ {
+ // Buffers that flush should ideally be mapped in host address space for easy copies.
+ // If the buffer is large it will do better on GPU memory, as there will be more writes than data flushes (typically individual pages).
+ // If it is small, then it's likely most of the buffer will be flushed so we want it on host memory, as access is cached.
+ DesiredType = Size > DeviceLocalSizeThreshold ? BufferAllocationType.DeviceLocalMapped : BufferAllocationType.HostMapped;
+
+ // It's harder for a buffer that is flushed to revert to another type of mapping.
+ if (_flushCount > 0)
+ {
+ _flushTemp = 1000;
+ }
+ }
+ else if (_writeCount >= WriteCountThreshold)
+ {
+ // Buffers that are written often should ideally be in the device local heap. (Storage buffers)
+ DesiredType = BufferAllocationType.DeviceLocal;
+ }
+ else if (_setCount > SetCountThreshold)
+ {
+ // Buffers that have their data set often should ideally be host mapped. (Constant buffers)
+ DesiredType = BufferAllocationType.HostMapped;
+ }
+
+ _flushCount = 0;
+ _writeCount = 0;
+ _setCount = 0;
+ }
+
+ if (!_swapQueued && DesiredType != _currentType)
+ {
+ _swapQueued = true;
+
+ _gd.PipelineInternal.AddBackingSwap(this);
+ }
+ }
+ }
+
+ public unsafe Auto<DisposableBufferView> CreateView(VkFormat format, int offset, int size, Action invalidateView)
+ {
+ var bufferViewCreateInfo = new BufferViewCreateInfo()
+ {
+ SType = StructureType.BufferViewCreateInfo,
+ Buffer = new VkBuffer(_bufferHandle),
+ Format = format,
+ Offset = (uint)offset,
+ Range = (uint)size
+ };
+
+ _gd.Api.CreateBufferView(_device, bufferViewCreateInfo, null, out var bufferView).ThrowOnError();
+
+ (_swapActions ??= new List<Action>()).Add(invalidateView);
+
+ return new Auto<DisposableBufferView>(new DisposableBufferView(_gd.Api, _device, bufferView), _waitable, _buffer);
+ }
+
+ public void InheritMetrics(BufferHolder other)
+ {
+ _setCount = other._setCount;
+ _writeCount = other._writeCount;
+ _flushCount = other._flushCount;
+ _flushTemp = other._flushTemp;
+ }
+
+ public unsafe void InsertBarrier(CommandBuffer commandBuffer, bool isWrite)
+ {
+ // If the last access is write, we always need a barrier to be sure we will read or modify
+ // the correct data.
+ // If the last access is read, and current one is a write, we need to wait until the
+ // read finishes to avoid overwriting data still in use.
+ // Otherwise, if the last access is a read and the current one too, we don't need barriers.
+ bool needsBarrier = isWrite || _lastAccessIsWrite;
+
+ _lastAccessIsWrite = isWrite;
+
+ if (needsBarrier)
+ {
+ MemoryBarrier memoryBarrier = new MemoryBarrier()
+ {
+ SType = StructureType.MemoryBarrier,
+ SrcAccessMask = DefaultAccessFlags,
+ DstAccessMask = DefaultAccessFlags
+ };
+
+ _gd.Api.CmdPipelineBarrier(
+ commandBuffer,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.AllCommandsBit,
+ DependencyFlags.DeviceGroupBit,
+ 1,
+ memoryBarrier,
+ 0,
+ null,
+ 0,
+ null);
+ }
+ }
+
+ public Auto<DisposableBuffer> GetBuffer()
+ {
+ return _buffer;
+ }
+
+ public Auto<DisposableBuffer> GetBuffer(CommandBuffer commandBuffer, bool isWrite = false, bool isSSBO = false)
+ {
+ if (isWrite)
+ {
+ _writeCount++;
+
+ SignalWrite(0, Size);
+ }
+ else if (isSSBO)
+ {
+ // Always consider SSBO access for swapping to device local memory.
+
+ _writeCount++;
+
+ ConsiderBackingSwap();
+ }
+
+ return _buffer;
+ }
+
+ public Auto<DisposableBuffer> GetBuffer(CommandBuffer commandBuffer, int offset, int size, bool isWrite = false)
+ {
+ if (isWrite)
+ {
+ _writeCount++;
+
+ SignalWrite(offset, size);
+ }
+
+ return _buffer;
+ }
+
+ public void SignalWrite(int offset, int size)
+ {
+ ConsiderBackingSwap();
+
+ if (offset == 0 && size == Size)
+ {
+ _cachedConvertedBuffers.Clear();
+ }
+ else
+ {
+ _cachedConvertedBuffers.ClearRange(offset, size);
+ }
+ }
+
+ // Reinterprets the stored 64-bit handle value as a GAL BufferHandle.
+ public BufferHandle GetHandle()
+ {
+ ulong rawHandle = _bufferHandle;
+
+ return Unsafe.As<ulong, BufferHandle>(ref rawHandle);
+ }
+
+ // Returns the persistent host mapping pointer of this buffer.
+ // NOTE(review): the offset and mappingSize parameters are ignored here — callers
+ // appear to be expected to apply the offset themselves; confirm against call sites.
+ public unsafe IntPtr Map(int offset, int mappingSize)
+ {
+ return _map;
+ }
+
+ // Releases the pending flush fence, if any.
+ private void ClearFlushFence()
+ {
+ // Assumes _flushLock is held as writer.
+
+ if (_flushFence != null)
+ {
+ if (_flushWaiting == 0)
+ {
+ // No thread is waiting on the fence, so this thread must release the reference.
+ _flushFence.Put();
+ }
+
+ _flushFence = null;
+ }
+ }
+
+ // Waits for any pending flush fence, so that flushed data is in place before a read.
+ private void WaitForFlushFence()
+ {
+ // Assumes the _flushLock is held as reader, returns in same state.
+
+ if (_flushFence != null)
+ {
+ // If storage has changed, make sure the fence has been reached so that the data is in place.
+
+ var cookie = _flushLock.UpgradeToWriterLock(Timeout.Infinite);
+
+ // Re-check under the writer lock: another thread may have cleared the fence meanwhile.
+ if (_flushFence != null)
+ {
+ var fence = _flushFence;
+ Interlocked.Increment(ref _flushWaiting);
+
+ // Don't wait in the lock.
+
+ var restoreCookie = _flushLock.ReleaseLock();
+
+ fence.Wait();
+
+ _flushLock.RestoreLock(ref restoreCookie);
+
+ // The last waiter to finish releases the fence reference.
+ if (Interlocked.Decrement(ref _flushWaiting) == 0)
+ {
+ fence.Put();
+ }
+
+ _flushFence = null;
+ }
+
+ _flushLock.DowngradeFromWriterLock(ref cookie);
+ }
+ }
+
+ // Reads back a range of the buffer. Host mapped buffers are read directly from the
+ // mapping; otherwise the data is flushed from the GPU through a flush buffer.
+ public unsafe PinnedSpan<byte> GetData(int offset, int size)
+ {
+ _flushLock.AcquireReaderLock(Timeout.Infinite);
+
+ WaitForFlushFence();
+
+ _flushCount++;
+
+ Span<byte> result;
+
+ if (_map != IntPtr.Zero)
+ {
+ result = GetDataStorage(offset, size);
+
+ // Need to be careful here, the buffer can't be unmapped while the data is being used.
+ _buffer.IncrementReferenceCount();
+
+ _flushLock.ReleaseReaderLock();
+
+ return PinnedSpan<byte>.UnsafeFromSpan(result, _buffer.DecrementReferenceCount);
+ }
+ else
+ {
+ BackgroundResource resource = _gd.BackgroundResources.Get();
+
+ if (_gd.CommandBufferPool.OwnedByCurrentThread)
+ {
+ // On the main thread: flush pending work and use the main command buffer pool.
+ _gd.FlushAllCommands();
+
+ result = resource.GetFlushBuffer().GetBufferData(_gd.CommandBufferPool, this, offset, size);
+ }
+ else
+ {
+ result = resource.GetFlushBuffer().GetBufferData(resource.GetPool(), this, offset, size);
+ }
+
+ _flushLock.ReleaseReaderLock();
+
+ // Flush buffer is pinned until the next GetBufferData on the thread, which is fine for current uses.
+ return PinnedSpan<byte>.UnsafeFromSpan(result);
+ }
+ }
+
+ // Returns a span over the host mapping for the given range, clamped to the buffer
+ // size. Throws InvalidOperationException if the buffer has no host mapping.
+ public unsafe Span<byte> GetDataStorage(int offset, int size)
+ {
+ int mappingSize = Math.Min(size, Size - offset);
+
+ if (_map != IntPtr.Zero)
+ {
+ return new Span<byte>((void*)(_map + offset), mappingSize);
+ }
+
+ throw new InvalidOperationException("The buffer is not host mapped.");
+ }
+
+ // Writes data into the buffer at the given offset, clamped to the buffer size.
+ // Picks the cheapest safe path: direct CPU copy into the mapping when the range is
+ // not in use by the GPU, an inline command buffer update, or a staging buffer copy.
+ public unsafe void SetData(int offset, ReadOnlySpan<byte> data, CommandBufferScoped? cbs = null, Action endRenderPass = null)
+ {
+ int dataSize = Math.Min(data.Length, Size - offset);
+ if (dataSize == 0)
+ {
+ return;
+ }
+
+ _setCount++;
+
+ if (_map != IntPtr.Zero)
+ {
+ // If persistently mapped, set the data directly if the buffer is not currently in use.
+ bool isRented = _buffer.HasRentedCommandBufferDependency(_gd.CommandBufferPool);
+
+ // If the buffer is rented, take a little more time and check if the use overlaps this handle.
+ bool needsFlush = isRented && _waitable.IsBufferRangeInUse(offset, dataSize);
+
+ if (!needsFlush)
+ {
+ WaitForFences(offset, dataSize);
+
+ data.Slice(0, dataSize).CopyTo(new Span<byte>((void*)(_map + offset), dataSize));
+
+ SignalWrite(offset, dataSize);
+
+ return;
+ }
+ }
+
+ if (cbs != null &&
+ _gd.PipelineInternal.RenderPassActive &&
+ !(_buffer.HasCommandBufferDependency(cbs.Value) &&
+ _waitable.IsBufferRangeInUse(cbs.Value.CommandBufferIndex, offset, dataSize)))
+ {
+ // If the buffer hasn't been used on the command buffer yet, try to preload the data.
+ // This avoids ending and beginning render passes on each buffer data upload.
+
+ cbs = _gd.PipelineInternal.GetPreloadCommandBuffer();
+ endRenderPass = null;
+ }
+
+ // Fall back to the staging buffer when inline updates are disabled, too large, or rejected.
+ if (cbs == null ||
+ !VulkanConfiguration.UseFastBufferUpdates ||
+ data.Length > MaxUpdateBufferSize ||
+ !TryPushData(cbs.Value, endRenderPass, offset, data))
+ {
+ _gd.BufferManager.StagingBuffer.PushData(_gd.CommandBufferPool, cbs, endRenderPass, this, offset, data);
+ }
+ }
+
+ // Writes data into the buffer without checking for in-flight GPU usage of the range:
+ // copies directly into the host mapping, or pushes through the staging buffer otherwise.
+ public unsafe void SetDataUnchecked(int offset, ReadOnlySpan<byte> data)
+ {
+ int dataSize = Math.Min(data.Length, Size - offset);
+ if (dataSize == 0)
+ {
+ return;
+ }
+
+ if (_map != IntPtr.Zero)
+ {
+ data.Slice(0, dataSize).CopyTo(new Span<byte>((void*)(_map + offset), dataSize));
+ }
+ else
+ {
+ _gd.BufferManager.StagingBuffer.PushData(_gd.CommandBufferPool, null, null, this, offset, data);
+ }
+ }
+
+ // Writes data with an inline command buffer update, throwing if the offset or size
+ // is not 4-byte aligned (the requirement enforced by TryPushData).
+ public void SetDataInline(CommandBufferScoped cbs, Action endRenderPass, int dstOffset, ReadOnlySpan<byte> data)
+ {
+ if (!TryPushData(cbs, endRenderPass, dstOffset, data))
+ {
+ throw new ArgumentException($"Invalid offset 0x{dstOffset:X} or data size 0x{data.Length:X}.");
+ }
+ }
+
+ // Attempts to write data with vkCmdUpdateBuffer, surrounded by barriers so the
+ // transfer is ordered against other accesses. Returns false when the offset or
+ // length is not 4-byte aligned (a vkCmdUpdateBuffer requirement).
+ private unsafe bool TryPushData(CommandBufferScoped cbs, Action endRenderPass, int dstOffset, ReadOnlySpan<byte> data)
+ {
+ if ((dstOffset & 3) != 0 || (data.Length & 3) != 0)
+ {
+ return false;
+ }
+
+ endRenderPass?.Invoke();
+
+ var dstBuffer = GetBuffer(cbs.CommandBuffer, dstOffset, data.Length, true).Get(cbs, dstOffset, data.Length).Value;
+
+ // Undo the write count increment done by GetBuffer above — presumably so inline
+ // updates don't skew the backing-swap heuristics; confirm intent.
+ _writeCount--;
+
+ InsertBufferBarrier(
+ _gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ DefaultAccessFlags,
+ AccessFlags.TransferWriteBit,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.TransferBit,
+ dstOffset,
+ data.Length);
+
+ fixed (byte* pData = data)
+ {
+ // vkCmdUpdateBuffer has a per-call size limit, so push in chunks of MaxUpdateBufferSize.
+ for (ulong offset = 0; offset < (ulong)data.Length;)
+ {
+ ulong size = Math.Min(MaxUpdateBufferSize, (ulong)data.Length - offset);
+ _gd.Api.CmdUpdateBuffer(cbs.CommandBuffer, dstBuffer, (ulong)dstOffset + offset, size, pData + offset);
+ offset += size;
+ }
+ }
+
+ InsertBufferBarrier(
+ _gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ AccessFlags.TransferWriteBit,
+ DefaultAccessFlags,
+ PipelineStageFlags.TransferBit,
+ PipelineStageFlags.AllCommandsBit,
+ dstOffset,
+ data.Length);
+
+ return true;
+ }
+
+ // Records a GPU-side buffer-to-buffer copy, with barriers before and after so the
+ // transfer is ordered against any other access to the destination range.
+ public static unsafe void Copy(
+ VulkanRenderer gd,
+ CommandBufferScoped cbs,
+ Auto<DisposableBuffer> src,
+ Auto<DisposableBuffer> dst,
+ int srcOffset,
+ int dstOffset,
+ int size)
+ {
+ var srcBuffer = src.Get(cbs, srcOffset, size).Value;
+ var dstBuffer = dst.Get(cbs, dstOffset, size).Value;
+
+ InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ DefaultAccessFlags,
+ AccessFlags.TransferWriteBit,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.TransferBit,
+ dstOffset,
+ size);
+
+ var region = new BufferCopy((ulong)srcOffset, (ulong)dstOffset, (ulong)size);
+
+ gd.Api.CmdCopyBuffer(cbs.CommandBuffer, srcBuffer, dstBuffer, 1, &region);
+
+ InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ AccessFlags.TransferWriteBit,
+ DefaultAccessFlags,
+ PipelineStageFlags.TransferBit,
+ PipelineStageFlags.AllCommandsBit,
+ dstOffset,
+ size);
+ }
+
+ // Records a VkBufferMemoryBarrier over the given range with the provided access
+ // and stage masks. Queue family ownership is left unchanged (Ignored on both sides).
+ public static unsafe void InsertBufferBarrier(
+ VulkanRenderer gd,
+ CommandBuffer commandBuffer,
+ VkBuffer buffer,
+ AccessFlags srcAccessMask,
+ AccessFlags dstAccessMask,
+ PipelineStageFlags srcStageMask,
+ PipelineStageFlags dstStageMask,
+ int offset,
+ int size)
+ {
+ BufferMemoryBarrier memoryBarrier = new BufferMemoryBarrier()
+ {
+ SType = StructureType.BufferMemoryBarrier,
+ SrcAccessMask = srcAccessMask,
+ DstAccessMask = dstAccessMask,
+ SrcQueueFamilyIndex = Vk.QueueFamilyIgnored,
+ DstQueueFamilyIndex = Vk.QueueFamilyIgnored,
+ Buffer = buffer,
+ Offset = (ulong)offset,
+ Size = (ulong)size
+ };
+
+ gd.Api.CmdPipelineBarrier(
+ commandBuffer,
+ srcStageMask,
+ dstStageMask,
+ 0,
+ 0,
+ null,
+ 1,
+ memoryBarrier,
+ 0,
+ null);
+ }
+
+ // Blocks until every fence tracking usage of this buffer has been signalled.
+ public void WaitForFences() => _waitable.WaitForFences(_gd.Api, _device);
+
+ // Blocks until every fence tracking usage overlapping the given range has been signalled.
+ public void WaitForFences(int offset, int size) => _waitable.WaitForFences(_gd.Api, _device, offset, size);
+
+ // Clamps the given range to the bounds of this buffer. Returns false when the
+ // offset lies at or beyond the end of the buffer; otherwise shrinks size so that
+ // offset + size does not exceed Size.
+ private bool BoundToRange(int offset, ref int size)
+ {
+ bool inRange = offset < Size;
+
+ if (inRange)
+ {
+ size = Math.Min(size, Size - offset);
+ }
+
+ return inRange;
+ }
+
+ // Gets (or creates and caches) a copy of the given range with 8-bit indices
+ // converted to 16-bit. Returns null when the offset is out of range.
+ public Auto<DisposableBuffer> GetBufferI8ToI16(CommandBufferScoped cbs, int offset, int size)
+ {
+ if (!BoundToRange(offset, ref size))
+ {
+ return null;
+ }
+
+ var key = new I8ToI16CacheKey(_gd);
+
+ if (!_cachedConvertedBuffers.TryGetValue(offset, size, key, out var holder))
+ {
+ // Double the size (I8 -> I16), rounded up to a multiple of 4 bytes.
+ holder = _gd.BufferManager.Create(_gd, (size * 2 + 3) & ~3);
+
+ _gd.PipelineInternal.EndRenderPass();
+ _gd.HelperShader.ConvertI8ToI16(_gd, cbs, this, holder, offset, size);
+
+ key.SetBuffer(holder.GetBuffer());
+
+ _cachedConvertedBuffers.Add(offset, size, key, holder);
+ }
+
+ return holder.GetBuffer();
+ }
+
+ // Gets (or creates and caches) a copy of the given vertex range with the stride
+ // padded up to the requested alignment. Returns null when the offset is out of range.
+ public Auto<DisposableBuffer> GetAlignedVertexBuffer(CommandBufferScoped cbs, int offset, int size, int stride, int alignment)
+ {
+ if (!BoundToRange(offset, ref size))
+ {
+ return null;
+ }
+
+ var key = new AlignedVertexBufferCacheKey(_gd, stride, alignment);
+
+ if (!_cachedConvertedBuffers.TryGetValue(offset, size, key, out var holder))
+ {
+ // Round the stride up to the next multiple of the alignment (alignment must be a power of two).
+ int alignedStride = (stride + (alignment - 1)) & -alignment;
+
+ holder = _gd.BufferManager.Create(_gd, (size / stride) * alignedStride);
+
+ _gd.PipelineInternal.EndRenderPass();
+ _gd.HelperShader.ChangeStride(_gd, cbs, this, holder, offset, size, stride, alignedStride);
+
+ key.SetBuffer(holder.GetBuffer());
+
+ _cachedConvertedBuffers.Add(offset, size, key, holder);
+ }
+
+ return holder.GetBuffer();
+ }
+
+ // Gets (or creates and caches) an index buffer converted to a topology that the
+ // host supports, following the given pattern. Returns null when the offset is out of range.
+ public Auto<DisposableBuffer> GetBufferTopologyConversion(CommandBufferScoped cbs, int offset, int size, IndexBufferPattern pattern, int indexSize)
+ {
+ if (!BoundToRange(offset, ref size))
+ {
+ return null;
+ }
+
+ var key = new TopologyConversionCacheKey(_gd, pattern, indexSize);
+
+ if (!_cachedConvertedBuffers.TryGetValue(offset, size, key, out var holder))
+ {
+ // The destination index size is always I32.
+
+ int indexCount = size / indexSize;
+
+ int convertedCount = pattern.GetConvertedCount(indexCount);
+
+ holder = _gd.BufferManager.Create(_gd, convertedCount * 4);
+
+ _gd.PipelineInternal.EndRenderPass();
+ _gd.HelperShader.ConvertIndexBuffer(_gd, cbs, this, holder, pattern, indexSize, offset, indexCount);
+
+ key.SetBuffer(holder.GetBuffer());
+
+ _cachedConvertedBuffers.Add(offset, size, key, holder);
+ }
+
+ return holder.GetBuffer();
+ }
+
+ // Looks up a cached converted buffer for the given range and key.
+ public bool TryGetCachedConvertedBuffer(int offset, int size, ICacheKey key, out BufferHolder holder) =>
+ _cachedConvertedBuffers.TryGetValue(offset, size, key, out holder);
+
+ // Registers a converted buffer for the given range and key in the conversion cache.
+ public void AddCachedConvertedBuffer(int offset, int size, ICacheKey key, BufferHolder holder) =>
+ _cachedConvertedBuffers.Add(offset, size, key, holder);
+
+ // Attaches a dependency to an existing cached conversion, so the dependent entry is
+ // invalidated together with it.
+ public void AddCachedConvertedBufferDependency(int offset, int size, ICacheKey key, Dependency dependency) =>
+ _cachedConvertedBuffers.AddDependency(offset, size, key, dependency);
+
+ // Evicts the cached conversion matching the given range and key.
+ public void RemoveCachedConvertedBuffer(int offset, int size, ICacheKey key) =>
+ _cachedConvertedBuffers.Remove(offset, size, key);
+
+ // Releases the buffer, its allocation and cached conversions, and clears any
+ // pending flush fence.
+ public void Dispose()
+ {
+ _swapQueued = false;
+
+ // Report the disposed buffer's size to the pipeline, which may flush pending
+ // commands so the memory can actually be freed sooner.
+ _gd.PipelineInternal?.FlushCommandsIfWeightExceeding(_buffer, (ulong)Size);
+
+ _buffer.Dispose();
+ _allocationAuto.Dispose();
+ _cachedConvertedBuffers.Dispose();
+
+ _flushLock.AcquireWriterLock(Timeout.Infinite);
+
+ ClearFlushFence();
+
+ _flushLock.ReleaseWriterLock();
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/BufferManager.cs b/src/Ryujinx.Graphics.Vulkan/BufferManager.cs
new file mode 100644
index 00000000..f8f41e5b
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/BufferManager.cs
@@ -0,0 +1,455 @@
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using VkFormat = Silk.NET.Vulkan.Format;
+using VkBuffer = Silk.NET.Vulkan.Buffer;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Creates, tracks and deletes Vulkan buffers, translating GAL buffer handles into
+ /// <see cref="BufferHolder"/> instances and providing converted views (index format,
+ /// stride alignment, topology) on demand.
+ /// </summary>
+ class BufferManager : IDisposable
+ {
+ private const MemoryPropertyFlags DefaultBufferMemoryFlags =
+ MemoryPropertyFlags.HostVisibleBit |
+ MemoryPropertyFlags.HostCoherentBit |
+ MemoryPropertyFlags.HostCachedBit;
+
+ // Some drivers don't expose a "HostCached" memory type,
+ // so we need those alternative flags for the allocation to succeed there.
+ private const MemoryPropertyFlags DefaultBufferMemoryNoCacheFlags =
+ MemoryPropertyFlags.HostVisibleBit |
+ MemoryPropertyFlags.HostCoherentBit;
+
+ private const MemoryPropertyFlags DeviceLocalBufferMemoryFlags =
+ MemoryPropertyFlags.DeviceLocalBit;
+
+ private const MemoryPropertyFlags DeviceLocalMappedBufferMemoryFlags =
+ MemoryPropertyFlags.DeviceLocalBit |
+ MemoryPropertyFlags.HostVisibleBit |
+ MemoryPropertyFlags.HostCoherentBit;
+
+ // Buffers are created with every usage enabled, since GAL buffers can be bound anywhere.
+ private const BufferUsageFlags DefaultBufferUsageFlags =
+ BufferUsageFlags.TransferSrcBit |
+ BufferUsageFlags.TransferDstBit |
+ BufferUsageFlags.UniformTexelBufferBit |
+ BufferUsageFlags.StorageTexelBufferBit |
+ BufferUsageFlags.UniformBufferBit |
+ BufferUsageFlags.StorageBufferBit |
+ BufferUsageFlags.IndexBufferBit |
+ BufferUsageFlags.VertexBufferBit |
+ BufferUsageFlags.TransformFeedbackBufferBitExt;
+
+ private readonly Device _device;
+
+ private readonly IdList<BufferHolder> _buffers;
+
+ // Running total of buffers created with a handle; never decremented on Delete.
+ public int BufferCount { get; private set; }
+
+ public StagingBuffer StagingBuffer { get; }
+
+ public BufferManager(VulkanRenderer gd, Device device)
+ {
+ _device = device;
+ _buffers = new IdList<BufferHolder>();
+ StagingBuffer = new StagingBuffer(gd, this);
+ }
+
+ // Creates a buffer and returns a GAL handle for it (discarding the holder).
+ public BufferHandle CreateWithHandle(VulkanRenderer gd, int size, BufferAllocationType baseType = BufferAllocationType.HostMapped, BufferHandle storageHint = default)
+ {
+ return CreateWithHandle(gd, size, out _, baseType, storageHint);
+ }
+
+ // Creates a buffer, registers it on the id list and returns both the handle and holder.
+ // Returns BufferHandle.Null when the backing allocation failed.
+ public BufferHandle CreateWithHandle(VulkanRenderer gd, int size, out BufferHolder holder, BufferAllocationType baseType = BufferAllocationType.HostMapped, BufferHandle storageHint = default)
+ {
+ holder = Create(gd, size, baseType: baseType, storageHint: storageHint);
+ if (holder == null)
+ {
+ return BufferHandle.Null;
+ }
+
+ BufferCount++;
+
+ ulong handle64 = (uint)_buffers.Add(holder);
+
+ return Unsafe.As<ulong, BufferHandle>(ref handle64);
+ }
+
+ // Creates the VkBuffer and its memory allocation, walking down the allocation type
+ // ladder (towards fallbackType) when a memory type is not available on the device.
+ // Returns default when no allocation succeeded.
+ public unsafe (VkBuffer buffer, MemoryAllocation allocation, BufferAllocationType resultType) CreateBacking(
+ VulkanRenderer gd,
+ int size,
+ BufferAllocationType type,
+ bool forConditionalRendering = false,
+ BufferAllocationType fallbackType = BufferAllocationType.Auto)
+ {
+ var usage = DefaultBufferUsageFlags;
+
+ if (forConditionalRendering && gd.Capabilities.SupportsConditionalRendering)
+ {
+ usage |= BufferUsageFlags.ConditionalRenderingBitExt;
+ }
+ else if (gd.Capabilities.SupportsIndirectParameters)
+ {
+ usage |= BufferUsageFlags.IndirectBufferBit;
+ }
+
+ var bufferCreateInfo = new BufferCreateInfo()
+ {
+ SType = StructureType.BufferCreateInfo,
+ Size = (ulong)size,
+ Usage = usage,
+ SharingMode = SharingMode.Exclusive
+ };
+
+ gd.Api.CreateBuffer(_device, in bufferCreateInfo, null, out var buffer).ThrowOnError();
+ gd.Api.GetBufferMemoryRequirements(_device, buffer, out var requirements);
+
+ MemoryAllocation allocation;
+
+ do
+ {
+ var allocateFlags = type switch
+ {
+ BufferAllocationType.HostMappedNoCache => DefaultBufferMemoryNoCacheFlags,
+ BufferAllocationType.HostMapped => DefaultBufferMemoryFlags,
+ BufferAllocationType.DeviceLocal => DeviceLocalBufferMemoryFlags,
+ BufferAllocationType.DeviceLocalMapped => DeviceLocalMappedBufferMemoryFlags,
+ _ => DefaultBufferMemoryFlags
+ };
+
+ // If an allocation with this memory type fails, fall back to the previous one.
+ try
+ {
+ allocation = gd.MemoryAllocator.AllocateDeviceMemory(requirements, allocateFlags, true);
+ }
+ catch (VulkanException)
+ {
+ allocation = default;
+ }
+ }
+ while (allocation.Memory.Handle == 0 && (--type != fallbackType));
+
+ if (allocation.Memory.Handle == 0UL)
+ {
+ // Nothing worked; destroy the buffer so it does not leak.
+ gd.Api.DestroyBuffer(_device, buffer, null);
+ return default;
+ }
+
+ gd.Api.BindBufferMemory(_device, buffer, allocation.Memory, allocation.Offset);
+
+ return (buffer, allocation, type);
+ }
+
+ // Creates a BufferHolder, resolving Auto allocation type from the size, the shared
+ // memory capability and the storage hint (which also donates its usage metrics).
+ // Returns null when the backing allocation failed.
+ public unsafe BufferHolder Create(
+ VulkanRenderer gd,
+ int size,
+ bool forConditionalRendering = false,
+ BufferAllocationType baseType = BufferAllocationType.HostMapped,
+ BufferHandle storageHint = default)
+ {
+ BufferAllocationType type = baseType;
+ BufferHolder storageHintHolder = null;
+
+ if (baseType == BufferAllocationType.Auto)
+ {
+ if (gd.IsSharedMemory)
+ {
+ baseType = BufferAllocationType.HostMapped;
+ type = baseType;
+ }
+ else
+ {
+ type = size >= BufferHolder.DeviceLocalSizeThreshold ? BufferAllocationType.DeviceLocal : BufferAllocationType.HostMapped;
+ }
+
+ if (storageHint != BufferHandle.Null)
+ {
+ if (TryGetBuffer(storageHint, out storageHintHolder))
+ {
+ type = storageHintHolder.DesiredType;
+ }
+ }
+ }
+
+ (VkBuffer buffer, MemoryAllocation allocation, BufferAllocationType resultType) =
+ CreateBacking(gd, size, type, forConditionalRendering);
+
+ if (buffer.Handle != 0)
+ {
+ var holder = new BufferHolder(gd, _device, buffer, allocation, size, baseType, resultType);
+
+ if (storageHintHolder != null)
+ {
+ holder.InheritMetrics(storageHintHolder);
+ }
+
+ return holder;
+ }
+
+ return null;
+ }
+
+ // Creates a texel buffer view over the given handle, or null when the handle is unknown.
+ public Auto<DisposableBufferView> CreateView(BufferHandle handle, VkFormat format, int offset, int size, Action invalidateView)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ return holder.CreateView(format, offset, size, invalidateView);
+ }
+
+ return null;
+ }
+
+ // Resolves a handle and gets its buffer for command buffer use (whole range).
+ public Auto<DisposableBuffer> GetBuffer(CommandBuffer commandBuffer, BufferHandle handle, bool isWrite, bool isSSBO = false)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ return holder.GetBuffer(commandBuffer, isWrite, isSSBO);
+ }
+
+ return null;
+ }
+
+ // Resolves a handle and gets its buffer for command buffer use (sub-range).
+ public Auto<DisposableBuffer> GetBuffer(CommandBuffer commandBuffer, BufferHandle handle, int offset, int size, bool isWrite)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ return holder.GetBuffer(commandBuffer, offset, size, isWrite);
+ }
+
+ return null;
+ }
+
+ // Resolves a handle and gets an I8->I16 converted view of the given range.
+ public Auto<DisposableBuffer> GetBufferI8ToI16(CommandBufferScoped cbs, BufferHandle handle, int offset, int size)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ return holder.GetBufferI8ToI16(cbs, offset, size);
+ }
+
+ return null;
+ }
+
+ // Resolves a handle and gets a stride-aligned copy of the given vertex range.
+ public Auto<DisposableBuffer> GetAlignedVertexBuffer(CommandBufferScoped cbs, BufferHandle handle, int offset, int size, int stride, int alignment)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ return holder.GetAlignedVertexBuffer(cbs, offset, size, stride, alignment);
+ }
+
+ return null;
+ }
+
+ // Resolves a handle and gets a topology-converted index buffer for the given range.
+ public Auto<DisposableBuffer> GetBufferTopologyConversion(CommandBufferScoped cbs, BufferHandle handle, int offset, int size, IndexBufferPattern pattern, int indexSize)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ return holder.GetBufferTopologyConversion(cbs, offset, size, pattern, indexSize);
+ }
+
+ return null;
+ }
+
+ // Converts an index buffer topology using indirect draw parameters, caching the
+ // converted index and indirect buffers and wiring invalidation dependencies between
+ // draw count -> indirect data -> index data. Returns (null, null) on unknown handles.
+ public (Auto<DisposableBuffer>, Auto<DisposableBuffer>) GetBufferTopologyConversionIndirect(
+ VulkanRenderer gd,
+ CommandBufferScoped cbs,
+ BufferRange indexBuffer,
+ BufferRange indirectBuffer,
+ BufferRange drawCountBuffer,
+ IndexBufferPattern pattern,
+ int indexSize,
+ bool hasDrawCount,
+ int maxDrawCount,
+ int indirectDataStride)
+ {
+ BufferHolder drawCountBufferHolder = null;
+
+ if (!TryGetBuffer(indexBuffer.Handle, out var indexBufferHolder) ||
+ !TryGetBuffer(indirectBuffer.Handle, out var indirectBufferHolder) ||
+ (hasDrawCount && !TryGetBuffer(drawCountBuffer.Handle, out drawCountBufferHolder)))
+ {
+ return (null, null);
+ }
+
+ var indexBufferKey = new TopologyConversionIndirectCacheKey(
+ gd,
+ pattern,
+ indexSize,
+ indirectBufferHolder,
+ indirectBuffer.Offset,
+ indirectBuffer.Size);
+
+ bool hasConvertedIndexBuffer = indexBufferHolder.TryGetCachedConvertedBuffer(
+ indexBuffer.Offset,
+ indexBuffer.Size,
+ indexBufferKey,
+ out var convertedIndexBuffer);
+
+ var indirectBufferKey = new IndirectDataCacheKey(pattern);
+ bool hasConvertedIndirectBuffer = indirectBufferHolder.TryGetCachedConvertedBuffer(
+ indirectBuffer.Offset,
+ indirectBuffer.Size,
+ indirectBufferKey,
+ out var convertedIndirectBuffer);
+
+ var drawCountBufferKey = new DrawCountCacheKey();
+ bool hasCachedDrawCount = true;
+
+ if (hasDrawCount)
+ {
+ hasCachedDrawCount = drawCountBufferHolder.TryGetCachedConvertedBuffer(
+ drawCountBuffer.Offset,
+ drawCountBuffer.Size,
+ drawCountBufferKey,
+ out _);
+ }
+
+ if (!hasConvertedIndexBuffer || !hasConvertedIndirectBuffer || !hasCachedDrawCount)
+ {
+ // The destination index size is always I32.
+
+ int indexCount = indexBuffer.Size / indexSize;
+
+ int convertedCount = pattern.GetConvertedCount(indexCount);
+
+ if (!hasConvertedIndexBuffer)
+ {
+ convertedIndexBuffer = Create(gd, convertedCount * 4);
+ indexBufferKey.SetBuffer(convertedIndexBuffer.GetBuffer());
+ indexBufferHolder.AddCachedConvertedBuffer(indexBuffer.Offset, indexBuffer.Size, indexBufferKey, convertedIndexBuffer);
+ }
+
+ if (!hasConvertedIndirectBuffer)
+ {
+ convertedIndirectBuffer = Create(gd, indirectBuffer.Size);
+ indirectBufferHolder.AddCachedConvertedBuffer(indirectBuffer.Offset, indirectBuffer.Size, indirectBufferKey, convertedIndirectBuffer);
+ }
+
+ gd.PipelineInternal.EndRenderPass();
+ gd.HelperShader.ConvertIndexBufferIndirect(
+ gd,
+ cbs,
+ indirectBufferHolder,
+ convertedIndirectBuffer,
+ drawCountBuffer,
+ indexBufferHolder,
+ convertedIndexBuffer,
+ pattern,
+ indexSize,
+ indexBuffer.Offset,
+ indexBuffer.Size,
+ indirectBuffer.Offset,
+ hasDrawCount,
+ maxDrawCount,
+ indirectDataStride);
+
+ // Any modification of the indirect buffer should invalidate the index buffers that are associated with it,
+ // since we used the indirect data to find the range of the index buffer that is used.
+
+ var indexBufferDependency = new Dependency(
+ indexBufferHolder,
+ indexBuffer.Offset,
+ indexBuffer.Size,
+ indexBufferKey);
+
+ indirectBufferHolder.AddCachedConvertedBufferDependency(
+ indirectBuffer.Offset,
+ indirectBuffer.Size,
+ indirectBufferKey,
+ indexBufferDependency);
+
+ if (hasDrawCount)
+ {
+ if (!hasCachedDrawCount)
+ {
+ drawCountBufferHolder.AddCachedConvertedBuffer(drawCountBuffer.Offset, drawCountBuffer.Size, drawCountBufferKey, null);
+ }
+
+ // If we have a draw count, any modification of the draw count should invalidate all indirect buffers
+ // where we used it to find the range of indirect data that is actually used.
+
+ var indirectBufferDependency = new Dependency(
+ indirectBufferHolder,
+ indirectBuffer.Offset,
+ indirectBuffer.Size,
+ indirectBufferKey);
+
+ drawCountBufferHolder.AddCachedConvertedBufferDependency(
+ drawCountBuffer.Offset,
+ drawCountBuffer.Size,
+ drawCountBufferKey,
+ indirectBufferDependency);
+ }
+ }
+
+ return (convertedIndexBuffer.GetBuffer(), convertedIndirectBuffer.GetBuffer());
+ }
+
+ // Resolves a handle and gets its buffer, also returning the buffer size (0 on failure).
+ public Auto<DisposableBuffer> GetBuffer(CommandBuffer commandBuffer, BufferHandle handle, bool isWrite, out int size)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ size = holder.Size;
+ return holder.GetBuffer(commandBuffer, isWrite);
+ }
+
+ size = 0;
+ return null;
+ }
+
+ // Reads back data from the buffer with the given handle; empty span on unknown handle.
+ public PinnedSpan<byte> GetData(BufferHandle handle, int offset, int size)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ return holder.GetData(offset, size);
+ }
+
+ return new PinnedSpan<byte>();
+ }
+
+ // Writes typed data to the buffer with the given handle.
+ public void SetData<T>(BufferHandle handle, int offset, ReadOnlySpan<T> data) where T : unmanaged
+ {
+ SetData(handle, offset, MemoryMarshal.Cast<T, byte>(data), null, null);
+ }
+
+ // Writes raw bytes to the buffer with the given handle; no-op on unknown handle.
+ public void SetData(BufferHandle handle, int offset, ReadOnlySpan<byte> data, CommandBufferScoped? cbs, Action endRenderPass)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ holder.SetData(offset, data, cbs, endRenderPass);
+ }
+ }
+
+ // Disposes and unregisters the buffer with the given handle; no-op on unknown handle.
+ public void Delete(BufferHandle handle)
+ {
+ if (TryGetBuffer(handle, out var holder))
+ {
+ holder.Dispose();
+ _buffers.Remove((int)Unsafe.As<BufferHandle, ulong>(ref handle));
+ }
+ }
+
+ // Translates a GAL buffer handle back into the holder registered on the id list.
+ private bool TryGetBuffer(BufferHandle handle, out BufferHolder holder)
+ {
+ return _buffers.TryGetValue((int)Unsafe.As<BufferHandle, ulong>(ref handle), out holder);
+ }
+
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ foreach (BufferHolder buffer in _buffers)
+ {
+ buffer.Dispose();
+ }
+
+ _buffers.Clear();
+ StagingBuffer.Dispose();
+ }
+ }
+
+ public void Dispose()
+ {
+ Dispose(true);
+ // This class exposes the protected virtual Dispose(bool) pattern; suppress
+ // finalization so derived types with finalizers are not finalized after
+ // an explicit dispose (CA1816).
+ GC.SuppressFinalize(this);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/BufferState.cs b/src/Ryujinx.Graphics.Vulkan/BufferState.cs
new file mode 100644
index 00000000..6829f833
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/BufferState.cs
@@ -0,0 +1,48 @@
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Tracks a bound buffer range and holds a reference on the underlying buffer
+ // for as long as the state is alive.
+ struct BufferState : IDisposable
+ {
+ public static BufferState Null => new BufferState(null, 0, 0);
+
+ private readonly int _offset;
+ private readonly int _size;
+
+ private Auto<DisposableBuffer> _buffer;
+
+ // Takes a reference on the given buffer (if any) for the lifetime of this state.
+ public BufferState(Auto<DisposableBuffer> buffer, int offset, int size)
+ {
+ _buffer = buffer;
+ _offset = offset;
+ _size = size;
+
+ if (buffer != null)
+ {
+ buffer.IncrementReferenceCount();
+ }
+ }
+
+ // Binds the tracked range as a transform feedback buffer, if a buffer is set.
+ public void BindTransformFeedbackBuffer(VulkanRenderer gd, CommandBufferScoped cbs, uint binding)
+ {
+ if (_buffer == null)
+ {
+ return;
+ }
+
+ var vkBuffer = _buffer.Get(cbs, _offset, _size).Value;
+
+ gd.TransformFeedbackApi.CmdBindTransformFeedbackBuffers(cbs.CommandBuffer, binding, 1, vkBuffer, (ulong)_offset, (ulong)_size);
+ }
+
+ // Retargets this state from one backing buffer to another, moving the held reference.
+ public void Swap(Auto<DisposableBuffer> from, Auto<DisposableBuffer> to)
+ {
+ if (_buffer != from)
+ {
+ return;
+ }
+
+ from.DecrementReferenceCount();
+ to.IncrementReferenceCount();
+
+ _buffer = to;
+ }
+
+ // Releases the reference held on the tracked buffer, if any.
+ public void Dispose()
+ {
+ _buffer?.DecrementReferenceCount();
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/BufferUsageBitmap.cs b/src/Ryujinx.Graphics.Vulkan/BufferUsageBitmap.cs
new file mode 100644
index 00000000..920501d3
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/BufferUsageBitmap.cs
@@ -0,0 +1,77 @@
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Tracks which regions of a buffer are in use by each command buffer,
+ // at a configurable granularity in bytes.
+ internal class BufferUsageBitmap
+ {
+ private BitMap _bitmap;
+ private int _size;
+ private int _granularity;
+ private int _bits;
+
+ private int _intsPerCb;
+ private int _bitsPerCb;
+
+ public BufferUsageBitmap(int size, int granularity)
+ {
+ _size = size;
+ _granularity = granularity;
+
+ // Number of granularity-sized chunks needed to cover the buffer, rounding up.
+ _bits = (size + (granularity - 1)) / granularity;
+
+ // Each command buffer owns its own region of the bitmap, padded to whole ints.
+ _intsPerCb = (_bits + (BitMap.IntSize - 1)) / BitMap.IntSize;
+ _bitsPerCb = _intsPerCb * BitMap.IntSize;
+
+ _bitmap = new BitMap(_bitsPerCb * CommandBufferPool.MaxCommandBuffers);
+ }
+
+ // Converts a byte range into an inclusive bit range inside the given command buffer's region.
+ private (int Start, int End) GetBitRange(int cbIndex, int offset, int size)
+ {
+ int cbBase = cbIndex * _bitsPerCb;
+
+ return (cbBase + offset / _granularity, cbBase + (offset + size - 1) / _granularity);
+ }
+
+ // Marks a byte range as used by the given command buffer.
+ public void Add(int cbIndex, int offset, int size)
+ {
+ if (size == 0)
+ {
+ return;
+ }
+
+ // Some usages can be out of bounds (vertex buffer on amd), so bound if necessary.
+ if (offset + size > _size)
+ {
+ size = _size - offset;
+ }
+
+ (int start, int end) = GetBitRange(cbIndex, offset, size);
+
+ _bitmap.SetRange(start, end);
+ }
+
+ // Checks whether the given byte range overlaps usage recorded for one command buffer.
+ public bool OverlapsWith(int cbIndex, int offset, int size)
+ {
+ if (size == 0)
+ {
+ return false;
+ }
+
+ (int start, int end) = GetBitRange(cbIndex, offset, size);
+
+ return _bitmap.IsSet(start, end);
+ }
+
+ // Checks whether the given byte range overlaps usage recorded for any command buffer.
+ public bool OverlapsWith(int offset, int size)
+ {
+ for (int cbIndex = 0; cbIndex < CommandBufferPool.MaxCommandBuffers; cbIndex++)
+ {
+ if (OverlapsWith(cbIndex, offset, size))
+ {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ // Clears all usage recorded for the given command buffer.
+ public void Clear(int cbIndex)
+ {
+ _bitmap.ClearInt(cbIndex * _intsPerCb, (cbIndex + 1) * _intsPerCb - 1);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/CacheByRange.cs b/src/Ryujinx.Graphics.Vulkan/CacheByRange.cs
new file mode 100644
index 00000000..a9d1b0ef
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/CacheByRange.cs
@@ -0,0 +1,398 @@
+using System;
+using System.Collections.Generic;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Key identifying a converted version of a buffer range; disposed when the cached
+ // conversion is evicted, giving implementations a chance to invalidate bindings.
+ interface ICacheKey : IDisposable
+ {
+ // Returns true when the other key refers to an equivalent conversion.
+ bool KeyEqual(ICacheKey other);
+ }
+
+ // Cache key for an I8 -> I16 converted index buffer range.
+ struct I8ToI16CacheKey : ICacheKey
+ {
+ // Used to notify the pipeline that bindings have invalidated on dispose.
+ private readonly VulkanRenderer _gd;
+ private Auto<DisposableBuffer> _buffer;
+
+ public I8ToI16CacheKey(VulkanRenderer gd)
+ {
+ _gd = gd;
+ _buffer = null;
+ }
+
+ // All I8->I16 conversions of a range are equivalent, so only the key type matters.
+ public bool KeyEqual(ICacheKey other) => other is I8ToI16CacheKey;
+
+ // Records the converted buffer so it can be dirtied on the pipeline when evicted.
+ public void SetBuffer(Auto<DisposableBuffer> buffer) => _buffer = buffer;
+
+ public void Dispose() => _gd.PipelineInternal.DirtyIndexBuffer(_buffer);
+ }
+
+ // Cache key for a vertex buffer range copied with its stride padded to an alignment.
+ struct AlignedVertexBufferCacheKey : ICacheKey
+ {
+ private readonly int _stride;
+ private readonly int _alignment;
+
+ // Used to notify the pipeline that bindings have invalidated on dispose.
+ private readonly VulkanRenderer _gd;
+ private Auto<DisposableBuffer> _buffer;
+
+ public AlignedVertexBufferCacheKey(VulkanRenderer gd, int stride, int alignment)
+ {
+ _gd = gd;
+ _stride = stride;
+ _alignment = alignment;
+ _buffer = null;
+ }
+
+ // Keys match when both the stride and the alignment of the conversion are equal.
+ public bool KeyEqual(ICacheKey other) =>
+ other is AlignedVertexBufferCacheKey key &&
+ key._stride == _stride &&
+ key._alignment == _alignment;
+
+ // Records the converted buffer so it can be dirtied on the pipeline when evicted.
+ public void SetBuffer(Auto<DisposableBuffer> buffer) => _buffer = buffer;
+
+ public void Dispose() => _gd.PipelineInternal.DirtyVertexBuffer(_buffer);
+ }
+
+ // Cache key for an index buffer range converted to a host-supported topology.
+ struct TopologyConversionCacheKey : ICacheKey
+ {
+ private IndexBufferPattern _pattern;
+ private int _indexSize;
+
+ // Used to notify the pipeline that bindings have invalidated on dispose.
+ private readonly VulkanRenderer _gd;
+ private Auto<DisposableBuffer> _buffer;
+
+ public TopologyConversionCacheKey(VulkanRenderer gd, IndexBufferPattern pattern, int indexSize)
+ {
+ _gd = gd;
+ _pattern = pattern;
+ _indexSize = indexSize;
+ _buffer = null;
+ }
+
+ // Keys match when both the conversion pattern and the source index size are equal.
+ public bool KeyEqual(ICacheKey other) =>
+ other is TopologyConversionCacheKey key &&
+ key._pattern == _pattern &&
+ key._indexSize == _indexSize;
+
+ // Records the converted buffer so it can be dirtied on the pipeline when evicted.
+ public void SetBuffer(Auto<DisposableBuffer> buffer) => _buffer = buffer;
+
+ public void Dispose() => _gd.PipelineInternal.DirtyIndexBuffer(_buffer);
+ }
+
+ // Cache key for an index buffer topology conversion driven by indirect draw data.
+ //
+ // BUG FIX: this struct and the _baseKey field must NOT be readonly.
+ // TopologyConversionCacheKey is a mutable struct, and invoking its mutating
+ // SetBuffer through a readonly field operates on a defensive copy, so the stored
+ // key never receives the buffer and Dispose would dirty a null binding instead of
+ // the real converted index buffer.
+ struct TopologyConversionIndirectCacheKey : ICacheKey
+ {
+ private TopologyConversionCacheKey _baseKey;
+ private readonly BufferHolder _indirectDataBuffer;
+ private readonly int _indirectDataOffset;
+ private readonly int _indirectDataSize;
+
+ public TopologyConversionIndirectCacheKey(
+ VulkanRenderer gd,
+ IndexBufferPattern pattern,
+ int indexSize,
+ BufferHolder indirectDataBuffer,
+ int indirectDataOffset,
+ int indirectDataSize)
+ {
+ _baseKey = new TopologyConversionCacheKey(gd, pattern, indexSize);
+ _indirectDataBuffer = indirectDataBuffer;
+ _indirectDataOffset = indirectDataOffset;
+ _indirectDataSize = indirectDataSize;
+ }
+
+ // Keys match when the base conversion parameters and the exact indirect data
+ // source (buffer, offset and size) are all equal.
+ public bool KeyEqual(ICacheKey other)
+ {
+ return other is TopologyConversionIndirectCacheKey entry &&
+ entry._baseKey.KeyEqual(_baseKey) &&
+ entry._indirectDataBuffer == _indirectDataBuffer &&
+ entry._indirectDataOffset == _indirectDataOffset &&
+ entry._indirectDataSize == _indirectDataSize;
+ }
+
+ // Records the converted buffer on the base key (now mutates the real field).
+ public void SetBuffer(Auto<DisposableBuffer> buffer)
+ {
+ _baseKey.SetBuffer(buffer);
+ }
+
+ public void Dispose()
+ {
+ _baseKey.Dispose();
+ }
+ }
+
+ // Cache key for indirect draw data converted for a given index buffer pattern.
+ struct IndirectDataCacheKey : ICacheKey
+ {
+ private IndexBufferPattern _pattern;
+
+ public IndirectDataCacheKey(IndexBufferPattern pattern)
+ {
+ _pattern = pattern;
+ }
+
+ // Keys match when the conversion uses the same index buffer pattern.
+ public bool KeyEqual(ICacheKey other) => other is IndirectDataCacheKey key && key._pattern == _pattern;
+
+ // This key owns no resources, so there is nothing to release.
+ public void Dispose()
+ {
+ }
+ }
+
+ // Cache key marking that a draw count range has been consumed by a conversion.
+ struct DrawCountCacheKey : ICacheKey
+ {
+ // All draw count keys are interchangeable; only the key type matters.
+ public bool KeyEqual(ICacheKey other) => other is DrawCountCacheKey;
+
+ // This key owns no resources, so there is nothing to release.
+ public void Dispose()
+ {
+ }
+ }
+
+ // Identifies a cached converted buffer (by owning holder, range and key) that must
+ // be invalidated when the data it was derived from changes.
+ readonly struct Dependency
+ {
+ private readonly BufferHolder _buffer;
+ private readonly int _offset;
+ private readonly int _size;
+ private readonly ICacheKey _key;
+
+ public Dependency(BufferHolder buffer, int offset, int size, ICacheKey key)
+ {
+ _buffer = buffer;
+ _offset = offset;
+ _size = size;
+ _key = key;
+ }
+
+ // Removes the dependent cached buffer from its owning holder's conversion cache.
+ public void RemoveFromOwner() => _buffer.RemoveCachedConvertedBuffer(_offset, _size, _key);
+ }
+
+ /// <summary>
+ /// Cache of disposable values keyed by a (offset, size) range plus an
+ /// <see cref="ICacheKey"/>. Ranges are packed into a ulong dictionary key;
+ /// each range holds a list of entries, and entries may carry dependency
+ /// back-references that are invalidated when the entry is destroyed.
+ /// The backing dictionary is allocated lazily on first use.
+ /// </summary>
+ struct CacheByRange<T> where T : IDisposable
+ {
+ private struct Entry
+ {
+ public ICacheKey Key;
+ public T Value;
+ // Lazily allocated; null until the first dependency is added.
+ public List<Dependency> DependencyList;
+
+ public Entry(ICacheKey key, T value)
+ {
+ Key = key;
+ Value = value;
+ DependencyList = null;
+ }
+
+ // Removes every dependent cached entry from its owner, then clears the list.
+ public void InvalidateDependencies()
+ {
+ if (DependencyList != null)
+ {
+ foreach (Dependency dependency in DependencyList)
+ {
+ dependency.RemoveFromOwner();
+ }
+
+ DependencyList.Clear();
+ }
+ }
+ }
+
+ private Dictionary<ulong, List<Entry>> _ranges;
+
+ // Adds a new entry for the given range. Does not check for duplicates.
+ public void Add(int offset, int size, ICacheKey key, T value)
+ {
+ List<Entry> entries = GetEntries(offset, size);
+
+ entries.Add(new Entry(key, value));
+ }
+
+ // Attaches a dependency to the first entry in the range matching the key.
+ // Entry is a struct, so the list slot must be written back after the
+ // DependencyList is first allocated.
+ public void AddDependency(int offset, int size, ICacheKey key, Dependency dependency)
+ {
+ List<Entry> entries = GetEntries(offset, size);
+
+ for (int i = 0; i < entries.Count; i++)
+ {
+ Entry entry = entries[i];
+
+ if (entry.Key.KeyEqual(key))
+ {
+ if (entry.DependencyList == null)
+ {
+ entry.DependencyList = new List<Dependency>();
+ entries[i] = entry;
+ }
+
+ entry.DependencyList.Add(dependency);
+
+ break;
+ }
+ }
+ }
+
+ // Removes and destroys all entries in the range matching the key,
+ // dropping the range itself from the dictionary once empty.
+ public void Remove(int offset, int size, ICacheKey key)
+ {
+ List<Entry> entries = GetEntries(offset, size);
+
+ for (int i = 0; i < entries.Count; i++)
+ {
+ Entry entry = entries[i];
+
+ if (entry.Key.KeyEqual(key))
+ {
+ entries.RemoveAt(i--);
+
+ DestroyEntry(entry);
+ }
+ }
+
+ if (entries.Count == 0)
+ {
+ _ranges.Remove(PackRange(offset, size));
+ }
+ }
+
+ // Looks up a value by exact range and key. Returns false (value = default)
+ // when no matching entry exists.
+ public bool TryGetValue(int offset, int size, ICacheKey key, out T value)
+ {
+ List<Entry> entries = GetEntries(offset, size);
+
+ foreach (Entry entry in entries)
+ {
+ if (entry.Key.KeyEqual(key))
+ {
+ value = entry.Value;
+
+ return true;
+ }
+ }
+
+ value = default;
+ return false;
+ }
+
+ // Destroys every entry and releases the backing dictionary.
+ public void Clear()
+ {
+ if (_ranges != null)
+ {
+ foreach (List<Entry> entries in _ranges.Values)
+ {
+ foreach (Entry entry in entries)
+ {
+ DestroyEntry(entry);
+ }
+ }
+
+ _ranges.Clear();
+ _ranges = null;
+ }
+ }
+
+ // Destroys all entries whose range overlaps [offset, offset + size).
+ // Removal is deferred to a second pass so the dictionary is not mutated
+ // while being enumerated.
+ public void ClearRange(int offset, int size)
+ {
+ if (_ranges != null && _ranges.Count > 0)
+ {
+ int end = offset + size;
+
+ List<ulong> toRemove = null;
+
+ foreach (KeyValuePair<ulong, List<Entry>> range in _ranges)
+ {
+ (int rOffset, int rSize) = UnpackRange(range.Key);
+
+ int rEnd = rOffset + rSize;
+
+ if (rEnd > offset && rOffset < end)
+ {
+ List<Entry> entries = range.Value;
+
+ foreach (Entry entry in entries)
+ {
+ DestroyEntry(entry);
+ }
+
+ (toRemove ??= new List<ulong>()).Add(range.Key);
+ }
+ }
+
+ if (toRemove != null)
+ {
+ foreach (ulong range in toRemove)
+ {
+ _ranges.Remove(range);
+ }
+ }
+ }
+ }
+
+ // Returns the entry list for a range, creating the dictionary and/or the
+ // list on demand.
+ private List<Entry> GetEntries(int offset, int size)
+ {
+ if (_ranges == null)
+ {
+ _ranges = new Dictionary<ulong, List<Entry>>();
+ }
+
+ ulong key = PackRange(offset, size);
+
+ List<Entry> value;
+ if (!_ranges.TryGetValue(key, out value))
+ {
+ value = new List<Entry>();
+ _ranges.Add(key, value);
+ }
+
+ return value;
+ }
+
+ // Disposes the key and value, then evicts everything that depended on them.
+ private static void DestroyEntry(Entry entry)
+ {
+ entry.Key.Dispose();
+ entry.Value?.Dispose();
+ entry.InvalidateDependencies();
+ }
+
+ // Packs offset into the low 32 bits and size into the high 32 bits.
+ private static ulong PackRange(int offset, int size)
+ {
+ return (uint)offset | ((ulong)size << 32);
+ }
+
+ // Inverse of PackRange.
+ private static (int offset, int size) UnpackRange(ulong range)
+ {
+ return ((int)range, (int)(range >> 32));
+ }
+
+ public void Dispose()
+ {
+ Clear();
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/CommandBufferPool.cs b/src/Ryujinx.Graphics.Vulkan/CommandBufferPool.cs
new file mode 100644
index 00000000..4cbb24ef
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/CommandBufferPool.cs
@@ -0,0 +1,368 @@
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using Thread = System.Threading.Thread;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Fixed-size pool of Vulkan primary command buffers, each paired with a
+ /// fence and bookkeeping for dependants, waitables and semaphore
+ /// dependencies. Buffers move through three states: free -> InUse (rented)
+ /// -> InConsumption (submitted, waiting on its fence) -> free.
+ /// </summary>
+ class CommandBufferPool : IDisposable
+ {
+ public const int MaxCommandBuffers = 16;
+
+ private int _totalCommandBuffers;
+ // _totalCommandBuffers - 1; used as a wrap-around mask in Rent, so the
+ // buffer count is expected to be a power of two.
+ private int _totalCommandBuffersMask;
+
+ private readonly Vk _api;
+ private readonly Device _device;
+ private readonly Queue _queue;
+ // Guards QueueSubmit; the queue may be shared with other submitters.
+ private readonly object _queueLock;
+ private readonly CommandPool _pool;
+ private readonly Thread _owner;
+
+ // True when called from the thread that created this pool.
+ public bool OwnedByCurrentThread => _owner == Thread.CurrentThread;
+
+ private struct ReservedCommandBuffer
+ {
+ // Rented out via Rent() and not yet returned.
+ public bool InUse;
+ // Submitted to the queue; its fence has not been observed signaled yet.
+ public bool InConsumption;
+ public CommandBuffer CommandBuffer;
+ public FenceHolder Fence;
+ public SemaphoreHolder Semaphore;
+
+ // Resources whose reference counts are held until this buffer completes.
+ public List<IAuto> Dependants;
+ // Fence aggregates that were told to wait on this buffer's fence.
+ public HashSet<MultiFenceHolder> Waitables;
+ // Semaphores from other command buffers this one depends on.
+ public HashSet<SemaphoreHolder> Dependencies;
+
+ // Allocates the underlying command buffer and the tracking collections.
+ public void Initialize(Vk api, Device device, CommandPool pool)
+ {
+ var allocateInfo = new CommandBufferAllocateInfo()
+ {
+ SType = StructureType.CommandBufferAllocateInfo,
+ CommandBufferCount = 1,
+ CommandPool = pool,
+ Level = CommandBufferLevel.Primary
+ };
+
+ api.AllocateCommandBuffers(device, allocateInfo, out CommandBuffer);
+
+ Dependants = new List<IAuto>();
+ Waitables = new HashSet<MultiFenceHolder>();
+ Dependencies = new HashSet<SemaphoreHolder>();
+ }
+ }
+
+ private readonly ReservedCommandBuffer[] _commandBuffers;
+
+ // Circular queue of indexes of submitted (in-consumption) buffers,
+ // in submission order.
+ private readonly int[] _queuedIndexes;
+ private int _queuedIndexesPtr;
+ private int _queuedCount;
+ private int _inUseCount;
+
+ public unsafe CommandBufferPool(Vk api, Device device, Queue queue, object queueLock, uint queueFamilyIndex, bool isLight = false)
+ {
+ _api = api;
+ _device = device;
+ _queue = queue;
+ _queueLock = queueLock;
+ _owner = Thread.CurrentThread;
+
+ var commandPoolCreateInfo = new CommandPoolCreateInfo()
+ {
+ SType = StructureType.CommandPoolCreateInfo,
+ QueueFamilyIndex = queueFamilyIndex,
+ Flags = CommandPoolCreateFlags.TransientBit |
+ CommandPoolCreateFlags.ResetCommandBufferBit
+ };
+
+ api.CreateCommandPool(device, commandPoolCreateInfo, null, out _pool).ThrowOnError();
+
+ // We need at least 2 command buffers to get texture data in some cases.
+ _totalCommandBuffers = isLight ? 2 : MaxCommandBuffers;
+ _totalCommandBuffersMask = _totalCommandBuffers - 1;
+
+ _commandBuffers = new ReservedCommandBuffer[_totalCommandBuffers];
+
+ _queuedIndexes = new int[_totalCommandBuffers];
+ _queuedIndexesPtr = 0;
+ _queuedCount = 0;
+
+ for (int i = 0; i < _totalCommandBuffers; i++)
+ {
+ _commandBuffers[i].Initialize(api, device, _pool);
+ // Also creates the initial fence for each entry (refreshFence default).
+ WaitAndDecrementRef(i);
+ }
+ }
+
+ // Holds a reference on the resource until command buffer cbIndex completes.
+ public void AddDependant(int cbIndex, IAuto dependant)
+ {
+ dependant.IncrementReferenceCount();
+ _commandBuffers[cbIndex].Dependants.Add(dependant);
+ }
+
+ // Registers the waitable with every buffer currently submitted to the queue.
+ public void AddWaitable(MultiFenceHolder waitable)
+ {
+ lock (_commandBuffers)
+ {
+ for (int i = 0; i < _totalCommandBuffers; i++)
+ {
+ ref var entry = ref _commandBuffers[i];
+
+ if (entry.InConsumption)
+ {
+ AddWaitable(i, waitable);
+ }
+ }
+ }
+ }
+
+ // Registers the waitable with every buffer currently rented (not yet submitted).
+ public void AddInUseWaitable(MultiFenceHolder waitable)
+ {
+ lock (_commandBuffers)
+ {
+ for (int i = 0; i < _totalCommandBuffers; i++)
+ {
+ ref var entry = ref _commandBuffers[i];
+
+ if (entry.InUse)
+ {
+ AddWaitable(i, waitable);
+ }
+ }
+ }
+ }
+
+ // Makes cbIndex depend on the semaphore of another scoped command buffer.
+ // Get() takes a reference that is released (Put) when cbIndex completes.
+ public void AddDependency(int cbIndex, CommandBufferScoped dependencyCbs)
+ {
+ Debug.Assert(_commandBuffers[cbIndex].InUse);
+ var semaphoreHolder = _commandBuffers[dependencyCbs.CommandBufferIndex].Semaphore;
+ semaphoreHolder.Get();
+ _commandBuffers[cbIndex].Dependencies.Add(semaphoreHolder);
+ }
+
+ // Attaches this buffer's fence to the waitable and records the association
+ // so it can be undone when the buffer completes.
+ public void AddWaitable(int cbIndex, MultiFenceHolder waitable)
+ {
+ ref var entry = ref _commandBuffers[cbIndex];
+ waitable.AddFence(cbIndex, entry.Fence);
+ entry.Waitables.Add(waitable);
+ }
+
+ // True if any rented buffer holds this waitable with the given buffer
+ // range still in use.
+ public bool HasWaitableOnRentedCommandBuffer(MultiFenceHolder waitable, int offset, int size)
+ {
+ lock (_commandBuffers)
+ {
+ for (int i = 0; i < _totalCommandBuffers; i++)
+ {
+ ref var entry = ref _commandBuffers[i];
+
+ if (entry.InUse &&
+ entry.Waitables.Contains(waitable) &&
+ waitable.IsBufferRangeInUse(i, offset, size))
+ {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ // True if the given fence belongs to a buffer that is currently rented.
+ public bool IsFenceOnRentedCommandBuffer(FenceHolder fence)
+ {
+ lock (_commandBuffers)
+ {
+ for (int i = 0; i < _totalCommandBuffers; i++)
+ {
+ ref var entry = ref _commandBuffers[i];
+
+ if (entry.InUse && entry.Fence == fence)
+ {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ public FenceHolder GetFence(int cbIndex)
+ {
+ return _commandBuffers[cbIndex].Fence;
+ }
+
+ // Reclaims submitted buffers whose fences have signaled, in submission
+ // order. When wait is true, blocks on the oldest one first to guarantee
+ // at least one buffer becomes free. Returns the index of the last buffer
+ // freed (0 if none), used as the search cursor by Rent.
+ private int FreeConsumed(bool wait)
+ {
+ int freeEntry = 0;
+
+ while (_queuedCount > 0)
+ {
+ int index = _queuedIndexes[_queuedIndexesPtr];
+
+ ref var entry = ref _commandBuffers[index];
+
+ if (wait || !entry.InConsumption || entry.Fence.IsSignaled())
+ {
+ WaitAndDecrementRef(index);
+
+ // Only the first reclaim may block; the rest are opportunistic.
+ wait = false;
+ freeEntry = index;
+
+ _queuedCount--;
+ _queuedIndexesPtr = (_queuedIndexesPtr + 1) % _totalCommandBuffers;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ return freeEntry;
+ }
+
+ // Convenience: submit the current scoped buffer and immediately rent a new one.
+ public CommandBufferScoped ReturnAndRent(CommandBufferScoped cbs)
+ {
+ Return(cbs);
+ return Rent();
+ }
+
+ // Rents a free command buffer, begins recording on it, and returns a scoped
+ // wrapper. Blocks on the oldest submission if the pool is fully occupied.
+ public CommandBufferScoped Rent()
+ {
+ lock (_commandBuffers)
+ {
+ int cursor = FreeConsumed(_inUseCount + _queuedCount == _totalCommandBuffers);
+
+ for (int i = 0; i < _totalCommandBuffers; i++)
+ {
+ ref var entry = ref _commandBuffers[cursor];
+
+ if (!entry.InUse && !entry.InConsumption)
+ {
+ entry.InUse = true;
+
+ _inUseCount++;
+
+ var commandBufferBeginInfo = new CommandBufferBeginInfo()
+ {
+ SType = StructureType.CommandBufferBeginInfo
+ };
+
+ _api.BeginCommandBuffer(entry.CommandBuffer, commandBufferBeginInfo).ThrowOnError();
+
+ return new CommandBufferScoped(this, entry.CommandBuffer, cursor);
+ }
+
+ cursor = (cursor + 1) & _totalCommandBuffersMask;
+ }
+ }
+
+ throw new InvalidOperationException($"Out of command buffers (In use: {_inUseCount}, queued: {_queuedCount}, total: {_totalCommandBuffers})");
+ }
+
+ public void Return(CommandBufferScoped cbs)
+ {
+ Return(cbs, null, null, null);
+ }
+
+ // Ends recording, submits the buffer to the queue with the optional
+ // semaphores, and appends its index to the in-consumption queue.
+ public unsafe void Return(
+ CommandBufferScoped cbs,
+ ReadOnlySpan<Semaphore> waitSemaphores,
+ ReadOnlySpan<PipelineStageFlags> waitDstStageMask,
+ ReadOnlySpan<Semaphore> signalSemaphores)
+ {
+ lock (_commandBuffers)
+ {
+ int cbIndex = cbs.CommandBufferIndex;
+
+ ref var entry = ref _commandBuffers[cbIndex];
+
+ Debug.Assert(entry.InUse);
+ Debug.Assert(entry.CommandBuffer.Handle == cbs.CommandBuffer.Handle);
+ entry.InUse = false;
+ entry.InConsumption = true;
+ _inUseCount--;
+
+ var commandBuffer = entry.CommandBuffer;
+
+ _api.EndCommandBuffer(commandBuffer).ThrowOnError();
+
+ fixed (Semaphore* pWaitSemaphores = waitSemaphores, pSignalSemaphores = signalSemaphores)
+ {
+ fixed (PipelineStageFlags* pWaitDstStageMask = waitDstStageMask)
+ {
+ SubmitInfo sInfo = new SubmitInfo()
+ {
+ SType = StructureType.SubmitInfo,
+ WaitSemaphoreCount = waitSemaphores != null ? (uint)waitSemaphores.Length : 0,
+ PWaitSemaphores = pWaitSemaphores,
+ PWaitDstStageMask = pWaitDstStageMask,
+ CommandBufferCount = 1,
+ PCommandBuffers = &commandBuffer,
+ SignalSemaphoreCount = signalSemaphores != null ? (uint)signalSemaphores.Length : 0,
+ PSignalSemaphores = pSignalSemaphores
+ };
+
+ // Queue access is shared; serialize submission.
+ lock (_queueLock)
+ {
+ _api.QueueSubmit(_queue, 1, sInfo, entry.Fence.GetUnsafe()).ThrowOnError();
+ }
+ }
+ }
+
+ int ptr = (_queuedIndexesPtr + _queuedCount) % _totalCommandBuffers;
+ _queuedIndexes[ptr] = cbIndex;
+ _queuedCount++;
+ }
+ }
+
+ // Waits for the buffer's fence if needed, then releases everything that
+ // was tied to this submission: dependant refs, waitable fences and
+ // semaphore dependencies. Replaces the fence with a fresh one unless the
+ // pool is being torn down (refreshFence = false).
+ private void WaitAndDecrementRef(int cbIndex, bool refreshFence = true)
+ {
+ ref var entry = ref _commandBuffers[cbIndex];
+
+ if (entry.InConsumption)
+ {
+ entry.Fence.Wait();
+ entry.InConsumption = false;
+ }
+
+ foreach (var dependant in entry.Dependants)
+ {
+ dependant.DecrementReferenceCount(cbIndex);
+ }
+
+ foreach (var waitable in entry.Waitables)
+ {
+ waitable.RemoveFence(cbIndex, entry.Fence);
+ waitable.RemoveBufferUses(cbIndex);
+ }
+
+ foreach (var dependency in entry.Dependencies)
+ {
+ dependency.Put();
+ }
+
+ entry.Dependants.Clear();
+ entry.Waitables.Clear();
+ entry.Dependencies.Clear();
+ entry.Fence?.Dispose();
+
+ if (refreshFence)
+ {
+ entry.Fence = new FenceHolder(_api, _device);
+ }
+ else
+ {
+ entry.Fence = null;
+ }
+ }
+
+ // Drains all pending work, releases every tracked resource, and destroys
+ // the command pool (which frees the command buffers allocated from it).
+ public unsafe void Dispose()
+ {
+ for (int i = 0; i < _totalCommandBuffers; i++)
+ {
+ WaitAndDecrementRef(i, refreshFence: false);
+ }
+
+ _api.DestroyCommandPool(_device, _pool, null);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/CommandBufferScoped.cs b/src/Ryujinx.Graphics.Vulkan/CommandBufferScoped.cs
new file mode 100644
index 00000000..1d9e14bb
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/CommandBufferScoped.cs
@@ -0,0 +1,44 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Lightweight handle to a rented command buffer. Forwards dependency and
+ /// waitable registration to the owning pool and returns (submits) the
+ /// buffer when disposed.
+ /// </summary>
+ readonly struct CommandBufferScoped : IDisposable
+ {
+ private readonly CommandBufferPool _pool;
+ public CommandBuffer CommandBuffer { get; }
+ public int CommandBufferIndex { get; }
+
+ public CommandBufferScoped(CommandBufferPool pool, CommandBuffer commandBuffer, int commandBufferIndex)
+ {
+ _pool = pool;
+ CommandBuffer = commandBuffer;
+ CommandBufferIndex = commandBufferIndex;
+ }
+
+ // Keeps the resource alive until this command buffer finishes execution.
+ public void AddDependant(IAuto dependant)
+ {
+ _pool.AddDependant(CommandBufferIndex, dependant);
+ }
+
+ public void AddWaitable(MultiFenceHolder waitable)
+ {
+ _pool.AddWaitable(CommandBufferIndex, waitable);
+ }
+
+ public void AddDependency(CommandBufferScoped dependencyCbs)
+ {
+ _pool.AddDependency(CommandBufferIndex, dependencyCbs);
+ }
+
+ public FenceHolder GetFence()
+ {
+ return _pool.GetFence(CommandBufferIndex);
+ }
+
+ // Returning the buffer ends recording and submits it to the queue.
+ // Null-conditional guards the default-constructed struct case.
+ public void Dispose()
+ {
+ _pool?.Return(this);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Constants.cs b/src/Ryujinx.Graphics.Vulkan/Constants.cs
new file mode 100644
index 00000000..f43d815a
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Constants.cs
@@ -0,0 +1,20 @@
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Backend-wide binding and resource limits. Per-stage counts are
+ /// multiplied by the shader stage count to get the total binding counts.
+ /// </summary>
+ static class Constants
+ {
+ public const int MaxVertexAttributes = 32;
+ public const int MaxVertexBuffers = 32;
+ public const int MaxTransformFeedbackBuffers = 4;
+ public const int MaxRenderTargets = 8;
+ public const int MaxViewports = 16;
+ public const int MaxShaderStages = 5;
+ public const int MaxUniformBuffersPerStage = 18;
+ public const int MaxStorageBuffersPerStage = 16;
+ public const int MaxTexturesPerStage = 64;
+ public const int MaxImagesPerStage = 16;
+ public const int MaxUniformBufferBindings = MaxUniformBuffersPerStage * MaxShaderStages;
+ public const int MaxStorageBufferBindings = MaxStorageBuffersPerStage * MaxShaderStages;
+ public const int MaxTextureBindings = MaxTexturesPerStage * MaxShaderStages;
+ public const int MaxImageBindings = MaxImagesPerStage * MaxShaderStages;
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DescriptorSetCollection.cs b/src/Ryujinx.Graphics.Vulkan/DescriptorSetCollection.cs
new file mode 100644
index 00000000..c57cb1a9
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DescriptorSetCollection.cs
@@ -0,0 +1,246 @@
+using Silk.NET.Vulkan;
+using System;
+using VkBuffer = Silk.NET.Vulkan.Buffer;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Group of descriptor sets allocated from one pool holder, with helpers to
+ /// write buffer, image and texel-buffer descriptors. Disposing returns the
+ /// sets to the owning pool holder.
+ /// </summary>
+ struct DescriptorSetCollection : IDisposable
+ {
+ private DescriptorSetManager.DescriptorPoolHolder _holder;
+ private readonly DescriptorSet[] _descriptorSets;
+ public int SetsCount => _descriptorSets.Length;
+
+ public DescriptorSetCollection(DescriptorSetManager.DescriptorPoolHolder holder, DescriptorSet[] descriptorSets)
+ {
+ _holder = holder;
+ _descriptorSets = descriptorSets;
+ }
+
+ // Fills a run of buffer bindings with the dummy buffer (whole-size range),
+ // for bindings that have no real buffer assigned.
+ public void InitializeBuffers(int setIndex, int baseBinding, int countPerUnit, DescriptorType type, VkBuffer dummyBuffer)
+ {
+ Span<DescriptorBufferInfo> infos = stackalloc DescriptorBufferInfo[countPerUnit];
+
+ infos.Fill(new DescriptorBufferInfo()
+ {
+ Buffer = dummyBuffer,
+ Range = Vk.WholeSize
+ });
+
+ UpdateBuffers(setIndex, baseBinding, infos, type);
+ }
+
+ // Writes a single buffer descriptor; null buffer handles are skipped.
+ public unsafe void UpdateBuffer(int setIndex, int bindingIndex, DescriptorBufferInfo bufferInfo, DescriptorType type)
+ {
+ if (bufferInfo.Buffer.Handle != 0UL)
+ {
+ var writeDescriptorSet = new WriteDescriptorSet
+ {
+ SType = StructureType.WriteDescriptorSet,
+ DstSet = _descriptorSets[setIndex],
+ DstBinding = (uint)bindingIndex,
+ DescriptorType = type,
+ DescriptorCount = 1,
+ PBufferInfo = &bufferInfo
+ };
+
+ _holder.Api.UpdateDescriptorSets(_holder.Device, 1, writeDescriptorSet, 0, null);
+ }
+ }
+
+ // Writes a consecutive run of buffer descriptors starting at baseBinding.
+ public unsafe void UpdateBuffers(int setIndex, int baseBinding, ReadOnlySpan<DescriptorBufferInfo> bufferInfo, DescriptorType type)
+ {
+ if (bufferInfo.Length == 0)
+ {
+ return;
+ }
+
+ fixed (DescriptorBufferInfo* pBufferInfo = bufferInfo)
+ {
+ var writeDescriptorSet = new WriteDescriptorSet
+ {
+ SType = StructureType.WriteDescriptorSet,
+ DstSet = _descriptorSets[setIndex],
+ DstBinding = (uint)baseBinding,
+ DescriptorType = type,
+ DescriptorCount = (uint)bufferInfo.Length,
+ PBufferInfo = pBufferInfo
+ };
+
+ _holder.Api.UpdateDescriptorSets(_holder.Device, 1, writeDescriptorSet, 0, null);
+ }
+ }
+
+ // Writes storage buffer descriptors, splitting baseBinding into a
+ // per-stage binding slot and an array element within that slot
+ // (MaxStorageBuffersPerStage must be a power of two for this masking).
+ public unsafe void UpdateStorageBuffers(int setIndex, int baseBinding, ReadOnlySpan<DescriptorBufferInfo> bufferInfo)
+ {
+ if (bufferInfo.Length == 0)
+ {
+ return;
+ }
+
+ fixed (DescriptorBufferInfo* pBufferInfo = bufferInfo)
+ {
+ var writeDescriptorSet = new WriteDescriptorSet
+ {
+ SType = StructureType.WriteDescriptorSet,
+ DstSet = _descriptorSets[setIndex],
+ DstBinding = (uint)(baseBinding & ~(Constants.MaxStorageBuffersPerStage - 1)),
+ DstArrayElement = (uint)(baseBinding & (Constants.MaxStorageBuffersPerStage - 1)),
+ DescriptorType = DescriptorType.StorageBuffer,
+ DescriptorCount = (uint)bufferInfo.Length,
+ PBufferInfo = pBufferInfo
+ };
+
+ _holder.Api.UpdateDescriptorSets(_holder.Device, 1, writeDescriptorSet, 0, null);
+ }
+ }
+
+ // Writes a single image descriptor; null image views are skipped.
+ public unsafe void UpdateImage(int setIndex, int bindingIndex, DescriptorImageInfo imageInfo, DescriptorType type)
+ {
+ if (imageInfo.ImageView.Handle != 0UL)
+ {
+ var writeDescriptorSet = new WriteDescriptorSet
+ {
+ SType = StructureType.WriteDescriptorSet,
+ DstSet = _descriptorSets[setIndex],
+ DstBinding = (uint)bindingIndex,
+ DescriptorType = type,
+ DescriptorCount = 1,
+ PImageInfo = &imageInfo
+ };
+
+ _holder.Api.UpdateDescriptorSets(_holder.Device, 1, writeDescriptorSet, 0, null);
+ }
+ }
+
+ // Writes a consecutive run of image descriptors starting at baseBinding.
+ public unsafe void UpdateImages(int setIndex, int baseBinding, ReadOnlySpan<DescriptorImageInfo> imageInfo, DescriptorType type)
+ {
+ if (imageInfo.Length == 0)
+ {
+ return;
+ }
+
+ fixed (DescriptorImageInfo* pImageInfo = imageInfo)
+ {
+ var writeDescriptorSet = new WriteDescriptorSet
+ {
+ SType = StructureType.WriteDescriptorSet,
+ DstSet = _descriptorSets[setIndex],
+ DstBinding = (uint)baseBinding,
+ DescriptorType = type,
+ DescriptorCount = (uint)imageInfo.Length,
+ PImageInfo = pImageInfo
+ };
+
+ _holder.Api.UpdateDescriptorSets(_holder.Device, 1, writeDescriptorSet, 0, null);
+ }
+ }
+
+ // Writes combined image/sampler descriptors, batching maximal runs where
+ // both the view and sampler are non-null and skipping the gaps.
+ public unsafe void UpdateImagesCombined(int setIndex, int baseBinding, ReadOnlySpan<DescriptorImageInfo> imageInfo, DescriptorType type)
+ {
+ if (imageInfo.Length == 0)
+ {
+ return;
+ }
+
+ fixed (DescriptorImageInfo* pImageInfo = imageInfo)
+ {
+ for (int i = 0; i < imageInfo.Length; i++)
+ {
+ bool nonNull = imageInfo[i].ImageView.Handle != 0 && imageInfo[i].Sampler.Handle != 0;
+ if (nonNull)
+ {
+ int count = 1;
+
+ // Extend the run while subsequent entries are also fully populated.
+ while (i + count < imageInfo.Length &&
+ imageInfo[i + count].ImageView.Handle != 0 &&
+ imageInfo[i + count].Sampler.Handle != 0)
+ {
+ count++;
+ }
+
+ var writeDescriptorSet = new WriteDescriptorSet
+ {
+ SType = StructureType.WriteDescriptorSet,
+ DstSet = _descriptorSets[setIndex],
+ DstBinding = (uint)(baseBinding + i),
+ DescriptorType = DescriptorType.CombinedImageSampler,
+ DescriptorCount = (uint)count,
+ PImageInfo = pImageInfo
+ };
+
+ _holder.Api.UpdateDescriptorSets(_holder.Device, 1, writeDescriptorSet, 0, null);
+
+ i += count - 1;
+ }
+ }
+ }
+ }
+
+ // Writes a single texel buffer view descriptor; null views are skipped.
+ public unsafe void UpdateBufferImage(int setIndex, int bindingIndex, BufferView texelBufferView, DescriptorType type)
+ {
+ if (texelBufferView.Handle != 0UL)
+ {
+ var writeDescriptorSet = new WriteDescriptorSet
+ {
+ SType = StructureType.WriteDescriptorSet,
+ DstSet = _descriptorSets[setIndex],
+ DstBinding = (uint)bindingIndex,
+ DescriptorType = type,
+ DescriptorCount = 1,
+ PTexelBufferView = &texelBufferView
+ };
+
+ _holder.Api.UpdateDescriptorSets(_holder.Device, 1, writeDescriptorSet, 0, null);
+ }
+ }
+
+ // Writes texel buffer view descriptors, batching maximal runs of non-null
+ // views and skipping null entries.
+ public unsafe void UpdateBufferImages(int setIndex, int baseBinding, ReadOnlySpan<BufferView> texelBufferView, DescriptorType type)
+ {
+ if (texelBufferView.Length == 0)
+ {
+ return;
+ }
+
+ fixed (BufferView* pTexelBufferView = texelBufferView)
+ {
+ for (uint i = 0; i < texelBufferView.Length;)
+ {
+ uint count = 1;
+
+ if (texelBufferView[(int)i].Handle != 0UL)
+ {
+ while (i + count < texelBufferView.Length && texelBufferView[(int)(i + count)].Handle != 0UL)
+ {
+ count++;
+ }
+
+ var writeDescriptorSet = new WriteDescriptorSet
+ {
+ SType = StructureType.WriteDescriptorSet,
+ DstSet = _descriptorSets[setIndex],
+ DstBinding = (uint)baseBinding + i,
+ DescriptorType = type,
+ DescriptorCount = count,
+ PTexelBufferView = pTexelBufferView + i
+ };
+
+ _holder.Api.UpdateDescriptorSets(_holder.Device, 1, writeDescriptorSet, 0, null);
+ }
+
+ i += count;
+ }
+ }
+ }
+
+ public DescriptorSet[] GetSets()
+ {
+ return _descriptorSets;
+ }
+
+ // Returns the sets to the pool holder; safe to call on a default instance.
+ public void Dispose()
+ {
+ _holder?.FreeDescriptorSets(this);
+ _holder = null;
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DescriptorSetManager.cs b/src/Ryujinx.Graphics.Vulkan/DescriptorSetManager.cs
new file mode 100644
index 00000000..a88bb7b1
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DescriptorSetManager.cs
@@ -0,0 +1,201 @@
+using Silk.NET.Vulkan;
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Allocates descriptor sets from a chain of fixed-capacity descriptor
+ /// pools. When the current pool cannot satisfy a request, a new pool is
+ /// created; exhausted pools destroy themselves once all their sets are freed.
+ /// </summary>
+ class DescriptorSetManager : IDisposable
+ {
+ private const uint DescriptorPoolMultiplier = 16;
+
+ public class DescriptorPoolHolder : IDisposable
+ {
+ public Vk Api { get; }
+ public Device Device { get; }
+
+ private readonly DescriptorPool _pool;
+ private readonly uint _capacity;
+ // Sets ever allocated from this pool (forced to capacity on exhaustion).
+ private int _totalSets;
+ // Sets allocated and not yet freed.
+ private int _setsInUse;
+ // True once this pool is retired; it is destroyed when _setsInUse hits 0.
+ private bool _done;
+
+ public unsafe DescriptorPoolHolder(Vk api, Device device)
+ {
+ Api = api;
+ Device = device;
+
+ var poolSizes = new DescriptorPoolSize[]
+ {
+ new DescriptorPoolSize(DescriptorType.UniformBuffer, (1 + Constants.MaxUniformBufferBindings) * DescriptorPoolMultiplier),
+ new DescriptorPoolSize(DescriptorType.StorageBuffer, Constants.MaxStorageBufferBindings * DescriptorPoolMultiplier),
+ new DescriptorPoolSize(DescriptorType.CombinedImageSampler, Constants.MaxTextureBindings * DescriptorPoolMultiplier),
+ new DescriptorPoolSize(DescriptorType.StorageImage, Constants.MaxImageBindings * DescriptorPoolMultiplier),
+ new DescriptorPoolSize(DescriptorType.UniformTexelBuffer, Constants.MaxTextureBindings * DescriptorPoolMultiplier),
+ new DescriptorPoolSize(DescriptorType.StorageTexelBuffer, Constants.MaxImageBindings * DescriptorPoolMultiplier)
+ };
+
+ uint maxSets = (uint)poolSizes.Length * DescriptorPoolMultiplier;
+
+ _capacity = maxSets;
+
+ fixed (DescriptorPoolSize* pPoolsSize = poolSizes)
+ {
+ var descriptorPoolCreateInfo = new DescriptorPoolCreateInfo()
+ {
+ SType = StructureType.DescriptorPoolCreateInfo,
+ MaxSets = maxSets,
+ PoolSizeCount = (uint)poolSizes.Length,
+ PPoolSizes = pPoolsSize
+ };
+
+ Api.CreateDescriptorPool(device, descriptorPoolCreateInfo, null, out _pool).ThrowOnError();
+ }
+ }
+
+ // Allocates descriptor sets, throwing on any failure (including pool exhaustion).
+ public unsafe DescriptorSetCollection AllocateDescriptorSets(ReadOnlySpan<DescriptorSetLayout> layouts)
+ {
+ TryAllocateDescriptorSets(layouts, isTry: false, out var dsc);
+ return dsc;
+ }
+
+ // Attempts allocation; returns false (and retires this pool) when the
+ // pool is out of memory.
+ public bool TryAllocateDescriptorSets(ReadOnlySpan<DescriptorSetLayout> layouts, out DescriptorSetCollection dsc)
+ {
+ return TryAllocateDescriptorSets(layouts, isTry: true, out dsc);
+ }
+
+ private unsafe bool TryAllocateDescriptorSets(ReadOnlySpan<DescriptorSetLayout> layouts, bool isTry, out DescriptorSetCollection dsc)
+ {
+ Debug.Assert(!_done);
+
+ DescriptorSet[] descriptorSets = new DescriptorSet[layouts.Length];
+
+ fixed (DescriptorSet* pDescriptorSets = descriptorSets)
+ {
+ fixed (DescriptorSetLayout* pLayouts = layouts)
+ {
+ var descriptorSetAllocateInfo = new DescriptorSetAllocateInfo()
+ {
+ SType = StructureType.DescriptorSetAllocateInfo,
+ DescriptorPool = _pool,
+ DescriptorSetCount = (uint)layouts.Length,
+ PSetLayouts = pLayouts
+ };
+
+ var result = Api.AllocateDescriptorSets(Device, &descriptorSetAllocateInfo, pDescriptorSets);
+ if (isTry && result == Result.ErrorOutOfPoolMemory)
+ {
+ // Retire the pool: mark it full and done so it is destroyed
+ // once the last outstanding set is freed.
+ _totalSets = (int)_capacity;
+ _done = true;
+ DestroyIfDone();
+ dsc = default;
+ return false;
+ }
+
+ result.ThrowOnError();
+ }
+ }
+
+ _totalSets += layouts.Length;
+ _setsInUse += layouts.Length;
+
+ dsc = new DescriptorSetCollection(this, descriptorSets);
+ return true;
+ }
+
+ // Called from DescriptorSetCollection.Dispose; may destroy a retired pool.
+ public void FreeDescriptorSets(DescriptorSetCollection dsc)
+ {
+ _setsInUse -= dsc.SetsCount;
+ Debug.Assert(_setsInUse >= 0);
+ DestroyIfDone();
+ }
+
+ // True if this pool can still hold count more sets; otherwise retires it.
+ public bool CanFit(int count)
+ {
+ if (_totalSets + count <= _capacity)
+ {
+ return true;
+ }
+
+ _done = true;
+ DestroyIfDone();
+ return false;
+ }
+
+ // Destroys the Vulkan pool once it is retired and no sets remain in use.
+ private unsafe void DestroyIfDone()
+ {
+ if (_done && _setsInUse == 0)
+ {
+ Api.DestroyDescriptorPool(Device, _pool, null);
+ }
+ }
+
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ unsafe
+ {
+ Api.DestroyDescriptorPool(Device, _pool, null);
+ }
+ }
+ }
+
+ public void Dispose()
+ {
+ Dispose(true);
+ }
+ }
+
+ private readonly Device _device;
+ private DescriptorPoolHolder _currentPool;
+
+ public DescriptorSetManager(Device device)
+ {
+ _device = device;
+ }
+
+ // Single-layout convenience overload.
+ public Auto<DescriptorSetCollection> AllocateDescriptorSet(Vk api, DescriptorSetLayout layout)
+ {
+ Span<DescriptorSetLayout> layouts = stackalloc DescriptorSetLayout[1];
+ layouts[0] = layout;
+ return AllocateDescriptorSets(api, layouts);
+ }
+
+ public Auto<DescriptorSetCollection> AllocateDescriptorSets(Vk api, ReadOnlySpan<DescriptorSetLayout> layouts)
+ {
+ // If we fail the first time, just create a new pool and try again.
+ if (!GetPool(api, layouts.Length).TryAllocateDescriptorSets(layouts, out var dsc))
+ {
+ dsc = GetPool(api, layouts.Length).AllocateDescriptorSets(layouts);
+ }
+
+ return new Auto<DescriptorSetCollection>(dsc);
+ }
+
+ // Returns the current pool, replacing it when it cannot fit the request.
+ private DescriptorPoolHolder GetPool(Vk api, int requiredCount)
+ {
+ if (_currentPool == null || !_currentPool.CanFit(requiredCount))
+ {
+ _currentPool = new DescriptorPoolHolder(api, _device);
+ }
+
+ return _currentPool;
+ }
+
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ unsafe
+ {
+ _currentPool?.Dispose();
+ }
+ }
+ }
+
+ public void Dispose()
+ {
+ Dispose(true);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DescriptorSetUpdater.cs b/src/Ryujinx.Graphics.Vulkan/DescriptorSetUpdater.cs
new file mode 100644
index 00000000..ab3befd8
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DescriptorSetUpdater.cs
@@ -0,0 +1,674 @@
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Shader;
+using Silk.NET.Vulkan;
+using System;
+using System.Numerics;
+using System.Runtime.CompilerServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class DescriptorSetUpdater
+ {
+ private readonly VulkanRenderer _gd;
+ private readonly PipelineBase _pipeline;
+
+ private ShaderCollection _program;
+
+ private Auto<DisposableBuffer>[] _uniformBufferRefs;
+ private Auto<DisposableBuffer>[] _storageBufferRefs;
+ private Auto<DisposableImageView>[] _textureRefs;
+ private Auto<DisposableSampler>[] _samplerRefs;
+ private Auto<DisposableImageView>[] _imageRefs;
+ private TextureBuffer[] _bufferTextureRefs;
+ private TextureBuffer[] _bufferImageRefs;
+ private GAL.Format[] _bufferImageFormats;
+
+ private DescriptorBufferInfo[] _uniformBuffers;
+ private DescriptorBufferInfo[] _storageBuffers;
+ private DescriptorImageInfo[] _textures;
+ private DescriptorImageInfo[] _images;
+ private BufferView[] _bufferTextures;
+ private BufferView[] _bufferImages;
+
+ private bool[] _uniformSet;
+ private bool[] _storageSet;
+ private Silk.NET.Vulkan.Buffer _cachedSupportBuffer;
+
+ [Flags]
+ private enum DirtyFlags
+ {
+ None = 0,
+ Uniform = 1 << 0,
+ Storage = 1 << 1,
+ Texture = 1 << 2,
+ Image = 1 << 3,
+ All = Uniform | Storage | Texture | Image
+ }
+
+ private DirtyFlags _dirty;
+
+ private readonly BufferHolder _dummyBuffer;
+ private readonly TextureView _dummyTexture;
+ private readonly SamplerHolder _dummySampler;
+
+ public DescriptorSetUpdater(VulkanRenderer gd, PipelineBase pipeline)
+ {
+ _gd = gd;
+ _pipeline = pipeline;
+
+ // Some of the bindings counts needs to be multiplied by 2 because we have buffer and
+ // regular textures/images interleaved on the same descriptor set.
+
+ _uniformBufferRefs = new Auto<DisposableBuffer>[Constants.MaxUniformBufferBindings];
+ _storageBufferRefs = new Auto<DisposableBuffer>[Constants.MaxStorageBufferBindings];
+ _textureRefs = new Auto<DisposableImageView>[Constants.MaxTextureBindings * 2];
+ _samplerRefs = new Auto<DisposableSampler>[Constants.MaxTextureBindings * 2];
+ _imageRefs = new Auto<DisposableImageView>[Constants.MaxImageBindings * 2];
+ _bufferTextureRefs = new TextureBuffer[Constants.MaxTextureBindings * 2];
+ _bufferImageRefs = new TextureBuffer[Constants.MaxImageBindings * 2];
+ _bufferImageFormats = new GAL.Format[Constants.MaxImageBindings * 2];
+
+ _uniformBuffers = new DescriptorBufferInfo[Constants.MaxUniformBufferBindings];
+ _storageBuffers = new DescriptorBufferInfo[Constants.MaxStorageBufferBindings];
+ _textures = new DescriptorImageInfo[Constants.MaxTexturesPerStage];
+ _images = new DescriptorImageInfo[Constants.MaxImagesPerStage];
+ _bufferTextures = new BufferView[Constants.MaxTexturesPerStage];
+ _bufferImages = new BufferView[Constants.MaxImagesPerStage];
+
+ var initialImageInfo = new DescriptorImageInfo()
+ {
+ ImageLayout = ImageLayout.General
+ };
+
+ _textures.AsSpan().Fill(initialImageInfo);
+ _images.AsSpan().Fill(initialImageInfo);
+
+ _uniformSet = new bool[Constants.MaxUniformBufferBindings];
+ _storageSet = new bool[Constants.MaxStorageBufferBindings];
+
+ if (gd.Capabilities.SupportsNullDescriptors)
+ {
+ // If null descriptors are supported, we can pass null as the handle.
+ _dummyBuffer = null;
+ }
+ else
+ {
+ // If null descriptors are not supported, we need to pass the handle of a dummy buffer on unused bindings.
+ _dummyBuffer = gd.BufferManager.Create(gd, 0x10000, forConditionalRendering: false, baseType: BufferAllocationType.DeviceLocal);
+ }
+
+ _dummyTexture = gd.CreateTextureView(new TextureCreateInfo(
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 4,
+ GAL.Format.R8G8B8A8Unorm,
+ DepthStencilMode.Depth,
+ Target.Texture2D,
+ SwizzleComponent.Red,
+ SwizzleComponent.Green,
+ SwizzleComponent.Blue,
+ SwizzleComponent.Alpha), 1f);
+
+ _dummySampler = (SamplerHolder)gd.CreateSampler(new GAL.SamplerCreateInfo(
+ MinFilter.Nearest,
+ MagFilter.Nearest,
+ false,
+ AddressMode.Repeat,
+ AddressMode.Repeat,
+ AddressMode.Repeat,
+ CompareMode.None,
+ GAL.CompareOp.Always,
+ new ColorF(0, 0, 0, 0),
+ 0,
+ 0,
+ 0,
+ 1f));
+ }
+
+ public void Initialize()
+ {
+ Span<byte> dummyTextureData = stackalloc byte[4];
+ _dummyTexture.SetData(dummyTextureData);
+ }
+
+ public void SetProgram(ShaderCollection program)
+ {
+ _program = program;
+ _dirty = DirtyFlags.All;
+ }
+
+ public void SetImage(int binding, ITexture image, GAL.Format imageFormat)
+ {
+ if (image is TextureBuffer imageBuffer)
+ {
+ _bufferImageRefs[binding] = imageBuffer;
+ _bufferImageFormats[binding] = imageFormat;
+ }
+ else if (image is TextureView view)
+ {
+ _imageRefs[binding] = view.GetView(imageFormat).GetIdentityImageView();
+ }
+ else
+ {
+ _imageRefs[binding] = null;
+ _bufferImageRefs[binding] = null;
+ _bufferImageFormats[binding] = default;
+ }
+
+ SignalDirty(DirtyFlags.Image);
+ }
+
+ public void SetImage(int binding, Auto<DisposableImageView> image)
+ {
+ _imageRefs[binding] = image;
+
+ SignalDirty(DirtyFlags.Image);
+ }
+
+ public void SetStorageBuffers(CommandBuffer commandBuffer, ReadOnlySpan<BufferAssignment> buffers)
+ {
+ for (int i = 0; i < buffers.Length; i++)
+ {
+ var assignment = buffers[i];
+ var buffer = assignment.Range;
+ int index = assignment.Binding;
+
+ Auto<DisposableBuffer> vkBuffer = _gd.BufferManager.GetBuffer(commandBuffer, buffer.Handle, false, isSSBO: true);
+ ref Auto<DisposableBuffer> currentVkBuffer = ref _storageBufferRefs[index];
+
+ DescriptorBufferInfo info = new DescriptorBufferInfo()
+ {
+ Offset = (ulong)buffer.Offset,
+ Range = (ulong)buffer.Size
+ };
+ ref DescriptorBufferInfo currentInfo = ref _storageBuffers[index];
+
+ if (vkBuffer != currentVkBuffer || currentInfo.Offset != info.Offset || currentInfo.Range != info.Range)
+ {
+ _storageSet[index] = false;
+
+ currentInfo = info;
+ currentVkBuffer = vkBuffer;
+ }
+ }
+
+ SignalDirty(DirtyFlags.Storage);
+ }
+
+ public void SetStorageBuffers(CommandBuffer commandBuffer, int first, ReadOnlySpan<Auto<DisposableBuffer>> buffers)
+ {
+ for (int i = 0; i < buffers.Length; i++)
+ {
+ var vkBuffer = buffers[i];
+ int index = first + i;
+
+ ref Auto<DisposableBuffer> currentVkBuffer = ref _storageBufferRefs[index];
+
+ DescriptorBufferInfo info = new DescriptorBufferInfo()
+ {
+ Offset = 0,
+ Range = Vk.WholeSize
+ };
+ ref DescriptorBufferInfo currentInfo = ref _storageBuffers[index];
+
+ if (vkBuffer != currentVkBuffer || currentInfo.Offset != info.Offset || currentInfo.Range != info.Range)
+ {
+ _storageSet[index] = false;
+
+ currentInfo = info;
+ currentVkBuffer = vkBuffer;
+ }
+ }
+
+ SignalDirty(DirtyFlags.Storage);
+ }
+
+ public void SetTextureAndSampler(CommandBufferScoped cbs, ShaderStage stage, int binding, ITexture texture, ISampler sampler)
+ {
+ if (texture is TextureBuffer textureBuffer)
+ {
+ _bufferTextureRefs[binding] = textureBuffer;
+ }
+ else if (texture is TextureView view)
+ {
+ view.Storage.InsertWriteToReadBarrier(cbs, AccessFlags.ShaderReadBit, stage.ConvertToPipelineStageFlags());
+
+ _textureRefs[binding] = view.GetImageView();
+ _samplerRefs[binding] = ((SamplerHolder)sampler)?.GetSampler();
+ }
+ else
+ {
+ _textureRefs[binding] = null;
+ _samplerRefs[binding] = null;
+ _bufferTextureRefs[binding] = null;
+ }
+
+ SignalDirty(DirtyFlags.Texture);
+ }
+
+ public void SetUniformBuffers(CommandBuffer commandBuffer, ReadOnlySpan<BufferAssignment> buffers)
+ {
+ for (int i = 0; i < buffers.Length; i++)
+ {
+ var assignment = buffers[i];
+ var buffer = assignment.Range;
+ int index = assignment.Binding;
+
+ Auto<DisposableBuffer> vkBuffer = _gd.BufferManager.GetBuffer(commandBuffer, buffer.Handle, false);
+ ref Auto<DisposableBuffer> currentVkBuffer = ref _uniformBufferRefs[index];
+
+ DescriptorBufferInfo info = new DescriptorBufferInfo()
+ {
+ Offset = (ulong)buffer.Offset,
+ Range = (ulong)buffer.Size
+ };
+ ref DescriptorBufferInfo currentInfo = ref _uniformBuffers[index];
+
+ if (vkBuffer != currentVkBuffer || currentInfo.Offset != info.Offset || currentInfo.Range != info.Range)
+ {
+ _uniformSet[index] = false;
+
+ currentInfo = info;
+ currentVkBuffer = vkBuffer;
+ }
+ }
+
+ SignalDirty(DirtyFlags.Uniform);
+ }
+
+ private void SignalDirty(DirtyFlags flag)
+ {
+ _dirty |= flag;
+ }
+
+ public void UpdateAndBindDescriptorSets(CommandBufferScoped cbs, PipelineBindPoint pbp)
+ {
+ if ((_dirty & DirtyFlags.All) == 0)
+ {
+ return;
+ }
+
+ if (_dirty.HasFlag(DirtyFlags.Uniform))
+ {
+ if (_program.UsePushDescriptors)
+ {
+ UpdateAndBindUniformBufferPd(cbs, pbp);
+ }
+ else
+ {
+ UpdateAndBind(cbs, PipelineBase.UniformSetIndex, pbp);
+ }
+ }
+
+ if (_dirty.HasFlag(DirtyFlags.Storage))
+ {
+ UpdateAndBind(cbs, PipelineBase.StorageSetIndex, pbp);
+ }
+
+ if (_dirty.HasFlag(DirtyFlags.Texture))
+ {
+ UpdateAndBind(cbs, PipelineBase.TextureSetIndex, pbp);
+ }
+
+ if (_dirty.HasFlag(DirtyFlags.Image))
+ {
+ UpdateAndBind(cbs, PipelineBase.ImageSetIndex, pbp);
+ }
+
+ _dirty = DirtyFlags.None;
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private static void UpdateBuffer(
+ CommandBufferScoped cbs,
+ ref DescriptorBufferInfo info,
+ Auto<DisposableBuffer> buffer,
+ Auto<DisposableBuffer> dummyBuffer)
+ {
+ info.Buffer = buffer?.Get(cbs, (int)info.Offset, (int)info.Range).Value ?? default;
+
+ // The spec requires that buffers with null handle have offset as 0 and range as VK_WHOLE_SIZE.
+ if (info.Buffer.Handle == 0)
+ {
+ info.Buffer = dummyBuffer?.Get(cbs).Value ?? default;
+ info.Offset = 0;
+ info.Range = Vk.WholeSize;
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void UpdateAndBind(CommandBufferScoped cbs, int setIndex, PipelineBindPoint pbp)
+ {
+ var program = _program;
+ int stagesCount = program.Bindings[setIndex].Length;
+ if (stagesCount == 0 && setIndex != PipelineBase.UniformSetIndex)
+ {
+ return;
+ }
+
+ var dummyBuffer = _dummyBuffer?.GetBuffer();
+
+ var dsc = program.GetNewDescriptorSetCollection(_gd, cbs.CommandBufferIndex, setIndex, out var isNew).Get(cbs);
+
+ if (!program.HasMinimalLayout)
+ {
+ if (isNew)
+ {
+ Initialize(cbs, setIndex, dsc);
+ }
+
+ if (setIndex == PipelineBase.UniformSetIndex)
+ {
+ Span<DescriptorBufferInfo> uniformBuffer = stackalloc DescriptorBufferInfo[1];
+
+ if (!_uniformSet[0])
+ {
+ _cachedSupportBuffer = _gd.BufferManager.GetBuffer(cbs.CommandBuffer, _pipeline.SupportBufferUpdater.Handle, false).Get(cbs, 0, SupportBuffer.RequiredSize).Value;
+ _uniformSet[0] = true;
+ }
+
+ uniformBuffer[0] = new DescriptorBufferInfo()
+ {
+ Offset = 0,
+ Range = (ulong)SupportBuffer.RequiredSize,
+ Buffer = _cachedSupportBuffer
+ };
+
+ dsc.UpdateBuffers(0, 0, uniformBuffer, DescriptorType.UniformBuffer);
+ }
+ }
+
+ for (int stageIndex = 0; stageIndex < stagesCount; stageIndex++)
+ {
+ var stageBindings = program.Bindings[setIndex][stageIndex];
+ int bindingsCount = stageBindings.Length;
+ int count;
+
+ for (int bindingIndex = 0; bindingIndex < bindingsCount; bindingIndex += count)
+ {
+ int binding = stageBindings[bindingIndex];
+ count = 1;
+
+ while (bindingIndex + count < bindingsCount && stageBindings[bindingIndex + count] == binding + count)
+ {
+ count++;
+ }
+
+ if (setIndex == PipelineBase.UniformSetIndex)
+ {
+ for (int i = 0; i < count; i++)
+ {
+ int index = binding + i;
+
+ if (!_uniformSet[index])
+ {
+ UpdateBuffer(cbs, ref _uniformBuffers[index], _uniformBufferRefs[index], dummyBuffer);
+
+ _uniformSet[index] = true;
+ }
+ }
+
+ ReadOnlySpan<DescriptorBufferInfo> uniformBuffers = _uniformBuffers;
+ dsc.UpdateBuffers(0, binding, uniformBuffers.Slice(binding, count), DescriptorType.UniformBuffer);
+ }
+ else if (setIndex == PipelineBase.StorageSetIndex)
+ {
+ for (int i = 0; i < count; i++)
+ {
+ int index = binding + i;
+
+ if (!_storageSet[index])
+ {
+ UpdateBuffer(cbs, ref _storageBuffers[index], _storageBufferRefs[index], dummyBuffer);
+
+ _storageSet[index] = true;
+ }
+ }
+
+ ReadOnlySpan<DescriptorBufferInfo> storageBuffers = _storageBuffers;
+ if (program.HasMinimalLayout)
+ {
+ dsc.UpdateBuffers(0, binding, storageBuffers.Slice(binding, count), DescriptorType.StorageBuffer);
+ }
+ else
+ {
+ dsc.UpdateStorageBuffers(0, binding, storageBuffers.Slice(binding, count));
+ }
+ }
+ else if (setIndex == PipelineBase.TextureSetIndex)
+ {
+ if (((uint)binding % (Constants.MaxTexturesPerStage * 2)) < Constants.MaxTexturesPerStage || program.HasMinimalLayout)
+ {
+ Span<DescriptorImageInfo> textures = _textures;
+
+ for (int i = 0; i < count; i++)
+ {
+ ref var texture = ref textures[i];
+
+ texture.ImageView = _textureRefs[binding + i]?.Get(cbs).Value ?? default;
+ texture.Sampler = _samplerRefs[binding + i]?.Get(cbs).Value ?? default;
+
+ if (texture.ImageView.Handle == 0)
+ {
+ texture.ImageView = _dummyTexture.GetImageView().Get(cbs).Value;
+ }
+
+ if (texture.Sampler.Handle == 0)
+ {
+ texture.Sampler = _dummySampler.GetSampler().Get(cbs).Value;
+ }
+ }
+
+ dsc.UpdateImages(0, binding, textures.Slice(0, count), DescriptorType.CombinedImageSampler);
+ }
+ else
+ {
+ Span<BufferView> bufferTextures = _bufferTextures;
+
+ for (int i = 0; i < count; i++)
+ {
+ bufferTextures[i] = _bufferTextureRefs[binding + i]?.GetBufferView(cbs) ?? default;
+ }
+
+ dsc.UpdateBufferImages(0, binding, bufferTextures.Slice(0, count), DescriptorType.UniformTexelBuffer);
+ }
+ }
+ else if (setIndex == PipelineBase.ImageSetIndex)
+ {
+ if (((uint)binding % (Constants.MaxImagesPerStage * 2)) < Constants.MaxImagesPerStage || program.HasMinimalLayout)
+ {
+ Span<DescriptorImageInfo> images = _images;
+
+ for (int i = 0; i < count; i++)
+ {
+ images[i].ImageView = _imageRefs[binding + i]?.Get(cbs).Value ?? default;
+ }
+
+ dsc.UpdateImages(0, binding, images.Slice(0, count), DescriptorType.StorageImage);
+ }
+ else
+ {
+ Span<BufferView> bufferImages = _bufferImages;
+
+ for (int i = 0; i < count; i++)
+ {
+ bufferImages[i] = _bufferImageRefs[binding + i]?.GetBufferView(cbs, _bufferImageFormats[binding + i]) ?? default;
+ }
+
+ dsc.UpdateBufferImages(0, binding, bufferImages.Slice(0, count), DescriptorType.StorageTexelBuffer);
+ }
+ }
+ }
+ }
+
+ var sets = dsc.GetSets();
+
+ _gd.Api.CmdBindDescriptorSets(cbs.CommandBuffer, pbp, _program.PipelineLayout, (uint)setIndex, 1, sets, 0, ReadOnlySpan<uint>.Empty);
+ }
+
+ private unsafe void UpdateBuffers(
+ CommandBufferScoped cbs,
+ PipelineBindPoint pbp,
+ int baseBinding,
+ ReadOnlySpan<DescriptorBufferInfo> bufferInfo,
+ DescriptorType type)
+ {
+ if (bufferInfo.Length == 0)
+ {
+ return;
+ }
+
+ fixed (DescriptorBufferInfo* pBufferInfo = bufferInfo)
+ {
+ var writeDescriptorSet = new WriteDescriptorSet
+ {
+ SType = StructureType.WriteDescriptorSet,
+ DstBinding = (uint)baseBinding,
+ DescriptorType = type,
+ DescriptorCount = (uint)bufferInfo.Length,
+ PBufferInfo = pBufferInfo
+ };
+
+ _gd.PushDescriptorApi.CmdPushDescriptorSet(cbs.CommandBuffer, pbp, _program.PipelineLayout, 0, 1, &writeDescriptorSet);
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void UpdateAndBindUniformBufferPd(CommandBufferScoped cbs, PipelineBindPoint pbp)
+ {
+ var dummyBuffer = _dummyBuffer?.GetBuffer();
+ int stagesCount = _program.Bindings[PipelineBase.UniformSetIndex].Length;
+
+ if (!_uniformSet[0])
+ {
+ Span<DescriptorBufferInfo> uniformBuffer = stackalloc DescriptorBufferInfo[1];
+
+ uniformBuffer[0] = new DescriptorBufferInfo()
+ {
+ Offset = 0,
+ Range = (ulong)SupportBuffer.RequiredSize,
+ Buffer = _gd.BufferManager.GetBuffer(cbs.CommandBuffer, _pipeline.SupportBufferUpdater.Handle, false).Get(cbs, 0, SupportBuffer.RequiredSize).Value
+ };
+
+ _uniformSet[0] = true;
+
+ UpdateBuffers(cbs, pbp, 0, uniformBuffer, DescriptorType.UniformBuffer);
+ }
+
+ for (int stageIndex = 0; stageIndex < stagesCount; stageIndex++)
+ {
+ var stageBindings = _program.Bindings[PipelineBase.UniformSetIndex][stageIndex];
+ int bindingsCount = stageBindings.Length;
+ int count;
+
+ for (int bindingIndex = 0; bindingIndex < bindingsCount; bindingIndex += count)
+ {
+ int binding = stageBindings[bindingIndex];
+ count = 1;
+
+ while (bindingIndex + count < bindingsCount && stageBindings[bindingIndex + count] == binding + count)
+ {
+ count++;
+ }
+
+ bool doUpdate = false;
+
+ for (int i = 0; i < count; i++)
+ {
+ int index = binding + i;
+
+ if (!_uniformSet[index])
+ {
+ UpdateBuffer(cbs, ref _uniformBuffers[index], _uniformBufferRefs[index], dummyBuffer);
+ _uniformSet[index] = true;
+ doUpdate = true;
+ }
+ }
+
+ if (doUpdate)
+ {
+ ReadOnlySpan<DescriptorBufferInfo> uniformBuffers = _uniformBuffers;
+ UpdateBuffers(cbs, pbp, binding, uniformBuffers.Slice(binding, count), DescriptorType.UniformBuffer);
+ }
+ }
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void Initialize(CommandBufferScoped cbs, int setIndex, DescriptorSetCollection dsc)
+ {
+ var dummyBuffer = _dummyBuffer?.GetBuffer().Get(cbs).Value ?? default;
+
+ uint stages = _program.Stages;
+
+ while (stages != 0)
+ {
+ int stage = BitOperations.TrailingZeroCount(stages);
+ stages &= ~(1u << stage);
+
+ if (setIndex == PipelineBase.UniformSetIndex)
+ {
+ dsc.InitializeBuffers(
+ 0,
+ 1 + stage * Constants.MaxUniformBuffersPerStage,
+ Constants.MaxUniformBuffersPerStage,
+ DescriptorType.UniformBuffer,
+ dummyBuffer);
+ }
+ else if (setIndex == PipelineBase.StorageSetIndex)
+ {
+ dsc.InitializeBuffers(
+ 0,
+ stage * Constants.MaxStorageBuffersPerStage,
+ Constants.MaxStorageBuffersPerStage,
+ DescriptorType.StorageBuffer,
+ dummyBuffer);
+ }
+ }
+ }
+
+ public void SignalCommandBufferChange()
+ {
+ _dirty = DirtyFlags.All;
+
+ Array.Clear(_uniformSet);
+ Array.Clear(_storageSet);
+ }
+
+ private void SwapBuffer(Auto<DisposableBuffer>[] list, Auto<DisposableBuffer> from, Auto<DisposableBuffer> to)
+ {
+ for (int i = 0; i < list.Length; i++)
+ {
+ if (list[i] == from)
+ {
+ list[i] = to;
+ }
+ }
+ }
+
+ public void SwapBuffer(Auto<DisposableBuffer> from, Auto<DisposableBuffer> to)
+ {
+ SwapBuffer(_uniformBufferRefs, from, to);
+ SwapBuffer(_storageBufferRefs, from, to);
+ }
+
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ _dummyTexture.Dispose();
+ _dummySampler.Dispose();
+ }
+ }
+
+ public void Dispose()
+ {
+ Dispose(true);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DisposableBuffer.cs b/src/Ryujinx.Graphics.Vulkan/DisposableBuffer.cs
new file mode 100644
index 00000000..0f474f97
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DisposableBuffer.cs
@@ -0,0 +1,25 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct DisposableBuffer : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+
+ public Silk.NET.Vulkan.Buffer Value { get; }
+
+ public DisposableBuffer(Vk api, Device device, Silk.NET.Vulkan.Buffer buffer)
+ {
+ _api = api;
+ _device = device;
+ Value = buffer;
+ }
+
+ public void Dispose()
+ {
+ _api.DestroyBuffer(_device, Value, Span<AllocationCallbacks>.Empty);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DisposableBufferView.cs b/src/Ryujinx.Graphics.Vulkan/DisposableBufferView.cs
new file mode 100644
index 00000000..e81ca412
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DisposableBufferView.cs
@@ -0,0 +1,25 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct DisposableBufferView : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+
+ public BufferView Value { get; }
+
+ public DisposableBufferView(Vk api, Device device, BufferView bufferView)
+ {
+ _api = api;
+ _device = device;
+ Value = bufferView;
+ }
+
+ public void Dispose()
+ {
+ _api.DestroyBufferView(_device, Value, Span<AllocationCallbacks>.Empty);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DisposableFramebuffer.cs b/src/Ryujinx.Graphics.Vulkan/DisposableFramebuffer.cs
new file mode 100644
index 00000000..5b195354
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DisposableFramebuffer.cs
@@ -0,0 +1,25 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct DisposableFramebuffer : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+
+ public Framebuffer Value { get; }
+
+ public DisposableFramebuffer(Vk api, Device device, Framebuffer framebuffer)
+ {
+ _api = api;
+ _device = device;
+ Value = framebuffer;
+ }
+
+ public void Dispose()
+ {
+ _api.DestroyFramebuffer(_device, Value, Span<AllocationCallbacks>.Empty);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DisposableImage.cs b/src/Ryujinx.Graphics.Vulkan/DisposableImage.cs
new file mode 100644
index 00000000..c76091b7
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DisposableImage.cs
@@ -0,0 +1,25 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct DisposableImage : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+
+ public Image Value { get; }
+
+ public DisposableImage(Vk api, Device device, Image image)
+ {
+ _api = api;
+ _device = device;
+ Value = image;
+ }
+
+ public void Dispose()
+ {
+ _api.DestroyImage(_device, Value, Span<AllocationCallbacks>.Empty);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DisposableImageView.cs b/src/Ryujinx.Graphics.Vulkan/DisposableImageView.cs
new file mode 100644
index 00000000..3b3bf6ad
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DisposableImageView.cs
@@ -0,0 +1,25 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct DisposableImageView : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+
+ public ImageView Value { get; }
+
+ public DisposableImageView(Vk api, Device device, ImageView imageView)
+ {
+ _api = api;
+ _device = device;
+ Value = imageView;
+ }
+
+ public void Dispose()
+ {
+ _api.DestroyImageView(_device, Value, Span<AllocationCallbacks>.Empty);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DisposableMemory.cs b/src/Ryujinx.Graphics.Vulkan/DisposableMemory.cs
new file mode 100644
index 00000000..638989ac
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DisposableMemory.cs
@@ -0,0 +1,24 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct DisposableMemory : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+ private readonly DeviceMemory _memory;
+
+ public DisposableMemory(Vk api, Device device, DeviceMemory memory)
+ {
+ _api = api;
+ _device = device;
+ _memory = memory;
+ }
+
+ public void Dispose()
+ {
+ _api.FreeMemory(_device, _memory, Span<AllocationCallbacks>.Empty);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DisposablePipeline.cs b/src/Ryujinx.Graphics.Vulkan/DisposablePipeline.cs
new file mode 100644
index 00000000..6e5cf4db
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DisposablePipeline.cs
@@ -0,0 +1,25 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct DisposablePipeline : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+
+ public Pipeline Value { get; }
+
+ public DisposablePipeline(Vk api, Device device, Pipeline pipeline)
+ {
+ _api = api;
+ _device = device;
+ Value = pipeline;
+ }
+
+ public void Dispose()
+ {
+ _api.DestroyPipeline(_device, Value, Span<AllocationCallbacks>.Empty);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DisposableRenderPass.cs b/src/Ryujinx.Graphics.Vulkan/DisposableRenderPass.cs
new file mode 100644
index 00000000..65652f41
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DisposableRenderPass.cs
@@ -0,0 +1,25 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct DisposableRenderPass : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+
+ public RenderPass Value { get; }
+
+ public DisposableRenderPass(Vk api, Device device, RenderPass renderPass)
+ {
+ _api = api;
+ _device = device;
+ Value = renderPass;
+ }
+
+ public void Dispose()
+ {
+ _api.DestroyRenderPass(_device, Value, Span<AllocationCallbacks>.Empty);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/DisposableSampler.cs b/src/Ryujinx.Graphics.Vulkan/DisposableSampler.cs
new file mode 100644
index 00000000..4788b192
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/DisposableSampler.cs
@@ -0,0 +1,25 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct DisposableSampler : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+
+ public Sampler Value { get; }
+
+ public DisposableSampler(Vk api, Device device, Sampler sampler)
+ {
+ _api = api;
+ _device = device;
+ Value = sampler;
+ }
+
+ public void Dispose()
+ {
+ _api.DestroySampler(_device, Value, Span<AllocationCallbacks>.Empty);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/FsrScalingFilter.cs b/src/Ryujinx.Graphics.Vulkan/Effects/FsrScalingFilter.cs
new file mode 100644
index 00000000..5f15f15f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/FsrScalingFilter.cs
@@ -0,0 +1,179 @@
+using Ryujinx.Common;
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Shader;
+using Ryujinx.Graphics.Shader.Translation;
+using Silk.NET.Vulkan;
+using System;
+using Extent2D = Ryujinx.Graphics.GAL.Extents2D;
+
+namespace Ryujinx.Graphics.Vulkan.Effects
+{
+ internal partial class FsrScalingFilter : IScalingFilter
+ {
+ private readonly VulkanRenderer _renderer;
+ private PipelineHelperShader _pipeline;
+ private ISampler _sampler;
+ private ShaderCollection _scalingProgram;
+ private ShaderCollection _sharpeningProgram;
+ private float _sharpeningLevel = 1;
+ private Device _device;
+ private TextureView _intermediaryTexture;
+
+ public float Level
+ {
+ get => _sharpeningLevel;
+ set
+ {
+ _sharpeningLevel = MathF.Max(0.01f, value);
+ }
+ }
+
+ public FsrScalingFilter(VulkanRenderer renderer, Device device)
+ {
+ _device = device;
+ _renderer = renderer;
+
+ Initialize();
+ }
+
+ public void Dispose()
+ {
+ _pipeline.Dispose();
+ _scalingProgram.Dispose();
+ _sharpeningProgram.Dispose();
+ _sampler.Dispose();
+ _intermediaryTexture?.Dispose();
+ }
+
+ public void Initialize()
+ {
+ _pipeline = new PipelineHelperShader(_renderer, _device);
+
+ _pipeline.Initialize();
+
+ var scalingShader = EmbeddedResources.Read("Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrScaling.spv");
+ var sharpeningShader = EmbeddedResources.Read("Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrSharpening.spv");
+
+ var computeBindings = new ShaderBindings(
+ new[] { 2 },
+ Array.Empty<int>(),
+ new[] { 1 },
+ new[] { 0 });
+
+ var sharpeningBindings = new ShaderBindings(
+ new[] { 2, 3, 4 },
+ Array.Empty<int>(),
+ new[] { 1 },
+ new[] { 0 });
+
+ _sampler = _renderer.CreateSampler(GAL.SamplerCreateInfo.Create(MinFilter.Linear, MagFilter.Linear));
+
+ _scalingProgram = _renderer.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(scalingShader, computeBindings, ShaderStage.Compute, TargetLanguage.Spirv)
+ });
+
+ _sharpeningProgram = _renderer.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(sharpeningShader, sharpeningBindings, ShaderStage.Compute, TargetLanguage.Spirv)
+ });
+ }
+
+ public void Run(
+ TextureView view,
+ CommandBufferScoped cbs,
+ Auto<DisposableImageView> destinationTexture,
+ Silk.NET.Vulkan.Format format,
+ int width,
+ int height,
+ Extent2D source,
+ Extent2D destination)
+ {
+ if (_intermediaryTexture == null
+ || _intermediaryTexture.Info.Width != width
+ || _intermediaryTexture.Info.Height != height
+ || !_intermediaryTexture.Info.Equals(view.Info))
+ {
+ var originalInfo = view.Info;
+
+ var swapRB = originalInfo.Format.IsBgr() && originalInfo.SwizzleR == SwizzleComponent.Red;
+
+ var info = new TextureCreateInfo(
+ width,
+ height,
+ originalInfo.Depth,
+ originalInfo.Levels,
+ originalInfo.Samples,
+ originalInfo.BlockWidth,
+ originalInfo.BlockHeight,
+ originalInfo.BytesPerPixel,
+ originalInfo.Format,
+ originalInfo.DepthStencilMode,
+ originalInfo.Target,
+ swapRB ? originalInfo.SwizzleB : originalInfo.SwizzleR,
+ originalInfo.SwizzleG,
+ swapRB ? originalInfo.SwizzleR : originalInfo.SwizzleB,
+ originalInfo.SwizzleA);
+ _intermediaryTexture?.Dispose();
+ _intermediaryTexture = _renderer.CreateTexture(info, view.ScaleFactor) as TextureView;
+ }
+
+ _pipeline.SetCommandBuffer(cbs);
+ _pipeline.SetProgram(_scalingProgram);
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 1, view, _sampler);
+
+ float srcWidth = Math.Abs(source.X2 - source.X1);
+ float srcHeight = Math.Abs(source.Y2 - source.Y1);
+ float scaleX = srcWidth / view.Width;
+ float scaleY = srcHeight / view.Height;
+
+ ReadOnlySpan<float> dimensionsBuffer = stackalloc float[]
+ {
+ source.X1,
+ source.X2,
+ source.Y1,
+ source.Y2,
+ destination.X1,
+ destination.X2,
+ destination.Y1,
+ destination.Y2,
+ scaleX,
+ scaleY
+ };
+
+ int rangeSize = dimensionsBuffer.Length * sizeof(float);
+ var bufferHandle = _renderer.BufferManager.CreateWithHandle(_renderer, rangeSize);
+ _renderer.BufferManager.SetData(bufferHandle, 0, dimensionsBuffer);
+
+ ReadOnlySpan<float> sharpeningBuffer = stackalloc float[] { 1.5f - (Level * 0.01f * 1.5f)};
+ var sharpeningBufferHandle = _renderer.BufferManager.CreateWithHandle(_renderer, sizeof(float));
+ _renderer.BufferManager.SetData(sharpeningBufferHandle, 0, sharpeningBuffer);
+
+ int threadGroupWorkRegionDim = 16;
+ int dispatchX = (width + (threadGroupWorkRegionDim - 1)) / threadGroupWorkRegionDim;
+ int dispatchY = (height + (threadGroupWorkRegionDim - 1)) / threadGroupWorkRegionDim;
+
+ var bufferRanges = new BufferRange(bufferHandle, 0, rangeSize);
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(2, bufferRanges) });
+ _pipeline.SetImage(0, _intermediaryTexture, GAL.Format.R8G8B8A8Unorm);
+ _pipeline.DispatchCompute(dispatchX, dispatchY, 1);
+ _pipeline.ComputeBarrier();
+
+ // Sharpening pass
+ _pipeline.SetCommandBuffer(cbs);
+ _pipeline.SetProgram(_sharpeningProgram);
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 1, _intermediaryTexture, _sampler);
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(2, bufferRanges) });
+ var sharpeningRange = new BufferRange(sharpeningBufferHandle, 0, sizeof(float));
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(4, sharpeningRange) });
+ _pipeline.SetImage(0, destinationTexture);
+ _pipeline.DispatchCompute(dispatchX, dispatchY, 1);
+ _pipeline.ComputeBarrier();
+
+ _pipeline.Finish();
+
+ _renderer.BufferManager.Delete(bufferHandle);
+ _renderer.BufferManager.Delete(sharpeningBufferHandle);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/FxaaPostProcessingEffect.cs b/src/Ryujinx.Graphics.Vulkan/Effects/FxaaPostProcessingEffect.cs
new file mode 100644
index 00000000..b7316d85
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/FxaaPostProcessingEffect.cs
@@ -0,0 +1,111 @@
+using Ryujinx.Common;
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Shader;
+using Ryujinx.Graphics.Shader.Translation;
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan.Effects
+{
+ internal partial class FxaaPostProcessingEffect : IPostProcessingEffect
+ {
+ private readonly VulkanRenderer _renderer;
+ private ISampler _samplerLinear;
+ private ShaderCollection _shaderProgram;
+
+ private PipelineHelperShader _pipeline;
+ private TextureView _texture;
+
+ public FxaaPostProcessingEffect(VulkanRenderer renderer, Device device)
+ {
+ _renderer = renderer;
+ _pipeline = new PipelineHelperShader(renderer, device);
+
+ Initialize();
+ }
+
+ public void Dispose()
+ {
+ _shaderProgram.Dispose();
+ _pipeline.Dispose();
+ _samplerLinear.Dispose();
+ _texture?.Dispose();
+ }
+
+ private void Initialize()
+ {
+ _pipeline.Initialize();
+
+ var shader = EmbeddedResources.Read("Ryujinx.Graphics.Vulkan/Effects/Shaders/Fxaa.spv");
+
+ var computeBindings = new ShaderBindings(
+ new[] { 2 },
+ Array.Empty<int>(),
+ new[] { 1 },
+ new[] { 0 });
+
+ _samplerLinear = _renderer.CreateSampler(GAL.SamplerCreateInfo.Create(MinFilter.Linear, MagFilter.Linear));
+
+ _shaderProgram = _renderer.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(shader, computeBindings, ShaderStage.Compute, TargetLanguage.Spirv)
+ });
+ }
+
+ public TextureView Run(TextureView view, CommandBufferScoped cbs, int width, int height)
+ {
+ if (_texture == null || _texture.Width != view.Width || _texture.Height != view.Height)
+ {
+ _texture?.Dispose();
+
+ var info = view.Info;
+
+ if (view.Info.Format.IsBgr())
+ {
+ info = new TextureCreateInfo(info.Width,
+ info.Height,
+ info.Depth,
+ info.Levels,
+ info.Samples,
+ info.BlockWidth,
+ info.BlockHeight,
+ info.BytesPerPixel,
+ info.Format,
+ info.DepthStencilMode,
+ info.Target,
+ info.SwizzleB,
+ info.SwizzleG,
+ info.SwizzleR,
+ info.SwizzleA);
+ }
+ _texture = _renderer.CreateTexture(info, view.ScaleFactor) as TextureView;
+ }
+
+ _pipeline.SetCommandBuffer(cbs);
+ _pipeline.SetProgram(_shaderProgram);
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 1, view, _samplerLinear);
+
+ ReadOnlySpan<float> resolutionBuffer = stackalloc float[] { view.Width, view.Height };
+ int rangeSize = resolutionBuffer.Length * sizeof(float);
+ var bufferHandle = _renderer.BufferManager.CreateWithHandle(_renderer, rangeSize);
+
+ _renderer.BufferManager.SetData(bufferHandle, 0, resolutionBuffer);
+
+ var bufferRanges = new BufferRange(bufferHandle, 0, rangeSize);
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(2, bufferRanges) });
+
+ var dispatchX = BitUtils.DivRoundUp(view.Width, IPostProcessingEffect.LocalGroupSize);
+ var dispatchY = BitUtils.DivRoundUp(view.Height, IPostProcessingEffect.LocalGroupSize);
+
+ _pipeline.SetImage(0, _texture, GAL.Format.R8G8B8A8Unorm);
+ _pipeline.DispatchCompute(dispatchX, dispatchY, 1);
+
+ _renderer.BufferManager.Delete(bufferHandle);
+ _pipeline.ComputeBarrier();
+
+ _pipeline.Finish();
+
+ return _texture;
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/IPostProcessingEffect.cs b/src/Ryujinx.Graphics.Vulkan/Effects/IPostProcessingEffect.cs
new file mode 100644
index 00000000..d36cf01d
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/IPostProcessingEffect.cs
@@ -0,0 +1,10 @@
+using System;
+
+namespace Ryujinx.Graphics.Vulkan.Effects
+{
+ internal interface IPostProcessingEffect : IDisposable
+ {
+ const int LocalGroupSize = 64;
+ TextureView Run(TextureView view, CommandBufferScoped cbs, int width, int height);
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/IScalingFilter.cs b/src/Ryujinx.Graphics.Vulkan/Effects/IScalingFilter.cs
new file mode 100644
index 00000000..54f809d7
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/IScalingFilter.cs
@@ -0,0 +1,20 @@
+using Silk.NET.Vulkan;
+using System;
+using Extent2D = Ryujinx.Graphics.GAL.Extents2D;
+
+namespace Ryujinx.Graphics.Vulkan.Effects
+{
+ internal interface IScalingFilter : IDisposable
+ {
+ float Level { get; set; }
+ void Run(
+ TextureView view,
+ CommandBufferScoped cbs,
+ Auto<DisposableImageView> destinationTexture,
+ Format format,
+ int width,
+ int height,
+ Extent2D source,
+ Extent2D destination);
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrScaling.glsl b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrScaling.glsl
new file mode 100644
index 00000000..5eb74b3d
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrScaling.glsl
@@ -0,0 +1,3945 @@
+// Scaling
+
+#version 430 core
+layout (local_size_x = 64) in;
+layout( rgba8, binding = 0, set = 3) uniform image2D imgOutput;
+layout( binding = 1, set = 2) uniform sampler2D Source;
+layout( binding = 2 ) uniform dimensions{
+ float srcX0;
+ float srcX1;
+ float srcY0;
+ float srcY1;
+ float dstX0;
+ float dstX1;
+ float dstY0;
+ float dstY1;
+ float scaleX;
+ float scaleY;
+};
+
+#define A_GPU 1
+#define A_GLSL 1
+//==============================================================================================================================
+//
+// [A] SHADER PORTABILITY 1.20210629
+//
+//==============================================================================================================================
+// FidelityFX Super Resolution Sample
+//
+// Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files(the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions :
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//------------------------------------------------------------------------------------------------------------------------------
+// MIT LICENSE
+// ===========
+// Copyright (c) 2014 Michal Drobot (for concepts used in "FLOAT APPROXIMATIONS").
+// -----------
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
+// files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
+// modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+// -----------
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+// Software.
+// -----------
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+//------------------------------------------------------------------------------------------------------------------------------
+// ABOUT
+// =====
+// Common central point for high-level shading language and C portability for various shader headers.
+//------------------------------------------------------------------------------------------------------------------------------
+// DEFINES
+// =======
+// A_CPU ..... Include the CPU related code.
+// A_GPU ..... Include the GPU related code.
+// A_GLSL .... Using GLSL.
+// A_HLSL .... Using HLSL.
+// A_HLSL_6_2 Using HLSL 6.2 with new 'uint16_t' and related types (requires '-enable-16bit-types').
+// A_NO_16_BIT_CAST Don't use instructions that are not available in SPIR-V (needed for running A_HLSL_6_2 on Vulkan)
+// A_GCC ..... Using a GCC compatible compiler (else assume MSVC compatible compiler by default).
+// =======
+// A_BYTE .... Support 8-bit integer.
+// A_HALF .... Support 16-bit integer and floating point.
+// A_LONG .... Support 64-bit integer.
+// A_DUBL .... Support 64-bit floating point.
+// =======
+// A_WAVE .... Support wave-wide operations.
+//------------------------------------------------------------------------------------------------------------------------------
+// To get #include "ffx_a.h" working in GLSL use '#extension GL_GOOGLE_include_directive:require'.
+//------------------------------------------------------------------------------------------------------------------------------
+// SIMPLIFIED TYPE SYSTEM
+// ======================
+// - All ints will be unsigned with exception of when signed is required.
+// - Type naming simplified and shortened "A<type><#components>",
+// - H = 16-bit float (half)
+// - F = 32-bit float (float)
+// - D = 64-bit float (double)
+// - P = 1-bit integer (predicate, not using bool because 'B' is used for byte)
+// - B = 8-bit integer (byte)
+// - W = 16-bit integer (word)
+// - U = 32-bit integer (unsigned)
+// - L = 64-bit integer (long)
+// - Using "AS<type><#components>" for signed when required.
+//------------------------------------------------------------------------------------------------------------------------------
+// TODO
+// ====
+// - Make sure 'ALerp*(a,b,m)' does 'b*m+(-a*m+a)' (2 ops).
+//------------------------------------------------------------------------------------------------------------------------------
+// CHANGE LOG
+// ==========
+// 20200914 - Expanded wave ops and prx code.
+// 20200713 - Added [ZOL] section, fixed serious bugs in sRGB and Rec.709 color conversion code, etc.
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// COMMON
+//==============================================================================================================================
+#define A_2PI 6.28318530718
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// CPU
+//
+//
+//==============================================================================================================================
+#ifdef A_CPU
+ // Supporting user defined overrides.
+ #ifndef A_RESTRICT
+ #define A_RESTRICT __restrict
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifndef A_STATIC
+ #define A_STATIC static
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ // Same types across CPU and GPU.
+ // Predicate uses 32-bit integer (C friendly bool).
+ typedef uint32_t AP1;
+ typedef float AF1;
+ typedef double AD1;
+ typedef uint8_t AB1;
+ typedef uint16_t AW1;
+ typedef uint32_t AU1;
+ typedef uint64_t AL1;
+ typedef int8_t ASB1;
+ typedef int16_t ASW1;
+ typedef int32_t ASU1;
+ typedef int64_t ASL1;
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AD1_(a) ((AD1)(a))
+ #define AF1_(a) ((AF1)(a))
+ #define AL1_(a) ((AL1)(a))
+ #define AU1_(a) ((AU1)(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASL1_(a) ((ASL1)(a))
+ #define ASU1_(a) ((ASU1)(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AU1 AU1_AF1(AF1 a){union{AF1 f;AU1 u;}bits;bits.f=a;return bits.u;}
+//------------------------------------------------------------------------------------------------------------------------------
+ #define A_TRUE 1
+ #define A_FALSE 0
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// CPU/GPU PORTING
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// Get CPU and GPU to share all setup code, without duplicate code paths.
+// This uses a lower-case prefix for special vector constructs.
+// - In C restrict pointers are used.
+// - In the shading language, in/inout/out arguments are used.
+// This depends on the ability to access a vector value in both languages via array syntax (aka color[2]).
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// VECTOR ARGUMENT/RETURN/INITIALIZATION PORTABILITY
+//==============================================================================================================================
+ #define retAD2 AD1 *A_RESTRICT
+ #define retAD3 AD1 *A_RESTRICT
+ #define retAD4 AD1 *A_RESTRICT
+ #define retAF2 AF1 *A_RESTRICT
+ #define retAF3 AF1 *A_RESTRICT
+ #define retAF4 AF1 *A_RESTRICT
+ #define retAL2 AL1 *A_RESTRICT
+ #define retAL3 AL1 *A_RESTRICT
+ #define retAL4 AL1 *A_RESTRICT
+ #define retAU2 AU1 *A_RESTRICT
+ #define retAU3 AU1 *A_RESTRICT
+ #define retAU4 AU1 *A_RESTRICT
+//------------------------------------------------------------------------------------------------------------------------------
+ #define inAD2 AD1 *A_RESTRICT
+ #define inAD3 AD1 *A_RESTRICT
+ #define inAD4 AD1 *A_RESTRICT
+ #define inAF2 AF1 *A_RESTRICT
+ #define inAF3 AF1 *A_RESTRICT
+ #define inAF4 AF1 *A_RESTRICT
+ #define inAL2 AL1 *A_RESTRICT
+ #define inAL3 AL1 *A_RESTRICT
+ #define inAL4 AL1 *A_RESTRICT
+ #define inAU2 AU1 *A_RESTRICT
+ #define inAU3 AU1 *A_RESTRICT
+ #define inAU4 AU1 *A_RESTRICT
+//------------------------------------------------------------------------------------------------------------------------------
+ #define inoutAD2 AD1 *A_RESTRICT
+ #define inoutAD3 AD1 *A_RESTRICT
+ #define inoutAD4 AD1 *A_RESTRICT
+ #define inoutAF2 AF1 *A_RESTRICT
+ #define inoutAF3 AF1 *A_RESTRICT
+ #define inoutAF4 AF1 *A_RESTRICT
+ #define inoutAL2 AL1 *A_RESTRICT
+ #define inoutAL3 AL1 *A_RESTRICT
+ #define inoutAL4 AL1 *A_RESTRICT
+ #define inoutAU2 AU1 *A_RESTRICT
+ #define inoutAU3 AU1 *A_RESTRICT
+ #define inoutAU4 AU1 *A_RESTRICT
+//------------------------------------------------------------------------------------------------------------------------------
+ #define outAD2 AD1 *A_RESTRICT
+ #define outAD3 AD1 *A_RESTRICT
+ #define outAD4 AD1 *A_RESTRICT
+ #define outAF2 AF1 *A_RESTRICT
+ #define outAF3 AF1 *A_RESTRICT
+ #define outAF4 AF1 *A_RESTRICT
+ #define outAL2 AL1 *A_RESTRICT
+ #define outAL3 AL1 *A_RESTRICT
+ #define outAL4 AL1 *A_RESTRICT
+ #define outAU2 AU1 *A_RESTRICT
+ #define outAU3 AU1 *A_RESTRICT
+ #define outAU4 AU1 *A_RESTRICT
+//------------------------------------------------------------------------------------------------------------------------------
+ #define varAD2(x) AD1 x[2]
+ #define varAD3(x) AD1 x[3]
+ #define varAD4(x) AD1 x[4]
+ #define varAF2(x) AF1 x[2]
+ #define varAF3(x) AF1 x[3]
+ #define varAF4(x) AF1 x[4]
+ #define varAL2(x) AL1 x[2]
+ #define varAL3(x) AL1 x[3]
+ #define varAL4(x) AL1 x[4]
+ #define varAU2(x) AU1 x[2]
+ #define varAU3(x) AU1 x[3]
+ #define varAU4(x) AU1 x[4]
+//------------------------------------------------------------------------------------------------------------------------------
+ #define initAD2(x,y) {x,y}
+ #define initAD3(x,y,z) {x,y,z}
+ #define initAD4(x,y,z,w) {x,y,z,w}
+ #define initAF2(x,y) {x,y}
+ #define initAF3(x,y,z) {x,y,z}
+ #define initAF4(x,y,z,w) {x,y,z,w}
+ #define initAL2(x,y) {x,y}
+ #define initAL3(x,y,z) {x,y,z}
+ #define initAL4(x,y,z,w) {x,y,z,w}
+ #define initAU2(x,y) {x,y}
+ #define initAU3(x,y,z) {x,y,z}
+ #define initAU4(x,y,z,w) {x,y,z,w}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// SCALAR RETURN OPS
+//------------------------------------------------------------------------------------------------------------------------------
+// TODO
+// ====
+// - Replace transcendentals with manual versions.
+//==============================================================================================================================
+ #ifdef A_GCC
+ A_STATIC AD1 AAbsD1(AD1 a){return __builtin_fabs(a);}
+ A_STATIC AF1 AAbsF1(AF1 a){return __builtin_fabsf(a);}
+ A_STATIC AU1 AAbsSU1(AU1 a){return AU1_(__builtin_abs(ASU1_(a)));}
+ A_STATIC AL1 AAbsSL1(AL1 a){return AL1_(__builtin_llabs(ASL1_(a)));}
+ #else
+ A_STATIC AD1 AAbsD1(AD1 a){return fabs(a);}
+ A_STATIC AF1 AAbsF1(AF1 a){return fabsf(a);}
+ A_STATIC AU1 AAbsSU1(AU1 a){return AU1_(abs(ASU1_(a)));}
+ A_STATIC AL1 AAbsSL1(AL1 a){return AL1_(labs((long)ASL1_(a)));}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_GCC
+ A_STATIC AD1 ACosD1(AD1 a){return __builtin_cos(a);}
+ A_STATIC AF1 ACosF1(AF1 a){return __builtin_cosf(a);}
+ #else
+ A_STATIC AD1 ACosD1(AD1 a){return cos(a);}
+ A_STATIC AF1 ACosF1(AF1 a){return cosf(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 ADotD2(inAD2 a,inAD2 b){return a[0]*b[0]+a[1]*b[1];}
+ A_STATIC AD1 ADotD3(inAD3 a,inAD3 b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2];}
+ A_STATIC AD1 ADotD4(inAD4 a,inAD4 b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3];}
+ A_STATIC AF1 ADotF2(inAF2 a,inAF2 b){return a[0]*b[0]+a[1]*b[1];}
+ A_STATIC AF1 ADotF3(inAF3 a,inAF3 b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2];}
+ A_STATIC AF1 ADotF4(inAF4 a,inAF4 b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3];}
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_GCC
+ A_STATIC AD1 AExp2D1(AD1 a){return __builtin_exp2(a);}
+ A_STATIC AF1 AExp2F1(AF1 a){return __builtin_exp2f(a);}
+ #else
+ A_STATIC AD1 AExp2D1(AD1 a){return exp2(a);}
+ A_STATIC AF1 AExp2F1(AF1 a){return exp2f(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_GCC
+ A_STATIC AD1 AFloorD1(AD1 a){return __builtin_floor(a);}
+ A_STATIC AF1 AFloorF1(AF1 a){return __builtin_floorf(a);}
+ #else
+ A_STATIC AD1 AFloorD1(AD1 a){return floor(a);}
+ A_STATIC AF1 AFloorF1(AF1 a){return floorf(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 ALerpD1(AD1 a,AD1 b,AD1 c){return b*c+(-a*c+a);}
+ A_STATIC AF1 ALerpF1(AF1 a,AF1 b,AF1 c){return b*c+(-a*c+a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_GCC
+ A_STATIC AD1 ALog2D1(AD1 a){return __builtin_log2(a);}
+ A_STATIC AF1 ALog2F1(AF1 a){return __builtin_log2f(a);}
+ #else
+ A_STATIC AD1 ALog2D1(AD1 a){return log2(a);}
+ A_STATIC AF1 ALog2F1(AF1 a){return log2f(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 AMaxD1(AD1 a,AD1 b){return a>b?a:b;}
+ A_STATIC AF1 AMaxF1(AF1 a,AF1 b){return a>b?a:b;}
+ A_STATIC AL1 AMaxL1(AL1 a,AL1 b){return a>b?a:b;}
+ A_STATIC AU1 AMaxU1(AU1 a,AU1 b){return a>b?a:b;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // These follow the convention that A integer types don't have signage, until they are operated on.
+ A_STATIC AL1 AMaxSL1(AL1 a,AL1 b){return (ASL1_(a)>ASL1_(b))?a:b;}
+ A_STATIC AU1 AMaxSU1(AU1 a,AU1 b){return (ASU1_(a)>ASU1_(b))?a:b;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 AMinD1(AD1 a,AD1 b){return a<b?a:b;}
+ A_STATIC AF1 AMinF1(AF1 a,AF1 b){return a<b?a:b;}
+ A_STATIC AL1 AMinL1(AL1 a,AL1 b){return a<b?a:b;}
+ A_STATIC AU1 AMinU1(AU1 a,AU1 b){return a<b?a:b;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AL1 AMinSL1(AL1 a,AL1 b){return (ASL1_(a)<ASL1_(b))?a:b;}
+ A_STATIC AU1 AMinSU1(AU1 a,AU1 b){return (ASU1_(a)<ASU1_(b))?a:b;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 ARcpD1(AD1 a){return 1.0/a;}
+ A_STATIC AF1 ARcpF1(AF1 a){return 1.0f/a;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AL1 AShrSL1(AL1 a,AL1 b){return AL1_(ASL1_(a)>>ASL1_(b));}
+ A_STATIC AU1 AShrSU1(AU1 a,AU1 b){return AU1_(ASU1_(a)>>ASU1_(b));}
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_GCC
+ A_STATIC AD1 ASinD1(AD1 a){return __builtin_sin(a);}
+ A_STATIC AF1 ASinF1(AF1 a){return __builtin_sinf(a);}
+ #else
+ A_STATIC AD1 ASinD1(AD1 a){return sin(a);}
+ A_STATIC AF1 ASinF1(AF1 a){return sinf(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_GCC
+ A_STATIC AD1 ASqrtD1(AD1 a){return __builtin_sqrt(a);}
+ A_STATIC AF1 ASqrtF1(AF1 a){return __builtin_sqrtf(a);}
+ #else
+ A_STATIC AD1 ASqrtD1(AD1 a){return sqrt(a);}
+ A_STATIC AF1 ASqrtF1(AF1 a){return sqrtf(a);}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// SCALAR RETURN OPS - DEPENDENT
+//==============================================================================================================================
+ A_STATIC AD1 AClampD1(AD1 x,AD1 n,AD1 m){return AMaxD1(n,AMinD1(x,m));}
+ A_STATIC AF1 AClampF1(AF1 x,AF1 n,AF1 m){return AMaxF1(n,AMinF1(x,m));}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 AFractD1(AD1 a){return a-AFloorD1(a);}
+ A_STATIC AF1 AFractF1(AF1 a){return a-AFloorF1(a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 APowD1(AD1 a,AD1 b){return AExp2D1(b*ALog2D1(a));}
+ A_STATIC AF1 APowF1(AF1 a,AF1 b){return AExp2F1(b*ALog2F1(a));}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 ARsqD1(AD1 a){return ARcpD1(ASqrtD1(a));}
+ A_STATIC AF1 ARsqF1(AF1 a){return ARcpF1(ASqrtF1(a));}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 ASatD1(AD1 a){return AMinD1(1.0,AMaxD1(0.0,a));}
+ A_STATIC AF1 ASatF1(AF1 a){return AMinF1(1.0f,AMaxF1(0.0f,a));}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// VECTOR OPS
+//------------------------------------------------------------------------------------------------------------------------------
+// These are added as needed for production or prototyping, so not necessarily a complete set.
+// They follow a convention of taking in a destination and also returning the destination value to increase utility.
+//==============================================================================================================================
+ A_STATIC retAD2 opAAbsD2(outAD2 d,inAD2 a){d[0]=AAbsD1(a[0]);d[1]=AAbsD1(a[1]);return d;}
+ A_STATIC retAD3 opAAbsD3(outAD3 d,inAD3 a){d[0]=AAbsD1(a[0]);d[1]=AAbsD1(a[1]);d[2]=AAbsD1(a[2]);return d;}
+ A_STATIC retAD4 opAAbsD4(outAD4 d,inAD4 a){d[0]=AAbsD1(a[0]);d[1]=AAbsD1(a[1]);d[2]=AAbsD1(a[2]);d[3]=AAbsD1(a[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAAbsF2(outAF2 d,inAF2 a){d[0]=AAbsF1(a[0]);d[1]=AAbsF1(a[1]);return d;}
+ A_STATIC retAF3 opAAbsF3(outAF3 d,inAF3 a){d[0]=AAbsF1(a[0]);d[1]=AAbsF1(a[1]);d[2]=AAbsF1(a[2]);return d;}
+ A_STATIC retAF4 opAAbsF4(outAF4 d,inAF4 a){d[0]=AAbsF1(a[0]);d[1]=AAbsF1(a[1]);d[2]=AAbsF1(a[2]);d[3]=AAbsF1(a[3]);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAAddD2(outAD2 d,inAD2 a,inAD2 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];return d;}
+ A_STATIC retAD3 opAAddD3(outAD3 d,inAD3 a,inAD3 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];d[2]=a[2]+b[2];return d;}
+ A_STATIC retAD4 opAAddD4(outAD4 d,inAD4 a,inAD4 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];d[2]=a[2]+b[2];d[3]=a[3]+b[3];return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAAddF2(outAF2 d,inAF2 a,inAF2 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];return d;}
+ A_STATIC retAF3 opAAddF3(outAF3 d,inAF3 a,inAF3 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];d[2]=a[2]+b[2];return d;}
+ A_STATIC retAF4 opAAddF4(outAF4 d,inAF4 a,inAF4 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];d[2]=a[2]+b[2];d[3]=a[3]+b[3];return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAAddOneD2(outAD2 d,inAD2 a,AD1 b){d[0]=a[0]+b;d[1]=a[1]+b;return d;}
+ A_STATIC retAD3 opAAddOneD3(outAD3 d,inAD3 a,AD1 b){d[0]=a[0]+b;d[1]=a[1]+b;d[2]=a[2]+b;return d;}
+ A_STATIC retAD4 opAAddOneD4(outAD4 d,inAD4 a,AD1 b){d[0]=a[0]+b;d[1]=a[1]+b;d[2]=a[2]+b;d[3]=a[3]+b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAAddOneF2(outAF2 d,inAF2 a,AF1 b){d[0]=a[0]+b;d[1]=a[1]+b;return d;}
+ A_STATIC retAF3 opAAddOneF3(outAF3 d,inAF3 a,AF1 b){d[0]=a[0]+b;d[1]=a[1]+b;d[2]=a[2]+b;return d;}
+ A_STATIC retAF4 opAAddOneF4(outAF4 d,inAF4 a,AF1 b){d[0]=a[0]+b;d[1]=a[1]+b;d[2]=a[2]+b;d[3]=a[3]+b;return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opACpyD2(outAD2 d,inAD2 a){d[0]=a[0];d[1]=a[1];return d;}
+ A_STATIC retAD3 opACpyD3(outAD3 d,inAD3 a){d[0]=a[0];d[1]=a[1];d[2]=a[2];return d;}
+ A_STATIC retAD4 opACpyD4(outAD4 d,inAD4 a){d[0]=a[0];d[1]=a[1];d[2]=a[2];d[3]=a[3];return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opACpyF2(outAF2 d,inAF2 a){d[0]=a[0];d[1]=a[1];return d;}
+ A_STATIC retAF3 opACpyF3(outAF3 d,inAF3 a){d[0]=a[0];d[1]=a[1];d[2]=a[2];return d;}
+ A_STATIC retAF4 opACpyF4(outAF4 d,inAF4 a){d[0]=a[0];d[1]=a[1];d[2]=a[2];d[3]=a[3];return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opALerpD2(outAD2 d,inAD2 a,inAD2 b,inAD2 c){d[0]=ALerpD1(a[0],b[0],c[0]);d[1]=ALerpD1(a[1],b[1],c[1]);return d;}
+ A_STATIC retAD3 opALerpD3(outAD3 d,inAD3 a,inAD3 b,inAD3 c){d[0]=ALerpD1(a[0],b[0],c[0]);d[1]=ALerpD1(a[1],b[1],c[1]);d[2]=ALerpD1(a[2],b[2],c[2]);return d;}
+ A_STATIC retAD4 opALerpD4(outAD4 d,inAD4 a,inAD4 b,inAD4 c){d[0]=ALerpD1(a[0],b[0],c[0]);d[1]=ALerpD1(a[1],b[1],c[1]);d[2]=ALerpD1(a[2],b[2],c[2]);d[3]=ALerpD1(a[3],b[3],c[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opALerpF2(outAF2 d,inAF2 a,inAF2 b,inAF2 c){d[0]=ALerpF1(a[0],b[0],c[0]);d[1]=ALerpF1(a[1],b[1],c[1]);return d;}
+ A_STATIC retAF3 opALerpF3(outAF3 d,inAF3 a,inAF3 b,inAF3 c){d[0]=ALerpF1(a[0],b[0],c[0]);d[1]=ALerpF1(a[1],b[1],c[1]);d[2]=ALerpF1(a[2],b[2],c[2]);return d;}
+ A_STATIC retAF4 opALerpF4(outAF4 d,inAF4 a,inAF4 b,inAF4 c){d[0]=ALerpF1(a[0],b[0],c[0]);d[1]=ALerpF1(a[1],b[1],c[1]);d[2]=ALerpF1(a[2],b[2],c[2]);d[3]=ALerpF1(a[3],b[3],c[3]);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opALerpOneD2(outAD2 d,inAD2 a,inAD2 b,AD1 c){d[0]=ALerpD1(a[0],b[0],c);d[1]=ALerpD1(a[1],b[1],c);return d;}
+ A_STATIC retAD3 opALerpOneD3(outAD3 d,inAD3 a,inAD3 b,AD1 c){d[0]=ALerpD1(a[0],b[0],c);d[1]=ALerpD1(a[1],b[1],c);d[2]=ALerpD1(a[2],b[2],c);return d;}
+ A_STATIC retAD4 opALerpOneD4(outAD4 d,inAD4 a,inAD4 b,AD1 c){d[0]=ALerpD1(a[0],b[0],c);d[1]=ALerpD1(a[1],b[1],c);d[2]=ALerpD1(a[2],b[2],c);d[3]=ALerpD1(a[3],b[3],c);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opALerpOneF2(outAF2 d,inAF2 a,inAF2 b,AF1 c){d[0]=ALerpF1(a[0],b[0],c);d[1]=ALerpF1(a[1],b[1],c);return d;}
+ A_STATIC retAF3 opALerpOneF3(outAF3 d,inAF3 a,inAF3 b,AF1 c){d[0]=ALerpF1(a[0],b[0],c);d[1]=ALerpF1(a[1],b[1],c);d[2]=ALerpF1(a[2],b[2],c);return d;}
+ A_STATIC retAF4 opALerpOneF4(outAF4 d,inAF4 a,inAF4 b,AF1 c){d[0]=ALerpF1(a[0],b[0],c);d[1]=ALerpF1(a[1],b[1],c);d[2]=ALerpF1(a[2],b[2],c);d[3]=ALerpF1(a[3],b[3],c);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAMaxD2(outAD2 d,inAD2 a,inAD2 b){d[0]=AMaxD1(a[0],b[0]);d[1]=AMaxD1(a[1],b[1]);return d;}
+ A_STATIC retAD3 opAMaxD3(outAD3 d,inAD3 a,inAD3 b){d[0]=AMaxD1(a[0],b[0]);d[1]=AMaxD1(a[1],b[1]);d[2]=AMaxD1(a[2],b[2]);return d;}
+ A_STATIC retAD4 opAMaxD4(outAD4 d,inAD4 a,inAD4 b){d[0]=AMaxD1(a[0],b[0]);d[1]=AMaxD1(a[1],b[1]);d[2]=AMaxD1(a[2],b[2]);d[3]=AMaxD1(a[3],b[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAMaxF2(outAF2 d,inAF2 a,inAF2 b){d[0]=AMaxF1(a[0],b[0]);d[1]=AMaxF1(a[1],b[1]);return d;}
+ A_STATIC retAF3 opAMaxF3(outAF3 d,inAF3 a,inAF3 b){d[0]=AMaxF1(a[0],b[0]);d[1]=AMaxF1(a[1],b[1]);d[2]=AMaxF1(a[2],b[2]);return d;}
+ A_STATIC retAF4 opAMaxF4(outAF4 d,inAF4 a,inAF4 b){d[0]=AMaxF1(a[0],b[0]);d[1]=AMaxF1(a[1],b[1]);d[2]=AMaxF1(a[2],b[2]);d[3]=AMaxF1(a[3],b[3]);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAMinD2(outAD2 d,inAD2 a,inAD2 b){d[0]=AMinD1(a[0],b[0]);d[1]=AMinD1(a[1],b[1]);return d;}
+ A_STATIC retAD3 opAMinD3(outAD3 d,inAD3 a,inAD3 b){d[0]=AMinD1(a[0],b[0]);d[1]=AMinD1(a[1],b[1]);d[2]=AMinD1(a[2],b[2]);return d;}
+ A_STATIC retAD4 opAMinD4(outAD4 d,inAD4 a,inAD4 b){d[0]=AMinD1(a[0],b[0]);d[1]=AMinD1(a[1],b[1]);d[2]=AMinD1(a[2],b[2]);d[3]=AMinD1(a[3],b[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAMinF2(outAF2 d,inAF2 a,inAF2 b){d[0]=AMinF1(a[0],b[0]);d[1]=AMinF1(a[1],b[1]);return d;}
+ A_STATIC retAF3 opAMinF3(outAF3 d,inAF3 a,inAF3 b){d[0]=AMinF1(a[0],b[0]);d[1]=AMinF1(a[1],b[1]);d[2]=AMinF1(a[2],b[2]);return d;}
+ A_STATIC retAF4 opAMinF4(outAF4 d,inAF4 a,inAF4 b){d[0]=AMinF1(a[0],b[0]);d[1]=AMinF1(a[1],b[1]);d[2]=AMinF1(a[2],b[2]);d[3]=AMinF1(a[3],b[3]);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAMulD2(outAD2 d,inAD2 a,inAD2 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];return d;}
+ A_STATIC retAD3 opAMulD3(outAD3 d,inAD3 a,inAD3 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];d[2]=a[2]*b[2];return d;}
+ A_STATIC retAD4 opAMulD4(outAD4 d,inAD4 a,inAD4 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];d[2]=a[2]*b[2];d[3]=a[3]*b[3];return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Component-wise product of two float vectors: d[i]=a[i]*b[i]; returns 'd' for chaining.
+ A_STATIC retAF2 opAMulF2(outAF2 d,inAF2 a,inAF2 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];return d;}
+ A_STATIC retAF3 opAMulF3(outAF3 d,inAF3 a,inAF3 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];d[2]=a[2]*b[2];return d;}
+ A_STATIC retAF4 opAMulF4(outAF4 d,inAF4 a,inAF4 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];d[2]=a[2]*b[2];d[3]=a[3]*b[3];return d;}
+//==============================================================================================================================
+ // Double vector scaled by one scalar 'b': d[i]=a[i]*b; returns 'd' for chaining.
+ A_STATIC retAD2 opAMulOneD2(outAD2 d,inAD2 a,AD1 b){d[0]=a[0]*b;d[1]=a[1]*b;return d;}
+ A_STATIC retAD3 opAMulOneD3(outAD3 d,inAD3 a,AD1 b){d[0]=a[0]*b;d[1]=a[1]*b;d[2]=a[2]*b;return d;}
+ A_STATIC retAD4 opAMulOneD4(outAD4 d,inAD4 a,AD1 b){d[0]=a[0]*b;d[1]=a[1]*b;d[2]=a[2]*b;d[3]=a[3]*b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Float vector scaled by one scalar 'b': d[i]=a[i]*b; returns 'd' for chaining.
+ A_STATIC retAF2 opAMulOneF2(outAF2 d,inAF2 a,AF1 b){d[0]=a[0]*b;d[1]=a[1]*b;return d;}
+ A_STATIC retAF3 opAMulOneF3(outAF3 d,inAF3 a,AF1 b){d[0]=a[0]*b;d[1]=a[1]*b;d[2]=a[2]*b;return d;}
+ A_STATIC retAF4 opAMulOneF4(outAF4 d,inAF4 a,AF1 b){d[0]=a[0]*b;d[1]=a[1]*b;d[2]=a[2]*b;d[3]=a[3]*b;return d;}
+//==============================================================================================================================
+ // Component-wise negation of a double vector: d[i]=-a[i]; returns 'd' for chaining.
+ A_STATIC retAD2 opANegD2(outAD2 d,inAD2 a){d[0]=-a[0];d[1]=-a[1];return d;}
+ A_STATIC retAD3 opANegD3(outAD3 d,inAD3 a){d[0]=-a[0];d[1]=-a[1];d[2]=-a[2];return d;}
+ A_STATIC retAD4 opANegD4(outAD4 d,inAD4 a){d[0]=-a[0];d[1]=-a[1];d[2]=-a[2];d[3]=-a[3];return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Component-wise negation of a float vector: d[i]=-a[i]; returns 'd' for chaining.
+ A_STATIC retAF2 opANegF2(outAF2 d,inAF2 a){d[0]=-a[0];d[1]=-a[1];return d;}
+ A_STATIC retAF3 opANegF3(outAF3 d,inAF3 a){d[0]=-a[0];d[1]=-a[1];d[2]=-a[2];return d;}
+ A_STATIC retAF4 opANegF4(outAF4 d,inAF4 a){d[0]=-a[0];d[1]=-a[1];d[2]=-a[2];d[3]=-a[3];return d;}
+//==============================================================================================================================
+ // Component-wise reciprocal of a double vector via ARcpD1; returns 'd' for chaining.
+ A_STATIC retAD2 opARcpD2(outAD2 d,inAD2 a){d[0]=ARcpD1(a[0]);d[1]=ARcpD1(a[1]);return d;}
+ A_STATIC retAD3 opARcpD3(outAD3 d,inAD3 a){d[0]=ARcpD1(a[0]);d[1]=ARcpD1(a[1]);d[2]=ARcpD1(a[2]);return d;}
+ A_STATIC retAD4 opARcpD4(outAD4 d,inAD4 a){d[0]=ARcpD1(a[0]);d[1]=ARcpD1(a[1]);d[2]=ARcpD1(a[2]);d[3]=ARcpD1(a[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Component-wise reciprocal of a float vector via ARcpF1; returns 'd' for chaining.
+ A_STATIC retAF2 opARcpF2(outAF2 d,inAF2 a){d[0]=ARcpF1(a[0]);d[1]=ARcpF1(a[1]);return d;}
+ A_STATIC retAF3 opARcpF3(outAF3 d,inAF3 a){d[0]=ARcpF1(a[0]);d[1]=ARcpF1(a[1]);d[2]=ARcpF1(a[2]);return d;}
+ A_STATIC retAF4 opARcpF4(outAF4 d,inAF4 a){d[0]=ARcpF1(a[0]);d[1]=ARcpF1(a[1]);d[2]=ARcpF1(a[2]);d[3]=ARcpF1(a[3]);return d;}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HALF FLOAT PACKING
+//==============================================================================================================================
+ // Convert float to half (in lower 16-bits of output).
+ // Same fast technique as documented here: ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf
+ // Supports denormals.
+ // Conversion rules are chosen to make computations possibly "safer" on the GPU:
+ // -INF & -NaN -> -65504
+ // +INF & +NaN -> +65504
+ A_STATIC AU1 AU1_AH1_AF1(AF1 f){
+ // Table-driven conversion: the float's top 9 bits (sign + 8-bit exponent, u>>23) index 512 entries.
+ // 'base' is the half bit pattern contributed by that sign/exponent class:
+ //   0x0000 rows = zero/underflow, the 0x0001..0x0100 run = denormal-result bases,
+ //   0x0200..0x7800 = normal half exponents, 0x7bff rows = +65504 clamp for overflow/INF/NaN,
+ //   and the 0x8000-prefixed second half mirrors all of the above with the sign bit set.
+ static AW1 base[512]={
+ 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+ 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+ 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+ 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+ 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+ 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+ 0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0001,0x0002,0x0004,0x0008,0x0010,0x0020,0x0040,0x0080,0x0100,
+ 0x0200,0x0400,0x0800,0x0c00,0x1000,0x1400,0x1800,0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,
+ 0x4000,0x4400,0x4800,0x4c00,0x5000,0x5400,0x5800,0x5c00,0x6000,0x6400,0x6800,0x6c00,0x7000,0x7400,0x7800,0x7bff,
+ 0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+ 0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+ 0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+ 0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+ 0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+ 0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+ 0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+ 0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+ 0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+ 0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+ 0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+ 0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+ 0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+ 0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8001,0x8002,0x8004,0x8008,0x8010,0x8020,0x8040,0x8080,0x8100,
+ 0x8200,0x8400,0x8800,0x8c00,0x9000,0x9400,0x9800,0x9c00,0xa000,0xa400,0xa800,0xac00,0xb000,0xb400,0xb800,0xbc00,
+ 0xc000,0xc400,0xc800,0xcc00,0xd000,0xd400,0xd800,0xdc00,0xe000,0xe400,0xe800,0xec00,0xf000,0xf400,0xf800,0xfbff,
+ 0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+ 0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+ 0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+ 0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+ 0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+ 0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+ 0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff};
+ // 'shift' is how far the 23-bit mantissa is shifted right for that class:
+ //   0x18 (24) discards the mantissa entirely (zero and clamp cases, since the mantissa field is only 23 bits),
+ //   0x17..0x0e produce denormal halves, and 0x0d (13) is the normal-range 23->10 bit mantissa truncation.
+ static AB1 shift[512]={
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x17,0x16,0x15,0x14,0x13,0x12,0x11,0x10,0x0f,
+ 0x0e,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,
+ 0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x17,0x16,0x15,0x14,0x13,0x12,0x11,0x10,0x0f,
+ 0x0e,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,
+ 0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18};
+ // Bit-cast 'f' to its IEEE-754 representation via a union, then assemble base + shifted mantissa.
+ // Mantissa bits are truncated (plain '>>', no rounding).
+ union{AF1 f;AU1 u;}bits;bits.f=f;AU1 u=bits.u;AU1 i=u>>23;return (AU1)(base[i])+((u&0x7fffff)>>shift[i]);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Used to output packed constant.
+ // Packs two floats as halves: a[0] in the low 16 bits, a[1] in the high 16 bits
+ // (CPU-side mirror of the GLSL packHalf2x16 mapping used later in this header).
+ A_STATIC AU1 AU1_AH2_AF2(inAF2 a){return AU1_AH1_AF1(a[0])+(AU1_AH1_AF1(a[1])<<16);}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// GLSL
+//
+//
+//==============================================================================================================================
+#if defined(A_GLSL) && defined(A_GPU)
+ #ifndef A_SKIP_EXT
+ #ifdef A_HALF
+ #extension GL_EXT_shader_16bit_storage:require
+ #extension GL_EXT_shader_explicit_arithmetic_types:require
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_LONG
+ #extension GL_ARB_gpu_shader_int64:require
+ #extension GL_NV_shader_atomic_int64:require
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_WAVE
+ #extension GL_KHR_shader_subgroup_arithmetic:require
+ #extension GL_KHR_shader_subgroup_ballot:require
+ #extension GL_KHR_shader_subgroup_quad:require
+ #extension GL_KHR_shader_subgroup_shuffle:require
+ #endif
+ #endif
+//==============================================================================================================================
+ #define AP1 bool
+ #define AP2 bvec2
+ #define AP3 bvec3
+ #define AP4 bvec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AF1 float
+ #define AF2 vec2
+ #define AF3 vec3
+ #define AF4 vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1 uint
+ #define AU2 uvec2
+ #define AU3 uvec3
+ #define AU4 uvec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASU1 int
+ #define ASU2 ivec2
+ #define ASU3 ivec3
+ #define ASU4 ivec4
+//==============================================================================================================================
+ #define AF1_AU1(x) uintBitsToFloat(AU1(x))
+ #define AF2_AU2(x) uintBitsToFloat(AU2(x))
+ #define AF3_AU3(x) uintBitsToFloat(AU3(x))
+ #define AF4_AU4(x) uintBitsToFloat(AU4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1_AF1(x) floatBitsToUint(AF1(x))
+ #define AU2_AF2(x) floatBitsToUint(AF2(x))
+ #define AU3_AF3(x) floatBitsToUint(AF3(x))
+ #define AU4_AF4(x) floatBitsToUint(AF4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packs one float as a half in the low 16 bits of a uint (high 16 bits pack the constant 0.0).
+ AU1 AU1_AH1_AF1_x(AF1 a){return packHalf2x16(AF2(a,0.0));}
+ // NOTE(review): the CPU table version of AU1_AH1_AF1 clamps INF/NaN to +/-65504, while this GPU path
+ // follows packHalf2x16's conversion rules — INF/NaN results may differ between paths; confirm no caller relies on it.
+ #define AU1_AH1_AF1(a) AU1_AH1_AF1_x(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1_AH2_AF2 packHalf2x16
+ #define AU1_AW2Unorm_AF2 packUnorm2x16
+ #define AU1_AB4Unorm_AF4 packUnorm4x8
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AF2_AH2_AU1 unpackHalf2x16
+ #define AF2_AW2Unorm_AU1 unpackUnorm2x16
+ #define AF4_AB4Unorm_AU1 unpackUnorm4x8
+//==============================================================================================================================
+ AF1 AF1_x(AF1 a){return AF1(a);}
+ AF2 AF2_x(AF1 a){return AF2(a,a);}
+ AF3 AF3_x(AF1 a){return AF3(a,a,a);}
+ AF4 AF4_x(AF1 a){return AF4(a,a,a,a);}
+ #define AF1_(a) AF1_x(AF1(a))
+ #define AF2_(a) AF2_x(AF1(a))
+ #define AF3_(a) AF3_x(AF1(a))
+ #define AF4_(a) AF4_x(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AU1_x(AU1 a){return AU1(a);}
+ AU2 AU2_x(AU1 a){return AU2(a,a);}
+ AU3 AU3_x(AU1 a){return AU3(a,a,a);}
+ AU4 AU4_x(AU1 a){return AU4(a,a,a,a);}
+ #define AU1_(a) AU1_x(AU1(a))
+ #define AU2_(a) AU2_x(AU1(a))
+ #define AU3_(a) AU3_x(AU1(a))
+ #define AU4_(a) AU4_x(AU1(a))
+//==============================================================================================================================
+ AU1 AAbsSU1(AU1 a){return AU1(abs(ASU1(a)));}
+ AU2 AAbsSU2(AU2 a){return AU2(abs(ASU2(a)));}
+ AU3 AAbsSU3(AU3 a){return AU3(abs(ASU3(a)));}
+ AU4 AAbsSU4(AU4 a){return AU4(abs(ASU4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Bitfield extract: pull 'bits' bits of 'src' starting at bit 'off' (wraps GLSL bitfieldExtract).
+ AU1 ABfe(AU1 src,AU1 off,AU1 bits){return bitfieldExtract(src,ASU1(off),ASU1(bits));}
+ // Bitfield insert with an explicit mask: take 'ins' where mask bits are set, 'src' elsewhere.
+ AU1 ABfi(AU1 src,AU1 ins,AU1 mask){return (ins&mask)|(src&(~mask));}
+ // Proxy for V_BFI_B32 where the 'mask' is set as 'bits', 'mask=(1<<bits)-1', and 'bits' needs to be an immediate.
+ // i.e. insert the low 'bits' bits of 'ins' into 'src' at bit position 0.
+ AU1 ABfiM(AU1 src,AU1 ins,AU1 bits){return bitfieldInsert(src,ins,0,ASU1(bits));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_MED3_F32.
+ AF1 AClampF1(AF1 x,AF1 n,AF1 m){return clamp(x,n,m);}
+ AF2 AClampF2(AF2 x,AF2 n,AF2 m){return clamp(x,n,m);}
+ AF3 AClampF3(AF3 x,AF3 n,AF3 m){return clamp(x,n,m);}
+ AF4 AClampF4(AF4 x,AF4 n,AF4 m){return clamp(x,n,m);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_FRACT_F32 (note DX frac() is different).
+ AF1 AFractF1(AF1 x){return fract(x);}
+ AF2 AFractF2(AF2 x){return fract(x);}
+ AF3 AFractF3(AF3 x){return fract(x);}
+ AF4 AFractF4(AF4 x){return fract(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ALerpF1(AF1 x,AF1 y,AF1 a){return mix(x,y,a);}
+ AF2 ALerpF2(AF2 x,AF2 y,AF2 a){return mix(x,y,a);}
+ AF3 ALerpF3(AF3 x,AF3 y,AF3 a){return mix(x,y,a);}
+ AF4 ALerpF4(AF4 x,AF4 y,AF4 a){return mix(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_MAX3_F32.
+ AF1 AMax3F1(AF1 x,AF1 y,AF1 z){return max(x,max(y,z));}
+ AF2 AMax3F2(AF2 x,AF2 y,AF2 z){return max(x,max(y,z));}
+ AF3 AMax3F3(AF3 x,AF3 y,AF3 z){return max(x,max(y,z));}
+ AF4 AMax3F4(AF4 x,AF4 y,AF4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMax3SU1(AU1 x,AU1 y,AU1 z){return AU1(max(ASU1(x),max(ASU1(y),ASU1(z))));}
+ AU2 AMax3SU2(AU2 x,AU2 y,AU2 z){return AU2(max(ASU2(x),max(ASU2(y),ASU2(z))));}
+ AU3 AMax3SU3(AU3 x,AU3 y,AU3 z){return AU3(max(ASU3(x),max(ASU3(y),ASU3(z))));}
+ AU4 AMax3SU4(AU4 x,AU4 y,AU4 z){return AU4(max(ASU4(x),max(ASU4(y),ASU4(z))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMax3U1(AU1 x,AU1 y,AU1 z){return max(x,max(y,z));}
+ AU2 AMax3U2(AU2 x,AU2 y,AU2 z){return max(x,max(y,z));}
+ AU3 AMax3U3(AU3 x,AU3 y,AU3 z){return max(x,max(y,z));}
+ AU4 AMax3U4(AU4 x,AU4 y,AU4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMaxSU1(AU1 a,AU1 b){return AU1(max(ASU1(a),ASU1(b)));}
+ AU2 AMaxSU2(AU2 a,AU2 b){return AU2(max(ASU2(a),ASU2(b)));}
+ AU3 AMaxSU3(AU3 a,AU3 b){return AU3(max(ASU3(a),ASU3(b)));}
+ AU4 AMaxSU4(AU4 a,AU4 b){return AU4(max(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Clamp has an easier pattern match for med3 when some ordering is known.
+ // V_MED3_F32.
+ AF1 AMed3F1(AF1 x,AF1 y,AF1 z){return max(min(x,y),min(max(x,y),z));}
+ AF2 AMed3F2(AF2 x,AF2 y,AF2 z){return max(min(x,y),min(max(x,y),z));}
+ AF3 AMed3F3(AF3 x,AF3 y,AF3 z){return max(min(x,y),min(max(x,y),z));}
+ AF4 AMed3F4(AF4 x,AF4 y,AF4 z){return max(min(x,y),min(max(x,y),z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_MIN3_F32.
+ AF1 AMin3F1(AF1 x,AF1 y,AF1 z){return min(x,min(y,z));}
+ AF2 AMin3F2(AF2 x,AF2 y,AF2 z){return min(x,min(y,z));}
+ AF3 AMin3F3(AF3 x,AF3 y,AF3 z){return min(x,min(y,z));}
+ AF4 AMin3F4(AF4 x,AF4 y,AF4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMin3SU1(AU1 x,AU1 y,AU1 z){return AU1(min(ASU1(x),min(ASU1(y),ASU1(z))));}
+ AU2 AMin3SU2(AU2 x,AU2 y,AU2 z){return AU2(min(ASU2(x),min(ASU2(y),ASU2(z))));}
+ AU3 AMin3SU3(AU3 x,AU3 y,AU3 z){return AU3(min(ASU3(x),min(ASU3(y),ASU3(z))));}
+ AU4 AMin3SU4(AU4 x,AU4 y,AU4 z){return AU4(min(ASU4(x),min(ASU4(y),ASU4(z))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMin3U1(AU1 x,AU1 y,AU1 z){return min(x,min(y,z));}
+ AU2 AMin3U2(AU2 x,AU2 y,AU2 z){return min(x,min(y,z));}
+ AU3 AMin3U3(AU3 x,AU3 y,AU3 z){return min(x,min(y,z));}
+ AU4 AMin3U4(AU4 x,AU4 y,AU4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMinSU1(AU1 a,AU1 b){return AU1(min(ASU1(a),ASU1(b)));}
+ AU2 AMinSU2(AU2 a,AU2 b){return AU2(min(ASU2(a),ASU2(b)));}
+ AU3 AMinSU3(AU3 a,AU3 b){return AU3(min(ASU3(a),ASU3(b)));}
+ AU4 AMinSU4(AU4 a,AU4 b){return AU4(min(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalized trig. Valid input domain is {-256 to +256}. No GLSL compiler intrinsic exists to map to this currently.
+ // V_COS_F32.
+ AF1 ANCosF1(AF1 x){return cos(x*AF1_(A_2PI));}
+ AF2 ANCosF2(AF2 x){return cos(x*AF2_(A_2PI));}
+ AF3 ANCosF3(AF3 x){return cos(x*AF3_(A_2PI));}
+ AF4 ANCosF4(AF4 x){return cos(x*AF4_(A_2PI));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalized trig. Valid input domain is {-256 to +256}. No GLSL compiler intrinsic exists to map to this currently.
+ // V_SIN_F32.
+ AF1 ANSinF1(AF1 x){return sin(x*AF1_(A_2PI));}
+ AF2 ANSinF2(AF2 x){return sin(x*AF2_(A_2PI));}
+ AF3 ANSinF3(AF3 x){return sin(x*AF3_(A_2PI));}
+ AF4 ANSinF4(AF4 x){return sin(x*AF4_(A_2PI));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ARcpF1(AF1 x){return AF1_(1.0)/x;}
+ AF2 ARcpF2(AF2 x){return AF2_(1.0)/x;}
+ AF3 ARcpF3(AF3 x){return AF3_(1.0)/x;}
+ AF4 ARcpF4(AF4 x){return AF4_(1.0)/x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ARsqF1(AF1 x){return AF1_(1.0)/sqrt(x);}
+ AF2 ARsqF2(AF2 x){return AF2_(1.0)/sqrt(x);}
+ AF3 ARsqF3(AF3 x){return AF3_(1.0)/sqrt(x);}
+ AF4 ARsqF4(AF4 x){return AF4_(1.0)/sqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ASatF1(AF1 x){return clamp(x,AF1_(0.0),AF1_(1.0));}
+ AF2 ASatF2(AF2 x){return clamp(x,AF2_(0.0),AF2_(1.0));}
+ AF3 ASatF3(AF3 x){return clamp(x,AF3_(0.0),AF3_(1.0));}
+ AF4 ASatF4(AF4 x){return clamp(x,AF4_(0.0),AF4_(1.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AShrSU1(AU1 a,AU1 b){return AU1(ASU1(a)>>ASU1(b));}
+ AU2 AShrSU2(AU2 a,AU2 b){return AU2(ASU2(a)>>ASU2(b));}
+ AU3 AShrSU3(AU3 a,AU3 b){return AU3(ASU3(a)>>ASU3(b));}
+ AU4 AShrSU4(AU4 a,AU4 b){return AU4(ASU4(a)>>ASU4(b));}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// GLSL BYTE
+//==============================================================================================================================
+ #ifdef A_BYTE
+ #define AB1 uint8_t
+ #define AB2 u8vec2
+ #define AB3 u8vec3
+ #define AB4 u8vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASB1 int8_t
+ #define ASB2 i8vec2
+ #define ASB3 i8vec3
+ #define ASB4 i8vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ AB1 AB1_x(AB1 a){return AB1(a);}
+ AB2 AB2_x(AB1 a){return AB2(a,a);}
+ AB3 AB3_x(AB1 a){return AB3(a,a,a);}
+ AB4 AB4_x(AB1 a){return AB4(a,a,a,a);}
+ #define AB1_(a) AB1_x(AB1(a))
+ #define AB2_(a) AB2_x(AB1(a))
+ #define AB3_(a) AB3_x(AB1(a))
+ #define AB4_(a) AB4_x(AB1(a))
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// GLSL HALF
+//==============================================================================================================================
+ #ifdef A_HALF
+ #define AH1 float16_t
+ #define AH2 f16vec2
+ #define AH3 f16vec3
+ #define AH4 f16vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AW1 uint16_t
+ #define AW2 u16vec2
+ #define AW3 u16vec3
+ #define AW4 u16vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASW1 int16_t
+ #define ASW2 i16vec2
+ #define ASW3 i16vec3
+ #define ASW4 i16vec4
+//==============================================================================================================================
+ #define AH2_AU1(x) unpackFloat2x16(AU1(x))
+ AH4 AH4_AU2_x(AU2 x){return AH4(unpackFloat2x16(x.x),unpackFloat2x16(x.y));}
+ #define AH4_AU2(x) AH4_AU2_x(AU2(x))
+ #define AW2_AU1(x) unpackUint2x16(AU1(x))
+ #define AW4_AU2(x) unpackUint4x16(pack64(AU2(x)))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1_AH2(x) packFloat2x16(AH2(x))
+ AU2 AU2_AH4_x(AH4 x){return AU2(packFloat2x16(x.xy),packFloat2x16(x.zw));}
+ #define AU2_AH4(x) AU2_AH4_x(AH4(x))
+ #define AU1_AW2(x) packUint2x16(AW2(x))
+ #define AU2_AW4(x) unpack32(packUint4x16(AW4(x)))
+//==============================================================================================================================
+ #define AW1_AH1(x) halfBitsToUint16(AH1(x))
+ #define AW2_AH2(x) halfBitsToUint16(AH2(x))
+ #define AW3_AH3(x) halfBitsToUint16(AH3(x))
+ #define AW4_AH4(x) halfBitsToUint16(AH4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AH1_AW1(x) uint16BitsToHalf(AW1(x))
+ #define AH2_AW2(x) uint16BitsToHalf(AW2(x))
+ #define AH3_AW3(x) uint16BitsToHalf(AW3(x))
+ #define AH4_AW4(x) uint16BitsToHalf(AW4(x))
+//==============================================================================================================================
+ AH1 AH1_x(AH1 a){return AH1(a);}
+ AH2 AH2_x(AH1 a){return AH2(a,a);}
+ AH3 AH3_x(AH1 a){return AH3(a,a,a);}
+ AH4 AH4_x(AH1 a){return AH4(a,a,a,a);}
+ #define AH1_(a) AH1_x(AH1(a))
+ #define AH2_(a) AH2_x(AH1(a))
+ #define AH3_(a) AH3_x(AH1(a))
+ #define AH4_(a) AH4_x(AH1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AW1_x(AW1 a){return AW1(a);}
+ AW2 AW2_x(AW1 a){return AW2(a,a);}
+ AW3 AW3_x(AW1 a){return AW3(a,a,a);}
+ AW4 AW4_x(AW1 a){return AW4(a,a,a,a);}
+ #define AW1_(a) AW1_x(AW1(a))
+ #define AW2_(a) AW2_x(AW1(a))
+ #define AW3_(a) AW3_x(AW1(a))
+ #define AW4_(a) AW4_x(AW1(a))
+//==============================================================================================================================
+ AW1 AAbsSW1(AW1 a){return AW1(abs(ASW1(a)));}
+ AW2 AAbsSW2(AW2 a){return AW2(abs(ASW2(a)));}
+ AW3 AAbsSW3(AW3 a){return AW3(abs(ASW3(a)));}
+ AW4 AAbsSW4(AW4 a){return AW4(abs(ASW4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AClampH1(AH1 x,AH1 n,AH1 m){return clamp(x,n,m);}
+ AH2 AClampH2(AH2 x,AH2 n,AH2 m){return clamp(x,n,m);}
+ AH3 AClampH3(AH3 x,AH3 n,AH3 m){return clamp(x,n,m);}
+ AH4 AClampH4(AH4 x,AH4 n,AH4 m){return clamp(x,n,m);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AFractH1(AH1 x){return fract(x);}
+ AH2 AFractH2(AH2 x){return fract(x);}
+ AH3 AFractH3(AH3 x){return fract(x);}
+ AH4 AFractH4(AH4 x){return fract(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ALerpH1(AH1 x,AH1 y,AH1 a){return mix(x,y,a);}
+ AH2 ALerpH2(AH2 x,AH2 y,AH2 a){return mix(x,y,a);}
+ AH3 ALerpH3(AH3 x,AH3 y,AH3 a){return mix(x,y,a);}
+ AH4 ALerpH4(AH4 x,AH4 y,AH4 a){return mix(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // No packed version of max3.
+ AH1 AMax3H1(AH1 x,AH1 y,AH1 z){return max(x,max(y,z));}
+ AH2 AMax3H2(AH2 x,AH2 y,AH2 z){return max(x,max(y,z));}
+ AH3 AMax3H3(AH3 x,AH3 y,AH3 z){return max(x,max(y,z));}
+ AH4 AMax3H4(AH4 x,AH4 y,AH4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // "Signed" 16-bit max, computed by widening to 32-bit int and truncating the result back to 16 bits.
+ // NOTE(review): the widening uses ASU1..ASU4 (uint16 -> int32, i.e. zero-extension), so 16-bit values with the
+ // sign bit set compare as large positives — this behaves as an unsigned max despite the 'S' in the name.
+ // AShrSW* in this section uses ASW* instead; verify against upstream ffx_a.h before relying on signed semantics here.
+ AW1 AMaxSW1(AW1 a,AW1 b){return AW1(max(ASU1(a),ASU1(b)));}
+ AW2 AMaxSW2(AW2 a,AW2 b){return AW2(max(ASU2(a),ASU2(b)));}
+ AW3 AMaxSW3(AW3 a,AW3 b){return AW3(max(ASU3(a),ASU3(b)));}
+ AW4 AMaxSW4(AW4 a,AW4 b){return AW4(max(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // No packed version of min3.
+ AH1 AMin3H1(AH1 x,AH1 y,AH1 z){return min(x,min(y,z));}
+ AH2 AMin3H2(AH2 x,AH2 y,AH2 z){return min(x,min(y,z));}
+ AH3 AMin3H3(AH3 x,AH3 y,AH3 z){return min(x,min(y,z));}
+ AH4 AMin3H4(AH4 x,AH4 y,AH4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // "Signed" 16-bit min, computed by widening to 32-bit int and truncating the result back to 16 bits.
+ // NOTE(review): widening via ASU1..ASU4 zero-extends (uint16 -> int32), so negative 16-bit patterns compare
+ // as large positives — this behaves as an unsigned min despite the 'S' in the name. AShrSW* in this section
+ // uses ASW* instead; verify against upstream ffx_a.h before relying on signed semantics here.
+ AW1 AMinSW1(AW1 a,AW1 b){return AW1(min(ASU1(a),ASU1(b)));}
+ AW2 AMinSW2(AW2 a,AW2 b){return AW2(min(ASU2(a),ASU2(b)));}
+ AW3 AMinSW3(AW3 a,AW3 b){return AW3(min(ASU3(a),ASU3(b)));}
+ AW4 AMinSW4(AW4 a,AW4 b){return AW4(min(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Half-precision reciprocal: 1/x (GLSL has no rcp intrinsic).
+ AH1 ARcpH1(AH1 x){return AH1_(1.0)/x;}
+ AH2 ARcpH2(AH2 x){return AH2_(1.0)/x;}
+ AH3 ARcpH3(AH3 x){return AH3_(1.0)/x;}
+ AH4 ARcpH4(AH4 x){return AH4_(1.0)/x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Half-precision reciprocal square root: 1/sqrt(x).
+ AH1 ARsqH1(AH1 x){return AH1_(1.0)/sqrt(x);}
+ AH2 ARsqH2(AH2 x){return AH2_(1.0)/sqrt(x);}
+ AH3 ARsqH3(AH3 x){return AH3_(1.0)/sqrt(x);}
+ AH4 ARsqH4(AH4 x){return AH4_(1.0)/sqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Saturate: clamp to [0,1] (GLSL has no saturate intrinsic).
+ AH1 ASatH1(AH1 x){return clamp(x,AH1_(0.0),AH1_(1.0));}
+ AH2 ASatH2(AH2 x){return clamp(x,AH2_(0.0),AH2_(1.0));}
+ AH3 ASatH3(AH3 x){return clamp(x,AH3_(0.0),AH3_(1.0));}
+ AH4 ASatH4(AH4 x){return clamp(x,AH4_(0.0),AH4_(1.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Arithmetic (sign-propagating) right shift on 16-bit words, done via the signed ASW types.
+ AW1 AShrSW1(AW1 a,AW1 b){return AW1(ASW1(a)>>ASW1(b));}
+ AW2 AShrSW2(AW2 a,AW2 b){return AW2(ASW2(a)>>ASW2(b));}
+ AW3 AShrSW3(AW3 a,AW3 b){return AW3(ASW3(a)>>ASW3(b));}
+ AW4 AShrSW4(AW4 a,AW4 b){return AW4(ASW4(a)>>ASW4(b));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// GLSL DOUBLE
+//==============================================================================================================================
+ #ifdef A_DUBL
+ #define AD1 double
+ #define AD2 dvec2
+ #define AD3 dvec3
+ #define AD4 dvec4
+//------------------------------------------------------------------------------------------------------------------------------
+ // Double-precision splat constructors: replicate scalar 'a' across all components.
+ AD1 AD1_x(AD1 a){return AD1(a);}
+ AD2 AD2_x(AD1 a){return AD2(a,a);}
+ AD3 AD3_x(AD1 a){return AD3(a,a,a);}
+ AD4 AD4_x(AD1 a){return AD4(a,a,a,a);}
+ // Literal-splat macros: ADn_(c) builds a dvec with every component equal to c.
+ #define AD1_(a) AD1_x(AD1(a))
+ #define AD2_(a) AD2_x(AD1(a))
+ #define AD3_(a) AD3_x(AD1(a))
+ #define AD4_(a) AD4_x(AD1(a))
+//==============================================================================================================================
+ // Fractional part (GLSL fract: x - floor(x)).
+ AD1 AFractD1(AD1 x){return fract(x);}
+ AD2 AFractD2(AD2 x){return fract(x);}
+ AD3 AFractD3(AD3 x){return fract(x);}
+ AD4 AFractD4(AD4 x){return fract(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Linear interpolation between x and y by factor a.
+ AD1 ALerpD1(AD1 x,AD1 y,AD1 a){return mix(x,y,a);}
+ AD2 ALerpD2(AD2 x,AD2 y,AD2 a){return mix(x,y,a);}
+ AD3 ALerpD3(AD3 x,AD3 y,AD3 a){return mix(x,y,a);}
+ AD4 ALerpD4(AD4 x,AD4 y,AD4 a){return mix(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Reciprocal: 1/x.
+ AD1 ARcpD1(AD1 x){return AD1_(1.0)/x;}
+ AD2 ARcpD2(AD2 x){return AD2_(1.0)/x;}
+ AD3 ARcpD3(AD3 x){return AD3_(1.0)/x;}
+ AD4 ARcpD4(AD4 x){return AD4_(1.0)/x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Reciprocal square root: 1/sqrt(x).
+ AD1 ARsqD1(AD1 x){return AD1_(1.0)/sqrt(x);}
+ AD2 ARsqD2(AD2 x){return AD2_(1.0)/sqrt(x);}
+ AD3 ARsqD3(AD3 x){return AD3_(1.0)/sqrt(x);}
+ AD4 ARsqD4(AD4 x){return AD4_(1.0)/sqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Saturate: clamp to [0,1].
+ AD1 ASatD1(AD1 x){return clamp(x,AD1_(0.0),AD1_(1.0));}
+ AD2 ASatD2(AD2 x){return clamp(x,AD2_(0.0),AD2_(1.0));}
+ AD3 ASatD3(AD3 x){return clamp(x,AD3_(0.0),AD3_(1.0));}
+ AD4 ASatD4(AD4 x){return clamp(x,AD4_(0.0),AD4_(1.0));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// GLSL LONG
+//==============================================================================================================================
+ #ifdef A_LONG
+ #define AL1 uint64_t
+ #define AL2 u64vec2
+ #define AL3 u64vec3
+ #define AL4 u64vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASL1 int64_t
+ #define ASL2 i64vec2
+ #define ASL3 i64vec3
+ #define ASL4 i64vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AL1_AU2(x) packUint2x32(AU2(x))
+ #define AU2_AL1(x) unpackUint2x32(AL1(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ // 64-bit splat constructors and literal-splat macros (same pattern as the AU/AD types above).
+ AL1 AL1_x(AL1 a){return AL1(a);}
+ AL2 AL2_x(AL1 a){return AL2(a,a);}
+ AL3 AL3_x(AL1 a){return AL3(a,a,a);}
+ AL4 AL4_x(AL1 a){return AL4(a,a,a,a);}
+ #define AL1_(a) AL1_x(AL1(a))
+ #define AL2_(a) AL2_x(AL1(a))
+ #define AL3_(a) AL3_x(AL1(a))
+ #define AL4_(a) AL4_x(AL1(a))
+//==============================================================================================================================
+ // Absolute value of a 64-bit value reinterpreted as signed (ASL = int64).
+ AL1 AAbsSL1(AL1 a){return AL1(abs(ASL1(a)));}
+ AL2 AAbsSL2(AL2 a){return AL2(abs(ASL2(a)));}
+ AL3 AAbsSL3(AL3 a){return AL3(abs(ASL3(a)));}
+ AL4 AAbsSL4(AL4 a){return AL4(abs(ASL4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Signed 64-bit max: reinterpret through ASL (int64), matching AAbsSL above.
+ // Was cast through ASU (int32), which truncated the 64-bit operands before comparing.
+ AL1 AMaxSL1(AL1 a,AL1 b){return AL1(max(ASL1(a),ASL1(b)));}
+ AL2 AMaxSL2(AL2 a,AL2 b){return AL2(max(ASL2(a),ASL2(b)));}
+ AL3 AMaxSL3(AL3 a,AL3 b){return AL3(max(ASL3(a),ASL3(b)));}
+ AL4 AMaxSL4(AL4 a,AL4 b){return AL4(max(ASL4(a),ASL4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Signed 64-bit min (same ASL fix as AMaxSL above).
+ AL1 AMinSL1(AL1 a,AL1 b){return AL1(min(ASL1(a),ASL1(b)));}
+ AL2 AMinSL2(AL2 a,AL2 b){return AL2(min(ASL2(a),ASL2(b)));}
+ AL3 AMinSL3(AL3 a,AL3 b){return AL3(min(ASL3(a),ASL3(b)));}
+ AL4 AMinSL4(AL4 a,AL4 b){return AL4(min(ASL4(a),ASL4(b)));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// WAVE OPERATIONS
+//==============================================================================================================================
+ #ifdef A_WAVE
+ // Where 'x' must be a compile time literal.
+ // Butterfly swap: each invocation reads 'v' from the lane whose index is (own index ^ x).
+ AF1 AWaveXorF1(AF1 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AF2 AWaveXorF2(AF2 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AF3 AWaveXorF3(AF3 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AF4 AWaveXorF4(AF4 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AU1 AWaveXorU1(AU1 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AU2 AWaveXorU2(AU2 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AU3 AWaveXorU3(AU3 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AU4 AWaveXorU4(AU4 v,AU1 x){return subgroupShuffleXor(v,x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_HALF
+ // 16-bit variants: pack into 32-bit uints for the shuffle, then unpack.
+ AH2 AWaveXorH2(AH2 v,AU1 x){return AH2_AU1(subgroupShuffleXor(AU1_AH2(v),x));}
+ AH4 AWaveXorH4(AH4 v,AU1 x){return AH4_AU2(subgroupShuffleXor(AU2_AH4(v),x));}
+ AW2 AWaveXorW2(AW2 v,AU1 x){return AW2_AU1(subgroupShuffleXor(AU1_AW2(v),x));}
+ AW4 AWaveXorW4(AW4 v,AU1 x){return AW4_AU2(subgroupShuffleXor(AU2_AW4(v),x));}
+ #endif
+ #endif
+//==============================================================================================================================
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// HLSL
+//
+//
+//==============================================================================================================================
+#if defined(A_HLSL) && defined(A_GPU)
+ #ifdef A_HLSL_6_2
+ #define AP1 bool
+ #define AP2 bool2
+ #define AP3 bool3
+ #define AP4 bool4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AF1 float32_t
+ #define AF2 float32_t2
+ #define AF3 float32_t3
+ #define AF4 float32_t4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1 uint32_t
+ #define AU2 uint32_t2
+ #define AU3 uint32_t3
+ #define AU4 uint32_t4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASU1 int32_t
+ #define ASU2 int32_t2
+ #define ASU3 int32_t3
+ #define ASU4 int32_t4
+ #else
+ #define AP1 bool
+ #define AP2 bool2
+ #define AP3 bool3
+ #define AP4 bool4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AF1 float
+ #define AF2 float2
+ #define AF3 float3
+ #define AF4 float4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1 uint
+ #define AU2 uint2
+ #define AU3 uint3
+ #define AU4 uint4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASU1 int
+ #define ASU2 int2
+ #define ASU3 int3
+ #define ASU4 int4
+ #endif
+//==============================================================================================================================
+ #define AF1_AU1(x) asfloat(AU1(x))
+ #define AF2_AU2(x) asfloat(AU2(x))
+ #define AF3_AU3(x) asfloat(AU3(x))
+ #define AF4_AU4(x) asfloat(AU4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1_AF1(x) asuint(AF1(x))
+ #define AU2_AF2(x) asuint(AF2(x))
+ #define AU3_AF3(x) asuint(AF3(x))
+ #define AU4_AF4(x) asuint(AF4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ // Pack one float into the low 16 bits of a uint as half (f32tof16).
+ AU1 AU1_AH1_AF1_x(AF1 a){return f32tof16(a);}
+ #define AU1_AH1_AF1(a) AU1_AH1_AF1_x(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ // Pack two floats into one uint as two halfs: x in the low 16 bits, y in the high 16 bits.
+ AU1 AU1_AH2_AF2_x(AF2 a){return f32tof16(a.x)|(f32tof16(a.y)<<16);}
+ #define AU1_AH2_AF2(a) AU1_AH2_AF2_x(AF2(a))
+ #define AU1_AB4Unorm_AF4(x) D3DCOLORtoUBYTE4(AF4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ // Unpack two halfs from one uint back to float2 (inverse of AU1_AH2_AF2).
+ AF2 AF2_AH2_AU1_x(AU1 x){return AF2(f16tof32(x&0xFFFF),f16tof32(x>>16));}
+ #define AF2_AH2_AU1(x) AF2_AH2_AU1_x(AU1(x))
+//==============================================================================================================================
+ // Splat constructors / literal-splat macros: AFn_(c) and AUn_(c) replicate c to all components.
+ AF1 AF1_x(AF1 a){return AF1(a);}
+ AF2 AF2_x(AF1 a){return AF2(a,a);}
+ AF3 AF3_x(AF1 a){return AF3(a,a,a);}
+ AF4 AF4_x(AF1 a){return AF4(a,a,a,a);}
+ #define AF1_(a) AF1_x(AF1(a))
+ #define AF2_(a) AF2_x(AF1(a))
+ #define AF3_(a) AF3_x(AF1(a))
+ #define AF4_(a) AF4_x(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AU1_x(AU1 a){return AU1(a);}
+ AU2 AU2_x(AU1 a){return AU2(a,a);}
+ AU3 AU3_x(AU1 a){return AU3(a,a,a);}
+ AU4 AU4_x(AU1 a){return AU4(a,a,a,a);}
+ #define AU1_(a) AU1_x(AU1(a))
+ #define AU2_(a) AU2_x(AU1(a))
+ #define AU3_(a) AU3_x(AU1(a))
+ #define AU4_(a) AU4_x(AU1(a))
+//==============================================================================================================================
+ // Absolute value of a uint reinterpreted as signed.
+ AU1 AAbsSU1(AU1 a){return AU1(abs(ASU1(a)));}
+ AU2 AAbsSU2(AU2 a){return AU2(abs(ASU2(a)));}
+ AU3 AAbsSU3(AU3 a){return AU3(abs(ASU3(a)));}
+ AU4 AAbsSU4(AU4 a){return AU4(abs(ASU4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // ABfe: bitfield extract of 'bits' bits starting at bit 'off' (mask assumes bits < 32).
+ AU1 ABfe(AU1 src,AU1 off,AU1 bits){AU1 mask=(1u<<bits)-1;return (src>>off)&mask;}
+ // ABfi: bitfield insert with an explicit mask — takes 'ins' where mask bits are set, else 'src'.
+ AU1 ABfi(AU1 src,AU1 ins,AU1 mask){return (ins&mask)|(src&(~mask));}
+ // ABfiM: insert the low 'bits' bits of 'ins' into 'src' (mask assumes bits < 32).
+ AU1 ABfiM(AU1 src,AU1 ins,AU1 bits){AU1 mask=(1u<<bits)-1;return (ins&mask)|(src&(~mask));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Clamp x to [n,m].
+ AF1 AClampF1(AF1 x,AF1 n,AF1 m){return max(n,min(x,m));}
+ AF2 AClampF2(AF2 x,AF2 n,AF2 m){return max(n,min(x,m));}
+ AF3 AClampF3(AF3 x,AF3 n,AF3 m){return max(n,min(x,m));}
+ AF4 AClampF4(AF4 x,AF4 n,AF4 m){return max(n,min(x,m));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Fractional part computed as x-floor(x) (deliberately not HLSL frac()).
+ AF1 AFractF1(AF1 x){return x-floor(x);}
+ AF2 AFractF2(AF2 x){return x-floor(x);}
+ AF3 AFractF3(AF3 x){return x-floor(x);}
+ AF4 AFractF4(AF4 x){return x-floor(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ALerpF1(AF1 x,AF1 y,AF1 a){return lerp(x,y,a);}
+ AF2 ALerpF2(AF2 x,AF2 y,AF2 a){return lerp(x,y,a);}
+ AF3 ALerpF3(AF3 x,AF3 y,AF3 a){return lerp(x,y,a);}
+ AF4 ALerpF4(AF4 x,AF4 y,AF4 a){return lerp(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AMax3F1(AF1 x,AF1 y,AF1 z){return max(x,max(y,z));}
+ AF2 AMax3F2(AF2 x,AF2 y,AF2 z){return max(x,max(y,z));}
+ AF3 AMax3F3(AF3 x,AF3 y,AF3 z){return max(x,max(y,z));}
+ AF4 AMax3F4(AF4 x,AF4 y,AF4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMax3SU1(AU1 x,AU1 y,AU1 z){return AU1(max(ASU1(x),max(ASU1(y),ASU1(z))));}
+ AU2 AMax3SU2(AU2 x,AU2 y,AU2 z){return AU2(max(ASU2(x),max(ASU2(y),ASU2(z))));}
+ AU3 AMax3SU3(AU3 x,AU3 y,AU3 z){return AU3(max(ASU3(x),max(ASU3(y),ASU3(z))));}
+ AU4 AMax3SU4(AU4 x,AU4 y,AU4 z){return AU4(max(ASU4(x),max(ASU4(y),ASU4(z))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMax3U1(AU1 x,AU1 y,AU1 z){return max(x,max(y,z));}
+ AU2 AMax3U2(AU2 x,AU2 y,AU2 z){return max(x,max(y,z));}
+ AU3 AMax3U3(AU3 x,AU3 y,AU3 z){return max(x,max(y,z));}
+ AU4 AMax3U4(AU4 x,AU4 y,AU4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMaxSU1(AU1 a,AU1 b){return AU1(max(ASU1(a),ASU1(b)));}
+ AU2 AMaxSU2(AU2 a,AU2 b){return AU2(max(ASU2(a),ASU2(b)));}
+ AU3 AMaxSU3(AU3 a,AU3 b){return AU3(max(ASU3(a),ASU3(b)));}
+ AU4 AMaxSU4(AU4 a,AU4 b){return AU4(max(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AMed3F1(AF1 x,AF1 y,AF1 z){return max(min(x,y),min(max(x,y),z));}
+ AF2 AMed3F2(AF2 x,AF2 y,AF2 z){return max(min(x,y),min(max(x,y),z));}
+ AF3 AMed3F3(AF3 x,AF3 y,AF3 z){return max(min(x,y),min(max(x,y),z));}
+ AF4 AMed3F4(AF4 x,AF4 y,AF4 z){return max(min(x,y),min(max(x,y),z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AMin3F1(AF1 x,AF1 y,AF1 z){return min(x,min(y,z));}
+ AF2 AMin3F2(AF2 x,AF2 y,AF2 z){return min(x,min(y,z));}
+ AF3 AMin3F3(AF3 x,AF3 y,AF3 z){return min(x,min(y,z));}
+ AF4 AMin3F4(AF4 x,AF4 y,AF4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMin3SU1(AU1 x,AU1 y,AU1 z){return AU1(min(ASU1(x),min(ASU1(y),ASU1(z))));}
+ AU2 AMin3SU2(AU2 x,AU2 y,AU2 z){return AU2(min(ASU2(x),min(ASU2(y),ASU2(z))));}
+ AU3 AMin3SU3(AU3 x,AU3 y,AU3 z){return AU3(min(ASU3(x),min(ASU3(y),ASU3(z))));}
+ AU4 AMin3SU4(AU4 x,AU4 y,AU4 z){return AU4(min(ASU4(x),min(ASU4(y),ASU4(z))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMin3U1(AU1 x,AU1 y,AU1 z){return min(x,min(y,z));}
+ AU2 AMin3U2(AU2 x,AU2 y,AU2 z){return min(x,min(y,z));}
+ AU3 AMin3U3(AU3 x,AU3 y,AU3 z){return min(x,min(y,z));}
+ AU4 AMin3U4(AU4 x,AU4 y,AU4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMinSU1(AU1 a,AU1 b){return AU1(min(ASU1(a),ASU1(b)));}
+ AU2 AMinSU2(AU2 a,AU2 b){return AU2(min(ASU2(a),ASU2(b)));}
+ AU3 AMinSU3(AU3 a,AU3 b){return AU3(min(ASU3(a),ASU3(b)));}
+ AU4 AMinSU4(AU4 a,AU4 b){return AU4(min(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ANCosF1(AF1 x){return cos(x*AF1_(A_2PI));}
+ AF2 ANCosF2(AF2 x){return cos(x*AF2_(A_2PI));}
+ AF3 ANCosF3(AF3 x){return cos(x*AF3_(A_2PI));}
+ AF4 ANCosF4(AF4 x){return cos(x*AF4_(A_2PI));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ANSinF1(AF1 x){return sin(x*AF1_(A_2PI));}
+ AF2 ANSinF2(AF2 x){return sin(x*AF2_(A_2PI));}
+ AF3 ANSinF3(AF3 x){return sin(x*AF3_(A_2PI));}
+ AF4 ANSinF4(AF4 x){return sin(x*AF4_(A_2PI));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ARcpF1(AF1 x){return rcp(x);}
+ AF2 ARcpF2(AF2 x){return rcp(x);}
+ AF3 ARcpF3(AF3 x){return rcp(x);}
+ AF4 ARcpF4(AF4 x){return rcp(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ARsqF1(AF1 x){return rsqrt(x);}
+ AF2 ARsqF2(AF2 x){return rsqrt(x);}
+ AF3 ARsqF3(AF3 x){return rsqrt(x);}
+ AF4 ARsqF4(AF4 x){return rsqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ASatF1(AF1 x){return saturate(x);}
+ AF2 ASatF2(AF2 x){return saturate(x);}
+ AF3 ASatF3(AF3 x){return saturate(x);}
+ AF4 ASatF4(AF4 x){return saturate(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AShrSU1(AU1 a,AU1 b){return AU1(ASU1(a)>>ASU1(b));}
+ AU2 AShrSU2(AU2 a,AU2 b){return AU2(ASU2(a)>>ASU2(b));}
+ AU3 AShrSU3(AU3 a,AU3 b){return AU3(ASU3(a)>>ASU3(b));}
+ AU4 AShrSU4(AU4 a,AU4 b){return AU4(ASU4(a)>>ASU4(b));}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HLSL BYTE
+//==============================================================================================================================
+ #ifdef A_BYTE
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HLSL HALF
+//==============================================================================================================================
+ #ifdef A_HALF
+ #ifdef A_HLSL_6_2
+ #define AH1 float16_t
+ #define AH2 float16_t2
+ #define AH3 float16_t3
+ #define AH4 float16_t4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AW1 uint16_t
+ #define AW2 uint16_t2
+ #define AW3 uint16_t3
+ #define AW4 uint16_t4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASW1 int16_t
+ #define ASW2 int16_t2
+ #define ASW3 int16_t3
+ #define ASW4 int16_t4
+ #else
+ #define AH1 min16float
+ #define AH2 min16float2
+ #define AH3 min16float3
+ #define AH4 min16float4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AW1 min16uint
+ #define AW2 min16uint2
+ #define AW3 min16uint3
+ #define AW4 min16uint4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASW1 min16int
+ #define ASW2 min16int2
+ #define ASW3 min16int3
+ #define ASW4 min16int4
+ #endif
+//==============================================================================================================================
+ // Need to use manual unpack to get optimal execution (don't use packed types in buffers directly).
+ // Unpack requires this pattern: https://gpuopen.com/first-steps-implementing-fp16/
+ // Unpack two halfs from one uint into AH2, via a float2 intermediate.
+ AH2 AH2_AU1_x(AU1 x){AF2 t=f16tof32(AU2(x&0xFFFF,x>>16));return AH2(t);}
+ AH4 AH4_AU2_x(AU2 x){return AH4(AH2_AU1_x(x.x),AH2_AU1_x(x.y));}
+ // Unpack two 16-bit words from one uint into AW2.
+ AW2 AW2_AU1_x(AU1 x){AU2 t=AU2(x&0xFFFF,x>>16);return AW2(t);}
+ AW4 AW4_AU2_x(AU2 x){return AW4(AW2_AU1_x(x.x),AW2_AU1_x(x.y));}
+ #define AH2_AU1(x) AH2_AU1_x(AU1(x))
+ #define AH4_AU2(x) AH4_AU2_x(AU2(x))
+ #define AW2_AU1(x) AW2_AU1_x(AU1(x))
+ #define AW4_AU2(x) AW4_AU2_x(AU2(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ // Pack back: .x goes to the low 16 bits, .y to the high 16 bits.
+ AU1 AU1_AH2_x(AH2 x){return f32tof16(x.x)+(f32tof16(x.y)<<16);}
+ AU2 AU2_AH4_x(AH4 x){return AU2(AU1_AH2_x(x.xy),AU1_AH2_x(x.zw));}
+ AU1 AU1_AW2_x(AW2 x){return AU1(x.x)+(AU1(x.y)<<16);}
+ AU2 AU2_AW4_x(AW4 x){return AU2(AU1_AW2_x(x.xy),AU1_AW2_x(x.zw));}
+ #define AU1_AH2(x) AU1_AH2_x(AH2(x))
+ #define AU2_AH4(x) AU2_AH4_x(AH4(x))
+ #define AU1_AW2(x) AU1_AW2_x(AW2(x))
+ #define AU2_AW4(x) AU2_AW4_x(AW4(x))
+//==============================================================================================================================
+ #if defined(A_HLSL_6_2) && !defined(A_NO_16_BIT_CAST)
+ #define AW1_AH1(x) asuint16(x)
+ #define AW2_AH2(x) asuint16(x)
+ #define AW3_AH3(x) asuint16(x)
+ #define AW4_AH4(x) asuint16(x)
+ #else
+ #define AW1_AH1(a) AW1(f32tof16(AF1(a)))
+ #define AW2_AH2(a) AW2(AW1_AH1((a).x),AW1_AH1((a).y))
+ #define AW3_AH3(a) AW3(AW1_AH1((a).x),AW1_AH1((a).y),AW1_AH1((a).z))
+ #define AW4_AH4(a) AW4(AW1_AH1((a).x),AW1_AH1((a).y),AW1_AH1((a).z),AW1_AH1((a).w))
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #if defined(A_HLSL_6_2) && !defined(A_NO_16_BIT_CAST)
+ #define AH1_AW1(x) asfloat16(x)
+ #define AH2_AW2(x) asfloat16(x)
+ #define AH3_AW3(x) asfloat16(x)
+ #define AH4_AW4(x) asfloat16(x)
+ #else
+ #define AH1_AW1(a) AH1(f16tof32(AU1(a)))
+ #define AH2_AW2(a) AH2(AH1_AW1((a).x),AH1_AW1((a).y))
+ #define AH3_AW3(a) AH3(AH1_AW1((a).x),AH1_AW1((a).y),AH1_AW1((a).z))
+ #define AH4_AW4(a) AH4(AH1_AW1((a).x),AH1_AW1((a).y),AH1_AW1((a).z),AH1_AW1((a).w))
+ #endif
+//==============================================================================================================================
+ AH1 AH1_x(AH1 a){return AH1(a);}
+ AH2 AH2_x(AH1 a){return AH2(a,a);}
+ AH3 AH3_x(AH1 a){return AH3(a,a,a);}
+ AH4 AH4_x(AH1 a){return AH4(a,a,a,a);}
+ #define AH1_(a) AH1_x(AH1(a))
+ #define AH2_(a) AH2_x(AH1(a))
+ #define AH3_(a) AH3_x(AH1(a))
+ #define AH4_(a) AH4_x(AH1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AW1_x(AW1 a){return AW1(a);}
+ AW2 AW2_x(AW1 a){return AW2(a,a);}
+ AW3 AW3_x(AW1 a){return AW3(a,a,a);}
+ AW4 AW4_x(AW1 a){return AW4(a,a,a,a);}
+ #define AW1_(a) AW1_x(AW1(a))
+ #define AW2_(a) AW2_x(AW1(a))
+ #define AW3_(a) AW3_x(AW1(a))
+ #define AW4_(a) AW4_x(AW1(a))
+//==============================================================================================================================
+ AW1 AAbsSW1(AW1 a){return AW1(abs(ASW1(a)));}
+ AW2 AAbsSW2(AW2 a){return AW2(abs(ASW2(a)));}
+ AW3 AAbsSW3(AW3 a){return AW3(abs(ASW3(a)));}
+ AW4 AAbsSW4(AW4 a){return AW4(abs(ASW4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AClampH1(AH1 x,AH1 n,AH1 m){return max(n,min(x,m));}
+ AH2 AClampH2(AH2 x,AH2 n,AH2 m){return max(n,min(x,m));}
+ AH3 AClampH3(AH3 x,AH3 n,AH3 m){return max(n,min(x,m));}
+ AH4 AClampH4(AH4 x,AH4 n,AH4 m){return max(n,min(x,m));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_FRACT_F16 (note DX frac() is different).
+ AH1 AFractH1(AH1 x){return x-floor(x);}
+ AH2 AFractH2(AH2 x){return x-floor(x);}
+ AH3 AFractH3(AH3 x){return x-floor(x);}
+ AH4 AFractH4(AH4 x){return x-floor(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ALerpH1(AH1 x,AH1 y,AH1 a){return lerp(x,y,a);}
+ AH2 ALerpH2(AH2 x,AH2 y,AH2 a){return lerp(x,y,a);}
+ AH3 ALerpH3(AH3 x,AH3 y,AH3 a){return lerp(x,y,a);}
+ AH4 ALerpH4(AH4 x,AH4 y,AH4 a){return lerp(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AMax3H1(AH1 x,AH1 y,AH1 z){return max(x,max(y,z));}
+ AH2 AMax3H2(AH2 x,AH2 y,AH2 z){return max(x,max(y,z));}
+ AH3 AMax3H3(AH3 x,AH3 y,AH3 z){return max(x,max(y,z));}
+ AH4 AMax3H4(AH4 x,AH4 y,AH4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Signed max on 16-bit words: reinterpret through ASW so values >= 0x8000 order as negative.
+ // Was cast through ASU (32-bit signed), which zero-extends and breaks signed ordering;
+ // ASW matches the casts used by AAbsSW and AShrSW in this section.
+ AW1 AMaxSW1(AW1 a,AW1 b){return AW1(max(ASW1(a),ASW1(b)));}
+ AW2 AMaxSW2(AW2 a,AW2 b){return AW2(max(ASW2(a),ASW2(b)));}
+ AW3 AMaxSW3(AW3 a,AW3 b){return AW3(max(ASW3(a),ASW3(b)));}
+ AW4 AMaxSW4(AW4 a,AW4 b){return AW4(max(ASW4(a),ASW4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AMin3H1(AH1 x,AH1 y,AH1 z){return min(x,min(y,z));}
+ AH2 AMin3H2(AH2 x,AH2 y,AH2 z){return min(x,min(y,z));}
+ AH3 AMin3H3(AH3 x,AH3 y,AH3 z){return min(x,min(y,z));}
+ AH4 AMin3H4(AH4 x,AH4 y,AH4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Signed min on 16-bit words (same ASW fix as AMaxSW above).
+ AW1 AMinSW1(AW1 a,AW1 b){return AW1(min(ASW1(a),ASW1(b)));}
+ AW2 AMinSW2(AW2 a,AW2 b){return AW2(min(ASW2(a),ASW2(b)));}
+ AW3 AMinSW3(AW3 a,AW3 b){return AW3(min(ASW3(a),ASW3(b)));}
+ AW4 AMinSW4(AW4 a,AW4 b){return AW4(min(ASW4(a),ASW4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ARcpH1(AH1 x){return rcp(x);}
+ AH2 ARcpH2(AH2 x){return rcp(x);}
+ AH3 ARcpH3(AH3 x){return rcp(x);}
+ AH4 ARcpH4(AH4 x){return rcp(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ARsqH1(AH1 x){return rsqrt(x);}
+ AH2 ARsqH2(AH2 x){return rsqrt(x);}
+ AH3 ARsqH3(AH3 x){return rsqrt(x);}
+ AH4 ARsqH4(AH4 x){return rsqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ASatH1(AH1 x){return saturate(x);}
+ AH2 ASatH2(AH2 x){return saturate(x);}
+ AH3 ASatH3(AH3 x){return saturate(x);}
+ AH4 ASatH4(AH4 x){return saturate(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AShrSW1(AW1 a,AW1 b){return AW1(ASW1(a)>>ASW1(b));}
+ AW2 AShrSW2(AW2 a,AW2 b){return AW2(ASW2(a)>>ASW2(b));}
+ AW3 AShrSW3(AW3 a,AW3 b){return AW3(ASW3(a)>>ASW3(b));}
+ AW4 AShrSW4(AW4 a,AW4 b){return AW4(ASW4(a)>>ASW4(b));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HLSL DOUBLE
+//==============================================================================================================================
+ #ifdef A_DUBL
+  #ifdef A_HLSL_6_2
+   #define AD1 float64_t
+   #define AD2 float64_t2
+   #define AD3 float64_t3
+   #define AD4 float64_t4
+  #else
+   #define AD1 double
+   #define AD2 double2
+   #define AD3 double3
+   #define AD4 double4
+  #endif
+//------------------------------------------------------------------------------------------------------------------------------
+  // ADn_x broadcasts a scalar into an n-component double vector; the ADn_() macros wrap a literal argument.
+  AD1 AD1_x(AD1 a){return AD1(a);}
+  AD2 AD2_x(AD1 a){return AD2(a,a);}
+  AD3 AD3_x(AD1 a){return AD3(a,a,a);}
+  AD4 AD4_x(AD1 a){return AD4(a,a,a,a);}
+  #define AD1_(a) AD1_x(AD1(a))
+  #define AD2_(a) AD2_x(AD1(a))
+  #define AD3_(a) AD3_x(AD1(a))
+  #define AD4_(a) AD4_x(AD1(a))
+//==============================================================================================================================
+  // Fractional part, a-floor(a).
+  AD1 AFractD1(AD1 a){return a-floor(a);}
+  AD2 AFractD2(AD2 a){return a-floor(a);}
+  AD3 AFractD3(AD3 a){return a-floor(a);}
+  AD4 AFractD4(AD4 a){return a-floor(a);}
+//------------------------------------------------------------------------------------------------------------------------------
+  // Thin wrappers over the HLSL intrinsics so the portability layer has uniform ALerp/ARcp/ARsq/ASat names.
+  AD1 ALerpD1(AD1 x,AD1 y,AD1 a){return lerp(x,y,a);}
+  AD2 ALerpD2(AD2 x,AD2 y,AD2 a){return lerp(x,y,a);}
+  AD3 ALerpD3(AD3 x,AD3 y,AD3 a){return lerp(x,y,a);}
+  AD4 ALerpD4(AD4 x,AD4 y,AD4 a){return lerp(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+  AD1 ARcpD1(AD1 x){return rcp(x);}
+  AD2 ARcpD2(AD2 x){return rcp(x);}
+  AD3 ARcpD3(AD3 x){return rcp(x);}
+  AD4 ARcpD4(AD4 x){return rcp(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+  AD1 ARsqD1(AD1 x){return rsqrt(x);}
+  AD2 ARsqD2(AD2 x){return rsqrt(x);}
+  AD3 ARsqD3(AD3 x){return rsqrt(x);}
+  AD4 ARsqD4(AD4 x){return rsqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+  AD1 ASatD1(AD1 x){return saturate(x);}
+  AD2 ASatD2(AD2 x){return saturate(x);}
+  AD3 ASatD3(AD3 x){return saturate(x);}
+  AD4 ASatD4(AD4 x){return saturate(x);}
+ #endif
+//==============================================================================================================================
+// HLSL WAVE
+//==============================================================================================================================
+ #ifdef A_WAVE
+  // Where 'x' must be a compile time literal.
+  // Reads the same value from the lane whose index differs from this lane's by XOR 'x' (butterfly exchange).
+  AF1 AWaveXorF1(AF1 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+  AF2 AWaveXorF2(AF2 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+  AF3 AWaveXorF3(AF3 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+  AF4 AWaveXorF4(AF4 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+  AU1 AWaveXorU1(AU1 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+  // NOTE(review): the three overloads below reuse the name 'AWaveXorU1' instead of AWaveXorU2/U3/U4
+  // (unlike the AWaveXorF1..F4 set above). HLSL overload resolution disambiguates them so this compiles,
+  // but the naming is inconsistent; kept verbatim since renaming could break existing callers.
+  AU2 AWaveXorU1(AU2 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+  AU3 AWaveXorU1(AU3 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+  AU4 AWaveXorU1(AU4 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+//------------------------------------------------------------------------------------------------------------------------------
+  #ifdef A_HALF
+   // Packed 16-bit values are round-tripped through 32-bit words so WaveReadLaneAt operates on plain uints.
+   AH2 AWaveXorH2(AH2 v,AU1 x){return AH2_AU1(WaveReadLaneAt(AU1_AH2(v),WaveGetLaneIndex()^x));}
+   AH4 AWaveXorH4(AH4 v,AU1 x){return AH4_AU2(WaveReadLaneAt(AU2_AH4(v),WaveGetLaneIndex()^x));}
+   AW2 AWaveXorW2(AW2 v,AU1 x){return AW2_AU1(WaveReadLaneAt(AU1_AW2(v),WaveGetLaneIndex()^x));}
+   // NOTE(review): AW4 is 4x16-bit (64 bits) but is cast through the 32-bit AU1_AW4/AW4_AU1 pair here,
+   // while the equally sized AH4 above uses the AU2 pair — this looks like it would drop the upper half;
+   // confirm against upstream ffx_a.h before relying on the W4 variant.
+   AW4 AWaveXorW4(AW4 v,AU1 x){return AW4_AU1(WaveReadLaneAt(AU1_AW4(v),WaveGetLaneIndex()^x));}
+  #endif
+ #endif
+//==============================================================================================================================
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// GPU COMMON
+//
+//
+//==============================================================================================================================
+#ifdef A_GPU
+ // Negative and positive infinity.
+ #define A_INFP_F AF1_AU1(0x7f800000u)
+ #define A_INFN_F AF1_AU1(0xff800000u)
+//------------------------------------------------------------------------------------------------------------------------------
+ // Copy sign from 's' to positive 'd'.
+ // Works on the raw IEEE bits: OR the sign bit of 's' into 'd' ('d' is assumed positive, so its sign bit is 0).
+ AF1 ACpySgnF1(AF1 d,AF1 s){return AF1_AU1(AU1_AF1(d)|(AU1_AF1(s)&AU1_(0x80000000u)));}
+ AF2 ACpySgnF2(AF2 d,AF2 s){return AF2_AU2(AU2_AF2(d)|(AU2_AF2(s)&AU2_(0x80000000u)));}
+ AF3 ACpySgnF3(AF3 d,AF3 s){return AF3_AU3(AU3_AF3(d)|(AU3_AF3(s)&AU3_(0x80000000u)));}
+ AF4 ACpySgnF4(AF4 d,AF4 s){return AF4_AU4(AU4_AF4(d)|(AU4_AF4(s)&AU4_(0x80000000u)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Single operation to return (useful to create a mask to use in lerp for branch free logic),
+ // m=NaN := 0
+ // m>=0 := 0
+ // m<0 := 1
+ // Uses the following useful floating point logic,
+ // saturate(+a*(-INF)==-INF) := 0
+ // saturate( 0*(-INF)== NaN) := 0
+ // saturate(-a*(-INF)==+INF) := 1
+ AF1 ASignedF1(AF1 m){return ASatF1(m*AF1_(A_INFN_F));}
+ AF2 ASignedF2(AF2 m){return ASatF2(m*AF2_(A_INFN_F));}
+ AF3 ASignedF3(AF3 m){return ASatF3(m*AF3_(A_INFN_F));}
+ AF4 ASignedF4(AF4 m){return ASatF4(m*AF4_(A_INFN_F));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Mirror of ASigned* using +INF: saturate(m*+INF) gives 1 for m>0, and 0 for m<=0 or NaN.
+ AF1 AGtZeroF1(AF1 m){return ASatF1(m*AF1_(A_INFP_F));}
+ AF2 AGtZeroF2(AF2 m){return ASatF2(m*AF2_(A_INFP_F));}
+ AF3 AGtZeroF3(AF3 m){return ASatF3(m*AF3_(A_INFP_F));}
+ AF4 AGtZeroF4(AF4 m){return ASatF4(m*AF4_(A_INFP_F));}
+//==============================================================================================================================
+ #ifdef A_HALF
+  #ifdef A_HLSL_6_2
+   #define A_INFP_H AH1_AW1((uint16_t)0x7c00u)
+   #define A_INFN_H AH1_AW1((uint16_t)0xfc00u)
+  #else
+   #define A_INFP_H AH1_AW1(0x7c00u)
+   #define A_INFN_H AH1_AW1(0xfc00u)
+  #endif
+
+//------------------------------------------------------------------------------------------------------------------------------
+  // 16-bit (half) versions of the sign helpers above; 0x8000 is the half sign bit.
+  AH1 ACpySgnH1(AH1 d,AH1 s){return AH1_AW1(AW1_AH1(d)|(AW1_AH1(s)&AW1_(0x8000u)));}
+  AH2 ACpySgnH2(AH2 d,AH2 s){return AH2_AW2(AW2_AH2(d)|(AW2_AH2(s)&AW2_(0x8000u)));}
+  AH3 ACpySgnH3(AH3 d,AH3 s){return AH3_AW3(AW3_AH3(d)|(AW3_AH3(s)&AW3_(0x8000u)));}
+  AH4 ACpySgnH4(AH4 d,AH4 s){return AH4_AW4(AW4_AH4(d)|(AW4_AH4(s)&AW4_(0x8000u)));}
+//------------------------------------------------------------------------------------------------------------------------------
+  AH1 ASignedH1(AH1 m){return ASatH1(m*AH1_(A_INFN_H));}
+  AH2 ASignedH2(AH2 m){return ASatH2(m*AH2_(A_INFN_H));}
+  AH3 ASignedH3(AH3 m){return ASatH3(m*AH3_(A_INFN_H));}
+  AH4 ASignedH4(AH4 m){return ASatH4(m*AH4_(A_INFN_H));}
+//------------------------------------------------------------------------------------------------------------------------------
+  AH1 AGtZeroH1(AH1 m){return ASatH1(m*AH1_(A_INFP_H));}
+  AH2 AGtZeroH2(AH2 m){return ASatH2(m*AH2_(A_INFP_H));}
+  AH3 AGtZeroH3(AH3 m){return ASatH3(m*AH3_(A_INFP_H));}
+  AH4 AGtZeroH4(AH4 m){return ASatH4(m*AH4_(A_INFP_H));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [FIS] FLOAT INTEGER SORTABLE
+//------------------------------------------------------------------------------------------------------------------------------
+// Float to integer sortable.
+// - If sign bit=0, flip the sign bit (positives).
+// - If sign bit=1, flip all bits (negatives).
+// Integer sortable to float.
+// - If sign bit=1, flip the sign bit (positives).
+// - If sign bit=0, flip all bits (negatives).
+// Has nice side effects.
+// - Larger integers are more positive values.
+// - Float zero is mapped to center of integers (so clear to integer zero is a nice default for atomic max usage).
+// Burns 3 ops for conversion {shift,or,xor}.
+//==============================================================================================================================
+ // 32-bit forms of the float <-> sortable-integer mapping described above.
+ AU1 AFisToU1(AU1 x){return x^(( AShrSU1(x,AU1_(31)))|AU1_(0x80000000));}
+ AU1 AFisFromU1(AU1 x){return x^((~AShrSU1(x,AU1_(31)))|AU1_(0x80000000));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Just adjust high 16-bit value (useful when upper part of 32-bit word is a 16-bit float value).
+ AU1 AFisToHiU1(AU1 x){return x^(( AShrSU1(x,AU1_(15)))|AU1_(0x80000000));}
+ AU1 AFisFromHiU1(AU1 x){return x^((~AShrSU1(x,AU1_(15)))|AU1_(0x80000000));}
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_HALF
+  // 16-bit variants of the same {shift,or,xor} mapping.
+  AW1 AFisToW1(AW1 x){return x^(( AShrSW1(x,AW1_(15)))|AW1_(0x8000));}
+  AW1 AFisFromW1(AW1 x){return x^((~AShrSW1(x,AW1_(15)))|AW1_(0x8000));}
+//------------------------------------------------------------------------------------------------------------------------------
+  AW2 AFisToW2(AW2 x){return x^(( AShrSW2(x,AW2_(15)))|AW2_(0x8000));}
+  AW2 AFisFromW2(AW2 x){return x^((~AShrSW2(x,AW2_(15)))|AW2_(0x8000));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [PERM] V_PERM_B32
+//------------------------------------------------------------------------------------------------------------------------------
+// Support for V_PERM_B32 started in the 3rd generation of GCN.
+//------------------------------------------------------------------------------------------------------------------------------
+// yyyyxxxx - The 'i' input.
+// 76543210
+// ========
+// HGFEDCBA - Naming on permutation.
+//------------------------------------------------------------------------------------------------------------------------------
+// TODO
+// ====
+// - Make sure compiler optimizes this.
+//==============================================================================================================================
+ #ifdef A_HALF
+  // Shift/mask emulations of V_PERM_B32 byte selects; the letters in each name follow the
+  // HGFEDCBA byte-naming table above ('0' means that byte position is zero-filled).
+  AU1 APerm0E0A(AU2 i){return((i.x    )&0xffu)|((i.y<<16)&0xff0000u);}
+  AU1 APerm0F0B(AU2 i){return((i.x>> 8)&0xffu)|((i.y<< 8)&0xff0000u);}
+  AU1 APerm0G0C(AU2 i){return((i.x>>16)&0xffu)|((i.y    )&0xff0000u);}
+  AU1 APerm0H0D(AU2 i){return((i.x>>24)&0xffu)|((i.y>> 8)&0xff0000u);}
+//------------------------------------------------------------------------------------------------------------------------------
+  AU1 APermHGFA(AU2 i){return((i.x    )&0x000000ffu)|(i.y&0xffffff00u);}
+  AU1 APermHGFC(AU2 i){return((i.x>>16)&0x000000ffu)|(i.y&0xffffff00u);}
+  AU1 APermHGAE(AU2 i){return((i.x<< 8)&0x0000ff00u)|(i.y&0xffff00ffu);}
+  AU1 APermHGCE(AU2 i){return((i.x>> 8)&0x0000ff00u)|(i.y&0xffff00ffu);}
+  AU1 APermHAFE(AU2 i){return((i.x<<16)&0x00ff0000u)|(i.y&0xff00ffffu);}
+  AU1 APermHCFE(AU2 i){return((i.x    )&0x00ff0000u)|(i.y&0xff00ffffu);}
+  AU1 APermAGFE(AU2 i){return((i.x<<24)&0xff000000u)|(i.y&0x00ffffffu);}
+  AU1 APermCGFE(AU2 i){return((i.x<< 8)&0xff000000u)|(i.y&0x00ffffffu);}
+//------------------------------------------------------------------------------------------------------------------------------
+  AU1 APermGCEA(AU2 i){return((i.x)&0x00ff00ffu)|((i.y<<8)&0xff00ff00u);}
+  AU1 APermGECA(AU2 i){return(((i.x)&0xffu)|((i.x>>8)&0xff00u)|((i.y<<16)&0xff0000u)|((i.y<<8)&0xff000000u));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [BUC] BYTE UNSIGNED CONVERSION
+//------------------------------------------------------------------------------------------------------------------------------
+// Designed to use the optimal conversion, enables the scaling to possibly be factored into other computation.
+// Works on a range of {0 to A_BUC_<32,16>}, for <32-bit, and 16-bit> respectively.
+//------------------------------------------------------------------------------------------------------------------------------
+// OPCODE NOTES
+// ============
+// GCN does not do UNORM or SNORM for bytes in opcodes.
+// - V_CVT_F32_UBYTE{0,1,2,3} - Unsigned byte to float.
+// - V_CVT_PKACCUM_U8_F32 - Float to unsigned byte (does bit-field insert into 32-bit integer).
+// V_PERM_B32 does byte packing with ability to zero fill bytes as well.
+// - Can pull out byte values from two sources, and zero fill upper 8-bits of packed hi and lo.
+//------------------------------------------------------------------------------------------------------------------------------
+// BYTE : FLOAT - ABuc{0,1,2,3}{To,From}U1() - Designed for V_CVT_F32_UBYTE* and V_CVT_PKACCUM_U8_F32 ops.
+// ==== =====
+// 0 : 0
+// 1 : 1
+// ...
+// 255 : 255
+// : 256 (just outside the encoding range)
+//------------------------------------------------------------------------------------------------------------------------------
+// BYTE : FLOAT - ABuc{0,1,2,3}{To,From}U2() - Designed for 16-bit denormal tricks and V_PERM_B32.
+// ==== =====
+// 0 : 0
+// 1 : 1/512
+// 2 : 1/256
+// ...
+// 64 : 1/8
+// 128 : 1/4
+// 255 : 255/512
+// : 1/2 (just outside the encoding range)
+//------------------------------------------------------------------------------------------------------------------------------
+// OPTIMAL IMPLEMENTATIONS ON AMD ARCHITECTURES
+// ============================================
+// r=ABuc0FromU1(i)
+// V_CVT_F32_UBYTE0 r,i
+// --------------------------------------------
+// r=ABuc0ToU1(d,i)
+// V_CVT_PKACCUM_U8_F32 r,i,0,d
+// --------------------------------------------
+// d=ABuc0FromU2(i)
+// Where 'k0' is an SGPR with 0x0E0A
+// Where 'k1' is an SGPR with {32768.0} packed into the lower 16-bits
+// V_PERM_B32 d,i.x,i.y,k0
+// V_PK_FMA_F16 d,d,k1.x,0
+// --------------------------------------------
+// r=ABuc0ToU2(d,i)
+// Where 'k0' is an SGPR with {1.0/32768.0} packed into the lower 16-bits
+// Where 'k1' is an SGPR with 0x????
+// Where 'k2' is an SGPR with 0x????
+// V_PK_FMA_F16 i,i,k0.x,0
+// V_PERM_B32 r.x,i,i,k1
+// V_PERM_B32 r.y,i,i,k2
+//==============================================================================================================================
+ // Peak range for 32-bit and 16-bit operations.
+ #define A_BUC_32 (255.0)
+ #define A_BUC_16 (255.0/512.0)
+//==============================================================================================================================
+ #if 1
+  // Designed to be one V_CVT_PKACCUM_U8_F32.
+  // The extra min is required to pattern match to V_CVT_PKACCUM_U8_F32.
+  // Byte n is inserted into bits [8n,8n+7] of 'd'; the input is clamped to 255.
+  AU1 ABuc0ToU1(AU1 d,AF1 i){return (d&0xffffff00u)|((min(AU1(i),255u)    )&(0x000000ffu));}
+  AU1 ABuc1ToU1(AU1 d,AF1 i){return (d&0xffff00ffu)|((min(AU1(i),255u)<< 8)&(0x0000ff00u));}
+  AU1 ABuc2ToU1(AU1 d,AF1 i){return (d&0xff00ffffu)|((min(AU1(i),255u)<<16)&(0x00ff0000u));}
+  AU1 ABuc3ToU1(AU1 d,AF1 i){return (d&0x00ffffffu)|((min(AU1(i),255u)<<24)&(0xff000000u));}
+//------------------------------------------------------------------------------------------------------------------------------
+  // Designed to be one V_CVT_F32_UBYTE*.
+  AF1 ABuc0FromU1(AU1 i){return AF1((i    )&255u);}
+  AF1 ABuc1FromU1(AU1 i){return AF1((i>> 8)&255u);}
+  AF1 ABuc2FromU1(AU1 i){return AF1((i>>16)&255u);}
+  AF1 ABuc3FromU1(AU1 i){return AF1((i>>24)&255u);}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+  // Takes {x0,x1} and {y0,y1} and builds {{x0,y0},{x1,y1}}.
+  // The 1.0/32768.0 scale is the 16-bit denormal trick described in the section header.
+  AW2 ABuc01ToW2(AH2 x,AH2 y){x*=AH2_(1.0/32768.0);y*=AH2_(1.0/32768.0);
+   return AW2_AU1(APermGCEA(AU2(AU1_AW2(AW2_AH2(x)),AU1_AW2(AW2_AH2(y)))));}
+//------------------------------------------------------------------------------------------------------------------------------
+  // Designed for 3 ops to do SOA to AOS and conversion.
+  AU2 ABuc0ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)));
+   return AU2(APermHGFA(AU2(d.x,b)),APermHGFC(AU2(d.y,b)));}
+  AU2 ABuc1ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)));
+   return AU2(APermHGAE(AU2(d.x,b)),APermHGCE(AU2(d.y,b)));}
+  AU2 ABuc2ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)));
+   return AU2(APermHAFE(AU2(d.x,b)),APermHCFE(AU2(d.y,b)));}
+  AU2 ABuc3ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)));
+   return AU2(APermAGFE(AU2(d.x,b)),APermCGFE(AU2(d.y,b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+  // Designed for 2 ops to do both AOS to SOA, and conversion.
+  AH2 ABuc0FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0E0A(i)))*AH2_(32768.0);}
+  AH2 ABuc1FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0F0B(i)))*AH2_(32768.0);}
+  AH2 ABuc2FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0G0C(i)))*AH2_(32768.0);}
+  AH2 ABuc3FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0H0D(i)))*AH2_(32768.0);}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [BSC] BYTE SIGNED CONVERSION
+//------------------------------------------------------------------------------------------------------------------------------
+// Similar to [BUC].
+// Works on a range of {-/+ A_BSC_<32,16>}, for <32-bit, and 16-bit> respectively.
+//------------------------------------------------------------------------------------------------------------------------------
+// ENCODING (without zero-based encoding)
+// ========
+// 0 = unused (can be used to mean something else)
+// 1 = lowest value
+// 128 = exact zero center (zero-based encoding)
+// 255 = highest value
+//------------------------------------------------------------------------------------------------------------------------------
+// Zero-based [Zb] flips the MSB bit of the byte (making 128 "exact zero" actually zero).
+// This is useful if there is a desire for cleared values to decode as zero.
+//------------------------------------------------------------------------------------------------------------------------------
+// BYTE : FLOAT - ABsc{0,1,2,3}{To,From}U2() - Designed for 16-bit denormal tricks and V_PERM_B32.
+// ==== =====
+// 0 : -127/512 (unused)
+// 1 : -126/512
+// 2 : -125/512
+// ...
+// 128 : 0
+// ...
+// 255 : 127/512
+// : 1/4 (just outside the encoding range)
+//==============================================================================================================================
+ // Peak range for 32-bit and 16-bit operations.
+ #define A_BSC_32 (127.0)
+ #define A_BSC_16 (127.0/512.0)
+//==============================================================================================================================
+ #if 1
+  // Signed byte insert: input is biased by +128 before the unsigned pack (so -128..127 maps to 0..255).
+  AU1 ABsc0ToU1(AU1 d,AF1 i){return (d&0xffffff00u)|((min(AU1(i+128.0),255u)    )&(0x000000ffu));}
+  AU1 ABsc1ToU1(AU1 d,AF1 i){return (d&0xffff00ffu)|((min(AU1(i+128.0),255u)<< 8)&(0x0000ff00u));}
+  AU1 ABsc2ToU1(AU1 d,AF1 i){return (d&0xff00ffffu)|((min(AU1(i+128.0),255u)<<16)&(0x00ff0000u));}
+  AU1 ABsc3ToU1(AU1 d,AF1 i){return (d&0x00ffffffu)|((min(AU1(i+128.0),255u)<<24)&(0xff000000u));}
+//------------------------------------------------------------------------------------------------------------------------------
+  // 'Zb' (zero-based) variants XOR the byte's MSB so an all-zero word decodes to 0.0 (see note above).
+  AU1 ABsc0ToZbU1(AU1 d,AF1 i){return ((d&0xffffff00u)|((min(AU1(trunc(i)+128.0),255u)    )&(0x000000ffu)))^0x00000080u;}
+  AU1 ABsc1ToZbU1(AU1 d,AF1 i){return ((d&0xffff00ffu)|((min(AU1(trunc(i)+128.0),255u)<< 8)&(0x0000ff00u)))^0x00008000u;}
+  AU1 ABsc2ToZbU1(AU1 d,AF1 i){return ((d&0xff00ffffu)|((min(AU1(trunc(i)+128.0),255u)<<16)&(0x00ff0000u)))^0x00800000u;}
+  AU1 ABsc3ToZbU1(AU1 d,AF1 i){return ((d&0x00ffffffu)|((min(AU1(trunc(i)+128.0),255u)<<24)&(0xff000000u)))^0x80000000u;}
+//------------------------------------------------------------------------------------------------------------------------------
+  // Extract and un-bias.
+  AF1 ABsc0FromU1(AU1 i){return AF1((i    )&255u)-128.0;}
+  AF1 ABsc1FromU1(AU1 i){return AF1((i>> 8)&255u)-128.0;}
+  AF1 ABsc2FromU1(AU1 i){return AF1((i>>16)&255u)-128.0;}
+  AF1 ABsc3FromU1(AU1 i){return AF1((i>>24)&255u)-128.0;}
+//------------------------------------------------------------------------------------------------------------------------------
+  AF1 ABsc0FromZbU1(AU1 i){return AF1(((i    )&255u)^0x80u)-128.0;}
+  AF1 ABsc1FromZbU1(AU1 i){return AF1(((i>> 8)&255u)^0x80u)-128.0;}
+  AF1 ABsc2FromZbU1(AU1 i){return AF1(((i>>16)&255u)^0x80u)-128.0;}
+  AF1 ABsc3FromZbU1(AU1 i){return AF1(((i>>24)&255u)^0x80u)-128.0;}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+  // Takes {x0,x1} and {y0,y1} and builds {{x0,y0},{x1,y1}}.
+  // Same packers as the unsigned [BUC] half versions above; the +0.25/32768 bias (and the matching
+  // -0.25 on decode) provides the signed offset.
+  AW2 ABsc01ToW2(AH2 x,AH2 y){x=x*AH2_(1.0/32768.0)+AH2_(0.25/32768.0);y=y*AH2_(1.0/32768.0)+AH2_(0.25/32768.0);
+   return AW2_AU1(APermGCEA(AU2(AU1_AW2(AW2_AH2(x)),AU1_AW2(AW2_AH2(y)))));}
+//------------------------------------------------------------------------------------------------------------------------------
+  AU2 ABsc0ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)));
+   return AU2(APermHGFA(AU2(d.x,b)),APermHGFC(AU2(d.y,b)));}
+  AU2 ABsc1ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)));
+   return AU2(APermHGAE(AU2(d.x,b)),APermHGCE(AU2(d.y,b)));}
+  AU2 ABsc2ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)));
+   return AU2(APermHAFE(AU2(d.x,b)),APermHCFE(AU2(d.y,b)));}
+  AU2 ABsc3ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)));
+   return AU2(APermAGFE(AU2(d.x,b)),APermCGFE(AU2(d.y,b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+  // Zero-based variants: XOR 0x00800080 flips the MSB of both packed bytes.
+  AU2 ABsc0ToZbU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)))^0x00800080u;
+   return AU2(APermHGFA(AU2(d.x,b)),APermHGFC(AU2(d.y,b)));}
+  AU2 ABsc1ToZbU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)))^0x00800080u;
+   return AU2(APermHGAE(AU2(d.x,b)),APermHGCE(AU2(d.y,b)));}
+  AU2 ABsc2ToZbU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)))^0x00800080u;
+   return AU2(APermHAFE(AU2(d.x,b)),APermHCFE(AU2(d.y,b)));}
+  AU2 ABsc3ToZbU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)))^0x00800080u;
+   return AU2(APermAGFE(AU2(d.x,b)),APermCGFE(AU2(d.y,b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+  AH2 ABsc0FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0E0A(i)))*AH2_(32768.0)-AH2_(0.25);}
+  AH2 ABsc1FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0F0B(i)))*AH2_(32768.0)-AH2_(0.25);}
+  AH2 ABsc2FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0G0C(i)))*AH2_(32768.0)-AH2_(0.25);}
+  AH2 ABsc3FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0H0D(i)))*AH2_(32768.0)-AH2_(0.25);}
+//------------------------------------------------------------------------------------------------------------------------------
+  AH2 ABsc0FromZbU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0E0A(i)^0x00800080u))*AH2_(32768.0)-AH2_(0.25);}
+  AH2 ABsc1FromZbU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0F0B(i)^0x00800080u))*AH2_(32768.0)-AH2_(0.25);}
+  AH2 ABsc2FromZbU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0G0C(i)^0x00800080u))*AH2_(32768.0)-AH2_(0.25);}
+  AH2 ABsc3FromZbU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0H0D(i)^0x00800080u))*AH2_(32768.0)-AH2_(0.25);}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HALF APPROXIMATIONS
+//------------------------------------------------------------------------------------------------------------------------------
+// These support only positive inputs.
+// Did not see value yet in specialization for range.
+// Using quick testing, ended up mostly getting the same "best" approximation for various ranges.
+// With hardware that can co-execute transcendentals, the value in approximations could be less than expected.
+// However from a latency perspective, if execution of a transcendental is 4 clk, with no packed support, -> 8 clk total.
+// And co-execution would require a compiler interleaving a lot of independent work for packed usage.
+//------------------------------------------------------------------------------------------------------------------------------
+// The one Newton Raphson iteration form of rsq() was skipped (requires 6 ops total).
+// Same with sqrt(), as this could be x*rsq() (7 ops).
+//==============================================================================================================================
+ #ifdef A_HALF
+  // All approximations below operate on the raw half bit pattern via the AW*_AH*/AH*_AW* bit casts;
+  // positive inputs only (see section header).
+  // Minimize squared error across full positive range, 2 ops.
+  // The 0x1de2 based approximation maps {0 to 1} input to < 1 output.
+  AH1 APrxLoSqrtH1(AH1 a){return AH1_AW1((AW1_AH1(a)>>AW1_(1))+AW1_(0x1de2));}
+  AH2 APrxLoSqrtH2(AH2 a){return AH2_AW2((AW2_AH2(a)>>AW2_(1))+AW2_(0x1de2));}
+  AH3 APrxLoSqrtH3(AH3 a){return AH3_AW3((AW3_AH3(a)>>AW3_(1))+AW3_(0x1de2));}
+  AH4 APrxLoSqrtH4(AH4 a){return AH4_AW4((AW4_AH4(a)>>AW4_(1))+AW4_(0x1de2));}
+//------------------------------------------------------------------------------------------------------------------------------
+  // Lower precision estimation, 1 op.
+  // Minimize squared error across {smallest normal to 16384.0}.
+  AH1 APrxLoRcpH1(AH1 a){return AH1_AW1(AW1_(0x7784)-AW1_AH1(a));}
+  AH2 APrxLoRcpH2(AH2 a){return AH2_AW2(AW2_(0x7784)-AW2_AH2(a));}
+  AH3 APrxLoRcpH3(AH3 a){return AH3_AW3(AW3_(0x7784)-AW3_AH3(a));}
+  AH4 APrxLoRcpH4(AH4 a){return AH4_AW4(AW4_(0x7784)-AW4_AH4(a));}
+//------------------------------------------------------------------------------------------------------------------------------
+  // Medium precision estimation, one Newton Raphson iteration, 3 ops.
+  AH1 APrxMedRcpH1(AH1 a){AH1 b=AH1_AW1(AW1_(0x778d)-AW1_AH1(a));return b*(-b*a+AH1_(2.0));}
+  AH2 APrxMedRcpH2(AH2 a){AH2 b=AH2_AW2(AW2_(0x778d)-AW2_AH2(a));return b*(-b*a+AH2_(2.0));}
+  AH3 APrxMedRcpH3(AH3 a){AH3 b=AH3_AW3(AW3_(0x778d)-AW3_AH3(a));return b*(-b*a+AH3_(2.0));}
+  AH4 APrxMedRcpH4(AH4 a){AH4 b=AH4_AW4(AW4_(0x778d)-AW4_AH4(a));return b*(-b*a+AH4_(2.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+  // Minimize squared error across {smallest normal to 16384.0}, 2 ops.
+  AH1 APrxLoRsqH1(AH1 a){return AH1_AW1(AW1_(0x59a3)-(AW1_AH1(a)>>AW1_(1)));}
+  AH2 APrxLoRsqH2(AH2 a){return AH2_AW2(AW2_(0x59a3)-(AW2_AH2(a)>>AW2_(1)));}
+  AH3 APrxLoRsqH3(AH3 a){return AH3_AW3(AW3_(0x59a3)-(AW3_AH3(a)>>AW3_(1)));}
+  AH4 APrxLoRsqH4(AH4 a){return AH4_AW4(AW4_(0x59a3)-(AW4_AH4(a)>>AW4_(1)));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// FLOAT APPROXIMATIONS
+//------------------------------------------------------------------------------------------------------------------------------
+// Michal Drobot has an excellent presentation on these: "Low Level Optimizations For GCN",
+// - Idea dates back to SGI, then to Quake 3, etc.
+// - https://michaldrobot.files.wordpress.com/2014/05/gcn_alu_opt_digitaldragons2014.pdf
+// - sqrt(x)=rsqrt(x)*x
+// - rcp(x)=rsqrt(x)*rsqrt(x) for positive x
+// - https://github.com/michaldrobot/ShaderFastLibs/blob/master/ShaderFastMathLib.h
+//------------------------------------------------------------------------------------------------------------------------------
+// These below are from perhaps less complete searching for optimal.
+// Used FP16 normal range for testing with +4096 32-bit step size for sampling error.
+// So these match up well with the half approximations.
+//==============================================================================================================================
+ // Scalar forms; the F2/F3/F4 versions below are element-wise copies using the same magic constants.
+ AF1 APrxLoSqrtF1(AF1 a){return AF1_AU1((AU1_AF1(a)>>AU1_(1))+AU1_(0x1fbc4639));}
+ AF1 APrxLoRcpF1(AF1 a){return AF1_AU1(AU1_(0x7ef07ebb)-AU1_AF1(a));}
+ AF1 APrxMedRcpF1(AF1 a){AF1 b=AF1_AU1(AU1_(0x7ef19fff)-AU1_AF1(a));return b*(-b*a+AF1_(2.0));}
+ AF1 APrxLoRsqF1(AF1 a){return AF1_AU1(AU1_(0x5f347d74)-(AU1_AF1(a)>>AU1_(1)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 APrxLoSqrtF2(AF2 a){return AF2_AU2((AU2_AF2(a)>>AU2_(1))+AU2_(0x1fbc4639));}
+ AF2 APrxLoRcpF2(AF2 a){return AF2_AU2(AU2_(0x7ef07ebb)-AU2_AF2(a));}
+ AF2 APrxMedRcpF2(AF2 a){AF2 b=AF2_AU2(AU2_(0x7ef19fff)-AU2_AF2(a));return b*(-b*a+AF2_(2.0));}
+ AF2 APrxLoRsqF2(AF2 a){return AF2_AU2(AU2_(0x5f347d74)-(AU2_AF2(a)>>AU2_(1)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF3 APrxLoSqrtF3(AF3 a){return AF3_AU3((AU3_AF3(a)>>AU3_(1))+AU3_(0x1fbc4639));}
+ AF3 APrxLoRcpF3(AF3 a){return AF3_AU3(AU3_(0x7ef07ebb)-AU3_AF3(a));}
+ AF3 APrxMedRcpF3(AF3 a){AF3 b=AF3_AU3(AU3_(0x7ef19fff)-AU3_AF3(a));return b*(-b*a+AF3_(2.0));}
+ AF3 APrxLoRsqF3(AF3 a){return AF3_AU3(AU3_(0x5f347d74)-(AU3_AF3(a)>>AU3_(1)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF4 APrxLoSqrtF4(AF4 a){return AF4_AU4((AU4_AF4(a)>>AU4_(1))+AU4_(0x1fbc4639));}
+ AF4 APrxLoRcpF4(AF4 a){return AF4_AU4(AU4_(0x7ef07ebb)-AU4_AF4(a));}
+ AF4 APrxMedRcpF4(AF4 a){AF4 b=AF4_AU4(AU4_(0x7ef19fff)-AU4_AF4(a));return b*(-b*a+AF4_(2.0));}
+ AF4 APrxLoRsqF4(AF4 a){return AF4_AU4(AU4_(0x5f347d74)-(AU4_AF4(a)>>AU4_(1)));}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// PQ APPROXIMATIONS
+//------------------------------------------------------------------------------------------------------------------------------
+// PQ is very close to x^(1/8). The functions below use the fast float approximation method to do
+// PQ<~>Gamma2 (4th power and fast 4th root) and PQ<~>Linear (8th power and fast 8th root). Maximum error is ~0.2%.
+//==============================================================================================================================
+// Helpers
+ // Quart(a)=a^4 and Oct(a)=a^8 by repeated squaring; used by the PQ power approximations below.
+ AF1 Quart(AF1 a) { a = a * a; return a * a;}
+ AF1 Oct(AF1 a) { a = a * a; a = a * a; return a * a; }
+ AF2 Quart(AF2 a) { a = a * a; return a * a; }
+ AF2 Oct(AF2 a) { a = a * a; a = a * a; return a * a; }
+ AF3 Quart(AF3 a) { a = a * a; return a * a; }
+ AF3 Oct(AF3 a) { a = a * a; a = a * a; return a * a; }
+ AF4 Quart(AF4 a) { a = a * a; return a * a; }
+ AF4 Oct(AF4 a) { a = a * a; a = a * a; return a * a; }
+ //------------------------------------------------------------------------------------------------------------------------------
+ // Lo = raw bit-trick root estimate; Med = estimate refined by one Newton step on b^n = a; High = exact root via sqrt chain.
+ AF1 APrxPQToGamma2(AF1 a) { return Quart(a); }
+ AF1 APrxPQToLinear(AF1 a) { return Oct(a); }
+ AF1 APrxLoGamma2ToPQ(AF1 a) { return AF1_AU1((AU1_AF1(a) >> AU1_(2)) + AU1_(0x2F9A4E46)); }
+ AF1 APrxMedGamma2ToPQ(AF1 a) { AF1 b = AF1_AU1((AU1_AF1(a) >> AU1_(2)) + AU1_(0x2F9A4E46)); AF1 b4 = Quart(b); return b - b * (b4 - a) / (AF1_(4.0) * b4); }
+ AF1 APrxHighGamma2ToPQ(AF1 a) { return sqrt(sqrt(a)); }
+ AF1 APrxLoLinearToPQ(AF1 a) { return AF1_AU1((AU1_AF1(a) >> AU1_(3)) + AU1_(0x378D8723)); }
+ AF1 APrxMedLinearToPQ(AF1 a) { AF1 b = AF1_AU1((AU1_AF1(a) >> AU1_(3)) + AU1_(0x378D8723)); AF1 b8 = Oct(b); return b - b * (b8 - a) / (AF1_(8.0) * b8); }
+ AF1 APrxHighLinearToPQ(AF1 a) { return sqrt(sqrt(sqrt(a))); }
+ //------------------------------------------------------------------------------------------------------------------------------
+ // 2-component vector forms (the Newton-step divisors stay scalar AF1_ constants, broadcast over the vector).
+ AF2 APrxPQToGamma2(AF2 a) { return Quart(a); }
+ AF2 APrxPQToLinear(AF2 a) { return Oct(a); }
+ AF2 APrxLoGamma2ToPQ(AF2 a) { return AF2_AU2((AU2_AF2(a) >> AU2_(2)) + AU2_(0x2F9A4E46)); }
+ AF2 APrxMedGamma2ToPQ(AF2 a) { AF2 b = AF2_AU2((AU2_AF2(a) >> AU2_(2)) + AU2_(0x2F9A4E46)); AF2 b4 = Quart(b); return b - b * (b4 - a) / (AF1_(4.0) * b4); }
+ AF2 APrxHighGamma2ToPQ(AF2 a) { return sqrt(sqrt(a)); }
+ AF2 APrxLoLinearToPQ(AF2 a) { return AF2_AU2((AU2_AF2(a) >> AU2_(3)) + AU2_(0x378D8723)); }
+ AF2 APrxMedLinearToPQ(AF2 a) { AF2 b = AF2_AU2((AU2_AF2(a) >> AU2_(3)) + AU2_(0x378D8723)); AF2 b8 = Oct(b); return b - b * (b8 - a) / (AF1_(8.0) * b8); }
+ AF2 APrxHighLinearToPQ(AF2 a) { return sqrt(sqrt(sqrt(a))); }
+ //------------------------------------------------------------------------------------------------------------------------------
+ // 3-component vector forms.
+ AF3 APrxPQToGamma2(AF3 a) { return Quart(a); }
+ AF3 APrxPQToLinear(AF3 a) { return Oct(a); }
+ AF3 APrxLoGamma2ToPQ(AF3 a) { return AF3_AU3((AU3_AF3(a) >> AU3_(2)) + AU3_(0x2F9A4E46)); }
+ AF3 APrxMedGamma2ToPQ(AF3 a) { AF3 b = AF3_AU3((AU3_AF3(a) >> AU3_(2)) + AU3_(0x2F9A4E46)); AF3 b4 = Quart(b); return b - b * (b4 - a) / (AF1_(4.0) * b4); }
+ AF3 APrxHighGamma2ToPQ(AF3 a) { return sqrt(sqrt(a)); }
+ AF3 APrxLoLinearToPQ(AF3 a) { return AF3_AU3((AU3_AF3(a) >> AU3_(3)) + AU3_(0x378D8723)); }
+ AF3 APrxMedLinearToPQ(AF3 a) { AF3 b = AF3_AU3((AU3_AF3(a) >> AU3_(3)) + AU3_(0x378D8723)); AF3 b8 = Oct(b); return b - b * (b8 - a) / (AF1_(8.0) * b8); }
+ AF3 APrxHighLinearToPQ(AF3 a) { return sqrt(sqrt(sqrt(a))); }
+ //------------------------------------------------------------------------------------------------------------------------------
+ // 4-component vector forms.
+ AF4 APrxPQToGamma2(AF4 a) { return Quart(a); }
+ AF4 APrxPQToLinear(AF4 a) { return Oct(a); }
+ AF4 APrxLoGamma2ToPQ(AF4 a) { return AF4_AU4((AU4_AF4(a) >> AU4_(2)) + AU4_(0x2F9A4E46)); }
+ AF4 APrxMedGamma2ToPQ(AF4 a) { AF4 b = AF4_AU4((AU4_AF4(a) >> AU4_(2)) + AU4_(0x2F9A4E46)); AF4 b4 = Quart(b); return b - b * (b4 - a) / (AF1_(4.0) * b4); }
+ AF4 APrxHighGamma2ToPQ(AF4 a) { return sqrt(sqrt(a)); }
+ AF4 APrxLoLinearToPQ(AF4 a) { return AF4_AU4((AU4_AF4(a) >> AU4_(3)) + AU4_(0x378D8723)); }
+ AF4 APrxMedLinearToPQ(AF4 a) { AF4 b = AF4_AU4((AU4_AF4(a) >> AU4_(3)) + AU4_(0x378D8723)); AF4 b8 = Oct(b); return b - b * (b8 - a) / (AF1_(8.0) * b8); }
+ AF4 APrxHighLinearToPQ(AF4 a) { return sqrt(sqrt(sqrt(a))); }
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// PARABOLIC SIN & COS
+//------------------------------------------------------------------------------------------------------------------------------
+// Approximate answers to transcendental questions.
+//------------------------------------------------------------------------------------------------------------------------------
+//==============================================================================================================================
+ #if 1
+ // Valid input range is {-1 to 1} representing {0 to 2 pi}.
+ // Output range is {-1/4 to 1/4} representing {-1 to 1}.
+ // Parabolic sine: x*|x|-x fits one MAD (plus the sign-folding |x|).
+ AF1 APSinF1(AF1 x){return x*abs(x)-x;} // MAD.
+ AF2 APSinF2(AF2 x){return x*abs(x)-x;}
+ // Cosine: remap the phase (scale, offset, fract, expand back to {-1,1}) then reuse the parabolic sine.
+ AF1 APCosF1(AF1 x){x=AFractF1(x*AF1_(0.5)+AF1_(0.75));x=x*AF1_(2.0)-AF1_(1.0);return APSinF1(x);} // 3x MAD, FRACT
+ AF2 APCosF2(AF2 x){x=AFractF2(x*AF2_(0.5)+AF2_(0.75));x=x*AF2_(2.0)-AF2_(1.0);return APSinF2(x);}
+ // Returns {sin,cos} packed into one AF2 by evaluating APSinF2 on {x, remapped x}.
+ AF2 APSinCosF1(AF1 x){AF1 y=AFractF1(x*AF1_(0.5)+AF1_(0.75));y=y*AF1_(2.0)-AF1_(1.0);return APSinF2(AF2(x,y));}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_HALF
+ // For a packed {sin,cos} pair,
+ // - Native takes 16 clocks and 4 issue slots (no packed transcendentals).
+ // - Parabolic takes 8 clocks and 8 issue slots (only fract is non-packed).
+ // Half-precision (FP16) versions of the parabolic sine/cosine above; same input/output ranges.
+ AH1 APSinH1(AH1 x){return x*abs(x)-x;}
+ AH2 APSinH2(AH2 x){return x*abs(x)-x;} // AND,FMA
+ AH1 APCosH1(AH1 x){x=AFractH1(x*AH1_(0.5)+AH1_(0.75));x=x*AH1_(2.0)-AH1_(1.0);return APSinH1(x);}
+ AH2 APCosH2(AH2 x){x=AFractH2(x*AH2_(0.5)+AH2_(0.75));x=x*AH2_(2.0)-AH2_(1.0);return APSinH2(x);} // 3x FMA, 2xFRACT, AND
+ AH2 APSinCosH1(AH1 x){AH1 y=AFractH1(x*AH1_(0.5)+AH1_(0.75));y=y*AH1_(2.0)-AH1_(1.0);return APSinH2(AH2(x,y));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [ZOL] ZERO ONE LOGIC
+//------------------------------------------------------------------------------------------------------------------------------
+// Conditional free logic designed for easy 16-bit packing, and backwards porting to 32-bit.
+//------------------------------------------------------------------------------------------------------------------------------
+// 0 := false
+// 1 := true
+//------------------------------------------------------------------------------------------------------------------------------
+// AndNot(x,y) -> !(x&y) .... One op.
+// AndOr(x,y,z) -> (x&y)|z ... One op.
+// GtZero(x) -> x>0.0 ..... One op.
+// Sel(x,y,z) -> x?y:z ..... Two ops, has no precision loss.
+// Signed(x) -> x<0.0 ..... One op.
+// ZeroPass(x,y) -> x?0:y ..... Two ops, 'y' is a pass through safe for aliasing as integer.
+//------------------------------------------------------------------------------------------------------------------------------
+// OPTIMIZATION NOTES
+// ==================
+// - On Vega to use 2 constants in a packed op, pass in as one AW2 or one AH2 'k.xy' and use as 'k.xx' and 'k.yy'.
+// For example 'a.xy*k.xx+k.yy'.
+//==============================================================================================================================
+ #if 1
+ // On {0,1} integer values min() acts as logical AND and max() as logical OR (branch free).
+ AU1 AZolAndU1(AU1 x,AU1 y){return min(x,y);}
+ AU2 AZolAndU2(AU2 x,AU2 y){return min(x,y);}
+ AU3 AZolAndU3(AU3 x,AU3 y){return min(x,y);}
+ AU4 AZolAndU4(AU4 x,AU4 y){return min(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // XOR with 1 flips a {0,1} value.
+ AU1 AZolNotU1(AU1 x){return x^AU1_(1);}
+ AU2 AZolNotU2(AU2 x){return x^AU2_(1);}
+ AU3 AZolNotU3(AU3 x){return x^AU3_(1);}
+ AU4 AZolNotU4(AU4 x){return x^AU4_(1);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AZolOrU1(AU1 x,AU1 y){return max(x,y);}
+ AU2 AZolOrU2(AU2 x,AU2 y){return max(x,y);}
+ AU3 AZolOrU3(AU3 x,AU3 y){return max(x,y);}
+ AU4 AZolOrU4(AU4 x,AU4 y){return max(x,y);}
+//==============================================================================================================================
+ // Float {0.0,1.0} <-> uint {0,1} conversions via plain constructor casts.
+ AU1 AZolF1ToU1(AF1 x){return AU1(x);}
+ AU2 AZolF2ToU2(AF2 x){return AU2(x);}
+ AU3 AZolF3ToU3(AF3 x){return AU3(x);}
+ AU4 AZolF4ToU4(AF4 x){return AU4(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // 2 ops, denormals don't work in 32-bit on PC (and if they are enabled, OMOD is disabled).
+ AU1 AZolNotF1ToU1(AF1 x){return AU1(AF1_(1.0)-x);}
+ AU2 AZolNotF2ToU2(AF2 x){return AU2(AF2_(1.0)-x);}
+ AU3 AZolNotF3ToU3(AF3 x){return AU3(AF3_(1.0)-x);}
+ AU4 AZolNotF4ToU4(AF4 x){return AU4(AF4_(1.0)-x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AZolU1ToF1(AU1 x){return AF1(x);}
+ AF2 AZolU2ToF2(AU2 x){return AF2(x);}
+ AF3 AZolU3ToF3(AU3 x){return AF3(x);}
+ AF4 AZolU4ToF4(AU4 x){return AF4(x);}
+//==============================================================================================================================
+ AF1 AZolAndF1(AF1 x,AF1 y){return min(x,y);}
+ AF2 AZolAndF2(AF2 x,AF2 y){return min(x,y);}
+ AF3 AZolAndF3(AF3 x,AF3 y){return min(x,y);}
+ AF4 AZolAndF4(AF4 x,AF4 y){return min(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // !(x&y) as a single FMA: 1-x*y. NOTE(review): the 'ASol' prefix looks like an upstream typo for
+ // 'AZol' — kept as-is since renaming would break existing callers of this vendored header; confirm upstream.
+ AF1 ASolAndNotF1(AF1 x,AF1 y){return (-x)*y+AF1_(1.0);}
+ AF2 ASolAndNotF2(AF2 x,AF2 y){return (-x)*y+AF2_(1.0);}
+ AF3 ASolAndNotF3(AF3 x,AF3 y){return (-x)*y+AF3_(1.0);}
+ AF4 ASolAndNotF4(AF4 x,AF4 y){return (-x)*y+AF4_(1.0);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // (x&y)|z as one saturated FMA.
+ AF1 AZolAndOrF1(AF1 x,AF1 y,AF1 z){return ASatF1(x*y+z);}
+ AF2 AZolAndOrF2(AF2 x,AF2 y,AF2 z){return ASatF2(x*y+z);}
+ AF3 AZolAndOrF3(AF3 x,AF3 y,AF3 z){return ASatF3(x*y+z);}
+ AF4 AZolAndOrF4(AF4 x,AF4 y,AF4 z){return ASatF4(x*y+z);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // x>0.0 as a {0,1} value: multiply by +INF then saturate.
+ AF1 AZolGtZeroF1(AF1 x){return ASatF1(x*AF1_(A_INFP_F));}
+ AF2 AZolGtZeroF2(AF2 x){return ASatF2(x*AF2_(A_INFP_F));}
+ AF3 AZolGtZeroF3(AF3 x){return ASatF3(x*AF3_(A_INFP_F));}
+ AF4 AZolGtZeroF4(AF4 x){return ASatF4(x*AF4_(A_INFP_F));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AZolNotF1(AF1 x){return AF1_(1.0)-x;}
+ AF2 AZolNotF2(AF2 x){return AF2_(1.0)-x;}
+ AF3 AZolNotF3(AF3 x){return AF3_(1.0)-x;}
+ AF4 AZolNotF4(AF4 x){return AF4_(1.0)-x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AZolOrF1(AF1 x,AF1 y){return max(x,y);}
+ AF2 AZolOrF2(AF2 x,AF2 y){return max(x,y);}
+ AF3 AZolOrF3(AF3 x,AF3 y){return max(x,y);}
+ AF4 AZolOrF4(AF4 x,AF4 y){return max(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // x?y:z via two FMAs: r = z - x*z, then x*y + r (no precision loss, per the section table above).
+ AF1 AZolSelF1(AF1 x,AF1 y,AF1 z){AF1 r=(-x)*z+z;return x*y+r;}
+ AF2 AZolSelF2(AF2 x,AF2 y,AF2 z){AF2 r=(-x)*z+z;return x*y+r;}
+ AF3 AZolSelF3(AF3 x,AF3 y,AF3 z){AF3 r=(-x)*z+z;return x*y+r;}
+ AF4 AZolSelF4(AF4 x,AF4 y,AF4 z){AF4 r=(-x)*z+z;return x*y+r;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // x<0.0 as a {0,1} value: multiply by -INF then saturate.
+ AF1 AZolSignedF1(AF1 x){return ASatF1(x*AF1_(A_INFN_F));}
+ AF2 AZolSignedF2(AF2 x){return ASatF2(x*AF2_(A_INFN_F));}
+ AF3 AZolSignedF3(AF3 x){return ASatF3(x*AF3_(A_INFN_F));}
+ AF4 AZolSignedF4(AF4 x){return ASatF4(x*AF4_(A_INFN_F));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // x?0:y, comparing the raw bit pattern of x so 'y' passes through unmodified (safe for integer aliasing).
+ AF1 AZolZeroPassF1(AF1 x,AF1 y){return AF1_AU1((AU1_AF1(x)!=AU1_(0))?AU1_(0):AU1_AF1(y));}
+ AF2 AZolZeroPassF2(AF2 x,AF2 y){return AF2_AU2((AU2_AF2(x)!=AU2_(0))?AU2_(0):AU2_AF2(y));}
+ AF3 AZolZeroPassF3(AF3 x,AF3 y){return AF3_AU3((AU3_AF3(x)!=AU3_(0))?AU3_(0):AU3_AF3(y));}
+ AF4 AZolZeroPassF4(AF4 x,AF4 y){return AF4_AU4((AU4_AF4(x)!=AU4_(0))?AU4_(0):AU4_AF4(y));}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+ // 16-bit packed versions of the ZOL operations above; same min/max/xor encoding of AND/OR/NOT.
+ AW1 AZolAndW1(AW1 x,AW1 y){return min(x,y);}
+ AW2 AZolAndW2(AW2 x,AW2 y){return min(x,y);}
+ AW3 AZolAndW3(AW3 x,AW3 y){return min(x,y);}
+ AW4 AZolAndW4(AW4 x,AW4 y){return min(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AZolNotW1(AW1 x){return x^AW1_(1);}
+ AW2 AZolNotW2(AW2 x){return x^AW2_(1);}
+ AW3 AZolNotW3(AW3 x){return x^AW3_(1);}
+ AW4 AZolNotW4(AW4 x){return x^AW4_(1);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AZolOrW1(AW1 x,AW1 y){return max(x,y);}
+ AW2 AZolOrW2(AW2 x,AW2 y){return max(x,y);}
+ AW3 AZolOrW3(AW3 x,AW3 y){return max(x,y);}
+ AW4 AZolOrW4(AW4 x,AW4 y){return max(x,y);}
+//==============================================================================================================================
+ // Uses denormal trick.
+ AW1 AZolH1ToW1(AH1 x){return AW1_AH1(x*AH1_AW1(AW1_(1)));}
+ AW2 AZolH2ToW2(AH2 x){return AW2_AH2(x*AH2_AW2(AW2_(1)));}
+ AW3 AZolH3ToW3(AH3 x){return AW3_AH3(x*AH3_AW3(AW3_(1)));}
+ AW4 AZolH4ToW4(AH4 x){return AW4_AH4(x*AH4_AW4(AW4_(1)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // AMD arch lacks a packed conversion opcode.
+ // NOTE(review): 'AZolW1ToH3' and 'AZolW2ToH4' appear to be upstream typos for 'AZolW3ToH3'/'AZolW4ToH4'
+ // (they take AW3/AW4 arguments) — kept as-is for compatibility with this vendored header; confirm upstream.
+ AH1 AZolW1ToH1(AW1 x){return AH1_AW1(x*AW1_AH1(AH1_(1.0)));}
+ AH2 AZolW2ToH2(AW2 x){return AH2_AW2(x*AW2_AH2(AH2_(1.0)));}
+ AH3 AZolW1ToH3(AW3 x){return AH3_AW3(x*AW3_AH3(AH3_(1.0)));}
+ AH4 AZolW2ToH4(AW4 x){return AH4_AW4(x*AW4_AH4(AH4_(1.0)));}
+//==============================================================================================================================
+ AH1 AZolAndH1(AH1 x,AH1 y){return min(x,y);}
+ AH2 AZolAndH2(AH2 x,AH2 y){return min(x,y);}
+ AH3 AZolAndH3(AH3 x,AH3 y){return min(x,y);}
+ AH4 AZolAndH4(AH4 x,AH4 y){return min(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // NOTE(review): same 'ASol' prefix typo as the FP32 group — kept for caller compatibility.
+ AH1 ASolAndNotH1(AH1 x,AH1 y){return (-x)*y+AH1_(1.0);}
+ AH2 ASolAndNotH2(AH2 x,AH2 y){return (-x)*y+AH2_(1.0);}
+ AH3 ASolAndNotH3(AH3 x,AH3 y){return (-x)*y+AH3_(1.0);}
+ AH4 ASolAndNotH4(AH4 x,AH4 y){return (-x)*y+AH4_(1.0);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolAndOrH1(AH1 x,AH1 y,AH1 z){return ASatH1(x*y+z);}
+ AH2 AZolAndOrH2(AH2 x,AH2 y,AH2 z){return ASatH2(x*y+z);}
+ AH3 AZolAndOrH3(AH3 x,AH3 y,AH3 z){return ASatH3(x*y+z);}
+ AH4 AZolAndOrH4(AH4 x,AH4 y,AH4 z){return ASatH4(x*y+z);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolGtZeroH1(AH1 x){return ASatH1(x*AH1_(A_INFP_H));}
+ AH2 AZolGtZeroH2(AH2 x){return ASatH2(x*AH2_(A_INFP_H));}
+ AH3 AZolGtZeroH3(AH3 x){return ASatH3(x*AH3_(A_INFP_H));}
+ AH4 AZolGtZeroH4(AH4 x){return ASatH4(x*AH4_(A_INFP_H));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolNotH1(AH1 x){return AH1_(1.0)-x;}
+ AH2 AZolNotH2(AH2 x){return AH2_(1.0)-x;}
+ AH3 AZolNotH3(AH3 x){return AH3_(1.0)-x;}
+ AH4 AZolNotH4(AH4 x){return AH4_(1.0)-x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolOrH1(AH1 x,AH1 y){return max(x,y);}
+ AH2 AZolOrH2(AH2 x,AH2 y){return max(x,y);}
+ AH3 AZolOrH3(AH3 x,AH3 y){return max(x,y);}
+ AH4 AZolOrH4(AH4 x,AH4 y){return max(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolSelH1(AH1 x,AH1 y,AH1 z){AH1 r=(-x)*z+z;return x*y+r;}
+ AH2 AZolSelH2(AH2 x,AH2 y,AH2 z){AH2 r=(-x)*z+z;return x*y+r;}
+ AH3 AZolSelH3(AH3 x,AH3 y,AH3 z){AH3 r=(-x)*z+z;return x*y+r;}
+ AH4 AZolSelH4(AH4 x,AH4 y,AH4 z){AH4 r=(-x)*z+z;return x*y+r;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolSignedH1(AH1 x){return ASatH1(x*AH1_(A_INFN_H));}
+ AH2 AZolSignedH2(AH2 x){return ASatH2(x*AH2_(A_INFN_H));}
+ AH3 AZolSignedH3(AH3 x){return ASatH3(x*AH3_(A_INFN_H));}
+ AH4 AZolSignedH4(AH4 x){return ASatH4(x*AH4_(A_INFN_H));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// COLOR CONVERSIONS
+//------------------------------------------------------------------------------------------------------------------------------
+// These are all linear to/from some other space (where 'linear' has been shortened out of the function name).
+// So 'ToGamma' is 'LinearToGamma', and 'FromGamma' is 'LinearFromGamma'.
+// These are branch free implementations.
+// The AToSrgbF1() function is useful for stores for compute shaders for GPUs without hardware linear->sRGB store conversion.
+//------------------------------------------------------------------------------------------------------------------------------
+// TRANSFER FUNCTIONS
+// ==================
+// 709 ..... Rec709 used for some HDTVs
+// Gamma ... Typically 2.2 for some PC displays, or 2.4-2.5 for CRTs, or 2.2 FreeSync2 native
+// Pq ...... PQ native for HDR10
+// Srgb .... The sRGB output, typical of PC displays, useful for 10-bit output, or storing to 8-bit UNORM without SRGB type
+// Two ..... Gamma 2.0, fastest conversion (useful for intermediate pass approximations)
+// Three ... Gamma 3.0, less fast, but good for HDR.
+//------------------------------------------------------------------------------------------------------------------------------
+// KEEPING TO SPEC
+// ===============
+// Both Rec.709 and sRGB have a linear segment which as spec'ed would intersect the curved segment 2 times.
+// (a.) For 8-bit sRGB, steps {0 to 10.3} are in the linear region (4% of the encoding range).
+// (b.) For 8-bit 709, steps {0 to 20.7} are in the linear region (8% of the encoding range).
+// Also there is a slight step in the transition regions.
+// Precision of the coefficients in the spec being the likely cause.
+// Main usage case of the sRGB code is to do the linear->sRGB conversion in a compute shader before store.
+// This is to work around lack of hardware (typically only ROP does the conversion for free).
+// To "correct" the linear segment, would be to introduce error, because hardware decode of sRGB->linear is fixed (and free).
+// So this header keeps with the spec.
+// For linear->sRGB transforms, the linear segment in some respects reduces error, because rounding in that region is linear.
+// Rounding in the curved region in hardware (and fast software code) introduces error due to rounding in non-linear.
+//------------------------------------------------------------------------------------------------------------------------------
+// FOR PQ
+// ======
+// Both input and output is {0.0-1.0}, and where output 1.0 represents 10000.0 cd/m^2.
+// All constants are only specified to FP32 precision.
+// External PQ source reference,
+// - https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESlib.Utilities_Color.a1.0.1.ctl
+//------------------------------------------------------------------------------------------------------------------------------
+// PACKED VERSIONS
+// ===============
+// These are the A*H2() functions.
+// There are no PQ functions as FP16 seemed to not have enough precision for the conversion.
+// The remaining functions are "good enough" for 8-bit, and maybe 10-bit if not concerned about a few 1-bit errors.
+// Precision is lowest in the 709 conversion, higher in sRGB, higher still in Two and Gamma (when using 2.2 at least).
+//------------------------------------------------------------------------------------------------------------------------------
+// NOTES
+// =====
+// Could be faster for PQ conversions to be in ALU or a texture lookup depending on usage case.
+//==============================================================================================================================
+ #if 1
+ // Linear -> Rec.709 OETF: linear segment below the j.x threshold, 1.099*c^0.45-0.099 above.
+ AF1 ATo709F1(AF1 c){AF3 j=AF3(0.018*4.5,4.5,0.45);AF2 k=AF2(1.099,-0.099);
+  return clamp(j.x ,c*j.y ,pow(c,j.z )*k.x +k.y );}
+ AF2 ATo709F2(AF2 c){AF3 j=AF3(0.018*4.5,4.5,0.45);AF2 k=AF2(1.099,-0.099);
+  return clamp(j.xx ,c*j.yy ,pow(c,j.zz )*k.xx +k.yy );}
+ AF3 ATo709F3(AF3 c){AF3 j=AF3(0.018*4.5,4.5,0.45);AF2 k=AF2(1.099,-0.099);
+  return clamp(j.xxx,c*j.yyy,pow(c,j.zzz)*k.xxx+k.yyy);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Note 'rcpX' is '1/x', where the 'x' is what would be used in AFromGamma().
+ AF1 AToGammaF1(AF1 c,AF1 rcpX){return pow(c,AF1_(rcpX));}
+ AF2 AToGammaF2(AF2 c,AF1 rcpX){return pow(c,AF2_(rcpX));}
+ AF3 AToGammaF3(AF3 c,AF1 rcpX){return pow(c,AF3_(rcpX));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Linear -> PQ (ST 2084) inverse EOTF; constants only carry FP32 precision (see section notes above).
+ // NOTE(review): the AF2/AF3 overloads below keep the 'F1' suffix — likely upstream typos for
+ // AToPqF2/AToPqF3. GLSL resolves them by argument type, so they work; renaming would break callers.
+ AF1 AToPqF1(AF1 x){AF1 p=pow(x,AF1_(0.159302));
+  return pow((AF1_(0.835938)+AF1_(18.8516)*p)/(AF1_(1.0)+AF1_(18.6875)*p),AF1_(78.8438));}
+ AF2 AToPqF1(AF2 x){AF2 p=pow(x,AF2_(0.159302));
+  return pow((AF2_(0.835938)+AF2_(18.8516)*p)/(AF2_(1.0)+AF2_(18.6875)*p),AF2_(78.8438));}
+ AF3 AToPqF1(AF3 x){AF3 p=pow(x,AF3_(0.159302));
+  return pow((AF3_(0.835938)+AF3_(18.8516)*p)/(AF3_(1.0)+AF3_(18.6875)*p),AF3_(78.8438));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Linear -> sRGB: linear segment below j.x, 1.055*c^(1/2.4)-0.055 above.
+ AF1 AToSrgbF1(AF1 c){AF3 j=AF3(0.0031308*12.92,12.92,1.0/2.4);AF2 k=AF2(1.055,-0.055);
+  return clamp(j.x ,c*j.y ,pow(c,j.z )*k.x +k.y );}
+ AF2 AToSrgbF2(AF2 c){AF3 j=AF3(0.0031308*12.92,12.92,1.0/2.4);AF2 k=AF2(1.055,-0.055);
+  return clamp(j.xx ,c*j.yy ,pow(c,j.zz )*k.xx +k.yy );}
+ AF3 AToSrgbF3(AF3 c){AF3 j=AF3(0.0031308*12.92,12.92,1.0/2.4);AF2 k=AF2(1.055,-0.055);
+  return clamp(j.xxx,c*j.yyy,pow(c,j.zzz)*k.xxx+k.yyy);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Linear -> gamma 2.0 is just a square root.
+ AF1 AToTwoF1(AF1 c){return sqrt(c);}
+ AF2 AToTwoF2(AF2 c){return sqrt(c);}
+ AF3 AToTwoF3(AF3 c){return sqrt(c);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AToThreeF1(AF1 c){return pow(c,AF1_(1.0/3.0));}
+ AF2 AToThreeF2(AF2 c){return pow(c,AF2_(1.0/3.0));}
+ AF3 AToThreeF3(AF3 c){return pow(c,AF3_(1.0/3.0));}
+ #endif
+//==============================================================================================================================
+ #if 1
+ // Unfortunately median won't work here.
+ // Rec.709 -> linear: branch-free ZOL select between the linear and power segments.
+ AF1 AFrom709F1(AF1 c){AF3 j=AF3(0.081/4.5,1.0/4.5,1.0/0.45);AF2 k=AF2(1.0/1.099,0.099/1.099);
+  return AZolSelF1(AZolSignedF1(c-j.x ),c*j.y ,pow(c*k.x +k.y ,j.z ));}
+ AF2 AFrom709F2(AF2 c){AF3 j=AF3(0.081/4.5,1.0/4.5,1.0/0.45);AF2 k=AF2(1.0/1.099,0.099/1.099);
+  return AZolSelF2(AZolSignedF2(c-j.xx ),c*j.yy ,pow(c*k.xx +k.yy ,j.zz ));}
+ AF3 AFrom709F3(AF3 c){AF3 j=AF3(0.081/4.5,1.0/4.5,1.0/0.45);AF2 k=AF2(1.0/1.099,0.099/1.099);
+  return AZolSelF3(AZolSignedF3(c-j.xxx),c*j.yyy,pow(c*k.xxx+k.yyy,j.zzz));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AFromGammaF1(AF1 c,AF1 x){return pow(c,AF1_(x));}
+ AF2 AFromGammaF2(AF2 c,AF1 x){return pow(c,AF2_(x));}
+ AF3 AFromGammaF3(AF3 c,AF1 x){return pow(c,AF3_(x));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // PQ (ST 2084) -> linear EOTF.
+ // NOTE(review): the AF2/AF3 overloads keep the 'F1' suffix — likely upstream typos for
+ // AFromPqF2/AFromPqF3; resolved by argument type, kept as-is for caller compatibility.
+ AF1 AFromPqF1(AF1 x){AF1 p=pow(x,AF1_(0.0126833));
+  return pow(ASatF1(p-AF1_(0.835938))/(AF1_(18.8516)-AF1_(18.6875)*p),AF1_(6.27739));}
+ AF2 AFromPqF1(AF2 x){AF2 p=pow(x,AF2_(0.0126833));
+  return pow(ASatF2(p-AF2_(0.835938))/(AF2_(18.8516)-AF2_(18.6875)*p),AF2_(6.27739));}
+ AF3 AFromPqF1(AF3 x){AF3 p=pow(x,AF3_(0.0126833));
+  return pow(ASatF3(p-AF3_(0.835938))/(AF3_(18.8516)-AF3_(18.6875)*p),AF3_(6.27739));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Unfortunately median won't work here.
+ // sRGB -> linear, same ZOL-select structure as AFrom709 above.
+ AF1 AFromSrgbF1(AF1 c){AF3 j=AF3(0.04045/12.92,1.0/12.92,2.4);AF2 k=AF2(1.0/1.055,0.055/1.055);
+  return AZolSelF1(AZolSignedF1(c-j.x ),c*j.y ,pow(c*k.x +k.y ,j.z ));}
+ AF2 AFromSrgbF2(AF2 c){AF3 j=AF3(0.04045/12.92,1.0/12.92,2.4);AF2 k=AF2(1.0/1.055,0.055/1.055);
+  return AZolSelF2(AZolSignedF2(c-j.xx ),c*j.yy ,pow(c*k.xx +k.yy ,j.zz ));}
+ AF3 AFromSrgbF3(AF3 c){AF3 j=AF3(0.04045/12.92,1.0/12.92,2.4);AF2 k=AF2(1.0/1.055,0.055/1.055);
+  return AZolSelF3(AZolSignedF3(c-j.xxx),c*j.yyy,pow(c*k.xxx+k.yyy,j.zzz));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AFromTwoF1(AF1 c){return c*c;}
+ AF2 AFromTwoF2(AF2 c){return c*c;}
+ AF3 AFromTwoF3(AF3 c){return c*c;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AFromThreeF1(AF1 c){return c*c*c;}
+ AF2 AFromThreeF2(AF2 c){return c*c*c;}
+ AF3 AFromThreeF3(AF3 c){return c*c*c;}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+ // Half-precision (FP16) linear -> encoded conversions; no PQ variants (insufficient FP16 precision per section notes).
+ AH1 ATo709H1(AH1 c){AH3 j=AH3(0.018*4.5,4.5,0.45);AH2 k=AH2(1.099,-0.099);
+  return clamp(j.x ,c*j.y ,pow(c,j.z )*k.x +k.y );}
+ AH2 ATo709H2(AH2 c){AH3 j=AH3(0.018*4.5,4.5,0.45);AH2 k=AH2(1.099,-0.099);
+  return clamp(j.xx ,c*j.yy ,pow(c,j.zz )*k.xx +k.yy );}
+ AH3 ATo709H3(AH3 c){AH3 j=AH3(0.018*4.5,4.5,0.45);AH2 k=AH2(1.099,-0.099);
+  return clamp(j.xxx,c*j.yyy,pow(c,j.zzz)*k.xxx+k.yyy);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AToGammaH1(AH1 c,AH1 rcpX){return pow(c,AH1_(rcpX));}
+ AH2 AToGammaH2(AH2 c,AH1 rcpX){return pow(c,AH2_(rcpX));}
+ AH3 AToGammaH3(AH3 c,AH1 rcpX){return pow(c,AH3_(rcpX));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AToSrgbH1(AH1 c){AH3 j=AH3(0.0031308*12.92,12.92,1.0/2.4);AH2 k=AH2(1.055,-0.055);
+  return clamp(j.x ,c*j.y ,pow(c,j.z )*k.x +k.y );}
+ AH2 AToSrgbH2(AH2 c){AH3 j=AH3(0.0031308*12.92,12.92,1.0/2.4);AH2 k=AH2(1.055,-0.055);
+  return clamp(j.xx ,c*j.yy ,pow(c,j.zz )*k.xx +k.yy );}
+ AH3 AToSrgbH3(AH3 c){AH3 j=AH3(0.0031308*12.92,12.92,1.0/2.4);AH2 k=AH2(1.055,-0.055);
+  return clamp(j.xxx,c*j.yyy,pow(c,j.zzz)*k.xxx+k.yyy);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AToTwoH1(AH1 c){return sqrt(c);}
+ AH2 AToTwoH2(AH2 c){return sqrt(c);}
+ AH3 AToTwoH3(AH3 c){return sqrt(c);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // NOTE(review): these half-precision cube-root encoders keep the 'F' suffix — likely upstream typos for
+ // AToThreeH1/H2/H3. They overload the FP32 AToThree* functions by argument type only; kept as-is
+ // because renaming would break existing callers of this vendored header.
+ AH1 AToThreeF1(AH1 c){return pow(c,AH1_(1.0/3.0));}
+ AH2 AToThreeF2(AH2 c){return pow(c,AH2_(1.0/3.0));}
+ AH3 AToThreeF3(AH3 c){return pow(c,AH3_(1.0/3.0));}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+ // Half-precision (FP16) encoded -> linear conversions, mirroring the FP32 AFrom* group above.
+ AH1 AFrom709H1(AH1 c){AH3 j=AH3(0.081/4.5,1.0/4.5,1.0/0.45);AH2 k=AH2(1.0/1.099,0.099/1.099);
+  return AZolSelH1(AZolSignedH1(c-j.x ),c*j.y ,pow(c*k.x +k.y ,j.z ));}
+ AH2 AFrom709H2(AH2 c){AH3 j=AH3(0.081/4.5,1.0/4.5,1.0/0.45);AH2 k=AH2(1.0/1.099,0.099/1.099);
+  return AZolSelH2(AZolSignedH2(c-j.xx ),c*j.yy ,pow(c*k.xx +k.yy ,j.zz ));}
+ AH3 AFrom709H3(AH3 c){AH3 j=AH3(0.081/4.5,1.0/4.5,1.0/0.45);AH2 k=AH2(1.0/1.099,0.099/1.099);
+  return AZolSelH3(AZolSignedH3(c-j.xxx),c*j.yyy,pow(c*k.xxx+k.yyy,j.zzz));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AFromGammaH1(AH1 c,AH1 x){return pow(c,AH1_(x));}
+ AH2 AFromGammaH2(AH2 c,AH1 x){return pow(c,AH2_(x));}
+ AH3 AFromGammaH3(AH3 c,AH1 x){return pow(c,AH3_(x));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // NOTE(review): 'AHromSrgbF1/F2/F3' look like upstream typos for 'AFromSrgbH1/H2/H3' —
+ // kept as-is since renaming would break existing callers of this vendored header; confirm upstream.
+ AH1 AHromSrgbF1(AH1 c){AH3 j=AH3(0.04045/12.92,1.0/12.92,2.4);AH2 k=AH2(1.0/1.055,0.055/1.055);
+  return AZolSelH1(AZolSignedH1(c-j.x ),c*j.y ,pow(c*k.x +k.y ,j.z ));}
+ AH2 AHromSrgbF2(AH2 c){AH3 j=AH3(0.04045/12.92,1.0/12.92,2.4);AH2 k=AH2(1.0/1.055,0.055/1.055);
+  return AZolSelH2(AZolSignedH2(c-j.xx ),c*j.yy ,pow(c*k.xx +k.yy ,j.zz ));}
+ AH3 AHromSrgbF3(AH3 c){AH3 j=AH3(0.04045/12.92,1.0/12.92,2.4);AH2 k=AH2(1.0/1.055,0.055/1.055);
+  return AZolSelH3(AZolSignedH3(c-j.xxx),c*j.yyy,pow(c*k.xxx+k.yyy,j.zzz));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AFromTwoH1(AH1 c){return c*c;}
+ AH2 AFromTwoH2(AH2 c){return c*c;}
+ AH3 AFromTwoH3(AH3 c){return c*c;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AFromThreeH1(AH1 c){return c*c*c;}
+ AH2 AFromThreeH2(AH2 c){return c*c*c;}
+ AH3 AFromThreeH3(AH3 c){return c*c*c;}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// CS REMAP
+//==============================================================================================================================
+ // Simple remap 64x1 to 8x8 with rotated 2x2 pixel quads in quad linear.
+ // 543210
+ // ======
+ // ..xxx.
+ // yy...y
+ AU2 ARmp8x8(AU1 a){return AU2(ABfe(a,1u,3u),ABfiM(ABfe(a,3u,3u),a,1u));}
+//==============================================================================================================================
+ // More complex remap 64x1 to 8x8 which is necessary for 2D wave reductions.
+ // 543210
+ // ======
+ // .xx..x
+ // y..yy.
+ // Details,
+ // LANE TO 8x8 MAPPING
+ // ===================
+ // 00 01 08 09 10 11 18 19
+ // 02 03 0a 0b 12 13 1a 1b
+ // 04 05 0c 0d 14 15 1c 1d
+ // 06 07 0e 0f 16 17 1e 1f
+ // 20 21 28 29 30 31 38 39
+ // 22 23 2a 2b 32 33 3a 3b
+ // 24 25 2c 2d 34 35 3c 3d
+ // 26 27 2e 2f 36 37 3e 3f
+ AU2 ARmpRed8x8(AU1 a){return AU2(ABfiM(ABfe(a,2u,3u),a,1u),ABfiM(ABfe(a,3u,3u),ABfe(a,1u,2u),2u));}
+//==============================================================================================================================
+ #ifdef A_HALF
+ // 16-bit (AW2) versions of the two remap helpers above; identical bit logic.
+ AW2 ARmp8x8H(AU1 a){return AW2(ABfe(a,1u,3u),ABfiM(ABfe(a,3u,3u),a,1u));}
+ AW2 ARmpRed8x8H(AU1 a){return AW2(ABfiM(ABfe(a,2u,3u),a,1u),ABfiM(ABfe(a,3u,3u),ABfe(a,1u,2u),2u));}
+ #endif
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// REFERENCE
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// IEEE FLOAT RULES
+// ================
+// - saturate(NaN)=0, saturate(-INF)=0, saturate(+INF)=1
+// - {+/-}0 * {+/-}INF = NaN
+// - -INF + (+INF) = NaN
+// - {+/-}0 / {+/-}0 = NaN
+// - {+/-}INF / {+/-}INF = NaN
+// - a<(-0) := sqrt(a) = NaN (a=-0.0 won't NaN)
+// - 0 == -0
+// - 4/0 = +INF
+// - 4/-0 = -INF
+// - 4+INF = +INF
+// - 4-INF = -INF
+// - 4*(+INF) = +INF
+// - 4*(-INF) = -INF
+// - -4*(+INF) = -INF
+// - sqrt(+INF) = +INF
+//------------------------------------------------------------------------------------------------------------------------------
+// FP16 ENCODING
+// =============
+// fedcba9876543210
+// ----------------
+// ......mmmmmmmmmm 10-bit mantissa (encodes 11-bit 0.5 to 1.0 except for denormals)
+// .eeeee.......... 5-bit exponent
+// .00000.......... denormals
+// .00001.......... -14 exponent
+// .11110.......... 15 exponent
+// .111110000000000 infinity
+// .11111nnnnnnnnnn NaN with n!=0
+// s............... sign
+//------------------------------------------------------------------------------------------------------------------------------
+// FP16/INT16 ALIASING DENORMAL
+// ============================
+// 11-bit unsigned integers alias with half float denormal/normal values,
+// 1 = 2^(-24) = 1/16777216 ....................... first denormal value
+// 2 = 2^(-23)
+// ...
+// 1023 = 2^(-14)*(1-2^(-10)) = 2^(-14)*(1-1/1024) ... last denormal value
+// 1024 = 2^(-14) = 1/16384 .......................... first normal value that still maps to integers
+// 2047 .............................................. last normal value that still maps to integers
+// Scaling limits,
+// 2^15 = 32768 ...................................... largest power of 2 scaling
+// Largest pow2 conversion mapping is at *32768,
+// 1 : 2^(-9) = 1/512
+// 2 : 1/256
+// 4 : 1/128
+// 8 : 1/64
+// 16 : 1/32
+// 32 : 1/16
+// 64 : 1/8
+// 128 : 1/4
+// 256 : 1/2
+// 512 : 1
+// 1024 : 2
+// 2047 : a little less than 4
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// GPU/CPU PORTABILITY
+//
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// This is the GPU implementation.
+// See the CPU implementation for docs.
+//==============================================================================================================================
+#ifdef A_GPU
+ // CPU/GPU portability shims: on the GPU side the boolean aliases map to GLSL
+ // literals and A_STATIC expands to nothing (no 'static' in shader code).
+ #define A_TRUE true
+ #define A_FALSE false
+ #define A_STATIC
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// VECTOR ARGUMENT/RETURN/INITIALIZATION PORTABILITY
+//==============================================================================================================================
+ // Portability macros mapping the header's conventions onto GLSL:
+ //   ret*       plain return type
+ //   in*/inout*/out*  parameter storage qualifiers
+ //   var*(x)    local variable declaration
+ //   init*(...) value construction
+ #define retAD2 AD2
+ #define retAD3 AD3
+ #define retAD4 AD4
+ #define retAF2 AF2
+ #define retAF3 AF3
+ #define retAF4 AF4
+ #define retAL2 AL2
+ #define retAL3 AL3
+ #define retAL4 AL4
+ #define retAU2 AU2
+ #define retAU3 AU3
+ #define retAU4 AU4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define inAD2 in AD2
+ #define inAD3 in AD3
+ #define inAD4 in AD4
+ #define inAF2 in AF2
+ #define inAF3 in AF3
+ #define inAF4 in AF4
+ #define inAL2 in AL2
+ #define inAL3 in AL3
+ #define inAL4 in AL4
+ #define inAU2 in AU2
+ #define inAU3 in AU3
+ #define inAU4 in AU4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define inoutAD2 inout AD2
+ #define inoutAD3 inout AD3
+ #define inoutAD4 inout AD4
+ #define inoutAF2 inout AF2
+ #define inoutAF3 inout AF3
+ #define inoutAF4 inout AF4
+ #define inoutAL2 inout AL2
+ #define inoutAL3 inout AL3
+ #define inoutAL4 inout AL4
+ #define inoutAU2 inout AU2
+ #define inoutAU3 inout AU3
+ #define inoutAU4 inout AU4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define outAD2 out AD2
+ #define outAD3 out AD3
+ #define outAD4 out AD4
+ #define outAF2 out AF2
+ #define outAF3 out AF3
+ #define outAF4 out AF4
+ #define outAL2 out AL2
+ #define outAL3 out AL3
+ #define outAL4 out AL4
+ #define outAU2 out AU2
+ #define outAU3 out AU3
+ #define outAU4 out AU4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define varAD2(x) AD2 x
+ #define varAD3(x) AD3 x
+ #define varAD4(x) AD4 x
+ #define varAF2(x) AF2 x
+ #define varAF3(x) AF3 x
+ #define varAF4(x) AF4 x
+ #define varAL2(x) AL2 x
+ #define varAL3(x) AL3 x
+ #define varAL4(x) AL4 x
+ #define varAU2(x) AU2 x
+ #define varAU3(x) AU3 x
+ #define varAU4(x) AU4 x
+//------------------------------------------------------------------------------------------------------------------------------
+ #define initAD2(x,y) AD2(x,y)
+ #define initAD3(x,y,z) AD3(x,y,z)
+ #define initAD4(x,y,z,w) AD4(x,y,z,w)
+ #define initAF2(x,y) AF2(x,y)
+ #define initAF3(x,y,z) AF3(x,y,z)
+ #define initAF4(x,y,z,w) AF4(x,y,z,w)
+ #define initAL2(x,y) AL2(x,y)
+ #define initAL3(x,y,z) AL3(x,y,z)
+ #define initAL4(x,y,z,w) AL4(x,y,z,w)
+ #define initAU2(x,y) AU2(x,y)
+ #define initAU3(x,y,z) AU3(x,y,z)
+ #define initAU4(x,y,z,w) AU4(x,y,z,w)
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// SCALAR RETURN OPS
+//==============================================================================================================================
+ // Scalar wrappers over GLSL intrinsics. Most cast their argument(s) to the
+ // target scalar type first (AD1/AF1 etc.); AMax*/AMin* pass operands through
+ // uncast.
+ #define AAbsD1(a) abs(AD1(a))
+ #define AAbsF1(a) abs(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ACosD1(a) cos(AD1(a))
+ #define ACosF1(a) cos(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ADotD2(a,b) dot(AD2(a),AD2(b))
+ #define ADotD3(a,b) dot(AD3(a),AD3(b))
+ #define ADotD4(a,b) dot(AD4(a),AD4(b))
+ #define ADotF2(a,b) dot(AF2(a),AF2(b))
+ #define ADotF3(a,b) dot(AF3(a),AF3(b))
+ #define ADotF4(a,b) dot(AF4(a),AF4(b))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AExp2D1(a) exp2(AD1(a))
+ #define AExp2F1(a) exp2(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AFloorD1(a) floor(AD1(a))
+ #define AFloorF1(a) floor(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ALog2D1(a) log2(AD1(a))
+ #define ALog2F1(a) log2(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AMaxD1(a,b) max(a,b)
+ #define AMaxF1(a,b) max(a,b)
+ #define AMaxL1(a,b) max(a,b)
+ #define AMaxU1(a,b) max(a,b)
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AMinD1(a,b) min(a,b)
+ #define AMinF1(a,b) min(a,b)
+ #define AMinL1(a,b) min(a,b)
+ #define AMinU1(a,b) min(a,b)
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASinD1(a) sin(AD1(a))
+ #define ASinF1(a) sin(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASqrtD1(a) sqrt(AD1(a))
+ #define ASqrtF1(a) sqrt(AF1(a))
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// SCALAR RETURN OPS - DEPENDENT
+//==============================================================================================================================
+ // pow() wrappers. NOTE(review): APowD1 casts the exponent with AF1 (not AD1);
+ // this matches the code as shipped — confirm against upstream ffx_a.h before
+ // "fixing", since GLSL pow() is not defined for doubles.
+ #define APowD1(a,b) pow(AD1(a),AF1(b))
+ #define APowF1(a,b) pow(AF1(a),AF1(b))
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// VECTOR OPS
+//------------------------------------------------------------------------------------------------------------------------------
+// These are added as needed for production or prototyping, so not necessarily a complete set.
+// They follow a convention of taking in a destination and also returning the destination value to increase utility.
+//==============================================================================================================================
+ #ifdef A_DUBL
+ // Double-precision vector ops. Convention: every op writes its result into the
+ // destination 'd' AND returns it, so calls can be nested/chained.
+ AD2 opAAbsD2(outAD2 d,inAD2 a){d=abs(a);return d;}
+ AD3 opAAbsD3(outAD3 d,inAD3 a){d=abs(a);return d;}
+ AD4 opAAbsD4(outAD4 d,inAD4 a){d=abs(a);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAAddD2(outAD2 d,inAD2 a,inAD2 b){d=a+b;return d;}
+ AD3 opAAddD3(outAD3 d,inAD3 a,inAD3 b){d=a+b;return d;}
+ AD4 opAAddD4(outAD4 d,inAD4 a,inAD4 b){d=a+b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // *One variants broadcast a scalar second operand.
+ AD2 opAAddOneD2(outAD2 d,inAD2 a,AD1 b){d=a+AD2_(b);return d;}
+ AD3 opAAddOneD3(outAD3 d,inAD3 a,AD1 b){d=a+AD3_(b);return d;}
+ AD4 opAAddOneD4(outAD4 d,inAD4 a,AD1 b){d=a+AD4_(b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opACpyD2(outAD2 d,inAD2 a){d=a;return d;}
+ AD3 opACpyD3(outAD3 d,inAD3 a){d=a;return d;}
+ AD4 opACpyD4(outAD4 d,inAD4 a){d=a;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opALerpD2(outAD2 d,inAD2 a,inAD2 b,inAD2 c){d=ALerpD2(a,b,c);return d;}
+ AD3 opALerpD3(outAD3 d,inAD3 a,inAD3 b,inAD3 c){d=ALerpD3(a,b,c);return d;}
+ AD4 opALerpD4(outAD4 d,inAD4 a,inAD4 b,inAD4 c){d=ALerpD4(a,b,c);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opALerpOneD2(outAD2 d,inAD2 a,inAD2 b,AD1 c){d=ALerpD2(a,b,AD2_(c));return d;}
+ AD3 opALerpOneD3(outAD3 d,inAD3 a,inAD3 b,AD1 c){d=ALerpD3(a,b,AD3_(c));return d;}
+ AD4 opALerpOneD4(outAD4 d,inAD4 a,inAD4 b,AD1 c){d=ALerpD4(a,b,AD4_(c));return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAMaxD2(outAD2 d,inAD2 a,inAD2 b){d=max(a,b);return d;}
+ AD3 opAMaxD3(outAD3 d,inAD3 a,inAD3 b){d=max(a,b);return d;}
+ AD4 opAMaxD4(outAD4 d,inAD4 a,inAD4 b){d=max(a,b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAMinD2(outAD2 d,inAD2 a,inAD2 b){d=min(a,b);return d;}
+ AD3 opAMinD3(outAD3 d,inAD3 a,inAD3 b){d=min(a,b);return d;}
+ AD4 opAMinD4(outAD4 d,inAD4 a,inAD4 b){d=min(a,b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAMulD2(outAD2 d,inAD2 a,inAD2 b){d=a*b;return d;}
+ AD3 opAMulD3(outAD3 d,inAD3 a,inAD3 b){d=a*b;return d;}
+ AD4 opAMulD4(outAD4 d,inAD4 a,inAD4 b){d=a*b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAMulOneD2(outAD2 d,inAD2 a,AD1 b){d=a*AD2_(b);return d;}
+ AD3 opAMulOneD3(outAD3 d,inAD3 a,AD1 b){d=a*AD3_(b);return d;}
+ AD4 opAMulOneD4(outAD4 d,inAD4 a,AD1 b){d=a*AD4_(b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opANegD2(outAD2 d,inAD2 a){d=-a;return d;}
+ AD3 opANegD3(outAD3 d,inAD3 a){d=-a;return d;}
+ AD4 opANegD4(outAD4 d,inAD4 a){d=-a;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opARcpD2(outAD2 d,inAD2 a){d=ARcpD2(a);return d;}
+ AD3 opARcpD3(outAD3 d,inAD3 a){d=ARcpD3(a);return d;}
+ AD4 opARcpD4(outAD4 d,inAD4 a){d=ARcpD4(a);return d;}
+ #endif
+//==============================================================================================================================
+ // Single-precision vector ops. Same convention as the double versions: each op
+ // writes its result into destination 'd' and returns it for chaining; *One
+ // variants broadcast a scalar second operand.
+ AF2 opAAbsF2(outAF2 d,inAF2 a){d=abs(a);return d;}
+ AF3 opAAbsF3(outAF3 d,inAF3 a){d=abs(a);return d;}
+ AF4 opAAbsF4(outAF4 d,inAF4 a){d=abs(a);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAAddF2(outAF2 d,inAF2 a,inAF2 b){d=a+b;return d;}
+ AF3 opAAddF3(outAF3 d,inAF3 a,inAF3 b){d=a+b;return d;}
+ AF4 opAAddF4(outAF4 d,inAF4 a,inAF4 b){d=a+b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAAddOneF2(outAF2 d,inAF2 a,AF1 b){d=a+AF2_(b);return d;}
+ AF3 opAAddOneF3(outAF3 d,inAF3 a,AF1 b){d=a+AF3_(b);return d;}
+ AF4 opAAddOneF4(outAF4 d,inAF4 a,AF1 b){d=a+AF4_(b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opACpyF2(outAF2 d,inAF2 a){d=a;return d;}
+ AF3 opACpyF3(outAF3 d,inAF3 a){d=a;return d;}
+ AF4 opACpyF4(outAF4 d,inAF4 a){d=a;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opALerpF2(outAF2 d,inAF2 a,inAF2 b,inAF2 c){d=ALerpF2(a,b,c);return d;}
+ AF3 opALerpF3(outAF3 d,inAF3 a,inAF3 b,inAF3 c){d=ALerpF3(a,b,c);return d;}
+ AF4 opALerpF4(outAF4 d,inAF4 a,inAF4 b,inAF4 c){d=ALerpF4(a,b,c);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opALerpOneF2(outAF2 d,inAF2 a,inAF2 b,AF1 c){d=ALerpF2(a,b,AF2_(c));return d;}
+ AF3 opALerpOneF3(outAF3 d,inAF3 a,inAF3 b,AF1 c){d=ALerpF3(a,b,AF3_(c));return d;}
+ AF4 opALerpOneF4(outAF4 d,inAF4 a,inAF4 b,AF1 c){d=ALerpF4(a,b,AF4_(c));return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAMaxF2(outAF2 d,inAF2 a,inAF2 b){d=max(a,b);return d;}
+ AF3 opAMaxF3(outAF3 d,inAF3 a,inAF3 b){d=max(a,b);return d;}
+ AF4 opAMaxF4(outAF4 d,inAF4 a,inAF4 b){d=max(a,b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAMinF2(outAF2 d,inAF2 a,inAF2 b){d=min(a,b);return d;}
+ AF3 opAMinF3(outAF3 d,inAF3 a,inAF3 b){d=min(a,b);return d;}
+ AF4 opAMinF4(outAF4 d,inAF4 a,inAF4 b){d=min(a,b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAMulF2(outAF2 d,inAF2 a,inAF2 b){d=a*b;return d;}
+ AF3 opAMulF3(outAF3 d,inAF3 a,inAF3 b){d=a*b;return d;}
+ AF4 opAMulF4(outAF4 d,inAF4 a,inAF4 b){d=a*b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAMulOneF2(outAF2 d,inAF2 a,AF1 b){d=a*AF2_(b);return d;}
+ AF3 opAMulOneF3(outAF3 d,inAF3 a,AF1 b){d=a*AF3_(b);return d;}
+ AF4 opAMulOneF4(outAF4 d,inAF4 a,AF1 b){d=a*AF4_(b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opANegF2(outAF2 d,inAF2 a){d=-a;return d;}
+ AF3 opANegF3(outAF3 d,inAF3 a){d=-a;return d;}
+ AF4 opANegF4(outAF4 d,inAF4 a){d=-a;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opARcpF2(outAF2 d,inAF2 a){d=ARcpF2(a);return d;}
+ AF3 opARcpF3(outAF3 d,inAF3 a){d=ARcpF3(a);return d;}
+ AF4 opARcpF4(outAF4 d,inAF4 a){d=ARcpF4(a);return d;}
+#endif
+
+// Request the 32-bit (*F) EASU implementation from ffx_fsr1.h below.
+#define FSR_EASU_F 1
+// EASU constant vectors; intended to be filled by FsrEasuCon (defined later in this file).
+AU4 con0, con1, con2, con3;
+// NOTE(review): srcW/srcH/dstW/dstH are not referenced anywhere in this chunk —
+// confirm they are written/read elsewhere in the shader before relying on them.
+float srcW, srcH, dstW, dstH;
+// Crop bounds (bottom-left / top-right corners), written by setBounds().
+vec2 bLeft, tRight;
+
+AF2 translate(AF2 pos) {
+    // Componentwise scale into source-texture space; scaleX/scaleY are
+    // declared elsewhere in this shader (outside this chunk).
+    return pos * AF2(scaleX, scaleY);
+}
+
+void setBounds(vec2 bottomLeft, vec2 topRight) {
+    // Stash the crop rectangle corners in the file-level globals.
+    tRight = topRight;
+    bLeft = bottomLeft;
+}
+
+// EASU input callbacks: gather4 one color channel (0=R, 1=G, 2=B) from Source
+// at the translated coordinate. Required by the FSR_EASU_F path of ffx_fsr1.h.
+AF4 FsrEasuRF(AF2 p) { return textureGather(Source, translate(p), 0); }
+AF4 FsrEasuGF(AF2 p) { return textureGather(Source, translate(p), 1); }
+AF4 FsrEasuBF(AF2 p) { return textureGather(Source, translate(p), 2); }
+
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// AMD FidelityFX SUPER RESOLUTION [FSR 1] ::: SPATIAL SCALING & EXTRAS - v1.20210629
+//
+//
+//------------------------------------------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//------------------------------------------------------------------------------------------------------------------------------
+// FidelityFX Super Resolution Sample
+//
+// Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files(the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions :
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//------------------------------------------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//------------------------------------------------------------------------------------------------------------------------------
+// ABOUT
+// =====
+// FSR is a collection of algorithms relating to generating a higher resolution image.
+// This specific header focuses on single-image non-temporal image scaling, and related tools.
+//
+// The core functions are EASU and RCAS:
+// [EASU] Edge Adaptive Spatial Upsampling ....... 1x to 4x area range spatial scaling, clamped adaptive elliptical filter.
+// [RCAS] Robust Contrast Adaptive Sharpening .... A non-scaling variation on CAS.
+// RCAS needs to be applied after EASU as a separate pass.
+//
+// Optional utility functions are:
+// [LFGA] Linear Film Grain Applicator ........... Tool to apply film grain after scaling.
+// [SRTM] Simple Reversible Tone-Mapper .......... Linear HDR {0 to FP16_MAX} to {0 to 1} and back.
+// [TEPD] Temporal Energy Preserving Dither ...... Temporally energy preserving dithered {0 to 1} linear to gamma 2.0 conversion.
+// See each individual sub-section for inline documentation.
+//------------------------------------------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//------------------------------------------------------------------------------------------------------------------------------
+// FUNCTION PERMUTATIONS
+// =====================
+// *F() ..... Single item computation with 32-bit.
+// *H() ..... Single item computation with 16-bit, with packing (aka two 16-bit ops in parallel) when possible.
+// *Hx2() ... Processing two items in parallel with 16-bit, easier packing.
+// Not all interfaces in this file have a *Hx2() form.
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [EASU] EDGE ADAPTIVE SPATIAL UPSAMPLING
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// EASU provides a high quality spatial-only scaling at relatively low cost.
+// Meaning EASU is appropriate for laptops and other low-end GPUs.
+// Quality from 1x to 4x area scaling is good.
+//------------------------------------------------------------------------------------------------------------------------------
+// The scalar uses a modified fast approximation to the standard lanczos(size=2) kernel.
+// EASU runs in a single pass, so it applies a directionally and anisotropically adaptive radial lanczos.
+// This is also kept as simple as possible to have minimum runtime.
+//------------------------------------------------------------------------------------------------------------------------------
+// The lanczos filter has negative lobes, so by itself it will introduce ringing.
+// To remove all ringing, the algorithm uses the nearest 2x2 input texels as a neighborhood,
+// and limits output to the minimum and maximum of that neighborhood.
+//------------------------------------------------------------------------------------------------------------------------------
+// Input image requirements:
+//
+// Color needs to be encoded as 3 channels [red, green, blue] (e.g. XYZ is not supported)
+// Each channel needs to be in the range [0, 1]
+// Any color primaries are supported
+// Display / tonemapping curve needs to be as if presenting to an sRGB display or similar (e.g. Gamma 2.0)
+// There should be no banding in the input
+// There should be no high amplitude noise in the input
+// There should be no noise in the input that is not at input pixel granularity
+// For performance purposes, use 32bpp formats
+//------------------------------------------------------------------------------------------------------------------------------
+// Best to apply EASU at the end of the frame after tonemapping
+// but before film grain or composite of the UI.
+//------------------------------------------------------------------------------------------------------------------------------
+// Example of including this header for D3D HLSL :
+//
+// #define A_GPU 1
+// #define A_HLSL 1
+// #define A_HALF 1
+// #include "ffx_a.h"
+// #define FSR_EASU_H 1
+// #define FSR_RCAS_H 1
+// //declare input callbacks
+// #include "ffx_fsr1.h"
+//
+// Example of including this header for Vulkan GLSL :
+//
+// #define A_GPU 1
+// #define A_GLSL 1
+// #define A_HALF 1
+// #include "ffx_a.h"
+// #define FSR_EASU_H 1
+// #define FSR_RCAS_H 1
+// //declare input callbacks
+// #include "ffx_fsr1.h"
+//
+// Example of including this header for Vulkan HLSL :
+//
+// #define A_GPU 1
+// #define A_HLSL 1
+// #define A_HLSL_6_2 1
+// #define A_NO_16_BIT_CAST 1
+// #define A_HALF 1
+// #include "ffx_a.h"
+// #define FSR_EASU_H 1
+// #define FSR_RCAS_H 1
+// //declare input callbacks
+// #include "ffx_fsr1.h"
+//
+// Example of declaring the required input callbacks for GLSL :
+// The callbacks need to gather4 for each color channel using the specified texture coordinate 'p'.
+// EASU uses gather4 to reduce position computation logic and for free Arrays of Structures to Structures of Arrays conversion.
+//
+// AH4 FsrEasuRH(AF2 p){return AH4(textureGather(sampler2D(tex,sam),p,0));}
+// AH4 FsrEasuGH(AF2 p){return AH4(textureGather(sampler2D(tex,sam),p,1));}
+// AH4 FsrEasuBH(AF2 p){return AH4(textureGather(sampler2D(tex,sam),p,2));}
+// ...
+// The FsrEasuCon function needs to be called from the CPU or GPU to set up constants.
+// The difference in viewport and input image size is there to support Dynamic Resolution Scaling.
+// To use FsrEasuCon() on the CPU, define A_CPU before including ffx_a and ffx_fsr1.
+// Including a GPU example here, the 'con0' through 'con3' values would be stored out to a constant buffer.
+// AU4 con0,con1,con2,con3;
+// FsrEasuCon(con0,con1,con2,con3,
+// 1920.0,1080.0, // Viewport size (top left aligned) in the input image which is to be scaled.
+// 3840.0,2160.0, // The size of the input image.
+// 2560.0,1440.0); // The output resolution.
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// CONSTANT SETUP
+//==============================================================================================================================
+// Call to setup required constant values (works on CPU or GPU).
+// Computes the 'con0' through 'con3' constant vectors consumed by the EASU filter functions below.
+A_STATIC void FsrEasuCon(
+outAU4 con0,
+outAU4 con1,
+outAU4 con2,
+outAU4 con3,
+// This is the rendered image resolution being upscaled
+AF1 inputViewportInPixelsX,
+AF1 inputViewportInPixelsY,
+// This is the resolution of the resource containing the input image (useful for dynamic resolution)
+AF1 inputSizeInPixelsX,
+AF1 inputSizeInPixelsY,
+// This is the display resolution which the input image gets upscaled to
+AF1 outputSizeInPixelsX,
+AF1 outputSizeInPixelsY){
+ // Output integer position to a pixel position in viewport.
+ con0[0]=AU1_AF1(inputViewportInPixelsX*ARcpF1(outputSizeInPixelsX));
+ con0[1]=AU1_AF1(inputViewportInPixelsY*ARcpF1(outputSizeInPixelsY));
+ con0[2]=AU1_AF1(AF1_(0.5)*inputViewportInPixelsX*ARcpF1(outputSizeInPixelsX)-AF1_(0.5));
+ con0[3]=AU1_AF1(AF1_(0.5)*inputViewportInPixelsY*ARcpF1(outputSizeInPixelsY)-AF1_(0.5));
+ // Viewport pixel position to normalized image space.
+ // This is used to get upper-left of 'F' tap.
+ con1[0]=AU1_AF1(ARcpF1(inputSizeInPixelsX));
+ con1[1]=AU1_AF1(ARcpF1(inputSizeInPixelsY));
+ // Centers of gather4, first offset from upper-left of 'F'.
+ //      +---+---+
+ //      |   |   |
+ //      +--(0)--+
+ //      | b | c |
+ //  +---F---+---+---+
+ //  | e | f | g | h |
+ //  +--(1)--+--(2)--+
+ //  | i | j | k | l |
+ //  +---+---+---+---+
+ //      | n | o |
+ //      +--(3)--+
+ //      |   |   |
+ //      +---+---+
+ con1[2]=AU1_AF1(AF1_( 1.0)*ARcpF1(inputSizeInPixelsX));
+ con1[3]=AU1_AF1(AF1_(-1.0)*ARcpF1(inputSizeInPixelsY));
+ // These are from (0) instead of 'F'.
+ con2[0]=AU1_AF1(AF1_(-1.0)*ARcpF1(inputSizeInPixelsX));
+ con2[1]=AU1_AF1(AF1_( 2.0)*ARcpF1(inputSizeInPixelsY));
+ con2[2]=AU1_AF1(AF1_( 1.0)*ARcpF1(inputSizeInPixelsX));
+ con2[3]=AU1_AF1(AF1_( 2.0)*ARcpF1(inputSizeInPixelsY));
+ con3[0]=AU1_AF1(AF1_( 0.0)*ARcpF1(inputSizeInPixelsX));
+ con3[1]=AU1_AF1(AF1_( 4.0)*ARcpF1(inputSizeInPixelsY));
+ // con3.zw are never read by the EASU filters (only con3.xy is used); zero them so the
+ // whole constant buffer is fully initialized.
+ con3[2]=con3[3]=0;}
+
+// Same as FsrEasuCon(), but additionally takes an offset of the input image into the resource containing it.
+// Builds the same constants as FsrEasuCon(), then rebases the output-to-input mapping
+// offsets in con0.zw so the viewport may start at the given pixel offset inside the
+// input resource (useful for dynamic resolution with a shared/atlas resource).
+A_STATIC void FsrEasuConOffset(
+    outAU4 con0,
+    outAU4 con1,
+    outAU4 con2,
+    outAU4 con3,
+    // This is the rendered image resolution being upscaled
+    AF1 inputViewportInPixelsX,
+    AF1 inputViewportInPixelsY,
+    // This is the resolution of the resource containing the input image (useful for dynamic resolution)
+    AF1 inputSizeInPixelsX,
+    AF1 inputSizeInPixelsY,
+    // This is the display resolution which the input image gets upscaled to
+    AF1 outputSizeInPixelsX,
+    AF1 outputSizeInPixelsY,
+    // This is the input image offset into the resource containing it (useful for dynamic resolution)
+    AF1 inputOffsetInPixelsX,
+    AF1 inputOffsetInPixelsY) {
+    FsrEasuCon(con0, con1, con2, con3, inputViewportInPixelsX, inputViewportInPixelsY, inputSizeInPixelsX, inputSizeInPixelsY, outputSizeInPixelsX, outputSizeInPixelsY);
+    // Only con0.zw change relative to FsrEasuCon(): the offset is added to the half-texel bias terms.
+    con0[2] = AU1_AF1(AF1_(0.5) * inputViewportInPixelsX * ARcpF1(outputSizeInPixelsX) - AF1_(0.5) + inputOffsetInPixelsX);
+    con0[3] = AU1_AF1(AF1_(0.5) * inputViewportInPixelsY * ARcpF1(outputSizeInPixelsY) - AF1_(0.5) + inputOffsetInPixelsY);
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// NON-PACKED 32-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(FSR_EASU_F)
+ // Input callback prototypes, need to be implemented by calling shader
+ AF4 FsrEasuRF(AF2 p);
+ AF4 FsrEasuGF(AF2 p);
+ AF4 FsrEasuBF(AF2 p);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Filtering for a given tap for the scalar.
+ // Accumulates one tap of the 12-tap kernel into the running color sum 'aC' and weight sum 'aW'.
+ void FsrEasuTapF(
+ inout AF3 aC, // Accumulated color, with negative lobe.
+ inout AF1 aW, // Accumulated weight.
+ AF2 off, // Pixel offset from resolve position to tap.
+ AF2 dir, // Gradient direction.
+ AF2 len, // Length.
+ AF1 lob, // Negative lobe strength.
+ AF1 clp, // Clipping point.
+ AF3 c){ // Tap color.
+ // Rotate offset by direction.
+ AF2 v;
+ v.x=(off.x*( dir.x))+(off.y*dir.y);
+ v.y=(off.x*(-dir.y))+(off.y*dir.x);
+ // Anisotropy.
+ v*=len;
+ // Compute distance^2.
+ AF1 d2=v.x*v.x+v.y*v.y;
+ // Limit to the window as at corner, 2 taps can easily be outside.
+ d2=min(d2,clp);
+ // Approximation of lanczos2 without sin() or rcp(), or sqrt() to get x.
+ //  (25/16 * (2/5 * x^2 - 1)^2 - (25/16 - 1)) * (1/4 * x^2 - 1)^2
+ //  |_______________________________________|   |_______________|
+ //                   base                             window
+ // The general form of the 'base' is,
+ //  (a*(b*x^2-1)^2-(a-1))
+ // Where 'a=1/(2*b-b^2)' and 'b' moves around the negative lobe.
+ AF1 wB=AF1_(2.0/5.0)*d2+AF1_(-1.0);
+ AF1 wA=lob*d2+AF1_(-1.0);
+ wB*=wB;
+ wA*=wA;
+ wB=AF1_(25.0/16.0)*wB+AF1_(-(25.0/16.0-1.0));
+ AF1 w=wB*wA;
+ // Do weighted average.
+ aC+=c*w;aW+=w;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Accumulate direction and length.
+ // Called once per 2x2 quad; exactly one of biS/biT/biU/biV is true per call, selecting
+ // which bilinear corner weight 'w' applies to this quad's contribution.
+ void FsrEasuSetF(
+ inout AF2 dir,
+ inout AF1 len,
+ AF2 pp,
+ AP1 biS,AP1 biT,AP1 biU,AP1 biV,
+ AF1 lA,AF1 lB,AF1 lC,AF1 lD,AF1 lE){
+ // Compute bilinear weight, branches factor out as predicates are compile time immediates.
+ //  s t
+ //  u v
+ AF1 w = AF1_(0.0);
+ if(biS)w=(AF1_(1.0)-pp.x)*(AF1_(1.0)-pp.y);
+ if(biT)w= pp.x *(AF1_(1.0)-pp.y);
+ if(biU)w=(AF1_(1.0)-pp.x)* pp.y ;
+ if(biV)w= pp.x * pp.y ;
+ // Direction is the '+' diff.
+ //    a
+ //  b c d
+ //    e
+ // Then takes magnitude from abs average of both sides of 'c'.
+ // Length converts gradient reversal to 0, smoothly to non-reversal at 1, shaped, then adding horz and vert terms.
+ AF1 dc=lD-lC;
+ AF1 cb=lC-lB;
+ AF1 lenX=max(abs(dc),abs(cb));
+ lenX=APrxLoRcpF1(lenX);
+ AF1 dirX=lD-lB;
+ dir.x+=dirX*w;
+ lenX=ASatF1(abs(dirX)*lenX);
+ lenX*=lenX;
+ len+=lenX*w;
+ // Repeat for the y axis.
+ AF1 ec=lE-lC;
+ AF1 ca=lC-lA;
+ AF1 lenY=max(abs(ec),abs(ca));
+ lenY=APrxLoRcpF1(lenY);
+ AF1 dirY=lE-lA;
+ dir.y+=dirY*w;
+ lenY=ASatF1(abs(dirY)*lenY);
+ lenY*=lenY;
+ len+=lenY*w;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // EASU scaling kernel (32-bit path): computes one output RGB pixel at integer position
+ // 'ip' using the 12-tap pattern below, fed by the FsrEasuRF/GF/BF gather callbacks and
+ // the constants produced by FsrEasuCon().
+ void FsrEasuF(
+ out AF3 pix,
+ AU2 ip, // Integer pixel position in output.
+ AU4 con0, // Constants generated by FsrEasuCon().
+ AU4 con1,
+ AU4 con2,
+ AU4 con3){
+//------------------------------------------------------------------------------------------------------------------------------
+ // Get position of 'f'.
+ AF2 pp=AF2(ip)*AF2_AU2(con0.xy)+AF2_AU2(con0.zw);
+ AF2 fp=floor(pp);
+ pp-=fp;
+//------------------------------------------------------------------------------------------------------------------------------
+ // 12-tap kernel.
+ //    b c
+ //  e f g h
+ //  i j k l
+ //    n o
+ // Gather 4 ordering.
+ //  a b
+ //  r g
+ // For packed FP16, need either {rg} or {ab} so using the following setup for gather in all versions,
+ //  a b    <- unused (z)
+ //  r g
+ //  a b a b
+ //  r g r g
+ //  a b
+ //  r g    <- unused (z)
+ // Allowing dead-code removal to remove the 'z's.
+ AF2 p0=fp*AF2_AU2(con1.xy)+AF2_AU2(con1.zw);
+ // These are from p0 to avoid pulling two constants on pre-Navi hardware.
+ AF2 p1=p0+AF2_AU2(con2.xy);
+ AF2 p2=p0+AF2_AU2(con2.zw);
+ AF2 p3=p0+AF2_AU2(con3.xy);
+ AF4 bczzR=FsrEasuRF(p0);
+ AF4 bczzG=FsrEasuGF(p0);
+ AF4 bczzB=FsrEasuBF(p0);
+ AF4 ijfeR=FsrEasuRF(p1);
+ AF4 ijfeG=FsrEasuGF(p1);
+ AF4 ijfeB=FsrEasuBF(p1);
+ AF4 klhgR=FsrEasuRF(p2);
+ AF4 klhgG=FsrEasuGF(p2);
+ AF4 klhgB=FsrEasuBF(p2);
+ AF4 zzonR=FsrEasuRF(p3);
+ AF4 zzonG=FsrEasuGF(p3);
+ AF4 zzonB=FsrEasuBF(p3);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Simplest multi-channel approximate luma possible (luma times 2, in 2 FMA/MAD).
+ AF4 bczzL=bczzB*AF4_(0.5)+(bczzR*AF4_(0.5)+bczzG);
+ AF4 ijfeL=ijfeB*AF4_(0.5)+(ijfeR*AF4_(0.5)+ijfeG);
+ AF4 klhgL=klhgB*AF4_(0.5)+(klhgR*AF4_(0.5)+klhgG);
+ AF4 zzonL=zzonB*AF4_(0.5)+(zzonR*AF4_(0.5)+zzonG);
+ // Rename.
+ AF1 bL=bczzL.x;
+ AF1 cL=bczzL.y;
+ AF1 iL=ijfeL.x;
+ AF1 jL=ijfeL.y;
+ AF1 fL=ijfeL.z;
+ AF1 eL=ijfeL.w;
+ AF1 kL=klhgL.x;
+ AF1 lL=klhgL.y;
+ AF1 hL=klhgL.z;
+ AF1 gL=klhgL.w;
+ AF1 oL=zzonL.z;
+ AF1 nL=zzonL.w;
+ // Accumulate direction and length over the four 2x2 quads nearest 'f',
+ // each weighted by its bilinear kernel position (see FsrEasuSetF).
+ AF2 dir=AF2_(0.0);
+ AF1 len=AF1_(0.0);
+ FsrEasuSetF(dir,len,pp,true, false,false,false,bL,eL,fL,gL,jL);
+ FsrEasuSetF(dir,len,pp,false,true ,false,false,cL,fL,gL,hL,kL);
+ FsrEasuSetF(dir,len,pp,false,false,true ,false,fL,iL,jL,kL,nL);
+ FsrEasuSetF(dir,len,pp,false,false,false,true ,gL,jL,kL,lL,oL);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalize with approximation, and cleanup close to zero.
+ AF2 dir2=dir*dir;
+ AF1 dirR=dir2.x+dir2.y;
+ AP1 zro=dirR<AF1_(1.0/32768.0);
+ dirR=APrxLoRsqF1(dirR);
+ dirR=zro?AF1_(1.0):dirR;
+ dir.x=zro?AF1_(1.0):dir.x;
+ dir*=AF2_(dirR);
+ // Transform from {0 to 2} to {0 to 1} range, and shape with square.
+ len=len*AF1_(0.5);
+ len*=len;
+ // Stretch kernel {1.0 vert|horz, to sqrt(2.0) on diagonal}.
+ AF1 stretch=(dir.x*dir.x+dir.y*dir.y)*APrxLoRcpF1(max(abs(dir.x),abs(dir.y)));
+ // Anisotropic length after rotation,
+ //  x := 1.0 lerp to 'stretch' on edges
+ //  y := 1.0 lerp to 2x on edges
+ AF2 len2=AF2(AF1_(1.0)+(stretch-AF1_(1.0))*len,AF1_(1.0)+AF1_(-0.5)*len);
+ // Based on the amount of 'edge',
+ // the window shifts from +/-{sqrt(2.0) to slightly beyond 2.0}.
+ AF1 lob=AF1_(0.5)+AF1_((1.0/4.0-0.04)-0.5)*len;
+ // Set distance^2 clipping point to the end of the adjustable window.
+ AF1 clp=APrxLoRcpF1(lob);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Accumulation mixed with min/max of 4 nearest.
+ //    b c
+ //  e f g h
+ //  i j k l
+ //    n o
+ AF3 min4=min(AMin3F3(AF3(ijfeR.z,ijfeG.z,ijfeB.z),AF3(klhgR.w,klhgG.w,klhgB.w),AF3(ijfeR.y,ijfeG.y,ijfeB.y)),
+  AF3(klhgR.x,klhgG.x,klhgB.x));
+ AF3 max4=max(AMax3F3(AF3(ijfeR.z,ijfeG.z,ijfeB.z),AF3(klhgR.w,klhgG.w,klhgB.w),AF3(ijfeR.y,ijfeG.y,ijfeB.y)),
+  AF3(klhgR.x,klhgG.x,klhgB.x));
+ // Accumulation.
+ AF3 aC=AF3_(0.0);
+ AF1 aW=AF1_(0.0);
+ FsrEasuTapF(aC,aW,AF2( 0.0,-1.0)-pp,dir,len2,lob,clp,AF3(bczzR.x,bczzG.x,bczzB.x)); // b
+ FsrEasuTapF(aC,aW,AF2( 1.0,-1.0)-pp,dir,len2,lob,clp,AF3(bczzR.y,bczzG.y,bczzB.y)); // c
+ FsrEasuTapF(aC,aW,AF2(-1.0, 1.0)-pp,dir,len2,lob,clp,AF3(ijfeR.x,ijfeG.x,ijfeB.x)); // i
+ FsrEasuTapF(aC,aW,AF2( 0.0, 1.0)-pp,dir,len2,lob,clp,AF3(ijfeR.y,ijfeG.y,ijfeB.y)); // j
+ FsrEasuTapF(aC,aW,AF2( 0.0, 0.0)-pp,dir,len2,lob,clp,AF3(ijfeR.z,ijfeG.z,ijfeB.z)); // f
+ FsrEasuTapF(aC,aW,AF2(-1.0, 0.0)-pp,dir,len2,lob,clp,AF3(ijfeR.w,ijfeG.w,ijfeB.w)); // e
+ FsrEasuTapF(aC,aW,AF2( 1.0, 1.0)-pp,dir,len2,lob,clp,AF3(klhgR.x,klhgG.x,klhgB.x)); // k
+ FsrEasuTapF(aC,aW,AF2( 2.0, 1.0)-pp,dir,len2,lob,clp,AF3(klhgR.y,klhgG.y,klhgB.y)); // l
+ FsrEasuTapF(aC,aW,AF2( 2.0, 0.0)-pp,dir,len2,lob,clp,AF3(klhgR.z,klhgG.z,klhgB.z)); // h
+ FsrEasuTapF(aC,aW,AF2( 1.0, 0.0)-pp,dir,len2,lob,clp,AF3(klhgR.w,klhgG.w,klhgB.w)); // g
+ FsrEasuTapF(aC,aW,AF2( 1.0, 2.0)-pp,dir,len2,lob,clp,AF3(zzonR.z,zzonG.z,zzonB.z)); // o
+ FsrEasuTapF(aC,aW,AF2( 0.0, 2.0)-pp,dir,len2,lob,clp,AF3(zzonR.w,zzonG.w,zzonB.w)); // n
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalize and dering.
+ pix=min(max4,max(min4,aC*AF3_(ARcpF1(aW))));}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// PACKED 16-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)&&defined(FSR_EASU_H)
+// Input callback prototypes, need to be implemented by calling shader
+ AH4 FsrEasuRH(AF2 p);
+ AH4 FsrEasuGH(AF2 p);
+ AH4 FsrEasuBH(AF2 p);
+//------------------------------------------------------------------------------------------------------------------------------
+ // This runs 2 taps in parallel.
+ // Packed-FP16 equivalent of FsrEasuTapF(); each AH2 lane carries an independent tap.
+ void FsrEasuTapH(
+ inout AH2 aCR,inout AH2 aCG,inout AH2 aCB,
+ inout AH2 aW,
+ AH2 offX,AH2 offY,
+ AH2 dir,
+ AH2 len,
+ AH1 lob,
+ AH1 clp,
+ AH2 cR,AH2 cG,AH2 cB){
+ // Rotate the two offsets by direction.
+ AH2 vX,vY;
+ vX=offX* dir.xx +offY*dir.yy;
+ vY=offX*(-dir.yy)+offY*dir.xx;
+ // Anisotropy, then distance^2 limited to the clipping window.
+ vX*=len.x;vY*=len.y;
+ AH2 d2=vX*vX+vY*vY;
+ d2=min(d2,AH2_(clp));
+ // Same windowed-base weight polynomial as the scalar path (see FsrEasuTapF).
+ AH2 wB=AH2_(2.0/5.0)*d2+AH2_(-1.0);
+ AH2 wA=AH2_(lob)*d2+AH2_(-1.0);
+ wB*=wB;
+ wA*=wA;
+ wB=AH2_(25.0/16.0)*wB+AH2_(-(25.0/16.0-1.0));
+ AH2 w=wB*wA;
+ // Weighted accumulation for both taps at once.
+ aCR+=cR*w;aCG+=cG*w;aCB+=cB*w;aW+=w;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // This runs 2 taps in parallel.
+ // Packed-FP16 equivalent of FsrEasuSetF(); accumulates direction and length for 2 quads at once,
+ // so only two calls (biST then biUV) cover the four bilinear corners.
+ void FsrEasuSetH(
+ inout AH2 dirPX,inout AH2 dirPY,
+ inout AH2 lenP,
+ AH2 pp,
+ AP1 biST,AP1 biUV,
+ AH2 lA,AH2 lB,AH2 lC,AH2 lD,AH2 lE){
+ // Bilinear corner weights for the pair of quads selected by the predicate.
+ AH2 w = AH2_(0.0);
+ if(biST)w=(AH2(1.0,0.0)+AH2(-pp.x,pp.x))*AH2_(AH1_(1.0)-pp.y);
+ if(biUV)w=(AH2(1.0,0.0)+AH2(-pp.x,pp.x))*AH2_( pp.y);
+ // ABS is not free in the packed FP16 path.
+ AH2 dc=lD-lC;
+ AH2 cb=lC-lB;
+ AH2 lenX=max(abs(dc),abs(cb));
+ lenX=ARcpH2(lenX);
+ AH2 dirX=lD-lB;
+ dirPX+=dirX*w;
+ lenX=ASatH2(abs(dirX)*lenX);
+ lenX*=lenX;
+ lenP+=lenX*w;
+ // Repeat for the y axis.
+ AH2 ec=lE-lC;
+ AH2 ca=lC-lA;
+ AH2 lenY=max(abs(ec),abs(ca));
+ lenY=ARcpH2(lenY);
+ AH2 dirY=lE-lA;
+ dirPY+=dirY*w;
+ lenY=ASatH2(abs(dirY)*lenY);
+ lenY*=lenY;
+ lenP+=lenY*w;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed 16-bit EASU kernel: same structure and constants as FsrEasuF(), but pairs of
+ // taps are evaluated together in AH2 lanes via FsrEasuSetH()/FsrEasuTapH().
+ void FsrEasuH(
+ out AH3 pix,
+ AU2 ip,
+ AU4 con0,
+ AU4 con1,
+ AU4 con2,
+ AU4 con3){
+//------------------------------------------------------------------------------------------------------------------------------
+ // Get position of 'f' (position math stays FP32; only the fraction is demoted to FP16).
+ AF2 pp=AF2(ip)*AF2_AU2(con0.xy)+AF2_AU2(con0.zw);
+ AF2 fp=floor(pp);
+ pp-=fp;
+ AH2 ppp=AH2(pp);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Same 12-tap / gather4 layout as FsrEasuF().
+ AF2 p0=fp*AF2_AU2(con1.xy)+AF2_AU2(con1.zw);
+ AF2 p1=p0+AF2_AU2(con2.xy);
+ AF2 p2=p0+AF2_AU2(con2.zw);
+ AF2 p3=p0+AF2_AU2(con3.xy);
+ AH4 bczzR=FsrEasuRH(p0);
+ AH4 bczzG=FsrEasuGH(p0);
+ AH4 bczzB=FsrEasuBH(p0);
+ AH4 ijfeR=FsrEasuRH(p1);
+ AH4 ijfeG=FsrEasuGH(p1);
+ AH4 ijfeB=FsrEasuBH(p1);
+ AH4 klhgR=FsrEasuRH(p2);
+ AH4 klhgG=FsrEasuGH(p2);
+ AH4 klhgB=FsrEasuBH(p2);
+ AH4 zzonR=FsrEasuRH(p3);
+ AH4 zzonG=FsrEasuGH(p3);
+ AH4 zzonB=FsrEasuBH(p3);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Approximate luma times 2, as in the 32-bit path.
+ AH4 bczzL=bczzB*AH4_(0.5)+(bczzR*AH4_(0.5)+bczzG);
+ AH4 ijfeL=ijfeB*AH4_(0.5)+(ijfeR*AH4_(0.5)+ijfeG);
+ AH4 klhgL=klhgB*AH4_(0.5)+(klhgR*AH4_(0.5)+klhgG);
+ AH4 zzonL=zzonB*AH4_(0.5)+(zzonR*AH4_(0.5)+zzonG);
+ AH1 bL=bczzL.x;
+ AH1 cL=bczzL.y;
+ AH1 iL=ijfeL.x;
+ AH1 jL=ijfeL.y;
+ AH1 fL=ijfeL.z;
+ AH1 eL=ijfeL.w;
+ AH1 kL=klhgL.x;
+ AH1 lL=klhgL.y;
+ AH1 hL=klhgL.z;
+ AH1 gL=klhgL.w;
+ AH1 oL=zzonL.z;
+ AH1 nL=zzonL.w;
+ // This part is different, accumulating 2 taps in parallel.
+ AH2 dirPX=AH2_(0.0);
+ AH2 dirPY=AH2_(0.0);
+ AH2 lenP=AH2_(0.0);
+ FsrEasuSetH(dirPX,dirPY,lenP,ppp,true, false,AH2(bL,cL),AH2(eL,fL),AH2(fL,gL),AH2(gL,hL),AH2(jL,kL));
+ FsrEasuSetH(dirPX,dirPY,lenP,ppp,false,true ,AH2(fL,gL),AH2(iL,jL),AH2(jL,kL),AH2(kL,lL),AH2(nL,oL));
+ // Fold the two lanes back down to scalar direction/length.
+ AH2 dir=AH2(dirPX.r+dirPX.g,dirPY.r+dirPY.g);
+ AH1 len=lenP.r+lenP.g;
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalize with approximation, and cleanup close to zero (as in FsrEasuF).
+ AH2 dir2=dir*dir;
+ AH1 dirR=dir2.x+dir2.y;
+ AP1 zro=dirR<AH1_(1.0/32768.0);
+ dirR=APrxLoRsqH1(dirR);
+ dirR=zro?AH1_(1.0):dirR;
+ dir.x=zro?AH1_(1.0):dir.x;
+ dir*=AH2_(dirR);
+ len=len*AH1_(0.5);
+ len*=len;
+ AH1 stretch=(dir.x*dir.x+dir.y*dir.y)*APrxLoRcpH1(max(abs(dir.x),abs(dir.y)));
+ AH2 len2=AH2(AH1_(1.0)+(stretch-AH1_(1.0))*len,AH1_(1.0)+AH1_(-0.5)*len);
+ AH1 lob=AH1_(0.5)+AH1_((1.0/4.0-0.04)-0.5)*len;
+ AH1 clp=APrxLoRcpH1(lob);
+//------------------------------------------------------------------------------------------------------------------------------
+ // FP16 is different, using packed trick to do min and max in same operation.
+ AH2 bothR=max(max(AH2(-ijfeR.z,ijfeR.z),AH2(-klhgR.w,klhgR.w)),max(AH2(-ijfeR.y,ijfeR.y),AH2(-klhgR.x,klhgR.x)));
+ AH2 bothG=max(max(AH2(-ijfeG.z,ijfeG.z),AH2(-klhgG.w,klhgG.w)),max(AH2(-ijfeG.y,ijfeG.y),AH2(-klhgG.x,klhgG.x)));
+ AH2 bothB=max(max(AH2(-ijfeB.z,ijfeB.z),AH2(-klhgB.w,klhgB.w)),max(AH2(-ijfeB.y,ijfeB.y),AH2(-klhgB.x,klhgB.x)));
+ // This part is different for FP16, working pairs of taps at a time.
+ AH2 pR=AH2_(0.0);
+ AH2 pG=AH2_(0.0);
+ AH2 pB=AH2_(0.0);
+ AH2 pW=AH2_(0.0);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 0.0, 1.0)-ppp.xx,AH2(-1.0,-1.0)-ppp.yy,dir,len2,lob,clp,bczzR.xy,bczzG.xy,bczzB.xy);
+ FsrEasuTapH(pR,pG,pB,pW,AH2(-1.0, 0.0)-ppp.xx,AH2( 1.0, 1.0)-ppp.yy,dir,len2,lob,clp,ijfeR.xy,ijfeG.xy,ijfeB.xy);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 0.0,-1.0)-ppp.xx,AH2( 0.0, 0.0)-ppp.yy,dir,len2,lob,clp,ijfeR.zw,ijfeG.zw,ijfeB.zw);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 1.0, 2.0)-ppp.xx,AH2( 1.0, 1.0)-ppp.yy,dir,len2,lob,clp,klhgR.xy,klhgG.xy,klhgB.xy);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 2.0, 1.0)-ppp.xx,AH2( 0.0, 0.0)-ppp.yy,dir,len2,lob,clp,klhgR.zw,klhgG.zw,klhgB.zw);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 1.0, 0.0)-ppp.xx,AH2( 2.0, 2.0)-ppp.yy,dir,len2,lob,clp,zzonR.zw,zzonG.zw,zzonB.zw);
+ AH3 aC=AH3(pR.x+pR.y,pG.x+pG.y,pB.x+pB.y);
+ AH1 aW=pW.x+pW.y;
+//------------------------------------------------------------------------------------------------------------------------------
+ // Slightly different for FP16 version due to combined min and max.
+ pix=min(AH3(bothR.y,bothG.y,bothB.y),max(-AH3(bothR.x,bothG.x,bothB.x),aC*AH3_(ARcpH1(aW))));}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [RCAS] ROBUST CONTRAST ADAPTIVE SHARPENING
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// CAS uses a simplified mechanism to convert local contrast into a variable amount of sharpness.
+// RCAS uses a more exact mechanism, solving for the maximum local sharpness possible before clipping.
+// RCAS also has a built in process to limit sharpening of what it detects as possible noise.
+// RCAS sharper does not support scaling, as it should be applied after EASU scaling.
+// Pass EASU output straight into RCAS, no color conversions necessary.
+//------------------------------------------------------------------------------------------------------------------------------
+// RCAS is based on the following logic.
+// RCAS uses a 5 tap filter in a cross pattern (same as CAS),
+// w n
+// w 1 w for taps w m e
+// w s
+// Where 'w' is the negative lobe weight.
+// output = (w*(n+e+w+s)+m)/(4*w+1)
+// RCAS solves for 'w' by seeing where the signal might clip out of the {0 to 1} input range,
+// 0 == (w*(n+e+w+s)+m)/(4*w+1) -> w = -m/(n+e+w+s)
+// 1 == (w*(n+e+w+s)+m)/(4*w+1) -> w = (1-m)/(n+e+w+s-4*1)
+// Then chooses the 'w' which results in no clipping, limits 'w', and multiplies by the 'sharp' amount.
+// This solution above has issues with MSAA input as the steps along the gradient cause edge detection issues.
+// So RCAS uses 4x the maximum and 4x the minimum (depending on equation) in place of the individual taps.
+// As well as switching from 'm' to either the minimum or maximum (depending on side), to help in energy conservation.
+// This stabilizes RCAS.
+// RCAS does a simple highpass which is normalized against the local contrast then shaped,
+// 0.25
+// 0.25 -1 0.25
+// 0.25
+// This is used as a noise detection filter, to reduce the effect of RCAS on grain, and focus on real edges.
+//
+// GLSL example for the required callbacks :
+//
+// AH4 FsrRcasLoadH(ASW2 p){return AH4(imageLoad(imgSrc,ASU2(p)));}
+// void FsrRcasInputH(inout AH1 r,inout AH1 g,inout AH1 b)
+// {
+// //do any simple input color conversions here or leave empty if none needed
+// }
+//
+// FsrRcasCon needs to be called from the CPU or GPU to set up constants.
+// Including a GPU example here, the 'con' value would be stored out to a constant buffer.
+//
+// AU4 con;
+// FsrRcasCon(con,
+// 0.0); // The scale is {0.0 := maximum sharpness, to N>0, where N is the number of stops (halving) of the reduction of sharpness}.
+// ---------------
+// RCAS sharpening supports a CAS-like pass-through alpha via,
+// #define FSR_RCAS_PASSTHROUGH_ALPHA 1
+// RCAS also supports a define to enable a more expensive path to avoid some sharpening of noise.
+// Would suggest it is better to apply film grain after RCAS sharpening (and after scaling) instead of using this define,
+// #define FSR_RCAS_DENOISE 1
+//==============================================================================================================================
+// This is set at the limit of providing unnatural results for sharpening.
+#define FSR_RCAS_LIMIT (0.25-(1.0/16.0))
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// CONSTANT SETUP
+//==============================================================================================================================
+// Call to setup required constant values (works on CPU or GPU).
+// Converts the 'sharpness' attenuation (in stops) into the constants read by the RCAS
+// filters: con.x holds the FP32 scale (read by FsrRcasF), con.y the same value packed as
+// two halves (read by the 16-bit paths); con.zw are zeroed padding.
+A_STATIC void FsrRcasCon(
+outAU4 con,
+// The scale is {0.0 := maximum, to N>0, where N is the number of stops (halving) of the reduction of sharpness}.
+AF1 sharpness){
+ // Transform from stops to linear value.
+ sharpness=AExp2F1(-sharpness);
+ varAF2(hSharp)=initAF2(sharpness,sharpness);
+ con[0]=AU1_AF1(sharpness);
+ con[1]=AU1_AH2_AF2(hSharp);
+ con[2]=0;
+ con[3]=0;}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// NON-PACKED 32-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(FSR_RCAS_F)
+ // Input callback prototypes that need to be implemented by calling shader
+ AF4 FsrRcasLoadF(ASU2 p);
+ void FsrRcasInputF(inout AF1 r,inout AF1 g,inout AF1 b);
+//------------------------------------------------------------------------------------------------------------------------------
+ // RCAS sharpening for one output pixel at 'ip' (32-bit path). Reads a 5-tap cross via
+ // FsrRcasLoadF() and solves for the largest non-clipping negative-lobe weight 'lobe'.
+ void FsrRcasF(
+ out AF1 pixR, // Output values, non-vector so port between RcasFilter() and RcasFilterH() is easy.
+ out AF1 pixG,
+ out AF1 pixB,
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ out AF1 pixA,
+ #endif
+ AU2 ip, // Integer pixel position in output.
+ AU4 con){ // Constant generated by RcasSetup().
+ // Algorithm uses minimal 3x3 pixel neighborhood.
+ //    b
+ //  d e f
+ //    h
+ ASU2 sp=ASU2(ip);
+ AF3 b=FsrRcasLoadF(sp+ASU2( 0,-1)).rgb;
+ AF3 d=FsrRcasLoadF(sp+ASU2(-1, 0)).rgb;
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ AF4 ee=FsrRcasLoadF(sp);
+ AF3 e=ee.rgb;pixA=ee.a;
+ #else
+ AF3 e=FsrRcasLoadF(sp).rgb;
+ #endif
+ AF3 f=FsrRcasLoadF(sp+ASU2( 1, 0)).rgb;
+ AF3 h=FsrRcasLoadF(sp+ASU2( 0, 1)).rgb;
+ // Rename (32-bit) or regroup (16-bit).
+ AF1 bR=b.r;
+ AF1 bG=b.g;
+ AF1 bB=b.b;
+ AF1 dR=d.r;
+ AF1 dG=d.g;
+ AF1 dB=d.b;
+ AF1 eR=e.r;
+ AF1 eG=e.g;
+ AF1 eB=e.b;
+ AF1 fR=f.r;
+ AF1 fG=f.g;
+ AF1 fB=f.b;
+ AF1 hR=h.r;
+ AF1 hG=h.g;
+ AF1 hB=h.b;
+ // Run optional input transform.
+ FsrRcasInputF(bR,bG,bB);
+ FsrRcasInputF(dR,dG,dB);
+ FsrRcasInputF(eR,eG,eB);
+ FsrRcasInputF(fR,fG,fB);
+ FsrRcasInputF(hR,hG,hB);
+ // Luma times 2.
+ AF1 bL=bB*AF1_(0.5)+(bR*AF1_(0.5)+bG);
+ AF1 dL=dB*AF1_(0.5)+(dR*AF1_(0.5)+dG);
+ AF1 eL=eB*AF1_(0.5)+(eR*AF1_(0.5)+eG);
+ AF1 fL=fB*AF1_(0.5)+(fR*AF1_(0.5)+fG);
+ AF1 hL=hB*AF1_(0.5)+(hR*AF1_(0.5)+hG);
+ // Noise detection ('nz' is only applied when FSR_RCAS_DENOISE is defined below).
+ AF1 nz=AF1_(0.25)*bL+AF1_(0.25)*dL+AF1_(0.25)*fL+AF1_(0.25)*hL-eL;
+ nz=ASatF1(abs(nz)*APrxMedRcpF1(AMax3F1(AMax3F1(bL,dL,eL),fL,hL)-AMin3F1(AMin3F1(bL,dL,eL),fL,hL)));
+ nz=AF1_(-0.5)*nz+AF1_(1.0);
+ // Min and max of ring.
+ AF1 mn4R=min(AMin3F1(bR,dR,fR),hR);
+ AF1 mn4G=min(AMin3F1(bG,dG,fG),hG);
+ AF1 mn4B=min(AMin3F1(bB,dB,fB),hB);
+ AF1 mx4R=max(AMax3F1(bR,dR,fR),hR);
+ AF1 mx4G=max(AMax3F1(bG,dG,fG),hG);
+ AF1 mx4B=max(AMax3F1(bB,dB,fB),hB);
+ // Immediate constants for peak range.
+ AF2 peakC=AF2(1.0,-1.0*4.0);
+ // Limiters, these need to be high precision RCPs.
+ AF1 hitMinR=min(mn4R,eR)*ARcpF1(AF1_(4.0)*mx4R);
+ AF1 hitMinG=min(mn4G,eG)*ARcpF1(AF1_(4.0)*mx4G);
+ AF1 hitMinB=min(mn4B,eB)*ARcpF1(AF1_(4.0)*mx4B);
+ AF1 hitMaxR=(peakC.x-max(mx4R,eR))*ARcpF1(AF1_(4.0)*mn4R+peakC.y);
+ AF1 hitMaxG=(peakC.x-max(mx4G,eG))*ARcpF1(AF1_(4.0)*mn4G+peakC.y);
+ AF1 hitMaxB=(peakC.x-max(mx4B,eB))*ARcpF1(AF1_(4.0)*mn4B+peakC.y);
+ AF1 lobeR=max(-hitMinR,hitMaxR);
+ AF1 lobeG=max(-hitMinG,hitMaxG);
+ AF1 lobeB=max(-hitMinB,hitMaxB);
+ // Clamp the lobe to [-FSR_RCAS_LIMIT, 0] and scale by the sharpness constant (con.x).
+ AF1 lobe=max(AF1_(-FSR_RCAS_LIMIT),min(AMax3F1(lobeR,lobeG,lobeB),AF1_(0.0)))*AF1_AU1(con.x);
+ // Apply noise removal.
+ #ifdef FSR_RCAS_DENOISE
+ lobe*=nz;
+ #endif
+ // Resolve, which needs the medium precision rcp approximation to avoid visible tonality changes.
+ AF1 rcpL=APrxMedRcpF1(AF1_(4.0)*lobe+AF1_(1.0));
+ pixR=(lobe*bR+lobe*dR+lobe*hR+lobe*fR+eR)*rcpL;
+ pixG=(lobe*bG+lobe*dG+lobe*hG+lobe*fG+eG)*rcpL;
+ pixB=(lobe*bB+lobe*dB+lobe*hB+lobe*fB+eB)*rcpL;
+ return;}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// NON-PACKED 16-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)&&defined(FSR_RCAS_H)
+ // Input callback prototypes that need to be implemented by calling shader
+ AH4 FsrRcasLoadH(ASW2 p);
+ void FsrRcasInputH(inout AH1 r,inout AH1 g,inout AH1 b);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Non-packed 16-bit RCAS; identical structure to FsrRcasF() evaluated in half precision,
+ // with the sharpness scale unpacked from the half2 stored in con.y.
+ void FsrRcasH(
+ out AH1 pixR, // Output values, non-vector so port between RcasFilter() and RcasFilterH() is easy.
+ out AH1 pixG,
+ out AH1 pixB,
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ out AH1 pixA,
+ #endif
+ AU2 ip, // Integer pixel position in output.
+ AU4 con){ // Constant generated by RcasSetup().
+ // Sharpening algorithm uses minimal 3x3 pixel neighborhood.
+ //    b
+ //  d e f
+ //    h
+ ASW2 sp=ASW2(ip);
+ AH3 b=FsrRcasLoadH(sp+ASW2( 0,-1)).rgb;
+ AH3 d=FsrRcasLoadH(sp+ASW2(-1, 0)).rgb;
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ AH4 ee=FsrRcasLoadH(sp);
+ AH3 e=ee.rgb;pixA=ee.a;
+ #else
+ AH3 e=FsrRcasLoadH(sp).rgb;
+ #endif
+ AH3 f=FsrRcasLoadH(sp+ASW2( 1, 0)).rgb;
+ AH3 h=FsrRcasLoadH(sp+ASW2( 0, 1)).rgb;
+ // Rename (32-bit) or regroup (16-bit).
+ AH1 bR=b.r;
+ AH1 bG=b.g;
+ AH1 bB=b.b;
+ AH1 dR=d.r;
+ AH1 dG=d.g;
+ AH1 dB=d.b;
+ AH1 eR=e.r;
+ AH1 eG=e.g;
+ AH1 eB=e.b;
+ AH1 fR=f.r;
+ AH1 fG=f.g;
+ AH1 fB=f.b;
+ AH1 hR=h.r;
+ AH1 hG=h.g;
+ AH1 hB=h.b;
+ // Run optional input transform.
+ FsrRcasInputH(bR,bG,bB);
+ FsrRcasInputH(dR,dG,dB);
+ FsrRcasInputH(eR,eG,eB);
+ FsrRcasInputH(fR,fG,fB);
+ FsrRcasInputH(hR,hG,hB);
+ // Luma times 2.
+ AH1 bL=bB*AH1_(0.5)+(bR*AH1_(0.5)+bG);
+ AH1 dL=dB*AH1_(0.5)+(dR*AH1_(0.5)+dG);
+ AH1 eL=eB*AH1_(0.5)+(eR*AH1_(0.5)+eG);
+ AH1 fL=fB*AH1_(0.5)+(fR*AH1_(0.5)+fG);
+ AH1 hL=hB*AH1_(0.5)+(hR*AH1_(0.5)+hG);
+ // Noise detection ('nz' is only applied when FSR_RCAS_DENOISE is defined below).
+ AH1 nz=AH1_(0.25)*bL+AH1_(0.25)*dL+AH1_(0.25)*fL+AH1_(0.25)*hL-eL;
+ nz=ASatH1(abs(nz)*APrxMedRcpH1(AMax3H1(AMax3H1(bL,dL,eL),fL,hL)-AMin3H1(AMin3H1(bL,dL,eL),fL,hL)));
+ nz=AH1_(-0.5)*nz+AH1_(1.0);
+ // Min and max of ring.
+ AH1 mn4R=min(AMin3H1(bR,dR,fR),hR);
+ AH1 mn4G=min(AMin3H1(bG,dG,fG),hG);
+ AH1 mn4B=min(AMin3H1(bB,dB,fB),hB);
+ AH1 mx4R=max(AMax3H1(bR,dR,fR),hR);
+ AH1 mx4G=max(AMax3H1(bG,dG,fG),hG);
+ AH1 mx4B=max(AMax3H1(bB,dB,fB),hB);
+ // Immediate constants for peak range.
+ AH2 peakC=AH2(1.0,-1.0*4.0);
+ // Limiters, these need to be high precision RCPs.
+ AH1 hitMinR=min(mn4R,eR)*ARcpH1(AH1_(4.0)*mx4R);
+ AH1 hitMinG=min(mn4G,eG)*ARcpH1(AH1_(4.0)*mx4G);
+ AH1 hitMinB=min(mn4B,eB)*ARcpH1(AH1_(4.0)*mx4B);
+ AH1 hitMaxR=(peakC.x-max(mx4R,eR))*ARcpH1(AH1_(4.0)*mn4R+peakC.y);
+ AH1 hitMaxG=(peakC.x-max(mx4G,eG))*ARcpH1(AH1_(4.0)*mn4G+peakC.y);
+ AH1 hitMaxB=(peakC.x-max(mx4B,eB))*ARcpH1(AH1_(4.0)*mn4B+peakC.y);
+ AH1 lobeR=max(-hitMinR,hitMaxR);
+ AH1 lobeG=max(-hitMinG,hitMaxG);
+ AH1 lobeB=max(-hitMinB,hitMaxB);
+ // Clamp the lobe to [-FSR_RCAS_LIMIT, 0] and scale by the half-packed sharpness (con.y).
+ AH1 lobe=max(AH1_(-FSR_RCAS_LIMIT),min(AMax3H1(lobeR,lobeG,lobeB),AH1_(0.0)))*AH2_AU1(con.y).x;
+ // Apply noise removal.
+ #ifdef FSR_RCAS_DENOISE
+ lobe*=nz;
+ #endif
+ // Resolve, which needs the medium precision rcp approximation to avoid visible tonality changes.
+ AH1 rcpL=APrxMedRcpH1(AH1_(4.0)*lobe+AH1_(1.0));
+ pixR=(lobe*bR+lobe*dR+lobe*hR+lobe*fR+eR)*rcpL;
+ pixG=(lobe*bG+lobe*dG+lobe*hG+lobe*fG+eG)*rcpL;
+ pixB=(lobe*bB+lobe*dB+lobe*hB+lobe*fB+eB)*rcpL;}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// PACKED 16-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)&&defined(FSR_RCAS_HX2)
+ // Input callback prototypes that need to be implemented by the calling shader
+ AH4 FsrRcasLoadHx2(ASW2 p);
+ void FsrRcasInputHx2(inout AH2 r,inout AH2 g,inout AH2 b);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Can be used to convert from packed Structures of Arrays to Arrays of Structures for store.
+ // Depack helper: splits the packed SoA channel pairs (pixR/pixG/pixB, where
+ // component .x is the left 8x8 tile and .y the right tile) back into two
+ // AoS RGBA pixels ready for two separate image stores.
+ void FsrRcasDepackHx2(out AH4 pix0,out AH4 pix1,AH2 pixR,AH2 pixG,AH2 pixB){
+ #ifdef A_HLSL
+ // Invoke a slower path for DX only, since it won't allow uninitialized values.
+ pix0.a=pix1.a=0.0;
+ #endif
+ pix0.rgb=AH3(pixR.x,pixG.x,pixB.x);
+ pix1.rgb=AH3(pixR.y,pixG.y,pixB.y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed 16-bit RCAS: computes the sharpened color for two 8x8 tiles of a
+ // 16x8 region in one call, operating on pairs of halves (AH2) so both tiles
+ // share the same instruction stream. Same algorithm as the scalar path above.
+ void FsrRcasHx2(
+ // Output values are for 2 8x8 tiles in a 16x8 region.
+ // pix<R,G,B>.x = left 8x8 tile
+ // pix<R,G,B>.y = right 8x8 tile
+ // This enables later processing to easily be packed as well.
+ out AH2 pixR,
+ out AH2 pixG,
+ out AH2 pixB,
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ out AH2 pixA,
+ #endif
+ AU2 ip, // Integer pixel position in output.
+ AU4 con){ // Constant generated by RcasSetup().
+ // No scaling algorithm uses minimal 3x3 pixel neighborhood.
+ ASW2 sp0=ASW2(ip);
+ AH3 b0=FsrRcasLoadHx2(sp0+ASW2( 0,-1)).rgb;
+ AH3 d0=FsrRcasLoadHx2(sp0+ASW2(-1, 0)).rgb;
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ AH4 ee0=FsrRcasLoadHx2(sp0);
+ AH3 e0=ee0.rgb;pixA.r=ee0.a;
+ #else
+ AH3 e0=FsrRcasLoadHx2(sp0).rgb;
+ #endif
+ AH3 f0=FsrRcasLoadHx2(sp0+ASW2( 1, 0)).rgb;
+ AH3 h0=FsrRcasLoadHx2(sp0+ASW2( 0, 1)).rgb;
+ // Second tile: identical cross-shaped fetch pattern, 8 pixels to the right.
+ ASW2 sp1=sp0+ASW2(8,0);
+ AH3 b1=FsrRcasLoadHx2(sp1+ASW2( 0,-1)).rgb;
+ AH3 d1=FsrRcasLoadHx2(sp1+ASW2(-1, 0)).rgb;
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ AH4 ee1=FsrRcasLoadHx2(sp1);
+ AH3 e1=ee1.rgb;pixA.g=ee1.a;
+ #else
+ AH3 e1=FsrRcasLoadHx2(sp1).rgb;
+ #endif
+ AH3 f1=FsrRcasLoadHx2(sp1+ASW2( 1, 0)).rgb;
+ AH3 h1=FsrRcasLoadHx2(sp1+ASW2( 0, 1)).rgb;
+ // Arrays of Structures to Structures of Arrays conversion.
+ AH2 bR=AH2(b0.r,b1.r);
+ AH2 bG=AH2(b0.g,b1.g);
+ AH2 bB=AH2(b0.b,b1.b);
+ AH2 dR=AH2(d0.r,d1.r);
+ AH2 dG=AH2(d0.g,d1.g);
+ AH2 dB=AH2(d0.b,d1.b);
+ AH2 eR=AH2(e0.r,e1.r);
+ AH2 eG=AH2(e0.g,e1.g);
+ AH2 eB=AH2(e0.b,e1.b);
+ AH2 fR=AH2(f0.r,f1.r);
+ AH2 fG=AH2(f0.g,f1.g);
+ AH2 fB=AH2(f0.b,f1.b);
+ AH2 hR=AH2(h0.r,h1.r);
+ AH2 hG=AH2(h0.g,h1.g);
+ AH2 hB=AH2(h0.b,h1.b);
+ // Run optional input transform.
+ FsrRcasInputHx2(bR,bG,bB);
+ FsrRcasInputHx2(dR,dG,dB);
+ FsrRcasInputHx2(eR,eG,eB);
+ FsrRcasInputHx2(fR,fG,fB);
+ FsrRcasInputHx2(hR,hG,hB);
+ // Luma times 2.
+ AH2 bL=bB*AH2_(0.5)+(bR*AH2_(0.5)+bG);
+ AH2 dL=dB*AH2_(0.5)+(dR*AH2_(0.5)+dG);
+ AH2 eL=eB*AH2_(0.5)+(eR*AH2_(0.5)+eG);
+ AH2 fL=fB*AH2_(0.5)+(fR*AH2_(0.5)+fG);
+ AH2 hL=hB*AH2_(0.5)+(hR*AH2_(0.5)+hG);
+ // Noise detection: ratio of the cross-average minus center to the local range,
+ // remapped so nz is lower (less sharpening) where the signal looks noisy.
+ AH2 nz=AH2_(0.25)*bL+AH2_(0.25)*dL+AH2_(0.25)*fL+AH2_(0.25)*hL-eL;
+ nz=ASatH2(abs(nz)*APrxMedRcpH2(AMax3H2(AMax3H2(bL,dL,eL),fL,hL)-AMin3H2(AMin3H2(bL,dL,eL),fL,hL)));
+ nz=AH2_(-0.5)*nz+AH2_(1.0);
+ // Min and max of ring.
+ AH2 mn4R=min(AMin3H2(bR,dR,fR),hR);
+ AH2 mn4G=min(AMin3H2(bG,dG,fG),hG);
+ AH2 mn4B=min(AMin3H2(bB,dB,fB),hB);
+ AH2 mx4R=max(AMax3H2(bR,dR,fR),hR);
+ AH2 mx4G=max(AMax3H2(bG,dG,fG),hG);
+ AH2 mx4B=max(AMax3H2(bB,dB,fB),hB);
+ // Immediate constants for peak range.
+ AH2 peakC=AH2(1.0,-1.0*4.0);
+ // Limiters, these need to be high precision RCPs.
+ AH2 hitMinR=min(mn4R,eR)*ARcpH2(AH2_(4.0)*mx4R);
+ AH2 hitMinG=min(mn4G,eG)*ARcpH2(AH2_(4.0)*mx4G);
+ AH2 hitMinB=min(mn4B,eB)*ARcpH2(AH2_(4.0)*mx4B);
+ AH2 hitMaxR=(peakC.x-max(mx4R,eR))*ARcpH2(AH2_(4.0)*mn4R+peakC.y);
+ AH2 hitMaxG=(peakC.x-max(mx4G,eG))*ARcpH2(AH2_(4.0)*mn4G+peakC.y);
+ AH2 hitMaxB=(peakC.x-max(mx4B,eB))*ARcpH2(AH2_(4.0)*mn4B+peakC.y);
+ AH2 lobeR=max(-hitMinR,hitMaxR);
+ AH2 lobeG=max(-hitMinG,hitMaxG);
+ AH2 lobeB=max(-hitMinB,hitMaxB);
+ // Negative lobe clamped to [-FSR_RCAS_LIMIT, 0], scaled by the sharpness from con.y.
+ AH2 lobe=max(AH2_(-FSR_RCAS_LIMIT),min(AMax3H2(lobeR,lobeG,lobeB),AH2_(0.0)))*AH2_(AH2_AU1(con.y).x);
+ // Apply noise removal.
+ #ifdef FSR_RCAS_DENOISE
+ lobe*=nz;
+ #endif
+ // Resolve, which needs the medium precision rcp approximation to avoid visible tonality changes.
+ AH2 rcpL=APrxMedRcpH2(AH2_(4.0)*lobe+AH2_(1.0));
+ pixR=(lobe*bR+lobe*dR+lobe*hR+lobe*fR+eR)*rcpL;
+ pixG=(lobe*bG+lobe*dG+lobe*hG+lobe*fG+eG)*rcpL;
+ pixB=(lobe*bB+lobe*dB+lobe*hB+lobe*fB+eB)*rcpL;}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [LFGA] LINEAR FILM GRAIN APPLICATOR
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// Adding output-resolution film grain after scaling is a good way to mask both rendering and scaling artifacts.
+// Suggest using tiled blue noise as film grain input, with peak noise frequency set for a specific look and feel.
+// The 'Lfga*()' functions provide a convenient way to introduce grain.
+// These functions limit grain based on distance to signal limits.
+// This is done so that the grain is temporally energy preserving, and thus won't modify image tonality.
+// Grain application should be done in a linear colorspace.
+// The grain should be temporally changing, but have a temporal sum per pixel that adds to zero (non-biased).
+//------------------------------------------------------------------------------------------------------------------------------
+// Usage,
+// FsrLfga*(
+// color, // In/out linear colorspace color {0 to 1} ranged.
+// grain, // Per pixel grain texture value {-0.5 to 0.5} ranged, input is 3-channel to support colored grain.
+// amount); // Amount of grain {0 to 1} ranged.
+//------------------------------------------------------------------------------------------------------------------------------
+// Example if grain texture is monochrome: 'FsrLfgaF(color,AF3_(grain),amount)'
+//==============================================================================================================================
+#if defined(A_GPU)
+ // Maximum grain is the minimum distance to the signal limit.
+ // c = linear color {0 to 1}, t = grain {-0.5 to 0.5}, a = grain amount {0 to 1}
+ // (see usage notes above). min(1-c,c) limits grain near 0 and 1 so the result
+ // stays in range and the applied grain remains energy preserving.
+ void FsrLfgaF(inout AF3 c,AF3 t,AF1 a){c+=(t*AF3_(a))*min(AF3_(1.0)-c,c);}
+#endif
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)
+ // Half precision version (slower).
+ void FsrLfgaH(inout AH3 c,AH3 t,AH1 a){c+=(t*AH3_(a))*min(AH3_(1.0)-c,c);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed half precision version (faster).
+ // Per-channel pairs: c<R,G,B>/t<R,G,B> carry two pixels each (.x/.y).
+ void FsrLfgaHx2(inout AH2 cR,inout AH2 cG,inout AH2 cB,AH2 tR,AH2 tG,AH2 tB,AH1 a){
+ cR+=(tR*AH2_(a))*min(AH2_(1.0)-cR,cR);cG+=(tG*AH2_(a))*min(AH2_(1.0)-cG,cG);cB+=(tB*AH2_(a))*min(AH2_(1.0)-cB,cB);}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [SRTM] SIMPLE REVERSIBLE TONE-MAPPER
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// This provides a way to take linear HDR color {0 to FP16_MAX} and convert it into a temporary {0 to 1} ranged post-tonemapped linear.
+// The tonemapper preserves RGB ratio, which helps maintain HDR color bleed during filtering.
+//------------------------------------------------------------------------------------------------------------------------------
+// Reversible tonemapper usage,
+// FsrSrtm*(color); // {0 to FP16_MAX} converted to {0 to 1}.
+// FsrSrtmInv*(color); // {0 to 1} converted into {0 to 32768, output peak safe for FP16}.
+//==============================================================================================================================
+#if defined(A_GPU)
+ // Forward tonemap: c' = c / (max(c.r,c.g,c.b) + 1), mapping {0 to FP16_MAX}
+ // into {0 to 1} while preserving the RGB ratio (see header notes above).
+ void FsrSrtmF(inout AF3 c){c*=AF3_(ARcpF1(AMax3F1(c.r,c.g,c.b)+AF1_(1.0)));}
+ // The extra max solves the c=1.0 case (which is a /0).
+ // Inverse tonemap: c' = c / (1 - max(c.r,c.g,c.b)), clamped so the peak output is 32768 (FP16 safe).
+ void FsrSrtmInvF(inout AF3 c){c*=AF3_(ARcpF1(max(AF1_(1.0/32768.0),AF1_(1.0)-AMax3F1(c.r,c.g,c.b))));}
+#endif
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)
+ // Half precision versions of the same forward/inverse tonemap.
+ void FsrSrtmH(inout AH3 c){c*=AH3_(ARcpH1(AMax3H1(c.r,c.g,c.b)+AH1_(1.0)));}
+ void FsrSrtmInvH(inout AH3 c){c*=AH3_(ARcpH1(max(AH1_(1.0/32768.0),AH1_(1.0)-AMax3H1(c.r,c.g,c.b))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed versions: channel pairs carry two pixels each (.x/.y); one shared rcp per pixel.
+ void FsrSrtmHx2(inout AH2 cR,inout AH2 cG,inout AH2 cB){
+ AH2 rcp=ARcpH2(AMax3H2(cR,cG,cB)+AH2_(1.0));cR*=rcp;cG*=rcp;cB*=rcp;}
+ void FsrSrtmInvHx2(inout AH2 cR,inout AH2 cG,inout AH2 cB){
+ AH2 rcp=ARcpH2(max(AH2_(1.0/32768.0),AH2_(1.0)-AMax3H2(cR,cG,cB)));cR*=rcp;cG*=rcp;cB*=rcp;}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [TEPD] TEMPORAL ENERGY PRESERVING DITHER
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// Temporally energy preserving dithered {0 to 1} linear to gamma 2.0 conversion.
+// Gamma 2.0 is used so that the conversion back to linear is just to square the color.
+// The conversion comes in 8-bit and 10-bit modes, designed for output to 8-bit UNORM or 10:10:10:2 respectively.
+// Given good non-biased temporal blue noise as dither input,
+// the output dither will temporally conserve energy.
+// This is done by choosing the linear nearest step point instead of perceptual nearest.
+// See code below for details.
+//------------------------------------------------------------------------------------------------------------------------------
+// DX SPEC RULES FOR FLOAT->UNORM 8-BIT CONVERSION
+// ===============================================
+// - Output is 'uint(floor(saturate(n)*255.0+0.5))'.
+// - Thus rounding is to nearest.
+// - NaN gets converted to zero.
+// - INF is clamped to {0.0 to 1.0}.
+//==============================================================================================================================
+#if defined(A_GPU)
+ // Hand tuned integer position to dither value, with more values than simple checkerboard.
+ // Only 32-bit has enough precision for this computation.
+ // Output is {0 to <1}.
+ // 'f' offsets x per invocation — presumably a frame counter to animate the
+ // dither temporally; confirm at call sites.
+ AF1 FsrTepdDitF(AU2 p,AU1 f){
+ AF1 x=AF1_(p.x+f);
+ AF1 y=AF1_(p.y);
+ // The 1.61803 golden ratio.
+ AF1 a=AF1_((1.0+sqrt(5.0))/2.0);
+ // Number designed to provide a good visual pattern.
+ AF1 b=AF1_(1.0/3.69);
+ x=x*a+(y*b);
+ return AFractF1(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // This version is 8-bit gamma 2.0.
+ // The 'c' input is {0 to 1}.
+ // Output is {0 to 1} ready for image store.
+ // Picks between the two nearest 8-bit gamma-2.0 steps ('a' below, 'b' above)
+ // so that the dithered output is energy preserving in linear space.
+ void FsrTepdC8F(inout AF3 c,AF1 dit){
+ AF3 n=sqrt(c);
+ n=floor(n*AF3_(255.0))*AF3_(1.0/255.0);
+ AF3 a=n*n;
+ AF3 b=n+AF3_(1.0/255.0);b=b*b;
+ // Ratio of 'a' to 'b' required to produce 'c'.
+ // APrxLoRcpF1() won't work here (at least for very high dynamic ranges).
+ // APrxMedRcpF1() is an IADD,FMA,MUL.
+ AF3 r=(c-b)*APrxMedRcpF3(a-b);
+ // Use the ratio as a cutoff to choose 'a' or 'b'.
+ // AGtZeroF1() is a MUL.
+ c=ASatF3(n+AGtZeroF3(AF3_(dit)-r)*AF3_(1.0/255.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // This version is 10-bit gamma 2.0.
+ // The 'c' input is {0 to 1}.
+ // Output is {0 to 1} ready for image store.
+ // Same scheme as FsrTepdC8F with 1023 steps instead of 255.
+ void FsrTepdC10F(inout AF3 c,AF1 dit){
+ AF3 n=sqrt(c);
+ n=floor(n*AF3_(1023.0))*AF3_(1.0/1023.0);
+ AF3 a=n*n;
+ AF3 b=n+AF3_(1.0/1023.0);b=b*b;
+ AF3 r=(c-b)*APrxMedRcpF3(a-b);
+ c=ASatF3(n+AGtZeroF3(AF3_(dit)-r)*AF3_(1.0/1023.0));}
+#endif
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)
+ // Half-precision TEPD variants; the dither position math stays in 32-bit
+ // float (see note above about 32-bit precision) and only the result is
+ // narrowed to half.
+ AH1 FsrTepdDitH(AU2 p,AU1 f){
+ AF1 x=AF1_(p.x+f);
+ AF1 y=AF1_(p.y);
+ AF1 a=AF1_((1.0+sqrt(5.0))/2.0);
+ AF1 b=AF1_(1.0/3.69);
+ x=x*a+(y*b);
+ return AH1(AFractF1(x));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // 8-bit gamma 2.0 conversion, half precision (same scheme as FsrTepdC8F).
+ void FsrTepdC8H(inout AH3 c,AH1 dit){
+ AH3 n=sqrt(c);
+ n=floor(n*AH3_(255.0))*AH3_(1.0/255.0);
+ AH3 a=n*n;
+ AH3 b=n+AH3_(1.0/255.0);b=b*b;
+ AH3 r=(c-b)*APrxMedRcpH3(a-b);
+ c=ASatH3(n+AGtZeroH3(AH3_(dit)-r)*AH3_(1.0/255.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // 10-bit gamma 2.0 conversion, half precision (same scheme as FsrTepdC10F).
+ void FsrTepdC10H(inout AH3 c,AH1 dit){
+ AH3 n=sqrt(c);
+ n=floor(n*AH3_(1023.0))*AH3_(1.0/1023.0);
+ AH3 a=n*n;
+ AH3 b=n+AH3_(1.0/1023.0);b=b*b;
+ AH3 r=(c-b)*APrxMedRcpH3(a-b);
+ c=ASatH3(n+AGtZeroH3(AH3_(dit)-r)*AH3_(1.0/1023.0));}
+//==============================================================================================================================
+ // This computes dither for positions 'p' and 'p+{8,0}'.
+ AH2 FsrTepdDitHx2(AU2 p,AU1 f){
+ AF2 x;
+ x.x=AF1_(p.x+f);
+ x.y=x.x+AF1_(8.0);
+ AF1 y=AF1_(p.y);
+ AF1 a=AF1_((1.0+sqrt(5.0))/2.0);
+ AF1 b=AF1_(1.0/3.69);
+ x=x*AF2_(a)+AF2_(y*b);
+ return AH2(AFractF2(x));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed 8-bit conversion: channel pairs carry two pixels each (.x/.y).
+ void FsrTepdC8Hx2(inout AH2 cR,inout AH2 cG,inout AH2 cB,AH2 dit){
+ AH2 nR=sqrt(cR);
+ AH2 nG=sqrt(cG);
+ AH2 nB=sqrt(cB);
+ nR=floor(nR*AH2_(255.0))*AH2_(1.0/255.0);
+ nG=floor(nG*AH2_(255.0))*AH2_(1.0/255.0);
+ nB=floor(nB*AH2_(255.0))*AH2_(1.0/255.0);
+ AH2 aR=nR*nR;
+ AH2 aG=nG*nG;
+ AH2 aB=nB*nB;
+ AH2 bR=nR+AH2_(1.0/255.0);bR=bR*bR;
+ AH2 bG=nG+AH2_(1.0/255.0);bG=bG*bG;
+ AH2 bB=nB+AH2_(1.0/255.0);bB=bB*bB;
+ AH2 rR=(cR-bR)*APrxMedRcpH2(aR-bR);
+ AH2 rG=(cG-bG)*APrxMedRcpH2(aG-bG);
+ AH2 rB=(cB-bB)*APrxMedRcpH2(aB-bB);
+ cR=ASatH2(nR+AGtZeroH2(dit-rR)*AH2_(1.0/255.0));
+ cG=ASatH2(nG+AGtZeroH2(dit-rG)*AH2_(1.0/255.0));
+ cB=ASatH2(nB+AGtZeroH2(dit-rB)*AH2_(1.0/255.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed 10-bit conversion: channel pairs carry two pixels each (.x/.y).
+ void FsrTepdC10Hx2(inout AH2 cR,inout AH2 cG,inout AH2 cB,AH2 dit){
+ AH2 nR=sqrt(cR);
+ AH2 nG=sqrt(cG);
+ AH2 nB=sqrt(cB);
+ nR=floor(nR*AH2_(1023.0))*AH2_(1.0/1023.0);
+ nG=floor(nG*AH2_(1023.0))*AH2_(1.0/1023.0);
+ nB=floor(nB*AH2_(1023.0))*AH2_(1.0/1023.0);
+ AH2 aR=nR*nR;
+ AH2 aG=nG*nG;
+ AH2 aB=nB*nB;
+ AH2 bR=nR+AH2_(1.0/1023.0);bR=bR*bR;
+ AH2 bG=nG+AH2_(1.0/1023.0);bG=bG*bG;
+ AH2 bB=nB+AH2_(1.0/1023.0);bB=bB*bB;
+ AH2 rR=(cR-bR)*APrxMedRcpH2(aR-bR);
+ AH2 rG=(cG-bG)*APrxMedRcpH2(aG-bG);
+ AH2 rB=(cB-bB)*APrxMedRcpH2(aB-bB);
+ cR=ASatH2(nR+AGtZeroH2(dit-rR)*AH2_(1.0/1023.0));
+ cG=ASatH2(nG+AGtZeroH2(dit-rG)*AH2_(1.0/1023.0));
+ cB=ASatH2(nB+AGtZeroH2(dit-rB)*AH2_(1.0/1023.0));}
+#endif
+
+
+// Returns 1.0 when 'v' lies inside the rectangle [bLeft, tRight), else 0.0.
+// step(bLeft, v) is 1 per component once v >= bLeft; subtracting step(tRight, v)
+// zeroes it again once v >= tRight, so s.x*s.y is the inside test.
+// bLeft/tRight are file-scope bounds — presumably populated by setBounds() in main(); confirm.
+float insideBox(vec2 v) {
+ vec2 s = step(bLeft, v) - step(tRight, v);
+ return s.x * s.y;
+}
+
+// Maps an output-space position into the destination rectangle, mirroring the
+// coordinate when the blit is flipped (dstX1 < dstX0, or dstY0 < dstY1).
+// NOTE(review): the flipped-X case uses 'dstX1 - x' while the flipped-Y case
+// uses 'dstY1 + dstY0 - y - 1'; the asymmetry between the two axes looks
+// suspicious — confirm intended flip semantics against the blit callers.
+AF2 translateDest(AF2 pos) {
+ AF2 translatedPos = AF2(pos.x, pos.y);
+ translatedPos.x = dstX1 < dstX0 ? dstX1 - translatedPos.x : translatedPos.x;
+ translatedPos.y = dstY0 < dstY1 ? dstY1 + dstY0 - translatedPos.y - 1 : translatedPos.y;
+ return translatedPos;
+}
+
+// Shades one output pixel: writes opaque black outside the destination bounds,
+// otherwise runs the FSR EASU upscale and stores the result at the (possibly
+// flipped) destination position.
+void CurrFilter(AU2 pos)
+{
+ if((insideBox(vec2(pos.x, pos.y))) == 0) {
+ imageStore(imgOutput, ASU2(pos.x, pos.y), AF4(0,0,0,1));
+ return;
+ }
+ AF3 c;
+ // EASU samples relative to the top-left of the destination rect (pos - bLeft).
+ FsrEasuF(c, AU2(pos.x - bLeft.x, pos.y - bLeft.y), con0, con1, con2, con3);
+ imageStore(imgOutput, ASU2(translateDest(pos)), AF4(c, 1));
+}
+
+void main() {
+ // Extents via abs(): the source/destination rect coordinates may be supplied
+ // in either order (flipped blits), so the raw differences can be negative.
+ srcW = abs(srcX1 - srcX0);
+ srcH = abs(srcY1 - srcY0);
+ dstW = abs(dstX1 - dstX0);
+ dstH = abs(dstY1 - dstY0);
+
+ // ARmp8x8 remaps the linear local invocation id into an 8x8 pattern; each
+ // workgroup covers a 16x16 output region (four 8x8 quadrants, see the four
+ // CurrFilter calls below).
+ AU2 gxy = ARmp8x8(gl_LocalInvocationID.x) + AU2(gl_WorkGroupID.x << 4u, gl_WorkGroupID.y << 4u);
+
+ // Normalize the destination rect to (min corner, max corner) regardless of flip direction.
+ setBounds(vec2(dstX0 < dstX1 ? dstX0 : dstX1, dstY0 < dstY1 ? dstY0 : dstY1),
+ vec2(dstX1 > dstX0 ? dstX1 : dstX0, dstY1 > dstY0 ? dstY1 : dstY0));
+
+ // Upscaling
+ FsrEasuCon(con0, con1, con2, con3,
+ srcW, srcH, // Viewport size (top left aligned) in the input image which is to be scaled.
+ srcW, srcH, // The size of the input image.
+ dstW, dstH); // The output resolution.
+
+ // Walk the four 8x8 quadrants of this workgroup's 16x16 region.
+ CurrFilter(gxy);
+ gxy.x += 8u;
+ CurrFilter(gxy);
+ gxy.y += 8u;
+ CurrFilter(gxy);
+ gxy.x -= 8u;
+ CurrFilter(gxy);
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrScaling.spv b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrScaling.spv
new file mode 100644
index 00000000..c15b72ec
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrScaling.spv
Binary files differ
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrSharpening.glsl b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrSharpening.glsl
new file mode 100644
index 00000000..785bc0c8
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrSharpening.glsl
@@ -0,0 +1,3904 @@
+// Sharpening
+#version 430 core
+layout (local_size_x = 64) in;
+layout( rgba8, binding = 0, set = 3) uniform image2D imgOutput;
+layout( binding = 2 ) uniform invResolution
+{
+ vec2 invResolution_data;
+};
+layout( binding = 3 ) uniform outvResolution
+{
+ vec2 outvResolution_data;
+};
+layout( binding = 1, set = 2) uniform sampler2D source;
+layout( binding = 4 ) uniform sharpening
+{
+ float sharpening_data;
+};
+
+#define A_GPU 1
+#define A_GLSL 1
+//==============================================================================================================================
+//
+// [A] SHADER PORTABILITY 1.20210629
+//
+//==============================================================================================================================
+// FidelityFX Super Resolution Sample
+//
+// Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files(the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions :
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//------------------------------------------------------------------------------------------------------------------------------
+// MIT LICENSE
+// ===========
+// Copyright (c) 2014 Michal Drobot (for concepts used in "FLOAT APPROXIMATIONS").
+// -----------
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
+// files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
+// modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+// -----------
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+// Software.
+// -----------
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+//------------------------------------------------------------------------------------------------------------------------------
+// ABOUT
+// =====
+// Common central point for high-level shading language and C portability for various shader headers.
+//------------------------------------------------------------------------------------------------------------------------------
+// DEFINES
+// =======
+// A_CPU ..... Include the CPU related code.
+// A_GPU ..... Include the GPU related code.
+// A_GLSL .... Using GLSL.
+// A_HLSL .... Using HLSL.
+// A_HLSL_6_2 Using HLSL 6.2 with new 'uint16_t' and related types (requires '-enable-16bit-types').
+// A_NO_16_BIT_CAST Don't use instructions that are not available in SPIR-V (needed for running A_HLSL_6_2 on Vulkan)
+// A_GCC ..... Using a GCC compatible compiler (else assume MSVC compatible compiler by default).
+// =======
+// A_BYTE .... Support 8-bit integer.
+// A_HALF .... Support 16-bit integer and floating point.
+// A_LONG .... Support 64-bit integer.
+// A_DUBL .... Support 64-bit floating point.
+// =======
+// A_WAVE .... Support wave-wide operations.
+//------------------------------------------------------------------------------------------------------------------------------
+// To get #include "ffx_a.h" working in GLSL use '#extension GL_GOOGLE_include_directive:require'.
+//------------------------------------------------------------------------------------------------------------------------------
+// SIMPLIFIED TYPE SYSTEM
+// ======================
+// - All ints will be unsigned with exception of when signed is required.
+// - Type naming simplified and shortened "A<type><#components>",
+// - H = 16-bit float (half)
+// - F = 32-bit float (float)
+// - D = 64-bit float (double)
+// - P = 1-bit integer (predicate, not using bool because 'B' is used for byte)
+// - B = 8-bit integer (byte)
+// - W = 16-bit integer (word)
+// - U = 32-bit integer (unsigned)
+// - L = 64-bit integer (long)
+// - Using "AS<type><#components>" for signed when required.
+//------------------------------------------------------------------------------------------------------------------------------
+// TODO
+// ====
+// - Make sure 'ALerp*(a,b,m)' does 'b*m+(-a*m+a)' (2 ops).
+//------------------------------------------------------------------------------------------------------------------------------
+// CHANGE LOG
+// ==========
+// 20200914 - Expanded wave ops and prx code.
+// 20200713 - Added [ZOL] section, fixed serious bugs in sRGB and Rec.709 color conversion code, etc.
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// COMMON
+//==============================================================================================================================
+#define A_2PI 6.28318530718
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// CPU
+//
+//
+//==============================================================================================================================
+#ifdef A_CPU
+ // Supporting user defined overrides.
+ #ifndef A_RESTRICT
+ #define A_RESTRICT __restrict
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifndef A_STATIC
+ #define A_STATIC static
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ // Same types across CPU and GPU.
+ // Predicate uses 32-bit integer (C friendly bool).
+ typedef uint32_t AP1;
+ typedef float AF1;
+ typedef double AD1;
+ typedef uint8_t AB1;
+ typedef uint16_t AW1;
+ typedef uint32_t AU1;
+ typedef uint64_t AL1;
+ typedef int8_t ASB1;
+ typedef int16_t ASW1;
+ typedef int32_t ASU1;
+ typedef int64_t ASL1;
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AD1_(a) ((AD1)(a))
+ #define AF1_(a) ((AF1)(a))
+ #define AL1_(a) ((AL1)(a))
+ #define AU1_(a) ((AU1)(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASL1_(a) ((ASL1)(a))
+ #define ASU1_(a) ((ASU1)(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AU1 AU1_AF1(AF1 a){union{AF1 f;AU1 u;}bits;bits.f=a;return bits.u;}
+//------------------------------------------------------------------------------------------------------------------------------
+ #define A_TRUE 1
+ #define A_FALSE 0
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// CPU/GPU PORTING
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// Get CPU and GPU to share all setup code, without duplicate code paths.
+// This uses a lower-case prefix for special vector constructs.
+// - In C restrict pointers are used.
+// - In the shading language, in/inout/out arguments are used.
+// This depends on the ability to access a vector value in both languages via array syntax (aka color[2]).
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// VECTOR ARGUMENT/RETURN/INITIALIZATION PORTABILITY
+//==============================================================================================================================
+ // CPU emulation of shading-language vector argument qualifiers.
+ // A "vector" on the CPU is an array of scalars, so returning or passing one is
+ // done through a restrict pointer to the element type:
+ //   retXN   - return type of a function producing an N-wide vector
+ //   inXN    - read-only vector argument
+ //   inoutXN - read/write vector argument
+ //   outXN   - write-only vector argument
+ // X is the element type: D=AD1 (double), F=AF1 (float), L=AL1, U=AU1.
+ // Note all four qualifier families expand to the same pointer type on the CPU;
+ // the distinction is documentation for the GPU (GLSL/HLSL) side.
+ #define retAD2 AD1 *A_RESTRICT
+ #define retAD3 AD1 *A_RESTRICT
+ #define retAD4 AD1 *A_RESTRICT
+ #define retAF2 AF1 *A_RESTRICT
+ #define retAF3 AF1 *A_RESTRICT
+ #define retAF4 AF1 *A_RESTRICT
+ #define retAL2 AL1 *A_RESTRICT
+ #define retAL3 AL1 *A_RESTRICT
+ #define retAL4 AL1 *A_RESTRICT
+ #define retAU2 AU1 *A_RESTRICT
+ #define retAU3 AU1 *A_RESTRICT
+ #define retAU4 AU1 *A_RESTRICT
+//------------------------------------------------------------------------------------------------------------------------------
+ #define inAD2 AD1 *A_RESTRICT
+ #define inAD3 AD1 *A_RESTRICT
+ #define inAD4 AD1 *A_RESTRICT
+ #define inAF2 AF1 *A_RESTRICT
+ #define inAF3 AF1 *A_RESTRICT
+ #define inAF4 AF1 *A_RESTRICT
+ #define inAL2 AL1 *A_RESTRICT
+ #define inAL3 AL1 *A_RESTRICT
+ #define inAL4 AL1 *A_RESTRICT
+ #define inAU2 AU1 *A_RESTRICT
+ #define inAU3 AU1 *A_RESTRICT
+ #define inAU4 AU1 *A_RESTRICT
+//------------------------------------------------------------------------------------------------------------------------------
+ #define inoutAD2 AD1 *A_RESTRICT
+ #define inoutAD3 AD1 *A_RESTRICT
+ #define inoutAD4 AD1 *A_RESTRICT
+ #define inoutAF2 AF1 *A_RESTRICT
+ #define inoutAF3 AF1 *A_RESTRICT
+ #define inoutAF4 AF1 *A_RESTRICT
+ #define inoutAL2 AL1 *A_RESTRICT
+ #define inoutAL3 AL1 *A_RESTRICT
+ #define inoutAL4 AL1 *A_RESTRICT
+ #define inoutAU2 AU1 *A_RESTRICT
+ #define inoutAU3 AU1 *A_RESTRICT
+ #define inoutAU4 AU1 *A_RESTRICT
+//------------------------------------------------------------------------------------------------------------------------------
+ #define outAD2 AD1 *A_RESTRICT
+ #define outAD3 AD1 *A_RESTRICT
+ #define outAD4 AD1 *A_RESTRICT
+ #define outAF2 AF1 *A_RESTRICT
+ #define outAF3 AF1 *A_RESTRICT
+ #define outAF4 AF1 *A_RESTRICT
+ #define outAL2 AL1 *A_RESTRICT
+ #define outAL3 AL1 *A_RESTRICT
+ #define outAL4 AL1 *A_RESTRICT
+ #define outAU2 AU1 *A_RESTRICT
+ #define outAU3 AU1 *A_RESTRICT
+ #define outAU4 AU1 *A_RESTRICT
+//------------------------------------------------------------------------------------------------------------------------------
+ // varXN(x) declares a local N-wide vector named x as a scalar array.
+ #define varAD2(x) AD1 x[2]
+ #define varAD3(x) AD1 x[3]
+ #define varAD4(x) AD1 x[4]
+ #define varAF2(x) AF1 x[2]
+ #define varAF3(x) AF1 x[3]
+ #define varAF4(x) AF1 x[4]
+ #define varAL2(x) AL1 x[2]
+ #define varAL3(x) AL1 x[3]
+ #define varAL4(x) AL1 x[4]
+ #define varAU2(x) AU1 x[2]
+ #define varAU3(x) AU1 x[3]
+ #define varAU4(x) AU1 x[4]
+//------------------------------------------------------------------------------------------------------------------------------
+ // initXN(...) supplies a brace initializer matching the varXN array form,
+ // e.g. varAF2(c)=initAF2(1.0f,2.0f);
+ #define initAD2(x,y) {x,y}
+ #define initAD3(x,y,z) {x,y,z}
+ #define initAD4(x,y,z,w) {x,y,z,w}
+ #define initAF2(x,y) {x,y}
+ #define initAF3(x,y,z) {x,y,z}
+ #define initAF4(x,y,z,w) {x,y,z,w}
+ #define initAL2(x,y) {x,y}
+ #define initAL3(x,y,z) {x,y,z}
+ #define initAL4(x,y,z,w) {x,y,z,w}
+ #define initAU2(x,y) {x,y}
+ #define initAU3(x,y,z) {x,y,z}
+ #define initAU4(x,y,z,w) {x,y,z,w}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// SCALAR RETURN OPS
+//------------------------------------------------------------------------------------------------------------------------------
+// TODO
+// ====
+// - Replace transcendentals with manual versions.
+//==============================================================================================================================
+ // Absolute value. S* variants reinterpret the unsigned storage type as signed
+ // (via the ASU1_/ASL1_ casts) before taking |x|, then cast back.
+ #ifdef A_GCC
+  A_STATIC AD1 AAbsD1(AD1 a){return __builtin_fabs(a);}
+  A_STATIC AF1 AAbsF1(AF1 a){return __builtin_fabsf(a);}
+  A_STATIC AU1 AAbsSU1(AU1 a){return AU1_(__builtin_abs(ASU1_(a)));}
+  A_STATIC AL1 AAbsSL1(AL1 a){return AL1_(__builtin_llabs(ASL1_(a)));}
+ #else
+  A_STATIC AD1 AAbsD1(AD1 a){return fabs(a);}
+  A_STATIC AF1 AAbsF1(AF1 a){return fabsf(a);}
+  A_STATIC AU1 AAbsSU1(AU1 a){return AU1_(abs(ASU1_(a)));}
+  // BUG FIX: previously labs((long)ASL1_(a)), which truncates 64-bit values on
+  // LLP64 targets (e.g. 64-bit Windows, where long is 32 bits). llabs (C99)
+  // operates on long long, matching the 64-bit __builtin_llabs in the GCC path.
+  A_STATIC AL1 AAbsSL1(AL1 a){return AL1_(llabs(ASL1_(a)));}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ // Cosine wrappers (double/float); GCC builtins used when available.
+ #ifdef A_GCC
+  A_STATIC AD1 ACosD1(AD1 a){return __builtin_cos(a);}
+  A_STATIC AF1 ACosF1(AF1 a){return __builtin_cosf(a);}
+ #else
+  A_STATIC AD1 ACosD1(AD1 a){return cos(a);}
+  A_STATIC AF1 ACosF1(AF1 a){return cosf(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ // Dot products for 2/3/4-wide vectors (double and float element types).
+ A_STATIC AD1 ADotD2(inAD2 a,inAD2 b){return a[0]*b[0]+a[1]*b[1];}
+ A_STATIC AD1 ADotD3(inAD3 a,inAD3 b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2];}
+ A_STATIC AD1 ADotD4(inAD4 a,inAD4 b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3];}
+ A_STATIC AF1 ADotF2(inAF2 a,inAF2 b){return a[0]*b[0]+a[1]*b[1];}
+ A_STATIC AF1 ADotF3(inAF3 a,inAF3 b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2];}
+ A_STATIC AF1 ADotF4(inAF4 a,inAF4 b){return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]+a[3]*b[3];}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Base-2 exponential wrappers.
+ #ifdef A_GCC
+  A_STATIC AD1 AExp2D1(AD1 a){return __builtin_exp2(a);}
+  A_STATIC AF1 AExp2F1(AF1 a){return __builtin_exp2f(a);}
+ #else
+  A_STATIC AD1 AExp2D1(AD1 a){return exp2(a);}
+  A_STATIC AF1 AExp2F1(AF1 a){return exp2f(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ // Floor wrappers.
+ #ifdef A_GCC
+  A_STATIC AD1 AFloorD1(AD1 a){return __builtin_floor(a);}
+  A_STATIC AF1 AFloorF1(AF1 a){return __builtin_floorf(a);}
+ #else
+  A_STATIC AD1 AFloorD1(AD1 a){return floor(a);}
+  A_STATIC AF1 AFloorF1(AF1 a){return floorf(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ // Linear interpolation a..b by c. Written as b*c+(-a*c+a), algebraically
+ // a+(b-a)*c, a form that maps to two fused multiply-adds.
+ A_STATIC AD1 ALerpD1(AD1 a,AD1 b,AD1 c){return b*c+(-a*c+a);}
+ A_STATIC AF1 ALerpF1(AF1 a,AF1 b,AF1 c){return b*c+(-a*c+a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Base-2 logarithm wrappers.
+ #ifdef A_GCC
+  A_STATIC AD1 ALog2D1(AD1 a){return __builtin_log2(a);}
+  A_STATIC AF1 ALog2F1(AF1 a){return __builtin_log2f(a);}
+ #else
+  A_STATIC AD1 ALog2D1(AD1 a){return log2(a);}
+  A_STATIC AF1 ALog2F1(AF1 a){return log2f(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ // Max/min. The S* variants compare the bits reinterpreted as signed (via the
+ // ASL1_/ASU1_ casts) but still take and return the unsigned storage types.
+ A_STATIC AD1 AMaxD1(AD1 a,AD1 b){return a>b?a:b;}
+ A_STATIC AF1 AMaxF1(AF1 a,AF1 b){return a>b?a:b;}
+ A_STATIC AL1 AMaxL1(AL1 a,AL1 b){return a>b?a:b;}
+ A_STATIC AU1 AMaxU1(AU1 a,AU1 b){return a>b?a:b;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // These follow the convention that A integer types don't have signage, until they are operated on.
+ A_STATIC AL1 AMaxSL1(AL1 a,AL1 b){return (ASL1_(a)>ASL1_(b))?a:b;}
+ A_STATIC AU1 AMaxSU1(AU1 a,AU1 b){return (ASU1_(a)>ASU1_(b))?a:b;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AD1 AMinD1(AD1 a,AD1 b){return a<b?a:b;}
+ A_STATIC AF1 AMinF1(AF1 a,AF1 b){return a<b?a:b;}
+ A_STATIC AL1 AMinL1(AL1 a,AL1 b){return a<b?a:b;}
+ A_STATIC AU1 AMinU1(AU1 a,AU1 b){return a<b?a:b;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC AL1 AMinSL1(AL1 a,AL1 b){return (ASL1_(a)<ASL1_(b))?a:b;}
+ A_STATIC AU1 AMinSU1(AU1 a,AU1 b){return (ASU1_(a)<ASU1_(b))?a:b;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Reciprocal (no divide-by-zero guard; callers must ensure a != 0).
+ A_STATIC AD1 ARcpD1(AD1 a){return 1.0/a;}
+ A_STATIC AF1 ARcpF1(AF1 a){return 1.0f/a;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Right shift with both operands reinterpreted as signed (arithmetic shift on
+ // typical compilers; >> on negative signed values is implementation-defined in C).
+ A_STATIC AL1 AShrSL1(AL1 a,AL1 b){return AL1_(ASL1_(a)>>ASL1_(b));}
+ A_STATIC AU1 AShrSU1(AU1 a,AU1 b){return AU1_(ASU1_(a)>>ASU1_(b));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Sine wrappers.
+ #ifdef A_GCC
+  A_STATIC AD1 ASinD1(AD1 a){return __builtin_sin(a);}
+  A_STATIC AF1 ASinF1(AF1 a){return __builtin_sinf(a);}
+ #else
+  A_STATIC AD1 ASinD1(AD1 a){return sin(a);}
+  A_STATIC AF1 ASinF1(AF1 a){return sinf(a);}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ // Square-root wrappers.
+ #ifdef A_GCC
+  A_STATIC AD1 ASqrtD1(AD1 a){return __builtin_sqrt(a);}
+  A_STATIC AF1 ASqrtF1(AF1 a){return __builtin_sqrtf(a);}
+ #else
+  A_STATIC AD1 ASqrtD1(AD1 a){return sqrt(a);}
+  A_STATIC AF1 ASqrtF1(AF1 a){return sqrtf(a);}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// SCALAR RETURN OPS - DEPENDENT
+//==============================================================================================================================
+ // Clamp x into [n,m]; assumes n <= m.
+ A_STATIC AD1 AClampD1(AD1 x,AD1 n,AD1 m){return AMaxD1(n,AMinD1(x,m));}
+ A_STATIC AF1 AClampF1(AF1 x,AF1 n,AF1 m){return AMaxF1(n,AMinF1(x,m));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Fractional part: a - floor(a).
+ A_STATIC AD1 AFractD1(AD1 a){return a-AFloorD1(a);}
+ A_STATIC AF1 AFractF1(AF1 a){return a-AFloorF1(a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // pow(a,b) via exp2(b*log2(a)); requires a > 0 (log2 of the base).
+ A_STATIC AD1 APowD1(AD1 a,AD1 b){return AExp2D1(b*ALog2D1(a));}
+ A_STATIC AF1 APowF1(AF1 a,AF1 b){return AExp2F1(b*ALog2F1(a));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Reciprocal square root: 1/sqrt(a).
+ A_STATIC AD1 ARsqD1(AD1 a){return ARcpD1(ASqrtD1(a));}
+ A_STATIC AF1 ARsqF1(AF1 a){return ARcpF1(ASqrtF1(a));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Saturate: clamp to [0,1].
+ A_STATIC AD1 ASatD1(AD1 a){return AMinD1(1.0,AMaxD1(0.0,a));}
+ A_STATIC AF1 ASatF1(AF1 a){return AMinF1(1.0f,AMaxF1(0.0f,a));}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// VECTOR OPS
+//------------------------------------------------------------------------------------------------------------------------------
+// These are added as needed for production or prototyping, so not necessarily a complete set.
+// They follow a convention of taking in a destination and also returning the destination value to increase utility.
+//==============================================================================================================================
+ // Component-wise vector ops. Each writes its result into destination d and
+ // returns d, so calls can be nested as expressions. The *One variants take a
+ // scalar for the last operand. Destinations must not alias the inputs
+ // (the out/in pointer types are A_RESTRICT qualified).
+ A_STATIC retAD2 opAAbsD2(outAD2 d,inAD2 a){d[0]=AAbsD1(a[0]);d[1]=AAbsD1(a[1]);return d;}
+ A_STATIC retAD3 opAAbsD3(outAD3 d,inAD3 a){d[0]=AAbsD1(a[0]);d[1]=AAbsD1(a[1]);d[2]=AAbsD1(a[2]);return d;}
+ A_STATIC retAD4 opAAbsD4(outAD4 d,inAD4 a){d[0]=AAbsD1(a[0]);d[1]=AAbsD1(a[1]);d[2]=AAbsD1(a[2]);d[3]=AAbsD1(a[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAAbsF2(outAF2 d,inAF2 a){d[0]=AAbsF1(a[0]);d[1]=AAbsF1(a[1]);return d;}
+ A_STATIC retAF3 opAAbsF3(outAF3 d,inAF3 a){d[0]=AAbsF1(a[0]);d[1]=AAbsF1(a[1]);d[2]=AAbsF1(a[2]);return d;}
+ A_STATIC retAF4 opAAbsF4(outAF4 d,inAF4 a){d[0]=AAbsF1(a[0]);d[1]=AAbsF1(a[1]);d[2]=AAbsF1(a[2]);d[3]=AAbsF1(a[3]);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAAddD2(outAD2 d,inAD2 a,inAD2 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];return d;}
+ A_STATIC retAD3 opAAddD3(outAD3 d,inAD3 a,inAD3 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];d[2]=a[2]+b[2];return d;}
+ A_STATIC retAD4 opAAddD4(outAD4 d,inAD4 a,inAD4 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];d[2]=a[2]+b[2];d[3]=a[3]+b[3];return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAAddF2(outAF2 d,inAF2 a,inAF2 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];return d;}
+ A_STATIC retAF3 opAAddF3(outAF3 d,inAF3 a,inAF3 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];d[2]=a[2]+b[2];return d;}
+ A_STATIC retAF4 opAAddF4(outAF4 d,inAF4 a,inAF4 b){d[0]=a[0]+b[0];d[1]=a[1]+b[1];d[2]=a[2]+b[2];d[3]=a[3]+b[3];return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAAddOneD2(outAD2 d,inAD2 a,AD1 b){d[0]=a[0]+b;d[1]=a[1]+b;return d;}
+ A_STATIC retAD3 opAAddOneD3(outAD3 d,inAD3 a,AD1 b){d[0]=a[0]+b;d[1]=a[1]+b;d[2]=a[2]+b;return d;}
+ A_STATIC retAD4 opAAddOneD4(outAD4 d,inAD4 a,AD1 b){d[0]=a[0]+b;d[1]=a[1]+b;d[2]=a[2]+b;d[3]=a[3]+b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAAddOneF2(outAF2 d,inAF2 a,AF1 b){d[0]=a[0]+b;d[1]=a[1]+b;return d;}
+ A_STATIC retAF3 opAAddOneF3(outAF3 d,inAF3 a,AF1 b){d[0]=a[0]+b;d[1]=a[1]+b;d[2]=a[2]+b;return d;}
+ A_STATIC retAF4 opAAddOneF4(outAF4 d,inAF4 a,AF1 b){d[0]=a[0]+b;d[1]=a[1]+b;d[2]=a[2]+b;d[3]=a[3]+b;return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opACpyD2(outAD2 d,inAD2 a){d[0]=a[0];d[1]=a[1];return d;}
+ A_STATIC retAD3 opACpyD3(outAD3 d,inAD3 a){d[0]=a[0];d[1]=a[1];d[2]=a[2];return d;}
+ A_STATIC retAD4 opACpyD4(outAD4 d,inAD4 a){d[0]=a[0];d[1]=a[1];d[2]=a[2];d[3]=a[3];return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opACpyF2(outAF2 d,inAF2 a){d[0]=a[0];d[1]=a[1];return d;}
+ A_STATIC retAF3 opACpyF3(outAF3 d,inAF3 a){d[0]=a[0];d[1]=a[1];d[2]=a[2];return d;}
+ A_STATIC retAF4 opACpyF4(outAF4 d,inAF4 a){d[0]=a[0];d[1]=a[1];d[2]=a[2];d[3]=a[3];return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opALerpD2(outAD2 d,inAD2 a,inAD2 b,inAD2 c){d[0]=ALerpD1(a[0],b[0],c[0]);d[1]=ALerpD1(a[1],b[1],c[1]);return d;}
+ A_STATIC retAD3 opALerpD3(outAD3 d,inAD3 a,inAD3 b,inAD3 c){d[0]=ALerpD1(a[0],b[0],c[0]);d[1]=ALerpD1(a[1],b[1],c[1]);d[2]=ALerpD1(a[2],b[2],c[2]);return d;}
+ A_STATIC retAD4 opALerpD4(outAD4 d,inAD4 a,inAD4 b,inAD4 c){d[0]=ALerpD1(a[0],b[0],c[0]);d[1]=ALerpD1(a[1],b[1],c[1]);d[2]=ALerpD1(a[2],b[2],c[2]);d[3]=ALerpD1(a[3],b[3],c[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opALerpF2(outAF2 d,inAF2 a,inAF2 b,inAF2 c){d[0]=ALerpF1(a[0],b[0],c[0]);d[1]=ALerpF1(a[1],b[1],c[1]);return d;}
+ A_STATIC retAF3 opALerpF3(outAF3 d,inAF3 a,inAF3 b,inAF3 c){d[0]=ALerpF1(a[0],b[0],c[0]);d[1]=ALerpF1(a[1],b[1],c[1]);d[2]=ALerpF1(a[2],b[2],c[2]);return d;}
+ A_STATIC retAF4 opALerpF4(outAF4 d,inAF4 a,inAF4 b,inAF4 c){d[0]=ALerpF1(a[0],b[0],c[0]);d[1]=ALerpF1(a[1],b[1],c[1]);d[2]=ALerpF1(a[2],b[2],c[2]);d[3]=ALerpF1(a[3],b[3],c[3]);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opALerpOneD2(outAD2 d,inAD2 a,inAD2 b,AD1 c){d[0]=ALerpD1(a[0],b[0],c);d[1]=ALerpD1(a[1],b[1],c);return d;}
+ A_STATIC retAD3 opALerpOneD3(outAD3 d,inAD3 a,inAD3 b,AD1 c){d[0]=ALerpD1(a[0],b[0],c);d[1]=ALerpD1(a[1],b[1],c);d[2]=ALerpD1(a[2],b[2],c);return d;}
+ A_STATIC retAD4 opALerpOneD4(outAD4 d,inAD4 a,inAD4 b,AD1 c){d[0]=ALerpD1(a[0],b[0],c);d[1]=ALerpD1(a[1],b[1],c);d[2]=ALerpD1(a[2],b[2],c);d[3]=ALerpD1(a[3],b[3],c);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opALerpOneF2(outAF2 d,inAF2 a,inAF2 b,AF1 c){d[0]=ALerpF1(a[0],b[0],c);d[1]=ALerpF1(a[1],b[1],c);return d;}
+ A_STATIC retAF3 opALerpOneF3(outAF3 d,inAF3 a,inAF3 b,AF1 c){d[0]=ALerpF1(a[0],b[0],c);d[1]=ALerpF1(a[1],b[1],c);d[2]=ALerpF1(a[2],b[2],c);return d;}
+ A_STATIC retAF4 opALerpOneF4(outAF4 d,inAF4 a,inAF4 b,AF1 c){d[0]=ALerpF1(a[0],b[0],c);d[1]=ALerpF1(a[1],b[1],c);d[2]=ALerpF1(a[2],b[2],c);d[3]=ALerpF1(a[3],b[3],c);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAMaxD2(outAD2 d,inAD2 a,inAD2 b){d[0]=AMaxD1(a[0],b[0]);d[1]=AMaxD1(a[1],b[1]);return d;}
+ A_STATIC retAD3 opAMaxD3(outAD3 d,inAD3 a,inAD3 b){d[0]=AMaxD1(a[0],b[0]);d[1]=AMaxD1(a[1],b[1]);d[2]=AMaxD1(a[2],b[2]);return d;}
+ A_STATIC retAD4 opAMaxD4(outAD4 d,inAD4 a,inAD4 b){d[0]=AMaxD1(a[0],b[0]);d[1]=AMaxD1(a[1],b[1]);d[2]=AMaxD1(a[2],b[2]);d[3]=AMaxD1(a[3],b[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAMaxF2(outAF2 d,inAF2 a,inAF2 b){d[0]=AMaxF1(a[0],b[0]);d[1]=AMaxF1(a[1],b[1]);return d;}
+ A_STATIC retAF3 opAMaxF3(outAF3 d,inAF3 a,inAF3 b){d[0]=AMaxF1(a[0],b[0]);d[1]=AMaxF1(a[1],b[1]);d[2]=AMaxF1(a[2],b[2]);return d;}
+ A_STATIC retAF4 opAMaxF4(outAF4 d,inAF4 a,inAF4 b){d[0]=AMaxF1(a[0],b[0]);d[1]=AMaxF1(a[1],b[1]);d[2]=AMaxF1(a[2],b[2]);d[3]=AMaxF1(a[3],b[3]);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAMinD2(outAD2 d,inAD2 a,inAD2 b){d[0]=AMinD1(a[0],b[0]);d[1]=AMinD1(a[1],b[1]);return d;}
+ A_STATIC retAD3 opAMinD3(outAD3 d,inAD3 a,inAD3 b){d[0]=AMinD1(a[0],b[0]);d[1]=AMinD1(a[1],b[1]);d[2]=AMinD1(a[2],b[2]);return d;}
+ A_STATIC retAD4 opAMinD4(outAD4 d,inAD4 a,inAD4 b){d[0]=AMinD1(a[0],b[0]);d[1]=AMinD1(a[1],b[1]);d[2]=AMinD1(a[2],b[2]);d[3]=AMinD1(a[3],b[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAMinF2(outAF2 d,inAF2 a,inAF2 b){d[0]=AMinF1(a[0],b[0]);d[1]=AMinF1(a[1],b[1]);return d;}
+ A_STATIC retAF3 opAMinF3(outAF3 d,inAF3 a,inAF3 b){d[0]=AMinF1(a[0],b[0]);d[1]=AMinF1(a[1],b[1]);d[2]=AMinF1(a[2],b[2]);return d;}
+ A_STATIC retAF4 opAMinF4(outAF4 d,inAF4 a,inAF4 b){d[0]=AMinF1(a[0],b[0]);d[1]=AMinF1(a[1],b[1]);d[2]=AMinF1(a[2],b[2]);d[3]=AMinF1(a[3],b[3]);return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAMulD2(outAD2 d,inAD2 a,inAD2 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];return d;}
+ A_STATIC retAD3 opAMulD3(outAD3 d,inAD3 a,inAD3 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];d[2]=a[2]*b[2];return d;}
+ A_STATIC retAD4 opAMulD4(outAD4 d,inAD4 a,inAD4 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];d[2]=a[2]*b[2];d[3]=a[3]*b[3];return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAMulF2(outAF2 d,inAF2 a,inAF2 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];return d;}
+ A_STATIC retAF3 opAMulF3(outAF3 d,inAF3 a,inAF3 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];d[2]=a[2]*b[2];return d;}
+ A_STATIC retAF4 opAMulF4(outAF4 d,inAF4 a,inAF4 b){d[0]=a[0]*b[0];d[1]=a[1]*b[1];d[2]=a[2]*b[2];d[3]=a[3]*b[3];return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opAMulOneD2(outAD2 d,inAD2 a,AD1 b){d[0]=a[0]*b;d[1]=a[1]*b;return d;}
+ A_STATIC retAD3 opAMulOneD3(outAD3 d,inAD3 a,AD1 b){d[0]=a[0]*b;d[1]=a[1]*b;d[2]=a[2]*b;return d;}
+ A_STATIC retAD4 opAMulOneD4(outAD4 d,inAD4 a,AD1 b){d[0]=a[0]*b;d[1]=a[1]*b;d[2]=a[2]*b;d[3]=a[3]*b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opAMulOneF2(outAF2 d,inAF2 a,AF1 b){d[0]=a[0]*b;d[1]=a[1]*b;return d;}
+ A_STATIC retAF3 opAMulOneF3(outAF3 d,inAF3 a,AF1 b){d[0]=a[0]*b;d[1]=a[1]*b;d[2]=a[2]*b;return d;}
+ A_STATIC retAF4 opAMulOneF4(outAF4 d,inAF4 a,AF1 b){d[0]=a[0]*b;d[1]=a[1]*b;d[2]=a[2]*b;d[3]=a[3]*b;return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opANegD2(outAD2 d,inAD2 a){d[0]=-a[0];d[1]=-a[1];return d;}
+ A_STATIC retAD3 opANegD3(outAD3 d,inAD3 a){d[0]=-a[0];d[1]=-a[1];d[2]=-a[2];return d;}
+ A_STATIC retAD4 opANegD4(outAD4 d,inAD4 a){d[0]=-a[0];d[1]=-a[1];d[2]=-a[2];d[3]=-a[3];return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opANegF2(outAF2 d,inAF2 a){d[0]=-a[0];d[1]=-a[1];return d;}
+ A_STATIC retAF3 opANegF3(outAF3 d,inAF3 a){d[0]=-a[0];d[1]=-a[1];d[2]=-a[2];return d;}
+ A_STATIC retAF4 opANegF4(outAF4 d,inAF4 a){d[0]=-a[0];d[1]=-a[1];d[2]=-a[2];d[3]=-a[3];return d;}
+//==============================================================================================================================
+ A_STATIC retAD2 opARcpD2(outAD2 d,inAD2 a){d[0]=ARcpD1(a[0]);d[1]=ARcpD1(a[1]);return d;}
+ A_STATIC retAD3 opARcpD3(outAD3 d,inAD3 a){d[0]=ARcpD1(a[0]);d[1]=ARcpD1(a[1]);d[2]=ARcpD1(a[2]);return d;}
+ A_STATIC retAD4 opARcpD4(outAD4 d,inAD4 a){d[0]=ARcpD1(a[0]);d[1]=ARcpD1(a[1]);d[2]=ARcpD1(a[2]);d[3]=ARcpD1(a[3]);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ A_STATIC retAF2 opARcpF2(outAF2 d,inAF2 a){d[0]=ARcpF1(a[0]);d[1]=ARcpF1(a[1]);return d;}
+ A_STATIC retAF3 opARcpF3(outAF3 d,inAF3 a){d[0]=ARcpF1(a[0]);d[1]=ARcpF1(a[1]);d[2]=ARcpF1(a[2]);return d;}
+ A_STATIC retAF4 opARcpF4(outAF4 d,inAF4 a){d[0]=ARcpF1(a[0]);d[1]=ARcpF1(a[1]);d[2]=ARcpF1(a[2]);d[3]=ARcpF1(a[3]);return d;}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HALF FLOAT PACKING
+//==============================================================================================================================
+ // Convert float to half (in lower 16-bits of output).
+ // Same fast technique as documented here: ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf
+ // Supports denormals.
+ // Conversion rules are to make computations possibly "safer" on the GPU,
+ // -INF & -NaN -> -65504
+ // +INF & +NaN -> +65504
+ // Convert a 32-bit float to a 16-bit half, returned in the low 16 bits of an
+ // AU1. Table-driven: the float's top 9 bits (sign + 8-bit exponent) index two
+ // 512-entry tables; the half is base[i] plus the mantissa shifted by shift[i].
+ // Per the rules above, +/-INF and NaN clamp to +/-65504 (0x7bff/0xfbff, the
+ // largest finite half) rather than producing half INF/NaN.
+ A_STATIC AU1 AU1_AH1_AF1(AF1 f){
+  // base[i]: sign/exponent contribution of the half (0x7bff/0xfbff rows are the
+  // clamped overflow results; leading rows are underflow-to-zero).
+  static AW1 base[512]={
+   0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+   0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+   0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+   0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+   0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+   0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+   0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0001,0x0002,0x0004,0x0008,0x0010,0x0020,0x0040,0x0080,0x0100,
+   0x0200,0x0400,0x0800,0x0c00,0x1000,0x1400,0x1800,0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,
+   0x4000,0x4400,0x4800,0x4c00,0x5000,0x5400,0x5800,0x5c00,0x6000,0x6400,0x6800,0x6c00,0x7000,0x7400,0x7800,0x7bff,
+   0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+   0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+   0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+   0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+   0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+   0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+   0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,0x7bff,
+   0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+   0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+   0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+   0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+   0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+   0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,
+   0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8000,0x8001,0x8002,0x8004,0x8008,0x8010,0x8020,0x8040,0x8080,0x8100,
+   0x8200,0x8400,0x8800,0x8c00,0x9000,0x9400,0x9800,0x9c00,0xa000,0xa400,0xa800,0xac00,0xb000,0xb400,0xb800,0xbc00,
+   0xc000,0xc400,0xc800,0xcc00,0xd000,0xd400,0xd800,0xdc00,0xe000,0xe400,0xe800,0xec00,0xf000,0xf400,0xf800,0xfbff,
+   0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+   0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+   0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+   0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+   0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+   0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,
+   0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff,0xfbff};
+  // shift[i]: right shift applied to the 23-bit mantissa: 0x0d (13) for
+  // normals, 0x0e..0x17 for denormals, 0x18 (24) discards the mantissa
+  // entirely (underflow/overflow rows).
+  static AB1 shift[512]={
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x17,0x16,0x15,0x14,0x13,0x12,0x11,0x10,0x0f,
+   0x0e,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,
+   0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x17,0x16,0x15,0x14,0x13,0x12,0x11,0x10,0x0f,
+   0x0e,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,
+   0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+   0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18};
+  // Reinterpret the float's bits through a union (truncating conversion: the
+  // dropped mantissa bits are not rounded), then combine the table lookups.
+  union{AF1 f;AU1 u;}bits;bits.f=f;AU1 u=bits.u;AU1 i=u>>23;return (AU1)(base[i])+((u&0x7fffff)>>shift[i]);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Pack two floats as two halves in one 32-bit word: a[0] in the low 16 bits,
+ // a[1] in the high 16. Used to output packed constants.
+ A_STATIC AU1 AU1_AH2_AF2(inAF2 a){return AU1_AH1_AF1(a[0])+(AU1_AH1_AF1(a[1])<<16);}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// GLSL
+//
+//
+//==============================================================================================================================
+#if defined(A_GLSL) && defined(A_GPU)
+ #ifndef A_SKIP_EXT
+ // Each optional feature opts into only the GLSL extensions it needs.
+ // Define A_SKIP_EXT before including this header to manage extensions manually.
+ #ifdef A_HALF
+ #extension GL_EXT_shader_16bit_storage:require
+ #extension GL_EXT_shader_explicit_arithmetic_types:require
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_LONG
+ #extension GL_ARB_gpu_shader_int64:require
+ #extension GL_NV_shader_atomic_int64:require
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_WAVE
+ #extension GL_KHR_shader_subgroup_arithmetic:require
+ #extension GL_KHR_shader_subgroup_ballot:require
+ #extension GL_KHR_shader_subgroup_quad:require
+ #extension GL_KHR_shader_subgroup_shuffle:require
+ #endif
+ #endif
+//==============================================================================================================================
+ // Portable type aliases: AP=predicate(bool), AF=float32, AU=uint32, ASU=int32; suffix digit is the vector width.
+ #define AP1 bool
+ #define AP2 bvec2
+ #define AP3 bvec3
+ #define AP4 bvec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AF1 float
+ #define AF2 vec2
+ #define AF3 vec3
+ #define AF4 vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1 uint
+ #define AU2 uvec2
+ #define AU3 uvec3
+ #define AU4 uvec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASU1 int
+ #define ASU2 ivec2
+ #define ASU3 ivec3
+ #define ASU4 ivec4
+//==============================================================================================================================
+ // Bit-pattern reinterpretation (not value conversion): AF?_AU? = uint bits viewed as float, AU?_AF? = float bits viewed as uint.
+ #define AF1_AU1(x) uintBitsToFloat(AU1(x))
+ #define AF2_AU2(x) uintBitsToFloat(AU2(x))
+ #define AF3_AU3(x) uintBitsToFloat(AU3(x))
+ #define AF4_AU4(x) uintBitsToFloat(AU4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1_AF1(x) floatBitsToUint(AF1(x))
+ #define AU2_AF2(x) floatBitsToUint(AF2(x))
+ #define AU3_AF3(x) floatBitsToUint(AF3(x))
+ #define AU4_AF4(x) floatBitsToUint(AF4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ // Convert one float32 to a half stored in the low 16 bits of a uint (high lane packed as 0.0).
+ AU1 AU1_AH1_AF1_x(AF1 a){return packHalf2x16(AF2(a,0.0));}
+ #define AU1_AH1_AF1(a) AU1_AH1_AF1_x(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packing: two halves / two unorm16 / four unorm8 into one 32-bit word.
+ #define AU1_AH2_AF2 packHalf2x16
+ #define AU1_AW2Unorm_AF2 packUnorm2x16
+ #define AU1_AB4Unorm_AF4 packUnorm4x8
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AF2_AH2_AU1 unpackHalf2x16
+ #define AF2_AW2Unorm_AU1 unpackUnorm2x16
+ #define AF4_AB4Unorm_AU1 unpackUnorm4x8
+//==============================================================================================================================
+ // Splat constructors: build a vector with all lanes set to one scalar (AF2_(x) == vec2(x,x), etc.).
+ AF1 AF1_x(AF1 a){return AF1(a);}
+ AF2 AF2_x(AF1 a){return AF2(a,a);}
+ AF3 AF3_x(AF1 a){return AF3(a,a,a);}
+ AF4 AF4_x(AF1 a){return AF4(a,a,a,a);}
+ #define AF1_(a) AF1_x(AF1(a))
+ #define AF2_(a) AF2_x(AF1(a))
+ #define AF3_(a) AF3_x(AF1(a))
+ #define AF4_(a) AF4_x(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AU1_x(AU1 a){return AU1(a);}
+ AU2 AU2_x(AU1 a){return AU2(a,a);}
+ AU3 AU3_x(AU1 a){return AU3(a,a,a);}
+ AU4 AU4_x(AU1 a){return AU4(a,a,a,a);}
+ #define AU1_(a) AU1_x(AU1(a))
+ #define AU2_(a) AU2_x(AU1(a))
+ #define AU3_(a) AU3_x(AU1(a))
+ #define AU4_(a) AU4_x(AU1(a))
+//==============================================================================================================================
+ // 32-bit helper ops. The 'S' variants keep values in unsigned (AU) registers but reinterpret them
+ // as signed for the operation, mirroring how the HW instructions treat register contents.
+ AU1 AAbsSU1(AU1 a){return AU1(abs(ASU1(a)));}
+ AU2 AAbsSU2(AU2 a){return AU2(abs(ASU2(a)));}
+ AU3 AAbsSU3(AU3 a){return AU3(abs(ASU3(a)));}
+ AU4 AAbsSU4(AU4 a){return AU4(abs(ASU4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Bitfield extract / insert helpers (map to V_BFE/V_BFI style instructions).
+ AU1 ABfe(AU1 src,AU1 off,AU1 bits){return bitfieldExtract(src,ASU1(off),ASU1(bits));}
+ AU1 ABfi(AU1 src,AU1 ins,AU1 mask){return (ins&mask)|(src&(~mask));}
+ // Proxy for V_BFI_B32 where the 'mask' is set as 'bits', 'mask=(1<<bits)-1', and 'bits' needs to be an immediate.
+ AU1 ABfiM(AU1 src,AU1 ins,AU1 bits){return bitfieldInsert(src,ins,0,ASU1(bits));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_MED3_F32.
+ AF1 AClampF1(AF1 x,AF1 n,AF1 m){return clamp(x,n,m);}
+ AF2 AClampF2(AF2 x,AF2 n,AF2 m){return clamp(x,n,m);}
+ AF3 AClampF3(AF3 x,AF3 n,AF3 m){return clamp(x,n,m);}
+ AF4 AClampF4(AF4 x,AF4 n,AF4 m){return clamp(x,n,m);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_FRACT_F32 (note DX frac() is different).
+ AF1 AFractF1(AF1 x){return fract(x);}
+ AF2 AFractF2(AF2 x){return fract(x);}
+ AF3 AFractF3(AF3 x){return fract(x);}
+ AF4 AFractF4(AF4 x){return fract(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ALerpF1(AF1 x,AF1 y,AF1 a){return mix(x,y,a);}
+ AF2 ALerpF2(AF2 x,AF2 y,AF2 a){return mix(x,y,a);}
+ AF3 ALerpF3(AF3 x,AF3 y,AF3 a){return mix(x,y,a);}
+ AF4 ALerpF4(AF4 x,AF4 y,AF4 a){return mix(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_MAX3_F32.
+ AF1 AMax3F1(AF1 x,AF1 y,AF1 z){return max(x,max(y,z));}
+ AF2 AMax3F2(AF2 x,AF2 y,AF2 z){return max(x,max(y,z));}
+ AF3 AMax3F3(AF3 x,AF3 y,AF3 z){return max(x,max(y,z));}
+ AF4 AMax3F4(AF4 x,AF4 y,AF4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMax3SU1(AU1 x,AU1 y,AU1 z){return AU1(max(ASU1(x),max(ASU1(y),ASU1(z))));}
+ AU2 AMax3SU2(AU2 x,AU2 y,AU2 z){return AU2(max(ASU2(x),max(ASU2(y),ASU2(z))));}
+ AU3 AMax3SU3(AU3 x,AU3 y,AU3 z){return AU3(max(ASU3(x),max(ASU3(y),ASU3(z))));}
+ AU4 AMax3SU4(AU4 x,AU4 y,AU4 z){return AU4(max(ASU4(x),max(ASU4(y),ASU4(z))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMax3U1(AU1 x,AU1 y,AU1 z){return max(x,max(y,z));}
+ AU2 AMax3U2(AU2 x,AU2 y,AU2 z){return max(x,max(y,z));}
+ AU3 AMax3U3(AU3 x,AU3 y,AU3 z){return max(x,max(y,z));}
+ AU4 AMax3U4(AU4 x,AU4 y,AU4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMaxSU1(AU1 a,AU1 b){return AU1(max(ASU1(a),ASU1(b)));}
+ AU2 AMaxSU2(AU2 a,AU2 b){return AU2(max(ASU2(a),ASU2(b)));}
+ AU3 AMaxSU3(AU3 a,AU3 b){return AU3(max(ASU3(a),ASU3(b)));}
+ AU4 AMaxSU4(AU4 a,AU4 b){return AU4(max(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Clamp has an easier pattern match for med3 when some ordering is known.
+ // V_MED3_F32.
+ AF1 AMed3F1(AF1 x,AF1 y,AF1 z){return max(min(x,y),min(max(x,y),z));}
+ AF2 AMed3F2(AF2 x,AF2 y,AF2 z){return max(min(x,y),min(max(x,y),z));}
+ AF3 AMed3F3(AF3 x,AF3 y,AF3 z){return max(min(x,y),min(max(x,y),z));}
+ AF4 AMed3F4(AF4 x,AF4 y,AF4 z){return max(min(x,y),min(max(x,y),z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_MIN3_F32.
+ AF1 AMin3F1(AF1 x,AF1 y,AF1 z){return min(x,min(y,z));}
+ AF2 AMin3F2(AF2 x,AF2 y,AF2 z){return min(x,min(y,z));}
+ AF3 AMin3F3(AF3 x,AF3 y,AF3 z){return min(x,min(y,z));}
+ AF4 AMin3F4(AF4 x,AF4 y,AF4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMin3SU1(AU1 x,AU1 y,AU1 z){return AU1(min(ASU1(x),min(ASU1(y),ASU1(z))));}
+ AU2 AMin3SU2(AU2 x,AU2 y,AU2 z){return AU2(min(ASU2(x),min(ASU2(y),ASU2(z))));}
+ AU3 AMin3SU3(AU3 x,AU3 y,AU3 z){return AU3(min(ASU3(x),min(ASU3(y),ASU3(z))));}
+ AU4 AMin3SU4(AU4 x,AU4 y,AU4 z){return AU4(min(ASU4(x),min(ASU4(y),ASU4(z))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMin3U1(AU1 x,AU1 y,AU1 z){return min(x,min(y,z));}
+ AU2 AMin3U2(AU2 x,AU2 y,AU2 z){return min(x,min(y,z));}
+ AU3 AMin3U3(AU3 x,AU3 y,AU3 z){return min(x,min(y,z));}
+ AU4 AMin3U4(AU4 x,AU4 y,AU4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMinSU1(AU1 a,AU1 b){return AU1(min(ASU1(a),ASU1(b)));}
+ AU2 AMinSU2(AU2 a,AU2 b){return AU2(min(ASU2(a),ASU2(b)));}
+ AU3 AMinSU3(AU3 a,AU3 b){return AU3(min(ASU3(a),ASU3(b)));}
+ AU4 AMinSU4(AU4 a,AU4 b){return AU4(min(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalized trig. Valid input domain is {-256 to +256}. No GLSL compiler intrinsic exists to map to this currently.
+ // V_COS_F32.
+ AF1 ANCosF1(AF1 x){return cos(x*AF1_(A_2PI));}
+ AF2 ANCosF2(AF2 x){return cos(x*AF2_(A_2PI));}
+ AF3 ANCosF3(AF3 x){return cos(x*AF3_(A_2PI));}
+ AF4 ANCosF4(AF4 x){return cos(x*AF4_(A_2PI));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalized trig. Valid input domain is {-256 to +256}. No GLSL compiler intrinsic exists to map to this currently.
+ // V_SIN_F32.
+ AF1 ANSinF1(AF1 x){return sin(x*AF1_(A_2PI));}
+ AF2 ANSinF2(AF2 x){return sin(x*AF2_(A_2PI));}
+ AF3 ANSinF3(AF3 x){return sin(x*AF3_(A_2PI));}
+ AF4 ANSinF4(AF4 x){return sin(x*AF4_(A_2PI));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Full-precision reciprocal / reciprocal-sqrt (division form; not the fast approximate HW rcp).
+ AF1 ARcpF1(AF1 x){return AF1_(1.0)/x;}
+ AF2 ARcpF2(AF2 x){return AF2_(1.0)/x;}
+ AF3 ARcpF3(AF3 x){return AF3_(1.0)/x;}
+ AF4 ARcpF4(AF4 x){return AF4_(1.0)/x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ARsqF1(AF1 x){return AF1_(1.0)/sqrt(x);}
+ AF2 ARsqF2(AF2 x){return AF2_(1.0)/sqrt(x);}
+ AF3 ARsqF3(AF3 x){return AF3_(1.0)/sqrt(x);}
+ AF4 ARsqF4(AF4 x){return AF4_(1.0)/sqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Saturate: clamp to [0,1].
+ AF1 ASatF1(AF1 x){return clamp(x,AF1_(0.0),AF1_(1.0));}
+ AF2 ASatF2(AF2 x){return clamp(x,AF2_(0.0),AF2_(1.0));}
+ AF3 ASatF3(AF3 x){return clamp(x,AF3_(0.0),AF3_(1.0));}
+ AF4 ASatF4(AF4 x){return clamp(x,AF4_(0.0),AF4_(1.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Arithmetic (sign-extending) shift right on unsigned storage.
+ AU1 AShrSU1(AU1 a,AU1 b){return AU1(ASU1(a)>>ASU1(b));}
+ AU2 AShrSU2(AU2 a,AU2 b){return AU2(ASU2(a)>>ASU2(b));}
+ AU3 AShrSU3(AU3 a,AU3 b){return AU3(ASU3(a)>>ASU3(b));}
+ AU4 AShrSU4(AU4 a,AU4 b){return AU4(ASU4(a)>>ASU4(b));}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// GLSL BYTE
+//==============================================================================================================================
+ #ifdef A_BYTE
+ // 8-bit types (requires GL_EXT_shader_explicit_arithmetic_types): AB=uint8, ASB=int8.
+ #define AB1 uint8_t
+ #define AB2 u8vec2
+ #define AB3 u8vec3
+ #define AB4 u8vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASB1 int8_t
+ #define ASB2 i8vec2
+ #define ASB3 i8vec3
+ #define ASB4 i8vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ // Splat constructors for 8-bit vectors.
+ AB1 AB1_x(AB1 a){return AB1(a);}
+ AB2 AB2_x(AB1 a){return AB2(a,a);}
+ AB3 AB3_x(AB1 a){return AB3(a,a,a);}
+ AB4 AB4_x(AB1 a){return AB4(a,a,a,a);}
+ #define AB1_(a) AB1_x(AB1(a))
+ #define AB2_(a) AB2_x(AB1(a))
+ #define AB3_(a) AB3_x(AB1(a))
+ #define AB4_(a) AB4_x(AB1(a))
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// GLSL HALF
+//==============================================================================================================================
+ #ifdef A_HALF
+ // 16-bit types: AH=float16, AW=uint16, ASW=int16.
+ #define AH1 float16_t
+ #define AH2 f16vec2
+ #define AH3 f16vec3
+ #define AH4 f16vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AW1 uint16_t
+ #define AW2 u16vec2
+ #define AW3 u16vec3
+ #define AW4 u16vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASW1 int16_t
+ #define ASW2 i16vec2
+ #define ASW3 i16vec3
+ #define ASW4 i16vec4
+//==============================================================================================================================
+ // Unpack 16-bit pairs out of 32-bit words (bit reinterpretation, no value conversion).
+ #define AH2_AU1(x) unpackFloat2x16(AU1(x))
+ AH4 AH4_AU2_x(AU2 x){return AH4(unpackFloat2x16(x.x),unpackFloat2x16(x.y));}
+ #define AH4_AU2(x) AH4_AU2_x(AU2(x))
+ #define AW2_AU1(x) unpackUint2x16(AU1(x))
+ #define AW4_AU2(x) unpackUint4x16(pack64(AU2(x)))
+//------------------------------------------------------------------------------------------------------------------------------
+ // Pack 16-bit pairs into 32-bit words.
+ #define AU1_AH2(x) packFloat2x16(AH2(x))
+ AU2 AU2_AH4_x(AH4 x){return AU2(packFloat2x16(x.xy),packFloat2x16(x.zw));}
+ #define AU2_AH4(x) AU2_AH4_x(AH4(x))
+ #define AU1_AW2(x) packUint2x16(AW2(x))
+ #define AU2_AW4(x) unpack32(packUint4x16(AW4(x)))
+//==============================================================================================================================
+ // half <-> uint16 bit reinterpretation.
+ #define AW1_AH1(x) halfBitsToUint16(AH1(x))
+ #define AW2_AH2(x) halfBitsToUint16(AH2(x))
+ #define AW3_AH3(x) halfBitsToUint16(AH3(x))
+ #define AW4_AH4(x) halfBitsToUint16(AH4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AH1_AW1(x) uint16BitsToHalf(AW1(x))
+ #define AH2_AW2(x) uint16BitsToHalf(AW2(x))
+ #define AH3_AW3(x) uint16BitsToHalf(AW3(x))
+ #define AH4_AW4(x) uint16BitsToHalf(AW4(x))
+//==============================================================================================================================
+ // Splat constructors for 16-bit vectors.
+ AH1 AH1_x(AH1 a){return AH1(a);}
+ AH2 AH2_x(AH1 a){return AH2(a,a);}
+ AH3 AH3_x(AH1 a){return AH3(a,a,a);}
+ AH4 AH4_x(AH1 a){return AH4(a,a,a,a);}
+ #define AH1_(a) AH1_x(AH1(a))
+ #define AH2_(a) AH2_x(AH1(a))
+ #define AH3_(a) AH3_x(AH1(a))
+ #define AH4_(a) AH4_x(AH1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AW1_x(AW1 a){return AW1(a);}
+ AW2 AW2_x(AW1 a){return AW2(a,a);}
+ AW3 AW3_x(AW1 a){return AW3(a,a,a);}
+ AW4 AW4_x(AW1 a){return AW4(a,a,a,a);}
+ #define AW1_(a) AW1_x(AW1(a))
+ #define AW2_(a) AW2_x(AW1(a))
+ #define AW3_(a) AW3_x(AW1(a))
+ #define AW4_(a) AW4_x(AW1(a))
+//==============================================================================================================================
+ // 16-bit ops; 'S' variants reinterpret unsigned 16-bit storage as signed.
+ AW1 AAbsSW1(AW1 a){return AW1(abs(ASW1(a)));}
+ AW2 AAbsSW2(AW2 a){return AW2(abs(ASW2(a)));}
+ AW3 AAbsSW3(AW3 a){return AW3(abs(ASW3(a)));}
+ AW4 AAbsSW4(AW4 a){return AW4(abs(ASW4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AClampH1(AH1 x,AH1 n,AH1 m){return clamp(x,n,m);}
+ AH2 AClampH2(AH2 x,AH2 n,AH2 m){return clamp(x,n,m);}
+ AH3 AClampH3(AH3 x,AH3 n,AH3 m){return clamp(x,n,m);}
+ AH4 AClampH4(AH4 x,AH4 n,AH4 m){return clamp(x,n,m);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AFractH1(AH1 x){return fract(x);}
+ AH2 AFractH2(AH2 x){return fract(x);}
+ AH3 AFractH3(AH3 x){return fract(x);}
+ AH4 AFractH4(AH4 x){return fract(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ALerpH1(AH1 x,AH1 y,AH1 a){return mix(x,y,a);}
+ AH2 ALerpH2(AH2 x,AH2 y,AH2 a){return mix(x,y,a);}
+ AH3 ALerpH3(AH3 x,AH3 y,AH3 a){return mix(x,y,a);}
+ AH4 ALerpH4(AH4 x,AH4 y,AH4 a){return mix(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // No packed version of max3.
+ AH1 AMax3H1(AH1 x,AH1 y,AH1 z){return max(x,max(y,z));}
+ AH2 AMax3H2(AH2 x,AH2 y,AH2 z){return max(x,max(y,z));}
+ AH3 AMax3H3(AH3 x,AH3 y,AH3 z){return max(x,max(y,z));}
+ AH4 AMax3H4(AH4 x,AH4 y,AH4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // NOTE(review): these cast through 32-bit ASU types, and u16 -> int32 zero-extends, so a negative
+ // int16 bit pattern compares as a large positive value. AAbsSW/AShrSW above use ASW instead.
+ // Matches the vendored upstream header as imported; confirm against upstream ffx_a.h before changing.
+ AW1 AMaxSW1(AW1 a,AW1 b){return AW1(max(ASU1(a),ASU1(b)));}
+ AW2 AMaxSW2(AW2 a,AW2 b){return AW2(max(ASU2(a),ASU2(b)));}
+ AW3 AMaxSW3(AW3 a,AW3 b){return AW3(max(ASU3(a),ASU3(b)));}
+ AW4 AMaxSW4(AW4 a,AW4 b){return AW4(max(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // No packed version of min3.
+ AH1 AMin3H1(AH1 x,AH1 y,AH1 z){return min(x,min(y,z));}
+ AH2 AMin3H2(AH2 x,AH2 y,AH2 z){return min(x,min(y,z));}
+ AH3 AMin3H3(AH3 x,AH3 y,AH3 z){return min(x,min(y,z));}
+ AH4 AMin3H4(AH4 x,AH4 y,AH4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // NOTE(review): same 32-bit ASU cast pattern as AMaxSW* above — see note there.
+ AW1 AMinSW1(AW1 a,AW1 b){return AW1(min(ASU1(a),ASU1(b)));}
+ AW2 AMinSW2(AW2 a,AW2 b){return AW2(min(ASU2(a),ASU2(b)));}
+ AW3 AMinSW3(AW3 a,AW3 b){return AW3(min(ASU3(a),ASU3(b)));}
+ AW4 AMinSW4(AW4 a,AW4 b){return AW4(min(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ARcpH1(AH1 x){return AH1_(1.0)/x;}
+ AH2 ARcpH2(AH2 x){return AH2_(1.0)/x;}
+ AH3 ARcpH3(AH3 x){return AH3_(1.0)/x;}
+ AH4 ARcpH4(AH4 x){return AH4_(1.0)/x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ARsqH1(AH1 x){return AH1_(1.0)/sqrt(x);}
+ AH2 ARsqH2(AH2 x){return AH2_(1.0)/sqrt(x);}
+ AH3 ARsqH3(AH3 x){return AH3_(1.0)/sqrt(x);}
+ AH4 ARsqH4(AH4 x){return AH4_(1.0)/sqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ASatH1(AH1 x){return clamp(x,AH1_(0.0),AH1_(1.0));}
+ AH2 ASatH2(AH2 x){return clamp(x,AH2_(0.0),AH2_(1.0));}
+ AH3 ASatH3(AH3 x){return clamp(x,AH3_(0.0),AH3_(1.0));}
+ AH4 ASatH4(AH4 x){return clamp(x,AH4_(0.0),AH4_(1.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AShrSW1(AW1 a,AW1 b){return AW1(ASW1(a)>>ASW1(b));}
+ AW2 AShrSW2(AW2 a,AW2 b){return AW2(ASW2(a)>>ASW2(b));}
+ AW3 AShrSW3(AW3 a,AW3 b){return AW3(ASW3(a)>>ASW3(b));}
+ AW4 AShrSW4(AW4 a,AW4 b){return AW4(ASW4(a)>>ASW4(b));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// GLSL DOUBLE
+//==============================================================================================================================
+ #ifdef A_DUBL
+ // 64-bit float types (AD=double) and the subset of ops provided for them.
+ #define AD1 double
+ #define AD2 dvec2
+ #define AD3 dvec3
+ #define AD4 dvec4
+//------------------------------------------------------------------------------------------------------------------------------
+ // Splat constructors for double vectors.
+ AD1 AD1_x(AD1 a){return AD1(a);}
+ AD2 AD2_x(AD1 a){return AD2(a,a);}
+ AD3 AD3_x(AD1 a){return AD3(a,a,a);}
+ AD4 AD4_x(AD1 a){return AD4(a,a,a,a);}
+ #define AD1_(a) AD1_x(AD1(a))
+ #define AD2_(a) AD2_x(AD1(a))
+ #define AD3_(a) AD3_x(AD1(a))
+ #define AD4_(a) AD4_x(AD1(a))
+//==============================================================================================================================
+ AD1 AFractD1(AD1 x){return fract(x);}
+ AD2 AFractD2(AD2 x){return fract(x);}
+ AD3 AFractD3(AD3 x){return fract(x);}
+ AD4 AFractD4(AD4 x){return fract(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD1 ALerpD1(AD1 x,AD1 y,AD1 a){return mix(x,y,a);}
+ AD2 ALerpD2(AD2 x,AD2 y,AD2 a){return mix(x,y,a);}
+ AD3 ALerpD3(AD3 x,AD3 y,AD3 a){return mix(x,y,a);}
+ AD4 ALerpD4(AD4 x,AD4 y,AD4 a){return mix(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD1 ARcpD1(AD1 x){return AD1_(1.0)/x;}
+ AD2 ARcpD2(AD2 x){return AD2_(1.0)/x;}
+ AD3 ARcpD3(AD3 x){return AD3_(1.0)/x;}
+ AD4 ARcpD4(AD4 x){return AD4_(1.0)/x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD1 ARsqD1(AD1 x){return AD1_(1.0)/sqrt(x);}
+ AD2 ARsqD2(AD2 x){return AD2_(1.0)/sqrt(x);}
+ AD3 ARsqD3(AD3 x){return AD3_(1.0)/sqrt(x);}
+ AD4 ARsqD4(AD4 x){return AD4_(1.0)/sqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD1 ASatD1(AD1 x){return clamp(x,AD1_(0.0),AD1_(1.0));}
+ AD2 ASatD2(AD2 x){return clamp(x,AD2_(0.0),AD2_(1.0));}
+ AD3 ASatD3(AD3 x){return clamp(x,AD3_(0.0),AD3_(1.0));}
+ AD4 ASatD4(AD4 x){return clamp(x,AD4_(0.0),AD4_(1.0));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// GLSL LONG
+//==============================================================================================================================
+ #ifdef A_LONG
+ // 64-bit integer types (requires GL_ARB_gpu_shader_int64): AL=uint64, ASL=int64.
+ #define AL1 uint64_t
+ #define AL2 u64vec2
+ #define AL3 u64vec3
+ #define AL4 u64vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASL1 int64_t
+ #define ASL2 i64vec2
+ #define ASL3 i64vec3
+ #define ASL4 i64vec4
+//------------------------------------------------------------------------------------------------------------------------------
+ // Pack/unpack a 64-bit word as a pair of 32-bit words.
+ #define AL1_AU2(x) packUint2x32(AU2(x))
+ #define AU2_AL1(x) unpackUint2x32(AL1(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ // Splat constructors for 64-bit vectors.
+ AL1 AL1_x(AL1 a){return AL1(a);}
+ AL2 AL2_x(AL1 a){return AL2(a,a);}
+ AL3 AL3_x(AL1 a){return AL3(a,a,a);}
+ AL4 AL4_x(AL1 a){return AL4(a,a,a,a);}
+ #define AL1_(a) AL1_x(AL1(a))
+ #define AL2_(a) AL2_x(AL1(a))
+ #define AL3_(a) AL3_x(AL1(a))
+ #define AL4_(a) AL4_x(AL1(a))
+//==============================================================================================================================
+ AL1 AAbsSL1(AL1 a){return AL1(abs(ASL1(a)));}
+ AL2 AAbsSL2(AL2 a){return AL2(abs(ASL2(a)));}
+ AL3 AAbsSL3(AL3 a){return AL3(abs(ASL3(a)));}
+ AL4 AAbsSL4(AL4 a){return AL4(abs(ASL4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // NOTE(review): these cast 64-bit operands through 32-bit ASU types, which appears to truncate the
+ // upper 32 bits (AAbsSL* above uses ASL). Matches the vendored upstream header as imported;
+ // confirm against upstream ffx_a.h before changing.
+ AL1 AMaxSL1(AL1 a,AL1 b){return AL1(max(ASU1(a),ASU1(b)));}
+ AL2 AMaxSL2(AL2 a,AL2 b){return AL2(max(ASU2(a),ASU2(b)));}
+ AL3 AMaxSL3(AL3 a,AL3 b){return AL3(max(ASU3(a),ASU3(b)));}
+ AL4 AMaxSL4(AL4 a,AL4 b){return AL4(max(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // NOTE(review): same 32-bit ASU cast pattern as AMaxSL* above — see note there.
+ AL1 AMinSL1(AL1 a,AL1 b){return AL1(min(ASU1(a),ASU1(b)));}
+ AL2 AMinSL2(AL2 a,AL2 b){return AL2(min(ASU2(a),ASU2(b)));}
+ AL3 AMinSL3(AL3 a,AL3 b){return AL3(min(ASU3(a),ASU3(b)));}
+ AL4 AMinSL4(AL4 a,AL4 b){return AL4(min(ASU4(a),ASU4(b)));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// WAVE OPERATIONS
+//==============================================================================================================================
+ #ifdef A_WAVE
+ // Subgroup (wave) butterfly shuffles via GL_KHR_shader_subgroup_shuffle.
+ // Where 'x' must be a compile time literal.
+ AF1 AWaveXorF1(AF1 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AF2 AWaveXorF2(AF2 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AF3 AWaveXorF3(AF3 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AF4 AWaveXorF4(AF4 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AU1 AWaveXorU1(AU1 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AU2 AWaveXorU2(AU2 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AU3 AWaveXorU3(AU3 v,AU1 x){return subgroupShuffleXor(v,x);}
+ AU4 AWaveXorU4(AU4 v,AU1 x){return subgroupShuffleXor(v,x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Half variants shuffle the packed 32-bit representation, then unpack.
+ #ifdef A_HALF
+ AH2 AWaveXorH2(AH2 v,AU1 x){return AH2_AU1(subgroupShuffleXor(AU1_AH2(v),x));}
+ AH4 AWaveXorH4(AH4 v,AU1 x){return AH4_AU2(subgroupShuffleXor(AU2_AH4(v),x));}
+ AW2 AWaveXorW2(AW2 v,AU1 x){return AW2_AU1(subgroupShuffleXor(AU1_AW2(v),x));}
+ AW4 AWaveXorW4(AW4 v,AU1 x){return AW4_AU2(subgroupShuffleXor(AU2_AW4(v),x));}
+ #endif
+ #endif
+//==============================================================================================================================
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// HLSL
+//
+//
+//==============================================================================================================================
+#if defined(A_HLSL) && defined(A_GPU)
+ #ifdef A_HLSL_6_2
+ #define AP1 bool
+ #define AP2 bool2
+ #define AP3 bool3
+ #define AP4 bool4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AF1 float32_t
+ #define AF2 float32_t2
+ #define AF3 float32_t3
+ #define AF4 float32_t4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1 uint32_t
+ #define AU2 uint32_t2
+ #define AU3 uint32_t3
+ #define AU4 uint32_t4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASU1 int32_t
+ #define ASU2 int32_t2
+ #define ASU3 int32_t3
+ #define ASU4 int32_t4
+ #else
+ #define AP1 bool
+ #define AP2 bool2
+ #define AP3 bool3
+ #define AP4 bool4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AF1 float
+ #define AF2 float2
+ #define AF3 float3
+ #define AF4 float4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1 uint
+ #define AU2 uint2
+ #define AU3 uint3
+ #define AU4 uint4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASU1 int
+ #define ASU2 int2
+ #define ASU3 int3
+ #define ASU4 int4
+ #endif
+//==============================================================================================================================
+ #define AF1_AU1(x) asfloat(AU1(x))
+ #define AF2_AU2(x) asfloat(AU2(x))
+ #define AF3_AU3(x) asfloat(AU3(x))
+ #define AF4_AU4(x) asfloat(AU4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AU1_AF1(x) asuint(AF1(x))
+ #define AU2_AF2(x) asuint(AF2(x))
+ #define AU3_AF3(x) asuint(AF3(x))
+ #define AU4_AF4(x) asuint(AF4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AU1_AH1_AF1_x(AF1 a){return f32tof16(a);}
+ #define AU1_AH1_AF1(a) AU1_AH1_AF1_x(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AU1_AH2_AF2_x(AF2 a){return f32tof16(a.x)|(f32tof16(a.y)<<16);}
+ #define AU1_AH2_AF2(a) AU1_AH2_AF2_x(AF2(a))
+ #define AU1_AB4Unorm_AF4(x) D3DCOLORtoUBYTE4(AF4(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 AF2_AH2_AU1_x(AU1 x){return AF2(f16tof32(x&0xFFFF),f16tof32(x>>16));}
+ #define AF2_AH2_AU1(x) AF2_AH2_AU1_x(AU1(x))
+//==============================================================================================================================
+ AF1 AF1_x(AF1 a){return AF1(a);}
+ AF2 AF2_x(AF1 a){return AF2(a,a);}
+ AF3 AF3_x(AF1 a){return AF3(a,a,a);}
+ AF4 AF4_x(AF1 a){return AF4(a,a,a,a);}
+ #define AF1_(a) AF1_x(AF1(a))
+ #define AF2_(a) AF2_x(AF1(a))
+ #define AF3_(a) AF3_x(AF1(a))
+ #define AF4_(a) AF4_x(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AU1_x(AU1 a){return AU1(a);}
+ AU2 AU2_x(AU1 a){return AU2(a,a);}
+ AU3 AU3_x(AU1 a){return AU3(a,a,a);}
+ AU4 AU4_x(AU1 a){return AU4(a,a,a,a);}
+ #define AU1_(a) AU1_x(AU1(a))
+ #define AU2_(a) AU2_x(AU1(a))
+ #define AU3_(a) AU3_x(AU1(a))
+ #define AU4_(a) AU4_x(AU1(a))
+//==============================================================================================================================
+ AU1 AAbsSU1(AU1 a){return AU1(abs(ASU1(a)));}
+ AU2 AAbsSU2(AU2 a){return AU2(abs(ASU2(a)));}
+ AU3 AAbsSU3(AU3 a){return AU3(abs(ASU3(a)));}
+ AU4 AAbsSU4(AU4 a){return AU4(abs(ASU4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 ABfe(AU1 src,AU1 off,AU1 bits){AU1 mask=(1u<<bits)-1;return (src>>off)&mask;}
+ AU1 ABfi(AU1 src,AU1 ins,AU1 mask){return (ins&mask)|(src&(~mask));}
+ AU1 ABfiM(AU1 src,AU1 ins,AU1 bits){AU1 mask=(1u<<bits)-1;return (ins&mask)|(src&(~mask));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AClampF1(AF1 x,AF1 n,AF1 m){return max(n,min(x,m));}
+ AF2 AClampF2(AF2 x,AF2 n,AF2 m){return max(n,min(x,m));}
+ AF3 AClampF3(AF3 x,AF3 n,AF3 m){return max(n,min(x,m));}
+ AF4 AClampF4(AF4 x,AF4 n,AF4 m){return max(n,min(x,m));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AFractF1(AF1 x){return x-floor(x);}
+ AF2 AFractF2(AF2 x){return x-floor(x);}
+ AF3 AFractF3(AF3 x){return x-floor(x);}
+ AF4 AFractF4(AF4 x){return x-floor(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ALerpF1(AF1 x,AF1 y,AF1 a){return lerp(x,y,a);}
+ AF2 ALerpF2(AF2 x,AF2 y,AF2 a){return lerp(x,y,a);}
+ AF3 ALerpF3(AF3 x,AF3 y,AF3 a){return lerp(x,y,a);}
+ AF4 ALerpF4(AF4 x,AF4 y,AF4 a){return lerp(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AMax3F1(AF1 x,AF1 y,AF1 z){return max(x,max(y,z));}
+ AF2 AMax3F2(AF2 x,AF2 y,AF2 z){return max(x,max(y,z));}
+ AF3 AMax3F3(AF3 x,AF3 y,AF3 z){return max(x,max(y,z));}
+ AF4 AMax3F4(AF4 x,AF4 y,AF4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMax3SU1(AU1 x,AU1 y,AU1 z){return AU1(max(ASU1(x),max(ASU1(y),ASU1(z))));}
+ AU2 AMax3SU2(AU2 x,AU2 y,AU2 z){return AU2(max(ASU2(x),max(ASU2(y),ASU2(z))));}
+ AU3 AMax3SU3(AU3 x,AU3 y,AU3 z){return AU3(max(ASU3(x),max(ASU3(y),ASU3(z))));}
+ AU4 AMax3SU4(AU4 x,AU4 y,AU4 z){return AU4(max(ASU4(x),max(ASU4(y),ASU4(z))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMax3U1(AU1 x,AU1 y,AU1 z){return max(x,max(y,z));}
+ AU2 AMax3U2(AU2 x,AU2 y,AU2 z){return max(x,max(y,z));}
+ AU3 AMax3U3(AU3 x,AU3 y,AU3 z){return max(x,max(y,z));}
+ AU4 AMax3U4(AU4 x,AU4 y,AU4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMaxSU1(AU1 a,AU1 b){return AU1(max(ASU1(a),ASU1(b)));}
+ AU2 AMaxSU2(AU2 a,AU2 b){return AU2(max(ASU2(a),ASU2(b)));}
+ AU3 AMaxSU3(AU3 a,AU3 b){return AU3(max(ASU3(a),ASU3(b)));}
+ AU4 AMaxSU4(AU4 a,AU4 b){return AU4(max(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AMed3F1(AF1 x,AF1 y,AF1 z){return max(min(x,y),min(max(x,y),z));}
+ AF2 AMed3F2(AF2 x,AF2 y,AF2 z){return max(min(x,y),min(max(x,y),z));}
+ AF3 AMed3F3(AF3 x,AF3 y,AF3 z){return max(min(x,y),min(max(x,y),z));}
+ AF4 AMed3F4(AF4 x,AF4 y,AF4 z){return max(min(x,y),min(max(x,y),z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AMin3F1(AF1 x,AF1 y,AF1 z){return min(x,min(y,z));}
+ AF2 AMin3F2(AF2 x,AF2 y,AF2 z){return min(x,min(y,z));}
+ AF3 AMin3F3(AF3 x,AF3 y,AF3 z){return min(x,min(y,z));}
+ AF4 AMin3F4(AF4 x,AF4 y,AF4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMin3SU1(AU1 x,AU1 y,AU1 z){return AU1(min(ASU1(x),min(ASU1(y),ASU1(z))));}
+ AU2 AMin3SU2(AU2 x,AU2 y,AU2 z){return AU2(min(ASU2(x),min(ASU2(y),ASU2(z))));}
+ AU3 AMin3SU3(AU3 x,AU3 y,AU3 z){return AU3(min(ASU3(x),min(ASU3(y),ASU3(z))));}
+ AU4 AMin3SU4(AU4 x,AU4 y,AU4 z){return AU4(min(ASU4(x),min(ASU4(y),ASU4(z))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMin3U1(AU1 x,AU1 y,AU1 z){return min(x,min(y,z));}
+ AU2 AMin3U2(AU2 x,AU2 y,AU2 z){return min(x,min(y,z));}
+ AU3 AMin3U3(AU3 x,AU3 y,AU3 z){return min(x,min(y,z));}
+ AU4 AMin3U4(AU4 x,AU4 y,AU4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AMinSU1(AU1 a,AU1 b){return AU1(min(ASU1(a),ASU1(b)));}
+ AU2 AMinSU2(AU2 a,AU2 b){return AU2(min(ASU2(a),ASU2(b)));}
+ AU3 AMinSU3(AU3 a,AU3 b){return AU3(min(ASU3(a),ASU3(b)));}
+ AU4 AMinSU4(AU4 a,AU4 b){return AU4(min(ASU4(a),ASU4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ANCosF1(AF1 x){return cos(x*AF1_(A_2PI));}
+ AF2 ANCosF2(AF2 x){return cos(x*AF2_(A_2PI));}
+ AF3 ANCosF3(AF3 x){return cos(x*AF3_(A_2PI));}
+ AF4 ANCosF4(AF4 x){return cos(x*AF4_(A_2PI));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ANSinF1(AF1 x){return sin(x*AF1_(A_2PI));}
+ AF2 ANSinF2(AF2 x){return sin(x*AF2_(A_2PI));}
+ AF3 ANSinF3(AF3 x){return sin(x*AF3_(A_2PI));}
+ AF4 ANSinF4(AF4 x){return sin(x*AF4_(A_2PI));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ARcpF1(AF1 x){return rcp(x);}
+ AF2 ARcpF2(AF2 x){return rcp(x);}
+ AF3 ARcpF3(AF3 x){return rcp(x);}
+ AF4 ARcpF4(AF4 x){return rcp(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ARsqF1(AF1 x){return rsqrt(x);}
+ AF2 ARsqF2(AF2 x){return rsqrt(x);}
+ AF3 ARsqF3(AF3 x){return rsqrt(x);}
+ AF4 ARsqF4(AF4 x){return rsqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 ASatF1(AF1 x){return saturate(x);}
+ AF2 ASatF2(AF2 x){return saturate(x);}
+ AF3 ASatF3(AF3 x){return saturate(x);}
+ AF4 ASatF4(AF4 x){return saturate(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AShrSU1(AU1 a,AU1 b){return AU1(ASU1(a)>>ASU1(b));}
+ AU2 AShrSU2(AU2 a,AU2 b){return AU2(ASU2(a)>>ASU2(b));}
+ AU3 AShrSU3(AU3 a,AU3 b){return AU3(ASU3(a)>>ASU3(b));}
+ AU4 AShrSU4(AU4 a,AU4 b){return AU4(ASU4(a)>>ASU4(b));}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HLSL BYTE
+//==============================================================================================================================
+ #ifdef A_BYTE
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HLSL HALF
+//==============================================================================================================================
+ #ifdef A_HALF
+ #ifdef A_HLSL_6_2
+ #define AH1 float16_t
+ #define AH2 float16_t2
+ #define AH3 float16_t3
+ #define AH4 float16_t4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AW1 uint16_t
+ #define AW2 uint16_t2
+ #define AW3 uint16_t3
+ #define AW4 uint16_t4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASW1 int16_t
+ #define ASW2 int16_t2
+ #define ASW3 int16_t3
+ #define ASW4 int16_t4
+ #else
+ #define AH1 min16float
+ #define AH2 min16float2
+ #define AH3 min16float3
+ #define AH4 min16float4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AW1 min16uint
+ #define AW2 min16uint2
+ #define AW3 min16uint3
+ #define AW4 min16uint4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASW1 min16int
+ #define ASW2 min16int2
+ #define ASW3 min16int3
+ #define ASW4 min16int4
+ #endif
+//==============================================================================================================================
+ // Need to use manual unpack to get optimal execution (don't use packed types in buffers directly).
+ // Unpack requires this pattern: https://gpuopen.com/first-steps-implementing-fp16/
+ AH2 AH2_AU1_x(AU1 x){AF2 t=f16tof32(AU2(x&0xFFFF,x>>16));return AH2(t);}
+ AH4 AH4_AU2_x(AU2 x){return AH4(AH2_AU1_x(x.x),AH2_AU1_x(x.y));}
+ AW2 AW2_AU1_x(AU1 x){AU2 t=AU2(x&0xFFFF,x>>16);return AW2(t);}
+ AW4 AW4_AU2_x(AU2 x){return AW4(AW2_AU1_x(x.x),AW2_AU1_x(x.y));}
+ #define AH2_AU1(x) AH2_AU1_x(AU1(x))
+ #define AH4_AU2(x) AH4_AU2_x(AU2(x))
+ #define AW2_AU1(x) AW2_AU1_x(AU1(x))
+ #define AW4_AU2(x) AW4_AU2_x(AU2(x))
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AU1_AH2_x(AH2 x){return f32tof16(x.x)+(f32tof16(x.y)<<16);}
+ AU2 AU2_AH4_x(AH4 x){return AU2(AU1_AH2_x(x.xy),AU1_AH2_x(x.zw));}
+ AU1 AU1_AW2_x(AW2 x){return AU1(x.x)+(AU1(x.y)<<16);}
+ AU2 AU2_AW4_x(AW4 x){return AU2(AU1_AW2_x(x.xy),AU1_AW2_x(x.zw));}
+ #define AU1_AH2(x) AU1_AH2_x(AH2(x))
+ #define AU2_AH4(x) AU2_AH4_x(AH4(x))
+ #define AU1_AW2(x) AU1_AW2_x(AW2(x))
+ #define AU2_AW4(x) AU2_AW4_x(AW4(x))
+//==============================================================================================================================
+ #if defined(A_HLSL_6_2) && !defined(A_NO_16_BIT_CAST)
+ #define AW1_AH1(x) asuint16(x)
+ #define AW2_AH2(x) asuint16(x)
+ #define AW3_AH3(x) asuint16(x)
+ #define AW4_AH4(x) asuint16(x)
+ #else
+ #define AW1_AH1(a) AW1(f32tof16(AF1(a)))
+ #define AW2_AH2(a) AW2(AW1_AH1((a).x),AW1_AH1((a).y))
+ #define AW3_AH3(a) AW3(AW1_AH1((a).x),AW1_AH1((a).y),AW1_AH1((a).z))
+ #define AW4_AH4(a) AW4(AW1_AH1((a).x),AW1_AH1((a).y),AW1_AH1((a).z),AW1_AH1((a).w))
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #if defined(A_HLSL_6_2) && !defined(A_NO_16_BIT_CAST)
+ #define AH1_AW1(x) asfloat16(x)
+ #define AH2_AW2(x) asfloat16(x)
+ #define AH3_AW3(x) asfloat16(x)
+ #define AH4_AW4(x) asfloat16(x)
+ #else
+ #define AH1_AW1(a) AH1(f16tof32(AU1(a)))
+ #define AH2_AW2(a) AH2(AH1_AW1((a).x),AH1_AW1((a).y))
+ #define AH3_AW3(a) AH3(AH1_AW1((a).x),AH1_AW1((a).y),AH1_AW1((a).z))
+ #define AH4_AW4(a) AH4(AH1_AW1((a).x),AH1_AW1((a).y),AH1_AW1((a).z),AH1_AW1((a).w))
+ #endif
+//==============================================================================================================================
+ AH1 AH1_x(AH1 a){return AH1(a);}
+ AH2 AH2_x(AH1 a){return AH2(a,a);}
+ AH3 AH3_x(AH1 a){return AH3(a,a,a);}
+ AH4 AH4_x(AH1 a){return AH4(a,a,a,a);}
+ #define AH1_(a) AH1_x(AH1(a))
+ #define AH2_(a) AH2_x(AH1(a))
+ #define AH3_(a) AH3_x(AH1(a))
+ #define AH4_(a) AH4_x(AH1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AW1_x(AW1 a){return AW1(a);}
+ AW2 AW2_x(AW1 a){return AW2(a,a);}
+ AW3 AW3_x(AW1 a){return AW3(a,a,a);}
+ AW4 AW4_x(AW1 a){return AW4(a,a,a,a);}
+ #define AW1_(a) AW1_x(AW1(a))
+ #define AW2_(a) AW2_x(AW1(a))
+ #define AW3_(a) AW3_x(AW1(a))
+ #define AW4_(a) AW4_x(AW1(a))
+//==============================================================================================================================
+ AW1 AAbsSW1(AW1 a){return AW1(abs(ASW1(a)));}
+ AW2 AAbsSW2(AW2 a){return AW2(abs(ASW2(a)));}
+ AW3 AAbsSW3(AW3 a){return AW3(abs(ASW3(a)));}
+ AW4 AAbsSW4(AW4 a){return AW4(abs(ASW4(a)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AClampH1(AH1 x,AH1 n,AH1 m){return max(n,min(x,m));}
+ AH2 AClampH2(AH2 x,AH2 n,AH2 m){return max(n,min(x,m));}
+ AH3 AClampH3(AH3 x,AH3 n,AH3 m){return max(n,min(x,m));}
+ AH4 AClampH4(AH4 x,AH4 n,AH4 m){return max(n,min(x,m));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // V_FRACT_F16 (note DX frac() is different).
+ AH1 AFractH1(AH1 x){return x-floor(x);}
+ AH2 AFractH2(AH2 x){return x-floor(x);}
+ AH3 AFractH3(AH3 x){return x-floor(x);}
+ AH4 AFractH4(AH4 x){return x-floor(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ALerpH1(AH1 x,AH1 y,AH1 a){return lerp(x,y,a);}
+ AH2 ALerpH2(AH2 x,AH2 y,AH2 a){return lerp(x,y,a);}
+ AH3 ALerpH3(AH3 x,AH3 y,AH3 a){return lerp(x,y,a);}
+ AH4 ALerpH4(AH4 x,AH4 y,AH4 a){return lerp(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AMax3H1(AH1 x,AH1 y,AH1 z){return max(x,max(y,z));}
+ AH2 AMax3H2(AH2 x,AH2 y,AH2 z){return max(x,max(y,z));}
+ AH3 AMax3H3(AH3 x,AH3 y,AH3 z){return max(x,max(y,z));}
+ AH4 AMax3H4(AH4 x,AH4 y,AH4 z){return max(x,max(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Signed 16-bit max. Compare through the signed-halfword casts (ASW*), matching
+ // the pattern of AAbsSW*/AShrSW* above. The previous ASU* (32-bit) casts
+ // zero-extended the unsigned halfword, so inputs >= 0x8000 compared as large
+ // positive values instead of as negatives.
+ AW1 AMaxSW1(AW1 a,AW1 b){return AW1(max(ASW1(a),ASW1(b)));}
+ AW2 AMaxSW2(AW2 a,AW2 b){return AW2(max(ASW2(a),ASW2(b)));}
+ AW3 AMaxSW3(AW3 a,AW3 b){return AW3(max(ASW3(a),ASW3(b)));}
+ AW4 AMaxSW4(AW4 a,AW4 b){return AW4(max(ASW4(a),ASW4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AMin3H1(AH1 x,AH1 y,AH1 z){return min(x,min(y,z));}
+ AH2 AMin3H2(AH2 x,AH2 y,AH2 z){return min(x,min(y,z));}
+ AH3 AMin3H3(AH3 x,AH3 y,AH3 z){return min(x,min(y,z));}
+ AH4 AMin3H4(AH4 x,AH4 y,AH4 z){return min(x,min(y,z));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Signed 16-bit min. Uses the signed-halfword casts (ASW*) for the same reason
+ // as AMaxSW* above: the previous ASU* casts zero-extended, turning negative
+ // halfword values into large positive ones and producing the wrong minimum.
+ AW1 AMinSW1(AW1 a,AW1 b){return AW1(min(ASW1(a),ASW1(b)));}
+ AW2 AMinSW2(AW2 a,AW2 b){return AW2(min(ASW2(a),ASW2(b)));}
+ AW3 AMinSW3(AW3 a,AW3 b){return AW3(min(ASW3(a),ASW3(b)));}
+ AW4 AMinSW4(AW4 a,AW4 b){return AW4(min(ASW4(a),ASW4(b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ARcpH1(AH1 x){return rcp(x);}
+ AH2 ARcpH2(AH2 x){return rcp(x);}
+ AH3 ARcpH3(AH3 x){return rcp(x);}
+ AH4 ARcpH4(AH4 x){return rcp(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ARsqH1(AH1 x){return rsqrt(x);}
+ AH2 ARsqH2(AH2 x){return rsqrt(x);}
+ AH3 ARsqH3(AH3 x){return rsqrt(x);}
+ AH4 ARsqH4(AH4 x){return rsqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ASatH1(AH1 x){return saturate(x);}
+ AH2 ASatH2(AH2 x){return saturate(x);}
+ AH3 ASatH3(AH3 x){return saturate(x);}
+ AH4 ASatH4(AH4 x){return saturate(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AShrSW1(AW1 a,AW1 b){return AW1(ASW1(a)>>ASW1(b));}
+ AW2 AShrSW2(AW2 a,AW2 b){return AW2(ASW2(a)>>ASW2(b));}
+ AW3 AShrSW3(AW3 a,AW3 b){return AW3(ASW3(a)>>ASW3(b));}
+ AW4 AShrSW4(AW4 a,AW4 b){return AW4(ASW4(a)>>ASW4(b));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HLSL DOUBLE
+//==============================================================================================================================
+ #ifdef A_DUBL
+ #ifdef A_HLSL_6_2
+ #define AD1 float64_t
+ #define AD2 float64_t2
+ #define AD3 float64_t3
+ #define AD4 float64_t4
+ #else
+ #define AD1 double
+ #define AD2 double2
+ #define AD3 double3
+ #define AD4 double4
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ AD1 AD1_x(AD1 a){return AD1(a);}
+ AD2 AD2_x(AD1 a){return AD2(a,a);}
+ AD3 AD3_x(AD1 a){return AD3(a,a,a);}
+ AD4 AD4_x(AD1 a){return AD4(a,a,a,a);}
+ #define AD1_(a) AD1_x(AD1(a))
+ #define AD2_(a) AD2_x(AD1(a))
+ #define AD3_(a) AD3_x(AD1(a))
+ #define AD4_(a) AD4_x(AD1(a))
+//==============================================================================================================================
+ AD1 AFractD1(AD1 a){return a-floor(a);}
+ AD2 AFractD2(AD2 a){return a-floor(a);}
+ AD3 AFractD3(AD3 a){return a-floor(a);}
+ AD4 AFractD4(AD4 a){return a-floor(a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD1 ALerpD1(AD1 x,AD1 y,AD1 a){return lerp(x,y,a);}
+ AD2 ALerpD2(AD2 x,AD2 y,AD2 a){return lerp(x,y,a);}
+ AD3 ALerpD3(AD3 x,AD3 y,AD3 a){return lerp(x,y,a);}
+ AD4 ALerpD4(AD4 x,AD4 y,AD4 a){return lerp(x,y,a);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD1 ARcpD1(AD1 x){return rcp(x);}
+ AD2 ARcpD2(AD2 x){return rcp(x);}
+ AD3 ARcpD3(AD3 x){return rcp(x);}
+ AD4 ARcpD4(AD4 x){return rcp(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD1 ARsqD1(AD1 x){return rsqrt(x);}
+ AD2 ARsqD2(AD2 x){return rsqrt(x);}
+ AD3 ARsqD3(AD3 x){return rsqrt(x);}
+ AD4 ARsqD4(AD4 x){return rsqrt(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD1 ASatD1(AD1 x){return saturate(x);}
+ AD2 ASatD2(AD2 x){return saturate(x);}
+ AD3 ASatD3(AD3 x){return saturate(x);}
+ AD4 ASatD4(AD4 x){return saturate(x);}
+ #endif
+//==============================================================================================================================
+// HLSL WAVE
+//==============================================================================================================================
+ #ifdef A_WAVE
+ // Where 'x' must be a compile time literal.
+ // Butterfly/xor lane shuffle: each lane reads 'v' from the lane whose index is
+ // (its own lane index XOR x).
+ AF1 AWaveXorF1(AF1 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+ AF2 AWaveXorF2(AF2 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+ AF3 AWaveXorF3(AF3 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+ AF4 AWaveXorF4(AF4 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+ AU1 AWaveXorU1(AU1 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+ // NOTE(review): the next three overloads reuse the name AWaveXorU1 for the
+ // AU2/AU3/AU4 types. This is legal HLSL overloading (dispatch is by argument
+ // type), but it presumably was meant to be AWaveXorU2/U3/U4 to mirror the
+ // F1..F4 naming above -- confirm no caller relies on the shared name before
+ // renaming.
+ AU2 AWaveXorU1(AU2 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+ AU3 AWaveXorU1(AU3 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+ AU4 AWaveXorU1(AU4 v,AU1 x){return WaveReadLaneAt(v,WaveGetLaneIndex()^x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_HALF
+  // Packed-half lane xor: WaveReadLaneAt operates on the 32-bit packed
+  // representation, so each helper packs, shuffles, then unpacks.
+  AH2 AWaveXorH2(AH2 v,AU1 x){return AH2_AU1(WaveReadLaneAt(AU1_AH2(v),WaveGetLaneIndex()^x));}
+  AH4 AWaveXorH4(AH4 v,AU1 x){return AH4_AU2(WaveReadLaneAt(AU2_AH4(v),WaveGetLaneIndex()^x));}
+  AW2 AWaveXorW2(AW2 v,AU1 x){return AW2_AU1(WaveReadLaneAt(AU1_AW2(v),WaveGetLaneIndex()^x));}
+  // Fix: four 16-bit words pack into an AU2, so use the AW4<->AU2 converters
+  // (mirroring AWaveXorH4 above). The previous AW4_AU1/AU1_AW4 names are not
+  // defined anywhere in this header and would fail to compile when both
+  // A_WAVE and A_HALF are enabled.
+  AW4 AWaveXorW4(AW4 v,AU1 x){return AW4_AU2(WaveReadLaneAt(AU2_AW4(v),WaveGetLaneIndex()^x));}
+ #endif
+ #endif
+//==============================================================================================================================
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// GPU COMMON
+//
+//
+//==============================================================================================================================
+#ifdef A_GPU
+ // Negative and positive infinity.
+ #define A_INFP_F AF1_AU1(0x7f800000u)
+ #define A_INFN_F AF1_AU1(0xff800000u)
+//------------------------------------------------------------------------------------------------------------------------------
+ // Copy sign from 's' to positive 'd'.
+ AF1 ACpySgnF1(AF1 d,AF1 s){return AF1_AU1(AU1_AF1(d)|(AU1_AF1(s)&AU1_(0x80000000u)));}
+ AF2 ACpySgnF2(AF2 d,AF2 s){return AF2_AU2(AU2_AF2(d)|(AU2_AF2(s)&AU2_(0x80000000u)));}
+ AF3 ACpySgnF3(AF3 d,AF3 s){return AF3_AU3(AU3_AF3(d)|(AU3_AF3(s)&AU3_(0x80000000u)));}
+ AF4 ACpySgnF4(AF4 d,AF4 s){return AF4_AU4(AU4_AF4(d)|(AU4_AF4(s)&AU4_(0x80000000u)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Single operation to return (useful to create a mask to use in lerp for branch free logic),
+ // m=NaN := 0
+ // m>=0 := 0
+ // m<0 := 1
+ // Uses the following useful floating point logic,
+ // saturate(+a*(-INF)==-INF) := 0
+ // saturate( 0*(-INF)== NaN) := 0
+ // saturate(-a*(-INF)==+INF) := 1
+ AF1 ASignedF1(AF1 m){return ASatF1(m*AF1_(A_INFN_F));}
+ AF2 ASignedF2(AF2 m){return ASatF2(m*AF2_(A_INFN_F));}
+ AF3 ASignedF3(AF3 m){return ASatF3(m*AF3_(A_INFN_F));}
+ AF4 ASignedF4(AF4 m){return ASatF4(m*AF4_(A_INFN_F));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AGtZeroF1(AF1 m){return ASatF1(m*AF1_(A_INFP_F));}
+ AF2 AGtZeroF2(AF2 m){return ASatF2(m*AF2_(A_INFP_F));}
+ AF3 AGtZeroF3(AF3 m){return ASatF3(m*AF3_(A_INFP_F));}
+ AF4 AGtZeroF4(AF4 m){return ASatF4(m*AF4_(A_INFP_F));}
+//==============================================================================================================================
+ #ifdef A_HALF
+ #ifdef A_HLSL_6_2
+ #define A_INFP_H AH1_AW1((uint16_t)0x7c00u)
+ #define A_INFN_H AH1_AW1((uint16_t)0xfc00u)
+ #else
+ #define A_INFP_H AH1_AW1(0x7c00u)
+ #define A_INFN_H AH1_AW1(0xfc00u)
+ #endif
+
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ACpySgnH1(AH1 d,AH1 s){return AH1_AW1(AW1_AH1(d)|(AW1_AH1(s)&AW1_(0x8000u)));}
+ AH2 ACpySgnH2(AH2 d,AH2 s){return AH2_AW2(AW2_AH2(d)|(AW2_AH2(s)&AW2_(0x8000u)));}
+ AH3 ACpySgnH3(AH3 d,AH3 s){return AH3_AW3(AW3_AH3(d)|(AW3_AH3(s)&AW3_(0x8000u)));}
+ AH4 ACpySgnH4(AH4 d,AH4 s){return AH4_AW4(AW4_AH4(d)|(AW4_AH4(s)&AW4_(0x8000u)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ASignedH1(AH1 m){return ASatH1(m*AH1_(A_INFN_H));}
+ AH2 ASignedH2(AH2 m){return ASatH2(m*AH2_(A_INFN_H));}
+ AH3 ASignedH3(AH3 m){return ASatH3(m*AH3_(A_INFN_H));}
+ AH4 ASignedH4(AH4 m){return ASatH4(m*AH4_(A_INFN_H));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AGtZeroH1(AH1 m){return ASatH1(m*AH1_(A_INFP_H));}
+ AH2 AGtZeroH2(AH2 m){return ASatH2(m*AH2_(A_INFP_H));}
+ AH3 AGtZeroH3(AH3 m){return ASatH3(m*AH3_(A_INFP_H));}
+ AH4 AGtZeroH4(AH4 m){return ASatH4(m*AH4_(A_INFP_H));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [FIS] FLOAT INTEGER SORTABLE
+//------------------------------------------------------------------------------------------------------------------------------
+// Float to integer sortable.
+// - If sign bit=0, flip the sign bit (positives).
+// - If sign bit=1, flip all bits (negatives).
+// Integer sortable to float.
+// - If sign bit=1, flip the sign bit (positives).
+// - If sign bit=0, flip all bits (negatives).
+// Has nice side effects.
+// - Larger integers are more positive values.
+// - Float zero is mapped to center of integers (so clear to integer zero is a nice default for atomic max usage).
+// Burns 3 ops for conversion {shift,or,xor}.
+//==============================================================================================================================
+ AU1 AFisToU1(AU1 x){return x^(( AShrSU1(x,AU1_(31)))|AU1_(0x80000000));}
+ AU1 AFisFromU1(AU1 x){return x^((~AShrSU1(x,AU1_(31)))|AU1_(0x80000000));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Just adjust high 16-bit value (useful when upper part of 32-bit word is a 16-bit float value).
+ AU1 AFisToHiU1(AU1 x){return x^(( AShrSU1(x,AU1_(15)))|AU1_(0x80000000));}
+ AU1 AFisFromHiU1(AU1 x){return x^((~AShrSU1(x,AU1_(15)))|AU1_(0x80000000));}
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_HALF
+ AW1 AFisToW1(AW1 x){return x^(( AShrSW1(x,AW1_(15)))|AW1_(0x8000));}
+ AW1 AFisFromW1(AW1 x){return x^((~AShrSW1(x,AW1_(15)))|AW1_(0x8000));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AW2 AFisToW2(AW2 x){return x^(( AShrSW2(x,AW2_(15)))|AW2_(0x8000));}
+ AW2 AFisFromW2(AW2 x){return x^((~AShrSW2(x,AW2_(15)))|AW2_(0x8000));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [PERM] V_PERM_B32
+//------------------------------------------------------------------------------------------------------------------------------
+// Support for V_PERM_B32 started in the 3rd generation of GCN.
+//------------------------------------------------------------------------------------------------------------------------------
+// yyyyxxxx - The 'i' input.
+// 76543210
+// ========
+// HGFEDCBA - Naming on permutation.
+//------------------------------------------------------------------------------------------------------------------------------
+// TODO
+// ====
+// - Make sure compiler optimizes this.
+//==============================================================================================================================
+ #ifdef A_HALF
+ AU1 APerm0E0A(AU2 i){return((i.x )&0xffu)|((i.y<<16)&0xff0000u);}
+ AU1 APerm0F0B(AU2 i){return((i.x>> 8)&0xffu)|((i.y<< 8)&0xff0000u);}
+ AU1 APerm0G0C(AU2 i){return((i.x>>16)&0xffu)|((i.y )&0xff0000u);}
+ AU1 APerm0H0D(AU2 i){return((i.x>>24)&0xffu)|((i.y>> 8)&0xff0000u);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 APermHGFA(AU2 i){return((i.x )&0x000000ffu)|(i.y&0xffffff00u);}
+ AU1 APermHGFC(AU2 i){return((i.x>>16)&0x000000ffu)|(i.y&0xffffff00u);}
+ AU1 APermHGAE(AU2 i){return((i.x<< 8)&0x0000ff00u)|(i.y&0xffff00ffu);}
+ AU1 APermHGCE(AU2 i){return((i.x>> 8)&0x0000ff00u)|(i.y&0xffff00ffu);}
+ AU1 APermHAFE(AU2 i){return((i.x<<16)&0x00ff0000u)|(i.y&0xff00ffffu);}
+ AU1 APermHCFE(AU2 i){return((i.x )&0x00ff0000u)|(i.y&0xff00ffffu);}
+ AU1 APermAGFE(AU2 i){return((i.x<<24)&0xff000000u)|(i.y&0x00ffffffu);}
+ AU1 APermCGFE(AU2 i){return((i.x<< 8)&0xff000000u)|(i.y&0x00ffffffu);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 APermGCEA(AU2 i){return((i.x)&0x00ff00ffu)|((i.y<<8)&0xff00ff00u);}
+ AU1 APermGECA(AU2 i){return(((i.x)&0xffu)|((i.x>>8)&0xff00u)|((i.y<<16)&0xff0000u)|((i.y<<8)&0xff000000u));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [BUC] BYTE UNSIGNED CONVERSION
+//------------------------------------------------------------------------------------------------------------------------------
+// Designed to use the optimal conversion, enables the scaling to possibly be factored into other computation.
+// Works on a range of {0 to A_BUC_<32,16>}, for <32-bit, and 16-bit> respectively.
+//------------------------------------------------------------------------------------------------------------------------------
+// OPCODE NOTES
+// ============
+// GCN does not do UNORM or SNORM for bytes in opcodes.
+// - V_CVT_F32_UBYTE{0,1,2,3} - Unsigned byte to float.
+// - V_CVT_PKACCUM_U8_F32 - Float to unsigned byte (does bit-field insert into 32-bit integer).
+// V_PERM_B32 does byte packing with ability to zero fill bytes as well.
+// - Can pull out byte values from two sources, and zero fill upper 8-bits of packed hi and lo.
+//------------------------------------------------------------------------------------------------------------------------------
+// BYTE : FLOAT - ABuc{0,1,2,3}{To,From}U1() - Designed for V_CVT_F32_UBYTE* and V_CVT_PKACCUM_U8_F32 ops.
+// ==== =====
+// 0 : 0
+// 1 : 1
+// ...
+// 255 : 255
+// : 256 (just outside the encoding range)
+//------------------------------------------------------------------------------------------------------------------------------
+// BYTE : FLOAT - ABuc{0,1,2,3}{To,From}U2() - Designed for 16-bit denormal tricks and V_PERM_B32.
+// ==== =====
+// 0 : 0
+// 1 : 1/512
+// 2 : 1/256
+// ...
+// 64 : 1/8
+// 128 : 1/4
+// 255 : 255/512
+// : 1/2 (just outside the encoding range)
+//------------------------------------------------------------------------------------------------------------------------------
+// OPTIMAL IMPLEMENTATIONS ON AMD ARCHITECTURES
+// ============================================
+// r=ABuc0FromU1(i)
+// V_CVT_F32_UBYTE0 r,i
+// --------------------------------------------
+// r=ABuc0ToU1(d,i)
+// V_CVT_PKACCUM_U8_F32 r,i,0,d
+// --------------------------------------------
+// d=ABuc0FromU2(i)
+// Where 'k0' is an SGPR with 0x0E0A
+// Where 'k1' is an SGPR with {32768.0} packed into the lower 16-bits
+// V_PERM_B32 d,i.x,i.y,k0
+// V_PK_FMA_F16 d,d,k1.x,0
+// --------------------------------------------
+// r=ABuc0ToU2(d,i)
+// Where 'k0' is an SGPR with {1.0/32768.0} packed into the lower 16-bits
+// Where 'k1' is an SGPR with 0x????
+// Where 'k2' is an SGPR with 0x????
+// V_PK_FMA_F16 i,i,k0.x,0
+// V_PERM_B32 r.x,i,i,k1
+// V_PERM_B32 r.y,i,i,k2
+//==============================================================================================================================
+ // Peak range for 32-bit and 16-bit operations.
+ #define A_BUC_32 (255.0)
+ #define A_BUC_16 (255.0/512.0)
+//==============================================================================================================================
+ #if 1
+ // Designed to be one V_CVT_PKACCUM_U8_F32.
+ // The extra min is required to pattern match to V_CVT_PKACCUM_U8_F32.
+ // ABuc{N}ToU1(d,i): clamp float 'i' to [0,255] and bit-field-insert it into
+ // byte N of 'd', leaving the other three bytes of 'd' unchanged.
+ AU1 ABuc0ToU1(AU1 d,AF1 i){return (d&0xffffff00u)|((min(AU1(i),255u) )&(0x000000ffu));}
+ AU1 ABuc1ToU1(AU1 d,AF1 i){return (d&0xffff00ffu)|((min(AU1(i),255u)<< 8)&(0x0000ff00u));}
+ AU1 ABuc2ToU1(AU1 d,AF1 i){return (d&0xff00ffffu)|((min(AU1(i),255u)<<16)&(0x00ff0000u));}
+ AU1 ABuc3ToU1(AU1 d,AF1 i){return (d&0x00ffffffu)|((min(AU1(i),255u)<<24)&(0xff000000u));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Designed to be one V_CVT_F32_UBYTE*.
+ // ABuc{N}FromU1(i): extract byte N of 'i' as a float in {0..255}.
+ AF1 ABuc0FromU1(AU1 i){return AF1((i )&255u);}
+ AF1 ABuc1FromU1(AU1 i){return AF1((i>> 8)&255u);}
+ AF1 ABuc2FromU1(AU1 i){return AF1((i>>16)&255u);}
+ AF1 ABuc3FromU1(AU1 i){return AF1((i>>24)&255u);}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+ // 16-bit packed path. Scaling by 1.0/32768.0 moves the {0..255/512} byte
+ // encoding (see the FLOAT table above) into FP16 bit patterns whose bytes
+ // can then be shuffled with the APerm* helpers -- the 16-bit denormal trick
+ // described in the notes; relies on FP16 denormal support (A_HALF path).
+ // Takes {x0,x1} and {y0,y1} and builds {{x0,y0},{x1,y1}}.
+ AW2 ABuc01ToW2(AH2 x,AH2 y){x*=AH2_(1.0/32768.0);y*=AH2_(1.0/32768.0);
+ return AW2_AU1(APermGCEA(AU2(AU1_AW2(AW2_AH2(x)),AU1_AW2(AW2_AH2(y)))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Designed for 3 ops to do SOA to AOS and conversion.
+ // ABuc{N}ToU2(d,i): convert the pair 'i' and insert into byte lane N of the
+ // two destination words d.x/d.y.
+ AU2 ABuc0ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)));
+ return AU2(APermHGFA(AU2(d.x,b)),APermHGFC(AU2(d.y,b)));}
+ AU2 ABuc1ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)));
+ return AU2(APermHGAE(AU2(d.x,b)),APermHGCE(AU2(d.y,b)));}
+ AU2 ABuc2ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)));
+ return AU2(APermHAFE(AU2(d.x,b)),APermHCFE(AU2(d.y,b)));}
+ AU2 ABuc3ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)));
+ return AU2(APermAGFE(AU2(d.x,b)),APermCGFE(AU2(d.y,b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Designed for 2 ops to do both AOS to SOA, and conversion.
+ // ABuc{N}FromU2(i): pull byte lane N from both words, then scale back by
+ // 32768.0 to undo the denormal encoding.
+ AH2 ABuc0FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0E0A(i)))*AH2_(32768.0);}
+ AH2 ABuc1FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0F0B(i)))*AH2_(32768.0);}
+ AH2 ABuc2FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0G0C(i)))*AH2_(32768.0);}
+ AH2 ABuc3FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0H0D(i)))*AH2_(32768.0);}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [BSC] BYTE SIGNED CONVERSION
+//------------------------------------------------------------------------------------------------------------------------------
+// Similar to [BUC].
+// Works on a range of {-/+ A_BSC_<32,16>}, for <32-bit, and 16-bit> respectively.
+//------------------------------------------------------------------------------------------------------------------------------
+// ENCODING (without zero-based encoding)
+// ========
+// 0 = unused (can be used to mean something else)
+// 1 = lowest value
+// 128 = exact zero center (zero-based encoding)
+// 255 = highest value
+//------------------------------------------------------------------------------------------------------------------------------
+// Zero-based [Zb] flips the MSB bit of the byte (making 128 "exact zero" actually zero).
+// This is useful if there is a desire for cleared values to decode as zero.
+//------------------------------------------------------------------------------------------------------------------------------
+// BYTE : FLOAT - ABsc{0,1,2,3}{To,From}U2() - Designed for 16-bit denormal tricks and V_PERM_B32.
+// ==== =====
+// 0 : -127/512 (unused)
+// 1 : -126/512
+// 2 : -125/512
+// ...
+// 128 : 0
+// ...
+// 255 : 127/512
+// : 1/4 (just outside the encoding range)
+//==============================================================================================================================
+ // Peak range for 32-bit and 16-bit operations.
+ #define A_BSC_32 (127.0)
+ #define A_BSC_16 (127.0/512.0)
+//==============================================================================================================================
+ #if 1
+ // ABsc{N}ToU1(d,i): bias the signed value by +128, clamp to [0,255], and
+ // bit-field-insert into byte N of 'd' (see the encoding table above).
+ AU1 ABsc0ToU1(AU1 d,AF1 i){return (d&0xffffff00u)|((min(AU1(i+128.0),255u) )&(0x000000ffu));}
+ AU1 ABsc1ToU1(AU1 d,AF1 i){return (d&0xffff00ffu)|((min(AU1(i+128.0),255u)<< 8)&(0x0000ff00u));}
+ AU1 ABsc2ToU1(AU1 d,AF1 i){return (d&0xff00ffffu)|((min(AU1(i+128.0),255u)<<16)&(0x00ff0000u));}
+ AU1 ABsc3ToU1(AU1 d,AF1 i){return (d&0x00ffffffu)|((min(AU1(i+128.0),255u)<<24)&(0xff000000u));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Zb (zero-based) variants XOR the stored byte's MSB so a cleared all-zero
+ // byte decodes as exact zero (the value 128 in the plain encoding).
+ // NOTE(review): Zb encoders trunc() the input before biasing while the
+ // plain encoders above do not -- appears to match upstream; confirm intent.
+ AU1 ABsc0ToZbU1(AU1 d,AF1 i){return ((d&0xffffff00u)|((min(AU1(trunc(i)+128.0),255u) )&(0x000000ffu)))^0x00000080u;}
+ AU1 ABsc1ToZbU1(AU1 d,AF1 i){return ((d&0xffff00ffu)|((min(AU1(trunc(i)+128.0),255u)<< 8)&(0x0000ff00u)))^0x00008000u;}
+ AU1 ABsc2ToZbU1(AU1 d,AF1 i){return ((d&0xff00ffffu)|((min(AU1(trunc(i)+128.0),255u)<<16)&(0x00ff0000u)))^0x00800000u;}
+ AU1 ABsc3ToZbU1(AU1 d,AF1 i){return ((d&0x00ffffffu)|((min(AU1(trunc(i)+128.0),255u)<<24)&(0xff000000u)))^0x80000000u;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // ABsc{N}FromU1(i): extract byte N and remove the +128 bias.
+ AF1 ABsc0FromU1(AU1 i){return AF1((i )&255u)-128.0;}
+ AF1 ABsc1FromU1(AU1 i){return AF1((i>> 8)&255u)-128.0;}
+ AF1 ABsc2FromU1(AU1 i){return AF1((i>>16)&255u)-128.0;}
+ AF1 ABsc3FromU1(AU1 i){return AF1((i>>24)&255u)-128.0;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Zero-based decode: flip the MSB back before removing the bias.
+ AF1 ABsc0FromZbU1(AU1 i){return AF1(((i )&255u)^0x80u)-128.0;}
+ AF1 ABsc1FromZbU1(AU1 i){return AF1(((i>> 8)&255u)^0x80u)-128.0;}
+ AF1 ABsc2FromZbU1(AU1 i){return AF1(((i>>16)&255u)^0x80u)-128.0;}
+ AF1 ABsc3FromZbU1(AU1 i){return AF1(((i>>24)&255u)^0x80u)-128.0;}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+ // 16-bit packed signed path: same denormal packing as the [BUC] section,
+ // but with a +0.25/32768.0 bias to recenter the signed range (matches the
+ // {-127/512 .. 127/512} table above); decode subtracts 0.25 to undo it.
+ // Takes {x0,x1} and {y0,y1} and builds {{x0,y0},{x1,y1}}.
+ AW2 ABsc01ToW2(AH2 x,AH2 y){x=x*AH2_(1.0/32768.0)+AH2_(0.25/32768.0);y=y*AH2_(1.0/32768.0)+AH2_(0.25/32768.0);
+ return AW2_AU1(APermGCEA(AU2(AU1_AW2(AW2_AH2(x)),AU1_AW2(AW2_AH2(y)))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // ABsc{N}ToU2(d,i): convert the pair and insert into byte lane N of d.x/d.y.
+ AU2 ABsc0ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)));
+ return AU2(APermHGFA(AU2(d.x,b)),APermHGFC(AU2(d.y,b)));}
+ AU2 ABsc1ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)));
+ return AU2(APermHGAE(AU2(d.x,b)),APermHGCE(AU2(d.y,b)));}
+ AU2 ABsc2ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)));
+ return AU2(APermHAFE(AU2(d.x,b)),APermHCFE(AU2(d.y,b)));}
+ AU2 ABsc3ToU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)));
+ return AU2(APermAGFE(AU2(d.x,b)),APermCGFE(AU2(d.y,b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Zb variants: XOR 0x00800080 flips the MSB of both packed bytes so cleared
+ // storage decodes as zero (zero-based encoding, see notes above).
+ AU2 ABsc0ToZbU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)))^0x00800080u;
+ return AU2(APermHGFA(AU2(d.x,b)),APermHGFC(AU2(d.y,b)));}
+ AU2 ABsc1ToZbU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)))^0x00800080u;
+ return AU2(APermHGAE(AU2(d.x,b)),APermHGCE(AU2(d.y,b)));}
+ AU2 ABsc2ToZbU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)))^0x00800080u;
+ return AU2(APermHAFE(AU2(d.x,b)),APermHCFE(AU2(d.y,b)));}
+ AU2 ABsc3ToZbU2(AU2 d,AH2 i){AU1 b=AU1_AW2(AW2_AH2(i*AH2_(1.0/32768.0)+AH2_(0.25/32768.0)))^0x00800080u;
+ return AU2(APermAGFE(AU2(d.x,b)),APermCGFE(AU2(d.y,b)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // ABsc{N}FromU2(i): pull byte lane N, undo the scale and the 0.25 bias.
+ AH2 ABsc0FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0E0A(i)))*AH2_(32768.0)-AH2_(0.25);}
+ AH2 ABsc1FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0F0B(i)))*AH2_(32768.0)-AH2_(0.25);}
+ AH2 ABsc2FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0G0C(i)))*AH2_(32768.0)-AH2_(0.25);}
+ AH2 ABsc3FromU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0H0D(i)))*AH2_(32768.0)-AH2_(0.25);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Zero-based decode: flip the packed MSBs back, then undo scale and bias.
+ AH2 ABsc0FromZbU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0E0A(i)^0x00800080u))*AH2_(32768.0)-AH2_(0.25);}
+ AH2 ABsc1FromZbU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0F0B(i)^0x00800080u))*AH2_(32768.0)-AH2_(0.25);}
+ AH2 ABsc2FromZbU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0G0C(i)^0x00800080u))*AH2_(32768.0)-AH2_(0.25);}
+ AH2 ABsc3FromZbU2(AU2 i){return AH2_AW2(AW2_AU1(APerm0H0D(i)^0x00800080u))*AH2_(32768.0)-AH2_(0.25);}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// HALF APPROXIMATIONS
+//------------------------------------------------------------------------------------------------------------------------------
+// These support only positive inputs.
+// Did not see value yet in specialization for range.
+// Using quick testing, ended up mostly getting the same "best" approximation for various ranges.
+// With hardware that can co-execute transcendentals, the value in approximations could be less than expected.
+// However from a latency perspective, if execution of a transcendental is 4 clk, with no packed support, -> 8 clk total.
+// And co-execution would require a compiler interleaving a lot of independent work for packed usage.
+//------------------------------------------------------------------------------------------------------------------------------
+// The one Newton Raphson iteration form of rsq() was skipped (requires 6 ops total).
+// Same with sqrt(), as this could be x*rsq() (7 ops).
+//==============================================================================================================================
+ #ifdef A_HALF
+ // Minimize squared error across full positive range, 2 ops.
+ // The 0x1de2 based approximation maps {0 to 1} input maps to < 1 output.
+ // Bit trick: a logical right shift of the FP16 bit pattern approximately
+ // halves the exponent (sqrt), and the magic constant re-biases it.
+ AH1 APrxLoSqrtH1(AH1 a){return AH1_AW1((AW1_AH1(a)>>AW1_(1))+AW1_(0x1de2));}
+ AH2 APrxLoSqrtH2(AH2 a){return AH2_AW2((AW2_AH2(a)>>AW2_(1))+AW2_(0x1de2));}
+ AH3 APrxLoSqrtH3(AH3 a){return AH3_AW3((AW3_AH3(a)>>AW3_(1))+AW3_(0x1de2));}
+ AH4 APrxLoSqrtH4(AH4 a){return AH4_AW4((AW4_AH4(a)>>AW4_(1))+AW4_(0x1de2));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Lower precision estimation, 1 op.
+ // Minimize squared error across {smallest normal to 16384.0}.
+ // Constant-minus-bits approximately negates the exponent (reciprocal).
+ AH1 APrxLoRcpH1(AH1 a){return AH1_AW1(AW1_(0x7784)-AW1_AH1(a));}
+ AH2 APrxLoRcpH2(AH2 a){return AH2_AW2(AW2_(0x7784)-AW2_AH2(a));}
+ AH3 APrxLoRcpH3(AH3 a){return AH3_AW3(AW3_(0x7784)-AW3_AH3(a));}
+ AH4 APrxLoRcpH4(AH4 a){return AH4_AW4(AW4_(0x7784)-AW4_AH4(a));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Medium precision estimation, one Newton Raphson iteration, 3 ops.
+ // Refines the 1-op bit-trick estimate 'b' with b*(2-a*b).
+ AH1 APrxMedRcpH1(AH1 a){AH1 b=AH1_AW1(AW1_(0x778d)-AW1_AH1(a));return b*(-b*a+AH1_(2.0));}
+ AH2 APrxMedRcpH2(AH2 a){AH2 b=AH2_AW2(AW2_(0x778d)-AW2_AH2(a));return b*(-b*a+AH2_(2.0));}
+ AH3 APrxMedRcpH3(AH3 a){AH3 b=AH3_AW3(AW3_(0x778d)-AW3_AH3(a));return b*(-b*a+AH3_(2.0));}
+ AH4 APrxMedRcpH4(AH4 a){AH4 b=AH4_AW4(AW4_(0x778d)-AW4_AH4(a));return b*(-b*a+AH4_(2.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Minimize squared error across {smallest normal to 16384.0}, 2 ops.
+ // Reciprocal square root: constant minus half the bits.
+ AH1 APrxLoRsqH1(AH1 a){return AH1_AW1(AW1_(0x59a3)-(AW1_AH1(a)>>AW1_(1)));}
+ AH2 APrxLoRsqH2(AH2 a){return AH2_AW2(AW2_(0x59a3)-(AW2_AH2(a)>>AW2_(1)));}
+ AH3 APrxLoRsqH3(AH3 a){return AH3_AW3(AW3_(0x59a3)-(AW3_AH3(a)>>AW3_(1)));}
+ AH4 APrxLoRsqH4(AH4 a){return AH4_AW4(AW4_(0x59a3)-(AW4_AH4(a)>>AW4_(1)));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// FLOAT APPROXIMATIONS
+//------------------------------------------------------------------------------------------------------------------------------
+// Michal Drobot has an excellent presentation on these: "Low Level Optimizations For GCN",
+// - Idea dates back to SGI, then to Quake 3, etc.
+// - https://michaldrobot.files.wordpress.com/2014/05/gcn_alu_opt_digitaldragons2014.pdf
+// - sqrt(x)=rsqrt(x)*x
+// - rcp(x)=rsqrt(x)*rsqrt(x) for positive x
+// - https://github.com/michaldrobot/ShaderFastLibs/blob/master/ShaderFastMathLib.h
+//------------------------------------------------------------------------------------------------------------------------------
+// These below are from perhaps less complete searching for optimal.
+// Used FP16 normal range for testing with +4096 32-bit step size for sampling error.
+// So these match up well with the half approximations.
+//==============================================================================================================================
+ // 32-bit versions of the bit-trick approximations (see the notes above):
+ // shift-and-offset of the IEEE-754 bit pattern gives a crude sqrt, the
+ // constant-minus-bits form gives rcp/rsqrt, and the Med variant adds one
+ // Newton-Raphson step b*(2-a*b). Positive inputs only.
+ AF1 APrxLoSqrtF1(AF1 a){return AF1_AU1((AU1_AF1(a)>>AU1_(1))+AU1_(0x1fbc4639));}
+ AF1 APrxLoRcpF1(AF1 a){return AF1_AU1(AU1_(0x7ef07ebb)-AU1_AF1(a));}
+ AF1 APrxMedRcpF1(AF1 a){AF1 b=AF1_AU1(AU1_(0x7ef19fff)-AU1_AF1(a));return b*(-b*a+AF1_(2.0));}
+ AF1 APrxLoRsqF1(AF1 a){return AF1_AU1(AU1_(0x5f347d74)-(AU1_AF1(a)>>AU1_(1)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 APrxLoSqrtF2(AF2 a){return AF2_AU2((AU2_AF2(a)>>AU2_(1))+AU2_(0x1fbc4639));}
+ AF2 APrxLoRcpF2(AF2 a){return AF2_AU2(AU2_(0x7ef07ebb)-AU2_AF2(a));}
+ AF2 APrxMedRcpF2(AF2 a){AF2 b=AF2_AU2(AU2_(0x7ef19fff)-AU2_AF2(a));return b*(-b*a+AF2_(2.0));}
+ AF2 APrxLoRsqF2(AF2 a){return AF2_AU2(AU2_(0x5f347d74)-(AU2_AF2(a)>>AU2_(1)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF3 APrxLoSqrtF3(AF3 a){return AF3_AU3((AU3_AF3(a)>>AU3_(1))+AU3_(0x1fbc4639));}
+ AF3 APrxLoRcpF3(AF3 a){return AF3_AU3(AU3_(0x7ef07ebb)-AU3_AF3(a));}
+ AF3 APrxMedRcpF3(AF3 a){AF3 b=AF3_AU3(AU3_(0x7ef19fff)-AU3_AF3(a));return b*(-b*a+AF3_(2.0));}
+ AF3 APrxLoRsqF3(AF3 a){return AF3_AU3(AU3_(0x5f347d74)-(AU3_AF3(a)>>AU3_(1)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF4 APrxLoSqrtF4(AF4 a){return AF4_AU4((AU4_AF4(a)>>AU4_(1))+AU4_(0x1fbc4639));}
+ AF4 APrxLoRcpF4(AF4 a){return AF4_AU4(AU4_(0x7ef07ebb)-AU4_AF4(a));}
+ AF4 APrxMedRcpF4(AF4 a){AF4 b=AF4_AU4(AU4_(0x7ef19fff)-AU4_AF4(a));return b*(-b*a+AF4_(2.0));}
+ AF4 APrxLoRsqF4(AF4 a){return AF4_AU4(AU4_(0x5f347d74)-(AU4_AF4(a)>>AU4_(1)));}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// PQ APPROXIMATIONS
+//------------------------------------------------------------------------------------------------------------------------------
+// PQ is very close to x^(1/8). The functions below use the fast float approximation method to do
+// PQ<~>Gamma2 (4th power and fast 4th root) and PQ<~>Linear (8th power and fast 8th root). Maximum error is ~0.2%.
+//==============================================================================================================================
+// Helpers
+ // Quart(a) = a^4 and Oct(a) = a^8 via repeated squaring (2 resp. 3 muls).
+ AF1 Quart(AF1 a) { a = a * a; return a * a;}
+ AF1 Oct(AF1 a) { a = a * a; a = a * a; return a * a; }
+ AF2 Quart(AF2 a) { a = a * a; return a * a; }
+ AF2 Oct(AF2 a) { a = a * a; a = a * a; return a * a; }
+ AF3 Quart(AF3 a) { a = a * a; return a * a; }
+ AF3 Oct(AF3 a) { a = a * a; a = a * a; return a * a; }
+ AF4 Quart(AF4 a) { a = a * a; return a * a; }
+ AF4 Oct(AF4 a) { a = a * a; a = a * a; return a * a; }
+ //------------------------------------------------------------------------------------------------------------------------------
+ // PQ<->Gamma2 uses 4th power / 4th root; PQ<->Linear uses 8th power / 8th
+ // root. Lo variants: exponent-shift bit trick. Med variants: one
+ // Newton-Raphson refinement of the Lo estimate, b - b*(b^n - a)/(n*b^n)
+ // for n = 4 or 8. High variants: exact sqrt chains.
+ AF1 APrxPQToGamma2(AF1 a) { return Quart(a); }
+ AF1 APrxPQToLinear(AF1 a) { return Oct(a); }
+ AF1 APrxLoGamma2ToPQ(AF1 a) { return AF1_AU1((AU1_AF1(a) >> AU1_(2)) + AU1_(0x2F9A4E46)); }
+ AF1 APrxMedGamma2ToPQ(AF1 a) { AF1 b = AF1_AU1((AU1_AF1(a) >> AU1_(2)) + AU1_(0x2F9A4E46)); AF1 b4 = Quart(b); return b - b * (b4 - a) / (AF1_(4.0) * b4); }
+ AF1 APrxHighGamma2ToPQ(AF1 a) { return sqrt(sqrt(a)); }
+ AF1 APrxLoLinearToPQ(AF1 a) { return AF1_AU1((AU1_AF1(a) >> AU1_(3)) + AU1_(0x378D8723)); }
+ AF1 APrxMedLinearToPQ(AF1 a) { AF1 b = AF1_AU1((AU1_AF1(a) >> AU1_(3)) + AU1_(0x378D8723)); AF1 b8 = Oct(b); return b - b * (b8 - a) / (AF1_(8.0) * b8); }
+ AF1 APrxHighLinearToPQ(AF1 a) { return sqrt(sqrt(sqrt(a))); }
+ //------------------------------------------------------------------------------------------------------------------------------
+ AF2 APrxPQToGamma2(AF2 a) { return Quart(a); }
+ AF2 APrxPQToLinear(AF2 a) { return Oct(a); }
+ AF2 APrxLoGamma2ToPQ(AF2 a) { return AF2_AU2((AU2_AF2(a) >> AU2_(2)) + AU2_(0x2F9A4E46)); }
+ AF2 APrxMedGamma2ToPQ(AF2 a) { AF2 b = AF2_AU2((AU2_AF2(a) >> AU2_(2)) + AU2_(0x2F9A4E46)); AF2 b4 = Quart(b); return b - b * (b4 - a) / (AF1_(4.0) * b4); }
+ AF2 APrxHighGamma2ToPQ(AF2 a) { return sqrt(sqrt(a)); }
+ AF2 APrxLoLinearToPQ(AF2 a) { return AF2_AU2((AU2_AF2(a) >> AU2_(3)) + AU2_(0x378D8723)); }
+ AF2 APrxMedLinearToPQ(AF2 a) { AF2 b = AF2_AU2((AU2_AF2(a) >> AU2_(3)) + AU2_(0x378D8723)); AF2 b8 = Oct(b); return b - b * (b8 - a) / (AF1_(8.0) * b8); }
+ AF2 APrxHighLinearToPQ(AF2 a) { return sqrt(sqrt(sqrt(a))); }
+ //------------------------------------------------------------------------------------------------------------------------------
+ AF3 APrxPQToGamma2(AF3 a) { return Quart(a); }
+ AF3 APrxPQToLinear(AF3 a) { return Oct(a); }
+ AF3 APrxLoGamma2ToPQ(AF3 a) { return AF3_AU3((AU3_AF3(a) >> AU3_(2)) + AU3_(0x2F9A4E46)); }
+ AF3 APrxMedGamma2ToPQ(AF3 a) { AF3 b = AF3_AU3((AU3_AF3(a) >> AU3_(2)) + AU3_(0x2F9A4E46)); AF3 b4 = Quart(b); return b - b * (b4 - a) / (AF1_(4.0) * b4); }
+ AF3 APrxHighGamma2ToPQ(AF3 a) { return sqrt(sqrt(a)); }
+ AF3 APrxLoLinearToPQ(AF3 a) { return AF3_AU3((AU3_AF3(a) >> AU3_(3)) + AU3_(0x378D8723)); }
+ AF3 APrxMedLinearToPQ(AF3 a) { AF3 b = AF3_AU3((AU3_AF3(a) >> AU3_(3)) + AU3_(0x378D8723)); AF3 b8 = Oct(b); return b - b * (b8 - a) / (AF1_(8.0) * b8); }
+ AF3 APrxHighLinearToPQ(AF3 a) { return sqrt(sqrt(sqrt(a))); }
+ //------------------------------------------------------------------------------------------------------------------------------
+ AF4 APrxPQToGamma2(AF4 a) { return Quart(a); }
+ AF4 APrxPQToLinear(AF4 a) { return Oct(a); }
+ AF4 APrxLoGamma2ToPQ(AF4 a) { return AF4_AU4((AU4_AF4(a) >> AU4_(2)) + AU4_(0x2F9A4E46)); }
+ AF4 APrxMedGamma2ToPQ(AF4 a) { AF4 b = AF4_AU4((AU4_AF4(a) >> AU4_(2)) + AU4_(0x2F9A4E46)); AF4 b4 = Quart(b); return b - b * (b4 - a) / (AF1_(4.0) * b4); }
+ AF4 APrxHighGamma2ToPQ(AF4 a) { return sqrt(sqrt(a)); }
+ AF4 APrxLoLinearToPQ(AF4 a) { return AF4_AU4((AU4_AF4(a) >> AU4_(3)) + AU4_(0x378D8723)); }
+ AF4 APrxMedLinearToPQ(AF4 a) { AF4 b = AF4_AU4((AU4_AF4(a) >> AU4_(3)) + AU4_(0x378D8723)); AF4 b8 = Oct(b); return b - b * (b8 - a) / (AF1_(8.0) * b8); }
+ AF4 APrxHighLinearToPQ(AF4 a) { return sqrt(sqrt(sqrt(a))); }
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// PARABOLIC SIN & COS
+//------------------------------------------------------------------------------------------------------------------------------
+// Approximate answers to transcendental questions.
+//------------------------------------------------------------------------------------------------------------------------------
+//==============================================================================================================================
+ #if 1
+ // Valid input range is {-1 to 1} representing {0 to 2 pi}.
+ // Output range is {-1/4 to 1/4} representing {-1 to 1}.
+ // Parabolic approximation: x*|x|-x. Cos shifts the phase by a quarter
+ // period (the *0.5+0.75 remap), wraps with fract, and reuses sin.
+ AF1 APSinF1(AF1 x){return x*abs(x)-x;} // MAD.
+ AF2 APSinF2(AF2 x){return x*abs(x)-x;}
+ AF1 APCosF1(AF1 x){x=AFractF1(x*AF1_(0.5)+AF1_(0.75));x=x*AF1_(2.0)-AF1_(1.0);return APSinF1(x);} // 3x MAD, FRACT
+ AF2 APCosF2(AF2 x){x=AFractF2(x*AF2_(0.5)+AF2_(0.75));x=x*AF2_(2.0)-AF2_(1.0);return APSinF2(x);}
+ // Returns {sin,cos} as a pair by evaluating the packed 2-wide sin once.
+ AF2 APSinCosF1(AF1 x){AF1 y=AFractF1(x*AF1_(0.5)+AF1_(0.75));y=y*AF1_(2.0)-AF1_(1.0);return APSinF2(AF2(x,y));}
+ #endif
+//------------------------------------------------------------------------------------------------------------------------------
+ #ifdef A_HALF
+ // Half-precision mirror of the parabolic APSin/APCos above; same contract:
+ // input {-1 to 1} spans one full period, output is scaled to {-1/4 to 1/4}.
+ // For a packed {sin,cos} pair,
+ // - Native takes 16 clocks and 4 issue slots (no packed transcendentals).
+ // - Parabolic takes 8 clocks and 8 issue slots (only fract is non-packed).
+ AH1 APSinH1(AH1 x){return x*abs(x)-x;}
+ AH2 APSinH2(AH2 x){return x*abs(x)-x;} // AND,FMA
+ AH1 APCosH1(AH1 x){x=AFractH1(x*AH1_(0.5)+AH1_(0.75));x=x*AH1_(2.0)-AH1_(1.0);return APSinH1(x);}
+ AH2 APCosH2(AH2 x){x=AFractH2(x*AH2_(0.5)+AH2_(0.75));x=x*AH2_(2.0)-AH2_(1.0);return APSinH2(x);} // 3x FMA, 2xFRACT, AND
+ AH2 APSinCosH1(AH1 x){AH1 y=AFractH1(x*AH1_(0.5)+AH1_(0.75));y=y*AH1_(2.0)-AH1_(1.0);return APSinH2(AH2(x,y));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// [ZOL] ZERO ONE LOGIC
+//------------------------------------------------------------------------------------------------------------------------------
+// Conditional free logic designed for easy 16-bit packing, and backwards porting to 32-bit.
+//------------------------------------------------------------------------------------------------------------------------------
+// 0 := false
+// 1 := true
+//------------------------------------------------------------------------------------------------------------------------------
+// AndNot(x,y) -> !(x&y) .... One op.
+// AndOr(x,y,z) -> (x&y)|z ... One op.
+// GtZero(x) -> x>0.0 ..... One op.
+// Sel(x,y,z) -> x?y:z ..... Two ops, has no precision loss.
+// Signed(x) -> x<0.0 ..... One op.
+// ZeroPass(x,y) -> x?0:y ..... Two ops, 'y' is a pass through safe for aliasing as integer.
+//------------------------------------------------------------------------------------------------------------------------------
+// OPTIMIZATION NOTES
+// ==================
+// - On Vega to use 2 constants in a packed op, pass in as one AW2 or one AH2 'k.xy' and use as 'k.xx' and 'k.yy'.
+// For example 'a.xy*k.xx+k.yy'.
+//==============================================================================================================================
+ #if 1
+ // Integer 0/1 logic: AND == min, OR == max, NOT == XOR with 1.
+ AU1 AZolAndU1(AU1 x,AU1 y){return min(x,y);}
+ AU2 AZolAndU2(AU2 x,AU2 y){return min(x,y);}
+ AU3 AZolAndU3(AU3 x,AU3 y){return min(x,y);}
+ AU4 AZolAndU4(AU4 x,AU4 y){return min(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AZolNotU1(AU1 x){return x^AU1_(1);}
+ AU2 AZolNotU2(AU2 x){return x^AU2_(1);}
+ AU3 AZolNotU3(AU3 x){return x^AU3_(1);}
+ AU4 AZolNotU4(AU4 x){return x^AU4_(1);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AU1 AZolOrU1(AU1 x,AU1 y){return max(x,y);}
+ AU2 AZolOrU2(AU2 x,AU2 y){return max(x,y);}
+ AU3 AZolOrU3(AU3 x,AU3 y){return max(x,y);}
+ AU4 AZolOrU4(AU4 x,AU4 y){return max(x,y);}
+//==============================================================================================================================
+ // Float 0/1 <-> integer 0/1 conversions.
+ AU1 AZolF1ToU1(AF1 x){return AU1(x);}
+ AU2 AZolF2ToU2(AF2 x){return AU2(x);}
+ AU3 AZolF3ToU3(AF3 x){return AU3(x);}
+ AU4 AZolF4ToU4(AF4 x){return AU4(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // 2 ops, denormals don't work in 32-bit on PC (and if they are enabled, OMOD is disabled).
+ AU1 AZolNotF1ToU1(AF1 x){return AU1(AF1_(1.0)-x);}
+ AU2 AZolNotF2ToU2(AF2 x){return AU2(AF2_(1.0)-x);}
+ AU3 AZolNotF3ToU3(AF3 x){return AU3(AF3_(1.0)-x);}
+ AU4 AZolNotF4ToU4(AF4 x){return AU4(AF4_(1.0)-x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AZolU1ToF1(AU1 x){return AF1(x);}
+ AF2 AZolU2ToF2(AU2 x){return AF2(x);}
+ AF3 AZolU3ToF3(AU3 x){return AF3(x);}
+ AF4 AZolU4ToF4(AU4 x){return AF4(x);}
+//==============================================================================================================================
+ // Float 0.0/1.0 logic (conditional-free, per the section notes above).
+ AF1 AZolAndF1(AF1 x,AF1 y){return min(x,y);}
+ AF2 AZolAndF2(AF2 x,AF2 y){return min(x,y);}
+ AF3 AZolAndF3(AF3 x,AF3 y){return min(x,y);}
+ AF4 AZolAndF4(AF4 x,AF4 y){return min(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // AndNot: (-x)*y+1 == 1-x*y, which is !(x&y) for 0/1 inputs -- one FMA.
+ // NOTE(review): the 'ASol' prefix differs from the surrounding 'AZol'
+ // naming; appears to match the upstream vendor header -- confirm before
+ // renaming, since callers depend on it.
+ AF1 ASolAndNotF1(AF1 x,AF1 y){return (-x)*y+AF1_(1.0);}
+ AF2 ASolAndNotF2(AF2 x,AF2 y){return (-x)*y+AF2_(1.0);}
+ AF3 ASolAndNotF3(AF3 x,AF3 y){return (-x)*y+AF3_(1.0);}
+ AF4 ASolAndNotF4(AF4 x,AF4 y){return (-x)*y+AF4_(1.0);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // AndOr: saturate(x*y+z) == (x&y)|z for 0/1 inputs -- one FMA with clamp.
+ AF1 AZolAndOrF1(AF1 x,AF1 y,AF1 z){return ASatF1(x*y+z);}
+ AF2 AZolAndOrF2(AF2 x,AF2 y,AF2 z){return ASatF2(x*y+z);}
+ AF3 AZolAndOrF3(AF3 x,AF3 y,AF3 z){return ASatF3(x*y+z);}
+ AF4 AZolAndOrF4(AF4 x,AF4 y,AF4 z){return ASatF4(x*y+z);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // x>0 test: multiply by A_INFP_F (presumably the +INF bit pattern, defined
+ // earlier in this header -- confirm) then saturate; positives clamp to 1.
+ // NOTE(review): 0*INF is NaN -- relies on the GPU saturate flushing the
+ // non-positive cases to 0; hardware/compiler dependent, confirm.
+ AF1 AZolGtZeroF1(AF1 x){return ASatF1(x*AF1_(A_INFP_F));}
+ AF2 AZolGtZeroF2(AF2 x){return ASatF2(x*AF2_(A_INFP_F));}
+ AF3 AZolGtZeroF3(AF3 x){return ASatF3(x*AF3_(A_INFP_F));}
+ AF4 AZolGtZeroF4(AF4 x){return ASatF4(x*AF4_(A_INFP_F));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AZolNotF1(AF1 x){return AF1_(1.0)-x;}
+ AF2 AZolNotF2(AF2 x){return AF2_(1.0)-x;}
+ AF3 AZolNotF3(AF3 x){return AF3_(1.0)-x;}
+ AF4 AZolNotF4(AF4 x){return AF4_(1.0)-x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AZolOrF1(AF1 x,AF1 y){return max(x,y);}
+ AF2 AZolOrF2(AF2 x,AF2 y){return max(x,y);}
+ AF3 AZolOrF3(AF3 x,AF3 y){return max(x,y);}
+ AF4 AZolOrF4(AF4 x,AF4 y){return max(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Sel: x*y + z*(1-x) as two FMAs; for x in {0,1} the unselected term is
+ // exactly zero, so y or z passes through with no precision loss (see notes).
+ AF1 AZolSelF1(AF1 x,AF1 y,AF1 z){AF1 r=(-x)*z+z;return x*y+r;}
+ AF2 AZolSelF2(AF2 x,AF2 y,AF2 z){AF2 r=(-x)*z+z;return x*y+r;}
+ AF3 AZolSelF3(AF3 x,AF3 y,AF3 z){AF3 r=(-x)*z+z;return x*y+r;}
+ AF4 AZolSelF4(AF4 x,AF4 y,AF4 z){AF4 r=(-x)*z+z;return x*y+r;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // x<0 test: mirrors GtZero using A_INFN_F (presumably -INF; confirm).
+ AF1 AZolSignedF1(AF1 x){return ASatF1(x*AF1_(A_INFN_F));}
+ AF2 AZolSignedF2(AF2 x){return ASatF2(x*AF2_(A_INFN_F));}
+ AF3 AZolSignedF3(AF3 x){return ASatF3(x*AF3_(A_INFN_F));}
+ AF4 AZolSignedF4(AF4 x){return ASatF4(x*AF4_(A_INFN_F));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // ZeroPass: raw bit compare of x against zero; y is passed through as its
+ // untouched bit pattern, so it is safe when y aliases integer data.
+ AF1 AZolZeroPassF1(AF1 x,AF1 y){return AF1_AU1((AU1_AF1(x)!=AU1_(0))?AU1_(0):AU1_AF1(y));}
+ AF2 AZolZeroPassF2(AF2 x,AF2 y){return AF2_AU2((AU2_AF2(x)!=AU2_(0))?AU2_(0):AU2_AF2(y));}
+ AF3 AZolZeroPassF3(AF3 x,AF3 y){return AF3_AU3((AU3_AF3(x)!=AU3_(0))?AU3_(0):AU3_AF3(y));}
+ AF4 AZolZeroPassF4(AF4 x,AF4 y){return AF4_AU4((AU4_AF4(x)!=AU4_(0))?AU4_(0):AU4_AF4(y));}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+ AW1 AZolAndW1(AW1 x,AW1 y){return min(x,y);}
+ AW2 AZolAndW2(AW2 x,AW2 y){return min(x,y);}
+ AW3 AZolAndW3(AW3 x,AW3 y){return min(x,y);}
+ AW4 AZolAndW4(AW4 x,AW4 y){return min(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AZolNotW1(AW1 x){return x^AW1_(1);}
+ AW2 AZolNotW2(AW2 x){return x^AW2_(1);}
+ AW3 AZolNotW3(AW3 x){return x^AW3_(1);}
+ AW4 AZolNotW4(AW4 x){return x^AW4_(1);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AW1 AZolOrW1(AW1 x,AW1 y){return max(x,y);}
+ AW2 AZolOrW2(AW2 x,AW2 y){return max(x,y);}
+ AW3 AZolOrW3(AW3 x,AW3 y){return max(x,y);}
+ AW4 AZolOrW4(AW4 x,AW4 y){return max(x,y);}
+//==============================================================================================================================
+ // Uses denormal trick.
+ AW1 AZolH1ToW1(AH1 x){return AW1_AH1(x*AH1_AW1(AW1_(1)));}
+ AW2 AZolH2ToW2(AH2 x){return AW2_AH2(x*AH2_AW2(AW2_(1)));}
+ AW3 AZolH3ToW3(AH3 x){return AW3_AH3(x*AH3_AW3(AW3_(1)));}
+ AW4 AZolH4ToW4(AH4 x){return AW4_AH4(x*AH4_AW4(AW4_(1)));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // AMD arch lacks a packed conversion opcode.
+ AH1 AZolW1ToH1(AW1 x){return AH1_AW1(x*AW1_AH1(AH1_(1.0)));}
+ AH2 AZolW2ToH2(AW2 x){return AH2_AW2(x*AW2_AH2(AH2_(1.0)));}
+ AH3 AZolW1ToH3(AW3 x){return AH3_AW3(x*AW3_AH3(AH3_(1.0)));}
+ AH4 AZolW2ToH4(AW4 x){return AH4_AW4(x*AW4_AH4(AH4_(1.0)));}
+//==============================================================================================================================
+ AH1 AZolAndH1(AH1 x,AH1 y){return min(x,y);}
+ AH2 AZolAndH2(AH2 x,AH2 y){return min(x,y);}
+ AH3 AZolAndH3(AH3 x,AH3 y){return min(x,y);}
+ AH4 AZolAndH4(AH4 x,AH4 y){return min(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 ASolAndNotH1(AH1 x,AH1 y){return (-x)*y+AH1_(1.0);}
+ AH2 ASolAndNotH2(AH2 x,AH2 y){return (-x)*y+AH2_(1.0);}
+ AH3 ASolAndNotH3(AH3 x,AH3 y){return (-x)*y+AH3_(1.0);}
+ AH4 ASolAndNotH4(AH4 x,AH4 y){return (-x)*y+AH4_(1.0);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolAndOrH1(AH1 x,AH1 y,AH1 z){return ASatH1(x*y+z);}
+ AH2 AZolAndOrH2(AH2 x,AH2 y,AH2 z){return ASatH2(x*y+z);}
+ AH3 AZolAndOrH3(AH3 x,AH3 y,AH3 z){return ASatH3(x*y+z);}
+ AH4 AZolAndOrH4(AH4 x,AH4 y,AH4 z){return ASatH4(x*y+z);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolGtZeroH1(AH1 x){return ASatH1(x*AH1_(A_INFP_H));}
+ AH2 AZolGtZeroH2(AH2 x){return ASatH2(x*AH2_(A_INFP_H));}
+ AH3 AZolGtZeroH3(AH3 x){return ASatH3(x*AH3_(A_INFP_H));}
+ AH4 AZolGtZeroH4(AH4 x){return ASatH4(x*AH4_(A_INFP_H));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolNotH1(AH1 x){return AH1_(1.0)-x;}
+ AH2 AZolNotH2(AH2 x){return AH2_(1.0)-x;}
+ AH3 AZolNotH3(AH3 x){return AH3_(1.0)-x;}
+ AH4 AZolNotH4(AH4 x){return AH4_(1.0)-x;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolOrH1(AH1 x,AH1 y){return max(x,y);}
+ AH2 AZolOrH2(AH2 x,AH2 y){return max(x,y);}
+ AH3 AZolOrH3(AH3 x,AH3 y){return max(x,y);}
+ AH4 AZolOrH4(AH4 x,AH4 y){return max(x,y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolSelH1(AH1 x,AH1 y,AH1 z){AH1 r=(-x)*z+z;return x*y+r;}
+ AH2 AZolSelH2(AH2 x,AH2 y,AH2 z){AH2 r=(-x)*z+z;return x*y+r;}
+ AH3 AZolSelH3(AH3 x,AH3 y,AH3 z){AH3 r=(-x)*z+z;return x*y+r;}
+ AH4 AZolSelH4(AH4 x,AH4 y,AH4 z){AH4 r=(-x)*z+z;return x*y+r;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AZolSignedH1(AH1 x){return ASatH1(x*AH1_(A_INFN_H));}
+ AH2 AZolSignedH2(AH2 x){return ASatH2(x*AH2_(A_INFN_H));}
+ AH3 AZolSignedH3(AH3 x){return ASatH3(x*AH3_(A_INFN_H));}
+ AH4 AZolSignedH4(AH4 x){return ASatH4(x*AH4_(A_INFN_H));}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// COLOR CONVERSIONS
+//------------------------------------------------------------------------------------------------------------------------------
+// These are all linear to/from some other space (where 'linear' has been shortened out of the function name).
+// So 'ToGamma' is 'LinearToGamma', and 'FromGamma' is 'LinearFromGamma'.
+// These are branch free implementations.
+// The AToSrgbF1() function is useful for stores for compute shaders for GPUs without hardware linear->sRGB store conversion.
+//------------------------------------------------------------------------------------------------------------------------------
+// TRANSFER FUNCTIONS
+// ==================
+// 709 ..... Rec709 used for some HDTVs
+// Gamma ... Typically 2.2 for some PC displays, or 2.4-2.5 for CRTs, or 2.2 FreeSync2 native
+// Pq ...... PQ native for HDR10
+// Srgb .... The sRGB output, typical of PC displays, useful for 10-bit output, or storing to 8-bit UNORM without SRGB type
+// Two ..... Gamma 2.0, fastest conversion (useful for intermediate pass approximations)
+// Three ... Gamma 3.0, less fast, but good for HDR.
+//------------------------------------------------------------------------------------------------------------------------------
+// KEEPING TO SPEC
+// ===============
+// Both Rec.709 and sRGB have a linear segment which as spec'ed would intersect the curved segment 2 times.
+// (a.) For 8-bit sRGB, steps {0 to 10.3} are in the linear region (4% of the encoding range).
+// (b.) For 8-bit 709, steps {0 to 20.7} are in the linear region (8% of the encoding range).
+// Also there is a slight step in the transition regions.
+// Precision of the coefficients in the spec is the likely cause.
+// Main usage case of the sRGB code is to do the linear->sRGB conversion in a compute shader before store.
+// This is to work around lack of hardware (typically only ROP does the conversion for free).
+// To "correct" the linear segment, would be to introduce error, because hardware decode of sRGB->linear is fixed (and free).
+// So this header keeps with the spec.
+// For linear->sRGB transforms, the linear segment in some respects reduces error, because rounding in that region is linear.
+// Rounding in the curved region in hardware (and fast software code) introduces error due to rounding in non-linear.
+//------------------------------------------------------------------------------------------------------------------------------
+// FOR PQ
+// ======
+// Both input and output is {0.0-1.0}, and where output 1.0 represents 10000.0 cd/m^2.
+// All constants are only specified to FP32 precision.
+// External PQ source reference,
+// - https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESlib.Utilities_Color.a1.0.1.ctl
+//------------------------------------------------------------------------------------------------------------------------------
+// PACKED VERSIONS
+// ===============
+// These are the A*H2() functions.
+// There are no PQ functions as FP16 seemed to not have enough precision for the conversion.
+// The remaining functions are "good enough" for 8-bit, and maybe 10-bit if not concerned about a few 1-bit errors.
+// Precision is lowest in the 709 conversion, higher in sRGB, higher still in Two and Gamma (when using 2.2 at least).
+//------------------------------------------------------------------------------------------------------------------------------
+// NOTES
+// =====
+// Could be faster for PQ conversions to be in ALU or a texture lookup depending on usage case.
+//==============================================================================================================================
+ #if 1
+ AF1 ATo709F1(AF1 c){AF3 j=AF3(0.018*4.5,4.5,0.45);AF2 k=AF2(1.099,-0.099);
+ return clamp(j.x ,c*j.y ,pow(c,j.z )*k.x +k.y );}
+ AF2 ATo709F2(AF2 c){AF3 j=AF3(0.018*4.5,4.5,0.45);AF2 k=AF2(1.099,-0.099);
+ return clamp(j.xx ,c*j.yy ,pow(c,j.zz )*k.xx +k.yy );}
+ AF3 ATo709F3(AF3 c){AF3 j=AF3(0.018*4.5,4.5,0.45);AF2 k=AF2(1.099,-0.099);
+ return clamp(j.xxx,c*j.yyy,pow(c,j.zzz)*k.xxx+k.yyy);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Note 'rcpX' is '1/x', where the 'x' is what would be used in AFromGamma().
+ AF1 AToGammaF1(AF1 c,AF1 rcpX){return pow(c,AF1_(rcpX));}
+ AF2 AToGammaF2(AF2 c,AF1 rcpX){return pow(c,AF2_(rcpX));}
+ AF3 AToGammaF3(AF3 c,AF1 rcpX){return pow(c,AF3_(rcpX));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AToPqF1(AF1 x){AF1 p=pow(x,AF1_(0.159302));
+ return pow((AF1_(0.835938)+AF1_(18.8516)*p)/(AF1_(1.0)+AF1_(18.6875)*p),AF1_(78.8438));}
+ AF2 AToPqF1(AF2 x){AF2 p=pow(x,AF2_(0.159302));
+ return pow((AF2_(0.835938)+AF2_(18.8516)*p)/(AF2_(1.0)+AF2_(18.6875)*p),AF2_(78.8438));}
+ AF3 AToPqF1(AF3 x){AF3 p=pow(x,AF3_(0.159302));
+ return pow((AF3_(0.835938)+AF3_(18.8516)*p)/(AF3_(1.0)+AF3_(18.6875)*p),AF3_(78.8438));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AToSrgbF1(AF1 c){AF3 j=AF3(0.0031308*12.92,12.92,1.0/2.4);AF2 k=AF2(1.055,-0.055);
+ return clamp(j.x ,c*j.y ,pow(c,j.z )*k.x +k.y );}
+ AF2 AToSrgbF2(AF2 c){AF3 j=AF3(0.0031308*12.92,12.92,1.0/2.4);AF2 k=AF2(1.055,-0.055);
+ return clamp(j.xx ,c*j.yy ,pow(c,j.zz )*k.xx +k.yy );}
+ AF3 AToSrgbF3(AF3 c){AF3 j=AF3(0.0031308*12.92,12.92,1.0/2.4);AF2 k=AF2(1.055,-0.055);
+ return clamp(j.xxx,c*j.yyy,pow(c,j.zzz)*k.xxx+k.yyy);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AToTwoF1(AF1 c){return sqrt(c);}
+ AF2 AToTwoF2(AF2 c){return sqrt(c);}
+ AF3 AToTwoF3(AF3 c){return sqrt(c);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AToThreeF1(AF1 c){return pow(c,AF1_(1.0/3.0));}
+ AF2 AToThreeF2(AF2 c){return pow(c,AF2_(1.0/3.0));}
+ AF3 AToThreeF3(AF3 c){return pow(c,AF3_(1.0/3.0));}
+ #endif
+//==============================================================================================================================
+ #if 1
+ // Unfortunately median won't work here.
+ AF1 AFrom709F1(AF1 c){AF3 j=AF3(0.081/4.5,1.0/4.5,1.0/0.45);AF2 k=AF2(1.0/1.099,0.099/1.099);
+ return AZolSelF1(AZolSignedF1(c-j.x ),c*j.y ,pow(c*k.x +k.y ,j.z ));}
+ AF2 AFrom709F2(AF2 c){AF3 j=AF3(0.081/4.5,1.0/4.5,1.0/0.45);AF2 k=AF2(1.0/1.099,0.099/1.099);
+ return AZolSelF2(AZolSignedF2(c-j.xx ),c*j.yy ,pow(c*k.xx +k.yy ,j.zz ));}
+ AF3 AFrom709F3(AF3 c){AF3 j=AF3(0.081/4.5,1.0/4.5,1.0/0.45);AF2 k=AF2(1.0/1.099,0.099/1.099);
+ return AZolSelF3(AZolSignedF3(c-j.xxx),c*j.yyy,pow(c*k.xxx+k.yyy,j.zzz));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AFromGammaF1(AF1 c,AF1 x){return pow(c,AF1_(x));}
+ AF2 AFromGammaF2(AF2 c,AF1 x){return pow(c,AF2_(x));}
+ AF3 AFromGammaF3(AF3 c,AF1 x){return pow(c,AF3_(x));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AFromPqF1(AF1 x){AF1 p=pow(x,AF1_(0.0126833));
+ return pow(ASatF1(p-AF1_(0.835938))/(AF1_(18.8516)-AF1_(18.6875)*p),AF1_(6.27739));}
+ AF2 AFromPqF1(AF2 x){AF2 p=pow(x,AF2_(0.0126833));
+ return pow(ASatF2(p-AF2_(0.835938))/(AF2_(18.8516)-AF2_(18.6875)*p),AF2_(6.27739));}
+ AF3 AFromPqF1(AF3 x){AF3 p=pow(x,AF3_(0.0126833));
+ return pow(ASatF3(p-AF3_(0.835938))/(AF3_(18.8516)-AF3_(18.6875)*p),AF3_(6.27739));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Unfortunately median won't work here.
+ AF1 AFromSrgbF1(AF1 c){AF3 j=AF3(0.04045/12.92,1.0/12.92,2.4);AF2 k=AF2(1.0/1.055,0.055/1.055);
+ return AZolSelF1(AZolSignedF1(c-j.x ),c*j.y ,pow(c*k.x +k.y ,j.z ));}
+ AF2 AFromSrgbF2(AF2 c){AF3 j=AF3(0.04045/12.92,1.0/12.92,2.4);AF2 k=AF2(1.0/1.055,0.055/1.055);
+ return AZolSelF2(AZolSignedF2(c-j.xx ),c*j.yy ,pow(c*k.xx +k.yy ,j.zz ));}
+ AF3 AFromSrgbF3(AF3 c){AF3 j=AF3(0.04045/12.92,1.0/12.92,2.4);AF2 k=AF2(1.0/1.055,0.055/1.055);
+ return AZolSelF3(AZolSignedF3(c-j.xxx),c*j.yyy,pow(c*k.xxx+k.yyy,j.zzz));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AFromTwoF1(AF1 c){return c*c;}
+ AF2 AFromTwoF2(AF2 c){return c*c;}
+ AF3 AFromTwoF3(AF3 c){return c*c;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF1 AFromThreeF1(AF1 c){return c*c*c;}
+ AF2 AFromThreeF2(AF2 c){return c*c*c;}
+ AF3 AFromThreeF3(AF3 c){return c*c*c;}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+ AH1 ATo709H1(AH1 c){AH3 j=AH3(0.018*4.5,4.5,0.45);AH2 k=AH2(1.099,-0.099);
+ return clamp(j.x ,c*j.y ,pow(c,j.z )*k.x +k.y );}
+ AH2 ATo709H2(AH2 c){AH3 j=AH3(0.018*4.5,4.5,0.45);AH2 k=AH2(1.099,-0.099);
+ return clamp(j.xx ,c*j.yy ,pow(c,j.zz )*k.xx +k.yy );}
+ AH3 ATo709H3(AH3 c){AH3 j=AH3(0.018*4.5,4.5,0.45);AH2 k=AH2(1.099,-0.099);
+ return clamp(j.xxx,c*j.yyy,pow(c,j.zzz)*k.xxx+k.yyy);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AToGammaH1(AH1 c,AH1 rcpX){return pow(c,AH1_(rcpX));}
+ AH2 AToGammaH2(AH2 c,AH1 rcpX){return pow(c,AH2_(rcpX));}
+ AH3 AToGammaH3(AH3 c,AH1 rcpX){return pow(c,AH3_(rcpX));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AToSrgbH1(AH1 c){AH3 j=AH3(0.0031308*12.92,12.92,1.0/2.4);AH2 k=AH2(1.055,-0.055);
+ return clamp(j.x ,c*j.y ,pow(c,j.z )*k.x +k.y );}
+ AH2 AToSrgbH2(AH2 c){AH3 j=AH3(0.0031308*12.92,12.92,1.0/2.4);AH2 k=AH2(1.055,-0.055);
+ return clamp(j.xx ,c*j.yy ,pow(c,j.zz )*k.xx +k.yy );}
+ AH3 AToSrgbH3(AH3 c){AH3 j=AH3(0.0031308*12.92,12.92,1.0/2.4);AH2 k=AH2(1.055,-0.055);
+ return clamp(j.xxx,c*j.yyy,pow(c,j.zzz)*k.xxx+k.yyy);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AToTwoH1(AH1 c){return sqrt(c);}
+ AH2 AToTwoH2(AH2 c){return sqrt(c);}
+ AH3 AToTwoH3(AH3 c){return sqrt(c);}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AToThreeF1(AH1 c){return pow(c,AH1_(1.0/3.0));}
+ AH2 AToThreeF2(AH2 c){return pow(c,AH2_(1.0/3.0));}
+ AH3 AToThreeF3(AH3 c){return pow(c,AH3_(1.0/3.0));}
+ #endif
+//==============================================================================================================================
+ #ifdef A_HALF
+ AH1 AFrom709H1(AH1 c){AH3 j=AH3(0.081/4.5,1.0/4.5,1.0/0.45);AH2 k=AH2(1.0/1.099,0.099/1.099);
+ return AZolSelH1(AZolSignedH1(c-j.x ),c*j.y ,pow(c*k.x +k.y ,j.z ));}
+ AH2 AFrom709H2(AH2 c){AH3 j=AH3(0.081/4.5,1.0/4.5,1.0/0.45);AH2 k=AH2(1.0/1.099,0.099/1.099);
+ return AZolSelH2(AZolSignedH2(c-j.xx ),c*j.yy ,pow(c*k.xx +k.yy ,j.zz ));}
+ AH3 AFrom709H3(AH3 c){AH3 j=AH3(0.081/4.5,1.0/4.5,1.0/0.45);AH2 k=AH2(1.0/1.099,0.099/1.099);
+ return AZolSelH3(AZolSignedH3(c-j.xxx),c*j.yyy,pow(c*k.xxx+k.yyy,j.zzz));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AFromGammaH1(AH1 c,AH1 x){return pow(c,AH1_(x));}
+ AH2 AFromGammaH2(AH2 c,AH1 x){return pow(c,AH2_(x));}
+ AH3 AFromGammaH3(AH3 c,AH1 x){return pow(c,AH3_(x));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AHromSrgbF1(AH1 c){AH3 j=AH3(0.04045/12.92,1.0/12.92,2.4);AH2 k=AH2(1.0/1.055,0.055/1.055);
+ return AZolSelH1(AZolSignedH1(c-j.x ),c*j.y ,pow(c*k.x +k.y ,j.z ));}
+ AH2 AHromSrgbF2(AH2 c){AH3 j=AH3(0.04045/12.92,1.0/12.92,2.4);AH2 k=AH2(1.0/1.055,0.055/1.055);
+ return AZolSelH2(AZolSignedH2(c-j.xx ),c*j.yy ,pow(c*k.xx +k.yy ,j.zz ));}
+ AH3 AHromSrgbF3(AH3 c){AH3 j=AH3(0.04045/12.92,1.0/12.92,2.4);AH2 k=AH2(1.0/1.055,0.055/1.055);
+ return AZolSelH3(AZolSignedH3(c-j.xxx),c*j.yyy,pow(c*k.xxx+k.yyy,j.zzz));}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AFromTwoH1(AH1 c){return c*c;}
+ AH2 AFromTwoH2(AH2 c){return c*c;}
+ AH3 AFromTwoH3(AH3 c){return c*c;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AH1 AFromThreeH1(AH1 c){return c*c*c;}
+ AH2 AFromThreeH2(AH2 c){return c*c*c;}
+ AH3 AFromThreeH3(AH3 c){return c*c*c;}
+ #endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// CS REMAP
+//==============================================================================================================================
+ // Simple remap 64x1 to 8x8 with rotated 2x2 pixel quads in quad linear.
+ // 543210
+ // ======
+ // ..xxx.
+ // yy...y
+ AU2 ARmp8x8(AU1 a){return AU2(ABfe(a,1u,3u),ABfiM(ABfe(a,3u,3u),a,1u));}
+//==============================================================================================================================
+ // More complex remap 64x1 to 8x8 which is necessary for 2D wave reductions.
+ // 543210
+ // ======
+ // .xx..x
+ // y..yy.
+ // Details,
+ // LANE TO 8x8 MAPPING
+ // ===================
+ // 00 01 08 09 10 11 18 19
+ // 02 03 0a 0b 12 13 1a 1b
+ // 04 05 0c 0d 14 15 1c 1d
+ // 06 07 0e 0f 16 17 1e 1f
+ // 20 21 28 29 30 31 38 39
+ // 22 23 2a 2b 32 33 3a 3b
+ // 24 25 2c 2d 34 35 3c 3d
+ // 26 27 2e 2f 36 37 3e 3f
+ AU2 ARmpRed8x8(AU1 a){return AU2(ABfiM(ABfe(a,2u,3u),a,1u),ABfiM(ABfe(a,3u,3u),ABfe(a,1u,2u),2u));}
+//==============================================================================================================================
+ #ifdef A_HALF
+ AW2 ARmp8x8H(AU1 a){return AW2(ABfe(a,1u,3u),ABfiM(ABfe(a,3u,3u),a,1u));}
+ AW2 ARmpRed8x8H(AU1 a){return AW2(ABfiM(ABfe(a,2u,3u),a,1u),ABfiM(ABfe(a,3u,3u),ABfe(a,1u,2u),2u));}
+ #endif
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// REFERENCE
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// IEEE FLOAT RULES
+// ================
+// - saturate(NaN)=0, saturate(-INF)=0, saturate(+INF)=1
+// - {+/-}0 * {+/-}INF = NaN
+// - -INF + (+INF) = NaN
+// - {+/-}0 / {+/-}0 = NaN
+// - {+/-}INF / {+/-}INF = NaN
+// - a<(-0) := sqrt(a) = NaN (a=-0.0 won't NaN)
+// - 0 == -0
+// - 4/0 = +INF
+// - 4/-0 = -INF
+// - 4+INF = +INF
+// - 4-INF = -INF
+// - 4*(+INF) = +INF
+// - 4*(-INF) = -INF
+// - -4*(+INF) = -INF
+// - sqrt(+INF) = +INF
+//------------------------------------------------------------------------------------------------------------------------------
+// FP16 ENCODING
+// =============
+// fedcba9876543210
+// ----------------
+// ......mmmmmmmmmm 10-bit mantissa (encodes 11-bit 0.5 to 1.0 except for denormals)
+// .eeeee.......... 5-bit exponent
+// .00000.......... denormals
+// .00001.......... -14 exponent
+// .11110.......... 15 exponent
+// .111110000000000 infinity
+// .11111nnnnnnnnnn NaN with n!=0
+// s............... sign
+//------------------------------------------------------------------------------------------------------------------------------
+// FP16/INT16 ALIASING DENORMAL
+// ============================
+// 11-bit unsigned integers alias with half float denormal/normal values,
+// 1 = 2^(-24) = 1/16777216 ....................... first denormal value
+// 2 = 2^(-23)
+// ...
+// 1023 = 2^(-14)*(1-2^(-10)) = 2^(-14)*(1-1/1024) ... last denormal value
+// 1024 = 2^(-14) = 1/16384 .......................... first normal value that still maps to integers
+// 2047 .............................................. last normal value that still maps to integers
+// Scaling limits,
+// 2^15 = 32768 ...................................... largest power of 2 scaling
+// Largest pow2 conversion mapping is at *32768,
+// 1 : 2^(-9) = 1/512
+// 2 : 1/256
+// 4 : 1/128
+// 8 : 1/64
+// 16 : 1/32
+// 32 : 1/16
+// 64 : 1/8
+// 128 : 1/4
+// 256 : 1/2
+// 512 : 1
+// 1024 : 2
+// 2047 : a little less than 4
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// GPU/CPU PORTABILITY
+//
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// This is the GPU implementation.
+// See the CPU implementation for docs.
+//==============================================================================================================================
+#ifdef A_GPU
+ #define A_TRUE true
+ #define A_FALSE false
+ #define A_STATIC
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// VECTOR ARGUMENT/RETURN/INITIALIZATION PORTABILITY
+//==============================================================================================================================
+ #define retAD2 AD2
+ #define retAD3 AD3
+ #define retAD4 AD4
+ #define retAF2 AF2
+ #define retAF3 AF3
+ #define retAF4 AF4
+ #define retAL2 AL2
+ #define retAL3 AL3
+ #define retAL4 AL4
+ #define retAU2 AU2
+ #define retAU3 AU3
+ #define retAU4 AU4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define inAD2 in AD2
+ #define inAD3 in AD3
+ #define inAD4 in AD4
+ #define inAF2 in AF2
+ #define inAF3 in AF3
+ #define inAF4 in AF4
+ #define inAL2 in AL2
+ #define inAL3 in AL3
+ #define inAL4 in AL4
+ #define inAU2 in AU2
+ #define inAU3 in AU3
+ #define inAU4 in AU4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define inoutAD2 inout AD2
+ #define inoutAD3 inout AD3
+ #define inoutAD4 inout AD4
+ #define inoutAF2 inout AF2
+ #define inoutAF3 inout AF3
+ #define inoutAF4 inout AF4
+ #define inoutAL2 inout AL2
+ #define inoutAL3 inout AL3
+ #define inoutAL4 inout AL4
+ #define inoutAU2 inout AU2
+ #define inoutAU3 inout AU3
+ #define inoutAU4 inout AU4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define outAD2 out AD2
+ #define outAD3 out AD3
+ #define outAD4 out AD4
+ #define outAF2 out AF2
+ #define outAF3 out AF3
+ #define outAF4 out AF4
+ #define outAL2 out AL2
+ #define outAL3 out AL3
+ #define outAL4 out AL4
+ #define outAU2 out AU2
+ #define outAU3 out AU3
+ #define outAU4 out AU4
+//------------------------------------------------------------------------------------------------------------------------------
+ #define varAD2(x) AD2 x
+ #define varAD3(x) AD3 x
+ #define varAD4(x) AD4 x
+ #define varAF2(x) AF2 x
+ #define varAF3(x) AF3 x
+ #define varAF4(x) AF4 x
+ #define varAL2(x) AL2 x
+ #define varAL3(x) AL3 x
+ #define varAL4(x) AL4 x
+ #define varAU2(x) AU2 x
+ #define varAU3(x) AU3 x
+ #define varAU4(x) AU4 x
+//------------------------------------------------------------------------------------------------------------------------------
+ #define initAD2(x,y) AD2(x,y)
+ #define initAD3(x,y,z) AD3(x,y,z)
+ #define initAD4(x,y,z,w) AD4(x,y,z,w)
+ #define initAF2(x,y) AF2(x,y)
+ #define initAF3(x,y,z) AF3(x,y,z)
+ #define initAF4(x,y,z,w) AF4(x,y,z,w)
+ #define initAL2(x,y) AL2(x,y)
+ #define initAL3(x,y,z) AL3(x,y,z)
+ #define initAL4(x,y,z,w) AL4(x,y,z,w)
+ #define initAU2(x,y) AU2(x,y)
+ #define initAU3(x,y,z) AU3(x,y,z)
+ #define initAU4(x,y,z,w) AU4(x,y,z,w)
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// SCALAR RETURN OPS
+//==============================================================================================================================
+ #define AAbsD1(a) abs(AD1(a))
+ #define AAbsF1(a) abs(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ACosD1(a) cos(AD1(a))
+ #define ACosF1(a) cos(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ADotD2(a,b) dot(AD2(a),AD2(b))
+ #define ADotD3(a,b) dot(AD3(a),AD3(b))
+ #define ADotD4(a,b) dot(AD4(a),AD4(b))
+ #define ADotF2(a,b) dot(AF2(a),AF2(b))
+ #define ADotF3(a,b) dot(AF3(a),AF3(b))
+ #define ADotF4(a,b) dot(AF4(a),AF4(b))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AExp2D1(a) exp2(AD1(a))
+ #define AExp2F1(a) exp2(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AFloorD1(a) floor(AD1(a))
+ #define AFloorF1(a) floor(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ALog2D1(a) log2(AD1(a))
+ #define ALog2F1(a) log2(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AMaxD1(a,b) max(a,b)
+ #define AMaxF1(a,b) max(a,b)
+ #define AMaxL1(a,b) max(a,b)
+ #define AMaxU1(a,b) max(a,b)
+//------------------------------------------------------------------------------------------------------------------------------
+ #define AMinD1(a,b) min(a,b)
+ #define AMinF1(a,b) min(a,b)
+ #define AMinL1(a,b) min(a,b)
+ #define AMinU1(a,b) min(a,b)
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASinD1(a) sin(AD1(a))
+ #define ASinF1(a) sin(AF1(a))
+//------------------------------------------------------------------------------------------------------------------------------
+ #define ASqrtD1(a) sqrt(AD1(a))
+ #define ASqrtF1(a) sqrt(AF1(a))
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// SCALAR RETURN OPS - DEPENDENT
+//==============================================================================================================================
+ #define APowD1(a,b) pow(AD1(a),AF1(b))
+ #define APowF1(a,b) pow(AF1(a),AF1(b))
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// VECTOR OPS
+//------------------------------------------------------------------------------------------------------------------------------
+// These are added as needed for production or prototyping, so not necessarily a complete set.
+// They follow a convention of taking in a destination and also returning the destination value to increase utility.
+//==============================================================================================================================
+ #ifdef A_DUBL
+ AD2 opAAbsD2(outAD2 d,inAD2 a){d=abs(a);return d;}
+ AD3 opAAbsD3(outAD3 d,inAD3 a){d=abs(a);return d;}
+ AD4 opAAbsD4(outAD4 d,inAD4 a){d=abs(a);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAAddD2(outAD2 d,inAD2 a,inAD2 b){d=a+b;return d;}
+ AD3 opAAddD3(outAD3 d,inAD3 a,inAD3 b){d=a+b;return d;}
+ AD4 opAAddD4(outAD4 d,inAD4 a,inAD4 b){d=a+b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAAddOneD2(outAD2 d,inAD2 a,AD1 b){d=a+AD2_(b);return d;}
+ AD3 opAAddOneD3(outAD3 d,inAD3 a,AD1 b){d=a+AD3_(b);return d;}
+ AD4 opAAddOneD4(outAD4 d,inAD4 a,AD1 b){d=a+AD4_(b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opACpyD2(outAD2 d,inAD2 a){d=a;return d;}
+ AD3 opACpyD3(outAD3 d,inAD3 a){d=a;return d;}
+ AD4 opACpyD4(outAD4 d,inAD4 a){d=a;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opALerpD2(outAD2 d,inAD2 a,inAD2 b,inAD2 c){d=ALerpD2(a,b,c);return d;}
+ AD3 opALerpD3(outAD3 d,inAD3 a,inAD3 b,inAD3 c){d=ALerpD3(a,b,c);return d;}
+ AD4 opALerpD4(outAD4 d,inAD4 a,inAD4 b,inAD4 c){d=ALerpD4(a,b,c);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opALerpOneD2(outAD2 d,inAD2 a,inAD2 b,AD1 c){d=ALerpD2(a,b,AD2_(c));return d;}
+ AD3 opALerpOneD3(outAD3 d,inAD3 a,inAD3 b,AD1 c){d=ALerpD3(a,b,AD3_(c));return d;}
+ AD4 opALerpOneD4(outAD4 d,inAD4 a,inAD4 b,AD1 c){d=ALerpD4(a,b,AD4_(c));return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAMaxD2(outAD2 d,inAD2 a,inAD2 b){d=max(a,b);return d;}
+ AD3 opAMaxD3(outAD3 d,inAD3 a,inAD3 b){d=max(a,b);return d;}
+ AD4 opAMaxD4(outAD4 d,inAD4 a,inAD4 b){d=max(a,b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAMinD2(outAD2 d,inAD2 a,inAD2 b){d=min(a,b);return d;}
+ AD3 opAMinD3(outAD3 d,inAD3 a,inAD3 b){d=min(a,b);return d;}
+ AD4 opAMinD4(outAD4 d,inAD4 a,inAD4 b){d=min(a,b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAMulD2(outAD2 d,inAD2 a,inAD2 b){d=a*b;return d;}
+ AD3 opAMulD3(outAD3 d,inAD3 a,inAD3 b){d=a*b;return d;}
+ AD4 opAMulD4(outAD4 d,inAD4 a,inAD4 b){d=a*b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opAMulOneD2(outAD2 d,inAD2 a,AD1 b){d=a*AD2_(b);return d;}
+ AD3 opAMulOneD3(outAD3 d,inAD3 a,AD1 b){d=a*AD3_(b);return d;}
+ AD4 opAMulOneD4(outAD4 d,inAD4 a,AD1 b){d=a*AD4_(b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opANegD2(outAD2 d,inAD2 a){d=-a;return d;}
+ AD3 opANegD3(outAD3 d,inAD3 a){d=-a;return d;}
+ AD4 opANegD4(outAD4 d,inAD4 a){d=-a;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AD2 opARcpD2(outAD2 d,inAD2 a){d=ARcpD2(a);return d;}
+ AD3 opARcpD3(outAD3 d,inAD3 a){d=ARcpD3(a);return d;}
+ AD4 opARcpD4(outAD4 d,inAD4 a){d=ARcpD4(a);return d;}
+ #endif
+//==============================================================================================================================
+ AF2 opAAbsF2(outAF2 d,inAF2 a){d=abs(a);return d;}
+ AF3 opAAbsF3(outAF3 d,inAF3 a){d=abs(a);return d;}
+ AF4 opAAbsF4(outAF4 d,inAF4 a){d=abs(a);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAAddF2(outAF2 d,inAF2 a,inAF2 b){d=a+b;return d;}
+ AF3 opAAddF3(outAF3 d,inAF3 a,inAF3 b){d=a+b;return d;}
+ AF4 opAAddF4(outAF4 d,inAF4 a,inAF4 b){d=a+b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAAddOneF2(outAF2 d,inAF2 a,AF1 b){d=a+AF2_(b);return d;}
+ AF3 opAAddOneF3(outAF3 d,inAF3 a,AF1 b){d=a+AF3_(b);return d;}
+ AF4 opAAddOneF4(outAF4 d,inAF4 a,AF1 b){d=a+AF4_(b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opACpyF2(outAF2 d,inAF2 a){d=a;return d;}
+ AF3 opACpyF3(outAF3 d,inAF3 a){d=a;return d;}
+ AF4 opACpyF4(outAF4 d,inAF4 a){d=a;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opALerpF2(outAF2 d,inAF2 a,inAF2 b,inAF2 c){d=ALerpF2(a,b,c);return d;}
+ AF3 opALerpF3(outAF3 d,inAF3 a,inAF3 b,inAF3 c){d=ALerpF3(a,b,c);return d;}
+ AF4 opALerpF4(outAF4 d,inAF4 a,inAF4 b,inAF4 c){d=ALerpF4(a,b,c);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opALerpOneF2(outAF2 d,inAF2 a,inAF2 b,AF1 c){d=ALerpF2(a,b,AF2_(c));return d;}
+ AF3 opALerpOneF3(outAF3 d,inAF3 a,inAF3 b,AF1 c){d=ALerpF3(a,b,AF3_(c));return d;}
+ AF4 opALerpOneF4(outAF4 d,inAF4 a,inAF4 b,AF1 c){d=ALerpF4(a,b,AF4_(c));return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAMaxF2(outAF2 d,inAF2 a,inAF2 b){d=max(a,b);return d;}
+ AF3 opAMaxF3(outAF3 d,inAF3 a,inAF3 b){d=max(a,b);return d;}
+ AF4 opAMaxF4(outAF4 d,inAF4 a,inAF4 b){d=max(a,b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAMinF2(outAF2 d,inAF2 a,inAF2 b){d=min(a,b);return d;}
+ AF3 opAMinF3(outAF3 d,inAF3 a,inAF3 b){d=min(a,b);return d;}
+ AF4 opAMinF4(outAF4 d,inAF4 a,inAF4 b){d=min(a,b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAMulF2(outAF2 d,inAF2 a,inAF2 b){d=a*b;return d;}
+ AF3 opAMulF3(outAF3 d,inAF3 a,inAF3 b){d=a*b;return d;}
+ AF4 opAMulF4(outAF4 d,inAF4 a,inAF4 b){d=a*b;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opAMulOneF2(outAF2 d,inAF2 a,AF1 b){d=a*AF2_(b);return d;}
+ AF3 opAMulOneF3(outAF3 d,inAF3 a,AF1 b){d=a*AF3_(b);return d;}
+ AF4 opAMulOneF4(outAF4 d,inAF4 a,AF1 b){d=a*AF4_(b);return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opANegF2(outAF2 d,inAF2 a){d=-a;return d;}
+ AF3 opANegF3(outAF3 d,inAF3 a){d=-a;return d;}
+ AF4 opANegF4(outAF4 d,inAF4 a){d=-a;return d;}
+//------------------------------------------------------------------------------------------------------------------------------
+ AF2 opARcpF2(outAF2 d,inAF2 a){d=ARcpF2(a);return d;}
+ AF3 opARcpF3(outAF3 d,inAF3 a){d=ARcpF3(a);return d;}
+ AF4 opARcpF4(outAF4 d,inAF4 a){d=ARcpF4(a);return d;}
+#endif
+
+
+#define FSR_RCAS_F 1
+AU4 con0;
+
+AF4 FsrRcasLoadF(ASU2 p) { return AF4(texelFetch(source, p, 0)); }
+void FsrRcasInputF(inout AF1 r, inout AF1 g, inout AF1 b) {}
+
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+//
+// AMD FidelityFX SUPER RESOLUTION [FSR 1] ::: SPATIAL SCALING & EXTRAS - v1.20210629
+//
+//
+//------------------------------------------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//------------------------------------------------------------------------------------------------------------------------------
+// FidelityFX Super Resolution Sample
+//
+// Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files(the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions :
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//------------------------------------------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//------------------------------------------------------------------------------------------------------------------------------
+// ABOUT
+// =====
+// FSR is a collection of algorithms relating to generating a higher resolution image.
+// This specific header focuses on single-image non-temporal image scaling, and related tools.
+//
+// The core functions are EASU and RCAS:
+// [EASU] Edge Adaptive Spatial Upsampling ....... 1x to 4x area range spatial scaling, clamped adaptive elliptical filter.
+// [RCAS] Robust Contrast Adaptive Sharpening .... A non-scaling variation on CAS.
+// RCAS needs to be applied after EASU as a separate pass.
+//
+// Optional utility functions are:
+// [LFGA] Linear Film Grain Applicator ........... Tool to apply film grain after scaling.
+// [SRTM] Simple Reversible Tone-Mapper .......... Linear HDR {0 to FP16_MAX} to {0 to 1} and back.
+// [TEPD] Temporal Energy Preserving Dither ...... Temporally energy preserving dithered {0 to 1} linear to gamma 2.0 conversion.
+// See each individual sub-section for inline documentation.
+//------------------------------------------------------------------------------------------------------------------------------
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//------------------------------------------------------------------------------------------------------------------------------
+// FUNCTION PERMUTATIONS
+// =====================
+// *F() ..... Single item computation with 32-bit.
+// *H() ..... Single item computation with 16-bit, with packing (aka two 16-bit ops in parallel) when possible.
+// *Hx2() ... Processing two items in parallel with 16-bit, easier packing.
+// Not all interfaces in this file have a *Hx2() form.
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [EASU] EDGE ADAPTIVE SPATIAL UPSAMPLING
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// EASU provides a high quality spatial-only scaling at relatively low cost.
+// Meaning EASU is appropriate for laptops and other low-end GPUs.
+// Quality from 1x to 4x area scaling is good.
+//------------------------------------------------------------------------------------------------------------------------------
+// The scalar uses a modified fast approximation to the standard lanczos(size=2) kernel.
+// EASU runs in a single pass, so it applies a directionally and anisotropically adaptive radial lanczos.
+// This is also kept as simple as possible to have minimum runtime.
+//------------------------------------------------------------------------------------------------------------------------------
+// The lanczos filter has negative lobes, so by itself it will introduce ringing.
+// To remove all ringing, the algorithm uses the nearest 2x2 input texels as a neighborhood,
+// and limits output to the minimum and maximum of that neighborhood.
+//------------------------------------------------------------------------------------------------------------------------------
+// Input image requirements:
+//
+// Color needs to be encoded as 3 channels [red, green, blue] (e.g. XYZ not supported)
+// Each channel needs to be in the range [0, 1]
+// Any color primaries are supported
+// Display / tonemapping curve needs to be as if presenting to sRGB display or similar (e.g. Gamma 2.0)
+// There should be no banding in the input
+// There should be no high amplitude noise in the input
+// There should be no noise in the input that is not at input pixel granularity
+// For performance purposes, use 32bpp formats
+//------------------------------------------------------------------------------------------------------------------------------
+// Best to apply EASU at the end of the frame after tonemapping
+// but before film grain or composite of the UI.
+//------------------------------------------------------------------------------------------------------------------------------
+// Example of including this header for D3D HLSL :
+//
+// #define A_GPU 1
+// #define A_HLSL 1
+// #define A_HALF 1
+// #include "ffx_a.h"
+// #define FSR_EASU_H 1
+// #define FSR_RCAS_H 1
+// //declare input callbacks
+// #include "ffx_fsr1.h"
+//
+// Example of including this header for Vulkan GLSL :
+//
+// #define A_GPU 1
+// #define A_GLSL 1
+// #define A_HALF 1
+// #include "ffx_a.h"
+// #define FSR_EASU_H 1
+// #define FSR_RCAS_H 1
+// //declare input callbacks
+// #include "ffx_fsr1.h"
+//
+// Example of including this header for Vulkan HLSL :
+//
+// #define A_GPU 1
+// #define A_HLSL 1
+// #define A_HLSL_6_2 1
+// #define A_NO_16_BIT_CAST 1
+// #define A_HALF 1
+// #include "ffx_a.h"
+// #define FSR_EASU_H 1
+// #define FSR_RCAS_H 1
+// //declare input callbacks
+// #include "ffx_fsr1.h"
+//
+// Example of declaring the required input callbacks for GLSL :
+// The callbacks need to gather4 for each color channel using the specified texture coordinate 'p'.
+// EASU uses gather4 to reduce position computation logic and for free Arrays of Structures to Structures of Arrays conversion.
+//
+// AH4 FsrEasuRH(AF2 p){return AH4(textureGather(sampler2D(tex,sam),p,0));}
+// AH4 FsrEasuGH(AF2 p){return AH4(textureGather(sampler2D(tex,sam),p,1));}
+// AH4 FsrEasuBH(AF2 p){return AH4(textureGather(sampler2D(tex,sam),p,2));}
+// ...
+// The FsrEasuCon function needs to be called from the CPU or GPU to set up constants.
+// The difference in viewport and input image size is there to support Dynamic Resolution Scaling.
+// To use FsrEasuCon() on the CPU, define A_CPU before including ffx_a and ffx_fsr1.
+// Including a GPU example here, the 'con0' through 'con3' values would be stored out to a constant buffer.
+// AU4 con0,con1,con2,con3;
+// FsrEasuCon(con0,con1,con2,con3,
+// 1920.0,1080.0, // Viewport size (top left aligned) in the input image which is to be scaled.
+// 3840.0,2160.0, // The size of the input image.
+// 2560.0,1440.0); // The output resolution.
+//==============================================================================================================================
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// CONSTANT SETUP
+//==============================================================================================================================
+// Call to setup required constant values (works on CPU or GPU).
+A_STATIC void FsrEasuCon(
+outAU4 con0,
+outAU4 con1,
+outAU4 con2,
+outAU4 con3,
+// This the rendered image resolution being upscaled
+AF1 inputViewportInPixelsX,
+AF1 inputViewportInPixelsY,
+// This is the resolution of the resource containing the input image (useful for dynamic resolution)
+AF1 inputSizeInPixelsX,
+AF1 inputSizeInPixelsY,
+// This is the display resolution which the input image gets upscaled to
+AF1 outputSizeInPixelsX,
+AF1 outputSizeInPixelsY){
+ // Output integer position to a pixel position in viewport.
+ con0[0]=AU1_AF1(inputViewportInPixelsX*ARcpF1(outputSizeInPixelsX));
+ con0[1]=AU1_AF1(inputViewportInPixelsY*ARcpF1(outputSizeInPixelsY));
+ con0[2]=AU1_AF1(AF1_(0.5)*inputViewportInPixelsX*ARcpF1(outputSizeInPixelsX)-AF1_(0.5));
+ con0[3]=AU1_AF1(AF1_(0.5)*inputViewportInPixelsY*ARcpF1(outputSizeInPixelsY)-AF1_(0.5));
+ // Viewport pixel position to normalized image space.
+ // This is used to get upper-left of 'F' tap.
+ con1[0]=AU1_AF1(ARcpF1(inputSizeInPixelsX));
+ con1[1]=AU1_AF1(ARcpF1(inputSizeInPixelsY));
+ // Centers of gather4, first offset from upper-left of 'F'.
+ // +---+---+
+ // | | |
+ // +--(0)--+
+ // | b | c |
+ // +---F---+---+---+
+ // | e | f | g | h |
+ // +--(1)--+--(2)--+
+ // | i | j | k | l |
+ // +---+---+---+---+
+ // | n | o |
+ // +--(3)--+
+ // | | |
+ // +---+---+
+ con1[2]=AU1_AF1(AF1_( 1.0)*ARcpF1(inputSizeInPixelsX));
+ con1[3]=AU1_AF1(AF1_(-1.0)*ARcpF1(inputSizeInPixelsY));
+ // These are from (0) instead of 'F'.
+ con2[0]=AU1_AF1(AF1_(-1.0)*ARcpF1(inputSizeInPixelsX));
+ con2[1]=AU1_AF1(AF1_( 2.0)*ARcpF1(inputSizeInPixelsY));
+ con2[2]=AU1_AF1(AF1_( 1.0)*ARcpF1(inputSizeInPixelsX));
+ con2[3]=AU1_AF1(AF1_( 2.0)*ARcpF1(inputSizeInPixelsY));
+ con3[0]=AU1_AF1(AF1_( 0.0)*ARcpF1(inputSizeInPixelsX));
+ con3[1]=AU1_AF1(AF1_( 4.0)*ARcpF1(inputSizeInPixelsY));
+ con3[2]=con3[3]=0;}
+
+//If the an offset into the input image resource
+A_STATIC void FsrEasuConOffset(
+ outAU4 con0,
+ outAU4 con1,
+ outAU4 con2,
+ outAU4 con3,
+ // This the rendered image resolution being upscaled
+ AF1 inputViewportInPixelsX,
+ AF1 inputViewportInPixelsY,
+ // This is the resolution of the resource containing the input image (useful for dynamic resolution)
+ AF1 inputSizeInPixelsX,
+ AF1 inputSizeInPixelsY,
+ // This is the display resolution which the input image gets upscaled to
+ AF1 outputSizeInPixelsX,
+ AF1 outputSizeInPixelsY,
+ // This is the input image offset into the resource containing it (useful for dynamic resolution)
+ AF1 inputOffsetInPixelsX,
+ AF1 inputOffsetInPixelsY) {
+ FsrEasuCon(con0, con1, con2, con3, inputViewportInPixelsX, inputViewportInPixelsY, inputSizeInPixelsX, inputSizeInPixelsY, outputSizeInPixelsX, outputSizeInPixelsY);
+ con0[2] = AU1_AF1(AF1_(0.5) * inputViewportInPixelsX * ARcpF1(outputSizeInPixelsX) - AF1_(0.5) + inputOffsetInPixelsX);
+ con0[3] = AU1_AF1(AF1_(0.5) * inputViewportInPixelsY * ARcpF1(outputSizeInPixelsY) - AF1_(0.5) + inputOffsetInPixelsY);
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// NON-PACKED 32-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(FSR_EASU_F)
+ // Input callback prototypes, need to be implemented by calling shader
+ AF4 FsrEasuRF(AF2 p);
+ AF4 FsrEasuGF(AF2 p);
+ AF4 FsrEasuBF(AF2 p);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Filtering for a given tap for the scalar.
+ void FsrEasuTapF(
+ inout AF3 aC, // Accumulated color, with negative lobe.
+ inout AF1 aW, // Accumulated weight.
+ AF2 off, // Pixel offset from resolve position to tap.
+ AF2 dir, // Gradient direction.
+ AF2 len, // Length.
+ AF1 lob, // Negative lobe strength.
+ AF1 clp, // Clipping point.
+ AF3 c){ // Tap color.
+ // Rotate offset by direction.
+ AF2 v;
+ v.x=(off.x*( dir.x))+(off.y*dir.y);
+ v.y=(off.x*(-dir.y))+(off.y*dir.x);
+ // Anisotropy.
+ v*=len;
+ // Compute distance^2.
+ AF1 d2=v.x*v.x+v.y*v.y;
+ // Limit to the window as at corner, 2 taps can easily be outside.
+ d2=min(d2,clp);
+ // Approximation of lancos2 without sin() or rcp(), or sqrt() to get x.
+ // (25/16 * (2/5 * x^2 - 1)^2 - (25/16 - 1)) * (1/4 * x^2 - 1)^2
+ // |_______________________________________| |_______________|
+ // base window
+ // The general form of the 'base' is,
+ // (a*(b*x^2-1)^2-(a-1))
+ // Where 'a=1/(2*b-b^2)' and 'b' moves around the negative lobe.
+ AF1 wB=AF1_(2.0/5.0)*d2+AF1_(-1.0);
+ AF1 wA=lob*d2+AF1_(-1.0);
+ wB*=wB;
+ wA*=wA;
+ wB=AF1_(25.0/16.0)*wB+AF1_(-(25.0/16.0-1.0));
+ AF1 w=wB*wA;
+ // Do weighted average.
+ aC+=c*w;aW+=w;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Accumulate direction and length.
+ void FsrEasuSetF(
+ inout AF2 dir,
+ inout AF1 len,
+ AF2 pp,
+ AP1 biS,AP1 biT,AP1 biU,AP1 biV,
+ AF1 lA,AF1 lB,AF1 lC,AF1 lD,AF1 lE){
+ // Compute bilinear weight, branches factor out as predicates are compiler time immediates.
+ // s t
+ // u v
+ AF1 w = AF1_(0.0);
+ if(biS)w=(AF1_(1.0)-pp.x)*(AF1_(1.0)-pp.y);
+ if(biT)w= pp.x *(AF1_(1.0)-pp.y);
+ if(biU)w=(AF1_(1.0)-pp.x)* pp.y ;
+ if(biV)w= pp.x * pp.y ;
+ // Direction is the '+' diff.
+ // a
+ // b c d
+ // e
+ // Then takes magnitude from abs average of both sides of 'c'.
+ // Length converts gradient reversal to 0, smoothly to non-reversal at 1, shaped, then adding horz and vert terms.
+ AF1 dc=lD-lC;
+ AF1 cb=lC-lB;
+ AF1 lenX=max(abs(dc),abs(cb));
+ lenX=APrxLoRcpF1(lenX);
+ AF1 dirX=lD-lB;
+ dir.x+=dirX*w;
+ lenX=ASatF1(abs(dirX)*lenX);
+ lenX*=lenX;
+ len+=lenX*w;
+ // Repeat for the y axis.
+ AF1 ec=lE-lC;
+ AF1 ca=lC-lA;
+ AF1 lenY=max(abs(ec),abs(ca));
+ lenY=APrxLoRcpF1(lenY);
+ AF1 dirY=lE-lA;
+ dir.y+=dirY*w;
+ lenY=ASatF1(abs(dirY)*lenY);
+ lenY*=lenY;
+ len+=lenY*w;}
+//------------------------------------------------------------------------------------------------------------------------------
+ void FsrEasuF(
+ out AF3 pix,
+ AU2 ip, // Integer pixel position in output.
+ AU4 con0, // Constants generated by FsrEasuCon().
+ AU4 con1,
+ AU4 con2,
+ AU4 con3){
+//------------------------------------------------------------------------------------------------------------------------------
+ // Get position of 'f'.
+ AF2 pp=AF2(ip)*AF2_AU2(con0.xy)+AF2_AU2(con0.zw);
+ AF2 fp=floor(pp);
+ pp-=fp;
+//------------------------------------------------------------------------------------------------------------------------------
+ // 12-tap kernel.
+ // b c
+ // e f g h
+ // i j k l
+ // n o
+ // Gather 4 ordering.
+ // a b
+ // r g
+ // For packed FP16, need either {rg} or {ab} so using the following setup for gather in all versions,
+ // a b <- unused (z)
+ // r g
+ // a b a b
+ // r g r g
+ // a b
+ // r g <- unused (z)
+ // Allowing dead-code removal to remove the 'z's.
+ AF2 p0=fp*AF2_AU2(con1.xy)+AF2_AU2(con1.zw);
+ // These are from p0 to avoid pulling two constants on pre-Navi hardware.
+ AF2 p1=p0+AF2_AU2(con2.xy);
+ AF2 p2=p0+AF2_AU2(con2.zw);
+ AF2 p3=p0+AF2_AU2(con3.xy);
+ AF4 bczzR=FsrEasuRF(p0);
+ AF4 bczzG=FsrEasuGF(p0);
+ AF4 bczzB=FsrEasuBF(p0);
+ AF4 ijfeR=FsrEasuRF(p1);
+ AF4 ijfeG=FsrEasuGF(p1);
+ AF4 ijfeB=FsrEasuBF(p1);
+ AF4 klhgR=FsrEasuRF(p2);
+ AF4 klhgG=FsrEasuGF(p2);
+ AF4 klhgB=FsrEasuBF(p2);
+ AF4 zzonR=FsrEasuRF(p3);
+ AF4 zzonG=FsrEasuGF(p3);
+ AF4 zzonB=FsrEasuBF(p3);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Simplest multi-channel approximate luma possible (luma times 2, in 2 FMA/MAD).
+ AF4 bczzL=bczzB*AF4_(0.5)+(bczzR*AF4_(0.5)+bczzG);
+ AF4 ijfeL=ijfeB*AF4_(0.5)+(ijfeR*AF4_(0.5)+ijfeG);
+ AF4 klhgL=klhgB*AF4_(0.5)+(klhgR*AF4_(0.5)+klhgG);
+ AF4 zzonL=zzonB*AF4_(0.5)+(zzonR*AF4_(0.5)+zzonG);
+ // Rename.
+ AF1 bL=bczzL.x;
+ AF1 cL=bczzL.y;
+ AF1 iL=ijfeL.x;
+ AF1 jL=ijfeL.y;
+ AF1 fL=ijfeL.z;
+ AF1 eL=ijfeL.w;
+ AF1 kL=klhgL.x;
+ AF1 lL=klhgL.y;
+ AF1 hL=klhgL.z;
+ AF1 gL=klhgL.w;
+ AF1 oL=zzonL.z;
+ AF1 nL=zzonL.w;
+ // Accumulate for bilinear interpolation.
+ AF2 dir=AF2_(0.0);
+ AF1 len=AF1_(0.0);
+ FsrEasuSetF(dir,len,pp,true, false,false,false,bL,eL,fL,gL,jL);
+ FsrEasuSetF(dir,len,pp,false,true ,false,false,cL,fL,gL,hL,kL);
+ FsrEasuSetF(dir,len,pp,false,false,true ,false,fL,iL,jL,kL,nL);
+ FsrEasuSetF(dir,len,pp,false,false,false,true ,gL,jL,kL,lL,oL);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalize with approximation, and cleanup close to zero.
+ AF2 dir2=dir*dir;
+ AF1 dirR=dir2.x+dir2.y;
+ AP1 zro=dirR<AF1_(1.0/32768.0);
+ dirR=APrxLoRsqF1(dirR);
+ dirR=zro?AF1_(1.0):dirR;
+ dir.x=zro?AF1_(1.0):dir.x;
+ dir*=AF2_(dirR);
+ // Transform from {0 to 2} to {0 to 1} range, and shape with square.
+ len=len*AF1_(0.5);
+ len*=len;
+ // Stretch kernel {1.0 vert|horz, to sqrt(2.0) on diagonal}.
+ AF1 stretch=(dir.x*dir.x+dir.y*dir.y)*APrxLoRcpF1(max(abs(dir.x),abs(dir.y)));
+ // Anisotropic length after rotation,
+ // x := 1.0 lerp to 'stretch' on edges
+ // y := 1.0 lerp to 2x on edges
+ AF2 len2=AF2(AF1_(1.0)+(stretch-AF1_(1.0))*len,AF1_(1.0)+AF1_(-0.5)*len);
+ // Based on the amount of 'edge',
+ // the window shifts from +/-{sqrt(2.0) to slightly beyond 2.0}.
+ AF1 lob=AF1_(0.5)+AF1_((1.0/4.0-0.04)-0.5)*len;
+ // Set distance^2 clipping point to the end of the adjustable window.
+ AF1 clp=APrxLoRcpF1(lob);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Accumulation mixed with min/max of 4 nearest.
+ // b c
+ // e f g h
+ // i j k l
+ // n o
+ AF3 min4=min(AMin3F3(AF3(ijfeR.z,ijfeG.z,ijfeB.z),AF3(klhgR.w,klhgG.w,klhgB.w),AF3(ijfeR.y,ijfeG.y,ijfeB.y)),
+ AF3(klhgR.x,klhgG.x,klhgB.x));
+ AF3 max4=max(AMax3F3(AF3(ijfeR.z,ijfeG.z,ijfeB.z),AF3(klhgR.w,klhgG.w,klhgB.w),AF3(ijfeR.y,ijfeG.y,ijfeB.y)),
+ AF3(klhgR.x,klhgG.x,klhgB.x));
+ // Accumulation.
+ AF3 aC=AF3_(0.0);
+ AF1 aW=AF1_(0.0);
+ FsrEasuTapF(aC,aW,AF2( 0.0,-1.0)-pp,dir,len2,lob,clp,AF3(bczzR.x,bczzG.x,bczzB.x)); // b
+ FsrEasuTapF(aC,aW,AF2( 1.0,-1.0)-pp,dir,len2,lob,clp,AF3(bczzR.y,bczzG.y,bczzB.y)); // c
+ FsrEasuTapF(aC,aW,AF2(-1.0, 1.0)-pp,dir,len2,lob,clp,AF3(ijfeR.x,ijfeG.x,ijfeB.x)); // i
+ FsrEasuTapF(aC,aW,AF2( 0.0, 1.0)-pp,dir,len2,lob,clp,AF3(ijfeR.y,ijfeG.y,ijfeB.y)); // j
+ FsrEasuTapF(aC,aW,AF2( 0.0, 0.0)-pp,dir,len2,lob,clp,AF3(ijfeR.z,ijfeG.z,ijfeB.z)); // f
+ FsrEasuTapF(aC,aW,AF2(-1.0, 0.0)-pp,dir,len2,lob,clp,AF3(ijfeR.w,ijfeG.w,ijfeB.w)); // e
+ FsrEasuTapF(aC,aW,AF2( 1.0, 1.0)-pp,dir,len2,lob,clp,AF3(klhgR.x,klhgG.x,klhgB.x)); // k
+ FsrEasuTapF(aC,aW,AF2( 2.0, 1.0)-pp,dir,len2,lob,clp,AF3(klhgR.y,klhgG.y,klhgB.y)); // l
+ FsrEasuTapF(aC,aW,AF2( 2.0, 0.0)-pp,dir,len2,lob,clp,AF3(klhgR.z,klhgG.z,klhgB.z)); // h
+ FsrEasuTapF(aC,aW,AF2( 1.0, 0.0)-pp,dir,len2,lob,clp,AF3(klhgR.w,klhgG.w,klhgB.w)); // g
+ FsrEasuTapF(aC,aW,AF2( 1.0, 2.0)-pp,dir,len2,lob,clp,AF3(zzonR.z,zzonG.z,zzonB.z)); // o
+ FsrEasuTapF(aC,aW,AF2( 0.0, 2.0)-pp,dir,len2,lob,clp,AF3(zzonR.w,zzonG.w,zzonB.w)); // n
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalize and dering.
+ pix=min(max4,max(min4,aC*AF3_(ARcpF1(aW))));}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// PACKED 16-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)&&defined(FSR_EASU_H)
+// Input callback prototypes, need to be implemented by calling shader
+ AH4 FsrEasuRH(AF2 p);
+ AH4 FsrEasuGH(AF2 p);
+ AH4 FsrEasuBH(AF2 p);
+//------------------------------------------------------------------------------------------------------------------------------
+ // This runs 2 taps in parallel.
+ // Accumulate one packed pair of EASU filter taps: adds weighted color into aCR/aCG/aCB
+ // and the weight itself into aW, for later normalization in FsrEasuH().
+ void FsrEasuTapH(
+ inout AH2 aCR,inout AH2 aCG,inout AH2 aCB,
+ inout AH2 aW,
+ AH2 offX,AH2 offY,
+ AH2 dir,
+ AH2 len,
+ AH1 lob,
+ AH1 clp,
+ AH2 cR,AH2 cG,AH2 cB){
+ // Rotate offset by direction.
+ AH2 vX,vY;
+ vX=offX* dir.xx +offY*dir.yy;
+ vY=offX*(-dir.yy)+offY*dir.xx;
+ // Anisotropy.
+ vX*=len.x;vY*=len.y;
+ // Compute distance^2.
+ AH2 d2=vX*vX+vY*vY;
+ // Limit to the window, as at a corner 2 taps can easily be outside.
+ d2=min(d2,AH2_(clp));
+ // Approximation of lanczos2 without sin(), rcp(), or sqrt():
+ //  (25/16 * (2/5 * x^2 - 1)^2 - (25/16 - 1)) * (lob * x^2 - 1)^2
+ AH2 wB=AH2_(2.0/5.0)*d2+AH2_(-1.0);
+ AH2 wA=AH2_(lob)*d2+AH2_(-1.0);
+ wB*=wB;
+ wA*=wA;
+ wB=AH2_(25.0/16.0)*wB+AH2_(-(25.0/16.0-1.0));
+ AH2 w=wB*wA;
+ // Do weighted average.
+ aCR+=cR*w;aCG+=cG*w;aCB+=cB*w;aW+=w;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // This runs 2 taps in parallel.
+ // Accumulate direction and length for one packed pair of the 2x2 analysis quads.
+ // biST selects the {1-y} (upper) bilinear pair, biUV the {y} (lower) pair.
+ void FsrEasuSetH(
+ inout AH2 dirPX,inout AH2 dirPY,
+ inout AH2 lenP,
+ AH2 pp,
+ AP1 biST,AP1 biUV,
+ AH2 lA,AH2 lB,AH2 lC,AH2 lD,AH2 lE){
+ // Packed bilinear weights: {1-x, x} scaled by either (1-y) or y.
+ AH2 w = AH2_(0.0);
+ if(biST)w=(AH2(1.0,0.0)+AH2(-pp.x,pp.x))*AH2_(AH1_(1.0)-pp.y);
+ if(biUV)w=(AH2(1.0,0.0)+AH2(-pp.x,pp.x))*AH2_( pp.y);
+ // ABS is not free in the packed FP16 path.
+ // Direction is the '+' diff around 'c',
+ //    a
+ //  b c d
+ //    e
+ // with magnitude taken from the abs average of both sides of 'c'.
+ AH2 dc=lD-lC;
+ AH2 cb=lC-lB;
+ AH2 lenX=max(abs(dc),abs(cb));
+ lenX=ARcpH2(lenX);
+ AH2 dirX=lD-lB;
+ dirPX+=dirX*w;
+ // Gradient reversal maps toward 0, pure non-reversal toward 1, then shaped by squaring.
+ lenX=ASatH2(abs(dirX)*lenX);
+ lenX*=lenX;
+ lenP+=lenX*w;
+ // Same analysis for the vertical axis, added into the shared length term.
+ AH2 ec=lE-lC;
+ AH2 ca=lC-lA;
+ AH2 lenY=max(abs(ec),abs(ca));
+ lenY=ARcpH2(lenY);
+ AH2 dirY=lE-lA;
+ dirPY+=dirY*w;
+ lenY=ASatH2(abs(dirY)*lenY);
+ lenY*=lenY;
+ lenP+=lenY*w;}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed FP16 EASU: same algorithm as the FP32 FsrEasuF(), with channel math running
+ // 2 values per operation. Positions stay FP32 for precision; pp is narrowed to FP16.
+ void FsrEasuH(
+ out AH3 pix,
+ AU2 ip,
+ AU4 con0,
+ AU4 con1,
+ AU4 con2,
+ AU4 con3){
+//------------------------------------------------------------------------------------------------------------------------------
+ // Output pixel mapped into input space; fp = integer tap base, pp = fraction inside that texel.
+ AF2 pp=AF2(ip)*AF2_AU2(con0.xy)+AF2_AU2(con0.zw);
+ AF2 fp=floor(pp);
+ pp-=fp;
+ AH2 ppp=AH2(pp);
+//------------------------------------------------------------------------------------------------------------------------------
+ // 12-tap kernel, fetched as 4 packed quads per channel,
+ //    b c
+ //  e f g h
+ //  i j k l
+ //    n o
+ AF2 p0=fp*AF2_AU2(con1.xy)+AF2_AU2(con1.zw);
+ AF2 p1=p0+AF2_AU2(con2.xy);
+ AF2 p2=p0+AF2_AU2(con2.zw);
+ AF2 p3=p0+AF2_AU2(con3.xy);
+ AH4 bczzR=FsrEasuRH(p0);
+ AH4 bczzG=FsrEasuGH(p0);
+ AH4 bczzB=FsrEasuBH(p0);
+ AH4 ijfeR=FsrEasuRH(p1);
+ AH4 ijfeG=FsrEasuGH(p1);
+ AH4 ijfeB=FsrEasuBH(p1);
+ AH4 klhgR=FsrEasuRH(p2);
+ AH4 klhgG=FsrEasuGH(p2);
+ AH4 klhgB=FsrEasuBH(p2);
+ AH4 zzonR=FsrEasuRH(p3);
+ AH4 zzonG=FsrEasuGH(p3);
+ AH4 zzonB=FsrEasuBH(p3);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Simplest multi-channel approximate luma possible (luma times 2, in 2 FMA/MAD).
+ AH4 bczzL=bczzB*AH4_(0.5)+(bczzR*AH4_(0.5)+bczzG);
+ AH4 ijfeL=ijfeB*AH4_(0.5)+(ijfeR*AH4_(0.5)+ijfeG);
+ AH4 klhgL=klhgB*AH4_(0.5)+(klhgR*AH4_(0.5)+klhgG);
+ AH4 zzonL=zzonB*AH4_(0.5)+(zzonR*AH4_(0.5)+zzonG);
+ // Rename.
+ AH1 bL=bczzL.x;
+ AH1 cL=bczzL.y;
+ AH1 iL=ijfeL.x;
+ AH1 jL=ijfeL.y;
+ AH1 fL=ijfeL.z;
+ AH1 eL=ijfeL.w;
+ AH1 kL=klhgL.x;
+ AH1 lL=klhgL.y;
+ AH1 hL=klhgL.z;
+ AH1 gL=klhgL.w;
+ AH1 oL=zzonL.z;
+ AH1 nL=zzonL.w;
+ // This part is different, accumulating 2 taps in parallel.
+ AH2 dirPX=AH2_(0.0);
+ AH2 dirPY=AH2_(0.0);
+ AH2 lenP=AH2_(0.0);
+ FsrEasuSetH(dirPX,dirPY,lenP,ppp,true, false,AH2(bL,cL),AH2(eL,fL),AH2(fL,gL),AH2(gL,hL),AH2(jL,kL));
+ FsrEasuSetH(dirPX,dirPY,lenP,ppp,false,true ,AH2(fL,gL),AH2(iL,jL),AH2(jL,kL),AH2(kL,lL),AH2(nL,oL));
+ // Sum the packed pair accumulators down to scalar direction and length.
+ AH2 dir=AH2(dirPX.r+dirPX.g,dirPY.r+dirPY.g);
+ AH1 len=lenP.r+lenP.g;
+//------------------------------------------------------------------------------------------------------------------------------
+ // Normalize direction with a low precision rsqrt approximation, and clean up near zero.
+ AH2 dir2=dir*dir;
+ AH1 dirR=dir2.x+dir2.y;
+ AP1 zro=dirR<AH1_(1.0/32768.0);
+ dirR=APrxLoRsqH1(dirR);
+ dirR=zro?AH1_(1.0):dirR;
+ dir.x=zro?AH1_(1.0):dir.x;
+ dir*=AH2_(dirR);
+ // Transform from {0.0 to 2.0} to {0.0 to 1.0} range, and shape with a square.
+ len=len*AH1_(0.5);
+ len*=len;
+ // Stretch kernel {1.0 vert|horz, to sqrt(2.0) on diagonal}.
+ AH1 stretch=(dir.x*dir.x+dir.y*dir.y)*APrxLoRcpH1(max(abs(dir.x),abs(dir.y)));
+ // Anisotropic length after rotation: x lerps 1.0 to 'stretch' on edges, y lerps 1.0 to 0.5 on edges.
+ AH2 len2=AH2(AH1_(1.0)+(stretch-AH1_(1.0))*len,AH1_(1.0)+AH1_(-0.5)*len);
+ // Negative lobe strength and the matching distance^2 clipping point for the window.
+ AH1 lob=AH1_(0.5)+AH1_((1.0/4.0-0.04)-0.5)*len;
+ AH1 clp=APrxLoRcpH1(lob);
+//------------------------------------------------------------------------------------------------------------------------------
+ // FP16 is different, using packed trick to do min and max in same operation.
+ AH2 bothR=max(max(AH2(-ijfeR.z,ijfeR.z),AH2(-klhgR.w,klhgR.w)),max(AH2(-ijfeR.y,ijfeR.y),AH2(-klhgR.x,klhgR.x)));
+ AH2 bothG=max(max(AH2(-ijfeG.z,ijfeG.z),AH2(-klhgG.w,klhgG.w)),max(AH2(-ijfeG.y,ijfeG.y),AH2(-klhgG.x,klhgG.x)));
+ AH2 bothB=max(max(AH2(-ijfeB.z,ijfeB.z),AH2(-klhgB.w,klhgB.w)),max(AH2(-ijfeB.y,ijfeB.y),AH2(-klhgB.x,klhgB.x)));
+ // This part is different for FP16, working pairs of taps at a time.
+ AH2 pR=AH2_(0.0);
+ AH2 pG=AH2_(0.0);
+ AH2 pB=AH2_(0.0);
+ AH2 pW=AH2_(0.0);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 0.0, 1.0)-ppp.xx,AH2(-1.0,-1.0)-ppp.yy,dir,len2,lob,clp,bczzR.xy,bczzG.xy,bczzB.xy);
+ FsrEasuTapH(pR,pG,pB,pW,AH2(-1.0, 0.0)-ppp.xx,AH2( 1.0, 1.0)-ppp.yy,dir,len2,lob,clp,ijfeR.xy,ijfeG.xy,ijfeB.xy);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 0.0,-1.0)-ppp.xx,AH2( 0.0, 0.0)-ppp.yy,dir,len2,lob,clp,ijfeR.zw,ijfeG.zw,ijfeB.zw);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 1.0, 2.0)-ppp.xx,AH2( 1.0, 1.0)-ppp.yy,dir,len2,lob,clp,klhgR.xy,klhgG.xy,klhgB.xy);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 2.0, 1.0)-ppp.xx,AH2( 0.0, 0.0)-ppp.yy,dir,len2,lob,clp,klhgR.zw,klhgG.zw,klhgB.zw);
+ FsrEasuTapH(pR,pG,pB,pW,AH2( 1.0, 0.0)-ppp.xx,AH2( 2.0, 2.0)-ppp.yy,dir,len2,lob,clp,zzonR.zw,zzonG.zw,zzonB.zw);
+ // Fold the packed pair accumulators into scalar color and weight.
+ AH3 aC=AH3(pR.x+pR.y,pG.x+pG.y,pB.x+pB.y);
+ AH1 aW=pW.x+pW.y;
+//------------------------------------------------------------------------------------------------------------------------------
+ // Slightly different for FP16 version due to combined min and max.
+ pix=min(AH3(bothR.y,bothG.y,bothB.y),max(-AH3(bothR.x,bothG.x,bothB.x),aC*AH3_(ARcpH1(aW))));}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [RCAS] ROBUST CONTRAST ADAPTIVE SHARPENING
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// CAS uses a simplified mechanism to convert local contrast into a variable amount of sharpness.
+// RCAS uses a more exact mechanism, solving for the maximum local sharpness possible before clipping.
+// RCAS also has a built in process to limit sharpening of what it detects as possible noise.
+// RCAS sharper does not support scaling, as it should be applied after EASU scaling.
+// Pass EASU output straight into RCAS, no color conversions necessary.
+//------------------------------------------------------------------------------------------------------------------------------
+// RCAS is based on the following logic.
+// RCAS uses a 5 tap filter in a cross pattern (same as CAS),
+// w n
+// w 1 w for taps w m e
+// w s
+// Where 'w' is the negative lobe weight.
+// output = (w*(n+e+w+s)+m)/(4*w+1)
+// RCAS solves for 'w' by seeing where the signal might clip out of the {0 to 1} input range,
+// 0 == (w*(n+e+w+s)+m)/(4*w+1) -> w = -m/(n+e+w+s)
+// 1 == (w*(n+e+w+s)+m)/(4*w+1) -> w = (1-m)/(n+e+w+s-4*1)
+// Then chooses the 'w' which results in no clipping, limits 'w', and multiplies by the 'sharp' amount.
+// This solution above has issues with MSAA input as the steps along the gradient cause edge detection issues.
+// So RCAS uses 4x the maximum and 4x the minimum (depending on equation) in place of the individual taps.
+// As well as switching from 'm' to either the minimum or maximum (depending on side), to help in energy conservation.
+// This stabilizes RCAS.
+// RCAS does a simple highpass which is normalized against the local contrast then shaped,
+// 0.25
+// 0.25 -1 0.25
+// 0.25
+// This is used as a noise detection filter, to reduce the effect of RCAS on grain, and focus on real edges.
+//
+// GLSL example for the required callbacks :
+//
+// AH4 FsrRcasLoadH(ASW2 p){return AH4(imageLoad(imgSrc,ASU2(p)));}
+// void FsrRcasInputH(inout AH1 r,inout AH1 g,inout AH1 b)
+// {
+// //do any simple input color conversions here or leave empty if none needed
+// }
+//
+// FsrRcasCon need to be called from the CPU or GPU to set up constants.
+// Including a GPU example here, the 'con' value would be stored out to a constant buffer.
+//
+// AU4 con;
+// FsrRcasCon(con,
+// 0.0); // The scale is {0.0 := maximum sharpness, to N>0, where N is the number of stops (halving) of the reduction of sharpness}.
+// ---------------
+// RCAS sharpening supports a CAS-like pass-through alpha via,
+// #define FSR_RCAS_PASSTHROUGH_ALPHA 1
+// RCAS also supports a define to enable a more expensive path to avoid some sharpening of noise.
+// Would suggest it is better to apply film grain after RCAS sharpening (and after scaling) instead of using this define,
+// #define FSR_RCAS_DENOISE 1
+//==============================================================================================================================
+// This is set at the limit of providing unnatural results for sharpening.
+#define FSR_RCAS_LIMIT (0.25-(1.0/16.0))
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// CONSTANT SETUP
+//==============================================================================================================================
+// Call to setup required constant values (works on CPU or GPU).
+// Builds the RCAS constant vector consumed by FsrRcasF/FsrRcasH/FsrRcasHx2.
+// NOTE(review): 'outAU4'/'varAF2'/'initAF2' are expected to be portability macros from ffx_a.h — confirm.
+A_STATIC void FsrRcasCon(
+outAU4 con,
+// The scale is {0.0 := maximum, to N>0, where N is the number of stops (halving) of the reduction of sharpness}.
+AF1 sharpness){
+ // Transform from stops to linear value.
+ sharpness=AExp2F1(-sharpness);
+ varAF2(hSharp)=initAF2(sharpness,sharpness);
+ // con.x = linear sharpness for the FP32 path; con.y = the same value packed for the FP16 paths.
+ con[0]=AU1_AF1(sharpness);
+ con[1]=AU1_AH2_AF2(hSharp);
+ // Remaining slots are zeroed (unused by the RCAS paths visible here).
+ con[2]=0;
+ con[3]=0;}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// NON-PACKED 32-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(FSR_RCAS_F)
+ // Input callback prototypes that need to be implemented by calling shader
+ AF4 FsrRcasLoadF(ASU2 p);
+ void FsrRcasInputF(inout AF1 r,inout AF1 g,inout AF1 b);
+//------------------------------------------------------------------------------------------------------------------------------
+ // RCAS sharpen one output pixel (FP32 path): cross-pattern filter with a per-pixel
+ // solved negative lobe, limited so no channel clips (see the algorithm notes above).
+ void FsrRcasF(
+ out AF1 pixR, // Output values, non-vector so port between RcasFilter() and RcasFilterH() is easy.
+ out AF1 pixG,
+ out AF1 pixB,
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ out AF1 pixA,
+ #endif
+ AU2 ip, // Integer pixel position in output.
+ AU4 con){ // Constant generated by RcasSetup().
+ // Algorithm uses minimal 3x3 pixel neighborhood.
+ // b
+ // d e f
+ // h
+ ASU2 sp=ASU2(ip);
+ AF3 b=FsrRcasLoadF(sp+ASU2( 0,-1)).rgb;
+ AF3 d=FsrRcasLoadF(sp+ASU2(-1, 0)).rgb;
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ AF4 ee=FsrRcasLoadF(sp);
+ AF3 e=ee.rgb;pixA=ee.a;
+ #else
+ AF3 e=FsrRcasLoadF(sp).rgb;
+ #endif
+ AF3 f=FsrRcasLoadF(sp+ASU2( 1, 0)).rgb;
+ AF3 h=FsrRcasLoadF(sp+ASU2( 0, 1)).rgb;
+ // Rename (32-bit) or regroup (16-bit).
+ AF1 bR=b.r;
+ AF1 bG=b.g;
+ AF1 bB=b.b;
+ AF1 dR=d.r;
+ AF1 dG=d.g;
+ AF1 dB=d.b;
+ AF1 eR=e.r;
+ AF1 eG=e.g;
+ AF1 eB=e.b;
+ AF1 fR=f.r;
+ AF1 fG=f.g;
+ AF1 fB=f.b;
+ AF1 hR=h.r;
+ AF1 hG=h.g;
+ AF1 hB=h.b;
+ // Run optional input transform.
+ FsrRcasInputF(bR,bG,bB);
+ FsrRcasInputF(dR,dG,dB);
+ FsrRcasInputF(eR,eG,eB);
+ FsrRcasInputF(fR,fG,fB);
+ FsrRcasInputF(hR,hG,hB);
+ // Luma times 2.
+ AF1 bL=bB*AF1_(0.5)+(bR*AF1_(0.5)+bG);
+ AF1 dL=dB*AF1_(0.5)+(dR*AF1_(0.5)+dG);
+ AF1 eL=eB*AF1_(0.5)+(eR*AF1_(0.5)+eG);
+ AF1 fL=fB*AF1_(0.5)+(fR*AF1_(0.5)+fG);
+ AF1 hL=hB*AF1_(0.5)+(hR*AF1_(0.5)+hG);
+ // Noise detection.
+ AF1 nz=AF1_(0.25)*bL+AF1_(0.25)*dL+AF1_(0.25)*fL+AF1_(0.25)*hL-eL;
+ // Normalize the highpass against local contrast, then shape to a weight: 1.0 = flat, 0.5 = max noise.
+ nz=ASatF1(abs(nz)*APrxMedRcpF1(AMax3F1(AMax3F1(bL,dL,eL),fL,hL)-AMin3F1(AMin3F1(bL,dL,eL),fL,hL)));
+ nz=AF1_(-0.5)*nz+AF1_(1.0);
+ // Min and max of ring.
+ AF1 mn4R=min(AMin3F1(bR,dR,fR),hR);
+ AF1 mn4G=min(AMin3F1(bG,dG,fG),hG);
+ AF1 mn4B=min(AMin3F1(bB,dB,fB),hB);
+ AF1 mx4R=max(AMax3F1(bR,dR,fR),hR);
+ AF1 mx4G=max(AMax3F1(bG,dG,fG),hG);
+ AF1 mx4B=max(AMax3F1(bB,dB,fB),hB);
+ // Immediate constants for peak range.
+ AF2 peakC=AF2(1.0,-1.0*4.0);
+ // Limiters, these need to be high precision RCPs.
+ AF1 hitMinR=min(mn4R,eR)*ARcpF1(AF1_(4.0)*mx4R);
+ AF1 hitMinG=min(mn4G,eG)*ARcpF1(AF1_(4.0)*mx4G);
+ AF1 hitMinB=min(mn4B,eB)*ARcpF1(AF1_(4.0)*mx4B);
+ AF1 hitMaxR=(peakC.x-max(mx4R,eR))*ARcpF1(AF1_(4.0)*mn4R+peakC.y);
+ AF1 hitMaxG=(peakC.x-max(mx4G,eG))*ARcpF1(AF1_(4.0)*mn4G+peakC.y);
+ AF1 hitMaxB=(peakC.x-max(mx4B,eB))*ARcpF1(AF1_(4.0)*mn4B+peakC.y);
+ AF1 lobeR=max(-hitMinR,hitMaxR);
+ AF1 lobeG=max(-hitMinG,hitMaxG);
+ AF1 lobeB=max(-hitMinB,hitMaxB);
+ // Largest non-clipping negative lobe across channels, clamped to the limit, scaled by sharpness (con.x).
+ AF1 lobe=max(AF1_(-FSR_RCAS_LIMIT),min(AMax3F1(lobeR,lobeG,lobeB),AF1_(0.0)))*AF1_AU1(con.x);
+ // Apply noise removal.
+ #ifdef FSR_RCAS_DENOISE
+ lobe*=nz;
+ #endif
+ // Resolve, which needs the medium precision rcp approximation to avoid visible tonality changes.
+ AF1 rcpL=APrxMedRcpF1(AF1_(4.0)*lobe+AF1_(1.0));
+ pixR=(lobe*bR+lobe*dR+lobe*hR+lobe*fR+eR)*rcpL;
+ pixG=(lobe*bG+lobe*dG+lobe*hG+lobe*fG+eG)*rcpL;
+ pixB=(lobe*bB+lobe*dB+lobe*hB+lobe*fB+eB)*rcpL;
+ return;}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// NON-PACKED 16-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)&&defined(FSR_RCAS_H)
+ // Input callback prototypes that need to be implemented by calling shader
+ AH4 FsrRcasLoadH(ASW2 p);
+ void FsrRcasInputH(inout AH1 r,inout AH1 g,inout AH1 b);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Non-packed FP16 variant of FsrRcasF(): identical structure, half precision math.
+ void FsrRcasH(
+ out AH1 pixR, // Output values, non-vector so port between RcasFilter() and RcasFilterH() is easy.
+ out AH1 pixG,
+ out AH1 pixB,
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ out AH1 pixA,
+ #endif
+ AU2 ip, // Integer pixel position in output.
+ AU4 con){ // Constant generated by RcasSetup().
+ // Sharpening algorithm uses minimal 3x3 pixel neighborhood.
+ // b
+ // d e f
+ // h
+ ASW2 sp=ASW2(ip);
+ AH3 b=FsrRcasLoadH(sp+ASW2( 0,-1)).rgb;
+ AH3 d=FsrRcasLoadH(sp+ASW2(-1, 0)).rgb;
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ AH4 ee=FsrRcasLoadH(sp);
+ AH3 e=ee.rgb;pixA=ee.a;
+ #else
+ AH3 e=FsrRcasLoadH(sp).rgb;
+ #endif
+ AH3 f=FsrRcasLoadH(sp+ASW2( 1, 0)).rgb;
+ AH3 h=FsrRcasLoadH(sp+ASW2( 0, 1)).rgb;
+ // Rename (32-bit) or regroup (16-bit).
+ AH1 bR=b.r;
+ AH1 bG=b.g;
+ AH1 bB=b.b;
+ AH1 dR=d.r;
+ AH1 dG=d.g;
+ AH1 dB=d.b;
+ AH1 eR=e.r;
+ AH1 eG=e.g;
+ AH1 eB=e.b;
+ AH1 fR=f.r;
+ AH1 fG=f.g;
+ AH1 fB=f.b;
+ AH1 hR=h.r;
+ AH1 hG=h.g;
+ AH1 hB=h.b;
+ // Run optional input transform.
+ FsrRcasInputH(bR,bG,bB);
+ FsrRcasInputH(dR,dG,dB);
+ FsrRcasInputH(eR,eG,eB);
+ FsrRcasInputH(fR,fG,fB);
+ FsrRcasInputH(hR,hG,hB);
+ // Luma times 2.
+ AH1 bL=bB*AH1_(0.5)+(bR*AH1_(0.5)+bG);
+ AH1 dL=dB*AH1_(0.5)+(dR*AH1_(0.5)+dG);
+ AH1 eL=eB*AH1_(0.5)+(eR*AH1_(0.5)+eG);
+ AH1 fL=fB*AH1_(0.5)+(fR*AH1_(0.5)+fG);
+ AH1 hL=hB*AH1_(0.5)+(hR*AH1_(0.5)+hG);
+ // Noise detection.
+ AH1 nz=AH1_(0.25)*bL+AH1_(0.25)*dL+AH1_(0.25)*fL+AH1_(0.25)*hL-eL;
+ nz=ASatH1(abs(nz)*APrxMedRcpH1(AMax3H1(AMax3H1(bL,dL,eL),fL,hL)-AMin3H1(AMin3H1(bL,dL,eL),fL,hL)));
+ nz=AH1_(-0.5)*nz+AH1_(1.0);
+ // Min and max of ring.
+ AH1 mn4R=min(AMin3H1(bR,dR,fR),hR);
+ AH1 mn4G=min(AMin3H1(bG,dG,fG),hG);
+ AH1 mn4B=min(AMin3H1(bB,dB,fB),hB);
+ AH1 mx4R=max(AMax3H1(bR,dR,fR),hR);
+ AH1 mx4G=max(AMax3H1(bG,dG,fG),hG);
+ AH1 mx4B=max(AMax3H1(bB,dB,fB),hB);
+ // Immediate constants for peak range.
+ AH2 peakC=AH2(1.0,-1.0*4.0);
+ // Limiters, these need to be high precision RCPs.
+ AH1 hitMinR=min(mn4R,eR)*ARcpH1(AH1_(4.0)*mx4R);
+ AH1 hitMinG=min(mn4G,eG)*ARcpH1(AH1_(4.0)*mx4G);
+ AH1 hitMinB=min(mn4B,eB)*ARcpH1(AH1_(4.0)*mx4B);
+ AH1 hitMaxR=(peakC.x-max(mx4R,eR))*ARcpH1(AH1_(4.0)*mn4R+peakC.y);
+ AH1 hitMaxG=(peakC.x-max(mx4G,eG))*ARcpH1(AH1_(4.0)*mn4G+peakC.y);
+ AH1 hitMaxB=(peakC.x-max(mx4B,eB))*ARcpH1(AH1_(4.0)*mn4B+peakC.y);
+ AH1 lobeR=max(-hitMinR,hitMaxR);
+ AH1 lobeG=max(-hitMinG,hitMaxG);
+ AH1 lobeB=max(-hitMinB,hitMaxB);
+ // Sharpness constant is unpacked from the FP16x2 slot (con.y) written by FsrRcasCon().
+ AH1 lobe=max(AH1_(-FSR_RCAS_LIMIT),min(AMax3H1(lobeR,lobeG,lobeB),AH1_(0.0)))*AH2_AU1(con.y).x;
+ // Apply noise removal.
+ #ifdef FSR_RCAS_DENOISE
+ lobe*=nz;
+ #endif
+ // Resolve, which needs the medium precision rcp approximation to avoid visible tonality changes.
+ AH1 rcpL=APrxMedRcpH1(AH1_(4.0)*lobe+AH1_(1.0));
+ pixR=(lobe*bR+lobe*dR+lobe*hR+lobe*fR+eR)*rcpL;
+ pixG=(lobe*bG+lobe*dG+lobe*hG+lobe*fG+eG)*rcpL;
+ pixB=(lobe*bB+lobe*dB+lobe*hB+lobe*fB+eB)*rcpL;}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+// PACKED 16-BIT VERSION
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)&&defined(FSR_RCAS_HX2)
+ // Input callback prototypes that need to be implemented by the calling shader
+ AH4 FsrRcasLoadHx2(ASW2 p);
+ void FsrRcasInputHx2(inout AH2 r,inout AH2 g,inout AH2 b);
+//------------------------------------------------------------------------------------------------------------------------------
+ // Can be used to convert from packed Structures of Arrays to Arrays of Structures for store.
+ void FsrRcasDepackHx2(out AH4 pix0,out AH4 pix1,AH2 pixR,AH2 pixG,AH2 pixB){
+ #ifdef A_HLSL
+ // Invoke a slower path for DX only, since it won't allow uninitialized values.
+ pix0.a=pix1.a=0.0;
+ #endif
+ // Unzip the SoA pairs: .x lanes form the first pixel, .y lanes the second.
+ pix0.rgb=AH3(pixR.x,pixG.x,pixB.x);
+ pix1.rgb=AH3(pixR.y,pixG.y,pixB.y);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed FP16 RCAS: sharpens 2 pixels at once (one from each 8x8 tile of a 16x8 region),
+ // with each AH2 lane pair holding {left tile, right tile} values.
+ void FsrRcasHx2(
+ // Output values are for 2 8x8 tiles in a 16x8 region.
+ // pix<R,G,B>.x = left 8x8 tile
+ // pix<R,G,B>.y = right 8x8 tile
+ // This enables later processing to easily be packed as well.
+ out AH2 pixR,
+ out AH2 pixG,
+ out AH2 pixB,
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ out AH2 pixA,
+ #endif
+ AU2 ip, // Integer pixel position in output.
+ AU4 con){ // Constant generated by RcasSetup().
+ // No scaling algorithm uses minimal 3x3 pixel neighborhood.
+ ASW2 sp0=ASW2(ip);
+ AH3 b0=FsrRcasLoadHx2(sp0+ASW2( 0,-1)).rgb;
+ AH3 d0=FsrRcasLoadHx2(sp0+ASW2(-1, 0)).rgb;
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ AH4 ee0=FsrRcasLoadHx2(sp0);
+ AH3 e0=ee0.rgb;pixA.r=ee0.a;
+ #else
+ AH3 e0=FsrRcasLoadHx2(sp0).rgb;
+ #endif
+ AH3 f0=FsrRcasLoadHx2(sp0+ASW2( 1, 0)).rgb;
+ AH3 h0=FsrRcasLoadHx2(sp0+ASW2( 0, 1)).rgb;
+ // Second sample position is the matching pixel of the right 8x8 tile (+8 in x).
+ ASW2 sp1=sp0+ASW2(8,0);
+ AH3 b1=FsrRcasLoadHx2(sp1+ASW2( 0,-1)).rgb;
+ AH3 d1=FsrRcasLoadHx2(sp1+ASW2(-1, 0)).rgb;
+ #ifdef FSR_RCAS_PASSTHROUGH_ALPHA
+ AH4 ee1=FsrRcasLoadHx2(sp1);
+ AH3 e1=ee1.rgb;pixA.g=ee1.a;
+ #else
+ AH3 e1=FsrRcasLoadHx2(sp1).rgb;
+ #endif
+ AH3 f1=FsrRcasLoadHx2(sp1+ASW2( 1, 0)).rgb;
+ AH3 h1=FsrRcasLoadHx2(sp1+ASW2( 0, 1)).rgb;
+ // Arrays of Structures to Structures of Arrays conversion.
+ AH2 bR=AH2(b0.r,b1.r);
+ AH2 bG=AH2(b0.g,b1.g);
+ AH2 bB=AH2(b0.b,b1.b);
+ AH2 dR=AH2(d0.r,d1.r);
+ AH2 dG=AH2(d0.g,d1.g);
+ AH2 dB=AH2(d0.b,d1.b);
+ AH2 eR=AH2(e0.r,e1.r);
+ AH2 eG=AH2(e0.g,e1.g);
+ AH2 eB=AH2(e0.b,e1.b);
+ AH2 fR=AH2(f0.r,f1.r);
+ AH2 fG=AH2(f0.g,f1.g);
+ AH2 fB=AH2(f0.b,f1.b);
+ AH2 hR=AH2(h0.r,h1.r);
+ AH2 hG=AH2(h0.g,h1.g);
+ AH2 hB=AH2(h0.b,h1.b);
+ // Run optional input transform.
+ FsrRcasInputHx2(bR,bG,bB);
+ FsrRcasInputHx2(dR,dG,dB);
+ FsrRcasInputHx2(eR,eG,eB);
+ FsrRcasInputHx2(fR,fG,fB);
+ FsrRcasInputHx2(hR,hG,hB);
+ // Luma times 2.
+ AH2 bL=bB*AH2_(0.5)+(bR*AH2_(0.5)+bG);
+ AH2 dL=dB*AH2_(0.5)+(dR*AH2_(0.5)+dG);
+ AH2 eL=eB*AH2_(0.5)+(eR*AH2_(0.5)+eG);
+ AH2 fL=fB*AH2_(0.5)+(fR*AH2_(0.5)+fG);
+ AH2 hL=hB*AH2_(0.5)+(hR*AH2_(0.5)+hG);
+ // Noise detection.
+ AH2 nz=AH2_(0.25)*bL+AH2_(0.25)*dL+AH2_(0.25)*fL+AH2_(0.25)*hL-eL;
+ nz=ASatH2(abs(nz)*APrxMedRcpH2(AMax3H2(AMax3H2(bL,dL,eL),fL,hL)-AMin3H2(AMin3H2(bL,dL,eL),fL,hL)));
+ nz=AH2_(-0.5)*nz+AH2_(1.0);
+ // Min and max of ring.
+ AH2 mn4R=min(AMin3H2(bR,dR,fR),hR);
+ AH2 mn4G=min(AMin3H2(bG,dG,fG),hG);
+ AH2 mn4B=min(AMin3H2(bB,dB,fB),hB);
+ AH2 mx4R=max(AMax3H2(bR,dR,fR),hR);
+ AH2 mx4G=max(AMax3H2(bG,dG,fG),hG);
+ AH2 mx4B=max(AMax3H2(bB,dB,fB),hB);
+ // Immediate constants for peak range.
+ AH2 peakC=AH2(1.0,-1.0*4.0);
+ // Limiters, these need to be high precision RCPs.
+ AH2 hitMinR=min(mn4R,eR)*ARcpH2(AH2_(4.0)*mx4R);
+ AH2 hitMinG=min(mn4G,eG)*ARcpH2(AH2_(4.0)*mx4G);
+ AH2 hitMinB=min(mn4B,eB)*ARcpH2(AH2_(4.0)*mx4B);
+ AH2 hitMaxR=(peakC.x-max(mx4R,eR))*ARcpH2(AH2_(4.0)*mn4R+peakC.y);
+ AH2 hitMaxG=(peakC.x-max(mx4G,eG))*ARcpH2(AH2_(4.0)*mn4G+peakC.y);
+ AH2 hitMaxB=(peakC.x-max(mx4B,eB))*ARcpH2(AH2_(4.0)*mn4B+peakC.y);
+ AH2 lobeR=max(-hitMinR,hitMaxR);
+ AH2 lobeG=max(-hitMinG,hitMaxG);
+ AH2 lobeB=max(-hitMinB,hitMaxB);
+ // Sharpness from the packed FP16x2 constant (con.y), splatted to both tiles.
+ AH2 lobe=max(AH2_(-FSR_RCAS_LIMIT),min(AMax3H2(lobeR,lobeG,lobeB),AH2_(0.0)))*AH2_(AH2_AU1(con.y).x);
+ // Apply noise removal.
+ #ifdef FSR_RCAS_DENOISE
+ lobe*=nz;
+ #endif
+ // Resolve, which needs the medium precision rcp approximation to avoid visible tonality changes.
+ AH2 rcpL=APrxMedRcpH2(AH2_(4.0)*lobe+AH2_(1.0));
+ pixR=(lobe*bR+lobe*dR+lobe*hR+lobe*fR+eR)*rcpL;
+ pixG=(lobe*bG+lobe*dG+lobe*hG+lobe*fG+eG)*rcpL;
+ pixB=(lobe*bB+lobe*dB+lobe*hB+lobe*fB+eB)*rcpL;}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [LFGA] LINEAR FILM GRAIN APPLICATOR
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// Adding output-resolution film grain after scaling is a good way to mask both rendering and scaling artifacts.
+// Suggest using tiled blue noise as film grain input, with peak noise frequency set for a specific look and feel.
+// The 'Lfga*()' functions provide a convenient way to introduce grain.
+// These functions limit grain based on distance to signal limits.
+// This is done so that the grain is temporally energy preserving, and thus won't modify image tonality.
+// Grain application should be done in a linear colorspace.
+// The grain should be temporally changing, but have a temporal sum per pixel that adds to zero (non-biased).
+//------------------------------------------------------------------------------------------------------------------------------
+// Usage,
+// FsrLfga*(
+// color, // In/out linear colorspace color {0 to 1} ranged.
+// grain, // Per pixel grain texture value {-0.5 to 0.5} ranged, input is 3-channel to support colored grain.
+// amount); // Amount of grain {0 to 1} ranged.
+//------------------------------------------------------------------------------------------------------------------------------
+// Example if grain texture is monochrome: 'FsrLfgaF(color,AF3_(grain),amount)'
+//==============================================================================================================================
+#if defined(A_GPU)
+ // Maximum grain is the minimum distance to the signal limit.
+ // min(1-c,c) fades applied grain near 0.0 and 1.0 so the result stays inside {0 to 1}
+ // (given grain t in {-0.5 to 0.5} and amount a in {0 to 1} per the usage notes above).
+ void FsrLfgaF(inout AF3 c,AF3 t,AF1 a){c+=(t*AF3_(a))*min(AF3_(1.0)-c,c);}
+#endif
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)
+ // Half precision version (slower).
+ void FsrLfgaH(inout AH3 c,AH3 t,AH1 a){c+=(t*AH3_(a))*min(AH3_(1.0)-c,c);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed half precision version (faster).
+ // Same limiter as the FP32 path, applied independently to each packed channel pair.
+ void FsrLfgaHx2(inout AH2 cR,inout AH2 cG,inout AH2 cB,AH2 tR,AH2 tG,AH2 tB,AH1 a){
+ cR+=(tR*AH2_(a))*min(AH2_(1.0)-cR,cR);cG+=(tG*AH2_(a))*min(AH2_(1.0)-cG,cG);cB+=(tB*AH2_(a))*min(AH2_(1.0)-cB,cB);}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [SRTM] SIMPLE REVERSIBLE TONE-MAPPER
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// This provides a way to take linear HDR color {0 to FP16_MAX} and convert it into a temporary {0 to 1} ranged post-tonemapped linear.
+// The tonemapper preserves RGB ratio, which helps maintain HDR color bleed during filtering.
+//------------------------------------------------------------------------------------------------------------------------------
+// Reversible tonemapper usage,
+// FsrSrtm*(color); // {0 to FP16_MAX} converted to {0 to 1}.
+// FsrSrtmInv*(color); // {0 to 1} converted into {0 to 32768, output peak safe for FP16}.
+//==============================================================================================================================
+#if defined(A_GPU)
+ // Forward tonemap: c /= (max(r,g,b)+1), mapping linear HDR into {0 to 1} while preserving RGB ratio.
+ void FsrSrtmF(inout AF3 c){c*=AF3_(ARcpF1(AMax3F1(c.r,c.g,c.b)+AF1_(1.0)));}
+ // The extra max solves the c=1.0 case (which is a /0).
+ void FsrSrtmInvF(inout AF3 c){c*=AF3_(ARcpF1(max(AF1_(1.0/32768.0),AF1_(1.0)-AMax3F1(c.r,c.g,c.b))));}
+#endif
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)
+ // Half precision versions of the reversible tonemapper above.
+ void FsrSrtmH(inout AH3 c){c*=AH3_(ARcpH1(AMax3H1(c.r,c.g,c.b)+AH1_(1.0)));}
+ // The max term guards the c=1.0 case (a /0), clamping the reciprocal argument to 1/32768.
+ void FsrSrtmInvH(inout AH3 c){c*=AH3_(ARcpH1(max(AH1_(1.0/32768.0),AH1_(1.0)-AMax3H1(c.r,c.g,c.b))));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // Packed 2-wide variants: one shared reciprocal applied to all three channel pairs.
+ void FsrSrtmHx2(inout AH2 cR,inout AH2 cG,inout AH2 cB){
+ AH2 rcp=ARcpH2(AMax3H2(cR,cG,cB)+AH2_(1.0));cR*=rcp;cG*=rcp;cB*=rcp;}
+ void FsrSrtmInvHx2(inout AH2 cR,inout AH2 cG,inout AH2 cB){
+ AH2 rcp=ARcpH2(max(AH2_(1.0/32768.0),AH2_(1.0)-AMax3H2(cR,cG,cB)));cR*=rcp;cG*=rcp;cB*=rcp;}
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+//_____________________________________________________________/\_______________________________________________________________
+//==============================================================================================================================
+//
+// FSR - [TEPD] TEMPORAL ENERGY PRESERVING DITHER
+//
+//------------------------------------------------------------------------------------------------------------------------------
+// Temporally energy preserving dithered {0 to 1} linear to gamma 2.0 conversion.
+// Gamma 2.0 is used so that the conversion back to linear is just to square the color.
+// The conversion comes in 8-bit and 10-bit modes, designed for output to 8-bit UNORM or 10:10:10:2 respectively.
+// Given good non-biased temporal blue noise as dither input,
+// the output dither will temporally conserve energy.
+// This is done by choosing the linear nearest step point instead of perceptual nearest.
+// See code below for details.
+//------------------------------------------------------------------------------------------------------------------------------
+// DX SPEC RULES FOR FLOAT->UNORM 8-BIT CONVERSION
+// ===============================================
+// - Output is 'uint(floor(saturate(n)*255.0+0.5))'.
+// - Thus rounding is to nearest.
+// - NaN gets converted to zero.
+// - INF is clamped to {0.0 to 1.0}.
+//==============================================================================================================================
+#if defined(A_GPU)
+ // Hand tuned integer position to dither value, with more values than simple checkerboard.
+  // Only 32-bit has enough precision for this computation.
+ // Output is {0 to <1}.
+ AF1 FsrTepdDitF(AU2 p,AU1 f){
+ AF1 x=AF1_(p.x+f);
+ AF1 y=AF1_(p.y);
+ // The 1.61803 golden ratio.
+ AF1 a=AF1_((1.0+sqrt(5.0))/2.0);
+ // Number designed to provide a good visual pattern.
+ AF1 b=AF1_(1.0/3.69);
+ x=x*a+(y*b);
+ return AFractF1(x);}
+//------------------------------------------------------------------------------------------------------------------------------
+ // This version is 8-bit gamma 2.0.
+ // The 'c' input is {0 to 1}.
+ // Output is {0 to 1} ready for image store.
+ void FsrTepdC8F(inout AF3 c,AF1 dit){
+ AF3 n=sqrt(c);
+ n=floor(n*AF3_(255.0))*AF3_(1.0/255.0);
+ AF3 a=n*n;
+ AF3 b=n+AF3_(1.0/255.0);b=b*b;
+ // Ratio of 'a' to 'b' required to produce 'c'.
+ // APrxLoRcpF1() won't work here (at least for very high dynamic ranges).
+ // APrxMedRcpF1() is an IADD,FMA,MUL.
+ AF3 r=(c-b)*APrxMedRcpF3(a-b);
+ // Use the ratio as a cutoff to choose 'a' or 'b'.
+ // AGtZeroF1() is a MUL.
+ c=ASatF3(n+AGtZeroF3(AF3_(dit)-r)*AF3_(1.0/255.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ // This version is 10-bit gamma 2.0.
+ // The 'c' input is {0 to 1}.
+ // Output is {0 to 1} ready for image store.
+ void FsrTepdC10F(inout AF3 c,AF1 dit){
+ AF3 n=sqrt(c);
+ n=floor(n*AF3_(1023.0))*AF3_(1.0/1023.0);
+ AF3 a=n*n;
+ AF3 b=n+AF3_(1.0/1023.0);b=b*b;
+ AF3 r=(c-b)*APrxMedRcpF3(a-b);
+ c=ASatF3(n+AGtZeroF3(AF3_(dit)-r)*AF3_(1.0/1023.0));}
+#endif
+//==============================================================================================================================
+#if defined(A_GPU)&&defined(A_HALF)
+ AH1 FsrTepdDitH(AU2 p,AU1 f){
+ AF1 x=AF1_(p.x+f);
+ AF1 y=AF1_(p.y);
+ AF1 a=AF1_((1.0+sqrt(5.0))/2.0);
+ AF1 b=AF1_(1.0/3.69);
+ x=x*a+(y*b);
+ return AH1(AFractF1(x));}
+//------------------------------------------------------------------------------------------------------------------------------
+ void FsrTepdC8H(inout AH3 c,AH1 dit){
+ AH3 n=sqrt(c);
+ n=floor(n*AH3_(255.0))*AH3_(1.0/255.0);
+ AH3 a=n*n;
+ AH3 b=n+AH3_(1.0/255.0);b=b*b;
+ AH3 r=(c-b)*APrxMedRcpH3(a-b);
+ c=ASatH3(n+AGtZeroH3(AH3_(dit)-r)*AH3_(1.0/255.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ void FsrTepdC10H(inout AH3 c,AH1 dit){
+ AH3 n=sqrt(c);
+ n=floor(n*AH3_(1023.0))*AH3_(1.0/1023.0);
+ AH3 a=n*n;
+ AH3 b=n+AH3_(1.0/1023.0);b=b*b;
+ AH3 r=(c-b)*APrxMedRcpH3(a-b);
+ c=ASatH3(n+AGtZeroH3(AH3_(dit)-r)*AH3_(1.0/1023.0));}
+//==============================================================================================================================
+ // This computes dither for positions 'p' and 'p+{8,0}'.
+ AH2 FsrTepdDitHx2(AU2 p,AU1 f){
+ AF2 x;
+ x.x=AF1_(p.x+f);
+ x.y=x.x+AF1_(8.0);
+ AF1 y=AF1_(p.y);
+ AF1 a=AF1_((1.0+sqrt(5.0))/2.0);
+ AF1 b=AF1_(1.0/3.69);
+ x=x*AF2_(a)+AF2_(y*b);
+ return AH2(AFractF2(x));}
+//------------------------------------------------------------------------------------------------------------------------------
+ void FsrTepdC8Hx2(inout AH2 cR,inout AH2 cG,inout AH2 cB,AH2 dit){
+ AH2 nR=sqrt(cR);
+ AH2 nG=sqrt(cG);
+ AH2 nB=sqrt(cB);
+ nR=floor(nR*AH2_(255.0))*AH2_(1.0/255.0);
+ nG=floor(nG*AH2_(255.0))*AH2_(1.0/255.0);
+ nB=floor(nB*AH2_(255.0))*AH2_(1.0/255.0);
+ AH2 aR=nR*nR;
+ AH2 aG=nG*nG;
+ AH2 aB=nB*nB;
+ AH2 bR=nR+AH2_(1.0/255.0);bR=bR*bR;
+ AH2 bG=nG+AH2_(1.0/255.0);bG=bG*bG;
+ AH2 bB=nB+AH2_(1.0/255.0);bB=bB*bB;
+ AH2 rR=(cR-bR)*APrxMedRcpH2(aR-bR);
+ AH2 rG=(cG-bG)*APrxMedRcpH2(aG-bG);
+ AH2 rB=(cB-bB)*APrxMedRcpH2(aB-bB);
+ cR=ASatH2(nR+AGtZeroH2(dit-rR)*AH2_(1.0/255.0));
+ cG=ASatH2(nG+AGtZeroH2(dit-rG)*AH2_(1.0/255.0));
+ cB=ASatH2(nB+AGtZeroH2(dit-rB)*AH2_(1.0/255.0));}
+//------------------------------------------------------------------------------------------------------------------------------
+ void FsrTepdC10Hx2(inout AH2 cR,inout AH2 cG,inout AH2 cB,AH2 dit){
+ AH2 nR=sqrt(cR);
+ AH2 nG=sqrt(cG);
+ AH2 nB=sqrt(cB);
+ nR=floor(nR*AH2_(1023.0))*AH2_(1.0/1023.0);
+ nG=floor(nG*AH2_(1023.0))*AH2_(1.0/1023.0);
+ nB=floor(nB*AH2_(1023.0))*AH2_(1.0/1023.0);
+ AH2 aR=nR*nR;
+ AH2 aG=nG*nG;
+ AH2 aB=nB*nB;
+ AH2 bR=nR+AH2_(1.0/1023.0);bR=bR*bR;
+ AH2 bG=nG+AH2_(1.0/1023.0);bG=bG*bG;
+ AH2 bB=nB+AH2_(1.0/1023.0);bB=bB*bB;
+ AH2 rR=(cR-bR)*APrxMedRcpH2(aR-bR);
+ AH2 rG=(cG-bG)*APrxMedRcpH2(aG-bG);
+ AH2 rB=(cB-bB)*APrxMedRcpH2(aB-bB);
+ cR=ASatH2(nR+AGtZeroH2(dit-rR)*AH2_(1.0/1023.0));
+ cG=ASatH2(nG+AGtZeroH2(dit-rG)*AH2_(1.0/1023.0));
+ cB=ASatH2(nB+AGtZeroH2(dit-rB)*AH2_(1.0/1023.0));}
+#endif
+
+
+void CurrFilter(AU2 pos)
+{
+ AF3 c;
+ FsrRcasF(c.r, c.g, c.b, pos, con0);
+ imageStore(imgOutput, ASU2(pos), AF4(c, 1));
+}
+
+void main() {
+ FsrRcasCon(con0, sharpening_data);
+
+ AU2 gxy = ARmp8x8(gl_LocalInvocationID.x) + AU2(gl_WorkGroupID.x << 4u, gl_WorkGroupID.y << 4u);
+ CurrFilter(gxy);
+ gxy.x += 8u;
+ CurrFilter(gxy);
+ gxy.y += 8u;
+ CurrFilter(gxy);
+ gxy.x -= 8u;
+ CurrFilter(gxy);
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrSharpening.spv b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrSharpening.spv
new file mode 100644
index 00000000..b2e30e1f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/FsrSharpening.spv
Binary files differ
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/Fxaa.glsl b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/Fxaa.glsl
new file mode 100644
index 00000000..f197c64c
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/Fxaa.glsl
@@ -0,0 +1,1177 @@
+/*============================================================================
+
+
+ NVIDIA FXAA 3.11 by TIMOTHY LOTTES
+
+
+------------------------------------------------------------------------------
+COPYRIGHT (C) 2010, 2011 NVIDIA CORPORATION. ALL RIGHTS RESERVED.
+------------------------------------------------------------------------------
+TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED
+*AS IS* AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA
+OR ITS SUPPLIERS BE LIABLE FOR ANY SPECIAL, INCIDENTAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR
+LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION,
+OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR INABILITY TO USE
+THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+------------------------------------------------------------------------------
+ INTEGRATION CHECKLIST
+------------------------------------------------------------------------------
+(1.)
+In the shader source, setup defines for the desired configuration.
+When providing multiple shaders (for different presets),
+simply setup the defines differently in multiple files.
+Example,
+
+ #define FXAA_PC 1
+ #define FXAA_HLSL_5 1
+ #define FXAA_QUALITY_PRESET 12
+
+Or,
+
+ #define FXAA_360 1
+
+Or,
+
+ #define FXAA_PS3 1
+
+Etc.
+
+(2.)
+Then include this file,
+
+ #include "Fxaa3_11.h"
+
+(3.)
+Then call the FXAA pixel shader from within your desired shader.
+Look at the FXAA Quality FxaaPixelShader() for docs on inputs.
+As for FXAA 3.11 all inputs for all shaders are the same
+to enable easy porting between platforms.
+
+ return FxaaPixelShader(...);
+
+(4.)
+Insure pass prior to FXAA outputs RGBL (see next section).
+Or use,
+
+ #define FXAA_GREEN_AS_LUMA 1
+
+(5.)
+Setup engine to provide the following constants
+which are used in the FxaaPixelShader() inputs,
+
+ FxaaFloat2 fxaaQualityRcpFrame,
+ FxaaFloat4 fxaaConsoleRcpFrameOpt,
+ FxaaFloat4 fxaaConsoleRcpFrameOpt2,
+ FxaaFloat4 fxaaConsole360RcpFrameOpt2,
+ FxaaFloat fxaaQualitySubpix,
+ FxaaFloat fxaaQualityEdgeThreshold,
+ FxaaFloat fxaaQualityEdgeThresholdMin,
+ FxaaFloat fxaaConsoleEdgeSharpness,
+ FxaaFloat fxaaConsoleEdgeThreshold,
+ FxaaFloat fxaaConsoleEdgeThresholdMin,
+ FxaaFloat4 fxaaConsole360ConstDir
+
+Look at the FXAA Quality FxaaPixelShader() for docs on inputs.
+
+(6.)
+Have FXAA vertex shader run as a full screen triangle,
+and output "pos" and "fxaaConsolePosPos"
+such that inputs in the pixel shader provide,
+
+ // {xy} = center of pixel
+ FxaaFloat2 pos,
+
+ // {xy_} = upper left of pixel
+ // {_zw} = lower right of pixel
+ FxaaFloat4 fxaaConsolePosPos,
+
+(7.)
+Insure the texture sampler(s) used by FXAA are set to bilinear filtering.
+
+
+------------------------------------------------------------------------------
+ INTEGRATION - RGBL AND COLORSPACE
+------------------------------------------------------------------------------
+FXAA3 requires RGBL as input unless the following is set,
+
+ #define FXAA_GREEN_AS_LUMA 1
+
+In which case the engine uses green in place of luma,
+and requires RGB input is in a non-linear colorspace.
+
+RGB should be LDR (low dynamic range).
+Specifically do FXAA after tonemapping.
+
+RGB data as returned by a texture fetch can be non-linear,
+or linear when FXAA_GREEN_AS_LUMA is not set.
+Note an "sRGB format" texture counts as linear,
+because the result of a texture fetch is linear data.
+Regular "RGBA8" textures in the sRGB colorspace are non-linear.
+
+If FXAA_GREEN_AS_LUMA is not set,
+luma must be stored in the alpha channel prior to running FXAA.
+This luma should be in a perceptual space (could be gamma 2.0).
+Example pass before FXAA where output is gamma 2.0 encoded,
+
+ color.rgb = ToneMap(color.rgb); // linear color output
+ color.rgb = sqrt(color.rgb); // gamma 2.0 color output
+ return color;
+
+To use FXAA,
+
+ color.rgb = ToneMap(color.rgb); // linear color output
+ color.rgb = sqrt(color.rgb); // gamma 2.0 color output
+ color.a = dot(color.rgb, FxaaFloat3(0.299, 0.587, 0.114)); // compute luma
+ return color;
+
+Another example where output is linear encoded,
+say for instance writing to an sRGB formatted render target,
+where the render target does the conversion back to sRGB after blending,
+
+ color.rgb = ToneMap(color.rgb); // linear color output
+ return color;
+
+To use FXAA,
+
+ color.rgb = ToneMap(color.rgb); // linear color output
+ color.a = sqrt(dot(color.rgb, FxaaFloat3(0.299, 0.587, 0.114))); // compute luma
+ return color;
+
+Getting luma correct is required for the algorithm to work correctly.
+
+
+------------------------------------------------------------------------------
+ BEING LINEARLY CORRECT?
+------------------------------------------------------------------------------
+Applying FXAA to a framebuffer with linear RGB color will look worse.
+This is very counterintuitive, but happens to be true in this case.
+The reason is that dithering artifacts will be more visible
+in a linear colorspace.
+
+
+------------------------------------------------------------------------------
+ COMPLEX INTEGRATION
+------------------------------------------------------------------------------
+Q. What if the engine is blending into RGB before wanting to run FXAA?
+
+A. In the last opaque pass prior to FXAA,
+ have the pass write out luma into alpha.
+ Then blend into RGB only.
+ FXAA should be able to run ok
+   assuming the blending pass did not add any aliasing.
+ This should be the common case for particles and common blending passes.
+
+A. Or use FXAA_GREEN_AS_LUMA.
+
+============================================================================*/
+
+#version 430 core
+
+layout(local_size_x = 16, local_size_y = 16) in;
+layout(rgba8, binding = 0, set = 3) uniform image2D imgOutput;
+
+layout(binding = 1, set = 2) uniform sampler2D inputImage;
+layout(binding = 2) uniform invResolution
+{
+ vec2 invResolution_data;
+};
+
+#define FXAA_QUALITY_PRESET 12
+#define FXAA_GREEN_AS_LUMA 1
+#define FXAA_PC 1
+#define FXAA_GLSL_130 1
+
+
+/*============================================================================
+
+ INTEGRATION KNOBS
+
+/*==========================================================================*/
+#ifndef FXAA_PC
+ //
+ // FXAA Quality
+ // The high quality PC algorithm.
+ //
+ #define FXAA_PC 0
+#endif
+/*--------------------------------------------------------------------------*/
+#ifndef FXAA_GLSL_120
+ #define FXAA_GLSL_120 0
+#endif
+/*--------------------------------------------------------------------------*/
+#ifndef FXAA_GLSL_130
+ #define FXAA_GLSL_130 0
+#endif
+/*==========================================================================*/
+#ifndef FXAA_GREEN_AS_LUMA
+ //
+ // For those using non-linear color,
+ // and either not able to get luma in alpha, or not wanting to,
+ // this enables FXAA to run using green as a proxy for luma.
+ // So with this enabled, no need to pack luma in alpha.
+ //
+ // This will turn off AA on anything which lacks some amount of green.
+ // Pure red and blue or combination of only R and B, will get no AA.
+ //
+ // Might want to lower the settings for both,
+ // fxaaConsoleEdgeThresholdMin
+ // fxaaQualityEdgeThresholdMin
+ // In order to insure AA does not get turned off on colors
+ // which contain a minor amount of green.
+ //
+ // 1 = On.
+ // 0 = Off.
+ //
+ #define FXAA_GREEN_AS_LUMA 0
+#endif
+/*--------------------------------------------------------------------------*/
+#ifndef FXAA_EARLY_EXIT
+ //
+ // Controls algorithm's early exit path.
+ // On PS3 turning this ON adds 2 cycles to the shader.
+ // On 360 turning this OFF adds 10ths of a millisecond to the shader.
+ // Turning this off on console will result in a more blurry image.
+ // So this defaults to on.
+ //
+ // 1 = On.
+ // 0 = Off.
+ //
+ #define FXAA_EARLY_EXIT 1
+#endif
+/*--------------------------------------------------------------------------*/
+#ifndef FXAA_DISCARD
+ //
+ // Only valid for PC OpenGL currently.
+ // Probably will not work when FXAA_GREEN_AS_LUMA = 1.
+ //
+ // 1 = Use discard on pixels which don't need AA.
+ // For APIs which enable concurrent TEX+ROP from same surface.
+ // 0 = Return unchanged color on pixels which don't need AA.
+ //
+ #define FXAA_DISCARD 0
+#endif
+/*--------------------------------------------------------------------------*/
+#ifndef FXAA_FAST_PIXEL_OFFSET
+ //
+ // Used for GLSL 120 only.
+ //
+ // 1 = GL API supports fast pixel offsets
+ // 0 = do not use fast pixel offsets
+ //
+ #ifdef GL_EXT_gpu_shader4
+ #define FXAA_FAST_PIXEL_OFFSET 1
+ #endif
+ #ifdef GL_NV_gpu_shader5
+ #define FXAA_FAST_PIXEL_OFFSET 1
+ #endif
+ #ifdef GL_ARB_gpu_shader5
+ #define FXAA_FAST_PIXEL_OFFSET 1
+ #endif
+ #ifndef FXAA_FAST_PIXEL_OFFSET
+ #define FXAA_FAST_PIXEL_OFFSET 0
+ #endif
+#endif
+/*--------------------------------------------------------------------------*/
+#ifndef FXAA_GATHER4_ALPHA
+ //
+ // 1 = API supports gather4 on alpha channel.
+ // 0 = API does not support gather4 on alpha channel.
+ //
+ #if (FXAA_HLSL_5 == 1)
+ #define FXAA_GATHER4_ALPHA 1
+ #endif
+ #ifdef GL_ARB_gpu_shader5
+ #define FXAA_GATHER4_ALPHA 1
+ #endif
+ #ifdef GL_NV_gpu_shader5
+ #define FXAA_GATHER4_ALPHA 1
+ #endif
+ #ifndef FXAA_GATHER4_ALPHA
+ #define FXAA_GATHER4_ALPHA 0
+ #endif
+#endif
+
+/*============================================================================
+ FXAA QUALITY - TUNING KNOBS
+------------------------------------------------------------------------------
+NOTE the other tuning knobs are now in the shader function inputs!
+============================================================================*/
+#ifndef FXAA_QUALITY_PRESET
+ //
+ // Choose the quality preset.
+ // This needs to be compiled into the shader as it effects code.
+ // Best option to include multiple presets is to
+ // in each shader define the preset, then include this file.
+ //
+ // OPTIONS
+ // -----------------------------------------------------------------------
+ // 10 to 15 - default medium dither (10=fastest, 15=highest quality)
+ // 20 to 29 - less dither, more expensive (20=fastest, 29=highest quality)
+ // 39 - no dither, very expensive
+ //
+ // NOTES
+ // -----------------------------------------------------------------------
+  // 12 = slightly faster than FXAA 3.9 and higher edge quality (default)
+ // 13 = about same speed as FXAA 3.9 and better than 12
+ // 23 = closest to FXAA 3.9 visually and performance wise
+ // _ = the lowest digit is directly related to performance
+ // _ = the highest digit is directly related to style
+ //
+ #define FXAA_QUALITY_PRESET 12
+#endif
+
+
+/*============================================================================
+
+ FXAA QUALITY - PRESETS
+
+============================================================================*/
+
+/*============================================================================
+ FXAA QUALITY - MEDIUM DITHER PRESETS
+============================================================================*/
+#if (FXAA_QUALITY_PRESET == 10)
+ #define FXAA_QUALITY_PS 3
+ #define FXAA_QUALITY_P0 1.5
+ #define FXAA_QUALITY_P1 3.0
+ #define FXAA_QUALITY_P2 12.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 11)
+ #define FXAA_QUALITY_PS 4
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 3.0
+ #define FXAA_QUALITY_P3 12.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 12)
+ #define FXAA_QUALITY_PS 5
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 4.0
+ #define FXAA_QUALITY_P4 12.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 13)
+ #define FXAA_QUALITY_PS 6
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 4.0
+ #define FXAA_QUALITY_P5 12.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 14)
+ #define FXAA_QUALITY_PS 7
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 2.0
+ #define FXAA_QUALITY_P5 4.0
+ #define FXAA_QUALITY_P6 12.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 15)
+ #define FXAA_QUALITY_PS 8
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 2.0
+ #define FXAA_QUALITY_P5 2.0
+ #define FXAA_QUALITY_P6 4.0
+ #define FXAA_QUALITY_P7 12.0
+#endif
+
+/*============================================================================
+ FXAA QUALITY - LOW DITHER PRESETS
+============================================================================*/
+#if (FXAA_QUALITY_PRESET == 20)
+ #define FXAA_QUALITY_PS 3
+ #define FXAA_QUALITY_P0 1.5
+ #define FXAA_QUALITY_P1 2.0
+ #define FXAA_QUALITY_P2 8.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 21)
+ #define FXAA_QUALITY_PS 4
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 8.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 22)
+ #define FXAA_QUALITY_PS 5
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 8.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 23)
+ #define FXAA_QUALITY_PS 6
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 2.0
+ #define FXAA_QUALITY_P5 8.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 24)
+ #define FXAA_QUALITY_PS 7
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 2.0
+ #define FXAA_QUALITY_P5 3.0
+ #define FXAA_QUALITY_P6 8.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 25)
+ #define FXAA_QUALITY_PS 8
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 2.0
+ #define FXAA_QUALITY_P5 2.0
+ #define FXAA_QUALITY_P6 4.0
+ #define FXAA_QUALITY_P7 8.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 26)
+ #define FXAA_QUALITY_PS 9
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 2.0
+ #define FXAA_QUALITY_P5 2.0
+ #define FXAA_QUALITY_P6 2.0
+ #define FXAA_QUALITY_P7 4.0
+ #define FXAA_QUALITY_P8 8.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 27)
+ #define FXAA_QUALITY_PS 10
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 2.0
+ #define FXAA_QUALITY_P5 2.0
+ #define FXAA_QUALITY_P6 2.0
+ #define FXAA_QUALITY_P7 2.0
+ #define FXAA_QUALITY_P8 4.0
+ #define FXAA_QUALITY_P9 8.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 28)
+ #define FXAA_QUALITY_PS 11
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 2.0
+ #define FXAA_QUALITY_P5 2.0
+ #define FXAA_QUALITY_P6 2.0
+ #define FXAA_QUALITY_P7 2.0
+ #define FXAA_QUALITY_P8 2.0
+ #define FXAA_QUALITY_P9 4.0
+ #define FXAA_QUALITY_P10 8.0
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_QUALITY_PRESET == 29)
+ #define FXAA_QUALITY_PS 12
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.5
+ #define FXAA_QUALITY_P2 2.0
+ #define FXAA_QUALITY_P3 2.0
+ #define FXAA_QUALITY_P4 2.0
+ #define FXAA_QUALITY_P5 2.0
+ #define FXAA_QUALITY_P6 2.0
+ #define FXAA_QUALITY_P7 2.0
+ #define FXAA_QUALITY_P8 2.0
+ #define FXAA_QUALITY_P9 2.0
+ #define FXAA_QUALITY_P10 4.0
+ #define FXAA_QUALITY_P11 8.0
+#endif
+
+/*============================================================================
+ FXAA QUALITY - EXTREME QUALITY
+============================================================================*/
+#if (FXAA_QUALITY_PRESET == 39)
+ #define FXAA_QUALITY_PS 12
+ #define FXAA_QUALITY_P0 1.0
+ #define FXAA_QUALITY_P1 1.0
+ #define FXAA_QUALITY_P2 1.0
+ #define FXAA_QUALITY_P3 1.0
+ #define FXAA_QUALITY_P4 1.0
+ #define FXAA_QUALITY_P5 1.5
+ #define FXAA_QUALITY_P6 2.0
+ #define FXAA_QUALITY_P7 2.0
+ #define FXAA_QUALITY_P8 2.0
+ #define FXAA_QUALITY_P9 2.0
+ #define FXAA_QUALITY_P10 4.0
+ #define FXAA_QUALITY_P11 8.0
+#endif
+
+
+
+/*============================================================================
+
+ API PORTING
+
+============================================================================*/
+#if (FXAA_GLSL_120 == 1) || (FXAA_GLSL_130 == 1)
+ #define FxaaBool bool
+ #define FxaaDiscard discard
+ #define FxaaFloat float
+ #define FxaaFloat2 vec2
+ #define FxaaFloat3 vec3
+ #define FxaaFloat4 vec4
+ #define FxaaHalf float
+ #define FxaaHalf2 vec2
+ #define FxaaHalf3 vec3
+ #define FxaaHalf4 vec4
+ #define FxaaInt2 ivec2
+ #define FxaaSat(x) clamp(x, 0.0, 1.0)
+ #define FxaaTex sampler2D
+#else
+ #define FxaaBool bool
+ #define FxaaDiscard clip(-1)
+ #define FxaaFloat float
+ #define FxaaFloat2 float2
+ #define FxaaFloat3 float3
+ #define FxaaFloat4 float4
+ #define FxaaHalf half
+ #define FxaaHalf2 half2
+ #define FxaaHalf3 half3
+ #define FxaaHalf4 half4
+ #define FxaaSat(x) saturate(x)
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_GLSL_120 == 1)
+ // Requires,
+ // #version 120
+ // And at least,
+ // #extension GL_EXT_gpu_shader4 : enable
+ // (or set FXAA_FAST_PIXEL_OFFSET 1 to work like DX9)
+ #define FxaaTexTop(t, p) texture2DLod(t, p, 0.0)
+ #if (FXAA_FAST_PIXEL_OFFSET == 1)
+ #define FxaaTexOff(t, p, o, r) texture2DLodOffset(t, p, 0.0, o)
+ #else
+ #define FxaaTexOff(t, p, o, r) texture2DLod(t, p + (o * r), 0.0)
+ #endif
+ #if (FXAA_GATHER4_ALPHA == 1)
+ // use #extension GL_ARB_gpu_shader5 : enable
+ #define FxaaTexAlpha4(t, p) textureGather(t, p, 3)
+ #define FxaaTexOffAlpha4(t, p, o) textureGatherOffset(t, p, o, 3)
+ #define FxaaTexGreen4(t, p) textureGather(t, p, 1)
+ #define FxaaTexOffGreen4(t, p, o) textureGatherOffset(t, p, o, 1)
+ #endif
+#endif
+/*--------------------------------------------------------------------------*/
+#if (FXAA_GLSL_130 == 1)
+ // Requires "#version 130" or better
+ #define FxaaTexTop(t, p) textureLod(t, p, 0.0)
+ #define FxaaTexOff(t, p, o, r) textureLodOffset(t, p, 0.0, o)
+ #if (FXAA_GATHER4_ALPHA == 1)
+ // use #extension GL_ARB_gpu_shader5 : enable
+ #define FxaaTexAlpha4(t, p) textureGather(t, p, 3)
+ #define FxaaTexOffAlpha4(t, p, o) textureGatherOffset(t, p, o, 3)
+ #define FxaaTexGreen4(t, p) textureGather(t, p, 1)
+ #define FxaaTexOffGreen4(t, p, o) textureGatherOffset(t, p, o, 1)
+ #endif
+#endif
+
+
+/*============================================================================
+ GREEN AS LUMA OPTION SUPPORT FUNCTION
+============================================================================*/
+#if (FXAA_GREEN_AS_LUMA == 0)
+ FxaaFloat FxaaLuma(FxaaFloat4 rgba) { return rgba.w; }
+#else
+ FxaaFloat FxaaLuma(FxaaFloat4 rgba) { return rgba.y; }
+#endif
+
+
+
+
+/*============================================================================
+
+ FXAA3 QUALITY - PC
+
+============================================================================*/
+#if (FXAA_PC == 1)
+/*--------------------------------------------------------------------------*/
+FxaaFloat4 FxaaPixelShader(
+ //
+ // Use noperspective interpolation here (turn off perspective interpolation).
+ // {xy} = center of pixel
+ FxaaFloat2 pos,
+ //
+ // Used only for FXAA Console, and not used on the 360 version.
+ // Use noperspective interpolation here (turn off perspective interpolation).
+ // {xy_} = upper left of pixel
+ // {_zw} = lower right of pixel
+ FxaaFloat4 fxaaConsolePosPos,
+ //
+ // Input color texture.
+ // {rgb_} = color in linear or perceptual color space
+ // if (FXAA_GREEN_AS_LUMA == 0)
+ // {__a} = luma in perceptual color space (not linear)
+ FxaaTex tex,
+ //
+ // Only used on the optimized 360 version of FXAA Console.
+ // For everything but 360, just use the same input here as for "tex".
+ // For 360, same texture, just alias with a 2nd sampler.
+ // This sampler needs to have an exponent bias of -1.
+ FxaaTex fxaaConsole360TexExpBiasNegOne,
+ //
+ // Only used on the optimized 360 version of FXAA Console.
+ // For everything but 360, just use the same input here as for "tex".
+    // For 360, same texture, just alias with a 3rd sampler.
+ // This sampler needs to have an exponent bias of -2.
+ FxaaTex fxaaConsole360TexExpBiasNegTwo,
+ //
+ // Only used on FXAA Quality.
+ // This must be from a constant/uniform.
+ // {x_} = 1.0/screenWidthInPixels
+ // {_y} = 1.0/screenHeightInPixels
+ FxaaFloat2 fxaaQualityRcpFrame,
+ //
+ // Only used on FXAA Console.
+ // This must be from a constant/uniform.
+ // This effects sub-pixel AA quality and inversely sharpness.
+ // Where N ranges between,
+ // N = 0.50 (default)
+ // N = 0.33 (sharper)
+ // {x__} = -N/screenWidthInPixels
+ // {_y_} = -N/screenHeightInPixels
+ // {_z_} = N/screenWidthInPixels
+ // {__w} = N/screenHeightInPixels
+ FxaaFloat4 fxaaConsoleRcpFrameOpt,
+ //
+ // Only used on FXAA Console.
+ // Not used on 360, but used on PS3 and PC.
+ // This must be from a constant/uniform.
+ // {x__} = -2.0/screenWidthInPixels
+ // {_y_} = -2.0/screenHeightInPixels
+ // {_z_} = 2.0/screenWidthInPixels
+ // {__w} = 2.0/screenHeightInPixels
+ FxaaFloat4 fxaaConsoleRcpFrameOpt2,
+ //
+ // Only used on FXAA Console.
+ // Only used on 360 in place of fxaaConsoleRcpFrameOpt2.
+ // This must be from a constant/uniform.
+ // {x__} = 8.0/screenWidthInPixels
+ // {_y_} = 8.0/screenHeightInPixels
+ // {_z_} = -4.0/screenWidthInPixels
+ // {__w} = -4.0/screenHeightInPixels
+ FxaaFloat4 fxaaConsole360RcpFrameOpt2,
+ //
+ // Only used on FXAA Quality.
+ // This used to be the FXAA_QUALITY_SUBPIX define.
+ // It is here now to allow easier tuning.
+ // Choose the amount of sub-pixel aliasing removal.
+ // This can effect sharpness.
+ // 1.00 - upper limit (softer)
+ // 0.75 - default amount of filtering
+ // 0.50 - lower limit (sharper, less sub-pixel aliasing removal)
+ // 0.25 - almost off
+ // 0.00 - completely off
+ FxaaFloat fxaaQualitySubpix,
+ //
+ // Only used on FXAA Quality.
+ // This used to be the FXAA_QUALITY_EDGE_THRESHOLD define.
+ // It is here now to allow easier tuning.
+ // The minimum amount of local contrast required to apply algorithm.
+ // 0.333 - too little (faster)
+ // 0.250 - low quality
+ // 0.166 - default
+ // 0.125 - high quality
+ // 0.063 - overkill (slower)
+ FxaaFloat fxaaQualityEdgeThreshold,
+ //
+ // Only used on FXAA Quality.
+ // This used to be the FXAA_QUALITY_EDGE_THRESHOLD_MIN define.
+ // It is here now to allow easier tuning.
+ // Trims the algorithm from processing darks.
+ // 0.0833 - upper limit (default, the start of visible unfiltered edges)
+ // 0.0625 - high quality (faster)
+ // 0.0312 - visible limit (slower)
+ // Special notes when using FXAA_GREEN_AS_LUMA,
+ // Likely want to set this to zero.
+ // As colors that are mostly not-green
+ // will appear very dark in the green channel!
+ // Tune by looking at mostly non-green content,
+ // then start at zero and increase until aliasing is a problem.
+ FxaaFloat fxaaQualityEdgeThresholdMin,
+ //
+ // Only used on FXAA Console.
+ // This used to be the FXAA_CONSOLE_EDGE_SHARPNESS define.
+ // It is here now to allow easier tuning.
+ // This does not effect PS3, as this needs to be compiled in.
+ // Use FXAA_CONSOLE_PS3_EDGE_SHARPNESS for PS3.
+ // Due to the PS3 being ALU bound,
+ // there are only three safe values here: 2 and 4 and 8.
+ // These options use the shaders ability to a free *|/ by 2|4|8.
+ // For all other platforms can be a non-power of two.
+ // 8.0 is sharper (default!!!)
+ // 4.0 is softer
+ // 2.0 is really soft (good only for vector graphics inputs)
+ FxaaFloat fxaaConsoleEdgeSharpness,
+ //
+ // Only used on FXAA Console.
+ // This used to be the FXAA_CONSOLE_EDGE_THRESHOLD define.
+ // It is here now to allow easier tuning.
+ // This does not effect PS3, as this needs to be compiled in.
+ // Use FXAA_CONSOLE_PS3_EDGE_THRESHOLD for PS3.
+ // Due to the PS3 being ALU bound,
+ // there are only two safe values here: 1/4 and 1/8.
+ // These options use the shaders ability to a free *|/ by 2|4|8.
+ // The console setting has a different mapping than the quality setting.
+ // Other platforms can use other values.
+ // 0.125 leaves less aliasing, but is softer (default!!!)
+ // 0.25 leaves more aliasing, and is sharper
+ FxaaFloat fxaaConsoleEdgeThreshold,
+ //
+ // Only used on FXAA Console.
+ // This used to be the FXAA_CONSOLE_EDGE_THRESHOLD_MIN define.
+ // It is here now to allow easier tuning.
+ // Trims the algorithm from processing darks.
+ // The console setting has a different mapping than the quality setting.
+ // This only applies when FXAA_EARLY_EXIT is 1.
+ // This does not apply to PS3,
+ // PS3 was simplified to avoid more shader instructions.
+ // 0.06 - faster but more aliasing in darks
+ // 0.05 - default
+ // 0.04 - slower and less aliasing in darks
+ // Special notes when using FXAA_GREEN_AS_LUMA,
+ // Likely want to set this to zero.
+ // As colors that are mostly not-green
+ // will appear very dark in the green channel!
+ // Tune by looking at mostly non-green content,
+ // then start at zero and increase until aliasing is a problem.
+ FxaaFloat fxaaConsoleEdgeThresholdMin,
+ //
+ // Extra constants for 360 FXAA Console only.
+ // Use zeros or anything else for other platforms.
+ // These must be in physical constant registers and NOT immediates.
+ // Immediates will result in compiler un-optimizing.
+ // {xyzw} = float4(1.0, -1.0, 0.25, -0.25)
+ FxaaFloat4 fxaaConsole360ConstDir
+) {
+/*--------------------------------------------------------------------------*/
+ FxaaFloat2 posM;
+ posM.x = pos.x;
+ posM.y = pos.y;
+ #if (FXAA_GATHER4_ALPHA == 1)
+ #if (FXAA_DISCARD == 0)
+ FxaaFloat4 rgbyM = FxaaTexTop(tex, posM);
+ #if (FXAA_GREEN_AS_LUMA == 0)
+ #define lumaM rgbyM.w
+ #else
+ #define lumaM rgbyM.y
+ #endif
+ #endif
+ #if (FXAA_GREEN_AS_LUMA == 0)
+ FxaaFloat4 luma4A = FxaaTexAlpha4(tex, posM);
+ FxaaFloat4 luma4B = FxaaTexOffAlpha4(tex, posM, FxaaInt2(-1, -1));
+ #else
+ FxaaFloat4 luma4A = FxaaTexGreen4(tex, posM);
+ FxaaFloat4 luma4B = FxaaTexOffGreen4(tex, posM, FxaaInt2(-1, -1));
+ #endif
+ #if (FXAA_DISCARD == 1)
+ #define lumaM luma4A.w
+ #endif
+ #define lumaE luma4A.z
+ #define lumaS luma4A.x
+ #define lumaSE luma4A.y
+ #define lumaNW luma4B.w
+ #define lumaN luma4B.z
+ #define lumaW luma4B.x
+ #else
+ FxaaFloat4 rgbyM = FxaaTexTop(tex, posM);
+ #if (FXAA_GREEN_AS_LUMA == 0)
+ #define lumaM rgbyM.w
+ #else
+ #define lumaM rgbyM.y
+ #endif
+ FxaaFloat lumaS = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 0, 1), fxaaQualityRcpFrame.xy));
+ FxaaFloat lumaE = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 1, 0), fxaaQualityRcpFrame.xy));
+ FxaaFloat lumaN = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 0,-1), fxaaQualityRcpFrame.xy));
+ FxaaFloat lumaW = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(-1, 0), fxaaQualityRcpFrame.xy));
+ #endif
+/*--------------------------------------------------------------------------*/
+ FxaaFloat maxSM = max(lumaS, lumaM);
+ FxaaFloat minSM = min(lumaS, lumaM);
+ FxaaFloat maxESM = max(lumaE, maxSM);
+ FxaaFloat minESM = min(lumaE, minSM);
+ FxaaFloat maxWN = max(lumaN, lumaW);
+ FxaaFloat minWN = min(lumaN, lumaW);
+ FxaaFloat rangeMax = max(maxWN, maxESM);
+ FxaaFloat rangeMin = min(minWN, minESM);
+ FxaaFloat rangeMaxScaled = rangeMax * fxaaQualityEdgeThreshold;
+ FxaaFloat range = rangeMax - rangeMin;
+ FxaaFloat rangeMaxClamped = max(fxaaQualityEdgeThresholdMin, rangeMaxScaled);
+ FxaaBool earlyExit = range < rangeMaxClamped;
+/*--------------------------------------------------------------------------*/
+ if(earlyExit)
+ #if (FXAA_DISCARD == 1)
+ FxaaDiscard;
+ #else
+ return rgbyM;
+ #endif
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_GATHER4_ALPHA == 0)
+ FxaaFloat lumaNW = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(-1,-1), fxaaQualityRcpFrame.xy));
+ FxaaFloat lumaSE = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 1, 1), fxaaQualityRcpFrame.xy));
+ FxaaFloat lumaNE = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2( 1,-1), fxaaQualityRcpFrame.xy));
+ FxaaFloat lumaSW = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(-1, 1), fxaaQualityRcpFrame.xy));
+ #else
+ FxaaFloat lumaNE = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(1, -1), fxaaQualityRcpFrame.xy));
+ FxaaFloat lumaSW = FxaaLuma(FxaaTexOff(tex, posM, FxaaInt2(-1, 1), fxaaQualityRcpFrame.xy));
+ #endif
+/*--------------------------------------------------------------------------*/
+ FxaaFloat lumaNS = lumaN + lumaS;
+ FxaaFloat lumaWE = lumaW + lumaE;
+ FxaaFloat subpixRcpRange = 1.0/range;
+ FxaaFloat subpixNSWE = lumaNS + lumaWE;
+ FxaaFloat edgeHorz1 = (-2.0 * lumaM) + lumaNS;
+ FxaaFloat edgeVert1 = (-2.0 * lumaM) + lumaWE;
+/*--------------------------------------------------------------------------*/
+ FxaaFloat lumaNESE = lumaNE + lumaSE;
+ FxaaFloat lumaNWNE = lumaNW + lumaNE;
+ FxaaFloat edgeHorz2 = (-2.0 * lumaE) + lumaNESE;
+ FxaaFloat edgeVert2 = (-2.0 * lumaN) + lumaNWNE;
+/*--------------------------------------------------------------------------*/
+ FxaaFloat lumaNWSW = lumaNW + lumaSW;
+ FxaaFloat lumaSWSE = lumaSW + lumaSE;
+ FxaaFloat edgeHorz4 = (abs(edgeHorz1) * 2.0) + abs(edgeHorz2);
+ FxaaFloat edgeVert4 = (abs(edgeVert1) * 2.0) + abs(edgeVert2);
+ FxaaFloat edgeHorz3 = (-2.0 * lumaW) + lumaNWSW;
+ FxaaFloat edgeVert3 = (-2.0 * lumaS) + lumaSWSE;
+ FxaaFloat edgeHorz = abs(edgeHorz3) + edgeHorz4;
+ FxaaFloat edgeVert = abs(edgeVert3) + edgeVert4;
+/*--------------------------------------------------------------------------*/
+ FxaaFloat subpixNWSWNESE = lumaNWSW + lumaNESE;
+ FxaaFloat lengthSign = fxaaQualityRcpFrame.x;
+ FxaaBool horzSpan = edgeHorz >= edgeVert;
+ FxaaFloat subpixA = subpixNSWE * 2.0 + subpixNWSWNESE;
+/*--------------------------------------------------------------------------*/
+ if(!horzSpan) lumaN = lumaW;
+ if(!horzSpan) lumaS = lumaE;
+ if(horzSpan) lengthSign = fxaaQualityRcpFrame.y;
+ FxaaFloat subpixB = (subpixA * (1.0/12.0)) - lumaM;
+/*--------------------------------------------------------------------------*/
+ FxaaFloat gradientN = lumaN - lumaM;
+ FxaaFloat gradientS = lumaS - lumaM;
+ FxaaFloat lumaNN = lumaN + lumaM;
+ FxaaFloat lumaSS = lumaS + lumaM;
+ FxaaBool pairN = abs(gradientN) >= abs(gradientS);
+ FxaaFloat gradient = max(abs(gradientN), abs(gradientS));
+ if(pairN) lengthSign = -lengthSign;
+ FxaaFloat subpixC = FxaaSat(abs(subpixB) * subpixRcpRange);
+/*--------------------------------------------------------------------------*/
+ FxaaFloat2 posB;
+ posB.x = posM.x;
+ posB.y = posM.y;
+ FxaaFloat2 offNP;
+ offNP.x = (!horzSpan) ? 0.0 : fxaaQualityRcpFrame.x;
+ offNP.y = ( horzSpan) ? 0.0 : fxaaQualityRcpFrame.y;
+ if(!horzSpan) posB.x += lengthSign * 0.5;
+ if( horzSpan) posB.y += lengthSign * 0.5;
+/*--------------------------------------------------------------------------*/
+ FxaaFloat2 posN;
+ posN.x = posB.x - offNP.x * FXAA_QUALITY_P0;
+ posN.y = posB.y - offNP.y * FXAA_QUALITY_P0;
+ FxaaFloat2 posP;
+ posP.x = posB.x + offNP.x * FXAA_QUALITY_P0;
+ posP.y = posB.y + offNP.y * FXAA_QUALITY_P0;
+ FxaaFloat subpixD = ((-2.0)*subpixC) + 3.0;
+ FxaaFloat lumaEndN = FxaaLuma(FxaaTexTop(tex, posN));
+ FxaaFloat subpixE = subpixC * subpixC;
+ FxaaFloat lumaEndP = FxaaLuma(FxaaTexTop(tex, posP));
+/*--------------------------------------------------------------------------*/
+ if(!pairN) lumaNN = lumaSS;
+ FxaaFloat gradientScaled = gradient * 1.0/4.0;
+ FxaaFloat lumaMM = lumaM - lumaNN * 0.5;
+ FxaaFloat subpixF = subpixD * subpixE;
+ FxaaBool lumaMLTZero = lumaMM < 0.0;
+/*--------------------------------------------------------------------------*/
+ lumaEndN -= lumaNN * 0.5;
+ lumaEndP -= lumaNN * 0.5;
+ FxaaBool doneN = abs(lumaEndN) >= gradientScaled;
+ FxaaBool doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P1;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P1;
+ FxaaBool doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P1;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P1;
+/*--------------------------------------------------------------------------*/
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P2;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P2;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P2;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P2;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 3)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P3;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P3;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P3;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P3;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 4)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P4;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P4;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P4;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P4;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 5)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P5;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P5;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P5;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P5;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 6)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P6;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P6;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P6;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P6;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 7)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P7;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P7;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P7;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P7;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 8)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P8;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P8;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P8;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P8;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 9)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P9;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P9;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P9;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P9;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 10)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P10;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P10;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P10;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P10;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 11)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P11;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P11;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P11;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P11;
+/*--------------------------------------------------------------------------*/
+ #if (FXAA_QUALITY_PS > 12)
+ if(doneNP) {
+ if(!doneN) lumaEndN = FxaaLuma(FxaaTexTop(tex, posN.xy));
+ if(!doneP) lumaEndP = FxaaLuma(FxaaTexTop(tex, posP.xy));
+ if(!doneN) lumaEndN = lumaEndN - lumaNN * 0.5;
+ if(!doneP) lumaEndP = lumaEndP - lumaNN * 0.5;
+ doneN = abs(lumaEndN) >= gradientScaled;
+ doneP = abs(lumaEndP) >= gradientScaled;
+ if(!doneN) posN.x -= offNP.x * FXAA_QUALITY_P12;
+ if(!doneN) posN.y -= offNP.y * FXAA_QUALITY_P12;
+ doneNP = (!doneN) || (!doneP);
+ if(!doneP) posP.x += offNP.x * FXAA_QUALITY_P12;
+ if(!doneP) posP.y += offNP.y * FXAA_QUALITY_P12;
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+ #endif
+/*--------------------------------------------------------------------------*/
+ }
+/*--------------------------------------------------------------------------*/
+ FxaaFloat dstN = posM.x - posN.x;
+ FxaaFloat dstP = posP.x - posM.x;
+ if(!horzSpan) dstN = posM.y - posN.y;
+ if(!horzSpan) dstP = posP.y - posM.y;
+/*--------------------------------------------------------------------------*/
+ FxaaBool goodSpanN = (lumaEndN < 0.0) != lumaMLTZero;
+ FxaaFloat spanLength = (dstP + dstN);
+ FxaaBool goodSpanP = (lumaEndP < 0.0) != lumaMLTZero;
+ FxaaFloat spanLengthRcp = 1.0/spanLength;
+/*--------------------------------------------------------------------------*/
+ FxaaBool directionN = dstN < dstP;
+ FxaaFloat dst = min(dstN, dstP);
+ FxaaBool goodSpan = directionN ? goodSpanN : goodSpanP;
+ FxaaFloat subpixG = subpixF * subpixF;
+ FxaaFloat pixelOffset = (dst * (-spanLengthRcp)) + 0.5;
+ FxaaFloat subpixH = subpixG * fxaaQualitySubpix;
+/*--------------------------------------------------------------------------*/
+ FxaaFloat pixelOffsetGood = goodSpan ? pixelOffset : 0.0;
+ FxaaFloat pixelOffsetSubpix = max(pixelOffsetGood, subpixH);
+ if(!horzSpan) posM.x += pixelOffsetSubpix * lengthSign;
+ if( horzSpan) posM.y += pixelOffsetSubpix * lengthSign;
+ #if (FXAA_DISCARD == 1)
+ return FxaaTexTop(tex, posM);
+ #else
+ return FxaaFloat4(FxaaTexTop(tex, posM).xyz, lumaM);
+ #endif
+}
+/*==========================================================================*/
+#endif
+
+vec4 mainImage(vec2 fragCoord)
+{
+ vec2 rcpFrame = 1./invResolution_data.xy;
+ vec2 uv2 = fragCoord.xy / invResolution_data.xy;
+
+ float fxaaQualitySubpix = 0.75; // [0..1], default 0.75
+ float fxaaQualityEdgeThreshold = 0.166; // [0.125..0.33], default 0.166
+ float fxaaQualityEdgeThresholdMin = 0.02;//0.0625; // ?
+ vec4 dummy4 = vec4(0.0,0.0,0.0,0.0);
+ float dummy1 = 0.0;
+
+ vec4 col = FxaaPixelShader(uv2, dummy4,
+ inputImage, inputImage, inputImage,
+ rcpFrame, dummy4, dummy4, dummy4,
+ fxaaQualitySubpix, fxaaQualityEdgeThreshold,
+ fxaaQualityEdgeThresholdMin,
+ dummy1, dummy1, dummy1, dummy4);
+
+ vec4 fragColor = vec4( col.xyz, 1. );
+
+ return fragColor;
+}
+
+void main()
+{
+ ivec2 loc = ivec2(gl_GlobalInvocationID.x * 4, gl_GlobalInvocationID.y * 4);
+ for(int i = 0; i < 4; i++)
+ {
+ for(int j = 0; j < 4; j++)
+ {
+ ivec2 texelCoord = ivec2(loc.x + i, loc.y + j);
+ vec4 outColor = mainImage(texelCoord + vec2(0.5));
+ imageStore(imgOutput, texelCoord, outColor);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/Fxaa.spv b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/Fxaa.spv
new file mode 100644
index 00000000..b466bcb6
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/Fxaa.spv
Binary files differ
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaBlend.glsl b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaBlend.glsl
new file mode 100644
index 00000000..a518cf25
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaBlend.glsl
@@ -0,0 +1,1404 @@
+#version 430 core
+#define SMAA_GLSL_4 1
+
+layout (constant_id = 0) const int SMAA_PRESET_LOW = 0;
+layout (constant_id = 1) const int SMAA_PRESET_MEDIUM = 0;
+layout (constant_id = 2) const int SMAA_PRESET_HIGH = 0;
+layout (constant_id = 3) const int SMAA_PRESET_ULTRA = 0;
+layout (constant_id = 4) const float METRIC_WIDTH = 1920.0;
+layout (constant_id = 5) const float METRIC_HEIGHT = 1080.0;
+
+#define SMAA_RT_METRICS float4(1.0 / METRIC_WIDTH, 1.0 / METRIC_HEIGHT, METRIC_WIDTH, METRIC_HEIGHT)
+
+layout (local_size_x = 16, local_size_y = 16) in;
+/**
+ * Copyright (C) 2013 Jorge Jimenez (jorge@iryoku.com)
+ * Copyright (C) 2013 Jose I. Echevarria (joseignacioechevarria@gmail.com)
+ * Copyright (C) 2013 Belen Masia (bmasia@unizar.es)
+ * Copyright (C) 2013 Fernando Navarro (fernandn@microsoft.com)
+ * Copyright (C) 2013 Diego Gutierrez (diegog@unizar.es)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished to
+ * do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software. As clarification, there
+ * is no requirement that the copyright notice and permission be included in
+ * binary distributions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+/**
+ * _______ ___ ___ ___ ___
+ * / || \/ | / \ / \
+ * | (---- | \ / | / ^ \ / ^ \
+ * \ \ | |\/| | / /_\ \ / /_\ \
+ * ----) | | | | | / _____ \ / _____ \
+ * |_______/ |__| |__| /__/ \__\ /__/ \__\
+ *
+ * E N H A N C E D
+ * S U B P I X E L M O R P H O L O G I C A L A N T I A L I A S I N G
+ *
+ * http://www.iryoku.com/smaa/
+ *
+ * Hi, welcome aboard!
+ *
+ * Here you'll find instructions to get the shader up and running as fast as
+ * possible.
+ *
+ * IMPORTANT NOTICE: when updating, remember to update both this file and the
+ * precomputed textures! They may change from version to version.
+ *
+ * The shader has three passes, chained together as follows:
+ *
+ * |input|------------------
+ * v |
+ * [ SMAA*EdgeDetection ] |
+ * v |
+ * |edgesTex| |
+ * v |
+ * [ SMAABlendingWeightCalculation ] |
+ * v |
+ * |blendTex| |
+ * v |
+ * [ SMAANeighborhoodBlending ] <------
+ * v
+ * |output|
+ *
+ * Note that each [pass] has its own vertex and pixel shader. Remember to use
+ * oversized triangles instead of quads to avoid overshading along the
+ * diagonal.
+ *
+ * You've three edge detection methods to choose from: luma, color or depth.
+ * They represent different quality/performance and anti-aliasing/sharpness
+ * tradeoffs, so our recommendation is for you to choose the one that best
+ * suits your particular scenario:
+ *
+ * - Depth edge detection is usually the fastest but it may miss some edges.
+ *
+ * - Luma edge detection is usually more expensive than depth edge detection,
+ * but catches visible edges that depth edge detection can miss.
+ *
+ * - Color edge detection is usually the most expensive one but catches
+ * chroma-only edges.
+ *
+ * For quickstarters: just use luma edge detection.
+ *
+ * The general advice is to not rush the integration process and ensure each
+ * step is done correctly (don't try to integrate SMAA T2x with predicated edge
+ * detection from the start!). Ok then, let's go!
+ *
+ * 1. The first step is to create two RGBA temporal render targets for holding
+ * |edgesTex| and |blendTex|.
+ *
+ * In DX10 or DX11, you can use a RG render target for the edges texture.
+ * In the case of NVIDIA GPUs, using RG render targets seems to actually be
+ * slower.
+ *
+ * On the Xbox 360, you can use the same render target for resolving both
+ * |edgesTex| and |blendTex|, as they aren't needed simultaneously.
+ *
+ * 2. Both temporal render targets |edgesTex| and |blendTex| must be cleared
+ * each frame. Do not forget to clear the alpha channel!
+ *
+ * 3. The next step is loading the two supporting precalculated textures,
+ * 'areaTex' and 'searchTex'. You'll find them in the 'Textures' folder as
+ * C++ headers, and also as regular DDS files. They'll be needed for the
+ * 'SMAABlendingWeightCalculation' pass.
+ *
+ * If you use the C++ headers, be sure to load them in the format specified
+ * inside of them.
+ *
+ * You can also compress 'areaTex' and 'searchTex' using BC5 and BC4
+ * respectively, if you have that option in your content processor pipeline.
+ * When compressing them, you get a non-perceptible quality decrease, and a
+ * marginal performance increase.
+ *
+ * 4. All samplers must be set to linear filtering and clamp.
+ *
+ * After you get the technique working, remember that 64-bit inputs have
+ * half-rate linear filtering on GCN.
+ *
+ * If SMAA is applied to 64-bit color buffers, switching to point filtering
+ * when accessing them will increase the performance. Search for
+ * 'SMAASamplePoint' to see which textures may benefit from point
+ * filtering, and where (which is basically the color input in the edge
+ * detection and resolve passes).
+ *
+ * 5. All texture reads and buffer writes must be non-sRGB, with the exception
+ * of the input read and the output write in
+ * 'SMAANeighborhoodBlending' (and only in this pass!). If sRGB reads in
+ * this last pass are not possible, the technique will work anyway, but
+ * will perform antialiasing in gamma space.
+ *
+ * IMPORTANT: for best results the input read for the color/luma edge
+ * detection should *NOT* be sRGB.
+ *
+ * 6. Before including SMAA.h you'll have to setup the render target metrics,
+ * the target and any optional configuration defines. Optionally you can
+ * use a preset.
+ *
+ * You have the following targets available:
+ * SMAA_HLSL_3
+ * SMAA_HLSL_4
+ * SMAA_HLSL_4_1
+ * SMAA_GLSL_3 *
+ * SMAA_GLSL_4 *
+ *
+ * * (See SMAA_INCLUDE_VS and SMAA_INCLUDE_PS below).
+ *
+ * And four presets:
+ * SMAA_PRESET_LOW (%60 of the quality)
+ * SMAA_PRESET_MEDIUM (%80 of the quality)
+ * SMAA_PRESET_HIGH (%95 of the quality)
+ * SMAA_PRESET_ULTRA (%99 of the quality)
+ *
+ * For example:
+ * #define SMAA_RT_METRICS float4(1.0 / 1280.0, 1.0 / 720.0, 1280.0, 720.0)
+ * #define SMAA_HLSL_4
+ * #define SMAA_PRESET_HIGH
+ * #include "SMAA.h"
+ *
+ * Note that SMAA_RT_METRICS doesn't need to be a macro, it can be a
+ * uniform variable. The code is designed to minimize the impact of not
+ * using a constant value, but it is still better to hardcode it.
+ *
+ * Depending on how you encoded 'areaTex' and 'searchTex', you may have to
+ * add (and customize) the following defines before including SMAA.h:
+ * #define SMAA_AREATEX_SELECT(sample) sample.rg
+ * #define SMAA_SEARCHTEX_SELECT(sample) sample.r
+ *
+ * If your engine is already using porting macros, you can define
+ * SMAA_CUSTOM_SL, and define the porting functions by yourself.
+ *
+ * 7. Then, you'll have to setup the passes as indicated in the scheme above.
+ * You can take a look into SMAA.fx, to see how we did it for our demo.
+ * Checkout the function wrappers, you may want to copy-paste them!
+ *
+ * 8. It's recommended to validate the produced |edgesTex| and |blendTex|.
+ * You can use a screenshot from your engine to compare the |edgesTex|
+ * and |blendTex| produced inside of the engine with the results obtained
+ * with the reference demo.
+ *
+ * 9. After you get the last pass to work, it's time to optimize. You'll have
+ * to initialize a stencil buffer in the first pass (discard is already in
+ * the code), then mask execution by using it the second pass. The last
+ * pass should be executed in all pixels.
+ *
+ *
+ * After this point you can choose to enable predicated thresholding,
+ * temporal supersampling and motion blur integration:
+ *
+ * a) If you want to use predicated thresholding, take a look into
+ * SMAA_PREDICATION; you'll need to pass an extra texture in the edge
+ * detection pass.
+ *
+ * b) If you want to enable temporal supersampling (SMAA T2x):
+ *
+ * 1. The first step is to render using subpixel jitters. I won't go into
+ * detail, but it's as simple as moving each vertex position in the
+ * vertex shader, you can check how we do it in our DX10 demo.
+ *
+ * 2. Then, you must setup the temporal resolve. You may want to take a look
+ * into SMAAResolve for resolving 2x modes. After you get it working, you'll
+ * probably see ghosting everywhere. But fear not, you can enable the
+ * CryENGINE temporal reprojection by setting the SMAA_REPROJECTION macro.
+ * Check out SMAA_DECODE_VELOCITY if your velocity buffer is encoded.
+ *
+ * 3. The next step is to apply SMAA to each subpixel jittered frame, just as
+ * done for 1x.
+ *
+ * 4. At this point you should already have something usable, but for best
+ * results the proper area textures must be set depending on current jitter.
+ * For this, the parameter 'subsampleIndices' of
+ * 'SMAABlendingWeightCalculationPS' must be set as follows, for our T2x
+ * mode:
+ *
+ * @SUBSAMPLE_INDICES
+ *
+ * | S# | Camera Jitter | subsampleIndices |
+ * +----+------------------+---------------------+
+ * | 0 | ( 0.25, -0.25) | float4(1, 1, 1, 0) |
+ * | 1 | (-0.25, 0.25) | float4(2, 2, 2, 0) |
+ *
+ * These jitter positions assume a bottom-to-top y axis. S# stands for the
+ * sample number.
+ *
+ * More information about temporal supersampling here:
+ * http://iryoku.com/aacourse/downloads/13-Anti-Aliasing-Methods-in-CryENGINE-3.pdf
+ *
+ * c) If you want to enable spatial multisampling (SMAA S2x):
+ *
+ * 1. The scene must be rendered using MSAA 2x. The MSAA 2x buffer must be
+ * created with:
+ * - DX10: see below (*)
+ * - DX10.1: D3D10_STANDARD_MULTISAMPLE_PATTERN or
+ * - DX11: D3D11_STANDARD_MULTISAMPLE_PATTERN
+ *
+ * This allows to ensure that the subsample order matches the table in
+ * @SUBSAMPLE_INDICES.
+ *
+ * (*) In the case of DX10, we refer the reader to:
+ * - SMAA::detectMSAAOrder and
+ * - SMAA::msaaReorder
+ *
+ * These functions allow to match the standard multisample patterns by
+ * detecting the subsample order for a specific GPU, and reordering
+ * them appropriately.
+ *
+ * 2. A shader must be run to output each subsample into a separate buffer
+ * (DX10 is required). You can use SMAASeparate for this purpose, or just do
+ * it in an existing pass (for example, in the tone mapping pass, which has
+ * the advantage of feeding tone mapped subsamples to SMAA, which will yield
+ * better results).
+ *
+ * 3. The full SMAA 1x pipeline must be run for each separated buffer, storing
+ * the results in the final buffer. The second run should alpha blend with
+ * the existing final buffer using a blending factor of 0.5.
+ * 'subsampleIndices' must be adjusted as in the SMAA T2x case (see point
+ * b).
+ *
+ * d) If you want to enable temporal supersampling on top of SMAA S2x
+ * (which actually is SMAA 4x):
+ *
+ * 1. SMAA 4x consists of temporally jittering SMAA S2x, so the first step is
+ * to calculate SMAA S2x for current frame. In this case, 'subsampleIndices'
+ * must be set as follows:
+ *
+ * | F# | S# | Camera Jitter | Net Jitter | subsampleIndices |
+ * +----+----+--------------------+-------------------+----------------------+
+ * | 0 | 0 | ( 0.125, 0.125) | ( 0.375, -0.125) | float4(5, 3, 1, 3) |
+ * | 0 | 1 | ( 0.125, 0.125) | (-0.125, 0.375) | float4(4, 6, 2, 3) |
+ * +----+----+--------------------+-------------------+----------------------+
+ * | 1 | 2 | (-0.125, -0.125) | ( 0.125, -0.375) | float4(3, 5, 1, 4) |
+ * | 1 | 3 | (-0.125, -0.125) | (-0.375, 0.125) | float4(6, 4, 2, 4) |
+ *
+ * These jitter positions assume a bottom-to-top y axis. F# stands for the
+ * frame number. S# stands for the sample number.
+ *
+ * 2. After calculating SMAA S2x for current frame (with the new subsample
+ * indices), previous frame must be reprojected as in SMAA T2x mode (see
+ * point b).
+ *
+ * e) If motion blur is used, you may want to do the edge detection pass
+ * together with motion blur. This has two advantages:
+ *
+ * 1. Pixels under heavy motion can be omitted from the edge detection process.
+ * For these pixels we can just store "no edge", as motion blur will take
+ * care of them.
+ * 2. The center pixel tap is reused.
+ *
+ * Note that in this case depth testing should be used instead of stenciling,
+ * as we have to write all the pixels in the motion blur pass.
+ *
+ * That's it!
+ */
+
+//-----------------------------------------------------------------------------
+// SMAA Presets
+
+/**
+ * Note that if you use one of these presets, the following configuration
+ * macros will be ignored if set in the "Configurable Defines" section.
+ */
+
+#if defined(SMAA_PRESET_LOW)
+#define SMAA_THRESHOLD 0.15
+#define SMAA_MAX_SEARCH_STEPS 4
+#define SMAA_DISABLE_DIAG_DETECTION
+#define SMAA_DISABLE_CORNER_DETECTION
+#elif defined(SMAA_PRESET_MEDIUM)
+#define SMAA_THRESHOLD 0.1
+#define SMAA_MAX_SEARCH_STEPS 8
+#define SMAA_DISABLE_DIAG_DETECTION
+#define SMAA_DISABLE_CORNER_DETECTION
+#elif defined(SMAA_PRESET_HIGH)
+#define SMAA_THRESHOLD 0.1
+#define SMAA_MAX_SEARCH_STEPS 16
+#define SMAA_MAX_SEARCH_STEPS_DIAG 8
+#define SMAA_CORNER_ROUNDING 25
+#elif defined(SMAA_PRESET_ULTRA)
+#define SMAA_THRESHOLD 0.05
+#define SMAA_MAX_SEARCH_STEPS 32
+#define SMAA_MAX_SEARCH_STEPS_DIAG 16
+#define SMAA_CORNER_ROUNDING 25
+#endif
+
+//-----------------------------------------------------------------------------
+// Configurable Defines
+
+/**
+ * SMAA_THRESHOLD specifies the threshold or sensitivity to edges.
+ * Lowering this value you will be able to detect more edges at the expense of
+ * performance.
+ *
+ * Range: [0, 0.5]
+ * 0.1 is a reasonable value, and allows to catch most visible edges.
+ * 0.05 is a rather overkill value, that allows to catch 'em all.
+ *
+ * If temporal supersampling is used, 0.2 could be a reasonable value, as low
+ * contrast edges are properly filtered by just 2x.
+ */
+#ifndef SMAA_THRESHOLD
+#define SMAA_THRESHOLD 0.1
+#endif
+
+/**
+ * SMAA_DEPTH_THRESHOLD specifies the threshold for depth edge detection.
+ *
+ * Range: depends on the depth range of the scene.
+ */
+#ifndef SMAA_DEPTH_THRESHOLD
+#define SMAA_DEPTH_THRESHOLD (0.1 * SMAA_THRESHOLD)
+#endif
+
+/**
+ * SMAA_MAX_SEARCH_STEPS specifies the maximum steps performed in the
+ * horizontal/vertical pattern searches, at each side of the pixel.
+ *
+ * In number of pixels, it's actually the double. So the maximum line length
+ * perfectly handled by, for example 16, is 64 (by perfectly, we meant that
+ * longer lines won't look as good, but still antialiased).
+ *
+ * Range: [0, 112]
+ */
+#ifndef SMAA_MAX_SEARCH_STEPS
+#define SMAA_MAX_SEARCH_STEPS 16
+#endif
+
+/**
+ * SMAA_MAX_SEARCH_STEPS_DIAG specifies the maximum steps performed in the
+ * diagonal pattern searches, at each side of the pixel. In this case we jump
+ * one pixel at time, instead of two.
+ *
+ * Range: [0, 20]
+ *
+ * On high-end machines it is cheap (between a 0.8x and 0.9x slower for 16
+ * steps), but it can have a significant impact on older machines.
+ *
+ * Define SMAA_DISABLE_DIAG_DETECTION to disable diagonal processing.
+ */
+#ifndef SMAA_MAX_SEARCH_STEPS_DIAG
+#define SMAA_MAX_SEARCH_STEPS_DIAG 8
+#endif
+
+/**
+ * SMAA_CORNER_ROUNDING specifies how much sharp corners will be rounded.
+ *
+ * Range: [0, 100]
+ *
+ * Define SMAA_DISABLE_CORNER_DETECTION to disable corner processing.
+ */
+#ifndef SMAA_CORNER_ROUNDING
+#define SMAA_CORNER_ROUNDING 25
+#endif
+
+/**
+ * If there is a neighboring edge that has SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR
+ * times bigger contrast than the current edge, the current edge will be
+ * discarded.
+ *
+ * This allows to eliminate spurious crossing edges, and is based on the fact
+ * that, if there is too much contrast in a direction, it will perceptually
+ * hide the contrast in the other neighbors.
+ */
+#ifndef SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR
+#define SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR 2.0
+#endif
+
+/**
+ * Predicated thresholding allows to better preserve texture details and to
+ * improve performance, by decreasing the number of detected edges using an
+ * additional buffer like the light accumulation buffer, object ids or even the
+ * depth buffer (the depth buffer usage may be limited to indoor or short range
+ * scenes).
+ *
+ * It locally decreases the luma or color threshold if an edge is found in an
+ * additional buffer (so the global threshold can be higher).
+ *
+ * This method was developed by Playstation EDGE MLAA team, and used in
+ * Killzone 3, by using the light accumulation buffer. More information here:
+ * http://iryoku.com/aacourse/downloads/06-MLAA-on-PS3.pptx
+ */
+#ifndef SMAA_PREDICATION
+#define SMAA_PREDICATION 0
+#endif
+
+/**
+ * Threshold to be used in the additional predication buffer.
+ *
+ * Range: depends on the input, so you'll have to find the magic number that
+ * works for you.
+ */
+#ifndef SMAA_PREDICATION_THRESHOLD
+#define SMAA_PREDICATION_THRESHOLD 0.01
+#endif
+
+/**
+ * How much to scale the global threshold used for luma or color edge
+ * detection when using predication.
+ *
+ * Range: [1, 5]
+ */
+#ifndef SMAA_PREDICATION_SCALE
+#define SMAA_PREDICATION_SCALE 2.0
+#endif
+
+/**
+ * How much to locally decrease the threshold.
+ *
+ * Range: [0, 1]
+ */
+#ifndef SMAA_PREDICATION_STRENGTH
+#define SMAA_PREDICATION_STRENGTH 0.4
+#endif
+
+/**
+ * Temporal reprojection allows to remove ghosting artifacts when using
+ * temporal supersampling. We use the CryEngine 3 method which also introduces
+ * velocity weighting. This feature is of extreme importance for totally
+ * removing ghosting. More information here:
+ * http://iryoku.com/aacourse/downloads/13-Anti-Aliasing-Methods-in-CryENGINE-3.pdf
+ *
+ * Note that you'll need to setup a velocity buffer for enabling reprojection.
+ * For static geometry, saving the previous depth buffer is a viable
+ * alternative.
+ */
+#ifndef SMAA_REPROJECTION
+#define SMAA_REPROJECTION 0
+#endif
+
+/**
+ * SMAA_REPROJECTION_WEIGHT_SCALE controls the velocity weighting. It allows to
+ * remove ghosting trails behind the moving object, which are not removed by
+ * just using reprojection. Using low values will exhibit ghosting, while using
+ * high values will disable temporal supersampling under motion.
+ *
+ * Behind the scenes, velocity weighting removes temporal supersampling when
+ * the velocity of the subsamples differs (meaning they are different objects).
+ *
+ * Range: [0, 80]
+ */
+#ifndef SMAA_REPROJECTION_WEIGHT_SCALE
+#define SMAA_REPROJECTION_WEIGHT_SCALE 30.0
+#endif
+
+/**
+ * On some compilers, discard cannot be used in vertex shaders. Thus, they need
+ * to be compiled separately.
+ */
+#ifndef SMAA_INCLUDE_VS
+#define SMAA_INCLUDE_VS 1
+#endif
+#ifndef SMAA_INCLUDE_PS
+#define SMAA_INCLUDE_PS 1
+#endif
+
+//-----------------------------------------------------------------------------
+// Texture Access Defines
+
+#ifndef SMAA_AREATEX_SELECT
+#if defined(SMAA_HLSL_3)
+#define SMAA_AREATEX_SELECT(sample) sample.ra
+#else
+#define SMAA_AREATEX_SELECT(sample) sample.rg
+#endif
+#endif
+
+#ifndef SMAA_SEARCHTEX_SELECT
+#define SMAA_SEARCHTEX_SELECT(sample) sample.r
+#endif
+
+#ifndef SMAA_DECODE_VELOCITY
+#define SMAA_DECODE_VELOCITY(sample) sample.rg
+#endif
+
+//-----------------------------------------------------------------------------
+// Non-Configurable Defines
+
+#define SMAA_AREATEX_MAX_DISTANCE 16
+#define SMAA_AREATEX_MAX_DISTANCE_DIAG 20
+#define SMAA_AREATEX_PIXEL_SIZE (1.0 / float2(160.0, 560.0))
+#define SMAA_AREATEX_SUBTEX_SIZE (1.0 / 7.0)
+#define SMAA_SEARCHTEX_SIZE float2(66.0, 33.0)
+#define SMAA_SEARCHTEX_PACKED_SIZE float2(64.0, 16.0)
+#define SMAA_CORNER_ROUNDING_NORM (float(SMAA_CORNER_ROUNDING) / 100.0)
+
+//-----------------------------------------------------------------------------
+// Porting Functions
+
+#if defined(SMAA_HLSL_3)
+#define SMAATexture2D(tex) sampler2D tex
+#define SMAATexturePass2D(tex) tex
+#define SMAASampleLevelZero(tex, coord) tex2Dlod(tex, float4(coord, 0.0, 0.0))
+#define SMAASampleLevelZeroPoint(tex, coord) tex2Dlod(tex, float4(coord, 0.0, 0.0))
+#define SMAASampleLevelZeroOffset(tex, coord, offset) tex2Dlod(tex, float4(coord + offset * SMAA_RT_METRICS.xy, 0.0, 0.0))
+#define SMAASample(tex, coord) tex2D(tex, coord)
+#define SMAASamplePoint(tex, coord) tex2D(tex, coord)
+#define SMAASampleOffset(tex, coord, offset) tex2D(tex, coord + offset * SMAA_RT_METRICS.xy)
+#define SMAA_FLATTEN [flatten]
+#define SMAA_BRANCH [branch]
+#endif
+#if defined(SMAA_HLSL_4) || defined(SMAA_HLSL_4_1)
+SamplerState LinearSampler { Filter = MIN_MAG_LINEAR_MIP_POINT; AddressU = Clamp; AddressV = Clamp; };
+SamplerState PointSampler { Filter = MIN_MAG_MIP_POINT; AddressU = Clamp; AddressV = Clamp; };
+#define SMAATexture2D(tex) Texture2D tex
+#define SMAATexturePass2D(tex) tex
+#define SMAASampleLevelZero(tex, coord) tex.SampleLevel(LinearSampler, coord, 0)
+#define SMAASampleLevelZeroPoint(tex, coord) tex.SampleLevel(PointSampler, coord, 0)
+#define SMAASampleLevelZeroOffset(tex, coord, offset) tex.SampleLevel(LinearSampler, coord, 0, offset)
+#define SMAASample(tex, coord) tex.Sample(LinearSampler, coord)
+#define SMAASamplePoint(tex, coord) tex.Sample(PointSampler, coord)
+#define SMAASampleOffset(tex, coord, offset) tex.Sample(LinearSampler, coord, offset)
+#define SMAA_FLATTEN [flatten]
+#define SMAA_BRANCH [branch]
+#define SMAATexture2DMS2(tex) Texture2DMS<float4, 2> tex
+#define SMAALoad(tex, pos, sample) tex.Load(pos, sample)
+#if defined(SMAA_HLSL_4_1)
+#define SMAAGather(tex, coord) tex.Gather(LinearSampler, coord, 0)
+#endif
+#endif
+#if defined(SMAA_GLSL_3) || defined(SMAA_GLSL_4)
+#define SMAATexture2D(tex) sampler2D tex
+#define SMAATexturePass2D(tex) tex
+#define SMAASampleLevelZero(tex, coord) textureLod(tex, coord, 0.0)
+#define SMAASampleLevelZeroPoint(tex, coord) textureLod(tex, coord, 0.0)
+#define SMAASampleLevelZeroOffset(tex, coord, offset) textureLodOffset(tex, coord, 0.0, offset)
+#define SMAASample(tex, coord) texture(tex, coord)
+#define SMAASamplePoint(tex, coord) texture(tex, coord)
+#define SMAASampleOffset(tex, coord, offset) texture(tex, coord, offset)
+#define SMAA_FLATTEN
+#define SMAA_BRANCH
+#define lerp(a, b, t) mix(a, b, t)
+#define saturate(a) clamp(a, 0.0, 1.0)
+#if defined(SMAA_GLSL_4)
+#define mad(a, b, c) fma(a, b, c)
+#define SMAAGather(tex, coord) textureGather(tex, coord)
+#else
+#define mad(a, b, c) (a * b + c)
+#endif
+#define float2 vec2
+#define float3 vec3
+#define float4 vec4
+#define int2 ivec2
+#define int3 ivec3
+#define int4 ivec4
+#define bool2 bvec2
+#define bool3 bvec3
+#define bool4 bvec4
+#endif
+
+#if !defined(SMAA_HLSL_3) && !defined(SMAA_HLSL_4) && !defined(SMAA_HLSL_4_1) && !defined(SMAA_GLSL_3) && !defined(SMAA_GLSL_4) && !defined(SMAA_CUSTOM_SL)
+#error you must define the shading language: SMAA_HLSL_*, SMAA_GLSL_* or SMAA_CUSTOM_SL
+#endif
+
+//-----------------------------------------------------------------------------
+// Misc functions
+
+/**
+ * Gathers current pixel, and the top-left neighbors.
+ *
+ * Returns float3(center, left, top) of the .r channel. offset[0].xy/zw are
+ * the left/top texcoords precomputed by SMAAEdgeDetectionVS.
+ */
+float3 SMAAGatherNeighbours(float2 texcoord,
+                            float4 offset[3],
+                            SMAATexture2D(tex)) {
+    #ifdef SMAAGather
+    // Single hardware gather: the (-0.5, -0.5) texel shift centers the 2x2
+    // footprint so that .grb picks out the (center, left, top) samples.
+    return SMAAGather(tex, texcoord + SMAA_RT_METRICS.xy * float2(-0.5, -0.5)).grb;
+    #else
+    // Fallback for targets without gather: three point fetches.
+    float P = SMAASamplePoint(tex, texcoord).r;
+    float Pleft = SMAASamplePoint(tex, offset[0].xy).r;
+    float Ptop = SMAASamplePoint(tex, offset[0].zw).r;
+    return float3(P, Pleft, Ptop);
+    #endif
+}
+
+/**
+ * Adjusts the threshold by means of predication.
+ *
+ * The global threshold is scaled up by SMAA_PREDICATION_SCALE, then locally
+ * lowered by SMAA_PREDICATION_STRENGTH wherever the predication buffer shows
+ * an edge, so those edges are detected more readily.
+ */
+float2 SMAACalculatePredicatedThreshold(float2 texcoord,
+                                        float4 offset[3],
+                                        SMAATexture2D(predicationTex)) {
+    float3 neighbours = SMAAGatherNeighbours(texcoord, offset, SMAATexturePass2D(predicationTex));
+    // Edge estimate in the predication buffer: |center-left|, |center-top|.
+    float2 delta = abs(neighbours.xx - neighbours.yz);
+    float2 edges = step(SMAA_PREDICATION_THRESHOLD, delta);
+    return SMAA_PREDICATION_SCALE * SMAA_THRESHOLD * (1.0 - SMAA_PREDICATION_STRENGTH * edges);
+}
+
+/**
+ * Conditional move: per component, variable = cond ? value : variable.
+ */
+void SMAAMovc(bool2 cond, inout float2 variable, float2 value) {
+    SMAA_FLATTEN if (cond.x) variable.x = value.x;
+    SMAA_FLATTEN if (cond.y) variable.y = value.y;
+}
+
+// 4-wide variant, built from two 2-wide conditional moves.
+void SMAAMovc(bool4 cond, inout float4 variable, float4 value) {
+    SMAAMovc(cond.xy, variable.xy, value.xy);
+    SMAAMovc(cond.zw, variable.zw, value.zw);
+}
+
+
+#if SMAA_INCLUDE_VS
+//-----------------------------------------------------------------------------
+// Vertex Shaders
+
+/**
+ * Edge Detection Vertex Shader
+ *
+ * Precomputes neighbor texcoords for the edge detection pixel shaders:
+ * offset[0] = left/top, offset[1] = right/bottom, offset[2] = left-left /
+ * top-top (two texels away), all in units of SMAA_RT_METRICS.xy.
+ */
+void SMAAEdgeDetectionVS(float2 texcoord,
+                         out float4 offset[3]) {
+    offset[0] = mad(SMAA_RT_METRICS.xyxy, float4(-1.0, 0.0, 0.0, -1.0), texcoord.xyxy);
+    offset[1] = mad(SMAA_RT_METRICS.xyxy, float4( 1.0, 0.0, 0.0, 1.0), texcoord.xyxy);
+    offset[2] = mad(SMAA_RT_METRICS.xyxy, float4(-2.0, 0.0, 0.0, -2.0), texcoord.xyxy);
+}
+
+/**
+ * Blend Weight Calculation Vertex Shader
+ *
+ * Outputs pixcoord (texcoord in pixel units) and the fractional offsets used
+ * by the horizontal/vertical searches of the second pass.
+ */
+void SMAABlendingWeightCalculationVS(float2 texcoord,
+                                     out float2 pixcoord,
+                                     out float4 offset[3]) {
+    pixcoord = texcoord * SMAA_RT_METRICS.zw;
+
+    // We will use these offsets for the searches later on (see @PSEUDO_GATHER4):
+    offset[0] = mad(SMAA_RT_METRICS.xyxy, float4(-0.25, -0.125, 1.25, -0.125), texcoord.xyxy);
+    offset[1] = mad(SMAA_RT_METRICS.xyxy, float4(-0.125, -0.25, -0.125, 1.25), texcoord.xyxy);
+
+    // And these for the searches, they indicate the ends of the loops:
+    // (+/- 2 * SMAA_MAX_SEARCH_STEPS texels from the @PSEUDO_GATHER4 offsets)
+    offset[2] = mad(SMAA_RT_METRICS.xxyy,
+                    float4(-2.0, 2.0, -2.0, 2.0) * float(SMAA_MAX_SEARCH_STEPS),
+                    float4(offset[0].xz, offset[1].yw));
+}
+
+/**
+ * Neighborhood Blending Vertex Shader
+ *
+ * Precomputes the right (offset.xy) and bottom (offset.zw) neighbor
+ * texcoords for the third (neighborhood blending) pass.
+ */
+void SMAANeighborhoodBlendingVS(float2 texcoord,
+                                out float4 offset) {
+    offset = mad(SMAA_RT_METRICS.xyxy, float4( 1.0, 0.0, 0.0, 1.0), texcoord.xyxy);
+}
+#endif // SMAA_INCLUDE_VS
+
+#if SMAA_INCLUDE_PS
+//-----------------------------------------------------------------------------
+// Edge Detection Pixel Shaders (First Pass)
+
+/**
+ * Luma Edge Detection
+ *
+ * IMPORTANT NOTICE: luma edge detection requires gamma-corrected colors, and
+ * thus 'colorTex' should be a non-sRGB texture.
+ *
+ * Returns the (left, top) edge mask in .rg; returns float2(-2.0, -2.0) when
+ * no edge crosses the threshold, so such pixels can be rejected early.
+ */
+float2 SMAALumaEdgeDetectionPS(float2 texcoord,
+                               float4 offset[3],
+                               SMAATexture2D(colorTex)
+                               #if SMAA_PREDICATION
+                               , SMAATexture2D(predicationTex)
+                               #endif
+                               ) {
+    // Calculate the threshold:
+    #if SMAA_PREDICATION
+    float2 threshold = SMAACalculatePredicatedThreshold(texcoord, offset, SMAATexturePass2D(predicationTex));
+    #else
+    float2 threshold = float2(SMAA_THRESHOLD, SMAA_THRESHOLD);
+    #endif
+
+    // Calculate lumas (Rec. 709 luma coefficients):
+    float3 weights = float3(0.2126, 0.7152, 0.0722);
+    float L = dot(SMAASamplePoint(colorTex, texcoord).rgb, weights);
+
+    float Lleft = dot(SMAASamplePoint(colorTex, offset[0].xy).rgb, weights);
+    float Ltop = dot(SMAASamplePoint(colorTex, offset[0].zw).rgb, weights);
+
+    // We do the usual threshold:
+    float4 delta;
+    delta.xy = abs(L - float2(Lleft, Ltop));
+    float2 edges = step(threshold, delta.xy);
+
+    // Then discard if there is no edge:
+    if (dot(edges, float2(1.0, 1.0)) == 0.0)
+        return float2(-2.0, -2.0);
+
+    // Calculate right and bottom deltas:
+    float Lright = dot(SMAASamplePoint(colorTex, offset[1].xy).rgb, weights);
+    float Lbottom = dot(SMAASamplePoint(colorTex, offset[1].zw).rgb, weights);
+    delta.zw = abs(L - float2(Lright, Lbottom));
+
+    // Calculate the maximum delta in the direct neighborhood:
+    float2 maxDelta = max(delta.xy, delta.zw);
+
+    // Calculate left-left and top-top deltas:
+    float Lleftleft = dot(SMAASamplePoint(colorTex, offset[2].xy).rgb, weights);
+    float Ltoptop = dot(SMAASamplePoint(colorTex, offset[2].zw).rgb, weights);
+    delta.zw = abs(float2(Lleft, Ltop) - float2(Lleftleft, Ltoptop));
+
+    // Calculate the final maximum delta:
+    maxDelta = max(maxDelta.xy, delta.zw);
+    float finalDelta = max(maxDelta.x, maxDelta.y);
+
+    // Local contrast adaptation: drop an edge when a neighbor edge is more
+    // than SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR times stronger.
+    edges.xy *= step(finalDelta, SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR * delta.xy);
+
+    return edges;
+}
+
+/**
+ * Color Edge Detection
+ *
+ * IMPORTANT NOTICE: color edge detection requires gamma-corrected colors, and
+ * thus 'colorTex' should be a non-sRGB texture.
+ *
+ * Returns the (left, top) edge mask in .rg; returns float2(-2.0, -2.0) when
+ * no edge crosses the threshold, so such pixels can be rejected early.
+ */
+float2 SMAAColorEdgeDetectionPS(float2 texcoord,
+                                float4 offset[3],
+                                SMAATexture2D(colorTex)
+                                #if SMAA_PREDICATION
+                                , SMAATexture2D(predicationTex)
+                                #endif
+                                ) {
+    // Calculate the threshold:
+    #if SMAA_PREDICATION
+    // Pass the texture through SMAATexturePass2D(), matching
+    // SMAALumaEdgeDetectionPS; required for SMAA_CUSTOM_SL ports where the
+    // macro expands to more than the bare texture object.
+    float2 threshold = SMAACalculatePredicatedThreshold(texcoord, offset, SMAATexturePass2D(predicationTex));
+    #else
+    float2 threshold = float2(SMAA_THRESHOLD, SMAA_THRESHOLD);
+    #endif
+
+    // Calculate color deltas (per-neighbor delta = max over RGB channels):
+    float4 delta;
+    float3 C = SMAASamplePoint(colorTex, texcoord).rgb;
+
+    float3 Cleft = SMAASamplePoint(colorTex, offset[0].xy).rgb;
+    float3 t = abs(C - Cleft);
+    delta.x = max(max(t.r, t.g), t.b);
+
+    float3 Ctop = SMAASamplePoint(colorTex, offset[0].zw).rgb;
+    t = abs(C - Ctop);
+    delta.y = max(max(t.r, t.g), t.b);
+
+    // We do the usual threshold:
+    float2 edges = step(threshold, delta.xy);
+
+    // Then discard if there is no edge:
+    if (dot(edges, float2(1.0, 1.0)) == 0.0)
+        return float2(-2.0, -2.0);
+
+    // Calculate right and bottom deltas:
+    float3 Cright = SMAASamplePoint(colorTex, offset[1].xy).rgb;
+    t = abs(C - Cright);
+    delta.z = max(max(t.r, t.g), t.b);
+
+    float3 Cbottom = SMAASamplePoint(colorTex, offset[1].zw).rgb;
+    t = abs(C - Cbottom);
+    delta.w = max(max(t.r, t.g), t.b);
+
+    // Calculate the maximum delta in the direct neighborhood:
+    float2 maxDelta = max(delta.xy, delta.zw);
+
+    // Calculate left-left and top-top deltas:
+    float3 Cleftleft = SMAASamplePoint(colorTex, offset[2].xy).rgb;
+    t = abs(C - Cleftleft);
+    delta.z = max(max(t.r, t.g), t.b);
+
+    float3 Ctoptop = SMAASamplePoint(colorTex, offset[2].zw).rgb;
+    t = abs(C - Ctoptop);
+    delta.w = max(max(t.r, t.g), t.b);
+
+    // Calculate the final maximum delta:
+    maxDelta = max(maxDelta.xy, delta.zw);
+    float finalDelta = max(maxDelta.x, maxDelta.y);
+
+    // Local contrast adaptation: drop an edge when a neighbor edge is more
+    // than SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR times stronger.
+    edges.xy *= step(finalDelta, SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR * delta.xy);
+
+    return edges;
+}
+
+/**
+ * Depth Edge Detection
+ *
+ * Flags left/top edges wherever the depth delta against the corresponding
+ * neighbour exceeds SMAA_DEPTH_THRESHOLD; returns float2(-2.0, -2.0) when
+ * neither edge is present.
+ */
+float2 SMAADepthEdgeDetectionPS(float2 texcoord,
+                                float4 offset[3],
+                                SMAATexture2D(depthTex)) {
+    // Fetch center, left and top depths in one go.
+    float3 d = SMAAGatherNeighbours(texcoord, offset, SMAATexturePass2D(depthTex));
+    // Threshold the absolute depth differences |center-left|, |center-top|.
+    float2 edges = step(SMAA_DEPTH_THRESHOLD, abs(d.xx - d.yz));
+
+    // Early-out sentinel when no edge was detected at all.
+    if (edges.x + edges.y == 0.0)
+        return float2(-2.0, -2.0);
+
+    return edges;
+}
+
+//-----------------------------------------------------------------------------
+// Diagonal Search Functions
+
+#if !defined(SMAA_DISABLE_DIAG_DETECTION)
+
+/**
+ * Allows to decode two binary values from a bilinear-filtered access.
+ */
+float2 SMAADecodeDiagBilinearAccess(float2 e) {
+    // Bilinear access for fetching 'e' have a 0.25 offset, and we are
+    // interested in the R and G edges:
+    //
+    // +---G---+-------+
+    // | x o R x |
+    // +-------+-------+
+    //
+    // Then, if one of these edges is enabled:
+    // Red: (0.75 * X + 0.25 * 1) => 0.25 or 1.0
+    // Green: (0.75 * 1 + 0.25 * X) => 0.75 or 1.0
+    //
+    // This function will unpack the values (mad + mul + round):
+    // wolframalpha.com: round(x * abs(5 * x - 5 * 0.75)) plot 0 to 1
+    e.r = e.r * abs(5.0 * e.r - 5.0 * 0.75);
+    return round(e);
+}
+
+// 4-wide variant: decodes the red channel pair packed into .r and .b.
+float4 SMAADecodeDiagBilinearAccess(float4 e) {
+    e.rb = e.rb * abs(5.0 * e.rb - 5.0 * 0.75);
+    return round(e);
+}
+
+/**
+ * These functions allow to perform diagonal pattern searches.
+ *
+ * Walks one pixel per iteration along 'dir'. coord.z counts steps taken
+ * (starting at -1), coord.w holds the activation of the last fetched edge
+ * pair; returns (steps, last activation) as coord.zw.
+ */
+float2 SMAASearchDiag1(SMAATexture2D(edgesTex), float2 texcoord, float2 dir, out float2 e) {
+    float4 coord = float4(texcoord, -1.0, 1.0);
+    float3 t = float3(SMAA_RT_METRICS.xy, 1.0);
+    while (coord.z < float(SMAA_MAX_SEARCH_STEPS_DIAG - 1) &&
+           coord.w > 0.9) {
+        // Advance texcoord by one texel along 'dir' and bump the counter.
+        coord.xyz = mad(t, float3(dir, 1.0), coord.xyz);
+        e = SMAASampleLevelZero(edgesTex, coord.xy).rg;
+        coord.w = dot(e, float2(0.5, 0.5));
+    }
+    return coord.zw;
+}
+
+// As SMAASearchDiag1, but fetches both edges per step with a single bilinear
+// access (see @SearchDiag2Optimization) and decodes them afterwards.
+float2 SMAASearchDiag2(SMAATexture2D(edgesTex), float2 texcoord, float2 dir, out float2 e) {
+    float4 coord = float4(texcoord, -1.0, 1.0);
+    coord.x += 0.25 * SMAA_RT_METRICS.x; // See @SearchDiag2Optimization
+    float3 t = float3(SMAA_RT_METRICS.xy, 1.0);
+    while (coord.z < float(SMAA_MAX_SEARCH_STEPS_DIAG - 1) &&
+           coord.w > 0.9) {
+        coord.xyz = mad(t, float3(dir, 1.0), coord.xyz);
+
+        // @SearchDiag2Optimization
+        // Fetch both edges at once using bilinear filtering:
+        e = SMAASampleLevelZero(edgesTex, coord.xy).rg;
+        e = SMAADecodeDiagBilinearAccess(e);
+
+        // Non-optimized version:
+        // e.g = SMAASampleLevelZero(edgesTex, coord.xy).g;
+        // e.r = SMAASampleLevelZeroOffset(edgesTex, coord.xy, int2(1, 0)).r;
+
+        coord.w = dot(e, float2(0.5, 0.5));
+    }
+    return coord.zw;
+}
+
+/**
+ * Similar to SMAAArea, this calculates the area corresponding to a certain
+ * diagonal distance and crossing edges 'e'.
+ */
+float2 SMAAAreaDiag(SMAATexture2D(areaTex), float2 dist, float2 e, float offset) {
+    // Index into the area texture by (crossing edges, distance):
+    float2 texcoord = mad(float2(SMAA_AREATEX_MAX_DISTANCE_DIAG, SMAA_AREATEX_MAX_DISTANCE_DIAG), e, dist);
+
+    // We do a scale and bias for mapping to texel space:
+    texcoord = mad(SMAA_AREATEX_PIXEL_SIZE, texcoord, 0.5 * SMAA_AREATEX_PIXEL_SIZE);
+
+    // Diagonal areas are on the second half of the texture:
+    texcoord.x += 0.5;
+
+    // Move to proper place, according to the subpixel offset:
+    texcoord.y += SMAA_AREATEX_SUBTEX_SIZE * offset;
+
+    // Do it!
+    return SMAA_AREATEX_SELECT(SMAASampleLevelZero(areaTex, texcoord));
+}
+
+/**
+ * This searches for diagonal patterns and returns the corresponding weights.
+ *
+ * d packs (distance, last activation) pairs: d.xz for one search direction,
+ * d.yw for the opposite one. subsampleIndices.z/.w select the area-texture
+ * sub-row for the two diagonal orientations.
+ */
+float2 SMAACalculateDiagWeights(SMAATexture2D(edgesTex), SMAATexture2D(areaTex), float2 texcoord, float2 e, float4 subsampleIndices) {
+    float2 weights = float2(0.0, 0.0);
+
+    // Search for the line ends:
+    float4 d;
+    float2 end;
+    if (e.r > 0.0) {
+        d.xz = SMAASearchDiag1(SMAATexturePass2D(edgesTex), texcoord, float2(-1.0, 1.0), end);
+        d.x += float(end.y > 0.9);
+    } else
+        d.xz = float2(0.0, 0.0);
+    d.yw = SMAASearchDiag1(SMAATexturePass2D(edgesTex), texcoord, float2(1.0, -1.0), end);
+
+    SMAA_BRANCH
+    if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3
+        // Fetch the crossing edges:
+        float4 coords = mad(float4(-d.x + 0.25, d.x, d.y, -d.y - 0.25), SMAA_RT_METRICS.xyxy, texcoord.xyxy);
+        float4 c;
+        c.xy = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).rg;
+        c.zw = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).rg;
+        c.yxwz = SMAADecodeDiagBilinearAccess(c.xyzw);
+
+        // Non-optimized version:
+        // float4 coords = mad(float4(-d.x, d.x, d.y, -d.y), SMAA_RT_METRICS.xyxy, texcoord.xyxy);
+        // float4 c;
+        // c.x = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).g;
+        // c.y = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2( 0, 0)).r;
+        // c.z = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).g;
+        // c.w = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, -1)).r;
+
+        // Merge crossing edges at each side into a single value:
+        float2 cc = mad(float2(2.0, 2.0), c.xz, c.yw);
+
+        // Remove the crossing edge if we didn't find the end of the line:
+        SMAAMovc(bool2(step(0.9, d.zw)), cc, float2(0.0, 0.0));
+
+        // Fetch the areas for this line:
+        weights += SMAAAreaDiag(SMAATexturePass2D(areaTex), d.xy, cc, subsampleIndices.z);
+    }
+
+    // Search for the line ends (second diagonal orientation):
+    d.xz = SMAASearchDiag2(SMAATexturePass2D(edgesTex), texcoord, float2(-1.0, -1.0), end);
+    if (SMAASampleLevelZeroOffset(edgesTex, texcoord, int2(1, 0)).r > 0.0) {
+        d.yw = SMAASearchDiag2(SMAATexturePass2D(edgesTex), texcoord, float2(1.0, 1.0), end);
+        d.y += float(end.y > 0.9);
+    } else
+        d.yw = float2(0.0, 0.0);
+
+    SMAA_BRANCH
+    if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3
+        // Fetch the crossing edges:
+        float4 coords = mad(float4(-d.x, -d.x, d.y, d.y), SMAA_RT_METRICS.xyxy, texcoord.xyxy);
+        float4 c;
+        c.x = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).g;
+        c.y = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2( 0, -1)).r;
+        c.zw = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).gr;
+        float2 cc = mad(float2(2.0, 2.0), c.xz, c.yw);
+
+        // Remove the crossing edge if we didn't find the end of the line:
+        SMAAMovc(bool2(step(0.9, d.zw)), cc, float2(0.0, 0.0));
+
+        // Fetch the areas for this line (note the .gr swap for this diagonal):
+        weights += SMAAAreaDiag(SMAATexturePass2D(areaTex), d.xy, cc, subsampleIndices.w).gr;
+    }
+
+    return weights;
+}
+#endif
+
+//-----------------------------------------------------------------------------
+// Horizontal/Vertical Search Functions
+
+/**
+ * This allows to determine how much length should we add in the last step
+ * of the searches. It takes the bilinearly interpolated edge (see
+ * @PSEUDO_GATHER4), and adds 0, 1 or 2, depending on which edges and
+ * crossing edges are active.
+ *
+ * 'offset' selects the half of the search texture: 0.0 for the left case,
+ * 0.5 for the right case (see the callers below).
+ */
+float SMAASearchLength(SMAATexture2D(searchTex), float2 e, float offset) {
+    // The texture is flipped vertically, with left and right cases taking half
+    // of the space horizontally:
+    float2 scale = SMAA_SEARCHTEX_SIZE * float2(0.5, -1.0);
+    float2 bias = SMAA_SEARCHTEX_SIZE * float2(offset, 1.0);
+
+    // Scale and bias to access texel centers:
+    scale += float2(-1.0, 1.0);
+    bias += float2( 0.5, -0.5);
+
+    // Convert from pixel coordinates to texcoords:
+    // (We use SMAA_SEARCHTEX_PACKED_SIZE because the texture is cropped)
+    scale *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE;
+    bias *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE;
+
+    // Lookup the search texture:
+    return SMAA_SEARCHTEX_SELECT(SMAASampleLevelZero(searchTex, mad(scale, e, bias)));
+}
+
+/**
+ * Horizontal/vertical search functions for the 2nd pass.
+ *
+ * Each search advances two texels per iteration, reading edge pairs via the
+ * bilinear @PSEUDO_GATHER4 trick, then uses SMAASearchLength to recover the
+ * exact 0/1/2-texel remainder from the last fetched value.
+ */
+float SMAASearchXLeft(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+    /**
+     * @PSEUDO_GATHER4
+     * This texcoord has been offset by (-0.25, -0.125) in the vertex shader to
+     * sample between edge, thus fetching four edges in a row.
+     * Sampling with different offsets in each direction allows to disambiguate
+     * which edges are active from the four fetched ones.
+     */
+    float2 e = float2(0.0, 1.0);
+    while (texcoord.x > end &&
+           e.g > 0.8281 && // Is there some edge not activated?
+           e.r == 0.0) { // Or is there a crossing edge that breaks the line?
+        e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+        texcoord = mad(-float2(2.0, 0.0), SMAA_RT_METRICS.xy, texcoord);
+    }
+
+    float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e, 0.0), 3.25);
+    return mad(SMAA_RT_METRICS.x, offset, texcoord.x);
+
+    // Non-optimized version:
+    // We correct the previous (-0.25, -0.125) offset we applied:
+    // texcoord.x += 0.25 * SMAA_RT_METRICS.x;
+
+    // The searches are bias by 1, so adjust the coords accordingly:
+    // texcoord.x += SMAA_RT_METRICS.x;
+
+    // Disambiguate the length added by the last step:
+    // texcoord.x += 2.0 * SMAA_RT_METRICS.x; // Undo last step
+    // texcoord.x -= SMAA_RT_METRICS.x * (255.0 / 127.0) * SMAASearchLength(SMAATexturePass2D(searchTex), e, 0.0);
+    // return mad(SMAA_RT_METRICS.x, offset, texcoord.x);
+}
+
+// Mirror of SMAASearchXLeft, walking right (note the 0.5 search-tex half).
+float SMAASearchXRight(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+    float2 e = float2(0.0, 1.0);
+    while (texcoord.x < end &&
+           e.g > 0.8281 && // Is there some edge not activated?
+           e.r == 0.0) { // Or is there a crossing edge that breaks the line?
+        e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+        texcoord = mad(float2(2.0, 0.0), SMAA_RT_METRICS.xy, texcoord);
+    }
+    float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e, 0.5), 3.25);
+    return mad(-SMAA_RT_METRICS.x, offset, texcoord.x);
+}
+
+// Vertical searches: roles of .r/.g swap, hence the e.gr swizzle below.
+float SMAASearchYUp(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+    float2 e = float2(1.0, 0.0);
+    while (texcoord.y > end &&
+           e.r > 0.8281 && // Is there some edge not activated?
+           e.g == 0.0) { // Or is there a crossing edge that breaks the line?
+        e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+        texcoord = mad(-float2(0.0, 2.0), SMAA_RT_METRICS.xy, texcoord);
+    }
+    float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e.gr, 0.0), 3.25);
+    return mad(SMAA_RT_METRICS.y, offset, texcoord.y);
+}
+
+float SMAASearchYDown(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+    float2 e = float2(1.0, 0.0);
+    while (texcoord.y < end &&
+           e.r > 0.8281 && // Is there some edge not activated?
+           e.g == 0.0) { // Or is there a crossing edge that breaks the line?
+        e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+        texcoord = mad(float2(0.0, 2.0), SMAA_RT_METRICS.xy, texcoord);
+    }
+    float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e.gr, 0.5), 3.25);
+    return mad(-SMAA_RT_METRICS.y, offset, texcoord.y);
+}
+
+/**
+ * Ok, we have the distance and both crossing edges. So, what are the areas
+ * at each side of current edge?
+ *
+ * 'dist' is the (sqrt-compressed) left/right distance pair, e1/e2 are the
+ * crossing edge values, and 'offset' selects the subsample row of the
+ * precomputed area texture.
+ */
+float2 SMAAArea(SMAATexture2D(areaTex), float2 dist, float e1, float e2, float offset) {
+    // Rounding prevents precision errors of bilinear filtering:
+    float2 texcoord = mad(float2(SMAA_AREATEX_MAX_DISTANCE, SMAA_AREATEX_MAX_DISTANCE), round(4.0 * float2(e1, e2)), dist);
+
+    // We do a scale and bias for mapping to texel space:
+    texcoord = mad(SMAA_AREATEX_PIXEL_SIZE, texcoord, 0.5 * SMAA_AREATEX_PIXEL_SIZE);
+
+    // Move to proper place, according to the subpixel offset:
+    texcoord.y = mad(SMAA_AREATEX_SUBTEX_SIZE, offset, texcoord.y);
+
+    // Do it!
+    return SMAA_AREATEX_SELECT(SMAASampleLevelZero(areaTex, texcoord));
+}
+
+//-----------------------------------------------------------------------------
+// Corner Detection Functions
+
+// Attenuates the blending weights near sharp horizontal corners, scaled by
+// SMAA_CORNER_ROUNDING_NORM. texcoord.xy/zw are the two line-end coords,
+// d the left/right distances. No-op when corner detection is disabled.
+void SMAADetectHorizontalCornerPattern(SMAATexture2D(edgesTex), inout float2 weights, float4 texcoord, float2 d) {
+    #if !defined(SMAA_DISABLE_CORNER_DETECTION)
+    // Apply rounding only at the nearer line end(s):
+    float2 leftRight = step(d.xy, d.yx);
+    float2 rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * leftRight;
+
+    rounding /= leftRight.x + leftRight.y; // Reduce blending for pixels in the center of a line.
+
+    // Probe the crossing edges one/two texels above and below the line ends:
+    float2 factor = float2(1.0, 1.0);
+    factor.x -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2(0, 1)).r;
+    factor.x -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2(1, 1)).r;
+    factor.y -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2(0, -2)).r;
+    factor.y -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2(1, -2)).r;
+
+    weights *= saturate(factor);
+    #endif
+}
+
+// Vertical counterpart of SMAADetectHorizontalCornerPattern (probes the .g
+// edge channel to the left and right of the line ends).
+void SMAADetectVerticalCornerPattern(SMAATexture2D(edgesTex), inout float2 weights, float4 texcoord, float2 d) {
+    #if !defined(SMAA_DISABLE_CORNER_DETECTION)
+    float2 leftRight = step(d.xy, d.yx);
+    float2 rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * leftRight;
+
+    rounding /= leftRight.x + leftRight.y;
+
+    float2 factor = float2(1.0, 1.0);
+    factor.x -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2( 1, 0)).g;
+    factor.x -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2( 1, 1)).g;
+    factor.y -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2(-2, 0)).g;
+    factor.y -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2(-2, 1)).g;
+
+    weights *= saturate(factor);
+    #endif
+}
+
+//-----------------------------------------------------------------------------
+// Blending Weight Calculation Pixel Shader (Second Pass)
+
+float4 SMAABlendingWeightCalculationPS(float2 texcoord,
+ float2 pixcoord,
+ float4 offset[3],
+ SMAATexture2D(edgesTex),
+ SMAATexture2D(areaTex),
+ SMAATexture2D(searchTex),
+ float4 subsampleIndices) { // Just pass zero for SMAA 1x, see @SUBSAMPLE_INDICES.
+ float4 weights = float4(0.0, 0.0, 0.0, 0.0);
+
+ float2 e = SMAASample(edgesTex, texcoord).rg;
+
+ SMAA_BRANCH
+ if (e.g > 0.0) { // Edge at north
+ #if !defined(SMAA_DISABLE_DIAG_DETECTION)
+ // Diagonals have both north and west edges, so searching for them in
+ // one of the boundaries is enough.
+ weights.rg = SMAACalculateDiagWeights(SMAATexturePass2D(edgesTex), SMAATexturePass2D(areaTex), texcoord, e, subsampleIndices);
+
+ // We give priority to diagonals, so if we find a diagonal we skip
+ // horizontal/vertical processing.
+ SMAA_BRANCH
+ if (weights.r == -weights.g) { // weights.r + weights.g == 0.0
+ #endif
+
+ float2 d;
+
+ // Find the distance to the left:
+ float3 coords;
+ coords.x = SMAASearchXLeft(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[0].xy, offset[2].x);
+ coords.y = offset[1].y; // offset[1].y = texcoord.y - 0.25 * SMAA_RT_METRICS.y (@CROSSING_OFFSET)
+ d.x = coords.x;
+
+ // Now fetch the left crossing edges, two at a time using bilinear
+ // filtering. Sampling at -0.25 (see @CROSSING_OFFSET) enables to
+ // discern what value each edge has:
+ float e1 = SMAASampleLevelZero(edgesTex, coords.xy).r;
+
+ // Find the distance to the right:
+ coords.z = SMAASearchXRight(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[0].zw, offset[2].y);
+ d.y = coords.z;
+
+        // We want the distances to be in pixel units (doing this here allows us to
+ // better interleave arithmetic and memory accesses):
+ d = abs(round(mad(SMAA_RT_METRICS.zz, d, -pixcoord.xx)));
+
+ // SMAAArea below needs a sqrt, as the areas texture is compressed
+ // quadratically:
+ float2 sqrt_d = sqrt(d);
+
+ // Fetch the right crossing edges:
+ float e2 = SMAASampleLevelZeroOffset(edgesTex, coords.zy, int2(1, 0)).r;
+
+        // Ok, we know what this pattern looks like, now it is time for getting
+ // the actual area:
+ weights.rg = SMAAArea(SMAATexturePass2D(areaTex), sqrt_d, e1, e2, subsampleIndices.y);
+
+ // Fix corners:
+ coords.y = texcoord.y;
+ SMAADetectHorizontalCornerPattern(SMAATexturePass2D(edgesTex), weights.rg, coords.xyzy, d);
+
+ #if !defined(SMAA_DISABLE_DIAG_DETECTION)
+ } else
+ e.r = 0.0; // Skip vertical processing.
+ #endif
+ }
+
+ SMAA_BRANCH
+ if (e.r > 0.0) { // Edge at west
+ float2 d;
+
+ // Find the distance to the top:
+ float3 coords;
+ coords.y = SMAASearchYUp(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[1].xy, offset[2].z);
+ coords.x = offset[0].x; // offset[1].x = texcoord.x - 0.25 * SMAA_RT_METRICS.x;
+ d.x = coords.y;
+
+ // Fetch the top crossing edges:
+ float e1 = SMAASampleLevelZero(edgesTex, coords.xy).g;
+
+ // Find the distance to the bottom:
+ coords.z = SMAASearchYDown(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[1].zw, offset[2].w);
+ d.y = coords.z;
+
+ // We want the distances to be in pixel units:
+ d = abs(round(mad(SMAA_RT_METRICS.ww, d, -pixcoord.yy)));
+
+ // SMAAArea below needs a sqrt, as the areas texture is compressed
+ // quadratically:
+ float2 sqrt_d = sqrt(d);
+
+ // Fetch the bottom crossing edges:
+ float e2 = SMAASampleLevelZeroOffset(edgesTex, coords.xz, int2(0, 1)).g;
+
+ // Get the area for this direction:
+ weights.ba = SMAAArea(SMAATexturePass2D(areaTex), sqrt_d, e1, e2, subsampleIndices.x);
+
+ // Fix corners:
+ coords.x = texcoord.x;
+ SMAADetectVerticalCornerPattern(SMAATexturePass2D(edgesTex), weights.ba, coords.xyxz, d);
+ }
+
+ return weights;
+}
+
+//-----------------------------------------------------------------------------
+// Neighborhood Blending Pixel Shader (Third Pass)
+
+float4 SMAANeighborhoodBlendingPS(float2 texcoord,
+ float4 offset,
+ SMAATexture2D(colorTex),
+ SMAATexture2D(blendTex)
+ #if SMAA_REPROJECTION
+ , SMAATexture2D(velocityTex)
+ #endif
+ ) {
+ // Fetch the blending weights for current pixel:
+ float4 a;
+ a.x = SMAASample(blendTex, offset.xy).a; // Right
+ a.y = SMAASample(blendTex, offset.zw).g; // Top
+ a.wz = SMAASample(blendTex, texcoord).xz; // Bottom / Left
+
+ // Is there any blending weight with a value greater than 0.0?
+ SMAA_BRANCH
+ if (dot(a, float4(1.0, 1.0, 1.0, 1.0)) < 1e-5) {
+ float4 color = SMAASampleLevelZero(colorTex, texcoord);
+
+ #if SMAA_REPROJECTION
+ float2 velocity = SMAA_DECODE_VELOCITY(SMAASampleLevelZero(velocityTex, texcoord));
+
+ // Pack velocity into the alpha channel:
+ color.a = sqrt(5.0 * length(velocity));
+ #endif
+
+ return color;
+ } else {
+ bool h = max(a.x, a.z) > max(a.y, a.w); // max(horizontal) > max(vertical)
+
+ // Calculate the blending offsets:
+ float4 blendingOffset = float4(0.0, a.y, 0.0, a.w);
+ float2 blendingWeight = a.yw;
+ SMAAMovc(bool4(h, h, h, h), blendingOffset, float4(a.x, 0.0, a.z, 0.0));
+ SMAAMovc(bool2(h, h), blendingWeight, a.xz);
+ blendingWeight /= dot(blendingWeight, float2(1.0, 1.0));
+
+ // Calculate the texture coordinates:
+ float4 blendingCoord = mad(blendingOffset, float4(SMAA_RT_METRICS.xy, -SMAA_RT_METRICS.xy), texcoord.xyxy);
+
+ // We exploit bilinear filtering to mix current pixel with the chosen
+ // neighbor:
+ float4 color = blendingWeight.x * SMAASampleLevelZero(colorTex, blendingCoord.xy);
+ color += blendingWeight.y * SMAASampleLevelZero(colorTex, blendingCoord.zw);
+
+ #if SMAA_REPROJECTION
+ // Antialias velocity for proper reprojection in a later stage:
+ float2 velocity = blendingWeight.x * SMAA_DECODE_VELOCITY(SMAASampleLevelZero(velocityTex, blendingCoord.xy));
+ velocity += blendingWeight.y * SMAA_DECODE_VELOCITY(SMAASampleLevelZero(velocityTex, blendingCoord.zw));
+
+ // Pack velocity into the alpha channel:
+ color.a = sqrt(5.0 * length(velocity));
+ #endif
+
+ return color;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Temporal Resolve Pixel Shader (Optional Pass)
+
+float4 SMAAResolvePS(float2 texcoord,
+ SMAATexture2D(currentColorTex),
+ SMAATexture2D(previousColorTex)
+ #if SMAA_REPROJECTION
+ , SMAATexture2D(velocityTex)
+ #endif
+ ) {
+ #if SMAA_REPROJECTION
+ // Velocity is assumed to be calculated for motion blur, so we need to
+ // inverse it for reprojection:
+ float2 velocity = -SMAA_DECODE_VELOCITY(SMAASamplePoint(velocityTex, texcoord).rg);
+
+ // Fetch current pixel:
+ float4 current = SMAASamplePoint(currentColorTex, texcoord);
+
+ // Reproject current coordinates and fetch previous pixel:
+ float4 previous = SMAASamplePoint(previousColorTex, texcoord + velocity);
+
+ // Attenuate the previous pixel if the velocity is different:
+ float delta = abs(current.a * current.a - previous.a * previous.a) / 5.0;
+ float weight = 0.5 * saturate(1.0 - sqrt(delta) * SMAA_REPROJECTION_WEIGHT_SCALE);
+
+ // Blend the pixels according to the calculated weight:
+ return lerp(current, previous, weight);
+ #else
+ // Just blend the pixels:
+ float4 current = SMAASamplePoint(currentColorTex, texcoord);
+ float4 previous = SMAASamplePoint(previousColorTex, texcoord);
+ return lerp(current, previous, 0.5);
+ #endif
+}
+
+//-----------------------------------------------------------------------------
+// Separate Multisamples Pixel Shader (Optional Pass)
+
+#ifdef SMAALoad
+void SMAASeparatePS(float4 position,
+ float2 texcoord,
+ out float4 target0,
+ out float4 target1,
+ SMAATexture2DMS2(colorTexMS)) {
+ int2 pos = int2(position.xy);
+ target0 = SMAALoad(colorTexMS, pos, 0);
+ target1 = SMAALoad(colorTexMS, pos, 1);
+}
+#endif
+
+//-----------------------------------------------------------------------------
+#endif // SMAA_INCLUDE_PS
+
+layout(rgba8, binding = 0, set = 3) uniform image2D imgOutput;
+
+layout(binding = 1, set = 2) uniform sampler2D inputImg;
+layout(binding = 3, set = 2) uniform sampler2D samplerArea;
+layout(binding = 4, set = 2) uniform sampler2D samplerSearch;
+layout( binding = 2 ) uniform invResolution
+{
+ vec2 invResolution_data;
+};
+
+void main() {
+ ivec2 loc = ivec2(gl_GlobalInvocationID.x * 4, gl_GlobalInvocationID.y * 4);
+ for(int i = 0; i < 4; i++)
+ {
+ for(int j = 0; j < 4; j++)
+ {
+ ivec2 texelCoord = ivec2(loc.x + i, loc.y + j);
+ vec2 coord = (texelCoord + vec2(0.5)) / invResolution_data;
+ vec2 pixCoord;
+ vec4 offset[3];
+
+ SMAABlendingWeightCalculationVS( coord, pixCoord, offset);
+
+ vec4 oColor = SMAABlendingWeightCalculationPS(coord, pixCoord, offset, inputImg, samplerArea, samplerSearch, ivec4(0));
+
+ imageStore(imgOutput, texelCoord, oColor);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaBlend.spv b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaBlend.spv
new file mode 100644
index 00000000..8efa011f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaBlend.spv
Binary files differ
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaEdge.glsl b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaEdge.glsl
new file mode 100644
index 00000000..668b97d5
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaEdge.glsl
@@ -0,0 +1,1402 @@
+#version 430 core
+#define SMAA_GLSL_4 1
+
+layout (constant_id = 0) const int SMAA_PRESET_LOW = 0;
+layout (constant_id = 1) const int SMAA_PRESET_MEDIUM = 0;
+layout (constant_id = 2) const int SMAA_PRESET_HIGH = 0;
+layout (constant_id = 3) const int SMAA_PRESET_ULTRA = 0;
+layout (constant_id = 4) const float METRIC_WIDTH = 1920.0;
+layout (constant_id = 5) const float METRIC_HEIGHT = 1080.0;
+
+#define SMAA_RT_METRICS float4(1.0 / METRIC_WIDTH, 1.0 / METRIC_HEIGHT, METRIC_WIDTH, METRIC_HEIGHT)
+
+layout (local_size_x = 16, local_size_y = 16) in;
+/**
+ * Copyright (C) 2013 Jorge Jimenez (jorge@iryoku.com)
+ * Copyright (C) 2013 Jose I. Echevarria (joseignacioechevarria@gmail.com)
+ * Copyright (C) 2013 Belen Masia (bmasia@unizar.es)
+ * Copyright (C) 2013 Fernando Navarro (fernandn@microsoft.com)
+ * Copyright (C) 2013 Diego Gutierrez (diegog@unizar.es)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished to
+ * do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software. As clarification, there
+ * is no requirement that the copyright notice and permission be included in
+ * binary distributions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+/**
+ * _______ ___ ___ ___ ___
+ * / || \/ | / \ / \
+ * | (---- | \ / | / ^ \ / ^ \
+ * \ \ | |\/| | / /_\ \ / /_\ \
+ * ----) | | | | | / _____ \ / _____ \
+ * |_______/ |__| |__| /__/ \__\ /__/ \__\
+ *
+ * E N H A N C E D
+ * S U B P I X E L M O R P H O L O G I C A L A N T I A L I A S I N G
+ *
+ * http://www.iryoku.com/smaa/
+ *
+ * Hi, welcome aboard!
+ *
+ * Here you'll find instructions to get the shader up and running as fast as
+ * possible.
+ *
+ * IMPORTANT NOTICE: when updating, remember to update both this file and the
+ * precomputed textures! They may change from version to version.
+ *
+ * The shader has three passes, chained together as follows:
+ *
+ * |input|------------------
+ * v |
+ * [ SMAA*EdgeDetection ] |
+ * v |
+ * |edgesTex| |
+ * v |
+ * [ SMAABlendingWeightCalculation ] |
+ * v |
+ * |blendTex| |
+ * v |
+ * [ SMAANeighborhoodBlending ] <------
+ * v
+ * |output|
+ *
+ * Note that each [pass] has its own vertex and pixel shader. Remember to use
+ * oversized triangles instead of quads to avoid overshading along the
+ * diagonal.
+ *
+ * You've three edge detection methods to choose from: luma, color or depth.
+ * They represent different quality/performance and anti-aliasing/sharpness
+ * tradeoffs, so our recommendation is for you to choose the one that best
+ * suits your particular scenario:
+ *
+ * - Depth edge detection is usually the fastest but it may miss some edges.
+ *
+ * - Luma edge detection is usually more expensive than depth edge detection,
+ * but catches visible edges that depth edge detection can miss.
+ *
+ * - Color edge detection is usually the most expensive one but catches
+ * chroma-only edges.
+ *
+ * For quickstarters: just use luma edge detection.
+ *
+ * The general advice is to not rush the integration process and ensure each
+ * step is done correctly (don't try to integrate SMAA T2x with predicated edge
+ * detection from the start!). Ok then, let's go!
+ *
+ * 1. The first step is to create two RGBA temporal render targets for holding
+ * |edgesTex| and |blendTex|.
+ *
+ * In DX10 or DX11, you can use a RG render target for the edges texture.
+ * In the case of NVIDIA GPUs, using RG render targets seems to actually be
+ * slower.
+ *
+ * On the Xbox 360, you can use the same render target for resolving both
+ * |edgesTex| and |blendTex|, as they aren't needed simultaneously.
+ *
+ * 2. Both temporal render targets |edgesTex| and |blendTex| must be cleared
+ * each frame. Do not forget to clear the alpha channel!
+ *
+ * 3. The next step is loading the two supporting precalculated textures,
+ * 'areaTex' and 'searchTex'. You'll find them in the 'Textures' folder as
+ * C++ headers, and also as regular DDS files. They'll be needed for the
+ * 'SMAABlendingWeightCalculation' pass.
+ *
+ * If you use the C++ headers, be sure to load them in the format specified
+ * inside of them.
+ *
+ * You can also compress 'areaTex' and 'searchTex' using BC5 and BC4
+ * respectively, if you have that option in your content processor pipeline.
+ *    When compressing them, you get a non-perceptible quality decrease, and a
+ * marginal performance increase.
+ *
+ * 4. All samplers must be set to linear filtering and clamp.
+ *
+ * After you get the technique working, remember that 64-bit inputs have
+ * half-rate linear filtering on GCN.
+ *
+ * If SMAA is applied to 64-bit color buffers, switching to point filtering
+ *    when accessing them will increase the performance. Search for
+ * 'SMAASamplePoint' to see which textures may benefit from point
+ * filtering, and where (which is basically the color input in the edge
+ * detection and resolve passes).
+ *
+ * 5. All texture reads and buffer writes must be non-sRGB, with the exception
+ * of the input read and the output write in
+ * 'SMAANeighborhoodBlending' (and only in this pass!). If sRGB reads in
+ * this last pass are not possible, the technique will work anyway, but
+ * will perform antialiasing in gamma space.
+ *
+ * IMPORTANT: for best results the input read for the color/luma edge
+ * detection should *NOT* be sRGB.
+ *
+ * 6. Before including SMAA.h you'll have to setup the render target metrics,
+ * the target and any optional configuration defines. Optionally you can
+ * use a preset.
+ *
+ * You have the following targets available:
+ * SMAA_HLSL_3
+ * SMAA_HLSL_4
+ * SMAA_HLSL_4_1
+ * SMAA_GLSL_3 *
+ * SMAA_GLSL_4 *
+ *
+ * * (See SMAA_INCLUDE_VS and SMAA_INCLUDE_PS below).
+ *
+ * And four presets:
+ * SMAA_PRESET_LOW (%60 of the quality)
+ * SMAA_PRESET_MEDIUM (%80 of the quality)
+ * SMAA_PRESET_HIGH (%95 of the quality)
+ * SMAA_PRESET_ULTRA (%99 of the quality)
+ *
+ * For example:
+ * #define SMAA_RT_METRICS float4(1.0 / 1280.0, 1.0 / 720.0, 1280.0, 720.0)
+ * #define SMAA_HLSL_4
+ * #define SMAA_PRESET_HIGH
+ * #include "SMAA.h"
+ *
+ * Note that SMAA_RT_METRICS doesn't need to be a macro, it can be a
+ * uniform variable. The code is designed to minimize the impact of not
+ * using a constant value, but it is still better to hardcode it.
+ *
+ * Depending on how you encoded 'areaTex' and 'searchTex', you may have to
+ * add (and customize) the following defines before including SMAA.h:
+ * #define SMAA_AREATEX_SELECT(sample) sample.rg
+ * #define SMAA_SEARCHTEX_SELECT(sample) sample.r
+ *
+ * If your engine is already using porting macros, you can define
+ * SMAA_CUSTOM_SL, and define the porting functions by yourself.
+ *
+ * 7. Then, you'll have to setup the passes as indicated in the scheme above.
+ * You can take a look into SMAA.fx, to see how we did it for our demo.
+ * Checkout the function wrappers, you may want to copy-paste them!
+ *
+ * 8. It's recommended to validate the produced |edgesTex| and |blendTex|.
+ * You can use a screenshot from your engine to compare the |edgesTex|
+ * and |blendTex| produced inside of the engine with the results obtained
+ * with the reference demo.
+ *
+ * 9. After you get the last pass to work, it's time to optimize. You'll have
+ * to initialize a stencil buffer in the first pass (discard is already in
+ *    the code), then mask execution by using it in the second pass. The last
+ * pass should be executed in all pixels.
+ *
+ *
+ * After this point you can choose to enable predicated thresholding,
+ * temporal supersampling and motion blur integration:
+ *
+ * a) If you want to use predicated thresholding, take a look into
+ * SMAA_PREDICATION; you'll need to pass an extra texture in the edge
+ * detection pass.
+ *
+ * b) If you want to enable temporal supersampling (SMAA T2x):
+ *
+ * 1. The first step is to render using subpixel jitters. I won't go into
+ * detail, but it's as simple as moving each vertex position in the
+ * vertex shader, you can check how we do it in our DX10 demo.
+ *
+ * 2. Then, you must setup the temporal resolve. You may want to take a look
+ * into SMAAResolve for resolving 2x modes. After you get it working, you'll
+ * probably see ghosting everywhere. But fear not, you can enable the
+ * CryENGINE temporal reprojection by setting the SMAA_REPROJECTION macro.
+ * Check out SMAA_DECODE_VELOCITY if your velocity buffer is encoded.
+ *
+ * 3. The next step is to apply SMAA to each subpixel jittered frame, just as
+ * done for 1x.
+ *
+ * 4. At this point you should already have something usable, but for best
+ * results the proper area textures must be set depending on current jitter.
+ * For this, the parameter 'subsampleIndices' of
+ * 'SMAABlendingWeightCalculationPS' must be set as follows, for our T2x
+ * mode:
+ *
+ * @SUBSAMPLE_INDICES
+ *
+ * | S# | Camera Jitter | subsampleIndices |
+ * +----+------------------+---------------------+
+ * | 0 | ( 0.25, -0.25) | float4(1, 1, 1, 0) |
+ * | 1 | (-0.25, 0.25) | float4(2, 2, 2, 0) |
+ *
+ * These jitter positions assume a bottom-to-top y axis. S# stands for the
+ * sample number.
+ *
+ * More information about temporal supersampling here:
+ * http://iryoku.com/aacourse/downloads/13-Anti-Aliasing-Methods-in-CryENGINE-3.pdf
+ *
+ * c) If you want to enable spatial multisampling (SMAA S2x):
+ *
+ * 1. The scene must be rendered using MSAA 2x. The MSAA 2x buffer must be
+ * created with:
+ * - DX10: see below (*)
+ * - DX10.1: D3D10_STANDARD_MULTISAMPLE_PATTERN or
+ * - DX11: D3D11_STANDARD_MULTISAMPLE_PATTERN
+ *
+ * This allows to ensure that the subsample order matches the table in
+ * @SUBSAMPLE_INDICES.
+ *
+ * (*) In the case of DX10, we refer the reader to:
+ * - SMAA::detectMSAAOrder and
+ * - SMAA::msaaReorder
+ *
+ * These functions allow to match the standard multisample patterns by
+ * detecting the subsample order for a specific GPU, and reordering
+ * them appropriately.
+ *
+ * 2. A shader must be run to output each subsample into a separate buffer
+ * (DX10 is required). You can use SMAASeparate for this purpose, or just do
+ * it in an existing pass (for example, in the tone mapping pass, which has
+ * the advantage of feeding tone mapped subsamples to SMAA, which will yield
+ * better results).
+ *
+ * 3. The full SMAA 1x pipeline must be run for each separated buffer, storing
+ * the results in the final buffer. The second run should alpha blend with
+ * the existing final buffer using a blending factor of 0.5.
+ * 'subsampleIndices' must be adjusted as in the SMAA T2x case (see point
+ * b).
+ *
+ * d) If you want to enable temporal supersampling on top of SMAA S2x
+ * (which actually is SMAA 4x):
+ *
+ * 1. SMAA 4x consists on temporally jittering SMAA S2x, so the first step is
+ * to calculate SMAA S2x for current frame. In this case, 'subsampleIndices'
+ * must be set as follows:
+ *
+ * | F# | S# | Camera Jitter | Net Jitter | subsampleIndices |
+ * +----+----+--------------------+-------------------+----------------------+
+ * | 0 | 0 | ( 0.125, 0.125) | ( 0.375, -0.125) | float4(5, 3, 1, 3) |
+ * | 0 | 1 | ( 0.125, 0.125) | (-0.125, 0.375) | float4(4, 6, 2, 3) |
+ * +----+----+--------------------+-------------------+----------------------+
+ * | 1 | 2 | (-0.125, -0.125) | ( 0.125, -0.375) | float4(3, 5, 1, 4) |
+ * | 1 | 3 | (-0.125, -0.125) | (-0.375, 0.125) | float4(6, 4, 2, 4) |
+ *
+ * These jitter positions assume a bottom-to-top y axis. F# stands for the
+ * frame number. S# stands for the sample number.
+ *
+ * 2. After calculating SMAA S2x for current frame (with the new subsample
+ * indices), previous frame must be reprojected as in SMAA T2x mode (see
+ * point b).
+ *
+ * e) If motion blur is used, you may want to do the edge detection pass
+ * together with motion blur. This has two advantages:
+ *
+ * 1. Pixels under heavy motion can be omitted from the edge detection process.
+ * For these pixels we can just store "no edge", as motion blur will take
+ * care of them.
+ * 2. The center pixel tap is reused.
+ *
+ * Note that in this case depth testing should be used instead of stenciling,
+ * as we have to write all the pixels in the motion blur pass.
+ *
+ * That's it!
+ */
+
+//-----------------------------------------------------------------------------
+// SMAA Presets
+
+/**
+ * Note that if you use one of these presets, the following configuration
+ * macros will be ignored if set in the "Configurable Defines" section.
+ */
+
+#if defined(SMAA_PRESET_LOW)
+#define SMAA_THRESHOLD 0.15
+#define SMAA_MAX_SEARCH_STEPS 4
+#define SMAA_DISABLE_DIAG_DETECTION
+#define SMAA_DISABLE_CORNER_DETECTION
+#elif defined(SMAA_PRESET_MEDIUM)
+#define SMAA_THRESHOLD 0.1
+#define SMAA_MAX_SEARCH_STEPS 8
+#define SMAA_DISABLE_DIAG_DETECTION
+#define SMAA_DISABLE_CORNER_DETECTION
+#elif defined(SMAA_PRESET_HIGH)
+#define SMAA_THRESHOLD 0.1
+#define SMAA_MAX_SEARCH_STEPS 16
+#define SMAA_MAX_SEARCH_STEPS_DIAG 8
+#define SMAA_CORNER_ROUNDING 25
+#elif defined(SMAA_PRESET_ULTRA)
+#define SMAA_THRESHOLD 0.05
+#define SMAA_MAX_SEARCH_STEPS 32
+#define SMAA_MAX_SEARCH_STEPS_DIAG 16
+#define SMAA_CORNER_ROUNDING 25
+#endif
+
+//-----------------------------------------------------------------------------
+// Configurable Defines
+
+/**
+ * SMAA_THRESHOLD specifies the threshold or sensitivity to edges.
+ * Lowering this value you will be able to detect more edges at the expense of
+ * performance.
+ *
+ * Range: [0, 0.5]
+ * 0.1 is a reasonable value, and allows to catch most visible edges.
+ * 0.05 is a rather overkill value, that allows to catch 'em all.
+ *
+ * If temporal supersampling is used, 0.2 could be a reasonable value, as low
+ * contrast edges are properly filtered by just 2x.
+ */
+#ifndef SMAA_THRESHOLD
+#define SMAA_THRESHOLD 0.1
+#endif
+
+/**
+ * SMAA_DEPTH_THRESHOLD specifies the threshold for depth edge detection.
+ *
+ * Range: depends on the depth range of the scene.
+ */
+#ifndef SMAA_DEPTH_THRESHOLD
+#define SMAA_DEPTH_THRESHOLD (0.1 * SMAA_THRESHOLD)
+#endif
+
+/**
+ * SMAA_MAX_SEARCH_STEPS specifies the maximum steps performed in the
+ * horizontal/vertical pattern searches, at each side of the pixel.
+ *
+ * In number of pixels, it's actually the double. So the maximum line length
+ * perfectly handled by, for example 16, is 64 (by perfectly, we meant that
+ * longer lines won't look as good, but still antialiased).
+ *
+ * Range: [0, 112]
+ */
+#ifndef SMAA_MAX_SEARCH_STEPS
+#define SMAA_MAX_SEARCH_STEPS 16
+#endif
+
+/**
+ * SMAA_MAX_SEARCH_STEPS_DIAG specifies the maximum steps performed in the
+ * diagonal pattern searches, at each side of the pixel. In this case we jump
+ * one pixel at time, instead of two.
+ *
+ * Range: [0, 20]
+ *
+ * On high-end machines it is cheap (between a 0.8x and 0.9x slower for 16
+ * steps), but it can have a significant impact on older machines.
+ *
+ * Define SMAA_DISABLE_DIAG_DETECTION to disable diagonal processing.
+ */
+#ifndef SMAA_MAX_SEARCH_STEPS_DIAG
+#define SMAA_MAX_SEARCH_STEPS_DIAG 8
+#endif
+
+/**
+ * SMAA_CORNER_ROUNDING specifies how much sharp corners will be rounded.
+ *
+ * Range: [0, 100]
+ *
+ * Define SMAA_DISABLE_CORNER_DETECTION to disable corner processing.
+ */
+#ifndef SMAA_CORNER_ROUNDING
+#define SMAA_CORNER_ROUNDING 25
+#endif
+
+/**
+ * If there is a neighbor edge that has SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR times
+ * bigger contrast than current edge, current edge will be discarded.
+ *
+ * This allows to eliminate spurious crossing edges, and is based on the fact
+ * that, if there is too much contrast in a direction, that will hide
+ * perceptually contrast in the other neighbors.
+ */
+#ifndef SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR
+#define SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR 2.0
+#endif
+
+/**
+ * Predicated thresholding allows to better preserve texture details and to
+ * improve performance, by decreasing the number of detected edges using an
+ * additional buffer like the light accumulation buffer, object ids or even the
+ * depth buffer (the depth buffer usage may be limited to indoor or short range
+ * scenes).
+ *
+ * It locally decreases the luma or color threshold if an edge is found in an
+ * additional buffer (so the global threshold can be higher).
+ *
+ * This method was developed by Playstation EDGE MLAA team, and used in
+ * Killzone 3, by using the light accumulation buffer. More information here:
+ * http://iryoku.com/aacourse/downloads/06-MLAA-on-PS3.pptx
+ */
+#ifndef SMAA_PREDICATION
+#define SMAA_PREDICATION 0
+#endif
+
+/**
+ * Threshold to be used in the additional predication buffer.
+ *
+ * Range: depends on the input, so you'll have to find the magic number that
+ * works for you.
+ */
+#ifndef SMAA_PREDICATION_THRESHOLD
+#define SMAA_PREDICATION_THRESHOLD 0.01
+#endif
+
+/**
+ * How much to scale the global threshold used for luma or color edge
+ * detection when using predication.
+ *
+ * Range: [1, 5]
+ */
+#ifndef SMAA_PREDICATION_SCALE
+#define SMAA_PREDICATION_SCALE 2.0
+#endif
+
+/**
+ * How much to locally decrease the threshold.
+ *
+ * Range: [0, 1]
+ */
+#ifndef SMAA_PREDICATION_STRENGTH
+#define SMAA_PREDICATION_STRENGTH 0.4
+#endif
+
+/**
+ * Temporal reprojection allows to remove ghosting artifacts when using
+ * temporal supersampling. We use the CryEngine 3 method which also introduces
+ * velocity weighting. This feature is of extreme importance for totally
+ * removing ghosting. More information here:
+ * http://iryoku.com/aacourse/downloads/13-Anti-Aliasing-Methods-in-CryENGINE-3.pdf
+ *
+ * Note that you'll need to setup a velocity buffer for enabling reprojection.
+ * For static geometry, saving the previous depth buffer is a viable
+ * alternative.
+ */
+#ifndef SMAA_REPROJECTION
+#define SMAA_REPROJECTION 0
+#endif
+
+/**
+ * SMAA_REPROJECTION_WEIGHT_SCALE controls the velocity weighting. It allows to
+ * remove ghosting trails behind the moving object, which are not removed by
+ * just using reprojection. Using low values will exhibit ghosting, while using
+ * high values will disable temporal supersampling under motion.
+ *
+ * Behind the scenes, velocity weighting removes temporal supersampling when
+ * the velocity of the subsamples differs (meaning they are different objects).
+ *
+ * Range: [0, 80]
+ */
+#ifndef SMAA_REPROJECTION_WEIGHT_SCALE
+#define SMAA_REPROJECTION_WEIGHT_SCALE 30.0
+#endif
+
+/**
+ * On some compilers, discard cannot be used in vertex shaders. Thus, they need
+ * to be compiled separately.
+ */
+#ifndef SMAA_INCLUDE_VS
+#define SMAA_INCLUDE_VS 1
+#endif
+#ifndef SMAA_INCLUDE_PS
+#define SMAA_INCLUDE_PS 1
+#endif
+
+//-----------------------------------------------------------------------------
+// Texture Access Defines
+
+#ifndef SMAA_AREATEX_SELECT
+#if defined(SMAA_HLSL_3)
+#define SMAA_AREATEX_SELECT(sample) sample.ra
+#else
+#define SMAA_AREATEX_SELECT(sample) sample.rg
+#endif
+#endif
+
+#ifndef SMAA_SEARCHTEX_SELECT
+#define SMAA_SEARCHTEX_SELECT(sample) sample.r
+#endif
+
+#ifndef SMAA_DECODE_VELOCITY
+#define SMAA_DECODE_VELOCITY(sample) sample.rg
+#endif
+
+//-----------------------------------------------------------------------------
+// Non-Configurable Defines
+
+#define SMAA_AREATEX_MAX_DISTANCE 16
+#define SMAA_AREATEX_MAX_DISTANCE_DIAG 20
+#define SMAA_AREATEX_PIXEL_SIZE (1.0 / float2(160.0, 560.0))
+#define SMAA_AREATEX_SUBTEX_SIZE (1.0 / 7.0)
+#define SMAA_SEARCHTEX_SIZE float2(66.0, 33.0)
+#define SMAA_SEARCHTEX_PACKED_SIZE float2(64.0, 16.0)
+#define SMAA_CORNER_ROUNDING_NORM (float(SMAA_CORNER_ROUNDING) / 100.0)
+
+//-----------------------------------------------------------------------------
+// Porting Functions
+
+#if defined(SMAA_HLSL_3)
+#define SMAATexture2D(tex) sampler2D tex
+#define SMAATexturePass2D(tex) tex
+#define SMAASampleLevelZero(tex, coord) tex2Dlod(tex, float4(coord, 0.0, 0.0))
+#define SMAASampleLevelZeroPoint(tex, coord) tex2Dlod(tex, float4(coord, 0.0, 0.0))
+#define SMAASampleLevelZeroOffset(tex, coord, offset) tex2Dlod(tex, float4(coord + offset * SMAA_RT_METRICS.xy, 0.0, 0.0))
+#define SMAASample(tex, coord) tex2D(tex, coord)
+#define SMAASamplePoint(tex, coord) tex2D(tex, coord)
+#define SMAASampleOffset(tex, coord, offset) tex2D(tex, coord + offset * SMAA_RT_METRICS.xy)
+#define SMAA_FLATTEN [flatten]
+#define SMAA_BRANCH [branch]
+#endif
+#if defined(SMAA_HLSL_4) || defined(SMAA_HLSL_4_1)
+SamplerState LinearSampler { Filter = MIN_MAG_LINEAR_MIP_POINT; AddressU = Clamp; AddressV = Clamp; };
+SamplerState PointSampler { Filter = MIN_MAG_MIP_POINT; AddressU = Clamp; AddressV = Clamp; };
+#define SMAATexture2D(tex) Texture2D tex
+#define SMAATexturePass2D(tex) tex
+#define SMAASampleLevelZero(tex, coord) tex.SampleLevel(LinearSampler, coord, 0)
+#define SMAASampleLevelZeroPoint(tex, coord) tex.SampleLevel(PointSampler, coord, 0)
+#define SMAASampleLevelZeroOffset(tex, coord, offset) tex.SampleLevel(LinearSampler, coord, 0, offset)
+#define SMAASample(tex, coord) tex.Sample(LinearSampler, coord)
+#define SMAASamplePoint(tex, coord) tex.Sample(PointSampler, coord)
+#define SMAASampleOffset(tex, coord, offset) tex.Sample(LinearSampler, coord, offset)
+#define SMAA_FLATTEN [flatten]
+#define SMAA_BRANCH [branch]
+#define SMAATexture2DMS2(tex) Texture2DMS<float4, 2> tex
+#define SMAALoad(tex, pos, sample) tex.Load(pos, sample)
+#if defined(SMAA_HLSL_4_1)
+#define SMAAGather(tex, coord) tex.Gather(LinearSampler, coord, 0)
+#endif
+#endif
+#if defined(SMAA_GLSL_3) || defined(SMAA_GLSL_4)
+#define SMAATexture2D(tex) sampler2D tex
+#define SMAATexturePass2D(tex) tex
+#define SMAASampleLevelZero(tex, coord) textureLod(tex, coord, 0.0)
+#define SMAASampleLevelZeroPoint(tex, coord) textureLod(tex, coord, 0.0)
+#define SMAASampleLevelZeroOffset(tex, coord, offset) textureLodOffset(tex, coord, 0.0, offset)
+#define SMAASample(tex, coord) texture(tex, coord)
+#define SMAASamplePoint(tex, coord) texture(tex, coord)
+#define SMAASampleOffset(tex, coord, offset) texture(tex, coord, offset)
+#define SMAA_FLATTEN
+#define SMAA_BRANCH
+#define lerp(a, b, t) mix(a, b, t)
+#define saturate(a) clamp(a, 0.0, 1.0)
+#if defined(SMAA_GLSL_4)
+#define mad(a, b, c) fma(a, b, c)
+#define SMAAGather(tex, coord) textureGather(tex, coord)
+#else
+#define mad(a, b, c) (a * b + c)
+#endif
+#define float2 vec2
+#define float3 vec3
+#define float4 vec4
+#define int2 ivec2
+#define int3 ivec3
+#define int4 ivec4
+#define bool2 bvec2
+#define bool3 bvec3
+#define bool4 bvec4
+#endif
+
+#if !defined(SMAA_HLSL_3) && !defined(SMAA_HLSL_4) && !defined(SMAA_HLSL_4_1) && !defined(SMAA_GLSL_3) && !defined(SMAA_GLSL_4) && !defined(SMAA_CUSTOM_SL)
+#error you must define the shading language: SMAA_HLSL_*, SMAA_GLSL_* or SMAA_CUSTOM_SL
+#endif
+
+//-----------------------------------------------------------------------------
+// Misc functions
+
+/**
+ * Gathers current pixel, and the top-left neighbors.
+ */
+float3 SMAAGatherNeighbours(float2 texcoord,
+ float4 offset[3],
+ SMAATexture2D(tex)) {
+ #ifdef SMAAGather
+ return SMAAGather(tex, texcoord + SMAA_RT_METRICS.xy * float2(-0.5, -0.5)).grb;
+ #else
+ float P = SMAASamplePoint(tex, texcoord).r;
+ float Pleft = SMAASamplePoint(tex, offset[0].xy).r;
+ float Ptop = SMAASamplePoint(tex, offset[0].zw).r;
+ return float3(P, Pleft, Ptop);
+ #endif
+}
+
+/**
+ * Adjusts the threshold by means of predication.
+ *
+ * The base SMAA_THRESHOLD is scaled by SMAA_PREDICATION_SCALE, and reduced by
+ * SMAA_PREDICATION_STRENGTH wherever the predication texture (typically
+ * depth) shows an edge — making color/luma detection more sensitive there.
+ */
+float2 SMAACalculatePredicatedThreshold(float2 texcoord,
+                                        float4 offset[3],
+                                        SMAATexture2D(predicationTex)) {
+    float3 neighbours = SMAAGatherNeighbours(texcoord, offset, SMAATexturePass2D(predicationTex));
+    // Deltas of the center value against the left and top neighbours:
+    float2 delta = abs(neighbours.xx - neighbours.yz);
+    float2 edges = step(SMAA_PREDICATION_THRESHOLD, delta);
+    return SMAA_PREDICATION_SCALE * SMAA_THRESHOLD * (1.0 - SMAA_PREDICATION_STRENGTH * edges);
+}
+
+/**
+ * Conditional move: per component, copies 'value' into 'variable' where the
+ * corresponding component of 'cond' is true.
+ */
+void SMAAMovc(bool2 cond, inout float2 variable, float2 value) {
+    SMAA_FLATTEN if (cond.x) variable.x = value.x;
+    SMAA_FLATTEN if (cond.y) variable.y = value.y;
+}
+
+// 4-component overload: applies the conditional move to the xy and zw pairs.
+void SMAAMovc(bool4 cond, inout float4 variable, float4 value) {
+    SMAAMovc(cond.xy, variable.xy, value.xy);
+    SMAAMovc(cond.zw, variable.zw, value.zw);
+}
+
+
+#if SMAA_INCLUDE_VS
+//-----------------------------------------------------------------------------
+// Vertex Shaders
+
+/**
+ * Edge Detection Vertex Shader
+ *
+ * Precomputes neighbour texcoords for the edge detection pixel shaders:
+ * offset[0] = left (.xy) / top (.zw), offset[1] = right / bottom,
+ * offset[2] = left-left / top-top (two texels away).
+ */
+void SMAAEdgeDetectionVS(float2 texcoord,
+                         out float4 offset[3]) {
+    offset[0] = mad(SMAA_RT_METRICS.xyxy, float4(-1.0, 0.0, 0.0, -1.0), texcoord.xyxy);
+    offset[1] = mad(SMAA_RT_METRICS.xyxy, float4( 1.0, 0.0, 0.0, 1.0), texcoord.xyxy);
+    offset[2] = mad(SMAA_RT_METRICS.xyxy, float4(-2.0, 0.0, 0.0, -2.0), texcoord.xyxy);
+}
+
+/**
+ * Blend Weight Calculation Vertex Shader
+ *
+ * Outputs the pixel-space coordinate and the fractional sampling offsets
+ * used by the horizontal/vertical searches of the second pass.
+ */
+void SMAABlendingWeightCalculationVS(float2 texcoord,
+                                     out float2 pixcoord,
+                                     out float4 offset[3]) {
+    // Texcoord expressed in pixel units:
+    pixcoord = texcoord * SMAA_RT_METRICS.zw;
+
+    // We will use these offsets for the searches later on (see @PSEUDO_GATHER4):
+    offset[0] = mad(SMAA_RT_METRICS.xyxy, float4(-0.25, -0.125, 1.25, -0.125), texcoord.xyxy);
+    offset[1] = mad(SMAA_RT_METRICS.xyxy, float4(-0.125, -0.25, -0.125, 1.25), texcoord.xyxy);
+
+    // And these for the searches, they indicate the ends of the loops:
+    offset[2] = mad(SMAA_RT_METRICS.xxyy,
+                    float4(-2.0, 2.0, -2.0, 2.0) * float(SMAA_MAX_SEARCH_STEPS),
+                    float4(offset[0].xz, offset[1].yw));
+}
+
+/**
+ * Neighborhood Blending Vertex Shader
+ *
+ * Precomputes the right (.xy) and bottom (.zw) neighbour coordinates for
+ * the third pass.
+ */
+void SMAANeighborhoodBlendingVS(float2 texcoord,
+                                out float4 offset) {
+    offset = mad(SMAA_RT_METRICS.xyxy, float4( 1.0, 0.0, 0.0, 1.0), texcoord.xyxy);
+}
+#endif // SMAA_INCLUDE_VS
+
+#if SMAA_INCLUDE_PS
+//-----------------------------------------------------------------------------
+// Edge Detection Pixel Shaders (First Pass)
+
+/**
+ * Luma Edge Detection
+ *
+ * Returns edges as float2(left, top) in {0, 1}, or float2(-2.0, -2.0) as a
+ * sentinel when no edge was found at this pixel.
+ *
+ * IMPORTANT NOTICE: luma edge detection requires gamma-corrected colors, and
+ * thus 'colorTex' should be a non-sRGB texture.
+ */
+float2 SMAALumaEdgeDetectionPS(float2 texcoord,
+                               float4 offset[3],
+                               SMAATexture2D(colorTex)
+                               #if SMAA_PREDICATION
+                               , SMAATexture2D(predicationTex)
+                               #endif
+                               ) {
+    // Calculate the threshold:
+    #if SMAA_PREDICATION
+    float2 threshold = SMAACalculatePredicatedThreshold(texcoord, offset, SMAATexturePass2D(predicationTex));
+    #else
+    float2 threshold = float2(SMAA_THRESHOLD, SMAA_THRESHOLD);
+    #endif
+
+    // Calculate lumas (Rec. 709 weights):
+    float3 weights = float3(0.2126, 0.7152, 0.0722);
+    float L = dot(SMAASamplePoint(colorTex, texcoord).rgb, weights);
+
+    float Lleft = dot(SMAASamplePoint(colorTex, offset[0].xy).rgb, weights);
+    float Ltop  = dot(SMAASamplePoint(colorTex, offset[0].zw).rgb, weights);
+
+    // We do the usual threshold:
+    float4 delta;
+    delta.xy = abs(L - float2(Lleft, Ltop));
+    float2 edges = step(threshold, delta.xy);
+
+    // Then discard if there is no edge:
+    if (dot(edges, float2(1.0, 1.0)) == 0.0)
+        return float2(-2.0, -2.0);
+
+    // Calculate right and bottom deltas:
+    float Lright = dot(SMAASamplePoint(colorTex, offset[1].xy).rgb, weights);
+    float Lbottom  = dot(SMAASamplePoint(colorTex, offset[1].zw).rgb, weights);
+    delta.zw = abs(L - float2(Lright, Lbottom));
+
+    // Calculate the maximum delta in the direct neighborhood:
+    float2 maxDelta = max(delta.xy, delta.zw);
+
+    // Calculate left-left and top-top deltas:
+    float Lleftleft = dot(SMAASamplePoint(colorTex, offset[2].xy).rgb, weights);
+    float Ltoptop = dot(SMAASamplePoint(colorTex, offset[2].zw).rgb, weights);
+    delta.zw = abs(float2(Lleft, Ltop) - float2(Lleftleft, Ltoptop));
+
+    // Calculate the final maximum delta:
+    maxDelta = max(maxDelta.xy, delta.zw);
+    float finalDelta = max(maxDelta.x, maxDelta.y);
+
+    // Local contrast adaptation: suppress an edge when a much stronger edge
+    // exists in the extended neighborhood.
+    edges.xy *= step(finalDelta, SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR * delta.xy);
+
+    return edges;
+}
+
+/**
+ * Color Edge Detection
+ *
+ * Returns edges as float2(left, top) in {0, 1}, or float2(-2.0, -2.0) as a
+ * sentinel when no edge was found. The per-neighbour delta is the maximum
+ * absolute difference over the three color channels.
+ *
+ * IMPORTANT NOTICE: color edge detection requires gamma-corrected colors, and
+ * thus 'colorTex' should be a non-sRGB texture.
+ */
+float2 SMAAColorEdgeDetectionPS(float2 texcoord,
+                                float4 offset[3],
+                                SMAATexture2D(colorTex)
+                                #if SMAA_PREDICATION
+                                , SMAATexture2D(predicationTex)
+                                #endif
+                                ) {
+    // Calculate the threshold:
+    #if SMAA_PREDICATION
+    // Wrap with SMAATexturePass2D like SMAALumaEdgeDetectionPS does; passing
+    // the texture raw only works by accident in the GLSL targets, where the
+    // pass macro is an identity.
+    float2 threshold = SMAACalculatePredicatedThreshold(texcoord, offset, SMAATexturePass2D(predicationTex));
+    #else
+    float2 threshold = float2(SMAA_THRESHOLD, SMAA_THRESHOLD);
+    #endif
+
+    // Calculate color deltas:
+    float4 delta;
+    float3 C = SMAASamplePoint(colorTex, texcoord).rgb;
+
+    float3 Cleft = SMAASamplePoint(colorTex, offset[0].xy).rgb;
+    float3 t = abs(C - Cleft);
+    delta.x = max(max(t.r, t.g), t.b);
+
+    float3 Ctop  = SMAASamplePoint(colorTex, offset[0].zw).rgb;
+    t = abs(C - Ctop);
+    delta.y = max(max(t.r, t.g), t.b);
+
+    // We do the usual threshold:
+    float2 edges = step(threshold, delta.xy);
+
+    // Then discard if there is no edge:
+    if (dot(edges, float2(1.0, 1.0)) == 0.0)
+        return float2(-2.0, -2.0);
+
+    // Calculate right and bottom deltas:
+    float3 Cright = SMAASamplePoint(colorTex, offset[1].xy).rgb;
+    t = abs(C - Cright);
+    delta.z = max(max(t.r, t.g), t.b);
+
+    float3 Cbottom  = SMAASamplePoint(colorTex, offset[1].zw).rgb;
+    t = abs(C - Cbottom);
+    delta.w = max(max(t.r, t.g), t.b);
+
+    // Calculate the maximum delta in the direct neighborhood:
+    float2 maxDelta = max(delta.xy, delta.zw);
+
+    // Calculate left-left and top-top deltas:
+    float3 Cleftleft  = SMAASamplePoint(colorTex, offset[2].xy).rgb;
+    t = abs(C - Cleftleft);
+    delta.z = max(max(t.r, t.g), t.b);
+
+    float3 Ctoptop = SMAASamplePoint(colorTex, offset[2].zw).rgb;
+    t = abs(C - Ctoptop);
+    delta.w = max(max(t.r, t.g), t.b);
+
+    // Calculate the final maximum delta:
+    maxDelta = max(maxDelta.xy, delta.zw);
+    float finalDelta = max(maxDelta.x, maxDelta.y);
+
+    // Local contrast adaptation: suppress an edge when a much stronger edge
+    // exists in the extended neighborhood.
+    edges.xy *= step(finalDelta, SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR * delta.xy);
+
+    return edges;
+}
+
+/**
+ * Depth Edge Detection
+ *
+ * Thresholds the depth delta against the left and top neighbours; returns
+ * edges as float2(left, top), or float2(-2.0, -2.0) when no edge is found.
+ */
+float2 SMAADepthEdgeDetectionPS(float2 texcoord,
+                                float4 offset[3],
+                                SMAATexture2D(depthTex)) {
+    float3 neighbours = SMAAGatherNeighbours(texcoord, offset, SMAATexturePass2D(depthTex));
+    float2 delta = abs(neighbours.xx - float2(neighbours.y, neighbours.z));
+    float2 edges = step(SMAA_DEPTH_THRESHOLD, delta);
+
+    if (dot(edges, float2(1.0, 1.0)) == 0.0)
+        return float2(-2.0, -2.0);
+
+    return edges;
+}
+
+//-----------------------------------------------------------------------------
+// Diagonal Search Functions
+
+#if !defined(SMAA_DISABLE_DIAG_DETECTION)
+
+/**
+ * Allows decoding two binary edge values from a single bilinear-filtered
+ * access.
+ */
+float2 SMAADecodeDiagBilinearAccess(float2 e) {
+    // Bilinear access for fetching 'e' have a 0.25 offset, and we are
+    // interested in the R and G edges:
+    //
+    // +---G---+-------+
+    // |   x o R   x   |
+    // +-------+-------+
+    //
+    // Then, if one of these edges is enabled:
+    //   Red:   (0.75 * X + 0.25 * 1) => 0.25 or 1.0
+    //   Green: (0.75 * 1 + 0.25 * X) => 0.75 or 1.0
+    //
+    // This function will unpack the values (mad + mul + round):
+    // wolframalpha.com: round(x * abs(5 * x - 5 * 0.75)) plot 0 to 1
+    e.r = e.r * abs(5.0 * e.r - 5.0 * 0.75);
+    return round(e);
+}
+
+// 4-component overload: applies the same decode to the r and b components of
+// two packed bilinear fetches.
+float4 SMAADecodeDiagBilinearAccess(float4 e) {
+    e.rb = e.rb * abs(5.0 * e.rb - 5.0 * 0.75);
+    return round(e);
+}
+
+/**
+ * These functions allow performing diagonal pattern searches.
+ *
+ * Steps along 'dir' one texel at a time while both edge channels stay
+ * active; returns float2(step count, last crossing value) via coord.zw.
+ */
+float2 SMAASearchDiag1(SMAATexture2D(edgesTex), float2 texcoord, float2 dir, out float2 e) {
+    // coord.z counts steps (starts at -1), coord.w holds dot(e, 0.5):
+    float4 coord = float4(texcoord, -1.0, 1.0);
+    float3 t = float3(SMAA_RT_METRICS.xy, 1.0);
+    while (coord.z < float(SMAA_MAX_SEARCH_STEPS_DIAG - 1) &&
+           coord.w > 0.9) {
+        coord.xyz = mad(t, float3(dir, 1.0), coord.xyz);
+        e = SMAASampleLevelZero(edgesTex, coord.xy).rg;
+        coord.w = dot(e, float2(0.5, 0.5));
+    }
+    return coord.zw;
+}
+
+// Variant of SMAASearchDiag1 for the opposite diagonal, using a +0.25 texel
+// bias so one bilinear fetch packs both edges (see @SearchDiag2Optimization).
+float2 SMAASearchDiag2(SMAATexture2D(edgesTex), float2 texcoord, float2 dir, out float2 e) {
+    float4 coord = float4(texcoord, -1.0, 1.0);
+    coord.x += 0.25 * SMAA_RT_METRICS.x; // See @SearchDiag2Optimization
+    float3 t = float3(SMAA_RT_METRICS.xy, 1.0);
+    while (coord.z < float(SMAA_MAX_SEARCH_STEPS_DIAG - 1) &&
+           coord.w > 0.9) {
+        coord.xyz = mad(t, float3(dir, 1.0), coord.xyz);
+
+        // @SearchDiag2Optimization
+        // Fetch both edges at once using bilinear filtering:
+        e = SMAASampleLevelZero(edgesTex, coord.xy).rg;
+        e = SMAADecodeDiagBilinearAccess(e);
+
+        // Non-optimized version:
+        // e.g = SMAASampleLevelZero(edgesTex, coord.xy).g;
+        // e.r = SMAASampleLevelZeroOffset(edgesTex, coord.xy, int2(1, 0)).r;
+
+        coord.w = dot(e, float2(0.5, 0.5));
+    }
+    return coord.zw;
+}
+
+/**
+ * Similar to SMAAArea, this calculates the area corresponding to a certain
+ * diagonal distance and crossing edges 'e'.
+ */
+float2 SMAAAreaDiag(SMAATexture2D(areaTex), float2 dist, float2 e, float offset) {
+    float2 texcoord = mad(float2(SMAA_AREATEX_MAX_DISTANCE_DIAG, SMAA_AREATEX_MAX_DISTANCE_DIAG), e, dist);
+
+    // We do a scale and bias for mapping to texel space:
+    texcoord = mad(SMAA_AREATEX_PIXEL_SIZE, texcoord, 0.5 * SMAA_AREATEX_PIXEL_SIZE);
+
+    // Diagonal areas are on the second half of the texture:
+    texcoord.x += 0.5;
+
+    // Move to proper place, according to the subpixel offset:
+    texcoord.y += SMAA_AREATEX_SUBTEX_SIZE * offset;
+
+    // Do it!
+    return SMAA_AREATEX_SELECT(SMAASampleLevelZero(areaTex, texcoord));
+}
+
+/**
+ * This searches for diagonal patterns and returns the corresponding weights.
+ *
+ * Handles both diagonal orientations: first the '/' diagonal (guarded by
+ * e.r), then the '\' diagonal. Weights accumulate into .rg.
+ */
+float2 SMAACalculateDiagWeights(SMAATexture2D(edgesTex), SMAATexture2D(areaTex), float2 texcoord, float2 e, float4 subsampleIndices) {
+    float2 weights = float2(0.0, 0.0);
+
+    // Search for the line ends:
+    float4 d;
+    float2 end;
+    if (e.r > 0.0) {
+        d.xz = SMAASearchDiag1(SMAATexturePass2D(edgesTex), texcoord, float2(-1.0, 1.0), end);
+        d.x += float(end.y > 0.9);
+    } else
+        d.xz = float2(0.0, 0.0);
+    d.yw = SMAASearchDiag1(SMAATexturePass2D(edgesTex), texcoord, float2(1.0, -1.0), end);
+
+    SMAA_BRANCH
+    if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3
+        // Fetch the crossing edges:
+        float4 coords = mad(float4(-d.x + 0.25, d.x, d.y, -d.y - 0.25), SMAA_RT_METRICS.xyxy, texcoord.xyxy);
+        float4 c;
+        c.xy = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).rg;
+        c.zw = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).rg;
+        c.yxwz = SMAADecodeDiagBilinearAccess(c.xyzw);
+
+        // Non-optimized version:
+        // float4 coords = mad(float4(-d.x, d.x, d.y, -d.y), SMAA_RT_METRICS.xyxy, texcoord.xyxy);
+        // float4 c;
+        // c.x = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).g;
+        // c.y = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2( 0, 0)).r;
+        // c.z = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).g;
+        // c.w = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, -1)).r;
+
+        // Merge crossing edges at each side into a single value:
+        float2 cc = mad(float2(2.0, 2.0), c.xz, c.yw);
+
+        // Remove the crossing edge if we didn't find the end of the line:
+        SMAAMovc(bool2(step(0.9, d.zw)), cc, float2(0.0, 0.0));
+
+        // Fetch the areas for this line:
+        weights += SMAAAreaDiag(SMAATexturePass2D(areaTex), d.xy, cc, subsampleIndices.z);
+    }
+
+    // Search for the line ends:
+    d.xz = SMAASearchDiag2(SMAATexturePass2D(edgesTex), texcoord, float2(-1.0, -1.0), end);
+    if (SMAASampleLevelZeroOffset(edgesTex, texcoord, int2(1, 0)).r > 0.0) {
+        d.yw = SMAASearchDiag2(SMAATexturePass2D(edgesTex), texcoord, float2(1.0, 1.0), end);
+        d.y += float(end.y > 0.9);
+    } else
+        d.yw = float2(0.0, 0.0);
+
+    SMAA_BRANCH
+    if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3
+        // Fetch the crossing edges:
+        float4 coords = mad(float4(-d.x, -d.x, d.y, d.y), SMAA_RT_METRICS.xyxy, texcoord.xyxy);
+        float4 c;
+        c.x = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).g;
+        c.y = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2( 0, -1)).r;
+        c.zw = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).gr;
+        float2 cc = mad(float2(2.0, 2.0), c.xz, c.yw);
+
+        // Remove the crossing edge if we didn't find the end of the line:
+        SMAAMovc(bool2(step(0.9, d.zw)), cc, float2(0.0, 0.0));
+
+        // Fetch the areas for this line:
+        weights += SMAAAreaDiag(SMAATexturePass2D(areaTex), d.xy, cc, subsampleIndices.w).gr;
+    }
+
+    return weights;
+}
+#endif
+
+//-----------------------------------------------------------------------------
+// Horizontal/Vertical Search Functions
+
+/**
+ * This allows to determine how much length should we add in the last step
+ * of the searches. It takes the bilinearly interpolated edge (see
+ * @PSEUDO_GATHER4), and adds 0, 1 or 2, depending on which edges and
+ * crossing edges are active.
+ *
+ * 'offset' selects the left (0.0) or right (0.5) half of the search texture.
+ */
+float SMAASearchLength(SMAATexture2D(searchTex), float2 e, float offset) {
+    // The texture is flipped vertically, with left and right cases taking half
+    // of the space horizontally:
+    float2 scale = SMAA_SEARCHTEX_SIZE * float2(0.5, -1.0);
+    float2 bias = SMAA_SEARCHTEX_SIZE * float2(offset, 1.0);
+
+    // Scale and bias to access texel centers:
+    scale += float2(-1.0,  1.0);
+    bias  += float2( 0.5, -0.5);
+
+    // Convert from pixel coordinates to texcoords:
+    // (We use SMAA_SEARCHTEX_PACKED_SIZE because the texture is cropped)
+    scale *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE;
+    bias *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE;
+
+    // Lookup the search texture:
+    return SMAA_SEARCHTEX_SELECT(SMAASampleLevelZero(searchTex, mad(scale, e, bias)));
+}
+
+/**
+ * Horizontal/vertical search functions for the 2nd pass.
+ *
+ * Walks left two texels at a time until an edge ends or a crossing edge is
+ * hit, then refines the final coordinate with SMAASearchLength.
+ */
+float SMAASearchXLeft(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+    /**
+     * @PSEUDO_GATHER4
+     * This texcoord has been offset by (-0.25, -0.125) in the vertex shader to
+     * sample between edge, thus fetching four edges in a row.
+     * Sampling with different offsets in each direction allows to disambiguate
+     * which edges are active from the four fetched ones.
+     */
+    float2 e = float2(0.0, 1.0);
+    while (texcoord.x > end &&
+           e.g > 0.8281 && // Is there some edge not activated?
+           e.r == 0.0) { // Or is there a crossing edge that breaks the line?
+        e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+        texcoord = mad(-float2(2.0, 0.0), SMAA_RT_METRICS.xy, texcoord);
+    }
+
+    float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e, 0.0), 3.25);
+    return mad(SMAA_RT_METRICS.x, offset, texcoord.x);
+
+    // Non-optimized version:
+    // We correct the previous (-0.25, -0.125) offset we applied:
+    // texcoord.x += 0.25 * SMAA_RT_METRICS.x;
+
+    // The searches are bias by 1, so adjust the coords accordingly:
+    // texcoord.x += SMAA_RT_METRICS.x;
+
+    // Disambiguate the length added by the last step:
+    // texcoord.x += 2.0 * SMAA_RT_METRICS.x; // Undo last step
+    // texcoord.x -= SMAA_RT_METRICS.x * (255.0 / 127.0) * SMAASearchLength(SMAATexturePass2D(searchTex), e, 0.0);
+    // return mad(SMAA_RT_METRICS.x, offset, texcoord.x);
+}
+
+// Mirror of SMAASearchXLeft, walking right (uses the 0.5 half of searchTex).
+float SMAASearchXRight(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+    float2 e = float2(0.0, 1.0);
+    while (texcoord.x < end &&
+           e.g > 0.8281 && // Is there some edge not activated?
+           e.r == 0.0) { // Or is there a crossing edge that breaks the line?
+        e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+        texcoord = mad(float2(2.0, 0.0), SMAA_RT_METRICS.xy, texcoord);
+    }
+    float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e, 0.5), 3.25);
+    return mad(-SMAA_RT_METRICS.x, offset, texcoord.x);
+}
+
+// Vertical counterpart of SMAASearchXLeft, walking up (edge channels swapped
+// via the .gr swizzle before the search-texture lookup).
+float SMAASearchYUp(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+    float2 e = float2(1.0, 0.0);
+    while (texcoord.y > end &&
+           e.r > 0.8281 && // Is there some edge not activated?
+           e.g == 0.0) { // Or is there a crossing edge that breaks the line?
+        e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+        texcoord = mad(-float2(0.0, 2.0), SMAA_RT_METRICS.xy, texcoord);
+    }
+    float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e.gr, 0.0), 3.25);
+    return mad(SMAA_RT_METRICS.y, offset, texcoord.y);
+}
+
+// Vertical counterpart of SMAASearchXRight, walking down.
+float SMAASearchYDown(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+    float2 e = float2(1.0, 0.0);
+    while (texcoord.y < end &&
+           e.r > 0.8281 && // Is there some edge not activated?
+           e.g == 0.0) { // Or is there a crossing edge that breaks the line?
+        e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+        texcoord = mad(float2(0.0, 2.0), SMAA_RT_METRICS.xy, texcoord);
+    }
+    float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e.gr, 0.5), 3.25);
+    return mad(-SMAA_RT_METRICS.y, offset, texcoord.y);
+}
+
+/**
+ * Ok, we have the distance and both crossing edges. So, what are the areas
+ * at each side of current edge?
+ *
+ * Looks the answer up in the precomputed area texture; 'offset' selects the
+ * subsample row for SMAA S2x/T2x modes (0 for SMAA 1x).
+ */
+float2 SMAAArea(SMAATexture2D(areaTex), float2 dist, float e1, float e2, float offset) {
+    // Rounding prevents precision errors of bilinear filtering:
+    float2 texcoord = mad(float2(SMAA_AREATEX_MAX_DISTANCE, SMAA_AREATEX_MAX_DISTANCE), round(4.0 * float2(e1, e2)), dist);
+
+    // We do a scale and bias for mapping to texel space:
+    texcoord = mad(SMAA_AREATEX_PIXEL_SIZE, texcoord, 0.5 * SMAA_AREATEX_PIXEL_SIZE);
+
+    // Move to proper place, according to the subpixel offset:
+    texcoord.y = mad(SMAA_AREATEX_SUBTEX_SIZE, offset, texcoord.y);
+
+    // Do it!
+    return SMAA_AREATEX_SELECT(SMAASampleLevelZero(areaTex, texcoord));
+}
+
+//-----------------------------------------------------------------------------
+// Corner Detection Functions
+
+// Scales down the horizontal blending weights where a corner pattern is
+// detected, by SMAA_CORNER_ROUNDING_NORM (no-op when corner detection is
+// compiled out).
+void SMAADetectHorizontalCornerPattern(SMAATexture2D(edgesTex), inout float2 weights, float4 texcoord, float2 d) {
+    #if !defined(SMAA_DISABLE_CORNER_DETECTION)
+    float2 leftRight = step(d.xy, d.yx);
+    float2 rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * leftRight;
+
+    rounding /= leftRight.x + leftRight.y; // Reduce blending for pixels in the center of a line.
+
+    float2 factor = float2(1.0, 1.0);
+    factor.x -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2(0,  1)).r;
+    factor.x -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2(1,  1)).r;
+    factor.y -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2(0, -2)).r;
+    factor.y -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2(1, -2)).r;
+
+    weights *= saturate(factor);
+    #endif
+}
+
+// Vertical counterpart of SMAADetectHorizontalCornerPattern (checks the green
+// edge channel with transposed offsets).
+void SMAADetectVerticalCornerPattern(SMAATexture2D(edgesTex), inout float2 weights, float4 texcoord, float2 d) {
+    #if !defined(SMAA_DISABLE_CORNER_DETECTION)
+    float2 leftRight = step(d.xy, d.yx);
+    float2 rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * leftRight;
+
+    rounding /= leftRight.x + leftRight.y;
+
+    float2 factor = float2(1.0, 1.0);
+    factor.x -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2( 1, 0)).g;
+    factor.x -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2( 1, 1)).g;
+    factor.y -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2(-2, 0)).g;
+    factor.y -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2(-2, 1)).g;
+
+    weights *= saturate(factor);
+    #endif
+}
+
+//-----------------------------------------------------------------------------
+// Blending Weight Calculation Pixel Shader (Second Pass)
+
+/**
+ * Blending Weight Calculation Pixel Shader (Second Pass)
+ *
+ * Converts the edges texture into per-direction blending weights:
+ * .rg for horizontal (left/right) and .ba for vertical (top/bottom).
+ */
+float4 SMAABlendingWeightCalculationPS(float2 texcoord,
+                                       float2 pixcoord,
+                                       float4 offset[3],
+                                       SMAATexture2D(edgesTex),
+                                       SMAATexture2D(areaTex),
+                                       SMAATexture2D(searchTex),
+                                       float4 subsampleIndices) { // Just pass zero for SMAA 1x, see @SUBSAMPLE_INDICES.
+    float4 weights = float4(0.0, 0.0, 0.0, 0.0);
+
+    float2 e = SMAASample(edgesTex, texcoord).rg;
+
+    SMAA_BRANCH
+    if (e.g > 0.0) { // Edge at north
+        #if !defined(SMAA_DISABLE_DIAG_DETECTION)
+        // Diagonals have both north and west edges, so searching for them in
+        // one of the boundaries is enough.
+        weights.rg = SMAACalculateDiagWeights(SMAATexturePass2D(edgesTex), SMAATexturePass2D(areaTex), texcoord, e, subsampleIndices);
+
+        // We give priority to diagonals, so if we find a diagonal we skip
+        // horizontal/vertical processing.
+        SMAA_BRANCH
+        if (weights.r == -weights.g) { // weights.r + weights.g == 0.0
+        #endif
+
+        float2 d;
+
+        // Find the distance to the left:
+        float3 coords;
+        coords.x = SMAASearchXLeft(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[0].xy, offset[2].x);
+        coords.y = offset[1].y; // offset[1].y = texcoord.y - 0.25 * SMAA_RT_METRICS.y (@CROSSING_OFFSET)
+        d.x = coords.x;
+
+        // Now fetch the left crossing edges, two at a time using bilinear
+        // filtering. Sampling at -0.25 (see @CROSSING_OFFSET) enables to
+        // discern what value each edge has:
+        float e1 = SMAASampleLevelZero(edgesTex, coords.xy).r;
+
+        // Find the distance to the right:
+        coords.z = SMAASearchXRight(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[0].zw, offset[2].y);
+        d.y = coords.z;
+
+        // We want the distances to be in pixel units (doing this here allow to
+        // better interleave arithmetic and memory accesses):
+        d = abs(round(mad(SMAA_RT_METRICS.zz, d, -pixcoord.xx)));
+
+        // SMAAArea below needs a sqrt, as the areas texture is compressed
+        // quadratically:
+        float2 sqrt_d = sqrt(d);
+
+        // Fetch the right crossing edges:
+        float e2 = SMAASampleLevelZeroOffset(edgesTex, coords.zy, int2(1, 0)).r;
+
+        // Ok, we know what this pattern looks like, now it is time to get
+        // the actual area:
+        weights.rg = SMAAArea(SMAATexturePass2D(areaTex), sqrt_d, e1, e2, subsampleIndices.y);
+
+        // Fix corners:
+        coords.y = texcoord.y;
+        SMAADetectHorizontalCornerPattern(SMAATexturePass2D(edgesTex), weights.rg, coords.xyzy, d);
+
+        #if !defined(SMAA_DISABLE_DIAG_DETECTION)
+        } else
+            e.r = 0.0; // Skip vertical processing.
+        #endif
+    }
+
+    SMAA_BRANCH
+    if (e.r > 0.0) { // Edge at west
+        float2 d;
+
+        // Find the distance to the top:
+        float3 coords;
+        coords.y = SMAASearchYUp(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[1].xy, offset[2].z);
+        coords.x = offset[0].x; // offset[0].x = texcoord.x - 0.25 * SMAA_RT_METRICS.x (@CROSSING_OFFSET)
+        d.x = coords.y;
+
+        // Fetch the top crossing edges:
+        float e1 = SMAASampleLevelZero(edgesTex, coords.xy).g;
+
+        // Find the distance to the bottom:
+        coords.z = SMAASearchYDown(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[1].zw, offset[2].w);
+        d.y = coords.z;
+
+        // We want the distances to be in pixel units:
+        d = abs(round(mad(SMAA_RT_METRICS.ww, d, -pixcoord.yy)));
+
+        // SMAAArea below needs a sqrt, as the areas texture is compressed
+        // quadratically:
+        float2 sqrt_d = sqrt(d);
+
+        // Fetch the bottom crossing edges:
+        float e2 = SMAASampleLevelZeroOffset(edgesTex, coords.xz, int2(0, 1)).g;
+
+        // Get the area for this direction:
+        weights.ba = SMAAArea(SMAATexturePass2D(areaTex), sqrt_d, e1, e2, subsampleIndices.x);
+
+        // Fix corners:
+        coords.x = texcoord.x;
+        SMAADetectVerticalCornerPattern(SMAATexturePass2D(edgesTex), weights.ba, coords.xyxz, d);
+    }
+
+    return weights;
+}
+
+//-----------------------------------------------------------------------------
+// Neighborhood Blending Pixel Shader (Third Pass)
+
+/**
+ * Neighborhood Blending Pixel Shader (Third Pass)
+ *
+ * Blends each pixel with its dominant-direction neighbor using the weights
+ * computed in the second pass, exploiting bilinear filtering to do the mix
+ * in a single fetch per direction.
+ */
+float4 SMAANeighborhoodBlendingPS(float2 texcoord,
+                                  float4 offset,
+                                  SMAATexture2D(colorTex),
+                                  SMAATexture2D(blendTex)
+                                  #if SMAA_REPROJECTION
+                                  , SMAATexture2D(velocityTex)
+                                  #endif
+                                  ) {
+    // Fetch the blending weights for current pixel:
+    float4 a;
+    a.x = SMAASample(blendTex, offset.xy).a; // Right
+    a.y = SMAASample(blendTex, offset.zw).g; // Top
+    a.wz = SMAASample(blendTex, texcoord).xz; // Bottom / Left
+
+    // Is there any blending weight with a value greater than 0.0?
+    SMAA_BRANCH
+    if (dot(a, float4(1.0, 1.0, 1.0, 1.0)) < 1e-5) {
+        // No blending needed; pass the color through.
+        float4 color = SMAASampleLevelZero(colorTex, texcoord);
+
+        #if SMAA_REPROJECTION
+        float2 velocity = SMAA_DECODE_VELOCITY(SMAASampleLevelZero(velocityTex, texcoord));
+
+        // Pack velocity into the alpha channel:
+        color.a = sqrt(5.0 * length(velocity));
+        #endif
+
+        return color;
+    } else {
+        bool h = max(a.x, a.z) > max(a.y, a.w); // max(horizontal) > max(vertical)
+
+        // Calculate the blending offsets:
+        float4 blendingOffset = float4(0.0, a.y, 0.0, a.w);
+        float2 blendingWeight = a.yw;
+        SMAAMovc(bool4(h, h, h, h), blendingOffset, float4(a.x, 0.0, a.z, 0.0));
+        SMAAMovc(bool2(h, h), blendingWeight, a.xz);
+        blendingWeight /= dot(blendingWeight, float2(1.0, 1.0));
+
+        // Calculate the texture coordinates:
+        float4 blendingCoord = mad(blendingOffset, float4(SMAA_RT_METRICS.xy, -SMAA_RT_METRICS.xy), texcoord.xyxy);
+
+        // We exploit bilinear filtering to mix current pixel with the chosen
+        // neighbor:
+        float4 color = blendingWeight.x * SMAASampleLevelZero(colorTex, blendingCoord.xy);
+        color += blendingWeight.y * SMAASampleLevelZero(colorTex, blendingCoord.zw);
+
+        #if SMAA_REPROJECTION
+        // Antialias velocity for proper reprojection in a later stage:
+        float2 velocity = blendingWeight.x * SMAA_DECODE_VELOCITY(SMAASampleLevelZero(velocityTex, blendingCoord.xy));
+        velocity += blendingWeight.y * SMAA_DECODE_VELOCITY(SMAASampleLevelZero(velocityTex, blendingCoord.zw));
+
+        // Pack velocity into the alpha channel:
+        color.a = sqrt(5.0 * length(velocity));
+        #endif
+
+        return color;
+    }
+}
+
+//-----------------------------------------------------------------------------
+// Temporal Resolve Pixel Shader (Optional Pass)
+
+/**
+ * Temporal Resolve Pixel Shader (Optional Pass)
+ *
+ * Blends the current and previous frames 50/50. With SMAA_REPROJECTION, the
+ * previous frame is fetched along the (inverted) velocity vector, and its
+ * contribution is attenuated where velocities disagree to reduce ghosting.
+ */
+float4 SMAAResolvePS(float2 texcoord,
+                     SMAATexture2D(currentColorTex),
+                     SMAATexture2D(previousColorTex)
+                     #if SMAA_REPROJECTION
+                     , SMAATexture2D(velocityTex)
+                     #endif
+                     ) {
+    #if SMAA_REPROJECTION
+    // Velocity is assumed to be calculated for motion blur, so we need to
+    // inverse it for reprojection:
+    float2 velocity = -SMAA_DECODE_VELOCITY(SMAASamplePoint(velocityTex, texcoord).rg);
+
+    // Fetch current pixel:
+    float4 current = SMAASamplePoint(currentColorTex, texcoord);
+
+    // Reproject current coordinates and fetch previous pixel:
+    float4 previous = SMAASamplePoint(previousColorTex, texcoord + velocity);
+
+    // Attenuate the previous pixel if the velocity is different:
+    // (alpha holds sqrt(5 * |velocity|), packed by the blending pass)
+    float delta = abs(current.a * current.a - previous.a * previous.a) / 5.0;
+    float weight = 0.5 * saturate(1.0 - sqrt(delta) * SMAA_REPROJECTION_WEIGHT_SCALE);
+
+    // Blend the pixels according to the calculated weight:
+    return lerp(current, previous, weight);
+    #else
+    // Just blend the pixels:
+    float4 current = SMAASamplePoint(currentColorTex, texcoord);
+    float4 previous = SMAASamplePoint(previousColorTex, texcoord);
+    return lerp(current, previous, 0.5);
+    #endif
+}
+
+//-----------------------------------------------------------------------------
+// Separate Multisamples Pixel Shader (Optional Pass)
+
+#ifdef SMAALoad
+// Splits a 2x-multisampled source into two single-sample targets (one per
+// sample index). Only available when the platform provides SMAALoad.
+void SMAASeparatePS(float4 position,
+                    float2 texcoord,
+                    out float4 target0,
+                    out float4 target1,
+                    SMAATexture2DMS2(colorTexMS)) {
+    int2 pos = int2(position.xy);
+    target0 = SMAALoad(colorTexMS, pos, 0);
+    target1 = SMAALoad(colorTexMS, pos, 1);
+}
+#endif
+
+//-----------------------------------------------------------------------------
+#endif // SMAA_INCLUDE_PS
+
+layout(rgba8, binding = 0, set = 3) uniform image2D imgOutput;
+
+layout(binding = 1, set = 2) uniform sampler2D inputImg;
+layout( binding = 2 ) uniform invResolution
+{
+ vec2 invResolution_data;
+};
+
+// Compute entry point: each invocation processes a 4x4 tile of texels,
+// running SMAA color edge detection per texel and writing detected edges to
+// imgOutput. Texels with no edge (sentinel float2(-2.0, -2.0)) are skipped.
+void main()
+{
+    // Integer base coordinate of this invocation's 4x4 tile. Kept as ivec2
+    // (the original stored it in a vec2, forcing a pointless int -> float ->
+    // int round-trip on every texel).
+    ivec2 loc = ivec2(gl_GlobalInvocationID.xy) * 4;
+    for (int i = 0; i < 4; i++)
+    {
+        for (int j = 0; j < 4; j++)
+        {
+            ivec2 texelCoord = ivec2(loc.x + i, loc.y + j);
+            // NOTE(review): despite its name, invResolution_data is divided
+            // by here (texel -> normalized UV), so it must hold the full
+            // resolution — confirm against the C# side that fills this
+            // uniform buffer.
+            vec2 coord = (vec2(texelCoord) + vec2(0.5)) / invResolution_data;
+            vec4 offset[3];
+            SMAAEdgeDetectionVS(coord, offset);
+            vec2 oColor = SMAAColorEdgeDetectionPS(coord, offset, inputImg);
+            if (oColor != float2(-2.0, -2.0))
+            {
+                imageStore(imgOutput, texelCoord, vec4(oColor, 0.0, 1.0));
+            }
+        }
+    }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaEdge.spv b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaEdge.spv
new file mode 100644
index 00000000..1062a9e3
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaEdge.spv
Binary files differ
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaNeighbour.glsl b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaNeighbour.glsl
new file mode 100644
index 00000000..df30d727
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaNeighbour.glsl
@@ -0,0 +1,1403 @@
+#version 430 core
+#define SMAA_GLSL_4 1
+
+// Specialization constants: quality-preset selectors and the render-target
+// size used to build SMAA_RT_METRICS.
+// NOTE(review): SMAA_PRESET_* are specialization *constants*, not
+// preprocessor macros, so the `#if defined(SMAA_PRESET_*)` blocks further
+// down can never be taken; the SMAA_THRESHOLD/SMAA_MAX_SEARCH_STEPS
+// defaults are always what gets compiled — confirm this is intended.
+layout (constant_id = 0) const int SMAA_PRESET_LOW = 0;
+layout (constant_id = 1) const int SMAA_PRESET_MEDIUM = 0;
+layout (constant_id = 2) const int SMAA_PRESET_HIGH = 0;
+layout (constant_id = 3) const int SMAA_PRESET_ULTRA = 0;
+layout (constant_id = 4) const float METRIC_WIDTH = 1920.0;
+layout (constant_id = 5) const float METRIC_HEIGHT = 1080.0;
+
+// (x, y) = 1/size, (z, w) = size, the layout SMAA.h expects.
+#define SMAA_RT_METRICS float4(1.0 / METRIC_WIDTH, 1.0 / METRIC_HEIGHT, METRIC_WIDTH, METRIC_HEIGHT)
+
+layout (local_size_x = 16, local_size_y = 16) in;
+/**
+ * Copyright (C) 2013 Jorge Jimenez (jorge@iryoku.com)
+ * Copyright (C) 2013 Jose I. Echevarria (joseignacioechevarria@gmail.com)
+ * Copyright (C) 2013 Belen Masia (bmasia@unizar.es)
+ * Copyright (C) 2013 Fernando Navarro (fernandn@microsoft.com)
+ * Copyright (C) 2013 Diego Gutierrez (diegog@unizar.es)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished to
+ * do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software. As clarification, there
+ * is no requirement that the copyright notice and permission be included in
+ * binary distributions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+/**
+ * _______ ___ ___ ___ ___
+ * / || \/ | / \ / \
+ * | (---- | \ / | / ^ \ / ^ \
+ * \ \ | |\/| | / /_\ \ / /_\ \
+ * ----) | | | | | / _____ \ / _____ \
+ * |_______/ |__| |__| /__/ \__\ /__/ \__\
+ *
+ * E N H A N C E D
+ * S U B P I X E L M O R P H O L O G I C A L A N T I A L I A S I N G
+ *
+ * http://www.iryoku.com/smaa/
+ *
+ * Hi, welcome aboard!
+ *
+ * Here you'll find instructions to get the shader up and running as fast as
+ * possible.
+ *
+ * IMPORTANT NOTICE: when updating, remember to update both this file and the
+ * precomputed textures! They may change from version to version.
+ *
+ * The shader has three passes, chained together as follows:
+ *
+ * |input|------------------
+ * v |
+ * [ SMAA*EdgeDetection ] |
+ * v |
+ * |edgesTex| |
+ * v |
+ * [ SMAABlendingWeightCalculation ] |
+ * v |
+ * |blendTex| |
+ * v |
+ * [ SMAANeighborhoodBlending ] <------
+ * v
+ * |output|
+ *
+ * Note that each [pass] has its own vertex and pixel shader. Remember to use
+ * oversized triangles instead of quads to avoid overshading along the
+ * diagonal.
+ *
+ * You've three edge detection methods to choose from: luma, color or depth.
+ * They represent different quality/performance and anti-aliasing/sharpness
+ * tradeoffs, so our recommendation is for you to choose the one that best
+ * suits your particular scenario:
+ *
+ * - Depth edge detection is usually the fastest but it may miss some edges.
+ *
+ * - Luma edge detection is usually more expensive than depth edge detection,
+ * but catches visible edges that depth edge detection can miss.
+ *
+ * - Color edge detection is usually the most expensive one but catches
+ * chroma-only edges.
+ *
+ * For quickstarters: just use luma edge detection.
+ *
+ * The general advice is to not rush the integration process and ensure each
+ * step is done correctly (don't try to integrate SMAA T2x with predicated edge
+ * detection from the start!). Ok then, let's go!
+ *
+ * 1. The first step is to create two RGBA temporal render targets for holding
+ * |edgesTex| and |blendTex|.
+ *
+ * In DX10 or DX11, you can use a RG render target for the edges texture.
+ * In the case of NVIDIA GPUs, using RG render targets seems to actually be
+ * slower.
+ *
+ * On the Xbox 360, you can use the same render target for resolving both
+ * |edgesTex| and |blendTex|, as they aren't needed simultaneously.
+ *
+ * 2. Both temporal render targets |edgesTex| and |blendTex| must be cleared
+ * each frame. Do not forget to clear the alpha channel!
+ *
+ * 3. The next step is loading the two supporting precalculated textures,
+ * 'areaTex' and 'searchTex'. You'll find them in the 'Textures' folder as
+ * C++ headers, and also as regular DDS files. They'll be needed for the
+ * 'SMAABlendingWeightCalculation' pass.
+ *
+ * If you use the C++ headers, be sure to load them in the format specified
+ * inside of them.
+ *
+ * You can also compress 'areaTex' and 'searchTex' using BC5 and BC4
+ * respectively, if you have that option in your content processor pipeline.
+ *    When compressing them, you get a non-perceptible quality decrease, and a
+ * marginal performance increase.
+ *
+ * 4. All samplers must be set to linear filtering and clamp.
+ *
+ * After you get the technique working, remember that 64-bit inputs have
+ * half-rate linear filtering on GCN.
+ *
+ * If SMAA is applied to 64-bit color buffers, switching to point filtering
+ *    when accessing them will increase the performance. Search for
+ * 'SMAASamplePoint' to see which textures may benefit from point
+ * filtering, and where (which is basically the color input in the edge
+ * detection and resolve passes).
+ *
+ * 5. All texture reads and buffer writes must be non-sRGB, with the exception
+ * of the input read and the output write in
+ * 'SMAANeighborhoodBlending' (and only in this pass!). If sRGB reads in
+ * this last pass are not possible, the technique will work anyway, but
+ * will perform antialiasing in gamma space.
+ *
+ * IMPORTANT: for best results the input read for the color/luma edge
+ * detection should *NOT* be sRGB.
+ *
+ * 6. Before including SMAA.h you'll have to setup the render target metrics,
+ * the target and any optional configuration defines. Optionally you can
+ * use a preset.
+ *
+ * You have the following targets available:
+ * SMAA_HLSL_3
+ * SMAA_HLSL_4
+ * SMAA_HLSL_4_1
+ * SMAA_GLSL_3 *
+ * SMAA_GLSL_4 *
+ *
+ * * (See SMAA_INCLUDE_VS and SMAA_INCLUDE_PS below).
+ *
+ * And four presets:
+ *         SMAA_PRESET_LOW          (60% of the quality)
+ *         SMAA_PRESET_MEDIUM       (80% of the quality)
+ *         SMAA_PRESET_HIGH         (95% of the quality)
+ *         SMAA_PRESET_ULTRA        (99% of the quality)
+ *
+ * For example:
+ * #define SMAA_RT_METRICS float4(1.0 / 1280.0, 1.0 / 720.0, 1280.0, 720.0)
+ * #define SMAA_HLSL_4
+ * #define SMAA_PRESET_HIGH
+ * #include "SMAA.h"
+ *
+ * Note that SMAA_RT_METRICS doesn't need to be a macro, it can be a
+ * uniform variable. The code is designed to minimize the impact of not
+ * using a constant value, but it is still better to hardcode it.
+ *
+ * Depending on how you encoded 'areaTex' and 'searchTex', you may have to
+ * add (and customize) the following defines before including SMAA.h:
+ * #define SMAA_AREATEX_SELECT(sample) sample.rg
+ * #define SMAA_SEARCHTEX_SELECT(sample) sample.r
+ *
+ * If your engine is already using porting macros, you can define
+ * SMAA_CUSTOM_SL, and define the porting functions by yourself.
+ *
+ * 7. Then, you'll have to setup the passes as indicated in the scheme above.
+ * You can take a look into SMAA.fx, to see how we did it for our demo.
+ * Checkout the function wrappers, you may want to copy-paste them!
+ *
+ * 8. It's recommended to validate the produced |edgesTex| and |blendTex|.
+ * You can use a screenshot from your engine to compare the |edgesTex|
+ * and |blendTex| produced inside of the engine with the results obtained
+ * with the reference demo.
+ *
+ * 9. After you get the last pass to work, it's time to optimize. You'll have
+ * to initialize a stencil buffer in the first pass (discard is already in
+ * the code), then mask execution by using it the second pass. The last
+ * pass should be executed in all pixels.
+ *
+ *
+ * After this point you can choose to enable predicated thresholding,
+ * temporal supersampling and motion blur integration:
+ *
+ * a) If you want to use predicated thresholding, take a look into
+ * SMAA_PREDICATION; you'll need to pass an extra texture in the edge
+ * detection pass.
+ *
+ * b) If you want to enable temporal supersampling (SMAA T2x):
+ *
+ * 1. The first step is to render using subpixel jitters. I won't go into
+ * detail, but it's as simple as moving each vertex position in the
+ * vertex shader, you can check how we do it in our DX10 demo.
+ *
+ * 2. Then, you must setup the temporal resolve. You may want to take a look
+ * into SMAAResolve for resolving 2x modes. After you get it working, you'll
+ * probably see ghosting everywhere. But fear not, you can enable the
+ * CryENGINE temporal reprojection by setting the SMAA_REPROJECTION macro.
+ * Check out SMAA_DECODE_VELOCITY if your velocity buffer is encoded.
+ *
+ * 3. The next step is to apply SMAA to each subpixel jittered frame, just as
+ * done for 1x.
+ *
+ * 4. At this point you should already have something usable, but for best
+ * results the proper area textures must be set depending on current jitter.
+ * For this, the parameter 'subsampleIndices' of
+ * 'SMAABlendingWeightCalculationPS' must be set as follows, for our T2x
+ * mode:
+ *
+ * @SUBSAMPLE_INDICES
+ *
+ * | S# | Camera Jitter | subsampleIndices |
+ * +----+------------------+---------------------+
+ * | 0 | ( 0.25, -0.25) | float4(1, 1, 1, 0) |
+ * | 1 | (-0.25, 0.25) | float4(2, 2, 2, 0) |
+ *
+ * These jitter positions assume a bottom-to-top y axis. S# stands for the
+ * sample number.
+ *
+ * More information about temporal supersampling here:
+ * http://iryoku.com/aacourse/downloads/13-Anti-Aliasing-Methods-in-CryENGINE-3.pdf
+ *
+ * c) If you want to enable spatial multisampling (SMAA S2x):
+ *
+ * 1. The scene must be rendered using MSAA 2x. The MSAA 2x buffer must be
+ * created with:
+ * - DX10: see below (*)
+ * - DX10.1: D3D10_STANDARD_MULTISAMPLE_PATTERN or
+ * - DX11: D3D11_STANDARD_MULTISAMPLE_PATTERN
+ *
+ * This allows to ensure that the subsample order matches the table in
+ * @SUBSAMPLE_INDICES.
+ *
+ * (*) In the case of DX10, we refer the reader to:
+ * - SMAA::detectMSAAOrder and
+ * - SMAA::msaaReorder
+ *
+ * These functions allow to match the standard multisample patterns by
+ * detecting the subsample order for a specific GPU, and reordering
+ * them appropriately.
+ *
+ * 2. A shader must be run to output each subsample into a separate buffer
+ * (DX10 is required). You can use SMAASeparate for this purpose, or just do
+ * it in an existing pass (for example, in the tone mapping pass, which has
+ * the advantage of feeding tone mapped subsamples to SMAA, which will yield
+ * better results).
+ *
+ * 3. The full SMAA 1x pipeline must be run for each separated buffer, storing
+ * the results in the final buffer. The second run should alpha blend with
+ * the existing final buffer using a blending factor of 0.5.
+ * 'subsampleIndices' must be adjusted as in the SMAA T2x case (see point
+ * b).
+ *
+ * d) If you want to enable temporal supersampling on top of SMAA S2x
+ * (which actually is SMAA 4x):
+ *
+ *     1. SMAA 4x consists of temporally jittering SMAA S2x, so the first step is
+ * to calculate SMAA S2x for current frame. In this case, 'subsampleIndices'
+ * must be set as follows:
+ *
+ * | F# | S# | Camera Jitter | Net Jitter | subsampleIndices |
+ * +----+----+--------------------+-------------------+----------------------+
+ * | 0 | 0 | ( 0.125, 0.125) | ( 0.375, -0.125) | float4(5, 3, 1, 3) |
+ * | 0 | 1 | ( 0.125, 0.125) | (-0.125, 0.375) | float4(4, 6, 2, 3) |
+ * +----+----+--------------------+-------------------+----------------------+
+ * | 1 | 2 | (-0.125, -0.125) | ( 0.125, -0.375) | float4(3, 5, 1, 4) |
+ * | 1 | 3 | (-0.125, -0.125) | (-0.375, 0.125) | float4(6, 4, 2, 4) |
+ *
+ * These jitter positions assume a bottom-to-top y axis. F# stands for the
+ * frame number. S# stands for the sample number.
+ *
+ * 2. After calculating SMAA S2x for current frame (with the new subsample
+ * indices), previous frame must be reprojected as in SMAA T2x mode (see
+ * point b).
+ *
+ * e) If motion blur is used, you may want to do the edge detection pass
+ * together with motion blur. This has two advantages:
+ *
+ * 1. Pixels under heavy motion can be omitted from the edge detection process.
+ * For these pixels we can just store "no edge", as motion blur will take
+ * care of them.
+ * 2. The center pixel tap is reused.
+ *
+ * Note that in this case depth testing should be used instead of stenciling,
+ * as we have to write all the pixels in the motion blur pass.
+ *
+ * That's it!
+ */
+
+//-----------------------------------------------------------------------------
+// SMAA Presets
+
+/**
+ * Note that if you use one of these presets, the following configuration
+ * macros will be ignored if set in the "Configurable Defines" section.
+ */
+
+#if defined(SMAA_PRESET_LOW)
+#define SMAA_THRESHOLD 0.15
+#define SMAA_MAX_SEARCH_STEPS 4
+#define SMAA_DISABLE_DIAG_DETECTION
+#define SMAA_DISABLE_CORNER_DETECTION
+#elif defined(SMAA_PRESET_MEDIUM)
+#define SMAA_THRESHOLD 0.1
+#define SMAA_MAX_SEARCH_STEPS 8
+#define SMAA_DISABLE_DIAG_DETECTION
+#define SMAA_DISABLE_CORNER_DETECTION
+#elif defined(SMAA_PRESET_HIGH)
+#define SMAA_THRESHOLD 0.1
+#define SMAA_MAX_SEARCH_STEPS 16
+#define SMAA_MAX_SEARCH_STEPS_DIAG 8
+#define SMAA_CORNER_ROUNDING 25
+#elif defined(SMAA_PRESET_ULTRA)
+#define SMAA_THRESHOLD 0.05
+#define SMAA_MAX_SEARCH_STEPS 32
+#define SMAA_MAX_SEARCH_STEPS_DIAG 16
+#define SMAA_CORNER_ROUNDING 25
+#endif
+
+//-----------------------------------------------------------------------------
+// Configurable Defines
+
+/**
+ * SMAA_THRESHOLD specifies the threshold or sensitivity to edges.
+ * Lowering this value you will be able to detect more edges at the expense of
+ * performance.
+ *
+ * Range: [0, 0.5]
+ * 0.1 is a reasonable value, and allows to catch most visible edges.
+ * 0.05 is a rather overkill value, that allows to catch 'em all.
+ *
+ * If temporal supersampling is used, 0.2 could be a reasonable value, as low
+ * contrast edges are properly filtered by just 2x.
+ */
+#ifndef SMAA_THRESHOLD
+#define SMAA_THRESHOLD 0.1
+#endif
+
+/**
+ * SMAA_DEPTH_THRESHOLD specifies the threshold for depth edge detection.
+ *
+ * Range: depends on the depth range of the scene.
+ */
+#ifndef SMAA_DEPTH_THRESHOLD
+#define SMAA_DEPTH_THRESHOLD (0.1 * SMAA_THRESHOLD)
+#endif
+
+/**
+ * SMAA_MAX_SEARCH_STEPS specifies the maximum steps performed in the
+ * horizontal/vertical pattern searches, at each side of the pixel.
+ *
+ * In number of pixels, it's actually the double. So the maximum line length
+ * perfectly handled by, for example 16, is 64 (by perfectly, we meant that
+ * longer lines won't look as good, but still antialiased).
+ *
+ * Range: [0, 112]
+ */
+#ifndef SMAA_MAX_SEARCH_STEPS
+#define SMAA_MAX_SEARCH_STEPS 16
+#endif
+
+/**
+ * SMAA_MAX_SEARCH_STEPS_DIAG specifies the maximum steps performed in the
+ * diagonal pattern searches, at each side of the pixel. In this case we jump
+ * one pixel at time, instead of two.
+ *
+ * Range: [0, 20]
+ *
+ * On high-end machines it is cheap (between a 0.8x and 0.9x slower for 16
+ * steps), but it can have a significant impact on older machines.
+ *
+ * Define SMAA_DISABLE_DIAG_DETECTION to disable diagonal processing.
+ */
+#ifndef SMAA_MAX_SEARCH_STEPS_DIAG
+#define SMAA_MAX_SEARCH_STEPS_DIAG 8
+#endif
+
+/**
+ * SMAA_CORNER_ROUNDING specifies how much sharp corners will be rounded.
+ *
+ * Range: [0, 100]
+ *
+ * Define SMAA_DISABLE_CORNER_DETECTION to disable corner processing.
+ */
+#ifndef SMAA_CORNER_ROUNDING
+#define SMAA_CORNER_ROUNDING 25
+#endif
+
+/**
+ * If there is a neighbor edge that has SMAA_LOCAL_CONTRAST_FACTOR times
+ * bigger contrast than current edge, current edge will be discarded.
+ *
+ * This allows to eliminate spurious crossing edges, and is based on the fact
+ * that, if there is too much contrast in a direction, that will hide
+ * perceptually contrast in the other neighbors.
+ */
+#ifndef SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR
+#define SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR 2.0
+#endif
+
+/**
+ * Predicated thresholding allows to better preserve texture details and to
+ * improve performance, by decreasing the number of detected edges using an
+ * additional buffer like the light accumulation buffer, object ids or even the
+ * depth buffer (the depth buffer usage may be limited to indoor or short range
+ * scenes).
+ *
+ * It locally decreases the luma or color threshold if an edge is found in an
+ * additional buffer (so the global threshold can be higher).
+ *
+ * This method was developed by Playstation EDGE MLAA team, and used in
+ * Killzone 3, by using the light accumulation buffer. More information here:
+ * http://iryoku.com/aacourse/downloads/06-MLAA-on-PS3.pptx
+ */
+#ifndef SMAA_PREDICATION
+#define SMAA_PREDICATION 0
+#endif
+
+/**
+ * Threshold to be used in the additional predication buffer.
+ *
+ * Range: depends on the input, so you'll have to find the magic number that
+ * works for you.
+ */
+#ifndef SMAA_PREDICATION_THRESHOLD
+#define SMAA_PREDICATION_THRESHOLD 0.01
+#endif
+
+/**
+ * How much to scale the global threshold used for luma or color edge
+ * detection when using predication.
+ *
+ * Range: [1, 5]
+ */
+#ifndef SMAA_PREDICATION_SCALE
+#define SMAA_PREDICATION_SCALE 2.0
+#endif
+
+/**
+ * How much to locally decrease the threshold.
+ *
+ * Range: [0, 1]
+ */
+#ifndef SMAA_PREDICATION_STRENGTH
+#define SMAA_PREDICATION_STRENGTH 0.4
+#endif
+
+/**
+ * Temporal reprojection allows to remove ghosting artifacts when using
+ * temporal supersampling. We use the CryEngine 3 method which also introduces
+ * velocity weighting. This feature is of extreme importance for totally
+ * removing ghosting. More information here:
+ * http://iryoku.com/aacourse/downloads/13-Anti-Aliasing-Methods-in-CryENGINE-3.pdf
+ *
+ * Note that you'll need to setup a velocity buffer for enabling reprojection.
+ * For static geometry, saving the previous depth buffer is a viable
+ * alternative.
+ */
+#ifndef SMAA_REPROJECTION
+#define SMAA_REPROJECTION 0
+#endif
+
+/**
+ * SMAA_REPROJECTION_WEIGHT_SCALE controls the velocity weighting. It allows to
+ * remove ghosting trails behind the moving object, which are not removed by
+ * just using reprojection. Using low values will exhibit ghosting, while using
+ * high values will disable temporal supersampling under motion.
+ *
+ * Behind the scenes, velocity weighting removes temporal supersampling when
+ * the velocity of the subsamples differs (meaning they are different objects).
+ *
+ * Range: [0, 80]
+ */
+#ifndef SMAA_REPROJECTION_WEIGHT_SCALE
+#define SMAA_REPROJECTION_WEIGHT_SCALE 30.0
+#endif
+
+/**
+ * On some compilers, discard cannot be used in vertex shaders. Thus, they need
+ * to be compiled separately.
+ */
+#ifndef SMAA_INCLUDE_VS
+#define SMAA_INCLUDE_VS 1
+#endif
+#ifndef SMAA_INCLUDE_PS
+#define SMAA_INCLUDE_PS 1
+#endif
+
+//-----------------------------------------------------------------------------
+// Texture Access Defines
+
+#ifndef SMAA_AREATEX_SELECT
+#if defined(SMAA_HLSL_3)
+#define SMAA_AREATEX_SELECT(sample) sample.ra
+#else
+#define SMAA_AREATEX_SELECT(sample) sample.rg
+#endif
+#endif
+
+#ifndef SMAA_SEARCHTEX_SELECT
+#define SMAA_SEARCHTEX_SELECT(sample) sample.r
+#endif
+
+#ifndef SMAA_DECODE_VELOCITY
+#define SMAA_DECODE_VELOCITY(sample) sample.rg
+#endif
+
+//-----------------------------------------------------------------------------
+// Non-Configurable Defines
+
+#define SMAA_AREATEX_MAX_DISTANCE 16
+#define SMAA_AREATEX_MAX_DISTANCE_DIAG 20
+#define SMAA_AREATEX_PIXEL_SIZE (1.0 / float2(160.0, 560.0))
+#define SMAA_AREATEX_SUBTEX_SIZE (1.0 / 7.0)
+#define SMAA_SEARCHTEX_SIZE float2(66.0, 33.0)
+#define SMAA_SEARCHTEX_PACKED_SIZE float2(64.0, 16.0)
+#define SMAA_CORNER_ROUNDING_NORM (float(SMAA_CORNER_ROUNDING) / 100.0)
+
+//-----------------------------------------------------------------------------
+// Porting Functions
+
+#if defined(SMAA_HLSL_3)
+#define SMAATexture2D(tex) sampler2D tex
+#define SMAATexturePass2D(tex) tex
+#define SMAASampleLevelZero(tex, coord) tex2Dlod(tex, float4(coord, 0.0, 0.0))
+#define SMAASampleLevelZeroPoint(tex, coord) tex2Dlod(tex, float4(coord, 0.0, 0.0))
+#define SMAASampleLevelZeroOffset(tex, coord, offset) tex2Dlod(tex, float4(coord + offset * SMAA_RT_METRICS.xy, 0.0, 0.0))
+#define SMAASample(tex, coord) tex2D(tex, coord)
+#define SMAASamplePoint(tex, coord) tex2D(tex, coord)
+#define SMAASampleOffset(tex, coord, offset) tex2D(tex, coord + offset * SMAA_RT_METRICS.xy)
+#define SMAA_FLATTEN [flatten]
+#define SMAA_BRANCH [branch]
+#endif
+#if defined(SMAA_HLSL_4) || defined(SMAA_HLSL_4_1)
+SamplerState LinearSampler { Filter = MIN_MAG_LINEAR_MIP_POINT; AddressU = Clamp; AddressV = Clamp; };
+SamplerState PointSampler { Filter = MIN_MAG_MIP_POINT; AddressU = Clamp; AddressV = Clamp; };
+#define SMAATexture2D(tex) Texture2D tex
+#define SMAATexturePass2D(tex) tex
+#define SMAASampleLevelZero(tex, coord) tex.SampleLevel(LinearSampler, coord, 0)
+#define SMAASampleLevelZeroPoint(tex, coord) tex.SampleLevel(PointSampler, coord, 0)
+#define SMAASampleLevelZeroOffset(tex, coord, offset) tex.SampleLevel(LinearSampler, coord, 0, offset)
+#define SMAASample(tex, coord) tex.Sample(LinearSampler, coord)
+#define SMAASamplePoint(tex, coord) tex.Sample(PointSampler, coord)
+#define SMAASampleOffset(tex, coord, offset) tex.Sample(LinearSampler, coord, offset)
+#define SMAA_FLATTEN [flatten]
+#define SMAA_BRANCH [branch]
+#define SMAATexture2DMS2(tex) Texture2DMS<float4, 2> tex
+#define SMAALoad(tex, pos, sample) tex.Load(pos, sample)
+#if defined(SMAA_HLSL_4_1)
+#define SMAAGather(tex, coord) tex.Gather(LinearSampler, coord, 0)
+#endif
+#endif
+#if defined(SMAA_GLSL_3) || defined(SMAA_GLSL_4)
+#define SMAATexture2D(tex) sampler2D tex
+#define SMAATexturePass2D(tex) tex
+#define SMAASampleLevelZero(tex, coord) textureLod(tex, coord, 0.0)
+#define SMAASampleLevelZeroPoint(tex, coord) textureLod(tex, coord, 0.0)
+#define SMAASampleLevelZeroOffset(tex, coord, offset) textureLodOffset(tex, coord, 0.0, offset)
+#define SMAASample(tex, coord) texture(tex, coord)
+#define SMAASamplePoint(tex, coord) texture(tex, coord)
+#define SMAASampleOffset(tex, coord, offset) texture(tex, coord, offset)
+#define SMAA_FLATTEN
+#define SMAA_BRANCH
+#define lerp(a, b, t) mix(a, b, t)
+#define saturate(a) clamp(a, 0.0, 1.0)
+#if defined(SMAA_GLSL_4)
+#define mad(a, b, c) fma(a, b, c)
+#define SMAAGather(tex, coord) textureGather(tex, coord)
+#else
+#define mad(a, b, c) (a * b + c)
+#endif
+#define float2 vec2
+#define float3 vec3
+#define float4 vec4
+#define int2 ivec2
+#define int3 ivec3
+#define int4 ivec4
+#define bool2 bvec2
+#define bool3 bvec3
+#define bool4 bvec4
+#endif
+
+#if !defined(SMAA_HLSL_3) && !defined(SMAA_HLSL_4) && !defined(SMAA_HLSL_4_1) && !defined(SMAA_GLSL_3) && !defined(SMAA_GLSL_4) && !defined(SMAA_CUSTOM_SL)
+#error you must define the shading language: SMAA_HLSL_*, SMAA_GLSL_* or SMAA_CUSTOM_SL
+#endif
+
+//-----------------------------------------------------------------------------
+// Misc functions
+
+/**
+ * Gathers current pixel, and the top-left neighbors.
+ */
+// Returns (current, left, top) single-channel samples for texcoord;
+// offset[0] carries the left/top neighbour UVs precomputed by
+// SMAAEdgeDetectionVS.
+float3 SMAAGatherNeighbours(float2 texcoord,
+                            float4 offset[3],
+                            SMAATexture2D(tex)) {
+    #ifdef SMAAGather
+    // One gather fetches the 2x2 footprint; .grb reorders the gathered taps
+    // so the result matches the (P, Pleft, Ptop) layout of the fallback path.
+    return SMAAGather(tex, texcoord + SMAA_RT_METRICS.xy * float2(-0.5, -0.5)).grb;
+    #else
+    // Fallback: three point samples.
+    float P = SMAASamplePoint(tex, texcoord).r;
+    float Pleft = SMAASamplePoint(tex, offset[0].xy).r;
+    float Ptop = SMAASamplePoint(tex, offset[0].zw).r;
+    return float3(P, Pleft, Ptop);
+    #endif
+}
+
+/**
+ * Adjusts the threshold by means of predication.
+ */
+// Computes a per-direction (left, top) edge-detection threshold from the
+// predication buffer: the global threshold is scaled up by
+// SMAA_PREDICATION_SCALE, then locally decreased by SMAA_PREDICATION_STRENGTH
+// wherever the predication buffer itself shows an edge.
+float2 SMAACalculatePredicatedThreshold(float2 texcoord,
+                                        float4 offset[3],
+                                        SMAATexture2D(predicationTex)) {
+    float3 neighbours = SMAAGatherNeighbours(texcoord, offset, SMAATexturePass2D(predicationTex));
+    // Contrast of the current sample against its left (y) and top (z) neighbours.
+    float2 delta = abs(neighbours.xx - neighbours.yz);
+    // 1.0 where the predication buffer exceeds its own threshold, else 0.0.
+    float2 edges = step(SMAA_PREDICATION_THRESHOLD, delta);
+    return SMAA_PREDICATION_SCALE * SMAA_THRESHOLD * (1.0 - SMAA_PREDICATION_STRENGTH * edges);
+}
+
+/**
+ * Conditional move:
+ */
+// Component-wise conditional move: variable[i] = cond[i] ? value[i] : variable[i].
+void SMAAMovc(bool2 cond, inout float2 variable, float2 value) {
+    SMAA_FLATTEN if (cond.x) variable.x = value.x;
+    SMAA_FLATTEN if (cond.y) variable.y = value.y;
+}
+
+// Four-component conditional move, built from two two-component moves.
+void SMAAMovc(bool4 cond, inout float4 variable, float4 value) {
+    SMAAMovc(cond.xy, variable.xy, value.xy);
+    SMAAMovc(cond.zw, variable.zw, value.zw);
+}
+
+
+#if SMAA_INCLUDE_VS
+//-----------------------------------------------------------------------------
+// Vertex Shaders
+
+/**
+ * Edge Detection Vertex Shader
+ */
+// Precomputes the neighbour UVs the edge-detection pixel shaders sample,
+// packed as .xy / .zw pairs:
+//   offset[0] = (left, top), offset[1] = (right, bottom),
+//   offset[2] = (left-left, top-top) — two texels away, for the
+//   local-contrast-adaptation deltas.
+void SMAAEdgeDetectionVS(float2 texcoord,
+                         out float4 offset[3]) {
+    offset[0] = mad(SMAA_RT_METRICS.xyxy, float4(-1.0, 0.0, 0.0, -1.0), texcoord.xyxy);
+    offset[1] = mad(SMAA_RT_METRICS.xyxy, float4( 1.0, 0.0, 0.0, 1.0), texcoord.xyxy);
+    offset[2] = mad(SMAA_RT_METRICS.xyxy, float4(-2.0, 0.0, 0.0, -2.0), texcoord.xyxy);
+}
+
+/**
+ * Blend Weight Calculation Vertex Shader
+ */
+// Precomputes pixel-space coordinates and search offsets for the blend-weight
+// pass. pixcoord is texcoord converted to pixels (UV * render-target size).
+void SMAABlendingWeightCalculationVS(float2 texcoord,
+                                     out float2 pixcoord,
+                                     out float4 offset[3]) {
+    pixcoord = texcoord * SMAA_RT_METRICS.zw;
+
+    // We will use these offsets for the searches later on (see @PSEUDO_GATHER4):
+    offset[0] = mad(SMAA_RT_METRICS.xyxy, float4(-0.25, -0.125, 1.25, -0.125), texcoord.xyxy);
+    offset[1] = mad(SMAA_RT_METRICS.xyxy, float4(-0.125, -0.25, -0.125, 1.25), texcoord.xyxy);
+
+    // And these for the searches, they indicate the ends of the loops:
+    // the horizontal/vertical walks stop SMAA_MAX_SEARCH_STEPS double-texel
+    // steps away in each direction.
+    offset[2] = mad(SMAA_RT_METRICS.xxyy,
+                    float4(-2.0, 2.0, -2.0, 2.0) * float(SMAA_MAX_SEARCH_STEPS),
+                    float4(offset[0].xz, offset[1].yw));
+}
+
+/**
+ * Neighborhood Blending Vertex Shader
+ */
+// Precomputes the UVs sampled by the final blending pass:
+// offset.xy = one texel right of texcoord, offset.zw = one texel below
+// (same direction convention as "bottom" in the edge-detection shaders).
+void SMAANeighborhoodBlendingVS(float2 texcoord,
+                                out float4 offset) {
+    offset = mad(SMAA_RT_METRICS.xyxy, float4( 1.0, 0.0, 0.0, 1.0), texcoord.xyxy);
+}
+#endif // SMAA_INCLUDE_VS
+
+#if SMAA_INCLUDE_PS
+//-----------------------------------------------------------------------------
+// Edge Detection Pixel Shaders (First Pass)
+
+/**
+ * Luma Edge Detection
+ *
+ * IMPORTANT NOTICE: luma edge detection requires gamma-corrected colors, and
+ * thus 'colorTex' should be a non-sRGB texture.
+ */
+// Luma edge detection. Returns edges.x = 1 if there is a left edge and
+// edges.y = 1 if there is a top edge (after local contrast adaptation), or
+// the sentinel float2(-2.0, -2.0) when no edge passes the threshold.
+// offset[] must come from SMAAEdgeDetectionVS.
+float2 SMAALumaEdgeDetectionPS(float2 texcoord,
+                               float4 offset[3],
+                               SMAATexture2D(colorTex)
+                               #if SMAA_PREDICATION
+                               , SMAATexture2D(predicationTex)
+                               #endif
+                               ) {
+    // Calculate the threshold:
+    #if SMAA_PREDICATION
+    float2 threshold = SMAACalculatePredicatedThreshold(texcoord, offset, SMAATexturePass2D(predicationTex));
+    #else
+    float2 threshold = float2(SMAA_THRESHOLD, SMAA_THRESHOLD);
+    #endif
+
+    // Calculate lumas:
+    // Rec. 709 luma coefficients.
+    float3 weights = float3(0.2126, 0.7152, 0.0722);
+    float L = dot(SMAASamplePoint(colorTex, texcoord).rgb, weights);
+
+    float Lleft = dot(SMAASamplePoint(colorTex, offset[0].xy).rgb, weights);
+    float Ltop = dot(SMAASamplePoint(colorTex, offset[0].zw).rgb, weights);
+
+    // We do the usual threshold:
+    float4 delta;
+    delta.xy = abs(L - float2(Lleft, Ltop));
+    float2 edges = step(threshold, delta.xy);
+
+    // Then discard if there is no edge:
+    // ((-2, -2) is the "no edge" sentinel checked by callers.)
+    if (dot(edges, float2(1.0, 1.0)) == 0.0)
+        return float2(-2.0, -2.0);
+
+    // Calculate right and bottom deltas:
+    float Lright = dot(SMAASamplePoint(colorTex, offset[1].xy).rgb, weights);
+    float Lbottom = dot(SMAASamplePoint(colorTex, offset[1].zw).rgb, weights);
+    delta.zw = abs(L - float2(Lright, Lbottom));
+
+    // Calculate the maximum delta in the direct neighborhood:
+    float2 maxDelta = max(delta.xy, delta.zw);
+
+    // Calculate left-left and top-top deltas:
+    float Lleftleft = dot(SMAASamplePoint(colorTex, offset[2].xy).rgb, weights);
+    float Ltoptop = dot(SMAASamplePoint(colorTex, offset[2].zw).rgb, weights);
+    delta.zw = abs(float2(Lleft, Ltop) - float2(Lleftleft, Ltoptop));
+
+    // Calculate the final maximum delta:
+    maxDelta = max(maxDelta.xy, delta.zw);
+    float finalDelta = max(maxDelta.x, maxDelta.y);
+
+    // Local contrast adaptation:
+    // drop an edge when a neighbouring edge has much higher contrast
+    // (SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR times), since it would mask this one.
+    edges.xy *= step(finalDelta, SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR * delta.xy);
+
+    return edges;
+}
+
+/**
+ * Color Edge Detection
+ *
+ * IMPORTANT NOTICE: color edge detection requires gamma-corrected colors, and
+ * thus 'colorTex' should be a non-sRGB texture.
+ */
+// Color edge detection. Like the luma variant, but the per-direction delta is
+// the maximum absolute difference across the R, G and B channels, so
+// chroma-only edges are caught too. Returns (left, top) edge flags, or the
+// sentinel float2(-2.0, -2.0) when no edge passes the threshold.
+float2 SMAAColorEdgeDetectionPS(float2 texcoord,
+                                float4 offset[3],
+                                SMAATexture2D(colorTex)
+                                #if SMAA_PREDICATION
+                                , SMAATexture2D(predicationTex)
+                                #endif
+                                ) {
+    // Calculate the threshold:
+    #if SMAA_PREDICATION
+    // NOTE(review): the luma variant wraps this argument in
+    // SMAATexturePass2D(...); harmless here since that macro is the identity
+    // in all current targets, but kept as-is for upstream fidelity.
+    float2 threshold = SMAACalculatePredicatedThreshold(texcoord, offset, predicationTex);
+    #else
+    float2 threshold = float2(SMAA_THRESHOLD, SMAA_THRESHOLD);
+    #endif
+
+    // Calculate color deltas:
+    float4 delta;
+    float3 C = SMAASamplePoint(colorTex, texcoord).rgb;
+
+    float3 Cleft = SMAASamplePoint(colorTex, offset[0].xy).rgb;
+    float3 t = abs(C - Cleft);
+    delta.x = max(max(t.r, t.g), t.b);
+
+    float3 Ctop = SMAASamplePoint(colorTex, offset[0].zw).rgb;
+    t = abs(C - Ctop);
+    delta.y = max(max(t.r, t.g), t.b);
+
+    // We do the usual threshold:
+    float2 edges = step(threshold, delta.xy);
+
+    // Then discard if there is no edge:
+    // ((-2, -2) is the "no edge" sentinel checked by callers.)
+    if (dot(edges, float2(1.0, 1.0)) == 0.0)
+        return float2(-2.0, -2.0);
+
+    // Calculate right and bottom deltas:
+    float3 Cright = SMAASamplePoint(colorTex, offset[1].xy).rgb;
+    t = abs(C - Cright);
+    delta.z = max(max(t.r, t.g), t.b);
+
+    float3 Cbottom = SMAASamplePoint(colorTex, offset[1].zw).rgb;
+    t = abs(C - Cbottom);
+    delta.w = max(max(t.r, t.g), t.b);
+
+    // Calculate the maximum delta in the direct neighborhood:
+    float2 maxDelta = max(delta.xy, delta.zw);
+
+    // Calculate left-left and top-top deltas:
+    float3 Cleftleft = SMAASamplePoint(colorTex, offset[2].xy).rgb;
+    t = abs(C - Cleftleft);
+    delta.z = max(max(t.r, t.g), t.b);
+
+    float3 Ctoptop = SMAASamplePoint(colorTex, offset[2].zw).rgb;
+    t = abs(C - Ctoptop);
+    delta.w = max(max(t.r, t.g), t.b);
+
+    // Calculate the final maximum delta:
+    maxDelta = max(maxDelta.xy, delta.zw);
+    float finalDelta = max(maxDelta.x, maxDelta.y);
+
+    // Local contrast adaptation:
+    // drop an edge when a neighbouring edge has much higher contrast, since
+    // it would perceptually mask this one.
+    edges.xy *= step(finalDelta, SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR * delta.xy);
+
+    return edges;
+}
+
+/**
+ * Depth Edge Detection
+ */
+// Depth edge detection: flags a (left, top) edge where the depth difference
+// against the corresponding neighbour exceeds SMAA_DEPTH_THRESHOLD.
+// Returns the sentinel float2(-2.0, -2.0) when neither direction has an edge.
+float2 SMAADepthEdgeDetectionPS(float2 texcoord,
+                                float4 offset[3],
+                                SMAATexture2D(depthTex)) {
+    // neighbours = (current, left, top) depth samples.
+    float3 neighbours = SMAAGatherNeighbours(texcoord, offset, SMAATexturePass2D(depthTex));
+    float2 delta = abs(neighbours.xx - float2(neighbours.y, neighbours.z));
+    float2 edges = step(SMAA_DEPTH_THRESHOLD, delta);
+
+    if (dot(edges, float2(1.0, 1.0)) == 0.0)
+        return float2(-2.0, -2.0);
+
+    return edges;
+}
+
+//-----------------------------------------------------------------------------
+// Diagonal Search Functions
+
+#if !defined(SMAA_DISABLE_DIAG_DETECTION)
+
+/**
+ * Allows to decode two binary values from a bilinear-filtered access.
+ */
+// Unpacks two binary edge flags from a single bilinear-filtered fetch,
+// returning each component rounded back to 0.0 or 1.0.
+float2 SMAADecodeDiagBilinearAccess(float2 e) {
+    // Bilinear access for fetching 'e' have a 0.25 offset, and we are
+    // interested in the R and G edges:
+    //
+    // +---G---+-------+
+    // |   x o R   x   |
+    // +-------+-------+
+    //
+    // Then, if one of these edge is enabled:
+    //   Red:   (0.75 * X + 0.25 * 1) => 0.25 or 1.0
+    //   Green: (0.75 * 1 + 0.25 * X) => 0.75 or 1.0
+    //
+    // This function will unpack the values (mad + mul + round):
+    // wolframalpha.com: round(x * abs(5 * x - 5 * 0.75)) plot 0 to 1
+    e.r = e.r * abs(5.0 * e.r - 5.0 * 0.75);
+    return round(e);
+}
+
+// Same decode as the float2 overload, applied to two packed fetches at once:
+// the R and B channels carry the bilinear-mixed values, G and A pass through.
+float4 SMAADecodeDiagBilinearAccess(float4 e) {
+    e.rb = e.rb * abs(5.0 * e.rb - 5.0 * 0.75);
+    return round(e);
+}
+
+/**
+ * These functions allows to perform diagonal pattern searches.
+ */
+float2 SMAASearchDiag1(SMAATexture2D(edgesTex), float2 texcoord, float2 dir, out float2 e) {
+ float4 coord = float4(texcoord, -1.0, 1.0);
+ float3 t = float3(SMAA_RT_METRICS.xy, 1.0);
+ while (coord.z < float(SMAA_MAX_SEARCH_STEPS_DIAG - 1) &&
+ coord.w > 0.9) {
+ coord.xyz = mad(t, float3(dir, 1.0), coord.xyz);
+ e = SMAASampleLevelZero(edgesTex, coord.xy).rg;
+ coord.w = dot(e, float2(0.5, 0.5));
+ }
+ return coord.zw;
+}
+
+float2 SMAASearchDiag2(SMAATexture2D(edgesTex), float2 texcoord, float2 dir, out float2 e) {
+ float4 coord = float4(texcoord, -1.0, 1.0);
+ coord.x += 0.25 * SMAA_RT_METRICS.x; // See @SearchDiag2Optimization
+ float3 t = float3(SMAA_RT_METRICS.xy, 1.0);
+ while (coord.z < float(SMAA_MAX_SEARCH_STEPS_DIAG - 1) &&
+ coord.w > 0.9) {
+ coord.xyz = mad(t, float3(dir, 1.0), coord.xyz);
+
+ // @SearchDiag2Optimization
+ // Fetch both edges at once using bilinear filtering:
+ e = SMAASampleLevelZero(edgesTex, coord.xy).rg;
+ e = SMAADecodeDiagBilinearAccess(e);
+
+ // Non-optimized version:
+ // e.g = SMAASampleLevelZero(edgesTex, coord.xy).g;
+ // e.r = SMAASampleLevelZeroOffset(edgesTex, coord.xy, int2(1, 0)).r;
+
+ coord.w = dot(e, float2(0.5, 0.5));
+ }
+ return coord.zw;
+}
+
+/**
+ * Similar to SMAAArea, this calculates the area corresponding to a certain
+ * diagonal distance and crossing edges 'e'.
+ */
+float2 SMAAAreaDiag(SMAATexture2D(areaTex), float2 dist, float2 e, float offset) {
+ float2 texcoord = mad(float2(SMAA_AREATEX_MAX_DISTANCE_DIAG, SMAA_AREATEX_MAX_DISTANCE_DIAG), e, dist);
+
+ // We do a scale and bias for mapping to texel space:
+ texcoord = mad(SMAA_AREATEX_PIXEL_SIZE, texcoord, 0.5 * SMAA_AREATEX_PIXEL_SIZE);
+
+ // Diagonal areas are on the second half of the texture:
+ texcoord.x += 0.5;
+
+ // Move to proper place, according to the subpixel offset:
+ texcoord.y += SMAA_AREATEX_SUBTEX_SIZE * offset;
+
+ // Do it!
+ return SMAA_AREATEX_SELECT(SMAASampleLevelZero(areaTex, texcoord));
+}
+
+/**
+ * This searches for diagonal patterns and returns the corresponding weights.
+ */
+float2 SMAACalculateDiagWeights(SMAATexture2D(edgesTex), SMAATexture2D(areaTex), float2 texcoord, float2 e, float4 subsampleIndices) {
+ float2 weights = float2(0.0, 0.0);
+
+ // Search for the line ends:
+ float4 d;
+ float2 end;
+ if (e.r > 0.0) {
+ d.xz = SMAASearchDiag1(SMAATexturePass2D(edgesTex), texcoord, float2(-1.0, 1.0), end);
+ d.x += float(end.y > 0.9);
+ } else
+ d.xz = float2(0.0, 0.0);
+ d.yw = SMAASearchDiag1(SMAATexturePass2D(edgesTex), texcoord, float2(1.0, -1.0), end);
+
+ SMAA_BRANCH
+ if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3
+ // Fetch the crossing edges:
+ float4 coords = mad(float4(-d.x + 0.25, d.x, d.y, -d.y - 0.25), SMAA_RT_METRICS.xyxy, texcoord.xyxy);
+ float4 c;
+ c.xy = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).rg;
+ c.zw = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).rg;
+ c.yxwz = SMAADecodeDiagBilinearAccess(c.xyzw);
+
+ // Non-optimized version:
+ // float4 coords = mad(float4(-d.x, d.x, d.y, -d.y), SMAA_RT_METRICS.xyxy, texcoord.xyxy);
+ // float4 c;
+ // c.x = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).g;
+ // c.y = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2( 0, 0)).r;
+ // c.z = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).g;
+ // c.w = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, -1)).r;
+
+ // Merge crossing edges at each side into a single value:
+ float2 cc = mad(float2(2.0, 2.0), c.xz, c.yw);
+
+ // Remove the crossing edge if we didn't found the end of the line:
+ SMAAMovc(bool2(step(0.9, d.zw)), cc, float2(0.0, 0.0));
+
+ // Fetch the areas for this line:
+ weights += SMAAAreaDiag(SMAATexturePass2D(areaTex), d.xy, cc, subsampleIndices.z);
+ }
+
+ // Search for the line ends:
+ d.xz = SMAASearchDiag2(SMAATexturePass2D(edgesTex), texcoord, float2(-1.0, -1.0), end);
+ if (SMAASampleLevelZeroOffset(edgesTex, texcoord, int2(1, 0)).r > 0.0) {
+ d.yw = SMAASearchDiag2(SMAATexturePass2D(edgesTex), texcoord, float2(1.0, 1.0), end);
+ d.y += float(end.y > 0.9);
+ } else
+ d.yw = float2(0.0, 0.0);
+
+ SMAA_BRANCH
+ if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3
+ // Fetch the crossing edges:
+ float4 coords = mad(float4(-d.x, -d.x, d.y, d.y), SMAA_RT_METRICS.xyxy, texcoord.xyxy);
+ float4 c;
+ c.x = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).g;
+ c.y = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2( 0, -1)).r;
+ c.zw = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).gr;
+ float2 cc = mad(float2(2.0, 2.0), c.xz, c.yw);
+
+ // Remove the crossing edge if we didn't found the end of the line:
+ SMAAMovc(bool2(step(0.9, d.zw)), cc, float2(0.0, 0.0));
+
+ // Fetch the areas for this line:
+ weights += SMAAAreaDiag(SMAATexturePass2D(areaTex), d.xy, cc, subsampleIndices.w).gr;
+ }
+
+ return weights;
+}
+#endif
+
+//-----------------------------------------------------------------------------
+// Horizontal/Vertical Search Functions
+
+/**
+ * This allows to determine how much length should we add in the last step
+ * of the searches. It takes the bilinearly interpolated edge (see
+ * @PSEUDO_GATHER4), and adds 0, 1 or 2, depending on which edges and
+ * crossing edges are active.
+ */
+float SMAASearchLength(SMAATexture2D(searchTex), float2 e, float offset) {
+ // The texture is flipped vertically, with left and right cases taking half
+ // of the space horizontally:
+ float2 scale = SMAA_SEARCHTEX_SIZE * float2(0.5, -1.0);
+ float2 bias = SMAA_SEARCHTEX_SIZE * float2(offset, 1.0);
+
+ // Scale and bias to access texel centers:
+ scale += float2(-1.0, 1.0);
+ bias += float2( 0.5, -0.5);
+
+ // Convert from pixel coordinates to texcoords:
+ // (We use SMAA_SEARCHTEX_PACKED_SIZE because the texture is cropped)
+ scale *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE;
+ bias *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE;
+
+ // Lookup the search texture:
+ return SMAA_SEARCHTEX_SELECT(SMAASampleLevelZero(searchTex, mad(scale, e, bias)));
+}
+
+/**
+ * Horizontal/vertical search functions for the 2nd pass.
+ */
+float SMAASearchXLeft(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+ /**
+ * @PSEUDO_GATHER4
+ * This texcoord has been offset by (-0.25, -0.125) in the vertex shader to
+ * sample between edge, thus fetching four edges in a row.
+ * Sampling with different offsets in each direction allows to disambiguate
+ * which edges are active from the four fetched ones.
+ */
+ float2 e = float2(0.0, 1.0);
+ while (texcoord.x > end &&
+ e.g > 0.8281 && // Is there some edge not activated?
+ e.r == 0.0) { // Or is there a crossing edge that breaks the line?
+ e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+ texcoord = mad(-float2(2.0, 0.0), SMAA_RT_METRICS.xy, texcoord);
+ }
+
+ float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e, 0.0), 3.25);
+ return mad(SMAA_RT_METRICS.x, offset, texcoord.x);
+
+ // Non-optimized version:
+ // We correct the previous (-0.25, -0.125) offset we applied:
+ // texcoord.x += 0.25 * SMAA_RT_METRICS.x;
+
+ // The searches are bias by 1, so adjust the coords accordingly:
+ // texcoord.x += SMAA_RT_METRICS.x;
+
+ // Disambiguate the length added by the last step:
+ // texcoord.x += 2.0 * SMAA_RT_METRICS.x; // Undo last step
+ // texcoord.x -= SMAA_RT_METRICS.x * (255.0 / 127.0) * SMAASearchLength(SMAATexturePass2D(searchTex), e, 0.0);
+ // return mad(SMAA_RT_METRICS.x, offset, texcoord.x);
+}
+
+float SMAASearchXRight(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+ float2 e = float2(0.0, 1.0);
+ while (texcoord.x < end &&
+ e.g > 0.8281 && // Is there some edge not activated?
+ e.r == 0.0) { // Or is there a crossing edge that breaks the line?
+ e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+ texcoord = mad(float2(2.0, 0.0), SMAA_RT_METRICS.xy, texcoord);
+ }
+ float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e, 0.5), 3.25);
+ return mad(-SMAA_RT_METRICS.x, offset, texcoord.x);
+}
+
+float SMAASearchYUp(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+ float2 e = float2(1.0, 0.0);
+ while (texcoord.y > end &&
+ e.r > 0.8281 && // Is there some edge not activated?
+ e.g == 0.0) { // Or is there a crossing edge that breaks the line?
+ e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+ texcoord = mad(-float2(0.0, 2.0), SMAA_RT_METRICS.xy, texcoord);
+ }
+ float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e.gr, 0.0), 3.25);
+ return mad(SMAA_RT_METRICS.y, offset, texcoord.y);
+}
+
+float SMAASearchYDown(SMAATexture2D(edgesTex), SMAATexture2D(searchTex), float2 texcoord, float end) {
+ float2 e = float2(1.0, 0.0);
+ while (texcoord.y < end &&
+ e.r > 0.8281 && // Is there some edge not activated?
+ e.g == 0.0) { // Or is there a crossing edge that breaks the line?
+ e = SMAASampleLevelZero(edgesTex, texcoord).rg;
+ texcoord = mad(float2(0.0, 2.0), SMAA_RT_METRICS.xy, texcoord);
+ }
+ float offset = mad(-(255.0 / 127.0), SMAASearchLength(SMAATexturePass2D(searchTex), e.gr, 0.5), 3.25);
+ return mad(-SMAA_RT_METRICS.y, offset, texcoord.y);
+}
+
+/**
+ * Ok, we have the distance and both crossing edges. So, what are the areas
+ * at each side of current edge?
+ */
+float2 SMAAArea(SMAATexture2D(areaTex), float2 dist, float e1, float e2, float offset) {
+ // Rounding prevents precision errors of bilinear filtering:
+ float2 texcoord = mad(float2(SMAA_AREATEX_MAX_DISTANCE, SMAA_AREATEX_MAX_DISTANCE), round(4.0 * float2(e1, e2)), dist);
+
+ // We do a scale and bias for mapping to texel space:
+ texcoord = mad(SMAA_AREATEX_PIXEL_SIZE, texcoord, 0.5 * SMAA_AREATEX_PIXEL_SIZE);
+
+ // Move to proper place, according to the subpixel offset:
+ texcoord.y = mad(SMAA_AREATEX_SUBTEX_SIZE, offset, texcoord.y);
+
+ // Do it!
+ return SMAA_AREATEX_SELECT(SMAASampleLevelZero(areaTex, texcoord));
+}
+
+//-----------------------------------------------------------------------------
+// Corner Detection Functions
+
+void SMAADetectHorizontalCornerPattern(SMAATexture2D(edgesTex), inout float2 weights, float4 texcoord, float2 d) {
+ #if !defined(SMAA_DISABLE_CORNER_DETECTION)
+ float2 leftRight = step(d.xy, d.yx);
+ float2 rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * leftRight;
+
+ rounding /= leftRight.x + leftRight.y; // Reduce blending for pixels in the center of a line.
+
+ float2 factor = float2(1.0, 1.0);
+ factor.x -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2(0, 1)).r;
+ factor.x -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2(1, 1)).r;
+ factor.y -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2(0, -2)).r;
+ factor.y -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2(1, -2)).r;
+
+ weights *= saturate(factor);
+ #endif
+}
+
+void SMAADetectVerticalCornerPattern(SMAATexture2D(edgesTex), inout float2 weights, float4 texcoord, float2 d) {
+ #if !defined(SMAA_DISABLE_CORNER_DETECTION)
+ float2 leftRight = step(d.xy, d.yx);
+ float2 rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * leftRight;
+
+ rounding /= leftRight.x + leftRight.y;
+
+ float2 factor = float2(1.0, 1.0);
+ factor.x -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2( 1, 0)).g;
+ factor.x -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2( 1, 1)).g;
+ factor.y -= rounding.x * SMAASampleLevelZeroOffset(edgesTex, texcoord.xy, int2(-2, 0)).g;
+ factor.y -= rounding.y * SMAASampleLevelZeroOffset(edgesTex, texcoord.zw, int2(-2, 1)).g;
+
+ weights *= saturate(factor);
+ #endif
+}
+
+//-----------------------------------------------------------------------------
+// Blending Weight Calculation Pixel Shader (Second Pass)
+
+float4 SMAABlendingWeightCalculationPS(float2 texcoord,
+ float2 pixcoord,
+ float4 offset[3],
+ SMAATexture2D(edgesTex),
+ SMAATexture2D(areaTex),
+ SMAATexture2D(searchTex),
+ float4 subsampleIndices) { // Just pass zero for SMAA 1x, see @SUBSAMPLE_INDICES.
+ float4 weights = float4(0.0, 0.0, 0.0, 0.0);
+
+ float2 e = SMAASample(edgesTex, texcoord).rg;
+
+ SMAA_BRANCH
+ if (e.g > 0.0) { // Edge at north
+ #if !defined(SMAA_DISABLE_DIAG_DETECTION)
+ // Diagonals have both north and west edges, so searching for them in
+ // one of the boundaries is enough.
+ weights.rg = SMAACalculateDiagWeights(SMAATexturePass2D(edgesTex), SMAATexturePass2D(areaTex), texcoord, e, subsampleIndices);
+
+ // We give priority to diagonals, so if we find a diagonal we skip
+ // horizontal/vertical processing.
+ SMAA_BRANCH
+ if (weights.r == -weights.g) { // weights.r + weights.g == 0.0
+ #endif
+
+ float2 d;
+
+ // Find the distance to the left:
+ float3 coords;
+ coords.x = SMAASearchXLeft(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[0].xy, offset[2].x);
+ coords.y = offset[1].y; // offset[1].y = texcoord.y - 0.25 * SMAA_RT_METRICS.y (@CROSSING_OFFSET)
+ d.x = coords.x;
+
+ // Now fetch the left crossing edges, two at a time using bilinear
+ // filtering. Sampling at -0.25 (see @CROSSING_OFFSET) enables to
+ // discern what value each edge has:
+ float e1 = SMAASampleLevelZero(edgesTex, coords.xy).r;
+
+ // Find the distance to the right:
+ coords.z = SMAASearchXRight(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[0].zw, offset[2].y);
+ d.y = coords.z;
+
+ // We want the distances to be in pixel units (doing this here allow to
+ // better interleave arithmetic and memory accesses):
+ d = abs(round(mad(SMAA_RT_METRICS.zz, d, -pixcoord.xx)));
+
+ // SMAAArea below needs a sqrt, as the areas texture is compressed
+ // quadratically:
+ float2 sqrt_d = sqrt(d);
+
+ // Fetch the right crossing edges:
+ float e2 = SMAASampleLevelZeroOffset(edgesTex, coords.zy, int2(1, 0)).r;
+
+ // Ok, we know how this pattern looks like, now it is time for getting
+ // the actual area:
+ weights.rg = SMAAArea(SMAATexturePass2D(areaTex), sqrt_d, e1, e2, subsampleIndices.y);
+
+ // Fix corners:
+ coords.y = texcoord.y;
+ SMAADetectHorizontalCornerPattern(SMAATexturePass2D(edgesTex), weights.rg, coords.xyzy, d);
+
+ #if !defined(SMAA_DISABLE_DIAG_DETECTION)
+ } else
+ e.r = 0.0; // Skip vertical processing.
+ #endif
+ }
+
+ SMAA_BRANCH
+ if (e.r > 0.0) { // Edge at west
+ float2 d;
+
+ // Find the distance to the top:
+ float3 coords;
+ coords.y = SMAASearchYUp(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[1].xy, offset[2].z);
+ coords.x = offset[0].x; // offset[1].x = texcoord.x - 0.25 * SMAA_RT_METRICS.x;
+ d.x = coords.y;
+
+ // Fetch the top crossing edges:
+ float e1 = SMAASampleLevelZero(edgesTex, coords.xy).g;
+
+ // Find the distance to the bottom:
+ coords.z = SMAASearchYDown(SMAATexturePass2D(edgesTex), SMAATexturePass2D(searchTex), offset[1].zw, offset[2].w);
+ d.y = coords.z;
+
+ // We want the distances to be in pixel units:
+ d = abs(round(mad(SMAA_RT_METRICS.ww, d, -pixcoord.yy)));
+
+ // SMAAArea below needs a sqrt, as the areas texture is compressed
+ // quadratically:
+ float2 sqrt_d = sqrt(d);
+
+ // Fetch the bottom crossing edges:
+ float e2 = SMAASampleLevelZeroOffset(edgesTex, coords.xz, int2(0, 1)).g;
+
+ // Get the area for this direction:
+ weights.ba = SMAAArea(SMAATexturePass2D(areaTex), sqrt_d, e1, e2, subsampleIndices.x);
+
+ // Fix corners:
+ coords.x = texcoord.x;
+ SMAADetectVerticalCornerPattern(SMAATexturePass2D(edgesTex), weights.ba, coords.xyxz, d);
+ }
+
+ return weights;
+}
+
+//-----------------------------------------------------------------------------
+// Neighborhood Blending Pixel Shader (Third Pass)
+
+float4 SMAANeighborhoodBlendingPS(float2 texcoord,
+ float4 offset,
+ SMAATexture2D(colorTex),
+ SMAATexture2D(blendTex)
+ #if SMAA_REPROJECTION
+ , SMAATexture2D(velocityTex)
+ #endif
+ ) {
+ // Fetch the blending weights for current pixel:
+ float4 a;
+ a.x = SMAASample(blendTex, offset.xy).a; // Right
+ a.y = SMAASample(blendTex, offset.zw).g; // Top
+ a.wz = SMAASample(blendTex, texcoord).xz; // Bottom / Left
+
+ // Is there any blending weight with a value greater than 0.0?
+ SMAA_BRANCH
+ if (dot(a, float4(1.0, 1.0, 1.0, 1.0)) < 1e-5) {
+ float4 color = SMAASampleLevelZero(colorTex, texcoord);
+
+ #if SMAA_REPROJECTION
+ float2 velocity = SMAA_DECODE_VELOCITY(SMAASampleLevelZero(velocityTex, texcoord));
+
+ // Pack velocity into the alpha channel:
+ color.a = sqrt(5.0 * length(velocity));
+ #endif
+
+ return color;
+ } else {
+ bool h = max(a.x, a.z) > max(a.y, a.w); // max(horizontal) > max(vertical)
+
+ // Calculate the blending offsets:
+ float4 blendingOffset = float4(0.0, a.y, 0.0, a.w);
+ float2 blendingWeight = a.yw;
+ SMAAMovc(bool4(h, h, h, h), blendingOffset, float4(a.x, 0.0, a.z, 0.0));
+ SMAAMovc(bool2(h, h), blendingWeight, a.xz);
+ blendingWeight /= dot(blendingWeight, float2(1.0, 1.0));
+
+ // Calculate the texture coordinates:
+ float4 blendingCoord = mad(blendingOffset, float4(SMAA_RT_METRICS.xy, -SMAA_RT_METRICS.xy), texcoord.xyxy);
+
+ // We exploit bilinear filtering to mix current pixel with the chosen
+ // neighbor:
+ float4 color = blendingWeight.x * SMAASampleLevelZero(colorTex, blendingCoord.xy);
+ color += blendingWeight.y * SMAASampleLevelZero(colorTex, blendingCoord.zw);
+
+ #if SMAA_REPROJECTION
+ // Antialias velocity for proper reprojection in a later stage:
+ float2 velocity = blendingWeight.x * SMAA_DECODE_VELOCITY(SMAASampleLevelZero(velocityTex, blendingCoord.xy));
+ velocity += blendingWeight.y * SMAA_DECODE_VELOCITY(SMAASampleLevelZero(velocityTex, blendingCoord.zw));
+
+ // Pack velocity into the alpha channel:
+ color.a = sqrt(5.0 * length(velocity));
+ #endif
+
+ return color;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Temporal Resolve Pixel Shader (Optional Pass)
+
+float4 SMAAResolvePS(float2 texcoord,
+ SMAATexture2D(currentColorTex),
+ SMAATexture2D(previousColorTex)
+ #if SMAA_REPROJECTION
+ , SMAATexture2D(velocityTex)
+ #endif
+ ) {
+ #if SMAA_REPROJECTION
+ // Velocity is assumed to be calculated for motion blur, so we need to
+ // inverse it for reprojection:
+ float2 velocity = -SMAA_DECODE_VELOCITY(SMAASamplePoint(velocityTex, texcoord).rg);
+
+ // Fetch current pixel:
+ float4 current = SMAASamplePoint(currentColorTex, texcoord);
+
+ // Reproject current coordinates and fetch previous pixel:
+ float4 previous = SMAASamplePoint(previousColorTex, texcoord + velocity);
+
+ // Attenuate the previous pixel if the velocity is different:
+ float delta = abs(current.a * current.a - previous.a * previous.a) / 5.0;
+ float weight = 0.5 * saturate(1.0 - sqrt(delta) * SMAA_REPROJECTION_WEIGHT_SCALE);
+
+ // Blend the pixels according to the calculated weight:
+ return lerp(current, previous, weight);
+ #else
+ // Just blend the pixels:
+ float4 current = SMAASamplePoint(currentColorTex, texcoord);
+ float4 previous = SMAASamplePoint(previousColorTex, texcoord);
+ return lerp(current, previous, 0.5);
+ #endif
+}
+
+//-----------------------------------------------------------------------------
+// Separate Multisamples Pixel Shader (Optional Pass)
+
+#ifdef SMAALoad
+void SMAASeparatePS(float4 position,
+ float2 texcoord,
+ out float4 target0,
+ out float4 target1,
+ SMAATexture2DMS2(colorTexMS)) {
+ int2 pos = int2(position.xy);
+ target0 = SMAALoad(colorTexMS, pos, 0);
+ target1 = SMAALoad(colorTexMS, pos, 1);
+}
+#endif
+
+//-----------------------------------------------------------------------------
+#endif // SMAA_INCLUDE_PS
+
+layout(rgba8, binding = 0, set = 3) uniform image2D imgOutput;
+
+layout(binding = 1, set = 2) uniform sampler2D inputImg;
+layout(binding = 3, set = 2) uniform sampler2D samplerBlend;
+layout( binding = 2 ) uniform invResolution
+{
+ vec2 invResolution_data;
+};
+
+void main() {
+ vec2 loc = ivec2(gl_GlobalInvocationID.x * 4, gl_GlobalInvocationID.y * 4);
+ for(int i = 0; i < 4; i++)
+ {
+ for(int j = 0; j < 4; j++)
+ {
+ ivec2 texelCoord = ivec2(loc.x + i, loc.y + j);
+ vec2 coord = (texelCoord + vec2(0.5)) / invResolution_data;
+ vec2 pixCoord;
+ vec4 offset;
+
+ SMAANeighborhoodBlendingVS(coord, offset);
+
+ vec4 oColor = SMAANeighborhoodBlendingPS(coord, offset, inputImg, samplerBlend);
+
+ imageStore(imgOutput, texelCoord, oColor);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaNeighbour.spv b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaNeighbour.spv
new file mode 100644
index 00000000..fa0208f2
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaNeighbour.spv
Binary files differ
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/SmaaConstants.cs b/src/Ryujinx.Graphics.Vulkan/Effects/SmaaConstants.cs
new file mode 100644
index 00000000..a5f060f1
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/SmaaConstants.cs
@@ -0,0 +1,15 @@
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan.Effects
+{
+ [StructLayout(LayoutKind.Sequential, Pack = 4)]
+ internal struct SmaaConstants
+ {
+ public int QualityLow;
+ public int QualityMedium;
+ public int QualityHigh;
+ public int QualityUltra;
+ public float Width;
+ public float Height;
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/SmaaPostProcessingEffect.cs b/src/Ryujinx.Graphics.Vulkan/Effects/SmaaPostProcessingEffect.cs
new file mode 100644
index 00000000..38f86bae
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/SmaaPostProcessingEffect.cs
@@ -0,0 +1,289 @@
+using Ryujinx.Common;
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Shader;
+using Ryujinx.Graphics.Shader.Translation;
+using Silk.NET.Vulkan;
+using System;
+using Format = Ryujinx.Graphics.GAL.Format;
+
+namespace Ryujinx.Graphics.Vulkan.Effects
+{
+ internal partial class SmaaPostProcessingEffect : IPostProcessingEffect
+ {
+ public const int AreaWidth = 160;
+ public const int AreaHeight = 560;
+ public const int SearchWidth = 64;
+ public const int SearchHeight = 16;
+
+ private readonly VulkanRenderer _renderer;
+ private ISampler _samplerLinear;
+ private SmaaConstants _specConstants;
+ private ShaderCollection _edgeProgram;
+ private ShaderCollection _blendProgram;
+ private ShaderCollection _neighbourProgram;
+
+ private PipelineHelperShader _pipeline;
+
+ private TextureView _outputTexture;
+ private TextureView _edgeOutputTexture;
+ private TextureView _blendOutputTexture;
+ private TextureView _areaTexture;
+ private TextureView _searchTexture;
+ private Device _device;
+ private bool _recreatePipelines;
+ private int _quality;
+
+ public SmaaPostProcessingEffect(VulkanRenderer renderer, Device device, int quality)
+ {
+ _device = device;
+ _renderer = renderer;
+ _quality = quality;
+
+ Initialize();
+ }
+
+ public int Quality
+ {
+ get => _quality;
+ set
+ {
+ _quality = value;
+
+ _recreatePipelines = true;
+ }
+ }
+
+ public void Dispose()
+ {
+ DeletePipelines();
+ _samplerLinear?.Dispose();
+ _outputTexture?.Dispose();
+ _edgeOutputTexture?.Dispose();
+ _blendOutputTexture?.Dispose();
+ _areaTexture?.Dispose();
+ _searchTexture?.Dispose();
+ }
+
+ private unsafe void RecreateShaders(int width, int height)
+ {
+ _recreatePipelines = false;
+
+ DeletePipelines();
+ _pipeline = new PipelineHelperShader(_renderer, _device);
+
+ _pipeline.Initialize();
+
+ var edgeShader = EmbeddedResources.Read("Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaEdge.spv");
+ var blendShader = EmbeddedResources.Read("Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaBlend.spv");
+ var neighbourShader = EmbeddedResources.Read("Ryujinx.Graphics.Vulkan/Effects/Shaders/SmaaNeighbour.spv");
+
+ var edgeBindings = new ShaderBindings(
+ new[] { 2 },
+ Array.Empty<int>(),
+ new[] { 1 },
+ new[] { 0 });
+
+ var blendBindings = new ShaderBindings(
+ new[] { 2 },
+ Array.Empty<int>(),
+ new[] { 1, 3, 4 },
+ new[] { 0 });
+
+ var neighbourBindings = new ShaderBindings(
+ new[] { 2 },
+ Array.Empty<int>(),
+ new[] { 1, 3 },
+ new[] { 0 });
+
+ _samplerLinear = _renderer.CreateSampler(GAL.SamplerCreateInfo.Create(MinFilter.Linear, MagFilter.Linear));
+
+ _specConstants = new SmaaConstants()
+ {
+ Width = width,
+ Height = height,
+ QualityLow = Quality == 0 ? 1 : 0,
+ QualityMedium = Quality == 1 ? 1 : 0,
+ QualityHigh = Quality == 2 ? 1 : 0,
+ QualityUltra = Quality == 3 ? 1 : 0,
+ };
+
+ var specInfo = new SpecDescription(
+ (0, SpecConstType.Int32),
+ (1, SpecConstType.Int32),
+ (2, SpecConstType.Int32),
+ (3, SpecConstType.Int32),
+ (4, SpecConstType.Float32),
+ (5, SpecConstType.Float32));
+
+ _edgeProgram = _renderer.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(edgeShader, edgeBindings, ShaderStage.Compute, TargetLanguage.Spirv)
+ }, new[] { specInfo });
+
+ _blendProgram = _renderer.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(blendShader, blendBindings, ShaderStage.Compute, TargetLanguage.Spirv)
+ }, new[] { specInfo });
+
+ _neighbourProgram = _renderer.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(neighbourShader, neighbourBindings, ShaderStage.Compute, TargetLanguage.Spirv)
+ }, new[] { specInfo });
+ }
+
+ public void DeletePipelines()
+ {
+ _pipeline?.Dispose();
+ _edgeProgram?.Dispose();
+ _blendProgram?.Dispose();
+ _neighbourProgram?.Dispose();
+ }
+
+ private void Initialize()
+ {
+ var areaInfo = new TextureCreateInfo(AreaWidth,
+ AreaHeight,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ Format.R8G8Unorm,
+ DepthStencilMode.Depth,
+ Target.Texture2D,
+ SwizzleComponent.Red,
+ SwizzleComponent.Green,
+ SwizzleComponent.Blue,
+ SwizzleComponent.Alpha);
+
+ var searchInfo = new TextureCreateInfo(SearchWidth,
+ SearchHeight,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ Format.R8Unorm,
+ DepthStencilMode.Depth,
+ Target.Texture2D,
+ SwizzleComponent.Red,
+ SwizzleComponent.Green,
+ SwizzleComponent.Blue,
+ SwizzleComponent.Alpha);
+
+ var areaTexture = EmbeddedResources.Read("Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaAreaTexture.bin");
+ var searchTexture = EmbeddedResources.Read("Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaSearchTexture.bin");
+
+ _areaTexture = _renderer.CreateTexture(areaInfo, 1) as TextureView;
+ _searchTexture = _renderer.CreateTexture(searchInfo, 1) as TextureView;
+
+ _areaTexture.SetData(areaTexture);
+ _searchTexture.SetData(searchTexture);
+ }
+
+ public TextureView Run(TextureView view, CommandBufferScoped cbs, int width, int height)
+ {
+ if (_recreatePipelines || _outputTexture == null || _outputTexture.Info.Width != view.Width || _outputTexture.Info.Height != view.Height)
+ {
+ RecreateShaders(view.Width, view.Height);
+ _outputTexture?.Dispose();
+ _edgeOutputTexture?.Dispose();
+ _blendOutputTexture?.Dispose();
+
+ var info = view.Info;
+
+ if (view.Info.Format.IsBgr())
+ {
+ info = new TextureCreateInfo(info.Width,
+ info.Height,
+ info.Depth,
+ info.Levels,
+ info.Samples,
+ info.BlockWidth,
+ info.BlockHeight,
+ info.BytesPerPixel,
+ info.Format,
+ info.DepthStencilMode,
+ info.Target,
+ info.SwizzleB,
+ info.SwizzleG,
+ info.SwizzleR,
+ info.SwizzleA);
+ }
+
+ _outputTexture = _renderer.CreateTexture(info, view.ScaleFactor) as TextureView;
+ _edgeOutputTexture = _renderer.CreateTexture(info, view.ScaleFactor) as TextureView;
+ _blendOutputTexture = _renderer.CreateTexture(info, view.ScaleFactor) as TextureView;
+ }
+
+ _pipeline.SetCommandBuffer(cbs);
+
+ Clear(_edgeOutputTexture);
+ Clear(_blendOutputTexture);
+
+ _renderer.Pipeline.TextureBarrier();
+
+ var dispatchX = BitUtils.DivRoundUp(view.Width, IPostProcessingEffect.LocalGroupSize);
+ var dispatchY = BitUtils.DivRoundUp(view.Height, IPostProcessingEffect.LocalGroupSize);
+
+ // Edge pass
+ _pipeline.SetProgram(_edgeProgram);
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 1, view, _samplerLinear);
+ _pipeline.Specialize(_specConstants);
+
+ ReadOnlySpan<float> resolutionBuffer = stackalloc float[] { view.Width, view.Height };
+ int rangeSize = resolutionBuffer.Length * sizeof(float);
+ var bufferHandle = _renderer.BufferManager.CreateWithHandle(_renderer, rangeSize);
+
+ _renderer.BufferManager.SetData(bufferHandle, 0, resolutionBuffer);
+ var bufferRanges = new BufferRange(bufferHandle, 0, rangeSize);
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(2, bufferRanges) });
+ _pipeline.SetImage(0, _edgeOutputTexture, GAL.Format.R8G8B8A8Unorm);
+ _pipeline.DispatchCompute(dispatchX, dispatchY, 1);
+ _pipeline.ComputeBarrier();
+
+ // Blend pass
+ _pipeline.SetProgram(_blendProgram);
+ _pipeline.Specialize(_specConstants);
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 1, _edgeOutputTexture, _samplerLinear);
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 3, _areaTexture, _samplerLinear);
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 4, _searchTexture, _samplerLinear);
+ _pipeline.SetImage(0, _blendOutputTexture, GAL.Format.R8G8B8A8Unorm);
+ _pipeline.DispatchCompute(dispatchX, dispatchY, 1);
+ _pipeline.ComputeBarrier();
+
+ // Neighbour pass
+ _pipeline.SetProgram(_neighbourProgram);
+ _pipeline.Specialize(_specConstants);
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 3, _blendOutputTexture, _samplerLinear);
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 1, view, _samplerLinear);
+ _pipeline.SetImage(0, _outputTexture, GAL.Format.R8G8B8A8Unorm);
+ _pipeline.DispatchCompute(dispatchX, dispatchY, 1);
+ _pipeline.ComputeBarrier();
+
+ _pipeline.Finish();
+
+ _renderer.BufferManager.Delete(bufferHandle);
+
+ return _outputTexture;
+ }
+
+ private void Clear(TextureView texture)
+ {
+ Span<uint> colorMasks = stackalloc uint[1];
+
+ colorMasks[0] = 0xf;
+
+ Span<Rectangle<int>> scissors = stackalloc Rectangle<int>[1];
+
+ scissors[0] = new Rectangle<int>(0, 0, texture.Width, texture.Height);
+
+ _pipeline.SetRenderTarget(texture.GetImageViewForAttachment(), (uint)texture.Width, (uint)texture.Height, false, texture.VkFormat);
+ _pipeline.SetRenderTargetColorMasks(colorMasks);
+ _pipeline.SetScissors(scissors);
+ _pipeline.ClearRenderTargetColor(0, 0, 1, new ColorF(0f, 0f, 0f, 1f));
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaAreaTexture.bin b/src/Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaAreaTexture.bin
new file mode 100644
index 00000000..f4a7a1b4
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaAreaTexture.bin
Binary files differ
diff --git a/src/Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaSearchTexture.bin b/src/Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaSearchTexture.bin
new file mode 100644
index 00000000..db5bf73f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Effects/Textures/SmaaSearchTexture.bin
Binary files differ
diff --git a/src/Ryujinx.Graphics.Vulkan/EnumConversion.cs b/src/Ryujinx.Graphics.Vulkan/EnumConversion.cs
new file mode 100644
index 00000000..b69c64aa
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/EnumConversion.cs
@@ -0,0 +1,374 @@
+using Ryujinx.Common.Logging;
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Shader;
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ static class EnumConversion
+ {
+ public static ShaderStageFlags Convert(this ShaderStage stage)
+ {
+ return stage switch
+ {
+ ShaderStage.Vertex => ShaderStageFlags.VertexBit,
+ ShaderStage.Geometry => ShaderStageFlags.GeometryBit,
+ ShaderStage.TessellationControl => ShaderStageFlags.TessellationControlBit,
+ ShaderStage.TessellationEvaluation => ShaderStageFlags.TessellationEvaluationBit,
+ ShaderStage.Fragment => ShaderStageFlags.FragmentBit,
+ ShaderStage.Compute => ShaderStageFlags.ComputeBit,
+ _ => LogInvalidAndReturn(stage, nameof(ShaderStage), (ShaderStageFlags)0)
+ };
+ }
+
+ public static PipelineStageFlags ConvertToPipelineStageFlags(this ShaderStage stage)
+ {
+ return stage switch
+ {
+ ShaderStage.Vertex => PipelineStageFlags.VertexShaderBit,
+ ShaderStage.Geometry => PipelineStageFlags.GeometryShaderBit,
+ ShaderStage.TessellationControl => PipelineStageFlags.TessellationControlShaderBit,
+ ShaderStage.TessellationEvaluation => PipelineStageFlags.TessellationEvaluationShaderBit,
+ ShaderStage.Fragment => PipelineStageFlags.FragmentShaderBit,
+ ShaderStage.Compute => PipelineStageFlags.ComputeShaderBit,
+ _ => LogInvalidAndReturn(stage, nameof(ShaderStage), (PipelineStageFlags)0)
+ };
+ }
+
+ public static SamplerAddressMode Convert(this AddressMode mode)
+ {
+ return mode switch
+ {
+ AddressMode.Clamp => SamplerAddressMode.ClampToEdge, // TODO: Should be clamp.
+ AddressMode.Repeat => SamplerAddressMode.Repeat,
+ AddressMode.MirrorClamp => SamplerAddressMode.ClampToEdge, // TODO: Should be mirror clamp.
+ AddressMode.MirrorClampToEdge => SamplerAddressMode.MirrorClampToEdgeKhr,
+ AddressMode.MirrorClampToBorder => SamplerAddressMode.ClampToBorder, // TODO: Should be mirror clamp to border.
+ AddressMode.ClampToBorder => SamplerAddressMode.ClampToBorder,
+ AddressMode.MirroredRepeat => SamplerAddressMode.MirroredRepeat,
+ AddressMode.ClampToEdge => SamplerAddressMode.ClampToEdge,
+ _ => LogInvalidAndReturn(mode, nameof(AddressMode), SamplerAddressMode.ClampToEdge) // TODO: Should be clamp.
+ };
+ }
+
+ public static Silk.NET.Vulkan.BlendFactor Convert(this GAL.BlendFactor factor)
+ {
+ return factor switch
+ {
+ GAL.BlendFactor.Zero or GAL.BlendFactor.ZeroGl => Silk.NET.Vulkan.BlendFactor.Zero,
+ GAL.BlendFactor.One or GAL.BlendFactor.OneGl => Silk.NET.Vulkan.BlendFactor.One,
+ GAL.BlendFactor.SrcColor or GAL.BlendFactor.SrcColorGl => Silk.NET.Vulkan.BlendFactor.SrcColor,
+ GAL.BlendFactor.OneMinusSrcColor or GAL.BlendFactor.OneMinusSrcColorGl => Silk.NET.Vulkan.BlendFactor.OneMinusSrcColor,
+ GAL.BlendFactor.SrcAlpha or GAL.BlendFactor.SrcAlphaGl => Silk.NET.Vulkan.BlendFactor.SrcAlpha,
+ GAL.BlendFactor.OneMinusSrcAlpha or GAL.BlendFactor.OneMinusSrcAlphaGl => Silk.NET.Vulkan.BlendFactor.OneMinusSrcAlpha,
+ GAL.BlendFactor.DstAlpha or GAL.BlendFactor.DstAlphaGl => Silk.NET.Vulkan.BlendFactor.DstAlpha,
+ GAL.BlendFactor.OneMinusDstAlpha or GAL.BlendFactor.OneMinusDstAlphaGl => Silk.NET.Vulkan.BlendFactor.OneMinusDstAlpha,
+ GAL.BlendFactor.DstColor or GAL.BlendFactor.DstColorGl => Silk.NET.Vulkan.BlendFactor.DstColor,
+ GAL.BlendFactor.OneMinusDstColor or GAL.BlendFactor.OneMinusDstColorGl => Silk.NET.Vulkan.BlendFactor.OneMinusDstColor,
+ GAL.BlendFactor.SrcAlphaSaturate or GAL.BlendFactor.SrcAlphaSaturateGl => Silk.NET.Vulkan.BlendFactor.SrcAlphaSaturate,
+ GAL.BlendFactor.Src1Color or GAL.BlendFactor.Src1ColorGl => Silk.NET.Vulkan.BlendFactor.Src1Color,
+ GAL.BlendFactor.OneMinusSrc1Color or GAL.BlendFactor.OneMinusSrc1ColorGl => Silk.NET.Vulkan.BlendFactor.OneMinusSrc1Color,
+ GAL.BlendFactor.Src1Alpha or GAL.BlendFactor.Src1AlphaGl => Silk.NET.Vulkan.BlendFactor.Src1Alpha,
+ GAL.BlendFactor.OneMinusSrc1Alpha or GAL.BlendFactor.OneMinusSrc1AlphaGl => Silk.NET.Vulkan.BlendFactor.OneMinusSrc1Alpha,
+ GAL.BlendFactor.ConstantColor => Silk.NET.Vulkan.BlendFactor.ConstantColor,
+ GAL.BlendFactor.OneMinusConstantColor => Silk.NET.Vulkan.BlendFactor.OneMinusConstantColor,
+ GAL.BlendFactor.ConstantAlpha => Silk.NET.Vulkan.BlendFactor.ConstantAlpha,
+ GAL.BlendFactor.OneMinusConstantAlpha => Silk.NET.Vulkan.BlendFactor.OneMinusConstantAlpha,
+ _ => LogInvalidAndReturn(factor, nameof(GAL.BlendFactor), Silk.NET.Vulkan.BlendFactor.Zero)
+ };
+ }
+
+ public static Silk.NET.Vulkan.BlendOp Convert(this GAL.AdvancedBlendOp op)
+ {
+ return op switch
+ {
+ GAL.AdvancedBlendOp.Zero => Silk.NET.Vulkan.BlendOp.ZeroExt,
+ GAL.AdvancedBlendOp.Src => Silk.NET.Vulkan.BlendOp.SrcExt,
+ GAL.AdvancedBlendOp.Dst => Silk.NET.Vulkan.BlendOp.DstExt,
+ GAL.AdvancedBlendOp.SrcOver => Silk.NET.Vulkan.BlendOp.SrcOverExt,
+ GAL.AdvancedBlendOp.DstOver => Silk.NET.Vulkan.BlendOp.DstOverExt,
+ GAL.AdvancedBlendOp.SrcIn => Silk.NET.Vulkan.BlendOp.SrcInExt,
+ GAL.AdvancedBlendOp.DstIn => Silk.NET.Vulkan.BlendOp.DstInExt,
+ GAL.AdvancedBlendOp.SrcOut => Silk.NET.Vulkan.BlendOp.SrcOutExt,
+ GAL.AdvancedBlendOp.DstOut => Silk.NET.Vulkan.BlendOp.DstOutExt,
+ GAL.AdvancedBlendOp.SrcAtop => Silk.NET.Vulkan.BlendOp.SrcAtopExt,
+ GAL.AdvancedBlendOp.DstAtop => Silk.NET.Vulkan.BlendOp.DstAtopExt,
+ GAL.AdvancedBlendOp.Xor => Silk.NET.Vulkan.BlendOp.XorExt,
+ GAL.AdvancedBlendOp.Plus => Silk.NET.Vulkan.BlendOp.PlusExt,
+ GAL.AdvancedBlendOp.PlusClamped => Silk.NET.Vulkan.BlendOp.PlusClampedExt,
+ GAL.AdvancedBlendOp.PlusClampedAlpha => Silk.NET.Vulkan.BlendOp.PlusClampedAlphaExt,
+ GAL.AdvancedBlendOp.PlusDarker => Silk.NET.Vulkan.BlendOp.PlusDarkerExt,
+ GAL.AdvancedBlendOp.Multiply => Silk.NET.Vulkan.BlendOp.MultiplyExt,
+ GAL.AdvancedBlendOp.Screen => Silk.NET.Vulkan.BlendOp.ScreenExt,
+ GAL.AdvancedBlendOp.Overlay => Silk.NET.Vulkan.BlendOp.OverlayExt,
+ GAL.AdvancedBlendOp.Darken => Silk.NET.Vulkan.BlendOp.DarkenExt,
+ GAL.AdvancedBlendOp.Lighten => Silk.NET.Vulkan.BlendOp.LightenExt,
+ GAL.AdvancedBlendOp.ColorDodge => Silk.NET.Vulkan.BlendOp.ColordodgeExt,
+ GAL.AdvancedBlendOp.ColorBurn => Silk.NET.Vulkan.BlendOp.ColorburnExt,
+ GAL.AdvancedBlendOp.HardLight => Silk.NET.Vulkan.BlendOp.HardlightExt,
+ GAL.AdvancedBlendOp.SoftLight => Silk.NET.Vulkan.BlendOp.SoftlightExt,
+ GAL.AdvancedBlendOp.Difference => Silk.NET.Vulkan.BlendOp.DifferenceExt,
+ GAL.AdvancedBlendOp.Minus => Silk.NET.Vulkan.BlendOp.MinusExt,
+ GAL.AdvancedBlendOp.MinusClamped => Silk.NET.Vulkan.BlendOp.MinusClampedExt,
+ GAL.AdvancedBlendOp.Exclusion => Silk.NET.Vulkan.BlendOp.ExclusionExt,
+ GAL.AdvancedBlendOp.Contrast => Silk.NET.Vulkan.BlendOp.ContrastExt,
+ GAL.AdvancedBlendOp.Invert => Silk.NET.Vulkan.BlendOp.InvertExt,
+ GAL.AdvancedBlendOp.InvertRGB => Silk.NET.Vulkan.BlendOp.InvertRgbExt,
+ GAL.AdvancedBlendOp.InvertOvg => Silk.NET.Vulkan.BlendOp.InvertOvgExt,
+ GAL.AdvancedBlendOp.LinearDodge => Silk.NET.Vulkan.BlendOp.LineardodgeExt,
+ GAL.AdvancedBlendOp.LinearBurn => Silk.NET.Vulkan.BlendOp.LinearburnExt,
+ GAL.AdvancedBlendOp.VividLight => Silk.NET.Vulkan.BlendOp.VividlightExt,
+ GAL.AdvancedBlendOp.LinearLight => Silk.NET.Vulkan.BlendOp.LinearlightExt,
+ GAL.AdvancedBlendOp.PinLight => Silk.NET.Vulkan.BlendOp.PinlightExt,
+ GAL.AdvancedBlendOp.HardMix => Silk.NET.Vulkan.BlendOp.HardmixExt,
+ GAL.AdvancedBlendOp.Red => Silk.NET.Vulkan.BlendOp.RedExt,
+ GAL.AdvancedBlendOp.Green => Silk.NET.Vulkan.BlendOp.GreenExt,
+ GAL.AdvancedBlendOp.Blue => Silk.NET.Vulkan.BlendOp.BlueExt,
+ GAL.AdvancedBlendOp.HslHue => Silk.NET.Vulkan.BlendOp.HslHueExt,
+ GAL.AdvancedBlendOp.HslSaturation => Silk.NET.Vulkan.BlendOp.HslSaturationExt,
+ GAL.AdvancedBlendOp.HslColor => Silk.NET.Vulkan.BlendOp.HslColorExt,
+ GAL.AdvancedBlendOp.HslLuminosity => Silk.NET.Vulkan.BlendOp.HslLuminosityExt,
+ _ => LogInvalidAndReturn(op, nameof(GAL.AdvancedBlendOp), Silk.NET.Vulkan.BlendOp.Add)
+ };
+ }
+
+ public static Silk.NET.Vulkan.BlendOp Convert(this GAL.BlendOp op)
+ {
+ return op switch
+ {
+ GAL.BlendOp.Add or GAL.BlendOp.AddGl => Silk.NET.Vulkan.BlendOp.Add,
+ GAL.BlendOp.Subtract or GAL.BlendOp.SubtractGl => Silk.NET.Vulkan.BlendOp.Subtract,
+ GAL.BlendOp.ReverseSubtract or GAL.BlendOp.ReverseSubtractGl => Silk.NET.Vulkan.BlendOp.ReverseSubtract,
+ GAL.BlendOp.Minimum or GAL.BlendOp.MinimumGl => Silk.NET.Vulkan.BlendOp.Min,
+ GAL.BlendOp.Maximum or GAL.BlendOp.MaximumGl => Silk.NET.Vulkan.BlendOp.Max,
+ _ => LogInvalidAndReturn(op, nameof(GAL.BlendOp), Silk.NET.Vulkan.BlendOp.Add)
+ };
+ }
+
+ public static Silk.NET.Vulkan.BlendOverlapEXT Convert(this GAL.AdvancedBlendOverlap overlap)
+ {
+ return overlap switch
+ {
+ GAL.AdvancedBlendOverlap.Uncorrelated => Silk.NET.Vulkan.BlendOverlapEXT.UncorrelatedExt,
+ GAL.AdvancedBlendOverlap.Disjoint => Silk.NET.Vulkan.BlendOverlapEXT.DisjointExt,
+ GAL.AdvancedBlendOverlap.Conjoint => Silk.NET.Vulkan.BlendOverlapEXT.ConjointExt,
+ _ => LogInvalidAndReturn(overlap, nameof(GAL.AdvancedBlendOverlap), Silk.NET.Vulkan.BlendOverlapEXT.UncorrelatedExt)
+ };
+ }
+
+ public static Silk.NET.Vulkan.CompareOp Convert(this GAL.CompareOp op)
+ {
+ return op switch
+ {
+ GAL.CompareOp.Never or GAL.CompareOp.NeverGl => Silk.NET.Vulkan.CompareOp.Never,
+ GAL.CompareOp.Less or GAL.CompareOp.LessGl => Silk.NET.Vulkan.CompareOp.Less,
+ GAL.CompareOp.Equal or GAL.CompareOp.EqualGl => Silk.NET.Vulkan.CompareOp.Equal,
+ GAL.CompareOp.LessOrEqual or GAL.CompareOp.LessOrEqualGl => Silk.NET.Vulkan.CompareOp.LessOrEqual,
+ GAL.CompareOp.Greater or GAL.CompareOp.GreaterGl => Silk.NET.Vulkan.CompareOp.Greater,
+ GAL.CompareOp.NotEqual or GAL.CompareOp.NotEqualGl => Silk.NET.Vulkan.CompareOp.NotEqual,
+ GAL.CompareOp.GreaterOrEqual or GAL.CompareOp.GreaterOrEqualGl => Silk.NET.Vulkan.CompareOp.GreaterOrEqual,
+ GAL.CompareOp.Always or GAL.CompareOp.AlwaysGl => Silk.NET.Vulkan.CompareOp.Always,
+ _ => LogInvalidAndReturn(op, nameof(GAL.CompareOp), Silk.NET.Vulkan.CompareOp.Never)
+ };
+ }
+
+ public static CullModeFlags Convert(this Face face)
+ {
+ return face switch
+ {
+ Face.Back => CullModeFlags.BackBit,
+ Face.Front => CullModeFlags.FrontBit,
+ Face.FrontAndBack => CullModeFlags.FrontAndBack,
+ _ => LogInvalidAndReturn(face, nameof(Face), CullModeFlags.BackBit)
+ };
+ }
+
+ public static Silk.NET.Vulkan.FrontFace Convert(this GAL.FrontFace frontFace)
+ {
+ // Flipped to account for origin differences.
+ return frontFace switch
+ {
+ GAL.FrontFace.Clockwise => Silk.NET.Vulkan.FrontFace.CounterClockwise,
+ GAL.FrontFace.CounterClockwise => Silk.NET.Vulkan.FrontFace.Clockwise,
+ _ => LogInvalidAndReturn(frontFace, nameof(GAL.FrontFace), Silk.NET.Vulkan.FrontFace.Clockwise)
+ };
+ }
+
+ public static Silk.NET.Vulkan.IndexType Convert(this GAL.IndexType type)
+ {
+ return type switch
+ {
+ GAL.IndexType.UByte => Silk.NET.Vulkan.IndexType.Uint8Ext,
+ GAL.IndexType.UShort => Silk.NET.Vulkan.IndexType.Uint16,
+ GAL.IndexType.UInt => Silk.NET.Vulkan.IndexType.Uint32,
+ _ => LogInvalidAndReturn(type, nameof(GAL.IndexType), Silk.NET.Vulkan.IndexType.Uint16)
+ };
+ }
+
+ public static Filter Convert(this MagFilter filter)
+ {
+ return filter switch
+ {
+ MagFilter.Nearest => Filter.Nearest,
+ MagFilter.Linear => Filter.Linear,
+ _ => LogInvalidAndReturn(filter, nameof(MagFilter), Filter.Nearest)
+ };
+ }
+
+ public static (Filter, SamplerMipmapMode) Convert(this MinFilter filter)
+ {
+ return filter switch
+ {
+ MinFilter.Nearest => (Filter.Nearest, SamplerMipmapMode.Nearest),
+ MinFilter.Linear => (Filter.Linear, SamplerMipmapMode.Nearest),
+ MinFilter.NearestMipmapNearest => (Filter.Nearest, SamplerMipmapMode.Nearest),
+ MinFilter.LinearMipmapNearest => (Filter.Linear, SamplerMipmapMode.Nearest),
+ MinFilter.NearestMipmapLinear => (Filter.Nearest, SamplerMipmapMode.Linear),
+ MinFilter.LinearMipmapLinear => (Filter.Linear, SamplerMipmapMode.Linear),
+ _ => LogInvalidAndReturn(filter, nameof(MinFilter), (Filter.Nearest, SamplerMipmapMode.Nearest))
+ };
+ }
+
+ public static Silk.NET.Vulkan.PrimitiveTopology Convert(this GAL.PrimitiveTopology topology)
+ {
+ return topology switch
+ {
+ GAL.PrimitiveTopology.Points => Silk.NET.Vulkan.PrimitiveTopology.PointList,
+ GAL.PrimitiveTopology.Lines => Silk.NET.Vulkan.PrimitiveTopology.LineList,
+ GAL.PrimitiveTopology.LineStrip => Silk.NET.Vulkan.PrimitiveTopology.LineStrip,
+ GAL.PrimitiveTopology.Triangles => Silk.NET.Vulkan.PrimitiveTopology.TriangleList,
+ GAL.PrimitiveTopology.TriangleStrip => Silk.NET.Vulkan.PrimitiveTopology.TriangleStrip,
+ GAL.PrimitiveTopology.TriangleFan => Silk.NET.Vulkan.PrimitiveTopology.TriangleFan,
+ GAL.PrimitiveTopology.LinesAdjacency => Silk.NET.Vulkan.PrimitiveTopology.LineListWithAdjacency,
+ GAL.PrimitiveTopology.LineStripAdjacency => Silk.NET.Vulkan.PrimitiveTopology.LineStripWithAdjacency,
+ GAL.PrimitiveTopology.TrianglesAdjacency => Silk.NET.Vulkan.PrimitiveTopology.TriangleListWithAdjacency,
+ GAL.PrimitiveTopology.TriangleStripAdjacency => Silk.NET.Vulkan.PrimitiveTopology.TriangleStripWithAdjacency,
+ GAL.PrimitiveTopology.Patches => Silk.NET.Vulkan.PrimitiveTopology.PatchList,
+ GAL.PrimitiveTopology.Polygon => Silk.NET.Vulkan.PrimitiveTopology.TriangleFan,
+ GAL.PrimitiveTopology.Quads => throw new NotSupportedException("Quad topology is not available in Vulkan."),
+ GAL.PrimitiveTopology.QuadStrip => throw new NotSupportedException("QuadStrip topology is not available in Vulkan."),
+ _ => LogInvalidAndReturn(topology, nameof(GAL.PrimitiveTopology), Silk.NET.Vulkan.PrimitiveTopology.TriangleList)
+ };
+ }
+
+ public static Silk.NET.Vulkan.StencilOp Convert(this GAL.StencilOp op)
+ {
+ return op switch
+ {
+ GAL.StencilOp.Keep or GAL.StencilOp.KeepGl => Silk.NET.Vulkan.StencilOp.Keep,
+ GAL.StencilOp.Zero or GAL.StencilOp.ZeroGl => Silk.NET.Vulkan.StencilOp.Zero,
+ GAL.StencilOp.Replace or GAL.StencilOp.ReplaceGl => Silk.NET.Vulkan.StencilOp.Replace,
+ GAL.StencilOp.IncrementAndClamp or GAL.StencilOp.IncrementAndClampGl => Silk.NET.Vulkan.StencilOp.IncrementAndClamp,
+ GAL.StencilOp.DecrementAndClamp or GAL.StencilOp.DecrementAndClampGl => Silk.NET.Vulkan.StencilOp.DecrementAndClamp,
+ GAL.StencilOp.Invert or GAL.StencilOp.InvertGl => Silk.NET.Vulkan.StencilOp.Invert,
+ GAL.StencilOp.IncrementAndWrap or GAL.StencilOp.IncrementAndWrapGl => Silk.NET.Vulkan.StencilOp.IncrementAndWrap,
+ GAL.StencilOp.DecrementAndWrap or GAL.StencilOp.DecrementAndWrapGl => Silk.NET.Vulkan.StencilOp.DecrementAndWrap,
+ _ => LogInvalidAndReturn(op, nameof(GAL.StencilOp), Silk.NET.Vulkan.StencilOp.Keep)
+ };
+ }
+
+ public static ComponentSwizzle Convert(this SwizzleComponent swizzleComponent)
+ {
+ return swizzleComponent switch
+ {
+ SwizzleComponent.Zero => ComponentSwizzle.Zero,
+ SwizzleComponent.One => ComponentSwizzle.One,
+ SwizzleComponent.Red => ComponentSwizzle.R,
+ SwizzleComponent.Green => ComponentSwizzle.G,
+ SwizzleComponent.Blue => ComponentSwizzle.B,
+ SwizzleComponent.Alpha => ComponentSwizzle.A,
+ _ => LogInvalidAndReturn(swizzleComponent, nameof(SwizzleComponent), ComponentSwizzle.Zero)
+ };
+ }
+
+ public static ImageType Convert(this Target target)
+ {
+ return target switch
+ {
+ Target.Texture1D or
+ Target.Texture1DArray or
+ Target.TextureBuffer => ImageType.Type1D,
+ Target.Texture2D or
+ Target.Texture2DArray or
+ Target.Texture2DMultisample or
+ Target.Cubemap or
+ Target.CubemapArray => ImageType.Type2D,
+ Target.Texture3D => ImageType.Type3D,
+ _ => LogInvalidAndReturn(target, nameof(Target), ImageType.Type2D)
+ };
+ }
+
+ public static ImageViewType ConvertView(this Target target)
+ {
+ return target switch
+ {
+ Target.Texture1D => ImageViewType.Type1D,
+ Target.Texture2D or Target.Texture2DMultisample => ImageViewType.Type2D,
+ Target.Texture3D => ImageViewType.Type3D,
+ Target.Texture1DArray => ImageViewType.Type1DArray,
+ Target.Texture2DArray => ImageViewType.Type2DArray,
+ Target.Cubemap => ImageViewType.TypeCube,
+ Target.CubemapArray => ImageViewType.TypeCubeArray,
+ _ => LogInvalidAndReturn(target, nameof(Target), ImageViewType.Type2D)
+ };
+ }
+
+ public static ImageAspectFlags ConvertAspectFlags(this GAL.Format format)
+ {
+ return format switch
+ {
+ GAL.Format.D16Unorm or GAL.Format.D32Float => ImageAspectFlags.DepthBit,
+ GAL.Format.S8Uint => ImageAspectFlags.StencilBit,
+ GAL.Format.D24UnormS8Uint or
+ GAL.Format.D32FloatS8Uint or
+ GAL.Format.S8UintD24Unorm => ImageAspectFlags.DepthBit | ImageAspectFlags.StencilBit,
+ _ => ImageAspectFlags.ColorBit
+ };
+ }
+
+ public static ImageAspectFlags ConvertAspectFlags(this GAL.Format format, DepthStencilMode depthStencilMode)
+ {
+ return format switch
+ {
+ GAL.Format.D16Unorm or GAL.Format.D32Float => ImageAspectFlags.DepthBit,
+ GAL.Format.S8Uint => ImageAspectFlags.StencilBit,
+ GAL.Format.D24UnormS8Uint or
+ GAL.Format.D32FloatS8Uint or
+ GAL.Format.S8UintD24Unorm => depthStencilMode == DepthStencilMode.Stencil ? ImageAspectFlags.StencilBit : ImageAspectFlags.DepthBit,
+ _ => ImageAspectFlags.ColorBit
+ };
+ }
+
+ public static LogicOp Convert(this LogicalOp op)
+ {
+ return op switch
+ {
+ LogicalOp.Clear => LogicOp.Clear,
+ LogicalOp.And => LogicOp.And,
+ LogicalOp.AndReverse => LogicOp.AndReverse,
+ LogicalOp.Copy => LogicOp.Copy,
+ LogicalOp.AndInverted => LogicOp.AndInverted,
+ LogicalOp.Noop => LogicOp.NoOp,
+ LogicalOp.Xor => LogicOp.Xor,
+ LogicalOp.Or => LogicOp.Or,
+ LogicalOp.Nor => LogicOp.Nor,
+ LogicalOp.Equiv => LogicOp.Equivalent,
+ LogicalOp.Invert => LogicOp.Invert,
+ LogicalOp.OrReverse => LogicOp.OrReverse,
+ LogicalOp.CopyInverted => LogicOp.CopyInverted,
+ LogicalOp.OrInverted => LogicOp.OrInverted,
+ LogicalOp.Nand => LogicOp.Nand,
+ LogicalOp.Set => LogicOp.Set,
+ _ => LogInvalidAndReturn(op, nameof(LogicalOp), LogicOp.Copy)
+ };
+ }
+
+ private static T2 LogInvalidAndReturn<T1, T2>(T1 value, string name, T2 defaultValue = default)
+ {
+ Logger.Debug?.Print(LogClass.Gpu, $"Invalid {name} enum value: {value}.");
+
+ return defaultValue;
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/FenceHelper.cs b/src/Ryujinx.Graphics.Vulkan/FenceHelper.cs
new file mode 100644
index 00000000..d6731c0e
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/FenceHelper.cs
@@ -0,0 +1,30 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ static class FenceHelper
+ {
+ private const ulong DefaultTimeout = 100000000; // 100ms
+
+ public static bool AnySignaled(Vk api, Device device, ReadOnlySpan<Fence> fences, ulong timeout = 0)
+ {
+ return api.WaitForFences(device, (uint)fences.Length, fences, false, timeout) == Result.Success;
+ }
+
+ public static bool AllSignaled(Vk api, Device device, ReadOnlySpan<Fence> fences, ulong timeout = 0)
+ {
+ return api.WaitForFences(device, (uint)fences.Length, fences, true, timeout) == Result.Success;
+ }
+
+ public static void WaitAllIndefinitely(Vk api, Device device, ReadOnlySpan<Fence> fences)
+ {
+ Result result;
+ while ((result = api.WaitForFences(device, (uint)fences.Length, fences, true, DefaultTimeout)) == Result.Timeout)
+ {
+ // Keep waiting while the fence is not signaled.
+ }
+ result.ThrowOnError();
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/FenceHolder.cs b/src/Ryujinx.Graphics.Vulkan/FenceHolder.cs
new file mode 100644
index 00000000..1c1e6240
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/FenceHolder.cs
@@ -0,0 +1,79 @@
+using Silk.NET.Vulkan;
+using System;
+using System.Threading;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class FenceHolder : IDisposable
+ {
+ private readonly Vk _api;
+ private readonly Device _device;
+ private Fence _fence;
+ private int _referenceCount;
+ private bool _disposed;
+
+ public unsafe FenceHolder(Vk api, Device device)
+ {
+ _api = api;
+ _device = device;
+
+ var fenceCreateInfo = new FenceCreateInfo()
+ {
+ SType = StructureType.FenceCreateInfo
+ };
+
+ api.CreateFence(device, in fenceCreateInfo, null, out _fence).ThrowOnError();
+
+ _referenceCount = 1;
+ }
+
+ public Fence GetUnsafe()
+ {
+ return _fence;
+ }
+
+ public Fence Get()
+ {
+ Interlocked.Increment(ref _referenceCount);
+ return _fence;
+ }
+
+ public void Put()
+ {
+ if (Interlocked.Decrement(ref _referenceCount) == 0)
+ {
+ _api.DestroyFence(_device, _fence, Span<AllocationCallbacks>.Empty);
+ _fence = default;
+ }
+ }
+
+ public void Wait()
+ {
+ Span<Fence> fences = stackalloc Fence[]
+ {
+ _fence
+ };
+
+ FenceHelper.WaitAllIndefinitely(_api, _device, fences);
+ }
+
+ public bool IsSignaled()
+ {
+ Span<Fence> fences = stackalloc Fence[]
+ {
+ _fence
+ };
+
+ return FenceHelper.AllSignaled(_api, _device, fences);
+ }
+
+ public void Dispose()
+ {
+ if (!_disposed)
+ {
+ Put();
+ _disposed = true;
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/FormatCapabilities.cs b/src/Ryujinx.Graphics.Vulkan/FormatCapabilities.cs
new file mode 100644
index 00000000..7019dfd9
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/FormatCapabilities.cs
@@ -0,0 +1,164 @@
+using Ryujinx.Common.Logging;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class FormatCapabilities
+ {
+ private readonly FormatFeatureFlags[] _bufferTable;
+ private readonly FormatFeatureFlags[] _optimalTable;
+
+ private readonly Vk _api;
+ private readonly PhysicalDevice _physicalDevice;
+
+ public FormatCapabilities(Vk api, PhysicalDevice physicalDevice)
+ {
+ _api = api;
+ _physicalDevice = physicalDevice;
+
+ int totalFormats = Enum.GetNames(typeof(GAL.Format)).Length;
+
+ _bufferTable = new FormatFeatureFlags[totalFormats];
+ _optimalTable = new FormatFeatureFlags[totalFormats];
+ }
+
+ public bool BufferFormatsSupport(FormatFeatureFlags flags, params GAL.Format[] formats)
+ {
+ foreach (GAL.Format format in formats)
+ {
+ if (!BufferFormatSupports(flags, format))
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ public bool OptimalFormatsSupport(FormatFeatureFlags flags, params GAL.Format[] formats)
+ {
+ foreach (GAL.Format format in formats)
+ {
+ if (!OptimalFormatSupports(flags, format))
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ public bool BufferFormatSupports(FormatFeatureFlags flags, GAL.Format format)
+ {
+ var formatFeatureFlags = _bufferTable[(int)format];
+
+ if (formatFeatureFlags == 0)
+ {
+ _api.GetPhysicalDeviceFormatProperties(_physicalDevice, FormatTable.GetFormat(format), out var fp);
+ formatFeatureFlags = fp.BufferFeatures;
+ _bufferTable[(int)format] = formatFeatureFlags;
+ }
+
+ return (formatFeatureFlags & flags) == flags;
+ }
+
+ public bool OptimalFormatSupports(FormatFeatureFlags flags, GAL.Format format)
+ {
+ var formatFeatureFlags = _optimalTable[(int)format];
+
+ if (formatFeatureFlags == 0)
+ {
+ _api.GetPhysicalDeviceFormatProperties(_physicalDevice, FormatTable.GetFormat(format), out var fp);
+ formatFeatureFlags = fp.OptimalTilingFeatures;
+ _optimalTable[(int)format] = formatFeatureFlags;
+ }
+
+ return (formatFeatureFlags & flags) == flags;
+ }
+
+ public VkFormat ConvertToVkFormat(GAL.Format srcFormat)
+ {
+ var format = FormatTable.GetFormat(srcFormat);
+
+ var requiredFeatures = FormatFeatureFlags.SampledImageBit |
+ FormatFeatureFlags.TransferSrcBit |
+ FormatFeatureFlags.TransferDstBit;
+
+ if (srcFormat.IsDepthOrStencil())
+ {
+ requiredFeatures |= FormatFeatureFlags.DepthStencilAttachmentBit;
+ }
+ else if (srcFormat.IsRtColorCompatible())
+ {
+ requiredFeatures |= FormatFeatureFlags.ColorAttachmentBit;
+ }
+
+ if (srcFormat.IsImageCompatible())
+ {
+ requiredFeatures |= FormatFeatureFlags.StorageImageBit;
+ }
+
+ if (!OptimalFormatSupports(requiredFeatures, srcFormat) || (IsD24S8(srcFormat) && VulkanConfiguration.ForceD24S8Unsupported))
+ {
+ // The format is not supported. Can we convert it to a higher precision format?
+ if (IsD24S8(srcFormat))
+ {
+ format = VkFormat.D32SfloatS8Uint;
+ }
+ else if (srcFormat == GAL.Format.R4G4B4A4Unorm)
+ {
+ format = VkFormat.R4G4B4A4UnormPack16;
+ }
+ else
+ {
+ Logger.Error?.Print(LogClass.Gpu, $"Format {srcFormat} is not supported by the host.");
+ }
+ }
+
+ return format;
+ }
+
+ public VkFormat ConvertToVertexVkFormat(GAL.Format srcFormat)
+ {
+ var format = FormatTable.GetFormat(srcFormat);
+
+ if (!BufferFormatSupports(FormatFeatureFlags.VertexBufferBit, srcFormat) ||
+ (IsRGB16IntFloat(srcFormat) && VulkanConfiguration.ForceRGB16IntFloatUnsupported))
+ {
+ // The format is not supported. Can we convert it to an alternative format?
+ switch (srcFormat)
+ {
+ case GAL.Format.R16G16B16Float:
+ format = VkFormat.R16G16B16A16Sfloat;
+ break;
+ case GAL.Format.R16G16B16Sint:
+ format = VkFormat.R16G16B16A16Sint;
+ break;
+ case GAL.Format.R16G16B16Uint:
+ format = VkFormat.R16G16B16A16Uint;
+ break;
+ default:
+ Logger.Error?.Print(LogClass.Gpu, $"Format {srcFormat} is not supported by the host.");
+ break;
+ }
+ }
+
+ return format;
+ }
+
+ public static bool IsD24S8(GAL.Format format)
+ {
+ return format == GAL.Format.D24UnormS8Uint || format == GAL.Format.S8UintD24Unorm;
+ }
+
+ private static bool IsRGB16IntFloat(GAL.Format format)
+ {
+ return format == GAL.Format.R16G16B16Float ||
+ format == GAL.Format.R16G16B16Sint ||
+ format == GAL.Format.R16G16B16Uint;
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/FormatConverter.cs b/src/Ryujinx.Graphics.Vulkan/FormatConverter.cs
new file mode 100644
index 00000000..33472ae4
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/FormatConverter.cs
@@ -0,0 +1,49 @@
+using System;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class FormatConverter
+ {
+ public static void ConvertD24S8ToD32FS8(Span<byte> output, ReadOnlySpan<byte> input)
+ {
+ const float UnormToFloat = 1f / 0xffffff;
+
+ Span<uint> outputUint = MemoryMarshal.Cast<byte, uint>(output);
+ ReadOnlySpan<uint> inputUint = MemoryMarshal.Cast<byte, uint>(input);
+
+ int i = 0;
+
+ for (; i < inputUint.Length; i++)
+ {
+ uint depthStencil = inputUint[i];
+ uint depth = depthStencil >> 8;
+ uint stencil = depthStencil & 0xff;
+
+ int j = i * 2;
+
+ outputUint[j] = (uint)BitConverter.SingleToInt32Bits(depth * UnormToFloat);
+ outputUint[j + 1] = stencil;
+ }
+ }
+
+ public static void ConvertD32FS8ToD24S8(Span<byte> output, ReadOnlySpan<byte> input)
+ {
+ Span<uint> outputUint = MemoryMarshal.Cast<byte, uint>(output);
+ ReadOnlySpan<uint> inputUint = MemoryMarshal.Cast<byte, uint>(input);
+
+ int i = 0;
+
+ for (; i < inputUint.Length; i += 2)
+ {
+ float depth = BitConverter.Int32BitsToSingle((int)inputUint[i]);
+ uint stencil = inputUint[i + 1];
+ uint depthStencil = (Math.Clamp((uint)(depth * 0xffffff), 0, 0xffffff) << 8) | (stencil & 0xff);
+
+ int j = i >> 1;
+
+ outputUint[j] = depthStencil;
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/FormatTable.cs b/src/Ryujinx.Graphics.Vulkan/FormatTable.cs
new file mode 100644
index 00000000..45fc46ad
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/FormatTable.cs
@@ -0,0 +1,172 @@
+using Ryujinx.Graphics.GAL;
+using System;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ static class FormatTable
+ {
+ private static readonly VkFormat[] _table;
+
+ static FormatTable()
+ {
+ _table = new VkFormat[Enum.GetNames(typeof(Format)).Length];
+
+ Add(Format.R8Unorm, VkFormat.R8Unorm);
+ Add(Format.R8Snorm, VkFormat.R8SNorm);
+ Add(Format.R8Uint, VkFormat.R8Uint);
+ Add(Format.R8Sint, VkFormat.R8Sint);
+ Add(Format.R16Float, VkFormat.R16Sfloat);
+ Add(Format.R16Unorm, VkFormat.R16Unorm);
+ Add(Format.R16Snorm, VkFormat.R16SNorm);
+ Add(Format.R16Uint, VkFormat.R16Uint);
+ Add(Format.R16Sint, VkFormat.R16Sint);
+ Add(Format.R32Float, VkFormat.R32Sfloat);
+ Add(Format.R32Uint, VkFormat.R32Uint);
+ Add(Format.R32Sint, VkFormat.R32Sint);
+ Add(Format.R8G8Unorm, VkFormat.R8G8Unorm);
+ Add(Format.R8G8Snorm, VkFormat.R8G8SNorm);
+ Add(Format.R8G8Uint, VkFormat.R8G8Uint);
+ Add(Format.R8G8Sint, VkFormat.R8G8Sint);
+ Add(Format.R16G16Float, VkFormat.R16G16Sfloat);
+ Add(Format.R16G16Unorm, VkFormat.R16G16Unorm);
+ Add(Format.R16G16Snorm, VkFormat.R16G16SNorm);
+ Add(Format.R16G16Uint, VkFormat.R16G16Uint);
+ Add(Format.R16G16Sint, VkFormat.R16G16Sint);
+ Add(Format.R32G32Float, VkFormat.R32G32Sfloat);
+ Add(Format.R32G32Uint, VkFormat.R32G32Uint);
+ Add(Format.R32G32Sint, VkFormat.R32G32Sint);
+ Add(Format.R8G8B8Unorm, VkFormat.R8G8B8Unorm);
+ Add(Format.R8G8B8Snorm, VkFormat.R8G8B8SNorm);
+ Add(Format.R8G8B8Uint, VkFormat.R8G8B8Uint);
+ Add(Format.R8G8B8Sint, VkFormat.R8G8B8Sint);
+ Add(Format.R16G16B16Float, VkFormat.R16G16B16Sfloat);
+ Add(Format.R16G16B16Unorm, VkFormat.R16G16B16Unorm);
+ Add(Format.R16G16B16Snorm, VkFormat.R16G16B16SNorm);
+ Add(Format.R16G16B16Uint, VkFormat.R16G16B16Uint);
+ Add(Format.R16G16B16Sint, VkFormat.R16G16B16Sint);
+ Add(Format.R32G32B32Float, VkFormat.R32G32B32Sfloat);
+ Add(Format.R32G32B32Uint, VkFormat.R32G32B32Uint);
+ Add(Format.R32G32B32Sint, VkFormat.R32G32B32Sint);
+ Add(Format.R8G8B8A8Unorm, VkFormat.R8G8B8A8Unorm);
+ Add(Format.R8G8B8A8Snorm, VkFormat.R8G8B8A8SNorm);
+ Add(Format.R8G8B8A8Uint, VkFormat.R8G8B8A8Uint);
+ Add(Format.R8G8B8A8Sint, VkFormat.R8G8B8A8Sint);
+ Add(Format.R16G16B16A16Float, VkFormat.R16G16B16A16Sfloat);
+ Add(Format.R16G16B16A16Unorm, VkFormat.R16G16B16A16Unorm);
+ Add(Format.R16G16B16A16Snorm, VkFormat.R16G16B16A16SNorm);
+ Add(Format.R16G16B16A16Uint, VkFormat.R16G16B16A16Uint);
+ Add(Format.R16G16B16A16Sint, VkFormat.R16G16B16A16Sint);
+ Add(Format.R32G32B32A32Float, VkFormat.R32G32B32A32Sfloat);
+ Add(Format.R32G32B32A32Uint, VkFormat.R32G32B32A32Uint);
+ Add(Format.R32G32B32A32Sint, VkFormat.R32G32B32A32Sint);
+ Add(Format.S8Uint, VkFormat.S8Uint);
+ Add(Format.D16Unorm, VkFormat.D16Unorm);
+ Add(Format.S8UintD24Unorm, VkFormat.D24UnormS8Uint);
+ Add(Format.D32Float, VkFormat.D32Sfloat);
+ Add(Format.D24UnormS8Uint, VkFormat.D24UnormS8Uint);
+ Add(Format.D32FloatS8Uint, VkFormat.D32SfloatS8Uint);
+ Add(Format.R8G8B8A8Srgb, VkFormat.R8G8B8A8Srgb);
+ Add(Format.R4G4Unorm, VkFormat.R4G4UnormPack8);
+ Add(Format.R4G4B4A4Unorm, VkFormat.A4B4G4R4UnormPack16Ext);
+ Add(Format.R5G5B5X1Unorm, VkFormat.A1R5G5B5UnormPack16);
+ Add(Format.R5G5B5A1Unorm, VkFormat.A1R5G5B5UnormPack16);
+ Add(Format.R5G6B5Unorm, VkFormat.R5G6B5UnormPack16);
+ Add(Format.R10G10B10A2Unorm, VkFormat.A2B10G10R10UnormPack32);
+ Add(Format.R10G10B10A2Uint, VkFormat.A2B10G10R10UintPack32);
+ Add(Format.R11G11B10Float, VkFormat.B10G11R11UfloatPack32);
+ Add(Format.R9G9B9E5Float, VkFormat.E5B9G9R9UfloatPack32);
+ Add(Format.Bc1RgbaUnorm, VkFormat.BC1RgbaUnormBlock);
+ Add(Format.Bc2Unorm, VkFormat.BC2UnormBlock);
+ Add(Format.Bc3Unorm, VkFormat.BC3UnormBlock);
+ Add(Format.Bc1RgbaSrgb, VkFormat.BC1RgbaSrgbBlock);
+ Add(Format.Bc2Srgb, VkFormat.BC2SrgbBlock);
+ Add(Format.Bc3Srgb, VkFormat.BC3SrgbBlock);
+ Add(Format.Bc4Unorm, VkFormat.BC4UnormBlock);
+ Add(Format.Bc4Snorm, VkFormat.BC4SNormBlock);
+ Add(Format.Bc5Unorm, VkFormat.BC5UnormBlock);
+ Add(Format.Bc5Snorm, VkFormat.BC5SNormBlock);
+ Add(Format.Bc7Unorm, VkFormat.BC7UnormBlock);
+ Add(Format.Bc7Srgb, VkFormat.BC7SrgbBlock);
+ Add(Format.Bc6HSfloat, VkFormat.BC6HSfloatBlock);
+ Add(Format.Bc6HUfloat, VkFormat.BC6HUfloatBlock);
+ Add(Format.Etc2RgbUnorm, VkFormat.Etc2R8G8B8UnormBlock);
+ Add(Format.Etc2RgbaUnorm, VkFormat.Etc2R8G8B8A8UnormBlock);
+ Add(Format.Etc2RgbPtaUnorm, VkFormat.Etc2R8G8B8A1UnormBlock);
+ Add(Format.Etc2RgbSrgb, VkFormat.Etc2R8G8B8SrgbBlock);
+ Add(Format.Etc2RgbaSrgb, VkFormat.Etc2R8G8B8A8SrgbBlock);
+ Add(Format.Etc2RgbPtaSrgb, VkFormat.Etc2R8G8B8A1SrgbBlock);
+ Add(Format.R8Uscaled, VkFormat.R8Uscaled);
+ Add(Format.R8Sscaled, VkFormat.R8Sscaled);
+ Add(Format.R16Uscaled, VkFormat.R16Uscaled);
+ Add(Format.R16Sscaled, VkFormat.R16Sscaled);
+ // Add(Format.R32Uscaled, VkFormat.R32Uscaled);
+ // Add(Format.R32Sscaled, VkFormat.R32Sscaled);
+ Add(Format.R8G8Uscaled, VkFormat.R8G8Uscaled);
+ Add(Format.R8G8Sscaled, VkFormat.R8G8Sscaled);
+ Add(Format.R16G16Uscaled, VkFormat.R16G16Uscaled);
+ Add(Format.R16G16Sscaled, VkFormat.R16G16Sscaled);
+ // Add(Format.R32G32Uscaled, VkFormat.R32G32Uscaled);
+ // Add(Format.R32G32Sscaled, VkFormat.R32G32Sscaled);
+ Add(Format.R8G8B8Uscaled, VkFormat.R8G8B8Uscaled);
+ Add(Format.R8G8B8Sscaled, VkFormat.R8G8B8Sscaled);
+ Add(Format.R16G16B16Uscaled, VkFormat.R16G16B16Uscaled);
+ Add(Format.R16G16B16Sscaled, VkFormat.R16G16B16Sscaled);
+ // Add(Format.R32G32B32Uscaled, VkFormat.R32G32B32Uscaled);
+ // Add(Format.R32G32B32Sscaled, VkFormat.R32G32B32Sscaled);
+ Add(Format.R8G8B8A8Uscaled, VkFormat.R8G8B8A8Uscaled);
+ Add(Format.R8G8B8A8Sscaled, VkFormat.R8G8B8A8Sscaled);
+ Add(Format.R16G16B16A16Uscaled, VkFormat.R16G16B16A16Uscaled);
+ Add(Format.R16G16B16A16Sscaled, VkFormat.R16G16B16A16Sscaled);
+ // Add(Format.R32G32B32A32Uscaled, VkFormat.R32G32B32A32Uscaled);
+ // Add(Format.R32G32B32A32Sscaled, VkFormat.R32G32B32A32Sscaled);
+ Add(Format.R10G10B10A2Snorm, VkFormat.A2B10G10R10SNormPack32);
+ Add(Format.R10G10B10A2Sint, VkFormat.A2B10G10R10SintPack32);
+ Add(Format.R10G10B10A2Uscaled, VkFormat.A2B10G10R10UscaledPack32);
+ Add(Format.R10G10B10A2Sscaled, VkFormat.A2B10G10R10SscaledPack32);
+ Add(Format.Astc4x4Unorm, VkFormat.Astc4x4UnormBlock);
+ Add(Format.Astc5x4Unorm, VkFormat.Astc5x4UnormBlock);
+ Add(Format.Astc5x5Unorm, VkFormat.Astc5x5UnormBlock);
+ Add(Format.Astc6x5Unorm, VkFormat.Astc6x5UnormBlock);
+ Add(Format.Astc6x6Unorm, VkFormat.Astc6x6UnormBlock);
+ Add(Format.Astc8x5Unorm, VkFormat.Astc8x5UnormBlock);
+ Add(Format.Astc8x6Unorm, VkFormat.Astc8x6UnormBlock);
+ Add(Format.Astc8x8Unorm, VkFormat.Astc8x8UnormBlock);
+ Add(Format.Astc10x5Unorm, VkFormat.Astc10x5UnormBlock);
+ Add(Format.Astc10x6Unorm, VkFormat.Astc10x6UnormBlock);
+ Add(Format.Astc10x8Unorm, VkFormat.Astc10x8UnormBlock);
+ Add(Format.Astc10x10Unorm, VkFormat.Astc10x10UnormBlock);
+ Add(Format.Astc12x10Unorm, VkFormat.Astc12x10UnormBlock);
+ Add(Format.Astc12x12Unorm, VkFormat.Astc12x12UnormBlock);
+ Add(Format.Astc4x4Srgb, VkFormat.Astc4x4SrgbBlock);
+ Add(Format.Astc5x4Srgb, VkFormat.Astc5x4SrgbBlock);
+ Add(Format.Astc5x5Srgb, VkFormat.Astc5x5SrgbBlock);
+ Add(Format.Astc6x5Srgb, VkFormat.Astc6x5SrgbBlock);
+ Add(Format.Astc6x6Srgb, VkFormat.Astc6x6SrgbBlock);
+ Add(Format.Astc8x5Srgb, VkFormat.Astc8x5SrgbBlock);
+ Add(Format.Astc8x6Srgb, VkFormat.Astc8x6SrgbBlock);
+ Add(Format.Astc8x8Srgb, VkFormat.Astc8x8SrgbBlock);
+ Add(Format.Astc10x5Srgb, VkFormat.Astc10x5SrgbBlock);
+ Add(Format.Astc10x6Srgb, VkFormat.Astc10x6SrgbBlock);
+ Add(Format.Astc10x8Srgb, VkFormat.Astc10x8SrgbBlock);
+ Add(Format.Astc10x10Srgb, VkFormat.Astc10x10SrgbBlock);
+ Add(Format.Astc12x10Srgb, VkFormat.Astc12x10SrgbBlock);
+ Add(Format.Astc12x12Srgb, VkFormat.Astc12x12SrgbBlock);
+ Add(Format.B5G6R5Unorm, VkFormat.R5G6B5UnormPack16);
+ Add(Format.B5G5R5A1Unorm, VkFormat.A1R5G5B5UnormPack16);
+ Add(Format.A1B5G5R5Unorm, VkFormat.R5G5B5A1UnormPack16);
+ Add(Format.B8G8R8A8Unorm, VkFormat.B8G8R8A8Unorm);
+ Add(Format.B8G8R8A8Srgb, VkFormat.B8G8R8A8Srgb);
+ }
+
+ private static void Add(Format format, VkFormat vkFormat)
+ {
+ _table[(int)format] = vkFormat;
+ }
+
+ public static VkFormat GetFormat(Format format)
+ {
+ return _table[(int)format];
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/FramebufferParams.cs b/src/Ryujinx.Graphics.Vulkan/FramebufferParams.cs
new file mode 100644
index 00000000..cde99202
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/FramebufferParams.cs
@@ -0,0 +1,240 @@
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Linq;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Describes the attachments, dimensions and formats of a Vulkan framebuffer,
+ // and creates the VkFramebuffer object on demand. Also inserts the barriers
+ // and modification tracking needed for the attachments it references.
+ class FramebufferParams
+ {
+ private readonly Device _device;
+ private readonly Auto<DisposableImageView>[] _attachments;
+ private readonly TextureView[] _colors;
+ private readonly TextureView _depthStencil;
+ // Bit i set means color bind point i holds a valid attachment.
+ private uint _validColorAttachments;
+
+ public uint Width { get; }
+ public uint Height { get; }
+ public uint Layers { get; }
+
+ public uint[] AttachmentSamples { get; }
+ public VkFormat[] AttachmentFormats { get; }
+ // Bind indices of the valid color attachments, in ascending order.
+ public int[] AttachmentIndices { get; }
+
+ public int AttachmentsCount { get; }
+ // Highest color bind index in use, or -1 when there are no color attachments
+ // (relies on AttachmentIndices being filled in ascending bind order).
+ public int MaxColorAttachmentIndex => AttachmentIndices.Length > 0 ? AttachmentIndices[AttachmentIndices.Length - 1] : -1;
+ public bool HasDepthStencil { get; }
+ public int ColorAttachmentsCount => AttachmentsCount - (HasDepthStencil ? 1 : 0);
+
+ // Single-attachment constructor: wraps one image view (either a color
+ // attachment at bind 0, or a depth-stencil attachment) with fixed layer count 1.
+ public FramebufferParams(
+ Device device,
+ Auto<DisposableImageView> view,
+ uint width,
+ uint height,
+ uint samples,
+ bool isDepthStencil,
+ VkFormat format)
+ {
+ _device = device;
+ _attachments = new[] { view };
+ _validColorAttachments = isDepthStencil ? 0u : 1u;
+
+ Width = width;
+ Height = height;
+ Layers = 1;
+
+ AttachmentSamples = new[] { samples };
+ AttachmentFormats = new[] { format };
+ AttachmentIndices = isDepthStencil ? Array.Empty<int>() : new[] { 0 };
+
+ AttachmentsCount = 1;
+
+ HasDepthStencil = isDepthStencil;
+ }
+
+ // Multi-attachment constructor: collects all valid color targets (skipping
+ // null/invalid entries while preserving their bind indices) plus an optional
+ // depth-stencil target, which always occupies the last attachment slot.
+ public FramebufferParams(Device device, ITexture[] colors, ITexture depthStencil)
+ {
+ _device = device;
+
+ int colorsCount = colors.Count(IsValidTextureView);
+
+ int count = colorsCount + (IsValidTextureView(depthStencil) ? 1 : 0);
+
+ _attachments = new Auto<DisposableImageView>[count];
+ _colors = new TextureView[colorsCount];
+
+ AttachmentSamples = new uint[count];
+ AttachmentFormats = new VkFormat[count];
+ AttachmentIndices = new int[colorsCount];
+
+ // Framebuffer dimensions are the minimum across all attachments, so
+ // rendering never addresses texels outside any attached image.
+ uint width = uint.MaxValue;
+ uint height = uint.MaxValue;
+ uint layers = uint.MaxValue;
+
+ int index = 0;
+ int bindIndex = 0;
+
+ foreach (ITexture color in colors)
+ {
+ if (IsValidTextureView(color))
+ {
+ var texture = (TextureView)color;
+
+ _attachments[index] = texture.GetImageViewForAttachment();
+ _colors[index] = texture;
+ _validColorAttachments |= 1u << bindIndex;
+
+ AttachmentSamples[index] = (uint)texture.Info.Samples;
+ AttachmentFormats[index] = texture.VkFormat;
+ AttachmentIndices[index] = bindIndex;
+
+ width = Math.Min(width, (uint)texture.Width);
+ height = Math.Min(height, (uint)texture.Height);
+ layers = Math.Min(layers, (uint)texture.Layers);
+
+ if (++index >= colorsCount)
+ {
+ break;
+ }
+ }
+
+ bindIndex++;
+ }
+
+ if (depthStencil is TextureView dsTexture && dsTexture.Valid)
+ {
+ _attachments[count - 1] = dsTexture.GetImageViewForAttachment();
+ _depthStencil = dsTexture;
+
+ AttachmentSamples[count - 1] = (uint)dsTexture.Info.Samples;
+ AttachmentFormats[count - 1] = dsTexture.VkFormat;
+
+ width = Math.Min(width, (uint)dsTexture.Width);
+ height = Math.Min(height, (uint)dsTexture.Height);
+ layers = Math.Min(layers, (uint)dsTexture.Layers);
+
+ HasDepthStencil = true;
+ }
+
+ // With no attachments at all the mins were never updated; fall back to
+ // 1x1x1 rather than leaving them at uint.MaxValue.
+ if (count == 0)
+ {
+ width = height = layers = 1;
+ }
+
+ Width = width;
+ Height = height;
+ Layers = layers;
+
+ AttachmentsCount = count;
+ }
+
+ // Returns the image view for an attachment slot, or null when out of range.
+ public Auto<DisposableImageView> GetAttachment(int index)
+ {
+ if ((uint)index >= _attachments.Length)
+ {
+ return null;
+ }
+
+ return _attachments[index];
+ }
+
+ // Classifies a color attachment's format so clears/blits can pick the
+ // matching shader variant. Non-integer (and out-of-range) formats are Float.
+ public ComponentType GetAttachmentComponentType(int index)
+ {
+ if (_colors != null && (uint)index < _colors.Length)
+ {
+ var format = _colors[index].Info.Format;
+
+ if (format.IsSint())
+ {
+ return ComponentType.SignedInteger;
+ }
+ else if (format.IsUint())
+ {
+ return ComponentType.UnsignedInteger;
+ }
+ }
+
+ return ComponentType.Float;
+ }
+
+ // True when the given color bind point has a valid attachment bound.
+ public bool IsValidColorAttachment(int bindIndex)
+ {
+ return (uint)bindIndex < Constants.MaxRenderTargets && (_validColorAttachments & (1u << bindIndex)) != 0;
+ }
+
+ private static bool IsValidTextureView(ITexture texture)
+ {
+ return texture is TextureView view && view.Valid;
+ }
+
+ // Converts a scissor rectangle into a Vulkan clear rect, clamping the extent
+ // so it never exceeds the framebuffer bounds.
+ public ClearRect GetClearRect(Rectangle<int> scissor, int layer, int layerCount)
+ {
+ int x = scissor.X;
+ int y = scissor.Y;
+ int width = Math.Min((int)Width - scissor.X, scissor.Width);
+ int height = Math.Min((int)Height - scissor.Y, scissor.Height);
+
+ return new ClearRect(new Rect2D(new Offset2D(x, y), new Extent2D((uint)width, (uint)height)), (uint)layer, (uint)layerCount);
+ }
+
+ // Creates the VkFramebuffer for the given render pass. The returned Auto
+ // keeps the attachment views alive for as long as the framebuffer is in use.
+ public unsafe Auto<DisposableFramebuffer> Create(Vk api, CommandBufferScoped cbs, Auto<DisposableRenderPass> renderPass)
+ {
+ ImageView* attachments = stackalloc ImageView[_attachments.Length];
+
+ for (int i = 0; i < _attachments.Length; i++)
+ {
+ attachments[i] = _attachments[i].Get(cbs).Value;
+ }
+
+ var framebufferCreateInfo = new FramebufferCreateInfo()
+ {
+ SType = StructureType.FramebufferCreateInfo,
+ RenderPass = renderPass.Get(cbs).Value,
+ AttachmentCount = (uint)_attachments.Length,
+ PAttachments = attachments,
+ Width = Width,
+ Height = Height,
+ Layers = Layers
+ };
+
+ api.CreateFramebuffer(_device, framebufferCreateInfo, null, out var framebuffer).ThrowOnError();
+ return new Auto<DisposableFramebuffer>(new DisposableFramebuffer(api, _device, framebuffer), null, _attachments);
+ }
+
+ // Records that the attachment storages will be written by this pass, for
+ // later read-after-write barrier generation.
+ public void UpdateModifications()
+ {
+ if (_colors != null)
+ {
+ for (int index = 0; index < _colors.Length; index++)
+ {
+ _colors[index].Storage.SetModification(
+ AccessFlags.ColorAttachmentWriteBit,
+ PipelineStageFlags.ColorAttachmentOutputBit);
+ }
+ }
+
+ // NOTE(review): the depth-stencil write is tagged with the
+ // ColorAttachmentOutput stage here, while InsertClearBarrierDS below uses
+ // LateFragmentTests — confirm the stage mismatch is intentional.
+ _depthStencil?.Storage.SetModification(
+ AccessFlags.DepthStencilAttachmentWriteBit,
+ PipelineStageFlags.ColorAttachmentOutputBit);
+ }
+
+ // Inserts a read-to-write barrier for the color attachment at the given
+ // bind index (no-op if that bind point has no valid attachment).
+ public void InsertClearBarrier(CommandBufferScoped cbs, int index)
+ {
+ if (_colors != null)
+ {
+ int realIndex = Array.IndexOf(AttachmentIndices, index);
+
+ if (realIndex != -1)
+ {
+ _colors[realIndex].Storage?.InsertReadToWriteBarrier(cbs, AccessFlags.ColorAttachmentWriteBit, PipelineStageFlags.ColorAttachmentOutputBit);
+ }
+ }
+ }
+
+ // Inserts a read-to-write barrier for the depth-stencil attachment, if any.
+ public void InsertClearBarrierDS(CommandBufferScoped cbs)
+ {
+ _depthStencil?.Storage?.InsertReadToWriteBarrier(cbs, AccessFlags.DepthStencilAttachmentWriteBit, PipelineStageFlags.LateFragmentTestsBit);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/HardwareCapabilities.cs b/src/Ryujinx.Graphics.Vulkan/HardwareCapabilities.cs
new file mode 100644
index 00000000..ab82d7b4
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/HardwareCapabilities.cs
@@ -0,0 +1,120 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Features that may be missing on VK_KHR_portability_subset implementations
+ // (e.g. MoltenVK); each flag marks a capability the device does NOT support.
+ [Flags]
+ enum PortabilitySubsetFlags
+ {
+ None = 0,
+
+ NoTriangleFans = 1,
+ NoPointMode = 1 << 1,
+ No3DImageView = 1 << 2,
+ NoLodBias = 1 << 3
+ }
+
+ // Immutable snapshot of the Vulkan device/extension capabilities queried at
+ // initialization. Fields mirror the constructor parameters one-to-one; the
+ // struct exists so capability checks are cheap field reads at draw time.
+ readonly struct HardwareCapabilities
+ {
+ public readonly bool SupportsIndexTypeUint8;
+ public readonly bool SupportsCustomBorderColor;
+ public readonly bool SupportsBlendEquationAdvanced;
+ public readonly bool SupportsBlendEquationAdvancedCorrelatedOverlap;
+ public readonly bool SupportsBlendEquationAdvancedNonPreMultipliedSrcColor;
+ public readonly bool SupportsBlendEquationAdvancedNonPreMultipliedDstColor;
+ public readonly bool SupportsIndirectParameters;
+ public readonly bool SupportsFragmentShaderInterlock;
+ public readonly bool SupportsGeometryShaderPassthrough;
+ public readonly bool SupportsSubgroupSizeControl;
+ public readonly bool SupportsShaderInt8;
+ public readonly bool SupportsShaderStencilExport;
+ public readonly bool SupportsConditionalRendering;
+ public readonly bool SupportsExtendedDynamicState;
+ public readonly bool SupportsMultiView;
+ public readonly bool SupportsNullDescriptors;
+ public readonly bool SupportsPushDescriptors;
+ public readonly bool SupportsPrimitiveTopologyListRestart;
+ public readonly bool SupportsPrimitiveTopologyPatchListRestart;
+ public readonly bool SupportsTransformFeedback;
+ public readonly bool SupportsTransformFeedbackQueries;
+ public readonly bool SupportsPreciseOcclusionQueries;
+ public readonly bool SupportsPipelineStatisticsQuery;
+ public readonly bool SupportsGeometryShader;
+ public readonly bool SupportsViewportArray2;
+ public readonly uint MinSubgroupSize;
+ public readonly uint MaxSubgroupSize;
+ public readonly ShaderStageFlags RequiredSubgroupSizeStages;
+ public readonly SampleCountFlags SupportedSampleCounts;
+ public readonly PortabilitySubsetFlags PortabilitySubset;
+ public readonly uint VertexBufferAlignment;
+ public readonly uint SubTexelPrecisionBits;
+
+ // All values are captured once at device creation; the struct is readonly
+ // so it can be passed and cached freely without defensive copies mattering.
+ public HardwareCapabilities(
+ bool supportsIndexTypeUint8,
+ bool supportsCustomBorderColor,
+ bool supportsBlendEquationAdvanced,
+ bool supportsBlendEquationAdvancedCorrelatedOverlap,
+ bool supportsBlendEquationAdvancedNonPreMultipliedSrcColor,
+ bool supportsBlendEquationAdvancedNonPreMultipliedDstColor,
+ bool supportsIndirectParameters,
+ bool supportsFragmentShaderInterlock,
+ bool supportsGeometryShaderPassthrough,
+ bool supportsSubgroupSizeControl,
+ bool supportsShaderInt8,
+ bool supportsShaderStencilExport,
+ bool supportsConditionalRendering,
+ bool supportsExtendedDynamicState,
+ bool supportsMultiView,
+ bool supportsNullDescriptors,
+ bool supportsPushDescriptors,
+ bool supportsPrimitiveTopologyListRestart,
+ bool supportsPrimitiveTopologyPatchListRestart,
+ bool supportsTransformFeedback,
+ bool supportsTransformFeedbackQueries,
+ bool supportsPreciseOcclusionQueries,
+ bool supportsPipelineStatisticsQuery,
+ bool supportsGeometryShader,
+ bool supportsViewportArray2,
+ uint minSubgroupSize,
+ uint maxSubgroupSize,
+ ShaderStageFlags requiredSubgroupSizeStages,
+ SampleCountFlags supportedSampleCounts,
+ PortabilitySubsetFlags portabilitySubset,
+ uint vertexBufferAlignment,
+ uint subTexelPrecisionBits)
+ {
+ SupportsIndexTypeUint8 = supportsIndexTypeUint8;
+ SupportsCustomBorderColor = supportsCustomBorderColor;
+ SupportsBlendEquationAdvanced = supportsBlendEquationAdvanced;
+ SupportsBlendEquationAdvancedCorrelatedOverlap = supportsBlendEquationAdvancedCorrelatedOverlap;
+ SupportsBlendEquationAdvancedNonPreMultipliedSrcColor = supportsBlendEquationAdvancedNonPreMultipliedSrcColor;
+ SupportsBlendEquationAdvancedNonPreMultipliedDstColor = supportsBlendEquationAdvancedNonPreMultipliedDstColor;
+ SupportsIndirectParameters = supportsIndirectParameters;
+ SupportsFragmentShaderInterlock = supportsFragmentShaderInterlock;
+ SupportsGeometryShaderPassthrough = supportsGeometryShaderPassthrough;
+ SupportsSubgroupSizeControl = supportsSubgroupSizeControl;
+ SupportsShaderInt8 = supportsShaderInt8;
+ SupportsShaderStencilExport = supportsShaderStencilExport;
+ SupportsConditionalRendering = supportsConditionalRendering;
+ SupportsExtendedDynamicState = supportsExtendedDynamicState;
+ SupportsMultiView = supportsMultiView;
+ SupportsNullDescriptors = supportsNullDescriptors;
+ SupportsPushDescriptors = supportsPushDescriptors;
+ SupportsPrimitiveTopologyListRestart = supportsPrimitiveTopologyListRestart;
+ SupportsPrimitiveTopologyPatchListRestart = supportsPrimitiveTopologyPatchListRestart;
+ SupportsTransformFeedback = supportsTransformFeedback;
+ SupportsTransformFeedbackQueries = supportsTransformFeedbackQueries;
+ SupportsPreciseOcclusionQueries = supportsPreciseOcclusionQueries;
+ SupportsPipelineStatisticsQuery = supportsPipelineStatisticsQuery;
+ SupportsGeometryShader = supportsGeometryShader;
+ SupportsViewportArray2 = supportsViewportArray2;
+ MinSubgroupSize = minSubgroupSize;
+ MaxSubgroupSize = maxSubgroupSize;
+ RequiredSubgroupSizeStages = requiredSubgroupSizeStages;
+ SupportedSampleCounts = supportedSampleCounts;
+ PortabilitySubset = portabilitySubset;
+ VertexBufferAlignment = vertexBufferAlignment;
+ SubTexelPrecisionBits = subTexelPrecisionBits;
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/HashTableSlim.cs b/src/Ryujinx.Graphics.Vulkan/HashTableSlim.cs
new file mode 100644
index 00000000..e4ad3958
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/HashTableSlim.cs
@@ -0,0 +1,112 @@
+using System;
+using System.Collections.Generic;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Equality by reference-passed operand: lets large struct keys be compared
+ // without copying them (unlike IEquatable<T>.Equals(T)).
+ interface IRefEquatable<T>
+ {
+ bool Equals(ref T other);
+ }
+
+ // Minimal hash table tuned for few, long-lived entries (e.g. pipeline caches):
+ // a fixed 16-bucket array of flat entry arrays. Inserts append by resizing the
+ // bucket array (O(bucket length) per insert); there is no removal and no
+ // resizing of the bucket count. Not thread-safe.
+ class HashTableSlim<K, V> where K : IRefEquatable<K>
+ {
+ private const int TotalBuckets = 16; // Must be power of 2
+ private const int TotalBucketsMask = TotalBuckets - 1;
+
+ private struct Entry
+ {
+ // Cached hash code, used to skip the (potentially expensive) key
+ // comparison when hashes differ.
+ public int Hash;
+ public K Key;
+ public V Value;
+ }
+
+ private readonly Entry[][] _hashTable = new Entry[TotalBuckets][];
+
+ // Enumerates all keys, bucket by bucket, in unspecified order.
+ public IEnumerable<K> Keys
+ {
+ get
+ {
+ foreach (Entry[] bucket in _hashTable)
+ {
+ if (bucket != null)
+ {
+ foreach (Entry entry in bucket)
+ {
+ yield return entry.Key;
+ }
+ }
+ }
+ }
+ }
+
+ // Enumerates all values, bucket by bucket, in unspecified order.
+ public IEnumerable<V> Values
+ {
+ get
+ {
+ foreach (Entry[] bucket in _hashTable)
+ {
+ if (bucket != null)
+ {
+ foreach (Entry entry in bucket)
+ {
+ yield return entry.Value;
+ }
+ }
+ }
+ }
+ }
+
+ // Appends a key/value pair. Does NOT check for duplicates; adding an
+ // existing key leaves both entries in the bucket (lookup finds the first).
+ // NOTE(review): key.GetHashCode() is invoked twice here — harmless unless
+ // the key's hash computation is expensive.
+ public void Add(ref K key, V value)
+ {
+ var entry = new Entry()
+ {
+ Hash = key.GetHashCode(),
+ Key = key,
+ Value = value
+ };
+
+ int hashCode = key.GetHashCode();
+ int bucketIndex = hashCode & TotalBucketsMask;
+
+ var bucket = _hashTable[bucketIndex];
+ if (bucket != null)
+ {
+ int index = bucket.Length;
+
+ Array.Resize(ref _hashTable[bucketIndex], index + 1);
+
+ _hashTable[bucketIndex][index] = entry;
+ }
+ else
+ {
+ _hashTable[bucketIndex] = new Entry[]
+ {
+ entry
+ };
+ }
+ }
+
+ // Linear search within the key's bucket; the cached hash short-circuits
+ // the ref-equality check for non-matching entries.
+ public bool TryGetValue(ref K key, out V value)
+ {
+ int hashCode = key.GetHashCode();
+
+ var bucket = _hashTable[hashCode & TotalBucketsMask];
+ if (bucket != null)
+ {
+ for (int i = 0; i < bucket.Length; i++)
+ {
+ ref var entry = ref bucket[i];
+
+ if (entry.Hash == hashCode && entry.Key.Equals(ref key))
+ {
+ value = entry.Value;
+ return true;
+ }
+ }
+ }
+
+ value = default;
+ return false;
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/HelperShader.cs b/src/Ryujinx.Graphics.Vulkan/HelperShader.cs
new file mode 100644
index 00000000..c57edaf7
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/HelperShader.cs
@@ -0,0 +1,1683 @@
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Shader;
+using Ryujinx.Graphics.Shader.Translation;
+using Ryujinx.Graphics.Vulkan.Shaders;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Numerics;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ // Numeric class of a render target's components, used to select the matching
+ // clear/blit shader variant (float vs. signed/unsigned integer).
+ enum ComponentType
+ {
+ Float,
+ SignedInteger,
+ UnsignedInteger
+ }
+
+ class HelperShader : IDisposable
+ {
+ private const int UniformBufferAlignment = 256;
+
+ private readonly PipelineHelperShader _pipeline;
+ private readonly ISampler _samplerLinear;
+ private readonly ISampler _samplerNearest;
+ private readonly IProgram _programColorBlit;
+ private readonly IProgram _programColorBlitMs;
+ private readonly IProgram _programColorBlitClearAlpha;
+ private readonly IProgram _programColorClearF;
+ private readonly IProgram _programColorClearSI;
+ private readonly IProgram _programColorClearUI;
+ private readonly IProgram _programStrideChange;
+ private readonly IProgram _programConvertIndexBuffer;
+ private readonly IProgram _programConvertIndirectData;
+ private readonly IProgram _programColorCopyShortening;
+ private readonly IProgram _programColorCopyToNonMs;
+ private readonly IProgram _programColorCopyWidening;
+ private readonly IProgram _programColorDrawToMs;
+ private readonly IProgram _programDepthBlit;
+ private readonly IProgram _programDepthBlitMs;
+ private readonly IProgram _programDepthDrawToMs;
+ private readonly IProgram _programDepthDrawToNonMs;
+ private readonly IProgram _programStencilBlit;
+ private readonly IProgram _programStencilBlitMs;
+ private readonly IProgram _programStencilDrawToMs;
+ private readonly IProgram _programStencilDrawToNonMs;
+
+ // Builds the internal helper pipeline, the two shared samplers, and every
+ // precompiled SPIR-V helper program (blits, clears, buffer conversions,
+ // multisample copies). Stencil programs are only created when the device
+ // supports VK_EXT_shader_stencil_export.
+ public HelperShader(VulkanRenderer gd, Device device)
+ {
+ _pipeline = new PipelineHelperShader(gd, device);
+ _pipeline.Initialize();
+
+ _samplerLinear = gd.CreateSampler(GAL.SamplerCreateInfo.Create(MinFilter.Linear, MagFilter.Linear));
+ _samplerNearest = gd.CreateSampler(GAL.SamplerCreateInfo.Create(MinFilter.Nearest, MagFilter.Nearest));
+
+ // Blit vertex stage: one uniform buffer (binding 1) holding the source region.
+ var blitVertexBindings = new ShaderBindings(
+ new[] { 1 },
+ Array.Empty<int>(),
+ Array.Empty<int>(),
+ Array.Empty<int>());
+
+ // Blit fragment stage: one sampled texture (binding 0).
+ var blitFragmentBindings = new ShaderBindings(
+ Array.Empty<int>(),
+ Array.Empty<int>(),
+ new[] { 0 },
+ Array.Empty<int>());
+
+ _programColorBlit = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorBlitVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.ColorBlitFragmentShaderSource, blitFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programColorBlitMs = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorBlitVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.ColorBlitMsFragmentShaderSource, blitFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programColorBlitClearAlpha = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorBlitVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.ColorBlitClearAlphaFragmentShaderSource, blitFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ // Clear fragment stages take no resources; the clear color comes from
+ // elsewhere (not visible in this constructor).
+ var colorClearFragmentBindings = new ShaderBindings(
+ Array.Empty<int>(),
+ Array.Empty<int>(),
+ Array.Empty<int>(),
+ Array.Empty<int>());
+
+ _programColorClearF = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorClearVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.ColorClearFFragmentShaderSource, colorClearFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programColorClearSI = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorClearVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.ColorClearSIFragmentShaderSource, colorClearFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programColorClearUI = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorClearVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.ColorClearUIFragmentShaderSource, colorClearFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ // Compute conversions: uniform buffer at 0, storage buffers at 1..n.
+ var strideChangeBindings = new ShaderBindings(
+ new[] { 0 },
+ new[] { 1, 2 },
+ Array.Empty<int>(),
+ Array.Empty<int>());
+
+ _programStrideChange = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ChangeBufferStrideShaderSource, strideChangeBindings, ShaderStage.Compute, TargetLanguage.Spirv),
+ });
+
+ var colorCopyBindings = new ShaderBindings(
+ new[] { 0 },
+ Array.Empty<int>(),
+ new[] { 0 },
+ new[] { 0 });
+
+ _programColorCopyShortening = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorCopyShorteningComputeShaderSource, colorCopyBindings, ShaderStage.Compute, TargetLanguage.Spirv),
+ });
+
+ _programColorCopyToNonMs = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorCopyToNonMsComputeShaderSource, colorCopyBindings, ShaderStage.Compute, TargetLanguage.Spirv),
+ });
+
+ _programColorCopyWidening = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorCopyWideningComputeShaderSource, colorCopyBindings, ShaderStage.Compute, TargetLanguage.Spirv),
+ });
+
+ var colorDrawToMsVertexBindings = new ShaderBindings(
+ Array.Empty<int>(),
+ Array.Empty<int>(),
+ Array.Empty<int>(),
+ Array.Empty<int>());
+
+ var colorDrawToMsFragmentBindings = new ShaderBindings(
+ new[] { 0 },
+ Array.Empty<int>(),
+ new[] { 0 },
+ Array.Empty<int>());
+
+ _programColorDrawToMs = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorDrawToMsVertexShaderSource, colorDrawToMsVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.ColorDrawToMsFragmentShaderSource, colorDrawToMsFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ var convertIndexBufferBindings = new ShaderBindings(
+ new[] { 0 },
+ new[] { 1, 2 },
+ Array.Empty<int>(),
+ Array.Empty<int>());
+
+ _programConvertIndexBuffer = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ConvertIndexBufferShaderSource, convertIndexBufferBindings, ShaderStage.Compute, TargetLanguage.Spirv),
+ });
+
+ var convertIndirectDataBindings = new ShaderBindings(
+ new[] { 0 },
+ new[] { 1, 2, 3 },
+ Array.Empty<int>(),
+ Array.Empty<int>());
+
+ _programConvertIndirectData = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ConvertIndirectDataShaderSource, convertIndirectDataBindings, ShaderStage.Compute, TargetLanguage.Spirv),
+ });
+
+ _programDepthBlit = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorBlitVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.DepthBlitFragmentShaderSource, blitFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programDepthBlitMs = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorBlitVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.DepthBlitMsFragmentShaderSource, blitFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programDepthDrawToMs = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorDrawToMsVertexShaderSource, colorDrawToMsVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.DepthDrawToMsFragmentShaderSource, colorDrawToMsFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programDepthDrawToNonMs = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorDrawToMsVertexShaderSource, colorDrawToMsVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.DepthDrawToNonMsFragmentShaderSource, colorDrawToMsFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ // Stencil-writing fragment shaders require VK_EXT_shader_stencil_export;
+ // without it these program fields remain null and callers must check.
+ if (gd.Capabilities.SupportsShaderStencilExport)
+ {
+ _programStencilBlit = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorBlitVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.StencilBlitFragmentShaderSource, blitFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programStencilBlitMs = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorBlitVertexShaderSource, blitVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.StencilBlitMsFragmentShaderSource, blitFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programStencilDrawToMs = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorDrawToMsVertexShaderSource, colorDrawToMsVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.StencilDrawToMsFragmentShaderSource, colorDrawToMsFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+
+ _programStencilDrawToNonMs = gd.CreateProgramWithMinimalLayout(new[]
+ {
+ new ShaderSource(ShaderBinaries.ColorDrawToMsVertexShaderSource, colorDrawToMsVertexBindings, ShaderStage.Vertex, TargetLanguage.Spirv),
+ new ShaderSource(ShaderBinaries.StencilDrawToNonMsFragmentShaderSource, colorDrawToMsFragmentBindings, ShaderStage.Fragment, TargetLanguage.Spirv),
+ });
+ }
+ }
+
+ // Blits src into dst over the given mip levels and layers, flushing pending
+ // work and renting a fresh command buffer. Region coordinates are shifted
+ // per mip level; each layer/level pair is drawn via a temporary 2D view.
+ public void Blit(
+ VulkanRenderer gd,
+ TextureView src,
+ TextureView dst,
+ Extents2D srcRegion,
+ Extents2D dstRegion,
+ int layers,
+ int levels,
+ bool isDepthOrStencil,
+ bool linearFilter,
+ bool clearAlpha = false)
+ {
+ gd.FlushAllCommands();
+
+ using var cbs = gd.CommandBufferPool.Rent();
+
+ var dstFormat = dst.VkFormat;
+ var dstSamples = dst.Info.Samples;
+
+ for (int l = 0; l < levels; l++)
+ {
+ // NOTE(review): srcWidth/srcHeight are computed but never used below.
+ int srcWidth = Math.Max(1, src.Width >> l);
+ int srcHeight = Math.Max(1, src.Height >> l);
+
+ int dstWidth = Math.Max(1, dst.Width >> l);
+ int dstHeight = Math.Max(1, dst.Height >> l);
+
+ var mipSrcRegion = new Extents2D(
+ srcRegion.X1 >> l,
+ srcRegion.Y1 >> l,
+ srcRegion.X2 >> l,
+ srcRegion.Y2 >> l);
+
+ var mipDstRegion = new Extents2D(
+ dstRegion.X1 >> l,
+ dstRegion.Y1 >> l,
+ dstRegion.X2 >> l,
+ dstRegion.Y2 >> l);
+
+ for (int z = 0; z < layers; z++)
+ {
+ var srcView = Create2DLayerView(src, z, l);
+ var dstView = Create2DLayerView(dst, z, l);
+
+ if (isDepthOrStencil)
+ {
+ BlitDepthStencil(
+ gd,
+ cbs,
+ srcView,
+ dst.GetImageViewForAttachment(),
+ dstWidth,
+ dstHeight,
+ dstSamples,
+ dstFormat,
+ mipSrcRegion,
+ mipDstRegion);
+ }
+ else
+ {
+ BlitColor(
+ gd,
+ cbs,
+ srcView,
+ dst.GetImageViewForAttachment(),
+ dstWidth,
+ dstHeight,
+ dstSamples,
+ dstFormat,
+ false,
+ mipSrcRegion,
+ mipDstRegion,
+ linearFilter,
+ clearAlpha);
+ }
+
+ // Create2DLayerView returns the input itself when no sub-view is
+ // needed; only release views we actually created.
+ if (srcView != src)
+ {
+ srcView.Release();
+ }
+
+ if (dstView != dst)
+ {
+ dstView.Release();
+ }
+ }
+ }
+ }
+
+ // Copies color data between textures using a nearest-filter draw per
+ // level/layer, clamping the copied extent to the smaller of the two mips.
+ public void CopyColor(
+ VulkanRenderer gd,
+ CommandBufferScoped cbs,
+ TextureView src,
+ TextureView dst,
+ int srcLayer,
+ int dstLayer,
+ int srcLevel,
+ int dstLevel,
+ int depth,
+ int levels)
+ {
+ for (int l = 0; l < levels; l++)
+ {
+ int mipSrcLevel = srcLevel + l;
+ int mipDstLevel = dstLevel + l;
+
+ int srcWidth = Math.Max(1, src.Width >> mipSrcLevel);
+ int srcHeight = Math.Max(1, src.Height >> mipSrcLevel);
+
+ int dstWidth = Math.Max(1, dst.Width >> mipDstLevel);
+ int dstHeight = Math.Max(1, dst.Height >> mipDstLevel);
+
+ // Copy only the region both textures can hold at this level.
+ var extents = new Extents2D(
+ 0,
+ 0,
+ Math.Min(srcWidth, dstWidth),
+ Math.Min(srcHeight, dstHeight));
+
+ for (int z = 0; z < depth; z++)
+ {
+ var srcView = Create2DLayerView(src, srcLayer + z, mipSrcLevel);
+ var dstView = Create2DLayerView(dst, dstLayer + z, mipDstLevel);
+
+ BlitColor(
+ gd,
+ cbs,
+ srcView,
+ dstView.GetImageViewForAttachment(),
+ dstView.Width,
+ dstView.Height,
+ dstView.Info.Samples,
+ dstView.VkFormat,
+ dstView.Info.Format.IsDepthOrStencil(),
+ extents,
+ extents,
+ false);
+
+ if (srcView != src)
+ {
+ srcView.Release();
+ }
+
+ if (dstView != dst)
+ {
+ dstView.Release();
+ }
+ }
+ }
+ }
+
+ // Draws src into the dst attachment as a full-screen triangle strip, using
+ // a small uniform buffer carrying the normalized source region. Handles
+ // flipped blits by swapping region endpoints, picks the program variant by
+ // multisample/depth/clearAlpha, and restores depth state afterwards.
+ public void BlitColor(
+ VulkanRenderer gd,
+ CommandBufferScoped cbs,
+ TextureView src,
+ Auto<DisposableImageView> dst,
+ int dstWidth,
+ int dstHeight,
+ int dstSamples,
+ VkFormat dstFormat,
+ bool dstIsDepthOrStencil,
+ Extents2D srcRegion,
+ Extents2D dstRegion,
+ bool linearFilter,
+ bool clearAlpha = false)
+ {
+ _pipeline.SetCommandBuffer(cbs);
+
+ const int RegionBufferSize = 16;
+
+ var sampler = linearFilter ? _samplerLinear : _samplerNearest;
+
+ _pipeline.SetTextureAndSampler(ShaderStage.Fragment, 0, src, sampler);
+
+ // Normalized [x1, x2, y1, y2] source region for the vertex shader.
+ Span<float> region = stackalloc float[RegionBufferSize / sizeof(float)];
+
+ region[0] = (float)srcRegion.X1 / src.Width;
+ region[1] = (float)srcRegion.X2 / src.Width;
+ region[2] = (float)srcRegion.Y1 / src.Height;
+ region[3] = (float)srcRegion.Y2 / src.Height;
+
+ // A mirrored destination is expressed by sampling the source reversed,
+ // since the viewport rect below is always axis-positive.
+ if (dstRegion.X1 > dstRegion.X2)
+ {
+ (region[0], region[1]) = (region[1], region[0]);
+ }
+
+ if (dstRegion.Y1 > dstRegion.Y2)
+ {
+ (region[2], region[3]) = (region[3], region[2]);
+ }
+
+ var bufferHandle = gd.BufferManager.CreateWithHandle(gd, RegionBufferSize);
+
+ gd.BufferManager.SetData<float>(bufferHandle, 0, region);
+
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(1, new BufferRange(bufferHandle, 0, RegionBufferSize)) });
+
+ Span<GAL.Viewport> viewports = stackalloc GAL.Viewport[1];
+
+ var rect = new Rectangle<float>(
+ MathF.Min(dstRegion.X1, dstRegion.X2),
+ MathF.Min(dstRegion.Y1, dstRegion.Y2),
+ MathF.Abs(dstRegion.X2 - dstRegion.X1),
+ MathF.Abs(dstRegion.Y2 - dstRegion.Y1));
+
+ viewports[0] = new GAL.Viewport(
+ rect,
+ ViewportSwizzle.PositiveX,
+ ViewportSwizzle.PositiveY,
+ ViewportSwizzle.PositiveZ,
+ ViewportSwizzle.PositiveW,
+ 0f,
+ 1f);
+
+ Span<Rectangle<int>> scissors = stackalloc Rectangle<int>[1];
+
+ scissors[0] = new Rectangle<int>(0, 0, dstWidth, dstHeight);
+
+ if (dstIsDepthOrStencil)
+ {
+ _pipeline.SetProgram(src.Info.Target.IsMultisample() ? _programDepthBlitMs : _programDepthBlit);
+ _pipeline.SetDepthTest(new DepthTestDescriptor(true, true, GAL.CompareOp.Always));
+ }
+ else if (src.Info.Target.IsMultisample())
+ {
+ _pipeline.SetProgram(_programColorBlitMs);
+ }
+ else if (clearAlpha)
+ {
+ _pipeline.SetProgram(_programColorBlitClearAlpha);
+ }
+ else
+ {
+ _pipeline.SetProgram(_programColorBlit);
+ }
+
+ _pipeline.SetRenderTarget(dst, (uint)dstWidth, (uint)dstHeight, (uint)dstSamples, dstIsDepthOrStencil, dstFormat);
+ _pipeline.SetRenderTargetColorMasks(new uint[] { 0xf });
+ _pipeline.SetScissors(scissors);
+
+ if (clearAlpha)
+ {
+ _pipeline.ClearRenderTargetColor(0, 0, 1, new ColorF(0f, 0f, 0f, 1f));
+ }
+
+ _pipeline.SetViewports(viewports, false);
+ _pipeline.SetPrimitiveTopology(GAL.PrimitiveTopology.TriangleStrip);
+ _pipeline.Draw(4, 1, 0, 0);
+
+ // Undo the always-pass depth write enabled for the depth blit path.
+ if (dstIsDepthOrStencil)
+ {
+ _pipeline.SetDepthTest(new DepthTestDescriptor(false, false, GAL.CompareOp.Always));
+ }
+
+ _pipeline.Finish(gd, cbs);
+
+ gd.BufferManager.Delete(bufferHandle);
+ }
+
+ // Depth/stencil counterpart of BlitColor: sets up the same region uniform,
+ // viewport and scissor, then issues up to two draws — one for the depth
+ // aspect and one for the stencil aspect (the latter only when the
+ // stencil-export programs were created, i.e. the extension is supported).
+ private void BlitDepthStencil(
+ VulkanRenderer gd,
+ CommandBufferScoped cbs,
+ TextureView src,
+ Auto<DisposableImageView> dst,
+ int dstWidth,
+ int dstHeight,
+ int dstSamples,
+ VkFormat dstFormat,
+ Extents2D srcRegion,
+ Extents2D dstRegion)
+ {
+ _pipeline.SetCommandBuffer(cbs);
+
+ const int RegionBufferSize = 16;
+
+ // Normalized [x1, x2, y1, y2] source region for the vertex shader.
+ Span<float> region = stackalloc float[RegionBufferSize / sizeof(float)];
+
+ region[0] = (float)srcRegion.X1 / src.Width;
+ region[1] = (float)srcRegion.X2 / src.Width;
+ region[2] = (float)srcRegion.Y1 / src.Height;
+ region[3] = (float)srcRegion.Y2 / src.Height;
+
+ // Mirrored destinations sample the source reversed (viewport is positive).
+ if (dstRegion.X1 > dstRegion.X2)
+ {
+ (region[0], region[1]) = (region[1], region[0]);
+ }
+
+ if (dstRegion.Y1 > dstRegion.Y2)
+ {
+ (region[2], region[3]) = (region[3], region[2]);
+ }
+
+ var bufferHandle = gd.BufferManager.CreateWithHandle(gd, RegionBufferSize);
+
+ gd.BufferManager.SetData<float>(bufferHandle, 0, region);
+
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(1, new BufferRange(bufferHandle, 0, RegionBufferSize)) });
+
+ Span<GAL.Viewport> viewports = stackalloc GAL.Viewport[1];
+
+ var rect = new Rectangle<float>(
+ MathF.Min(dstRegion.X1, dstRegion.X2),
+ MathF.Min(dstRegion.Y1, dstRegion.Y2),
+ MathF.Abs(dstRegion.X2 - dstRegion.X1),
+ MathF.Abs(dstRegion.Y2 - dstRegion.Y1));
+
+ viewports[0] = new GAL.Viewport(
+ rect,
+ ViewportSwizzle.PositiveX,
+ ViewportSwizzle.PositiveY,
+ ViewportSwizzle.PositiveZ,
+ ViewportSwizzle.PositiveW,
+ 0f,
+ 1f);
+
+ Span<Rectangle<int>> scissors = stackalloc Rectangle<int>[1];
+
+ scissors[0] = new Rectangle<int>(0, 0, dstWidth, dstHeight);
+
+ _pipeline.SetRenderTarget(dst, (uint)dstWidth, (uint)dstHeight, (uint)dstSamples, true, dstFormat);
+ _pipeline.SetScissors(scissors);
+ _pipeline.SetViewports(viewports, false);
+ _pipeline.SetPrimitiveTopology(GAL.PrimitiveTopology.TriangleStrip);
+
+ var aspectFlags = src.Info.Format.ConvertAspectFlags();
+
+ if (aspectFlags.HasFlag(ImageAspectFlags.DepthBit))
+ {
+ var depthTexture = CreateDepthOrStencilView(src, DepthStencilMode.Depth);
+
+ BlitDepthStencilDraw(depthTexture, isDepth: true);
+
+ // CreateDepthOrStencilView returns src itself when no new view is
+ // needed; only release views we created.
+ if (depthTexture != src)
+ {
+ depthTexture.Release();
+ }
+ }
+
+ if (aspectFlags.HasFlag(ImageAspectFlags.StencilBit) && _programStencilBlit != null)
+ {
+ var stencilTexture = CreateDepthOrStencilView(src, DepthStencilMode.Stencil);
+
+ BlitDepthStencilDraw(stencilTexture, isDepth: false);
+
+ if (stencilTexture != src)
+ {
+ stencilTexture.Release();
+ }
+ }
+
+ _pipeline.Finish(gd, cbs);
+
+ gd.BufferManager.Delete(bufferHandle);
+ }
+
/// <summary>
/// Gets a view of a depth-stencil texture that samples the requested aspect.
/// </summary>
/// <param name="depthStencilTexture">Source depth-stencil texture</param>
/// <param name="depthStencilMode">Aspect (depth or stencil) the view should sample</param>
/// <returns>The texture itself when it already uses the requested mode, otherwise a new view (caller must release it)</returns>
private static TextureView CreateDepthOrStencilView(TextureView depthStencilTexture, DepthStencilMode depthStencilMode)
{
    var info = depthStencilTexture.Info;

    // Fast path: the texture already samples the requested aspect.
    if (info.DepthStencilMode == depthStencilMode)
    {
        return depthStencilTexture;
    }

    // Otherwise build an identically shaped view that only swaps the
    // depth-stencil sampling mode and uses an identity (RGBA) swizzle.
    var viewInfo = new TextureCreateInfo(
        info.Width,
        info.Height,
        info.Depth,
        info.Levels,
        info.Samples,
        info.BlockWidth,
        info.BlockHeight,
        info.BytesPerPixel,
        info.Format,
        depthStencilMode,
        info.Target,
        SwizzleComponent.Red,
        SwizzleComponent.Green,
        SwizzleComponent.Blue,
        SwizzleComponent.Alpha);

    return (TextureView)depthStencilTexture.CreateView(viewInfo, 0, 0);
}
+
+ /// <summary>
+ /// Draws one aspect (depth or stencil) of a depth-stencil blit as a
+ /// fullscreen triangle strip, then restores the modified pipeline state.
+ /// </summary>
+ /// <param name="src">Source view, sampled in the fragment shader with the nearest sampler</param>
+ /// <param name="isDepth">True to write depth, false to write stencil</param>
+ private void BlitDepthStencilDraw(TextureView src, bool isDepth)
+ {
+ _pipeline.SetTextureAndSampler(ShaderStage.Fragment, 0, src, _samplerNearest);
+
+ if (isDepth)
+ {
+ // Depth path: pick the single or multisample program and enable depth test + write.
+ _pipeline.SetProgram(src.Info.Target.IsMultisample() ? _programDepthBlitMs : _programDepthBlit);
+ _pipeline.SetDepthTest(new DepthTestDescriptor(true, true, GAL.CompareOp.Always));
+ }
+ else
+ {
+ // Stencil path: always-pass stencil test that replaces the stencil value.
+ _pipeline.SetProgram(src.Info.Target.IsMultisample() ? _programStencilBlitMs : _programStencilBlit);
+ _pipeline.SetStencilTest(CreateStencilTestDescriptor(true));
+ }
+
+ _pipeline.Draw(4, 1, 0, 0);
+
+ // Disable the depth/stencil state again so later draws are unaffected.
+ if (isDepth)
+ {
+ _pipeline.SetDepthTest(new DepthTestDescriptor(false, false, GAL.CompareOp.Always));
+ }
+ else
+ {
+ _pipeline.SetStencilTest(CreateStencilTestDescriptor(false));
+ }
+ }
+
/// <summary>
/// Builds the stencil state used by the stencil blit draws: front and back
/// faces share the same configuration (always-pass compare, replace on every
/// stencil operation, reference 0, masks 0xff).
/// </summary>
/// <param name="enabled">Whether the stencil test is enabled</param>
private static StencilTestDescriptor CreateStencilTestDescriptor(bool enabled)
{
    const int Reference = 0;
    const int Mask = 0xff;

    return new StencilTestDescriptor(
        enabled,
        GAL.CompareOp.Always,
        GAL.StencilOp.Replace,
        GAL.StencilOp.Replace,
        GAL.StencilOp.Replace,
        Reference,
        Mask,
        Mask,
        GAL.CompareOp.Always,
        GAL.StencilOp.Replace,
        GAL.StencilOp.Replace,
        GAL.StencilOp.Replace,
        Reference,
        Mask,
        Mask);
}
+
+ /// <summary>
+ /// Clears a color render target with a fullscreen quad draw, which allows a
+ /// scissor rectangle and a per-component write mask to be applied.
+ /// </summary>
+ /// <param name="gd">Vulkan renderer</param>
+ /// <param name="dst">Image view of the render target to clear</param>
+ /// <param name="clearColor">Clear color, uploaded as a 16 byte uniform buffer</param>
+ /// <param name="componentMask">Bitmask of the color components to write</param>
+ /// <param name="dstWidth">Render target width</param>
+ /// <param name="dstHeight">Render target height</param>
+ /// <param name="dstFormat">Vulkan format of the render target</param>
+ /// <param name="type">Component type, selects the float, signed or unsigned clear program</param>
+ /// <param name="scissor">Region of the target to clear</param>
+ public void Clear(
+ VulkanRenderer gd,
+ Auto<DisposableImageView> dst,
+ ReadOnlySpan<float> clearColor,
+ uint componentMask,
+ int dstWidth,
+ int dstHeight,
+ VkFormat dstFormat,
+ ComponentType type,
+ Rectangle<int> scissor)
+ {
+ const int ClearColorBufferSize = 16;
+
+ gd.FlushAllCommands();
+
+ using var cbs = gd.CommandBufferPool.Rent();
+
+ _pipeline.SetCommandBuffer(cbs);
+
+ var bufferHandle = gd.BufferManager.CreateWithHandle(gd, ClearColorBufferSize);
+
+ gd.BufferManager.SetData<float>(bufferHandle, 0, clearColor);
+
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(1, new BufferRange(bufferHandle, 0, ClearColorBufferSize)) });
+
+ Span<GAL.Viewport> viewports = stackalloc GAL.Viewport[1];
+
+ viewports[0] = new GAL.Viewport(
+ new Rectangle<float>(0, 0, dstWidth, dstHeight),
+ ViewportSwizzle.PositiveX,
+ ViewportSwizzle.PositiveY,
+ ViewportSwizzle.PositiveZ,
+ ViewportSwizzle.PositiveW,
+ 0f,
+ 1f);
+
+ Span<Rectangle<int>> scissors = stackalloc Rectangle<int>[1];
+
+ scissors[0] = scissor;
+
+ // Pick the clear program matching the render target component type.
+ IProgram program;
+
+ if (type == ComponentType.SignedInteger)
+ {
+ program = _programColorClearSI;
+ }
+ else if (type == ComponentType.UnsignedInteger)
+ {
+ program = _programColorClearUI;
+ }
+ else
+ {
+ program = _programColorClearF;
+ }
+
+ _pipeline.SetProgram(program);
+ _pipeline.SetRenderTarget(dst, (uint)dstWidth, (uint)dstHeight, false, dstFormat);
+ _pipeline.SetRenderTargetColorMasks(new uint[] { componentMask });
+ _pipeline.SetViewports(viewports, false);
+ _pipeline.SetScissors(scissors);
+ _pipeline.SetPrimitiveTopology(GAL.PrimitiveTopology.TriangleStrip);
+ _pipeline.Draw(4, 1, 0, 0);
+ _pipeline.Finish();
+
+ gd.BufferManager.Delete(bufferHandle);
+ }
+
/// <summary>
/// Draws a region of a source texture using the color blit program, into
/// whatever render target is currently bound on <paramref name="pipeline"/>.
/// Render target and scissors are managed by the caller.
/// </summary>
/// <param name="gd">Vulkan renderer</param>
/// <param name="pipeline">Pipeline to record the draw into</param>
/// <param name="src">Source texture view</param>
/// <param name="srcSampler">Sampler used to sample the source</param>
/// <param name="srcRegion">Source region, in texels</param>
/// <param name="dstRegion">Destination region; flipped rectangles are handled by mirroring the source coordinates</param>
public void DrawTexture(
    VulkanRenderer gd,
    PipelineBase pipeline,
    TextureView src,
    ISampler srcSampler,
    Extents2DF srcRegion,
    Extents2DF dstRegion)
{
    const int RegionBufferSize = 16;

    pipeline.SetTextureAndSampler(ShaderStage.Fragment, 0, src, srcSampler);

    // Normalized source rectangle (x1, x2, y1, y2), passed to the shader as a uniform.
    Span<float> region = stackalloc float[RegionBufferSize / sizeof(float)];

    region[0] = srcRegion.X1 / src.Width;
    region[1] = srcRegion.X2 / src.Width;
    region[2] = srcRegion.Y1 / src.Height;
    region[3] = srcRegion.Y2 / src.Height;

    // A flipped destination is expressed by sampling the source mirrored.
    if (dstRegion.X1 > dstRegion.X2)
    {
        (region[0], region[1]) = (region[1], region[0]);
    }

    if (dstRegion.Y1 > dstRegion.Y2)
    {
        (region[2], region[3]) = (region[3], region[2]);
    }

    var bufferHandle = gd.BufferManager.CreateWithHandle(gd, RegionBufferSize);

    gd.BufferManager.SetData<float>(bufferHandle, 0, region);

    pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(1, new BufferRange(bufferHandle, 0, RegionBufferSize)) });

    Span<GAL.Viewport> viewports = stackalloc GAL.Viewport[1];

    var rect = new Rectangle<float>(
        MathF.Min(dstRegion.X1, dstRegion.X2),
        MathF.Min(dstRegion.Y1, dstRegion.Y2),
        MathF.Abs(dstRegion.X2 - dstRegion.X1),
        MathF.Abs(dstRegion.Y2 - dstRegion.Y1));

    viewports[0] = new GAL.Viewport(
        rect,
        ViewportSwizzle.PositiveX,
        ViewportSwizzle.PositiveY,
        ViewportSwizzle.PositiveZ,
        ViewportSwizzle.PositiveW,
        0f,
        1f);

    // Note: the previous version stack-allocated a scissors span here but never
    // wrote or bound it; it has been removed (this method does not set scissors).
    pipeline.SetProgram(_programColorBlit);
    pipeline.SetViewports(viewports, false);
    pipeline.SetPrimitiveTopology(GAL.PrimitiveTopology.TriangleStrip);
    pipeline.Draw(4, 1, 0, 0);

    gd.BufferManager.Delete(bufferHandle);
}
+
+ /// <summary>
+ /// Converts a buffer of 8-bit indices into 16-bit indices by widening the
+ /// element stride from 1 to 2 bytes (extra destination bytes are zero filled
+ /// by <see cref="ChangeStride"/>).
+ /// </summary>
+ public unsafe void ConvertI8ToI16(VulkanRenderer gd, CommandBufferScoped cbs, BufferHolder src, BufferHolder dst, int srcOffset, int size)
+ {
+ ChangeStride(gd, cbs, src, dst, srcOffset, size, 1, 2);
+ }
+
+ /// <summary>
+ /// Repacks buffer elements to a new stride: element i is read from
+ /// srcOffset + i * stride and written at i * newStride on the destination,
+ /// with any remaining destination bytes zero filled.
+ /// </summary>
+ /// <remarks>
+ /// Uses a compute program when 8-bit shader types are supported, otherwise
+ /// falls back to CmdFillBuffer plus one CmdCopyBuffer region per element.
+ /// </remarks>
+ public unsafe void ChangeStride(VulkanRenderer gd, CommandBufferScoped cbs, BufferHolder src, BufferHolder dst, int srcOffset, int size, int stride, int newStride)
+ {
+ bool supportsUint8 = gd.Capabilities.SupportsShaderInt8;
+
+ int elems = size / stride;
+ int newSize = elems * newStride;
+
+ var srcBufferAuto = src.GetBuffer();
+ var dstBufferAuto = dst.GetBuffer();
+
+ var srcBuffer = srcBufferAuto.Get(cbs, srcOffset, size).Value;
+ var dstBuffer = dstBufferAuto.Get(cbs, 0, newSize).Value;
+
+ // Write access/stage depend on which of the two paths below is taken.
+ var access = supportsUint8 ? AccessFlags.ShaderWriteBit : AccessFlags.TransferWriteBit;
+ var stage = supportsUint8 ? PipelineStageFlags.ComputeShaderBit : PipelineStageFlags.TransferBit;
+
+ // Make the destination range available to the write stage used below.
+ BufferHolder.InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ BufferHolder.DefaultAccessFlags,
+ access,
+ PipelineStageFlags.AllCommandsBit,
+ stage,
+ 0,
+ newSize);
+
+ if (supportsUint8)
+ {
+ const int ParamsBufferSize = 16;
+
+ // Uniform layout: old stride, new stride, total size, source offset.
+ Span<int> shaderParams = stackalloc int[ParamsBufferSize / sizeof(int)];
+
+ shaderParams[0] = stride;
+ shaderParams[1] = newStride;
+ shaderParams[2] = size;
+ shaderParams[3] = srcOffset;
+
+ var bufferHandle = gd.BufferManager.CreateWithHandle(gd, ParamsBufferSize);
+
+ gd.BufferManager.SetData<int>(bufferHandle, 0, shaderParams);
+
+ _pipeline.SetCommandBuffer(cbs);
+
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(0, new BufferRange(bufferHandle, 0, ParamsBufferSize)) });
+
+ Span<Auto<DisposableBuffer>> sbRanges = new Auto<DisposableBuffer>[2];
+
+ sbRanges[0] = srcBufferAuto;
+ sbRanges[1] = dstBufferAuto;
+
+ _pipeline.SetStorageBuffers(1, sbRanges);
+
+ _pipeline.SetProgram(_programStrideChange);
+ _pipeline.DispatchCompute(1, 1, 1);
+
+ gd.BufferManager.Delete(bufferHandle);
+
+ _pipeline.Finish(gd, cbs);
+ }
+ else
+ {
+ // Fallback: zero the destination, then copy each element to its new
+ // location with one copy region per element.
+ gd.Api.CmdFillBuffer(cbs.CommandBuffer, dstBuffer, 0, Vk.WholeSize, 0);
+
+ var bufferCopy = new BufferCopy[elems];
+
+ for (ulong i = 0; i < (ulong)elems; i++)
+ {
+ bufferCopy[i] = new BufferCopy((ulong)srcOffset + i * (ulong)stride, i * (ulong)newStride, (ulong)stride);
+ }
+
+ fixed (BufferCopy* pBufferCopy = bufferCopy)
+ {
+ gd.Api.CmdCopyBuffer(cbs.CommandBuffer, srcBuffer, dstBuffer, (uint)elems, pBufferCopy);
+ }
+ }
+
+ // Return the destination range to the default access mask.
+ BufferHolder.InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ access,
+ BufferHolder.DefaultAccessFlags,
+ stage,
+ PipelineStageFlags.AllCommandsBit,
+ 0,
+ newSize);
+ }
+
+ /// <summary>
+ /// Converts an index buffer to another topology by building a list of
+ /// CmdCopyBuffer regions from the pattern's index mapping. The destination is
+ /// zero filled first, so narrower source indices expand to 4 byte output
+ /// indices with zeroed upper bytes.
+ /// </summary>
+ public unsafe void ConvertIndexBuffer(VulkanRenderer gd,
+ CommandBufferScoped cbs,
+ BufferHolder src,
+ BufferHolder dst,
+ IndexBufferPattern pattern,
+ int indexSize,
+ int srcOffset,
+ int indexCount)
+ {
+ // TODO: Support conversion with primitive restart enabled.
+ // TODO: Convert with a compute shader?
+
+ int convertedCount = pattern.GetConvertedCount(indexCount);
+ int outputIndexSize = 4;
+
+ var srcBuffer = src.GetBuffer().Get(cbs, srcOffset, indexCount * indexSize).Value;
+ var dstBuffer = dst.GetBuffer().Get(cbs, 0, convertedCount * outputIndexSize).Value;
+
+ gd.Api.CmdFillBuffer(cbs.CommandBuffer, dstBuffer, 0, Vk.WholeSize, 0);
+
+ var bufferCopy = new List<BufferCopy>();
+ int outputOffset = 0;
+
+ // Try to merge copies of adjacent indices to reduce copy count.
+ // Merging is only possible when source and output index sizes match,
+ // otherwise every index needs its own (padded) copy region.
+ int sequenceStart = 0;
+ int sequenceLength = 0;
+
+ foreach (var index in pattern.GetIndexMapping(indexCount))
+ {
+ if (sequenceLength > 0)
+ {
+ if (index == sequenceStart + sequenceLength && indexSize == outputIndexSize)
+ {
+ sequenceLength++;
+ continue;
+ }
+
+ // Commit the copy so far.
+ bufferCopy.Add(new BufferCopy((ulong)(srcOffset + sequenceStart * indexSize), (ulong)outputOffset, (ulong)(indexSize * sequenceLength)));
+ outputOffset += outputIndexSize * sequenceLength;
+ }
+
+ sequenceStart = index;
+ sequenceLength = 1;
+ }
+
+ if (sequenceLength > 0)
+ {
+ // Commit final pending copy.
+ bufferCopy.Add(new BufferCopy((ulong)(srcOffset + sequenceStart * indexSize), (ulong)outputOffset, (ulong)(indexSize * sequenceLength)));
+ }
+
+ var bufferCopyArray = bufferCopy.ToArray();
+
+ // Transition the destination for transfer writes, record the copies,
+ // then return it to the default access mask.
+ BufferHolder.InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ BufferHolder.DefaultAccessFlags,
+ AccessFlags.TransferWriteBit,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.TransferBit,
+ 0,
+ convertedCount * outputIndexSize);
+
+ fixed (BufferCopy* pBufferCopy = bufferCopyArray)
+ {
+ gd.Api.CmdCopyBuffer(cbs.CommandBuffer, srcBuffer, dstBuffer, (uint)bufferCopyArray.Length, pBufferCopy);
+ }
+
+ BufferHolder.InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ AccessFlags.TransferWriteBit,
+ BufferHolder.DefaultAccessFlags,
+ PipelineStageFlags.TransferBit,
+ PipelineStageFlags.AllCommandsBit,
+ 0,
+ convertedCount * outputIndexSize);
+ }
+
/// <summary>
/// Copies texture data between textures whose formats have different bytes per
/// pixel, using compute programs that widen or shorten the texel size. One
/// dispatch is recorded per layer and level.
/// </summary>
/// <param name="gd">Vulkan renderer</param>
/// <param name="cbs">Command buffer to record into</param>
/// <param name="src">Source texture</param>
/// <param name="dst">Destination texture</param>
/// <param name="srcLayer">First source layer</param>
/// <param name="dstLayer">First destination layer</param>
/// <param name="srcLevel">First source level</param>
/// <param name="dstLevel">First destination level</param>
/// <param name="depth">Number of layers to copy</param>
/// <param name="levels">Number of levels to copy</param>
public void CopyIncompatibleFormats(
    VulkanRenderer gd,
    CommandBufferScoped cbs,
    TextureView src,
    TextureView dst,
    int srcLayer,
    int dstLayer,
    int srcLevel,
    int dstLevel,
    int depth,
    int levels)
{
    const int ParamsBufferSize = 4;

    // The uniform buffer holds a single int, so allocate exactly one element.
    // (Previously this was stackalloc int[sizeof(int)], i.e. 4 ints / 16 bytes
    // for a 4 byte buffer.)
    Span<int> shaderParams = stackalloc int[ParamsBufferSize / sizeof(int)];

    int srcBpp = src.Info.BytesPerPixel;
    int dstBpp = dst.Info.BytesPerPixel;

    // Ratio between the larger and the smaller texel size, passed as log2.
    int ratio = srcBpp < dstBpp ? dstBpp / srcBpp : srcBpp / dstBpp;

    shaderParams[0] = BitOperations.Log2((uint)ratio);

    var bufferHandle = gd.BufferManager.CreateWithHandle(gd, ParamsBufferSize);

    gd.BufferManager.SetData<int>(bufferHandle, 0, shaderParams);

    // Make the source readable by the compute shader.
    TextureView.InsertImageBarrier(
        gd.Api,
        cbs.CommandBuffer,
        src.GetImage().Get(cbs).Value,
        TextureStorage.DefaultAccessMask,
        AccessFlags.ShaderReadBit,
        PipelineStageFlags.AllCommandsBit,
        PipelineStageFlags.ComputeShaderBit,
        ImageAspectFlags.ColorBit,
        src.FirstLayer + srcLayer,
        src.FirstLevel + srcLevel,
        depth,
        levels);

    _pipeline.SetCommandBuffer(cbs);

    _pipeline.SetProgram(srcBpp < dstBpp ? _programColorCopyWidening : _programColorCopyShortening);

    // Calculate ideal component size, given our constraints:
    // - Component size must not exceed bytes per pixel of source and destination image formats.
    // - Maximum component size is 4 (R32).
    int componentSize = Math.Min(Math.Min(srcBpp, dstBpp), 4);

    var srcFormat = GetFormat(componentSize, srcBpp / componentSize);
    var dstFormat = GetFormat(componentSize, dstBpp / componentSize);

    _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(0, new BufferRange(bufferHandle, 0, ParamsBufferSize)) });

    for (int l = 0; l < levels; l++)
    {
        for (int z = 0; z < depth; z++)
        {
            var srcView = Create2DLayerView(src, srcLayer + z, srcLevel + l, srcFormat);
            var dstView = Create2DLayerView(dst, dstLayer + z, dstLevel + l);

            _pipeline.SetTextureAndSampler(ShaderStage.Compute, 0, srcView, null);
            _pipeline.SetImage(0, dstView, dstFormat);

            // One 32x32 workgroup tile per dispatch cell, covering the
            // smaller of the two mip dimensions.
            int dispatchX = (Math.Min(srcView.Info.Width, dstView.Info.Width) + 31) / 32;
            int dispatchY = (Math.Min(srcView.Info.Height, dstView.Info.Height) + 31) / 32;

            _pipeline.DispatchCompute(dispatchX, dispatchY, 1);

            // Create2DLayerView may return the input itself; only release views we created.
            if (srcView != src)
            {
                srcView.Release();
            }

            if (dstView != dst)
            {
                dstView.Release();
            }
        }
    }

    gd.BufferManager.Delete(bufferHandle);

    _pipeline.Finish(gd, cbs);

    // Return the destination to the default access mask.
    TextureView.InsertImageBarrier(
        gd.Api,
        cbs.CommandBuffer,
        dst.GetImage().Get(cbs).Value,
        AccessFlags.ShaderWriteBit,
        TextureStorage.DefaultAccessMask,
        PipelineStageFlags.ComputeShaderBit,
        PipelineStageFlags.AllCommandsBit,
        ImageAspectFlags.ColorBit,
        dst.FirstLayer + dstLayer,
        dst.FirstLevel + dstLevel,
        depth,
        levels);
}
+
+ /// <summary>
+ /// Copies a multisample texture into a non-multisample texture. Depth-stencil
+ /// formats are copied with fragment shader draws (compute cannot write depth),
+ /// color formats with one compute dispatch per layer.
+ /// </summary>
+ public void CopyMSToNonMS(VulkanRenderer gd, CommandBufferScoped cbs, TextureView src, TextureView dst, int srcLayer, int dstLayer, int depth)
+ {
+ const int ParamsBufferSize = 16;
+
+ Span<int> shaderParams = stackalloc int[ParamsBufferSize / sizeof(int)];
+
+ int samples = src.Info.Samples;
+ bool isDepthOrStencil = src.Info.Format.IsDepthOrStencil();
+ var aspectFlags = src.Info.Format.ConvertAspectFlags();
+
+ // X and Y are the expected texture samples.
+ // Z and W are the actual texture samples used.
+ // They may differ if the GPU does not support the samples count requested and we had to use a lower amount.
+ (shaderParams[0], shaderParams[1]) = GetSampleCountXYLog2(samples);
+ (shaderParams[2], shaderParams[3]) = GetSampleCountXYLog2((int)TextureStorage.ConvertToSampleCountFlags(gd.Capabilities.SupportedSampleCounts, (uint)samples));
+
+ var bufferHandle = gd.BufferManager.CreateWithHandle(gd, ParamsBufferSize);
+
+ gd.BufferManager.SetData<int>(bufferHandle, 0, shaderParams);
+
+ // Make the source readable by the stage that will sample it.
+ TextureView.InsertImageBarrier(
+ gd.Api,
+ cbs.CommandBuffer,
+ src.GetImage().Get(cbs).Value,
+ TextureStorage.DefaultAccessMask,
+ AccessFlags.ShaderReadBit,
+ PipelineStageFlags.AllCommandsBit,
+ isDepthOrStencil ? PipelineStageFlags.FragmentShaderBit : PipelineStageFlags.ComputeShaderBit,
+ aspectFlags,
+ src.FirstLayer + srcLayer,
+ src.FirstLevel,
+ depth,
+ 1);
+
+ _pipeline.SetCommandBuffer(cbs);
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(0, new BufferRange(bufferHandle, 0, ParamsBufferSize)) });
+
+ if (isDepthOrStencil)
+ {
+ // We can't use compute for this case because compute can't modify depth textures.
+
+ Span<GAL.Viewport> viewports = stackalloc GAL.Viewport[1];
+
+ var rect = new Rectangle<float>(0, 0, dst.Width, dst.Height);
+
+ viewports[0] = new GAL.Viewport(
+ rect,
+ ViewportSwizzle.PositiveX,
+ ViewportSwizzle.PositiveY,
+ ViewportSwizzle.PositiveZ,
+ ViewportSwizzle.PositiveW,
+ 0f,
+ 1f);
+
+ Span<Rectangle<int>> scissors = stackalloc Rectangle<int>[1];
+
+ scissors[0] = new Rectangle<int>(0, 0, dst.Width, dst.Height);
+
+ _pipeline.SetScissors(scissors);
+ _pipeline.SetViewports(viewports, false);
+ _pipeline.SetPrimitiveTopology(GAL.PrimitiveTopology.TriangleStrip);
+
+ // One fullscreen draw per layer, targeting the matching destination layer.
+ for (int z = 0; z < depth; z++)
+ {
+ var srcView = Create2DLayerView(src, srcLayer + z, 0);
+ var dstView = Create2DLayerView(dst, dstLayer + z, 0);
+
+ _pipeline.SetRenderTarget(
+ ((TextureView)dstView).GetImageViewForAttachment(),
+ (uint)dst.Width,
+ (uint)dst.Height,
+ true,
+ dst.VkFormat);
+
+ CopyMSDraw(srcView, aspectFlags, fromMS: true);
+
+ // Create2DLayerView may return the input itself; only release created views.
+ if (srcView != src)
+ {
+ srcView.Release();
+ }
+
+ if (dstView != dst)
+ {
+ dstView.Release();
+ }
+ }
+ }
+ else
+ {
+ // Color path: reinterpret as a single-component uint format of the
+ // same texel size and copy with one compute dispatch per layer.
+ var format = GetFormat(src.Info.BytesPerPixel);
+
+ int dispatchX = (dst.Info.Width + 31) / 32;
+ int dispatchY = (dst.Info.Height + 31) / 32;
+
+ _pipeline.SetProgram(_programColorCopyToNonMs);
+
+ for (int z = 0; z < depth; z++)
+ {
+ var srcView = Create2DLayerView(src, srcLayer + z, 0, format);
+ var dstView = Create2DLayerView(dst, dstLayer + z, 0);
+
+ _pipeline.SetTextureAndSampler(ShaderStage.Compute, 0, srcView, null);
+ _pipeline.SetImage(0, dstView, format);
+
+ _pipeline.DispatchCompute(dispatchX, dispatchY, 1);
+
+ if (srcView != src)
+ {
+ srcView.Release();
+ }
+
+ if (dstView != dst)
+ {
+ dstView.Release();
+ }
+ }
+ }
+
+ gd.BufferManager.Delete(bufferHandle);
+
+ _pipeline.Finish(gd, cbs);
+
+ // Return the destination to the default access mask.
+ TextureView.InsertImageBarrier(
+ gd.Api,
+ cbs.CommandBuffer,
+ dst.GetImage().Get(cbs).Value,
+ isDepthOrStencil ? AccessFlags.DepthStencilAttachmentWriteBit : AccessFlags.ShaderWriteBit,
+ TextureStorage.DefaultAccessMask,
+ isDepthOrStencil ? PipelineStageFlags.LateFragmentTestsBit : PipelineStageFlags.ComputeShaderBit,
+ PipelineStageFlags.AllCommandsBit,
+ aspectFlags,
+ dst.FirstLayer + dstLayer,
+ dst.FirstLevel,
+ depth,
+ 1);
+ }
+
+ /// <summary>
+ /// Copies a non-multisample texture into a multisample texture. Both paths use
+ /// fullscreen draws (the destination is a multisample render target); color
+ /// formats draw with a fragment program, depth-stencil formats draw each aspect.
+ /// </summary>
+ public void CopyNonMSToMS(VulkanRenderer gd, CommandBufferScoped cbs, TextureView src, TextureView dst, int srcLayer, int dstLayer, int depth)
+ {
+ const int ParamsBufferSize = 16;
+
+ Span<int> shaderParams = stackalloc int[ParamsBufferSize / sizeof(int)];
+
+ int samples = dst.Info.Samples;
+ bool isDepthOrStencil = src.Info.Format.IsDepthOrStencil();
+ var aspectFlags = src.Info.Format.ConvertAspectFlags();
+
+ // X and Y are the expected texture samples.
+ // Z and W are the actual texture samples used.
+ // They may differ if the GPU does not support the samples count requested and we had to use a lower amount.
+ (shaderParams[0], shaderParams[1]) = GetSampleCountXYLog2(samples);
+ (shaderParams[2], shaderParams[3]) = GetSampleCountXYLog2((int)TextureStorage.ConvertToSampleCountFlags(gd.Capabilities.SupportedSampleCounts, (uint)samples));
+
+ var bufferHandle = gd.BufferManager.CreateWithHandle(gd, ParamsBufferSize);
+
+ gd.BufferManager.SetData<int>(bufferHandle, 0, shaderParams);
+
+ // Make the source readable by the fragment shader.
+ TextureView.InsertImageBarrier(
+ gd.Api,
+ cbs.CommandBuffer,
+ src.GetImage().Get(cbs).Value,
+ TextureStorage.DefaultAccessMask,
+ AccessFlags.ShaderReadBit,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.FragmentShaderBit,
+ aspectFlags,
+ src.FirstLayer + srcLayer,
+ src.FirstLevel,
+ depth,
+ 1);
+
+ _pipeline.SetCommandBuffer(cbs);
+
+ Span<GAL.Viewport> viewports = stackalloc GAL.Viewport[1];
+
+ var rect = new Rectangle<float>(0, 0, dst.Width, dst.Height);
+
+ viewports[0] = new GAL.Viewport(
+ rect,
+ ViewportSwizzle.PositiveX,
+ ViewportSwizzle.PositiveY,
+ ViewportSwizzle.PositiveZ,
+ ViewportSwizzle.PositiveW,
+ 0f,
+ 1f);
+
+ Span<Rectangle<int>> scissors = stackalloc Rectangle<int>[1];
+
+ scissors[0] = new Rectangle<int>(0, 0, dst.Width, dst.Height);
+
+ _pipeline.SetRenderTargetColorMasks(new uint[] { 0xf });
+ _pipeline.SetScissors(scissors);
+ _pipeline.SetViewports(viewports, false);
+ _pipeline.SetPrimitiveTopology(GAL.PrimitiveTopology.TriangleStrip);
+
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(0, new BufferRange(bufferHandle, 0, ParamsBufferSize)) });
+
+ if (isDepthOrStencil)
+ {
+ // One draw per layer, writing depth and stencil aspects as needed.
+ for (int z = 0; z < depth; z++)
+ {
+ var srcView = Create2DLayerView(src, srcLayer + z, 0);
+ var dstView = Create2DLayerView(dst, dstLayer + z, 0);
+
+ _pipeline.SetRenderTarget(
+ ((TextureView)dstView).GetImageViewForAttachment(),
+ (uint)dst.Width,
+ (uint)dst.Height,
+ (uint)samples,
+ true,
+ dst.VkFormat);
+
+ CopyMSDraw(srcView, aspectFlags, fromMS: false);
+
+ // Create2DLayerView may return the input itself; only release created views.
+ if (srcView != src)
+ {
+ srcView.Release();
+ }
+
+ if (dstView != dst)
+ {
+ dstView.Release();
+ }
+ }
+ }
+ else
+ {
+ _pipeline.SetProgram(_programColorDrawToMs);
+
+ // Color path: reinterpret both sides as a single-component uint
+ // format of the same texel size.
+ var format = GetFormat(src.Info.BytesPerPixel);
+ var vkFormat = FormatTable.GetFormat(format);
+
+ for (int z = 0; z < depth; z++)
+ {
+ var srcView = Create2DLayerView(src, srcLayer + z, 0, format);
+ var dstView = Create2DLayerView(dst, dstLayer + z, 0);
+
+ _pipeline.SetTextureAndSampler(ShaderStage.Fragment, 0, srcView, null);
+ _pipeline.SetRenderTarget(
+ ((TextureView)dstView).GetView(format).GetImageViewForAttachment(),
+ (uint)dst.Width,
+ (uint)dst.Height,
+ (uint)samples,
+ false,
+ vkFormat);
+
+ _pipeline.Draw(4, 1, 0, 0);
+
+ if (srcView != src)
+ {
+ srcView.Release();
+ }
+
+ if (dstView != dst)
+ {
+ dstView.Release();
+ }
+ }
+ }
+
+ gd.BufferManager.Delete(bufferHandle);
+
+ _pipeline.Finish(gd, cbs);
+
+ // Return the destination to the default access mask.
+ TextureView.InsertImageBarrier(
+ gd.Api,
+ cbs.CommandBuffer,
+ dst.GetImage().Get(cbs).Value,
+ isDepthOrStencil ? AccessFlags.DepthStencilAttachmentWriteBit : AccessFlags.ColorAttachmentWriteBit,
+ TextureStorage.DefaultAccessMask,
+ isDepthOrStencil ? PipelineStageFlags.LateFragmentTestsBit : PipelineStageFlags.ColorAttachmentOutputBit,
+ PipelineStageFlags.AllCommandsBit,
+ aspectFlags,
+ dst.FirstLayer + dstLayer,
+ dst.FirstLevel,
+ depth,
+ 1);
+ }
+
+ /// <summary>
+ /// Draws every aspect present on a depth-stencil source: depth when the depth
+ /// aspect flag is set, and stencil when the stencil aspect flag is set and the
+ /// (optional) stencil draw programs were created.
+ /// </summary>
+ /// <param name="src">Source view</param>
+ /// <param name="aspectFlags">Aspects present on the source format</param>
+ /// <param name="fromMS">True when the source is multisample, false when the destination is</param>
+ private void CopyMSDraw(TextureView src, ImageAspectFlags aspectFlags, bool fromMS)
+ {
+ if (aspectFlags.HasFlag(ImageAspectFlags.DepthBit))
+ {
+ var depthTexture = CreateDepthOrStencilView(src, DepthStencilMode.Depth);
+
+ CopyMSAspectDraw(depthTexture, fromMS, isDepth: true);
+
+ // CreateDepthOrStencilView may return the input itself; only release created views.
+ if (depthTexture != src)
+ {
+ depthTexture.Release();
+ }
+ }
+
+ if (aspectFlags.HasFlag(ImageAspectFlags.StencilBit) && _programStencilDrawToMs != null)
+ {
+ var stencilTexture = CreateDepthOrStencilView(src, DepthStencilMode.Stencil);
+
+ CopyMSAspectDraw(stencilTexture, fromMS, isDepth: false);
+
+ if (stencilTexture != src)
+ {
+ stencilTexture.Release();
+ }
+ }
+ }
+
+ /// <summary>
+ /// Draws one aspect (depth or stencil) of a MS&lt;-&gt;non-MS copy as a fullscreen
+ /// triangle strip, then restores the modified depth/stencil state.
+ /// </summary>
+ /// <param name="src">Source view, sampled with the nearest sampler</param>
+ /// <param name="fromMS">Selects the "to non-MS" or "to MS" program variant</param>
+ /// <param name="isDepth">True to write depth, false to write stencil</param>
+ private void CopyMSAspectDraw(TextureView src, bool fromMS, bool isDepth)
+ {
+ _pipeline.SetTextureAndSampler(ShaderStage.Fragment, 0, src, _samplerNearest);
+
+ if (isDepth)
+ {
+ _pipeline.SetProgram(fromMS ? _programDepthDrawToNonMs : _programDepthDrawToMs);
+ _pipeline.SetDepthTest(new DepthTestDescriptor(true, true, GAL.CompareOp.Always));
+ }
+ else
+ {
+ _pipeline.SetProgram(fromMS ? _programStencilDrawToNonMs : _programStencilDrawToMs);
+ _pipeline.SetStencilTest(CreateStencilTestDescriptor(true));
+ }
+
+ _pipeline.Draw(4, 1, 0, 0);
+
+ // Disable the depth/stencil state again so later draws are unaffected.
+ if (isDepth)
+ {
+ _pipeline.SetDepthTest(new DepthTestDescriptor(false, false, GAL.CompareOp.Always));
+ }
+ else
+ {
+ _pipeline.SetStencilTest(CreateStencilTestDescriptor(false));
+ }
+ }
+
/// <summary>
/// Maps a sample count to the log2 dimensions of its sample grid:
/// 2 -> 2x1, 4 -> 2x2, 8 -> 4x2, 16 -> 4x4, 32 -> 8x4, 64 -> 8x8.
/// Any other count (including 1) maps to (0, 0), i.e. a 1x1 grid.
/// </summary>
/// <param name="samples">Sample count</param>
/// <returns>(log2 of samples in X, log2 of samples in Y)</returns>
private static (int, int) GetSampleCountXYLog2(int samples)
{
    return samples switch
    {
        2 => (1, 0),  // 2x1
        4 => (1, 1),  // 2x2
        8 => (2, 1),  // 4x2
        16 => (2, 2), // 4x4
        32 => (3, 2), // 8x4
        64 => (3, 3), // 8x8
        _ => (0, 0),
    };
}
+
+ /// <summary>
+ /// Gets a single-level view of one layer of a texture, retargeted to the
+ /// non-array equivalent (2D, 1D or 2D multisample) and optionally
+ /// reinterpreted with a different format.
+ /// </summary>
+ /// <param name="from">Texture to create the view of</param>
+ /// <param name="layer">Layer the view should expose</param>
+ /// <param name="level">Level the view should expose</param>
+ /// <param name="format">Optional replacement format</param>
+ /// <returns>The input itself when it is already a plain 2D texture at level 0
+ /// with a matching format (in that case the caller must not release it),
+ /// otherwise a new view</returns>
+ private static TextureView Create2DLayerView(TextureView from, int layer, int level, GAL.Format? format = null)
+ {
+ if (from.Info.Target == Target.Texture2D && level == 0 && (format == null || format.Value == from.Info.Format))
+ {
+ return from;
+ }
+
+ // Collapse array targets to their single-layer equivalent.
+ var target = from.Info.Target switch
+ {
+ Target.Texture1DArray => Target.Texture1D,
+ Target.Texture2DMultisampleArray => Target.Texture2DMultisample,
+ _ => Target.Texture2D
+ };
+
+ var info = new TextureCreateInfo(
+ from.Info.Width,
+ from.Info.Height,
+ from.Info.Depth,
+ 1,
+ from.Info.Samples,
+ from.Info.BlockWidth,
+ from.Info.BlockHeight,
+ from.Info.BytesPerPixel,
+ format ?? from.Info.Format,
+ from.Info.DepthStencilMode,
+ target,
+ from.Info.SwizzleR,
+ from.Info.SwizzleG,
+ from.Info.SwizzleB,
+ from.Info.SwizzleA);
+
+ return from.CreateViewImpl(info, layer, level);
+ }
+
/// <summary>
/// Gets a single-component (or multi-component for 8/16 bpp) unsigned integer
/// format with the requested size in bytes per pixel.
/// </summary>
/// <param name="bytesPerPixel">Texel size in bytes</param>
/// <exception cref="ArgumentException">Thrown when no uint format of that size exists</exception>
private static GAL.Format GetFormat(int bytesPerPixel)
{
    switch (bytesPerPixel)
    {
        case 1:
            return GAL.Format.R8Uint;
        case 2:
            return GAL.Format.R16Uint;
        case 4:
            return GAL.Format.R32Uint;
        case 8:
            return GAL.Format.R32G32Uint;
        case 16:
            return GAL.Format.R32G32B32A32Uint;
        default:
            throw new ArgumentException($"Invalid bytes per pixel {bytesPerPixel}.");
    }
}
+
/// <summary>
/// Gets an unsigned integer format with the requested component size
/// (1, 2 or 4 bytes) and component count (1, 2 or 4).
/// </summary>
/// <param name="componentSize">Size of each component in bytes</param>
/// <param name="componentsCount">Number of components</param>
/// <exception cref="ArgumentException">Thrown for unsupported size or count combinations</exception>
private static GAL.Format GetFormat(int componentSize, int componentsCount)
{
    return (componentSize, componentsCount) switch
    {
        (1, 1) => GAL.Format.R8Uint,
        (1, 2) => GAL.Format.R8G8Uint,
        (1, 4) => GAL.Format.R8G8B8A8Uint,
        (2, 1) => GAL.Format.R16Uint,
        (2, 2) => GAL.Format.R16G16Uint,
        (2, 4) => GAL.Format.R16G16B16A16Uint,
        (4, 1) => GAL.Format.R32Uint,
        (4, 2) => GAL.Format.R32G32Uint,
        (4, 4) => GAL.Format.R32G32B32A32Uint,
        // Valid component size but unsupported count.
        (1 or 2 or 4, _) => throw new ArgumentException($"Invalid components count {componentsCount}."),
        _ => throw new ArgumentException($"Invalid component size {componentSize}."),
    };
}
+
+ /// <summary>
+ /// GPU path for index buffer topology conversion when the draw parameters come
+ /// from an indirect buffer: a first compute pass reads the indirect data and
+ /// writes the converted indirect buffer plus the dispatch size for the second
+ /// pass, then an indirect compute dispatch converts the indices themselves.
+ /// </summary>
+ public void ConvertIndexBufferIndirect(
+ VulkanRenderer gd,
+ CommandBufferScoped cbs,
+ BufferHolder srcIndirectBuffer,
+ BufferHolder dstIndirectBuffer,
+ BufferRange drawCountBuffer,
+ BufferHolder srcIndexBuffer,
+ BufferHolder dstIndexBuffer,
+ IndexBufferPattern pattern,
+ int indexSize,
+ int srcIndexBufferOffset,
+ int srcIndexBufferSize,
+ int srcIndirectBufferOffset,
+ bool hasDrawCount,
+ int maxDrawCount,
+ int indirectDataStride)
+ {
+ // TODO: Support conversion with primitive restart enabled.
+
+ // The draw count is bound as a uniform buffer, which requires an aligned
+ // offset; the sub-offset is passed to the shader separately (params[21]).
+ BufferRange drawCountBufferAligned = new BufferRange(
+ drawCountBuffer.Handle,
+ drawCountBuffer.Offset & ~(UniformBufferAlignment - 1),
+ UniformBufferAlignment);
+
+ int indirectDataSize = maxDrawCount * indirectDataStride;
+
+ int indexCount = srcIndexBufferSize / indexSize;
+ int primitivesCount = pattern.GetPrimitiveCount(indexCount);
+ int convertedCount = pattern.GetConvertedCount(indexCount);
+ int outputIndexSize = 4;
+
+ var srcBuffer = srcIndexBuffer.GetBuffer().Get(cbs, srcIndexBufferOffset, indexCount * indexSize).Value;
+ var dstBuffer = dstIndexBuffer.GetBuffer().Get(cbs, 0, convertedCount * outputIndexSize).Value;
+
+ const int ParamsBufferSize = 24 * sizeof(int);
+ const int ParamsIndirectDispatchOffset = 16 * sizeof(int);
+ const int ParamsIndirectDispatchSize = 3 * sizeof(int);
+
+ // Params layout: [0..7] pattern offset indices, [8..15] pattern/source
+ // description, [16..18] dispatch size (overwritten by the first pass),
+ // [19..23] indirect draw parameters (in ints).
+ Span<int> shaderParams = stackalloc int[ParamsBufferSize / sizeof(int)];
+
+ shaderParams[8] = pattern.PrimitiveVertices;
+ shaderParams[9] = pattern.PrimitiveVerticesOut;
+ shaderParams[10] = indexSize;
+ shaderParams[11] = outputIndexSize;
+ shaderParams[12] = pattern.BaseIndex;
+ shaderParams[13] = pattern.IndexStride;
+ shaderParams[14] = srcIndexBufferOffset;
+ shaderParams[15] = primitivesCount;
+ shaderParams[16] = 1;
+ shaderParams[17] = 1;
+ shaderParams[18] = 1;
+ shaderParams[19] = hasDrawCount ? 1 : 0;
+ shaderParams[20] = maxDrawCount;
+ shaderParams[21] = (drawCountBuffer.Offset & (UniformBufferAlignment - 1)) / 4;
+ shaderParams[22] = indirectDataStride / 4;
+ shaderParams[23] = srcIndirectBufferOffset / 4;
+
+ pattern.OffsetIndex.CopyTo(shaderParams.Slice(0, pattern.OffsetIndex.Length));
+
+ var patternBufferHandle = gd.BufferManager.CreateWithHandle(gd, ParamsBufferSize, out var patternBuffer);
+ var patternBufferAuto = patternBuffer.GetBuffer();
+
+ gd.BufferManager.SetData<int>(patternBufferHandle, 0, shaderParams);
+
+ _pipeline.SetCommandBuffer(cbs);
+
+ // Make the source indirect data readable by the first compute pass.
+ BufferHolder.InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ srcIndirectBuffer.GetBuffer().Get(cbs, srcIndirectBufferOffset, indirectDataSize).Value,
+ BufferHolder.DefaultAccessFlags,
+ AccessFlags.ShaderReadBit,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.ComputeShaderBit,
+ srcIndirectBufferOffset,
+ indirectDataSize);
+
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(0, drawCountBufferAligned) });
+ _pipeline.SetStorageBuffers(1, new[] { srcIndirectBuffer.GetBuffer(), dstIndirectBuffer.GetBuffer(), patternBuffer.GetBuffer() });
+
+ // First pass: convert the indirect data and compute the dispatch size
+ // for the index conversion pass (written into the params buffer).
+ _pipeline.SetProgram(_programConvertIndirectData);
+ _pipeline.DispatchCompute(1, 1, 1);
+
+ // The dispatch size written by the first pass is consumed as indirect
+ // dispatch parameters below.
+ BufferHolder.InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ patternBufferAuto.Get(cbs, ParamsIndirectDispatchOffset, ParamsIndirectDispatchSize).Value,
+ AccessFlags.ShaderWriteBit,
+ AccessFlags.IndirectCommandReadBit,
+ PipelineStageFlags.ComputeShaderBit,
+ PipelineStageFlags.DrawIndirectBit,
+ ParamsIndirectDispatchOffset,
+ ParamsIndirectDispatchSize);
+
+ BufferHolder.InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ BufferHolder.DefaultAccessFlags,
+ AccessFlags.TransferWriteBit,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.TransferBit,
+ 0,
+ convertedCount * outputIndexSize);
+
+ _pipeline.SetUniformBuffers(stackalloc[] { new BufferAssignment(0, new BufferRange(patternBufferHandle, 0, ParamsBufferSize)) });
+ _pipeline.SetStorageBuffers(1, new[] { srcIndexBuffer.GetBuffer(), dstIndexBuffer.GetBuffer() });
+
+ // Second pass: convert the index buffer itself.
+ _pipeline.SetProgram(_programConvertIndexBuffer);
+ _pipeline.DispatchComputeIndirect(patternBufferAuto, ParamsIndirectDispatchOffset);
+
+ // Return the destination index buffer to the default access mask.
+ BufferHolder.InsertBufferBarrier(
+ gd,
+ cbs.CommandBuffer,
+ dstBuffer,
+ AccessFlags.TransferWriteBit,
+ BufferHolder.DefaultAccessFlags,
+ PipelineStageFlags.TransferBit,
+ PipelineStageFlags.AllCommandsBit,
+ 0,
+ convertedCount * outputIndexSize);
+
+ gd.BufferManager.Delete(patternBufferHandle);
+
+ _pipeline.Finish(gd, cbs);
+ }
+
+ /// <summary>
+ /// Releases the helper shader programs, the samplers and the internal pipeline.
+ /// The stencil-related programs are disposed null-conditionally (they may not
+ /// have been created).
+ /// </summary>
+ /// <param name="disposing">True when called from Dispose(), false from a finalizer</param>
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ _programColorBlitClearAlpha.Dispose();
+ _programColorBlit.Dispose();
+ _programColorBlitMs.Dispose();
+ _programColorClearF.Dispose();
+ _programColorClearSI.Dispose();
+ _programColorClearUI.Dispose();
+ _programStrideChange.Dispose();
+ _programConvertIndexBuffer.Dispose();
+ _programConvertIndirectData.Dispose();
+ _programColorCopyShortening.Dispose();
+ _programColorCopyToNonMs.Dispose();
+ _programColorCopyWidening.Dispose();
+ _programColorDrawToMs.Dispose();
+ _programDepthBlit.Dispose();
+ _programDepthBlitMs.Dispose();
+ _programDepthDrawToMs.Dispose();
+ _programDepthDrawToNonMs.Dispose();
+ _programStencilBlit?.Dispose();
+ _programStencilBlitMs?.Dispose();
+ _programStencilDrawToMs?.Dispose();
+ _programStencilDrawToNonMs?.Dispose();
+ _samplerNearest.Dispose();
+ _samplerLinear.Dispose();
+ _pipeline.Dispose();
+ }
+ }
+
/// <summary>
/// Releases all resources owned by this instance.
/// </summary>
public void Dispose()
{
    Dispose(true);

    // Standard dispose pattern (CA1816): suppress finalization so a derived
    // type that adds a finalizer is not finalized after an explicit dispose.
    GC.SuppressFinalize(this);
}
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/IdList.cs b/src/Ryujinx.Graphics.Vulkan/IdList.cs
new file mode 100644
index 00000000..9fba9fe9
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/IdList.cs
@@ -0,0 +1,123 @@
+using System;
+using System.Collections.Generic;
+
+namespace Ryujinx.Graphics.Vulkan
+{
/// <summary>
/// Stores a set of reference-type values addressed by a dense, 1-based integer
/// id. Ids of removed entries are recycled by later additions. The list is not
/// synchronized; <see cref="TryGetValue"/> is defensive against racy shrinking.
/// </summary>
class IdList<T> where T : class
{
    private readonly List<T> _list;

    // Lowest index that could possibly be free. Free-slot searches start
    // here so the known-occupied prefix is never rescanned.
    private int _freeMin;

    public IdList()
    {
        _list = new List<T>();
        _freeMin = 0;
    }

    /// <summary>
    /// Adds a value, reusing the lowest free slot if one exists.
    /// </summary>
    /// <param name="value">Value to add</param>
    /// <returns>1-based id of the added value</returns>
    public int Add(T value)
    {
        int count = _list.Count;
        int id = _list.IndexOf(null, _freeMin);

        if ((uint)id < (uint)count)
        {
            _list[id] = value;
        }
        else
        {
            id = count;
            _list.Add(value);
        }

        // All slots below the one just filled are occupied (IndexOf returned
        // the first null), so the next free slot can only come after it.
        // Previously _freeMin was only advanced on append, which made every
        // subsequent Add rescan the filled prefix.
        _freeMin = id + 1;

        return id + 1;
    }

    /// <summary>
    /// Removes the value with the given id, if present. Removing the last
    /// entry also trims any trailing free slots.
    /// </summary>
    /// <param name="id">1-based id previously returned by <see cref="Add"/></param>
    public void Remove(int id)
    {
        id--;

        int count = _list.Count;

        if ((uint)id >= (uint)count)
        {
            return;
        }

        if (id + 1 == count)
        {
            // Trim unused items.
            int removeIndex = id;

            while (removeIndex > 0 && _list[removeIndex - 1] == null)
            {
                removeIndex--;
            }

            _list.RemoveRange(removeIndex, count - removeIndex);

            if (_freeMin > removeIndex)
            {
                _freeMin = removeIndex;
            }
        }
        else
        {
            _list[id] = null;

            if (_freeMin > id)
            {
                _freeMin = id;
            }
        }
    }

    /// <summary>
    /// Tries to get the value with the given id.
    /// </summary>
    /// <param name="id">1-based id to look up</param>
    /// <param name="value">The stored value, or null when not found</param>
    /// <returns>True if a value with that id exists</returns>
    public bool TryGetValue(int id, out T value)
    {
        id--;

        // The catch blocks are deliberately kept: the list is not
        // synchronized, so a concurrent Remove may shrink it between the
        // bounds check and the indexer access.
        try
        {
            if ((uint)id < (uint)_list.Count)
            {
                value = _list[id];
                return value != null;
            }

            value = null;
            return false;
        }
        catch (ArgumentOutOfRangeException)
        {
            value = null;
            return false;
        }
        catch (IndexOutOfRangeException)
        {
            value = null;
            return false;
        }
    }

    /// <summary>
    /// Removes all values and resets the free-slot search position.
    /// </summary>
    public void Clear()
    {
        _list.Clear();
        _freeMin = 0;
    }

    /// <summary>
    /// Enumerates the stored values, skipping free slots.
    /// </summary>
    public IEnumerator<T> GetEnumerator()
    {
        for (int i = 0; i < _list.Count; i++)
        {
            if (_list[i] != null)
            {
                yield return _list[i];
            }
        }
    }
}
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/IndexBufferPattern.cs b/src/Ryujinx.Graphics.Vulkan/IndexBufferPattern.cs
new file mode 100644
index 00000000..11f4ec33
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/IndexBufferPattern.cs
@@ -0,0 +1,139 @@
+using Ryujinx.Graphics.GAL;
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ internal class IndexBufferPattern : IDisposable
+ {
+ public int PrimitiveVertices { get; }
+ public int PrimitiveVerticesOut { get; }
+ public int BaseIndex { get; }
+ public int[] OffsetIndex { get; }
+ public int IndexStride { get; }
+ public bool RepeatStart { get; }
+
+ private VulkanRenderer _gd;
+ private int _currentSize;
+ private BufferHandle _repeatingBuffer;
+
+ public IndexBufferPattern(VulkanRenderer gd,
+ int primitiveVertices,
+ int primitiveVerticesOut,
+ int baseIndex,
+ int[] offsetIndex,
+ int indexStride,
+ bool repeatStart)
+ {
+ PrimitiveVertices = primitiveVertices;
+ PrimitiveVerticesOut = primitiveVerticesOut;
+ BaseIndex = baseIndex;
+ OffsetIndex = offsetIndex;
+ IndexStride = indexStride;
+ RepeatStart = repeatStart;
+
+ _gd = gd;
+ }
+
+ public int GetPrimitiveCount(int vertexCount)
+ {
+ return Math.Max(0, (vertexCount - BaseIndex) / IndexStride);
+ }
+
+ public int GetConvertedCount(int indexCount)
+ {
+ int primitiveCount = GetPrimitiveCount(indexCount);
+ return primitiveCount * OffsetIndex.Length;
+ }
+
+ public IEnumerable<int> GetIndexMapping(int indexCount)
+ {
+ int primitiveCount = GetPrimitiveCount(indexCount);
+ int index = BaseIndex;
+
+ for (int i = 0; i < primitiveCount; i++)
+ {
+ if (RepeatStart)
+ {
+ // Used for triangle fan
+ yield return 0;
+ }
+
+ for (int j = RepeatStart ? 1 : 0; j < OffsetIndex.Length; j++)
+ {
+ yield return index + OffsetIndex[j];
+ }
+
+ index += IndexStride;
+ }
+ }
+
+ public BufferHandle GetRepeatingBuffer(int vertexCount, out int indexCount)
+ {
+ int primitiveCount = GetPrimitiveCount(vertexCount);
+ indexCount = primitiveCount * PrimitiveVerticesOut;
+
+ int expectedSize = primitiveCount * OffsetIndex.Length;
+
+ if (expectedSize <= _currentSize && _repeatingBuffer != BufferHandle.Null)
+ {
+ return _repeatingBuffer;
+ }
+
+ // Expand the repeating pattern to the number of requested primitives.
+ BufferHandle newBuffer = _gd.BufferManager.CreateWithHandle(_gd, expectedSize * sizeof(int));
+
+ // Copy the old data to the new one.
+ if (_repeatingBuffer != BufferHandle.Null)
+ {
+ _gd.Pipeline.CopyBuffer(_repeatingBuffer, newBuffer, 0, 0, _currentSize * sizeof(int));
+ _gd.DeleteBuffer(_repeatingBuffer);
+ }
+
+ _repeatingBuffer = newBuffer;
+
+ // Add the additional repeats on top.
+ int newPrimitives = primitiveCount;
+ int oldPrimitives = (_currentSize) / OffsetIndex.Length;
+
+ int[] newData;
+
+ newPrimitives -= oldPrimitives;
+ newData = new int[expectedSize - _currentSize];
+
+ int outOffset = 0;
+ int index = oldPrimitives * IndexStride + BaseIndex;
+
+ for (int i = 0; i < newPrimitives; i++)
+ {
+ if (RepeatStart)
+ {
+ // Used for triangle fan
+ newData[outOffset++] = 0;
+ }
+
+ for (int j = RepeatStart ? 1 : 0; j < OffsetIndex.Length; j++)
+ {
+ newData[outOffset++] = index + OffsetIndex[j];
+ }
+
+ index += IndexStride;
+ }
+
+ _gd.SetBufferData(newBuffer, _currentSize * sizeof(int), MemoryMarshal.Cast<int, byte>(newData));
+ _currentSize = expectedSize;
+
+ return newBuffer;
+ }
+
+ public void Dispose()
+ {
+ if (_repeatingBuffer != BufferHandle.Null)
+ {
+ _gd.DeleteBuffer(_repeatingBuffer);
+ _repeatingBuffer = BufferHandle.Null;
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/IndexBufferState.cs b/src/Ryujinx.Graphics.Vulkan/IndexBufferState.cs
new file mode 100644
index 00000000..75b18456
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/IndexBufferState.cs
@@ -0,0 +1,161 @@
+using Silk.NET.Vulkan;
+
+namespace Ryujinx.Graphics.Vulkan
+{
    /// <summary>
    /// Tracks the current index buffer binding (GAL handle, range and index type)
    /// and performs the actual Vulkan bind, converting the index data when the
    /// requested type or topology is not natively supported.
    /// </summary>
    internal struct IndexBufferState
    {
        /// <summary>A state representing "no index buffer bound" (null handle, I16 type).</summary>
        public static IndexBufferState Null => new IndexBufferState(GAL.BufferHandle.Null, 0, 0);

        private readonly int _offset;
        private readonly int _size;
        private readonly IndexType _type;

        private readonly GAL.BufferHandle _handle;

        // The Vulkan buffer that was most recently bound for this state.
        // Used by BoundEquals/Swap to track buffer replacement.
        private Auto<DisposableBuffer> _buffer;

        public IndexBufferState(GAL.BufferHandle handle, int offset, int size, IndexType type)
        {
            _handle = handle;
            _offset = offset;
            _size = size;
            _type = type;
            _buffer = null;
        }

        // Overload defaulting to 16-bit indices; used by the Null state.
        public IndexBufferState(GAL.BufferHandle handle, int offset, int size)
        {
            _handle = handle;
            _offset = offset;
            _size = size;
            _type = IndexType.Uint16;
            _buffer = null;
        }

        /// <summary>
        /// Binds the index buffer on the given command buffer, converting U8 indices
        /// to U16 when the device does not support VK_EXT_index_type_uint8.
        /// </summary>
        public void BindIndexBuffer(VulkanRenderer gd, CommandBufferScoped cbs)
        {
            Auto<DisposableBuffer> autoBuffer;
            int offset, size;
            IndexType type = _type;

            if (_type == IndexType.Uint8Ext && !gd.Capabilities.SupportsIndexTypeUint8)
            {
                // Index type is not supported. Convert to I16.
                autoBuffer = gd.BufferManager.GetBufferI8ToI16(cbs, _handle, _offset, _size);

                type = IndexType.Uint16;
                offset = 0;
                size = _size * 2;
            }
            else
            {
                autoBuffer = gd.BufferManager.GetBuffer(cbs.CommandBuffer, _handle, false, out int bufferSize);

                // An offset beyond the buffer end would be an invalid bind; skip it.
                if (_offset >= bufferSize)
                {
                    autoBuffer = null;
                }

                offset = _offset;
                size = _size;
            }

            _buffer = autoBuffer;

            if (autoBuffer != null)
            {
                gd.Api.CmdBindIndexBuffer(cbs.CommandBuffer, autoBuffer.Get(cbs, offset, size).Value, (ulong)offset, type);
            }
        }

        /// <summary>
        /// Binds an index buffer whose indices were expanded with the given topology
        /// conversion pattern (always producing 32-bit indices).
        /// </summary>
        public void BindConvertedIndexBuffer(
            VulkanRenderer gd,
            CommandBufferScoped cbs,
            int firstIndex,
            int indexCount,
            int convertedCount,
            IndexBufferPattern pattern)
        {
            Auto<DisposableBuffer> autoBuffer;

            // Convert the index buffer using the given pattern.
            int indexSize = GetIndexSize();

            int firstIndexOffset = firstIndex * indexSize;

            autoBuffer = gd.BufferManager.GetBufferTopologyConversion(cbs, _handle, _offset + firstIndexOffset, indexCount * indexSize, pattern, indexSize);

            // Converted indices are always 32-bit (4 bytes each).
            int size = convertedCount * 4;

            _buffer = autoBuffer;

            if (autoBuffer != null)
            {
                gd.Api.CmdBindIndexBuffer(cbs.CommandBuffer, autoBuffer.Get(cbs, 0, size).Value, 0, IndexType.Uint32);
            }
        }

        /// <summary>
        /// Binds a topology-converted index buffer for an indirect draw, also
        /// converting the indirect parameter buffer to match.
        /// </summary>
        /// <returns>The converted indirect parameters buffer</returns>
        public Auto<DisposableBuffer> BindConvertedIndexBufferIndirect(
            VulkanRenderer gd,
            CommandBufferScoped cbs,
            GAL.BufferRange indirectBuffer,
            GAL.BufferRange drawCountBuffer,
            IndexBufferPattern pattern,
            bool hasDrawCount,
            int maxDrawCount,
            int indirectDataStride)
        {
            // Convert the index buffer using the given pattern.
            int indexSize = GetIndexSize();

            (var indexBufferAuto, var indirectBufferAuto) = gd.BufferManager.GetBufferTopologyConversionIndirect(
                gd,
                cbs,
                new GAL.BufferRange(_handle, _offset, _size),
                indirectBuffer,
                drawCountBuffer,
                pattern,
                indexSize,
                hasDrawCount,
                maxDrawCount,
                indirectDataStride);

            int convertedCount = pattern.GetConvertedCount(_size / indexSize);
            int size = convertedCount * 4;

            _buffer = indexBufferAuto;

            if (indexBufferAuto != null)
            {
                gd.Api.CmdBindIndexBuffer(cbs.CommandBuffer, indexBufferAuto.Get(cbs, 0, size).Value, 0, IndexType.Uint32);
            }

            return indirectBufferAuto;
        }

        // Size in bytes of one index of the current type.
        // The default arm covers Uint8Ext (1 byte).
        private int GetIndexSize()
        {
            return _type switch
            {
                IndexType.Uint32 => 4,
                IndexType.Uint16 => 2,
                _ => 1,
            };
        }

        /// <summary>Returns true if the given buffer is the one currently bound by this state.</summary>
        public bool BoundEquals(Auto<DisposableBuffer> buffer)
        {
            return _buffer == buffer;
        }

        /// <summary>
        /// Replaces the tracked buffer reference when a buffer is swapped out
        /// (e.g. re-allocated at a different memory type), fixing up reference counts.
        /// </summary>
        public void Swap(Auto<DisposableBuffer> from, Auto<DisposableBuffer> to)
        {
            if (_buffer == from)
            {
                _buffer.DecrementReferenceCount();
                to.IncrementReferenceCount();

                _buffer = to;
            }
        }
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/MemoryAllocation.cs b/src/Ryujinx.Graphics.Vulkan/MemoryAllocation.cs
new file mode 100644
index 00000000..76de1296
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/MemoryAllocation.cs
@@ -0,0 +1,37 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct MemoryAllocation : IDisposable
+ {
+ private readonly MemoryAllocatorBlockList _owner;
+ private readonly MemoryAllocatorBlockList.Block _block;
+
+ public DeviceMemory Memory { get; }
+ public IntPtr HostPointer { get;}
+ public ulong Offset { get; }
+ public ulong Size { get; }
+
+ public MemoryAllocation(
+ MemoryAllocatorBlockList owner,
+ MemoryAllocatorBlockList.Block block,
+ DeviceMemory memory,
+ IntPtr hostPointer,
+ ulong offset,
+ ulong size)
+ {
+ _owner = owner;
+ _block = block;
+ Memory = memory;
+ HostPointer = hostPointer;
+ Offset = offset;
+ Size = size;
+ }
+
+ public void Dispose()
+ {
+ _owner.Free(_block, Offset, Size);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/MemoryAllocator.cs b/src/Ryujinx.Graphics.Vulkan/MemoryAllocator.cs
new file mode 100644
index 00000000..3139e209
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/MemoryAllocator.cs
@@ -0,0 +1,101 @@
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class MemoryAllocator : IDisposable
+ {
+ private ulong MaxDeviceMemoryUsageEstimate = 16UL * 1024 * 1024 * 1024;
+
+ private readonly Vk _api;
+ private readonly VulkanPhysicalDevice _physicalDevice;
+ private readonly Device _device;
+ private readonly List<MemoryAllocatorBlockList> _blockLists;
+ private readonly int _blockAlignment;
+
+ public MemoryAllocator(Vk api, VulkanPhysicalDevice physicalDevice, Device device)
+ {
+ _api = api;
+ _physicalDevice = physicalDevice;
+ _device = device;
+ _blockLists = new List<MemoryAllocatorBlockList>();
+ _blockAlignment = (int)Math.Min(int.MaxValue, MaxDeviceMemoryUsageEstimate / (ulong)_physicalDevice.PhysicalDeviceProperties.Limits.MaxMemoryAllocationCount);
+ }
+
+ public MemoryAllocation AllocateDeviceMemory(
+ MemoryRequirements requirements,
+ MemoryPropertyFlags flags = 0,
+ bool isBuffer = false)
+ {
+ int memoryTypeIndex = FindSuitableMemoryTypeIndex(requirements.MemoryTypeBits, flags);
+ if (memoryTypeIndex < 0)
+ {
+ return default;
+ }
+
+ bool map = flags.HasFlag(MemoryPropertyFlags.HostVisibleBit);
+ return Allocate(memoryTypeIndex, requirements.Size, requirements.Alignment, map, isBuffer);
+ }
+
+ private MemoryAllocation Allocate(int memoryTypeIndex, ulong size, ulong alignment, bool map, bool isBuffer)
+ {
+ for (int i = 0; i < _blockLists.Count; i++)
+ {
+ var bl = _blockLists[i];
+ if (bl.MemoryTypeIndex == memoryTypeIndex && bl.ForBuffer == isBuffer)
+ {
+ lock (bl)
+ {
+ return bl.Allocate(size, alignment, map);
+ }
+ }
+ }
+
+ var newBl = new MemoryAllocatorBlockList(_api, _device, memoryTypeIndex, _blockAlignment, isBuffer);
+ _blockLists.Add(newBl);
+ return newBl.Allocate(size, alignment, map);
+ }
+
+ private int FindSuitableMemoryTypeIndex(
+ uint memoryTypeBits,
+ MemoryPropertyFlags flags)
+ {
+ for (int i = 0; i < _physicalDevice.PhysicalDeviceMemoryProperties.MemoryTypeCount; i++)
+ {
+ var type = _physicalDevice.PhysicalDeviceMemoryProperties.MemoryTypes[i];
+
+ if ((memoryTypeBits & (1 << i)) != 0)
+ {
+ if (type.PropertyFlags.HasFlag(flags))
+ {
+ return i;
+ }
+ }
+ }
+
+ return -1;
+ }
+
+ public static bool IsDeviceMemoryShared(VulkanPhysicalDevice physicalDevice)
+ {
+ for (int i = 0; i < physicalDevice.PhysicalDeviceMemoryProperties.MemoryHeapCount; i++)
+ {
+ if (!physicalDevice.PhysicalDeviceMemoryProperties.MemoryHeaps[i].Flags.HasFlag(MemoryHeapFlags.DeviceLocalBit))
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ public void Dispose()
+ {
+ for (int i = 0; i < _blockLists.Count; i++)
+ {
+ _blockLists[i].Dispose();
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/MemoryAllocatorBlockList.cs b/src/Ryujinx.Graphics.Vulkan/MemoryAllocatorBlockList.cs
new file mode 100644
index 00000000..e564cb26
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/MemoryAllocatorBlockList.cs
@@ -0,0 +1,282 @@
+using Ryujinx.Common;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.Graphics.Vulkan
+{
    /// <summary>
    /// Manages a list of large device memory blocks for one memory type, sub-allocating
    /// ranges out of them with a first-fit strategy and coalescing freed ranges.
    /// </summary>
    class MemoryAllocatorBlockList : IDisposable
    {
        // Sentinel returned by Block.Allocate when no free range can satisfy a request.
        private const ulong InvalidOffset = ulong.MaxValue;

        /// <summary>
        /// One vkAllocateMemory block, tracking its free ranges sorted by offset.
        /// </summary>
        public class Block : IComparable<Block>
        {
            public DeviceMemory Memory { get; private set; }
            public IntPtr HostPointer { get; private set; }
            public ulong Size { get; }

            // A block is considered mapped while it has a valid host pointer.
            public bool Mapped => HostPointer != IntPtr.Zero;

            // Free range inside the block; ordered by offset so BinarySearch
            // finds the insertion point for coalescing.
            private readonly struct Range : IComparable<Range>
            {
                public ulong Offset { get; }
                public ulong Size { get; }

                public Range(ulong offset, ulong size)
                {
                    Offset = offset;
                    Size = size;
                }

                public int CompareTo(Range other)
                {
                    return Offset.CompareTo(other.Offset);
                }
            }

            private readonly List<Range> _freeRanges;

            public Block(DeviceMemory memory, IntPtr hostPointer, ulong size)
            {
                Memory = memory;
                HostPointer = hostPointer;
                Size = size;
                // A new block starts out as a single free range covering everything.
                _freeRanges = new List<Range>
                {
                    new Range(0, size)
                };
            }

            /// <summary>
            /// First-fit allocation of an aligned range inside this block.
            /// </summary>
            /// <returns>Aligned offset of the allocation, or <see cref="InvalidOffset"/> if it does not fit</returns>
            public ulong Allocate(ulong size, ulong alignment)
            {
                for (int i = 0; i < _freeRanges.Count; i++)
                {
                    var range = _freeRanges[i];

                    ulong alignedOffset = BitUtils.AlignUp<ulong>(range.Offset, alignment);
                    ulong sizeDelta = alignedOffset - range.Offset;
                    ulong usableSize = range.Size - sizeDelta;

                    // sizeDelta < range.Size guards against the aligned offset
                    // falling past the end of the range (usableSize underflow).
                    if (sizeDelta < range.Size && usableSize >= size)
                    {
                        _freeRanges.RemoveAt(i);

                        // Give back the alignment padding before the allocation...
                        if (sizeDelta != 0)
                        {
                            InsertFreeRange(range.Offset, sizeDelta);
                        }

                        // ...and the leftover space after it.
                        ulong endOffset = range.Offset + range.Size;
                        ulong remainingSize = endOffset - (alignedOffset + size);
                        if (remainingSize != 0)
                        {
                            InsertFreeRange(endOffset - remainingSize, remainingSize);
                        }

                        return alignedOffset;
                    }
                }

                return InvalidOffset;
            }

            /// <summary>
            /// Returns a previously allocated range to the free list, merging with neighbors.
            /// </summary>
            public void Free(ulong offset, ulong size)
            {
                InsertFreeRangeComingled(offset, size);
            }

            // Inserts a free range at its sorted position, without merging.
            private void InsertFreeRange(ulong offset, ulong size)
            {
                var range = new Range(offset, size);
                int index = _freeRanges.BinarySearch(range);
                if (index < 0)
                {
                    index = ~index;
                }

                _freeRanges.Insert(index, range);
            }

            // Inserts a free range, merging it with adjacent free ranges
            // (the one starting at its end, and the one ending at its start).
            private void InsertFreeRangeComingled(ulong offset, ulong size)
            {
                ulong endOffset = offset + size;
                var range = new Range(offset, size);
                int index = _freeRanges.BinarySearch(range);
                if (index < 0)
                {
                    index = ~index;
                }

                // Merge with the following range if it starts exactly where this one ends.
                if (index < _freeRanges.Count && _freeRanges[index].Offset == endOffset)
                {
                    endOffset = _freeRanges[index].Offset + _freeRanges[index].Size;
                    _freeRanges.RemoveAt(index);
                }

                // Merge with the preceding range if it ends exactly where this one starts.
                if (index > 0 && _freeRanges[index - 1].Offset + _freeRanges[index - 1].Size == offset)
                {
                    offset = _freeRanges[index - 1].Offset;
                    _freeRanges.RemoveAt(--index);
                }

                range = new Range(offset, endOffset - offset);

                _freeRanges.Insert(index, range);
            }

            /// <summary>
            /// True if no allocations remain (the block is a single free range of full size).
            /// </summary>
            public bool IsTotallyFree()
            {
                if (_freeRanges.Count == 1 && _freeRanges[0].Size == Size)
                {
                    Debug.Assert(_freeRanges[0].Offset == 0);
                    return true;
                }

                return false;
            }

            // Blocks are ordered by size so InsertBlock keeps the list sorted
            // and Allocate tries smaller blocks first.
            public int CompareTo(Block other)
            {
                return Size.CompareTo(other.Size);
            }

            /// <summary>
            /// Unmaps (if mapped) and frees the underlying device memory.
            /// </summary>
            public unsafe void Destroy(Vk api, Device device)
            {
                if (Mapped)
                {
                    api.UnmapMemory(device, Memory);
                    HostPointer = IntPtr.Zero;
                }

                if (Memory.Handle != 0)
                {
                    api.FreeMemory(device, Memory, null);
                    Memory = default;
                }
            }
        }

        // Blocks sorted by size (see Block.CompareTo).
        private readonly List<Block> _blocks;

        private readonly Vk _api;
        private readonly Device _device;

        /// <summary>Vulkan memory type index this list allocates from.</summary>
        public int MemoryTypeIndex { get; }

        /// <summary>True if this list serves buffer memory, false for image memory.</summary>
        public bool ForBuffer { get; }

        // Minimum granularity (in bytes) that new blocks are rounded up to.
        private readonly int _blockAlignment;

        public MemoryAllocatorBlockList(Vk api, Device device, int memoryTypeIndex, int blockAlignment, bool forBuffer)
        {
            _blocks = new List<Block>();
            _api = api;
            _device = device;
            MemoryTypeIndex = memoryTypeIndex;
            ForBuffer = forBuffer;
            _blockAlignment = blockAlignment;
        }

        /// <summary>
        /// Allocates a range from an existing block, or allocates a new device memory
        /// block (optionally persistently mapped) when none fits.
        /// </summary>
        /// <param name="size">Size of the allocation in bytes</param>
        /// <param name="alignment">Required alignment (must fit in a positive int)</param>
        /// <param name="map">Whether the memory should be host-mapped</param>
        /// <exception cref="ArgumentOutOfRangeException">Thrown when the alignment is zero or too large</exception>
        public unsafe MemoryAllocation Allocate(ulong size, ulong alignment, bool map)
        {
            // Ensure we have a sane alignment value.
            if ((ulong)(int)alignment != alignment || (int)alignment <= 0)
            {
                throw new ArgumentOutOfRangeException(nameof(alignment), $"Invalid alignment 0x{alignment:X}.");
            }

            for (int i = 0; i < _blocks.Count; i++)
            {
                var block = _blocks[i];

                // Only reuse blocks with a matching mapped state, so unmapped
                // requests never land in (and pin) a mapped block and vice versa.
                if (block.Mapped == map && block.Size >= size)
                {
                    ulong offset = block.Allocate(size, alignment);
                    if (offset != InvalidOffset)
                    {
                        return new MemoryAllocation(this, block, block.Memory, GetHostPointer(block, offset), offset, size);
                    }
                }
            }

            ulong blockAlignedSize = BitUtils.AlignUp<ulong>(size, (ulong)_blockAlignment);

            var memoryAllocateInfo = new MemoryAllocateInfo()
            {
                SType = StructureType.MemoryAllocateInfo,
                AllocationSize = blockAlignedSize,
                MemoryTypeIndex = (uint)MemoryTypeIndex
            };

            _api.AllocateMemory(_device, memoryAllocateInfo, null, out var deviceMemory).ThrowOnError();

            IntPtr hostPointer = IntPtr.Zero;

            if (map)
            {
                // Persistently map the whole block; sub-allocations derive their
                // host pointers from this base mapping.
                unsafe
                {
                    void* pointer = null;
                    _api.MapMemory(_device, deviceMemory, 0, blockAlignedSize, 0, ref pointer).ThrowOnError();
                    hostPointer = (IntPtr)pointer;
                }
            }

            var newBlock = new Block(deviceMemory, hostPointer, blockAlignedSize);

            InsertBlock(newBlock);

            // A freshly created block is at least as large as the request, so this cannot fail.
            ulong newBlockOffset = newBlock.Allocate(size, alignment);
            Debug.Assert(newBlockOffset != InvalidOffset);

            return new MemoryAllocation(this, newBlock, deviceMemory, GetHostPointer(newBlock, newBlockOffset), newBlockOffset, size);
        }

        // Host pointer for a sub-range, or zero when the block is not mapped.
        private static IntPtr GetHostPointer(Block block, ulong offset)
        {
            if (block.HostPointer == IntPtr.Zero)
            {
                return IntPtr.Zero;
            }

            return (IntPtr)((nuint)(nint)block.HostPointer + offset);
        }

        /// <summary>
        /// Returns a range to its block; destroys the block once it becomes empty.
        /// </summary>
        public unsafe void Free(Block block, ulong offset, ulong size)
        {
            block.Free(offset, size);

            if (block.IsTotallyFree())
            {
                for (int i = 0; i < _blocks.Count; i++)
                {
                    if (_blocks[i] == block)
                    {
                        _blocks.RemoveAt(i);
                        break;
                    }
                }

                block.Destroy(_api, _device);
            }
        }

        // Keeps _blocks sorted by size (Block.CompareTo) on insertion.
        private void InsertBlock(Block block)
        {
            int index = _blocks.BinarySearch(block);
            if (index < 0)
            {
                index = ~index;
            }

            _blocks.Insert(index, block);
        }

        public unsafe void Dispose()
        {
            for (int i = 0; i < _blocks.Count; i++)
            {
                _blocks[i].Destroy(_api, _device);
            }
        }
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/MoltenVK/MVKConfiguration.cs b/src/Ryujinx.Graphics.Vulkan/MoltenVK/MVKConfiguration.cs
new file mode 100644
index 00000000..4fbae86e
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/MoltenVK/MVKConfiguration.cs
@@ -0,0 +1,104 @@
+using System;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan.MoltenVK
+{
    /// <summary>Logging verbosity levels of the native MoltenVK configuration. Values mirror the native enum and must not change.</summary>
    enum MVKConfigLogLevel : int
    {
        None = 0,
        Error = 1,
        Warning = 2,
        Info = 3,
        Debug = 4
    }
+
    /// <summary>Vulkan call tracing modes of the native MoltenVK configuration. Values mirror the native enum and must not change.</summary>
    enum MVKConfigTraceVulkanCalls : int
    {
        None = 0,
        Enter = 1,
        EnterExit = 2,
        Duration = 3
    }
+
    /// <summary>Automatic Metal GPU capture scopes of the native MoltenVK configuration. Values mirror the native enum and must not change.</summary>
    enum MVKConfigAutoGPUCaptureScope : int
    {
        None = 0,
        Device = 1,
        Frame = 2
    }
+
    /// <summary>
    /// Bit flags selecting which extension groups MoltenVK advertises.
    /// Values mirror the native enum and must not change
    /// (note: <c>All</c> is the native flag value 0x1, not a combination of the others).
    /// </summary>
    [Flags]
    enum MVKConfigAdvertiseExtensions : int
    {
        All = 0x00000001,
        MoltenVK = 0x00000002,
        WSI = 0x00000004,
        Portability = 0x00000008
    }
+
    /// <summary>
    /// VkSemaphore implementation strategies of the native MoltenVK configuration.
    /// Names and values mirror the native enum and must not change.
    /// </summary>
    enum MVKVkSemaphoreSupportStyle : int
    {
        MVK_CONFIG_VK_SEMAPHORE_SUPPORT_STYLE_SINGLE_QUEUE = 0,
        MVK_CONFIG_VK_SEMAPHORE_SUPPORT_STYLE_METAL_EVENTS_WHERE_SAFE = 1,
        MVK_CONFIG_VK_SEMAPHORE_SUPPORT_STYLE_METAL_EVENTS = 2,
        MVK_CONFIG_VK_SEMAPHORE_SUPPORT_STYLE_CALLBACK = 3,
        MVK_CONFIG_VK_SEMAPHORE_SUPPORT_STYLE_MAX_ENUM = 0x7FFFFFFF
    }
+
+ readonly struct Bool32
+ {
+ uint Value { get; }
+
+ public Bool32(uint value)
+ {
+ Value = value;
+ }
+
+ public Bool32(bool value)
+ {
+ Value = value ? 1u : 0u;
+ }
+
+ public static implicit operator bool(Bool32 val) => val.Value == 1;
+ public static implicit operator Bool32(bool val) => new Bool32(val);
+ }
+
    /// <summary>
    /// Managed mirror of the native MoltenVK MVKConfiguration struct, read and written
    /// via vkGet/SetMoltenVKConfigurationMVK (see MVKInitialization).
    /// Field order and types define the native ABI layout — do not reorder or resize.
    /// </summary>
    [StructLayout(LayoutKind.Sequential)]
    struct MVKConfiguration
    {
        public Bool32 DebugMode;
        public Bool32 ShaderConversionFlipVertexY;
        public Bool32 SynchronousQueueSubmits;
        public Bool32 PrefillMetalCommandBuffers;
        public uint MaxActiveMetalCommandBuffersPerQueue;
        public Bool32 SupportLargeQueryPools;
        public Bool32 PresentWithCommandBuffer;
        public Bool32 SwapchainMagFilterUseNearest;
        public ulong MetalCompileTimeout;
        public Bool32 PerformanceTracking;
        public uint PerformanceLoggingFrameCount;
        public Bool32 DisplayWatermark;
        public Bool32 SpecializedQueueFamilies;
        public Bool32 SwitchSystemGPU;
        public Bool32 FullImageViewSwizzle;
        public uint DefaultGPUCaptureScopeQueueFamilyIndex;
        public uint DefaultGPUCaptureScopeQueueIndex;
        public Bool32 FastMathEnabled;
        public MVKConfigLogLevel LogLevel;
        public MVKConfigTraceVulkanCalls TraceVulkanCalls;
        public Bool32 ForceLowPowerGPU;
        public Bool32 SemaphoreUseMTLFence;
        public MVKVkSemaphoreSupportStyle SemaphoreSupportStyle;
        public MVKConfigAutoGPUCaptureScope AutoGPUCaptureScope;
        public IntPtr AutoGPUCaptureOutputFilepath;      // Native string pointer (const char*).
        public Bool32 Texture1DAs2D;
        public Bool32 PreallocateDescriptors;
        public Bool32 UseCommandPooling;
        public Bool32 UseMTLHeap;
        public Bool32 LogActivityPerformanceInline;
        public uint ApiVersionToAdvertise;
        public MVKConfigAdvertiseExtensions AdvertiseExtensions;
        public Bool32 ResumeLostDevice;
        public Bool32 UseMetalArgumentBuffers;
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/MoltenVK/MVKInitialization.cs b/src/Ryujinx.Graphics.Vulkan/MoltenVK/MVKInitialization.cs
new file mode 100644
index 00000000..5910d1aa
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/MoltenVK/MVKInitialization.cs
@@ -0,0 +1,31 @@
+using Silk.NET.Vulkan;
+using System;
+using System.Runtime.InteropServices;
+using System.Runtime.Versioning;
+
+namespace Ryujinx.Graphics.Vulkan.MoltenVK
+{
    /// <summary>
    /// Tweaks the global MoltenVK configuration before the Vulkan instance is created
    /// (macOS only; binds directly to libMoltenVK).
    /// </summary>
    [SupportedOSPlatform("macos")]
    public static partial class MVKInitialization
    {
        // Reads the current MoltenVK configuration. configSize is in/out: the size of the struct.
        [LibraryImport("libMoltenVK.dylib")]
        private static partial Result vkGetMoltenVKConfigurationMVK(IntPtr unusedInstance, out MVKConfiguration config, in IntPtr configSize);

        // Writes the MoltenVK configuration back.
        [LibraryImport("libMoltenVK.dylib")]
        private static partial Result vkSetMoltenVKConfigurationMVK(IntPtr unusedInstance, in MVKConfiguration config, in IntPtr configSize);

        /// <summary>
        /// Applies the MoltenVK settings used by the renderer: Metal argument buffers on,
        /// single-queue semaphore emulation, and asynchronous queue submits.
        /// </summary>
        public static void Initialize()
        {
            var configSize = (IntPtr)Marshal.SizeOf<MVKConfiguration>();

            // NOTE(review): the Result return values are not checked; a failed get would
            // leave `config` defaulted and the set below would overwrite the native defaults.
            vkGetMoltenVKConfigurationMVK(IntPtr.Zero, out MVKConfiguration config, configSize);

            config.UseMetalArgumentBuffers = true;

            config.SemaphoreSupportStyle = MVKVkSemaphoreSupportStyle.MVK_CONFIG_VK_SEMAPHORE_SUPPORT_STYLE_SINGLE_QUEUE;
            config.SynchronousQueueSubmits = false;

            vkSetMoltenVKConfigurationMVK(IntPtr.Zero, config, configSize);
        }
    }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/MultiFenceHolder.cs b/src/Ryujinx.Graphics.Vulkan/MultiFenceHolder.cs
new file mode 100644
index 00000000..9a9a3626
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/MultiFenceHolder.cs
@@ -0,0 +1,212 @@
+using Silk.NET.Vulkan;
+using System.Collections.Generic;
+using System.Linq;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Holder for multiple host GPU fences.
+ /// </summary>
+ class MultiFenceHolder
+ {
+ private static int BufferUsageTrackingGranularity = 4096;
+
+ private readonly Dictionary<FenceHolder, int> _fences;
+ private BufferUsageBitmap _bufferUsageBitmap;
+
+ /// <summary>
+ /// Creates a new instance of the multiple fence holder.
+ /// </summary>
+ public MultiFenceHolder()
+ {
+ _fences = new Dictionary<FenceHolder, int>();
+ }
+
+ /// <summary>
+ /// Creates a new instance of the multiple fence holder, with a given buffer size in mind.
+ /// </summary>
+ /// <param name="size">Size of the buffer</param>
+ public MultiFenceHolder(int size)
+ {
+ _fences = new Dictionary<FenceHolder, int>();
+ _bufferUsageBitmap = new BufferUsageBitmap(size, BufferUsageTrackingGranularity);
+ }
+
+ /// <summary>
+ /// Adds buffer usage information to the uses list.
+ /// </summary>
+ /// <param name="cbIndex">Index of the command buffer where the buffer is used</param>
+ /// <param name="offset">Offset of the buffer being used</param>
+ /// <param name="size">Size of the buffer region being used, in bytes</param>
+ public void AddBufferUse(int cbIndex, int offset, int size)
+ {
+ _bufferUsageBitmap.Add(cbIndex, offset, size);
+ }
+
+ /// <summary>
+ /// Removes all buffer usage information for a given command buffer.
+ /// </summary>
+ /// <param name="cbIndex">Index of the command buffer where the buffer is used</param>
+ public void RemoveBufferUses(int cbIndex)
+ {
+ _bufferUsageBitmap?.Clear(cbIndex);
+ }
+
+ /// <summary>
+ /// Checks if a given range of a buffer is being used by a command buffer still being processed by the GPU.
+ /// </summary>
+ /// <param name="cbIndex">Index of the command buffer where the buffer is used</param>
+ /// <param name="offset">Offset of the buffer being used</param>
+ /// <param name="size">Size of the buffer region being used, in bytes</param>
+ /// <returns>True if in use, false otherwise</returns>
+ public bool IsBufferRangeInUse(int cbIndex, int offset, int size)
+ {
+ return _bufferUsageBitmap.OverlapsWith(cbIndex, offset, size);
+ }
+
+ /// <summary>
+ /// Checks if a given range of a buffer is being used by any command buffer still being processed by the GPU.
+ /// </summary>
+ /// <param name="offset">Offset of the buffer being used</param>
+ /// <param name="size">Size of the buffer region being used, in bytes</param>
+ /// <returns>True if in use, false otherwise</returns>
+ public bool IsBufferRangeInUse(int offset, int size)
+ {
+ return _bufferUsageBitmap.OverlapsWith(offset, size);
+ }
+
+ /// <summary>
+ /// Adds a fence to the holder.
+ /// </summary>
+ /// <param name="cbIndex">Command buffer index of the command buffer that owns the fence</param>
+ /// <param name="fence">Fence to be added</param>
+ public void AddFence(int cbIndex, FenceHolder fence)
+ {
+ lock (_fences)
+ {
+ _fences.TryAdd(fence, cbIndex);
+ }
+ }
+
+ /// <summary>
+ /// Removes a fence from the holder.
+ /// </summary>
+ /// <param name="cbIndex">Command buffer index of the command buffer that owns the fence</param>
+ /// <param name="fence">Fence to be removed</param>
+ public void RemoveFence(int cbIndex, FenceHolder fence)
+ {
+ lock (_fences)
+ {
+ _fences.Remove(fence);
+ }
+ }
+
+ /// <summary>
+ /// Wait until all the fences on the holder are signaled.
+ /// </summary>
+ /// <param name="api">Vulkan API instance</param>
+ /// <param name="device">GPU device that the fences belongs to</param>
+ public void WaitForFences(Vk api, Device device)
+ {
+ WaitForFencesImpl(api, device, 0, 0, false, 0UL);
+ }
+
+ /// <summary>
+ /// Wait until all the fences on the holder with buffer uses overlapping the specified range are signaled.
+ /// </summary>
+ /// <param name="api">Vulkan API instance</param>
+ /// <param name="device">GPU device that the fences belongs to</param>
+ /// <param name="offset">Start offset of the buffer range</param>
+ /// <param name="size">Size of the buffer range in bytes</param>
+ public void WaitForFences(Vk api, Device device, int offset, int size)
+ {
+ WaitForFencesImpl(api, device, offset, size, false, 0UL);
+ }
+
+ /// <summary>
+ /// Wait until all the fences on the holder are signaled, or the timeout expires.
+ /// </summary>
+ /// <param name="api">Vulkan API instance</param>
+ /// <param name="device">GPU device that the fences belongs to</param>
+ /// <param name="timeout">Timeout in nanoseconds</param>
+ /// <returns>True if all fences were signaled, false otherwise</returns>
+ public bool WaitForFences(Vk api, Device device, ulong timeout)
+ {
+ return WaitForFencesImpl(api, device, 0, 0, true, timeout);
+ }
+
+ /// <summary>
+ /// Wait until all the fences on the holder with buffer uses overlapping the specified range are signaled.
+ /// </summary>
+ /// <param name="api">Vulkan API instance</param>
+ /// <param name="device">GPU device that the fences belongs to</param>
+ /// <param name="offset">Start offset of the buffer range</param>
+ /// <param name="size">Size of the buffer range in bytes</param>
+ /// <param name="hasTimeout">Indicates if <paramref name="timeout"/> should be used</param>
+ /// <param name="timeout">Timeout in nanoseconds</param>
+ /// <returns>True if all fences were signaled before the timeout expired, false otherwise</returns>
+ private bool WaitForFencesImpl(Vk api, Device device, int offset, int size, bool hasTimeout, ulong timeout)
+ {
+ FenceHolder[] fenceHolders;
+ Fence[] fences;
+
+ lock (_fences)
+ {
+ fenceHolders = size != 0 ? GetOverlappingFences(offset, size) : _fences.Keys.ToArray();
+ fences = new Fence[fenceHolders.Length];
+
+ for (int i = 0; i < fenceHolders.Length; i++)
+ {
+ fences[i] = fenceHolders[i].Get();
+ }
+ }
+
+ if (fences.Length == 0)
+ {
+ return true;
+ }
+
+ bool signaled = true;
+
+ if (hasTimeout)
+ {
+ signaled = FenceHelper.AllSignaled(api, device, fences, timeout);
+ }
+ else
+ {
+ FenceHelper.WaitAllIndefinitely(api, device, fences);
+ }
+
+ for (int i = 0; i < fenceHolders.Length; i++)
+ {
+ fenceHolders[i].Put();
+ }
+
+ return signaled;
+ }
+
+ /// <summary>
+ /// Gets fences to wait for use of a given buffer region.
+ /// </summary>
+ /// <param name="offset">Offset of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <returns>Fences for the specified region</returns>
+ private FenceHolder[] GetOverlappingFences(int offset, int size)
+ {
+ List<FenceHolder> overlapping = new List<FenceHolder>();
+
+ foreach (var kv in _fences)
+ {
+ var fence = kv.Key;
+ var ownerCbIndex = kv.Value;
+
+ if (_bufferUsageBitmap.OverlapsWith(ownerCbIndex, offset, size))
+ {
+ overlapping.Add(fence);
+ }
+ }
+
+ return overlapping.ToArray();
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/NativeArray.cs b/src/Ryujinx.Graphics.Vulkan/NativeArray.cs
new file mode 100644
index 00000000..3a851287
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/NativeArray.cs
@@ -0,0 +1,48 @@
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ unsafe class NativeArray<T> : IDisposable where T : unmanaged
+ {
+ public T* Pointer { get; private set; }
+ public int Length { get; }
+
+ public ref T this[int index]
+ {
+ get => ref Pointer[Checked(index)];
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private int Checked(int index)
+ {
+ if ((uint)index >= (uint)Length)
+ {
+ throw new IndexOutOfRangeException();
+ }
+
+ return index;
+ }
+
+ public NativeArray(int length)
+ {
+ Pointer = (T*)Marshal.AllocHGlobal(checked(length * Unsafe.SizeOf<T>()));
+ Length = length;
+ }
+
+ public Span<T> AsSpan()
+ {
+ return new Span<T>(Pointer, Length);
+ }
+
+ public void Dispose()
+ {
+ if (Pointer != null)
+ {
+ Marshal.FreeHGlobal((IntPtr)Pointer);
+ Pointer = null;
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PersistentFlushBuffer.cs b/src/Ryujinx.Graphics.Vulkan/PersistentFlushBuffer.cs
new file mode 100644
index 00000000..fca13c31
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PersistentFlushBuffer.cs
@@ -0,0 +1,89 @@
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ internal class PersistentFlushBuffer : IDisposable
+ {
+ private VulkanRenderer _gd;
+
+ private BufferHolder _flushStorage;
+
+ public PersistentFlushBuffer(VulkanRenderer gd)
+ {
+ _gd = gd;
+ }
+
+ private BufferHolder ResizeIfNeeded(int size)
+ {
+ var flushStorage = _flushStorage;
+
+ if (flushStorage == null || size > _flushStorage.Size)
+ {
+ if (flushStorage != null)
+ {
+ flushStorage.Dispose();
+ }
+
+ flushStorage = _gd.BufferManager.Create(_gd, size);
+ _flushStorage = flushStorage;
+ }
+
+ return flushStorage;
+ }
+
+ public Span<byte> GetBufferData(CommandBufferPool cbp, BufferHolder buffer, int offset, int size)
+ {
+ var flushStorage = ResizeIfNeeded(size);
+
+ using (var cbs = cbp.Rent())
+ {
+ var srcBuffer = buffer.GetBuffer(cbs.CommandBuffer);
+ var dstBuffer = flushStorage.GetBuffer(cbs.CommandBuffer);
+
+ BufferHolder.Copy(_gd, cbs, srcBuffer, dstBuffer, offset, 0, size);
+ }
+
+ flushStorage.WaitForFences();
+ return flushStorage.GetDataStorage(0, size);
+ }
+
+ public Span<byte> GetTextureData(CommandBufferPool cbp, TextureView view, int size)
+ {
+ GAL.TextureCreateInfo info = view.Info;
+
+ var flushStorage = ResizeIfNeeded(size);
+
+ using (var cbs = cbp.Rent())
+ {
+ var buffer = flushStorage.GetBuffer(cbs.CommandBuffer).Get(cbs).Value;
+ var image = view.GetImage().Get(cbs).Value;
+
+ view.CopyFromOrToBuffer(cbs.CommandBuffer, buffer, image, size, true, 0, 0, info.GetLayers(), info.Levels, singleSlice: false);
+ }
+
+ flushStorage.WaitForFences();
+ return flushStorage.GetDataStorage(0, size);
+ }
+
+ public Span<byte> GetTextureData(CommandBufferPool cbp, TextureView view, int size, int layer, int level)
+ {
+ var flushStorage = ResizeIfNeeded(size);
+
+ using (var cbs = cbp.Rent())
+ {
+ var buffer = flushStorage.GetBuffer(cbs.CommandBuffer).Get(cbs).Value;
+ var image = view.GetImage().Get(cbs).Value;
+
+ view.CopyFromOrToBuffer(cbs.CommandBuffer, buffer, image, size, true, layer, level, 1, 1, singleSlice: true);
+ }
+
+ flushStorage.WaitForFences();
+ return flushStorage.GetDataStorage(0, size);
+ }
+
+ public void Dispose()
+ {
+ _flushStorage.Dispose();
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineBase.cs b/src/Ryujinx.Graphics.Vulkan/PipelineBase.cs
new file mode 100644
index 00000000..c54d7980
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineBase.cs
@@ -0,0 +1,1742 @@
+using Ryujinx.Common;
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Shader;
+using Silk.NET.Vulkan;
+using System;
+using System.Linq;
+using System.Numerics;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class PipelineBase : IDisposable
+ {
+ public const int DescriptorSetLayouts = 4;
+
+ public const int UniformSetIndex = 0;
+ public const int StorageSetIndex = 1;
+ public const int TextureSetIndex = 2;
+ public const int ImageSetIndex = 3;
+
+ protected readonly VulkanRenderer Gd;
+ protected readonly Device Device;
+ public readonly PipelineCache PipelineCache;
+
+ public readonly AutoFlushCounter AutoFlush;
+
+ protected PipelineDynamicState DynamicState;
+ private PipelineState _newState;
+ private bool _stateDirty;
+ private GAL.PrimitiveTopology _topology;
+
+ private ulong _currentPipelineHandle;
+
+ protected Auto<DisposablePipeline> Pipeline;
+
+ protected PipelineBindPoint Pbp;
+
+ protected CommandBufferScoped Cbs;
+ protected CommandBufferScoped? PreloadCbs;
+ protected CommandBuffer CommandBuffer;
+
+ public CommandBufferScoped CurrentCommandBuffer => Cbs;
+
+ private ShaderCollection _program;
+
+ private Vector4<float>[] _renderScale = new Vector4<float>[73];
+ private int _fragmentScaleCount;
+
+ protected FramebufferParams FramebufferParams;
+ private Auto<DisposableFramebuffer> _framebuffer;
+ private Auto<DisposableRenderPass> _renderPass;
+ private int _writtenAttachmentCount;
+
+ private bool _framebufferUsingColorWriteMask;
+
+ private ITexture[] _preMaskColors;
+ private ITexture _preMaskDepthStencil;
+
+ private readonly DescriptorSetUpdater _descriptorSetUpdater;
+
+ private IndexBufferState _indexBuffer;
+ private IndexBufferPattern _indexBufferPattern;
+ private readonly BufferState[] _transformFeedbackBuffers;
+ private readonly VertexBufferState[] _vertexBuffers;
+ private ulong _vertexBuffersDirty;
+ protected Rectangle<int> ClearScissor;
+
+ public SupportBufferUpdater SupportBufferUpdater;
+ public IndexBufferPattern QuadsToTrisPattern;
+ public IndexBufferPattern TriFanToTrisPattern;
+
+ private bool _needsIndexBufferRebind;
+ private bool _needsTransformFeedbackBuffersRebind;
+
+ private bool _tfEnabled;
+ private bool _tfActive;
+
+ private PipelineColorBlendAttachmentState[] _storedBlend;
+
+ public ulong DrawCount { get; private set; }
+ public bool RenderPassActive { get; private set; }
+
+ // Creates the base pipeline: Vulkan pipeline cache, descriptor set updater,
+ // transform feedback / vertex buffer state arrays, and a dummy vertex buffer
+ // bound to binding 0 for attributes with no backing buffer.
+ public unsafe PipelineBase(VulkanRenderer gd, Device device)
+ {
+ Gd = gd;
+ Device = device;
+
+ AutoFlush = new AutoFlushCounter(gd);
+
+ var pipelineCacheCreateInfo = new PipelineCacheCreateInfo()
+ {
+ SType = StructureType.PipelineCacheCreateInfo
+ };
+
+ gd.Api.CreatePipelineCache(device, pipelineCacheCreateInfo, null, out PipelineCache).ThrowOnError();
+
+ _descriptorSetUpdater = new DescriptorSetUpdater(gd, this);
+
+ _transformFeedbackBuffers = new BufferState[Constants.MaxTransformFeedbackBuffers];
+ // Slot 0 is reserved for the zero/empty vertex buffer, hence the +1.
+ _vertexBuffers = new VertexBufferState[Constants.MaxVertexBuffers + 1];
+
+ const int EmptyVbSize = 16;
+
+ using var emptyVb = gd.BufferManager.Create(gd, EmptyVbSize);
+ emptyVb.SetData(0, new byte[EmptyVbSize]);
+ _vertexBuffers[0] = new VertexBufferState(emptyVb.GetBuffer(), 0, 0, EmptyVbSize, 0);
+ // Mark every vertex buffer slot dirty so the first draw rebinds them all.
+ _vertexBuffersDirty = ulong.MaxValue >> (64 - _vertexBuffers.Length);
+
+ ClearScissor = new Rectangle<int>(0, 0, 0xffff, 0xffff);
+
+ // Default render scale: X holds the scale factor (1.0), Y/Z/W unused here.
+ var defaultScale = new Vector4<float> { X = 1f, Y = 0f, Z = 0f, W = 0f };
+ new Span<Vector4<float>>(_renderScale).Fill(defaultScale);
+
+ _storedBlend = new PipelineColorBlendAttachmentState[Constants.MaxRenderTargets];
+
+ _newState.Initialize();
+ }
+
+ // Second-phase init: descriptor sets, support buffer, and the index buffer
+ // patterns used to emulate quads and triangle fans on hosts without them.
+ public void Initialize()
+ {
+ _descriptorSetUpdater.Initialize();
+
+ SupportBufferUpdater = new SupportBufferUpdater(Gd);
+ SupportBufferUpdater.UpdateRenderScale(_renderScale, 0, SupportBuffer.RenderScaleMaxCount);
+
+ QuadsToTrisPattern = new IndexBufferPattern(Gd, 4, 6, 0, new[] { 0, 1, 2, 0, 2, 3 }, 4, false);
+ TriFanToTrisPattern = new IndexBufferPattern(Gd, 3, 3, 2, new[] { int.MinValue, -1, 0 }, 1, true);
+ }
+
+ // Full read/write memory barrier between fragment shader stages, used to
+ // order fragment shader memory accesses within the command buffer.
+ public unsafe void Barrier()
+ {
+ MemoryBarrier memoryBarrier = new MemoryBarrier()
+ {
+ SType = StructureType.MemoryBarrier,
+ SrcAccessMask = AccessFlags.MemoryReadBit | AccessFlags.MemoryWriteBit,
+ DstAccessMask = AccessFlags.MemoryReadBit | AccessFlags.MemoryWriteBit
+ };
+
+ Gd.Api.CmdPipelineBarrier(
+ CommandBuffer,
+ PipelineStageFlags.FragmentShaderBit,
+ PipelineStageFlags.FragmentShaderBit,
+ 0,
+ 1,
+ memoryBarrier,
+ 0,
+ null,
+ 0,
+ null);
+ }
+
+ // Memory barrier making compute shader writes visible to all later commands.
+ public void ComputeBarrier()
+ {
+ MemoryBarrier memoryBarrier = new MemoryBarrier()
+ {
+ SType = StructureType.MemoryBarrier,
+ SrcAccessMask = AccessFlags.MemoryReadBit | AccessFlags.MemoryWriteBit,
+ DstAccessMask = AccessFlags.MemoryReadBit | AccessFlags.MemoryWriteBit
+ };
+
+ Gd.Api.CmdPipelineBarrier(
+ CommandBuffer,
+ PipelineStageFlags.ComputeShaderBit,
+ PipelineStageFlags.AllCommandsBit,
+ 0,
+ 1,
+ new ReadOnlySpan<MemoryBarrier>(memoryBarrier),
+ 0,
+ ReadOnlySpan<BufferMemoryBarrier>.Empty,
+ 0,
+ ReadOnlySpan<ImageMemoryBarrier>.Empty);
+ }
+
+ // Marks transform feedback as enabled; actual activation is deferred
+ // (see _tfActive and the pause/resume helpers used by the draw methods).
+ public void BeginTransformFeedback(GAL.PrimitiveTopology topology)
+ {
+ _tfEnabled = true;
+ }
+
+ // Fills a buffer range with a 32-bit value via vkCmdFillBuffer, bracketed by
+ // barriers that transition the range to transfer-write access and back.
+ public void ClearBuffer(BufferHandle destination, int offset, int size, uint value)
+ {
+ // Fill is a transfer op, not allowed inside a render pass.
+ EndRenderPass();
+
+ var dst = Gd.BufferManager.GetBuffer(CommandBuffer, destination, offset, size, true).Get(Cbs, offset, size).Value;
+
+ BufferHolder.InsertBufferBarrier(
+ Gd,
+ Cbs.CommandBuffer,
+ dst,
+ BufferHolder.DefaultAccessFlags,
+ AccessFlags.TransferWriteBit,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.TransferBit,
+ offset,
+ size);
+
+ Gd.Api.CmdFillBuffer(CommandBuffer, dst, (ulong)offset, (ulong)size, value);
+
+ BufferHolder.InsertBufferBarrier(
+ Gd,
+ Cbs.CommandBuffer,
+ dst,
+ AccessFlags.TransferWriteBit,
+ BufferHolder.DefaultAccessFlags,
+ PipelineStageFlags.TransferBit,
+ PipelineStageFlags.AllCommandsBit,
+ offset,
+ size);
+ }
+
+ // Clears one color attachment of the current framebuffer (scissored by
+ // ClearScissor) using vkCmdClearAttachments inside the render pass.
+ public unsafe void ClearRenderTargetColor(int index, int layer, int layerCount, ColorF color)
+ {
+ if (FramebufferParams == null || !FramebufferParams.IsValidColorAttachment(index))
+ {
+ return;
+ }
+
+ if (_renderPass == null)
+ {
+ CreateRenderPass();
+ }
+
+ BeginRenderPass();
+
+ var clearValue = new ClearValue(new ClearColorValue(color.Red, color.Green, color.Blue, color.Alpha));
+ var attachment = new ClearAttachment(ImageAspectFlags.ColorBit, (uint)index, clearValue);
+ var clearRect = FramebufferParams.GetClearRect(ClearScissor, layer, layerCount);
+
+ FramebufferParams.InsertClearBarrier(Cbs, index);
+
+ Gd.Api.CmdClearAttachments(CommandBuffer, 1, &attachment, 1, &clearRect);
+ }
+
+ // Clears the depth and/or stencil aspects of the current framebuffer.
+ // The stencil write mask is only honored as "zero vs non-zero".
+ public unsafe void ClearRenderTargetDepthStencil(int layer, int layerCount, float depthValue, bool depthMask, int stencilValue, int stencilMask)
+ {
+ // TODO: Use stencilMask (fully)
+
+ if (FramebufferParams == null || !FramebufferParams.HasDepthStencil)
+ {
+ return;
+ }
+
+ if (_renderPass == null)
+ {
+ CreateRenderPass();
+ }
+
+ BeginRenderPass();
+
+ var clearValue = new ClearValue(null, new ClearDepthStencilValue(depthValue, (uint)stencilValue));
+ var flags = depthMask ? ImageAspectFlags.DepthBit : 0;
+
+ if (stencilMask != 0)
+ {
+ flags |= ImageAspectFlags.StencilBit;
+ }
+
+ var attachment = new ClearAttachment(flags, 0, clearValue);
+ var clearRect = FramebufferParams.GetClearRect(ClearScissor, layer, layerCount);
+
+ FramebufferParams.InsertClearBarrierDS(Cbs);
+
+ Gd.Api.CmdClearAttachments(CommandBuffer, 1, &attachment, 1, &clearRect);
+ }
+
+ // Barrier making prior writes visible to indirect draw/dispatch argument
+ // reads (DrawIndirectBit / IndirectCommandReadBit).
+ public unsafe void CommandBufferBarrier()
+ {
+ MemoryBarrier memoryBarrier = new MemoryBarrier()
+ {
+ SType = StructureType.MemoryBarrier,
+ SrcAccessMask = BufferHolder.DefaultAccessFlags,
+ DstAccessMask = AccessFlags.IndirectCommandReadBit
+ };
+
+ Gd.Api.CmdPipelineBarrier(
+ CommandBuffer,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.DrawIndirectBit,
+ 0,
+ 1,
+ memoryBarrier,
+ 0,
+ null,
+ 0,
+ null);
+ }
+
+ // Buffer-to-buffer copy; ends the render pass first since copies are
+ // transfer operations.
+ public void CopyBuffer(BufferHandle source, BufferHandle destination, int srcOffset, int dstOffset, int size)
+ {
+ EndRenderPass();
+
+ var src = Gd.BufferManager.GetBuffer(CommandBuffer, source, srcOffset, size, false);
+ var dst = Gd.BufferManager.GetBuffer(CommandBuffer, destination, dstOffset, size, true);
+
+ BufferHolder.Copy(Gd, Cbs, src, dst, srcOffset, dstOffset, size);
+ }
+
+ // Flags every vertex buffer slot currently bound to the given buffer as
+ // dirty so it is rebound before the next draw.
+ public void DirtyVertexBuffer(Auto<DisposableBuffer> buffer)
+ {
+ for (int i = 0; i < _vertexBuffers.Length; i++)
+ {
+ if (_vertexBuffers[i].BoundEquals(buffer))
+ {
+ _vertexBuffersDirty |= 1UL << i;
+ }
+ }
+ }
+
+ // Requests an index buffer rebind if the given buffer is the one bound.
+ public void DirtyIndexBuffer(Auto<DisposableBuffer> buffer)
+ {
+ if (_indexBuffer.BoundEquals(buffer))
+ {
+ _needsIndexBufferRebind = true;
+ }
+ }
+
+ // Dispatches a compute workload; no-op if the current program failed to link.
+ public void DispatchCompute(int groupsX, int groupsY, int groupsZ)
+ {
+ if (!_program.IsLinked)
+ {
+ return;
+ }
+
+ EndRenderPass();
+ RecreatePipelineIfNeeded(PipelineBindPoint.Compute);
+
+ Gd.Api.CmdDispatch(CommandBuffer, (uint)groupsX, (uint)groupsY, (uint)groupsZ);
+ }
+
+ // Indirect compute dispatch; the 12 below is the size in bytes of
+ // VkDispatchIndirectCommand (3 x uint32).
+ public void DispatchComputeIndirect(Auto<DisposableBuffer> indirectBuffer, int indirectBufferOffset)
+ {
+ if (!_program.IsLinked)
+ {
+ return;
+ }
+
+ EndRenderPass();
+ RecreatePipelineIfNeeded(PipelineBindPoint.Compute);
+
+ Gd.Api.CmdDispatchIndirect(CommandBuffer, indirectBuffer.Get(Cbs, indirectBufferOffset, 12).Value, (ulong)indirectBufferOffset);
+ }
+
+ // Non-indexed draw. If the current topology is unsupported by the host
+ // (quads, tri-fan/polygon on some devices) it is emulated by binding a
+ // generated repeating index buffer and issuing an indexed draw instead.
+ public void Draw(int vertexCount, int instanceCount, int firstVertex, int firstInstance)
+ {
+ if (!_program.IsLinked)
+ {
+ return;
+ }
+
+ RecreatePipelineIfNeeded(PipelineBindPoint.Graphics);
+ BeginRenderPass();
+ DrawCount++;
+
+ if (Gd.TopologyUnsupported(_topology))
+ {
+ // Temporarily bind a conversion pattern as an index buffer.
+ _needsIndexBufferRebind = true;
+
+ IndexBufferPattern pattern = _topology switch
+ {
+ GAL.PrimitiveTopology.Quads => QuadsToTrisPattern,
+ GAL.PrimitiveTopology.TriangleFan or
+ GAL.PrimitiveTopology.Polygon => TriFanToTrisPattern,
+ _ => throw new NotSupportedException($"Unsupported topology: {_topology}")
+ };
+
+ BufferHandle handle = pattern.GetRepeatingBuffer(vertexCount, out int indexCount);
+ var buffer = Gd.BufferManager.GetBuffer(CommandBuffer, handle, false);
+
+ Gd.Api.CmdBindIndexBuffer(CommandBuffer, buffer.Get(Cbs, 0, indexCount * sizeof(int)).Value, 0, Silk.NET.Vulkan.IndexType.Uint32);
+
+ BeginRenderPass(); // May have been interrupted to set buffer data.
+ ResumeTransformFeedbackInternal();
+
+ Gd.Api.CmdDrawIndexed(CommandBuffer, (uint)indexCount, (uint)instanceCount, 0, firstVertex, (uint)firstInstance);
+ }
+ else
+ {
+ ResumeTransformFeedbackInternal();
+
+ Gd.Api.CmdDraw(CommandBuffer, (uint)vertexCount, (uint)instanceCount, (uint)firstVertex, (uint)firstInstance);
+ }
+ }
+
+ // Selects the topology-conversion pattern (or null) for the current
+ // topology and requests an index buffer rebind when it changes.
+ private void UpdateIndexBufferPattern()
+ {
+ IndexBufferPattern pattern = null;
+
+ if (Gd.TopologyUnsupported(_topology))
+ {
+ pattern = _topology switch
+ {
+ GAL.PrimitiveTopology.Quads => QuadsToTrisPattern,
+ GAL.PrimitiveTopology.TriangleFan or
+ GAL.PrimitiveTopology.Polygon => TriFanToTrisPattern,
+ _ => throw new NotSupportedException($"Unsupported topology: {_topology}")
+ };
+ }
+
+ if (_indexBufferPattern != pattern)
+ {
+ _indexBufferPattern = pattern;
+ _needsIndexBufferRebind = true;
+ }
+ }
+
+ // Indexed draw; when the topology requires conversion, the bound index
+ // buffer is converted via the active IndexBufferPattern before drawing.
+ public void DrawIndexed(int indexCount, int instanceCount, int firstIndex, int firstVertex, int firstInstance)
+ {
+ if (!_program.IsLinked)
+ {
+ return;
+ }
+
+ UpdateIndexBufferPattern();
+ RecreatePipelineIfNeeded(PipelineBindPoint.Graphics);
+ BeginRenderPass();
+ DrawCount++;
+
+ if (_indexBufferPattern != null)
+ {
+ // Convert the index buffer into a supported topology.
+ IndexBufferPattern pattern = _indexBufferPattern;
+
+ int convertedCount = pattern.GetConvertedCount(indexCount);
+
+ if (_needsIndexBufferRebind)
+ {
+ _indexBuffer.BindConvertedIndexBuffer(Gd, Cbs, firstIndex, indexCount, convertedCount, pattern);
+
+ _needsIndexBufferRebind = false;
+ }
+
+ BeginRenderPass(); // May have been interrupted to set buffer data.
+ ResumeTransformFeedbackInternal();
+
+ // firstIndex is baked into the converted buffer, so it is passed as 0 here.
+ Gd.Api.CmdDrawIndexed(CommandBuffer, (uint)convertedCount, (uint)instanceCount, 0, firstVertex, (uint)firstInstance);
+ }
+ else
+ {
+ ResumeTransformFeedbackInternal();
+
+ Gd.Api.CmdDrawIndexed(CommandBuffer, (uint)indexCount, (uint)instanceCount, (uint)firstIndex, firstVertex, (uint)firstInstance);
+ }
+ }
+
+ // Indexed indirect draw. With an active conversion pattern, both the index
+ // buffer and the indirect arguments are converted before drawing.
+ public void DrawIndexedIndirect(BufferRange indirectBuffer)
+ {
+ if (!_program.IsLinked)
+ {
+ return;
+ }
+
+ UpdateIndexBufferPattern();
+ RecreatePipelineIfNeeded(PipelineBindPoint.Graphics);
+ BeginRenderPass();
+ DrawCount++;
+
+ if (_indexBufferPattern != null)
+ {
+ // Convert the index buffer into a supported topology.
+ IndexBufferPattern pattern = _indexBufferPattern;
+
+ Auto<DisposableBuffer> indirectBufferAuto = _indexBuffer.BindConvertedIndexBufferIndirect(
+ Gd,
+ Cbs,
+ indirectBuffer,
+ BufferRange.Empty,
+ pattern,
+ false,
+ 1,
+ indirectBuffer.Size);
+
+ _needsIndexBufferRebind = false;
+
+ BeginRenderPass(); // May have been interrupted to set buffer data.
+ ResumeTransformFeedbackInternal();
+
+ Gd.Api.CmdDrawIndexedIndirect(CommandBuffer, indirectBufferAuto.Get(Cbs, 0, indirectBuffer.Size).Value, 0, 1, (uint)indirectBuffer.Size);
+ }
+ else
+ {
+ var buffer = Gd.BufferManager
+ .GetBuffer(CommandBuffer, indirectBuffer.Handle, indirectBuffer.Offset, indirectBuffer.Size, false)
+ .Get(Cbs, indirectBuffer.Offset, indirectBuffer.Size).Value;
+
+ ResumeTransformFeedbackInternal();
+
+ Gd.Api.CmdDrawIndexedIndirect(CommandBuffer, buffer, (ulong)indirectBuffer.Offset, 1, (uint)indirectBuffer.Size);
+ }
+ }
+
+ // Indexed indirect multi-draw with a GPU-side draw count. Falls back to a
+ // plain CmdDrawIndexedIndirect with maxDrawCount when the host lacks
+ // VK_KHR_draw_indirect_count support.
+ public void DrawIndexedIndirectCount(BufferRange indirectBuffer, BufferRange parameterBuffer, int maxDrawCount, int stride)
+ {
+ if (!_program.IsLinked)
+ {
+ return;
+ }
+
+ UpdateIndexBufferPattern();
+ RecreatePipelineIfNeeded(PipelineBindPoint.Graphics);
+ BeginRenderPass();
+ DrawCount++;
+
+ var countBuffer = Gd.BufferManager
+ .GetBuffer(CommandBuffer, parameterBuffer.Handle, parameterBuffer.Offset, parameterBuffer.Size, false)
+ .Get(Cbs, parameterBuffer.Offset, parameterBuffer.Size).Value;
+
+ if (_indexBufferPattern != null)
+ {
+ // Convert the index buffer into a supported topology.
+ IndexBufferPattern pattern = _indexBufferPattern;
+
+ Auto<DisposableBuffer> indirectBufferAuto = _indexBuffer.BindConvertedIndexBufferIndirect(
+ Gd,
+ Cbs,
+ indirectBuffer,
+ parameterBuffer,
+ pattern,
+ true,
+ maxDrawCount,
+ stride);
+
+ _needsIndexBufferRebind = false;
+
+ BeginRenderPass(); // May have been interrupted to set buffer data.
+ ResumeTransformFeedbackInternal();
+
+ if (Gd.Capabilities.SupportsIndirectParameters)
+ {
+ Gd.DrawIndirectCountApi.CmdDrawIndexedIndirectCount(
+ CommandBuffer,
+ indirectBufferAuto.Get(Cbs, 0, indirectBuffer.Size).Value,
+ 0,
+ countBuffer,
+ (ulong)parameterBuffer.Offset,
+ (uint)maxDrawCount,
+ (uint)stride);
+ }
+ else
+ {
+ // This is also fine because the indirect data conversion always zeros
+ // the entries that are past the current draw count.
+
+ Gd.Api.CmdDrawIndexedIndirect(
+ CommandBuffer,
+ indirectBufferAuto.Get(Cbs, 0, indirectBuffer.Size).Value,
+ 0,
+ (uint)maxDrawCount,
+ (uint)stride);
+ }
+
+ }
+ else
+ {
+ var buffer = Gd.BufferManager
+ .GetBuffer(CommandBuffer, indirectBuffer.Handle, indirectBuffer.Offset, indirectBuffer.Size, false)
+ .Get(Cbs, indirectBuffer.Offset, indirectBuffer.Size).Value;
+
+ ResumeTransformFeedbackInternal();
+
+ if (Gd.Capabilities.SupportsIndirectParameters)
+ {
+ Gd.DrawIndirectCountApi.CmdDrawIndexedIndirectCount(
+ CommandBuffer,
+ buffer,
+ (ulong)indirectBuffer.Offset,
+ countBuffer,
+ (ulong)parameterBuffer.Offset,
+ (uint)maxDrawCount,
+ (uint)stride);
+ }
+ else
+ {
+ // Not fully correct, but we can't do much better if the host does not support indirect count.
+ Gd.Api.CmdDrawIndexedIndirect(
+ CommandBuffer,
+ buffer,
+ (ulong)indirectBuffer.Offset,
+ (uint)maxDrawCount,
+ (uint)stride);
+ }
+ }
+ }
+
+ // Non-indexed indirect draw (single draw). Topology conversion for
+ // unsupported topologies is not implemented for this path.
+ public void DrawIndirect(BufferRange indirectBuffer)
+ {
+ if (!_program.IsLinked)
+ {
+ return;
+ }
+
+ // TODO: Support quads and other unsupported topologies.
+
+ RecreatePipelineIfNeeded(PipelineBindPoint.Graphics);
+ BeginRenderPass();
+ ResumeTransformFeedbackInternal();
+ DrawCount++;
+
+ var buffer = Gd.BufferManager
+ .GetBuffer(CommandBuffer, indirectBuffer.Handle, indirectBuffer.Offset, indirectBuffer.Size, false)
+ .Get(Cbs, indirectBuffer.Offset, indirectBuffer.Size).Value;
+
+ Gd.Api.CmdDrawIndirect(CommandBuffer, buffer, (ulong)indirectBuffer.Offset, 1, (uint)indirectBuffer.Size);
+ }
+
+ // Non-indexed indirect multi-draw with GPU-side count; requires
+ // VK_KHR_draw_indirect_count (throws when unavailable).
+ public void DrawIndirectCount(BufferRange indirectBuffer, BufferRange parameterBuffer, int maxDrawCount, int stride)
+ {
+ if (!Gd.Capabilities.SupportsIndirectParameters)
+ {
+ // TODO: Fallback for when this is not supported.
+ throw new NotSupportedException();
+ }
+
+ if (!_program.IsLinked)
+ {
+ return;
+ }
+
+ // TODO: Support quads and other unsupported topologies.
+
+ RecreatePipelineIfNeeded(PipelineBindPoint.Graphics);
+ BeginRenderPass();
+ ResumeTransformFeedbackInternal();
+ DrawCount++;
+
+ var buffer = Gd.BufferManager
+ .GetBuffer(CommandBuffer, indirectBuffer.Handle, indirectBuffer.Offset, indirectBuffer.Size, false)
+ .Get(Cbs, indirectBuffer.Offset, indirectBuffer.Size).Value;
+
+ var countBuffer = Gd.BufferManager
+ .GetBuffer(CommandBuffer, parameterBuffer.Handle, parameterBuffer.Offset, parameterBuffer.Size, false)
+ .Get(Cbs, parameterBuffer.Offset, parameterBuffer.Size).Value;
+
+ Gd.DrawIndirectCountApi.CmdDrawIndirectCount(
+ CommandBuffer,
+ buffer,
+ (ulong)indirectBuffer.Offset,
+ countBuffer,
+ (ulong)parameterBuffer.Offset,
+ (uint)maxDrawCount,
+ (uint)stride);
+ }
+
+ // Blits a texture region using the helper shader. Temporarily disables
+ // culling and depth/stencil state, then restores the previous state
+ // (including viewports) after the helper draw.
+ public void DrawTexture(ITexture texture, ISampler sampler, Extents2DF srcRegion, Extents2DF dstRegion)
+ {
+ if (texture is TextureView srcTexture)
+ {
+ SupportBufferUpdater.Commit();
+
+ var oldCullMode = _newState.CullMode;
+ var oldStencilTestEnable = _newState.StencilTestEnable;
+ var oldDepthTestEnable = _newState.DepthTestEnable;
+ var oldDepthWriteEnable = _newState.DepthWriteEnable;
+ var oldTopology = _newState.Topology;
+ var oldViewports = DynamicState.Viewports;
+ var oldViewportsCount = _newState.ViewportsCount;
+
+ _newState.CullMode = CullModeFlags.None;
+ _newState.StencilTestEnable = false;
+ _newState.DepthTestEnable = false;
+ _newState.DepthWriteEnable = false;
+ SignalStateChange();
+
+ Gd.HelperShader.DrawTexture(
+ Gd,
+ this,
+ srcTexture,
+ sampler,
+ srcRegion,
+ dstRegion);
+
+ _newState.CullMode = oldCullMode;
+ _newState.StencilTestEnable = oldStencilTestEnable;
+ _newState.DepthTestEnable = oldDepthTestEnable;
+ _newState.DepthWriteEnable = oldDepthWriteEnable;
+ _newState.Topology = oldTopology;
+
+ DynamicState.SetViewports(ref oldViewports, oldViewportsCount);
+
+ _newState.ViewportsCount = oldViewportsCount;
+ SignalStateChange();
+ }
+ }
+
+ // Pauses and disables transform feedback.
+ public void EndTransformFeedback()
+ {
+ PauseTransformFeedbackInternal();
+ _tfEnabled = false;
+ }
+
+ // Divisor applied to counter results; samples-passed scales with the
+ // square of the render scale factor.
+ public double GetCounterDivisor(CounterType type)
+ {
+ if (type == CounterType.SamplesPassed)
+ {
+ return _renderScale[0].X * _renderScale[0].X;
+ }
+
+ return 1;
+ }
+
+ // True when the given command buffer is the one currently being recorded.
+ public bool IsCommandBufferActive(CommandBuffer cb)
+ {
+ return CommandBuffer.Handle == cb.Handle;
+ }
+
+ public void SetAlphaTest(bool enable, float reference, GAL.CompareOp op)
+ {
+ // This is currently handled using shader specialization, as Vulkan does not support alpha test.
+ // In the future, we may want to use this to write the reference value into the support buffer,
+ // to avoid creating one version of the shader per reference value used.
+ }
+
+ // Applies an advanced blend equation (VK_EXT_blend_operation_advanced) to
+ // attachment 0 and clears blend on the remaining attachments, preserving
+ // color write masks. Attachments with a zero write mask have their blend
+ // state stashed in _storedBlend to keep pipeline cache keys stable.
+ public void SetBlendState(AdvancedBlendDescriptor blend)
+ {
+ for (int index = 0; index < Constants.MaxRenderTargets; index++)
+ {
+ ref var vkBlend = ref _newState.Internal.ColorBlendAttachmentState[index];
+
+ if (index == 0)
+ {
+ var blendOp = blend.Op.Convert();
+
+ vkBlend = new PipelineColorBlendAttachmentState(
+ blendEnable: true,
+ colorBlendOp: blendOp,
+ alphaBlendOp: blendOp,
+ colorWriteMask: vkBlend.ColorWriteMask);
+
+ if (Gd.Capabilities.SupportsBlendEquationAdvancedNonPreMultipliedSrcColor)
+ {
+ _newState.AdvancedBlendSrcPreMultiplied = blend.SrcPreMultiplied;
+ }
+
+ if (Gd.Capabilities.SupportsBlendEquationAdvancedCorrelatedOverlap)
+ {
+ _newState.AdvancedBlendOverlap = blend.Overlap.Convert();
+ }
+ }
+ else
+ {
+ vkBlend = new PipelineColorBlendAttachmentState(
+ colorWriteMask: vkBlend.ColorWriteMask);
+ }
+
+ if (vkBlend.ColorWriteMask == 0)
+ {
+ _storedBlend[index] = vkBlend;
+
+ vkBlend = new PipelineColorBlendAttachmentState();
+ }
+ }
+
+ SignalStateChange();
+ }
+
+ // Applies classic per-attachment blend state and blend constants, and
+ // resets advanced blend fields to their defaults. Zero-write-mask
+ // attachments get their blend state stashed in _storedBlend.
+ public void SetBlendState(int index, BlendDescriptor blend)
+ {
+ ref var vkBlend = ref _newState.Internal.ColorBlendAttachmentState[index];
+
+ if (blend.Enable)
+ {
+ vkBlend.BlendEnable = blend.Enable;
+ vkBlend.SrcColorBlendFactor = blend.ColorSrcFactor.Convert();
+ vkBlend.DstColorBlendFactor = blend.ColorDstFactor.Convert();
+ vkBlend.ColorBlendOp = blend.ColorOp.Convert();
+ vkBlend.SrcAlphaBlendFactor = blend.AlphaSrcFactor.Convert();
+ vkBlend.DstAlphaBlendFactor = blend.AlphaDstFactor.Convert();
+ vkBlend.AlphaBlendOp = blend.AlphaOp.Convert();
+ }
+ else
+ {
+ vkBlend = new PipelineColorBlendAttachmentState(
+ colorWriteMask: vkBlend.ColorWriteMask);
+ }
+
+ if (vkBlend.ColorWriteMask == 0)
+ {
+ _storedBlend[index] = vkBlend;
+
+ vkBlend = new PipelineColorBlendAttachmentState();
+ }
+
+ DynamicState.SetBlendConstants(
+ blend.BlendConstant.Red,
+ blend.BlendConstant.Green,
+ blend.BlendConstant.Blue,
+ blend.BlendConstant.Alpha);
+
+ // Reset advanced blend state back defaults to the cache to help the pipeline cache.
+ _newState.AdvancedBlendSrcPreMultiplied = true;
+ _newState.AdvancedBlendDstPreMultiplied = true;
+ _newState.AdvancedBlendOverlap = BlendOverlapEXT.UncorrelatedExt;
+
+ SignalStateChange();
+ }
+
+ // Depth bias values go to dynamic state; the enable bit is pipeline state.
+ public void SetDepthBias(PolygonModeMask enables, float factor, float units, float clamp)
+ {
+ DynamicState.SetDepthBias(factor, units, clamp);
+
+ _newState.DepthBiasEnable = enables != 0;
+ SignalStateChange();
+ }
+
+ public void SetDepthClamp(bool clamp)
+ {
+ _newState.DepthClampEnable = clamp;
+ SignalStateChange();
+ }
+
+ public void SetDepthMode(DepthMode mode)
+ {
+ // Currently this is emulated on the shader, because Vulkan had no support for changing the depth mode.
+ // In the future, we may want to use the VK_EXT_depth_clip_control extension to change it here.
+ }
+
+ // Depth test enable/write/compare go into the pending pipeline state.
+ public void SetDepthTest(DepthTestDescriptor depthTest)
+ {
+ _newState.DepthTestEnable = depthTest.TestEnable;
+ _newState.DepthWriteEnable = depthTest.WriteEnable;
+ _newState.DepthCompareOp = depthTest.Func.Convert();
+ SignalStateChange();
+ }
+
+ public void SetFaceCulling(bool enable, Face face)
+ {
+ _newState.CullMode = enable ? face.Convert() : CullModeFlags.None;
+ SignalStateChange();
+ }
+
+ public void SetFrontFace(GAL.FrontFace frontFace)
+ {
+ _newState.FrontFace = frontFace.Convert();
+ SignalStateChange();
+ }
+
+ // Image bindings are forwarded to the descriptor set updater.
+ public void SetImage(int binding, ITexture image, GAL.Format imageFormat)
+ {
+ _descriptorSetUpdater.SetImage(binding, image, imageFormat);
+ }
+
+ public void SetImage(int binding, Auto<DisposableImageView> image)
+ {
+ _descriptorSetUpdater.SetImage(binding, image);
+ }
+
+ // Records the index buffer binding; the actual vkCmdBindIndexBuffer happens
+ // lazily on the next draw (via _needsIndexBufferRebind).
+ public void SetIndexBuffer(BufferRange buffer, GAL.IndexType type)
+ {
+ if (buffer.Handle != BufferHandle.Null)
+ {
+ _indexBuffer = new IndexBufferState(buffer.Handle, buffer.Offset, buffer.Size, type.Convert());
+ }
+ else
+ {
+ _indexBuffer = IndexBufferState.Null;
+ }
+
+ _needsIndexBufferRebind = true;
+ }
+
+ // Only the width is honored; line smoothing is not supported here.
+ public void SetLineParameters(float width, bool smooth)
+ {
+ _newState.LineWidth = width;
+ SignalStateChange();
+ }
+
+ public void SetLogicOpState(bool enable, LogicalOp op)
+ {
+ _newState.LogicOpEnable = enable;
+ _newState.LogicOp = op.Convert();
+ SignalStateChange();
+ }
+
+ public void SetMultisampleState(MultisampleDescriptor multisample)
+ {
+ _newState.AlphaToCoverageEnable = multisample.AlphaToCoverageEnable;
+ _newState.AlphaToOneEnable = multisample.AlphaToOneEnable;
+ SignalStateChange();
+ }
+
+ public void SetOrigin(Origin origin)
+ {
+ // TODO.
+ }
+
+ public unsafe void SetPatchParameters(int vertices, ReadOnlySpan<float> defaultOuterLevel, ReadOnlySpan<float> defaultInnerLevel)
+ {
+ _newState.PatchControlPoints = (uint)vertices;
+ SignalStateChange();
+
+ // TODO: Default levels (likely needs emulation on shaders?)
+ }
+
+ public void SetPointParameters(float size, bool isProgramPointSize, bool enablePointSprite, Origin origin)
+ {
+ // TODO.
+ }
+
+ public void SetPolygonMode(GAL.PolygonMode frontMode, GAL.PolygonMode backMode)
+ {
+ // TODO.
+ }
+
+ // Primitive restart enable only; Vulkan fixes the restart index by type.
+ public void SetPrimitiveRestart(bool enable, int index)
+ {
+ _newState.PrimitiveRestartEnable = enable;
+ // TODO: What to do about the index?
+ SignalStateChange();
+ }
+
+ // Remembers the GAL topology (used for unsupported-topology emulation) and
+ // stores the host-remapped Vulkan topology into the pending state.
+ public void SetPrimitiveTopology(GAL.PrimitiveTopology topology)
+ {
+ _topology = topology;
+
+ var vkTopology = Gd.TopologyRemap(topology).Convert();
+
+ _newState.Topology = vkTopology;
+
+ SignalStateChange();
+ }
+
+ // Binds a shader program: updates descriptor layout, pipeline layout, and
+ // the shader stage array in the pending pipeline state. Compute programs
+ // also end any active render pass.
+ public void SetProgram(IProgram program)
+ {
+ var internalProgram = (ShaderCollection)program;
+ var stages = internalProgram.GetInfos();
+
+ _program = internalProgram;
+
+ _descriptorSetUpdater.SetProgram(internalProgram);
+
+ _newState.PipelineLayout = internalProgram.PipelineLayout;
+ _newState.StagesCount = (uint)stages.Length;
+
+ stages.CopyTo(_newState.Stages.AsSpan().Slice(0, stages.Length));
+
+ SignalStateChange();
+
+ if (_program.IsCompute)
+ {
+ EndRenderPass();
+ }
+ }
+
+ // Updates shader specialization constants when the raw bytes differ from
+ // the currently recorded specialization data.
+ public void Specialize<T>(in T data) where T : unmanaged
+ {
+ var dataSpan = MemoryMarshal.AsBytes(MemoryMarshal.CreateReadOnlySpan(ref Unsafe.AsRef(in data), 1));
+
+ if (!dataSpan.SequenceEqual(_newState.SpecializationData.Span))
+ {
+ _newState.SpecializationData = new SpecData(dataSpan);
+
+ SignalStateChange();
+ }
+ }
+
+ // Hook for derived pipelines to react to attachment changes; no-op here.
+ protected virtual void SignalAttachmentChange()
+ {
+ }
+
+ public void SetRasterizerDiscard(bool discard)
+ {
+ _newState.RasterizerDiscardEnable = discard;
+ SignalStateChange();
+ }
+
+ // Updates per-attachment color write masks. Blend state for masked-out
+ // attachments is stashed/restored via _storedBlend so pipeline cache keys
+ // stay canonical, and the written-attachment count change is signalled.
+ public void SetRenderTargetColorMasks(ReadOnlySpan<uint> componentMask)
+ {
+ int count = Math.Min(Constants.MaxRenderTargets, componentMask.Length);
+ int writtenAttachments = 0;
+
+ for (int i = 0; i < count; i++)
+ {
+ ref var vkBlend = ref _newState.Internal.ColorBlendAttachmentState[i];
+ var newMask = (ColorComponentFlags)componentMask[i];
+
+ // When color write mask is 0, remove all blend state to help the pipeline cache.
+ // Restore it when the mask becomes non-zero.
+ if (vkBlend.ColorWriteMask != newMask)
+ {
+ if (newMask == 0)
+ {
+ _storedBlend[i] = vkBlend;
+
+ vkBlend = new PipelineColorBlendAttachmentState();
+ }
+ else if (vkBlend.ColorWriteMask == 0)
+ {
+ vkBlend = _storedBlend[i];
+ }
+ }
+
+ vkBlend.ColorWriteMask = newMask;
+
+ if (componentMask[i] != 0)
+ {
+ writtenAttachments++;
+ }
+ }
+
+ if (_framebufferUsingColorWriteMask)
+ {
+ // The framebuffer was built filtered by write masks, so rebuild it.
+ SetRenderTargetsInternal(_preMaskColors, _preMaskDepthStencil, true);
+ }
+ else
+ {
+ SignalStateChange();
+
+ if (writtenAttachments != _writtenAttachmentCount)
+ {
+ SignalAttachmentChange();
+ _writtenAttachmentCount = writtenAttachments;
+ }
+ }
+ }
+
+ // Rebuilds the framebuffer and render pass for the given attachments and
+ // signals the resulting state/attachment change.
+ private void SetRenderTargetsInternal(ITexture[] colors, ITexture depthStencil, bool filterWriteMasked)
+ {
+ FramebufferParams?.UpdateModifications();
+ CreateFramebuffer(colors, depthStencil, filterWriteMasked);
+ CreateRenderPass();
+ SignalStateChange();
+ SignalAttachmentChange();
+ }
+
+ // On tile-based renderers (IsTBDR), masked-out attachments are filtered
+ // out of the framebuffer entirely.
+ public void SetRenderTargets(ITexture[] colors, ITexture depthStencil)
+ {
+ _framebufferUsingColorWriteMask = false;
+ SetRenderTargetsInternal(colors, depthStencil, Gd.IsTBDR);
+ }
+
+ // Updates only the primary render scale entry in the support buffer.
+ public void SetRenderTargetScale(float scale)
+ {
+ _renderScale[0].X = scale;
+ SupportBufferUpdater.UpdateRenderScale(_renderScale, 0, 1); // Just the first element.
+ }
+
+ // Sets up to MaxViewports scissors (1 without multi-view support); the
+ // first region is also remembered as the clear scissor.
+ public void SetScissors(ReadOnlySpan<Rectangle<int>> regions)
+ {
+ int maxScissors = Gd.Capabilities.SupportsMultiView ? Constants.MaxViewports : 1;
+ int count = Math.Min(maxScissors, regions.Length);
+ if (count > 0)
+ {
+ ClearScissor = regions[0];
+ }
+
+ for (int i = 0; i < count; i++)
+ {
+ var region = regions[i];
+ var offset = new Offset2D(region.X, region.Y);
+ var extent = new Extent2D((uint)region.Width, (uint)region.Height);
+
+ DynamicState.SetScissor(i, new Rect2D(offset, extent));
+ }
+
+ DynamicState.ScissorsCount = count;
+
+ _newState.ScissorsCount = (uint)count;
+ SignalStateChange();
+ }
+
+ // Masks and references are dynamic state; ops/compare funcs are pipeline
+ // state for both front and back faces.
+ public void SetStencilTest(StencilTestDescriptor stencilTest)
+ {
+ DynamicState.SetStencilMasks(
+ (uint)stencilTest.BackFuncMask,
+ (uint)stencilTest.BackMask,
+ (uint)stencilTest.BackFuncRef,
+ (uint)stencilTest.FrontFuncMask,
+ (uint)stencilTest.FrontMask,
+ (uint)stencilTest.FrontFuncRef);
+
+ _newState.StencilTestEnable = stencilTest.TestEnable;
+ _newState.StencilBackFailOp = stencilTest.BackSFail.Convert();
+ _newState.StencilBackPassOp = stencilTest.BackDpPass.Convert();
+ _newState.StencilBackDepthFailOp = stencilTest.BackDpFail.Convert();
+ _newState.StencilBackCompareOp = stencilTest.BackFunc.Convert();
+ _newState.StencilFrontFailOp = stencilTest.FrontSFail.Convert();
+ _newState.StencilFrontPassOp = stencilTest.FrontDpPass.Convert();
+ _newState.StencilFrontDepthFailOp = stencilTest.FrontDpFail.Convert();
+ _newState.StencilFrontCompareOp = stencilTest.FrontFunc.Convert();
+ SignalStateChange();
+ }
+
+ // Storage/uniform/texture bindings are forwarded to the descriptor updater.
+ public void SetStorageBuffers(ReadOnlySpan<BufferAssignment> buffers)
+ {
+ _descriptorSetUpdater.SetStorageBuffers(CommandBuffer, buffers);
+ }
+
+ public void SetStorageBuffers(int first, ReadOnlySpan<Auto<DisposableBuffer>> buffers)
+ {
+ _descriptorSetUpdater.SetStorageBuffers(CommandBuffer, first, buffers);
+ }
+
+ public void SetTextureAndSampler(ShaderStage stage, int binding, ITexture texture, ISampler sampler)
+ {
+ _descriptorSetUpdater.SetTextureAndSampler(Cbs, stage, binding, texture, sampler);
+ }
+
+ // Rebinds the transform feedback buffers, pausing feedback first. Slots
+ // beyond the provided span keep their previous state; null handles clear
+ // the slot.
+ public void SetTransformFeedbackBuffers(ReadOnlySpan<BufferRange> buffers)
+ {
+ PauseTransformFeedbackInternal();
+
+ int count = Math.Min(Constants.MaxTransformFeedbackBuffers, buffers.Length);
+
+ for (int i = 0; i < count; i++)
+ {
+ var range = buffers[i];
+
+ _transformFeedbackBuffers[i].Dispose();
+
+ if (range.Handle != BufferHandle.Null)
+ {
+ _transformFeedbackBuffers[i] =
+ new BufferState(Gd.BufferManager.GetBuffer(CommandBuffer, range.Handle, range.Offset, range.Size, true), range.Offset, range.Size);
+ _transformFeedbackBuffers[i].BindTransformFeedbackBuffer(Gd, Cbs, (uint)i);
+ }
+ else
+ {
+ _transformFeedbackBuffers[i] = BufferState.Null;
+ }
+ }
+ }
+
+ public void SetUniformBuffers(ReadOnlySpan<BufferAssignment> buffers)
+ {
+ _descriptorSetUpdater.SetUniformBuffers(CommandBuffer, buffers);
+ }
+
+ public void SetUserClipDistance(int index, bool enableClip)
+ {
+ // TODO.
+ }
+
+ // Updates the vertex attribute descriptions. Attributes flagged IsZero are
+ // redirected to the dummy buffer at binding 0 (hence bufferIndex + 1 for
+ // real buffers). Also tracks the max scalar size per buffer and dirties
+ // any vertex buffer whose required alignment changed.
+ public void SetVertexAttribs(ReadOnlySpan<VertexAttribDescriptor> vertexAttribs)
+ {
+ var formatCapabilities = Gd.FormatCapabilities;
+
+ Span<int> newVbScalarSizes = stackalloc int[Constants.MaxVertexBuffers];
+
+ int count = Math.Min(Constants.MaxVertexAttributes, vertexAttribs.Length);
+ uint dirtyVbSizes = 0;
+
+ for (int i = 0; i < count; i++)
+ {
+ var attribute = vertexAttribs[i];
+ var rawIndex = attribute.BufferIndex;
+ var bufferIndex = attribute.IsZero ? 0 : rawIndex + 1;
+
+ if (!attribute.IsZero)
+ {
+ newVbScalarSizes[rawIndex] = Math.Max(newVbScalarSizes[rawIndex], attribute.Format.GetScalarSize());
+ dirtyVbSizes |= 1u << rawIndex;
+ }
+
+ _newState.Internal.VertexAttributeDescriptions[i] = new VertexInputAttributeDescription(
+ (uint)i,
+ (uint)bufferIndex,
+ formatCapabilities.ConvertToVertexVkFormat(attribute.Format),
+ (uint)attribute.Offset);
+ }
+
+ // Visit each buffer whose scalar alignment requirement may have changed.
+ while (dirtyVbSizes != 0)
+ {
+ int dirtyBit = BitOperations.TrailingZeroCount(dirtyVbSizes);
+
+ ref var buffer = ref _vertexBuffers[dirtyBit + 1];
+
+ if (buffer.AttributeScalarAlignment != newVbScalarSizes[dirtyBit])
+ {
+ _vertexBuffersDirty |= 1UL << (dirtyBit + 1);
+ buffer.AttributeScalarAlignment = newVbScalarSizes[dirtyBit];
+ }
+
+ dirtyVbSizes &= ~(1u << dirtyBit);
+ }
+
+ _newState.VertexAttributeDescriptionsCount = (uint)count;
+ SignalStateChange();
+ }
+
+ /// <summary>
+ /// Updates the bound vertex buffers and the corresponding binding descriptions on
+ /// the pending pipeline state. Binding 0 is a dummy zero-stride binding; buffer i
+ /// maps to binding i + 1. Buffers that may need stride realignment are deferred
+ /// and rebound before the next draw.
+ /// </summary>
+ public void SetVertexBuffers(ReadOnlySpan<VertexBufferDescriptor> vertexBuffers)
+ {
+ int count = Math.Min(Constants.MaxVertexBuffers, vertexBuffers.Length);
+
+ _newState.Internal.VertexBindingDescriptions[0] = new VertexInputBindingDescription(0, 0, VertexInputRate.Vertex);
+
+ int validCount = 1;
+
+ for (int i = 0; i < count; i++)
+ {
+ var vertexBuffer = vertexBuffers[i];
+
+ // TODO: Support divisor > 1
+ var inputRate = vertexBuffer.Divisor != 0 ? VertexInputRate.Instance : VertexInputRate.Vertex;
+
+ if (vertexBuffer.Buffer.Handle != BufferHandle.Null)
+ {
+ var vb = Gd.BufferManager.GetBuffer(CommandBuffer, vertexBuffer.Buffer.Handle, false);
+ if (vb != null)
+ {
+ int binding = i + 1;
+ int descriptorIndex = validCount++;
+
+ _newState.Internal.VertexBindingDescriptions[descriptorIndex] = new VertexInputBindingDescription(
+ (uint)binding,
+ (uint)vertexBuffer.Stride,
+ inputRate);
+
+ int vbSize = vertexBuffer.Buffer.Size;
+
+ if (Gd.Vendor == Vendor.Amd && !Gd.IsMoltenVk && vertexBuffer.Stride > 0)
+ {
+ // AMD has a bug where if offset + stride * count is greater than
+ // the size, then the last attribute will have the wrong value.
+ // As a workaround, simply use the full buffer size.
+ int remainder = vbSize % vertexBuffer.Stride;
+ if (remainder != 0)
+ {
+ vbSize += vertexBuffer.Stride - remainder;
+ }
+ }
+
+ ref var buffer = ref _vertexBuffers[binding];
+ // Preserve the attribute alignment computed by SetVertexAttribs across the rebind.
+ int oldScalarAlign = buffer.AttributeScalarAlignment;
+
+ buffer.Dispose();
+
+ if (Gd.Capabilities.VertexBufferAlignment < 2 &&
+ (vertexBuffer.Stride % FormatExtensions.MaxBufferFormatScalarSize) == 0)
+ {
+ buffer = new VertexBufferState(
+ vb,
+ descriptorIndex,
+ vertexBuffer.Buffer.Offset,
+ vbSize,
+ vertexBuffer.Stride);
+
+ buffer.BindVertexBuffer(Gd, Cbs, (uint)binding, ref _newState);
+ }
+ else
+ {
+ // May need to be rewritten. Bind this buffer before draw.
+
+ buffer = new VertexBufferState(
+ vertexBuffer.Buffer.Handle,
+ descriptorIndex,
+ vertexBuffer.Buffer.Offset,
+ vbSize,
+ vertexBuffer.Stride);
+
+ _vertexBuffersDirty |= 1UL << binding;
+ }
+
+ buffer.AttributeScalarAlignment = oldScalarAlign;
+ }
+ }
+ }
+
+ _newState.VertexBindingDescriptionsCount = (uint)validCount;
+ SignalStateChange();
+ }
+
+ /// <summary>
+ /// Updates the viewports in the dynamic state and the pending pipeline state.
+ /// Zero-sized regions are replaced with 1 to keep the Vulkan viewport valid, and
+ /// depth near/far are clamped to [0, 1]. Also refreshes the support buffer's
+ /// viewport inverse when the transform-disable flag changes.
+ /// </summary>
+ public void SetViewports(ReadOnlySpan<GAL.Viewport> viewports, bool disableTransform)
+ {
+ int maxViewports = Gd.Capabilities.SupportsMultiView ? Constants.MaxViewports : 1;
+ int count = Math.Min(maxViewports, viewports.Length);
+
+ static float Clamp(float value)
+ {
+ return Math.Clamp(value, 0f, 1f);
+ }
+
+ DynamicState.ViewportsCount = (uint)count;
+
+ for (int i = 0; i < count; i++)
+ {
+ var viewport = viewports[i];
+
+ DynamicState.SetViewport(i, new Silk.NET.Vulkan.Viewport(
+ viewport.Region.X,
+ viewport.Region.Y,
+ viewport.Region.Width == 0f ? 1f : viewport.Region.Width,
+ viewport.Region.Height == 0f ? 1f : viewport.Region.Height,
+ Clamp(viewport.DepthNear),
+ Clamp(viewport.DepthFar)));
+ }
+
+ float disableTransformF = disableTransform ? 1.0f : 0.0f;
+ if (SupportBufferUpdater.Data.ViewportInverse.W != disableTransformF || disableTransform)
+ {
+ float scale = _renderScale[0].X;
+ SupportBufferUpdater.UpdateViewportInverse(new Vector4<float>
+ {
+ X = scale * 2f / viewports[0].Region.Width,
+ Y = scale * 2f / viewports[0].Region.Height,
+ Z = 1,
+ W = disableTransformF
+ });
+ }
+
+ _newState.ViewportsCount = (uint)count;
+ SignalStateChange();
+ }
+
+ /// <summary>
+ /// Replaces references to one backing buffer with another across the index,
+ /// vertex and transform feedback buffer states and the descriptor sets, then
+ /// forces all bindings to be re-recorded on the command buffer.
+ /// </summary>
+ public void SwapBuffer(Auto<DisposableBuffer> from, Auto<DisposableBuffer> to)
+ {
+ _indexBuffer.Swap(from, to);
+
+ for (int i = 0; i < _vertexBuffers.Length; i++)
+ {
+ _vertexBuffers[i].Swap(from, to);
+ }
+
+ for (int i = 0; i < _transformFeedbackBuffers.Length; i++)
+ {
+ _transformFeedbackBuffers[i].Swap(from, to);
+ }
+
+ _descriptorSetUpdater.SwapBuffer(from, to);
+
+ SignalCommandBufferChange();
+ }
+
+ /// <summary>
+ /// Records a fragment-shader-to-fragment-shader memory barrier covering all
+ /// read and write access, used to order texture accesses between draws.
+ /// </summary>
+ public unsafe void TextureBarrier()
+ {
+ MemoryBarrier memoryBarrier = new MemoryBarrier()
+ {
+ SType = StructureType.MemoryBarrier,
+ SrcAccessMask = AccessFlags.MemoryReadBit | AccessFlags.MemoryWriteBit,
+ DstAccessMask = AccessFlags.MemoryReadBit | AccessFlags.MemoryWriteBit
+ };
+
+ Gd.Api.CmdPipelineBarrier(
+ CommandBuffer,
+ PipelineStageFlags.FragmentShaderBit,
+ PipelineStageFlags.FragmentShaderBit,
+ 0,
+ 1,
+ memoryBarrier,
+ 0,
+ null,
+ 0,
+ null);
+ }
+
+ /// <summary>
+ /// Tiled variant of the texture barrier; implemented as a full texture barrier here.
+ /// </summary>
+ public void TextureBarrierTiled()
+ {
+ TextureBarrier();
+ }
+
+ /// <summary>
+ /// Updates per-texture render scales in the support buffer. Index 0 of
+ /// _renderScale is reserved (texture scales start at index 1), so only
+ /// 1 + totalCount entries are uploaded, and only when a value changed.
+ /// </summary>
+ public void UpdateRenderScale(ReadOnlySpan<float> scales, int totalCount, int fragmentCount)
+ {
+ bool changed = false;
+
+ for (int index = 0; index < totalCount; index++)
+ {
+ if (_renderScale[1 + index].X != scales[index])
+ {
+ _renderScale[1 + index].X = scales[index];
+ changed = true;
+ }
+ }
+
+ // Only update fragment count if there are scales after it for the vertex stage.
+ if (fragmentCount != totalCount && fragmentCount != _fragmentScaleCount)
+ {
+ _fragmentScaleCount = fragmentCount;
+ SupportBufferUpdater.UpdateFragmentRenderScaleCount(_fragmentScaleCount);
+ }
+
+ if (changed)
+ {
+ SupportBufferUpdater.UpdateRenderScale(_renderScale, 0, 1 + totalCount);
+ }
+ }
+
+ /// <summary>
+ /// Marks all per-command-buffer bindings (index, vertex and transform feedback
+ /// buffers, descriptor sets, dynamic state, bound pipeline) as needing to be
+ /// re-recorded, e.g. after switching to a new command buffer.
+ /// </summary>
+ protected void SignalCommandBufferChange()
+ {
+ _needsIndexBufferRebind = true;
+ _needsTransformFeedbackBuffersRebind = true;
+ // One dirty bit per vertex buffer slot.
+ _vertexBuffersDirty = ulong.MaxValue >> (64 - _vertexBuffers.Length);
+
+ _descriptorSetUpdater.SignalCommandBufferChange();
+ DynamicState.ForceAllDirty();
+ _currentPipelineHandle = 0;
+ }
+
+ /// <summary>
+ /// Creates the framebuffer parameters from the given attachments and refreshes
+ /// the pending pipeline's attachment formats. When filterWriteMasked is set,
+ /// duplicate color attachments are removed (keeping the one with a non-zero
+ /// write mask) to work around TBDR GPU behavior.
+ /// </summary>
+ private void CreateFramebuffer(ITexture[] colors, ITexture depthStencil, bool filterWriteMasked)
+ {
+ if (filterWriteMasked)
+ {
+ // TBDR GPUs don't work properly if the same attachment is bound to multiple targets,
+ // due to each attachment being a copy of the real attachment, rather than a direct write.
+
+ // Just try to remove duplicate attachments.
+ // Save a copy of the array to rebind when mask changes.
+
+ void maskOut()
+ {
+ if (!_framebufferUsingColorWriteMask)
+ {
+ _preMaskColors = colors.ToArray();
+ _preMaskDepthStencil = depthStencil;
+ }
+
+ // If true, then the framebuffer must be recreated when the mask changes.
+ _framebufferUsingColorWriteMask = true;
+ }
+
+ // Look for textures that are masked out.
+
+ for (int i = 0; i < colors.Length; i++)
+ {
+ if (colors[i] == null)
+ {
+ continue;
+ }
+
+ ref var vkBlend = ref _newState.Internal.ColorBlendAttachmentState[i];
+
+ for (int j = 0; j < i; j++)
+ {
+ // Check each binding for a duplicate binding before it.
+
+ if (colors[i] == colors[j])
+ {
+ // Prefer the binding with no write mask.
+ ref var vkBlend2 = ref _newState.Internal.ColorBlendAttachmentState[j];
+ if (vkBlend.ColorWriteMask == 0)
+ {
+ colors[i] = null;
+ maskOut();
+ }
+ else if (vkBlend2.ColorWriteMask == 0)
+ {
+ colors[j] = null;
+ maskOut();
+ }
+ }
+ }
+ }
+ }
+
+ FramebufferParams = new FramebufferParams(Device, colors, depthStencil);
+ UpdatePipelineAttachmentFormats();
+ }
+
+ /// <summary>
+ /// Copies the current framebuffer's attachment formats into the pending pipeline
+ /// state, zeroing any unused slots, and updates the dependent attachment counts
+ /// and sample count.
+ /// </summary>
+ protected void UpdatePipelineAttachmentFormats()
+ {
+ var dstAttachmentFormats = _newState.Internal.AttachmentFormats.AsSpan();
+ FramebufferParams.AttachmentFormats.CopyTo(dstAttachmentFormats);
+
+ // Clear formats for slots not used by the current framebuffer.
+ for (int i = FramebufferParams.AttachmentFormats.Length; i < dstAttachmentFormats.Length; i++)
+ {
+ dstAttachmentFormats[i] = 0;
+ }
+
+ _newState.ColorBlendAttachmentStateCount = (uint)(FramebufferParams.MaxColorAttachmentIndex + 1);
+ _newState.HasDepthStencil = FramebufferParams.HasDepthStencil;
+ _newState.SamplesCount = FramebufferParams.AttachmentSamples.Length != 0 ? FramebufferParams.AttachmentSamples[0] : 1;
+ }
+
+ /// <summary>
+ /// Creates a single-subpass render pass matching the current framebuffer's
+ /// attachments (load/store preserving, General layout), replaces the cached
+ /// render pass and framebuffer, and ends any active render pass. Gaps between
+ /// color attachment indices are filled with VK_ATTACHMENT_UNUSED.
+ /// </summary>
+ protected unsafe void CreateRenderPass()
+ {
+ const int MaxAttachments = Constants.MaxRenderTargets + 1;
+
+ AttachmentDescription[] attachmentDescs = null;
+
+ var subpass = new SubpassDescription()
+ {
+ PipelineBindPoint = PipelineBindPoint.Graphics
+ };
+
+ AttachmentReference* attachmentReferences = stackalloc AttachmentReference[MaxAttachments];
+
+ var hasFramebuffer = FramebufferParams != null;
+
+ if (hasFramebuffer && FramebufferParams.AttachmentsCount != 0)
+ {
+ attachmentDescs = new AttachmentDescription[FramebufferParams.AttachmentsCount];
+
+ for (int i = 0; i < FramebufferParams.AttachmentsCount; i++)
+ {
+ attachmentDescs[i] = new AttachmentDescription(
+ 0,
+ FramebufferParams.AttachmentFormats[i],
+ TextureStorage.ConvertToSampleCountFlags(Gd.Capabilities.SupportedSampleCounts, FramebufferParams.AttachmentSamples[i]),
+ AttachmentLoadOp.Load,
+ AttachmentStoreOp.Store,
+ AttachmentLoadOp.Load,
+ AttachmentStoreOp.Store,
+ ImageLayout.General,
+ ImageLayout.General);
+ }
+
+ int colorAttachmentsCount = FramebufferParams.ColorAttachmentsCount;
+
+ if (colorAttachmentsCount > MaxAttachments - 1)
+ {
+ colorAttachmentsCount = MaxAttachments - 1;
+ }
+
+ if (colorAttachmentsCount != 0)
+ {
+ int maxAttachmentIndex = FramebufferParams.MaxColorAttachmentIndex;
+ subpass.ColorAttachmentCount = (uint)maxAttachmentIndex + 1;
+ subpass.PColorAttachments = &attachmentReferences[0];
+
+ // Fill with VK_ATTACHMENT_UNUSED to cover any gaps.
+ for (int i = 0; i <= maxAttachmentIndex; i++)
+ {
+ subpass.PColorAttachments[i] = new AttachmentReference(Vk.AttachmentUnused, ImageLayout.Undefined);
+ }
+
+ for (int i = 0; i < colorAttachmentsCount; i++)
+ {
+ int bindIndex = FramebufferParams.AttachmentIndices[i];
+
+ subpass.PColorAttachments[bindIndex] = new AttachmentReference((uint)i, ImageLayout.General);
+ }
+ }
+
+ if (FramebufferParams.HasDepthStencil)
+ {
+ // Depth-stencil is always the last attachment when present.
+ uint dsIndex = (uint)FramebufferParams.AttachmentsCount - 1;
+
+ subpass.PDepthStencilAttachment = &attachmentReferences[MaxAttachments - 1];
+ *subpass.PDepthStencilAttachment = new AttachmentReference(dsIndex, ImageLayout.General);
+ }
+ }
+
+ var subpassDependency = PipelineConverter.CreateSubpassDependency();
+
+ fixed (AttachmentDescription* pAttachmentDescs = attachmentDescs)
+ {
+ var renderPassCreateInfo = new RenderPassCreateInfo()
+ {
+ SType = StructureType.RenderPassCreateInfo,
+ PAttachments = pAttachmentDescs,
+ AttachmentCount = attachmentDescs != null ? (uint)attachmentDescs.Length : 0,
+ PSubpasses = &subpass,
+ SubpassCount = 1,
+ PDependencies = &subpassDependency,
+ DependencyCount = 1
+ };
+
+ Gd.Api.CreateRenderPass(Device, renderPassCreateInfo, null, out var renderPass).ThrowOnError();
+
+ _renderPass?.Dispose();
+ _renderPass = new Auto<DisposableRenderPass>(new DisposableRenderPass(Gd.Api, Device, renderPass));
+ }
+
+ EndRenderPass();
+
+ _framebuffer?.Dispose();
+ _framebuffer = hasFramebuffer ? FramebufferParams.Create(Gd.Api, Cbs, _renderPass) : null;
+ }
+
+ /// <summary>
+ /// Marks the pipeline state as dirty so it is recreated before the next draw/dispatch.
+ /// </summary>
+ protected void SignalStateChange()
+ {
+ _stateDirty = true;
+ }
+
+ /// <summary>
+ /// Prepares for a draw or dispatch: optionally flushes commands, replays dirty
+ /// dynamic state, commits the support buffer, rebinds any pending index, vertex
+ /// and transform feedback buffers, recreates the pipeline when the state or bind
+ /// point changed, and binds descriptor sets.
+ /// </summary>
+ private void RecreatePipelineIfNeeded(PipelineBindPoint pbp)
+ {
+ if (AutoFlush.ShouldFlushDraw(DrawCount))
+ {
+ Gd.FlushAllCommands();
+ }
+
+ DynamicState.ReplayIfDirty(Gd.Api, CommandBuffer);
+
+ // Commit changes to the support buffer before drawing.
+ SupportBufferUpdater.Commit();
+
+ if (_needsIndexBufferRebind && _indexBufferPattern == null)
+ {
+ _indexBuffer.BindIndexBuffer(Gd, Cbs);
+ _needsIndexBufferRebind = false;
+ }
+
+ if (_needsTransformFeedbackBuffersRebind)
+ {
+ // Bindings cannot change while transform feedback is active.
+ PauseTransformFeedbackInternal();
+
+ for (int i = 0; i < Constants.MaxTransformFeedbackBuffers; i++)
+ {
+ _transformFeedbackBuffers[i].BindTransformFeedbackBuffer(Gd, Cbs, (uint)i);
+ }
+
+ _needsTransformFeedbackBuffersRebind = false;
+ }
+
+ if (_vertexBuffersDirty != 0)
+ {
+ while (_vertexBuffersDirty != 0)
+ {
+ int i = BitOperations.TrailingZeroCount(_vertexBuffersDirty);
+
+ _vertexBuffers[i].BindVertexBuffer(Gd, Cbs, (uint)i, ref _newState);
+
+ _vertexBuffersDirty &= ~(1UL << i);
+ }
+ }
+
+ if (_stateDirty || Pbp != pbp)
+ {
+ CreatePipeline(pbp);
+ _stateDirty = false;
+ Pbp = pbp;
+ }
+
+ _descriptorSetUpdater.UpdateAndBindDescriptorSets(Cbs, pbp);
+ }
+
+ /// <summary>
+ /// Creates (or fetches from the cache) a compute or graphics pipeline from the
+ /// pending state and binds it if its handle differs from the currently bound one.
+ /// No-op when no shader stages are set.
+ /// </summary>
+ private void CreatePipeline(PipelineBindPoint pbp)
+ {
+ // We can only create a pipeline if the have the shader stages set.
+ if (_newState.Stages != null)
+ {
+ if (pbp == PipelineBindPoint.Graphics && _renderPass == null)
+ {
+ CreateRenderPass();
+ }
+
+ var pipeline = pbp == PipelineBindPoint.Compute
+ ? _newState.CreateComputePipeline(Gd, Device, _program, PipelineCache)
+ : _newState.CreateGraphicsPipeline(Gd, Device, _program, PipelineCache, _renderPass.Get(Cbs).Value);
+
+ ulong pipelineHandle = pipeline.GetUnsafe().Value.Handle;
+
+ if (_currentPipelineHandle != pipelineHandle)
+ {
+ _currentPipelineHandle = pipelineHandle;
+ Pipeline = pipeline;
+
+ PauseTransformFeedbackInternal();
+ Gd.Api.CmdBindPipeline(CommandBuffer, pbp, Pipeline.Get(Cbs).Value);
+ }
+ }
+ }
+
+ /// <summary>
+ /// Begins the cached render pass over the full framebuffer area if one is not
+ /// already active. Attachments use LoadOp.Load, so the single clear value here
+ /// is only a placeholder required by the API.
+ /// </summary>
+ private unsafe void BeginRenderPass()
+ {
+ if (!RenderPassActive)
+ {
+ var renderArea = new Rect2D(null, new Extent2D(FramebufferParams.Width, FramebufferParams.Height));
+ var clearValue = new ClearValue();
+
+ var renderPassBeginInfo = new RenderPassBeginInfo()
+ {
+ SType = StructureType.RenderPassBeginInfo,
+ RenderPass = _renderPass.Get(Cbs).Value,
+ Framebuffer = _framebuffer.Get(Cbs).Value,
+ RenderArea = renderArea,
+ PClearValues = &clearValue,
+ ClearValueCount = 1
+ };
+
+ Gd.Api.CmdBeginRenderPass(CommandBuffer, renderPassBeginInfo, SubpassContents.Inline);
+ RenderPassActive = true;
+ }
+ }
+
+ /// <summary>
+ /// Ends the active render pass, pausing transform feedback first (it cannot
+ /// span render pass boundaries) and notifying subclasses via SignalRenderPassEnd.
+ /// </summary>
+ public void EndRenderPass()
+ {
+ if (RenderPassActive)
+ {
+ PauseTransformFeedbackInternal();
+ Gd.Api.CmdEndRenderPass(CommandBuffer);
+ SignalRenderPassEnd();
+ RenderPassActive = false;
+ }
+ }
+
+ /// <summary>
+ /// Hook invoked when a render pass ends; no-op by default, for subclasses to override.
+ /// </summary>
+ protected virtual void SignalRenderPassEnd()
+ {
+ }
+
+ /// <summary>
+ /// Temporarily stops transform feedback if it is enabled and currently active.
+ /// </summary>
+ private void PauseTransformFeedbackInternal()
+ {
+ if (_tfEnabled && _tfActive)
+ {
+ EndTransformFeedbackInternal();
+ _tfActive = false;
+ }
+ }
+
+ /// <summary>
+ /// Restarts transform feedback if it is enabled but not currently active.
+ /// </summary>
+ private void ResumeTransformFeedbackInternal()
+ {
+ if (_tfEnabled && !_tfActive)
+ {
+ BeginTransformFeedbackInternal();
+ _tfActive = true;
+ }
+ }
+
+ /// <summary>
+ /// Records vkCmdBeginTransformFeedbackEXT with no counter buffers.
+ /// </summary>
+ private unsafe void BeginTransformFeedbackInternal()
+ {
+ Gd.TransformFeedbackApi.CmdBeginTransformFeedback(CommandBuffer, 0, 0, null, null);
+ }
+
+ /// <summary>
+ /// Records vkCmdEndTransformFeedbackEXT with no counter buffers.
+ /// </summary>
+ private unsafe void EndTransformFeedbackInternal()
+ {
+ Gd.TransformFeedbackApi.CmdEndTransformFeedback(CommandBuffer, 0, 0, null, null);
+ }
+
+ /// <summary>
+ /// Releases all owned GPU resources: render pass, framebuffer, pipeline state,
+ /// descriptor updater, buffer states, the bound pipeline, the Vulkan pipeline
+ /// cache and the support buffer updater.
+ /// </summary>
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ _renderPass?.Dispose();
+ _framebuffer?.Dispose();
+ _newState.Dispose();
+ _descriptorSetUpdater.Dispose();
+
+ for (int i = 0; i < _vertexBuffers.Length; i++)
+ {
+ _vertexBuffers[i].Dispose();
+ }
+
+ for (int i = 0; i < _transformFeedbackBuffers.Length; i++)
+ {
+ _transformFeedbackBuffers[i].Dispose();
+ }
+
+ Pipeline?.Dispose();
+
+ unsafe
+ {
+ Gd.Api.DestroyPipelineCache(Device, PipelineCache, null);
+ }
+
+ SupportBufferUpdater.Dispose();
+ }
+ }
+
+ /// <summary>
+ /// Standard IDisposable entry point; delegates to Dispose(true).
+ /// </summary>
+ public void Dispose()
+ {
+ Dispose(true);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineConverter.cs b/src/Ryujinx.Graphics.Vulkan/PipelineConverter.cs
new file mode 100644
index 00000000..da480d9f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineConverter.cs
@@ -0,0 +1,318 @@
+using Ryujinx.Common;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Converts GAL program pipeline state into Vulkan render pass and pipeline
+ /// state objects, and provides the shared subpass dependency definitions.
+ /// </summary>
+ static class PipelineConverter
+ {
+ private const AccessFlags SubpassSrcAccessMask = AccessFlags.MemoryReadBit | AccessFlags.MemoryWriteBit | AccessFlags.ColorAttachmentWriteBit;
+ private const AccessFlags SubpassDstAccessMask = AccessFlags.MemoryReadBit | AccessFlags.MemoryWriteBit | AccessFlags.ShaderReadBit;
+
+ /// <summary>
+ /// Builds a single-subpass render pass from the enabled attachments of the
+ /// given pipeline state (load/store preserving, General layout). Gaps between
+ /// color attachment indices are filled with VK_ATTACHMENT_UNUSED; the
+ /// depth-stencil attachment, when enabled, is always last.
+ /// </summary>
+ public static unsafe DisposableRenderPass ToRenderPass(this ProgramPipelineState state, VulkanRenderer gd, Device device)
+ {
+ const int MaxAttachments = Constants.MaxRenderTargets + 1;
+
+ AttachmentDescription[] attachmentDescs = null;
+
+ var subpass = new SubpassDescription()
+ {
+ PipelineBindPoint = PipelineBindPoint.Graphics
+ };
+
+ AttachmentReference* attachmentReferences = stackalloc AttachmentReference[MaxAttachments];
+
+ Span<int> attachmentIndices = stackalloc int[MaxAttachments];
+ Span<Silk.NET.Vulkan.Format> attachmentFormats = stackalloc Silk.NET.Vulkan.Format[MaxAttachments];
+
+ int attachmentCount = 0;
+ int colorCount = 0;
+ int maxColorAttachmentIndex = -1;
+
+ // Collect the formats and original bind indices of the enabled color attachments.
+ for (int i = 0; i < state.AttachmentEnable.Length; i++)
+ {
+ if (state.AttachmentEnable[i])
+ {
+ attachmentFormats[attachmentCount] = gd.FormatCapabilities.ConvertToVkFormat(state.AttachmentFormats[i]);
+
+ attachmentIndices[attachmentCount++] = i;
+ colorCount++;
+ maxColorAttachmentIndex = i;
+ }
+ }
+
+ if (state.DepthStencilEnable)
+ {
+ attachmentFormats[attachmentCount++] = gd.FormatCapabilities.ConvertToVkFormat(state.DepthStencilFormat);
+ }
+
+ if (attachmentCount != 0)
+ {
+ attachmentDescs = new AttachmentDescription[attachmentCount];
+
+ for (int i = 0; i < attachmentCount; i++)
+ {
+ int bindIndex = attachmentIndices[i];
+
+ attachmentDescs[i] = new AttachmentDescription(
+ 0,
+ attachmentFormats[i],
+ TextureStorage.ConvertToSampleCountFlags(gd.Capabilities.SupportedSampleCounts, (uint)state.SamplesCount),
+ AttachmentLoadOp.Load,
+ AttachmentStoreOp.Store,
+ AttachmentLoadOp.Load,
+ AttachmentStoreOp.Store,
+ ImageLayout.General,
+ ImageLayout.General);
+ }
+
+ int colorAttachmentsCount = colorCount;
+
+ if (colorAttachmentsCount > MaxAttachments - 1)
+ {
+ colorAttachmentsCount = MaxAttachments - 1;
+ }
+
+ if (colorAttachmentsCount != 0)
+ {
+ subpass.ColorAttachmentCount = (uint)maxColorAttachmentIndex + 1;
+ subpass.PColorAttachments = &attachmentReferences[0];
+
+ // Fill with VK_ATTACHMENT_UNUSED to cover any gaps.
+ for (int i = 0; i <= maxColorAttachmentIndex; i++)
+ {
+ subpass.PColorAttachments[i] = new AttachmentReference(Vk.AttachmentUnused, ImageLayout.Undefined);
+ }
+
+ for (int i = 0; i < colorAttachmentsCount; i++)
+ {
+ int bindIndex = attachmentIndices[i];
+
+ subpass.PColorAttachments[bindIndex] = new AttachmentReference((uint)i, ImageLayout.General);
+ }
+ }
+
+ if (state.DepthStencilEnable)
+ {
+ uint dsIndex = (uint)attachmentCount - 1;
+
+ subpass.PDepthStencilAttachment = &attachmentReferences[MaxAttachments - 1];
+ *subpass.PDepthStencilAttachment = new AttachmentReference(dsIndex, ImageLayout.General);
+ }
+ }
+
+ var subpassDependency = CreateSubpassDependency();
+
+ fixed (AttachmentDescription* pAttachmentDescs = attachmentDescs)
+ {
+ var renderPassCreateInfo = new RenderPassCreateInfo()
+ {
+ SType = StructureType.RenderPassCreateInfo,
+ PAttachments = pAttachmentDescs,
+ AttachmentCount = attachmentDescs != null ? (uint)attachmentDescs.Length : 0,
+ PSubpasses = &subpass,
+ SubpassCount = 1,
+ PDependencies = &subpassDependency,
+ DependencyCount = 1
+ };
+
+ gd.Api.CreateRenderPass(device, renderPassCreateInfo, null, out var renderPass).ThrowOnError();
+
+ return new DisposableRenderPass(gd.Api, device, renderPass);
+ }
+ }
+
+ /// <summary>
+ /// Creates the self-dependency (subpass 0 -> subpass 0) used by all render
+ /// passes, covering all graphics stages with the shared access masks.
+ /// </summary>
+ public static SubpassDependency CreateSubpassDependency()
+ {
+ return new SubpassDependency(
+ 0,
+ 0,
+ PipelineStageFlags.AllGraphicsBit,
+ PipelineStageFlags.AllGraphicsBit,
+ SubpassSrcAccessMask,
+ SubpassDstAccessMask,
+ 0);
+ }
+
+ /// <summary>
+ /// SubpassDependency2 variant of <see cref="CreateSubpassDependency"/> for
+ /// the RenderPass2 path.
+ /// </summary>
+ public unsafe static SubpassDependency2 CreateSubpassDependency2()
+ {
+ return new SubpassDependency2(
+ StructureType.SubpassDependency2,
+ null,
+ 0,
+ 0,
+ PipelineStageFlags.AllGraphicsBit,
+ PipelineStageFlags.AllGraphicsBit,
+ SubpassSrcAccessMask,
+ SubpassDstAccessMask,
+ 0);
+ }
+
+ /// <summary>
+ /// Converts GAL program pipeline state into a Vulkan PipelineState. Viewports,
+ /// scissors and stencil masks/references are left at defaults because they are
+ /// set via dynamic state. Vertex buffer binding 0 is a reserved zero binding;
+ /// real buffers start at binding 1.
+ /// </summary>
+ public static PipelineState ToVulkanPipelineState(this ProgramPipelineState state, VulkanRenderer gd)
+ {
+ PipelineState pipeline = new PipelineState();
+ pipeline.Initialize();
+
+ // It is assumed that Dynamic State is enabled when this conversion is used.
+
+ pipeline.CullMode = state.CullEnable ? state.CullMode.Convert() : CullModeFlags.None;
+
+ pipeline.DepthBoundsTestEnable = false; // Not implemented.
+
+ pipeline.DepthClampEnable = state.DepthClampEnable;
+
+ pipeline.DepthTestEnable = state.DepthTest.TestEnable;
+ pipeline.DepthWriteEnable = state.DepthTest.WriteEnable;
+ pipeline.DepthCompareOp = state.DepthTest.Func.Convert();
+
+ pipeline.FrontFace = state.FrontFace.Convert();
+
+ pipeline.HasDepthStencil = state.DepthStencilEnable;
+ pipeline.LineWidth = state.LineWidth;
+ pipeline.LogicOpEnable = state.LogicOpEnable;
+ pipeline.LogicOp = state.LogicOp.Convert();
+
+ pipeline.MinDepthBounds = 0f; // Not implemented.
+ pipeline.MaxDepthBounds = 0f; // Not implemented.
+
+ pipeline.PatchControlPoints = state.PatchControlPoints;
+ pipeline.PolygonMode = Silk.NET.Vulkan.PolygonMode.Fill; // Not implemented.
+ pipeline.PrimitiveRestartEnable = state.PrimitiveRestartEnable;
+ pipeline.RasterizerDiscardEnable = state.RasterizerDiscard;
+ pipeline.SamplesCount = (uint)state.SamplesCount;
+
+ if (gd.Capabilities.SupportsMultiView)
+ {
+ pipeline.ScissorsCount = Constants.MaxViewports;
+ pipeline.ViewportsCount = Constants.MaxViewports;
+ }
+ else
+ {
+ pipeline.ScissorsCount = 1;
+ pipeline.ViewportsCount = 1;
+ }
+
+ pipeline.DepthBiasEnable = state.BiasEnable != 0;
+
+ // Stencil masks and ref are dynamic, so are 0 in the Vulkan pipeline.
+
+ pipeline.StencilFrontFailOp = state.StencilTest.FrontSFail.Convert();
+ pipeline.StencilFrontPassOp = state.StencilTest.FrontDpPass.Convert();
+ pipeline.StencilFrontDepthFailOp = state.StencilTest.FrontDpFail.Convert();
+ pipeline.StencilFrontCompareOp = state.StencilTest.FrontFunc.Convert();
+ pipeline.StencilFrontCompareMask = 0;
+ pipeline.StencilFrontWriteMask = 0;
+ pipeline.StencilFrontReference = 0;
+
+ pipeline.StencilBackFailOp = state.StencilTest.BackSFail.Convert();
+ pipeline.StencilBackPassOp = state.StencilTest.BackDpPass.Convert();
+ pipeline.StencilBackDepthFailOp = state.StencilTest.BackDpFail.Convert();
+ pipeline.StencilBackCompareOp = state.StencilTest.BackFunc.Convert();
+ pipeline.StencilBackCompareMask = 0;
+ pipeline.StencilBackWriteMask = 0;
+ pipeline.StencilBackReference = 0;
+
+ pipeline.StencilTestEnable = state.StencilTest.TestEnable;
+
+ pipeline.Topology = gd.TopologyRemap(state.Topology).Convert();
+
+ int vaCount = Math.Min(Constants.MaxVertexAttributes, state.VertexAttribCount);
+ int vbCount = Math.Min(Constants.MaxVertexBuffers, state.VertexBufferCount);
+
+ Span<int> vbScalarSizes = stackalloc int[vbCount];
+
+ for (int i = 0; i < vaCount; i++)
+ {
+ var attribute = state.VertexAttribs[i];
+ var bufferIndex = attribute.IsZero ? 0 : attribute.BufferIndex + 1;
+
+ pipeline.Internal.VertexAttributeDescriptions[i] = new VertexInputAttributeDescription(
+ (uint)i,
+ (uint)bufferIndex,
+ gd.FormatCapabilities.ConvertToVertexVkFormat(attribute.Format),
+ (uint)attribute.Offset);
+
+ if (!attribute.IsZero && bufferIndex < vbCount)
+ {
+ // Track the largest attribute scalar size per buffer for stride alignment.
+ vbScalarSizes[bufferIndex - 1] = Math.Max(attribute.Format.GetScalarSize(), vbScalarSizes[bufferIndex - 1]);
+ }
+ }
+
+ int descriptorIndex = 1;
+ pipeline.Internal.VertexBindingDescriptions[0] = new VertexInputBindingDescription(0, 0, VertexInputRate.Vertex);
+
+ for (int i = 0; i < vbCount; i++)
+ {
+ var vertexBuffer = state.VertexBuffers[i];
+
+ if (vertexBuffer.Enable)
+ {
+ var inputRate = vertexBuffer.Divisor != 0 ? VertexInputRate.Instance : VertexInputRate.Vertex;
+
+ int alignedStride = vertexBuffer.Stride;
+
+ if (gd.NeedsVertexBufferAlignment(vbScalarSizes[i], out int alignment))
+ {
+ alignedStride = BitUtils.AlignUp(vertexBuffer.Stride, alignment);
+ }
+
+ // TODO: Support divisor > 1
+ pipeline.Internal.VertexBindingDescriptions[descriptorIndex++] = new VertexInputBindingDescription(
+ (uint)i + 1,
+ (uint)alignedStride,
+ inputRate);
+ }
+ }
+
+ pipeline.VertexBindingDescriptionsCount = (uint)descriptorIndex;
+
+ // NOTE: Viewports, Scissors are dynamic.
+
+ for (int i = 0; i < Constants.MaxRenderTargets; i++)
+ {
+ var blend = state.BlendDescriptors[i];
+
+ if (blend.Enable && state.ColorWriteMask[i] != 0)
+ {
+ pipeline.Internal.ColorBlendAttachmentState[i] = new PipelineColorBlendAttachmentState(
+ blend.Enable,
+ blend.ColorSrcFactor.Convert(),
+ blend.ColorDstFactor.Convert(),
+ blend.ColorOp.Convert(),
+ blend.AlphaSrcFactor.Convert(),
+ blend.AlphaDstFactor.Convert(),
+ blend.AlphaOp.Convert(),
+ (ColorComponentFlags)state.ColorWriteMask[i]);
+ }
+ else
+ {
+ pipeline.Internal.ColorBlendAttachmentState[i] = new PipelineColorBlendAttachmentState(
+ colorWriteMask: (ColorComponentFlags)state.ColorWriteMask[i]);
+ }
+ }
+
+ int attachmentCount = 0;
+ int maxColorAttachmentIndex = -1;
+
+ for (int i = 0; i < Constants.MaxRenderTargets; i++)
+ {
+ if (state.AttachmentEnable[i])
+ {
+ pipeline.Internal.AttachmentFormats[attachmentCount++] = gd.FormatCapabilities.ConvertToVkFormat(state.AttachmentFormats[i]);
+ maxColorAttachmentIndex = i;
+ }
+ }
+
+ if (state.DepthStencilEnable)
+ {
+ pipeline.Internal.AttachmentFormats[attachmentCount++] = gd.FormatCapabilities.ConvertToVkFormat(state.DepthStencilFormat);
+ }
+
+ pipeline.ColorBlendAttachmentStateCount = (uint)(maxColorAttachmentIndex + 1);
+ pipeline.VertexAttributeDescriptionsCount = (uint)Math.Min(Constants.MaxVertexAttributes, state.VertexAttribCount);
+
+ return pipeline;
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineDynamicState.cs b/src/Ryujinx.Graphics.Vulkan/PipelineDynamicState.cs
new file mode 100644
index 00000000..42ea022a
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineDynamicState.cs
@@ -0,0 +1,170 @@
+using Ryujinx.Common.Memory;
+using Silk.NET.Vulkan;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Accumulates Vulkan dynamic state (blend constants, depth bias, scissors,
+ /// stencil masks, viewports) and replays only the dirty portions onto a
+ /// command buffer on demand.
+ /// </summary>
+ struct PipelineDynamicState
+ {
+ private float _depthBiasSlopeFactor;
+ private float _depthBiasConstantFactor;
+ private float _depthBiasClamp;
+
+ public int ScissorsCount;
+ private Array16<Rect2D> _scissors;
+
+ private uint _backCompareMask;
+ private uint _backWriteMask;
+ private uint _backReference;
+ private uint _frontCompareMask;
+ private uint _frontWriteMask;
+ private uint _frontReference;
+
+ private Array4<float> _blendConstants;
+
+ public uint ViewportsCount;
+ public Array16<Viewport> Viewports;
+
+ // Bitmask of dynamic state categories that must be re-recorded.
+ private enum DirtyFlags
+ {
+ None = 0,
+ Blend = 1 << 0,
+ DepthBias = 1 << 1,
+ Scissor = 1 << 2,
+ Stencil = 1 << 3,
+ Viewport = 1 << 4,
+ All = Blend | DepthBias | Scissor | Stencil | Viewport
+ }
+
+ private DirtyFlags _dirty;
+
+ /// <summary>
+ /// Stores new RGBA blend constants and marks them dirty.
+ /// </summary>
+ public void SetBlendConstants(float r, float g, float b, float a)
+ {
+ _blendConstants[0] = r;
+ _blendConstants[1] = g;
+ _blendConstants[2] = b;
+ _blendConstants[3] = a;
+
+ _dirty |= DirtyFlags.Blend;
+ }
+
+ /// <summary>
+ /// Stores new depth bias factors and marks them dirty.
+ /// </summary>
+ public void SetDepthBias(float slopeFactor, float constantFactor, float clamp)
+ {
+ _depthBiasSlopeFactor = slopeFactor;
+ _depthBiasConstantFactor = constantFactor;
+ _depthBiasClamp = clamp;
+
+ _dirty |= DirtyFlags.DepthBias;
+ }
+
+ /// <summary>
+ /// Stores a scissor rectangle at the given index and marks scissors dirty.
+ /// </summary>
+ public void SetScissor(int index, Rect2D scissor)
+ {
+ _scissors[index] = scissor;
+
+ _dirty |= DirtyFlags.Scissor;
+ }
+
+ /// <summary>
+ /// Stores front and back stencil compare/write masks and references and
+ /// marks the stencil state dirty.
+ /// </summary>
+ public void SetStencilMasks(
+ uint backCompareMask,
+ uint backWriteMask,
+ uint backReference,
+ uint frontCompareMask,
+ uint frontWriteMask,
+ uint frontReference)
+ {
+ _backCompareMask = backCompareMask;
+ _backWriteMask = backWriteMask;
+ _backReference = backReference;
+ _frontCompareMask = frontCompareMask;
+ _frontWriteMask = frontWriteMask;
+ _frontReference = frontReference;
+
+ _dirty |= DirtyFlags.Stencil;
+ }
+
+ /// <summary>
+ /// Stores a viewport at the given index and marks viewports dirty.
+ /// </summary>
+ public void SetViewport(int index, Viewport viewport)
+ {
+ Viewports[index] = viewport;
+
+ _dirty |= DirtyFlags.Viewport;
+ }
+
+ /// <summary>
+ /// Replaces the whole viewport array and count, marking viewports dirty
+ /// only when the count is non-zero.
+ /// </summary>
+ public void SetViewports(ref Array16<Viewport> viewports, uint viewportsCount)
+ {
+ Viewports = viewports;
+ ViewportsCount = viewportsCount;
+
+ if (ViewportsCount != 0)
+ {
+ _dirty |= DirtyFlags.Viewport;
+ }
+ }
+
+ /// <summary>
+ /// Marks every dynamic state category dirty, e.g. after a command buffer change.
+ /// </summary>
+ public void ForceAllDirty()
+ {
+ _dirty = DirtyFlags.All;
+ }
+
+ /// <summary>
+ /// Records the Vulkan set-commands for every dirty category, then clears
+ /// the dirty flags.
+ /// </summary>
+ public void ReplayIfDirty(Vk api, CommandBuffer commandBuffer)
+ {
+ if (_dirty.HasFlag(DirtyFlags.Blend))
+ {
+ RecordBlend(api, commandBuffer);
+ }
+
+ if (_dirty.HasFlag(DirtyFlags.DepthBias))
+ {
+ RecordDepthBias(api, commandBuffer);
+ }
+
+ if (_dirty.HasFlag(DirtyFlags.Scissor))
+ {
+ RecordScissor(api, commandBuffer);
+ }
+
+ if (_dirty.HasFlag(DirtyFlags.Stencil))
+ {
+ RecordStencilMasks(api, commandBuffer);
+ }
+
+ if (_dirty.HasFlag(DirtyFlags.Viewport))
+ {
+ RecordViewport(api, commandBuffer);
+ }
+
+ _dirty = DirtyFlags.None;
+ }
+
+ private void RecordBlend(Vk api, CommandBuffer commandBuffer)
+ {
+ api.CmdSetBlendConstants(commandBuffer, _blendConstants.AsSpan());
+ }
+
+ private void RecordDepthBias(Vk api, CommandBuffer commandBuffer)
+ {
+ api.CmdSetDepthBias(commandBuffer, _depthBiasConstantFactor, _depthBiasClamp, _depthBiasSlopeFactor);
+ }
+
+ private void RecordScissor(Vk api, CommandBuffer commandBuffer)
+ {
+ api.CmdSetScissor(commandBuffer, 0, (uint)ScissorsCount, _scissors.AsSpan());
+ }
+
+ private void RecordStencilMasks(Vk api, CommandBuffer commandBuffer)
+ {
+ api.CmdSetStencilCompareMask(commandBuffer, StencilFaceFlags.FaceBackBit, _backCompareMask);
+ api.CmdSetStencilWriteMask(commandBuffer, StencilFaceFlags.FaceBackBit, _backWriteMask);
+ api.CmdSetStencilReference(commandBuffer, StencilFaceFlags.FaceBackBit, _backReference);
+ api.CmdSetStencilCompareMask(commandBuffer, StencilFaceFlags.FaceFrontBit, _frontCompareMask);
+ api.CmdSetStencilWriteMask(commandBuffer, StencilFaceFlags.FaceFrontBit, _frontWriteMask);
+ api.CmdSetStencilReference(commandBuffer, StencilFaceFlags.FaceFrontBit, _frontReference);
+ }
+
+ private void RecordViewport(Vk api, CommandBuffer commandBuffer)
+ {
+ // Recording zero viewports is invalid; skip when none are set.
+ if (ViewportsCount != 0)
+ {
+ api.CmdSetViewport(commandBuffer, 0, ViewportsCount, Viewports.AsSpan());
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineFull.cs b/src/Ryujinx.Graphics.Vulkan/PipelineFull.cs
new file mode 100644
index 00000000..8026103e
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineFull.cs
@@ -0,0 +1,314 @@
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Vulkan.Queries;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+
+namespace Ryujinx.Graphics.Vulkan
+{
    /// <summary>
    /// Full pipeline implementation used for guest GPU emulation. Extends
    /// <see cref="PipelineBase"/> with command buffer flush heuristics, host
    /// query (occlusion/counter) management and host conditional rendering.
    /// </summary>
    class PipelineFull : PipelineBase, IPipeline
    {
        // If resources disposed while still referenced by the current command
        // buffer exceed this many bytes, the buffer is force-submitted so the
        // memory can actually be freed (256 MiB).
        private const ulong MinByteWeightForFlush = 256 * 1024 * 1024; // MiB

        // Queries that have begun but not yet ended on the current command buffer,
        // paired with a flag indicating whether the query is an occlusion query.
        private readonly List<(QueryPool, bool)> _activeQueries;
        private CounterQueueEvent _activeConditionalRender;

        // Queries whose results must be copied to their backing buffers once the
        // current render pass ends.
        private readonly List<BufferedQuery> _pendingQueryCopies;

        // Accumulated size in bytes of disposed resources still referenced by the
        // current command buffer. Reset on flush.
        private ulong _byteWeight;

        // Buffers waiting for an opportunity to swap their backing allocation.
        private List<BufferHolder> _backingSwaps;

        public PipelineFull(VulkanRenderer gd, Device device) : base(gd, device)
        {
            _activeQueries = new List<(QueryPool, bool)>();
            _pendingQueryCopies = new();
            _backingSwaps = new();

            // Rent the initial command buffer this pipeline records into.
            CommandBuffer = (Cbs = gd.CommandBufferPool.Rent()).CommandBuffer;
        }

        // Copies the results of all pending queries into their backing buffers.
        // Must be called outside a render pass (see SignalRenderPassEnd).
        private void CopyPendingQuery()
        {
            foreach (var query in _pendingQueryCopies)
            {
                query.PoolCopy(Cbs);
            }

            _pendingQueryCopies.Clear();
        }

        /// <summary>
        /// Clears a color render target, falling back to a helper shader draw when
        /// only a subset of the color components should be written.
        /// </summary>
        public void ClearRenderTargetColor(int index, int layer, int layerCount, uint componentMask, ColorF color)
        {
            if (FramebufferParams == null)
            {
                return;
            }

            if (componentMask != 0xf)
            {
                // We can't use CmdClearAttachments if not writing all components,
                // because on Vulkan, the pipeline state does not affect clears.
                var dstTexture = FramebufferParams.GetAttachment(index);
                if (dstTexture == null)
                {
                    return;
                }

                Span<float> clearColor = stackalloc float[4];
                clearColor[0] = color.Red;
                clearColor[1] = color.Green;
                clearColor[2] = color.Blue;
                clearColor[3] = color.Alpha;

                // TODO: Clear only the specified layer.
                Gd.HelperShader.Clear(
                    Gd,
                    dstTexture,
                    clearColor,
                    componentMask,
                    (int)FramebufferParams.Width,
                    (int)FramebufferParams.Height,
                    FramebufferParams.AttachmentFormats[index],
                    FramebufferParams.GetAttachmentComponentType(index),
                    ClearScissor);
            }
            else
            {
                // Full component write, the base fast path can be used.
                ClearRenderTargetColor(index, layer, layerCount, color);
            }
        }

        /// <summary>
        /// Ends host conditional rendering, releasing host access to the counter
        /// event that gated it, if any.
        /// </summary>
        public void EndHostConditionalRendering()
        {
            if (Gd.Capabilities.SupportsConditionalRendering)
            {
                // NOTE(review): the extension call is currently disabled; conditional
                // rendering is effectively a no-op on the host side here.
                // Gd.ConditionalRenderingApi.CmdEndConditionalRendering(CommandBuffer);
            }
            else
            {
                // throw new NotSupportedException();
            }

            _activeConditionalRender?.ReleaseHostAccess();
            _activeConditionalRender = null;
        }

        /// <summary>
        /// Tries to start host conditional rendering comparing a counter event
        /// against a constant value.
        /// </summary>
        /// <returns>True if the condition will be evaluated on the host, false if the caller must fall back to CPU evaluation</returns>
        public bool TryHostConditionalRendering(ICounterEvent value, ulong compare, bool isEqual)
        {
            // Compare an event and a constant value.
            if (value is CounterQueueEvent evt)
            {
                // Easy host conditional rendering when the check matches what GL can do:
                // - Event is of type samples passed.
                // - Result is not a combination of multiple queries.
                // - Comparing against 0.
                // - Event has not already been flushed.

                if (compare == 0 && evt.Type == CounterType.SamplesPassed && evt.ClearCounter)
                {
                    if (!value.ReserveForHostAccess())
                    {
                        // If the event has been flushed, then just use the values on the CPU.
                        // The query object may already be repurposed for another draw (eg. begin + end).
                        return false;
                    }

                    if (Gd.Capabilities.SupportsConditionalRendering)
                    {
                        var buffer = evt.GetBuffer().Get(Cbs, 0, sizeof(long)).Value;
                        var flags = isEqual ? ConditionalRenderingFlagsEXT.InvertedBitExt : 0;

                        var conditionalRenderingBeginInfo = new ConditionalRenderingBeginInfoEXT()
                        {
                            SType = StructureType.ConditionalRenderingBeginInfoExt,
                            Buffer = buffer,
                            Flags = flags
                        };

                        // Gd.ConditionalRenderingApi.CmdBeginConditionalRendering(CommandBuffer, conditionalRenderingBeginInfo);
                    }

                    _activeConditionalRender = evt;
                    return true;
                }
            }

            // The GPU will flush the queries to CPU and evaluate the condition there instead.

            FlushPendingQuery(); // The thread will be stalled manually flushing the counter, so flush commands now.
            return false;
        }

        /// <summary>
        /// Comparing two counter events is never supported on the host; the
        /// condition is always evaluated on the CPU.
        /// </summary>
        public bool TryHostConditionalRendering(ICounterEvent value, ICounterEvent compare, bool isEqual)
        {
            FlushPendingQuery(); // The thread will be stalled manually flushing the counter, so flush commands now.
            return false;
        }

        // Flushes commands early when the auto-flush heuristic indicates a query
        // result is about to be waited on.
        private void FlushPendingQuery()
        {
            if (AutoFlush.ShouldFlushQuery())
            {
                FlushCommandsImpl();
            }
        }

        /// <summary>
        /// Gets (lazily renting) the command buffer used for resource preloads,
        /// which is submitted before the main command buffer.
        /// </summary>
        public CommandBufferScoped GetPreloadCommandBuffer()
        {
            if (PreloadCbs == null)
            {
                PreloadCbs = Gd.CommandBufferPool.Rent();
            }

            return PreloadCbs.Value;
        }

        /// <summary>
        /// Tracks the memory of a disposed resource that may still be in use by the
        /// current command buffers, flushing them if too much such memory accumulates.
        /// </summary>
        public void FlushCommandsIfWeightExceeding(IAuto disposedResource, ulong byteWeight)
        {
            bool usedByCurrentCb = disposedResource.HasCommandBufferDependency(Cbs);

            if (PreloadCbs != null && !usedByCurrentCb)
            {
                usedByCurrentCb = disposedResource.HasCommandBufferDependency(PreloadCbs.Value);
            }

            if (usedByCurrentCb)
            {
                // Since we can only free memory after the command buffer that uses a given resource was executed,
                // keeping the command buffer might cause a high amount of memory to be in use.
                // To prevent that, we force submit command buffers if the memory usage by resources
                // in use by the current command buffer is above a given limit, and those resources were disposed.
                _byteWeight += byteWeight;

                if (_byteWeight >= MinByteWeightForFlush)
                {
                    FlushCommandsImpl();
                }
            }
        }

        // Attempts all queued backing swaps; holders that succeed are removed.
        // A temporary command buffer is rented only if some swap needs one.
        private void TryBackingSwaps()
        {
            CommandBufferScoped? cbs = null;

            _backingSwaps.RemoveAll((holder) => holder.TryBackingSwap(ref cbs));

            cbs?.Dispose();
        }

        /// <summary>
        /// Queues a buffer for a backing memory swap attempt on the next flush.
        /// </summary>
        public void AddBackingSwap(BufferHolder holder)
        {
            _backingSwaps.Add(holder);
        }

        /// <summary>
        /// Restores per-command-buffer state (bound pipeline and dirty dynamic
        /// state) after the command buffer changed.
        /// </summary>
        public void Restore()
        {
            if (Pipeline != null)
            {
                Gd.Api.CmdBindPipeline(CommandBuffer, Pbp, Pipeline.Get(Cbs).Value);
            }

            SignalCommandBufferChange();

            DynamicState.ReplayIfDirty(Gd.Api, CommandBuffer);
        }

        /// <summary>
        /// Submits the current command buffer and rents a fresh one, suspending and
        /// resuming any active queries across the boundary.
        /// </summary>
        public void FlushCommandsImpl()
        {
            AutoFlush.RegisterFlush(DrawCount);
            EndRenderPass();

            // Queries cannot span command buffers; end them before submission.
            foreach ((var queryPool, _) in _activeQueries)
            {
                Gd.Api.CmdEndQuery(CommandBuffer, queryPool, 0);
            }

            _byteWeight = 0;

            if (PreloadCbs != null)
            {
                PreloadCbs.Value.Dispose();
                PreloadCbs = null;
            }

            CommandBuffer = (Cbs = Gd.CommandBufferPool.ReturnAndRent(Cbs)).CommandBuffer;
            Gd.RegisterFlush();

            // Restore per-command buffer state.

            foreach ((var queryPool, var isOcclusion) in _activeQueries)
            {
                bool isPrecise = Gd.Capabilities.SupportsPreciseOcclusionQueries && isOcclusion;

                Gd.Api.CmdResetQueryPool(CommandBuffer, queryPool, 0, 1);
                Gd.Api.CmdBeginQuery(CommandBuffer, queryPool, 0, isPrecise ? QueryControlFlags.PreciseBit : 0);
            }

            Gd.ResetCounterPool();

            TryBackingSwaps();

            Restore();
        }

        /// <summary>
        /// Begins a query on the current command buffer, optionally resetting the
        /// pool first, and registers it as active so it survives flushes.
        /// </summary>
        public void BeginQuery(BufferedQuery query, QueryPool pool, bool needsReset, bool isOcclusion, bool fromSamplePool)
        {
            if (needsReset)
            {
                // Resets must happen outside a render pass.
                EndRenderPass();

                Gd.Api.CmdResetQueryPool(CommandBuffer, pool, 0, 1);

                if (fromSamplePool)
                {
                    // Try reset some additional queries in advance.

                    Gd.ResetFutureCounters(CommandBuffer, AutoFlush.GetRemainingQueries());
                }
            }

            bool isPrecise = Gd.Capabilities.SupportsPreciseOcclusionQueries && isOcclusion;
            Gd.Api.CmdBeginQuery(CommandBuffer, pool, 0, isPrecise ? QueryControlFlags.PreciseBit : 0);

            _activeQueries.Add((pool, isOcclusion));
        }

        /// <summary>
        /// Ends a query and removes it from the active list.
        /// </summary>
        public void EndQuery(QueryPool pool)
        {
            Gd.Api.CmdEndQuery(CommandBuffer, pool, 0);

            // Match by pool handle; only the first matching entry is removed.
            for (int i = 0; i < _activeQueries.Count; i++)
            {
                if (_activeQueries[i].Item1.Handle == pool.Handle)
                {
                    _activeQueries.RemoveAt(i);
                    break;
                }
            }
        }

        /// <summary>
        /// Queues a query result copy for the end of the current render pass,
        /// flushing immediately if the auto-flush heuristic asks for it.
        /// </summary>
        public void CopyQueryResults(BufferedQuery query)
        {
            _pendingQueryCopies.Add(query);

            if (AutoFlush.RegisterPendingQuery())
            {
                FlushCommandsImpl();
            }
        }

        protected override void SignalAttachmentChange()
        {
            if (AutoFlush.ShouldFlushAttachmentChange(DrawCount))
            {
                FlushCommandsImpl();
            }
        }

        protected override void SignalRenderPassEnd()
        {
            CopyPendingQuery();
        }
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineHelperShader.cs b/src/Ryujinx.Graphics.Vulkan/PipelineHelperShader.cs
new file mode 100644
index 00000000..b31b72a1
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineHelperShader.cs
@@ -0,0 +1,59 @@
+using Silk.NET.Vulkan;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class PipelineHelperShader : PipelineBase
+ {
+ public PipelineHelperShader(VulkanRenderer gd, Device device) : base(gd, device)
+ {
+ }
+
+ public void SetRenderTarget(Auto<DisposableImageView> view, uint width, uint height, bool isDepthStencil, VkFormat format)
+ {
+ SetRenderTarget(view, width, height, 1u, isDepthStencil, format);
+ }
+
+ public void SetRenderTarget(Auto<DisposableImageView> view, uint width, uint height, uint samples, bool isDepthStencil, VkFormat format)
+ {
+ CreateFramebuffer(view, width, height, samples, isDepthStencil, format);
+ CreateRenderPass();
+ SignalStateChange();
+ }
+
+ private void CreateFramebuffer(Auto<DisposableImageView> view, uint width, uint height, uint samples, bool isDepthStencil, VkFormat format)
+ {
+ FramebufferParams = new FramebufferParams(Device, view, width, height, samples, isDepthStencil, format);
+ UpdatePipelineAttachmentFormats();
+ }
+
+ public void SetCommandBuffer(CommandBufferScoped cbs)
+ {
+ CommandBuffer = (Cbs = cbs).CommandBuffer;
+
+ // Restore per-command buffer state.
+
+ if (Pipeline != null)
+ {
+ Gd.Api.CmdBindPipeline(CommandBuffer, Pbp, Pipeline.Get(CurrentCommandBuffer).Value);
+ }
+
+ SignalCommandBufferChange();
+ }
+
+ public void Finish()
+ {
+ EndRenderPass();
+ }
+
+ public void Finish(VulkanRenderer gd, CommandBufferScoped cbs)
+ {
+ Finish();
+
+ if (gd.PipelineInternal.IsCommandBufferActive(cbs.CommandBuffer))
+ {
+ gd.PipelineInternal.Restore();
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineLayoutCache.cs b/src/Ryujinx.Graphics.Vulkan/PipelineLayoutCache.cs
new file mode 100644
index 00000000..c834fa62
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineLayoutCache.cs
@@ -0,0 +1,58 @@
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System.Collections.Generic;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class PipelineLayoutCache
+ {
+ private readonly PipelineLayoutCacheEntry[] _plce;
+ private readonly List<PipelineLayoutCacheEntry> _plceMinimal;
+
+ public PipelineLayoutCache()
+ {
+ _plce = new PipelineLayoutCacheEntry[1 << Constants.MaxShaderStages];
+ _plceMinimal = new List<PipelineLayoutCacheEntry>();
+ }
+
+ public PipelineLayoutCacheEntry Create(VulkanRenderer gd, Device device, ShaderSource[] shaders)
+ {
+ var plce = new PipelineLayoutCacheEntry(gd, device, shaders);
+ _plceMinimal.Add(plce);
+ return plce;
+ }
+
+ public PipelineLayoutCacheEntry GetOrCreate(VulkanRenderer gd, Device device, uint stages, bool usePd)
+ {
+ if (_plce[stages] == null)
+ {
+ _plce[stages] = new PipelineLayoutCacheEntry(gd, device, stages, usePd);
+ }
+
+ return _plce[stages];
+ }
+
+ protected virtual unsafe void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ for (int i = 0; i < _plce.Length; i++)
+ {
+ _plce[i]?.Dispose();
+ }
+
+ foreach (var plce in _plceMinimal)
+ {
+ plce.Dispose();
+ }
+
+ _plceMinimal.Clear();
+ }
+ }
+
+ public void Dispose()
+ {
+ Dispose(true);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineLayoutCacheEntry.cs b/src/Ryujinx.Graphics.Vulkan/PipelineLayoutCacheEntry.cs
new file mode 100644
index 00000000..2c966115
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineLayoutCacheEntry.cs
@@ -0,0 +1,112 @@
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System.Collections.Generic;
+
+namespace Ryujinx.Graphics.Vulkan
+{
    /// <summary>
    /// A cached pipeline layout together with its descriptor set layouts and a
    /// per-command-buffer cache of descriptor set collections that are reused
    /// once their command buffer completes.
    /// </summary>
    class PipelineLayoutCacheEntry
    {
        private readonly VulkanRenderer _gd;
        private readonly Device _device;

        public DescriptorSetLayout[] DescriptorSetLayouts { get; }
        public PipelineLayout PipelineLayout { get; }

        // Descriptor set collections, cached per command buffer index, then per
        // descriptor set (layout) index.
        private readonly List<Auto<DescriptorSetCollection>>[][] _dsCache;
        // Next reusable index into each per-set cache list.
        private readonly int[] _dsCacheCursor;
        // Command buffer index the cursors are currently valid for.
        private int _dsLastCbIndex;

        // Shared constructor; layouts are filled in by the public constructors.
        private PipelineLayoutCacheEntry(VulkanRenderer gd, Device device)
        {
            _gd = gd;
            _device = device;

            _dsCache = new List<Auto<DescriptorSetCollection>>[CommandBufferPool.MaxCommandBuffers][];

            for (int i = 0; i < CommandBufferPool.MaxCommandBuffers; i++)
            {
                _dsCache[i] = new List<Auto<DescriptorSetCollection>>[PipelineBase.DescriptorSetLayouts];

                for (int j = 0; j < PipelineBase.DescriptorSetLayouts; j++)
                {
                    _dsCache[i][j] = new List<Auto<DescriptorSetCollection>>();
                }
            }

            _dsCacheCursor = new int[PipelineBase.DescriptorSetLayouts];
        }

        /// <summary>
        /// Creates an entry with the standard full layout for the given stage mask.
        /// </summary>
        public PipelineLayoutCacheEntry(VulkanRenderer gd, Device device, uint stages, bool usePd) : this(gd, device)
        {
            DescriptorSetLayouts = PipelineLayoutFactory.Create(gd, device, stages, usePd, out var pipelineLayout);
            PipelineLayout = pipelineLayout;
        }

        /// <summary>
        /// Creates an entry with a minimal layout derived from explicit shader bindings.
        /// </summary>
        public PipelineLayoutCacheEntry(VulkanRenderer gd, Device device, ShaderSource[] shaders) : this(gd, device)
        {
            DescriptorSetLayouts = PipelineLayoutFactory.CreateMinimal(gd, device, shaders, out var pipelineLayout);
            PipelineLayout = pipelineLayout;
        }

        /// <summary>
        /// Gets a descriptor set collection for the given command buffer and set
        /// index, reusing a previously allocated one when possible.
        /// </summary>
        /// <param name="isNew">True when the collection was freshly allocated and must be fully written before use</param>
        public Auto<DescriptorSetCollection> GetNewDescriptorSetCollection(
            VulkanRenderer gd,
            int commandBufferIndex,
            int setIndex,
            out bool isNew)
        {
            if (_dsLastCbIndex != commandBufferIndex)
            {
                _dsLastCbIndex = commandBufferIndex;

                // Moved to a different command buffer; restart reuse from the
                // beginning of each cache list.
                for (int i = 0; i < PipelineBase.DescriptorSetLayouts; i++)
                {
                    _dsCacheCursor[i] = 0;
                }
            }

            var list = _dsCache[commandBufferIndex][setIndex];
            int index = _dsCacheCursor[setIndex]++;
            if (index == list.Count)
            {
                // Cache exhausted for this set; allocate and remember a new collection.
                var dsc = gd.DescriptorSetManager.AllocateDescriptorSet(gd.Api, DescriptorSetLayouts[setIndex]);
                list.Add(dsc);
                isNew = true;
                return dsc;
            }

            isNew = false;
            return list[index];
        }

        protected virtual unsafe void Dispose(bool disposing)
        {
            if (disposing)
            {
                // Dispose every cached descriptor set collection first, then the
                // layouts they were allocated from.
                for (int i = 0; i < _dsCache.Length; i++)
                {
                    for (int j = 0; j < _dsCache[i].Length; j++)
                    {
                        for (int k = 0; k < _dsCache[i][j].Count; k++)
                        {
                            _dsCache[i][j][k].Dispose();
                        }

                        _dsCache[i][j].Clear();
                    }
                }

                _gd.Api.DestroyPipelineLayout(_device, PipelineLayout, null);

                for (int i = 0; i < DescriptorSetLayouts.Length; i++)
                {
                    _gd.Api.DestroyDescriptorSetLayout(_device, DescriptorSetLayouts[i], null);
                }
            }
        }

        public void Dispose()
        {
            Dispose(true);
        }
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineLayoutFactory.cs b/src/Ryujinx.Graphics.Vulkan/PipelineLayoutFactory.cs
new file mode 100644
index 00000000..96b3b3b1
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineLayoutFactory.cs
@@ -0,0 +1,244 @@
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System.Collections.Generic;
+using System.Numerics;
+
+namespace Ryujinx.Graphics.Vulkan
+{
    /// <summary>
    /// Builds the Vulkan descriptor set layouts and pipeline layouts used by the
    /// backend. Four descriptor sets are used: uniform buffers, storage buffers,
    /// textures and images (see PipelineBase.*SetIndex).
    /// </summary>
    static class PipelineLayoutFactory
    {
        // Stages that can read the renderer's support buffer (binding 0 of the
        // uniform set).
        private const ShaderStageFlags SupportBufferStages =
            ShaderStageFlags.VertexBit |
            ShaderStageFlags.FragmentBit |
            ShaderStageFlags.ComputeBit;

        /// <summary>
        /// Creates the full (maximum-size) descriptor set layouts and pipeline
        /// layout for the given mask of active shader stages.
        /// </summary>
        /// <param name="stages">Bit mask of active stages (bit per stage index)</param>
        /// <param name="usePd">True to create the uniform set with push descriptor support</param>
        /// <param name="layout">The created pipeline layout</param>
        /// <returns>The descriptor set layouts, indexed by PipelineBase.*SetIndex</returns>
        public static unsafe DescriptorSetLayout[] Create(VulkanRenderer gd, Device device, uint stages, bool usePd, out PipelineLayout layout)
        {
            int stagesCount = BitOperations.PopCount(stages);

            // +1 on the uniform count for the support buffer at binding 0.
            int uCount = Constants.MaxUniformBuffersPerStage * stagesCount + 1;
            int tCount = Constants.MaxTexturesPerStage * 2 * stagesCount;
            int iCount = Constants.MaxImagesPerStage * 2 * stagesCount;

            DescriptorSetLayoutBinding* uLayoutBindings = stackalloc DescriptorSetLayoutBinding[uCount];
            DescriptorSetLayoutBinding* sLayoutBindings = stackalloc DescriptorSetLayoutBinding[stagesCount];
            DescriptorSetLayoutBinding* tLayoutBindings = stackalloc DescriptorSetLayoutBinding[tCount];
            DescriptorSetLayoutBinding* iLayoutBindings = stackalloc DescriptorSetLayoutBinding[iCount];

            // Binding 0 of the uniform set is the support buffer, visible to all
            // supported stages.
            uLayoutBindings[0] = new DescriptorSetLayoutBinding
            {
                Binding = 0,
                DescriptorType = DescriptorType.UniformBuffer,
                DescriptorCount = 1,
                StageFlags = SupportBufferStages
            };

            int iter = 0;

            // Iterate over the set bits of the stage mask, lowest stage first.
            while (stages != 0)
            {
                int stage = BitOperations.TrailingZeroCount(stages);
                stages &= ~(1u << stage);

                var stageFlags = stage switch
                {
                    1 => ShaderStageFlags.FragmentBit,
                    2 => ShaderStageFlags.GeometryBit,
                    3 => ShaderStageFlags.TessellationControlBit,
                    4 => ShaderStageFlags.TessellationEvaluationBit,
                    _ => ShaderStageFlags.VertexBit | ShaderStageFlags.ComputeBit
                };

                // Writes the per-stage bindings of one descriptor type.
                // Note: the array slot uses "iter" (active-stage ordinal) so entries
                // are packed, while Binding uses "stage" so binding numbers match
                // the shader's fixed per-stage layout even with inactive stages.
                void Set(DescriptorSetLayoutBinding* bindings, int maxPerStage, DescriptorType type, int start, int skip)
                {
                    int totalPerStage = maxPerStage * skip;

                    for (int i = 0; i < maxPerStage; i++)
                    {
                        bindings[start + iter * totalPerStage + i] = new DescriptorSetLayoutBinding
                        {
                            Binding = (uint)(start + stage * totalPerStage + i),
                            DescriptorType = type,
                            DescriptorCount = 1,
                            StageFlags = stageFlags
                        };
                    }
                }

                // Storage buffers use a single arrayed binding per stage instead of
                // one binding per buffer.
                void SetStorage(DescriptorSetLayoutBinding* bindings, int maxPerStage, int start = 0)
                {
                    bindings[start + iter] = new DescriptorSetLayoutBinding
                    {
                        Binding = (uint)(start + stage * maxPerStage),
                        DescriptorType = DescriptorType.StorageBuffer,
                        DescriptorCount = (uint)maxPerStage,
                        StageFlags = stageFlags
                    };
                }

                // Texture and image sets interleave two types per stage (skip = 2):
                // sampled + buffer texel variants.
                Set(uLayoutBindings, Constants.MaxUniformBuffersPerStage, DescriptorType.UniformBuffer, 1, 1);
                SetStorage(sLayoutBindings, Constants.MaxStorageBuffersPerStage);
                Set(tLayoutBindings, Constants.MaxTexturesPerStage, DescriptorType.CombinedImageSampler, 0, 2);
                Set(tLayoutBindings, Constants.MaxTexturesPerStage, DescriptorType.UniformTexelBuffer, Constants.MaxTexturesPerStage, 2);
                Set(iLayoutBindings, Constants.MaxImagesPerStage, DescriptorType.StorageImage, 0, 2);
                Set(iLayoutBindings, Constants.MaxImagesPerStage, DescriptorType.StorageTexelBuffer, Constants.MaxImagesPerStage, 2);

                iter++;
            }

            DescriptorSetLayout[] layouts = new DescriptorSetLayout[PipelineBase.DescriptorSetLayouts];

            var uDescriptorSetLayoutCreateInfo = new DescriptorSetLayoutCreateInfo()
            {
                SType = StructureType.DescriptorSetLayoutCreateInfo,
                PBindings = uLayoutBindings,
                BindingCount = (uint)uCount,
                Flags = usePd ? DescriptorSetLayoutCreateFlags.PushDescriptorBitKhr : 0
            };

            var sDescriptorSetLayoutCreateInfo = new DescriptorSetLayoutCreateInfo()
            {
                SType = StructureType.DescriptorSetLayoutCreateInfo,
                PBindings = sLayoutBindings,
                BindingCount = (uint)stagesCount
            };

            var tDescriptorSetLayoutCreateInfo = new DescriptorSetLayoutCreateInfo()
            {
                SType = StructureType.DescriptorSetLayoutCreateInfo,
                PBindings = tLayoutBindings,
                BindingCount = (uint)tCount
            };

            var iDescriptorSetLayoutCreateInfo = new DescriptorSetLayoutCreateInfo()
            {
                SType = StructureType.DescriptorSetLayoutCreateInfo,
                PBindings = iLayoutBindings,
                BindingCount = (uint)iCount
            };

            gd.Api.CreateDescriptorSetLayout(device, uDescriptorSetLayoutCreateInfo, null, out layouts[PipelineBase.UniformSetIndex]).ThrowOnError();
            gd.Api.CreateDescriptorSetLayout(device, sDescriptorSetLayoutCreateInfo, null, out layouts[PipelineBase.StorageSetIndex]).ThrowOnError();
            gd.Api.CreateDescriptorSetLayout(device, tDescriptorSetLayoutCreateInfo, null, out layouts[PipelineBase.TextureSetIndex]).ThrowOnError();
            gd.Api.CreateDescriptorSetLayout(device, iDescriptorSetLayoutCreateInfo, null, out layouts[PipelineBase.ImageSetIndex]).ThrowOnError();

            fixed (DescriptorSetLayout* pLayouts = layouts)
            {
                var pipelineLayoutCreateInfo = new PipelineLayoutCreateInfo()
                {
                    SType = StructureType.PipelineLayoutCreateInfo,
                    PSetLayouts = pLayouts,
                    SetLayoutCount = PipelineBase.DescriptorSetLayouts
                };

                gd.Api.CreatePipelineLayout(device, &pipelineLayoutCreateInfo, null, out layout).ThrowOnError();
            }

            return layouts;
        }

        /// <summary>
        /// Creates descriptor set layouts and a pipeline layout containing only the
        /// bindings actually declared by the given shaders (used by helper shaders).
        /// </summary>
        /// <param name="layout">The created pipeline layout</param>
        /// <returns>The descriptor set layouts, indexed by PipelineBase.*SetIndex</returns>
        public static unsafe DescriptorSetLayout[] CreateMinimal(VulkanRenderer gd, Device device, ShaderSource[] shaders, out PipelineLayout layout)
        {
            int stagesCount = shaders.Length;

            // Count bindings of each kind across all shaders to size the arrays.
            int uCount = 0;
            int sCount = 0;
            int tCount = 0;
            int iCount = 0;

            foreach (var shader in shaders)
            {
                uCount += shader.Bindings.UniformBufferBindings.Count;
                sCount += shader.Bindings.StorageBufferBindings.Count;
                tCount += shader.Bindings.TextureBindings.Count;
                iCount += shader.Bindings.ImageBindings.Count;
            }

            DescriptorSetLayoutBinding* uLayoutBindings = stackalloc DescriptorSetLayoutBinding[uCount];
            DescriptorSetLayoutBinding* sLayoutBindings = stackalloc DescriptorSetLayoutBinding[sCount];
            DescriptorSetLayoutBinding* tLayoutBindings = stackalloc DescriptorSetLayoutBinding[tCount];
            DescriptorSetLayoutBinding* iLayoutBindings = stackalloc DescriptorSetLayoutBinding[iCount];

            int uIndex = 0;
            int sIndex = 0;
            int tIndex = 0;
            int iIndex = 0;

            foreach (var shader in shaders)
            {
                var stageFlags = shader.Stage.Convert();

                // Appends one binding per declared binding number for this shader.
                void Set(DescriptorSetLayoutBinding* bindings, DescriptorType type, ref int start, IEnumerable<int> bds)
                {
                    foreach (var b in bds)
                    {
                        bindings[start++] = new DescriptorSetLayoutBinding
                        {
                            Binding = (uint)b,
                            DescriptorType = type,
                            DescriptorCount = 1,
                            StageFlags = stageFlags
                        };
                    }
                }

                // TODO: Support buffer textures and images here.
                // This is only used for the helper shaders on the backend, and we don't use buffer textures on them
                // so far, so it's not really necessary right now.
                Set(uLayoutBindings, DescriptorType.UniformBuffer, ref uIndex, shader.Bindings.UniformBufferBindings);
                Set(sLayoutBindings, DescriptorType.StorageBuffer, ref sIndex, shader.Bindings.StorageBufferBindings);
                Set(tLayoutBindings, DescriptorType.CombinedImageSampler, ref tIndex, shader.Bindings.TextureBindings);
                Set(iLayoutBindings, DescriptorType.StorageImage, ref iIndex, shader.Bindings.ImageBindings);
            }

            DescriptorSetLayout[] layouts = new DescriptorSetLayout[PipelineBase.DescriptorSetLayouts];

            var uDescriptorSetLayoutCreateInfo = new DescriptorSetLayoutCreateInfo()
            {
                SType = StructureType.DescriptorSetLayoutCreateInfo,
                PBindings = uLayoutBindings,
                BindingCount = (uint)uCount
            };

            var sDescriptorSetLayoutCreateInfo = new DescriptorSetLayoutCreateInfo()
            {
                SType = StructureType.DescriptorSetLayoutCreateInfo,
                PBindings = sLayoutBindings,
                BindingCount = (uint)sCount
            };

            var tDescriptorSetLayoutCreateInfo = new DescriptorSetLayoutCreateInfo()
            {
                SType = StructureType.DescriptorSetLayoutCreateInfo,
                PBindings = tLayoutBindings,
                BindingCount = (uint)tCount
            };

            var iDescriptorSetLayoutCreateInfo = new DescriptorSetLayoutCreateInfo()
            {
                SType = StructureType.DescriptorSetLayoutCreateInfo,
                PBindings = iLayoutBindings,
                BindingCount = (uint)iCount
            };

            gd.Api.CreateDescriptorSetLayout(device, uDescriptorSetLayoutCreateInfo, null, out layouts[PipelineBase.UniformSetIndex]).ThrowOnError();
            gd.Api.CreateDescriptorSetLayout(device, sDescriptorSetLayoutCreateInfo, null, out layouts[PipelineBase.StorageSetIndex]).ThrowOnError();
            gd.Api.CreateDescriptorSetLayout(device, tDescriptorSetLayoutCreateInfo, null, out layouts[PipelineBase.TextureSetIndex]).ThrowOnError();
            gd.Api.CreateDescriptorSetLayout(device, iDescriptorSetLayoutCreateInfo, null, out layouts[PipelineBase.ImageSetIndex]).ThrowOnError();

            fixed (DescriptorSetLayout* pLayouts = layouts)
            {
                var pipelineLayoutCreateInfo = new PipelineLayoutCreateInfo()
                {
                    SType = StructureType.PipelineLayoutCreateInfo,
                    PSetLayouts = pLayouts,
                    SetLayoutCount = PipelineBase.DescriptorSetLayouts
                };

                gd.Api.CreatePipelineLayout(device, &pipelineLayoutCreateInfo, null, out layout).ThrowOnError();
            }

            return layouts;
        }
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineState.cs b/src/Ryujinx.Graphics.Vulkan/PipelineState.cs
new file mode 100644
index 00000000..dccc8ce6
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineState.cs
@@ -0,0 +1,621 @@
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ struct PipelineState : IDisposable
+ {
+ private const int RequiredSubgroupSize = 32;
+
+ public PipelineUid Internal;
+
        // The properties below pack the entire pipeline state into the ulong
        // fields Id0..Id9 of "Internal" (the PipelineUid), so the whole state can
        // be hashed/compared cheaply for pipeline caching. Each getter/setter
        // masks and shifts its bit range; floats are stored via their raw bits.

        // Id0: line width (low 32 bits) and depth bias clamp (high 32 bits).
        public float LineWidth
        {
            get => BitConverter.Int32BitsToSingle((int)((Internal.Id0 >> 0) & 0xFFFFFFFF));
            set => Internal.Id0 = (Internal.Id0 & 0xFFFFFFFF00000000) | ((ulong)(uint)BitConverter.SingleToInt32Bits(value) << 0);
        }

        public float DepthBiasClamp
        {
            get => BitConverter.Int32BitsToSingle((int)((Internal.Id0 >> 32) & 0xFFFFFFFF));
            set => Internal.Id0 = (Internal.Id0 & 0xFFFFFFFF) | ((ulong)(uint)BitConverter.SingleToInt32Bits(value) << 32);
        }

        // Id1: depth bias constant and slope factors.
        public float DepthBiasConstantFactor
        {
            get => BitConverter.Int32BitsToSingle((int)((Internal.Id1 >> 0) & 0xFFFFFFFF));
            set => Internal.Id1 = (Internal.Id1 & 0xFFFFFFFF00000000) | ((ulong)(uint)BitConverter.SingleToInt32Bits(value) << 0);
        }

        public float DepthBiasSlopeFactor
        {
            get => BitConverter.Int32BitsToSingle((int)((Internal.Id1 >> 32) & 0xFFFFFFFF));
            set => Internal.Id1 = (Internal.Id1 & 0xFFFFFFFF) | ((ulong)(uint)BitConverter.SingleToInt32Bits(value) << 32);
        }

        // Id2..Id4: stencil masks and references for front and back faces.
        public uint StencilFrontCompareMask
        {
            get => (uint)((Internal.Id2 >> 0) & 0xFFFFFFFF);
            set => Internal.Id2 = (Internal.Id2 & 0xFFFFFFFF00000000) | ((ulong)value << 0);
        }

        public uint StencilFrontWriteMask
        {
            get => (uint)((Internal.Id2 >> 32) & 0xFFFFFFFF);
            set => Internal.Id2 = (Internal.Id2 & 0xFFFFFFFF) | ((ulong)value << 32);
        }

        public uint StencilFrontReference
        {
            get => (uint)((Internal.Id3 >> 0) & 0xFFFFFFFF);
            set => Internal.Id3 = (Internal.Id3 & 0xFFFFFFFF00000000) | ((ulong)value << 0);
        }

        public uint StencilBackCompareMask
        {
            get => (uint)((Internal.Id3 >> 32) & 0xFFFFFFFF);
            set => Internal.Id3 = (Internal.Id3 & 0xFFFFFFFF) | ((ulong)value << 32);
        }

        public uint StencilBackWriteMask
        {
            get => (uint)((Internal.Id4 >> 0) & 0xFFFFFFFF);
            set => Internal.Id4 = (Internal.Id4 & 0xFFFFFFFF00000000) | ((ulong)value << 0);
        }

        public uint StencilBackReference
        {
            get => (uint)((Internal.Id4 >> 32) & 0xFFFFFFFF);
            set => Internal.Id4 = (Internal.Id4 & 0xFFFFFFFF) | ((ulong)value << 32);
        }

        // Id5: depth bounds range.
        public float MinDepthBounds
        {
            get => BitConverter.Int32BitsToSingle((int)((Internal.Id5 >> 0) & 0xFFFFFFFF));
            set => Internal.Id5 = (Internal.Id5 & 0xFFFFFFFF00000000) | ((ulong)(uint)BitConverter.SingleToInt32Bits(value) << 0);
        }

        public float MaxDepthBounds
        {
            get => BitConverter.Int32BitsToSingle((int)((Internal.Id5 >> 32) & 0xFFFFFFFF));
            set => Internal.Id5 = (Internal.Id5 & 0xFFFFFFFF) | ((ulong)(uint)BitConverter.SingleToInt32Bits(value) << 32);
        }

        // Id6: polygon mode (30 bits) plus five 8-bit counts.
        public PolygonMode PolygonMode
        {
            get => (PolygonMode)((Internal.Id6 >> 0) & 0x3FFFFFFF);
            set => Internal.Id6 = (Internal.Id6 & 0xFFFFFFFFC0000000) | ((ulong)value << 0);
        }

        public uint StagesCount
        {
            get => (byte)((Internal.Id6 >> 30) & 0xFF);
            set => Internal.Id6 = (Internal.Id6 & 0xFFFFFFC03FFFFFFF) | ((ulong)value << 30);
        }

        public uint VertexAttributeDescriptionsCount
        {
            get => (byte)((Internal.Id6 >> 38) & 0xFF);
            set => Internal.Id6 = (Internal.Id6 & 0xFFFFC03FFFFFFFFF) | ((ulong)value << 38);
        }

        public uint VertexBindingDescriptionsCount
        {
            get => (byte)((Internal.Id6 >> 46) & 0xFF);
            set => Internal.Id6 = (Internal.Id6 & 0xFFC03FFFFFFFFFFF) | ((ulong)value << 46);
        }

        public uint ViewportsCount
        {
            get => (byte)((Internal.Id6 >> 54) & 0xFF);
            set => Internal.Id6 = (Internal.Id6 & 0xC03FFFFFFFFFFFFF) | ((ulong)value << 54);
        }

        // Id7: densely packed small enums and boolean flags.
        public uint ScissorsCount
        {
            get => (byte)((Internal.Id7 >> 0) & 0xFF);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFFFFFFFFFF00) | ((ulong)value << 0);
        }

        public uint ColorBlendAttachmentStateCount
        {
            get => (byte)((Internal.Id7 >> 8) & 0xFF);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFFFFFFFF00FF) | ((ulong)value << 8);
        }

        public PrimitiveTopology Topology
        {
            get => (PrimitiveTopology)((Internal.Id7 >> 16) & 0xF);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFFFFFFF0FFFF) | ((ulong)value << 16);
        }

        public LogicOp LogicOp
        {
            get => (LogicOp)((Internal.Id7 >> 20) & 0xF);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFFFFFF0FFFFF) | ((ulong)value << 20);
        }

        public CompareOp DepthCompareOp
        {
            get => (CompareOp)((Internal.Id7 >> 24) & 0x7);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFFFFF8FFFFFF) | ((ulong)value << 24);
        }

        public StencilOp StencilFrontFailOp
        {
            get => (StencilOp)((Internal.Id7 >> 27) & 0x7);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFFFFC7FFFFFF) | ((ulong)value << 27);
        }

        public StencilOp StencilFrontPassOp
        {
            get => (StencilOp)((Internal.Id7 >> 30) & 0x7);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFFFE3FFFFFFF) | ((ulong)value << 30);
        }

        public StencilOp StencilFrontDepthFailOp
        {
            get => (StencilOp)((Internal.Id7 >> 33) & 0x7);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFFF1FFFFFFFF) | ((ulong)value << 33);
        }

        public CompareOp StencilFrontCompareOp
        {
            get => (CompareOp)((Internal.Id7 >> 36) & 0x7);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFF8FFFFFFFFF) | ((ulong)value << 36);
        }

        public StencilOp StencilBackFailOp
        {
            get => (StencilOp)((Internal.Id7 >> 39) & 0x7);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFFC7FFFFFFFFF) | ((ulong)value << 39);
        }

        public StencilOp StencilBackPassOp
        {
            get => (StencilOp)((Internal.Id7 >> 42) & 0x7);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFFE3FFFFFFFFFF) | ((ulong)value << 42);
        }

        public StencilOp StencilBackDepthFailOp
        {
            get => (StencilOp)((Internal.Id7 >> 45) & 0x7);
            set => Internal.Id7 = (Internal.Id7 & 0xFFFF1FFFFFFFFFFF) | ((ulong)value << 45);
        }

        public CompareOp StencilBackCompareOp
        {
            get => (CompareOp)((Internal.Id7 >> 48) & 0x7);
            set => Internal.Id7 = (Internal.Id7 & 0xFFF8FFFFFFFFFFFF) | ((ulong)value << 48);
        }

        public CullModeFlags CullMode
        {
            get => (CullModeFlags)((Internal.Id7 >> 51) & 0x3);
            set => Internal.Id7 = (Internal.Id7 & 0xFFE7FFFFFFFFFFFF) | ((ulong)value << 51);
        }

        public bool PrimitiveRestartEnable
        {
            get => ((Internal.Id7 >> 53) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0xFFDFFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 53);
        }

        public bool DepthClampEnable
        {
            get => ((Internal.Id7 >> 54) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0xFFBFFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 54);
        }

        public bool RasterizerDiscardEnable
        {
            get => ((Internal.Id7 >> 55) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0xFF7FFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 55);
        }

        public FrontFace FrontFace
        {
            get => (FrontFace)((Internal.Id7 >> 56) & 0x1);
            set => Internal.Id7 = (Internal.Id7 & 0xFEFFFFFFFFFFFFFF) | ((ulong)value << 56);
        }

        public bool DepthBiasEnable
        {
            get => ((Internal.Id7 >> 57) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0xFDFFFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 57);
        }

        public bool DepthTestEnable
        {
            get => ((Internal.Id7 >> 58) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0xFBFFFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 58);
        }

        public bool DepthWriteEnable
        {
            get => ((Internal.Id7 >> 59) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0xF7FFFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 59);
        }

        public bool DepthBoundsTestEnable
        {
            get => ((Internal.Id7 >> 60) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0xEFFFFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 60);
        }

        public bool StencilTestEnable
        {
            get => ((Internal.Id7 >> 61) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0xDFFFFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 61);
        }

        public bool LogicOpEnable
        {
            get => ((Internal.Id7 >> 62) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0xBFFFFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 62);
        }

        public bool HasDepthStencil
        {
            get => ((Internal.Id7 >> 63) & 0x1) != 0UL;
            set => Internal.Id7 = (Internal.Id7 & 0x7FFFFFFFFFFFFFFF) | ((value ? 1UL : 0UL) << 63);
        }

        // Id8: tessellation patch control points and sample count.
        public uint PatchControlPoints
        {
            get => (uint)((Internal.Id8 >> 0) & 0xFFFFFFFF);
            set => Internal.Id8 = (Internal.Id8 & 0xFFFFFFFF00000000) | ((ulong)value << 0);
        }

        public uint SamplesCount
        {
            get => (uint)((Internal.Id8 >> 32) & 0xFFFFFFFF);
            set => Internal.Id8 = (Internal.Id8 & 0xFFFFFFFF) | ((ulong)value << 32);
        }

        // Id9: multisample and advanced blend flags.
        public bool AlphaToCoverageEnable
        {
            get => ((Internal.Id9 >> 0) & 0x1) != 0UL;
            set => Internal.Id9 = (Internal.Id9 & 0xFFFFFFFFFFFFFFFE) | ((value ? 1UL : 0UL) << 0);
        }

        public bool AlphaToOneEnable
        {
            get => ((Internal.Id9 >> 1) & 0x1) != 0UL;
            set => Internal.Id9 = (Internal.Id9 & 0xFFFFFFFFFFFFFFFD) | ((value ? 1UL : 0UL) << 1);
        }

        public bool AdvancedBlendSrcPreMultiplied
        {
            get => ((Internal.Id9 >> 2) & 0x1) != 0UL;
            set => Internal.Id9 = (Internal.Id9 & 0xFFFFFFFFFFFFFFFB) | ((value ? 1UL : 0UL) << 2);
        }

        public bool AdvancedBlendDstPreMultiplied
        {
            get => ((Internal.Id9 >> 3) & 0x1) != 0UL;
            set => Internal.Id9 = (Internal.Id9 & 0xFFFFFFFFFFFFFFF7) | ((value ? 1UL : 0UL) << 3);
        }

        public BlendOverlapEXT AdvancedBlendOverlap
        {
            get => (BlendOverlapEXT)((Internal.Id9 >> 4) & 0x3);
            set => Internal.Id9 = (Internal.Id9 & 0xFFFFFFFFFFFFFFCF) | ((ulong)value << 4);
        }

        // Native data referenced by pointer during pipeline creation; these are
        // not part of the hashed/compared state above.
        public NativeArray<PipelineShaderStageCreateInfo> Stages;
        public NativeArray<PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT> StageRequiredSubgroupSizes;
        public PipelineLayout PipelineLayout;
        public SpecData SpecializationData;
+
+ public void Initialize()
+ {
+ Stages = new NativeArray<PipelineShaderStageCreateInfo>(Constants.MaxShaderStages);
+ StageRequiredSubgroupSizes = new NativeArray<PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>(Constants.MaxShaderStages);
+
+ for (int index = 0; index < Constants.MaxShaderStages; index++)
+ {
+ StageRequiredSubgroupSizes[index] = new PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT()
+ {
+ SType = StructureType.PipelineShaderStageRequiredSubgroupSizeCreateInfoExt,
+ RequiredSubgroupSize = RequiredSubgroupSize
+ };
+ }
+
+ AdvancedBlendSrcPreMultiplied = true;
+ AdvancedBlendDstPreMultiplied = true;
+ AdvancedBlendOverlap = BlendOverlapEXT.UncorrelatedExt;
+
+ LineWidth = 1f;
+ SamplesCount = 1;
+ }
+
        /// <summary>
        /// Gets a cached compute pipeline for the given program, or creates one if
        /// no pipeline with the current specialization data exists yet.
        /// </summary>
        /// <param name="gd">Renderer that owns the Vulkan API and capabilities</param>
        /// <param name="device">Vulkan device the pipeline is created on</param>
        /// <param name="program">Shader program; also acts as the pipeline cache key owner</param>
        /// <param name="cache">Vulkan pipeline cache handle</param>
        /// <returns>Reference-counted compute pipeline</returns>
        /// <exception cref="InvalidOperationException">If the specialization data is smaller than the description requires</exception>
        public unsafe Auto<DisposablePipeline> CreateComputePipeline(
            VulkanRenderer gd,
            Device device,
            ShaderCollection program,
            PipelineCache cache)
        {
            // Fast path: reuse a pipeline previously built with identical spec data.
            if (program.TryGetComputePipeline(ref SpecializationData, out var pipeline))
            {
                return pipeline;
            }

            if (gd.Capabilities.SupportsSubgroupSizeControl)
            {
                // Compute uses a single shader stage.
                UpdateStageRequiredSubgroupSizes(gd, 1);
            }

            var pipelineCreateInfo = new ComputePipelineCreateInfo()
            {
                SType = StructureType.ComputePipelineCreateInfo,
                Stage = Stages[0],
                BasePipelineIndex = -1,
                Layout = PipelineLayout
            };

            Pipeline pipelineHandle = default;

            bool hasSpec = program.SpecDescriptions != null;

            var desc = hasSpec ? program.SpecDescriptions[0] : SpecDescription.Empty;

            if (hasSpec && SpecializationData.Length < (int)desc.Info.DataSize)
            {
                throw new InvalidOperationException("Specialization data size does not match description");
            }

            // Pin the specialization info, map entries and data so raw pointers can be
            // handed to the Vulkan call below.
            fixed (SpecializationInfo* info = &desc.Info)
            fixed (SpecializationMapEntry* map = desc.Map)
            fixed (byte* data = SpecializationData.Span)
            {
                if (hasSpec)
                {
                    info->PMapEntries = map;
                    info->PData = data;
                    pipelineCreateInfo.Stage.PSpecializationInfo = info;
                }

                gd.Api.CreateComputePipelines(device, cache, 1, &pipelineCreateInfo, null, &pipelineHandle).ThrowOnError();
            }

            pipeline = new Auto<DisposablePipeline>(new DisposablePipeline(gd.Api, device, pipelineHandle));

            // Register the new pipeline so subsequent calls with the same spec data hit
            // the fast path above.
            program.AddComputePipeline(ref SpecializationData, pipeline);

            return pipeline;
        }
+
        /// <summary>
        /// Gets a cached graphics pipeline matching the current state (the Internal
        /// bit-packed UID), or builds a new one from the state stored on this struct.
        /// </summary>
        /// <param name="gd">Renderer that owns the Vulkan API and capabilities</param>
        /// <param name="device">Vulkan device the pipeline is created on</param>
        /// <param name="program">Shader program; owns the per-program pipeline cache</param>
        /// <param name="cache">Vulkan pipeline cache handle</param>
        /// <param name="renderPass">Render pass the pipeline will be used with</param>
        /// <returns>Reference-counted graphics pipeline</returns>
        public unsafe Auto<DisposablePipeline> CreateGraphicsPipeline(
            VulkanRenderer gd,
            Device device,
            ShaderCollection program,
            PipelineCache cache,
            RenderPass renderPass)
        {
            // Fast path: the full packed state acts as the cache key.
            if (program.TryGetGraphicsPipeline(ref Internal, out var pipeline))
            {
                return pipeline;
            }

            Pipeline pipelineHandle = default;

            // Pin the inline state arrays so their addresses can be stored in the
            // create-info structs for the duration of the Vulkan call.
            fixed (VertexInputAttributeDescription* pVertexAttributeDescriptions = &Internal.VertexAttributeDescriptions[0])
            fixed (VertexInputBindingDescription* pVertexBindingDescriptions = &Internal.VertexBindingDescriptions[0])
            fixed (Viewport* pViewports = &Internal.Viewports[0])
            fixed (Rect2D* pScissors = &Internal.Scissors[0])
            fixed (PipelineColorBlendAttachmentState* pColorBlendAttachmentState = &Internal.ColorBlendAttachmentState[0])
            {
                var vertexInputState = new PipelineVertexInputStateCreateInfo
                {
                    SType = StructureType.PipelineVertexInputStateCreateInfo,
                    VertexAttributeDescriptionCount = VertexAttributeDescriptionsCount,
                    PVertexAttributeDescriptions = pVertexAttributeDescriptions,
                    VertexBindingDescriptionCount = VertexBindingDescriptionsCount,
                    PVertexBindingDescriptions = pVertexBindingDescriptions
                };

                bool primitiveRestartEnable = PrimitiveRestartEnable;

                // Primitive restart is only passed through when the device/topology
                // combination supports it; otherwise it is silently dropped.
                bool topologySupportsRestart;

                if (gd.Capabilities.SupportsPrimitiveTopologyListRestart)
                {
                    topologySupportsRestart = gd.Capabilities.SupportsPrimitiveTopologyPatchListRestart || Topology != PrimitiveTopology.PatchList;
                }
                else
                {
                    // Without the list-restart extension, only strip/fan topologies may restart.
                    topologySupportsRestart = Topology == PrimitiveTopology.LineStrip ||
                                              Topology == PrimitiveTopology.TriangleStrip ||
                                              Topology == PrimitiveTopology.TriangleFan ||
                                              Topology == PrimitiveTopology.LineStripWithAdjacency ||
                                              Topology == PrimitiveTopology.TriangleStripWithAdjacency;
                }

                primitiveRestartEnable &= topologySupportsRestart;

                var inputAssemblyState = new PipelineInputAssemblyStateCreateInfo()
                {
                    SType = StructureType.PipelineInputAssemblyStateCreateInfo,
                    PrimitiveRestartEnable = primitiveRestartEnable,
                    Topology = Topology
                };

                var tessellationState = new PipelineTessellationStateCreateInfo()
                {
                    SType = StructureType.PipelineTessellationStateCreateInfo,
                    PatchControlPoints = PatchControlPoints
                };

                var rasterizationState = new PipelineRasterizationStateCreateInfo()
                {
                    SType = StructureType.PipelineRasterizationStateCreateInfo,
                    DepthClampEnable = DepthClampEnable,
                    RasterizerDiscardEnable = RasterizerDiscardEnable,
                    PolygonMode = PolygonMode,
                    LineWidth = LineWidth,
                    CullMode = CullMode,
                    FrontFace = FrontFace,
                    DepthBiasEnable = DepthBiasEnable,
                    DepthBiasClamp = DepthBiasClamp,
                    DepthBiasConstantFactor = DepthBiasConstantFactor,
                    DepthBiasSlopeFactor = DepthBiasSlopeFactor
                };

                var viewportState = new PipelineViewportStateCreateInfo()
                {
                    SType = StructureType.PipelineViewportStateCreateInfo,
                    ViewportCount = ViewportsCount,
                    PViewports = pViewports,
                    ScissorCount = ScissorsCount,
                    PScissors = pScissors
                };

                // Sample count is clamped/converted to what the device actually supports.
                var multisampleState = new PipelineMultisampleStateCreateInfo
                {
                    SType = StructureType.PipelineMultisampleStateCreateInfo,
                    SampleShadingEnable = false,
                    RasterizationSamples = TextureStorage.ConvertToSampleCountFlags(gd.Capabilities.SupportedSampleCounts, SamplesCount),
                    MinSampleShading = 1,
                    AlphaToCoverageEnable = AlphaToCoverageEnable,
                    AlphaToOneEnable = AlphaToOneEnable
                };

                var stencilFront = new StencilOpState(
                    StencilFrontFailOp,
                    StencilFrontPassOp,
                    StencilFrontDepthFailOp,
                    StencilFrontCompareOp,
                    StencilFrontCompareMask,
                    StencilFrontWriteMask,
                    StencilFrontReference);

                var stencilBack = new StencilOpState(
                    StencilBackFailOp,
                    StencilBackPassOp,
                    StencilBackDepthFailOp,
                    StencilBackCompareOp,
                    StencilBackCompareMask,
                    StencilBackWriteMask,
                    StencilBackReference);

                var depthStencilState = new PipelineDepthStencilStateCreateInfo()
                {
                    SType = StructureType.PipelineDepthStencilStateCreateInfo,
                    DepthTestEnable = DepthTestEnable,
                    DepthWriteEnable = DepthWriteEnable,
                    DepthCompareOp = DepthCompareOp,
                    DepthBoundsTestEnable = DepthBoundsTestEnable,
                    StencilTestEnable = StencilTestEnable,
                    Front = stencilFront,
                    Back = stencilBack,
                    MinDepthBounds = MinDepthBounds,
                    MaxDepthBounds = MaxDepthBounds
                };

                var colorBlendState = new PipelineColorBlendStateCreateInfo()
                {
                    SType = StructureType.PipelineColorBlendStateCreateInfo,
                    LogicOpEnable = LogicOpEnable,
                    LogicOp = LogicOp,
                    AttachmentCount = ColorBlendAttachmentStateCount,
                    PAttachments = pColorBlendAttachmentState
                };

                // The advanced blend struct is only chained in when the state differs
                // from the extension's defaults, to avoid requiring the extension
                // unnecessarily. It must stay alive (stack local) until the create call.
                PipelineColorBlendAdvancedStateCreateInfoEXT colorBlendAdvancedState;

                if (!AdvancedBlendSrcPreMultiplied ||
                    !AdvancedBlendDstPreMultiplied ||
                    AdvancedBlendOverlap != BlendOverlapEXT.UncorrelatedExt)
                {
                    colorBlendAdvancedState = new PipelineColorBlendAdvancedStateCreateInfoEXT()
                    {
                        SType = StructureType.PipelineColorBlendAdvancedStateCreateInfoExt,
                        SrcPremultiplied = AdvancedBlendSrcPreMultiplied,
                        DstPremultiplied = AdvancedBlendDstPreMultiplied,
                        BlendOverlap = AdvancedBlendOverlap
                    };

                    colorBlendState.PNext = &colorBlendAdvancedState;
                }

                // 8 base dynamic states, plus vertex binding stride when the extended
                // dynamic state extension is available.
                bool supportsExtDynamicState = gd.Capabilities.SupportsExtendedDynamicState;
                int dynamicStatesCount = supportsExtDynamicState ? 9 : 8;

                DynamicState* dynamicStates = stackalloc DynamicState[dynamicStatesCount];

                dynamicStates[0] = DynamicState.Viewport;
                dynamicStates[1] = DynamicState.Scissor;
                dynamicStates[2] = DynamicState.DepthBias;
                dynamicStates[3] = DynamicState.DepthBounds;
                dynamicStates[4] = DynamicState.StencilCompareMask;
                dynamicStates[5] = DynamicState.StencilWriteMask;
                dynamicStates[6] = DynamicState.StencilReference;
                dynamicStates[7] = DynamicState.BlendConstants;

                if (supportsExtDynamicState)
                {
                    dynamicStates[8] = DynamicState.VertexInputBindingStrideExt;
                }

                var pipelineDynamicStateCreateInfo = new PipelineDynamicStateCreateInfo()
                {
                    SType = StructureType.PipelineDynamicStateCreateInfo,
                    DynamicStateCount = (uint)dynamicStatesCount,
                    PDynamicStates = dynamicStates
                };

                if (gd.Capabilities.SupportsSubgroupSizeControl)
                {
                    UpdateStageRequiredSubgroupSizes(gd, (int)StagesCount);
                }

                var pipelineCreateInfo = new GraphicsPipelineCreateInfo()
                {
                    SType = StructureType.GraphicsPipelineCreateInfo,
                    StageCount = StagesCount,
                    PStages = Stages.Pointer,
                    PVertexInputState = &vertexInputState,
                    PInputAssemblyState = &inputAssemblyState,
                    PTessellationState = &tessellationState,
                    PViewportState = &viewportState,
                    PRasterizationState = &rasterizationState,
                    PMultisampleState = &multisampleState,
                    PDepthStencilState = &depthStencilState,
                    PColorBlendState = &colorBlendState,
                    PDynamicState = &pipelineDynamicStateCreateInfo,
                    Layout = PipelineLayout,
                    RenderPass = renderPass,
                    BasePipelineIndex = -1
                };

                gd.Api.CreateGraphicsPipelines(device, cache, 1, &pipelineCreateInfo, null, &pipelineHandle).ThrowOnError();
            }

            pipeline = new Auto<DisposablePipeline>(new DisposablePipeline(gd.Api, device, pipelineHandle));

            // Register under the packed UID so identical state hits the cache next time.
            program.AddGraphicsPipeline(ref Internal, pipeline);

            return pipeline;
        }
+
        /// <summary>
        /// Chains the pre-built required-subgroup-size struct into each shader stage's
        /// PNext when the device allows an explicit subgroup size for that stage and
        /// the requested size is within the device's supported range.
        /// </summary>
        /// <param name="gd">Renderer providing the device capabilities</param>
        /// <param name="count">Number of stages to update (1 for compute)</param>
        private unsafe void UpdateStageRequiredSubgroupSizes(VulkanRenderer gd, int count)
        {
            for (int index = 0; index < count; index++)
            {
                bool canUseExplicitSubgroupSize =
                    (gd.Capabilities.RequiredSubgroupSizeStages & Stages[index].Stage) != 0 &&
                    gd.Capabilities.MinSubgroupSize <= RequiredSubgroupSize &&
                    gd.Capabilities.MaxSubgroupSize >= RequiredSubgroupSize;

                // StageRequiredSubgroupSizes is a native array, so index i's struct
                // lives at Pointer + i; unsupported stages get a null PNext chain.
                Stages[index].PNext = canUseExplicitSubgroupSize ? StageRequiredSubgroupSizes.Pointer + index : null;
            }
        }
+
+ public void Dispose()
+ {
+ Stages.Dispose();
+ StageRequiredSubgroupSizes.Dispose();
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/PipelineUid.cs b/src/Ryujinx.Graphics.Vulkan/PipelineUid.cs
new file mode 100644
index 00000000..78d6e9f7
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/PipelineUid.cs
@@ -0,0 +1,129 @@
+using Ryujinx.Common.Memory;
+using Silk.NET.Vulkan;
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics;
+
+namespace Ryujinx.Graphics.Vulkan
+{
    /// <summary>
    /// Bit-packed unique identifier for a graphics pipeline configuration, used as a
    /// hash table key. Id0..Id9 hold the packed scalar state; the inline arrays hold
    /// the variable-length parts, of which only the prefixes indicated by the count
    /// bit-fields participate in equality and hashing.
    /// </summary>
    struct PipelineUid : IRefEquatable<PipelineUid>
    {
        public ulong Id0;
        public ulong Id1;
        public ulong Id2;
        public ulong Id3;

        public ulong Id4;
        public ulong Id5;
        public ulong Id6;
        public ulong Id7;

        public ulong Id8;
        public ulong Id9;

        // Element counts decoded from the Id6/Id7 bit-fields; they bound how much of
        // each inline array below is semantically meaningful.
        private uint VertexAttributeDescriptionsCount => (byte)((Id6 >> 38) & 0xFF);
        private uint VertexBindingDescriptionsCount => (byte)((Id6 >> 46) & 0xFF);
        private uint ViewportsCount => (byte)((Id6 >> 54) & 0xFF);
        private uint ScissorsCount => (byte)((Id7 >> 0) & 0xFF);
        private uint ColorBlendAttachmentStateCount => (byte)((Id7 >> 8) & 0xFF);
        private bool HasDepthStencil => ((Id7 >> 63) & 0x1) != 0UL;

        public Array32<VertexInputAttributeDescription> VertexAttributeDescriptions;
        public Array33<VertexInputBindingDescription> VertexBindingDescriptions;
        public Array16<Viewport> Viewports;
        public Array16<Rect2D> Scissors;
        public Array8<PipelineColorBlendAttachmentState> ColorBlendAttachmentState;
        public Array9<Format> AttachmentFormats;

        public override bool Equals(object obj)
        {
            return obj is PipelineUid other && Equals(other);
        }

        /// <summary>
        /// Structural equality: compares all ten Id words with vector loads, then the
        /// meaningful prefixes of the inline arrays. Viewports/Scissors are not
        /// compared directly (they are dynamic state).
        /// NOTE(review): the vector casts assume Id0..Id9 are laid out contiguously in
        /// declaration order (Id8/Id9 read as one Vector128) — confirm the struct has
        /// sequential layout with no padding between the ulong fields.
        /// </summary>
        public bool Equals(ref PipelineUid other)
        {
            if (!Unsafe.As<ulong, Vector256<byte>>(ref Id0).Equals(Unsafe.As<ulong, Vector256<byte>>(ref other.Id0)) ||
                !Unsafe.As<ulong, Vector256<byte>>(ref Id4).Equals(Unsafe.As<ulong, Vector256<byte>>(ref other.Id4)) ||
                !Unsafe.As<ulong, Vector128<byte>>(ref Id8).Equals(Unsafe.As<ulong, Vector128<byte>>(ref other.Id8)))
            {
                return false;
            }

            if (!SequenceEqual<VertexInputAttributeDescription>(VertexAttributeDescriptions.AsSpan(), other.VertexAttributeDescriptions.AsSpan(), VertexAttributeDescriptionsCount))
            {
                return false;
            }

            if (!SequenceEqual<VertexInputBindingDescription>(VertexBindingDescriptions.AsSpan(), other.VertexBindingDescriptions.AsSpan(), VertexBindingDescriptionsCount))
            {
                return false;
            }

            if (!SequenceEqual<PipelineColorBlendAttachmentState>(ColorBlendAttachmentState.AsSpan(), other.ColorBlendAttachmentState.AsSpan(), ColorBlendAttachmentStateCount))
            {
                return false;
            }

            // One extra format is compared when a depth-stencil attachment is present.
            if (!SequenceEqual<Format>(AttachmentFormats.AsSpan(), other.AttachmentFormats.AsSpan(), ColorBlendAttachmentStateCount + (HasDepthStencil ? 1u : 0u)))
            {
                return false;
            }

            return true;
        }

        // Byte-wise comparison of the first 'count' elements of two spans.
        private static bool SequenceEqual<T>(ReadOnlySpan<T> x, ReadOnlySpan<T> y, uint count) where T : unmanaged
        {
            return MemoryMarshal.Cast<T, byte>(x.Slice(0, (int)count)).SequenceEqual(MemoryMarshal.Cast<T, byte>(y.Slice(0, (int)count)));
        }

        /// <summary>
        /// Hash over the Id words and the meaningful prefixes of the inline arrays.
        /// The depth-stencil format is intentionally cheap to omit here: equal UIDs
        /// still produce equal hashes, which is all GetHashCode requires.
        /// </summary>
        public override int GetHashCode()
        {
            ulong hash64 = Id0 * 23 ^
                           Id1 * 23 ^
                           Id2 * 23 ^
                           Id3 * 23 ^
                           Id4 * 23 ^
                           Id5 * 23 ^
                           Id6 * 23 ^
                           Id7 * 23 ^
                           Id8 * 23 ^
                           Id9 * 23;

            for (int i = 0; i < (int)VertexAttributeDescriptionsCount; i++)
            {
                hash64 ^= VertexAttributeDescriptions[i].Binding * 23;
                hash64 ^= (uint)VertexAttributeDescriptions[i].Format * 23;
                hash64 ^= VertexAttributeDescriptions[i].Location * 23;
                hash64 ^= VertexAttributeDescriptions[i].Offset * 23;
            }

            for (int i = 0; i < (int)VertexBindingDescriptionsCount; i++)
            {
                hash64 ^= VertexBindingDescriptions[i].Binding * 23;
                hash64 ^= (uint)VertexBindingDescriptions[i].InputRate * 23;
                hash64 ^= VertexBindingDescriptions[i].Stride * 23;
            }

            for (int i = 0; i < (int)ColorBlendAttachmentStateCount; i++)
            {
                hash64 ^= ColorBlendAttachmentState[i].BlendEnable * 23;
                hash64 ^= (uint)ColorBlendAttachmentState[i].SrcColorBlendFactor * 23;
                hash64 ^= (uint)ColorBlendAttachmentState[i].DstColorBlendFactor * 23;
                hash64 ^= (uint)ColorBlendAttachmentState[i].ColorBlendOp * 23;
                hash64 ^= (uint)ColorBlendAttachmentState[i].SrcAlphaBlendFactor * 23;
                hash64 ^= (uint)ColorBlendAttachmentState[i].DstAlphaBlendFactor * 23;
                hash64 ^= (uint)ColorBlendAttachmentState[i].AlphaBlendOp * 23;
                hash64 ^= (uint)ColorBlendAttachmentState[i].ColorWriteMask * 23;
            }

            for (int i = 0; i < (int)ColorBlendAttachmentStateCount; i++)
            {
                hash64 ^= (uint)AttachmentFormats[i] * 23;
            }

            // Fold the 64-bit hash into 32 bits.
            return (int)hash64 ^ ((int)(hash64 >> 32) * 17);
        }
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Queries/BufferedQuery.cs b/src/Ryujinx.Graphics.Vulkan/Queries/BufferedQuery.cs
new file mode 100644
index 00000000..861155a3
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Queries/BufferedQuery.cs
@@ -0,0 +1,216 @@
+using Ryujinx.Common.Logging;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Runtime.InteropServices;
+using System.Threading;
+
+namespace Ryujinx.Graphics.Vulkan.Queries
+{
    /// <summary>
    /// A single Vulkan query whose result is copied on the GPU into a small
    /// host-mapped buffer. The CPU polls the mapped value, using a sentinel
    /// (DefaultValue) to detect that the GPU has not written the result yet.
    /// </summary>
    class BufferedQuery : IDisposable
    {
        private const int MaxQueryRetries = 5000;
        // Sentinel written to the mapped buffer before the GPU result arrives.
        private const long DefaultValue = -1;
        // 32-bit variant of the sentinel (used when results are 32-bit).
        private const long DefaultValueInt = 0xFFFFFFFF;
        private const ulong HighMask = 0xFFFFFFFF00000000;

        private readonly Vk _api;
        private readonly Device _device;
        private readonly PipelineFull _pipeline;

        private QueryPool _queryPool;

        private readonly BufferHolder _buffer;
        private readonly IntPtr _bufferMap; // Persistent host mapping of the result buffer.
        private readonly CounterType _type;
        private bool _result32Bit;
        private bool _isSupported; // When false, no query pool exists and results are dummied to 0.

        private long _defaultValue;
        private int? _resetSequence; // Sequence of the last pre-emptive pool reset, if any.

        public unsafe BufferedQuery(VulkanRenderer gd, Device device, PipelineFull pipeline, CounterType type, bool result32Bit)
        {
            _api = gd.Api;
            _device = device;
            _pipeline = pipeline;
            _type = type;
            _result32Bit = result32Bit;

            _isSupported = QueryTypeSupported(gd, type);

            if (_isSupported)
            {
                // PrimitivesGenerated is implemented via a pipeline statistics query.
                QueryPipelineStatisticFlags flags = type == CounterType.PrimitivesGenerated ?
                    QueryPipelineStatisticFlags.GeometryShaderPrimitivesBit : 0;

                var queryPoolCreateInfo = new QueryPoolCreateInfo()
                {
                    SType = StructureType.QueryPoolCreateInfo,
                    QueryCount = 1,
                    QueryType = GetQueryType(type),
                    PipelineStatistics = flags
                };

                gd.Api.CreateQueryPool(device, queryPoolCreateInfo, null, out _queryPool).ThrowOnError();
            }

            // 8-byte host-visible buffer holding the (possibly 32-bit) result,
            // initialized to the sentinel.
            var buffer = gd.BufferManager.Create(gd, sizeof(long), forConditionalRendering: true);

            _bufferMap = buffer.Map(0, sizeof(long));
            _defaultValue = result32Bit ? DefaultValueInt : DefaultValue;
            Marshal.WriteInt64(_bufferMap, _defaultValue);
            _buffer = buffer;
        }

        // Whether the device supports the Vulkan query backing this counter type.
        private bool QueryTypeSupported(VulkanRenderer gd, CounterType type)
        {
            return type switch
            {
                CounterType.SamplesPassed => true,
                CounterType.PrimitivesGenerated => gd.Capabilities.SupportsPipelineStatisticsQuery,
                CounterType.TransformFeedbackPrimitivesWritten => gd.Capabilities.SupportsTransformFeedbackQueries,
                _ => false
            };
        }

        // Maps a GAL counter type to the Vulkan query type used to implement it.
        private static QueryType GetQueryType(CounterType type)
        {
            return type switch
            {
                CounterType.SamplesPassed => QueryType.Occlusion,
                CounterType.PrimitivesGenerated => QueryType.PipelineStatistics,
                CounterType.TransformFeedbackPrimitivesWritten => QueryType.TransformFeedbackStreamExt,
                _ => QueryType.Occlusion
            };
        }

        /// <summary>
        /// Buffer holding the query result, e.g. for conditional rendering.
        /// </summary>
        public Auto<DisposableBuffer> GetBuffer()
        {
            return _buffer.GetBuffer();
        }

        /// <summary>
        /// Ends the current query without a result and begins a new one.
        /// </summary>
        public void Reset()
        {
            End(false);
            Begin(null);
        }

        /// <summary>
        /// Begins the query on the pipeline. A pool reset is skipped when the query
        /// was already pre-emptively reset under the same reset sequence.
        /// </summary>
        public void Begin(int? resetSequence)
        {
            if (_isSupported)
            {
                bool needsReset = resetSequence == null || _resetSequence == null || resetSequence.Value != _resetSequence.Value;
                bool isOcclusion = _type == CounterType.SamplesPassed;
                _pipeline.BeginQuery(this, _queryPool, needsReset, isOcclusion, isOcclusion && resetSequence != null);
            }
            _resetSequence = null;
        }

        /// <summary>
        /// Ends the query. With <paramref name="withResult"/>, re-arms the sentinel and
        /// schedules a GPU copy of the result into the mapped buffer; otherwise writes
        /// a dummy 0 so waiters complete immediately.
        /// </summary>
        public unsafe void End(bool withResult)
        {
            if (_isSupported)
            {
                _pipeline.EndQuery(_queryPool);
            }

            if (withResult && _isSupported)
            {
                Marshal.WriteInt64(_bufferMap, _defaultValue);
                _pipeline.CopyQueryResults(this);
            }
            else
            {
                // Dummy result, just return 0.
                Marshal.WriteInt64(_bufferMap, 0);
            }
        }

        // True while the mapped value still looks like the sentinel. For 64-bit
        // results, also treats a value whose high word matches the sentinel's as
        // "not yet written" (the GPU may write the two words non-atomically).
        private bool WaitingForValue(long data)
        {
            return data == _defaultValue ||
                (!_result32Bit && ((ulong)data & HighMask) == ((ulong)_defaultValue & HighMask));
        }

        /// <summary>
        /// Non-blocking read of the result. Returns false while the sentinel is
        /// still present.
        /// </summary>
        public bool TryGetResult(out long result)
        {
            result = Marshal.ReadInt64(_bufferMap);

            return result != _defaultValue;
        }

        /// <summary>
        /// Blocks until the GPU writes the result. Without a wake signal this is a
        /// pure spin; with one, sleeps up to 1ms per retry and gives up (logging an
        /// error) after MaxQueryRetries attempts.
        /// </summary>
        public long AwaitResult(AutoResetEvent wakeSignal = null)
        {
            long data = _defaultValue;

            if (wakeSignal == null)
            {
                while (WaitingForValue(data))
                {
                    data = Marshal.ReadInt64(_bufferMap);
                }
            }
            else
            {
                int iterations = 0;
                while (WaitingForValue(data) && iterations++ < MaxQueryRetries)
                {
                    data = Marshal.ReadInt64(_bufferMap);
                    if (WaitingForValue(data))
                    {
                        wakeSignal.WaitOne(1);
                    }
                }

                if (iterations >= MaxQueryRetries)
                {
                    Logger.Error?.Print(LogClass.Gpu, $"Error: Query result {_type} timed out. Took more than {MaxQueryRetries} tries.");
                }
            }

            return data;
        }

        /// <summary>
        /// Records a pre-emptive pool reset into the given command buffer and
        /// remembers the sequence so Begin can skip its own reset.
        /// </summary>
        public void PoolReset(CommandBuffer cmd, int resetSequence)
        {
            if (_isSupported)
            {
                _api.CmdResetQueryPool(cmd, _queryPool, 0, 1);
            }

            _resetSequence = resetSequence;
        }

        /// <summary>
        /// Records the GPU-side copy of the query result into the mapped buffer,
        /// waiting for the result and using 64-bit results unless 32-bit mode is set.
        /// </summary>
        public void PoolCopy(CommandBufferScoped cbs)
        {
            var buffer = _buffer.GetBuffer(cbs.CommandBuffer, true).Get(cbs, 0, sizeof(long)).Value;

            QueryResultFlags flags = QueryResultFlags.ResultWaitBit;

            if (!_result32Bit)
            {
                flags |= QueryResultFlags.Result64Bit;
            }

            _api.CmdCopyQueryPoolResults(
                cbs.CommandBuffer,
                _queryPool,
                0,
                1,
                buffer,
                0,
                (ulong)(_result32Bit ? sizeof(int) : sizeof(long)),
                flags);
        }

        public unsafe void Dispose()
        {
            _buffer.Dispose();
            if (_isSupported)
            {
                _api.DestroyQueryPool(_device, _queryPool, null);
            }
            _queryPool = default;
        }
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Queries/CounterQueue.cs b/src/Ryujinx.Graphics.Vulkan/Queries/CounterQueue.cs
new file mode 100644
index 00000000..c30d91c4
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Queries/CounterQueue.cs
@@ -0,0 +1,245 @@
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading;
+
+namespace Ryujinx.Graphics.Vulkan.Queries
+{
    /// <summary>
    /// Per-counter-type queue of pending counter events, drained by a dedicated
    /// consumer thread. Query objects are pooled and reused because they must be
    /// created/disposed on the main thread.
    /// </summary>
    class CounterQueue : IDisposable
    {
        private const int QueryPoolInitialSize = 100;

        private readonly VulkanRenderer _gd;
        private readonly Device _device;
        private readonly PipelineFull _pipeline;

        public CounterType Type { get; }
        public bool Disposed { get; private set; }

        // Events waiting to be consumed, guarded by _lock.
        private Queue<CounterQueueEvent> _events = new Queue<CounterQueueEvent>();
        private CounterQueueEvent _current;

        // Running total passed by ref into each event's TryConsume.
        private ulong _accumulatedCounter;
        private int _waiterCount;

        private object _lock = new object();

        private Queue<BufferedQuery> _queryPool;
        private AutoResetEvent _queuedEvent = new AutoResetEvent(false);
        private AutoResetEvent _wakeSignal = new AutoResetEvent(false);
        private AutoResetEvent _eventConsumed = new AutoResetEvent(false);

        private Thread _consumerThread;

        // Incremented on each counter pool reset; lets queries detect stale resets.
        public int ResetSequence { get; private set; }

        internal CounterQueue(VulkanRenderer gd, Device device, PipelineFull pipeline, CounterType type)
        {
            _gd = gd;
            _device = device;
            _pipeline = pipeline;

            Type = type;

            _queryPool = new Queue<BufferedQuery>(QueryPoolInitialSize);
            for (int i = 0; i < QueryPoolInitialSize; i++)
            {
                // AMD Polaris GPUs on Windows seem to have issues reporting 64-bit query results.
                _queryPool.Enqueue(new BufferedQuery(_gd, _device, _pipeline, type, gd.IsAmdWindows));
            }

            _current = new CounterQueueEvent(this, type, 0);

            _consumerThread = new Thread(EventConsumer);
            _consumerThread.Start();
        }

        public void ResetCounterPool()
        {
            ResetSequence++;
        }

        /// <summary>
        /// Pre-emptively resets up to <paramref name="count"/> pooled queries inside
        /// the given command buffer, to avoid render pass splitting later.
        /// NOTE(review): this locks _queryPool while GetQueryObject/ReturnQueryObject
        /// mutate the same queue under _lock, and ElementAt(i) walks the queue each
        /// iteration (O(n^2)) — confirm this is intentional.
        /// </summary>
        public void ResetFutureCounters(CommandBuffer cmd, int count)
        {
            // Pre-emptively reset queries to avoid render pass splitting.
            lock (_queryPool)
            {
                count = Math.Min(count, _queryPool.Count);
                for (int i = 0; i < count; i++)
                {
                    _queryPool.ElementAt(i).PoolReset(cmd, ResetSequence);
                }
            }
        }

        // Consumer thread loop: dequeues events and blocks consuming each one;
        // sleeps on _queuedEvent when the queue is empty.
        private void EventConsumer()
        {
            while (!Disposed)
            {
                CounterQueueEvent evt = null;
                lock (_lock)
                {
                    if (_events.Count > 0)
                    {
                        evt = _events.Dequeue();
                    }
                }

                if (evt == null)
                {
                    _queuedEvent.WaitOne(); // No more events to go through, wait for more.
                }
                else
                {
                    // Spin-wait rather than sleeping if there are any waiters, by passing null instead of the wake signal.
                    evt.TryConsume(ref _accumulatedCounter, true, _waiterCount == 0 ? _wakeSignal : null);
                }

                if (_waiterCount > 0)
                {
                    _eventConsumed.Set();
                }
            }
        }

        /// <summary>
        /// Takes a query object from the pool, or creates one if the pool is empty.
        /// </summary>
        internal BufferedQuery GetQueryObject()
        {
            // Creating/disposing query objects on a context we're sharing with will cause issues.
            // So instead, make a lot of query objects on the main thread and reuse them.

            lock (_lock)
            {
                if (_queryPool.Count > 0)
                {
                    BufferedQuery result = _queryPool.Dequeue();
                    return result;
                }
                else
                {
                    return new BufferedQuery(_gd, _device, _pipeline, Type, _gd.IsAmdWindows);
                }
            }
        }

        /// <summary>
        /// Returns a query object to the pool for reuse.
        /// </summary>
        internal void ReturnQueryObject(BufferedQuery query)
        {
            lock (_lock)
            {
                // The query will be reset when it dequeues.
                _queryPool.Enqueue(query);
            }
        }

        /// <summary>
        /// Completes the current event, enqueues it for the consumer thread, and
        /// starts a fresh current event. Events spanning zero draws are dummied out.
        /// </summary>
        public CounterQueueEvent QueueReport(EventHandler<ulong> resultHandler, ulong lastDrawIndex, bool hostReserved)
        {
            CounterQueueEvent result;
            ulong draws = lastDrawIndex - _current.DrawIndex;

            lock (_lock)
            {
                // A query's result only matters if more than one draw was performed during it.
                // Otherwise, dummy it out and return 0 immediately.

                if (hostReserved)
                {
                    // This counter event is guaranteed to be available for host conditional rendering.
                    _current.ReserveForHostAccess();
                }

                _current.Complete(draws > 0 && Type != CounterType.TransformFeedbackPrimitivesWritten, _pipeline.GetCounterDivisor(Type));
                _events.Enqueue(_current);

                _current.OnResult += resultHandler;

                result = _current;

                _current = new CounterQueueEvent(this, Type, lastDrawIndex);
            }

            _queuedEvent.Set();

            return result;
        }

        /// <summary>
        /// Clears the current event; the underlying query is only reset when draws
        /// occurred since the event started.
        /// </summary>
        public void QueueReset(ulong lastDrawIndex)
        {
            ulong draws = lastDrawIndex - _current.DrawIndex;

            lock (_lock)
            {
                _current.Clear(draws != 0);
            }
        }

        /// <summary>
        /// Drains the event queue. Non-blocking mode just wakes the consumer thread;
        /// blocking mode consumes events on the caller, stopping at the first event
        /// whose result is not ready.
        /// </summary>
        public void Flush(bool blocking)
        {
            if (!blocking)
            {
                // Just wake the consumer thread - it will update the queries.
                _wakeSignal.Set();
                return;
            }

            lock (_lock)
            {
                // Tell the queue to process all events.
                while (_events.Count > 0)
                {
                    CounterQueueEvent flush = _events.Peek();
                    if (!flush.TryConsume(ref _accumulatedCounter, true))
                    {
                        return; // If not blocking, then return when we encounter an event that is not ready yet.
                    }
                    _events.Dequeue();
                }
            }
        }

        /// <summary>
        /// Blocks the caller until the consumer thread has consumed the given event.
        /// While waiting, the consumer spins instead of sleeping (see EventConsumer).
        /// </summary>
        public void FlushTo(CounterQueueEvent evt)
        {
            // Flush the counter queue on the main thread.
            Interlocked.Increment(ref _waiterCount);

            _wakeSignal.Set();

            while (!evt.Disposed)
            {
                _eventConsumed.WaitOne(1);
            }

            Interlocked.Decrement(ref _waiterCount);
        }

        /// <summary>
        /// Disposes pending events, stops the consumer thread, then releases the
        /// pooled queries and wait handles.
        /// </summary>
        public void Dispose()
        {
            lock (_lock)
            {
                while (_events.Count > 0)
                {
                    CounterQueueEvent evt = _events.Dequeue();

                    evt.Dispose();
                }

                Disposed = true;
            }

            // Wake the consumer so it observes Disposed and exits its loop.
            _queuedEvent.Set();

            _consumerThread.Join();

            _current?.Dispose();

            foreach (BufferedQuery query in _queryPool)
            {
                query.Dispose();
            }

            _queuedEvent.Dispose();
            _wakeSignal.Dispose();
            _eventConsumed.Dispose();
        }
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Queries/CounterQueueEvent.cs b/src/Ryujinx.Graphics.Vulkan/Queries/CounterQueueEvent.cs
new file mode 100644
index 00000000..d3aedb2f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Queries/CounterQueueEvent.cs
@@ -0,0 +1,170 @@
+using Ryujinx.Graphics.GAL;
+using System;
+using System.Threading;
+
+namespace Ryujinx.Graphics.Vulkan.Queries
+{
    /// <summary>
    /// One pending counter readback. Owns a pooled BufferedQuery from creation until
    /// its reference count drops to zero, at which point the query returns to the
    /// pool. The queue's consumer thread resolves the event via TryConsume.
    /// </summary>
    class CounterQueueEvent : ICounterEvent
    {
        // Raised once with the accumulated counter value when the event is consumed.
        public event EventHandler<ulong> OnResult;

        public CounterType Type { get; }
        public bool ClearCounter { get; private set; }

        public bool Disposed { get; private set; }
        public bool Invalid { get; set; }

        // Draw index at which this event's query began.
        public ulong DrawIndex { get; }

        private CounterQueue _queue;
        private BufferedQuery _counter;

        private bool _hostAccessReserved = false;
        private int _refCount = 1; // Starts with a reference from the counter queue.

        private object _lock = new object();
        private ulong _result = ulong.MaxValue; // MaxValue = result not yet available.
        private double _divisor = 1f;

        public CounterQueueEvent(CounterQueue queue, CounterType type, ulong drawIndex)
        {
            _queue = queue;

            _counter = queue.GetQueryObject();
            Type = type;

            DrawIndex = drawIndex;

            _counter.Begin(_queue.ResetSequence);
        }

        /// <summary>
        /// Buffer holding the raw query result (e.g. for conditional rendering).
        /// </summary>
        public Auto<DisposableBuffer> GetBuffer()
        {
            return _counter.GetBuffer();
        }

        /// <summary>
        /// Marks the event as a counter clear; optionally resets the query itself.
        /// </summary>
        internal void Clear(bool counterReset)
        {
            if (counterReset)
            {
                _counter.Reset();
            }

            ClearCounter = true;
        }

        /// <summary>
        /// Ends the query, with or without a real result, and records the divisor
        /// applied to the raw value on consumption.
        /// </summary>
        internal void Complete(bool withResult, double divisor)
        {
            _counter.End(withResult);

            _divisor = divisor;
        }

        /// <summary>
        /// Attempts to resolve the event: reads the query result (blocking if
        /// requested), folds it into the accumulated counter, fires OnResult and
        /// releases the event. Returns false if the result is not ready and
        /// <paramref name="block"/> is false.
        /// </summary>
        internal bool TryConsume(ref ulong result, bool block, AutoResetEvent wakeSignal = null)
        {
            lock (_lock)
            {
                if (Disposed)
                {
                    return true;
                }

                // A clear event zeroes the accumulator before adding its own value.
                if (ClearCounter)
                {
                    result = 0;
                }

                long queryResult;

                if (block)
                {
                    queryResult = _counter.AwaitResult(wakeSignal);
                }
                else
                {
                    if (!_counter.TryGetResult(out queryResult))
                    {
                        return false;
                    }
                }

                result += _divisor == 1 ? (ulong)queryResult : (ulong)Math.Ceiling(queryResult / _divisor);

                _result = result;

                OnResult?.Invoke(this, result);

                Dispose(); // Return our resources to the pool.

                return true;
            }
        }

        /// <summary>
        /// Blocks until the queue's consumer thread has processed this event.
        /// </summary>
        public void Flush()
        {
            if (Disposed)
            {
                return;
            }

            // Tell the queue to process all events up to this one.
            _queue.FlushTo(this);
        }

        public void DecrementRefCount()
        {
            if (Interlocked.Decrement(ref _refCount) == 0)
            {
                DisposeInternal();
            }
        }

        /// <summary>
        /// Takes an extra reference so the host can read the query buffer. Fails if
        /// the result already landed or the event is being torn down concurrently.
        /// </summary>
        public bool ReserveForHostAccess()
        {
            if (_hostAccessReserved)
            {
                return true;
            }

            if (IsValueAvailable())
            {
                return false;
            }

            // If the increment raced with the last release (count went 0 -> 1), the
            // event is already being disposed; undo and report failure.
            if (Interlocked.Increment(ref _refCount) == 1)
            {
                Interlocked.Decrement(ref _refCount);

                return false;
            }

            _hostAccessReserved = true;

            return true;
        }

        public void ReleaseHostAccess()
        {
            _hostAccessReserved = false;

            DecrementRefCount();
        }

        // Called when the last reference is released; recycles the query object.
        private void DisposeInternal()
        {
            _queue.ReturnQueryObject(_counter);
        }

        // True once a result was consumed or the query buffer already holds one.
        private bool IsValueAvailable()
        {
            return _result != ulong.MaxValue || _counter.TryGetResult(out _);
        }

        public void Dispose()
        {
            Disposed = true;

            DecrementRefCount();
        }
    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Queries/Counters.cs b/src/Ryujinx.Graphics.Vulkan/Queries/Counters.cs
new file mode 100644
index 00000000..d9d65062
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Queries/Counters.cs
@@ -0,0 +1,71 @@
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan.Queries
+{
+ class Counters : IDisposable
+ {
+ private readonly CounterQueue[] _counterQueues;
+ private readonly PipelineFull _pipeline;
+
+ public Counters(VulkanRenderer gd, Device device, PipelineFull pipeline)
+ {
+ _pipeline = pipeline;
+
+ int count = Enum.GetNames(typeof(CounterType)).Length;
+
+ _counterQueues = new CounterQueue[count];
+
+ for (int index = 0; index < _counterQueues.Length; index++)
+ {
+ CounterType type = (CounterType)index;
+ _counterQueues[index] = new CounterQueue(gd, device, pipeline, type);
+ }
+ }
+
+ public void ResetCounterPool()
+ {
+ foreach (var queue in _counterQueues)
+ {
+ queue.ResetCounterPool();
+ }
+ }
+
+ public void ResetFutureCounters(CommandBuffer cmd, int count)
+ {
+ _counterQueues[(int)CounterType.SamplesPassed].ResetFutureCounters(cmd, count);
+ }
+
+ public CounterQueueEvent QueueReport(CounterType type, EventHandler<ulong> resultHandler, bool hostReserved)
+ {
+ return _counterQueues[(int)type].QueueReport(resultHandler, _pipeline.DrawCount, hostReserved);
+ }
+
+ public void QueueReset(CounterType type)
+ {
+ _counterQueues[(int)type].QueueReset(_pipeline.DrawCount);
+ }
+
+ public void Update()
+ {
+ foreach (var queue in _counterQueues)
+ {
+ queue.Flush(false);
+ }
+ }
+
+ public void Flush(CounterType type)
+ {
+ _counterQueues[(int)type].Flush(true);
+ }
+
+ public void Dispose()
+ {
+ foreach (var queue in _counterQueues)
+ {
+ queue.Dispose();
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Ryujinx.Graphics.Vulkan.csproj b/src/Ryujinx.Graphics.Vulkan/Ryujinx.Graphics.Vulkan.csproj
new file mode 100644
index 00000000..20216e51
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Ryujinx.Graphics.Vulkan.csproj
@@ -0,0 +1,39 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <!-- Vulkan graphics backend: embeds post-processing effect textures and
+       precompiled SPIR-V shaders, and references the Silk.NET Vulkan bindings
+       plus the shaderc.net runtime GLSL compiler. -->
+
+  <PropertyGroup>
+    <TargetFramework>net7.0</TargetFramework>
+  </PropertyGroup>
+
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|AnyCPU'">
+    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
+  </PropertyGroup>
+
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'">
+    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <EmbeddedResource Include="Effects\Textures\SmaaAreaTexture.bin" />
+    <EmbeddedResource Include="Effects\Textures\SmaaSearchTexture.bin" />
+    <EmbeddedResource Include="Effects\Shaders\FsrScaling.spv" />
+    <EmbeddedResource Include="Effects\Shaders\FsrSharpening.spv" />
+    <EmbeddedResource Include="Effects\Shaders\Fxaa.spv" />
+    <EmbeddedResource Include="Effects\Shaders\SmaaBlend.spv" />
+    <EmbeddedResource Include="Effects\Shaders\SmaaEdge.spv" />
+    <EmbeddedResource Include="Effects\Shaders\SmaaNeighbour.spv" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <PackageReference Include="OpenTK.Windowing.GraphicsLibraryFramework" />
+    <PackageReference Include="shaderc.net" />
+    <PackageReference Include="Silk.NET.Vulkan" />
+    <PackageReference Include="Silk.NET.Vulkan.Extensions.EXT" />
+    <PackageReference Include="Silk.NET.Vulkan.Extensions.KHR" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\Ryujinx.Common\Ryujinx.Common.csproj" />
+    <ProjectReference Include="..\Ryujinx.Graphics.GAL\Ryujinx.Graphics.GAL.csproj" />
+  </ItemGroup>
+
+</Project>
diff --git a/src/Ryujinx.Graphics.Vulkan/SamplerHolder.cs b/src/Ryujinx.Graphics.Vulkan/SamplerHolder.cs
new file mode 100644
index 00000000..a95e4dba
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/SamplerHolder.cs
@@ -0,0 +1,118 @@
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+    /// <summary>
+    /// Wraps a Vulkan sampler created from a GAL sampler description, with
+    /// reference-counted disposal through <see cref="Auto{T}"/>.
+    /// </summary>
+    class SamplerHolder : ISampler
+    {
+        private readonly VulkanRenderer _gd;
+        private readonly Auto<DisposableSampler> _sampler;
+
+        public unsafe SamplerHolder(VulkanRenderer gd, Device device, GAL.SamplerCreateInfo info)
+        {
+            _gd = gd;
+
+            // Track the sampler on the renderer so it can be cleaned up with the device.
+            gd.Samplers.Add(this);
+
+            (Filter minFilter, SamplerMipmapMode mipFilter) = EnumConversion.Convert(info.MinFilter);
+
+            float minLod = info.MinLod;
+            float maxLod = info.MaxLod;
+
+            // For non-mipmapped min filters, clamp the LOD range so only the
+            // base level is sampled.
+            if (info.MinFilter == MinFilter.Nearest || info.MinFilter == MinFilter.Linear)
+            {
+                minLod = 0;
+                maxLod = 0.25f;
+            }
+
+            // Vulkan only supports a fixed set of border colors; try to match the
+            // arbitrary GAL color to one of them. If no exact match exists,
+            // cantConstrain is set and the custom border color extension is used below.
+            var borderColor = GetConstrainedBorderColor(info.BorderColor, out var cantConstrain);
+
+            var samplerCreateInfo = new Silk.NET.Vulkan.SamplerCreateInfo()
+            {
+                SType = StructureType.SamplerCreateInfo,
+                MagFilter = info.MagFilter.Convert(),
+                MinFilter = minFilter,
+                MipmapMode = mipFilter,
+                AddressModeU = info.AddressU.Convert(),
+                AddressModeV = info.AddressV.Convert(),
+                AddressModeW = info.AddressP.Convert(),
+                MipLodBias = info.MipLodBias,
+                AnisotropyEnable = info.MaxAnisotropy != 1f,
+                MaxAnisotropy = info.MaxAnisotropy,
+                CompareEnable = info.CompareMode == CompareMode.CompareRToTexture,
+                CompareOp = info.CompareOp.Convert(),
+                MinLod = minLod,
+                MaxLod = maxLod,
+                BorderColor = borderColor,
+                UnnormalizedCoordinates = false // TODO: Use unnormalized coordinates.
+            };
+
+            SamplerCustomBorderColorCreateInfoEXT customBorderColor;
+
+            // Use VK_EXT_custom_border_color when available and the exact color
+            // could not be matched to a built-in border color.
+            if (cantConstrain && gd.Capabilities.SupportsCustomBorderColor)
+            {
+                var color = new ClearColorValue(
+                    info.BorderColor.Red,
+                    info.BorderColor.Green,
+                    info.BorderColor.Blue,
+                    info.BorderColor.Alpha);
+
+                customBorderColor = new SamplerCustomBorderColorCreateInfoEXT()
+                {
+                    SType = StructureType.SamplerCustomBorderColorCreateInfoExt,
+                    CustomBorderColor = color
+                };
+
+                samplerCreateInfo.PNext = &customBorderColor;
+                samplerCreateInfo.BorderColor = BorderColor.FloatCustomExt;
+            }
+
+            gd.Api.CreateSampler(device, samplerCreateInfo, null, out var sampler).ThrowOnError();
+
+            _sampler = new Auto<DisposableSampler>(new DisposableSampler(gd.Api, device, sampler));
+        }
+
+        // Maps an arbitrary border color to the closest built-in Vulkan border color.
+        // cantConstrain is true when the color is not an exact match (caller may then
+        // use the custom border color extension instead).
+        private static BorderColor GetConstrainedBorderColor(ColorF arbitraryBorderColor, out bool cantConstrain)
+        {
+            float r = arbitraryBorderColor.Red;
+            float g = arbitraryBorderColor.Green;
+            float b = arbitraryBorderColor.Blue;
+            float a = arbitraryBorderColor.Alpha;
+
+            if (r == 0f && g == 0f && b == 0f)
+            {
+                if (a == 1f)
+                {
+                    cantConstrain = false;
+                    return BorderColor.FloatOpaqueBlack;
+                }
+                else if (a == 0f)
+                {
+                    cantConstrain = false;
+                    return BorderColor.FloatTransparentBlack;
+                }
+            }
+            else if (r == 1f && g == 1f && b == 1f && a == 1f)
+            {
+                cantConstrain = false;
+                return BorderColor.FloatOpaqueWhite;
+            }
+
+            // No exact built-in match; fall back to opaque black.
+            cantConstrain = true;
+            return BorderColor.FloatOpaqueBlack;
+        }
+
+        public Auto<DisposableSampler> GetSampler()
+        {
+            return _sampler;
+        }
+
+        public void Dispose()
+        {
+            // Remove returns false if already removed, guarding against double dispose.
+            if (_gd.Samplers.Remove(this))
+            {
+                _sampler.Dispose();
+            }
+        }
+    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/SemaphoreHolder.cs b/src/Ryujinx.Graphics.Vulkan/SemaphoreHolder.cs
new file mode 100644
index 00000000..aa1b0eaf
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/SemaphoreHolder.cs
@@ -0,0 +1,60 @@
+using Silk.NET.Vulkan;
+using System;
+using System.Threading;
+using VkSemaphore = Silk.NET.Vulkan.Semaphore;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+    /// <summary>
+    /// Reference-counted wrapper around a Vulkan semaphore. Created with one
+    /// reference; the semaphore is destroyed once the count drops to zero.
+    /// </summary>
+    class SemaphoreHolder : IDisposable
+    {
+        private readonly Vk _api;
+        private readonly Device _device;
+        private VkSemaphore _semaphore;
+        private int _referenceCount;
+        // NOTE(review): named like a private field but declared public —
+        // presumably intended to be private; confirm nothing reads it externally.
+        public bool _disposed;
+
+        public unsafe SemaphoreHolder(Vk api, Device device)
+        {
+            _api = api;
+            _device = device;
+
+            var semaphoreCreateInfo = new SemaphoreCreateInfo()
+            {
+                SType = StructureType.SemaphoreCreateInfo
+            };
+
+            api.CreateSemaphore(device, in semaphoreCreateInfo, null, out _semaphore).ThrowOnError();
+
+            // The holder itself owns the initial reference; released by Dispose().
+            _referenceCount = 1;
+        }
+
+        // Returns the semaphore without taking a reference; the caller must not
+        // use it beyond the holder's lifetime.
+        public VkSemaphore GetUnsafe()
+        {
+            return _semaphore;
+        }
+
+        // Returns the semaphore and takes a reference; pair each call with Put().
+        public VkSemaphore Get()
+        {
+            Interlocked.Increment(ref _referenceCount);
+            return _semaphore;
+        }
+
+        // Releases one reference, destroying the semaphore when none remain.
+        public unsafe void Put()
+        {
+            if (Interlocked.Decrement(ref _referenceCount) == 0)
+            {
+                _api.DestroySemaphore(_device, _semaphore, null);
+                _semaphore = default;
+            }
+        }
+
+        public void Dispose()
+        {
+            // Drop the constructor's initial reference exactly once.
+            if (!_disposed)
+            {
+                Put();
+                _disposed = true;
+            }
+        }
+    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Shader.cs b/src/Ryujinx.Graphics.Vulkan/Shader.cs
new file mode 100644
index 00000000..ca99ebf0
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shader.cs
@@ -0,0 +1,163 @@
+using Ryujinx.Common.Logging;
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Shader;
+using shaderc;
+using Silk.NET.Vulkan;
+using System;
+using System.Runtime.InteropServices;
+using System.Threading.Tasks;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+    /// <summary>
+    /// A single shader stage. Compiles GLSL to SPIR-V (if no binary was provided)
+    /// and creates the Vulkan shader module on a background task.
+    /// </summary>
+    class Shader : IDisposable
+    {
+        // The shaderc.net dependency's Options constructor and dispose are not thread safe.
+        // Take this lock when using them.
+        private static object _shaderOptionsLock = new object();
+
+        // Entry point name shared by all modules; allocated once for the process
+        // lifetime and intentionally never freed.
+        private static readonly IntPtr _ptrMainEntryPointName = Marshal.StringToHGlobalAnsi("main");
+
+        private readonly Vk _api;
+        private readonly Device _device;
+        private readonly ShaderStageFlags _stage;
+
+        private bool _disposed;
+        private ShaderModule _module;
+
+        public ShaderStageFlags StageFlags => _stage;
+
+        public ShaderBindings Bindings { get; }
+
+        // Incomplete until the background task finishes; then Success or Failure.
+        public ProgramLinkStatus CompileStatus { private set; get; }
+
+        public readonly Task CompileTask;
+
+        public unsafe Shader(Vk api, Device device, ShaderSource shaderSource)
+        {
+            _api = api;
+            _device = device;
+            Bindings = shaderSource.Bindings;
+
+            CompileStatus = ProgramLinkStatus.Incomplete;
+
+            _stage = shaderSource.Stage.Convert();
+
+            // Compile asynchronously; callers wait via CompileTask or WaitForCompile().
+            CompileTask = Task.Run(() =>
+            {
+                byte[] spirv = shaderSource.BinaryCode;
+
+                if (spirv == null)
+                {
+                    spirv = GlslToSpirv(shaderSource.Code, shaderSource.Stage);
+
+                    if (spirv == null)
+                    {
+                        CompileStatus = ProgramLinkStatus.Failure;
+
+                        return;
+                    }
+                }
+
+                fixed (byte* pCode = spirv)
+                {
+                    var shaderModuleCreateInfo = new ShaderModuleCreateInfo()
+                    {
+                        SType = StructureType.ShaderModuleCreateInfo,
+                        CodeSize = (uint)spirv.Length,
+                        PCode = (uint*)pCode
+                    };
+
+                    api.CreateShaderModule(device, shaderModuleCreateInfo, null, out _module).ThrowOnError();
+                }
+
+                CompileStatus = ProgramLinkStatus.Success;
+            });
+        }
+
+        // Compiles GLSL to SPIR-V with shaderc, targeting Vulkan 1.2 / SPIR-V 1.5.
+        // Returns null on compilation failure (error is logged).
+        private unsafe static byte[] GlslToSpirv(string glsl, ShaderStage stage)
+        {
+            Options options;
+
+            // Options creation/disposal is not thread safe; see _shaderOptionsLock.
+            lock (_shaderOptionsLock)
+            {
+                options = new Options(false)
+                {
+                    SourceLanguage = SourceLanguage.Glsl,
+                    TargetSpirVVersion = new SpirVVersion(1, 5)
+                };
+            }
+
+            options.SetTargetEnvironment(TargetEnvironment.Vulkan, EnvironmentVersion.Vulkan_1_2);
+            Compiler compiler = new Compiler(options);
+            var scr = compiler.Compile(glsl, "Ryu", GetShaderCShaderStage(stage));
+
+            lock (_shaderOptionsLock)
+            {
+                options.Dispose();
+            }
+
+            if (scr.Status != Status.Success)
+            {
+                Logger.Error?.Print(LogClass.Gpu, $"Shader compilation error: {scr.Status} {scr.ErrorMessage}");
+
+                return null;
+            }
+
+            var spirvBytes = new Span<byte>((void*)scr.CodePointer, (int)scr.CodeLength);
+
+            // Round the code size up to a multiple of 4 bytes (SPIR-V words).
+            byte[] code = new byte[(scr.CodeLength + 3) & ~3];
+
+            spirvBytes.CopyTo(code.AsSpan().Slice(0, (int)scr.CodeLength));
+
+            return code;
+        }
+
+        // Maps a GAL shader stage to the equivalent shaderc shader kind.
+        private static ShaderKind GetShaderCShaderStage(ShaderStage stage)
+        {
+            switch (stage)
+            {
+                case ShaderStage.Vertex:
+                    return ShaderKind.GlslVertexShader;
+                case ShaderStage.Geometry:
+                    return ShaderKind.GlslGeometryShader;
+                case ShaderStage.TessellationControl:
+                    return ShaderKind.GlslTessControlShader;
+                case ShaderStage.TessellationEvaluation:
+                    return ShaderKind.GlslTessEvaluationShader;
+                case ShaderStage.Fragment:
+                    return ShaderKind.GlslFragmentShader;
+                case ShaderStage.Compute:
+                    return ShaderKind.GlslComputeShader;
+            }
+
+            Logger.Debug?.Print(LogClass.Gpu, $"Invalid {nameof(ShaderStage)} enum value: {stage}.");
+
+            return ShaderKind.GlslVertexShader;
+        }
+
+        // Builds the pipeline stage info for this shader. Only valid after
+        // compilation has finished (the module handle is written by the task).
+        public unsafe PipelineShaderStageCreateInfo GetInfo()
+        {
+            return new PipelineShaderStageCreateInfo()
+            {
+                SType = StructureType.PipelineShaderStageCreateInfo,
+                Stage = _stage,
+                Module = _module,
+                PName = (byte*)_ptrMainEntryPointName
+            };
+        }
+
+        // Blocks until the background compilation task completes.
+        public void WaitForCompile()
+        {
+            CompileTask.Wait();
+        }
+
+        public unsafe void Dispose()
+        {
+            if (!_disposed)
+            {
+                _api.DestroyShaderModule(_device, _module, null);
+                _disposed = true;
+            }
+        }
+    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/ShaderCollection.cs b/src/Ryujinx.Graphics.Vulkan/ShaderCollection.cs
new file mode 100644
index 00000000..1694049c
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/ShaderCollection.cs
@@ -0,0 +1,427 @@
+using Ryujinx.Common.Logging;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading.Tasks;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+    /// <summary>
+    /// A linked shader program: a set of shader stages plus the pipeline layout,
+    /// binding tables and per-program pipeline caches (graphics and compute).
+    /// Supports background compilation of both the stages and an initial pipeline.
+    /// </summary>
+    class ShaderCollection : IProgram
+    {
+        private readonly PipelineShaderStageCreateInfo[] _infos;
+        private readonly Shader[] _shaders;
+
+        private readonly PipelineLayoutCacheEntry _plce;
+
+        public PipelineLayout PipelineLayout => _plce.PipelineLayout;
+
+        public bool HasMinimalLayout { get; }
+        public bool UsePushDescriptors { get; }
+        public bool IsCompute { get; }
+
+        // Bitmask of present stages (see the mapping in the constructor).
+        public uint Stages { get; }
+
+        // [resource kind][shader index][binding] tables; kinds are
+        // uniform buffers, storage buffers, textures, images (in that order).
+        public int[][][] Bindings { get; }
+
+        public ProgramLinkStatus LinkStatus { get; private set; }
+
+        public readonly SpecDescription[] SpecDescriptions;
+
+        // True once all stages compiled successfully; blocks if still in progress.
+        public bool IsLinked
+        {
+            get
+            {
+                if (LinkStatus == ProgramLinkStatus.Incomplete)
+                {
+                    CheckProgramLink(true);
+                }
+
+                return LinkStatus == ProgramLinkStatus.Success;
+            }
+        }
+
+        // Lazily-created pipeline caches, keyed by full graphics state or spec data.
+        private HashTableSlim<PipelineUid, Auto<DisposablePipeline>> _graphicsPipelineCache;
+        private HashTableSlim<SpecData, Auto<DisposablePipeline>> _computePipelineCache;
+
+        private VulkanRenderer _gd;
+        private Device _device;
+        private bool _initialized;
+
+        private ProgramPipelineState _state;
+        private DisposableRenderPass _dummyRenderPass;
+        private Task _compileTask;
+        private bool _firstBackgroundUse;
+
+        public ShaderCollection(VulkanRenderer gd, Device device, ShaderSource[] shaders, SpecDescription[] specDescription = null, bool isMinimal = false)
+        {
+            _gd = gd;
+            _device = device;
+
+            if (specDescription != null && specDescription.Length != shaders.Length)
+            {
+                throw new ArgumentException($"{nameof(specDescription)} array length must match {nameof(shaders)} array if provided");
+            }
+
+            // Track on the renderer so programs are disposed with the device.
+            gd.Shaders.Add(this);
+
+            var internalShaders = new Shader[shaders.Length];
+
+            _infos = new PipelineShaderStageCreateInfo[shaders.Length];
+
+            SpecDescriptions = specDescription;
+
+            LinkStatus = ProgramLinkStatus.Incomplete;
+
+            uint stages = 0;
+
+            for (int i = 0; i < shaders.Length; i++)
+            {
+                var shader = new Shader(gd.Api, device, shaders[i]);
+
+                // Stage bitmask: bit 1 fragment, 2 geometry, 3/4 tessellation;
+                // vertex and compute both map to bit 0.
+                stages |= 1u << shader.StageFlags switch
+                {
+                    ShaderStageFlags.FragmentBit => 1,
+                    ShaderStageFlags.GeometryBit => 2,
+                    ShaderStageFlags.TessellationControlBit => 3,
+                    ShaderStageFlags.TessellationEvaluationBit => 4,
+                    _ => 0
+                };
+
+                if (shader.StageFlags == ShaderStageFlags.ComputeBit)
+                {
+                    IsCompute = true;
+                }
+
+                internalShaders[i] = shader;
+            }
+
+            _shaders = internalShaders;
+
+            bool usePd = !isMinimal && VulkanConfiguration.UsePushDescriptors && _gd.Capabilities.SupportsPushDescriptors;
+
+            // Minimal programs (e.g. helper shaders) build their layout directly
+            // from the sources; regular programs share cached layouts by stage mask.
+            _plce = isMinimal
+                ? gd.PipelineLayoutCache.Create(gd, device, shaders)
+                : gd.PipelineLayoutCache.GetOrCreate(gd, device, stages, usePd);
+
+            HasMinimalLayout = isMinimal;
+            UsePushDescriptors = usePd;
+
+            Stages = stages;
+
+            // Collects one binding list per shader for a given resource kind;
+            // returns an empty array when no shader has bindings of that kind.
+            int[][] GrabAll(Func<ShaderBindings, IReadOnlyCollection<int>> selector)
+            {
+                bool hasAny = false;
+                int[][] bindings = new int[internalShaders.Length][];
+
+                for (int i = 0; i < internalShaders.Length; i++)
+                {
+                    var collection = selector(internalShaders[i].Bindings);
+                    hasAny |= collection.Count != 0;
+                    bindings[i] = collection.ToArray();
+                }
+
+                return hasAny ? bindings : Array.Empty<int[]>();
+            }
+
+            Bindings = new[]
+            {
+                GrabAll(x => x.UniformBufferBindings),
+                GrabAll(x => x.StorageBufferBindings),
+                GrabAll(x => x.TextureBindings),
+                GrabAll(x => x.ImageBindings)
+            };
+
+            _compileTask = Task.CompletedTask;
+            _firstBackgroundUse = false;
+        }
+
+        // Constructor variant that also pre-builds a pipeline in the background
+        // from the supplied pipeline state.
+        public ShaderCollection(
+            VulkanRenderer gd,
+            Device device,
+            ShaderSource[] sources,
+            ProgramPipelineState state,
+            bool fromCache) : this(gd, device, sources)
+        {
+            _state = state;
+
+            _compileTask = BackgroundCompilation();
+            // Only warn about a pipeline cache miss for freshly-compiled programs.
+            _firstBackgroundUse = !fromCache;
+        }
+
+        // Waits for all stages, then builds an initial pipeline so the driver
+        // compiles the shaders before first use. Sets LinkStatus on failure.
+        private async Task BackgroundCompilation()
+        {
+            await Task.WhenAll(_shaders.Select(shader => shader.CompileTask));
+
+            if (_shaders.Any(shader => shader.CompileStatus == ProgramLinkStatus.Failure))
+            {
+                LinkStatus = ProgramLinkStatus.Failure;
+
+                return;
+            }
+
+            try
+            {
+                if (IsCompute)
+                {
+                    CreateBackgroundComputePipeline();
+                }
+                else
+                {
+                    CreateBackgroundGraphicsPipeline();
+                }
+            }
+            catch (VulkanException e)
+            {
+                Logger.Error?.PrintMsg(LogClass.Gpu, $"Background Compilation failed: {e.Message}");
+
+                LinkStatus = ProgramLinkStatus.Failure;
+            }
+        }
+
+        // Waits for compilation (if needed) and caches the per-stage create infos.
+        // Runs only once; later calls are no-ops.
+        private void EnsureShadersReady()
+        {
+            if (!_initialized)
+            {
+                CheckProgramLink(true);
+
+                ProgramLinkStatus resultStatus = ProgramLinkStatus.Success;
+
+                for (int i = 0; i < _shaders.Length; i++)
+                {
+                    var shader = _shaders[i];
+
+                    if (shader.CompileStatus != ProgramLinkStatus.Success)
+                    {
+                        resultStatus = ProgramLinkStatus.Failure;
+                    }
+
+                    _infos[i] = shader.GetInfo();
+                }
+
+                // If the link status was already set as failure by background compilation, prefer that decision.
+                if (LinkStatus != ProgramLinkStatus.Failure)
+                {
+                    LinkStatus = resultStatus;
+                }
+
+                _initialized = true;
+            }
+        }
+
+        public PipelineShaderStageCreateInfo[] GetInfos()
+        {
+            EnsureShadersReady();
+
+            return _infos;
+        }
+
+        // Lazily creates (and caches) a render pass compatible with the one that
+        // will be used at draw time, for background pipeline creation.
+        protected unsafe DisposableRenderPass CreateDummyRenderPass()
+        {
+            if (_dummyRenderPass.Value.Handle != 0)
+            {
+                return _dummyRenderPass;
+            }
+
+            return _dummyRenderPass = _state.ToRenderPass(_gd, _device);
+        }
+
+        // Builds the compute pipeline ahead of time using this program's single stage.
+        public void CreateBackgroundComputePipeline()
+        {
+            PipelineState pipeline = new PipelineState();
+            pipeline.Initialize();
+
+            pipeline.Stages[0] = _shaders[0].GetInfo();
+            pipeline.StagesCount = 1;
+            pipeline.PipelineLayout = PipelineLayout;
+
+            pipeline.CreateComputePipeline(_gd, _device, this, (_gd.Pipeline as PipelineBase).PipelineCache);
+            pipeline.Dispose();
+        }
+
+        public void CreateBackgroundGraphicsPipeline()
+        {
+            // To compile shaders in the background in Vulkan, we need to create valid pipelines using the shader modules.
+            // The GPU provides pipeline state via the GAL that can be converted into our internal Vulkan pipeline state.
+            // This should match the pipeline state at the time of the first draw. If it doesn't, then it'll likely be
+            // close enough that the GPU driver will reuse the compiled shader for the different state.
+
+            // First, we need to create a render pass object compatible with the one that will be used at runtime.
+            // The active attachment formats have been provided by the abstraction layer.
+            var renderPass = CreateDummyRenderPass();
+
+            PipelineState pipeline = _state.ToVulkanPipelineState(_gd);
+
+            // Copy the shader stage info to the pipeline.
+            var stages = pipeline.Stages.AsSpan();
+
+            for (int i = 0; i < _shaders.Length; i++)
+            {
+                stages[i] = _shaders[i].GetInfo();
+            }
+
+            pipeline.StagesCount = (uint)_shaders.Length;
+            pipeline.PipelineLayout = PipelineLayout;
+
+            pipeline.CreateGraphicsPipeline(_gd, _device, this, (_gd.Pipeline as PipelineBase).PipelineCache, renderPass.Value);
+            pipeline.Dispose();
+        }
+
+        // Polls (or waits for, when blocking) stage compilation and the background
+        // pipeline build, returning the aggregate link status.
+        public ProgramLinkStatus CheckProgramLink(bool blocking)
+        {
+            if (LinkStatus == ProgramLinkStatus.Incomplete)
+            {
+                ProgramLinkStatus resultStatus = ProgramLinkStatus.Success;
+
+                foreach (Shader shader in _shaders)
+                {
+                    if (shader.CompileStatus == ProgramLinkStatus.Incomplete)
+                    {
+                        if (blocking)
+                        {
+                            // Wait for this shader to finish compiling.
+                            shader.WaitForCompile();
+
+                            if (shader.CompileStatus != ProgramLinkStatus.Success)
+                            {
+                                resultStatus = ProgramLinkStatus.Failure;
+                            }
+                        }
+                        else
+                        {
+                            return ProgramLinkStatus.Incomplete;
+                        }
+                    }
+                }
+
+                if (!_compileTask.IsCompleted)
+                {
+                    if (blocking)
+                    {
+                        _compileTask.Wait();
+
+                        if (LinkStatus == ProgramLinkStatus.Failure)
+                        {
+                            return ProgramLinkStatus.Failure;
+                        }
+                    }
+                    else
+                    {
+                        return ProgramLinkStatus.Incomplete;
+                    }
+                }
+
+                return resultStatus;
+            }
+
+            return LinkStatus;
+        }
+
+        // No binary shader caching at this level; always returns null.
+        public byte[] GetBinary()
+        {
+            return null;
+        }
+
+        public void AddComputePipeline(ref SpecData key, Auto<DisposablePipeline> pipeline)
+        {
+            (_computePipelineCache ??= new()).Add(ref key, pipeline);
+        }
+
+        public void AddGraphicsPipeline(ref PipelineUid key, Auto<DisposablePipeline> pipeline)
+        {
+            (_graphicsPipelineCache ??= new()).Add(ref key, pipeline);
+        }
+
+        public bool TryGetComputePipeline(ref SpecData key, out Auto<DisposablePipeline> pipeline)
+        {
+            if (_computePipelineCache == null)
+            {
+                pipeline = default;
+                return false;
+            }
+
+            if (_computePipelineCache.TryGetValue(ref key, out pipeline))
+            {
+                return true;
+            }
+
+            return false;
+        }
+
+        public bool TryGetGraphicsPipeline(ref PipelineUid key, out Auto<DisposablePipeline> pipeline)
+        {
+            if (_graphicsPipelineCache == null)
+            {
+                pipeline = default;
+                return false;
+            }
+
+            if (!_graphicsPipelineCache.TryGetValue(ref key, out pipeline))
+            {
+                // A miss on first use means the background-compiled pipeline state
+                // did not match the actual draw state; warn once.
+                if (_firstBackgroundUse)
+                {
+                    Logger.Warning?.Print(LogClass.Gpu, "Background pipeline compile missed on draw - incorrect pipeline state?");
+                    _firstBackgroundUse = false;
+                }
+
+                return false;
+            }
+
+            _firstBackgroundUse = false;
+
+            return true;
+        }
+
+        public Auto<DescriptorSetCollection> GetNewDescriptorSetCollection(
+            VulkanRenderer gd,
+            int commandBufferIndex,
+            int setIndex,
+            out bool isNew)
+        {
+            return _plce.GetNewDescriptorSetCollection(gd, commandBufferIndex, setIndex, out isNew);
+        }
+
+        protected virtual unsafe void Dispose(bool disposing)
+        {
+            if (disposing)
+            {
+                // Remove returns false if already removed, guarding against double dispose.
+                if (!_gd.Shaders.Remove(this))
+                {
+                    return;
+                }
+
+                for (int i = 0; i < _shaders.Length; i++)
+                {
+                    _shaders[i].Dispose();
+                }
+
+                if (_graphicsPipelineCache != null)
+                {
+                    foreach (Auto<DisposablePipeline> pipeline in _graphicsPipelineCache.Values)
+                    {
+                        pipeline.Dispose();
+                    }
+                }
+
+                if (_computePipelineCache != null)
+                {
+                    foreach (Auto<DisposablePipeline> pipeline in _computePipelineCache.Values)
+                    {
+                        pipeline.Dispose();
+                    }
+                }
+
+                if (_dummyRenderPass.Value.Handle != 0)
+                {
+                    _dummyRenderPass.Dispose();
+                }
+            }
+        }
+
+        public void Dispose()
+        {
+            Dispose(true);
+        }
+    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ChangeBufferStrideShaderSource.comp b/src/Ryujinx.Graphics.Vulkan/Shaders/ChangeBufferStrideShaderSource.comp
new file mode 100644
index 00000000..081fc119
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ChangeBufferStrideShaderSource.comp
@@ -0,0 +1,64 @@
+#version 450 core
+
+#extension GL_EXT_shader_8bit_storage : require
+
+// Copies a buffer while converting each element from sourceStride to
+// targetStride bytes, zero-filling the added bytes. The element copies are
+// split evenly across the 64 invocations of the workgroup.
+layout (local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
+
+layout (std140, set = 0, binding = 0) uniform stride_arguments
+{
+    ivec4 stride_arguments_data;
+};
+
+layout (std430, set = 1, binding = 1) buffer in_s
+{
+    uint8_t[] in_data;
+};
+
+layout (std430, set = 1, binding = 2) buffer out_s
+{
+    uint8_t[] out_data;
+};
+
+void main()
+{
+    // Determine what slice of the stride copies this invocation will perform.
+
+    int sourceStride = stride_arguments_data.x;
+    int targetStride = stride_arguments_data.y;
+    int bufferSize = stride_arguments_data.z;
+    int sourceOffset = stride_arguments_data.w;
+
+    int strideRemainder = targetStride - sourceStride;
+    int invocations = int(gl_WorkGroupSize.x);
+
+    int copiesRequired = bufferSize / sourceStride;
+
+    // Find the copies that this invocation should perform.
+
+    // - Copies that all invocations perform.
+    int allInvocationCopies = copiesRequired / invocations;
+
+    // - Extra remainder copy that this invocation performs.
+    int index = int(gl_LocalInvocationID.x);
+    int extra = (index < (copiesRequired % invocations)) ? 1 : 0;
+
+    int copyCount = allInvocationCopies + extra;
+
+    // Finally, get the starting offset. Make sure to count extra copies.
+
+    int startCopy = allInvocationCopies * index + min(copiesRequired % invocations, index);
+
+    int srcOffset = sourceOffset + startCopy * sourceStride;
+    int dstOffset = startCopy * targetStride;
+
+    // Perform the copies for this region
+    for (int i=0; i<copyCount; i++) {
+        for (int j=0; j<sourceStride; j++) {
+            out_data[dstOffset++] = in_data[srcOffset++];
+        }
+
+        // Pad the destination element out to targetStride with zeros.
+        for (int j=0; j<strideRemainder; j++) {
+            out_data[dstOffset++] = uint8_t(0);
+        }
+    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitClearAlphaFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitClearAlphaFragmentShaderSource.frag
new file mode 100644
index 00000000..f31316d0
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitClearAlphaFragmentShaderSource.frag
@@ -0,0 +1,11 @@
+#version 450 core
+
+// Color blit fragment shader that copies RGB from the source texture
+// and forces the output alpha to 1.0.
+layout (binding = 0, set = 2) uniform sampler2D tex;
+
+layout (location = 0) in vec2 tex_coord;
+layout (location = 0) out vec4 colour;
+
+void main()
+{
+    colour = vec4(texture(tex, tex_coord).rgb, 1.0f);
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitFragmentShaderSource.frag
new file mode 100644
index 00000000..89dc1ff8
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitFragmentShaderSource.frag
@@ -0,0 +1,11 @@
+#version 450 core
+
+// Plain color blit: samples the source texture at the interpolated coordinate.
+layout (binding = 0, set = 2) uniform sampler2D tex;
+
+layout (location = 0) in vec2 tex_coord;
+layout (location = 0) out vec4 colour;
+
+void main()
+{
+    colour = texture(tex, tex_coord);
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitMsFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitMsFragmentShaderSource.frag
new file mode 100644
index 00000000..71145e02
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitMsFragmentShaderSource.frag
@@ -0,0 +1,11 @@
+#version 450 core
+
+// Color blit from a multisampled source: fetches the texel for the
+// current sample index (gl_SampleID) at the scaled coordinate.
+layout (binding = 0, set = 2) uniform sampler2DMS tex;
+
+layout (location = 0) in vec2 tex_coord;
+layout (location = 0) out vec4 colour;
+
+void main()
+{
+    colour = texelFetch(tex, ivec2(tex_coord * vec2(textureSize(tex).xy)), gl_SampleID);
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitVertexShaderSource.vert b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitVertexShaderSource.vert
new file mode 100644
index 00000000..be93a64d
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorBlitVertexShaderSource.vert
@@ -0,0 +1,20 @@
+#version 450 core
+
+// Blit vertex shader: derives the quad corner from the low two bits of
+// gl_VertexIndex and selects the matching texture coordinates from the
+// uniform (x0, x1, y0, y1 packed in tex_coord_in_data).
+layout (std140, binding = 1) uniform tex_coord_in
+{
+    vec4 tex_coord_in_data;
+};
+
+layout (location = 0) out vec2 tex_coord;
+
+void main()
+{
+    int low = gl_VertexIndex & 1;
+    int high = gl_VertexIndex >> 1;
+    tex_coord.x = tex_coord_in_data[low];
+    tex_coord.y = tex_coord_in_data[2 + high];
+    // Map the (low, high) bits to clip-space corners (-1 or +1).
+    gl_Position.x = (float(low) - 0.5f) * 2.0f;
+    gl_Position.y = (float(high) - 0.5f) * 2.0f;
+    gl_Position.z = 0.0f;
+    gl_Position.w = 1.0f;
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearFFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearFFragmentShaderSource.frag
new file mode 100644
index 00000000..ddd4369c
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearFFragmentShaderSource.frag
@@ -0,0 +1,9 @@
+#version 450 core
+
+// Color clear for float formats: writes the interpolated clear color.
+layout (location = 0) in vec4 clear_colour;
+layout (location = 0) out vec4 colour;
+
+void main()
+{
+    colour = clear_colour;
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearSIFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearSIFragmentShaderSource.frag
new file mode 100644
index 00000000..4254f4f8
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearSIFragmentShaderSource.frag
@@ -0,0 +1,9 @@
+#version 450 core
+
+// Color clear for signed integer formats: reinterprets the float
+// clear color bits as signed integers.
+layout (location = 0) in vec4 clear_colour;
+layout (location = 0) out ivec4 colour;
+
+void main()
+{
+    colour = floatBitsToInt(clear_colour);
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearUIFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearUIFragmentShaderSource.frag
new file mode 100644
index 00000000..08a6b864
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearUIFragmentShaderSource.frag
@@ -0,0 +1,9 @@
+#version 450 core
+
+// Color clear for unsigned integer formats: reinterprets the float
+// clear color bits as unsigned integers.
+layout (location = 0) in vec4 clear_colour;
+layout (location = 0) out uvec4 colour;
+
+void main()
+{
+    colour = floatBitsToUint(clear_colour);
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearVertexShaderSource.vert b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearVertexShaderSource.vert
new file mode 100644
index 00000000..2f1b9b2c
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorClearVertexShaderSource.vert
@@ -0,0 +1,19 @@
+#version 450 core
+
+// Clear vertex shader: emits fullscreen-quad corners from gl_VertexIndex
+// bits and forwards the uniform clear color to the fragment stage.
+layout (std140, binding = 1) uniform clear_colour_in
+{
+    vec4 clear_colour_in_data;
+};
+
+layout (location = 0) out vec4 clear_colour;
+
+void main()
+{
+    int low = gl_VertexIndex & 1;
+    int high = gl_VertexIndex >> 1;
+    clear_colour = clear_colour_in_data;
+    // Map the (low, high) bits to clip-space corners (-1 or +1).
+    gl_Position.x = (float(low) - 0.5f) * 2.0f;
+    gl_Position.y = (float(high) - 0.5f) * 2.0f;
+    gl_Position.z = 0.0f;
+    gl_Position.w = 1.0f;
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyShorteningComputeShaderSource.comp b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyShorteningComputeShaderSource.comp
new file mode 100644
index 00000000..78cc1cc6
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyShorteningComputeShaderSource.comp
@@ -0,0 +1,36 @@
+#version 450 core
+
+// Copy that narrows component width: each source texel's R/G/B/A components
+// are written to four consecutive destination texels (destination X is the
+// source X shifted left by `ratio`).
+layout (std140, binding = 0) uniform ratio_in
+{
+    int ratio;
+};
+
+layout (set = 2, binding = 0) uniform usampler2D src;
+layout (set = 3, binding = 0) writeonly uniform uimage2D dst;
+
+layout (local_size_x = 32, local_size_y = 32, local_size_z = 1) in;
+
+void main()
+{
+    uvec2 coords = gl_GlobalInvocationID.xy;
+    ivec2 textureSz = textureSize(src, 0);
+
+    // Skip invocations outside the source texture.
+    if (int(coords.x) >= textureSz.x || int(coords.y) >= textureSz.y)
+    {
+        return;
+    }
+
+    uint coordsShifted = coords.x << ratio;
+
+    uvec2 dstCoords0 = uvec2(coordsShifted, coords.y);
+    uvec2 dstCoords1 = uvec2(coordsShifted + 1, coords.y);
+    uvec2 dstCoords2 = uvec2(coordsShifted + 2, coords.y);
+    uvec2 dstCoords3 = uvec2(coordsShifted + 3, coords.y);
+
+    uvec4 rgba = texelFetch(src, ivec2(coords), 0);
+
+    // Splat each component into its own destination texel.
+    imageStore(dst, ivec2(dstCoords0), rgba.rrrr);
+    imageStore(dst, ivec2(dstCoords1), rgba.gggg);
+    imageStore(dst, ivec2(dstCoords2), rgba.bbbb);
+    imageStore(dst, ivec2(dstCoords3), rgba.aaaa);
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyToNonMsComputeShaderSource.comp b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyToNonMsComputeShaderSource.comp
new file mode 100644
index 00000000..a3fe02ca
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyToNonMsComputeShaderSource.comp
@@ -0,0 +1,37 @@
+#version 450 core
+
+// Copies a multisampled source into a (larger) non-MS destination, deriving
+// the source sample index and texel position from bit fields of the
+// destination coordinate. sample_counts_log2 carries log2 sample counts for
+// destination (.xy) and source (.zw) — presumably per axis; confirm with callers.
+layout (std140, binding = 0) uniform sample_counts_log2_in
+{
+    ivec4 sample_counts_log2;
+};
+
+layout (set = 2, binding = 0) uniform usampler2DMS srcMS;
+layout (set = 3, binding = 0) writeonly uniform uimage2D dst;
+
+layout (local_size_x = 32, local_size_y = 32, local_size_z = 1) in;
+
+void main()
+{
+    uvec2 coords = gl_GlobalInvocationID.xy;
+    ivec2 imageSz = imageSize(dst);
+
+    // Skip invocations outside the destination image.
+    if (int(coords.x) >= imageSz.x || int(coords.y) >= imageSz.y)
+    {
+        return;
+    }
+
+    int deltaX = sample_counts_log2.x - sample_counts_log2.z;
+    int deltaY = sample_counts_log2.y - sample_counts_log2.w;
+    int samplesInXLog2 = sample_counts_log2.z;
+    int samplesInYLog2 = sample_counts_log2.w;
+    int samplesInX = 1 << samplesInXLog2;
+    int samplesInY = 1 << samplesInYLog2;
+    // Build the sample index from the interleaved coordinate bits.
+    int sampleIdx = ((int(coords.x) >> deltaX) & (samplesInX - 1)) | (((int(coords.y) >> deltaY) & (samplesInY - 1)) << samplesInXLog2);
+
+    samplesInXLog2 = sample_counts_log2.x;
+    samplesInYLog2 = sample_counts_log2.y;
+
+    // Source texel position: destination coordinate divided by samples per axis.
+    ivec2 shiftedCoords = ivec2(int(coords.x) >> samplesInXLog2, int(coords.y) >> samplesInYLog2);
+
+    imageStore(dst, ivec2(coords), texelFetch(srcMS, shiftedCoords, sampleIdx));
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyWideningComputeShaderSource.comp b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyWideningComputeShaderSource.comp
new file mode 100644
index 00000000..a9be454f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorCopyWideningComputeShaderSource.comp
@@ -0,0 +1,31 @@
+#version 450 core
+
+// Copy that widens component width: gathers the .r components of four
+// consecutive source texels into one RGBA destination texel (source X is the
+// destination X shifted left by `ratio`). Inverse of the shortening copy.
+layout (std140, binding = 0) uniform ratio_in
+{
+    int ratio;
+};
+
+layout (set = 2, binding = 0) uniform usampler2D src;
+layout (set = 3, binding = 0) writeonly uniform uimage2D dst;
+
+layout (local_size_x = 32, local_size_y = 32, local_size_z = 1) in;
+
+void main()
+{
+    uvec2 coords = gl_GlobalInvocationID.xy;
+    ivec2 imageSz = imageSize(dst);
+
+    // Skip invocations outside the destination image.
+    if (int(coords.x) >= imageSz.x || int(coords.y) >= imageSz.y)
+    {
+        return;
+    }
+
+    uvec2 srcCoords = uvec2(coords.x << ratio, coords.y);
+
+    uint r = texelFetchOffset(src, ivec2(srcCoords), 0, ivec2(0, 0)).r;
+    uint g = texelFetchOffset(src, ivec2(srcCoords), 0, ivec2(1, 0)).r;
+    uint b = texelFetchOffset(src, ivec2(srcCoords), 0, ivec2(2, 0)).r;
+    uint a = texelFetchOffset(src, ivec2(srcCoords), 0, ivec2(3, 0)).r;
+
+    imageStore(dst, ivec2(coords), uvec4(r, g, b, a));
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorDrawToMsFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorDrawToMsFragmentShaderSource.frag
new file mode 100644
index 00000000..e9151c44
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorDrawToMsFragmentShaderSource.frag
@@ -0,0 +1,27 @@
+#version 450 core
+
+layout (std140, binding = 0) uniform sample_counts_log2_in
+{
+ ivec4 sample_counts_log2;
+};
+
+layout (set = 2, binding = 0) uniform usampler2D src;
+
+layout (location = 0) out uvec4 colour;
+
+void main()
+{
+ int deltaX = sample_counts_log2.x - sample_counts_log2.z;
+ int deltaY = sample_counts_log2.y - sample_counts_log2.w;
+ int samplesInXLog2 = sample_counts_log2.z;
+ int samplesInYLog2 = sample_counts_log2.w;
+ int samplesInX = 1 << samplesInXLog2;
+ int samplesInY = 1 << samplesInYLog2;
+
+ int sampleIndex = gl_SampleID;
+
+ int inX = (int(gl_FragCoord.x) << sample_counts_log2.x) | ((sampleIndex & (samplesInX - 1)) << deltaX);
+ int inY = (int(gl_FragCoord.y) << sample_counts_log2.y) | ((sampleIndex >> samplesInXLog2) << deltaY);
+
+ colour = texelFetch(src, ivec2(inX, inY), 0);
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ColorDrawToMsVertexShaderSource.vert b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorDrawToMsVertexShaderSource.vert
new file mode 100644
index 00000000..558792cc
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ColorDrawToMsVertexShaderSource.vert
@@ -0,0 +1,11 @@
+#version 450 core
+
+void main()
+{
+ int low = gl_VertexIndex & 1;
+ int high = gl_VertexIndex >> 1;
+ gl_Position.x = (float(low) - 0.5f) * 2.0f;
+ gl_Position.y = (float(high) - 0.5f) * 2.0f;
+ gl_Position.z = 0.0f;
+ gl_Position.w = 1.0f;
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ConvertIndexBufferShaderSource.comp b/src/Ryujinx.Graphics.Vulkan/Shaders/ConvertIndexBufferShaderSource.comp
new file mode 100644
index 00000000..d56d6cfd
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ConvertIndexBufferShaderSource.comp
@@ -0,0 +1,58 @@
+#version 450 core
+
+#extension GL_EXT_scalar_block_layout : require
+#extension GL_EXT_shader_8bit_storage : require
+
+layout (local_size_x = 16, local_size_y = 1, local_size_z = 1) in;
+
+layout (std430, set = 0, binding = 0) uniform index_buffer_pattern
+{
+ int ibp_pattern[8];
+ int ibp_primitive_vertices;
+ int ibp_primitive_vertices_out;
+ int ibp_index_size;
+ int ibp_index_size_out;
+ int ibp_base_index;
+ int ibp_index_stride;
+ int src_offset;
+ int total_primitives;
+};
+
+layout (std430, set = 1, binding = 1) buffer in_s
+{
+ uint8_t[] in_data;
+};
+
+layout (std430, set = 1, binding = 2) buffer out_s
+{
+ uint8_t[] out_data;
+};
+
+void main()
+{
+ int primitiveIndex = int(gl_GlobalInvocationID.x);
+ if (primitiveIndex >= total_primitives)
+ {
+ return;
+ }
+
+ int inOffset = primitiveIndex * ibp_index_stride;
+ int outOffset = primitiveIndex * ibp_primitive_vertices_out;
+
+ for (int i = 0; i < ibp_primitive_vertices_out; i++)
+ {
+ int j;
+ int io = max(0, inOffset + ibp_base_index + ibp_pattern[i]) * ibp_index_size;
+ int oo = (outOffset + i) * ibp_index_size_out;
+
+ for (j = 0; j < ibp_index_size; j++)
+ {
+ out_data[oo + j] = in_data[src_offset + io + j];
+ }
+
+ for (; j < ibp_index_size_out; j++)
+ {
+ out_data[oo + j] = uint8_t(0);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ConvertIndirectDataShaderSource.comp b/src/Ryujinx.Graphics.Vulkan/Shaders/ConvertIndirectDataShaderSource.comp
new file mode 100644
index 00000000..6ee96b7b
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ConvertIndirectDataShaderSource.comp
@@ -0,0 +1,103 @@
+#version 450 core
+
+#extension GL_EXT_scalar_block_layout : require
+
+layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+
+layout (std430, set = 0, binding = 0) uniform draw_count_uniform
+{
+ uint[64] draw_count_buffer;
+};
+
+layout (std430, set = 1, binding = 1) buffer indirect_in
+{
+ int[] indirect_data_in;
+};
+
+layout (std430, set = 1, binding = 2) buffer indirect_out
+{
+ int[] indirect_data_out;
+};
+
+layout (std430, set = 1, binding = 3) buffer index_buffer_pattern
+{
+ int ibp_pattern[8];
+ int ibp_primitive_vertices;
+ int ibp_primitive_vertices_out;
+ int ibp_index_size;
+ int ibp_index_size_out;
+ int ibp_base_index;
+ int ibp_index_stride;
+ int src_offset;
+ int total_primitives;
+ int dispatch_x;
+ int dispatch_y;
+ int dispatch_z;
+ int has_draw_count;
+ uint max_draw_count;
+ int draw_count_offset;
+ int indirect_data_stride;
+ int indirect_data_offset;
+};
+
+int GetPrimitiveCount(int vertexCount)
+{
+ return max(0, (vertexCount - ibp_base_index) / ibp_index_stride);
+}
+
+int GetConvertedCount(int indexCount)
+{
+ int primitiveCount = GetPrimitiveCount(indexCount);
+ return primitiveCount * ibp_primitive_vertices_out;
+}
+
+void main()
+{
+ uint drawCount = has_draw_count != 0 ? min(draw_count_buffer[draw_count_offset], max_draw_count) : max_draw_count;
+ uint i = 0;
+
+ if (drawCount != 0)
+ {
+ int firstIndex = indirect_data_in[indirect_data_offset + 2];
+ int endIndex = firstIndex + indirect_data_in[indirect_data_offset];
+
+ for (i = 1; i < drawCount; i++)
+ {
+ int offset = int(i) * indirect_data_stride;
+ int inOffset = indirect_data_offset + offset;
+
+ int currentFirstIndex = indirect_data_in[inOffset + 2];
+ firstIndex = min(firstIndex, currentFirstIndex);
+ endIndex = max(endIndex, currentFirstIndex + indirect_data_in[inOffset]);
+ }
+
+ int indexCount = endIndex - firstIndex;
+
+ dispatch_x = (indexCount + 15) / 16;
+ src_offset += firstIndex * ibp_index_size;
+ total_primitives = GetPrimitiveCount(indexCount);
+
+ for (i = 0; i < drawCount; i++)
+ {
+ int offset = int(i) * indirect_data_stride;
+ int inOffset = indirect_data_offset + offset;
+
+ indirect_data_out[offset] = GetConvertedCount(indirect_data_in[inOffset]); // Index count
+ indirect_data_out[offset + 1] = indirect_data_in[inOffset + 1]; // Instance count
+ indirect_data_out[offset + 2] = GetConvertedCount(indirect_data_in[inOffset + 2] - firstIndex); // First index
+ indirect_data_out[offset + 3] = indirect_data_in[inOffset + 3]; // Vertex offset
+ indirect_data_out[offset + 4] = indirect_data_in[inOffset + 4]; // First instance
+ }
+ }
+
+ for (; i < max_draw_count; i++)
+ {
+ int offset = int(i) * indirect_data_stride;
+
+ indirect_data_out[offset] = 0;
+ indirect_data_out[offset + 1] = 0;
+ indirect_data_out[offset + 2] = 0;
+ indirect_data_out[offset + 3] = 0;
+ indirect_data_out[offset + 4] = 0;
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/DepthBlitFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/DepthBlitFragmentShaderSource.frag
new file mode 100644
index 00000000..55b7be13
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/DepthBlitFragmentShaderSource.frag
@@ -0,0 +1,10 @@
+#version 450 core
+
+layout (binding = 0, set = 2) uniform sampler2D texDepth;
+
+layout (location = 0) in vec2 tex_coord;
+
+void main()
+{
+ gl_FragDepth = texture(texDepth, tex_coord).r;
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/DepthBlitMsFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/DepthBlitMsFragmentShaderSource.frag
new file mode 100644
index 00000000..c93c7e7f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/DepthBlitMsFragmentShaderSource.frag
@@ -0,0 +1,10 @@
+#version 450 core
+
+layout (binding = 0, set = 2) uniform sampler2DMS texDepth;
+
+layout (location = 0) in vec2 tex_coord;
+
+void main()
+{
+ gl_FragDepth = texelFetch(texDepth, ivec2(tex_coord * vec2(textureSize(texDepth).xy)), gl_SampleID).r;
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/DepthDrawToMsFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/DepthDrawToMsFragmentShaderSource.frag
new file mode 100644
index 00000000..bf5f612f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/DepthDrawToMsFragmentShaderSource.frag
@@ -0,0 +1,25 @@
+#version 450 core
+
+layout (std140, binding = 0) uniform sample_counts_log2_in
+{
+ ivec4 sample_counts_log2;
+};
+
+layout (set = 2, binding = 0) uniform sampler2D src;
+
+void main()
+{
+ int deltaX = sample_counts_log2.x - sample_counts_log2.z;
+ int deltaY = sample_counts_log2.y - sample_counts_log2.w;
+ int samplesInXLog2 = sample_counts_log2.z;
+ int samplesInYLog2 = sample_counts_log2.w;
+ int samplesInX = 1 << samplesInXLog2;
+ int samplesInY = 1 << samplesInYLog2;
+
+ int sampleIndex = gl_SampleID;
+
+ int inX = (int(gl_FragCoord.x) << sample_counts_log2.x) | ((sampleIndex & (samplesInX - 1)) << deltaX);
+ int inY = (int(gl_FragCoord.y) << sample_counts_log2.y) | ((sampleIndex >> samplesInXLog2) << deltaY);
+
+ gl_FragDepth = texelFetch(src, ivec2(inX, inY), 0).r;
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/DepthDrawToNonMsFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/DepthDrawToNonMsFragmentShaderSource.frag
new file mode 100644
index 00000000..e376b2e7
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/DepthDrawToNonMsFragmentShaderSource.frag
@@ -0,0 +1,28 @@
+#version 450 core
+
+layout (std140, binding = 0) uniform sample_counts_log2_in
+{
+ ivec4 sample_counts_log2;
+};
+
+layout (set = 2, binding = 0) uniform sampler2DMS srcMS;
+
+void main()
+{
+ uvec2 coords = uvec2(gl_FragCoord.xy);
+
+ int deltaX = sample_counts_log2.x - sample_counts_log2.z;
+ int deltaY = sample_counts_log2.y - sample_counts_log2.w;
+ int samplesInXLog2 = sample_counts_log2.z;
+ int samplesInYLog2 = sample_counts_log2.w;
+ int samplesInX = 1 << samplesInXLog2;
+ int samplesInY = 1 << samplesInYLog2;
+ int sampleIdx = ((int(coords.x) >> deltaX) & (samplesInX - 1)) | (((int(coords.y) >> deltaY) & (samplesInY - 1)) << samplesInXLog2);
+
+ samplesInXLog2 = sample_counts_log2.x;
+ samplesInYLog2 = sample_counts_log2.y;
+
+ ivec2 shiftedCoords = ivec2(int(coords.x) >> samplesInXLog2, int(coords.y) >> samplesInYLog2);
+
+ gl_FragDepth = texelFetch(srcMS, shiftedCoords, sampleIdx).r;
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/ShaderBinaries.cs b/src/Ryujinx.Graphics.Vulkan/Shaders/ShaderBinaries.cs
new file mode 100644
index 00000000..c9dde7b6
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/ShaderBinaries.cs
@@ -0,0 +1,2413 @@
+namespace Ryujinx.Graphics.Vulkan.Shaders
+{
+ static class ShaderBinaries
+ {
+ public static readonly byte[] ChangeBufferStrideShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x05, 0x01, 0x00, 0x0A, 0x00, 0x0D, 0x00, 0x8E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x60, 0x11, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C,
+ 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, 0x00, 0x10, 0x00, 0x06, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00,
+ 0x04, 0x00, 0x08, 0x00, 0x47, 0x4C, 0x5F, 0x45, 0x58, 0x54, 0x5F, 0x73, 0x68, 0x61, 0x64, 0x65,
+ 0x72, 0x5F, 0x38, 0x62, 0x69, 0x74, 0x5F, 0x73, 0x74, 0x6F, 0x72, 0x61, 0x67, 0x65, 0x00, 0x00,
+ 0x04, 0x00, 0x0A, 0x00, 0x47, 0x4C, 0x5F, 0x47, 0x4F, 0x4F, 0x47, 0x4C, 0x45, 0x5F, 0x63, 0x70,
+ 0x70, 0x5F, 0x73, 0x74, 0x79, 0x6C, 0x65, 0x5F, 0x6C, 0x69, 0x6E, 0x65, 0x5F, 0x64, 0x69, 0x72,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x47, 0x4C, 0x5F, 0x47,
+ 0x4F, 0x4F, 0x47, 0x4C, 0x45, 0x5F, 0x69, 0x6E, 0x63, 0x6C, 0x75, 0x64, 0x65, 0x5F, 0x64, 0x69,
+ 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x73, 0x6F, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x72, 0x69, 0x64, 0x65, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x07, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x73, 0x74, 0x72, 0x69, 0x64, 0x65, 0x5F, 0x61,
+ 0x72, 0x67, 0x75, 0x6D, 0x65, 0x6E, 0x74, 0x73, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x09, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x74, 0x72, 0x69, 0x64, 0x65, 0x5F, 0x61,
+ 0x72, 0x67, 0x75, 0x6D, 0x65, 0x6E, 0x74, 0x73, 0x5F, 0x64, 0x61, 0x74, 0x61, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x03, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, 0x72, 0x69, 0x64, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x17, 0x00, 0x00, 0x00, 0x62, 0x75, 0x66, 0x66,
+ 0x65, 0x72, 0x53, 0x69, 0x7A, 0x65, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x73, 0x6F, 0x75, 0x72, 0x63, 0x65, 0x4F, 0x66, 0x66, 0x73, 0x65, 0x74, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x06, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x73, 0x74, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65,
+ 0x6D, 0x61, 0x69, 0x6E, 0x64, 0x65, 0x72, 0x00, 0x05, 0x00, 0x05, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x69, 0x6E, 0x76, 0x6F, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x73, 0x00, 0x05, 0x00, 0x06, 0x00,
+ 0x25, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x70, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x64, 0x00, 0x00, 0x05, 0x00, 0x07, 0x00, 0x29, 0x00, 0x00, 0x00, 0x61, 0x6C, 0x6C, 0x49,
+ 0x6E, 0x76, 0x6F, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x43, 0x6F, 0x70, 0x69, 0x65, 0x73, 0x00,
+ 0x05, 0x00, 0x04, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x64, 0x65, 0x78, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x08, 0x00, 0x30, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x4C, 0x6F, 0x63, 0x61, 0x6C,
+ 0x49, 0x6E, 0x76, 0x6F, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x04, 0x00, 0x35, 0x00, 0x00, 0x00, 0x65, 0x78, 0x74, 0x72, 0x61, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x05, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x70, 0x79, 0x43, 0x6F, 0x75, 0x6E,
+ 0x74, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x42, 0x00, 0x00, 0x00, 0x73, 0x74, 0x61, 0x72,
+ 0x74, 0x43, 0x6F, 0x70, 0x79, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x4C, 0x00, 0x00, 0x00,
+ 0x73, 0x72, 0x63, 0x4F, 0x66, 0x66, 0x73, 0x65, 0x74, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00,
+ 0x52, 0x00, 0x00, 0x00, 0x64, 0x73, 0x74, 0x4F, 0x66, 0x66, 0x73, 0x65, 0x74, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x03, 0x00, 0x56, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x5F, 0x00, 0x00, 0x00, 0x6A, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x6A, 0x00, 0x00, 0x00,
+ 0x6F, 0x75, 0x74, 0x5F, 0x73, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00, 0x6A, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x6F, 0x75, 0x74, 0x5F, 0x64, 0x61, 0x74, 0x61, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x03, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x5F, 0x73, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x5F, 0x64, 0x61, 0x74, 0x61, 0x00,
+ 0x05, 0x00, 0x03, 0x00, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x7B, 0x00, 0x00, 0x00, 0x6A, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x30, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x69, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x6A, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00,
+ 0x6A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x6C, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x6C, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x6F, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x70, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x72, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x72, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x8D, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x24, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x2F, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x2F, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x31, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x14, 0x00, 0x02, 0x00, 0x3A, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x68, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x03, 0x00, 0x69, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00,
+ 0x6A, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x6B, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x6A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x6B, 0x00, 0x00, 0x00,
+ 0x6C, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x03, 0x00, 0x6F, 0x00, 0x00, 0x00,
+ 0x68, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00, 0x70, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x71, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x71, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x75, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x2C, 0x00, 0x06, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x8D, 0x00, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x05, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x7B, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x13, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x17, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x87, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0x27, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x25, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x87, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x29, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x05, 0x00, 0x31, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00,
+ 0x32, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x33, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x8B, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0x00, 0x00, 0xB1, 0x00, 0x05, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, 0xA9, 0x00, 0x06, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x35, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x8B, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00,
+ 0x2D, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00,
+ 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00,
+ 0x4A, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x42, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4D, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00,
+ 0x4F, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00,
+ 0x4D, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x4C, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x53, 0x00, 0x00, 0x00,
+ 0x42, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00,
+ 0x53, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x52, 0x00, 0x00, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x56, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x57, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x57, 0x00, 0x00, 0x00,
+ 0xF6, 0x00, 0x04, 0x00, 0x59, 0x00, 0x00, 0x00, 0x5A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x5B, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x5B, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00,
+ 0xB1, 0x00, 0x05, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x5E, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00,
+ 0x5D, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00,
+ 0x59, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x58, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x5F, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x60, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x60, 0x00, 0x00, 0x00, 0xF6, 0x00, 0x04, 0x00, 0x62, 0x00, 0x00, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x64, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x64, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x65, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x66, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0xB1, 0x00, 0x05, 0x00, 0x3A, 0x00, 0x00, 0x00,
+ 0x67, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00,
+ 0x67, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x62, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x61, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x6D, 0x00, 0x00, 0x00,
+ 0x52, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x00, 0x00,
+ 0x6D, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x52, 0x00, 0x00, 0x00,
+ 0x6E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x73, 0x00, 0x00, 0x00,
+ 0x4C, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00,
+ 0x73, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x4C, 0x00, 0x00, 0x00,
+ 0x74, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x75, 0x00, 0x00, 0x00, 0x76, 0x00, 0x00, 0x00,
+ 0x72, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x73, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x68, 0x00, 0x00, 0x00, 0x77, 0x00, 0x00, 0x00, 0x76, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x75, 0x00, 0x00, 0x00, 0x78, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x6D, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x78, 0x00, 0x00, 0x00, 0x77, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x63, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x63, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x79, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00,
+ 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x7A, 0x00, 0x00, 0x00, 0x79, 0x00, 0x00, 0x00,
+ 0x3C, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x7A, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x60, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x62, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x7C, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x7C, 0x00, 0x00, 0x00, 0xF6, 0x00, 0x04, 0x00,
+ 0x7E, 0x00, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x80, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0xB1, 0x00, 0x05, 0x00,
+ 0x3A, 0x00, 0x00, 0x00, 0x83, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00,
+ 0xFA, 0x00, 0x04, 0x00, 0x83, 0x00, 0x00, 0x00, 0x7D, 0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x7D, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x84, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x85, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x52, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0x71, 0x00, 0x04, 0x00, 0x68, 0x00, 0x00, 0x00,
+ 0x86, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x75, 0x00, 0x00, 0x00,
+ 0x87, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x87, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x7F, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x89, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x89, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x7C, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x7E, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x5A, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x5A, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x8A, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x8B, 0x00, 0x00, 0x00, 0x8A, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x56, 0x00, 0x00, 0x00, 0x8B, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x57, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x59, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00,
+ 0x38, 0x00, 0x01, 0x00
+ };
+
+ public static readonly byte[] ColorBlitClearAlphaFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x6C, 0x6F,
+ 0x75, 0x72, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x00,
+ 0x05, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72,
+ 0x64, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x3F, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x57, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorBlitFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x6C, 0x6F,
+ 0x75, 0x72, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x00,
+ 0x05, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72,
+ 0x64, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x57, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x09, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorBlitMsFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x32, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x63, 0x6F, 0x6C, 0x6F, 0x75, 0x72, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x74, 0x65, 0x78, 0x00, 0x05, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x5F,
+ 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x67, 0x6C, 0x5F, 0x53, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x49, 0x44, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x68, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorBlitVertexShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x3F, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x49, 0x6E, 0x64,
+ 0x65, 0x78, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x5F,
+ 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x74, 0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x08, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x5F,
+ 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x5F, 0x69, 0x6E, 0x5F, 0x64, 0x61, 0x74, 0x61, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00,
+ 0x2A, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50, 0x65, 0x72, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x67, 0x6C, 0x5F, 0x50, 0x6F, 0x73, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x00, 0x06, 0x00, 0x07, 0x00,
+ 0x2A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50, 0x6F, 0x69, 0x6E, 0x74,
+ 0x53, 0x69, 0x7A, 0x65, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x07, 0x00, 0x2A, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x43, 0x6C, 0x69, 0x70, 0x44, 0x69, 0x73, 0x74, 0x61,
+ 0x6E, 0x63, 0x65, 0x00, 0x06, 0x00, 0x07, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x67, 0x6C, 0x5F, 0x43, 0x75, 0x6C, 0x6C, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6E, 0x63, 0x65, 0x00,
+ 0x05, 0x00, 0x03, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x2A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x2A, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00,
+ 0x2A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x20, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x04, 0x00, 0x29, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x27, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x06, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x2B, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x2B, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0x2B, 0x00, 0x04, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x2B, 0x00, 0x04, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3F, 0x2B, 0x00, 0x04, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0xC7, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x24, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x24, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0x25, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x20, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x26, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x83, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x32, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x33, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00,
+ 0x6F, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x83, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00,
+ 0x2F, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x38, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x3A, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x20, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x3E, 0x00, 0x00, 0x00,
+ 0x3C, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorClearFFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x6C, 0x6F,
+ 0x75, 0x72, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x65, 0x61,
+ 0x72, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x75, 0x72, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x03, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorClearSIFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x6C, 0x6F,
+ 0x75, 0x72, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x65, 0x61,
+ 0x72, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x75, 0x72, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorClearUIFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x6C, 0x6F,
+ 0x75, 0x72, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x65, 0x61,
+ 0x72, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x75, 0x72, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorClearVertexShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x49, 0x6E, 0x64,
+ 0x65, 0x78, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x65, 0x61,
+ 0x72, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x75, 0x72, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x65, 0x61, 0x72, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x75, 0x72,
+ 0x5F, 0x69, 0x6E, 0x00, 0x06, 0x00, 0x09, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x63, 0x6C, 0x65, 0x61, 0x72, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x75, 0x72, 0x5F, 0x69, 0x6E, 0x5F,
+ 0x64, 0x61, 0x74, 0x61, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50,
+ 0x65, 0x72, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50, 0x6F, 0x73, 0x69, 0x74,
+ 0x69, 0x6F, 0x6E, 0x00, 0x06, 0x00, 0x07, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x67, 0x6C, 0x5F, 0x50, 0x6F, 0x69, 0x6E, 0x74, 0x53, 0x69, 0x7A, 0x65, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x07, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x43,
+ 0x6C, 0x69, 0x70, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6E, 0x63, 0x65, 0x00, 0x06, 0x00, 0x07, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x43, 0x75, 0x6C, 0x6C, 0x44,
+ 0x69, 0x73, 0x74, 0x61, 0x6E, 0x63, 0x65, 0x00, 0x05, 0x00, 0x03, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x2A, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x03, 0x00, 0x15, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x04, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x06, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x20, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x20, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x3F, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x40, 0x2B, 0x00, 0x04, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x29, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x3F, 0x2B, 0x00, 0x04, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0xC7, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x83, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x25, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x29, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x2A, 0x00, 0x00, 0x00,
+ 0x27, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x83, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00,
+ 0x2C, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x29, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x29, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x32, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x29, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x35, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorCopyShorteningComputeShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x05, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x79, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x32, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x38, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00,
+ 0x60, 0x00, 0x00, 0x00, 0x10, 0x00, 0x06, 0x00, 0x04, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x67, 0x6C, 0x5F, 0x47, 0x6C, 0x6F, 0x62, 0x61, 0x6C, 0x49, 0x6E, 0x76, 0x6F, 0x63, 0x61, 0x74,
+ 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x73, 0x72, 0x63, 0x00, 0x05, 0x00, 0x05, 0x00, 0x36, 0x00, 0x00, 0x00, 0x72, 0x61, 0x74, 0x69,
+ 0x6F, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x05, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x72, 0x61, 0x74, 0x69, 0x6F, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x60, 0x00, 0x00, 0x00,
+ 0x64, 0x73, 0x74, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x38, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x38, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x60, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x60, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x60, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x76, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x13, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x02, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x37, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x37, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x39, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x4A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x5E, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x5F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5E, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x5F, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x75, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x06, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x76, 0x00, 0x00, 0x00, 0x75, 0x00, 0x00, 0x00, 0x75, 0x00, 0x00, 0x00,
+ 0x28, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0xF7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0x00, 0x03, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x78, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x78, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x4F, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x67, 0x00, 0x05, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x05, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x24, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0xA8, 0x00, 0x04, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00,
+ 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x25, 0x00, 0x00, 0x00,
+ 0x26, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00,
+ 0x2A, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x05, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x27, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x27, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x78, 0x00, 0x00, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x31, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
+ 0x31, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x30, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x77, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x31, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00,
+ 0x39, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00,
+ 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00,
+ 0x3C, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x50, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x4E, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x07, 0x00, 0x56, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00,
+ 0x5C, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x02, 0x20, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00,
+ 0x7C, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00,
+ 0x4F, 0x00, 0x09, 0x00, 0x56, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00,
+ 0x5D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x63, 0x00, 0x05, 0x00, 0x61, 0x00, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00,
+ 0x65, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00,
+ 0x66, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x68, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x09, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x6A, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x63, 0x00, 0x05, 0x00,
+ 0x66, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, 0x6A, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00, 0x6B, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00,
+ 0x7C, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x6D, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00,
+ 0x4F, 0x00, 0x09, 0x00, 0x56, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00,
+ 0x5D, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x63, 0x00, 0x05, 0x00, 0x6B, 0x00, 0x00, 0x00, 0x6D, 0x00, 0x00, 0x00,
+ 0x6F, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x72, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x09, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x74, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x63, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x77, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x77, 0x00, 0x00, 0x00,
+ 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorCopyToNonMsComputeShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x86, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x32, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x38, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x06, 0x00, 0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x10, 0x00, 0x06, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x47, 0x6C, 0x6F, 0x62, 0x61, 0x6C, 0x49, 0x6E, 0x76,
+ 0x6F, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x64, 0x73, 0x74, 0x00, 0x05, 0x00, 0x08, 0x00, 0x32, 0x00, 0x00, 0x00,
+ 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x73, 0x5F, 0x6C, 0x6F,
+ 0x67, 0x32, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x32, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74,
+ 0x73, 0x5F, 0x6C, 0x6F, 0x67, 0x32, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x73, 0x72, 0x63, 0x4D,
+ 0x53, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x15, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x34, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x34, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x83, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x13, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x02, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x31, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x03, 0x00, 0x32, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x33, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x33, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00,
+ 0x78, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1B, 0x00, 0x03, 0x00, 0x79, 0x00, 0x00, 0x00, 0x78, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x7A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x7A, 0x00, 0x00, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x06, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x83, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00,
+ 0x25, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0xF7, 0x00, 0x03, 0x00, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0x00, 0x03, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x85, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x4F, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x68, 0x00, 0x04, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7C, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xA8, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x24, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x23, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x7C, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00,
+ 0x28, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x24, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x2C, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFA, 0x00, 0x04, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x2D, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x84, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x37, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x36, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x35, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x3C, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00,
+ 0x25, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x36, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00,
+ 0xC4, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x4D, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00,
+ 0x4B, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x56, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00, 0x4D, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00,
+ 0xC7, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x58, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x5C, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x5E, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00,
+ 0xC7, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x5E, 0x00, 0x00, 0x00,
+ 0x60, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00,
+ 0x61, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0xC5, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00,
+ 0xC3, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x73, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00,
+ 0x3F, 0x00, 0x00, 0x00, 0x50, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00,
+ 0x6E, 0x00, 0x00, 0x00, 0x73, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0x75, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x77, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x79, 0x00, 0x00, 0x00,
+ 0x7C, 0x00, 0x00, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x78, 0x00, 0x00, 0x00,
+ 0x7F, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x07, 0x00, 0x80, 0x00, 0x00, 0x00,
+ 0x81, 0x00, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x00, 0x00, 0x63, 0x00, 0x04, 0x00, 0x75, 0x00, 0x00, 0x00, 0x77, 0x00, 0x00, 0x00,
+ 0x81, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x84, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x84, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorCopyWideningComputeShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x05, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x72, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x32, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x38, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00,
+ 0x42, 0x00, 0x00, 0x00, 0x10, 0x00, 0x06, 0x00, 0x04, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x67, 0x6C, 0x5F, 0x47, 0x6C, 0x6F, 0x62, 0x61, 0x6C, 0x49, 0x6E, 0x76, 0x6F, 0x63, 0x61, 0x74,
+ 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x64, 0x73, 0x74, 0x00, 0x05, 0x00, 0x05, 0x00, 0x33, 0x00, 0x00, 0x00, 0x72, 0x61, 0x74, 0x69,
+ 0x6F, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x05, 0x00, 0x33, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x72, 0x61, 0x74, 0x69, 0x6F, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x42, 0x00, 0x00, 0x00,
+ 0x73, 0x72, 0x63, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x15, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x33, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x35, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x35, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x42, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x42, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x13, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x02, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x03, 0x00, 0x33, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x37, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00,
+ 0x3F, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1B, 0x00, 0x03, 0x00, 0x40, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x41, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x41, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x05, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x48, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x2C, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00,
+ 0x58, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x61, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x62, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x06, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x00, 0x00,
+ 0x25, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0xF7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0x00, 0x03, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x71, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x4F, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x68, 0x00, 0x04, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7C, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xA8, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x24, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x23, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x7C, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00,
+ 0x28, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x24, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x2C, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFA, 0x00, 0x04, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x2D, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x70, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x37, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x50, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00,
+ 0x3A, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x43, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x45, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x3F, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x08, 0x00, 0x48, 0x00, 0x00, 0x00,
+ 0x49, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x0A, 0x20, 0x00, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x4A, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00,
+ 0x3F, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x08, 0x00,
+ 0x48, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00,
+ 0x0A, 0x20, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x53, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x04, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x5A, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00,
+ 0x5F, 0x00, 0x08, 0x00, 0x48, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x5A, 0x00, 0x00, 0x00,
+ 0x45, 0x00, 0x00, 0x00, 0x0A, 0x20, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00,
+ 0x43, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x08, 0x00, 0x48, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x0A, 0x20, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x62, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0x66, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x68, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x50, 0x00, 0x07, 0x00, 0x48, 0x00, 0x00, 0x00,
+ 0x6D, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x53, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00,
+ 0x65, 0x00, 0x00, 0x00, 0x63, 0x00, 0x05, 0x00, 0x66, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00,
+ 0x6D, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x70, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x70, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorDrawToMsVertexShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x67, 0x6C, 0x5F, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x49, 0x6E, 0x64, 0x65, 0x78, 0x00, 0x00,
+ 0x05, 0x00, 0x06, 0x00, 0x16, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50, 0x65, 0x72, 0x56, 0x65,
+ 0x72, 0x74, 0x65, 0x78, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50, 0x6F, 0x73, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x00,
+ 0x06, 0x00, 0x07, 0x00, 0x16, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50,
+ 0x6F, 0x69, 0x6E, 0x74, 0x53, 0x69, 0x7A, 0x65, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x07, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x43, 0x6C, 0x69, 0x70, 0x44,
+ 0x69, 0x73, 0x74, 0x61, 0x6E, 0x63, 0x65, 0x00, 0x06, 0x00, 0x07, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x43, 0x75, 0x6C, 0x6C, 0x44, 0x69, 0x73, 0x74, 0x61,
+ 0x6E, 0x63, 0x65, 0x00, 0x05, 0x00, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x16, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x04, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x06, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x3F, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x40, 0x2B, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x21, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x3F, 0x2B, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0xC7, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x83, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x21, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x83, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+ 0x24, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x26, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x27, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x21, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x2A, 0x00, 0x00, 0x00,
+ 0x28, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x21, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x2D, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ColorDrawToMsFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x5E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C,
+ 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00,
+ 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x08, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63,
+ 0x6F, 0x75, 0x6E, 0x74, 0x73, 0x5F, 0x6C, 0x6F, 0x67, 0x32, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x08, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x61, 0x6D, 0x70,
+ 0x6C, 0x65, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x73, 0x5F, 0x6C, 0x6F, 0x67, 0x32, 0x00, 0x00,
+ 0x05, 0x00, 0x03, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x53, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x49, 0x44, 0x00,
+ 0x05, 0x00, 0x06, 0x00, 0x34, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46, 0x72, 0x61, 0x67, 0x43,
+ 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x52, 0x00, 0x00, 0x00,
+ 0x63, 0x6F, 0x6C, 0x6F, 0x75, 0x72, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x73, 0x72, 0x63, 0x00, 0x48, 0x00, 0x05, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x34, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x52, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x56, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x56, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x03, 0x00, 0x31, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x32, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x33, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x33, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x35, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x50, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x51, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x51, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00,
+ 0x53, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1B, 0x00, 0x03, 0x00, 0x54, 0x00, 0x00, 0x00, 0x53, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x5A, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00,
+ 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x35, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x31, 0x00, 0x00, 0x00,
+ 0x37, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0xC7, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00,
+ 0x3F, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0xC5, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x42, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00,
+ 0x35, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x31, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00,
+ 0x6E, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00,
+ 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00,
+ 0x2F, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x4E, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0xC5, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x54, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x05, 0x00, 0x5A, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00,
+ 0x4F, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x53, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00,
+ 0x57, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x07, 0x00, 0x50, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00,
+ 0x5C, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x52, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00,
+ 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ConvertIndexBufferShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x91, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x61, 0x11, 0x00, 0x00, 0x0A, 0x00, 0x07, 0x00, 0x53, 0x50, 0x56, 0x5F, 0x4B, 0x48, 0x52, 0x5F,
+ 0x38, 0x62, 0x69, 0x74, 0x5F, 0x73, 0x74, 0x6F, 0x72, 0x61, 0x67, 0x65, 0x00, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64,
+ 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x06, 0x00, 0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x10, 0x00, 0x06, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00,
+ 0x04, 0x00, 0x08, 0x00, 0x47, 0x4C, 0x5F, 0x45, 0x58, 0x54, 0x5F, 0x73, 0x63, 0x61, 0x6C, 0x61,
+ 0x72, 0x5F, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x5F, 0x6C, 0x61, 0x79, 0x6F, 0x75, 0x74, 0x00, 0x00,
+ 0x04, 0x00, 0x08, 0x00, 0x47, 0x4C, 0x5F, 0x45, 0x58, 0x54, 0x5F, 0x73, 0x68, 0x61, 0x64, 0x65,
+ 0x72, 0x5F, 0x38, 0x62, 0x69, 0x74, 0x5F, 0x73, 0x74, 0x6F, 0x72, 0x61, 0x67, 0x65, 0x00, 0x00,
+ 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x08, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x47, 0x6C, 0x6F, 0x62, 0x61,
+ 0x6C, 0x49, 0x6E, 0x76, 0x6F, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x49, 0x44, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x64, 0x65, 0x78, 0x5F, 0x62, 0x75,
+ 0x66, 0x66, 0x65, 0x72, 0x5F, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x06, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F,
+ 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6E, 0x00, 0x06, 0x00, 0x09, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F, 0x70, 0x72, 0x69, 0x6D, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x5F, 0x76, 0x65, 0x72, 0x74, 0x69, 0x63, 0x65, 0x73, 0x00, 0x00, 0x06, 0x00, 0x0A, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F, 0x70, 0x72, 0x69, 0x6D,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x5F, 0x76, 0x65, 0x72, 0x74, 0x69, 0x63, 0x65, 0x73, 0x5F, 0x6F,
+ 0x75, 0x74, 0x00, 0x00, 0x06, 0x00, 0x07, 0x00, 0x15, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x69, 0x62, 0x70, 0x5F, 0x69, 0x6E, 0x64, 0x65, 0x78, 0x5F, 0x73, 0x69, 0x7A, 0x65, 0x00, 0x00,
+ 0x06, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F,
+ 0x69, 0x6E, 0x64, 0x65, 0x78, 0x5F, 0x73, 0x69, 0x7A, 0x65, 0x5F, 0x6F, 0x75, 0x74, 0x00, 0x00,
+ 0x06, 0x00, 0x07, 0x00, 0x15, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F,
+ 0x62, 0x61, 0x73, 0x65, 0x5F, 0x69, 0x6E, 0x64, 0x65, 0x78, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F, 0x69, 0x6E, 0x64, 0x65,
+ 0x78, 0x5F, 0x73, 0x74, 0x72, 0x69, 0x64, 0x65, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x73, 0x72, 0x63, 0x5F, 0x6F, 0x66, 0x66, 0x73,
+ 0x65, 0x74, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x15, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x5F, 0x70, 0x72, 0x69, 0x6D, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x04, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x6F, 0x75, 0x74, 0x5F, 0x73, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x06, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6F, 0x75, 0x74, 0x5F,
+ 0x64, 0x61, 0x74, 0x61, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x5D, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x62, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x5F, 0x73,
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x05, 0x00, 0x62, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x69, 0x6E, 0x5F, 0x64, 0x61, 0x74, 0x61, 0x00, 0x05, 0x00, 0x03, 0x00, 0x64, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x15, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x28, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x15, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x15, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x15, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x5A, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x5B, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x61, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x62, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x62, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x64, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x64, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x87, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x0B, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x19, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, 0x02, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00,
+ 0x59, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x03, 0x00,
+ 0x5A, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00, 0x5B, 0x00, 0x00, 0x00,
+ 0x5A, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x5B, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x03, 0x00, 0x61, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x03, 0x00, 0x62, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x62, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x6C, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x06, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x87, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0x00,
+ 0x86, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0xF7, 0x00, 0x03, 0x00, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFB, 0x00, 0x03, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x89, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x89, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x05, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x19, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x05, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x1D, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x88, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x05, 0x00, 0x19, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+ 0x24, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x2A, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x2F, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x2F, 0x00, 0x00, 0x00,
+ 0xF5, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x8E, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0xB1, 0x00, 0x05, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0x8E, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00,
+ 0xF6, 0x00, 0x04, 0x00, 0x31, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFA, 0x00, 0x04, 0x00, 0x37, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x30, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x19, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x8E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x07, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x44, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00,
+ 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00,
+ 0x8E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x19, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x4D, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x4E, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x4D, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x50, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x50, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x8F, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
+ 0x72, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00, 0xB1, 0x00, 0x05, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x58, 0x00, 0x00, 0x00, 0x8F, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0xF6, 0x00, 0x04, 0x00,
+ 0x52, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00,
+ 0x58, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x51, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00,
+ 0x4E, 0x00, 0x00, 0x00, 0x8F, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x66, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x67, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00, 0x67, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00,
+ 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x6B, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00,
+ 0x8F, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x6D, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x6B, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x59, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x6D, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x6C, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x60, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x00, 0x00,
+ 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, 0x00, 0x8F, 0x00, 0x00, 0x00,
+ 0x71, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x50, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x52, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x73, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00,
+ 0x8F, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00,
+ 0xB1, 0x00, 0x05, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00,
+ 0x4D, 0x00, 0x00, 0x00, 0xF6, 0x00, 0x04, 0x00, 0x75, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00,
+ 0x75, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x74, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00,
+ 0x71, 0x00, 0x04, 0x00, 0x59, 0x00, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x80, 0x00, 0x00, 0x00,
+ 0x7F, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x82, 0x00, 0x00, 0x00,
+ 0x90, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x75, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x32, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x32, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x84, 0x00, 0x00, 0x00, 0x8E, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x2F, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x31, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x88, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x88, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00,
+ 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] ConvertIndirectDataShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0A, 0x00, 0x08, 0x00, 0x3D, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x06, 0x00, 0x04, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x04, 0x00, 0x08, 0x00, 0x47, 0x4C, 0x5F, 0x45,
+ 0x58, 0x54, 0x5F, 0x73, 0x63, 0x61, 0x6C, 0x61, 0x72, 0x5F, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x5F,
+ 0x6C, 0x61, 0x79, 0x6F, 0x75, 0x74, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x69, 0x6E, 0x64, 0x65, 0x78, 0x5F, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5F, 0x70, 0x61, 0x74,
+ 0x74, 0x65, 0x72, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6E, 0x00,
+ 0x06, 0x00, 0x09, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F,
+ 0x70, 0x72, 0x69, 0x6D, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5F, 0x76, 0x65, 0x72, 0x74, 0x69, 0x63,
+ 0x65, 0x73, 0x00, 0x00, 0x06, 0x00, 0x0A, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x69, 0x62, 0x70, 0x5F, 0x70, 0x72, 0x69, 0x6D, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5F, 0x76, 0x65,
+ 0x72, 0x74, 0x69, 0x63, 0x65, 0x73, 0x5F, 0x6F, 0x75, 0x74, 0x00, 0x00, 0x06, 0x00, 0x07, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F, 0x69, 0x6E, 0x64, 0x65,
+ 0x78, 0x5F, 0x73, 0x69, 0x7A, 0x65, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F, 0x69, 0x6E, 0x64, 0x65, 0x78, 0x5F, 0x73, 0x69,
+ 0x7A, 0x65, 0x5F, 0x6F, 0x75, 0x74, 0x00, 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x00, 0x00, 0x69, 0x62, 0x70, 0x5F, 0x62, 0x61, 0x73, 0x65, 0x5F, 0x69, 0x6E, 0x64,
+ 0x65, 0x78, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x14, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x69, 0x62, 0x70, 0x5F, 0x69, 0x6E, 0x64, 0x65, 0x78, 0x5F, 0x73, 0x74, 0x72, 0x69, 0x64, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00, 0x14, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x73, 0x72, 0x63, 0x5F, 0x6F, 0x66, 0x66, 0x73, 0x65, 0x74, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x74, 0x6F, 0x74, 0x61, 0x6C, 0x5F, 0x70, 0x72,
+ 0x69, 0x6D, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68,
+ 0x5F, 0x78, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5F, 0x79, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68,
+ 0x5F, 0x7A, 0x00, 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x68, 0x61, 0x73, 0x5F, 0x64, 0x72, 0x61, 0x77, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x00, 0x00,
+ 0x06, 0x00, 0x07, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x78, 0x5F,
+ 0x64, 0x72, 0x61, 0x77, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x64, 0x72, 0x61, 0x77, 0x5F, 0x63, 0x6F, 0x75,
+ 0x6E, 0x74, 0x5F, 0x6F, 0x66, 0x66, 0x73, 0x65, 0x74, 0x00, 0x00, 0x00, 0x06, 0x00, 0x09, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x5F, 0x64, 0x61, 0x74, 0x61, 0x5F, 0x73, 0x74, 0x72, 0x69, 0x64, 0x65, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x09, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x64, 0x69,
+ 0x72, 0x65, 0x63, 0x74, 0x5F, 0x64, 0x61, 0x74, 0x61, 0x5F, 0x6F, 0x66, 0x66, 0x73, 0x65, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x07, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x64, 0x72, 0x61, 0x77, 0x5F, 0x63, 0x6F, 0x75,
+ 0x6E, 0x74, 0x5F, 0x75, 0x6E, 0x69, 0x66, 0x6F, 0x72, 0x6D, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00,
+ 0x3A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x72, 0x61, 0x77, 0x5F, 0x63, 0x6F, 0x75,
+ 0x6E, 0x74, 0x5F, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x3C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x53, 0x00, 0x00, 0x00,
+ 0x69, 0x6E, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5F, 0x69, 0x6E, 0x00, 0x06, 0x00, 0x08, 0x00,
+ 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x5F, 0x64, 0x61, 0x74, 0x61, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0xB6, 0x00, 0x00, 0x00,
+ 0x69, 0x6E, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5F, 0x6F, 0x75, 0x74, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x08, 0x00, 0xB6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x64, 0x69,
+ 0x72, 0x65, 0x63, 0x74, 0x5F, 0x64, 0x61, 0x74, 0x61, 0x5F, 0x6F, 0x75, 0x74, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x03, 0x00, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x03, 0x00, 0x14, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x39, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x3A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x03, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x3C, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x3C, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x52, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x03, 0x00, 0x53, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0xB5, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0xB6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x03, 0x00, 0xB6, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0xB8, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0xB8, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x03, 0x01, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x13, 0x00, 0x14, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x14, 0x00, 0x02, 0x00,
+ 0x33, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x04, 0x00, 0x39, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x40, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x03, 0x00, 0x52, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00,
+ 0x53, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x54, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x53, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x54, 0x00, 0x00, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x56, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x6F, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x8E, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x93, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x95, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x9D, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x03, 0x00, 0xB5, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00, 0xB6, 0x00, 0x00, 0x00, 0xB5, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0xB7, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xB6, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0xB7, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0xDA, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x02, 0x01, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x2C, 0x00, 0x06, 0x00, 0x02, 0x01, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x05, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x32, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0xAB, 0x00, 0x05, 0x00, 0x33, 0x00, 0x00, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00,
+ 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x40, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00,
+ 0x3C, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x07, 0x00, 0x11, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x26, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0x37, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x47, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x37, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x37, 0x00, 0x00, 0x00,
+ 0xF5, 0x00, 0x07, 0x00, 0x11, 0x00, 0x00, 0x00, 0x36, 0x01, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0xAB, 0x00, 0x05, 0x00,
+ 0x33, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00, 0x36, 0x01, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00,
+ 0xF7, 0x00, 0x03, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00,
+ 0x4E, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x4F, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x58, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x59, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x5A, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x59, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00,
+ 0x5A, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00,
+ 0x57, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x62, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x64, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x64, 0x00, 0x00, 0x00,
+ 0xF5, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x39, 0x01, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00,
+ 0x4F, 0x00, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x38, 0x01, 0x00, 0x00, 0x62, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00,
+ 0x86, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x37, 0x01, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00, 0x89, 0x00, 0x00, 0x00,
+ 0x65, 0x00, 0x00, 0x00, 0xB0, 0x00, 0x05, 0x00, 0x33, 0x00, 0x00, 0x00, 0x6B, 0x00, 0x00, 0x00,
+ 0x37, 0x01, 0x00, 0x00, 0x36, 0x01, 0x00, 0x00, 0xF6, 0x00, 0x04, 0x00, 0x66, 0x00, 0x00, 0x00,
+ 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x6B, 0x00, 0x00, 0x00,
+ 0x65, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x65, 0x00, 0x00, 0x00,
+ 0x7C, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x37, 0x01, 0x00, 0x00,
+ 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x6F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00,
+ 0x70, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, 0x00,
+ 0x6E, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x75, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x77, 0x00, 0x00, 0x00, 0x75, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x7A, 0x00, 0x00, 0x00, 0x77, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x7A, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x7C, 0x00, 0x00, 0x00, 0x7B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x7F, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x39, 0x01, 0x00, 0x00,
+ 0x7C, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0x83, 0x00, 0x00, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00, 0x83, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x00, 0x84, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x2A, 0x00, 0x00, 0x00, 0x38, 0x01, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x89, 0x00, 0x00, 0x00, 0x37, 0x01, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x64, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x66, 0x00, 0x00, 0x00,
+ 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x8D, 0x00, 0x00, 0x00, 0x38, 0x01, 0x00, 0x00,
+ 0x39, 0x01, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00,
+ 0x8D, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x87, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x91, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x92, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x8E, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x92, 0x00, 0x00, 0x00, 0x91, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x96, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x95, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x97, 0x00, 0x00, 0x00, 0x96, 0x00, 0x00, 0x00,
+ 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, 0x39, 0x01, 0x00, 0x00,
+ 0x97, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x99, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x93, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x9A, 0x00, 0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x9B, 0x00, 0x00, 0x00, 0x9A, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x99, 0x00, 0x00, 0x00, 0x9B, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x07, 0x01, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x07, 0x01, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x09, 0x01, 0x00, 0x00, 0x8D, 0x00, 0x00, 0x00, 0x08, 0x01, 0x00, 0x00,
+ 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x0A, 0x01, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0B, 0x01, 0x00, 0x00,
+ 0x0A, 0x01, 0x00, 0x00, 0x87, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0C, 0x01, 0x00, 0x00,
+ 0x09, 0x01, 0x00, 0x00, 0x0B, 0x01, 0x00, 0x00, 0x0C, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x0D, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x0C, 0x01, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0xA1, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x9D, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0xA1, 0x00, 0x00, 0x00,
+ 0x0D, 0x01, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0xA2, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0xA2, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00, 0x11, 0x00, 0x00, 0x00, 0x3A, 0x01, 0x00, 0x00,
+ 0x4C, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x00, 0xE2, 0x00, 0x00, 0x00, 0xA3, 0x00, 0x00, 0x00,
+ 0xB0, 0x00, 0x05, 0x00, 0x33, 0x00, 0x00, 0x00, 0xA9, 0x00, 0x00, 0x00, 0x3A, 0x01, 0x00, 0x00,
+ 0x36, 0x01, 0x00, 0x00, 0xF6, 0x00, 0x04, 0x00, 0xA4, 0x00, 0x00, 0x00, 0xA3, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0xA9, 0x00, 0x00, 0x00, 0xA3, 0x00, 0x00, 0x00,
+ 0xA4, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0xA3, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0xAC, 0x00, 0x00, 0x00, 0x3A, 0x01, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0xAD, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0xAE, 0x00, 0x00, 0x00, 0xAD, 0x00, 0x00, 0x00,
+ 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x00, 0x00, 0xAC, 0x00, 0x00, 0x00,
+ 0xAE, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0xB2, 0x00, 0x00, 0x00,
+ 0x57, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0xB4, 0x00, 0x00, 0x00,
+ 0xB2, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0xBC, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xB4, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0xBD, 0x00, 0x00, 0x00, 0xBC, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1C, 0x01, 0x00, 0x00, 0x07, 0x01, 0x00, 0x00,
+ 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1D, 0x01, 0x00, 0x00, 0xBD, 0x00, 0x00, 0x00,
+ 0x1C, 0x01, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1F, 0x01, 0x00, 0x00,
+ 0x0A, 0x01, 0x00, 0x00, 0x87, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x01, 0x00, 0x00,
+ 0x1D, 0x01, 0x00, 0x00, 0x1F, 0x01, 0x00, 0x00, 0x0C, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x21, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x20, 0x01, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00, 0x15, 0x01, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x16, 0x01, 0x00, 0x00, 0x15, 0x01, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x17, 0x01, 0x00, 0x00, 0x21, 0x01, 0x00, 0x00, 0x16, 0x01, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0xBF, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0xAF, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0xBF, 0x00, 0x00, 0x00, 0x17, 0x01, 0x00, 0x00,
+ 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0xC1, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x00, 0x00,
+ 0x88, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x00, 0x00,
+ 0xB4, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0xC4, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0xC5, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0xC6, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0xC1, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0xC6, 0x00, 0x00, 0x00,
+ 0xC5, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0xC8, 0x00, 0x00, 0x00,
+ 0xAF, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0xCA, 0x00, 0x00, 0x00, 0xB4, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0xCB, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0xCA, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0xCC, 0x00, 0x00, 0x00,
+ 0xCB, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0xCE, 0x00, 0x00, 0x00,
+ 0xCC, 0x00, 0x00, 0x00, 0x39, 0x01, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x30, 0x01, 0x00, 0x00, 0x07, 0x01, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x31, 0x01, 0x00, 0x00, 0xCE, 0x00, 0x00, 0x00, 0x30, 0x01, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x33, 0x01, 0x00, 0x00, 0x0A, 0x01, 0x00, 0x00, 0x87, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x34, 0x01, 0x00, 0x00, 0x31, 0x01, 0x00, 0x00, 0x33, 0x01, 0x00, 0x00,
+ 0x0C, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x35, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x2A, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x34, 0x01, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x2A, 0x01, 0x00, 0x00, 0x15, 0x01, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x2B, 0x01, 0x00, 0x00, 0x35, 0x01, 0x00, 0x00, 0x2A, 0x01, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0xD1, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0xC8, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0xD1, 0x00, 0x00, 0x00,
+ 0x2B, 0x01, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0xD3, 0x00, 0x00, 0x00,
+ 0xAF, 0x00, 0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0xD5, 0x00, 0x00, 0x00, 0xB4, 0x00, 0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0xD6, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0xD5, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0xD7, 0x00, 0x00, 0x00,
+ 0xD6, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0xD8, 0x00, 0x00, 0x00,
+ 0xB8, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xD3, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0xD8, 0x00, 0x00, 0x00, 0xD7, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0xDB, 0x00, 0x00, 0x00, 0xAF, 0x00, 0x00, 0x00, 0xDA, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0xDD, 0x00, 0x00, 0x00, 0xB4, 0x00, 0x00, 0x00, 0xDA, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0xDE, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0xDD, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0xDF, 0x00, 0x00, 0x00, 0xDE, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0xE0, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xDB, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0xE0, 0x00, 0x00, 0x00, 0xDF, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0xE2, 0x00, 0x00, 0x00, 0x3A, 0x01, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0xA2, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0xA4, 0x00, 0x00, 0x00,
+ 0xF9, 0x00, 0x02, 0x00, 0x50, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x50, 0x00, 0x00, 0x00,
+ 0xF5, 0x00, 0x07, 0x00, 0x11, 0x00, 0x00, 0x00, 0x3C, 0x01, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00,
+ 0x37, 0x00, 0x00, 0x00, 0x3A, 0x01, 0x00, 0x00, 0xA4, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00,
+ 0xE3, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0xE3, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x3B, 0x01, 0x00, 0x00, 0x3C, 0x01, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00,
+ 0x01, 0x01, 0x00, 0x00, 0xE4, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xE9, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0xEA, 0x00, 0x00, 0x00, 0xE9, 0x00, 0x00, 0x00, 0xB0, 0x00, 0x05, 0x00,
+ 0x33, 0x00, 0x00, 0x00, 0xEB, 0x00, 0x00, 0x00, 0x3B, 0x01, 0x00, 0x00, 0xEA, 0x00, 0x00, 0x00,
+ 0xF6, 0x00, 0x04, 0x00, 0xE5, 0x00, 0x00, 0x00, 0xE4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xFA, 0x00, 0x04, 0x00, 0xEB, 0x00, 0x00, 0x00, 0xE4, 0x00, 0x00, 0x00, 0xE5, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0xE4, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0xEE, 0x00, 0x00, 0x00, 0x3B, 0x01, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0xEF, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0xF0, 0x00, 0x00, 0x00, 0xEF, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0xF1, 0x00, 0x00, 0x00, 0xEE, 0x00, 0x00, 0x00, 0xF0, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0xF3, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0xF1, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0xF3, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x00, 0x00,
+ 0xF1, 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0xF6, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0xF6, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x00, 0x00, 0xF1, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0xF9, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0xFB, 0x00, 0x00, 0x00,
+ 0xF1, 0x00, 0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0xFC, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xFB, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0xFE, 0x00, 0x00, 0x00, 0xF1, 0x00, 0x00, 0x00, 0xDA, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x18, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0xFE, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0xFF, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00,
+ 0x3B, 0x01, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0xE3, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0xE5, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] DepthBlitFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00,
+ 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46, 0x72, 0x61, 0x67, 0x44,
+ 0x65, 0x70, 0x74, 0x68, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x74, 0x65, 0x78, 0x44, 0x65, 0x70, 0x74, 0x68, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x57, 0x00, 0x05, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x08, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] DepthBlitMsFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x32, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46,
+ 0x72, 0x61, 0x67, 0x44, 0x65, 0x70, 0x74, 0x68, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x44, 0x65, 0x70, 0x74, 0x68, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72,
+ 0x64, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x53,
+ 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x49, 0x44, 0x00, 0x47, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x68, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0x6F, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x85, 0x00, 0x05, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x07, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x08, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] DepthDrawToMsFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x5E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C,
+ 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74,
+ 0x73, 0x5F, 0x6C, 0x6F, 0x67, 0x32, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63,
+ 0x6F, 0x75, 0x6E, 0x74, 0x73, 0x5F, 0x6C, 0x6F, 0x67, 0x32, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x67, 0x6C, 0x5F, 0x53, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x49, 0x44, 0x00, 0x05, 0x00, 0x06, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46, 0x72, 0x61, 0x67, 0x43, 0x6F, 0x6F, 0x72, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x51, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46,
+ 0x72, 0x61, 0x67, 0x44, 0x65, 0x70, 0x74, 0x68, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x73, 0x72, 0x63, 0x00, 0x48, 0x00, 0x05, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x34, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x51, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x55, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x55, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00, 0x31, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x32, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x33, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x33, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x35, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x50, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x50, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x09, 0x00, 0x52, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00, 0x53, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x54, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x59, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x26, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x2F, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x35, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x31, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x26, 0x00, 0x00, 0x00, 0xC7, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00,
+ 0x2F, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0xC5, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x05, 0x00, 0x35, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x31, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00,
+ 0x44, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00,
+ 0x45, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00,
+ 0x46, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x4C, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0xC5, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00,
+ 0x4E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x53, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x50, 0x00, 0x05, 0x00, 0x59, 0x00, 0x00, 0x00, 0x5A, 0x00, 0x00, 0x00,
+ 0x42, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x52, 0x00, 0x00, 0x00,
+ 0x5B, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x07, 0x00, 0x32, 0x00, 0x00, 0x00,
+ 0x5C, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x5A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x31, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00,
+ 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x51, 0x00, 0x00, 0x00,
+ 0x5D, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] DepthDrawToNonMsFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x6A, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00,
+ 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x06, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46, 0x72, 0x61, 0x67, 0x43,
+ 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x73, 0x5F, 0x6C, 0x6F,
+ 0x67, 0x32, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74,
+ 0x73, 0x5F, 0x6C, 0x6F, 0x67, 0x32, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46,
+ 0x72, 0x61, 0x67, 0x44, 0x65, 0x70, 0x74, 0x68, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x73, 0x72, 0x63, 0x4D, 0x53, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x5F, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x26, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x31, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x50, 0x00, 0x00, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00,
+ 0x5F, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x60, 0x00, 0x00, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00,
+ 0x61, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x62, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x62, 0x00, 0x00, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x07, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x6D, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x1F, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x82, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x26, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x27, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00,
+ 0x25, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x33, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00,
+ 0x3A, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x3F, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0xC7, 0x00, 0x05, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00,
+ 0x42, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00,
+ 0x43, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0xC7, 0x00, 0x05, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00,
+ 0xC4, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0xC5, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x57, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x05, 0x00, 0x50, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00,
+ 0x5C, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x61, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
+ 0x63, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x60, 0x00, 0x00, 0x00, 0x67, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x07, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00,
+ 0x67, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x05, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00,
+ 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] StencilBlitFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x95, 0x13, 0x00, 0x00, 0x0A, 0x00, 0x09, 0x00, 0x53, 0x50, 0x56, 0x5F, 0x45, 0x58, 0x54, 0x5F,
+ 0x73, 0x68, 0x61, 0x64, 0x65, 0x72, 0x5F, 0x73, 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x5F, 0x65,
+ 0x78, 0x70, 0x6F, 0x72, 0x74, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x07, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0xA3, 0x13, 0x00, 0x00,
+ 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x04, 0x00, 0x09, 0x00,
+ 0x47, 0x4C, 0x5F, 0x41, 0x52, 0x42, 0x5F, 0x73, 0x68, 0x61, 0x64, 0x65, 0x72, 0x5F, 0x73, 0x74,
+ 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x5F, 0x65, 0x78, 0x70, 0x6F, 0x72, 0x74, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46, 0x72, 0x61, 0x67, 0x53,
+ 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x52, 0x65, 0x66, 0x41, 0x52, 0x42, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x05, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x53, 0x74, 0x65, 0x6E, 0x63,
+ 0x69, 0x6C, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x5F,
+ 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x96, 0x13, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1B, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x57, 0x00, 0x05, 0x00, 0x13, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] StencilBlitMsFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x32, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x95, 0x13, 0x00, 0x00, 0x0A, 0x00, 0x09, 0x00, 0x53, 0x50, 0x56, 0x5F, 0x45, 0x58, 0x54, 0x5F,
+ 0x73, 0x68, 0x61, 0x64, 0x65, 0x72, 0x5F, 0x73, 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x5F, 0x65,
+ 0x78, 0x70, 0x6F, 0x72, 0x74, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x08, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0xA3, 0x13, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00,
+ 0x04, 0x00, 0x09, 0x00, 0x47, 0x4C, 0x5F, 0x41, 0x52, 0x42, 0x5F, 0x73, 0x68, 0x61, 0x64, 0x65,
+ 0x72, 0x5F, 0x73, 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x5F, 0x65, 0x78, 0x70, 0x6F, 0x72, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46,
+ 0x72, 0x61, 0x67, 0x53, 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x52, 0x65, 0x66, 0x41, 0x52, 0x42,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x53,
+ 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x74, 0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x53, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x49, 0x44, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x96, 0x13, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x11, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x03, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x03, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x1A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x1A, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x68, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x04, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x04, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00,
+ 0x09, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x07, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x22, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] StencilDrawToMsFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x5E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x95, 0x13, 0x00, 0x00, 0x0A, 0x00, 0x09, 0x00,
+ 0x53, 0x50, 0x56, 0x5F, 0x45, 0x58, 0x54, 0x5F, 0x73, 0x68, 0x61, 0x64, 0x65, 0x72, 0x5F, 0x73,
+ 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x5F, 0x65, 0x78, 0x70, 0x6F, 0x72, 0x74, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64,
+ 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0xA3, 0x13, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x04, 0x00, 0x09, 0x00, 0x47, 0x4C, 0x5F, 0x41,
+ 0x52, 0x42, 0x5F, 0x73, 0x68, 0x61, 0x64, 0x65, 0x72, 0x5F, 0x73, 0x74, 0x65, 0x6E, 0x63, 0x69,
+ 0x6C, 0x5F, 0x65, 0x78, 0x70, 0x6F, 0x72, 0x74, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74,
+ 0x73, 0x5F, 0x6C, 0x6F, 0x67, 0x32, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00,
+ 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63,
+ 0x6F, 0x75, 0x6E, 0x74, 0x73, 0x5F, 0x6C, 0x6F, 0x67, 0x32, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x67, 0x6C, 0x5F, 0x53, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x49, 0x44, 0x00, 0x05, 0x00, 0x06, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46, 0x72, 0x61, 0x67, 0x43, 0x6F, 0x6F, 0x72, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00, 0x51, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46,
+ 0x72, 0x61, 0x67, 0x53, 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x52, 0x65, 0x66, 0x41, 0x52, 0x42,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x55, 0x00, 0x00, 0x00, 0x73, 0x72, 0x63, 0x00,
+ 0x48, 0x00, 0x05, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0x00, 0x03, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x2E, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x51, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x96, 0x13, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00,
+ 0x2D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00,
+ 0x2D, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00,
+ 0x31, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x32, 0x00, 0x00, 0x00,
+ 0x31, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x33, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x33, 0x00, 0x00, 0x00,
+ 0x34, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x35, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x50, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x50, 0x00, 0x00, 0x00,
+ 0x51, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x19, 0x00, 0x09, 0x00, 0x52, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00,
+ 0x53, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x54, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x53, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x54, 0x00, 0x00, 0x00,
+ 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x59, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00,
+ 0x05, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
+ 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x13, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x14, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x1E, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x05, 0x00, 0x35, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x31, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00,
+ 0x37, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0xC7, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00,
+ 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00,
+ 0x16, 0x00, 0x00, 0x00, 0xC5, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x35, 0x00, 0x00, 0x00,
+ 0x44, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x31, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x6E, 0x00, 0x04, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00,
+ 0xC3, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00,
+ 0x4C, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0xC5, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x4F, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x53, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x50, 0x00, 0x05, 0x00,
+ 0x59, 0x00, 0x00, 0x00, 0x5A, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x04, 0x00, 0x52, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00,
+ 0x5F, 0x00, 0x07, 0x00, 0x09, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00,
+ 0x5A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3E, 0x00, 0x03, 0x00, 0x51, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00,
+ 0x38, 0x00, 0x01, 0x00,
+ };
+
+ public static readonly byte[] StencilDrawToNonMsFragmentShaderSource = new byte[]
+ {
+ 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 0x08, 0x00, 0x6A, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x95, 0x13, 0x00, 0x00, 0x0A, 0x00, 0x09, 0x00, 0x53, 0x50, 0x56, 0x5F, 0x45, 0x58, 0x54, 0x5F,
+ 0x73, 0x68, 0x61, 0x64, 0x65, 0x72, 0x5F, 0x73, 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x5F, 0x65,
+ 0x78, 0x70, 0x6F, 0x72, 0x74, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00,
+ 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x07, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x0D, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0xA3, 0x13, 0x00, 0x00,
+ 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x04, 0x00, 0x09, 0x00,
+ 0x47, 0x4C, 0x5F, 0x41, 0x52, 0x42, 0x5F, 0x73, 0x68, 0x61, 0x64, 0x65, 0x72, 0x5F, 0x73, 0x74,
+ 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x5F, 0x65, 0x78, 0x70, 0x6F, 0x72, 0x74, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x00, 0x06, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46, 0x72, 0x61, 0x67, 0x43,
+ 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74, 0x73, 0x5F, 0x6C, 0x6F,
+ 0x67, 0x32, 0x5F, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x73, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x5F, 0x63, 0x6F, 0x75, 0x6E, 0x74,
+ 0x73, 0x5F, 0x6C, 0x6F, 0x67, 0x32, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x08, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x46,
+ 0x72, 0x61, 0x67, 0x53, 0x74, 0x65, 0x6E, 0x63, 0x69, 0x6C, 0x52, 0x65, 0x66, 0x41, 0x52, 0x42,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x63, 0x00, 0x00, 0x00, 0x73, 0x72, 0x63, 0x4D,
+ 0x53, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x0F, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00,
+ 0x96, 0x13, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x63, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x63, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x2B, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x17, 0x00, 0x04, 0x00, 0x50, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x09, 0x00, 0x60, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00, 0x61, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x04, 0x00, 0x62, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x00, 0x00, 0x00,
+ 0x3B, 0x00, 0x04, 0x00, 0x62, 0x00, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x0B, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x4F, 0x00, 0x07, 0x00,
+ 0x0E, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x6D, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00,
+ 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x1B, 0x00, 0x00, 0x00,
+ 0x1C, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00,
+ 0x41, 0x00, 0x06, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x19, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x21, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00,
+ 0x1B, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00,
+ 0x23, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x12, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+ 0x24, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00,
+ 0xC4, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x31, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x3A, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00,
+ 0x82, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00,
+ 0x31, 0x00, 0x00, 0x00, 0xC7, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x42, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x04, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00,
+ 0x82, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00,
+ 0x31, 0x00, 0x00, 0x00, 0xC7, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00,
+ 0x45, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x4A, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xC5, 0x00, 0x05, 0x00,
+ 0x12, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00,
+ 0xC3, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00,
+ 0x1D, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00,
+ 0x43, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x50, 0x00, 0x05, 0x00, 0x50, 0x00, 0x00, 0x00,
+ 0x5D, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+ 0x61, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x63, 0x00, 0x00, 0x00, 0x64, 0x00, 0x04, 0x00,
+ 0x60, 0x00, 0x00, 0x00, 0x67, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x07, 0x00,
+ 0x15, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, 0x67, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x69, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00,
+ 0x5F, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+ };
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/StencilBlitFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/StencilBlitFragmentShaderSource.frag
new file mode 100644
index 00000000..1919269b
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/StencilBlitFragmentShaderSource.frag
@@ -0,0 +1,12 @@
+#version 450 core
+
+// Stencil-only blit (single-sampled source): samples the stencil value from
+// the source texture and exports it as this fragment's stencil reference,
+// so a fullscreen draw writes the stencil attachment directly.
+#extension GL_ARB_shader_stencil_export : require
+
+layout (binding = 0, set = 2) uniform isampler2D texStencil;
+
+layout (location = 0) in vec2 tex_coord;
+
+void main()
+{
+    // .r carries the integer stencil value; gl_FragStencilRefARB stores it
+    // into the stencil attachment for this fragment.
+    gl_FragStencilRefARB = texture(texStencil, tex_coord).r;
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/StencilBlitMsFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/StencilBlitMsFragmentShaderSource.frag
new file mode 100644
index 00000000..7e26672a
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/StencilBlitMsFragmentShaderSource.frag
@@ -0,0 +1,12 @@
+#version 450 core
+
+// Multisampled variant of the stencil-only blit: reads the sample matching
+// this fragment's gl_SampleID from a multisampled source.
+#extension GL_ARB_shader_stencil_export : require
+
+layout (binding = 0, set = 2) uniform isampler2DMS texStencil;
+
+layout (location = 0) in vec2 tex_coord;
+
+void main()
+{
+    // isampler2DMS has no normalized-coordinate sampling, so the interpolated
+    // tex_coord is converted back into integer texel coordinates before the
+    // per-sample fetch.
+    gl_FragStencilRefARB = texelFetch(texStencil, ivec2(tex_coord * vec2(textureSize(texStencil).xy)), gl_SampleID).r;
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/StencilDrawToMsFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/StencilDrawToMsFragmentShaderSource.frag
new file mode 100644
index 00000000..a07ae9d1
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/StencilDrawToMsFragmentShaderSource.frag
@@ -0,0 +1,27 @@
+#version 450 core
+
+// Copies stencil data into a multisampled target from a non-multisampled
+// source where the samples were flattened into a larger 2D image: each
+// (fragment, gl_SampleID) pair is mapped back to a source texel.
+#extension GL_ARB_shader_stencil_export : require
+
+// NOTE(review): the four components appear to hold per-axis log2 sample
+// counts (.xy for the source layout, .zw for the target) — confirm against
+// the caller that fills this uniform (HelperShader).
+layout (std140, binding = 0) uniform sample_counts_log2_in
+{
+    ivec4 sample_counts_log2;
+};
+
+layout (set = 2, binding = 0) uniform isampler2D src;
+
+void main()
+{
+    int deltaX = sample_counts_log2.x - sample_counts_log2.z;
+    int deltaY = sample_counts_log2.y - sample_counts_log2.w;
+    int samplesInXLog2 = sample_counts_log2.z;
+    int samplesInYLog2 = sample_counts_log2.w;
+    int samplesInX = 1 << samplesInXLog2;
+    int samplesInY = 1 << samplesInYLog2;
+
+    int sampleIndex = gl_SampleID;
+
+    // Rebuild the flattened source coordinate: the fragment position is
+    // scaled up by the per-axis sample counts and the sample index is split
+    // into its X and Y components (low bits select X, high bits select Y).
+    int inX = (int(gl_FragCoord.x) << sample_counts_log2.x) | ((sampleIndex & (samplesInX - 1)) << deltaX);
+    int inY = (int(gl_FragCoord.y) << sample_counts_log2.y) | ((sampleIndex >> samplesInXLog2) << deltaY);
+
+    gl_FragStencilRefARB = texelFetch(src, ivec2(inX, inY), 0).r;
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Shaders/StencilDrawToNonMsFragmentShaderSource.frag b/src/Ryujinx.Graphics.Vulkan/Shaders/StencilDrawToNonMsFragmentShaderSource.frag
new file mode 100644
index 00000000..3addd9d1
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Shaders/StencilDrawToNonMsFragmentShaderSource.frag
@@ -0,0 +1,30 @@
+#version 450 core
+
+// Inverse of the draw-to-MS shader: flattens a multisampled stencil source
+// into a (larger) non-multisampled target. Each output texel's coordinate
+// encodes both the source texel and the source sample index.
+#extension GL_ARB_shader_stencil_export : require
+
+// NOTE(review): components appear to hold per-axis log2 sample counts
+// (.xy target-side, .zw source-side) — confirm against the caller.
+layout (std140, binding = 0) uniform sample_counts_log2_in
+{
+    ivec4 sample_counts_log2;
+};
+
+layout (set = 2, binding = 0) uniform isampler2DMS srcMS;
+
+void main()
+{
+    uvec2 coords = uvec2(gl_FragCoord.xy);
+
+    int deltaX = sample_counts_log2.x - sample_counts_log2.z;
+    int deltaY = sample_counts_log2.y - sample_counts_log2.w;
+    int samplesInXLog2 = sample_counts_log2.z;
+    int samplesInYLog2 = sample_counts_log2.w;
+    int samplesInX = 1 << samplesInXLog2;
+    int samplesInY = 1 << samplesInYLog2;
+    // Extract the source sample index from the low bits of the flattened
+    // coordinate (X bits in the low part, Y bits above them).
+    int sampleIdx = ((int(coords.x) >> deltaX) & (samplesInX - 1)) | (((int(coords.y) >> deltaY) & (samplesInY - 1)) << samplesInXLog2);
+
+    samplesInXLog2 = sample_counts_log2.x;
+    samplesInYLog2 = sample_counts_log2.y;
+
+    // Remaining high bits give the source texel position.
+    ivec2 shiftedCoords = ivec2(int(coords.x) >> samplesInXLog2, int(coords.y) >> samplesInYLog2);
+
+    gl_FragStencilRefARB = texelFetch(srcMS, shiftedCoords, sampleIdx).r;
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/SpecInfo.cs b/src/Ryujinx.Graphics.Vulkan/SpecInfo.cs
new file mode 100644
index 00000000..4d226f61
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/SpecInfo.cs
@@ -0,0 +1,102 @@
+using Silk.NET.Vulkan;
+using System;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+    // Scalar types usable as Vulkan specialization constants; the name
+    // encodes the bit width SizeOf() maps each member to (Bool32 is the
+    // 4-byte VkBool32, not a 1-byte bool).
+    public enum SpecConstType
+    {
+        Bool32,
+        Int16,
+        Int32,
+        Int64,
+        Float16,
+        Float32,
+        Float64
+    }
+
+    // Describes the layout of a block of specialization-constant data:
+    // a VkSpecializationInfo header plus the map entries that tie each
+    // constant ID to an offset/size within that block.
+    // NOTE(review): Info.PMapEntries/PData are not filled here, so they are
+    // presumably pinned and assigned at pipeline-creation time — confirm at
+    // the usage site.
+    sealed class SpecDescription
+    {
+        public readonly SpecializationInfo Info;
+        public readonly SpecializationMapEntry[] Map;
+
+        // For mapping a simple packed struct or single entry: entries are laid
+        // out back-to-back, each offset being the running size so far.
+        public SpecDescription(params (uint Id, SpecConstType Type)[] description)
+        {
+            int count = description.Length;
+            Map = new SpecializationMapEntry[count];
+
+            uint structSize = 0;
+
+            for (int i = 0; i < Map.Length; ++i)
+            {
+                var typeSize = SizeOf(description[i].Type);
+                Map[i] = new SpecializationMapEntry(description[i].Id, structSize, typeSize);
+                structSize += typeSize;
+            }
+
+            Info = new SpecializationInfo()
+            {
+                DataSize = structSize,
+                MapEntryCount = (uint)count
+            };
+        }
+
+        // For advanced mapping with overlapping or staggered fields: the data
+        // size is the furthest byte any entry reaches, not the sum of sizes.
+        public SpecDescription(SpecializationMapEntry[] map)
+        {
+            Map = map;
+
+            uint structSize = 0;
+            for (int i = 0; i < map.Length; ++i)
+            {
+                structSize = Math.Max(structSize, map[i].Offset + (uint)map[i].Size);
+            }
+
+            Info = new SpecializationInfo()
+            {
+                DataSize = structSize,
+                MapEntryCount = (uint)map.Length
+            };
+        }
+
+        // Byte size of each specialization-constant type.
+        private static uint SizeOf(SpecConstType type) => type switch
+        {
+            SpecConstType.Int16 or SpecConstType.Float16 => 2,
+            SpecConstType.Bool32 or SpecConstType.Int32 or SpecConstType.Float32 => 4,
+            SpecConstType.Int64 or SpecConstType.Float64 => 8,
+            _ => throw new ArgumentOutOfRangeException(nameof(type))
+        };
+
+        // Backing constructor for Empty: a description with no entries.
+        private SpecDescription()
+        {
+            Info = new();
+        }
+
+        public static readonly SpecDescription Empty = new();
+    }
+
+    // Immutable copy of a specialization-data blob with content equality and
+    // a hash precomputed at construction, making it cheap to use as a cache
+    // key (IRefEquatable is a project interface for by-ref equality checks).
+    readonly struct SpecData : IRefEquatable<SpecData>
+    {
+        private readonly byte[] _data;
+        private readonly int _hash;
+
+        public int Length => _data.Length;
+        public ReadOnlySpan<byte> Span => _data.AsSpan();
+        public override int GetHashCode() => _hash;
+
+        public SpecData(ReadOnlySpan<byte> data)
+        {
+            // Defensive copy: the caller's span may be backed by mutable or
+            // short-lived memory.
+            _data = new byte[data.Length];
+            data.CopyTo(_data);
+
+            var hc = new HashCode();
+            hc.AddBytes(data);
+            _hash = hc.ToHashCode();
+        }
+
+        public override bool Equals(object obj) => obj is SpecData other && Equals(other);
+        public bool Equals(ref SpecData other) => _data.AsSpan().SequenceEqual(other._data);
+    }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/StagingBuffer.cs b/src/Ryujinx.Graphics.Vulkan/StagingBuffer.cs
new file mode 100644
index 00000000..4e3c1dee
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/StagingBuffer.cs
@@ -0,0 +1,194 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+    // Fixed-size host ring buffer used to stage uploads into device buffers.
+    // Writes are packed into the ring, copied to the destination on a command
+    // buffer, and the consumed region becomes free again once the copy's
+    // fence signals.
+    class StagingBuffer : IDisposable
+    {
+        // Total staging capacity. Must stay a power of two: PushDataImpl
+        // wraps the write offset with "& (BufferSize - 1)".
+        private const int BufferSize = 16 * 1024 * 1024;
+
+        // Next write position in the ring.
+        private int _freeOffset;
+        // Bytes currently available for writing.
+        private int _freeSize;
+
+        private readonly VulkanRenderer _gd;
+        private readonly BufferHolder _buffer;
+
+        // A region of the ring that is in flight; its Size bytes are
+        // reclaimed once Fence signals.
+        private readonly struct PendingCopy
+        {
+            public FenceHolder Fence { get; }
+            public int Size { get; }
+
+            public PendingCopy(FenceHolder fence, int size)
+            {
+                Fence = fence;
+                Size = size;
+                fence.Get(); // Hold a reference; released with Put() on reclaim.
+            }
+        }
+
+        // Oldest-first queue of in-flight regions.
+        private readonly Queue<PendingCopy> _pendingCopies;
+
+        public StagingBuffer(VulkanRenderer gd, BufferManager bufferManager)
+        {
+            _gd = gd;
+            _buffer = bufferManager.Create(gd, BufferSize);
+            _pendingCopies = new Queue<PendingCopy>();
+            _freeSize = BufferSize;
+        }
+
+        // Pushes all of "data" into "dst" at "dstOffset", blocking as needed.
+        // "cbs" is the command buffer currently being recorded (if any);
+        // "endRenderPass" is invoked before recording copies so they don't
+        // land inside an active render pass.
+        public unsafe void PushData(CommandBufferPool cbp, CommandBufferScoped? cbs, Action endRenderPass, BufferHolder dst, int dstOffset, ReadOnlySpan<byte> data)
+        {
+            bool isRender = cbs != null;
+            CommandBufferScoped scoped = cbs ?? cbp.Rent();
+
+            // Must push all data to the buffer. If it can't fit, split it up.
+
+            endRenderPass?.Invoke();
+
+            while (data.Length > 0)
+            {
+                // Reclaim already-signalled regions before deciding to wait.
+                if (_freeSize < data.Length)
+                {
+                    FreeCompleted();
+                }
+
+                // No free space: wait on the oldest pending copy. If its fence
+                // belongs to the command buffer we are still recording, it can
+                // never signal until submission — flush or cycle first.
+                while (_freeSize == 0)
+                {
+                    if (!WaitFreeCompleted(cbp))
+                    {
+                        if (isRender)
+                        {
+                            _gd.FlushAllCommands();
+                            scoped = cbp.Rent();
+                            isRender = false;
+                        }
+                        else
+                        {
+                            scoped = cbp.ReturnAndRent(scoped);
+                        }
+                    }
+                }
+
+                int chunkSize = Math.Min(_freeSize, data.Length);
+
+                PushDataImpl(scoped, dst, dstOffset, data.Slice(0, chunkSize));
+
+                dstOffset += chunkSize;
+                data = data.Slice(chunkSize);
+            }
+
+            // Only dispose command buffers this method rented itself.
+            if (!isRender)
+            {
+                scoped.Dispose();
+            }
+        }
+
+        // Writes one chunk (caller guarantees it fits in _freeSize) into the
+        // ring and records the staging->destination copy, splitting into two
+        // copies when the chunk wraps past the end of the ring.
+        private void PushDataImpl(CommandBufferScoped cbs, BufferHolder dst, int dstOffset, ReadOnlySpan<byte> data)
+        {
+            var srcBuffer = _buffer.GetBuffer();
+            var dstBuffer = dst.GetBuffer(cbs.CommandBuffer, dstOffset, data.Length, true);
+
+            int offset = _freeOffset;
+            int capacity = BufferSize - offset;
+            if (capacity < data.Length)
+            {
+                // Wraps: tail of the ring, then the head.
+                _buffer.SetDataUnchecked(offset, data.Slice(0, capacity));
+                _buffer.SetDataUnchecked(0, data.Slice(capacity));
+
+                BufferHolder.Copy(_gd, cbs, srcBuffer, dstBuffer, offset, dstOffset, capacity);
+                BufferHolder.Copy(_gd, cbs, srcBuffer, dstBuffer, 0, dstOffset + capacity, data.Length - capacity);
+            }
+            else
+            {
+                _buffer.SetDataUnchecked(offset, data);
+
+                BufferHolder.Copy(_gd, cbs, srcBuffer, dstBuffer, offset, dstOffset, data.Length);
+            }
+
+            // Power-of-two wrap of the write offset.
+            _freeOffset = (offset + data.Length) & (BufferSize - 1);
+            _freeSize -= data.Length;
+            Debug.Assert(_freeSize >= 0);
+
+            _pendingCopies.Enqueue(new PendingCopy(cbs.GetFence(), data.Length));
+        }
+
+        // Non-blocking variant: pushes only if the data fits in the space
+        // free right now (after reclaiming completed copies). Returns false
+        // when the caller should fall back to another upload path.
+        public unsafe bool TryPushData(CommandBufferScoped cbs, Action endRenderPass, BufferHolder dst, int dstOffset, ReadOnlySpan<byte> data)
+        {
+            if (data.Length > BufferSize)
+            {
+                return false;
+            }
+
+            if (_freeSize < data.Length)
+            {
+                FreeCompleted();
+
+                if (_freeSize < data.Length)
+                {
+                    return false;
+                }
+            }
+
+            endRenderPass();
+
+            PushDataImpl(cbs, dst, dstOffset, data);
+
+            return true;
+        }
+
+        // Waits for the oldest pending copy and reclaims its space. Returns
+        // false without waiting when that fence is on a command buffer still
+        // being recorded (it cannot signal until submitted).
+        private bool WaitFreeCompleted(CommandBufferPool cbp)
+        {
+            if (_pendingCopies.TryPeek(out var pc))
+            {
+                if (!pc.Fence.IsSignaled())
+                {
+                    if (cbp.IsFenceOnRentedCommandBuffer(pc.Fence))
+                    {
+                        return false;
+                    }
+
+                    pc.Fence.Wait();
+                }
+
+                var dequeued = _pendingCopies.Dequeue();
+                Debug.Assert(dequeued.Fence == pc.Fence);
+                _freeSize += pc.Size;
+                pc.Fence.Put();
+            }
+
+            return true;
+        }
+
+        // Reclaims space for all leading pending copies whose fences have
+        // already signalled, without blocking.
+        private void FreeCompleted()
+        {
+            FenceHolder signalledFence = null;
+            while (_pendingCopies.TryPeek(out var pc) && (pc.Fence == signalledFence || pc.Fence.IsSignaled()))
+            {
+                signalledFence = pc.Fence; // Already checked - don't need to do it again.
+                var dequeued = _pendingCopies.Dequeue();
+                Debug.Assert(dequeued.Fence == pc.Fence);
+                _freeSize += pc.Size;
+                pc.Fence.Put();
+            }
+        }
+
+        protected virtual void Dispose(bool disposing)
+        {
+            if (disposing)
+            {
+                _buffer.Dispose();
+
+                // Release the fence references still held by in-flight copies.
+                while (_pendingCopies.TryDequeue(out var pc))
+                {
+                    pc.Fence.Put();
+                }
+            }
+        }
+
+        public void Dispose()
+        {
+            Dispose(true);
+        }
+    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/SyncManager.cs b/src/Ryujinx.Graphics.Vulkan/SyncManager.cs
new file mode 100644
index 00000000..432d224f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/SyncManager.cs
@@ -0,0 +1,206 @@
+using Ryujinx.Common.Logging;
+using Silk.NET.Vulkan;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+    // Implements host-visible sync objects on top of Vulkan fences: each
+    // handle wraps the fences of the command buffers that were in flight when
+    // it was created, and is considered signalled once those fences are.
+    class SyncManager
+    {
+        private class SyncHandle
+        {
+            public ulong ID;
+            public MultiFenceHolder Waitable; // Null once the handle is deleted by Cleanup.
+            public ulong FlushId;             // FlushId captured at creation.
+            public bool Signalled;            // Cached result so fences aren't re-queried.
+
+            // True when the commands this handle waits on may not have been
+            // submitted yet (created at or after the latest registered flush).
+            // The subtract-and-cast comparison is wraparound-safe.
+            public bool NeedsFlush(ulong currentFlushId)
+            {
+                return (long)(FlushId - currentFlushId) >= 0;
+            }
+        }
+
+        // ID below which all handles have been signalled and deleted.
+        private ulong _firstHandle = 0;
+
+        private readonly VulkanRenderer _gd;
+        private readonly Device _device;
+        private List<SyncHandle> _handles;
+        private ulong FlushId;
+        private long WaitTicks; // Accumulated Stopwatch ticks spent blocked in Wait().
+
+        public SyncManager(VulkanRenderer gd, Device device)
+        {
+            _gd = gd;
+            _device = device;
+            _handles = new List<SyncHandle>();
+        }
+
+        // Records that a flush happened; used to decide whether a handle's
+        // commands still need flushing before its fences can signal.
+        public void RegisterFlush()
+        {
+            FlushId++;
+        }
+
+        // Creates sync handle "id". With strict (or when no interrupt action
+        // is available) all pending commands are flushed immediately;
+        // otherwise the flush is deferred until someone actually waits.
+        public void Create(ulong id, bool strict)
+        {
+            ulong flushId = FlushId;
+            MultiFenceHolder waitable = new MultiFenceHolder();
+            if (strict || _gd.InterruptAction == null)
+            {
+                _gd.FlushAllCommands();
+                _gd.CommandBufferPool.AddWaitable(waitable);
+            }
+            else
+            {
+                // Don't flush commands, instead wait for the current command buffer to finish.
+                // If this sync is waited on before the command buffer is submitted, interrupt the gpu thread and flush it manually.
+
+                _gd.CommandBufferPool.AddInUseWaitable(waitable);
+            }
+
+            SyncHandle handle = new SyncHandle
+            {
+                ID = id,
+                Waitable = waitable,
+                FlushId = flushId
+            };
+
+            lock (_handles)
+            {
+                _handles.Add(handle);
+            }
+        }
+
+        // Returns the highest handle ID known to be signalled, polling each
+        // still-live handle's fences with a zero timeout.
+        public ulong GetCurrent()
+        {
+            lock (_handles)
+            {
+                ulong lastHandle = _firstHandle;
+
+                foreach (SyncHandle handle in _handles)
+                {
+                    lock (handle)
+                    {
+                        if (handle.Waitable == null)
+                        {
+                            continue;
+                        }
+
+                        if (handle.ID > lastHandle)
+                        {
+                            bool signaled = handle.Signalled || handle.Waitable.WaitForFences(_gd.Api, _device, 0);
+                            if (signaled)
+                            {
+                                lastHandle = handle.ID;
+                                handle.Signalled = true;
+                            }
+                        }
+                    }
+                }
+
+                return lastHandle;
+            }
+        }
+
+        // Blocks until handle "id" signals (up to 1s). Flushes via the
+        // interrupt action first if the handle's commands may be unsubmitted.
+        public void Wait(ulong id)
+        {
+            SyncHandle result = null;
+
+            lock (_handles)
+            {
+                if ((long)(_firstHandle - id) > 0)
+                {
+                    return; // The handle has already been signalled or deleted.
+                }
+
+                foreach (SyncHandle handle in _handles)
+                {
+                    if (handle.ID == id)
+                    {
+                        result = handle;
+                        break;
+                    }
+                }
+            }
+
+            if (result != null)
+            {
+                lock (result)
+                {
+                    if (result.Waitable == null)
+                    {
+                        return;
+                    }
+
+                    long beforeTicks = Stopwatch.GetTimestamp();
+
+                    if (result.NeedsFlush(FlushId))
+                    {
+                        // Re-check inside the interrupt: another thread may
+                        // have flushed while we were queueing it.
+                        _gd.InterruptAction(() =>
+                        {
+                            if (result.NeedsFlush(FlushId))
+                            {
+                                _gd.FlushAllCommands();
+                            }
+                        });
+                    }
+
+                    bool signaled = result.Signalled || result.Waitable.WaitForFences(_gd.Api, _device, 1000000000);
+
+                    if (!signaled)
+                    {
+                        Logger.Error?.PrintMsg(LogClass.Gpu, $"VK Sync Object {result.ID} failed to signal within 1000ms. Continuing...");
+                    }
+                    else
+                    {
+                        WaitTicks += Stopwatch.GetTimestamp() - beforeTicks;
+                        result.Signalled = true;
+                    }
+                }
+            }
+        }
+
+        public void Cleanup()
+        {
+            // Iterate through handles and remove any that have already been signalled.
+
+            while (true)
+            {
+                SyncHandle first = null;
+                lock (_handles)
+                {
+                    first = _handles.FirstOrDefault();
+                }
+
+                // Stop at handles whose commands haven't been flushed yet:
+                // their fences can't have signalled.
+                if (first == null || first.NeedsFlush(FlushId)) break;
+
+                bool signaled = first.Waitable.WaitForFences(_gd.Api, _device, 0);
+                if (signaled)
+                {
+                    // Delete the sync object.
+                    lock (_handles)
+                    {
+                        lock (first)
+                        {
+                            _firstHandle = first.ID + 1;
+                            _handles.RemoveAt(0);
+                            first.Waitable = null;
+                        }
+                    }
+                } else
+                {
+                    // This sync handle and any following have not been reached yet.
+                    break;
+                }
+            }
+        }
+
+        // Returns the ticks spent blocked in Wait() since the last call, and
+        // resets the counter (used for frame-time statistics).
+        public long GetAndResetWaitTicks()
+        {
+            long result = WaitTicks;
+            WaitTicks = 0;
+
+            return result;
+        }
+    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/TextureBuffer.cs b/src/Ryujinx.Graphics.Vulkan/TextureBuffer.cs
new file mode 100644
index 00000000..738bf57d
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/TextureBuffer.cs
@@ -0,0 +1,160 @@
+using Ryujinx.Common.Memory;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+    // ITexture implementation backed by a buffer range (texel buffer) rather
+    // than an image. Data access goes through the buffer manager; image-style
+    // copies, views and sub-region uploads are unsupported and throw.
+    class TextureBuffer : ITexture
+    {
+        private readonly VulkanRenderer _gd;
+
+        // Current backing storage, assigned by SetStorage.
+        private BufferHandle _bufferHandle;
+        private int _offset;
+        private int _size;
+        // Lazily created view in this texture's own VkFormat.
+        private Auto<DisposableBufferView> _bufferView;
+        // Additional views created for other formats, keyed by GAL format.
+        private Dictionary<GAL.Format, Auto<DisposableBufferView>> _selfManagedViews;
+
+        // BufferManager.BufferCount seen when storage was last set; a change
+        // means the underlying buffer may have been replaced, so SetStorage
+        // must not early-out even for identical handle/offset/size.
+        private int _bufferCount;
+
+        public int Width { get; }
+        public int Height { get; }
+
+        public VkFormat VkFormat { get; }
+
+        public float ScaleFactor { get; }
+
+        public TextureBuffer(VulkanRenderer gd, TextureCreateInfo info, float scale)
+        {
+            _gd = gd;
+            Width = info.Width;
+            Height = info.Height;
+            VkFormat = FormatTable.GetFormat(info.Format);
+            ScaleFactor = scale;
+
+            // Registered with the renderer so Release() can unregister it.
+            gd.Textures.Add(this);
+        }
+
+        // Image-to-image copies don't apply to buffer-backed textures.
+        public void CopyTo(ITexture destination, int firstLayer, int firstLevel)
+        {
+            throw new NotSupportedException();
+        }
+
+        public void CopyTo(ITexture destination, int srcLayer, int dstLayer, int srcLevel, int dstLevel)
+        {
+            throw new NotSupportedException();
+        }
+
+        public void CopyTo(ITexture destination, Extents2D srcRegion, Extents2D dstRegion, bool linearFilter)
+        {
+            throw new NotSupportedException();
+        }
+
+        public ITexture CreateView(TextureCreateInfo info, int firstLayer, int firstLevel)
+        {
+            throw new NotSupportedException();
+        }
+
+        // Reads back the full backing buffer range.
+        public PinnedSpan<byte> GetData()
+        {
+            return _gd.GetBufferData(_bufferHandle, _offset, _size);
+        }
+
+        // Layer/level are meaningless for a buffer texture; same as GetData().
+        public PinnedSpan<byte> GetData(int layer, int level)
+        {
+            return GetData();
+        }
+
+        // Unregisters from the renderer; views are only released on the first
+        // successful removal.
+        public void Release()
+        {
+            if (_gd.Textures.Remove(this))
+            {
+                ReleaseImpl();
+            }
+        }
+
+        // Disposes all buffer views. Also used as the "buffer replaced"
+        // callback passed to BufferManager.CreateView, invalidating the views.
+        private void ReleaseImpl()
+        {
+            if (_selfManagedViews != null)
+            {
+                foreach (var bufferView in _selfManagedViews.Values)
+                {
+                    bufferView.Dispose();
+                }
+
+                _selfManagedViews = null;
+            }
+
+            _bufferView?.Dispose();
+            _bufferView = null;
+        }
+
+        public void SetData(SpanOrArray<byte> data)
+        {
+            _gd.SetBufferData(_bufferHandle, _offset, data);
+        }
+
+        public void SetData(SpanOrArray<byte> data, int layer, int level)
+        {
+            throw new NotSupportedException();
+        }
+
+        public void SetData(SpanOrArray<byte> data, int layer, int level, Rectangle<int> region)
+        {
+            throw new NotSupportedException();
+        }
+
+        // Points this texture at a new buffer range, dropping stale views.
+        public void SetStorage(BufferRange buffer)
+        {
+            if (_bufferHandle == buffer.Handle &&
+                _offset == buffer.Offset &&
+                _size == buffer.Size &&
+                _bufferCount == _gd.BufferManager.BufferCount)
+            {
+                return;
+            }
+
+            _bufferHandle = buffer.Handle;
+            _offset = buffer.Offset;
+            _size = buffer.Size;
+            _bufferCount = _gd.BufferManager.BufferCount;
+
+            ReleaseImpl();
+        }
+
+        // Gets (creating on demand) the buffer view in this texture's format.
+        public BufferView GetBufferView(CommandBufferScoped cbs)
+        {
+            if (_bufferView == null)
+            {
+                _bufferView = _gd.BufferManager.CreateView(_bufferHandle, VkFormat, _offset, _size, ReleaseImpl);
+            }
+
+            return _bufferView?.Get(cbs, _offset, _size).Value ?? default;
+        }
+
+        // Gets a view reinterpreting the buffer in another format, caching it
+        // in _selfManagedViews; falls back to the primary view when the
+        // requested format matches VkFormat.
+        public BufferView GetBufferView(CommandBufferScoped cbs, GAL.Format format)
+        {
+            var vkFormat = FormatTable.GetFormat(format);
+            if (vkFormat == VkFormat)
+            {
+                return GetBufferView(cbs);
+            }
+
+            if (_selfManagedViews != null && _selfManagedViews.TryGetValue(format, out var bufferView))
+            {
+                return bufferView.Get(cbs, _offset, _size).Value;
+            }
+
+            bufferView = _gd.BufferManager.CreateView(_bufferHandle, vkFormat, _offset, _size, ReleaseImpl);
+
+            if (bufferView != null)
+            {
+                (_selfManagedViews ??= new Dictionary<GAL.Format, Auto<DisposableBufferView>>()).Add(format, bufferView);
+            }
+
+            return bufferView?.Get(cbs, _offset, _size).Value ?? default;
+        }
+    }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/TextureCopy.cs b/src/Ryujinx.Graphics.Vulkan/TextureCopy.cs
new file mode 100644
index 00000000..c7ce2d99
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/TextureCopy.cs
@@ -0,0 +1,476 @@
+using Ryujinx.Common;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Numerics;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ static class TextureCopy
+ {
+ public static void Blit(
+ Vk api,
+ CommandBuffer commandBuffer,
+ Image srcImage,
+ Image dstImage,
+ TextureCreateInfo srcInfo,
+ TextureCreateInfo dstInfo,
+ Extents2D srcRegion,
+ Extents2D dstRegion,
+ int srcLayer,
+ int dstLayer,
+ int srcLevel,
+ int dstLevel,
+ int layers,
+ int levels,
+ bool linearFilter,
+ ImageAspectFlags srcAspectFlags = 0,
+ ImageAspectFlags dstAspectFlags = 0)
+ {
+ static (Offset3D, Offset3D) ExtentsToOffset3D(Extents2D extents, int width, int height, int level)
+ {
+ static int Clamp(int value, int max)
+ {
+ return Math.Clamp(value, 0, max);
+ }
+
+ var xy1 = new Offset3D(Clamp(extents.X1, width) >> level, Clamp(extents.Y1, height) >> level, 0);
+ var xy2 = new Offset3D(Clamp(extents.X2, width) >> level, Clamp(extents.Y2, height) >> level, 1);
+
+ return (xy1, xy2);
+ }
+
+ if (srcAspectFlags == 0)
+ {
+ srcAspectFlags = srcInfo.Format.ConvertAspectFlags();
+ }
+
+ if (dstAspectFlags == 0)
+ {
+ dstAspectFlags = dstInfo.Format.ConvertAspectFlags();
+ }
+
+ var srcOffsets = new ImageBlit.SrcOffsetsBuffer();
+ var dstOffsets = new ImageBlit.DstOffsetsBuffer();
+
+ var filter = linearFilter && !dstInfo.Format.IsDepthOrStencil() ? Filter.Linear : Filter.Nearest;
+
+ TextureView.InsertImageBarrier(
+ api,
+ commandBuffer,
+ srcImage,
+ TextureStorage.DefaultAccessMask,
+ AccessFlags.TransferReadBit,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.TransferBit,
+ srcAspectFlags,
+ srcLayer,
+ srcLevel,
+ layers,
+ levels);
+
+ uint copySrcLevel = (uint)srcLevel;
+ uint copyDstLevel = (uint)dstLevel;
+
+ for (int level = 0; level < levels; level++)
+ {
+ var srcSl = new ImageSubresourceLayers(srcAspectFlags, copySrcLevel, (uint)srcLayer, (uint)layers);
+ var dstSl = new ImageSubresourceLayers(dstAspectFlags, copyDstLevel, (uint)dstLayer, (uint)layers);
+
+ (srcOffsets.Element0, srcOffsets.Element1) = ExtentsToOffset3D(srcRegion, srcInfo.Width, srcInfo.Height, level);
+ (dstOffsets.Element0, dstOffsets.Element1) = ExtentsToOffset3D(dstRegion, dstInfo.Width, dstInfo.Height, level);
+
+ var region = new ImageBlit()
+ {
+ SrcSubresource = srcSl,
+ SrcOffsets = srcOffsets,
+ DstSubresource = dstSl,
+ DstOffsets = dstOffsets
+ };
+
+ api.CmdBlitImage(commandBuffer, srcImage, ImageLayout.General, dstImage, ImageLayout.General, 1, region, filter);
+
+ copySrcLevel++;
+ copyDstLevel++;
+
+ if (srcInfo.Target == Target.Texture3D || dstInfo.Target == Target.Texture3D)
+ {
+ layers = Math.Max(1, layers >> 1);
+ }
+ }
+
+ TextureView.InsertImageBarrier(
+ api,
+ commandBuffer,
+ dstImage,
+ AccessFlags.TransferWriteBit,
+ TextureStorage.DefaultAccessMask,
+ PipelineStageFlags.TransferBit,
+ PipelineStageFlags.AllCommandsBit,
+ dstAspectFlags,
+ dstLayer,
+ dstLevel,
+ layers,
+ levels);
+ }
+
+ public static void Copy(
+ Vk api,
+ CommandBuffer commandBuffer,
+ Image srcImage,
+ Image dstImage,
+ TextureCreateInfo srcInfo,
+ TextureCreateInfo dstInfo,
+ int srcViewLayer,
+ int dstViewLayer,
+ int srcViewLevel,
+ int dstViewLevel,
+ int srcLayer,
+ int dstLayer,
+ int srcLevel,
+ int dstLevel)
+ {
+ int srcDepth = srcInfo.GetDepthOrLayers();
+ int srcLevels = srcInfo.Levels;
+
+ int dstDepth = dstInfo.GetDepthOrLayers();
+ int dstLevels = dstInfo.Levels;
+
+ if (dstInfo.Target == Target.Texture3D)
+ {
+ dstDepth = Math.Max(1, dstDepth >> dstLevel);
+ }
+
+ int depth = Math.Min(srcDepth, dstDepth);
+ int levels = Math.Min(srcLevels, dstLevels);
+
+ Copy(
+ api,
+ commandBuffer,
+ srcImage,
+ dstImage,
+ srcInfo,
+ dstInfo,
+ srcViewLayer,
+ dstViewLayer,
+ srcViewLevel,
+ dstViewLevel,
+ srcLayer,
+ dstLayer,
+ srcLevel,
+ dstLevel,
+ depth,
+ levels);
+ }
+
+ private static int ClampLevels(TextureCreateInfo info, int levels)
+ {
+ int width = info.Width;
+ int height = info.Height;
+ int depth = info.Target == Target.Texture3D ? info.Depth : 1;
+
+ int maxLevels = 1 + BitOperations.Log2((uint)Math.Max(Math.Max(width, height), depth));
+
+ if (levels > maxLevels)
+ {
+ levels = maxLevels;
+ }
+
+ return levels;
+ }
+
+ public static void Copy(
+ Vk api,
+ CommandBuffer commandBuffer,
+ Image srcImage,
+ Image dstImage,
+ TextureCreateInfo srcInfo,
+ TextureCreateInfo dstInfo,
+ int srcViewLayer,
+ int dstViewLayer,
+ int srcViewLevel,
+ int dstViewLevel,
+ int srcDepthOrLayer,
+ int dstDepthOrLayer,
+ int srcLevel,
+ int dstLevel,
+ int depthOrLayers,
+ int levels)
+ {
+ int srcZ;
+ int srcLayer;
+ int srcDepth;
+ int srcLayers;
+
+ if (srcInfo.Target == Target.Texture3D)
+ {
+ srcZ = srcDepthOrLayer;
+ srcLayer = 0;
+ srcDepth = depthOrLayers;
+ srcLayers = 1;
+ }
+ else
+ {
+ srcZ = 0;
+ srcLayer = srcDepthOrLayer;
+ srcDepth = 1;
+ srcLayers = depthOrLayers;
+ }
+
+ int dstZ;
+ int dstLayer;
+ int dstDepth;
+ int dstLayers;
+
+ if (dstInfo.Target == Target.Texture3D)
+ {
+ dstZ = dstDepthOrLayer;
+ dstLayer = 0;
+ dstDepth = depthOrLayers;
+ dstLayers = 1;
+ }
+ else
+ {
+ dstZ = 0;
+ dstLayer = dstDepthOrLayer;
+ dstDepth = 1;
+ dstLayers = depthOrLayers;
+ }
+
+ int srcWidth = srcInfo.Width;
+ int srcHeight = srcInfo.Height;
+
+ int dstWidth = dstInfo.Width;
+ int dstHeight = dstInfo.Height;
+
+ srcWidth = Math.Max(1, srcWidth >> srcLevel);
+ srcHeight = Math.Max(1, srcHeight >> srcLevel);
+
+ dstWidth = Math.Max(1, dstWidth >> dstLevel);
+ dstHeight = Math.Max(1, dstHeight >> dstLevel);
+
+ int blockWidth = 1;
+ int blockHeight = 1;
+ bool sizeInBlocks = false;
+
+ // When copying from a compressed to a non-compressed format,
+ // the non-compressed texture will have the size of the texture
+ // in blocks (not in texels), so we must adjust that size to
+ // match the size in texels of the compressed texture.
+ if (!srcInfo.IsCompressed && dstInfo.IsCompressed)
+ {
+ srcWidth *= dstInfo.BlockWidth;
+ srcHeight *= dstInfo.BlockHeight;
+ blockWidth = dstInfo.BlockWidth;
+ blockHeight = dstInfo.BlockHeight;
+
+ sizeInBlocks = true;
+ }
+ else if (srcInfo.IsCompressed && !dstInfo.IsCompressed)
+ {
+ dstWidth *= srcInfo.BlockWidth;
+ dstHeight *= srcInfo.BlockHeight;
+ blockWidth = srcInfo.BlockWidth;
+ blockHeight = srcInfo.BlockHeight;
+ }
+
+ int width = Math.Min(srcWidth, dstWidth);
+ int height = Math.Min(srcHeight, dstHeight);
+
+ ImageAspectFlags srcAspect = srcInfo.Format.ConvertAspectFlags();
+ ImageAspectFlags dstAspect = dstInfo.Format.ConvertAspectFlags();
+
+ TextureView.InsertImageBarrier(
+ api,
+ commandBuffer,
+ srcImage,
+ TextureStorage.DefaultAccessMask,
+ AccessFlags.TransferReadBit,
+ PipelineStageFlags.AllCommandsBit,
+ PipelineStageFlags.TransferBit,
+ srcAspect,
+ srcViewLayer + srcLayer,
+ srcViewLevel + srcLevel,
+ srcLayers,
+ levels);
+
+ for (int level = 0; level < levels; level++)
+ {
+                // Stop copying if we are already outside the levels range.
+ if (level >= srcInfo.Levels || dstLevel + level >= dstInfo.Levels)
+ {
+ break;
+ }
+
+ var srcSl = new ImageSubresourceLayers(
+ srcAspect,
+ (uint)(srcViewLevel + srcLevel + level),
+ (uint)(srcViewLayer + srcLayer),
+ (uint)srcLayers);
+
+ var dstSl = new ImageSubresourceLayers(
+ dstAspect,
+ (uint)(dstViewLevel + dstLevel + level),
+ (uint)(dstViewLayer + dstLayer),
+ (uint)dstLayers);
+
+ int copyWidth = sizeInBlocks ? BitUtils.DivRoundUp(width, blockWidth) : width;
+ int copyHeight = sizeInBlocks ? BitUtils.DivRoundUp(height, blockHeight) : height;
+
+ var extent = new Extent3D((uint)copyWidth, (uint)copyHeight, (uint)srcDepth);
+
+ if (srcInfo.Samples > 1 && srcInfo.Samples != dstInfo.Samples)
+ {
+ var region = new ImageResolve(srcSl, new Offset3D(0, 0, srcZ), dstSl, new Offset3D(0, 0, dstZ), extent);
+
+ api.CmdResolveImage(commandBuffer, srcImage, ImageLayout.General, dstImage, ImageLayout.General, 1, region);
+ }
+ else
+ {
+ var region = new ImageCopy(srcSl, new Offset3D(0, 0, srcZ), dstSl, new Offset3D(0, 0, dstZ), extent);
+
+ api.CmdCopyImage(commandBuffer, srcImage, ImageLayout.General, dstImage, ImageLayout.General, 1, region);
+ }
+
+ width = Math.Max(1, width >> 1);
+ height = Math.Max(1, height >> 1);
+
+ if (srcInfo.Target == Target.Texture3D)
+ {
+ srcDepth = Math.Max(1, srcDepth >> 1);
+ }
+ }
+
+ TextureView.InsertImageBarrier(
+ api,
+ commandBuffer,
+ dstImage,
+ AccessFlags.TransferWriteBit,
+ TextureStorage.DefaultAccessMask,
+ PipelineStageFlags.TransferBit,
+ PipelineStageFlags.AllCommandsBit,
+ dstAspect,
+ dstViewLayer + dstLayer,
+ dstViewLevel + dstLevel,
+ dstLayers,
+ levels);
+ }
+
+ public unsafe static void ResolveDepthStencil(
+ VulkanRenderer gd,
+ Device device,
+ CommandBufferScoped cbs,
+ TextureView src,
+ TextureView dst)
+ {
+ var dsAttachmentReference = new AttachmentReference2(StructureType.AttachmentReference2, null, 0, ImageLayout.General);
+ var dsResolveAttachmentReference = new AttachmentReference2(StructureType.AttachmentReference2, null, 1, ImageLayout.General);
+
+ var subpassDsResolve = new SubpassDescriptionDepthStencilResolve()
+ {
+ SType = StructureType.SubpassDescriptionDepthStencilResolve,
+ PDepthStencilResolveAttachment = &dsResolveAttachmentReference,
+ DepthResolveMode = ResolveModeFlags.SampleZeroBit,
+ StencilResolveMode = ResolveModeFlags.SampleZeroBit
+ };
+
+ var subpass = new SubpassDescription2()
+ {
+ SType = StructureType.SubpassDescription2,
+ PipelineBindPoint = PipelineBindPoint.Graphics,
+ PDepthStencilAttachment = &dsAttachmentReference,
+ PNext = &subpassDsResolve
+ };
+
+ AttachmentDescription2[] attachmentDescs = new AttachmentDescription2[2];
+
+ attachmentDescs[0] = new AttachmentDescription2(
+ StructureType.AttachmentDescription2,
+ null,
+ 0,
+ src.VkFormat,
+ TextureStorage.ConvertToSampleCountFlags(gd.Capabilities.SupportedSampleCounts, (uint)src.Info.Samples),
+ AttachmentLoadOp.Load,
+ AttachmentStoreOp.Store,
+ AttachmentLoadOp.Load,
+ AttachmentStoreOp.Store,
+ ImageLayout.General,
+ ImageLayout.General);
+
+ attachmentDescs[1] = new AttachmentDescription2(
+ StructureType.AttachmentDescription2,
+ null,
+ 0,
+ dst.VkFormat,
+ TextureStorage.ConvertToSampleCountFlags(gd.Capabilities.SupportedSampleCounts, (uint)dst.Info.Samples),
+ AttachmentLoadOp.Load,
+ AttachmentStoreOp.Store,
+ AttachmentLoadOp.Load,
+ AttachmentStoreOp.Store,
+ ImageLayout.General,
+ ImageLayout.General);
+
+ var subpassDependency = PipelineConverter.CreateSubpassDependency2();
+
+ fixed (AttachmentDescription2* pAttachmentDescs = attachmentDescs)
+ {
+ var renderPassCreateInfo = new RenderPassCreateInfo2()
+ {
+ SType = StructureType.RenderPassCreateInfo2,
+ PAttachments = pAttachmentDescs,
+ AttachmentCount = (uint)attachmentDescs.Length,
+ PSubpasses = &subpass,
+ SubpassCount = 1,
+ PDependencies = &subpassDependency,
+ DependencyCount = 1
+ };
+
+ gd.Api.CreateRenderPass2(device, renderPassCreateInfo, null, out var renderPass).ThrowOnError();
+
+ using var rp = new Auto<DisposableRenderPass>(new DisposableRenderPass(gd.Api, device, renderPass));
+
+ ImageView* attachments = stackalloc ImageView[2];
+
+ var srcView = src.GetImageViewForAttachment();
+ var dstView = dst.GetImageViewForAttachment();
+
+ attachments[0] = srcView.Get(cbs).Value;
+ attachments[1] = dstView.Get(cbs).Value;
+
+ var framebufferCreateInfo = new FramebufferCreateInfo()
+ {
+ SType = StructureType.FramebufferCreateInfo,
+ RenderPass = rp.Get(cbs).Value,
+ AttachmentCount = 2,
+ PAttachments = attachments,
+ Width = (uint)src.Width,
+ Height = (uint)src.Height,
+ Layers = (uint)src.Layers
+ };
+
+ gd.Api.CreateFramebuffer(device, framebufferCreateInfo, null, out var framebuffer).ThrowOnError();
+ using var fb = new Auto<DisposableFramebuffer>(new DisposableFramebuffer(gd.Api, device, framebuffer), null, new[] { srcView, dstView });
+
+ var renderArea = new Rect2D(null, new Extent2D((uint)src.Info.Width, (uint)src.Info.Height));
+ var clearValue = new ClearValue();
+
+ var renderPassBeginInfo = new RenderPassBeginInfo()
+ {
+ SType = StructureType.RenderPassBeginInfo,
+ RenderPass = rp.Get(cbs).Value,
+ Framebuffer = fb.Get(cbs).Value,
+ RenderArea = renderArea,
+ PClearValues = &clearValue,
+ ClearValueCount = 1
+ };
+
+            // The resolve operation happens at the end of the subpass, so let's just do a begin/end
+            // to resolve the depth-stencil texture.
+            // TODO: Do a speculative resolve as part of the same render pass as the draw to avoid
+            // ending the current render pass?
+ gd.Api.CmdBeginRenderPass(cbs.CommandBuffer, renderPassBeginInfo, SubpassContents.Inline);
+ gd.Api.CmdEndRenderPass(cbs.CommandBuffer);
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/TextureStorage.cs b/src/Ryujinx.Graphics.Vulkan/TextureStorage.cs
new file mode 100644
index 00000000..0582e6ca
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/TextureStorage.cs
@@ -0,0 +1,530 @@
+using Ryujinx.Common;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Numerics;
+using VkBuffer = Silk.NET.Vulkan.Buffer;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class TextureStorage : IDisposable
+ {
+ private const MemoryPropertyFlags DefaultImageMemoryFlags =
+ MemoryPropertyFlags.DeviceLocalBit;
+
+ private const ImageUsageFlags DefaultUsageFlags =
+ ImageUsageFlags.SampledBit |
+ ImageUsageFlags.TransferSrcBit |
+ ImageUsageFlags.TransferDstBit;
+
+ public const AccessFlags DefaultAccessMask =
+ AccessFlags.ShaderReadBit |
+ AccessFlags.ShaderWriteBit |
+ AccessFlags.ColorAttachmentReadBit |
+ AccessFlags.ColorAttachmentWriteBit |
+ AccessFlags.DepthStencilAttachmentReadBit |
+ AccessFlags.DepthStencilAttachmentWriteBit |
+ AccessFlags.TransferReadBit |
+ AccessFlags.TransferWriteBit;
+
+ private readonly VulkanRenderer _gd;
+
+ private readonly Device _device;
+
+ private TextureCreateInfo _info;
+
+ public TextureCreateInfo Info => _info;
+
+ private readonly Image _image;
+ private readonly Auto<DisposableImage> _imageAuto;
+ private readonly Auto<MemoryAllocation> _allocationAuto;
+ private Auto<MemoryAllocation> _foreignAllocationAuto;
+
+ private Dictionary<GAL.Format, TextureStorage> _aliasedStorages;
+
+ private AccessFlags _lastModificationAccess;
+ private PipelineStageFlags _lastModificationStage;
+ private AccessFlags _lastReadAccess;
+ private PipelineStageFlags _lastReadStage;
+
+ private int _viewsCount;
+ private ulong _size;
+
+ public VkFormat VkFormat { get; }
+ public float ScaleFactor { get; }
+
+ public unsafe TextureStorage(
+ VulkanRenderer gd,
+ Device device,
+ TextureCreateInfo info,
+ float scaleFactor,
+ Auto<MemoryAllocation> foreignAllocation = null)
+ {
+ _gd = gd;
+ _device = device;
+ _info = info;
+ ScaleFactor = scaleFactor;
+
+ var format = _gd.FormatCapabilities.ConvertToVkFormat(info.Format);
+ var levels = (uint)info.Levels;
+ var layers = (uint)info.GetLayers();
+ var depth = (uint)(info.Target == Target.Texture3D ? info.Depth : 1);
+
+ VkFormat = format;
+
+ var type = info.Target.Convert();
+
+ var extent = new Extent3D((uint)info.Width, (uint)info.Height, depth);
+
+ var sampleCountFlags = ConvertToSampleCountFlags(gd.Capabilities.SupportedSampleCounts, (uint)info.Samples);
+
+ var usage = GetImageUsageFromFormat(info.Format);
+
+ var flags = ImageCreateFlags.CreateMutableFormatBit;
+
+            // This flag causes mipmapped texture arrays to break on AMD GCN, so on that hardware copy dependencies are forced when aliasing as cube.
+ bool isCube = info.Target == Target.Cubemap || info.Target == Target.CubemapArray;
+ bool cubeCompatible = gd.IsAmdGcn ? isCube : (info.Width == info.Height && layers >= 6);
+
+ if (type == ImageType.Type2D && cubeCompatible)
+ {
+ flags |= ImageCreateFlags.CreateCubeCompatibleBit;
+ }
+
+ if (type == ImageType.Type3D && !gd.Capabilities.PortabilitySubset.HasFlag(PortabilitySubsetFlags.No3DImageView))
+ {
+ flags |= ImageCreateFlags.Create2DArrayCompatibleBit;
+ }
+
+ var imageCreateInfo = new ImageCreateInfo()
+ {
+ SType = StructureType.ImageCreateInfo,
+ ImageType = type,
+ Format = format,
+ Extent = extent,
+ MipLevels = levels,
+ ArrayLayers = layers,
+ Samples = sampleCountFlags,
+ Tiling = ImageTiling.Optimal,
+ Usage = usage,
+ SharingMode = SharingMode.Exclusive,
+ InitialLayout = ImageLayout.Undefined,
+ Flags = flags
+ };
+
+ gd.Api.CreateImage(device, imageCreateInfo, null, out _image).ThrowOnError();
+
+ if (foreignAllocation == null)
+ {
+ gd.Api.GetImageMemoryRequirements(device, _image, out var requirements);
+ var allocation = gd.MemoryAllocator.AllocateDeviceMemory(requirements, DefaultImageMemoryFlags);
+
+ if (allocation.Memory.Handle == 0UL)
+ {
+ gd.Api.DestroyImage(device, _image, null);
+ throw new Exception("Image initialization failed.");
+ }
+
+ _size = requirements.Size;
+
+ gd.Api.BindImageMemory(device, _image, allocation.Memory, allocation.Offset).ThrowOnError();
+
+ _allocationAuto = new Auto<MemoryAllocation>(allocation);
+ _imageAuto = new Auto<DisposableImage>(new DisposableImage(_gd.Api, device, _image), null, _allocationAuto);
+
+ InitialTransition(ImageLayout.Undefined, ImageLayout.General);
+ }
+ else
+ {
+ _foreignAllocationAuto = foreignAllocation;
+ foreignAllocation.IncrementReferenceCount();
+ var allocation = foreignAllocation.GetUnsafe();
+
+ gd.Api.BindImageMemory(device, _image, allocation.Memory, allocation.Offset).ThrowOnError();
+
+ _imageAuto = new Auto<DisposableImage>(new DisposableImage(_gd.Api, device, _image));
+
+ InitialTransition(ImageLayout.Preinitialized, ImageLayout.General);
+ }
+ }
+
+ public TextureStorage CreateAliasedColorForDepthStorageUnsafe(GAL.Format format)
+ {
+ var colorFormat = format switch
+ {
+ GAL.Format.S8Uint => GAL.Format.R8Unorm,
+ GAL.Format.D16Unorm => GAL.Format.R16Unorm,
+ GAL.Format.S8UintD24Unorm => GAL.Format.R8G8B8A8Unorm,
+ GAL.Format.D32Float => GAL.Format.R32Float,
+ GAL.Format.D24UnormS8Uint => GAL.Format.R8G8B8A8Unorm,
+ GAL.Format.D32FloatS8Uint => GAL.Format.R32G32Float,
+ _ => throw new ArgumentException($"\"{format}\" is not a supported depth or stencil format.")
+ };
+
+ return CreateAliasedStorageUnsafe(colorFormat);
+ }
+
+ public TextureStorage CreateAliasedStorageUnsafe(GAL.Format format)
+ {
+ if (_aliasedStorages == null || !_aliasedStorages.TryGetValue(format, out var storage))
+ {
+ _aliasedStorages ??= new Dictionary<GAL.Format, TextureStorage>();
+
+ var info = NewCreateInfoWith(ref _info, format, _info.BytesPerPixel);
+
+ storage = new TextureStorage(_gd, _device, info, ScaleFactor, _allocationAuto);
+
+ _aliasedStorages.Add(format, storage);
+ }
+
+ return storage;
+ }
+
+ public static TextureCreateInfo NewCreateInfoWith(ref TextureCreateInfo info, GAL.Format format, int bytesPerPixel)
+ {
+ return NewCreateInfoWith(ref info, format, bytesPerPixel, info.Width, info.Height);
+ }
+
+ public static TextureCreateInfo NewCreateInfoWith(
+ ref TextureCreateInfo info,
+ GAL.Format format,
+ int bytesPerPixel,
+ int width,
+ int height)
+ {
+ return new TextureCreateInfo(
+ width,
+ height,
+ info.Depth,
+ info.Levels,
+ info.Samples,
+ info.BlockWidth,
+ info.BlockHeight,
+ bytesPerPixel,
+ format,
+ info.DepthStencilMode,
+ info.Target,
+ info.SwizzleR,
+ info.SwizzleG,
+ info.SwizzleB,
+ info.SwizzleA);
+ }
+
+ public Auto<DisposableImage> GetImage()
+ {
+ return _imageAuto;
+ }
+
+ public Image GetImageForViewCreation()
+ {
+ return _image;
+ }
+
+ public bool HasCommandBufferDependency(CommandBufferScoped cbs)
+ {
+ if (_foreignAllocationAuto != null)
+ {
+ return _foreignAllocationAuto.HasCommandBufferDependency(cbs);
+ }
+ else if (_allocationAuto != null)
+ {
+ return _allocationAuto.HasCommandBufferDependency(cbs);
+ }
+
+ return false;
+ }
+
+ private unsafe void InitialTransition(ImageLayout srcLayout, ImageLayout dstLayout)
+ {
+ CommandBufferScoped cbs;
+ bool useTempCbs = !_gd.CommandBufferPool.OwnedByCurrentThread;
+
+ if (useTempCbs)
+ {
+ cbs = _gd.BackgroundResources.Get().GetPool().Rent();
+ }
+ else
+ {
+ if (_gd.PipelineInternal != null)
+ {
+ cbs = _gd.PipelineInternal.GetPreloadCommandBuffer();
+ }
+ else
+ {
+ cbs = _gd.CommandBufferPool.Rent();
+ useTempCbs = true;
+ }
+ }
+
+ var aspectFlags = _info.Format.ConvertAspectFlags();
+
+ var subresourceRange = new ImageSubresourceRange(aspectFlags, 0, (uint)_info.Levels, 0, (uint)_info.GetLayers());
+
+ var barrier = new ImageMemoryBarrier()
+ {
+ SType = StructureType.ImageMemoryBarrier,
+ SrcAccessMask = 0,
+ DstAccessMask = DefaultAccessMask,
+ OldLayout = srcLayout,
+ NewLayout = dstLayout,
+ SrcQueueFamilyIndex = Vk.QueueFamilyIgnored,
+ DstQueueFamilyIndex = Vk.QueueFamilyIgnored,
+ Image = _imageAuto.Get(cbs).Value,
+ SubresourceRange = subresourceRange
+ };
+
+ _gd.Api.CmdPipelineBarrier(
+ cbs.CommandBuffer,
+ PipelineStageFlags.TopOfPipeBit,
+ PipelineStageFlags.AllCommandsBit,
+ 0,
+ 0,
+ null,
+ 0,
+ null,
+ 1,
+ barrier);
+
+ if (useTempCbs)
+ {
+ cbs.Dispose();
+ }
+ }
+
+ public static ImageUsageFlags GetImageUsageFromFormat(GAL.Format format)
+ {
+ var usage = DefaultUsageFlags;
+
+ if (format.IsDepthOrStencil())
+ {
+ usage |= ImageUsageFlags.DepthStencilAttachmentBit;
+ }
+ else if (format.IsRtColorCompatible())
+ {
+ usage |= ImageUsageFlags.ColorAttachmentBit;
+ }
+
+ if (format.IsImageCompatible())
+ {
+ usage |= ImageUsageFlags.StorageBit;
+ }
+
+ return usage;
+ }
+
+ public static SampleCountFlags ConvertToSampleCountFlags(SampleCountFlags supportedSampleCounts, uint samples)
+ {
+ if (samples == 0 || samples > (uint)SampleCountFlags.Count64Bit)
+ {
+ return SampleCountFlags.Count1Bit;
+ }
+
+            // Round down to the nearest power of two (highest set bit of the sample count).
+ SampleCountFlags converted = (SampleCountFlags)(1u << (31 - BitOperations.LeadingZeroCount(samples)));
+
+ // Pick nearest sample count that the host actually supports.
+ while (converted != SampleCountFlags.Count1Bit && (converted & supportedSampleCounts) == 0)
+ {
+ converted = (SampleCountFlags)((uint)converted >> 1);
+ }
+
+ return converted;
+ }
+
+ public TextureView CreateView(TextureCreateInfo info, int firstLayer, int firstLevel)
+ {
+ return new TextureView(_gd, _device, info, this, firstLayer, firstLevel);
+ }
+
+ public void CopyFromOrToBuffer(
+ CommandBuffer commandBuffer,
+ VkBuffer buffer,
+ Image image,
+ int size,
+ bool to,
+ int x,
+ int y,
+ int dstLayer,
+ int dstLevel,
+ int dstLayers,
+ int dstLevels,
+ bool singleSlice,
+ ImageAspectFlags aspectFlags,
+ bool forFlush)
+ {
+ bool is3D = Info.Target == Target.Texture3D;
+ int width = Info.Width;
+ int height = Info.Height;
+ int depth = is3D && !singleSlice ? Info.Depth : 1;
+ int layer = is3D ? 0 : dstLayer;
+ int layers = dstLayers;
+ int levels = dstLevels;
+
+ int offset = 0;
+
+ for (int level = 0; level < levels; level++)
+ {
+ int mipSize = Info.GetMipSize(level);
+
+ if (forFlush)
+ {
+ mipSize = GetBufferDataLength(mipSize);
+ }
+
+ int endOffset = offset + mipSize;
+
+ if ((uint)endOffset > (uint)size)
+ {
+ break;
+ }
+
+ int rowLength = (Info.GetMipStride(level) / Info.BytesPerPixel) * Info.BlockWidth;
+
+ var sl = new ImageSubresourceLayers(
+ aspectFlags,
+ (uint)(dstLevel + level),
+ (uint)layer,
+ (uint)layers);
+
+ var extent = new Extent3D((uint)width, (uint)height, (uint)depth);
+
+ int z = is3D ? dstLayer : 0;
+
+ var region = new BufferImageCopy(
+ (ulong)offset,
+ (uint)BitUtils.AlignUp(rowLength, Info.BlockWidth),
+ (uint)BitUtils.AlignUp(height, Info.BlockHeight),
+ sl,
+ new Offset3D(x, y, z),
+ extent);
+
+ if (to)
+ {
+ _gd.Api.CmdCopyImageToBuffer(commandBuffer, image, ImageLayout.General, buffer, 1, region);
+ }
+ else
+ {
+ _gd.Api.CmdCopyBufferToImage(commandBuffer, buffer, image, ImageLayout.General, 1, region);
+ }
+
+ offset += mipSize;
+
+ width = Math.Max(1, width >> 1);
+ height = Math.Max(1, height >> 1);
+
+ if (Info.Target == Target.Texture3D)
+ {
+ depth = Math.Max(1, depth >> 1);
+ }
+ }
+ }
+
+ private int GetBufferDataLength(int length)
+ {
+ if (NeedsD24S8Conversion())
+ {
+ return length * 2;
+ }
+
+ return length;
+ }
+
+ private bool NeedsD24S8Conversion()
+ {
+ return FormatCapabilities.IsD24S8(Info.Format) && VkFormat == VkFormat.D32SfloatS8Uint;
+ }
+
+ public void SetModification(AccessFlags accessFlags, PipelineStageFlags stage)
+ {
+ _lastModificationAccess = accessFlags;
+ _lastModificationStage = stage;
+ }
+
+ public void InsertReadToWriteBarrier(CommandBufferScoped cbs, AccessFlags dstAccessFlags, PipelineStageFlags dstStageFlags)
+ {
+ if (_lastReadAccess != AccessFlags.None)
+ {
+ ImageAspectFlags aspectFlags = Info.Format.ConvertAspectFlags();
+
+ TextureView.InsertImageBarrier(
+ _gd.Api,
+ cbs.CommandBuffer,
+ _imageAuto.Get(cbs).Value,
+ _lastReadAccess,
+ dstAccessFlags,
+ _lastReadStage,
+ dstStageFlags,
+ aspectFlags,
+ 0,
+ 0,
+ _info.GetLayers(),
+ _info.Levels);
+
+ _lastReadAccess = AccessFlags.None;
+ _lastReadStage = PipelineStageFlags.None;
+ }
+ }
+
+ public void InsertWriteToReadBarrier(CommandBufferScoped cbs, AccessFlags dstAccessFlags, PipelineStageFlags dstStageFlags)
+ {
+ _lastReadAccess |= dstAccessFlags;
+ _lastReadStage |= dstStageFlags;
+
+ if (_lastModificationAccess != AccessFlags.None)
+ {
+ ImageAspectFlags aspectFlags = Info.Format.ConvertAspectFlags();
+
+ TextureView.InsertImageBarrier(
+ _gd.Api,
+ cbs.CommandBuffer,
+ _imageAuto.Get(cbs).Value,
+ _lastModificationAccess,
+ dstAccessFlags,
+ _lastModificationStage,
+ dstStageFlags,
+ aspectFlags,
+ 0,
+ 0,
+ _info.GetLayers(),
+ _info.Levels);
+
+ _lastModificationAccess = AccessFlags.None;
+ }
+ }
+
+ public void IncrementViewsCount()
+ {
+ _viewsCount++;
+ }
+
+ public void DecrementViewsCount()
+ {
+ if (--_viewsCount == 0)
+ {
+ _gd.PipelineInternal?.FlushCommandsIfWeightExceeding(_imageAuto, _size);
+
+ Dispose();
+ }
+ }
+
+ public void Dispose()
+ {
+ if (_aliasedStorages != null)
+ {
+ foreach (var storage in _aliasedStorages.Values)
+ {
+ storage.Dispose();
+ }
+
+ _aliasedStorages.Clear();
+ }
+
+ _imageAuto.Dispose();
+ _allocationAuto?.Dispose();
+ _foreignAllocationAuto?.DecrementReferenceCount();
+ _foreignAllocationAuto = null;
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/TextureView.cs b/src/Ryujinx.Graphics.Vulkan/TextureView.cs
new file mode 100644
index 00000000..cd280d5f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/TextureView.cs
@@ -0,0 +1,885 @@
+using Ryujinx.Common.Memory;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using VkBuffer = Silk.NET.Vulkan.Buffer;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class TextureView : ITexture, IDisposable
+ {
+ private readonly VulkanRenderer _gd;
+
+ private readonly Device _device;
+
+ private readonly Auto<DisposableImageView> _imageView;
+ private readonly Auto<DisposableImageView> _imageViewIdentity;
+ private readonly Auto<DisposableImageView> _imageView2dArray;
+ private Dictionary<GAL.Format, TextureView> _selfManagedViews;
+
+ private TextureCreateInfo _info;
+
+ public TextureCreateInfo Info => _info;
+
+ public TextureStorage Storage { get; }
+
+ public int Width => Info.Width;
+ public int Height => Info.Height;
+ public int Layers => Info.GetDepthOrLayers();
+ public int FirstLayer { get; }
+ public int FirstLevel { get; }
+ public float ScaleFactor => Storage.ScaleFactor;
+ public VkFormat VkFormat { get; }
+ public bool Valid { get; private set; }
+
+ public TextureView(
+ VulkanRenderer gd,
+ Device device,
+ TextureCreateInfo info,
+ TextureStorage storage,
+ int firstLayer,
+ int firstLevel)
+ {
+ _gd = gd;
+ _device = device;
+ _info = info;
+ Storage = storage;
+ FirstLayer = firstLayer;
+ FirstLevel = firstLevel;
+
+ storage.IncrementViewsCount();
+
+ gd.Textures.Add(this);
+
+ var format = _gd.FormatCapabilities.ConvertToVkFormat(info.Format);
+ var usage = TextureStorage.GetImageUsageFromFormat(info.Format);
+ var levels = (uint)info.Levels;
+ var layers = (uint)info.GetLayers();
+
+ VkFormat = format;
+
+ var type = info.Target.ConvertView();
+
+ var swizzleR = info.SwizzleR.Convert();
+ var swizzleG = info.SwizzleG.Convert();
+ var swizzleB = info.SwizzleB.Convert();
+ var swizzleA = info.SwizzleA.Convert();
+
+ if (info.Format == GAL.Format.R5G5B5A1Unorm ||
+ info.Format == GAL.Format.R5G5B5X1Unorm ||
+ info.Format == GAL.Format.R5G6B5Unorm)
+ {
+ var temp = swizzleR;
+
+ swizzleR = swizzleB;
+ swizzleB = temp;
+ }
+ else if (VkFormat == VkFormat.R4G4B4A4UnormPack16 || info.Format == GAL.Format.A1B5G5R5Unorm)
+ {
+ var tempB = swizzleB;
+ var tempA = swizzleA;
+
+ swizzleB = swizzleG;
+ swizzleA = swizzleR;
+ swizzleR = tempA;
+ swizzleG = tempB;
+ }
+
+ var componentMapping = new ComponentMapping(swizzleR, swizzleG, swizzleB, swizzleA);
+
+ var aspectFlags = info.Format.ConvertAspectFlags(info.DepthStencilMode);
+ var aspectFlagsDepth = info.Format.ConvertAspectFlags();
+
+ var subresourceRange = new ImageSubresourceRange(aspectFlags, (uint)firstLevel, levels, (uint)firstLayer, layers);
+ var subresourceRangeDepth = new ImageSubresourceRange(aspectFlagsDepth, (uint)firstLevel, levels, (uint)firstLayer, layers);
+
+ unsafe Auto<DisposableImageView> CreateImageView(ComponentMapping cm, ImageSubresourceRange sr, ImageViewType viewType, ImageUsageFlags usageFlags)
+ {
+ var usage = new ImageViewUsageCreateInfo()
+ {
+ SType = StructureType.ImageViewUsageCreateInfo,
+ Usage = usageFlags
+ };
+
+ var imageCreateInfo = new ImageViewCreateInfo()
+ {
+ SType = StructureType.ImageViewCreateInfo,
+ Image = storage.GetImageForViewCreation(),
+ ViewType = viewType,
+ Format = format,
+ Components = cm,
+ SubresourceRange = sr,
+ PNext = &usage
+ };
+
+ gd.Api.CreateImageView(device, imageCreateInfo, null, out var imageView).ThrowOnError();
+ return new Auto<DisposableImageView>(new DisposableImageView(gd.Api, device, imageView), null, storage.GetImage());
+ }
+
+ _imageView = CreateImageView(componentMapping, subresourceRange, type, ImageUsageFlags.SampledBit);
+
+            // Framebuffer attachments and storage images require an identity component mapping.
+ var identityComponentMapping = new ComponentMapping(
+ ComponentSwizzle.R,
+ ComponentSwizzle.G,
+ ComponentSwizzle.B,
+ ComponentSwizzle.A);
+
+ _imageViewIdentity = CreateImageView(identityComponentMapping, subresourceRangeDepth, type, usage);
+
+            // Framebuffer attachments also require 3D textures to be bound as a 2D array.
+ if (info.Target == Target.Texture3D)
+ {
+ if (gd.Capabilities.PortabilitySubset.HasFlag(PortabilitySubsetFlags.No3DImageView))
+ {
+ if (levels == 1 && (info.Format.IsRtColorCompatible() || info.Format.IsDepthOrStencil()))
+ {
+ subresourceRange = new ImageSubresourceRange(aspectFlags, (uint)firstLevel, levels, (uint)firstLayer, 1);
+
+ _imageView2dArray = CreateImageView(identityComponentMapping, subresourceRange, ImageViewType.Type2D, ImageUsageFlags.ColorAttachmentBit);
+ }
+ }
+ else
+ {
+ subresourceRange = new ImageSubresourceRange(aspectFlags, (uint)firstLevel, levels, (uint)firstLayer, (uint)info.Depth);
+
+ _imageView2dArray = CreateImageView(identityComponentMapping, subresourceRange, ImageViewType.Type2DArray, usage);
+ }
+ }
+
+ Valid = true;
+ }
+
+ public Auto<DisposableImage> GetImage()
+ {
+ return Storage.GetImage();
+ }
+
+ public Auto<DisposableImageView> GetImageView()
+ {
+ return _imageView;
+ }
+
+ public Auto<DisposableImageView> GetIdentityImageView()
+ {
+ return _imageViewIdentity;
+ }
+
+ public Auto<DisposableImageView> GetImageViewForAttachment()
+ {
+ return _imageView2dArray ?? _imageViewIdentity;
+ }
+
+ /// <summary>
+ /// Copies all layers/levels of this texture into the destination texture, starting at the
+ /// given destination layer and level. Silently does nothing if either view is no longer valid.
+ /// </summary>
+ public void CopyTo(ITexture destination, int firstLayer, int firstLevel)
+ {
+ var src = this;
+ var dst = (TextureView)destination;
+
+ if (!Valid || !dst.Valid)
+ {
+ return;
+ }
+
+ // Raw copy commands cannot be recorded inside a render pass.
+ _gd.PipelineInternal.EndRenderPass();
+
+ var cbs = _gd.PipelineInternal.CurrentCommandBuffer;
+
+ // NOTE(review): Get(cbs) is called for both images up front even though the helper-shader
+ // branches below don't use the handles — presumably this registers a command buffer
+ // dependency on both textures (see Auto); confirm before reordering.
+ var srcImage = src.GetImage().Get(cbs).Value;
+ var dstImage = dst.GetImage().Get(cbs).Value;
+
+ if (!dst.Info.Target.IsMultisample() && Info.Target.IsMultisample())
+ {
+ // Multisample -> single sample needs a helper shader resolve-style copy.
+ int layers = Math.Min(Info.GetLayers(), dst.Info.GetLayers() - firstLayer);
+ _gd.HelperShader.CopyMSToNonMS(_gd, cbs, src, dst, 0, firstLayer, layers);
+ }
+ else if (dst.Info.Target.IsMultisample() && !Info.Target.IsMultisample())
+ {
+ // Single sample -> multisample also goes through the helper shader.
+ int layers = Math.Min(Info.GetLayers(), dst.Info.GetLayers() - firstLayer);
+ _gd.HelperShader.CopyNonMSToMS(_gd, cbs, src, dst, 0, firstLayer, layers);
+ }
+ else if (dst.Info.BytesPerPixel != Info.BytesPerPixel)
+ {
+ // Differing texel sizes can't be copied directly; use the incompatible-format helper.
+ int layers = Math.Min(Info.GetLayers(), dst.Info.GetLayers() - firstLayer);
+ int levels = Math.Min(Info.Levels, dst.Info.Levels - firstLevel);
+ _gd.HelperShader.CopyIncompatibleFormats(_gd, cbs, src, dst, 0, firstLayer, 0, firstLevel, layers, levels);
+ }
+ else
+ {
+ // Plain vkCmdCopyImage path.
+ TextureCopy.Copy(
+ _gd.Api,
+ cbs.CommandBuffer,
+ srcImage,
+ dstImage,
+ src.Info,
+ dst.Info,
+ src.FirstLayer,
+ dst.FirstLayer,
+ src.FirstLevel,
+ dst.FirstLevel,
+ 0,
+ firstLayer,
+ 0,
+ firstLevel);
+ }
+ }
+
+ /// <summary>
+ /// Copies a single layer/level of this texture to a single layer/level of the destination.
+ /// Silently does nothing if either view is no longer valid.
+ /// </summary>
+ public void CopyTo(ITexture destination, int srcLayer, int dstLayer, int srcLevel, int dstLevel)
+ {
+ var src = this;
+ var dst = (TextureView)destination;
+
+ if (!Valid || !dst.Valid)
+ {
+ return;
+ }
+
+ // Raw copy commands cannot be recorded inside a render pass.
+ _gd.PipelineInternal.EndRenderPass();
+
+ var cbs = _gd.PipelineInternal.CurrentCommandBuffer;
+
+ // NOTE(review): fetched up front even on helper-shader paths — presumably to register the
+ // command buffer dependency on both textures; confirm before reordering.
+ var srcImage = src.GetImage().Get(cbs).Value;
+ var dstImage = dst.GetImage().Get(cbs).Value;
+
+ if (!dst.Info.Target.IsMultisample() && Info.Target.IsMultisample())
+ {
+ _gd.HelperShader.CopyMSToNonMS(_gd, cbs, src, dst, srcLayer, dstLayer, 1);
+ }
+ else if (dst.Info.Target.IsMultisample() && !Info.Target.IsMultisample())
+ {
+ _gd.HelperShader.CopyNonMSToMS(_gd, cbs, src, dst, srcLayer, dstLayer, 1);
+ }
+ else if (dst.Info.BytesPerPixel != Info.BytesPerPixel)
+ {
+ _gd.HelperShader.CopyIncompatibleFormats(_gd, cbs, src, dst, srcLayer, dstLayer, srcLevel, dstLevel, 1, 1);
+ }
+ else
+ {
+ // Plain vkCmdCopyImage path, restricted to one layer and one level.
+ TextureCopy.Copy(
+ _gd.Api,
+ cbs.CommandBuffer,
+ srcImage,
+ dstImage,
+ src.Info,
+ dst.Info,
+ src.FirstLayer,
+ dst.FirstLayer,
+ src.FirstLevel,
+ dst.FirstLevel,
+ srcLayer,
+ dstLayer,
+ srcLevel,
+ dstLevel,
+ 1,
+ 1);
+ }
+ }
+
+ /// <summary>
+ /// Blits a region of this texture to a region of the destination, optionally with linear filtering.
+ /// </summary>
+ public void CopyTo(ITexture destination, Extents2D srcRegion, Extents2D dstRegion, bool linearFilter)
+ {
+ var dst = (TextureView)destination;
+
+ if (_gd.CommandBufferPool.OwnedByCurrentThread)
+ {
+ _gd.PipelineInternal.EndRenderPass();
+
+ var cbs = _gd.PipelineInternal.CurrentCommandBuffer;
+
+ CopyToImpl(cbs, dst, srcRegion, dstRegion, linearFilter);
+ }
+ else
+ {
+ // Caller does not own the main command buffer pool (background thread):
+ // record the blit on a rented background command buffer instead.
+ var cbp = _gd.BackgroundResources.Get().GetPool();
+
+ using var cbs = cbp.Rent();
+
+ CopyToImpl(cbs, dst, srcRegion, dstRegion, linearFilter);
+ }
+ }
+
+ /// <summary>
+ /// Performs the actual blit, choosing between vkCmdCopyImage, vkCmdBlitImage,
+ /// a depth-stencil resolve, a helper-shader blit, or a blit through aliased color storage,
+ /// depending on formats, regions and driver capabilities.
+ /// </summary>
+ private void CopyToImpl(CommandBufferScoped cbs, TextureView dst, Extents2D srcRegion, Extents2D dstRegion, bool linearFilter)
+ {
+ var src = this;
+
+ var srcFormat = GetCompatibleGalFormat(src.Info.Format);
+ var dstFormat = GetCompatibleGalFormat(dst.Info.Format);
+
+ bool srcUsesStorageFormat = src.VkFormat == src.Storage.VkFormat;
+ bool dstUsesStorageFormat = dst.VkFormat == dst.Storage.VkFormat;
+
+ int layers = Math.Min(dst.Info.GetDepthOrLayers(), src.Info.GetDepthOrLayers());
+ int levels = Math.Min(dst.Info.Levels, src.Info.Levels);
+
+ if (srcUsesStorageFormat && dstUsesStorageFormat)
+ {
+ // Fast path: full-surface, same-size, same-format transfer needs no scaling,
+ // so a raw image copy (or resolve) can be used instead of a blit.
+ if ((srcRegion.X1 | dstRegion.X1) == 0 &&
+ (srcRegion.Y1 | dstRegion.Y1) == 0 &&
+ srcRegion.X2 == src.Width &&
+ srcRegion.Y2 == src.Height &&
+ dstRegion.X2 == dst.Width &&
+ dstRegion.Y2 == dst.Height &&
+ src.Width == dst.Width &&
+ src.Height == dst.Height &&
+ src.VkFormat == dst.VkFormat)
+ {
+ if (src.Info.Samples > 1 && src.Info.Samples != dst.Info.Samples && src.Info.Format.IsDepthOrStencil())
+ {
+ // CmdResolveImage does not support depth-stencil resolve, so we need to use an alternative path
+ // for those textures.
+ TextureCopy.ResolveDepthStencil(_gd, _device, cbs, src, dst);
+ }
+ else
+ {
+ TextureCopy.Copy(
+ _gd.Api,
+ cbs.CommandBuffer,
+ src.GetImage().Get(cbs).Value,
+ dst.GetImage().Get(cbs).Value,
+ src.Info,
+ dst.Info,
+ src.FirstLayer,
+ dst.FirstLayer,
+ src.FirstLevel,
+ dst.FirstLevel,
+ 0,
+ 0,
+ 0,
+ 0,
+ layers,
+ levels);
+ }
+
+ return;
+ }
+ else if (_gd.FormatCapabilities.OptimalFormatSupports(FormatFeatureFlags.BlitSrcBit, srcFormat) &&
+ _gd.FormatCapabilities.OptimalFormatSupports(FormatFeatureFlags.BlitDstBit, dstFormat))
+ {
+ // Both formats support blitting, use vkCmdBlitImage directly.
+ TextureCopy.Blit(
+ _gd.Api,
+ cbs.CommandBuffer,
+ src.GetImage().Get(cbs).Value,
+ dst.GetImage().Get(cbs).Value,
+ src.Info,
+ dst.Info,
+ srcRegion,
+ dstRegion,
+ src.FirstLayer,
+ dst.FirstLayer,
+ src.FirstLevel,
+ dst.FirstLevel,
+ layers,
+ levels,
+ linearFilter);
+
+ return;
+ }
+ }
+
+ bool isDepthOrStencil = dst.Info.Format.IsDepthOrStencil();
+
+ // Safe (but slower) helper-shader blit, used on AMD and MoltenVK (see VulkanConfiguration).
+ if (VulkanConfiguration.UseSlowSafeBlitOnAmd && (_gd.Vendor == Vendor.Amd || _gd.IsMoltenVk))
+ {
+ _gd.HelperShader.Blit(
+ _gd,
+ src,
+ dst,
+ srcRegion,
+ dstRegion,
+ layers,
+ levels,
+ isDepthOrStencil,
+ linearFilter);
+
+ return;
+ }
+
+ Auto<DisposableImage> srcImage;
+ Auto<DisposableImage> dstImage;
+
+ // Last resort: blit through aliased storage images so both sides have a blittable
+ // color representation. "Unsafe" per the helper names — review alias lifetime if touched.
+ if (isDepthOrStencil)
+ {
+ srcImage = src.Storage.CreateAliasedColorForDepthStorageUnsafe(srcFormat).GetImage();
+ dstImage = dst.Storage.CreateAliasedColorForDepthStorageUnsafe(dstFormat).GetImage();
+ }
+ else
+ {
+ srcImage = src.Storage.CreateAliasedStorageUnsafe(srcFormat).GetImage();
+ dstImage = dst.Storage.CreateAliasedStorageUnsafe(dstFormat).GetImage();
+ }
+
+ TextureCopy.Blit(
+ _gd.Api,
+ cbs.CommandBuffer,
+ srcImage.Get(cbs).Value,
+ dstImage.Get(cbs).Value,
+ src.Info,
+ dst.Info,
+ srcRegion,
+ dstRegion,
+ src.FirstLayer,
+ dst.FirstLayer,
+ src.FirstLevel,
+ dst.FirstLevel,
+ layers,
+ levels,
+ linearFilter,
+ ImageAspectFlags.ColorBit,
+ ImageAspectFlags.ColorBit);
+ }
+
+ /// <summary>
+ /// Records a pipeline barrier for the given image subresource range.
+ /// Old and new layout are both General, so this only orders access, it performs no layout transition.
+ /// </summary>
+ public static unsafe void InsertImageBarrier(
+ Vk api,
+ CommandBuffer commandBuffer,
+ Image image,
+ AccessFlags srcAccessMask,
+ AccessFlags dstAccessMask,
+ PipelineStageFlags srcStageMask,
+ PipelineStageFlags dstStageMask,
+ ImageAspectFlags aspectFlags,
+ int firstLayer,
+ int firstLevel,
+ int layers,
+ int levels)
+ {
+ ImageMemoryBarrier memoryBarrier = new ImageMemoryBarrier()
+ {
+ SType = StructureType.ImageMemoryBarrier,
+ SrcAccessMask = srcAccessMask,
+ DstAccessMask = dstAccessMask,
+ // No queue family ownership transfer.
+ SrcQueueFamilyIndex = Vk.QueueFamilyIgnored,
+ DstQueueFamilyIndex = Vk.QueueFamilyIgnored,
+ Image = image,
+ OldLayout = ImageLayout.General,
+ NewLayout = ImageLayout.General,
+ SubresourceRange = new ImageSubresourceRange(aspectFlags, (uint)firstLevel, (uint)levels, (uint)firstLayer, (uint)layers)
+ };
+
+ api.CmdPipelineBarrier(
+ commandBuffer,
+ srcStageMask,
+ dstStageMask,
+ 0,
+ 0,
+ null,
+ 0,
+ null,
+ 1,
+ memoryBarrier);
+ }
+
+ /// <summary>
+ /// Gets a view of this texture reinterpreted with a different format.
+ /// Returns this view if the format already matches; otherwise returns a cached
+ /// (or newly created and cached) self-managed view.
+ /// </summary>
+ public TextureView GetView(GAL.Format format)
+ {
+ if (format == Info.Format)
+ {
+ return this;
+ }
+
+ if (_selfManagedViews != null && _selfManagedViews.TryGetValue(format, out var view))
+ {
+ return view;
+ }
+
+ // Same parameters as this view, only the format differs.
+ view = CreateViewImpl(new TextureCreateInfo(
+ Info.Width,
+ Info.Height,
+ Info.Depth,
+ Info.Levels,
+ Info.Samples,
+ Info.BlockWidth,
+ Info.BlockHeight,
+ Info.BytesPerPixel,
+ format,
+ Info.DepthStencilMode,
+ Info.Target,
+ Info.SwizzleR,
+ Info.SwizzleG,
+ Info.SwizzleB,
+ Info.SwizzleA), 0, 0);
+
+ // Cached views are owned by this view and disposed with it (see Dispose).
+ (_selfManagedViews ??= new Dictionary<GAL.Format, TextureView>()).Add(format, view);
+
+ return view;
+ }
+
+ /// <summary>Creates a child view over this view's storage (GAL entry point).</summary>
+ public ITexture CreateView(TextureCreateInfo info, int firstLayer, int firstLevel) => CreateViewImpl(info, firstLayer, firstLevel);
+
+ /// <summary>Creates a child view; the layer/level offsets are accumulated onto this view's own offsets.</summary>
+ public TextureView CreateViewImpl(TextureCreateInfo info, int firstLayer, int firstLevel) =>
+ new TextureView(_gd, _device, info, Storage, FirstLayer + firstLayer, FirstLevel + firstLevel);
+
+ /// <summary>
+ /// Reads back a width x height region at (x, y) of layer/level 0 into a new byte array.
+ /// Blocks until the GPU to host copy has completed.
+ /// </summary>
+ public byte[] GetData(int x, int y, int width, int height)
+ {
+ int size = width * height * Info.BytesPerPixel;
+ using var bufferHolder = _gd.BufferManager.Create(_gd, size);
+
+ using (var cbs = _gd.CommandBufferPool.Rent())
+ {
+ var buffer = bufferHolder.GetBuffer(cbs.CommandBuffer).Get(cbs).Value;
+ var image = GetImage().Get(cbs).Value;
+
+ // to: true = image -> buffer direction.
+ CopyFromOrToBuffer(cbs.CommandBuffer, buffer, image, size, true, 0, 0, x, y, width, height);
+ }
+
+ // Wait for the copy to finish before reading the staging buffer back.
+ bufferHolder.WaitForFences();
+ byte[] bitmap = new byte[size];
+ GetDataFromBuffer(bufferHolder.GetDataStorage(0, size), size, Span<byte>.Empty).CopyTo(bitmap);
+ return bitmap;
+ }
+
+ /// <summary>
+ /// Reads back all layers and levels of the texture. Uses the main command buffer pool
+ /// when the calling thread owns it, otherwise a background pool.
+ /// </summary>
+ public PinnedSpan<byte> GetData()
+ {
+ BackgroundResource resources = _gd.BackgroundResources.Get();
+
+ if (_gd.CommandBufferPool.OwnedByCurrentThread)
+ {
+ _gd.FlushAllCommands();
+
+ return PinnedSpan<byte>.UnsafeFromSpan(GetData(_gd.CommandBufferPool, resources.GetFlushBuffer()));
+ }
+ else
+ {
+ return PinnedSpan<byte>.UnsafeFromSpan(GetData(resources.GetPool(), resources.GetFlushBuffer()));
+ }
+ }
+
+ /// <summary>
+ /// Reads back a single layer/level of the texture. Uses the main command buffer pool
+ /// when the calling thread owns it, otherwise a background pool.
+ /// </summary>
+ public PinnedSpan<byte> GetData(int layer, int level)
+ {
+ BackgroundResource resources = _gd.BackgroundResources.Get();
+
+ if (_gd.CommandBufferPool.OwnedByCurrentThread)
+ {
+ _gd.FlushAllCommands();
+
+ return PinnedSpan<byte>.UnsafeFromSpan(GetData(_gd.CommandBufferPool, resources.GetFlushBuffer(), layer, level));
+ }
+ else
+ {
+ return PinnedSpan<byte>.UnsafeFromSpan(GetData(resources.GetPool(), resources.GetFlushBuffer(), layer, level));
+ }
+ }
+
+ /// <summary>Flushes all mip levels through the persistent flush buffer and converts the data if required.</summary>
+ private ReadOnlySpan<byte> GetData(CommandBufferPool cbp, PersistentFlushBuffer flushBuffer)
+ {
+ // Sum the size of every mip level of the texture.
+ int total = 0;
+ int level = 0;
+
+ while (level < Info.Levels)
+ {
+ total += Info.GetMipSize(level++);
+ }
+
+ // Account for any host-side format conversion (D24S8 -> D32FS8).
+ int size = GetBufferDataLength(total);
+
+ Span<byte> result = flushBuffer.GetTextureData(cbp, this, size);
+ return GetDataFromBuffer(result, size, result);
+ }
+
+ /// <summary>Flushes one layer/level through the persistent flush buffer and converts the data if required.</summary>
+ private ReadOnlySpan<byte> GetData(CommandBufferPool cbp, PersistentFlushBuffer flushBuffer, int layer, int level)
+ {
+ int mipSize = Info.GetMipSize(level);
+ int size = GetBufferDataLength(mipSize);
+
+ Span<byte> result = flushBuffer.GetTextureData(cbp, this, size, layer, level);
+ return GetDataFromBuffer(result, size, result);
+ }
+
+ /// <summary>Uploads data covering every layer and level of the texture.</summary>
+ public void SetData(SpanOrArray<byte> data) => SetData(data, 0, 0, Info.GetLayers(), Info.Levels, singleSlice: false);
+
+ /// <summary>Uploads data for a single layer and level.</summary>
+ public void SetData(SpanOrArray<byte> data, int layer, int level) => SetData(data, layer, level, 1, 1, singleSlice: true);
+
+ /// <summary>Uploads data for a sub-region of a single layer and level.</summary>
+ public void SetData(SpanOrArray<byte> data, int layer, int level, Rectangle<int> region) => SetData(data, layer, level, 1, 1, singleSlice: true, region);
+
+ /// <summary>
+ /// Uploads texture data through a staging buffer, recording the buffer-to-image copy
+ /// either inline on the current command buffer or on the preload command buffer.
+ /// </summary>
+ private void SetData(ReadOnlySpan<byte> data, int layer, int level, int layers, int levels, bool singleSlice, Rectangle<int>? region = null)
+ {
+ int bufferDataLength = GetBufferDataLength(data.Length);
+
+ using var bufferHolder = _gd.BufferManager.Create(_gd, bufferDataLength);
+
+ Auto<DisposableImage> imageAuto = GetImage();
+
+ // Load texture data inline if the texture has been used on the current command buffer.
+
+ bool loadInline = Storage.HasCommandBufferDependency(_gd.PipelineInternal.CurrentCommandBuffer);
+
+ var cbs = loadInline ? _gd.PipelineInternal.CurrentCommandBuffer : _gd.PipelineInternal.GetPreloadCommandBuffer();
+
+ if (loadInline)
+ {
+ // Copies cannot be recorded inside an active render pass.
+ _gd.PipelineInternal.EndRenderPass();
+ }
+
+ // Performs D24S8 -> D32FS8 conversion if required, otherwise a straight copy.
+ CopyDataToBuffer(bufferHolder.GetDataStorage(0, bufferDataLength), data);
+
+ var buffer = bufferHolder.GetBuffer(cbs.CommandBuffer).Get(cbs).Value;
+ var image = imageAuto.Get(cbs).Value;
+
+ if (region.HasValue)
+ {
+ CopyFromOrToBuffer(
+ cbs.CommandBuffer,
+ buffer,
+ image,
+ bufferDataLength,
+ false,
+ layer,
+ level,
+ region.Value.X,
+ region.Value.Y,
+ region.Value.Width,
+ region.Value.Height);
+ }
+ else
+ {
+ CopyFromOrToBuffer(cbs.CommandBuffer, buffer, image, bufferDataLength, false, layer, level, layers, levels, singleSlice);
+ }
+ }
+
+ /// <summary>
+ /// Staging buffer length needed for the given texture data length; doubled when the
+ /// D24S8 -> D32FS8 conversion applies (each texel grows from 4 to 8 bytes).
+ /// </summary>
+ private int GetBufferDataLength(int length) => NeedsD24S8Conversion() ? length * 2 : length;
+
+ /// <summary>Returns D32FloatS8Uint when the D24S8 conversion applies, otherwise the format unchanged.</summary>
+ private GAL.Format GetCompatibleGalFormat(GAL.Format format) => NeedsD24S8Conversion() ? GAL.Format.D32FloatS8Uint : format;
+
+ /// <summary>Writes input data into the staging buffer, converting D24S8 to D32FS8 when needed.</summary>
+ private void CopyDataToBuffer(Span<byte> storage, ReadOnlySpan<byte> input)
+ {
+ if (NeedsD24S8Conversion())
+ {
+ FormatConverter.ConvertD24S8ToD32FS8(storage, input);
+ }
+ else
+ {
+ input.CopyTo(storage);
+ }
+ }
+
+ /// <summary>
+ /// Converts staging buffer data back to the guest format when needed.
+ /// When no conversion applies, the storage span is returned as-is; an empty
+ /// output span requests allocation of a fresh destination buffer.
+ /// </summary>
+ private ReadOnlySpan<byte> GetDataFromBuffer(ReadOnlySpan<byte> storage, int size, Span<byte> output)
+ {
+ if (!NeedsD24S8Conversion())
+ {
+ return storage;
+ }
+
+ Span<byte> target = output.IsEmpty ? new byte[GetBufferDataLength(size)] : output;
+
+ FormatConverter.ConvertD32FS8ToD24S8(target, storage);
+ return target;
+ }
+
+ /// <summary>True when the guest format is D24S8 but the host image was created as D32FS8 (host lacks D24S8).</summary>
+ private bool NeedsD24S8Conversion() => FormatCapabilities.IsD24S8(Info.Format) && VkFormat == VkFormat.D32SfloatS8Uint;
+
+ /// <summary>
+ /// Copies between the image and a buffer over a range of mip levels.
+ /// <paramref name="to"/> selects the direction: true = image to buffer, false = buffer to image.
+ /// </summary>
+ public void CopyFromOrToBuffer(
+ CommandBuffer commandBuffer,
+ VkBuffer buffer,
+ Image image,
+ int size,
+ bool to,
+ int dstLayer,
+ int dstLevel,
+ int dstLayers,
+ int dstLevels,
+ bool singleSlice)
+ {
+ bool is3D = Info.Target == Target.Texture3D;
+ int width = Math.Max(1, Info.Width >> dstLevel);
+ int height = Math.Max(1, Info.Height >> dstLevel);
+ // 3D textures address slices through the copy depth rather than array layers.
+ int depth = is3D && !singleSlice ? Math.Max(1, Info.Depth >> dstLevel) : 1;
+ int layer = is3D ? 0 : dstLayer;
+ int layers = dstLayers;
+ int levels = dstLevels;
+
+ int offset = 0;
+
+ for (int level = 0; level < levels; level++)
+ {
+ int mipSize = GetBufferDataLength(Info.GetMipSize2D(dstLevel + level) * dstLayers);
+
+ int endOffset = offset + mipSize;
+
+ // Stop once the buffer can no longer hold a full mip level.
+ if ((uint)endOffset > (uint)size)
+ {
+ break;
+ }
+
+ int rowLength = (Info.GetMipStride(dstLevel + level) / Info.BytesPerPixel) * Info.BlockWidth;
+
+ var aspectFlags = Info.Format.ConvertAspectFlags();
+
+ // Combined depth-stencil copies target only the depth aspect here.
+ if (aspectFlags == (ImageAspectFlags.DepthBit | ImageAspectFlags.StencilBit))
+ {
+ aspectFlags = ImageAspectFlags.DepthBit;
+ }
+
+ var sl = new ImageSubresourceLayers(
+ aspectFlags,
+ (uint)(FirstLevel + dstLevel + level),
+ (uint)(FirstLayer + layer),
+ (uint)layers);
+
+ var extent = new Extent3D((uint)width, (uint)height, (uint)depth);
+
+ int z = is3D ? dstLayer : 0;
+
+ var region = new BufferImageCopy(
+ (ulong)offset,
+ (uint)AlignUpNpot(rowLength, Info.BlockWidth),
+ (uint)AlignUpNpot(height, Info.BlockHeight),
+ sl,
+ new Offset3D(0, 0, z),
+ extent);
+
+ if (to)
+ {
+ _gd.Api.CmdCopyImageToBuffer(commandBuffer, image, ImageLayout.General, buffer, 1, region);
+ }
+ else
+ {
+ _gd.Api.CmdCopyBufferToImage(commandBuffer, buffer, image, ImageLayout.General, 1, region);
+ }
+
+ offset += mipSize;
+
+ // Halve the extents for the next mip level.
+ width = Math.Max(1, width >> 1);
+ height = Math.Max(1, height >> 1);
+
+ if (Info.Target == Target.Texture3D)
+ {
+ depth = Math.Max(1, depth >> 1);
+ }
+ }
+ }
+
+ /// <summary>
+ /// Copies a 2D region of a single layer/level between the image and a buffer.
+ /// <paramref name="to"/> selects the direction: true = image to buffer, false = buffer to image.
+ /// </summary>
+ private void CopyFromOrToBuffer(
+ CommandBuffer commandBuffer,
+ VkBuffer buffer,
+ Image image,
+ int size,
+ bool to,
+ int dstLayer,
+ int dstLevel,
+ int x,
+ int y,
+ int width,
+ int height)
+ {
+ var aspectFlags = Info.Format.ConvertAspectFlags();
+
+ // Combined depth-stencil copies target only the depth aspect here.
+ if (aspectFlags == (ImageAspectFlags.DepthBit | ImageAspectFlags.StencilBit))
+ {
+ aspectFlags = ImageAspectFlags.DepthBit;
+ }
+
+ var sl = new ImageSubresourceLayers(aspectFlags, (uint)(FirstLevel + dstLevel), (uint)(FirstLayer + dstLayer), 1);
+
+ var extent = new Extent3D((uint)width, (uint)height, 1);
+
+ int rowLengthAlignment = Info.BlockWidth;
+
+ // We expect all data being written into the texture to have a stride aligned by 4.
+ if (!to && Info.BytesPerPixel < 4)
+ {
+ rowLengthAlignment = 4 / Info.BytesPerPixel;
+ }
+
+ var region = new BufferImageCopy(
+ 0,
+ (uint)AlignUpNpot(width, rowLengthAlignment),
+ (uint)AlignUpNpot(height, Info.BlockHeight),
+ sl,
+ new Offset3D(x, y, 0),
+ extent);
+
+ if (to)
+ {
+ _gd.Api.CmdCopyImageToBuffer(commandBuffer, image, ImageLayout.General, buffer, 1, region);
+ }
+ else
+ {
+ _gd.Api.CmdCopyBufferToImage(commandBuffer, buffer, image, ImageLayout.General, 1, region);
+ }
+ }
+
+ /// <summary>Rounds size up to the next multiple of alignment; works for non-power-of-two alignments.</summary>
+ private static int AlignUpNpot(int size, int alignment)
+ {
+ int remainder = size % alignment;
+
+ return remainder == 0 ? size : size + (alignment - remainder);
+ }
+
+ /// <summary>Buffer-backed storage is not supported for Vulkan texture views.</summary>
+ public void SetStorage(BufferRange buffer) => throw new NotImplementedException();
+
+ /// <summary>
+ /// Disposes the image views owned by this view and releases its reference on the storage.
+ /// The Textures set removal guards against disposing the same view twice.
+ /// </summary>
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ // Copy/readback methods check Valid and become no-ops from here on.
+ Valid = false;
+
+ if (_gd.Textures.Remove(this))
+ {
+ _imageView.Dispose();
+ _imageViewIdentity.Dispose();
+ _imageView2dArray?.Dispose();
+
+ Storage.DecrementViewsCount();
+ }
+ }
+ }
+
+ /// <summary>Disposes this view, including any format-reinterpreting views created by GetView.</summary>
+ public void Dispose()
+ {
+ // Self-managed views are owned by this view, so they are torn down first.
+ if (_selfManagedViews != null)
+ {
+ foreach (var view in _selfManagedViews.Values)
+ {
+ view.Dispose();
+ }
+
+ _selfManagedViews = null;
+ }
+
+ Dispose(true);
+ }
+
+ /// <summary>Releases the view (GAL entry point); equivalent to disposing it.</summary>
+ public void Release() => Dispose();
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/Vendor.cs b/src/Ryujinx.Graphics.Vulkan/Vendor.cs
new file mode 100644
index 00000000..5e0290c0
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Vendor.cs
@@ -0,0 +1,62 @@
+using System.Text.RegularExpressions;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>GPU vendors recognized by the backend, mapped from PCI vendor IDs by VendorUtils.FromId.</summary>
+ enum Vendor
+ {
+ Amd,
+ ImgTec,
+ Intel,
+ Nvidia,
+ ARM,
+ Broadcom,
+ Qualcomm,
+ Apple,
+ Unknown
+ }
+
+ static partial class VendorUtils
+ {
+ // Matches Radeon GPU model names: optional HD/R5/R7/R9/RX prefix followed by
+ // (M)200-600 or 7000-8000 series numbers, or Fury/Nano/Pro Duo.
+ [GeneratedRegex("Radeon (((HD|R(5|7|9|X)) )?((M?[2-6]\\d{2}(\\D|$))|([7-8]\\d{3}(\\D|$))|Fury|Nano))|(Pro Duo)")]
+ public static partial Regex AmdGcnRegex();
+
+ /// <summary>Maps a PCI vendor ID to a known GPU vendor.</summary>
+ public static Vendor FromId(uint id)
+ {
+ switch (id)
+ {
+ case 0x1002: return Vendor.Amd;
+ case 0x1010: return Vendor.ImgTec;
+ case 0x106B: return Vendor.Apple;
+ case 0x10DE: return Vendor.Nvidia;
+ case 0x13B5: return Vendor.ARM;
+ case 0x14E4: return Vendor.Broadcom;
+ case 0x8086: return Vendor.Intel;
+ case 0x5143: return Vendor.Qualcomm;
+ default: return Vendor.Unknown;
+ }
+ }
+
+ /// <summary>Gets a display name for a vendor ID; unknown IDs are formatted as hexadecimal.</summary>
+ public static string GetNameFromId(uint id)
+ {
+ switch (id)
+ {
+ case 0x1002: return "AMD";
+ case 0x1010: return "ImgTec";
+ case 0x106B: return "Apple";
+ case 0x10DE: return "NVIDIA";
+ case 0x13B5: return "ARM";
+ case 0x14E4: return "Broadcom";
+ case 0x1AE0: return "Google";
+ case 0x5143: return "Qualcomm";
+ case 0x8086: return "Intel";
+ case 0x10001: return "Vivante";
+ case 0x10002: return "VeriSilicon";
+ case 0x10003: return "Kazan";
+ case 0x10004: return "Codeplay Software Ltd.";
+ case 0x10005: return "Mesa";
+ case 0x10006: return "PoCL";
+ default: return $"0x{id:X}";
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/VertexBufferState.cs b/src/Ryujinx.Graphics.Vulkan/VertexBufferState.cs
new file mode 100644
index 00000000..c4856019
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/VertexBufferState.cs
@@ -0,0 +1,153 @@
+using BufferHandle = Ryujinx.Graphics.GAL.BufferHandle;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Tracks one bound vertex buffer: either a concrete Auto buffer (reference counted)
+ /// or a GAL handle that is resolved (and possibly re-strided) at bind time.
+ /// </summary>
+ internal struct VertexBufferState
+ {
+ public static VertexBufferState Null => new VertexBufferState(null, 0, 0, 0);
+
+ private readonly int _offset;
+ private readonly int _size;
+ private readonly int _stride;
+
+ private readonly BufferHandle _handle;
+ private Auto<DisposableBuffer> _buffer;
+
+ // Index into PipelineState.Internal.VertexBindingDescriptions updated on bind.
+ internal readonly int DescriptorIndex;
+ // Smallest scalar alignment required by the attributes reading this buffer.
+ internal int AttributeScalarAlignment;
+
+ /// <summary>State backed by a concrete buffer; takes a reference on it.</summary>
+ public VertexBufferState(Auto<DisposableBuffer> buffer, int descriptorIndex, int offset, int size, int stride = 0)
+ {
+ _buffer = buffer;
+ _handle = BufferHandle.Null;
+
+ _offset = offset;
+ _size = size;
+ _stride = stride;
+
+ DescriptorIndex = descriptorIndex;
+ AttributeScalarAlignment = 1;
+
+ buffer?.IncrementReferenceCount();
+ }
+
+ /// <summary>State backed by a GAL handle, resolved on each bind.</summary>
+ public VertexBufferState(BufferHandle handle, int descriptorIndex, int offset, int size, int stride = 0)
+ {
+ // This buffer state may be rewritten at bind time, so it must be retrieved on bind.
+
+ _buffer = null;
+ _handle = handle;
+
+ _offset = offset;
+ _size = size;
+ _stride = stride;
+
+ DescriptorIndex = descriptorIndex;
+ AttributeScalarAlignment = 1;
+ }
+
+ /// <summary>
+ /// Binds the vertex buffer on the command buffer, generating a re-strided copy when the
+ /// device requires aligned strides, and patches the stride in the pipeline state.
+ /// </summary>
+ public void BindVertexBuffer(VulkanRenderer gd, CommandBufferScoped cbs, uint binding, ref PipelineState state)
+ {
+ var autoBuffer = _buffer;
+
+ if (_handle != BufferHandle.Null)
+ {
+ // May need to restride the vertex buffer.
+
+ if (gd.NeedsVertexBufferAlignment(AttributeScalarAlignment, out int alignment) && (_stride % alignment) != 0)
+ {
+ autoBuffer = gd.BufferManager.GetAlignedVertexBuffer(cbs, _handle, _offset, _size, _stride, alignment);
+
+ if (autoBuffer != null)
+ {
+ // Round the stride up to the alignment and scale the size accordingly.
+ int stride = (_stride + (alignment - 1)) & -alignment;
+ int newSize = (_size / _stride) * stride;
+
+ var buffer = autoBuffer.Get(cbs, 0, newSize).Value;
+
+ if (gd.Capabilities.SupportsExtendedDynamicState)
+ {
+ gd.ExtendedDynamicStateApi.CmdBindVertexBuffers2(
+ cbs.CommandBuffer,
+ binding,
+ 1,
+ buffer,
+ 0,
+ (ulong)newSize,
+ (ulong)stride);
+ }
+ else
+ {
+ gd.Api.CmdBindVertexBuffers(cbs.CommandBuffer, binding, 1, buffer, 0);
+ }
+
+ _buffer = autoBuffer;
+
+ state.Internal.VertexBindingDescriptions[DescriptorIndex].Stride = (uint)stride;
+ }
+
+ return;
+ }
+ else
+ {
+ autoBuffer = gd.BufferManager.GetBuffer(cbs.CommandBuffer, _handle, false, out int size);
+
+ // The original stride must be reapplied in case it was rewritten.
+ state.Internal.VertexBindingDescriptions[DescriptorIndex].Stride = (uint)_stride;
+
+ if (_offset >= size)
+ {
+ autoBuffer = null;
+ }
+ }
+ }
+
+ if (autoBuffer != null)
+ {
+ var buffer = autoBuffer.Get(cbs, _offset, _size).Value;
+
+ if (gd.Capabilities.SupportsExtendedDynamicState)
+ {
+ gd.ExtendedDynamicStateApi.CmdBindVertexBuffers2(
+ cbs.CommandBuffer,
+ binding,
+ 1,
+ buffer,
+ (ulong)_offset,
+ (ulong)_size,
+ (ulong)_stride);
+ }
+ else
+ {
+ gd.Api.CmdBindVertexBuffers(cbs.CommandBuffer, binding, 1, buffer, (ulong)_offset);
+ }
+ }
+ }
+
+ /// <summary>True when this state is currently bound to the given buffer.</summary>
+ public bool BoundEquals(Auto<DisposableBuffer> buffer)
+ {
+ return _buffer == buffer;
+ }
+
+ /// <summary>Replaces the tracked buffer, transferring the reference count from one to the other.</summary>
+ public void Swap(Auto<DisposableBuffer> from, Auto<DisposableBuffer> to)
+ {
+ if (_buffer == from)
+ {
+ _buffer.DecrementReferenceCount();
+ to.IncrementReferenceCount();
+
+ _buffer = to;
+ }
+ }
+
+ public void Dispose()
+ {
+ // Only dispose if this buffer is not refetched on each bind.
+
+ if (_handle == BufferHandle.Null)
+ {
+ _buffer?.DecrementReferenceCount();
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/VulkanConfiguration.cs b/src/Ryujinx.Graphics.Vulkan/VulkanConfiguration.cs
new file mode 100644
index 00000000..752d4f7c
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/VulkanConfiguration.cs
@@ -0,0 +1,12 @@
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>Compile-time feature switches for the Vulkan backend.</summary>
+ static class VulkanConfiguration
+ {
+ public const bool UseFastBufferUpdates = true;
+ // Route blits on AMD and MoltenVK through the slower helper-shader path (see TextureView.CopyToImpl).
+ public const bool UseSlowSafeBlitOnAmd = true;
+ public const bool UsePushDescriptors = false;
+
+ // Force* flags presumably disable format support to exercise fallback paths — confirm at usage sites.
+ public const bool ForceD24S8Unsupported = false;
+ public const bool ForceRGB16IntFloatUnsupported = false;
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/VulkanDebugMessenger.cs b/src/Ryujinx.Graphics.Vulkan/VulkanDebugMessenger.cs
new file mode 100644
index 00000000..7e39a251
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/VulkanDebugMessenger.cs
@@ -0,0 +1,153 @@
+using Ryujinx.Common.Configuration;
+using Ryujinx.Common.Logging;
+using Ryujinx.Common.Utilities;
+using Silk.NET.Vulkan;
+using Silk.NET.Vulkan.Extensions.EXT;
+using System;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Creates and owns a VK_EXT_debug_utils messenger that forwards Vulkan validation
+ /// messages to the Ryujinx logger, filtered by the configured debug level.
+ /// </summary>
+ class VulkanDebugMessenger : IDisposable
+ {
+ // Known-noise validation messages; any message containing one of these IDs is dropped.
+ // readonly: this filter list is fixed for the process lifetime (CA2211).
+ private static readonly string[] _excludedMessages = new string[]
+ {
+ // NOTE: Done on purpose right now.
+ "UNASSIGNED-CoreValidation-Shader-OutputNotConsumed",
+ // TODO: Figure out if fixable
+ "VUID-vkCmdDrawIndexed-None-04584",
+ // TODO: Might be worth looking into making this happy to possibly optimize copies.
+ "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout",
+ // TODO: Fix this, it's causing too much noise right now.
+ "VUID-VkSubpassDependency-srcSubpass-00867"
+ };
+
+ private readonly Vk _api;
+ private readonly Instance _instance;
+ private readonly GraphicsDebugLevel _logLevel;
+ private readonly ExtDebugUtils _debugUtils;
+ private readonly DebugUtilsMessengerEXT? _debugUtilsMessenger;
+ private bool _disposed;
+
+ /// <summary>Registers a debug messenger on the instance; failure is logged, not thrown.</summary>
+ public VulkanDebugMessenger(Vk api, Instance instance, GraphicsDebugLevel logLevel)
+ {
+ _api = api;
+ _instance = instance;
+ _logLevel = logLevel;
+
+ // The extension may be unavailable (e.g. no validation layers); _debugUtils stays null then.
+ _api.TryGetInstanceExtension(instance, out _debugUtils);
+
+ Result result = TryInitialize(out _debugUtilsMessenger);
+
+ if (result != Result.Success)
+ {
+ Logger.Error?.Print(LogClass.Gpu, $"Vulkan debug messenger initialization failed with error {result}");
+ }
+ }
+
+ /// <summary>
+ /// Creates the messenger when debug logging is enabled and the extension is present.
+ /// Returns Success (with a null handle) when there is nothing to do.
+ /// </summary>
+ private Result TryInitialize(out DebugUtilsMessengerEXT? debugUtilsMessengerHandle)
+ {
+ debugUtilsMessengerHandle = null;
+
+ if (_debugUtils != null && _logLevel != GraphicsDebugLevel.None)
+ {
+ var messageType = _logLevel switch
+ {
+ GraphicsDebugLevel.Error => DebugUtilsMessageTypeFlagsEXT.ValidationBitExt,
+ GraphicsDebugLevel.Slowdowns => DebugUtilsMessageTypeFlagsEXT.ValidationBitExt |
+ DebugUtilsMessageTypeFlagsEXT.PerformanceBitExt,
+ GraphicsDebugLevel.All => DebugUtilsMessageTypeFlagsEXT.GeneralBitExt |
+ DebugUtilsMessageTypeFlagsEXT.ValidationBitExt |
+ DebugUtilsMessageTypeFlagsEXT.PerformanceBitExt,
+ _ => throw new ArgumentException($"Invalid log level \"{_logLevel}\".")
+ };
+
+ var messageSeverity = _logLevel switch
+ {
+ GraphicsDebugLevel.Error => DebugUtilsMessageSeverityFlagsEXT.ErrorBitExt,
+ GraphicsDebugLevel.Slowdowns => DebugUtilsMessageSeverityFlagsEXT.ErrorBitExt |
+ DebugUtilsMessageSeverityFlagsEXT.WarningBitExt,
+ GraphicsDebugLevel.All => DebugUtilsMessageSeverityFlagsEXT.InfoBitExt |
+ DebugUtilsMessageSeverityFlagsEXT.WarningBitExt |
+ DebugUtilsMessageSeverityFlagsEXT.VerboseBitExt |
+ DebugUtilsMessageSeverityFlagsEXT.ErrorBitExt,
+ _ => throw new ArgumentException($"Invalid log level \"{_logLevel}\".")
+ };
+
+ var debugUtilsMessengerCreateInfo = new DebugUtilsMessengerCreateInfoEXT()
+ {
+ SType = StructureType.DebugUtilsMessengerCreateInfoExt,
+ MessageType = messageType,
+ MessageSeverity = messageSeverity
+ };
+
+ unsafe
+ {
+ debugUtilsMessengerCreateInfo.PfnUserCallback = new PfnDebugUtilsMessengerCallbackEXT(UserCallback);
+ }
+
+ DebugUtilsMessengerEXT messengerHandle = default;
+
+ Result result = _debugUtils.CreateDebugUtilsMessenger(_instance, SpanHelpers.AsReadOnlySpan(ref debugUtilsMessengerCreateInfo), ReadOnlySpan<AllocationCallbacks>.Empty, SpanHelpers.AsSpan(ref messengerHandle));
+
+ if (result == Result.Success)
+ {
+ debugUtilsMessengerHandle = messengerHandle;
+ }
+
+ return result;
+ }
+
+ return Result.Success;
+ }
+
+ /// <summary>Routes a validation message to the logger matching its severity. Always returns 0 (VK_FALSE).</summary>
+ private static unsafe uint UserCallback(
+ DebugUtilsMessageSeverityFlagsEXT messageSeverity,
+ DebugUtilsMessageTypeFlagsEXT messageTypes,
+ DebugUtilsMessengerCallbackDataEXT* pCallbackData,
+ void* pUserData)
+ {
+ var msg = Marshal.PtrToStringAnsi((IntPtr)pCallbackData->PMessage);
+
+ // PtrToStringAnsi returns null for a null pointer; nothing useful to log in that case.
+ if (msg == null)
+ {
+ return 0;
+ }
+
+ foreach (string excludedMessagePart in _excludedMessages)
+ {
+ if (msg.Contains(excludedMessagePart))
+ {
+ return 0;
+ }
+ }
+
+ if (messageSeverity.HasFlag(DebugUtilsMessageSeverityFlagsEXT.ErrorBitExt))
+ {
+ Logger.Error?.Print(LogClass.Gpu, msg);
+ }
+ else if (messageSeverity.HasFlag(DebugUtilsMessageSeverityFlagsEXT.WarningBitExt))
+ {
+ Logger.Warning?.Print(LogClass.Gpu, msg);
+ }
+ else if (messageSeverity.HasFlag(DebugUtilsMessageSeverityFlagsEXT.InfoBitExt))
+ {
+ Logger.Info?.Print(LogClass.Gpu, msg);
+ }
+ else // if (messageSeverity.HasFlag(DebugUtilsMessageSeverityFlagsEXT.VerboseBitExt))
+ {
+ Logger.Debug?.Print(LogClass.Gpu, msg);
+ }
+
+ return 0;
+ }
+
+ /// <summary>Destroys the messenger, if one was created. Safe to call multiple times.</summary>
+ public void Dispose()
+ {
+ if (!_disposed)
+ {
+ if (_debugUtilsMessenger.HasValue)
+ {
+ _debugUtils.DestroyDebugUtilsMessenger(_instance, _debugUtilsMessenger.Value, Span<AllocationCallbacks>.Empty);
+ }
+
+ _disposed = true;
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/VulkanException.cs b/src/Ryujinx.Graphics.Vulkan/VulkanException.cs
new file mode 100644
index 00000000..983f03d4
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/VulkanException.cs
@@ -0,0 +1,41 @@
+using Silk.NET.Vulkan;
+using System;
+using System.Runtime.Serialization;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ static class ResultExtensions
+ {
+ /// <summary>
+ /// Throws a VulkanException when the result is an error. Only negative result codes
+ /// are errors; positive status codes (e.g. Incomplete) pass through.
+ /// </summary>
+ public static void ThrowOnError(this Result result)
+ {
+ if ((int)result >= (int)Result.Success)
+ {
+ return;
+ }
+
+ throw new VulkanException(result);
+ }
+ }
+
+ /// <summary>Exception thrown when a Vulkan API call returns an unexpected error result.</summary>
+ class VulkanException : Exception
+ {
+ public VulkanException()
+ {
+ }
+
+ /// <summary>Creates an exception describing the given Vulkan result error code.</summary>
+ public VulkanException(Result result) : base($"Unexpected API error \"{result}\".")
+ {
+ }
+
+ public VulkanException(string message) : base(message)
+ {
+ }
+
+ public VulkanException(string message, Exception innerException) : base(message, innerException)
+ {
+ }
+
+ // NOTE(review): serialization constructors are obsolete as of .NET 8 (SYSLIB0051);
+ // consider removing this if the project moves to a newer target framework.
+ protected VulkanException(SerializationInfo info, StreamingContext context) : base(info, context)
+ {
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/VulkanInitialization.cs b/src/Ryujinx.Graphics.Vulkan/VulkanInitialization.cs
new file mode 100644
index 00000000..50a6fcb9
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/VulkanInitialization.cs
@@ -0,0 +1,539 @@
+using Ryujinx.Common.Configuration;
+using Ryujinx.Common.Logging;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using Silk.NET.Vulkan.Extensions.EXT;
+using Silk.NET.Vulkan.Extensions.KHR;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ public unsafe static class VulkanInitialization
+ {
+ private const uint InvalidIndex = uint.MaxValue;
+ private static uint MinimalVulkanVersion = Vk.Version11.Value;
+ private static uint MinimalInstanceVulkanVersion = Vk.Version12.Value;
+ private static uint MaximumVulkanVersion = Vk.Version12.Value;
+ private const string AppName = "Ryujinx.Graphics.Vulkan";
+ private const int QueuesCount = 2;
+
+ private static readonly string[] _desirableExtensions = new string[]
+ {
+ ExtConditionalRendering.ExtensionName,
+ ExtExtendedDynamicState.ExtensionName,
+ ExtTransformFeedback.ExtensionName,
+ KhrDrawIndirectCount.ExtensionName,
+ KhrPushDescriptor.ExtensionName,
+ "VK_EXT_blend_operation_advanced",
+ "VK_EXT_custom_border_color",
+ "VK_EXT_descriptor_indexing", // Enabling this works around an issue with disposed buffer bindings on RADV.
+ "VK_EXT_fragment_shader_interlock",
+ "VK_EXT_index_type_uint8",
+ "VK_EXT_primitive_topology_list_restart",
+ "VK_EXT_robustness2",
+ "VK_EXT_shader_stencil_export",
+ "VK_KHR_shader_float16_int8",
+ "VK_EXT_shader_subgroup_ballot",
+ "VK_EXT_subgroup_size_control",
+ "VK_NV_geometry_shader_passthrough",
+ "VK_NV_viewport_array2",
+ "VK_KHR_portability_subset" // As per spec, we should enable this if present.
+ };
+
+ private static readonly string[] _requiredExtensions = new string[]
+ {
+ KhrSwapchain.ExtensionName
+ };
+
+ internal static VulkanInstance CreateInstance(Vk api, GraphicsDebugLevel logLevel, string[] requiredExtensions)
+ {
+ var enabledLayers = new List<string>();
+
+ var instanceExtensions = VulkanInstance.GetInstanceExtensions(api);
+ var instanceLayers = VulkanInstance.GetInstanceLayers(api);
+
+ void AddAvailableLayer(string layerName)
+ {
+ if (instanceLayers.Contains(layerName))
+ {
+ enabledLayers.Add(layerName);
+ }
+ else
+ {
+ Logger.Warning?.Print(LogClass.Gpu, $"Missing layer {layerName}");
+ }
+ }
+
+ if (logLevel != GraphicsDebugLevel.None)
+ {
+ AddAvailableLayer("VK_LAYER_KHRONOS_validation");
+ }
+
+ var enabledExtensions = requiredExtensions;
+
+ if (instanceExtensions.Contains("VK_EXT_debug_utils"))
+ {
+ enabledExtensions = enabledExtensions.Append(ExtDebugUtils.ExtensionName).ToArray();
+ }
+
+ var appName = Marshal.StringToHGlobalAnsi(AppName);
+
+ var applicationInfo = new ApplicationInfo
+ {
+ PApplicationName = (byte*)appName,
+ ApplicationVersion = 1,
+ PEngineName = (byte*)appName,
+ EngineVersion = 1,
+ ApiVersion = MaximumVulkanVersion
+ };
+
+ IntPtr* ppEnabledExtensions = stackalloc IntPtr[enabledExtensions.Length];
+ IntPtr* ppEnabledLayers = stackalloc IntPtr[enabledLayers.Count];
+
+ for (int i = 0; i < enabledExtensions.Length; i++)
+ {
+ ppEnabledExtensions[i] = Marshal.StringToHGlobalAnsi(enabledExtensions[i]);
+ }
+
+ for (int i = 0; i < enabledLayers.Count; i++)
+ {
+ ppEnabledLayers[i] = Marshal.StringToHGlobalAnsi(enabledLayers[i]);
+ }
+
+ var instanceCreateInfo = new InstanceCreateInfo
+ {
+ SType = StructureType.InstanceCreateInfo,
+ PApplicationInfo = &applicationInfo,
+ PpEnabledExtensionNames = (byte**)ppEnabledExtensions,
+ PpEnabledLayerNames = (byte**)ppEnabledLayers,
+ EnabledExtensionCount = (uint)enabledExtensions.Length,
+ EnabledLayerCount = (uint)enabledLayers.Count
+ };
+
+ Result result = VulkanInstance.Create(api, ref instanceCreateInfo, out var instance);
+
+ Marshal.FreeHGlobal(appName);
+
+ for (int i = 0; i < enabledExtensions.Length; i++)
+ {
+ Marshal.FreeHGlobal(ppEnabledExtensions[i]);
+ }
+
+ for (int i = 0; i < enabledLayers.Count; i++)
+ {
+ Marshal.FreeHGlobal(ppEnabledLayers[i]);
+ }
+
+ result.ThrowOnError();
+
+ return instance;
+ }
+
+ internal static VulkanPhysicalDevice FindSuitablePhysicalDevice(Vk api, VulkanInstance instance, SurfaceKHR surface, string preferredGpuId)
+ {
+ instance.EnumeratePhysicalDevices(out var physicalDevices).ThrowOnError();
+
+            // First, we try to pick the user's preferred GPU.
+ for (int i = 0; i < physicalDevices.Length; i++)
+ {
+ if (IsPreferredAndSuitableDevice(api, physicalDevices[i], surface, preferredGpuId))
+ {
+ return physicalDevices[i];
+ }
+ }
+
+ // If we fail to do that, just use the first compatible GPU.
+ for (int i = 0; i < physicalDevices.Length; i++)
+ {
+ if (IsSuitableDevice(api, physicalDevices[i], surface))
+ {
+ return physicalDevices[i];
+ }
+ }
+
+ throw new VulkanException("Initialization failed, none of the available GPUs meets the minimum requirements.");
+ }
+
+ internal static DeviceInfo[] GetSuitablePhysicalDevices(Vk api)
+ {
+ var appName = Marshal.StringToHGlobalAnsi(AppName);
+
+ var applicationInfo = new ApplicationInfo
+ {
+ PApplicationName = (byte*)appName,
+ ApplicationVersion = 1,
+ PEngineName = (byte*)appName,
+ EngineVersion = 1,
+ ApiVersion = MaximumVulkanVersion
+ };
+
+ var instanceCreateInfo = new InstanceCreateInfo
+ {
+ SType = StructureType.InstanceCreateInfo,
+ PApplicationInfo = &applicationInfo,
+ PpEnabledExtensionNames = null,
+ PpEnabledLayerNames = null,
+ EnabledExtensionCount = 0,
+ EnabledLayerCount = 0
+ };
+
+ Result result = VulkanInstance.Create(api, ref instanceCreateInfo, out var rawInstance);
+
+ Marshal.FreeHGlobal(appName);
+
+ result.ThrowOnError();
+
+ using VulkanInstance instance = rawInstance;
+
+ // We currently assume that the instance is compatible with Vulkan 1.2
+ // TODO: Remove this once we relax our initialization codepaths.
+ if (instance.InstanceVersion < MinimalInstanceVulkanVersion)
+ {
+ return Array.Empty<DeviceInfo>();
+ }
+
+ instance.EnumeratePhysicalDevices(out VulkanPhysicalDevice[] physicalDevices).ThrowOnError();
+
+ List<DeviceInfo> deviceInfos = new List<DeviceInfo>();
+
+ foreach (VulkanPhysicalDevice physicalDevice in physicalDevices)
+ {
+ if (physicalDevice.PhysicalDeviceProperties.ApiVersion < MinimalVulkanVersion)
+ {
+ continue;
+ }
+
+ deviceInfos.Add(physicalDevice.ToDeviceInfo());
+ }
+
+ return deviceInfos.ToArray();
+ }
+
+ private static bool IsPreferredAndSuitableDevice(Vk api, VulkanPhysicalDevice physicalDevice, SurfaceKHR surface, string preferredGpuId)
+ {
+ if (physicalDevice.Id != preferredGpuId)
+ {
+ return false;
+ }
+
+ return IsSuitableDevice(api, physicalDevice, surface);
+ }
+
+ private static bool IsSuitableDevice(Vk api, VulkanPhysicalDevice physicalDevice, SurfaceKHR surface)
+ {
+ int extensionMatches = 0;
+
+ foreach (string requiredExtension in _requiredExtensions)
+ {
+ if (physicalDevice.IsDeviceExtensionPresent(requiredExtension))
+ {
+ extensionMatches++;
+ }
+ }
+
+ return extensionMatches == _requiredExtensions.Length && FindSuitableQueueFamily(api, physicalDevice, surface, out _) != InvalidIndex;
+ }
+
+ internal static uint FindSuitableQueueFamily(Vk api, VulkanPhysicalDevice physicalDevice, SurfaceKHR surface, out uint queueCount)
+ {
+ const QueueFlags RequiredFlags = QueueFlags.GraphicsBit | QueueFlags.ComputeBit;
+
+ var khrSurface = new KhrSurface(api.Context);
+
+ for (uint index = 0; index < physicalDevice.QueueFamilyProperties.Length; index++)
+ {
+ ref QueueFamilyProperties property = ref physicalDevice.QueueFamilyProperties[index];
+
+ khrSurface.GetPhysicalDeviceSurfaceSupport(physicalDevice.PhysicalDevice, index, surface, out var surfaceSupported).ThrowOnError();
+
+ if (property.QueueFlags.HasFlag(RequiredFlags) && surfaceSupported)
+ {
+ queueCount = property.QueueCount;
+
+ return index;
+ }
+ }
+
+ queueCount = 0;
+
+ return InvalidIndex;
+ }
+
+ internal static Device CreateDevice(Vk api, VulkanPhysicalDevice physicalDevice, uint queueFamilyIndex, uint queueCount)
+ {
+ if (queueCount > QueuesCount)
+ {
+ queueCount = QueuesCount;
+ }
+
+ float* queuePriorities = stackalloc float[(int)queueCount];
+
+ for (int i = 0; i < queueCount; i++)
+ {
+ queuePriorities[i] = 1f;
+ }
+
+ var queueCreateInfo = new DeviceQueueCreateInfo()
+ {
+ SType = StructureType.DeviceQueueCreateInfo,
+ QueueFamilyIndex = queueFamilyIndex,
+ QueueCount = queueCount,
+ PQueuePriorities = queuePriorities
+ };
+
+ bool useRobustBufferAccess = VendorUtils.FromId(physicalDevice.PhysicalDeviceProperties.VendorID) == Vendor.Nvidia;
+
+ PhysicalDeviceFeatures2 features2 = new PhysicalDeviceFeatures2()
+ {
+ SType = StructureType.PhysicalDeviceFeatures2
+ };
+
+ PhysicalDeviceVulkan11Features supportedFeaturesVk11 = new PhysicalDeviceVulkan11Features()
+ {
+ SType = StructureType.PhysicalDeviceVulkan11Features,
+ PNext = features2.PNext
+ };
+
+ features2.PNext = &supportedFeaturesVk11;
+
+ PhysicalDeviceCustomBorderColorFeaturesEXT supportedFeaturesCustomBorderColor = new PhysicalDeviceCustomBorderColorFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceCustomBorderColorFeaturesExt,
+ PNext = features2.PNext
+ };
+
+ if (physicalDevice.IsDeviceExtensionPresent("VK_EXT_custom_border_color"))
+ {
+ features2.PNext = &supportedFeaturesCustomBorderColor;
+ }
+
+ PhysicalDevicePrimitiveTopologyListRestartFeaturesEXT supportedFeaturesPrimitiveTopologyListRestart = new PhysicalDevicePrimitiveTopologyListRestartFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDevicePrimitiveTopologyListRestartFeaturesExt,
+ PNext = features2.PNext
+ };
+
+ if (physicalDevice.IsDeviceExtensionPresent("VK_EXT_primitive_topology_list_restart"))
+ {
+ features2.PNext = &supportedFeaturesPrimitiveTopologyListRestart;
+ }
+
+ PhysicalDeviceTransformFeedbackFeaturesEXT supportedFeaturesTransformFeedback = new PhysicalDeviceTransformFeedbackFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceTransformFeedbackFeaturesExt,
+ PNext = features2.PNext
+ };
+
+ if (physicalDevice.IsDeviceExtensionPresent(ExtTransformFeedback.ExtensionName))
+ {
+ features2.PNext = &supportedFeaturesTransformFeedback;
+ }
+
+ PhysicalDeviceRobustness2FeaturesEXT supportedFeaturesRobustness2 = new PhysicalDeviceRobustness2FeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceRobustness2FeaturesExt
+ };
+
+ if (physicalDevice.IsDeviceExtensionPresent("VK_EXT_robustness2"))
+ {
+ supportedFeaturesRobustness2.PNext = features2.PNext;
+
+ features2.PNext = &supportedFeaturesRobustness2;
+ }
+
+ api.GetPhysicalDeviceFeatures2(physicalDevice.PhysicalDevice, &features2);
+
+ var supportedFeatures = features2.Features;
+
+ var features = new PhysicalDeviceFeatures()
+ {
+ DepthBiasClamp = true,
+ DepthClamp = supportedFeatures.DepthClamp,
+ DualSrcBlend = supportedFeatures.DualSrcBlend,
+ FragmentStoresAndAtomics = true,
+ GeometryShader = supportedFeatures.GeometryShader,
+ ImageCubeArray = true,
+ IndependentBlend = true,
+ LogicOp = supportedFeatures.LogicOp,
+ OcclusionQueryPrecise = supportedFeatures.OcclusionQueryPrecise,
+ MultiViewport = supportedFeatures.MultiViewport,
+ PipelineStatisticsQuery = supportedFeatures.PipelineStatisticsQuery,
+ SamplerAnisotropy = true,
+ ShaderClipDistance = true,
+ ShaderFloat64 = supportedFeatures.ShaderFloat64,
+ ShaderImageGatherExtended = supportedFeatures.ShaderImageGatherExtended,
+ ShaderStorageImageMultisample = supportedFeatures.ShaderStorageImageMultisample,
+ // ShaderStorageImageReadWithoutFormat = true,
+ // ShaderStorageImageWriteWithoutFormat = true,
+ TessellationShader = supportedFeatures.TessellationShader,
+ VertexPipelineStoresAndAtomics = true,
+ RobustBufferAccess = useRobustBufferAccess
+ };
+
+ void* pExtendedFeatures = null;
+
+ PhysicalDeviceTransformFeedbackFeaturesEXT featuresTransformFeedback;
+
+ if (physicalDevice.IsDeviceExtensionPresent(ExtTransformFeedback.ExtensionName))
+ {
+ featuresTransformFeedback = new PhysicalDeviceTransformFeedbackFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceTransformFeedbackFeaturesExt,
+ PNext = pExtendedFeatures,
+ TransformFeedback = supportedFeaturesTransformFeedback.TransformFeedback
+ };
+
+ pExtendedFeatures = &featuresTransformFeedback;
+ }
+
+ PhysicalDevicePrimitiveTopologyListRestartFeaturesEXT featuresPrimitiveTopologyListRestart;
+
+ if (physicalDevice.IsDeviceExtensionPresent("VK_EXT_primitive_topology_list_restart"))
+ {
+ featuresPrimitiveTopologyListRestart = new PhysicalDevicePrimitiveTopologyListRestartFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDevicePrimitiveTopologyListRestartFeaturesExt,
+ PNext = pExtendedFeatures,
+ PrimitiveTopologyListRestart = supportedFeaturesPrimitiveTopologyListRestart.PrimitiveTopologyListRestart,
+ PrimitiveTopologyPatchListRestart = supportedFeaturesPrimitiveTopologyListRestart.PrimitiveTopologyPatchListRestart
+ };
+
+ pExtendedFeatures = &featuresPrimitiveTopologyListRestart;
+ }
+
+ PhysicalDeviceRobustness2FeaturesEXT featuresRobustness2;
+
+ if (physicalDevice.IsDeviceExtensionPresent("VK_EXT_robustness2"))
+ {
+ featuresRobustness2 = new PhysicalDeviceRobustness2FeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceRobustness2FeaturesExt,
+ PNext = pExtendedFeatures,
+ NullDescriptor = supportedFeaturesRobustness2.NullDescriptor
+ };
+
+ pExtendedFeatures = &featuresRobustness2;
+ }
+
+ var featuresExtendedDynamicState = new PhysicalDeviceExtendedDynamicStateFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceExtendedDynamicStateFeaturesExt,
+ PNext = pExtendedFeatures,
+ ExtendedDynamicState = physicalDevice.IsDeviceExtensionPresent(ExtExtendedDynamicState.ExtensionName)
+ };
+
+ pExtendedFeatures = &featuresExtendedDynamicState;
+
+ var featuresVk11 = new PhysicalDeviceVulkan11Features()
+ {
+ SType = StructureType.PhysicalDeviceVulkan11Features,
+ PNext = pExtendedFeatures,
+ ShaderDrawParameters = supportedFeaturesVk11.ShaderDrawParameters
+ };
+
+ pExtendedFeatures = &featuresVk11;
+
+ var featuresVk12 = new PhysicalDeviceVulkan12Features()
+ {
+ SType = StructureType.PhysicalDeviceVulkan12Features,
+ PNext = pExtendedFeatures,
+ DescriptorIndexing = physicalDevice.IsDeviceExtensionPresent("VK_EXT_descriptor_indexing"),
+ DrawIndirectCount = physicalDevice.IsDeviceExtensionPresent(KhrDrawIndirectCount.ExtensionName),
+ UniformBufferStandardLayout = physicalDevice.IsDeviceExtensionPresent("VK_KHR_uniform_buffer_standard_layout")
+ };
+
+ pExtendedFeatures = &featuresVk12;
+
+ PhysicalDeviceIndexTypeUint8FeaturesEXT featuresIndexU8;
+
+ if (physicalDevice.IsDeviceExtensionPresent("VK_EXT_index_type_uint8"))
+ {
+ featuresIndexU8 = new PhysicalDeviceIndexTypeUint8FeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceIndexTypeUint8FeaturesExt,
+ PNext = pExtendedFeatures,
+ IndexTypeUint8 = true
+ };
+
+ pExtendedFeatures = &featuresIndexU8;
+ }
+
+ PhysicalDeviceFragmentShaderInterlockFeaturesEXT featuresFragmentShaderInterlock;
+
+ if (physicalDevice.IsDeviceExtensionPresent("VK_EXT_fragment_shader_interlock"))
+ {
+ featuresFragmentShaderInterlock = new PhysicalDeviceFragmentShaderInterlockFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceFragmentShaderInterlockFeaturesExt,
+ PNext = pExtendedFeatures,
+ FragmentShaderPixelInterlock = true
+ };
+
+ pExtendedFeatures = &featuresFragmentShaderInterlock;
+ }
+
+ PhysicalDeviceSubgroupSizeControlFeaturesEXT featuresSubgroupSizeControl;
+
+ if (physicalDevice.IsDeviceExtensionPresent("VK_EXT_subgroup_size_control"))
+ {
+ featuresSubgroupSizeControl = new PhysicalDeviceSubgroupSizeControlFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceSubgroupSizeControlFeaturesExt,
+ PNext = pExtendedFeatures,
+ SubgroupSizeControl = true
+ };
+
+ pExtendedFeatures = &featuresSubgroupSizeControl;
+ }
+
+ PhysicalDeviceCustomBorderColorFeaturesEXT featuresCustomBorderColor;
+
+ if (physicalDevice.IsDeviceExtensionPresent("VK_EXT_custom_border_color") &&
+ supportedFeaturesCustomBorderColor.CustomBorderColors &&
+ supportedFeaturesCustomBorderColor.CustomBorderColorWithoutFormat)
+ {
+ featuresCustomBorderColor = new PhysicalDeviceCustomBorderColorFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceCustomBorderColorFeaturesExt,
+ PNext = pExtendedFeatures,
+ CustomBorderColors = true,
+ CustomBorderColorWithoutFormat = true,
+ };
+
+ pExtendedFeatures = &featuresCustomBorderColor;
+ }
+
+ var enabledExtensions = _requiredExtensions.Union(_desirableExtensions.Intersect(physicalDevice.DeviceExtensions)).ToArray();
+
+ IntPtr* ppEnabledExtensions = stackalloc IntPtr[enabledExtensions.Length];
+
+ for (int i = 0; i < enabledExtensions.Length; i++)
+ {
+ ppEnabledExtensions[i] = Marshal.StringToHGlobalAnsi(enabledExtensions[i]);
+ }
+
+ var deviceCreateInfo = new DeviceCreateInfo()
+ {
+ SType = StructureType.DeviceCreateInfo,
+ PNext = pExtendedFeatures,
+ QueueCreateInfoCount = 1,
+ PQueueCreateInfos = &queueCreateInfo,
+ PpEnabledExtensionNames = (byte**)ppEnabledExtensions,
+ EnabledExtensionCount = (uint)enabledExtensions.Length,
+ PEnabledFeatures = &features
+ };
+
+ api.CreateDevice(physicalDevice.PhysicalDevice, in deviceCreateInfo, null, out var device).ThrowOnError();
+
+ for (int i = 0; i < enabledExtensions.Length; i++)
+ {
+ Marshal.FreeHGlobal(ppEnabledExtensions[i]);
+ }
+
+ return device;
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/VulkanInstance.cs b/src/Ryujinx.Graphics.Vulkan/VulkanInstance.cs
new file mode 100644
index 00000000..843d3412
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/VulkanInstance.cs
@@ -0,0 +1,127 @@
+using Ryujinx.Common.Utilities;
+using Silk.NET.Core;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Linq;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class VulkanInstance : IDisposable
+ {
+ private readonly Vk _api;
+ public readonly Instance Instance;
+ public readonly Version32 InstanceVersion;
+
+ private bool _disposed;
+
+ private VulkanInstance(Vk api, Instance instance)
+ {
+ _api = api;
+ Instance = instance;
+
+ if (api.GetInstanceProcAddr(instance, "vkEnumerateInstanceVersion") == IntPtr.Zero)
+ {
+ InstanceVersion = Vk.Version10;
+ }
+ else
+ {
+ uint rawInstanceVersion = 0;
+
+ if (api.EnumerateInstanceVersion(ref rawInstanceVersion) != Result.Success)
+ {
+ rawInstanceVersion = Vk.Version11.Value;
+ }
+
+ InstanceVersion = (Version32)rawInstanceVersion;
+ }
+ }
+
+ public static Result Create(Vk api, ref InstanceCreateInfo createInfo, out VulkanInstance instance)
+ {
+ instance = null;
+
+ Instance rawInstance = default;
+
+ Result result = api.CreateInstance(SpanHelpers.AsReadOnlySpan(ref createInfo), ReadOnlySpan<AllocationCallbacks>.Empty, SpanHelpers.AsSpan(ref rawInstance));
+
+ if (result == Result.Success)
+ {
+ instance = new VulkanInstance(api, rawInstance);
+ }
+
+ return result;
+ }
+
+ public Result EnumeratePhysicalDevices(out VulkanPhysicalDevice[] physicalDevices)
+ {
+ physicalDevices = null;
+
+ uint physicalDeviceCount = 0;
+
+ Result result = _api.EnumeratePhysicalDevices(Instance, SpanHelpers.AsSpan(ref physicalDeviceCount), Span<PhysicalDevice>.Empty);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ PhysicalDevice[] rawPhysicalDevices = new PhysicalDevice[physicalDeviceCount];
+
+ result = _api.EnumeratePhysicalDevices(Instance, SpanHelpers.AsSpan(ref physicalDeviceCount), rawPhysicalDevices);
+
+ if (result != Result.Success)
+ {
+ return result;
+ }
+
+ physicalDevices = rawPhysicalDevices.Select(x => new VulkanPhysicalDevice(_api, x)).ToArray();
+
+ return Result.Success;
+ }
+
+ public static IReadOnlySet<string> GetInstanceExtensions(Vk api)
+ {
+ uint propertiesCount = 0;
+
+ api.EnumerateInstanceExtensionProperties(ReadOnlySpan<byte>.Empty, SpanHelpers.AsSpan(ref propertiesCount), Span<ExtensionProperties>.Empty).ThrowOnError();
+
+ ExtensionProperties[] extensionProperties = new ExtensionProperties[propertiesCount];
+
+ api.EnumerateInstanceExtensionProperties(ReadOnlySpan<byte>.Empty, SpanHelpers.AsSpan(ref propertiesCount), extensionProperties).ThrowOnError();
+
+ unsafe
+ {
+ return extensionProperties.Select(x => Marshal.PtrToStringAnsi((IntPtr)x.ExtensionName)).ToImmutableHashSet();
+ }
+ }
+
+ public static IReadOnlySet<string> GetInstanceLayers(Vk api)
+ {
+ uint propertiesCount = 0;
+
+ api.EnumerateInstanceLayerProperties(SpanHelpers.AsSpan(ref propertiesCount), Span<LayerProperties>.Empty).ThrowOnError();
+
+ LayerProperties[] layerProperties = new LayerProperties[propertiesCount];
+
+ api.EnumerateInstanceLayerProperties(SpanHelpers.AsSpan(ref propertiesCount), layerProperties).ThrowOnError();
+
+ unsafe
+ {
+ return layerProperties.Select(x => Marshal.PtrToStringAnsi((IntPtr)x.LayerName)).ToImmutableHashSet();
+ }
+ }
+
+ public void Dispose()
+ {
+ if (!_disposed)
+ {
+ _api.DestroyInstance(Instance, ReadOnlySpan<AllocationCallbacks>.Empty);
+
+ _disposed = true;
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/VulkanPhysicalDevice.cs b/src/Ryujinx.Graphics.Vulkan/VulkanPhysicalDevice.cs
new file mode 100644
index 00000000..547f3654
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/VulkanPhysicalDevice.cs
@@ -0,0 +1,70 @@
+using Ryujinx.Common.Utilities;
+using Ryujinx.Graphics.GAL;
+using Silk.NET.Vulkan;
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Linq;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ readonly struct VulkanPhysicalDevice
+ {
+ public readonly PhysicalDevice PhysicalDevice;
+ public readonly PhysicalDeviceFeatures PhysicalDeviceFeatures;
+ public readonly PhysicalDeviceProperties PhysicalDeviceProperties;
+ public readonly PhysicalDeviceMemoryProperties PhysicalDeviceMemoryProperties;
+ public readonly QueueFamilyProperties[] QueueFamilyProperties;
+ public readonly string DeviceName;
+ public readonly IReadOnlySet<string> DeviceExtensions;
+
+ public VulkanPhysicalDevice(Vk api, PhysicalDevice physicalDevice)
+ {
+ PhysicalDevice = physicalDevice;
+ PhysicalDeviceFeatures = api.GetPhysicalDeviceFeature(PhysicalDevice);
+
+ api.GetPhysicalDeviceProperties(PhysicalDevice, out var physicalDeviceProperties);
+ PhysicalDeviceProperties = physicalDeviceProperties;
+
+ api.GetPhysicalDeviceMemoryProperties(PhysicalDevice, out PhysicalDeviceMemoryProperties);
+
+ unsafe
+ {
+ DeviceName = Marshal.PtrToStringAnsi((IntPtr)physicalDeviceProperties.DeviceName);
+ }
+
+ uint propertiesCount = 0;
+
+ api.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, SpanHelpers.AsSpan(ref propertiesCount), Span<QueueFamilyProperties>.Empty);
+
+ QueueFamilyProperties = new QueueFamilyProperties[propertiesCount];
+
+ api.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, SpanHelpers.AsSpan(ref propertiesCount), QueueFamilyProperties);
+
+ api.EnumerateDeviceExtensionProperties(PhysicalDevice, Span<byte>.Empty, SpanHelpers.AsSpan(ref propertiesCount), Span<ExtensionProperties>.Empty).ThrowOnError();
+
+ ExtensionProperties[] extensionProperties = new ExtensionProperties[propertiesCount];
+
+ api.EnumerateDeviceExtensionProperties(PhysicalDevice, Span<byte>.Empty, SpanHelpers.AsSpan(ref propertiesCount), extensionProperties).ThrowOnError();
+
+ unsafe
+ {
+ DeviceExtensions = extensionProperties.Select(x => Marshal.PtrToStringAnsi((IntPtr)x.ExtensionName)).ToImmutableHashSet();
+ }
+ }
+
+ public string Id => $"0x{PhysicalDeviceProperties.VendorID:X}_0x{PhysicalDeviceProperties.DeviceID:X}";
+
+ public bool IsDeviceExtensionPresent(string extension) => DeviceExtensions.Contains(extension);
+
+ public DeviceInfo ToDeviceInfo()
+ {
+ return new DeviceInfo(
+ Id,
+ VendorUtils.GetNameFromId(PhysicalDeviceProperties.VendorID),
+ DeviceName,
+ PhysicalDeviceProperties.DeviceType == PhysicalDeviceType.DiscreteGpu);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/VulkanRenderer.cs b/src/Ryujinx.Graphics.Vulkan/VulkanRenderer.cs
new file mode 100644
index 00000000..e7475b6b
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/VulkanRenderer.cs
@@ -0,0 +1,820 @@
+using Ryujinx.Common.Configuration;
+using Ryujinx.Common.Logging;
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Shader;
+using Ryujinx.Graphics.Shader.Translation;
+using Ryujinx.Graphics.Vulkan.MoltenVK;
+using Ryujinx.Graphics.Vulkan.Queries;
+using Silk.NET.Vulkan;
+using Silk.NET.Vulkan.Extensions.EXT;
+using Silk.NET.Vulkan.Extensions.KHR;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ public sealed class VulkanRenderer : IRenderer
+ {
+ private VulkanInstance _instance;
+ private SurfaceKHR _surface;
+ private VulkanPhysicalDevice _physicalDevice;
+ private Device _device;
+ private WindowBase _window;
+
+ private bool _initialized;
+
+ internal FormatCapabilities FormatCapabilities { get; private set; }
+ internal HardwareCapabilities Capabilities;
+
+ internal Vk Api { get; private set; }
+ internal KhrSurface SurfaceApi { get; private set; }
+ internal KhrSwapchain SwapchainApi { get; private set; }
+ internal ExtConditionalRendering ConditionalRenderingApi { get; private set; }
+ internal ExtExtendedDynamicState ExtendedDynamicStateApi { get; private set; }
+ internal KhrPushDescriptor PushDescriptorApi { get; private set; }
+ internal ExtTransformFeedback TransformFeedbackApi { get; private set; }
+ internal KhrDrawIndirectCount DrawIndirectCountApi { get; private set; }
+
+ internal uint QueueFamilyIndex { get; private set; }
+ internal Queue Queue { get; private set; }
+ internal Queue BackgroundQueue { get; private set; }
+ internal object BackgroundQueueLock { get; private set; }
+ internal object QueueLock { get; private set; }
+
+ internal MemoryAllocator MemoryAllocator { get; private set; }
+ internal CommandBufferPool CommandBufferPool { get; private set; }
+ internal DescriptorSetManager DescriptorSetManager { get; private set; }
+ internal PipelineLayoutCache PipelineLayoutCache { get; private set; }
+ internal BackgroundResources BackgroundResources { get; private set; }
+ internal Action<Action> InterruptAction { get; private set; }
+ internal SyncManager SyncManager { get; private set; }
+
+ internal BufferManager BufferManager { get; private set; }
+
+ internal HashSet<ShaderCollection> Shaders { get; }
+ internal HashSet<ITexture> Textures { get; }
+ internal HashSet<SamplerHolder> Samplers { get; }
+
+ private VulkanDebugMessenger _debugMessenger;
+ private Counters _counters;
+
+ private PipelineFull _pipeline;
+
+ internal HelperShader HelperShader { get; private set; }
+ internal PipelineFull PipelineInternal => _pipeline;
+
+ public IPipeline Pipeline => _pipeline;
+
+ public IWindow Window => _window;
+
+ private readonly Func<Instance, Vk, SurfaceKHR> _getSurface;
+ private readonly Func<string[]> _getRequiredExtensions;
+ private readonly string _preferredGpuId;
+
+ internal Vendor Vendor { get; private set; }
+ internal bool IsAmdWindows { get; private set; }
+ internal bool IsIntelWindows { get; private set; }
+ internal bool IsAmdGcn { get; private set; }
+ internal bool IsMoltenVk { get; private set; }
+ internal bool IsTBDR { get; private set; }
+ internal bool IsSharedMemory { get; private set; }
+ public string GpuVendor { get; private set; }
+ public string GpuRenderer { get; private set; }
+ public string GpuVersion { get; private set; }
+
+ public bool PreferThreading => true;
+
+ public event EventHandler<ScreenCaptureImageInfo> ScreenCaptured;
+
+ public VulkanRenderer(Func<Instance, Vk, SurfaceKHR> surfaceFunc, Func<string[]> requiredExtensionsFunc, string preferredGpuId)
+ {
+ _getSurface = surfaceFunc;
+ _getRequiredExtensions = requiredExtensionsFunc;
+ _preferredGpuId = preferredGpuId;
+ Shaders = new HashSet<ShaderCollection>();
+ Textures = new HashSet<ITexture>();
+ Samplers = new HashSet<SamplerHolder>();
+
+ if (OperatingSystem.IsMacOS())
+ {
+ MVKInitialization.Initialize();
+
+ // Any device running on MacOS is using MoltenVK, even Intel and AMD vendors.
+ IsMoltenVk = true;
+ }
+ }
+
+ private unsafe void LoadFeatures(uint maxQueueCount, uint queueFamilyIndex)
+ {
+ FormatCapabilities = new FormatCapabilities(Api, _physicalDevice.PhysicalDevice);
+
+ if (Api.TryGetDeviceExtension(_instance.Instance, _device, out ExtConditionalRendering conditionalRenderingApi))
+ {
+ ConditionalRenderingApi = conditionalRenderingApi;
+ }
+
+ if (Api.TryGetDeviceExtension(_instance.Instance, _device, out ExtExtendedDynamicState extendedDynamicStateApi))
+ {
+ ExtendedDynamicStateApi = extendedDynamicStateApi;
+ }
+
+ if (Api.TryGetDeviceExtension(_instance.Instance, _device, out KhrPushDescriptor pushDescriptorApi))
+ {
+ PushDescriptorApi = pushDescriptorApi;
+ }
+
+ if (Api.TryGetDeviceExtension(_instance.Instance, _device, out ExtTransformFeedback transformFeedbackApi))
+ {
+ TransformFeedbackApi = transformFeedbackApi;
+ }
+
+ if (Api.TryGetDeviceExtension(_instance.Instance, _device, out KhrDrawIndirectCount drawIndirectCountApi))
+ {
+ DrawIndirectCountApi = drawIndirectCountApi;
+ }
+
+ if (maxQueueCount >= 2)
+ {
+ Api.GetDeviceQueue(_device, queueFamilyIndex, 1, out var backgroundQueue);
+ BackgroundQueue = backgroundQueue;
+ BackgroundQueueLock = new object();
+ }
+
+ PhysicalDeviceProperties2 properties2 = new PhysicalDeviceProperties2()
+ {
+ SType = StructureType.PhysicalDeviceProperties2
+ };
+
+ PhysicalDeviceBlendOperationAdvancedPropertiesEXT propertiesBlendOperationAdvanced = new PhysicalDeviceBlendOperationAdvancedPropertiesEXT()
+ {
+ SType = StructureType.PhysicalDeviceBlendOperationAdvancedPropertiesExt
+ };
+
+ bool supportsBlendOperationAdvanced = _physicalDevice.IsDeviceExtensionPresent("VK_EXT_blend_operation_advanced");
+
+ if (supportsBlendOperationAdvanced)
+ {
+ propertiesBlendOperationAdvanced.PNext = properties2.PNext;
+ properties2.PNext = &propertiesBlendOperationAdvanced;
+ }
+
+ PhysicalDeviceSubgroupSizeControlPropertiesEXT propertiesSubgroupSizeControl = new PhysicalDeviceSubgroupSizeControlPropertiesEXT()
+ {
+ SType = StructureType.PhysicalDeviceSubgroupSizeControlPropertiesExt
+ };
+
+ bool supportsSubgroupSizeControl = _physicalDevice.IsDeviceExtensionPresent("VK_EXT_subgroup_size_control");
+
+ if (supportsSubgroupSizeControl)
+ {
+ properties2.PNext = &propertiesSubgroupSizeControl;
+ }
+
+ bool supportsTransformFeedback = _physicalDevice.IsDeviceExtensionPresent(ExtTransformFeedback.ExtensionName);
+
+ PhysicalDeviceTransformFeedbackPropertiesEXT propertiesTransformFeedback = new PhysicalDeviceTransformFeedbackPropertiesEXT()
+ {
+ SType = StructureType.PhysicalDeviceTransformFeedbackPropertiesExt
+ };
+
+ if (supportsTransformFeedback)
+ {
+ propertiesTransformFeedback.PNext = properties2.PNext;
+ properties2.PNext = &propertiesTransformFeedback;
+ }
+
+ PhysicalDevicePortabilitySubsetPropertiesKHR propertiesPortabilitySubset = new PhysicalDevicePortabilitySubsetPropertiesKHR()
+ {
+ SType = StructureType.PhysicalDevicePortabilitySubsetPropertiesKhr
+ };
+
+ PhysicalDeviceFeatures2 features2 = new PhysicalDeviceFeatures2()
+ {
+ SType = StructureType.PhysicalDeviceFeatures2
+ };
+
+ PhysicalDevicePrimitiveTopologyListRestartFeaturesEXT featuresPrimitiveTopologyListRestart = new PhysicalDevicePrimitiveTopologyListRestartFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDevicePrimitiveTopologyListRestartFeaturesExt
+ };
+
+ PhysicalDeviceRobustness2FeaturesEXT featuresRobustness2 = new PhysicalDeviceRobustness2FeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceRobustness2FeaturesExt
+ };
+
+ PhysicalDeviceShaderFloat16Int8FeaturesKHR featuresShaderInt8 = new PhysicalDeviceShaderFloat16Int8FeaturesKHR()
+ {
+ SType = StructureType.PhysicalDeviceShaderFloat16Int8Features
+ };
+
+ PhysicalDeviceCustomBorderColorFeaturesEXT featuresCustomBorderColor = new PhysicalDeviceCustomBorderColorFeaturesEXT()
+ {
+ SType = StructureType.PhysicalDeviceCustomBorderColorFeaturesExt
+ };
+
+ PhysicalDevicePortabilitySubsetFeaturesKHR featuresPortabilitySubset = new PhysicalDevicePortabilitySubsetFeaturesKHR()
+ {
+ SType = StructureType.PhysicalDevicePortabilitySubsetFeaturesKhr
+ };
+
+ if (_physicalDevice.IsDeviceExtensionPresent("VK_EXT_primitive_topology_list_restart"))
+ {
+ features2.PNext = &featuresPrimitiveTopologyListRestart;
+ }
+
+ if (_physicalDevice.IsDeviceExtensionPresent("VK_EXT_robustness2"))
+ {
+ featuresRobustness2.PNext = features2.PNext;
+ features2.PNext = &featuresRobustness2;
+ }
+
+ if (_physicalDevice.IsDeviceExtensionPresent("VK_KHR_shader_float16_int8"))
+ {
+ featuresShaderInt8.PNext = features2.PNext;
+ features2.PNext = &featuresShaderInt8;
+ }
+
+ if (_physicalDevice.IsDeviceExtensionPresent("VK_EXT_custom_border_color"))
+ {
+ featuresCustomBorderColor.PNext = features2.PNext;
+ features2.PNext = &featuresCustomBorderColor;
+ }
+
+ bool usePortability = _physicalDevice.IsDeviceExtensionPresent("VK_KHR_portability_subset");
+
+ if (usePortability)
+ {
+ propertiesPortabilitySubset.PNext = properties2.PNext;
+ properties2.PNext = &propertiesPortabilitySubset;
+
+ featuresPortabilitySubset.PNext = features2.PNext;
+ features2.PNext = &featuresPortabilitySubset;
+ }
+
+ Api.GetPhysicalDeviceProperties2(_physicalDevice.PhysicalDevice, &properties2);
+ Api.GetPhysicalDeviceFeatures2(_physicalDevice.PhysicalDevice, &features2);
+
+ var portabilityFlags = PortabilitySubsetFlags.None;
+ uint vertexBufferAlignment = 1;
+
+ if (usePortability)
+ {
+ vertexBufferAlignment = propertiesPortabilitySubset.MinVertexInputBindingStrideAlignment;
+
+ portabilityFlags |= featuresPortabilitySubset.TriangleFans ? 0 : PortabilitySubsetFlags.NoTriangleFans;
+ portabilityFlags |= featuresPortabilitySubset.PointPolygons ? 0 : PortabilitySubsetFlags.NoPointMode;
+ portabilityFlags |= featuresPortabilitySubset.ImageView2DOn3DImage ? 0 : PortabilitySubsetFlags.No3DImageView;
+ portabilityFlags |= featuresPortabilitySubset.SamplerMipLodBias ? 0 : PortabilitySubsetFlags.NoLodBias;
+ }
+
+ bool supportsCustomBorderColor = _physicalDevice.IsDeviceExtensionPresent("VK_EXT_custom_border_color") &&
+ featuresCustomBorderColor.CustomBorderColors &&
+ featuresCustomBorderColor.CustomBorderColorWithoutFormat;
+
+ ref var properties = ref properties2.Properties;
+
+ SampleCountFlags supportedSampleCounts =
+ properties.Limits.FramebufferColorSampleCounts &
+ properties.Limits.FramebufferDepthSampleCounts &
+ properties.Limits.FramebufferStencilSampleCounts;
+
+ Capabilities = new HardwareCapabilities(
+ _physicalDevice.IsDeviceExtensionPresent("VK_EXT_index_type_uint8"),
+ supportsCustomBorderColor,
+ supportsBlendOperationAdvanced,
+ propertiesBlendOperationAdvanced.AdvancedBlendCorrelatedOverlap,
+ propertiesBlendOperationAdvanced.AdvancedBlendNonPremultipliedSrcColor,
+ propertiesBlendOperationAdvanced.AdvancedBlendNonPremultipliedDstColor,
+ _physicalDevice.IsDeviceExtensionPresent(KhrDrawIndirectCount.ExtensionName),
+ _physicalDevice.IsDeviceExtensionPresent("VK_EXT_fragment_shader_interlock"),
+ _physicalDevice.IsDeviceExtensionPresent("VK_NV_geometry_shader_passthrough"),
+ supportsSubgroupSizeControl,
+ featuresShaderInt8.ShaderInt8,
+ _physicalDevice.IsDeviceExtensionPresent("VK_EXT_shader_stencil_export"),
+ _physicalDevice.IsDeviceExtensionPresent(ExtConditionalRendering.ExtensionName),
+ _physicalDevice.IsDeviceExtensionPresent(ExtExtendedDynamicState.ExtensionName),
+ features2.Features.MultiViewport,
+ featuresRobustness2.NullDescriptor || IsMoltenVk,
+ _physicalDevice.IsDeviceExtensionPresent(KhrPushDescriptor.ExtensionName),
+ featuresPrimitiveTopologyListRestart.PrimitiveTopologyListRestart,
+ featuresPrimitiveTopologyListRestart.PrimitiveTopologyPatchListRestart,
+ supportsTransformFeedback,
+ propertiesTransformFeedback.TransformFeedbackQueries,
+ features2.Features.OcclusionQueryPrecise,
+ _physicalDevice.PhysicalDeviceFeatures.PipelineStatisticsQuery,
+ _physicalDevice.PhysicalDeviceFeatures.GeometryShader,
+ _physicalDevice.IsDeviceExtensionPresent("VK_NV_viewport_array2"),
+ propertiesSubgroupSizeControl.MinSubgroupSize,
+ propertiesSubgroupSizeControl.MaxSubgroupSize,
+ propertiesSubgroupSizeControl.RequiredSubgroupSizeStages,
+ supportedSampleCounts,
+ portabilityFlags,
+ vertexBufferAlignment,
+ properties.Limits.SubTexelPrecisionBits);
+
+ IsSharedMemory = MemoryAllocator.IsDeviceMemoryShared(_physicalDevice);
+
+ MemoryAllocator = new MemoryAllocator(Api, _physicalDevice, _device);
+
+ CommandBufferPool = new CommandBufferPool(Api, _device, Queue, QueueLock, queueFamilyIndex);
+
+ DescriptorSetManager = new DescriptorSetManager(_device);
+
+ PipelineLayoutCache = new PipelineLayoutCache();
+
+ BackgroundResources = new BackgroundResources(this, _device);
+
+ BufferManager = new BufferManager(this, _device);
+
+ SyncManager = new SyncManager(this, _device);
+ _pipeline = new PipelineFull(this, _device);
+ _pipeline.Initialize();
+
+ HelperShader = new HelperShader(this, _device);
+
+ _counters = new Counters(this, _device, _pipeline);
+ }
+
+ /// <summary>
+ /// Creates the Vulkan instance, surface, logical device and queue, then loads device features
+ /// and creates the presentation window. The order is significant: instance -> debug messenger ->
+ /// surface -> physical device -> device -> swapchain extension -> queue -> features -> window.
+ /// </summary>
+ /// <param name="logLevel">Debug level forwarded to instance creation and the validation messenger</param>
+ private unsafe void SetupContext(GraphicsDebugLevel logLevel)
+ {
+ var api = Vk.GetApi();
+
+ Api = api;
+
+ _instance = VulkanInitialization.CreateInstance(api, logLevel, _getRequiredExtensions());
+ _debugMessenger = new VulkanDebugMessenger(api, _instance.Instance, logLevel);
+
+ if (api.TryGetInstanceExtension(_instance.Instance, out KhrSurface surfaceApi))
+ {
+ SurfaceApi = surfaceApi;
+ }
+
+ _surface = _getSurface(_instance.Instance, api);
+ _physicalDevice = VulkanInitialization.FindSuitablePhysicalDevice(api, _instance, _surface, _preferredGpuId);
+
+ var queueFamilyIndex = VulkanInitialization.FindSuitableQueueFamily(api, _physicalDevice, _surface, out uint maxQueueCount);
+
+ _device = VulkanInitialization.CreateDevice(api, _physicalDevice, queueFamilyIndex, maxQueueCount);
+
+ if (api.TryGetDeviceExtension(_instance.Instance, _device, out KhrSwapchain swapchainApi))
+ {
+ SwapchainApi = swapchainApi;
+ }
+
+ // Queue 0 of the selected family is shared by all submissions; QueueLock serializes access to it.
+ api.GetDeviceQueue(_device, queueFamilyIndex, 0, out var queue);
+ Queue = queue;
+ QueueLock = new object();
+
+ LoadFeatures(maxQueueCount, queueFamilyIndex);
+
+ _window = new Window(this, _surface, _physicalDevice.PhysicalDevice, _device);
+
+ _initialized = true;
+ }
+
+ /// <summary>Creates a GPU buffer of the given size, passing <paramref name="storageHint"/> to the buffer manager as an allocation hint.</summary>
+ public BufferHandle CreateBuffer(int size, BufferHandle storageHint)
+ {
+ return BufferManager.CreateWithHandle(this, size, BufferAllocationType.Auto, storageHint);
+ }
+
+ /// <summary>
+ /// Creates a shader program from the given stage sources. Compute programs and programs
+ /// carrying pipeline state get the state-aware collection; others get the plain one.
+ /// </summary>
+ public IProgram CreateProgram(ShaderSource[] sources, ShaderInfo info)
+ {
+ // A compute program is a single shader whose stage is Compute.
+ bool isCompute = sources.Length == 1 && sources[0].Stage == ShaderStage.Compute;
+
+ if (!info.State.HasValue && !isCompute)
+ {
+ return new ShaderCollection(this, _device, sources);
+ }
+
+ return new ShaderCollection(this, _device, sources, info.State ?? default, info.FromCache);
+ }
+
+ /// <summary>Creates a shader collection with a minimal pipeline layout; used for internal helper shaders.</summary>
+ internal ShaderCollection CreateProgramWithMinimalLayout(ShaderSource[] sources, SpecDescription[] specDescription = null)
+ {
+ return new ShaderCollection(this, _device, sources, specDescription: specDescription, isMinimal: true);
+ }
+
+ /// <summary>Creates a sampler object from the given GAL sampler parameters.</summary>
+ public ISampler CreateSampler(GAL.SamplerCreateInfo info)
+ {
+ return new SamplerHolder(this, _device, info);
+ }
+
+ /// <summary>Creates a texture; buffer targets become a TextureBuffer, everything else a sampled texture view.</summary>
+ public ITexture CreateTexture(TextureCreateInfo info, float scale)
+ {
+ bool isBufferTexture = info.Target == Target.TextureBuffer;
+
+ return isBufferTexture
+ ? new TextureBuffer(this, info, scale)
+ : (ITexture)CreateTextureView(info, scale);
+ }
+
+ /// <summary>Creates a backing storage for the texture and returns its default (level 0, layer 0) view.</summary>
+ internal TextureView CreateTextureView(TextureCreateInfo info, float scale)
+ {
+ // This should be disposed when all views are destroyed.
+ var storage = CreateTextureStorage(info, scale);
+ return storage.CreateView(info, 0, 0);
+ }
+
+ /// <summary>Allocates the image storage that backs one or more texture views.</summary>
+ internal TextureStorage CreateTextureStorage(TextureCreateInfo info, float scale)
+ {
+ return new TextureStorage(this, _device, info, scale);
+ }
+
+ /// <summary>Deletes a buffer previously created with <see cref="CreateBuffer"/>.</summary>
+ public void DeleteBuffer(BufferHandle buffer)
+ {
+ BufferManager.Delete(buffer);
+ }
+
+ /// <summary>Flushes any pending commands on the main pipeline, if it has been created.</summary>
+ internal void FlushAllCommands()
+ {
+ _pipeline?.FlushCommandsImpl();
+ }
+
+ /// <summary>Notifies the sync manager that a command flush has occurred.</summary>
+ internal void RegisterFlush()
+ {
+ SyncManager.RegisterFlush();
+ }
+
+ /// <summary>Reads back <paramref name="size"/> bytes of buffer data starting at <paramref name="offset"/>.</summary>
+ public PinnedSpan<byte> GetBufferData(BufferHandle buffer, int offset, int size)
+ {
+ return BufferManager.GetData(buffer, offset, size);
+ }
+
+ /// <summary>
+ /// Probes optimal-tiling format support and Vulkan 1.2 features, then builds the GAL
+ /// capability report consumed by the shader translator and texture manager.
+ /// </summary>
+ public unsafe Capabilities GetCapabilities()
+ {
+ // Feature set a format must support to be usable as a sampled/blittable texture format.
+ FormatFeatureFlags compressedFormatFeatureFlags =
+ FormatFeatureFlags.SampledImageBit |
+ FormatFeatureFlags.SampledImageFilterLinearBit |
+ FormatFeatureFlags.BlitSrcBit |
+ FormatFeatureFlags.TransferSrcBit |
+ FormatFeatureFlags.TransferDstBit;
+
+ bool supportsBc123CompressionFormat = FormatCapabilities.OptimalFormatsSupport(compressedFormatFeatureFlags,
+ GAL.Format.Bc1RgbaSrgb,
+ GAL.Format.Bc1RgbaUnorm,
+ GAL.Format.Bc2Srgb,
+ GAL.Format.Bc2Unorm,
+ GAL.Format.Bc3Srgb,
+ GAL.Format.Bc3Unorm);
+
+ bool supportsBc45CompressionFormat = FormatCapabilities.OptimalFormatsSupport(compressedFormatFeatureFlags,
+ GAL.Format.Bc4Snorm,
+ GAL.Format.Bc4Unorm,
+ GAL.Format.Bc5Snorm,
+ GAL.Format.Bc5Unorm);
+
+ bool supportsBc67CompressionFormat = FormatCapabilities.OptimalFormatsSupport(compressedFormatFeatureFlags,
+ GAL.Format.Bc6HSfloat,
+ GAL.Format.Bc6HUfloat,
+ GAL.Format.Bc7Srgb,
+ GAL.Format.Bc7Unorm);
+
+ bool supportsEtc2CompressionFormat = FormatCapabilities.OptimalFormatsSupport(compressedFormatFeatureFlags,
+ GAL.Format.Etc2RgbaSrgb,
+ GAL.Format.Etc2RgbaUnorm,
+ GAL.Format.Etc2RgbPtaSrgb,
+ GAL.Format.Etc2RgbPtaUnorm,
+ GAL.Format.Etc2RgbSrgb,
+ GAL.Format.Etc2RgbUnorm);
+
+ // NOTE(review): the same "compressed" flag set is reused for the non-compressed
+ // 5-bit-component and 4444 formats below; the name is slightly misleading.
+ bool supports5BitComponentFormat = FormatCapabilities.OptimalFormatsSupport(compressedFormatFeatureFlags,
+ GAL.Format.R5G6B5Unorm,
+ GAL.Format.R5G5B5A1Unorm,
+ GAL.Format.R5G5B5X1Unorm,
+ GAL.Format.B5G6R5Unorm,
+ GAL.Format.B5G5R5A1Unorm,
+ GAL.Format.A1B5G5R5Unorm);
+
+ bool supportsR4G4B4A4Format = FormatCapabilities.OptimalFormatsSupport(compressedFormatFeatureFlags,
+ GAL.Format.R4G4B4A4Unorm);
+
+ bool supportsAstcFormats = FormatCapabilities.OptimalFormatsSupport(compressedFormatFeatureFlags,
+ GAL.Format.Astc4x4Unorm,
+ GAL.Format.Astc5x4Unorm,
+ GAL.Format.Astc5x5Unorm,
+ GAL.Format.Astc6x5Unorm,
+ GAL.Format.Astc6x6Unorm,
+ GAL.Format.Astc8x5Unorm,
+ GAL.Format.Astc8x6Unorm,
+ GAL.Format.Astc8x8Unorm,
+ GAL.Format.Astc10x5Unorm,
+ GAL.Format.Astc10x6Unorm,
+ GAL.Format.Astc10x8Unorm,
+ GAL.Format.Astc10x10Unorm,
+ GAL.Format.Astc12x10Unorm,
+ GAL.Format.Astc12x12Unorm,
+ GAL.Format.Astc4x4Srgb,
+ GAL.Format.Astc5x4Srgb,
+ GAL.Format.Astc5x5Srgb,
+ GAL.Format.Astc6x5Srgb,
+ GAL.Format.Astc6x6Srgb,
+ GAL.Format.Astc8x5Srgb,
+ GAL.Format.Astc8x6Srgb,
+ GAL.Format.Astc8x8Srgb,
+ GAL.Format.Astc10x5Srgb,
+ GAL.Format.Astc10x6Srgb,
+ GAL.Format.Astc10x8Srgb,
+ GAL.Format.Astc10x10Srgb,
+ GAL.Format.Astc12x10Srgb,
+ GAL.Format.Astc12x12Srgb);
+
+ // Vulkan 1.2 features are chained to query ShaderOutputLayer/ShaderOutputViewportIndex.
+ PhysicalDeviceVulkan12Features featuresVk12 = new PhysicalDeviceVulkan12Features()
+ {
+ SType = StructureType.PhysicalDeviceVulkan12Features
+ };
+
+ PhysicalDeviceFeatures2 features2 = new PhysicalDeviceFeatures2()
+ {
+ SType = StructureType.PhysicalDeviceFeatures2,
+ PNext = &featuresVk12
+ };
+
+ Api.GetPhysicalDeviceFeatures2(_physicalDevice.PhysicalDevice, &features2);
+
+ var limits = _physicalDevice.PhysicalDeviceProperties.Limits;
+
+ return new Capabilities(
+ api: TargetApi.Vulkan,
+ GpuVendor,
+ hasFrontFacingBug: IsIntelWindows,
+ hasVectorIndexingBug: Vendor == Vendor.Qualcomm,
+ needsFragmentOutputSpecialization: IsMoltenVk,
+ reduceShaderPrecision: IsMoltenVk,
+ supportsAstcCompression: features2.Features.TextureCompressionAstcLdr && supportsAstcFormats,
+ supportsBc123Compression: supportsBc123CompressionFormat,
+ supportsBc45Compression: supportsBc45CompressionFormat,
+ supportsBc67Compression: supportsBc67CompressionFormat,
+ supportsEtc2Compression: supportsEtc2CompressionFormat,
+ supports3DTextureCompression: true,
+ supportsBgraFormat: true,
+ supportsR4G4Format: false,
+ supportsR4G4B4A4Format: supportsR4G4B4A4Format,
+ supportsSnormBufferTextureFormat: true,
+ supports5BitComponentFormat: supports5BitComponentFormat,
+ supportsBlendEquationAdvanced: Capabilities.SupportsBlendEquationAdvanced,
+ supportsFragmentShaderInterlock: Capabilities.SupportsFragmentShaderInterlock,
+ supportsFragmentShaderOrderingIntel: false,
+ supportsGeometryShader: Capabilities.SupportsGeometryShader,
+ supportsGeometryShaderPassthrough: Capabilities.SupportsGeometryShaderPassthrough,
+ supportsImageLoadFormatted: features2.Features.ShaderStorageImageReadWithoutFormat,
+ supportsLayerVertexTessellation: featuresVk12.ShaderOutputLayer,
+ supportsMismatchingViewFormat: true,
+ supportsCubemapView: !IsAmdGcn,
+ supportsNonConstantTextureOffset: false,
+ supportsShaderBallot: false,
+ supportsTextureShadowLod: false,
+ supportsViewportIndexVertexTessellation: featuresVk12.ShaderOutputViewportIndex,
+ supportsViewportMask: Capabilities.SupportsViewportArray2,
+ supportsViewportSwizzle: false,
+ supportsIndirectParameters: true,
+ maximumUniformBuffersPerStage: Constants.MaxUniformBuffersPerStage,
+ maximumStorageBuffersPerStage: Constants.MaxStorageBuffersPerStage,
+ maximumTexturesPerStage: Constants.MaxTexturesPerStage,
+ maximumImagesPerStage: Constants.MaxImagesPerStage,
+ maximumComputeSharedMemorySize: (int)limits.MaxComputeSharedMemorySize,
+ maximumSupportedAnisotropy: (int)limits.MaxSamplerAnisotropy,
+ storageBufferOffsetAlignment: (int)limits.MinStorageBufferOffsetAlignment,
+ gatherBiasPrecision: IsIntelWindows || IsAmdWindows ? (int)Capabilities.SubTexelPrecisionBits : 0);
+ }
+
+ /// <summary>Returns the GPU vendor and renderer names gathered by <see cref="PrintGpuInformation"/>.</summary>
+ public HardwareInfo GetHardwareInfo()
+ {
+ return new HardwareInfo(GpuVendor, GpuRenderer);
+ }
+
+ /// <summary>Enumerates Vulkan-capable physical devices; returns an empty array when Vulkan is unavailable.</summary>
+ public static DeviceInfo[] GetPhysicalDevices()
+ {
+ try
+ {
+ return VulkanInitialization.GetSuitablePhysicalDevices(Vk.GetApi());
+ }
+ catch (Exception)
+ {
+ // If we got an exception here, Vulkan is most likely not supported.
+ return Array.Empty<DeviceInfo>();
+ }
+ }
+
+ /// <summary>Formats a packed Vulkan version number (10-bit major, 10-bit minor, 12-bit patch) as "major.minor.patch".</summary>
+ private static string ParseStandardVulkanVersion(uint version)
+ {
+ uint major = version >> 22;
+ uint minor = (version >> 12) & 0x3FF;
+ uint patch = version & 0xFFF;
+
+ return $"{major}.{minor}.{patch}";
+ }
+
+ /// <summary>
+ /// Formats the driver version for display. NVIDIA packs its version as
+ /// 10.8.8.6 bits instead of the standard Vulkan layout, so it gets special handling.
+ /// </summary>
+ private static string ParseDriverVersion(ref PhysicalDeviceProperties properties)
+ {
+ uint raw = properties.DriverVersion;
+
+ // NVIDIA (vendor ID 0x10DE) deviates from the standard version encoding.
+ if (properties.VendorID == 0x10DE)
+ {
+ return $"{(raw >> 22) & 0x3FF}.{(raw >> 14) & 0xFF}.{(raw >> 6) & 0xFF}.{raw & 0x3F}";
+ }
+
+ return ParseStandardVulkanVersion(raw);
+ }
+
+ /// <summary>
+ /// Derives vendor/renderer/version strings and vendor-specific flags (AMD GCN, TBDR, etc.)
+ /// from the physical device properties, then logs them.
+ /// </summary>
+ private unsafe void PrintGpuInformation()
+ {
+ var properties = _physicalDevice.PhysicalDeviceProperties;
+
+ string vendorName = VendorUtils.GetNameFromId(properties.VendorID);
+
+ Vendor = VendorUtils.FromId(properties.VendorID);
+
+ IsAmdWindows = Vendor == Vendor.Amd && OperatingSystem.IsWindows();
+ IsIntelWindows = Vendor == Vendor.Intel && OperatingSystem.IsWindows();
+ // Tile-based deferred renderers: MoltenVK (Apple) and the mobile GPU vendors.
+ IsTBDR = IsMoltenVk ||
+ Vendor == Vendor.Qualcomm ||
+ Vendor == Vendor.ARM ||
+ Vendor == Vendor.Broadcom ||
+ Vendor == Vendor.ImgTec;
+
+ GpuVendor = vendorName;
+ GpuRenderer = Marshal.PtrToStringAnsi((IntPtr)properties.DeviceName);
+ GpuVersion = $"Vulkan v{ParseStandardVulkanVersion(properties.ApiVersion)}, Driver v{ParseDriverVersion(ref properties)}";
+
+ // GCN detection is by renderer-name pattern; MoltenVK never exposes GCN hardware quirks.
+ IsAmdGcn = !IsMoltenVk && Vendor == Vendor.Amd && VendorUtils.AmdGcnRegex().IsMatch(GpuRenderer);
+
+ Logger.Notice.Print(LogClass.Gpu, $"{GpuVendor} {GpuRenderer} ({GpuVersion})");
+ }
+
+ /// <summary>Remaps guest topologies that Vulkan (or the current device) cannot draw directly onto supported ones.</summary>
+ public GAL.PrimitiveTopology TopologyRemap(GAL.PrimitiveTopology topology)
+ {
+ return topology switch
+ {
+ GAL.PrimitiveTopology.Quads => GAL.PrimitiveTopology.Triangles,
+ GAL.PrimitiveTopology.QuadStrip => GAL.PrimitiveTopology.TriangleStrip,
+ GAL.PrimitiveTopology.TriangleFan => Capabilities.PortabilitySubset.HasFlag(PortabilitySubsetFlags.NoTriangleFans) ? GAL.PrimitiveTopology.Triangles : topology,
+ _ => topology
+ };
+ }
+
+ /// <summary>Returns true when the given topology requires index conversion before it can be drawn.</summary>
+ public bool TopologyUnsupported(GAL.PrimitiveTopology topology)
+ {
+ if (topology == GAL.PrimitiveTopology.Quads)
+ {
+ return true;
+ }
+
+ // Triangle fans are only unsupported on portability-subset devices that lack them.
+ return topology == GAL.PrimitiveTopology.TriangleFan &&
+ Capabilities.PortabilitySubset.HasFlag(PortabilitySubsetFlags.NoTriangleFans);
+ }
+
+ /// <summary>Initializes the renderer: sets up the Vulkan context and logs GPU information.</summary>
+ public void Initialize(GraphicsDebugLevel logLevel)
+ {
+ SetupContext(logLevel);
+
+ PrintGpuInformation();
+ }
+
+ /// <summary>
+ /// Determines whether vertex buffer strides must be padded to an alignment,
+ /// either due to a portability-subset requirement or driver strictness.
+ /// </summary>
+ /// <param name="attrScalarAlignment">Largest scalar component size among the bound attributes</param>
+ /// <param name="alignment">Required stride alignment, or 1 when none is needed</param>
+ /// <returns>True when strides must be aligned, false otherwise</returns>
+ internal bool NeedsVertexBufferAlignment(int attrScalarAlignment, out int alignment)
+ {
+ uint requiredAlignment = Capabilities.VertexBufferAlignment;
+
+ if (requiredAlignment > 1)
+ {
+ alignment = (int)requiredAlignment;
+
+ return true;
+ }
+
+ if (Vendor != Vendor.Nvidia)
+ {
+ // Vulkan requires that vertex attributes are globally aligned by their component size,
+ // so buffer strides that don't divide by the largest scalar element are invalid.
+ // Guest applications do this, NVIDIA GPUs are OK with it, others are not.
+ alignment = attrScalarAlignment;
+
+ return true;
+ }
+
+ alignment = 1;
+
+ return false;
+ }
+
+ /// <summary>Called once per frame before rendering; lets the sync manager clean up expired entries.</summary>
+ public void PreFrame()
+ {
+ SyncManager.Cleanup();
+ }
+
+ /// <summary>Queues a counter report of the given type; <paramref name="resultHandler"/> receives the value when it resolves.</summary>
+ public ICounterEvent ReportCounter(CounterType type, EventHandler<ulong> resultHandler, bool hostReserved)
+ {
+ return _counters.QueueReport(type, resultHandler, hostReserved);
+ }
+
+ /// <summary>Queues a reset of the given counter type.</summary>
+ public void ResetCounter(CounterType type)
+ {
+ _counters.QueueReset(type);
+ }
+
+ /// <summary>Writes data into a buffer; may end the current render pass to insert the copy.</summary>
+ public void SetBufferData(BufferHandle buffer, int offset, ReadOnlySpan<byte> data)
+ {
+ BufferManager.SetData(buffer, offset, data, _pipeline.CurrentCommandBuffer, _pipeline.EndRenderPass);
+ }
+
+ /// <summary>Polls pending counter queries and delivers any results that became available.</summary>
+ public void UpdateCounters()
+ {
+ _counters.Update();
+ }
+
+ /// <summary>Resets the pool of counter query objects.</summary>
+ public void ResetCounterPool()
+ {
+ _counters.ResetCounterPool();
+ }
+
+ /// <summary>Pre-resets the next <paramref name="count"/> counters on the given command buffer; no-op before counters exist.</summary>
+ public void ResetFutureCounters(CommandBuffer cmd, int count)
+ {
+ _counters?.ResetFutureCounters(cmd, count);
+ }
+
+ /// <summary>Runs the action inline; Vulkan needs no separate background context, so <paramref name="alwaysBackground"/> is ignored.</summary>
+ public void BackgroundContextAction(Action action, bool alwaysBackground = false)
+ {
+ action();
+ }
+
+ /// <summary>Creates a sync point with the given id; <paramref name="strict"/> controls how eagerly it must be signalled.</summary>
+ public void CreateSync(ulong id, bool strict)
+ {
+ SyncManager.Create(id, strict);
+ }
+
+ /// <summary>Not supported by the Vulkan backend; shader caching uses a different path.</summary>
+ public IProgram LoadProgramBinary(byte[] programBinary, bool isFragment, ShaderInfo info)
+ {
+ throw new NotImplementedException();
+ }
+
+ /// <summary>Blocks until the sync point with the given id has been reached.</summary>
+ public void WaitSync(ulong id)
+ {
+ SyncManager.Wait(id);
+ }
+
+ /// <summary>Returns the id of the most recently reached sync point.</summary>
+ public ulong GetCurrentSync()
+ {
+ return SyncManager.GetCurrent();
+ }
+
+ /// <summary>Stores the callback used to run work on the GPU thread from other threads.</summary>
+ public void SetInterruptAction(Action<Action> interruptAction)
+ {
+ InterruptAction = interruptAction;
+ }
+
+ /// <summary>Requests that the next presented frame be captured as a screenshot.</summary>
+ public void Screenshot()
+ {
+ _window.ScreenCaptureRequested = true;
+ }
+
+ /// <summary>Raises the <c>ScreenCaptured</c> event with the captured frame data.</summary>
+ public void OnScreenCaptured(ScreenCaptureImageInfo bitmap)
+ {
+ ScreenCaptured?.Invoke(this, bitmap);
+ }
+
+ /// <summary>
+ /// Tears down the renderer. Order matters: pooled resources and holders are released first,
+ /// then tracked shaders/textures/samplers, then the surface and device, with the instance last.
+ /// No-op when initialization never completed.
+ /// </summary>
+ public unsafe void Dispose()
+ {
+ if (!_initialized)
+ {
+ return;
+ }
+
+ CommandBufferPool.Dispose();
+ BackgroundResources.Dispose();
+ _counters.Dispose();
+ _window.Dispose();
+ HelperShader.Dispose();
+ _pipeline.Dispose();
+ BufferManager.Dispose();
+ DescriptorSetManager.Dispose();
+ PipelineLayoutCache.Dispose();
+
+ MemoryAllocator.Dispose();
+
+ foreach (var shader in Shaders)
+ {
+ shader.Dispose();
+ }
+
+ foreach (var texture in Textures)
+ {
+ texture.Release();
+ }
+
+ foreach (var sampler in Samplers)
+ {
+ sampler.Dispose();
+ }
+
+ SurfaceApi.DestroySurface(_instance.Instance, _surface, null);
+
+ Api.DestroyDevice(_device, null);
+
+ _debugMessenger.Dispose();
+
+ // Last step destroy the instance
+ _instance.Dispose();
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Graphics.Vulkan/Window.cs b/src/Ryujinx.Graphics.Vulkan/Window.cs
new file mode 100644
index 00000000..075d1b30
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/Window.cs
@@ -0,0 +1,603 @@
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Vulkan.Effects;
+using Silk.NET.Vulkan;
+using Silk.NET.Vulkan.Extensions.KHR;
+using System;
+using System.Linq;
+using VkFormat = Silk.NET.Vulkan.Format;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ class Window : WindowBase, IDisposable
+ {
+ private const int SurfaceWidth = 1280;
+ private const int SurfaceHeight = 720;
+
+ private readonly VulkanRenderer _gd;
+ private readonly SurfaceKHR _surface;
+ private readonly PhysicalDevice _physicalDevice;
+ private readonly Device _device;
+ private SwapchainKHR _swapchain;
+
+ private Image[] _swapchainImages;
+ private Auto<DisposableImageView>[] _swapchainImageViews;
+
+ private Semaphore _imageAvailableSemaphore;
+ private Semaphore _renderFinishedSemaphore;
+
+ private int _width;
+ private int _height;
+ private bool _vsyncEnabled;
+ private bool _vsyncModeChanged;
+ private VkFormat _format;
+ private AntiAliasing _currentAntiAliasing;
+ private bool _updateEffect;
+ private IPostProcessingEffect _effect;
+ private IScalingFilter _scalingFilter;
+ private bool _isLinear;
+ private float _scalingFilterLevel;
+ private bool _updateScalingFilter;
+ private ScalingFilter _currentScalingFilter;
+
+ /// <summary>Creates the swapchain for the given surface and the semaphores used to order acquire/present.</summary>
+ public unsafe Window(VulkanRenderer gd, SurfaceKHR surface, PhysicalDevice physicalDevice, Device device)
+ {
+ _gd = gd;
+ _physicalDevice = physicalDevice;
+ _device = device;
+ _surface = surface;
+
+ CreateSwapchain();
+
+ var semaphoreCreateInfo = new SemaphoreCreateInfo()
+ {
+ SType = StructureType.SemaphoreCreateInfo
+ };
+
+ // One semaphore signals image acquisition, the other signals render completion before present.
+ gd.Api.CreateSemaphore(device, semaphoreCreateInfo, null, out _imageAvailableSemaphore).ThrowOnError();
+ gd.Api.CreateSemaphore(device, semaphoreCreateInfo, null, out _renderFinishedSemaphore).ThrowOnError();
+ }
+
+ /// <summary>Destroys the current swapchain (after waiting for the device to idle) and creates a new one.</summary>
+ private void RecreateSwapchain()
+ {
+ var oldSwapchain = _swapchain;
+ _vsyncModeChanged = false;
+
+ for (int i = 0; i < _swapchainImageViews.Length; i++)
+ {
+ _swapchainImageViews[i].Dispose();
+ }
+
+ // Destroy old Swapchain.
+ _gd.Api.DeviceWaitIdle(_device);
+ _gd.SwapchainApi.DestroySwapchain(_device, oldSwapchain, Span<AllocationCallbacks>.Empty);
+
+ CreateSwapchain();
+ }
+
+ /// <summary>
+ /// Queries surface capabilities, formats and present modes, then creates the swapchain
+ /// and one image view per swapchain image. Updates the cached width/height/format.
+ /// </summary>
+ private unsafe void CreateSwapchain()
+ {
+ _gd.SurfaceApi.GetPhysicalDeviceSurfaceCapabilities(_physicalDevice, _surface, out var capabilities);
+
+ uint surfaceFormatsCount;
+
+ // Two-call enumeration pattern: first get the count, then fill the array.
+ _gd.SurfaceApi.GetPhysicalDeviceSurfaceFormats(_physicalDevice, _surface, &surfaceFormatsCount, null);
+
+ var surfaceFormats = new SurfaceFormatKHR[surfaceFormatsCount];
+
+ fixed (SurfaceFormatKHR* pSurfaceFormats = surfaceFormats)
+ {
+ _gd.SurfaceApi.GetPhysicalDeviceSurfaceFormats(_physicalDevice, _surface, &surfaceFormatsCount, pSurfaceFormats);
+ }
+
+ uint presentModesCount;
+
+ _gd.SurfaceApi.GetPhysicalDeviceSurfacePresentModes(_physicalDevice, _surface, &presentModesCount, null);
+
+ var presentModes = new PresentModeKHR[presentModesCount];
+
+ fixed (PresentModeKHR* pPresentModes = presentModes)
+ {
+ _gd.SurfaceApi.GetPhysicalDeviceSurfacePresentModes(_physicalDevice, _surface, &presentModesCount, pPresentModes);
+ }
+
+ // Request one image above the minimum to reduce the chance of waiting on the driver.
+ uint imageCount = capabilities.MinImageCount + 1;
+ if (capabilities.MaxImageCount > 0 && imageCount > capabilities.MaxImageCount)
+ {
+ imageCount = capabilities.MaxImageCount;
+ }
+
+ var surfaceFormat = ChooseSwapSurfaceFormat(surfaceFormats);
+
+ var extent = ChooseSwapExtent(capabilities);
+
+ _width = (int)extent.Width;
+ _height = (int)extent.Height;
+ _format = surfaceFormat.Format;
+
+ var oldSwapchain = _swapchain;
+
+ var swapchainCreateInfo = new SwapchainCreateInfoKHR()
+ {
+ SType = StructureType.SwapchainCreateInfoKhr,
+ Surface = _surface,
+ MinImageCount = imageCount,
+ ImageFormat = surfaceFormat.Format,
+ ImageColorSpace = surfaceFormat.ColorSpace,
+ ImageExtent = extent,
+ ImageUsage = ImageUsageFlags.ColorAttachmentBit | ImageUsageFlags.TransferDstBit | ImageUsageFlags.StorageBit,
+ ImageSharingMode = SharingMode.Exclusive,
+ ImageArrayLayers = 1,
+ PreTransform = capabilities.CurrentTransform,
+ CompositeAlpha = ChooseCompositeAlpha(capabilities.SupportedCompositeAlpha),
+ PresentMode = ChooseSwapPresentMode(presentModes, _vsyncEnabled),
+ Clipped = true
+ };
+
+ _gd.SwapchainApi.CreateSwapchain(_device, swapchainCreateInfo, null, out _swapchain).ThrowOnError();
+
+ _gd.SwapchainApi.GetSwapchainImages(_device, _swapchain, &imageCount, null);
+
+ _swapchainImages = new Image[imageCount];
+
+ fixed (Image* pSwapchainImages = _swapchainImages)
+ {
+ _gd.SwapchainApi.GetSwapchainImages(_device, _swapchain, &imageCount, pSwapchainImages);
+ }
+
+ _swapchainImageViews = new Auto<DisposableImageView>[imageCount];
+
+ for (int i = 0; i < _swapchainImageViews.Length; i++)
+ {
+ _swapchainImageViews[i] = CreateSwapchainImageView(_swapchainImages[i], surfaceFormat.Format);
+ }
+ }
+
+ /// <summary>Creates a 2D color image view over a swapchain image, wrapped for reference-counted disposal.</summary>
+ private unsafe Auto<DisposableImageView> CreateSwapchainImageView(Image swapchainImage, VkFormat format)
+ {
+ // Identity swizzle (R,G,B,A mapped to themselves).
+ var componentMapping = new ComponentMapping(
+ ComponentSwizzle.R,
+ ComponentSwizzle.G,
+ ComponentSwizzle.B,
+ ComponentSwizzle.A);
+
+ var aspectFlags = ImageAspectFlags.ColorBit;
+
+ // Single mip level, single array layer.
+ var subresourceRange = new ImageSubresourceRange(aspectFlags, 0, 1, 0, 1);
+
+ var imageCreateInfo = new ImageViewCreateInfo()
+ {
+ SType = StructureType.ImageViewCreateInfo,
+ Image = swapchainImage,
+ ViewType = ImageViewType.Type2D,
+ Format = format,
+ Components = componentMapping,
+ SubresourceRange = subresourceRange
+ };
+
+ _gd.Api.CreateImageView(_device, imageCreateInfo, null, out var imageView).ThrowOnError();
+ return new Auto<DisposableImageView>(new DisposableImageView(_gd.Api, _device, imageView));
+ }
+
+ /// <summary>
+ /// Selects the swapchain surface format: prefers B8G8R8A8Unorm with sRGB nonlinear
+ /// color space, falling back to the first advertised format.
+ /// </summary>
+ private static SurfaceFormatKHR ChooseSwapSurfaceFormat(SurfaceFormatKHR[] availableFormats)
+ {
+ // A single Undefined entry means the surface imposes no restriction; use our preference.
+ if (availableFormats.Length == 1 && availableFormats[0].Format == VkFormat.Undefined)
+ {
+ return new SurfaceFormatKHR(VkFormat.B8G8R8A8Unorm, ColorSpaceKHR.PaceSrgbNonlinearKhr);
+ }
+
+ for (int index = 0; index < availableFormats.Length; index++)
+ {
+ var candidate = availableFormats[index];
+
+ if (candidate.Format == VkFormat.B8G8R8A8Unorm && candidate.ColorSpace == ColorSpaceKHR.PaceSrgbNonlinearKhr)
+ {
+ return candidate;
+ }
+ }
+
+ return availableFormats[0];
+ }
+
+ /// <summary>Picks a composite alpha mode by preference: Opaque, then PreMultiplied, then Inherit.</summary>
+ private static CompositeAlphaFlagsKHR ChooseCompositeAlpha(CompositeAlphaFlagsKHR supportedFlags)
+ {
+ if (supportedFlags.HasFlag(CompositeAlphaFlagsKHR.OpaqueBitKhr))
+ {
+ return CompositeAlphaFlagsKHR.OpaqueBitKhr;
+ }
+
+ if (supportedFlags.HasFlag(CompositeAlphaFlagsKHR.PreMultipliedBitKhr))
+ {
+ return CompositeAlphaFlagsKHR.PreMultipliedBitKhr;
+ }
+
+ return CompositeAlphaFlagsKHR.InheritBitKhr;
+ }
+
+ /// <summary>
+ /// Picks the present mode: Immediate when vsync is off and available, otherwise Mailbox,
+ /// falling back to Fifo which every implementation must support.
+ /// </summary>
+ private static PresentModeKHR ChooseSwapPresentMode(PresentModeKHR[] availablePresentModes, bool vsyncEnabled)
+ {
+ if (!vsyncEnabled && availablePresentModes.Contains(PresentModeKHR.ImmediateKhr))
+ {
+ return PresentModeKHR.ImmediateKhr;
+ }
+
+ if (availablePresentModes.Contains(PresentModeKHR.MailboxKhr))
+ {
+ return PresentModeKHR.MailboxKhr;
+ }
+
+ return PresentModeKHR.FifoKhr;
+ }
+
+ /// <summary>
+ /// Determines the swapchain extent: uses the surface's current extent when fixed,
+ /// otherwise clamps the default surface size into the allowed min/max range.
+ /// </summary>
+ public static Extent2D ChooseSwapExtent(SurfaceCapabilitiesKHR capabilities)
+ {
+ // uint.MaxValue means the surface size is determined by the swapchain, not the window.
+ if (capabilities.CurrentExtent.Width != uint.MaxValue)
+ {
+ return capabilities.CurrentExtent;
+ }
+
+ uint clampedWidth = Math.Max(capabilities.MinImageExtent.Width, Math.Min(capabilities.MaxImageExtent.Width, SurfaceWidth));
+ uint clampedHeight = Math.Max(capabilities.MinImageExtent.Height, Math.Min(capabilities.MaxImageExtent.Height, SurfaceHeight));
+
+ return new Extent2D(clampedWidth, clampedHeight);
+ }
+
+ /// <summary>
+ /// Presents the given texture to the swapchain: acquires an image (recreating the swapchain
+ /// when out of date, suboptimal or after a vsync mode change), applies post-processing and
+ /// scaling effects, blits the frame, and queues the present.
+ /// NOTE(review): <paramref name="swapBuffersCallback"/> is not used by this backend.
+ /// </summary>
+ public unsafe override void Present(ITexture texture, ImageCrop crop, Action swapBuffersCallback)
+ {
+ _gd.PipelineInternal.AutoFlush.Present();
+
+ uint nextImage = 0;
+
+ // Keep trying until an image is acquired; swapchain recreation retries the acquire.
+ while (true)
+ {
+ var acquireResult = _gd.SwapchainApi.AcquireNextImage(
+ _device,
+ _swapchain,
+ ulong.MaxValue,
+ _imageAvailableSemaphore,
+ new Fence(),
+ ref nextImage);
+
+ if (acquireResult == Result.ErrorOutOfDateKhr ||
+ acquireResult == Result.SuboptimalKhr ||
+ _vsyncModeChanged)
+ {
+ RecreateSwapchain();
+ }
+ else
+ {
+ acquireResult.ThrowOnError();
+ break;
+ }
+ }
+
+ var swapchainImage = _swapchainImages[nextImage];
+
+ _gd.FlushAllCommands();
+
+ var cbs = _gd.CommandBufferPool.Rent();
+
+ // Move the swapchain image into General layout so it can be written by blit/compute.
+ Transition(
+ cbs.CommandBuffer,
+ swapchainImage,
+ 0,
+ AccessFlags.TransferWriteBit,
+ ImageLayout.Undefined,
+ ImageLayout.General);
+
+ var view = (TextureView)texture;
+
+ UpdateEffect();
+
+ if (_effect != null)
+ {
+ view = _effect.Run(view, cbs, _width, _height);
+ }
+
+ // Compute the source rectangle from the crop, falling back to the full view.
+ int srcX0, srcX1, srcY0, srcY1;
+ float scale = view.ScaleFactor;
+
+ if (crop.Left == 0 && crop.Right == 0)
+ {
+ srcX0 = 0;
+ srcX1 = (int)(view.Width / scale);
+ }
+ else
+ {
+ srcX0 = crop.Left;
+ srcX1 = crop.Right;
+ }
+
+ if (crop.Top == 0 && crop.Bottom == 0)
+ {
+ srcY0 = 0;
+ srcY1 = (int)(view.Height / scale);
+ }
+ else
+ {
+ srcY0 = crop.Top;
+ srcY1 = crop.Bottom;
+ }
+
+ if (scale != 1f)
+ {
+ srcX0 = (int)(srcX0 * scale);
+ srcY0 = (int)(srcY0 * scale);
+ srcX1 = (int)Math.Ceiling(srcX1 * scale);
+ srcY1 = (int)Math.Ceiling(srcY1 * scale);
+ }
+
+ if (ScreenCaptureRequested)
+ {
+ // When an effect ran, its commands must complete before the capture can read the view.
+ if (_effect != null)
+ {
+ _gd.CommandBufferPool.Return(
+ cbs,
+ null,
+ stackalloc[] { PipelineStageFlags.ColorAttachmentOutputBit },
+ null);
+ _gd.FlushAllCommands();
+ cbs.GetFence().Wait();
+ cbs = _gd.CommandBufferPool.Rent();
+ }
+
+ CaptureFrame(view, srcX0, srcY0, srcX1 - srcX0, srcY1 - srcY0, view.Info.Format.IsBgr(), crop.FlipX, crop.FlipY);
+
+ ScreenCaptureRequested = false;
+ }
+
+ // Letterbox/pillarbox to preserve aspect ratio unless the crop requests stretching.
+ float ratioX = crop.IsStretched ? 1.0f : MathF.Min(1.0f, _height * crop.AspectRatioX / (_width * crop.AspectRatioY));
+ float ratioY = crop.IsStretched ? 1.0f : MathF.Min(1.0f, _width * crop.AspectRatioY / (_height * crop.AspectRatioX));
+
+ int dstWidth = (int)(_width * ratioX);
+ int dstHeight = (int)(_height * ratioY);
+
+ int dstPaddingX = (_width - dstWidth) / 2;
+ int dstPaddingY = (_height - dstHeight) / 2;
+
+ int dstX0 = crop.FlipX ? _width - dstPaddingX : dstPaddingX;
+ int dstX1 = crop.FlipX ? dstPaddingX : _width - dstPaddingX;
+
+ // NOTE(review): the Y endpoints are computed inverted relative to X and are passed
+ // swapped to BlitColor below, which appears to implement the vertical flip — confirm
+ // against BlitColor's/ScalingFilter.Run's extent conventions before changing.
+ int dstY0 = crop.FlipY ? dstPaddingY : _height - dstPaddingY;
+ int dstY1 = crop.FlipY ? _height - dstPaddingY : dstPaddingY;
+
+ if (_scalingFilter != null)
+ {
+ _scalingFilter.Run(
+ view,
+ cbs,
+ _swapchainImageViews[nextImage],
+ _format,
+ _width,
+ _height,
+ new Extents2D(srcX0, srcY0, srcX1, srcY1),
+ new Extents2D(dstX0, dstY0, dstX1, dstY1)
+ );
+ }
+ else
+ {
+ _gd.HelperShader.BlitColor(
+ _gd,
+ cbs,
+ view,
+ _swapchainImageViews[nextImage],
+ _width,
+ _height,
+ 1,
+ _format,
+ false,
+ new Extents2D(srcX0, srcY0, srcX1, srcY1),
+ new Extents2D(dstX0, dstY1, dstX1, dstY0),
+ _isLinear,
+ true);
+ }
+
+ // Transition to the present layout before handing the image back to the presentation engine.
+ Transition(
+ cbs.CommandBuffer,
+ swapchainImage,
+ 0,
+ 0,
+ ImageLayout.General,
+ ImageLayout.PresentSrcKhr);
+
+ _gd.CommandBufferPool.Return(
+ cbs,
+ stackalloc[] { _imageAvailableSemaphore },
+ stackalloc[] { PipelineStageFlags.ColorAttachmentOutputBit },
+ stackalloc[] { _renderFinishedSemaphore });
+
+ // TODO: Present queue.
+ var semaphore = _renderFinishedSemaphore;
+ var swapchain = _swapchain;
+
+ Result result;
+
+ var presentInfo = new PresentInfoKHR()
+ {
+ SType = StructureType.PresentInfoKhr,
+ WaitSemaphoreCount = 1,
+ PWaitSemaphores = &semaphore,
+ SwapchainCount = 1,
+ PSwapchains = &swapchain,
+ PImageIndices = &nextImage,
+ PResults = &result
+ };
+
+ lock (_gd.QueueLock)
+ {
+ _gd.SwapchainApi.QueuePresent(_gd.Queue, presentInfo);
+ }
+ }
+
+ /// <summary>Selects the anti-aliasing effect; the actual (re)creation is deferred to the next present.</summary>
+ public override void SetAntiAliasing(AntiAliasing effect)
+ {
+ if (_currentAntiAliasing == effect && _effect != null)
+ {
+ return;
+ }
+
+ _currentAntiAliasing = effect;
+
+ _updateEffect = true;
+ }
+
+ /// <summary>Selects the upscaling filter; the actual (re)creation is deferred to the next present.</summary>
+ public override void SetScalingFilter(ScalingFilter type)
+ {
+ if (_currentScalingFilter == type && _effect != null)
+ {
+ return;
+ }
+
+ _currentScalingFilter = type;
+
+ _updateScalingFilter = true;
+ }
+
+ /// <summary>
+ /// Applies pending anti-aliasing and scaling filter changes, creating, reconfiguring
+ /// or disposing the effect objects as needed. Called from Present before drawing.
+ /// </summary>
+ private void UpdateEffect()
+ {
+ if (_updateEffect)
+ {
+ _updateEffect = false;
+
+ switch (_currentAntiAliasing)
+ {
+ case AntiAliasing.Fxaa:
+ _effect?.Dispose();
+ _effect = new FxaaPostProcessingEffect(_gd, _device);
+ break;
+ case AntiAliasing.None:
+ _effect?.Dispose();
+ _effect = null;
+ break;
+ case AntiAliasing.SmaaLow:
+ case AntiAliasing.SmaaMedium:
+ case AntiAliasing.SmaaHigh:
+ case AntiAliasing.SmaaUltra:
+ // SMAA quality level is derived from the enum offset; reuse the effect when possible.
+ var quality = _currentAntiAliasing - AntiAliasing.SmaaLow;
+ if (_effect is SmaaPostProcessingEffect smaa)
+ {
+ smaa.Quality = quality;
+ }
+ else
+ {
+ _effect?.Dispose();
+ _effect = new SmaaPostProcessingEffect(_gd, _device, quality);
+ }
+ break;
+ }
+ }
+
+ if (_updateScalingFilter)
+ {
+ _updateScalingFilter = false;
+
+ switch (_currentScalingFilter)
+ {
+ case ScalingFilter.Bilinear:
+ case ScalingFilter.Nearest:
+ // Bilinear/Nearest use the plain blit path; only the sampler mode differs.
+ _scalingFilter?.Dispose();
+ _scalingFilter = null;
+ _isLinear = _currentScalingFilter == ScalingFilter.Bilinear;
+ break;
+ case ScalingFilter.Fsr:
+ if (_scalingFilter is not FsrScalingFilter)
+ {
+ _scalingFilter?.Dispose();
+ _scalingFilter = new FsrScalingFilter(_gd, _device);
+ }
+
+ _scalingFilter.Level = _scalingFilterLevel;
+ break;
+ }
+ }
+ }
+
+ /// <summary>Stores the scaling filter level and schedules a filter update for the next present.</summary>
+ public override void SetScalingFilterLevel(float level)
+ {
+ _scalingFilterLevel = level;
+ _updateScalingFilter = true;
+ }
+
+ /// <summary>
+ /// Records an image layout transition barrier for the full color subresource of the image.
+ /// </summary>
+ /// <param name="commandBuffer">Command buffer to record the barrier into</param>
+ /// <param name="image">Image being transitioned</param>
+ /// <param name="srcAccess">Accesses that must complete before the transition</param>
+ /// <param name="dstAccess">Accesses that wait on the transition</param>
+ /// <param name="srcLayout">Current layout of the image</param>
+ /// <param name="dstLayout">Layout to transition the image to</param>
+ private unsafe void Transition(
+ CommandBuffer commandBuffer,
+ Image image,
+ AccessFlags srcAccess,
+ AccessFlags dstAccess,
+ ImageLayout srcLayout,
+ ImageLayout dstLayout)
+ {
+ var subresourceRange = new ImageSubresourceRange(ImageAspectFlags.ColorBit, 0, 1, 0, 1);
+
+ var barrier = new ImageMemoryBarrier()
+ {
+ SType = StructureType.ImageMemoryBarrier,
+ SrcAccessMask = srcAccess,
+ DstAccessMask = dstAccess,
+ OldLayout = srcLayout,
+ NewLayout = dstLayout,
+ SrcQueueFamilyIndex = Vk.QueueFamilyIgnored,
+ DstQueueFamilyIndex = Vk.QueueFamilyIgnored,
+ Image = image,
+ SubresourceRange = subresourceRange
+ };
+
+ // Conservative stage scope: blocks all later commands until the transition completes.
+ _gd.Api.CmdPipelineBarrier(
+ commandBuffer,
+ PipelineStageFlags.TopOfPipeBit,
+ PipelineStageFlags.AllCommandsBit,
+ 0,
+ 0,
+ null,
+ 0,
+ null,
+ 1,
+ barrier);
+ }
+
+ /// <summary>Reads back the given region of the texture and forwards it as a screenshot to the renderer.</summary>
+ private void CaptureFrame(TextureView texture, int x, int y, int width, int height, bool isBgra, bool flipX, bool flipY)
+ {
+ byte[] bitmap = texture.GetData(x, y, width, height);
+
+ _gd.OnScreenCaptured(new ScreenCaptureImageInfo(width, height, isBgra, bitmap, flipX, flipY));
+ }
+
+ /// <summary>Intentionally a no-op: the size is queried from the surface on swapchain creation.</summary>
+ public override void SetSize(int width, int height)
+ {
+ // Not needed as we can get the size from the surface.
+ }
+
+ /// <summary>Stores the new vsync mode; the swapchain is recreated on the next present.</summary>
+ public override void ChangeVSyncMode(bool vsyncEnabled)
+ {
+ _vsyncEnabled = vsyncEnabled;
+ _vsyncModeChanged = true;
+ }
+
+ /// <summary>Destroys semaphores, image views, the swapchain, and any active post-processing effects.</summary>
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ unsafe
+ {
+ _gd.Api.DestroySemaphore(_device, _renderFinishedSemaphore, null);
+ _gd.Api.DestroySemaphore(_device, _imageAvailableSemaphore, null);
+
+ for (int i = 0; i < _swapchainImageViews.Length; i++)
+ {
+ _swapchainImageViews[i].Dispose();
+ }
+
+ _gd.SwapchainApi.DestroySwapchain(_device, _swapchain, null);
+ }
+
+ _effect?.Dispose();
+ _scalingFilter?.Dispose();
+ }
+ }
+
+ /// <summary>Releases all window resources via the standard dispose pattern.</summary>
+ public override void Dispose()
+ {
+ Dispose(true);
+ // Standard dispose pattern (CA1816): suppress finalization since cleanup already ran.
+ GC.SuppressFinalize(this);
+ }
+ }
+}
diff --git a/src/Ryujinx.Graphics.Vulkan/WindowBase.cs b/src/Ryujinx.Graphics.Vulkan/WindowBase.cs
new file mode 100644
index 00000000..0a365e8f
--- /dev/null
+++ b/src/Ryujinx.Graphics.Vulkan/WindowBase.cs
@@ -0,0 +1,18 @@
+using Ryujinx.Graphics.GAL;
+using System;
+
+namespace Ryujinx.Graphics.Vulkan
+{
+ /// <summary>
+ /// Common base for backend presentation windows: presenting frames plus
+ /// vsync, anti-aliasing and scaling filter configuration.
+ /// </summary>
+ internal abstract class WindowBase: IWindow
+ {
+ // Set by the renderer to request a capture of the next presented frame.
+ public bool ScreenCaptureRequested { get; set; }
+
+ public abstract void Dispose();
+ public abstract void Present(ITexture texture, ImageCrop crop, Action swapBuffersCallback);
+ public abstract void SetSize(int width, int height);
+ public abstract void ChangeVSyncMode(bool vsyncEnabled);
+ public abstract void SetAntiAliasing(AntiAliasing effect);
+ public abstract void SetScalingFilter(ScalingFilter scalerType);
+ public abstract void SetScalingFilterLevel(float scale);
+ }
+} \ No newline at end of file