From cee712105850ac3385cd0091a923438167433f9f Mon Sep 17 00:00:00 2001
From: TSR Berry <20988865+TSRBerry@users.noreply.github.com>
Date: Sat, 8 Apr 2023 01:22:00 +0200
Subject: Move solution and projects to src
---
src/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs | 754 +++++++++++++++++++++++
1 file changed, 754 insertions(+)
create mode 100644 src/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs
diff --git a/src/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs b/src/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs
new file mode 100644
index 00000000..e20e1bb6
--- /dev/null
+++ b/src/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs
@@ -0,0 +1,754 @@
+using Ryujinx.Common;
+using Ryujinx.Graphics.GAL;
+using Ryujinx.Graphics.Gpu.Image;
+using Ryujinx.Graphics.Gpu.Shader;
+using Ryujinx.Graphics.Shader;
+using System;
+using System.Collections.Generic;
+using System.Runtime.CompilerServices;
+
+namespace Ryujinx.Graphics.Gpu.Memory
+{
+    /// <summary>
+    /// Buffer manager.
+    /// </summary>
+ class BufferManager
+ {
+ private readonly GpuContext _context;
+ private readonly GpuChannel _channel;
+
+ private int _unalignedStorageBuffers;
+ public bool HasUnalignedStorageBuffers => _unalignedStorageBuffers > 0;
+
+ private IndexBuffer _indexBuffer;
+ private readonly VertexBuffer[] _vertexBuffers;
+ private readonly BufferBounds[] _transformFeedbackBuffers;
+        private readonly List<BufferTextureBinding> _bufferTextures;
+ private readonly BufferAssignment[] _ranges;
+
+        /// <summary>
+        /// Holds shader stage buffer state and binding information.
+        /// </summary>
+ private class BuffersPerStage
+ {
+            /// <summary>
+            /// Shader buffer binding information.
+            /// </summary>
+ public BufferDescriptor[] Bindings { get; private set; }
+
+            /// <summary>
+            /// Buffer regions.
+            /// </summary>
+ public BufferBounds[] Buffers { get; }
+
+            /// <summary>
+            /// Flags indicating whether each buffer slot is unaligned.
+            /// </summary>
+ public bool[] Unaligned { get; }
+
+            /// <summary>
+            /// Total number of buffers used by the shader.
+            /// </summary>
+ public int Count { get; private set; }
+
+            /// <summary>
+            /// Creates a new instance of the shader stage buffer information.
+            /// </summary>
+            /// <param name="count">Maximum number of buffers that the shader stage can use</param>
+ public BuffersPerStage(int count)
+ {
+ Bindings = new BufferDescriptor[count];
+ Buffers = new BufferBounds[count];
+ Unaligned = new bool[count];
+ }
+
+            /// <summary>
+            /// Sets the region of a buffer at a given slot.
+            /// </summary>
+            /// <param name="index">Buffer slot</param>
+            /// <param name="address">Region virtual address</param>
+            /// <param name="size">Region size in bytes</param>
+            /// <param name="flags">Buffer usage flags</param>
+ public void SetBounds(int index, ulong address, ulong size, BufferUsageFlags flags = BufferUsageFlags.None)
+ {
+ Buffers[index] = new BufferBounds(address, size, flags);
+ }
+
+            /// <summary>
+            /// Sets shader buffer binding information.
+            /// </summary>
+            /// <param name="descriptors">Buffer binding information</param>
+ public void SetBindings(BufferDescriptor[] descriptors)
+ {
+ if (descriptors == null)
+ {
+ Count = 0;
+ return;
+ }
+
+ if ((Count = descriptors.Length) != 0)
+ {
+ Bindings = descriptors;
+ }
+ }
+ }
+
+ private readonly BuffersPerStage _cpStorageBuffers;
+ private readonly BuffersPerStage _cpUniformBuffers;
+ private readonly BuffersPerStage[] _gpStorageBuffers;
+ private readonly BuffersPerStage[] _gpUniformBuffers;
+
+ private bool _gpStorageBuffersDirty;
+ private bool _gpUniformBuffersDirty;
+
+ private bool _indexBufferDirty;
+ private bool _vertexBuffersDirty;
+ private uint _vertexBuffersEnableMask;
+ private bool _transformFeedbackBuffersDirty;
+
+ private bool _rebind;
+
+        /// <summary>
+        /// Creates a new instance of the buffer manager.
+        /// </summary>
+        /// <param name="context">GPU context that the buffer manager belongs to</param>
+        /// <param name="channel">GPU channel that the buffer manager belongs to</param>
+ public BufferManager(GpuContext context, GpuChannel channel)
+ {
+ _context = context;
+ _channel = channel;
+
+ _vertexBuffers = new VertexBuffer[Constants.TotalVertexBuffers];
+
+ _transformFeedbackBuffers = new BufferBounds[Constants.TotalTransformFeedbackBuffers];
+
+ _cpStorageBuffers = new BuffersPerStage(Constants.TotalCpStorageBuffers);
+ _cpUniformBuffers = new BuffersPerStage(Constants.TotalCpUniformBuffers);
+
+ _gpStorageBuffers = new BuffersPerStage[Constants.ShaderStages];
+ _gpUniformBuffers = new BuffersPerStage[Constants.ShaderStages];
+
+ for (int index = 0; index < Constants.ShaderStages; index++)
+ {
+ _gpStorageBuffers[index] = new BuffersPerStage(Constants.TotalGpStorageBuffers);
+ _gpUniformBuffers[index] = new BuffersPerStage(Constants.TotalGpUniformBuffers);
+ }
+
+            _bufferTextures = new List<BufferTextureBinding>();
+
+ _ranges = new BufferAssignment[Constants.TotalGpUniformBuffers * Constants.ShaderStages];
+ }
+
+        /// <summary>
+        /// Sets the memory range with the index buffer data, to be used for subsequent draw calls.
+        /// </summary>
+        /// <param name="gpuVa">Start GPU virtual address of the index buffer</param>
+        /// <param name="size">Size, in bytes, of the index buffer</param>
+        /// <param name="type">Type of each index buffer element</param>
+ public void SetIndexBuffer(ulong gpuVa, ulong size, IndexType type)
+ {
+ ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
+
+ _indexBuffer.Address = address;
+ _indexBuffer.Size = size;
+ _indexBuffer.Type = type;
+
+ _indexBufferDirty = true;
+ }
+
+        /// <summary>
+        /// Sets a new index buffer that overrides the one set on the call to <see cref="SetIndexBuffer(ulong, ulong, IndexType)"/>.
+        /// </summary>
+        /// <param name="buffer">Buffer to be used as index buffer</param>
+        /// <param name="type">Type of each index buffer element</param>
+ public void SetIndexBuffer(BufferRange buffer, IndexType type)
+ {
+ _context.Renderer.Pipeline.SetIndexBuffer(buffer, type);
+
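+            // Mark the index buffer dirty so the tracked index buffer is rebound on the next commit.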
+ _indexBufferDirty = true;
+ }
+
+        /// <summary>
+        /// Sets the memory range with vertex buffer data, to be used for subsequent draw calls.
+        /// </summary>
+        /// <param name="index">Index of the vertex buffer (up to 16)</param>
+        /// <param name="gpuVa">GPU virtual address of the buffer</param>
+        /// <param name="size">Size in bytes of the buffer</param>
+        /// <param name="stride">Stride of the buffer, defined as the number of bytes of each vertex</param>
+        /// <param name="divisor">Vertex divisor of the buffer, for instanced draws</param>
+ public void SetVertexBuffer(int index, ulong gpuVa, ulong size, int stride, int divisor)
+ {
+ ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
+
+ _vertexBuffers[index].Address = address;
+ _vertexBuffers[index].Size = size;
+ _vertexBuffers[index].Stride = stride;
+ _vertexBuffers[index].Divisor = divisor;
+
+ _vertexBuffersDirty = true;
+
+ if (address != 0)
+ {
+ _vertexBuffersEnableMask |= 1u << index;
+ }
+ else
+ {
+ _vertexBuffersEnableMask &= ~(1u << index);
+ }
+ }
+
+        /// <summary>
+        /// Sets a transform feedback buffer on the graphics pipeline.
+        /// The output from the vertex transformation stages is written into the feedback buffer.
+        /// </summary>
+        /// <param name="index">Index of the transform feedback buffer</param>
+        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
+        /// <param name="size">Size in bytes of the transform feedback buffer</param>
+ public void SetTransformFeedbackBuffer(int index, ulong gpuVa, ulong size)
+ {
+ ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
+
+ _transformFeedbackBuffers[index] = new BufferBounds(address, size);
+ _transformFeedbackBuffersDirty = true;
+ }
+
+        /// <summary>
+        /// Records the alignment of a storage buffer.
+        /// Unaligned storage buffers disable some optimizations on the shader.
+        /// </summary>
+        /// <param name="buffers">The binding list to modify</param>
+        /// <param name="index">Index of the storage buffer</param>
+        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
+ private void RecordStorageAlignment(BuffersPerStage buffers, int index, ulong gpuVa)
+ {
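+            // Keep a running count of unaligned storage buffer bindings; HasUnalignedStorageBuffers reports whether any remain.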
+ bool unaligned = (gpuVa & (Constants.StorageAlignment - 1)) != 0;
+
+ if (unaligned || HasUnalignedStorageBuffers)
+ {
+ // Check if the alignment changed for this binding.
+
+ ref bool currentUnaligned = ref buffers.Unaligned[index];
+
+ if (currentUnaligned != unaligned)
+ {
+ currentUnaligned = unaligned;
+ _unalignedStorageBuffers += unaligned ? 1 : -1;
+ }
+ }
+ }
+
+        /// <summary>
+        /// Sets a storage buffer on the compute pipeline.
+        /// Storage buffers can be read and written by shaders.
+        /// </summary>
+        /// <param name="index">Index of the storage buffer</param>
+        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
+        /// <param name="size">Size in bytes of the storage buffer</param>
+        /// <param name="flags">Buffer usage flags</param>
+ public void SetComputeStorageBuffer(int index, ulong gpuVa, ulong size, BufferUsageFlags flags)
+ {
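+            // Expand the size to cover the misaligned start, since the GPU VA is aligned down below.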
+ size += gpuVa & ((ulong)_context.Capabilities.StorageBufferOffsetAlignment - 1);
+
+ RecordStorageAlignment(_cpStorageBuffers, index, gpuVa);
+
+ gpuVa = BitUtils.AlignDown(gpuVa, (ulong)_context.Capabilities.StorageBufferOffsetAlignment);
+
+ ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
+
+ _cpStorageBuffers.SetBounds(index, address, size, flags);
+ }
+
+        /// <summary>
+        /// Sets a storage buffer on the graphics pipeline.
+        /// Storage buffers can be read and written by shaders.
+        /// </summary>
+        /// <param name="stage">Index of the shader stage</param>
+        /// <param name="index">Index of the storage buffer</param>
+        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
+        /// <param name="size">Size in bytes of the storage buffer</param>
+        /// <param name="flags">Buffer usage flags</param>
+ public void SetGraphicsStorageBuffer(int stage, int index, ulong gpuVa, ulong size, BufferUsageFlags flags)
+ {
+ size += gpuVa & ((ulong)_context.Capabilities.StorageBufferOffsetAlignment - 1);
+
+ BuffersPerStage buffers = _gpStorageBuffers[stage];
+
+ RecordStorageAlignment(buffers, index, gpuVa);
+
+ gpuVa = BitUtils.AlignDown(gpuVa, (ulong)_context.Capabilities.StorageBufferOffsetAlignment);
+
+ ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
+
+ if (buffers.Buffers[index].Address != address ||
+ buffers.Buffers[index].Size != size)
+ {
+ _gpStorageBuffersDirty = true;
+ }
+
+ buffers.SetBounds(index, address, size, flags);
+ }
+
+        /// <summary>
+        /// Sets a uniform buffer on the compute pipeline.
+        /// Uniform buffers are read-only from shaders, and have a small capacity.
+        /// </summary>
+        /// <param name="index">Index of the uniform buffer</param>
+        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
+        /// <param name="size">Size in bytes of the uniform buffer</param>
+ public void SetComputeUniformBuffer(int index, ulong gpuVa, ulong size)
+ {
+ ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
+
+ _cpUniformBuffers.SetBounds(index, address, size);
+ }
+
+        /// <summary>
+        /// Sets a uniform buffer on the graphics pipeline.
+        /// Uniform buffers are read-only from shaders, and have a small capacity.
+        /// </summary>
+        /// <param name="stage">Index of the shader stage</param>
+        /// <param name="index">Index of the uniform buffer</param>
+        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
+        /// <param name="size">Size in bytes of the uniform buffer</param>
+ public void SetGraphicsUniformBuffer(int stage, int index, ulong gpuVa, ulong size)
+ {
+ ulong address = _channel.MemoryManager.Physical.BufferCache.TranslateAndCreateBuffer(_channel.MemoryManager, gpuVa, size);
+
+ _gpUniformBuffers[stage].SetBounds(index, address, size);
+ _gpUniformBuffersDirty = true;
+ }
+
+        /// <summary>
+        /// Sets the binding points for the storage and uniform buffers bound on the compute pipeline.
+        /// </summary>
+        /// <param name="bindings">Bindings for the active shader</param>
+ public void SetComputeBufferBindings(CachedShaderBindings bindings)
+ {
+ _cpStorageBuffers.SetBindings(bindings.StorageBufferBindings[0]);
+ _cpUniformBuffers.SetBindings(bindings.ConstantBufferBindings[0]);
+ }
+
+        /// <summary>
+        /// Sets the binding points for the storage and uniform buffers bound on the graphics pipeline.
+        /// </summary>
+        /// <param name="bindings">Bindings for the active shader</param>
+ public void SetGraphicsBufferBindings(CachedShaderBindings bindings)
+ {
+ for (int i = 0; i < Constants.ShaderStages; i++)
+ {
+ _gpStorageBuffers[i].SetBindings(bindings.StorageBufferBindings[i]);
+ _gpUniformBuffers[i].SetBindings(bindings.ConstantBufferBindings[i]);
+ }
+
+ _gpStorageBuffersDirty = true;
+ _gpUniformBuffersDirty = true;
+ }
+
+        /// <summary>
+        /// Gets a bit mask indicating which compute uniform buffers are currently bound.
+        /// </summary>
+        /// <returns>Mask where each bit set indicates a bound constant buffer</returns>
+ public uint GetComputeUniformBufferUseMask()
+ {
+ uint mask = 0;
+
+ for (int i = 0; i < _cpUniformBuffers.Buffers.Length; i++)
+ {
+ if (_cpUniformBuffers.Buffers[i].Address != 0)
+ {
+ mask |= 1u << i;
+ }
+ }
+
+ return mask;
+ }
+
+        /// <summary>
+        /// Gets a bit mask indicating which graphics uniform buffers are currently bound.
+        /// </summary>
+        /// <param name="stage">Index of the shader stage</param>
+        /// <returns>Mask where each bit set indicates a bound constant buffer</returns>
+ public uint GetGraphicsUniformBufferUseMask(int stage)
+ {
+ uint mask = 0;
+
+ for (int i = 0; i < _gpUniformBuffers[stage].Buffers.Length; i++)
+ {
+ if (_gpUniformBuffers[stage].Buffers[i].Address != 0)
+ {
+ mask |= 1u << i;
+ }
+ }
+
+ return mask;
+ }
+
+        /// <summary>
+        /// Gets the address of the compute uniform buffer currently bound at the given index.
+        /// </summary>
+        /// <param name="index">Index of the uniform buffer binding</param>
+        /// <returns>The uniform buffer address, or an undefined value if the buffer is not currently bound</returns>
+ public ulong GetComputeUniformBufferAddress(int index)
+ {
+ return _cpUniformBuffers.Buffers[index].Address;
+ }
+
+        /// <summary>
+        /// Gets the address of the graphics uniform buffer currently bound at the given index.
+        /// </summary>
+        /// <param name="stage">Index of the shader stage</param>
+        /// <param name="index">Index of the uniform buffer binding</param>
+        /// <returns>The uniform buffer address, or an undefined value if the buffer is not currently bound</returns>
+ public ulong GetGraphicsUniformBufferAddress(int stage, int index)
+ {
+ return _gpUniformBuffers[stage].Buffers[index].Address;
+ }
+
+        /// <summary>
+        /// Gets the bounds of the uniform buffer currently bound at the given index.
+        /// </summary>
+        /// <param name="isCompute">Indicates whether the uniform is requested by the 3D or compute engine</param>
+        /// <param name="stage">Index of the shader stage, if the uniform is for the 3D engine</param>
+        /// <param name="index">Index of the uniform buffer binding</param>
+        /// <returns>The uniform buffer bounds, or an undefined value if the buffer is not currently bound</returns>
+ public ref BufferBounds GetUniformBufferBounds(bool isCompute, int stage, int index)
+ {
+ if (isCompute)
+ {
+ return ref _cpUniformBuffers.Buffers[index];
+ }
+ else
+ {
+ return ref _gpUniformBuffers[stage].Buffers[index];
+ }
+ }
+
+        /// <summary>
+        /// Ensures that the compute engine bindings are visible to the host GPU.
+        /// Note: this actually performs the binding using the host graphics API.
+        /// </summary>
+ public void CommitComputeBindings()
+ {
+ var bufferCache = _channel.MemoryManager.Physical.BufferCache;
+
+ BindBuffers(bufferCache, _cpStorageBuffers, isStorage: true);
+ BindBuffers(bufferCache, _cpUniformBuffers, isStorage: false);
+
+ CommitBufferTextureBindings();
+
+ // Force rebind after doing compute work.
+ Rebind();
+ }
+
+        /// <summary>
+        /// Commits any queued buffer texture bindings.
+        /// </summary>
+ private void CommitBufferTextureBindings()
+ {
+ if (_bufferTextures.Count > 0)
+ {
+ foreach (var binding in _bufferTextures)
+ {
+ var isStore = binding.BindingInfo.Flags.HasFlag(TextureUsageFlags.ImageStore);
+ var range = _channel.MemoryManager.Physical.BufferCache.GetBufferRange(binding.Address, binding.Size, isStore);
+ binding.Texture.SetStorage(range);
+
+ // The texture must be rebound to use the new storage if it was updated.
+
+ if (binding.IsImage)
+ {
+ _context.Renderer.Pipeline.SetImage(binding.BindingInfo.Binding, binding.Texture, binding.Format);
+ }
+ else
+ {
+ _context.Renderer.Pipeline.SetTextureAndSampler(binding.Stage, binding.BindingInfo.Binding, binding.Texture, null);
+ }
+ }
+
+ _bufferTextures.Clear();
+ }
+ }
+
+        /// <summary>
+        /// Ensures that the graphics engine bindings are visible to the host GPU.
+        /// Note: this actually performs the binding using the host graphics API.
+        /// </summary>
+ public void CommitGraphicsBindings()
+ {
+ var bufferCache = _channel.MemoryManager.Physical.BufferCache;
+
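+            // Rebind each resource type only when its state changed or a full rebind was requested;
+            // otherwise just synchronize the memory ranges that are already bound.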
+ if (_indexBufferDirty || _rebind)
+ {
+ _indexBufferDirty = false;
+
+ if (_indexBuffer.Address != 0)
+ {
+ BufferRange buffer = bufferCache.GetBufferRange(_indexBuffer.Address, _indexBuffer.Size);
+
+ _context.Renderer.Pipeline.SetIndexBuffer(buffer, _indexBuffer.Type);
+ }
+ }
+ else if (_indexBuffer.Address != 0)
+ {
+ bufferCache.SynchronizeBufferRange(_indexBuffer.Address, _indexBuffer.Size);
+ }
+
+ uint vbEnableMask = _vertexBuffersEnableMask;
+
+ if (_vertexBuffersDirty || _rebind)
+ {
+ _vertexBuffersDirty = false;
+
+                Span<VertexBufferDescriptor> vertexBuffers = stackalloc VertexBufferDescriptor[Constants.TotalVertexBuffers];
+
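+                // Iterate only while enabled vertex buffers remain at or above the current index.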
+ for (int index = 0; (vbEnableMask >> index) != 0; index++)
+ {
+ VertexBuffer vb = _vertexBuffers[index];
+
+ if (vb.Address == 0)
+ {
+ continue;
+ }
+
+ BufferRange buffer = bufferCache.GetBufferRange(vb.Address, vb.Size);
+
+ vertexBuffers[index] = new VertexBufferDescriptor(buffer, vb.Stride, vb.Divisor);
+ }
+
+ _context.Renderer.Pipeline.SetVertexBuffers(vertexBuffers);
+ }
+ else
+ {
+ for (int index = 0; (vbEnableMask >> index) != 0; index++)
+ {
+ VertexBuffer vb = _vertexBuffers[index];
+
+ if (vb.Address == 0)
+ {
+ continue;
+ }
+
+ bufferCache.SynchronizeBufferRange(vb.Address, vb.Size);
+ }
+ }
+
+ if (_transformFeedbackBuffersDirty || _rebind)
+ {
+ _transformFeedbackBuffersDirty = false;
+
+                Span<BufferRange> tfbs = stackalloc BufferRange[Constants.TotalTransformFeedbackBuffers];
+
+ for (int index = 0; index < Constants.TotalTransformFeedbackBuffers; index++)
+ {
+ BufferBounds tfb = _transformFeedbackBuffers[index];
+
+ if (tfb.Address == 0)
+ {
+ tfbs[index] = BufferRange.Empty;
+ continue;
+ }
+
+ tfbs[index] = bufferCache.GetBufferRange(tfb.Address, tfb.Size, write: true);
+ }
+
+ _context.Renderer.Pipeline.SetTransformFeedbackBuffers(tfbs);
+ }
+ else
+ {
+ for (int index = 0; index < Constants.TotalTransformFeedbackBuffers; index++)
+ {
+ BufferBounds tfb = _transformFeedbackBuffers[index];
+
+ if (tfb.Address == 0)
+ {
+ continue;
+ }
+
+ bufferCache.SynchronizeBufferRange(tfb.Address, tfb.Size);
+ }
+ }
+
+ if (_gpStorageBuffersDirty || _rebind)
+ {
+ _gpStorageBuffersDirty = false;
+
+ BindBuffers(bufferCache, _gpStorageBuffers, isStorage: true);
+ }
+ else
+ {
+ UpdateBuffers(_gpStorageBuffers);
+ }
+
+ if (_gpUniformBuffersDirty || _rebind)
+ {
+ _gpUniformBuffersDirty = false;
+
+ BindBuffers(bufferCache, _gpUniformBuffers, isStorage: false);
+ }
+ else
+ {
+ UpdateBuffers(_gpUniformBuffers);
+ }
+
+ CommitBufferTextureBindings();
+
+ _rebind = false;
+ }
+
+        /// <summary>
+        /// Binds the buffer ranges for all graphics shader stages on the host API.
+        /// </summary>
+        /// <param name="bufferCache">Buffer cache holding the buffers for the specified ranges</param>
+        /// <param name="bindings">Buffer memory ranges to bind</param>
+        /// <param name="isStorage">True to bind as storage buffer, false to bind as uniform buffer</param>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void BindBuffers(BufferCache bufferCache, BuffersPerStage[] bindings, bool isStorage)
+ {
+ int rangesCount = 0;
+
+            Span<BufferAssignment> ranges = _ranges;
+
+ for (ShaderStage stage = ShaderStage.Vertex; stage <= ShaderStage.Fragment; stage++)
+ {
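+                // The per-stage arrays do not include the compute stage, so the graphics stage index is shifted down by one.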
+ ref var buffers = ref bindings[(int)stage - 1];
+
+ for (int index = 0; index < buffers.Count; index++)
+ {
+ ref var bindingInfo = ref buffers.Bindings[index];
+
+ BufferBounds bounds = buffers.Buffers[bindingInfo.Slot];
+
+ if (bounds.Address != 0)
+ {
+ var isWrite = bounds.Flags.HasFlag(BufferUsageFlags.Write);
+ var range = isStorage
+ ? bufferCache.GetBufferRangeTillEnd(bounds.Address, bounds.Size, isWrite)
+ : bufferCache.GetBufferRange(bounds.Address, bounds.Size);
+
+ ranges[rangesCount++] = new BufferAssignment(bindingInfo.Binding, range);
+ }
+ }
+ }
+
+ if (rangesCount != 0)
+ {
+ SetHostBuffers(ranges, rangesCount, isStorage);
+ }
+ }
+
+        /// <summary>
+        /// Binds the buffer ranges of a single stage on the host API.
+        /// </summary>
+        /// <param name="bufferCache">Buffer cache holding the buffers for the specified ranges</param>
+        /// <param name="buffers">Buffer memory ranges to bind</param>
+        /// <param name="isStorage">True to bind as storage buffer, false to bind as uniform buffer</param>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void BindBuffers(BufferCache bufferCache, BuffersPerStage buffers, bool isStorage)
+ {
+ int rangesCount = 0;
+
+            Span<BufferAssignment> ranges = _ranges;
+
+ for (int index = 0; index < buffers.Count; index++)
+ {
+ ref var bindingInfo = ref buffers.Bindings[index];
+
+ BufferBounds bounds = buffers.Buffers[bindingInfo.Slot];
+
+ if (bounds.Address != 0)
+ {
+ var isWrite = bounds.Flags.HasFlag(BufferUsageFlags.Write);
+ var range = isStorage
+ ? bufferCache.GetBufferRangeTillEnd(bounds.Address, bounds.Size, isWrite)
+ : bufferCache.GetBufferRange(bounds.Address, bounds.Size);
+
+ ranges[rangesCount++] = new BufferAssignment(bindingInfo.Binding, range);
+ }
+ }
+
+ if (rangesCount != 0)
+ {
+ SetHostBuffers(ranges, rangesCount, isStorage);
+ }
+ }
+
+        /// <summary>
+        /// Binds the given buffer ranges on the host API.
+        /// </summary>
+        /// <param name="ranges">Host buffers to bind, with their offsets and sizes</param>
+        /// <param name="count">Number of bindings</param>
+        /// <param name="isStorage">Indicates if the buffers are storage or uniform buffers</param>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+        private void SetHostBuffers(ReadOnlySpan<BufferAssignment> ranges, int count, bool isStorage)
+ {
+ if (isStorage)
+ {
+ _context.Renderer.Pipeline.SetStorageBuffers(ranges.Slice(0, count));
+ }
+ else
+ {
+ _context.Renderer.Pipeline.SetUniformBuffers(ranges.Slice(0, count));
+ }
+ }
+
+        /// <summary>
+        /// Updates data for the already bound buffer bindings.
+        /// </summary>
+        /// <param name="bindings">Bindings to update</param>
+ private void UpdateBuffers(BuffersPerStage[] bindings)
+ {
+ for (ShaderStage stage = ShaderStage.Vertex; stage <= ShaderStage.Fragment; stage++)
+ {
+ ref var buffers = ref bindings[(int)stage - 1];
+
+ for (int index = 0; index < buffers.Count; index++)
+ {
+ ref var binding = ref buffers.Bindings[index];
+
+ BufferBounds bounds = buffers.Buffers[binding.Slot];
+
+ if (bounds.Address == 0)
+ {
+ continue;
+ }
+
+ _channel.MemoryManager.Physical.BufferCache.SynchronizeBufferRange(bounds.Address, bounds.Size);
+ }
+ }
+ }
+
+        /// <summary>
+        /// Sets the buffer storage of a buffer texture. This will be bound when the buffer manager commits bindings.
+        /// </summary>
+        /// <param name="stage">Shader stage accessing the texture</param>
+        /// <param name="texture">Buffer texture</param>
+        /// <param name="address">Address of the buffer in memory</param>
+        /// <param name="size">Size of the buffer in bytes</param>
+        /// <param name="bindingInfo">Binding info for the buffer texture</param>
+        /// <param name="format">Format of the buffer texture</param>
+        /// <param name="isImage">Whether the binding is for an image or a sampler</param>
+ public void SetBufferTextureStorage(
+ ShaderStage stage,
+ ITexture texture,
+ ulong address,
+ ulong size,
+ TextureBindingInfo bindingInfo,
+ Format format,
+ bool isImage)
+ {
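+            // Make sure the backing buffer exists now; the binding itself is deferred until bindings are committed.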
+ _channel.MemoryManager.Physical.BufferCache.CreateBuffer(address, size);
+
+ _bufferTextures.Add(new BufferTextureBinding(stage, texture, address, size, bindingInfo, format, isImage));
+ }
+
+        /// <summary>
+        /// Forces all bound textures and images to be rebound the next time CommitBindings is called.
+        /// </summary>
+ public void Rebind()
+ {
+ _rebind = true;
+ }
+ }
+}