aboutsummaryrefslogtreecommitdiff
path: root/src/Ryujinx.Audio/Renderer/Server/Voice
diff options
context:
space:
mode:
authorTSR Berry <20988865+TSRBerry@users.noreply.github.com>2023-04-08 01:22:00 +0200
committerMary <thog@protonmail.com>2023-04-27 23:51:14 +0200
commitcee712105850ac3385cd0091a923438167433f9f (patch)
tree4a5274b21d8b7f938c0d0ce18736d3f2993b11b1 /src/Ryujinx.Audio/Renderer/Server/Voice
parentcd124bda587ef09668a971fa1cac1c3f0cfc9f21 (diff)
Move solution and projects to src
Diffstat (limited to 'src/Ryujinx.Audio/Renderer/Server/Voice')
-rw-r--r--src/Ryujinx.Audio/Renderer/Server/Voice/VoiceChannelResource.cs40
-rw-r--r--src/Ryujinx.Audio/Renderer/Server/Voice/VoiceContext.cs149
-rw-r--r--src/Ryujinx.Audio/Renderer/Server/Voice/VoiceState.cs699
-rw-r--r--src/Ryujinx.Audio/Renderer/Server/Voice/WaveBuffer.cs104
4 files changed, 992 insertions, 0 deletions
diff --git a/src/Ryujinx.Audio/Renderer/Server/Voice/VoiceChannelResource.cs b/src/Ryujinx.Audio/Renderer/Server/Voice/VoiceChannelResource.cs
new file mode 100644
index 00000000..939d9294
--- /dev/null
+++ b/src/Ryujinx.Audio/Renderer/Server/Voice/VoiceChannelResource.cs
@@ -0,0 +1,40 @@
+using Ryujinx.Common.Memory;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Audio.Renderer.Server.Voice
+{
+ /// <summary>
+ /// Server state for a voice channel resource.
+ /// </summary>
+ [StructLayout(LayoutKind.Sequential, Size = 0xD0, Pack = Alignment)]
+ public struct VoiceChannelResource
+ {
+ /// <summary>
+ /// Required alignment of the structure (16 bytes), also used as the layout packing.
+ /// </summary>
+ public const int Alignment = 0x10;
+
+ /// <summary>
+ /// Mix volumes for the resource.
+ /// </summary>
+ public Array24<float> Mix;
+
+ /// <summary>
+ /// Previous mix volumes for resource.
+ /// </summary>
+ public Array24<float> PreviousMix;
+
+ /// <summary>
+ /// The id of the resource.
+ /// </summary>
+ public uint Id;
+
+ /// <summary>
+ /// Indicate if the resource is used.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool IsUsed;
+
+ /// <summary>
+ /// Update the internal state: snapshot the current <see cref="Mix"/> volumes into <see cref="PreviousMix"/>.
+ /// </summary>
+ public void UpdateState()
+ {
+ Mix.AsSpan().CopyTo(PreviousMix.AsSpan());
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Audio/Renderer/Server/Voice/VoiceContext.cs b/src/Ryujinx.Audio/Renderer/Server/Voice/VoiceContext.cs
new file mode 100644
index 00000000..1c57b71b
--- /dev/null
+++ b/src/Ryujinx.Audio/Renderer/Server/Voice/VoiceContext.cs
@@ -0,0 +1,149 @@
+using Ryujinx.Audio.Renderer.Common;
+using Ryujinx.Audio.Renderer.Utils;
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.Audio.Renderer.Server.Voice
+{
+ /// <summary>
+ /// Voice context.
+ /// </summary>
+ public class VoiceContext
+ {
+ /// <summary>
+ /// Storage of the sorted indices to <see cref="VoiceState"/>.
+ /// </summary>
+ private Memory<int> _sortedVoices;
+
+ /// <summary>
+ /// Storage for <see cref="VoiceState"/>.
+ /// </summary>
+ private Memory<VoiceState> _voices;
+
+ /// <summary>
+ /// Storage for <see cref="VoiceChannelResource"/>.
+ /// </summary>
+ private Memory<VoiceChannelResource> _voiceChannelResources;
+
+ /// <summary>
+ /// Storage for <see cref="VoiceUpdateState"/> that are used during audio renderer server updates.
+ /// </summary>
+ private Memory<VoiceUpdateState> _voiceUpdateStatesCpu;
+
+ /// <summary>
+ /// Storage for <see cref="VoiceUpdateState"/> for the <see cref="Dsp.AudioProcessor"/>.
+ /// </summary>
+ private Memory<VoiceUpdateState> _voiceUpdateStatesDsp;
+
+ /// <summary>
+ /// The total voice count.
+ /// </summary>
+ private uint _voiceCount;
+
+ /// <summary>
+ /// Initialize the <see cref="VoiceContext"/> with the given backing storage.
+ /// </summary>
+ /// <param name="sortedVoices">Storage for the sorted voice indices.</param>
+ /// <param name="voices">Storage for the voice states.</param>
+ /// <param name="voiceChannelResources">Storage for the voice channel resources.</param>
+ /// <param name="voiceUpdateStatesCpu">Storage for the voice update states used on the server (CPU) side.</param>
+ /// <param name="voiceUpdateStatesDsp">Storage for the voice update states used by the <see cref="Dsp.AudioProcessor"/>.</param>
+ /// <param name="voiceCount">The total voice count.</param>
+ public void Initialize(Memory<int> sortedVoices, Memory<VoiceState> voices, Memory<VoiceChannelResource> voiceChannelResources, Memory<VoiceUpdateState> voiceUpdateStatesCpu, Memory<VoiceUpdateState> voiceUpdateStatesDsp, uint voiceCount)
+ {
+ _sortedVoices = sortedVoices;
+ _voices = voices;
+ _voiceChannelResources = voiceChannelResources;
+ _voiceUpdateStatesCpu = voiceUpdateStatesCpu;
+ _voiceUpdateStatesDsp = voiceUpdateStatesDsp;
+ _voiceCount = voiceCount;
+ }
+
+ /// <summary>
+ /// Get the total voice count.
+ /// </summary>
+ /// <returns>The total voice count.</returns>
+ public uint GetCount()
+ {
+ return _voiceCount;
+ }
+
+ /// <summary>
+ /// Get a reference to a <see cref="VoiceChannelResource"/> at the given <paramref name="id"/>.
+ /// </summary>
+ /// <param name="id">The index to use.</param>
+ /// <returns>A reference to a <see cref="VoiceChannelResource"/> at the given <paramref name="id"/>.</returns>
+ public ref VoiceChannelResource GetChannelResource(int id)
+ {
+ return ref SpanIOHelper.GetFromMemory(_voiceChannelResources, id, _voiceCount);
+ }
+
+ /// <summary>
+ /// Get a <see cref="Memory{VoiceUpdateState}"/> at the given <paramref name="id"/>.
+ /// </summary>
+ /// <param name="id">The index to use.</param>
+ /// <returns>A <see cref="Memory{VoiceUpdateState}"/> at the given <paramref name="id"/>.</returns>
+ /// <remarks>The returned <see cref="Memory{VoiceUpdateState}"/> should only be used when updating the server state.</remarks>
+ public Memory<VoiceUpdateState> GetUpdateStateForCpu(int id)
+ {
+ return SpanIOHelper.GetMemory(_voiceUpdateStatesCpu, id, _voiceCount);
+ }
+
+ /// <summary>
+ /// Get a <see cref="Memory{VoiceUpdateState}"/> at the given <paramref name="id"/>.
+ /// </summary>
+ /// <param name="id">The index to use.</param>
+ /// <returns>A <see cref="Memory{VoiceUpdateState}"/> at the given <paramref name="id"/>.</returns>
+ /// <remarks>The returned <see cref="Memory{VoiceUpdateState}"/> should only be used in the context of processing on the <see cref="Dsp.AudioProcessor"/>.</remarks>
+ public Memory<VoiceUpdateState> GetUpdateStateForDsp(int id)
+ {
+ return SpanIOHelper.GetMemory(_voiceUpdateStatesDsp, id, _voiceCount);
+ }
+
+ /// <summary>
+ /// Get a reference to a <see cref="VoiceState"/> at the given <paramref name="id"/>.
+ /// </summary>
+ /// <param name="id">The index to use.</param>
+ /// <returns>A reference to a <see cref="VoiceState"/> at the given <paramref name="id"/>.</returns>
+ public ref VoiceState GetState(int id)
+ {
+ return ref SpanIOHelper.GetFromMemory(_voices, id, _voiceCount);
+ }
+
+ /// <summary>
+ /// Get a reference to the <see cref="VoiceState"/> at the given sorted position <paramref name="id"/>.
+ /// </summary>
+ /// <param name="id">The sorted position to use (the order established by <see cref="Sort"/>).</param>
+ /// <returns>A reference to the <see cref="VoiceState"/> at the given sorted position.</returns>
+ public ref VoiceState GetSortedState(int id)
+ {
+ Debug.Assert(id >= 0 && id < _voiceCount);
+
+ return ref GetState(_sortedVoices.Span[id]);
+ }
+
+ /// <summary>
+ /// Update internal state during command generation.
+ /// </summary>
+ public void UpdateForCommandGeneration()
+ {
+ // Mirror the DSP-side voice update states back to the CPU-side storage.
+ _voiceUpdateStatesDsp.CopyTo(_voiceUpdateStatesCpu);
+ }
+
+ /// <summary>
+ /// Sort the internal voices by priority and sorting order (if the priorities match).
+ /// </summary>
+ public void Sort()
+ {
+ // Reset the sorted indices to identity order before sorting.
+ for (int i = 0; i < _voiceCount; i++)
+ {
+ _sortedVoices.Span[i] = i;
+ }
+
+ // Array.Sort requires an array, so sort a temporary copy and write the result back.
+ // NOTE(review): Array.Sort is an unstable sort; voices with equal Priority AND
+ // SortingOrder may change relative order between calls — confirm this is acceptable.
+ int[] sortedVoicesTemp = _sortedVoices.Slice(0, (int)GetCount()).ToArray();
+
+ Array.Sort(sortedVoicesTemp, (a, b) =>
+ {
+ ref VoiceState aState = ref GetState(a);
+ ref VoiceState bState = ref GetState(b);
+
+ int result = aState.Priority.CompareTo(bState.Priority);
+
+ if (result == 0)
+ {
+ return aState.SortingOrder.CompareTo(bState.SortingOrder);
+ }
+
+ return result;
+ });
+
+ sortedVoicesTemp.AsSpan().CopyTo(_sortedVoices.Span);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Audio/Renderer/Server/Voice/VoiceState.cs b/src/Ryujinx.Audio/Renderer/Server/Voice/VoiceState.cs
new file mode 100644
index 00000000..0bf53c54
--- /dev/null
+++ b/src/Ryujinx.Audio/Renderer/Server/Voice/VoiceState.cs
@@ -0,0 +1,699 @@
+using Ryujinx.Audio.Common;
+using Ryujinx.Audio.Renderer.Common;
+using Ryujinx.Audio.Renderer.Parameter;
+using Ryujinx.Audio.Renderer.Server.MemoryPool;
+using Ryujinx.Common.Memory;
+using Ryujinx.Common.Utilities;
+using System;
+using System.Diagnostics;
+using System.Runtime.InteropServices;
+using static Ryujinx.Audio.Renderer.Common.BehaviourParameter;
+using static Ryujinx.Audio.Renderer.Parameter.VoiceInParameter;
+
+namespace Ryujinx.Audio.Renderer.Server.Voice
+{
+ /// <summary>
+ /// Server state for a voice.
+ /// </summary>
+ [StructLayout(LayoutKind.Sequential, Pack = Alignment)]
+ public struct VoiceState
+ {
+ /// <summary>
+ /// Required alignment of the structure (16 bytes), also used as the layout packing.
+ /// </summary>
+ public const int Alignment = 0x10;
+
+ /// <summary>
+ /// Set to true if the voice is used.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool InUse;
+
+ /// <summary>
+ /// Set to true if the voice is new.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool IsNew;
+
+ /// <summary>
+ /// Set to true if the voice was playing (its previous play state was <see cref="Types.PlayState.Started"/>)
+ /// when it last transitioned out of the started state. See <see cref="UpdateParametersForCommandGeneration"/>.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool WasPlaying;
+
+ /// <summary>
+ /// The <see cref="SampleFormat"/> of the voice.
+ /// </summary>
+ public SampleFormat SampleFormat;
+
+ /// <summary>
+ /// The sample rate of the voice.
+ /// </summary>
+ public uint SampleRate;
+
+ /// <summary>
+ /// The total channel count used.
+ /// </summary>
+ public uint ChannelsCount;
+
+ /// <summary>
+ /// Id of the voice.
+ /// </summary>
+ public int Id;
+
+ /// <summary>
+ /// Node id of the voice.
+ /// </summary>
+ public int NodeId;
+
+ /// <summary>
+ /// The target mix id of the voice.
+ /// </summary>
+ public int MixId;
+
+ /// <summary>
+ /// The current voice <see cref="Types.PlayState"/>.
+ /// </summary>
+ public Types.PlayState PlayState;
+
+ /// <summary>
+ /// The previous voice <see cref="Types.PlayState"/>.
+ /// </summary>
+ public Types.PlayState PreviousPlayState;
+
+ /// <summary>
+ /// The priority of the voice.
+ /// </summary>
+ public uint Priority;
+
+ /// <summary>
+ /// Target sorting position of the voice. (used to sort voice with the same <see cref="Priority"/>)
+ /// </summary>
+ public uint SortingOrder;
+
+ /// <summary>
+ /// The pitch used on the voice.
+ /// </summary>
+ public float Pitch;
+
+ /// <summary>
+ /// The output volume of the voice.
+ /// </summary>
+ public float Volume;
+
+ /// <summary>
+ /// The previous output volume of the voice.
+ /// </summary>
+ public float PreviousVolume;
+
+ /// <summary>
+ /// Biquad filters to apply to the output of the voice.
+ /// </summary>
+ public Array2<BiquadFilterParameter> BiquadFilters;
+
+ /// <summary>
+ /// Total count of <see cref="WaveBufferInternal"/> of the voice.
+ /// </summary>
+ public uint WaveBuffersCount;
+
+ /// <summary>
+ /// Current playing <see cref="WaveBufferInternal"/> of the voice.
+ /// </summary>
+ public uint WaveBuffersIndex;
+
+ /// <summary>
+ /// Change the behaviour of the voice.
+ /// </summary>
+ /// <remarks>This was added on REV5.</remarks>
+ public DecodingBehaviour DecodingBehaviour;
+
+ /// <summary>
+ /// User state <see cref="AddressInfo"/> required by the data source.
+ /// </summary>
+ /// <remarks>Only used for <see cref="SampleFormat.Adpcm"/> as the GC-ADPCM coefficients.</remarks>
+ public AddressInfo DataSourceStateAddressInfo;
+
+ /// <summary>
+ /// The wavebuffers of this voice.
+ /// </summary>
+ public Array4<WaveBuffer> WaveBuffers;
+
+ /// <summary>
+ /// The channel resource ids associated to the voice.
+ /// </summary>
+ public Array6<int> ChannelResourceIds;
+
+ /// <summary>
+ /// The target splitter id of the voice.
+ /// </summary>
+ public uint SplitterId;
+
+ /// <summary>
+ /// Change the Sample Rate Conversion (SRC) quality of the voice.
+ /// </summary>
+ /// <remarks>This was added on REV8.</remarks>
+ public SampleRateConversionQuality SrcQuality;
+
+ /// <summary>
+ /// If set to true, the voice was dropped.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool VoiceDropFlag;
+
+ /// <summary>
+ /// Set to true if the data source state work buffer wasn't mapped.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool DataSourceStateUnmapped;
+
+ /// <summary>
+ /// Set to true if any of the <see cref="WaveBuffer.BufferAddressInfo"/> work buffer wasn't mapped.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool BufferInfoUnmapped;
+
+ /// <summary>
+ /// The biquad filter initialization state storage.
+ /// </summary>
+ private BiquadFilterNeedInitializationArrayStruct _biquadFilterNeedInitialization;
+
+ /// <summary>
+ /// Flush the amount of wavebuffer specified. This will result in the wavebuffer being skipped and marked played.
+ /// </summary>
+ /// <remarks>This was added on REV5.</remarks>
+ public byte FlushWaveBufferCount;
+
+ // Fixed-size backing storage (one byte per biquad filter) exposed as a Span<bool> below.
+ [StructLayout(LayoutKind.Sequential, Size = Constants.VoiceBiquadFilterCount)]
+ private struct BiquadFilterNeedInitializationArrayStruct { }
+
+ /// <summary>
+ /// The biquad filter initialization state array.
+ /// </summary>
+ public Span<bool> BiquadFilterNeedInitialization => SpanHelpers.AsSpan<BiquadFilterNeedInitializationArrayStruct, bool>(ref _biquadFilterNeedInitialization);
+
+ /// <summary>
+ /// Initialize the <see cref="VoiceState"/>.
+ /// </summary>
+ public void Initialize()
+ {
+ IsNew = false;
+ VoiceDropFlag = false;
+ DataSourceStateUnmapped = false;
+ BufferInfoUnmapped = false;
+ FlushWaveBufferCount = 0;
+ PlayState = Types.PlayState.Stopped;
+ Priority = Constants.VoiceLowestPriority;
+ Id = 0;
+ NodeId = 0;
+ SampleRate = 0;
+ SampleFormat = SampleFormat.Invalid;
+ ChannelsCount = 0;
+ Pitch = 0.0f;
+ Volume = 0.0f;
+ PreviousVolume = 0.0f;
+ BiquadFilters.AsSpan().Fill(new BiquadFilterParameter());
+ WaveBuffersCount = 0;
+ WaveBuffersIndex = 0;
+ MixId = Constants.UnusedMixId;
+ SplitterId = Constants.UnusedSplitterId;
+ DataSourceStateAddressInfo.Setup(0, 0);
+
+ InitializeWaveBuffers();
+ }
+
+ /// <summary>
+ /// Initialize the <see cref="WaveBuffer"/> in this <see cref="VoiceState"/>.
+ /// </summary>
+ private void InitializeWaveBuffers()
+ {
+ for (int i = 0; i < WaveBuffers.Length; i++)
+ {
+ WaveBuffers[i].StartSampleOffset = 0;
+ WaveBuffers[i].EndSampleOffset = 0;
+ WaveBuffers[i].ShouldLoop = false;
+ WaveBuffers[i].IsEndOfStream = false;
+ WaveBuffers[i].BufferAddressInfo.Setup(0, 0);
+ WaveBuffers[i].ContextAddressInfo.Setup(0, 0);
+ WaveBuffers[i].IsSendToAudioProcessor = true;
+ }
+ }
+
+ /// <summary>
+ /// Check if the voice needs to be skipped.
+ /// </summary>
+ /// <returns>Returns true if the voice needs to be skipped.</returns>
+ public bool ShouldSkip()
+ {
+ return !InUse || WaveBuffersCount == 0 || DataSourceStateUnmapped || BufferInfoUnmapped || VoiceDropFlag;
+ }
+
+ /// <summary>
+ /// Return true if the mix has any destinations.
+ /// </summary>
+ /// <returns>True if the mix has any destinations.</returns>
+ public bool HasAnyDestination()
+ {
+ return MixId != Constants.UnusedMixId || SplitterId != Constants.UnusedSplitterId;
+ }
+
+ /// <summary>
+ /// Indicate if the server voice information needs to be updated.
+ /// </summary>
+ /// <param name="parameter">The user parameter.</param>
+ /// <returns>Return true, if the server voice information needs to be updated.</returns>
+ private bool ShouldUpdateParameters(ref VoiceInParameter parameter)
+ {
+ // Same CPU address: only the size matters.
+ if (DataSourceStateAddressInfo.CpuAddress == parameter.DataSourceStateAddress)
+ {
+ return DataSourceStateAddressInfo.Size != parameter.DataSourceStateSize;
+ }
+
+ return DataSourceStateAddressInfo.CpuAddress != parameter.DataSourceStateAddress ||
+ DataSourceStateAddressInfo.Size != parameter.DataSourceStateSize ||
+ DataSourceStateUnmapped;
+ }
+
+ /// <summary>
+ /// Update the internal state from a user parameter.
+ /// </summary>
+ /// <param name="outErrorInfo">The possible <see cref="ErrorInfo"/> that was generated.</param>
+ /// <param name="parameter">The user parameter.</param>
+ /// <param name="poolMapper">The mapper to use.</param>
+ /// <param name="behaviourContext">The behaviour context.</param>
+ public void UpdateParameters(out ErrorInfo outErrorInfo, ref VoiceInParameter parameter, ref PoolMapper poolMapper, ref BehaviourContext behaviourContext)
+ {
+ InUse = parameter.InUse;
+ Id = parameter.Id;
+ NodeId = parameter.NodeId;
+
+ UpdatePlayState(parameter.PlayState);
+
+ SrcQuality = parameter.SrcQuality;
+
+ Priority = parameter.Priority;
+ SortingOrder = parameter.SortingOrder;
+ SampleRate = parameter.SampleRate;
+ SampleFormat = parameter.SampleFormat;
+ ChannelsCount = parameter.ChannelCount;
+ Pitch = parameter.Pitch;
+ Volume = parameter.Volume;
+ parameter.BiquadFilters.AsSpan().CopyTo(BiquadFilters.AsSpan());
+ WaveBuffersCount = parameter.WaveBuffersCount;
+ WaveBuffersIndex = parameter.WaveBuffersIndex;
+
+ if (behaviourContext.IsFlushVoiceWaveBuffersSupported())
+ {
+ // Accumulated (not overwritten): flushes requested across updates are processed later
+ // by UpdateParametersForCommandGeneration.
+ FlushWaveBufferCount += parameter.FlushWaveBufferCount;
+ }
+
+ MixId = parameter.MixId;
+
+ if (behaviourContext.IsSplitterSupported())
+ {
+ SplitterId = parameter.SplitterId;
+ }
+ else
+ {
+ SplitterId = Constants.UnusedSplitterId;
+ }
+
+ parameter.ChannelResourceIds.AsSpan().CopyTo(ChannelResourceIds.AsSpan());
+
+ DecodingBehaviour behaviour = DecodingBehaviour.Default;
+
+ if (behaviourContext.IsDecodingBehaviourFlagSupported())
+ {
+ behaviour = parameter.DecodingBehaviourFlags;
+ }
+
+ DecodingBehaviour = behaviour;
+
+ if (parameter.ResetVoiceDropFlag)
+ {
+ VoiceDropFlag = false;
+ }
+
+ if (ShouldUpdateParameters(ref parameter))
+ {
+ DataSourceStateUnmapped = !poolMapper.TryAttachBuffer(out outErrorInfo, ref DataSourceStateAddressInfo, parameter.DataSourceStateAddress, parameter.DataSourceStateSize);
+ }
+ else
+ {
+ outErrorInfo = new ErrorInfo();
+ }
+ }
+
+ /// <summary>
+ /// Update the internal play state from user play state.
+ /// </summary>
+ /// <param name="userPlayState">The target user play state.</param>
+ public void UpdatePlayState(PlayState userPlayState)
+ {
+ Types.PlayState oldServerPlayState = PlayState;
+
+ PreviousPlayState = oldServerPlayState;
+
+ Types.PlayState newServerPlayState;
+
+ switch (userPlayState)
+ {
+ case Common.PlayState.Start:
+ newServerPlayState = Types.PlayState.Started;
+ break;
+
+ case Common.PlayState.Stop:
+ // Already stopped: nothing to transition, keep the current state.
+ if (oldServerPlayState == Types.PlayState.Stopped)
+ {
+ return;
+ }
+
+ newServerPlayState = Types.PlayState.Stopping;
+ break;
+
+ case Common.PlayState.Pause:
+ newServerPlayState = Types.PlayState.Paused;
+ break;
+
+ default:
+ throw new NotImplementedException($"Unhandled PlayState.{userPlayState}");
+ }
+
+ PlayState = newServerPlayState;
+ }
+
+ /// <summary>
+ /// Write the status of the voice to the given user output.
+ /// </summary>
+ /// <param name="outStatus">The given user output.</param>
+ /// <param name="parameter">The user parameter.</param>
+ /// <param name="voiceUpdateStates">The voice states associated to the <see cref="VoiceState"/>.</param>
+ public void WriteOutStatus(ref VoiceOutStatus outStatus, ref VoiceInParameter parameter, ReadOnlySpan<Memory<VoiceUpdateState>> voiceUpdateStates)
+ {
+#if DEBUG
+ // Sanity check in debug mode of the internal state
+ // (all channels of one voice are expected to advance in lockstep).
+ if (!parameter.IsNew && !IsNew)
+ {
+ for (int i = 1; i < ChannelsCount; i++)
+ {
+ ref VoiceUpdateState stateA = ref voiceUpdateStates[i - 1].Span[0];
+ ref VoiceUpdateState stateB = ref voiceUpdateStates[i].Span[0];
+
+ Debug.Assert(stateA.WaveBufferConsumed == stateB.WaveBufferConsumed);
+ Debug.Assert(stateA.PlayedSampleCount == stateB.PlayedSampleCount);
+ Debug.Assert(stateA.Offset == stateB.Offset);
+ Debug.Assert(stateA.WaveBufferIndex == stateB.WaveBufferIndex);
+ Debug.Assert(stateA.Fraction == stateB.Fraction);
+ Debug.Assert(stateA.IsWaveBufferValid.SequenceEqual(stateB.IsWaveBufferValid));
+ }
+ }
+#endif
+ if (parameter.IsNew || IsNew)
+ {
+ IsNew = true;
+
+ // A new voice has no playback history to report yet.
+ outStatus.VoiceDropFlag = false;
+ outStatus.PlayedWaveBuffersCount = 0;
+ outStatus.PlayedSampleCount = 0;
+ }
+ else
+ {
+ ref VoiceUpdateState state = ref voiceUpdateStates[0].Span[0];
+
+ outStatus.VoiceDropFlag = VoiceDropFlag;
+ outStatus.PlayedWaveBuffersCount = state.WaveBufferConsumed;
+ outStatus.PlayedSampleCount = state.PlayedSampleCount;
+ }
+ }
+
+ /// <summary>
+ /// Update the internal state of all the <see cref="WaveBuffer"/> of the <see cref="VoiceState"/>.
+ /// </summary>
+ /// <param name="errorInfos">An array of <see cref="ErrorInfo"/> used to report errors when mapping any of the <see cref="WaveBuffer"/>.</param>
+ /// <param name="parameter">The user parameter.</param>
+ /// <param name="voiceUpdateStates">The voice states associated to the <see cref="VoiceState"/>.</param>
+ /// <param name="mapper">The mapper to use.</param>
+ /// <param name="behaviourContext">The behaviour context.</param>
+ public void UpdateWaveBuffers(out ErrorInfo[] errorInfos, ref VoiceInParameter parameter, ReadOnlySpan<Memory<VoiceUpdateState>> voiceUpdateStates, ref PoolMapper mapper, ref BehaviourContext behaviourContext)
+ {
+ // Two error slots per wavebuffer: buffer mapping error and ADPCM context mapping error.
+ errorInfos = new ErrorInfo[Constants.VoiceWaveBufferCount * 2];
+
+ if (parameter.IsNew)
+ {
+ InitializeWaveBuffers();
+
+ for (int i = 0; i < parameter.ChannelCount; i++)
+ {
+ voiceUpdateStates[i].Span[0].IsWaveBufferValid.Fill(false);
+ }
+ }
+
+ ref VoiceUpdateState voiceUpdateState = ref voiceUpdateStates[0].Span[0];
+
+ for (int i = 0; i < Constants.VoiceWaveBufferCount; i++)
+ {
+ UpdateWaveBuffer(errorInfos.AsSpan(i * 2, 2), ref WaveBuffers[i], ref parameter.WaveBuffers[i], parameter.SampleFormat, voiceUpdateState.IsWaveBufferValid[i], ref mapper, ref behaviourContext);
+ }
+ }
+
+ /// <summary>
+ /// Update the internal state of one of the <see cref="WaveBuffer"/> of the <see cref="VoiceState"/>.
+ /// </summary>
+ /// <param name="errorInfos">A <see cref="Span{ErrorInfo}"/> used to report errors when mapping the <see cref="WaveBuffer"/>.</param>
+ /// <param name="waveBuffer">The <see cref="WaveBuffer"/> to update.</param>
+ /// <param name="inputWaveBuffer">The <see cref="WaveBufferInternal"/> from the user input.</param>
+ /// <param name="sampleFormat">The <see cref="SampleFormat"/> from the user input.</param>
+ /// <param name="isValid">If set to true, the server side wavebuffer is considered valid.</param>
+ /// <param name="mapper">The mapper to use.</param>
+ /// <param name="behaviourContext">The behaviour context.</param>
+ private void UpdateWaveBuffer(Span<ErrorInfo> errorInfos, ref WaveBuffer waveBuffer, ref WaveBufferInternal inputWaveBuffer, SampleFormat sampleFormat, bool isValid, ref PoolMapper mapper, ref BehaviourContext behaviourContext)
+ {
+ // Invalid but already sent with a mapped buffer: release the stale mapping.
+ if (!isValid && waveBuffer.IsSendToAudioProcessor && waveBuffer.BufferAddressInfo.CpuAddress != 0)
+ {
+ mapper.ForceUnmap(ref waveBuffer.BufferAddressInfo);
+ waveBuffer.BufferAddressInfo.Setup(0, 0);
+ }
+
+ if (!inputWaveBuffer.SentToServer || BufferInfoUnmapped)
+ {
+ if (inputWaveBuffer.IsSampleOffsetValid(sampleFormat))
+ {
+ Debug.Assert(waveBuffer.IsSendToAudioProcessor);
+
+ waveBuffer.IsSendToAudioProcessor = false;
+ waveBuffer.StartSampleOffset = inputWaveBuffer.StartSampleOffset;
+ waveBuffer.EndSampleOffset = inputWaveBuffer.EndSampleOffset;
+ waveBuffer.ShouldLoop = inputWaveBuffer.ShouldLoop;
+ waveBuffer.IsEndOfStream = inputWaveBuffer.IsEndOfStream;
+ waveBuffer.LoopStartSampleOffset = inputWaveBuffer.LoopFirstSampleOffset;
+ waveBuffer.LoopEndSampleOffset = inputWaveBuffer.LoopLastSampleOffset;
+ waveBuffer.LoopCount = inputWaveBuffer.LoopCount;
+
+ BufferInfoUnmapped = !mapper.TryAttachBuffer(out ErrorInfo bufferInfoError, ref waveBuffer.BufferAddressInfo, inputWaveBuffer.Address, inputWaveBuffer.Size);
+
+ errorInfos[0] = bufferInfoError;
+
+ // ADPCM loop context is only mapped when the REV that fixed the loop context bug applies.
+ if (sampleFormat == SampleFormat.Adpcm && behaviourContext.IsAdpcmLoopContextBugFixed() && inputWaveBuffer.ContextAddress != 0)
+ {
+ bool adpcmLoopContextMapped = mapper.TryAttachBuffer(out ErrorInfo adpcmLoopContextInfoError,
+ ref waveBuffer.ContextAddressInfo,
+ inputWaveBuffer.ContextAddress,
+ inputWaveBuffer.ContextSize);
+
+ errorInfos[1] = adpcmLoopContextInfoError;
+
+ if (adpcmLoopContextMapped)
+ {
+ BufferInfoUnmapped = DataSourceStateUnmapped;
+ }
+ else
+ {
+ BufferInfoUnmapped = true;
+ }
+ }
+ else
+ {
+ waveBuffer.ContextAddressInfo.Setup(0, 0);
+ }
+ }
+ else
+ {
+ errorInfos[0].ErrorCode = ResultCode.InvalidAddressInfo;
+ errorInfos[0].ExtraErrorInfo = inputWaveBuffer.Address;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Reset the resources associated to this <see cref="VoiceState"/>.
+ /// </summary>
+ /// <param name="context">The voice context.</param>
+ private void ResetResources(VoiceContext context)
+ {
+ for (int i = 0; i < ChannelsCount; i++)
+ {
+ int channelResourceId = ChannelResourceIds[i];
+
+ ref VoiceChannelResource voiceChannelResource = ref context.GetChannelResource(channelResourceId);
+
+ Debug.Assert(voiceChannelResource.IsUsed);
+
+ Memory<VoiceUpdateState> dspSharedState = context.GetUpdateStateForDsp(channelResourceId);
+
+ // Zero the whole DSP-shared update state for this channel.
+ MemoryMarshal.Cast<VoiceUpdateState, byte>(dspSharedState.Span).Fill(0);
+
+ voiceChannelResource.UpdateState();
+ }
+ }
+
+ /// <summary>
+ /// Flush a certain amount of <see cref="WaveBuffer"/>.
+ /// </summary>
+ /// <param name="waveBufferCount">The amount of wavebuffer to flush.</param>
+ /// <param name="voiceUpdateStates">The voice states associated to the <see cref="VoiceState"/>.</param>
+ /// <param name="channelCount">The channel count from user input.</param>
+ private void FlushWaveBuffers(uint waveBufferCount, Memory<VoiceUpdateState>[] voiceUpdateStates, uint channelCount)
+ {
+ uint waveBufferIndex = WaveBuffersIndex;
+
+ for (int i = 0; i < waveBufferCount; i++)
+ {
+ // Mark the wavebuffer as already sent and consume it on every channel.
+ WaveBuffers[(int)waveBufferIndex].IsSendToAudioProcessor = true;
+
+ for (int j = 0; j < channelCount; j++)
+ {
+ ref VoiceUpdateState voiceUpdateState = ref voiceUpdateStates[j].Span[0];
+
+ voiceUpdateState.WaveBufferIndex = (voiceUpdateState.WaveBufferIndex + 1) % Constants.VoiceWaveBufferCount;
+ voiceUpdateState.WaveBufferConsumed++;
+ voiceUpdateState.IsWaveBufferValid[(int)waveBufferIndex] = false;
+ }
+
+ waveBufferIndex = (waveBufferIndex + 1) % Constants.VoiceWaveBufferCount;
+ }
+ }
+
+ /// <summary>
+ /// Update the internal parameters for command generation.
+ /// </summary>
+ /// <param name="voiceUpdateStates">The voice states associated to the <see cref="VoiceState"/>.</param>
+ /// <returns>Return true if this voice should be played.</returns>
+ public bool UpdateParametersForCommandGeneration(Memory<VoiceUpdateState>[] voiceUpdateStates)
+ {
+ if (FlushWaveBufferCount != 0)
+ {
+ FlushWaveBuffers(FlushWaveBufferCount, voiceUpdateStates, ChannelsCount);
+
+ FlushWaveBufferCount = 0;
+ }
+
+ switch (PlayState)
+ {
+ case Types.PlayState.Started:
+ // Hand any not-yet-sent wavebuffer over to the audio processor by marking it valid on every channel.
+ for (int i = 0; i < WaveBuffers.Length; i++)
+ {
+ ref WaveBuffer wavebuffer = ref WaveBuffers[i];
+
+ if (!wavebuffer.IsSendToAudioProcessor)
+ {
+ for (int y = 0; y < ChannelsCount; y++)
+ {
+ Debug.Assert(!voiceUpdateStates[y].Span[0].IsWaveBufferValid[i]);
+
+ voiceUpdateStates[y].Span[0].IsWaveBufferValid[i] = true;
+ }
+
+ wavebuffer.IsSendToAudioProcessor = true;
+ }
+ }
+
+ WasPlaying = false;
+
+ // Play only if at least one wavebuffer is valid.
+ ref VoiceUpdateState primaryVoiceUpdateState = ref voiceUpdateStates[0].Span[0];
+
+ for (int i = 0; i < primaryVoiceUpdateState.IsWaveBufferValid.Length; i++)
+ {
+ if (primaryVoiceUpdateState.IsWaveBufferValid[i])
+ {
+ return true;
+ }
+ }
+
+ return false;
+
+ case Types.PlayState.Stopping:
+ // Consume any remaining valid wavebuffer and reset the per-channel playback state.
+ for (int i = 0; i < WaveBuffers.Length; i++)
+ {
+ ref WaveBuffer wavebuffer = ref WaveBuffers[i];
+
+ wavebuffer.IsSendToAudioProcessor = true;
+
+ for (int j = 0; j < ChannelsCount; j++)
+ {
+ ref VoiceUpdateState voiceUpdateState = ref voiceUpdateStates[j].Span[0];
+
+ if (voiceUpdateState.IsWaveBufferValid[i])
+ {
+ voiceUpdateState.WaveBufferIndex = (voiceUpdateState.WaveBufferIndex + 1) % Constants.VoiceWaveBufferCount;
+ voiceUpdateState.WaveBufferConsumed++;
+ }
+
+ voiceUpdateState.IsWaveBufferValid[i] = false;
+ }
+ }
+
+ for (int i = 0; i < ChannelsCount; i++)
+ {
+ ref VoiceUpdateState voiceUpdateState = ref voiceUpdateStates[i].Span[0];
+
+ voiceUpdateState.Offset = 0;
+ voiceUpdateState.PlayedSampleCount = 0;
+ voiceUpdateState.Pitch.AsSpan().Fill(0);
+ voiceUpdateState.Fraction = 0;
+ voiceUpdateState.LoopContext = new Dsp.State.AdpcmLoopContext();
+ }
+
+ PlayState = Types.PlayState.Stopped;
+ WasPlaying = PreviousPlayState == Types.PlayState.Started;
+
+ // One last pass is needed if the voice was playing before, so the stop is processed.
+ return WasPlaying;
+
+ case Types.PlayState.Stopped:
+ case Types.PlayState.Paused:
+ // NOTE(review): GetReference(true) appears to refresh the buffer address references
+ // while idle — confirm against AddressInfo's implementation.
+ foreach (ref WaveBuffer wavebuffer in WaveBuffers.AsSpan())
+ {
+ wavebuffer.BufferAddressInfo.GetReference(true);
+ wavebuffer.ContextAddressInfo.GetReference(true);
+ }
+
+ if (SampleFormat == SampleFormat.Adpcm)
+ {
+ if (DataSourceStateAddressInfo.CpuAddress != 0)
+ {
+ DataSourceStateAddressInfo.GetReference(true);
+ }
+ }
+
+ WasPlaying = PreviousPlayState == Types.PlayState.Started;
+
+ return WasPlaying;
+ default:
+ throw new NotImplementedException($"{PlayState}");
+ }
+ }
+
+ /// <summary>
+ /// Update the internal state for command generation.
+ /// </summary>
+ /// <param name="context">The voice context.</param>
+ /// <returns>Return true if this voice should be played.</returns>
+ public bool UpdateForCommandGeneration(VoiceContext context)
+ {
+ if (IsNew)
+ {
+ ResetResources(context);
+ PreviousVolume = Volume;
+ IsNew = false;
+ }
+
+ Memory<VoiceUpdateState>[] voiceUpdateStates = new Memory<VoiceUpdateState>[Constants.VoiceChannelCountMax];
+
+ for (int i = 0; i < ChannelsCount; i++)
+ {
+ voiceUpdateStates[i] = context.GetUpdateStateForDsp(ChannelResourceIds[i]);
+ }
+
+ return UpdateParametersForCommandGeneration(voiceUpdateStates);
+ }
+ }
+} \ No newline at end of file
diff --git a/src/Ryujinx.Audio/Renderer/Server/Voice/WaveBuffer.cs b/src/Ryujinx.Audio/Renderer/Server/Voice/WaveBuffer.cs
new file mode 100644
index 00000000..4bf7dd28
--- /dev/null
+++ b/src/Ryujinx.Audio/Renderer/Server/Voice/WaveBuffer.cs
@@ -0,0 +1,104 @@
+using Ryujinx.Audio.Renderer.Server.MemoryPool;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Audio.Renderer.Server.Voice
+{
+ /// <summary>
+ /// A wavebuffer used for server update.
+ /// </summary>
+ [StructLayout(LayoutKind.Sequential, Size = 0x58, Pack = 1)]
+ public struct WaveBuffer
+ {
+ /// <summary>
+ /// The <see cref="AddressInfo"/> of the sample data of the wavebuffer.
+ /// </summary>
+ public AddressInfo BufferAddressInfo;
+
+ /// <summary>
+ /// The <see cref="AddressInfo"/> of the context of the wavebuffer.
+ /// </summary>
+ /// <remarks>Only used by <see cref="Common.SampleFormat.Adpcm"/>.</remarks>
+ public AddressInfo ContextAddressInfo;
+
+
+ /// <summary>
+ /// First sample to play of the wavebuffer.
+ /// </summary>
+ public uint StartSampleOffset;
+
+ /// <summary>
+ /// Last sample to play of the wavebuffer.
+ /// </summary>
+ public uint EndSampleOffset;
+
+ /// <summary>
+ /// Set to true if the wavebuffer is looping.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool ShouldLoop;
+
+ /// <summary>
+ /// Set to true if the wavebuffer is the end of stream.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool IsEndOfStream;
+
+ /// <summary>
+ /// Set to true if the wavebuffer wasn't sent to the <see cref="Dsp.AudioProcessor"/>.
+ /// </summary>
+ [MarshalAs(UnmanagedType.I1)]
+ public bool IsSendToAudioProcessor;
+
+ /// <summary>
+ /// First sample to play when looping the wavebuffer.
+ /// </summary>
+ public uint LoopStartSampleOffset;
+
+ /// <summary>
+ /// Last sample to play when looping the wavebuffer.
+ /// </summary>
+ public uint LoopEndSampleOffset;
+
+ /// <summary>
+ /// The max loop count.
+ /// </summary>
+ public int LoopCount;
+
+ /// <summary>
+ /// Create a new <see cref="Common.WaveBuffer"/> for use by the <see cref="Dsp.AudioProcessor"/>.
+ /// </summary>
+ /// <param name="version">The target version of the wavebuffer.</param>
+ /// <returns>A new <see cref="Common.WaveBuffer"/> for use by the <see cref="Dsp.AudioProcessor"/>.</returns>
+ public Common.WaveBuffer ToCommon(int version)
+ {
+ Common.WaveBuffer waveBuffer = new Common.WaveBuffer();
+
+ waveBuffer.Buffer = BufferAddressInfo.GetReference(true);
+ waveBuffer.BufferSize = (uint)BufferAddressInfo.Size;
+
+ // Context is only populated when one was set up (ADPCM only, per ContextAddressInfo).
+ if (ContextAddressInfo.CpuAddress != 0)
+ {
+ waveBuffer.Context = ContextAddressInfo.GetReference(true);
+ waveBuffer.ContextSize = (uint)ContextAddressInfo.Size;
+ }
+
+ waveBuffer.StartSampleOffset = StartSampleOffset;
+ waveBuffer.EndSampleOffset = EndSampleOffset;
+ waveBuffer.Looping = ShouldLoop;
+ waveBuffer.IsEndOfStream = IsEndOfStream;
+
+ // Only version 2 wavebuffers carry explicit loop information; older versions
+ // fall back to a LoopCount of -1.
+ if (version == 2)
+ {
+ waveBuffer.LoopCount = LoopCount;
+ waveBuffer.LoopStartSampleOffset = LoopStartSampleOffset;
+ waveBuffer.LoopEndSampleOffset = LoopEndSampleOffset;
+ }
+ else
+ {
+ waveBuffer.LoopCount = -1;
+ }
+
+ return waveBuffer;
+ }
+ }
+} \ No newline at end of file