Audio Restructuring #50
@ -0,0 +1,71 @@
using System;
using System.Runtime.InteropServices;

namespace MoonWorks.Audio
{
	/// <summary>
	/// Contains raw audio data in the format specified by Format.
	/// Submit this to a SourceVoice to play audio.
	/// </summary>
	public class AudioBuffer : AudioResource
	{
		IntPtr BufferDataPtr;
		uint BufferDataLength;
		private bool OwnsBufferData;

		public Format Format { get; }

		public AudioBuffer(
			AudioDevice device,
			Format format,
			IntPtr bufferPtr,
			uint bufferLengthInBytes,
			bool ownsBufferData) : base(device)
		{
			Format = format;
			BufferDataPtr = bufferPtr;
			BufferDataLength = bufferLengthInBytes;
			OwnsBufferData = ownsBufferData;
		}

		/// <summary>
		/// Create another AudioBuffer from this audio buffer.
		/// It will not own the buffer data.
		/// </summary>
		/// <param name="offset">Offset in bytes from the top of the original buffer.</param>
		/// <param name="length">Length in bytes of the new buffer.</param>
		/// <returns></returns>
		public AudioBuffer Slice(int offset, uint length)
		{
			return new AudioBuffer(Device, Format, BufferDataPtr + offset, length, false);
		}

		/// <summary>
		/// Create an FAudioBuffer struct from this AudioBuffer.
		/// </summary>
		/// <param name="loop">Whether we should set the FAudioBuffer to loop.</param>
		public FAudio.FAudioBuffer ToFAudioBuffer(bool loop = false)
		{
			return new FAudio.FAudioBuffer
			{
				Flags = FAudio.FAUDIO_END_OF_STREAM,
				pContext = IntPtr.Zero,
				pAudioData = BufferDataPtr,
				AudioBytes = BufferDataLength,
				PlayBegin = 0,
				PlayLength = 0,
				LoopBegin = 0,
				LoopLength = 0,
				LoopCount = loop ? FAudio.FAUDIO_LOOP_INFINITE : 0
			};
		}

		protected override unsafe void Destroy()
		{
			if (OwnsBufferData)
			{
				NativeMemory.Free((void*) BufferDataPtr);
			}
		}
	}
}
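A small usage sketch for Slice; the helper name, file path, and byte length below are illustrative and assume a constructed AudioDevice:

	// Load a whole file into one buffer, then view a sub-range of it without copying.
	static (AudioBuffer full, AudioBuffer intro) LoadWithIntroSlice(AudioDevice device)
	{
		var full = AudioDataWav.CreateBuffer(device, "content/jump.wav"); // path is illustrative

		// Offset and length are in bytes; the slice does not own the native memory,
		// so the parent buffer must stay alive (and undisposed) while the slice is in use.
		var intro = full.Slice(0, 4096);

		return (full, intro);
	}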
@ -0,0 +1,138 @@
using System;
using System.IO;
using System.Runtime.InteropServices;

namespace MoonWorks.Audio
{
	/// <summary>
	/// Streamable audio in Ogg format.
	/// </summary>
	public class AudioDataOgg : AudioDataStreamable
	{
		private IntPtr FileDataPtr = IntPtr.Zero;
		private IntPtr VorbisHandle = IntPtr.Zero;

		private string FilePath;

		public override bool Loaded => VorbisHandle != IntPtr.Zero;
		public override uint DecodeBufferSize => 32768;

		public AudioDataOgg(AudioDevice device, string filePath) : base(device)
		{
			FilePath = filePath;

			var handle = FAudio.stb_vorbis_open_filename(filePath, out var error, IntPtr.Zero);

			if (error != 0)
			{
				throw new AudioLoadException("Error loading file!");
			}

			var info = FAudio.stb_vorbis_get_info(handle);

			Format = new Format
			{
				Tag = FormatTag.IEEE_FLOAT,
				BitsPerSample = 32,
				Channels = (ushort) info.channels,
				SampleRate = info.sample_rate
			};

			FAudio.stb_vorbis_close(handle);
		}

		public override unsafe void Decode(void* buffer, int bufferLengthInBytes, out int filledLengthInBytes, out bool reachedEnd)
		{
			var lengthInFloats = bufferLengthInBytes / sizeof(float);

			/* NOTE: this function returns samples per channel, not total samples */
			var samples = FAudio.stb_vorbis_get_samples_float_interleaved(
				VorbisHandle,
				Format.Channels,
				(IntPtr) buffer,
				lengthInFloats
			);

			var sampleCount = samples * Format.Channels;
			reachedEnd = sampleCount < lengthInFloats;
			filledLengthInBytes = sampleCount * sizeof(float);
		}

		public override unsafe void Load()
		{
			if (!Loaded)
			{
				var fileStream = new FileStream(FilePath, FileMode.Open, FileAccess.Read);
				FileDataPtr = (nint) NativeMemory.Alloc((nuint) fileStream.Length);
				var fileDataSpan = new Span<byte>((void*) FileDataPtr, (int) fileStream.Length);
				fileStream.ReadExactly(fileDataSpan);
				fileStream.Close();

				VorbisHandle = FAudio.stb_vorbis_open_memory(FileDataPtr, fileDataSpan.Length, out int error, IntPtr.Zero);
				if (error != 0)
				{
					NativeMemory.Free((void*) FileDataPtr);
					Logger.LogError("Error opening OGG file!");
					Logger.LogError("Error: " + error);
					throw new AudioLoadException("Error opening OGG file!");
				}
			}
		}

		public override void Seek(uint sampleFrame)
		{
			FAudio.stb_vorbis_seek(VorbisHandle, sampleFrame);
		}

		public override unsafe void Unload()
		{
			if (Loaded)
			{
				FAudio.stb_vorbis_close(VorbisHandle);
				NativeMemory.Free((void*) FileDataPtr);

				VorbisHandle = IntPtr.Zero;
				FileDataPtr = IntPtr.Zero;
			}
		}

		public unsafe static AudioBuffer CreateBuffer(AudioDevice device, string filePath)
		{
			var filePointer = FAudio.stb_vorbis_open_filename(filePath, out var error, IntPtr.Zero);

			if (error != 0)
			{
				throw new AudioLoadException("Error loading file!");
			}
			var info = FAudio.stb_vorbis_get_info(filePointer);
			var lengthInFloats =
				FAudio.stb_vorbis_stream_length_in_samples(filePointer) * info.channels;
			var lengthInBytes = lengthInFloats * Marshal.SizeOf<float>();
			var buffer = NativeMemory.Alloc((nuint) lengthInBytes);

			FAudio.stb_vorbis_get_samples_float_interleaved(
				filePointer,
				info.channels,
				(nint) buffer,
				(int) lengthInFloats
			);

			FAudio.stb_vorbis_close(filePointer);

			var format = new Format
			{
				Tag = FormatTag.IEEE_FLOAT,
				BitsPerSample = 32,
				Channels = (ushort) info.channels,
				SampleRate = info.sample_rate
			};

			return new AudioBuffer(
				device,
				format,
				(nint) buffer,
				(uint) lengthInBytes,
				true);
		}
	}
}
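A sketch of the static decode path above, assuming a constructed AudioDevice; the helper name and file path are illustrative:

	// Decode an entire Ogg file into one float32 buffer up front. The returned buffer
	// owns its native memory and frees it when the resource is destroyed.
	static AudioBuffer LoadTheme(AudioDevice device)
	{
		var music = AudioDataOgg.CreateBuffer(device, "content/theme.ogg"); // path is illustrative

		// music.Format mirrors the stb_vorbis info: IEEE_FLOAT, 32 bits per sample,
		// with channel count and sample rate taken from the file.
		return music;
	}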
@ -0,0 +1,155 @@
using System;
using System.IO;
using System.Runtime.InteropServices;

namespace MoonWorks.Audio
{
	/// <summary>
	/// Streamable audio in QOA format.
	/// </summary>
	public class AudioDataQoa : AudioDataStreamable
	{
		private IntPtr QoaHandle = IntPtr.Zero;
		private IntPtr FileDataPtr = IntPtr.Zero;

		private string FilePath;

		private const uint QOA_MAGIC = 0x716f6166; /* 'qoaf' */

		public override bool Loaded => QoaHandle != IntPtr.Zero;

		private uint decodeBufferSize;
		public override uint DecodeBufferSize => decodeBufferSize;

		public AudioDataQoa(AudioDevice device, string filePath) : base(device)
		{
			FilePath = filePath;

			using var stream = new FileStream(FilePath, FileMode.Open, FileAccess.Read);
			using var reader = new BinaryReader(stream);

			UInt64 fileHeader = ReverseEndianness(reader.ReadUInt64());
			if ((fileHeader >> 32) != QOA_MAGIC)
			{
				throw new AudioLoadException("Specified file is not a QOA file.");
			}

			uint totalSamplesPerChannel = (uint) (fileHeader & (0xFFFFFFFF));
			if (totalSamplesPerChannel == 0)
			{
				throw new AudioLoadException("Specified file is not a valid QOA file.");
			}

			UInt64 frameHeader = ReverseEndianness(reader.ReadUInt64());
			uint channels = (uint) ((frameHeader >> 56) & 0x0000FF);
			uint samplerate = (uint) ((frameHeader >> 32) & 0xFFFFFF);
			uint samplesPerChannelPerFrame = (uint) ((frameHeader >> 16) & 0x00FFFF);

			Format = new Format
			{
				Tag = FormatTag.PCM,
				BitsPerSample = 16,
				Channels = (ushort) channels,
				SampleRate = samplerate
			};

			decodeBufferSize = channels * samplesPerChannelPerFrame * sizeof(short);
		}

		public override unsafe void Decode(void* buffer, int bufferLengthInBytes, out int filledLengthInBytes, out bool reachedEnd)
		{
			var lengthInShorts = bufferLengthInBytes / sizeof(short);

			// NOTE: this function returns samples per channel!
			var samples = FAudio.qoa_decode_next_frame(QoaHandle, (short*) buffer);

			var sampleCount = samples * Format.Channels;
			reachedEnd = sampleCount < lengthInShorts;
			filledLengthInBytes = (int) (sampleCount * sizeof(short));
		}

		public override unsafe void Load()
		{
			if (!Loaded)
			{
				var fileStream = new FileStream(FilePath, FileMode.Open, FileAccess.Read);
				FileDataPtr = (nint) NativeMemory.Alloc((nuint) fileStream.Length);
				var fileDataSpan = new Span<byte>((void*) FileDataPtr, (int) fileStream.Length);
				fileStream.ReadExactly(fileDataSpan);
				fileStream.Close();

				QoaHandle = FAudio.qoa_open_from_memory((char*) FileDataPtr, (uint) fileDataSpan.Length, 0);
				if (QoaHandle == IntPtr.Zero)
				{
					NativeMemory.Free((void*) FileDataPtr);
					Logger.LogError("Error opening QOA file!");
					throw new AudioLoadException("Error opening QOA file!");
				}
			}
		}

		public override void Seek(uint sampleFrame)
		{
			FAudio.qoa_seek_frame(QoaHandle, (int) sampleFrame);
		}

		public override unsafe void Unload()
		{
			if (Loaded)
			{
				FAudio.qoa_close(QoaHandle);
				NativeMemory.Free((void*) FileDataPtr);

				QoaHandle = IntPtr.Zero;
				FileDataPtr = IntPtr.Zero;
			}
		}

		public unsafe static AudioBuffer CreateBuffer(AudioDevice device, string filePath)
		{
			using var fileStream = new FileStream(filePath, FileMode.Open, FileAccess.Read);
			var fileDataPtr = NativeMemory.Alloc((nuint) fileStream.Length);
			var fileDataSpan = new Span<byte>(fileDataPtr, (int) fileStream.Length);
			fileStream.ReadExactly(fileDataSpan);
			fileStream.Close();

			var qoaHandle = FAudio.qoa_open_from_memory((char*) fileDataPtr, (uint) fileDataSpan.Length, 0);
			if (qoaHandle == 0)
			{
				NativeMemory.Free(fileDataPtr);
				Logger.LogError("Error opening QOA file!");
				throw new AudioLoadException("Error opening QOA file!");
			}

			FAudio.qoa_attributes(qoaHandle, out var channels, out var samplerate, out var samples_per_channel_per_frame, out var total_samples_per_channel);

			var bufferLengthInBytes = total_samples_per_channel * channels * sizeof(short);
			var buffer = NativeMemory.Alloc(bufferLengthInBytes);
			FAudio.qoa_decode_entire(qoaHandle, (short*) buffer);

			FAudio.qoa_close(qoaHandle);
			NativeMemory.Free(fileDataPtr);

			var format = new Format
			{
				Tag = FormatTag.PCM,
				BitsPerSample = 16,
				Channels = (ushort) channels,
				SampleRate = samplerate
			};

			return new AudioBuffer(device, format, (nint) buffer, bufferLengthInBytes, true);
		}

		private static unsafe UInt64 ReverseEndianness(UInt64 value)
		{
			byte* bytes = (byte*) &value;

			return
				((UInt64)(bytes[0]) << 56) | ((UInt64)(bytes[1]) << 48) |
				((UInt64)(bytes[2]) << 40) | ((UInt64)(bytes[3]) << 32) |
				((UInt64)(bytes[4]) << 24) | ((UInt64)(bytes[5]) << 16) |
				((UInt64)(bytes[6]) << 8) | ((UInt64)(bytes[7]) << 0);
		}
	}
}
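The constructor's header parse could equivalently lean on the BCL byte-swap helper instead of the pointer-based ReverseEndianness above; a sketch using System.Buffers.Binary.BinaryPrimitives, with an illustrative helper name:

	// using System.Buffers.Binary; using System.IO;
	// Same field layout as the constructor: magic in the top 32 bits of the file header,
	// then channels, sample rate, and samples-per-channel-per-frame packed into the frame header.
	static (uint channels, uint sampleRate, uint samplesPerChannelPerFrame) ReadQoaHeaders(BinaryReader reader)
	{
		ulong fileHeader = BinaryPrimitives.ReverseEndianness(reader.ReadUInt64());
		if ((fileHeader >> 32) != 0x716f6166) // 'qoaf'
		{
			throw new AudioLoadException("Specified file is not a QOA file.");
		}

		ulong frameHeader = BinaryPrimitives.ReverseEndianness(reader.ReadUInt64());
		return (
			(uint) ((frameHeader >> 56) & 0x0000FF),
			(uint) ((frameHeader >> 32) & 0xFFFFFF),
			(uint) ((frameHeader >> 16) & 0x00FFFF)
		);
	}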
@ -0,0 +1,45 @@
namespace MoonWorks.Audio
{
	/// <summary>
	/// Use this in conjunction with a StreamingVoice to play back streaming audio data.
	/// </summary>
	public abstract class AudioDataStreamable : AudioResource
	{
		public Format Format { get; protected set; }
		public abstract bool Loaded { get; }
		public abstract uint DecodeBufferSize { get; }

		protected AudioDataStreamable(AudioDevice device) : base(device)
		{
		}

		/// <summary>
		/// Loads the raw audio data into memory to prepare it for stream decoding.
		/// </summary>
		public abstract void Load();

		/// <summary>
		/// Unloads the raw audio data from memory.
		/// </summary>
		public abstract void Unload();

		/// <summary>
		/// Seeks to the given sample frame.
		/// </summary>
		public abstract void Seek(uint sampleFrame);

		/// <summary>
		/// Attempts to decode data of length bufferLengthInBytes into the provided buffer.
		/// </summary>
		/// <param name="buffer">The buffer that decoded bytes will be placed into.</param>
		/// <param name="bufferLengthInBytes">Requested length of decoded audio data.</param>
		/// <param name="filledLengthInBytes">How much data was actually filled in by the decode.</param>
		/// <param name="reachedEnd">Whether the end of the data was reached on this decode.</param>
		public abstract unsafe void Decode(void* buffer, int bufferLengthInBytes, out int filledLengthInBytes, out bool reachedEnd);

		protected override void Destroy()
		{
			Unload();
		}
	}
}
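A rough sketch of how a caller might drive the streaming contract above; the helper name is illustrative and the buffer handling is simplified to a stack scratch buffer:

	// Pump the streamable data from the start until the decoder reports the end.
	// Each Decode call fills at most DecodeBufferSize bytes in the data's Format.
	static unsafe void PumpAll(AudioDataStreamable data)
	{
		data.Load();
		data.Seek(0);

		byte* scratch = stackalloc byte[(int) data.DecodeBufferSize];
		bool reachedEnd = false;

		while (!reachedEnd)
		{
			data.Decode(scratch, (int) data.DecodeBufferSize, out var filled, out reachedEnd);
			// `filled` bytes of decoded audio are now available in scratch;
			// a StreamingVoice would copy or submit them here.
		}

		data.Unload();
	}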
@ -0,0 +1,100 @@
using System;
using System.IO;
using System.Runtime.InteropServices;

namespace MoonWorks.Audio
{
	public static class AudioDataWav
	{
		/// <summary>
		/// Create an AudioBuffer containing all the WAV audio data in a file.
		/// </summary>
		/// <returns></returns>
		public unsafe static AudioBuffer CreateBuffer(AudioDevice device, string filePath)
		{
			// mostly borrowed from https://github.com/FNA-XNA/FNA/blob/b71b4a35ae59970ff0070dea6f8620856d8d4fec/src/Audio/SoundEffect.cs#L385

			// WaveFormatEx data
			ushort wFormatTag;
			ushort nChannels;
			uint nSamplesPerSec;
			uint nAvgBytesPerSec;
			ushort nBlockAlign;
			ushort wBitsPerSample;

			using var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read);
			using var reader = new BinaryReader(stream);

			// RIFF Signature
			string signature = new string(reader.ReadChars(4));
			if (signature != "RIFF")
			{
				throw new NotSupportedException("Specified stream is not a wave file.");
			}

			reader.ReadUInt32(); // Riff Chunk Size

			string wformat = new string(reader.ReadChars(4));
			if (wformat != "WAVE")
			{
				throw new NotSupportedException("Specified stream is not a wave file.");
			}

			// WAVE Header
			string format_signature = new string(reader.ReadChars(4));
			while (format_signature != "fmt ")
			{
				reader.ReadBytes(reader.ReadInt32());
				format_signature = new string(reader.ReadChars(4));
			}

			int format_chunk_size = reader.ReadInt32();

			wFormatTag = reader.ReadUInt16();
			nChannels = reader.ReadUInt16();
			nSamplesPerSec = reader.ReadUInt32();
			nAvgBytesPerSec = reader.ReadUInt32();
			nBlockAlign = reader.ReadUInt16();
			wBitsPerSample = reader.ReadUInt16();

			// Reads residual bytes
			if (format_chunk_size > 16)
			{
				reader.ReadBytes(format_chunk_size - 16);
			}

			// data Signature
			string data_signature = new string(reader.ReadChars(4));
			while (data_signature.ToLowerInvariant() != "data")
			{
				reader.ReadBytes(reader.ReadInt32());
				data_signature = new string(reader.ReadChars(4));
			}
			if (data_signature != "data")
			{
				throw new NotSupportedException("Specified wave file is not supported.");
			}

			int waveDataLength = reader.ReadInt32();
			var waveDataBuffer = NativeMemory.Alloc((nuint) waveDataLength);
			var waveDataSpan = new Span<byte>(waveDataBuffer, waveDataLength);
			stream.ReadExactly(waveDataSpan);

			var format = new Format
			{
				Tag = (FormatTag) wFormatTag,
				BitsPerSample = wBitsPerSample,
				Channels = nChannels,
				SampleRate = nSamplesPerSec
			};

			return new AudioBuffer(
				device,
				format,
				(nint) waveDataBuffer,
				(uint) waveDataLength,
				true
			);
		}
	}
}
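The fmt fields read above are mutually redundant for plain PCM data; a small sanity-check sketch of how they relate (the loader itself does not perform this check), with an illustrative helper name:

	// e.g. 2 channels * 16 bits -> blockAlign 4 bytes per frame; 44100 Hz * 4 -> 176400 bytes per second.
	static bool FmtChunkIsConsistent(ushort channels, ushort bitsPerSample, uint samplesPerSec, ushort blockAlign, uint avgBytesPerSec)
	{
		var expectedBlockAlign = (ushort) (channels * (bitsPerSample / 8));
		return blockAlign == expectedBlockAlign && avgBytesPerSec == samplesPerSec * expectedBlockAlign;
	}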
@ -1,6 +1,5 @@
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Threading;

namespace MoonWorks.Audio

@ -9,31 +8,27 @@ namespace MoonWorks.Audio
	{
		public IntPtr Handle { get; }
		public byte[] Handle3D { get; }
		public IntPtr MasteringVoice { get; }
		public FAudio.FAudioDeviceDetails DeviceDetails { get; }

		private IntPtr trueMasteringVoice;

		// this is a fun little trick where we use a submix voice as a "faux" mastering voice
		// this lets us maintain API consistency for effects like panning and reverb
		private SubmixVoice fauxMasteringVoice;
		public SubmixVoice MasteringVoice => fauxMasteringVoice;

		public float CurveDistanceScalar = 1f;
		public float DopplerScale = 1f;
		public float SpeedOfSound = 343.5f;

		private float masteringVolume = 1f;
		public float MasteringVolume
		{
			get => masteringVolume;
			set
			{
				masteringVolume = value;
				FAudio.FAudioVoice_SetVolume(MasteringVoice, masteringVolume, 0);
			}
		}

		private readonly HashSet<WeakReference> resources = new HashSet<WeakReference>();
		private readonly List<StreamingSound> autoUpdateStreamingSoundReferences = new List<StreamingSound>();
		private readonly List<StaticSoundInstance> autoFreeStaticSoundInstanceReferences = new List<StaticSoundInstance>();
		private readonly List<WeakReference<SoundSequence>> soundSequenceReferences = new List<WeakReference<SoundSequence>>();
		private readonly HashSet<SourceVoice> activeSourceVoices = new HashSet<SourceVoice>();

		private AudioTweenManager AudioTweenManager;

		private SourceVoicePool VoicePool;
		private List<SourceVoice> VoicesToReturn = new List<SourceVoice>();

		private const int Step = 200;
		private TimeSpan UpdateInterval;
		private System.Diagnostics.Stopwatch TickStopwatch = new System.Diagnostics.Stopwatch();

@ -93,25 +88,24 @@ namespace MoonWorks.Audio
			}

			/* Init Mastering Voice */
			IntPtr masteringVoice;

			if (FAudio.FAudio_CreateMasteringVoice(
			var result = FAudio.FAudio_CreateMasteringVoice(
				Handle,
				out masteringVoice,
				out trueMasteringVoice,
				FAudio.FAUDIO_DEFAULT_CHANNELS,
				FAudio.FAUDIO_DEFAULT_SAMPLERATE,
				0,
				i,
				IntPtr.Zero
			) != 0)
			);

			if (result != 0)
			{
				Logger.LogError("No mastering voice found!");
				FAudio.FAudio_Release(Handle);
				Handle = IntPtr.Zero;
				Logger.LogError("Failed to create a mastering voice!");
				Logger.LogError("Audio device creation failed!");
				return;
			}

			MasteringVoice = masteringVoice;
			fauxMasteringVoice = new SubmixVoice(this, DeviceDetails.OutputFormat.Format.nChannels, DeviceDetails.OutputFormat.Format.nSamplesPerSec, int.MaxValue);

			/* Init 3D Audio */

@ -123,6 +117,7 @@ namespace MoonWorks.Audio
			);

			AudioTweenManager = new AudioTweenManager();
			VoicePool = new SourceVoicePool(this);

			Logger.LogInfo("Setting up audio thread...");
			WakeSignal = new AutoResetEvent(true);

@ -163,53 +158,60 @@ namespace MoonWorks.Audio
			previousTickTime = TickStopwatch.Elapsed.Ticks;
			float elapsedSeconds = (float) tickDelta / System.TimeSpan.TicksPerSecond;

			for (var i = autoUpdateStreamingSoundReferences.Count - 1; i >= 0; i -= 1)
			{
				var streamingSound = autoUpdateStreamingSoundReferences[i];

				if (streamingSound.Loaded)
				{
					streamingSound.Update();
				}
				else
				{
					autoUpdateStreamingSoundReferences.RemoveAt(i);
				}
			}

			for (var i = autoFreeStaticSoundInstanceReferences.Count - 1; i >= 0; i -= 1)
			{
				var staticSoundInstance = autoFreeStaticSoundInstanceReferences[i];

				if (staticSoundInstance.State == SoundState.Stopped)
				{
					staticSoundInstance.Free();
					autoFreeStaticSoundInstanceReferences.RemoveAt(i);
				}
			}

			for (var i = soundSequenceReferences.Count - 1; i >= 0; i -= 1)
			{
				if (soundSequenceReferences[i].TryGetTarget(out var soundSequence))
				{
					soundSequence.Update();
				}
				else
				{
					soundSequenceReferences.RemoveAt(i);
				}
			}

			AudioTweenManager.Update(elapsedSeconds);

			foreach (var voice in activeSourceVoices)
			{
				voice.Update();
			}

			foreach (var voice in VoicesToReturn)
			{
				voice.Reset();
				activeSourceVoices.Remove(voice);
				VoicePool.Return(voice);
			}

			VoicesToReturn.Clear();
		}

		public void SyncPlay()
		/// <summary>
		/// Triggers all pending operations with the given syncGroup value.
		/// </summary>
		public void TriggerSyncGroup(uint syncGroup)
		{
			FAudio.FAudio_CommitChanges(Handle, 1);
			FAudio.FAudio_CommitChanges(Handle, syncGroup);
		}

		/// <summary>
		/// Obtains an appropriate source voice from the voice pool.
		/// </summary>
		/// <param name="format">The format that the voice must match.</param>
		/// <returns>A source voice with the given format.</returns>
		public T Obtain<T>(Format format) where T : SourceVoice, IPoolable<T>
		{
			lock (StateLock)
			{
				var voice = VoicePool.Obtain<T>(format);
				activeSourceVoices.Add(voice);
				return voice;
			}
		}

		/// <summary>
		/// Returns the source voice to the voice pool.
		/// </summary>
		/// <param name="voice"></param>
		internal void Return(SourceVoice voice)
		{
			lock (StateLock)
			{
				VoicesToReturn.Add(voice);
			}
		}

		internal void CreateTween(
			SoundInstance soundInstance,
			Voice voice,
			AudioTweenProperty property,
			System.Func<float, float> easingFunction,
			float start,

@ -220,7 +222,7 @@ namespace MoonWorks.Audio
			lock (StateLock)
			{
				AudioTweenManager.CreateTween(
					soundInstance,
					voice,
					property,
					easingFunction,
					start,

@ -232,12 +234,12 @@ namespace MoonWorks.Audio
		}

		internal void ClearTweens(
			SoundInstance soundReference,
			Voice voice,
			AudioTweenProperty property
		) {
			lock (StateLock)
			{
				AudioTweenManager.ClearTweens(soundReference, property);
				AudioTweenManager.ClearTweens(voice, property);
			}
		}

@ -262,21 +264,6 @@ namespace MoonWorks.Audio
			}
		}

		internal void AddAutoUpdateStreamingSoundInstance(StreamingSound instance)
		{
			autoUpdateStreamingSoundReferences.Add(instance);
		}

		internal void AddAutoFreeStaticSoundInstance(StaticSoundInstance instance)
		{
			autoFreeStaticSoundInstanceReferences.Add(instance);
		}

		internal void AddSoundSequenceReference(SoundSequence sequence)
		{
			soundSequenceReferences.Add(new WeakReference<SoundSequence>(sequence));
		}

		protected virtual void Dispose(bool disposing)
		{
			if (!IsDisposed)

@ -286,6 +273,18 @@ namespace MoonWorks.Audio

				if (disposing)
				{
					// stop all source voices
					foreach (var weakReference in resources)
					{
						var target = weakReference.Target;

						if (target != null && target is SourceVoice voice)
						{
							voice.Stop();
						}
					}

					// destroy all audio resources
					foreach (var weakReference in resources)
					{
						var target = weakReference.Target;

@ -295,10 +294,11 @@ namespace MoonWorks.Audio
							(target as IDisposable).Dispose();
						}
					}

					resources.Clear();
				}

				FAudio.FAudioVoice_DestroyVoice(MasteringVoice);
				FAudio.FAudioVoice_DestroyVoice(trueMasteringVoice);
				FAudio.FAudio_Release(Handle);

				IsDisposed = true;
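
A sketch of the device-level surface that the faux mastering voice comment above describes; the helper name and volume value are illustrative:

	// Device-wide volume now routes through the faux mastering SubmixVoice, so it uses the
	// same voice plumbing as panning or reverb on any other voice.
	static SubmixVoice DuckMaster(AudioDevice device, float volume)
	{
		device.MasteringVolume = volume;

		// The faux mastering voice is also the natural output target for game-created submixes.
		return device.MasteringVoice;
	}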
@ -14,7 +14,7 @@ namespace MoonWorks.Audio
	internal class AudioTween
	{
		public SoundInstance SoundInstance;
		public Voice Voice;
		public AudioTweenProperty Property;
		public EasingFunction EasingFunction;
		public float Time;

@ -51,7 +51,7 @@ namespace MoonWorks.Audio
		public void Free(AudioTween tween)
		{
			tween.SoundInstance = null;
			tween.Voice = null;
			Tweens.Enqueue(tween);
		}
	}

@ -6,7 +6,7 @@ namespace MoonWorks.Audio
	internal class AudioTweenManager
	{
		private AudioTweenPool AudioTweenPool = new AudioTweenPool();
		private readonly Dictionary<(SoundInstance, AudioTweenProperty), AudioTween> AudioTweens = new Dictionary<(SoundInstance, AudioTweenProperty), AudioTween>();
		private readonly Dictionary<(Voice, AudioTweenProperty), AudioTween> AudioTweens = new Dictionary<(Voice, AudioTweenProperty), AudioTween>();
		private readonly List<AudioTween> DelayedAudioTweens = new List<AudioTween>();

		public void Update(float elapsedSeconds)

@ -14,7 +14,7 @@ namespace MoonWorks.Audio
			for (var i = DelayedAudioTweens.Count - 1; i >= 0; i--)
			{
				var audioTween = DelayedAudioTweens[i];
				var soundInstance = audioTween.SoundInstance;
				var voice = audioTween.Voice;

				audioTween.Time += elapsedSeconds;

@ -24,23 +24,23 @@ namespace MoonWorks.Audio
					switch (audioTween.Property)
					{
						case AudioTweenProperty.Pan:
							audioTween.StartValue = soundInstance.Pan;
							audioTween.StartValue = voice.Pan;
							break;

						case AudioTweenProperty.Pitch:
							audioTween.StartValue = soundInstance.Pitch;
							audioTween.StartValue = voice.Pitch;
							break;

						case AudioTweenProperty.Volume:
							audioTween.StartValue = soundInstance.Volume;
							audioTween.StartValue = voice.Volume;
							break;

						case AudioTweenProperty.FilterFrequency:
							audioTween.StartValue = soundInstance.FilterFrequency;
							audioTween.StartValue = voice.FilterFrequency;
							break;

						case AudioTweenProperty.Reverb:
							audioTween.StartValue = soundInstance.Reverb;
							audioTween.StartValue = voice.Reverb;
							break;
					}

@ -64,7 +64,7 @@ namespace MoonWorks.Audio
		}

		public void CreateTween(
			SoundInstance soundInstance,
			Voice voice,
			AudioTweenProperty property,
			System.Func<float, float> easingFunction,
			float start,

@ -73,7 +73,7 @@ namespace MoonWorks.Audio
			float delayTime
		) {
			var tween = AudioTweenPool.Obtain();
			tween.SoundInstance = soundInstance;
			tween.Voice = voice;
			tween.Property = property;
			tween.EasingFunction = easingFunction;
			tween.StartValue = start;

@ -92,21 +92,21 @@ namespace MoonWorks.Audio
			}
		}

		public void ClearTweens(SoundInstance soundInstance, AudioTweenProperty property)
		public void ClearTweens(Voice voice, AudioTweenProperty property)
		{
			AudioTweens.Remove((soundInstance, property));
			AudioTweens.Remove((voice, property));
		}

		private void AddTween(
			AudioTween audioTween
		) {
			// if a tween with the same sound and property already exists, get rid of it
			if (AudioTweens.TryGetValue((audioTween.SoundInstance, audioTween.Property), out var currentTween))
			if (AudioTweens.TryGetValue((audioTween.Voice, audioTween.Property), out var currentTween))
			{
				AudioTweenPool.Free(currentTween);
			}

			AudioTweens[(audioTween.SoundInstance, audioTween.Property)] = audioTween;
			AudioTweens[(audioTween.Voice, audioTween.Property)] = audioTween;
		}

		private static bool UpdateAudioTween(AudioTween audioTween, float delta)

@ -133,23 +133,23 @@ namespace MoonWorks.Audio
					switch (audioTween.Property)
					{
						case AudioTweenProperty.Pan:
							audioTween.SoundInstance.Pan = value;
							audioTween.Voice.Pan = value;
							break;

						case AudioTweenProperty.Pitch:
							audioTween.SoundInstance.Pitch = value;
							audioTween.Voice.Pitch = value;
							break;

						case AudioTweenProperty.Volume:
							audioTween.SoundInstance.Volume = value;
							audioTween.Voice.Volume = value;
							break;

						case AudioTweenProperty.FilterFrequency:
							audioTween.SoundInstance.FilterFrequency = value;
							audioTween.Voice.FilterFrequency = value;
							break;

						case AudioTweenProperty.Reverb:
							audioTween.SoundInstance.Reverb = value;
							audioTween.Voice.Reverb = value;
							break;
					}

@ -1,42 +0,0 @@
using System.IO;

namespace MoonWorks.Audio
{
	public static class AudioUtils
	{
		public struct WaveHeaderData
		{
			public int FileLength;
			public short FormatTag;
			public short Channels;
			public int SampleRate;
			public short BitsPerSample;
			public short BlockAlign;
			public int DataLength;
		}

		public static WaveHeaderData ReadWaveHeaderData(string filePath)
		{
			WaveHeaderData headerData;
			var fileInfo = new FileInfo(filePath);
			using FileStream fs = new FileStream(filePath, FileMode.Open, FileAccess.Read);
			using BinaryReader br = new BinaryReader(fs);

			headerData.FileLength = (int)fileInfo.Length - 8;
			fs.Position = 20;
			headerData.FormatTag = br.ReadInt16();
			fs.Position = 22;
			headerData.Channels = br.ReadInt16();
			fs.Position = 24;
			headerData.SampleRate = br.ReadInt32();
			fs.Position = 32;
			headerData.BlockAlign = br.ReadInt16();
			fs.Position = 34;
			headerData.BitsPerSample = br.ReadInt16();
			fs.Position = 40;
			headerData.DataLength = br.ReadInt32();

			return headerData;
		}
	}
}

@ -0,0 +1,33 @@
namespace MoonWorks.Audio
{
	public enum FormatTag : ushort
	{
		Unknown = 0,
		PCM = 1,
		MSADPCM = 2,
		IEEE_FLOAT = 3
	}

	public record struct Format
	{
		public FormatTag Tag;
		public ushort Channels;
		public uint SampleRate;
		public ushort BitsPerSample;

		internal FAudio.FAudioWaveFormatEx ToFAudioFormat()
		{
			var blockAlign = (ushort) ((BitsPerSample / 8) * Channels);

			return new FAudio.FAudioWaveFormatEx
			{
				wFormatTag = (ushort) Tag,
				nChannels = Channels,
				nSamplesPerSec = SampleRate,
				wBitsPerSample = BitsPerSample,
				nBlockAlign = blockAlign,
				nAvgBytesPerSec = blockAlign * SampleRate
			};
		}
	}
}
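A worked example of the mapping above for 16-bit stereo PCM at 48 kHz; the derived values in the comments just follow the arithmetic in ToFAudioFormat:

	// 16-bit stereo PCM at 48000 Hz maps to:
	//   nBlockAlign     = (16 / 8) * 2 = 4 bytes per sample frame
	//   nAvgBytesPerSec = 4 * 48000   = 192000 bytes per second
	static Format StereoPcm48k() => new Format
	{
		Tag = FormatTag.PCM,
		BitsPerSample = 16,
		Channels = 2,
		SampleRate = 48000
	};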
@ -0,0 +1,7 @@
namespace MoonWorks.Audio
{
	public interface IPoolable<T>
	{
		static abstract T Create(AudioDevice device, Format format);
	}
}

@ -0,0 +1,28 @@
namespace MoonWorks.Audio
{
	/// <summary>
	/// PersistentVoice should be used when you need to maintain a long-term reference to a source voice.
	/// </summary>
	public class PersistentVoice : SourceVoice, IPoolable<PersistentVoice>
	{
		public PersistentVoice(AudioDevice device, Format format) : base(device, format)
		{
		}

		public static PersistentVoice Create(AudioDevice device, Format format)
		{
			return new PersistentVoice(device, format);
		}

		/// <summary>
		/// Adds an AudioBuffer to the voice queue.
		/// The voice processes and plays back the buffers in its queue in the order that they were submitted.
		/// </summary>
		/// <param name="buffer">The buffer to submit to the voice.</param>
		/// <param name="loop">Whether the voice should loop this buffer.</param>
		public void Submit(AudioBuffer buffer, bool loop = false)
		{
			Submit(buffer.ToFAudioBuffer(loop));
		}
	}
}
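A usage sketch for PersistentVoice, assuming an already-created AudioBuffer; the helper name is illustrative:

	// Obtain a pooled persistent voice matching the buffer's format, loop the buffer, and start it.
	static PersistentVoice StartLoopingMusic(AudioDevice device, AudioBuffer musicBuffer)
	{
		var voice = device.Obtain<PersistentVoice>(musicBuffer.Format);
		voice.Submit(musicBuffer, loop: true); // sets LoopCount to FAUDIO_LOOP_INFINITE
		voice.Play();
		return voice; // keep this reference for as long as the music should keep playing
	}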
@ -3,54 +3,34 @@ using System.Runtime.InteropServices;

namespace MoonWorks.Audio
{
	// sound instances can send their audio to this voice to add reverb
	public unsafe class ReverbEffect : AudioResource
	/// <summary>
	/// Use this in conjunction with SourceVoice.SetReverbEffectChain to add reverb to a voice.
	/// </summary>
	public unsafe class ReverbEffect : SubmixVoice
	{
		private IntPtr voice;
		public IntPtr Voice => voice;

		public ReverbEffect(AudioDevice audioDevice) : base(audioDevice)
		public ReverbEffect(AudioDevice audioDevice, uint processingStage) : base(audioDevice, 1, audioDevice.DeviceDetails.OutputFormat.Format.nSamplesPerSec, processingStage)
		{
			/* Init reverb */

			IntPtr reverb;
			FAudio.FAudioCreateReverb(out reverb, 0);

			IntPtr chainPtr;
			chainPtr = (nint) NativeMemory.Alloc(
				(nuint) Marshal.SizeOf<FAudio.FAudioEffectChain>()
			var chain = new FAudio.FAudioEffectChain();
			var descriptor = new FAudio.FAudioEffectDescriptor();

			descriptor.InitialState = 1;
			descriptor.OutputChannels = 1;
			descriptor.pEffect = reverb;

			chain.EffectCount = 1;
			chain.pEffectDescriptors = (nint) (&descriptor);

			FAudio.FAudioVoice_SetEffectChain(
				Handle,
				ref chain
			);

			FAudio.FAudioEffectChain* reverbChain = (FAudio.FAudioEffectChain*) chainPtr;
			reverbChain->EffectCount = 1;
			reverbChain->pEffectDescriptors = (nint) NativeMemory.Alloc(
				(nuint) Marshal.SizeOf<FAudio.FAudioEffectDescriptor>()
			);

			FAudio.FAudioEffectDescriptor* reverbDescriptor =
				(FAudio.FAudioEffectDescriptor*) reverbChain->pEffectDescriptors;

			reverbDescriptor->InitialState = 1;
			reverbDescriptor->OutputChannels = (uint) (
				(audioDevice.DeviceDetails.OutputFormat.Format.nChannels == 6) ? 6 : 1
			);
			reverbDescriptor->pEffect = reverb;

			FAudio.FAudio_CreateSubmixVoice(
				audioDevice.Handle,
				out voice,
				1, /* omnidirectional reverb */
				audioDevice.DeviceDetails.OutputFormat.Format.nSamplesPerSec,
				0,
				0,
				IntPtr.Zero,
				chainPtr
			);
			FAudio.FAPOBase_Release(reverb);

			NativeMemory.Free((void*) reverbChain->pEffectDescriptors);
			NativeMemory.Free((void*) chainPtr);

			/* Init reverb params */
			// Defaults based on FAUDIOFX_I3DL2_PRESET_GENERIC

@ -86,7 +66,7 @@ namespace MoonWorks.Audio
			fixed (FAudio.FAudioFXReverbParameters* reverbParamsPtr = &reverbParams)
			{
				FAudio.FAudioVoice_SetEffectParameters(
					voice,
					Handle,
					0,
					(nint) reverbParamsPtr,
					(uint) Marshal.SizeOf<FAudio.FAudioFXReverbParameters>(),

@ -94,10 +74,5 @@ namespace MoonWorks.Audio
				);
			}
		}

		protected override void Destroy()
		{
			FAudio.FAudioVoice_DestroyVoice(Voice);
		}
	}
}
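A sketch of attaching the reworked ReverbEffect to a voice via the SourceVoice.SetReverbEffectChain method named in the class summary; its exact signature and the processing stage value are assumptions here:

	// Create a reverb submix at a later processing stage than the voices feeding it,
	// then point a source voice's reverb send at it.
	static ReverbEffect AttachReverb(AudioDevice device, SourceVoice voice)
	{
		var reverb = new ReverbEffect(device, processingStage: 1);
		voice.SetReverbEffectChain(reverb); // named in the summary above; signature assumed
		return reverb;
	}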
@ -1,130 +0,0 @@
using System;

namespace MoonWorks.Audio
{
	// NOTE: all sounds played with a SoundSequence must have the same audio format!
	public class SoundSequence : SoundInstance
	{
		public int NeedSoundThreshold = 0;
		public delegate void OnSoundNeededFunc();
		public OnSoundNeededFunc OnSoundNeeded;

		private object StateLock = new object();

		public SoundSequence(AudioDevice device, ushort formatTag, ushort bitsPerSample, ushort blockAlign, ushort channels, uint samplesPerSecond) : base(device, formatTag, bitsPerSample, blockAlign, channels, samplesPerSecond)
		{
			device.AddSoundSequenceReference(this);
		}

		public SoundSequence(AudioDevice device, StaticSound templateSound) : base(device, templateSound.FormatTag, templateSound.BitsPerSample, templateSound.BlockAlign, templateSound.Channels, templateSound.SamplesPerSecond)
		{
			device.AddSoundSequenceReference(this);
		}

		public void Update()
		{
			lock (StateLock)
			{
				if (IsDisposed) { return; }
				if (State != SoundState.Playing) { return; }

				if (NeedSoundThreshold > 0)
				{
					FAudio.FAudioSourceVoice_GetState(
						Voice,
						out var state,
						FAudio.FAUDIO_VOICE_NOSAMPLESPLAYED
					);

					var queuedBufferCount = state.BuffersQueued;
					for (int i = 0; i < NeedSoundThreshold - queuedBufferCount; i += 1)
					{
						if (OnSoundNeeded != null)
						{
							OnSoundNeeded();
						}
					}
				}
			}
		}

		public void EnqueueSound(StaticSound sound)
		{
#if DEBUG
			if (
				sound.FormatTag != Format.wFormatTag ||
				sound.BitsPerSample != Format.wBitsPerSample ||
				sound.Channels != Format.nChannels ||
				sound.SamplesPerSecond != Format.nSamplesPerSec
			)
			{
				Logger.LogWarn("Playlist audio format mismatch!");
			}
#endif

			lock (StateLock)
			{
				FAudio.FAudioSourceVoice_SubmitSourceBuffer(
					Voice,
					ref sound.Handle,
					IntPtr.Zero
				);
			}
		}

		public override void Pause()
		{
			lock (StateLock)
			{
				if (State == SoundState.Playing)
				{
					FAudio.FAudioSourceVoice_Stop(Voice, 0, 0);
					State = SoundState.Paused;
				}
			}
		}

		public override void Play()
		{
			PlayUsingOperationSet(0);
		}

		public override void QueueSyncPlay()
		{
			PlayUsingOperationSet(1);
		}

		private void PlayUsingOperationSet(uint operationSet)
		{
			lock (StateLock)
			{
				if (State == SoundState.Playing)
				{
					return;
				}

				FAudio.FAudioSourceVoice_Start(Voice, 0, operationSet);
				State = SoundState.Playing;
			}
		}

		public override void Stop()
		{
			lock (StateLock)
			{
				FAudio.FAudioSourceVoice_ExitLoop(Voice, 0);
				State = SoundState.Stopped;
			}
		}

		public override void StopImmediate()
		{
			lock (StateLock)
			{
				FAudio.FAudioSourceVoice_Stop(Voice, 0, 0);
				FAudio.FAudioSourceVoice_FlushSourceBuffers(Voice);
				State = SoundState.Stopped;
			}
		}
	}
}

@ -0,0 +1,56 @@
namespace MoonWorks.Audio
{
	/// <summary>
	/// Plays back a series of AudioBuffers in sequence. Set the OnSoundNeeded callback to add AudioBuffers dynamically.
	/// </summary>
	public class SoundSequence : SourceVoice
	{
		public int NeedSoundThreshold = 0;
		public delegate void OnSoundNeededFunc();
		public OnSoundNeededFunc OnSoundNeeded;

		public SoundSequence(AudioDevice device, Format format) : base(device, format)
		{
		}

		public SoundSequence(AudioDevice device, AudioBuffer templateSound) : base(device, templateSound.Format)
		{
		}

		public override void Update()
		{
			lock (StateLock)
			{
				if (State != SoundState.Playing) { return; }

				if (NeedSoundThreshold > 0)
				{
					for (int i = 0; i < NeedSoundThreshold - BuffersQueued; i += 1)
					{
						if (OnSoundNeeded != null)
						{
							OnSoundNeeded();
						}
					}
				}
			}
		}

		public void EnqueueSound(AudioBuffer buffer)
		{
#if DEBUG
			if (!(buffer.Format == Format))
			{
				Logger.LogWarn("Sound sequence audio format mismatch!");
			}
#endif

			lock (StateLock)
			{
				Submit(buffer.ToFAudioBuffer());
			}
		}
	}
}
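A sketch of feeding the new SoundSequence from the OnSoundNeeded callback, assuming a set of same-format AudioBuffers; the threshold and helper name are illustrative:

	// Keep at least two buffers queued; the audio thread calls Update(), which invokes
	// OnSoundNeeded once per missing buffer whenever the queue dips below the threshold.
	static SoundSequence StartSequence(AudioDevice device, AudioBuffer[] clips)
	{
		var sequence = new SoundSequence(device, clips[0]);
		var next = 0;

		sequence.NeedSoundThreshold = 2;
		sequence.OnSoundNeeded += () =>
		{
			sequence.EnqueueSound(clips[next]);
			next = (next + 1) % clips.Length;
		};

		sequence.Play();
		return sequence;
	}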
@ -0,0 +1,228 @@
using System;

namespace MoonWorks.Audio
{
	/// <summary>
	/// Emits audio from submitted audio buffers.
	/// </summary>
	public abstract class SourceVoice : Voice
	{
		private Format format;
		public Format Format => format;

		protected bool PlaybackInitiated;

		/// <summary>
		/// The number of buffers queued in the voice.
		/// This includes the currently playing buffer!
		/// </summary>
		public uint BuffersQueued
		{
			get
			{
				FAudio.FAudioSourceVoice_GetState(
					Handle,
					out var state,
					FAudio.FAUDIO_VOICE_NOSAMPLESPLAYED
				);

				return state.BuffersQueued;
			}
		}

		private SoundState state;
		public SoundState State
		{
			get
			{
				if (BuffersQueued == 0)
				{
					Stop();
				}

				return state;
			}

			internal set
			{
				state = value;
			}
		}

		protected object StateLock = new object();

		public SourceVoice(
			AudioDevice device,
			Format format
		) : base(device, format.Channels, device.DeviceDetails.OutputFormat.Format.nChannels)
		{
			this.format = format;
			var fAudioFormat = format.ToFAudioFormat();

			FAudio.FAudio_CreateSourceVoice(
				device.Handle,
				out handle,
				ref fAudioFormat,
				FAudio.FAUDIO_VOICE_USEFILTER,
				FAudio.FAUDIO_DEFAULT_FREQ_RATIO,
				IntPtr.Zero,
				IntPtr.Zero, // default sends to mastering voice!
				IntPtr.Zero
			);
		}

		/// <summary>
		/// Starts consumption and processing of audio by the voice.
		/// Delivers the result to any connected submix or mastering voice.
		/// </summary>
		/// <param name="syncGroup">Optional. Denotes that the operation will be pending until AudioDevice.TriggerSyncGroup is called.</param>
		public void Play(uint syncGroup = FAudio.FAUDIO_COMMIT_NOW)
		{
			lock (StateLock)
			{
				FAudio.FAudioSourceVoice_Start(Handle, 0, syncGroup);

				State = SoundState.Playing;
			}
		}

		/// <summary>
		/// Pauses playback.
		/// All source buffers that are queued on the voice and the current cursor position are preserved.
		/// </summary>
		/// <param name="syncGroup">Optional. Denotes that the operation will be pending until AudioDevice.TriggerSyncGroup is called.</param>
		public void Pause(uint syncGroup = FAudio.FAUDIO_COMMIT_NOW)
		{
			lock (StateLock)
			{
				FAudio.FAudioSourceVoice_Stop(Handle, 0, syncGroup);

				State = SoundState.Paused;
			}
		}

		/// <summary>
		/// Stops looping the voice when it reaches the end of the current loop region.
		/// If the cursor for the voice is not in a loop region, ExitLoop does nothing.
		/// </summary>
		/// <param name="syncGroup">Optional. Denotes that the operation will be pending until AudioDevice.TriggerSyncGroup is called.</param>
		public void ExitLoop(uint syncGroup = FAudio.FAUDIO_COMMIT_NOW)
		{
			lock (StateLock)
			{
				FAudio.FAudioSourceVoice_ExitLoop(Handle, syncGroup);
			}
		}

		/// <summary>
		/// Stops playback and removes all pending audio buffers from the voice queue.
		/// </summary>
		/// <param name="syncGroup">Optional. Denotes that the operation will be pending until AudioDevice.TriggerSyncGroup is called.</param>
		public void Stop(uint syncGroup = FAudio.FAUDIO_COMMIT_NOW)
		{
			lock (StateLock)
			{
				FAudio.FAudioSourceVoice_Stop(Handle, 0, syncGroup);
				FAudio.FAudioSourceVoice_FlushSourceBuffers(Handle);

				State = SoundState.Stopped;
			}
		}

		/// <summary>
		/// Adds an AudioBuffer to the voice queue.
		/// The voice processes and plays back the buffers in its queue in the order that they were submitted.
		/// </summary>
		/// <param name="buffer">The buffer to submit to the voice.</param>
		public void Submit(AudioBuffer buffer)
		{
			Submit(buffer.ToFAudioBuffer());
		}

		/// <summary>
		/// Calculates positional sound. This must be called continuously to update positional sound.
		/// </summary>
		/// <param name="listener"></param>
		/// <param name="emitter"></param>
		public unsafe void Apply3D(AudioListener listener, AudioEmitter emitter)
		{
			Is3D = true;

			emitter.emitterData.CurveDistanceScaler = Device.CurveDistanceScalar;
			emitter.emitterData.ChannelCount = SourceChannelCount;

			var dspSettings = new FAudio.F3DAUDIO_DSP_SETTINGS
			{
				DopplerFactor = DopplerFactor,
				SrcChannelCount = SourceChannelCount,
				DstChannelCount = DestinationChannelCount,
				pMatrixCoefficients = (nint) pMatrixCoefficients
			};

			FAudio.F3DAudioCalculate(
				Device.Handle3D,
				ref listener.listenerData,
				ref emitter.emitterData,
				FAudio.F3DAUDIO_CALCULATE_MATRIX | FAudio.F3DAUDIO_CALCULATE_DOPPLER,
				ref dspSettings
			);

			UpdatePitch();

			FAudio.FAudioVoice_SetOutputMatrix(
				Handle,
				OutputVoice.Handle,
				SourceChannelCount,
				DestinationChannelCount,
				(nint) pMatrixCoefficients,
				0
			);
		}

		/// <summary>
		/// Specifies that this source voice can be returned to the voice pool.
		/// Holding on to the reference after calling this will cause problems!
		/// </summary>
		public void Return()
		{
			Stop();
			Device.Return(this);
		}

		/// <summary>
		/// Called automatically by AudioDevice in the audio thread.
		/// Don't call this yourself! You might regret it!
		/// </summary>
		public virtual void Update() { }

		/// <summary>
		/// Adds an FAudio buffer to the voice queue.
		/// The voice processes and plays back the buffers in its queue in the order that they were submitted.
		/// </summary>
		/// <param name="buffer">The buffer to submit to the voice.</param>
		protected void Submit(FAudio.FAudioBuffer buffer)
		{
			lock (StateLock)
			{
				FAudio.FAudioSourceVoice_SubmitSourceBuffer(
					Handle,
					ref buffer,
					IntPtr.Zero
				);
			}
		}

		public override void Reset()
		{
			Stop();
			PlaybackInitiated = false;
			base.Reset();
		}

		protected override unsafe void Destroy()
		{
			Stop();
			base.Destroy();
		}
	}
}
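A sketch pairing the syncGroup parameters above with AudioDevice.TriggerSyncGroup so two voices start on the same processing pass; the group id and helper name are illustrative:

	// Queue Play operations on both voices under the same sync group, then commit them together.
	static void StartInSync(AudioDevice device, SourceVoice a, SourceVoice b)
	{
		const uint group = 1;

		a.Play(group);
		b.Play(group);

		device.TriggerSyncGroup(group);
	}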
@ -0,0 +1,39 @@
using System.Collections.Generic;

namespace MoonWorks.Audio
{
	internal class SourceVoicePool
	{
		private AudioDevice Device;

		Dictionary<(System.Type, Format), Queue<SourceVoice>> VoiceLists = new Dictionary<(System.Type, Format), Queue<SourceVoice>>();

		public SourceVoicePool(AudioDevice device)
		{
			Device = device;
		}

		public T Obtain<T>(Format format) where T : SourceVoice, IPoolable<T>
		{
			if (!VoiceLists.ContainsKey((typeof(T), format)))
			{
				VoiceLists.Add((typeof(T), format), new Queue<SourceVoice>());
			}

			var list = VoiceLists[(typeof(T), format)];

			if (list.Count == 0)
			{
				list.Enqueue(T.Create(Device, format));
			}

			return (T) list.Dequeue();
		}

		public void Return(SourceVoice voice)
		{
			var list = VoiceLists[(voice.GetType(), voice.Format)];
			list.Enqueue(voice);
		}
	}
}
|
|
@ -1,332 +0,0 @@
|
|||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Runtime.InteropServices;
|
||||
|
||||
namespace MoonWorks.Audio
|
||||
{
|
||||
public class StaticSound : AudioResource
|
||||
{
|
||||
internal FAudio.FAudioBuffer Handle;
|
||||
public ushort FormatTag { get; }
|
||||
public ushort BitsPerSample { get; }
|
||||
public ushort Channels { get; }
|
||||
public uint SamplesPerSecond { get; }
|
||||
public ushort BlockAlign { get; }
|
||||
|
||||
public uint LoopStart { get; set; } = 0;
|
||||
public uint LoopLength { get; set; } = 0;
|
||||
|
||||
private Stack<StaticSoundInstance> AvailableInstances = new Stack<StaticSoundInstance>();
|
||||
private HashSet<StaticSoundInstance> UsedInstances = new HashSet<StaticSoundInstance>();
|
||||
|
||||
private bool OwnsBuffer;
|
||||
|
||||
public static unsafe StaticSound LoadOgg(AudioDevice device, string filePath)
|
||||
{
|
||||
var filePointer = FAudio.stb_vorbis_open_filename(filePath, out var error, IntPtr.Zero);
|
||||
|
||||
if (error != 0)
|
||||
{
|
||||
throw new AudioLoadException("Error loading file!");
|
||||
}
|
||||
var info = FAudio.stb_vorbis_get_info(filePointer);
|
||||
var lengthInFloats =
|
||||
FAudio.stb_vorbis_stream_length_in_samples(filePointer) * info.channels;
|
||||
var lengthInBytes = lengthInFloats * Marshal.SizeOf<float>();
|
||||
var buffer = NativeMemory.Alloc((nuint) lengthInBytes);
|
||||
|
||||
FAudio.stb_vorbis_get_samples_float_interleaved(
|
||||
filePointer,
|
||||
info.channels,
|
||||
(nint) buffer,
|
||||
(int) lengthInFloats
|
||||
);
|
||||
|
||||
FAudio.stb_vorbis_close(filePointer);
|
||||
|
||||
return new StaticSound(
|
||||
device,
|
||||
3,
|
||||
32,
|
||||
(ushort) (4 * info.channels),
|
||||
(ushort) info.channels,
|
||||
info.sample_rate,
|
||||
(nint) buffer,
|
||||
(uint) lengthInBytes,
|
||||
true);
|
||||
}
|
||||
|
||||
// mostly borrowed from https://github.com/FNA-XNA/FNA/blob/b71b4a35ae59970ff0070dea6f8620856d8d4fec/src/Audio/SoundEffect.cs#L385
|
||||
public static unsafe StaticSound LoadWav(AudioDevice device, string filePath)
|
||||
{
|
||||
// WaveFormatEx data
|
||||
ushort wFormatTag;
|
||||
ushort nChannels;
|
||||
uint nSamplesPerSec;
|
||||
uint nAvgBytesPerSec;
|
||||
ushort nBlockAlign;
|
||||
ushort wBitsPerSample;
|
||||
int samplerLoopStart = 0;
|
||||
int samplerLoopEnd = 0;
|
||||
|
||||
using var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read);
|
||||
using var reader = new BinaryReader(stream);
|
||||
|
||||
// RIFF Signature
|
||||
string signature = new string(reader.ReadChars(4));
|
||||
if (signature != "RIFF")
|
||||
{
|
||||
throw new NotSupportedException("Specified stream is not a wave file.");
|
||||
}
|
||||
|
||||
reader.ReadUInt32(); // Riff Chunk Size
|
||||
|
||||
string wformat = new string(reader.ReadChars(4));
|
||||
if (wformat != "WAVE")
|
||||
{
|
||||
throw new NotSupportedException("Specified stream is not a wave file.");
|
||||
}
|
||||
|
||||
// WAVE Header
|
||||
string format_signature = new string(reader.ReadChars(4));
|
||||
while (format_signature != "fmt ")
|
||||
{
|
||||
reader.ReadBytes(reader.ReadInt32());
|
||||
format_signature = new string(reader.ReadChars(4));
|
||||
}
|
||||
|
||||
int format_chunk_size = reader.ReadInt32();
|
||||
|
||||
wFormatTag = reader.ReadUInt16();
|
||||
nChannels = reader.ReadUInt16();
|
||||
nSamplesPerSec = reader.ReadUInt32();
|
||||
nAvgBytesPerSec = reader.ReadUInt32();
|
||||
nBlockAlign = reader.ReadUInt16();
|
||||
wBitsPerSample = reader.ReadUInt16();
|
||||
|
||||
// Reads residual bytes
|
||||
if (format_chunk_size > 16)
|
||||
{
|
||||
reader.ReadBytes(format_chunk_size - 16);
|
||||
}
|
||||
|
||||
// data Signature
|
||||
string data_signature = new string(reader.ReadChars(4));
|
||||
while (data_signature.ToLowerInvariant() != "data")
|
||||
{
|
||||
reader.ReadBytes(reader.ReadInt32());
|
||||
data_signature = new string(reader.ReadChars(4));
|
||||
}
|
||||
if (data_signature != "data")
|
||||
{
|
||||
throw new NotSupportedException("Specified wave file is not supported.");
|
||||
}
|
||||
|
||||
int waveDataLength = reader.ReadInt32();
|
||||
var waveDataBuffer = NativeMemory.Alloc((nuint) waveDataLength);
|
||||
var waveDataSpan = new Span<byte>(waveDataBuffer, waveDataLength);
|
||||
stream.ReadExactly(waveDataSpan);
|
||||
|
||||
// Scan for other chunks
|
||||
while (reader.PeekChar() != -1)
|
||||
{
|
||||
char[] chunkIDChars = reader.ReadChars(4);
|
||||
if (chunkIDChars.Length < 4)
|
||||
{
|
||||
break; // EOL!
|
||||
}
|
||||
byte[] chunkSizeBytes = reader.ReadBytes(4);
|
||||
if (chunkSizeBytes.Length < 4)
|
||||
{
|
||||
break; // EOL!
|
||||
}
|
||||
string chunk_signature = new string(chunkIDChars);
|
||||
int chunkDataSize = BitConverter.ToInt32(chunkSizeBytes, 0);
|
||||
if (chunk_signature == "smpl") // "smpl", Sampler Chunk Found
|
||||
{
|
||||
reader.ReadUInt32(); // Manufacturer
|
||||
reader.ReadUInt32(); // Product
|
||||
reader.ReadUInt32(); // Sample Period
|
||||
reader.ReadUInt32(); // MIDI Unity Note
|
||||
reader.ReadUInt32(); // MIDI Pitch Fraction
|
||||
reader.ReadUInt32(); // SMPTE Format
|
||||
reader.ReadUInt32(); // SMPTE Offset
|
||||
uint numSampleLoops = reader.ReadUInt32();
|
||||
int samplerData = reader.ReadInt32();
|
||||
|
||||
for (int i = 0; i < numSampleLoops; i += 1)
|
||||
{
|
||||
reader.ReadUInt32(); // Cue Point ID
|
||||
reader.ReadUInt32(); // Type
|
||||
int start = reader.ReadInt32();
|
||||
int end = reader.ReadInt32();
|
||||
reader.ReadUInt32(); // Fraction
|
||||
reader.ReadUInt32(); // Play Count
|
||||
|
||||
if (i == 0) // Grab loopStart and loopEnd from first sample loop
|
||||
{
|
||||
samplerLoopStart = start;
|
||||
samplerLoopEnd = end;
|
||||
}
|
||||
}
|
||||
|
||||
if (samplerData != 0) // Read Sampler Data if it exists
|
||||
{
|
||||
reader.ReadBytes(samplerData);
|
||||
}
|
||||
}
|
||||
else // Read unwanted chunk data and try again
|
||||
{
|
||||
reader.ReadBytes(chunkDataSize);
|
||||
}
|
||||
}
|
||||
// End scan
|
||||
|
||||
var sound = new StaticSound(
|
||||
device,
|
||||
wFormatTag,
|
||||
wBitsPerSample,
|
||||
nBlockAlign,
|
||||
nChannels,
|
||||
nSamplesPerSec,
|
||||
(nint) waveDataBuffer,
|
||||
(uint) waveDataLength,
|
||||
true
|
||||
);
|
||||
|
||||
return sound;
|
||||
}
|
||||
|
||||
public static unsafe StaticSound FromQOA(AudioDevice device, string path)
|
||||
{
|
||||
var fileStream = new FileStream(path, FileMode.Open, FileAccess.Read);
|
||||
var fileDataPtr = NativeMemory.Alloc((nuint) fileStream.Length);
|
||||
var fileDataSpan = new Span<byte>(fileDataPtr, (int) fileStream.Length);
|
||||
fileStream.ReadExactly(fileDataSpan);
|
||||
fileStream.Close();
|
||||
|
||||
var qoaHandle = FAudio.qoa_open_from_memory((char*) fileDataPtr, (uint) fileDataSpan.Length, 0);
|
||||
if (qoaHandle == 0)
|
||||
{
|
||||
NativeMemory.Free(fileDataPtr);
|
||||
Logger.LogError("Error opening QOA file!");
|
||||
throw new AudioLoadException("Error opening QOA file!");
|
||||
}
|
||||
|
||||
FAudio.qoa_attributes(qoaHandle, out var channels, out var samplerate, out var samples_per_channel_per_frame, out var total_samples_per_channel);
|
||||
|
||||
var bufferLengthInBytes = total_samples_per_channel * channels * sizeof(short);
|
||||
var buffer = NativeMemory.Alloc(bufferLengthInBytes);
|
||||
FAudio.qoa_decode_entire(qoaHandle, (short*) buffer);
|
||||
|
||||
FAudio.qoa_close(qoaHandle);
|
||||
NativeMemory.Free(fileDataPtr);
|
||||
|
||||
return new StaticSound(
|
||||
device,
|
||||
1,
|
||||
16,
|
||||
(ushort) (channels * 2),
|
||||
(ushort) channels,
|
||||
samplerate,
|
||||
(nint) buffer,
|
||||
bufferLengthInBytes,
|
||||
true
|
||||
);
|
||||
}
|
||||
|
||||
public StaticSound(
|
||||
AudioDevice device,
|
||||
ushort formatTag,
|
||||
ushort bitsPerSample,
|
||||
ushort blockAlign,
|
||||
ushort channels,
|
||||
uint samplesPerSecond,
|
||||
IntPtr bufferPtr,
|
||||
uint bufferLengthInBytes,
|
||||
bool ownsBuffer) : base(device)
|
||||
{
|
||||
FormatTag = formatTag;
|
||||
BitsPerSample = bitsPerSample;
|
||||
BlockAlign = blockAlign;
|
||||
Channels = channels;
|
||||
SamplesPerSecond = samplesPerSecond;
|
||||
|
||||
Handle = new FAudio.FAudioBuffer
|
||||
{
|
||||
Flags = FAudio.FAUDIO_END_OF_STREAM,
|
||||
pContext = IntPtr.Zero,
|
||||
pAudioData = bufferPtr,
|
||||
AudioBytes = bufferLengthInBytes,
|
||||
PlayBegin = 0,
|
||||
PlayLength = 0
|
||||
};
|
||||
|
||||
OwnsBuffer = ownsBuffer;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Gets a sound instance from the pool.
|
||||
/// NOTE: If AutoFree is false, you will have to call StaticSoundInstance.Free() yourself or leak the instance!
|
||||
/// </summary>
|
||||
public StaticSoundInstance GetInstance(bool autoFree = true)
|
||||
{
|
||||
StaticSoundInstance instance;
|
||||
|
||||
lock (AvailableInstances)
|
||||
{
|
||||
if (AvailableInstances.Count == 0)
|
||||
{
|
||||
AvailableInstances.Push(new StaticSoundInstance(Device, this));
|
||||
}
|
||||
|
||||
instance = AvailableInstances.Pop();
|
||||
}
|
||||
|
||||
instance.AutoFree = autoFree;
|
||||
|
||||
lock (UsedInstances)
|
||||
{
|
||||
UsedInstances.Add(instance);
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
internal void FreeInstance(StaticSoundInstance instance)
|
||||
{
|
||||
instance.Reset();
|
||||
|
||||
lock (UsedInstances)
|
||||
{
|
||||
UsedInstances.Remove(instance);
|
||||
}
|
||||
|
||||
lock (AvailableInstances)
|
||||
{
|
||||
AvailableInstances.Push(instance);
|
||||
}
|
||||
}
|
||||
|
||||
protected override unsafe void Destroy()
|
||||
{
|
||||
foreach (var instance in UsedInstances)
|
||||
{
|
||||
instance.Free();
|
||||
}
|
||||
|
||||
foreach (var instance in AvailableInstances)
|
||||
{
|
||||
instance.Dispose();
|
||||
}
|
||||
|
||||
AvailableInstances.Clear();
|
||||
|
||||
if (OwnsBuffer)
|
||||
{
|
||||
NativeMemory.Free((void*) Handle.pAudioData);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
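For reviewers unfamiliar with the pooling contract documented on GetInstance, a minimal usage sketch; the StaticSound variable `sound` and the calling context are illustrative, not part of this PR:

	// Pooled instance with manual lifetime management (AutoFree disabled).
	var instance = sound.GetInstance(autoFree: false);
	instance.Loop = true;
	instance.Play();

	// ... later, once the effect has been stopped:
	instance.Stop();
	instance.Free(); // returns the instance to the pool; skipping this leaks it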
@ -1,141 +0,0 @@
|
|||
using System;
|
||||
|
||||
namespace MoonWorks.Audio
|
||||
{
|
||||
public class StaticSoundInstance : SoundInstance
|
||||
{
|
||||
public StaticSound Parent { get; }
|
||||
|
||||
public bool Loop { get; set; }
|
||||
|
||||
private SoundState _state = SoundState.Stopped;
|
||||
public override SoundState State
|
||||
{
|
||||
get
|
||||
{
|
||||
FAudio.FAudioSourceVoice_GetState(
|
||||
Voice,
|
||||
out var state,
|
||||
FAudio.FAUDIO_VOICE_NOSAMPLESPLAYED
|
||||
);
|
||||
if (state.BuffersQueued == 0)
|
||||
{
|
||||
StopImmediate();
|
||||
}
|
||||
|
||||
return _state;
|
||||
}
|
||||
|
||||
protected set
|
||||
{
|
||||
_state = value;
|
||||
}
|
||||
}
|
||||
|
||||
public bool AutoFree { get; internal set; }
|
||||
|
||||
internal StaticSoundInstance(
|
||||
AudioDevice device,
|
||||
StaticSound parent
|
||||
) : base(device, parent.FormatTag, parent.BitsPerSample, parent.BlockAlign, parent.Channels, parent.SamplesPerSecond)
|
||||
{
|
||||
Parent = parent;
|
||||
}
|
||||
|
||||
public override void Play()
|
||||
{
|
||||
PlayUsingOperationSet(0);
|
||||
}
|
||||
|
||||
public override void QueueSyncPlay()
|
||||
{
|
||||
PlayUsingOperationSet(1);
|
||||
}
|
||||
|
||||
private void PlayUsingOperationSet(uint operationSet)
|
||||
{
|
||||
if (State == SoundState.Playing)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (Loop)
|
||||
{
|
||||
Parent.Handle.LoopCount = 255;
|
||||
Parent.Handle.LoopBegin = Parent.LoopStart;
|
||||
Parent.Handle.LoopLength = Parent.LoopLength;
|
||||
}
|
||||
else
|
||||
{
|
||||
Parent.Handle.LoopCount = 0;
|
||||
Parent.Handle.LoopBegin = 0;
|
||||
Parent.Handle.LoopLength = 0;
|
||||
}
|
||||
|
||||
FAudio.FAudioSourceVoice_SubmitSourceBuffer(
|
||||
Voice,
|
||||
ref Parent.Handle,
|
||||
IntPtr.Zero
|
||||
);
|
||||
|
||||
FAudio.FAudioSourceVoice_Start(Voice, 0, operationSet);
|
||||
State = SoundState.Playing;
|
||||
|
||||
if (AutoFree)
|
||||
{
|
||||
Device.AddAutoFreeStaticSoundInstance(this);
|
||||
}
|
||||
}
|
||||
|
||||
public override void Pause()
|
||||
{
|
||||
if (State == SoundState.Playing)
|
||||
{
|
||||
FAudio.FAudioSourceVoice_Stop(Voice, 0, 0);
|
||||
State = SoundState.Paused;
|
||||
}
|
||||
}
|
||||
|
||||
public override void Stop()
|
||||
{
|
||||
FAudio.FAudioSourceVoice_ExitLoop(Voice, 0);
|
||||
State = SoundState.Stopped;
|
||||
}
|
||||
|
||||
public override void StopImmediate()
|
||||
{
|
||||
FAudio.FAudioSourceVoice_Stop(Voice, 0, 0);
|
||||
FAudio.FAudioSourceVoice_FlushSourceBuffers(Voice);
|
||||
State = SoundState.Stopped;
|
||||
}
|
||||
|
||||
public void Seek(uint sampleFrame)
|
||||
{
|
||||
if (State == SoundState.Playing)
|
||||
{
|
||||
FAudio.FAudioSourceVoice_Stop(Voice, 0, 0);
|
||||
FAudio.FAudioSourceVoice_FlushSourceBuffers(Voice);
|
||||
}
|
||||
|
||||
Parent.Handle.PlayBegin = sampleFrame;
|
||||
}
|
||||
|
||||
// Call this when you no longer need the sound instance.
|
||||
// If AutoFree is set, this will automatically be called when the sound instance stops playing.
|
||||
// If the sound isn't stopped when you call this, things might get weird!
|
||||
public void Free()
|
||||
{
|
||||
Parent.FreeInstance(this);
|
||||
}
|
||||
|
||||
internal void Reset()
|
||||
{
|
||||
Pan = 0;
|
||||
Pitch = 0;
|
||||
Volume = 1;
|
||||
Loop = false;
|
||||
Is3D = false;
|
||||
FilterType = FilterType.None;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,239 +0,0 @@
|
|||
using System;
|
||||
using System.Runtime.InteropServices;
|
||||
|
||||
namespace MoonWorks.Audio
|
||||
{
|
||||
/// <summary>
|
||||
/// For streaming long playback.
|
||||
/// Must be extended with a decoder routine called by FillBuffer.
|
||||
/// See StreamingSoundOgg for an example.
|
||||
/// </summary>
|
||||
public abstract class StreamingSound : SoundInstance
|
||||
{
|
||||
// Are we actively consuming buffers?
|
||||
protected bool ConsumingBuffers = false;
|
||||
|
||||
private const int BUFFER_COUNT = 3;
|
||||
private nuint BufferSize;
|
||||
private readonly IntPtr[] buffers;
|
||||
private int nextBufferIndex = 0;
|
||||
private uint queuedBufferCount = 0;
|
||||
|
||||
private readonly object StateLock = new object();
|
||||
|
||||
public bool AutoUpdate { get; }
|
||||
|
||||
public abstract bool Loaded { get; }
|
||||
|
||||
public unsafe StreamingSound(
|
||||
AudioDevice device,
|
||||
ushort formatTag,
|
||||
ushort bitsPerSample,
|
||||
ushort blockAlign,
|
||||
ushort channels,
|
||||
uint samplesPerSecond,
|
||||
uint bufferSize,
|
||||
bool autoUpdate // should the AudioDevice thread automatically update this sound?
|
||||
) : base(device, formatTag, bitsPerSample, blockAlign, channels, samplesPerSecond)
|
||||
{
|
||||
BufferSize = bufferSize;
|
||||
|
||||
buffers = new IntPtr[BUFFER_COUNT];
|
||||
for (int i = 0; i < BUFFER_COUNT; i += 1)
|
||||
{
|
||||
buffers[i] = (IntPtr) NativeMemory.Alloc(bufferSize);
|
||||
}
|
||||
|
||||
AutoUpdate = autoUpdate;
|
||||
}
|
||||
|
||||
public override void Play()
|
||||
{
|
||||
PlayUsingOperationSet(0);
|
||||
}
|
||||
|
||||
public override void QueueSyncPlay()
|
||||
{
|
||||
PlayUsingOperationSet(1);
|
||||
}
|
||||
|
||||
private void PlayUsingOperationSet(uint operationSet)
|
||||
{
|
||||
lock (StateLock)
|
||||
{
|
||||
if (!Loaded)
|
||||
{
|
||||
Logger.LogError("Cannot play StreamingSound before calling Load!");
|
||||
return;
|
||||
}
|
||||
|
||||
if (State == SoundState.Playing)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
State = SoundState.Playing;
|
||||
|
||||
ConsumingBuffers = true;
|
||||
if (AutoUpdate)
|
||||
{
|
||||
Device.AddAutoUpdateStreamingSoundInstance(this);
|
||||
}
|
||||
|
||||
QueueBuffers();
|
||||
FAudio.FAudioSourceVoice_Start(Voice, 0, operationSet);
|
||||
}
|
||||
}
|
||||
|
||||
public override void Pause()
|
||||
{
|
||||
lock (StateLock)
|
||||
{
|
||||
if (State == SoundState.Playing)
|
||||
{
|
||||
ConsumingBuffers = false;
|
||||
FAudio.FAudioSourceVoice_Stop(Voice, 0, 0);
|
||||
State = SoundState.Paused;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public override void Stop()
|
||||
{
|
||||
lock (StateLock)
|
||||
{
|
||||
ConsumingBuffers = false;
|
||||
State = SoundState.Stopped;
|
||||
}
|
||||
}
|
||||
|
||||
public override void StopImmediate()
|
||||
{
|
||||
lock (StateLock)
|
||||
{
|
||||
ConsumingBuffers = false;
|
||||
FAudio.FAudioSourceVoice_Stop(Voice, 0, 0);
|
||||
FAudio.FAudioSourceVoice_FlushSourceBuffers(Voice);
|
||||
ClearBuffers();
|
||||
|
||||
State = SoundState.Stopped;
|
||||
}
|
||||
}
|
||||
|
||||
internal unsafe void Update()
|
||||
{
|
||||
lock (StateLock)
|
||||
{
|
||||
if (!IsDisposed)
|
||||
{
|
||||
if (State != SoundState.Playing)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
QueueBuffers();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void QueueBuffers()
|
||||
{
|
||||
FAudio.FAudioSourceVoice_GetState(
|
||||
Voice,
|
||||
out var state,
|
||||
FAudio.FAUDIO_VOICE_NOSAMPLESPLAYED
|
||||
);
|
||||
|
||||
queuedBufferCount = state.BuffersQueued;
|
||||
|
||||
if (ConsumingBuffers)
|
||||
{
|
||||
for (int i = 0; i < BUFFER_COUNT - queuedBufferCount; i += 1)
|
||||
{
|
||||
AddBuffer();
|
||||
}
|
||||
}
|
||||
else if (queuedBufferCount == 0)
|
||||
{
|
||||
Stop();
|
||||
}
|
||||
}
|
||||
|
||||
protected unsafe void ClearBuffers()
|
||||
{
|
||||
nextBufferIndex = 0;
|
||||
queuedBufferCount = 0;
|
||||
}
|
||||
|
||||
protected unsafe void AddBuffer()
|
||||
{
|
||||
var buffer = buffers[nextBufferIndex];
|
||||
nextBufferIndex = (nextBufferIndex + 1) % BUFFER_COUNT;
|
||||
|
||||
FillBuffer(
|
||||
(void*) buffer,
|
||||
(int) BufferSize,
|
||||
out int filledLengthInBytes,
|
||||
out bool reachedEnd
|
||||
);
|
||||
|
||||
if (filledLengthInBytes > 0)
|
||||
{
|
||||
FAudio.FAudioBuffer buf = new FAudio.FAudioBuffer
|
||||
{
|
||||
AudioBytes = (uint) filledLengthInBytes,
|
||||
pAudioData = (IntPtr) buffer,
|
||||
PlayLength = (
|
||||
(uint) (filledLengthInBytes /
|
||||
Format.nChannels /
|
||||
(uint) (Format.wBitsPerSample / 8))
|
||||
)
|
||||
};
|
||||
|
||||
FAudio.FAudioSourceVoice_SubmitSourceBuffer(
|
||||
Voice,
|
||||
ref buf,
|
||||
IntPtr.Zero
|
||||
);
|
||||
|
||||
queuedBufferCount += 1;
|
||||
}
|
||||
|
||||
if (reachedEnd)
|
||||
{
|
||||
/* We have reached the end of the data, what do we do? */
|
||||
ConsumingBuffers = false;
|
||||
OnReachedEnd();
|
||||
}
|
||||
}
|
||||
|
||||
public abstract void Load();
|
||||
public abstract void Unload();
|
||||
|
||||
protected unsafe abstract void FillBuffer(
|
||||
void* buffer,
|
||||
int bufferLengthInBytes, /* in bytes */
|
||||
out int filledLengthInBytes, /* in bytes */
|
||||
out bool reachedEnd
|
||||
);
|
||||
|
||||
protected abstract void OnReachedEnd();
|
||||
|
||||
protected unsafe override void Destroy()
|
||||
{
|
||||
lock (StateLock)
|
||||
{
|
||||
if (!IsDisposed)
|
||||
{
|
||||
StopImmediate();
|
||||
Unload();
|
||||
|
||||
for (int i = 0; i < BUFFER_COUNT; i += 1)
|
||||
{
|
||||
NativeMemory.Free((void*) buffers[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,113 +0,0 @@
|
|||
using System;
|
||||
using System.IO;
|
||||
using System.Runtime.InteropServices;
|
||||
|
||||
namespace MoonWorks.Audio
|
||||
{
|
||||
public class StreamingSoundOgg : StreamingSoundSeekable
|
||||
{
|
||||
private IntPtr FileDataPtr = IntPtr.Zero;
|
||||
private IntPtr VorbisHandle = IntPtr.Zero;
|
||||
private FAudio.stb_vorbis_info Info;
|
||||
|
||||
public override bool Loaded => VorbisHandle != IntPtr.Zero;
|
||||
private string FilePath;
|
||||
|
||||
public unsafe static StreamingSoundOgg Create(AudioDevice device, string filePath)
|
||||
{
|
||||
var handle = FAudio.stb_vorbis_open_filename(filePath, out int error, IntPtr.Zero);
|
||||
if (error != 0)
|
||||
{
|
||||
Logger.LogError("Error: " + error);
|
||||
throw new AudioLoadException("Error opening ogg file!");
|
||||
}
|
||||
|
||||
var info = FAudio.stb_vorbis_get_info(handle);
|
||||
|
||||
var streamingSound = new StreamingSoundOgg(
|
||||
device,
|
||||
filePath,
|
||||
info
|
||||
);
|
||||
|
||||
FAudio.stb_vorbis_close(handle);
|
||||
|
||||
return streamingSound;
|
||||
}
|
||||
|
||||
internal unsafe StreamingSoundOgg(
|
||||
AudioDevice device,
|
||||
string filePath,
|
||||
FAudio.stb_vorbis_info info,
|
||||
uint bufferSize = 32768
|
||||
) : base(
|
||||
device,
|
||||
3, /* float type */
|
||||
32, /* size of float */
|
||||
(ushort) (4 * info.channels),
|
||||
(ushort) info.channels,
|
||||
info.sample_rate,
|
||||
bufferSize,
|
||||
true
|
||||
) {
|
||||
Info = info;
|
||||
FilePath = filePath;
|
||||
}
|
||||
|
||||
public override void Seek(uint sampleFrame)
|
||||
{
|
||||
FAudio.stb_vorbis_seek(VorbisHandle, sampleFrame);
|
||||
}
|
||||
|
||||
public override unsafe void Load()
|
||||
{
|
||||
var fileStream = new FileStream(FilePath, FileMode.Open, FileAccess.Read);
|
||||
FileDataPtr = (nint) NativeMemory.Alloc((nuint) fileStream.Length);
|
||||
var fileDataSpan = new Span<byte>((void*) FileDataPtr, (int) fileStream.Length);
|
||||
fileStream.ReadExactly(fileDataSpan);
|
||||
fileStream.Close();
|
||||
|
||||
VorbisHandle = FAudio.stb_vorbis_open_memory(FileDataPtr, fileDataSpan.Length, out int error, IntPtr.Zero);
|
||||
if (error != 0)
|
||||
{
|
||||
NativeMemory.Free((void*) FileDataPtr);
|
||||
Logger.LogError("Error opening OGG file!");
|
||||
Logger.LogError("Error: " + error);
|
||||
throw new AudioLoadException("Error opening OGG file!");
|
||||
}
|
||||
}
|
||||
|
||||
public override unsafe void Unload()
|
||||
{
|
||||
if (Loaded)
|
||||
{
|
||||
FAudio.stb_vorbis_close(VorbisHandle);
|
||||
NativeMemory.Free((void*) FileDataPtr);
|
||||
|
||||
VorbisHandle = IntPtr.Zero;
|
||||
FileDataPtr = IntPtr.Zero;
|
||||
}
|
||||
}
|
||||
|
||||
protected unsafe override void FillBuffer(
|
||||
void* buffer,
|
||||
int bufferLengthInBytes,
|
||||
out int filledLengthInBytes,
|
||||
out bool reachedEnd
|
||||
) {
|
||||
var lengthInFloats = bufferLengthInBytes / sizeof(float);
|
||||
|
||||
/* NOTE: this function returns samples per channel, not total samples */
|
||||
var samples = FAudio.stb_vorbis_get_samples_float_interleaved(
|
||||
VorbisHandle,
|
||||
Info.channels,
|
||||
(IntPtr) buffer,
|
||||
lengthInFloats
|
||||
);
|
||||
|
||||
var sampleCount = samples * Info.channels;
|
||||
reachedEnd = sampleCount < lengthInFloats;
|
||||
filledLengthInBytes = sampleCount * sizeof(float);
|
||||
}
|
||||
}
|
||||
}
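A quick worked check of the FillBuffer arithmetic above, with illustrative numbers: for a stereo stream and bufferLengthInBytes = 32768, lengthInFloats = 32768 / 4 = 8192. If stb_vorbis returns 2048 sample frames, sampleCount = 2048 * 2 = 4096 floats, filledLengthInBytes = 4096 * 4 = 16384 bytes, and reachedEnd is true because 4096 < 8192.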
@ -1,137 +0,0 @@
|
|||
using System;
|
||||
using System.IO;
|
||||
using System.Runtime.InteropServices;
|
||||
|
||||
namespace MoonWorks.Audio
|
||||
{
|
||||
public class StreamingSoundQoa : StreamingSoundSeekable
|
||||
{
|
||||
private IntPtr QoaHandle = IntPtr.Zero;
|
||||
private IntPtr FileDataPtr = IntPtr.Zero;
|
||||
|
||||
uint Channels;
|
||||
uint SamplesPerChannelPerFrame;
|
||||
uint TotalSamplesPerChannel;
|
||||
|
||||
public override bool Loaded => QoaHandle != IntPtr.Zero;
|
||||
private string FilePath;
|
||||
|
||||
private const uint QOA_MAGIC = 0x716f6166; /* 'qoaf' */
|
||||
|
||||
private static unsafe UInt64 ReverseEndianness(UInt64 value)
|
||||
{
|
||||
byte* bytes = (byte*) &value;
|
||||
|
||||
return
|
||||
((UInt64)(bytes[0]) << 56) | ((UInt64)(bytes[1]) << 48) |
|
||||
((UInt64)(bytes[2]) << 40) | ((UInt64)(bytes[3]) << 32) |
|
||||
((UInt64)(bytes[4]) << 24) | ((UInt64)(bytes[5]) << 16) |
|
||||
((UInt64)(bytes[6]) << 8) | ((UInt64)(bytes[7]) << 0);
|
||||
}
|
||||
|
||||
public unsafe static StreamingSoundQoa Create(AudioDevice device, string filePath)
|
||||
{
|
||||
using var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read);
|
||||
using var reader = new BinaryReader(stream);
|
||||
|
||||
UInt64 fileHeader = ReverseEndianness(reader.ReadUInt64());
|
||||
if ((fileHeader >> 32) != QOA_MAGIC)
|
||||
{
|
||||
throw new AudioLoadException("Specified file is not a QOA file.");
|
||||
}
|
||||
|
||||
uint totalSamplesPerChannel = (uint) (fileHeader & (0xFFFFFFFF));
|
||||
if (totalSamplesPerChannel == 0)
|
||||
{
|
||||
throw new AudioLoadException("Specified file is not a valid QOA file.");
|
||||
}
|
||||
|
||||
UInt64 frameHeader = ReverseEndianness(reader.ReadUInt64());
|
||||
uint channels = (uint) ((frameHeader >> 56) & 0x0000FF);
|
||||
uint samplerate = (uint) ((frameHeader >> 32) & 0xFFFFFF);
|
||||
uint samplesPerChannelPerFrame = (uint) ((frameHeader >> 16) & 0x00FFFF);
|
||||
|
||||
return new StreamingSoundQoa(
|
||||
device,
|
||||
filePath,
|
||||
channels,
|
||||
samplerate,
|
||||
samplesPerChannelPerFrame,
|
||||
totalSamplesPerChannel
|
||||
);
|
||||
}
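// Worked example for the header parsing above (illustrative values): if
// ReverseEndianness(reader.ReadUInt64()) yields 0x716F6166_0000ABCD, then
// fileHeader >> 32 == 0x716F6166 ('qoaf') and totalSamplesPerChannel == 0xABCD.
// In the first frame header, bits 56-63 carry the channel count, bits 32-55 the
// sample rate, and bits 16-31 the samples per channel per frame, which is exactly
// what the three shift/mask expressions above extract.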
|
||||
|
||||
internal unsafe StreamingSoundQoa(
|
||||
AudioDevice device,
|
||||
string filePath,
|
||||
uint channels,
|
||||
uint samplesPerSecond,
|
||||
uint samplesPerChannelPerFrame,
|
||||
uint totalSamplesPerChannel
|
||||
) : base(
|
||||
device,
|
||||
1,
|
||||
16,
|
||||
(ushort) (2 * channels),
|
||||
(ushort) channels,
|
||||
samplesPerSecond,
|
||||
samplesPerChannelPerFrame * channels * sizeof(short),
|
||||
true
|
||||
) {
|
||||
Channels = channels;
|
||||
SamplesPerChannelPerFrame = samplesPerChannelPerFrame;
|
||||
TotalSamplesPerChannel = totalSamplesPerChannel;
|
||||
FilePath = filePath;
|
||||
}
|
||||
|
||||
public override void Seek(uint sampleFrame)
|
||||
{
|
||||
FAudio.qoa_seek_frame(QoaHandle, (int) sampleFrame);
|
||||
}
|
||||
|
||||
public override unsafe void Load()
|
||||
{
|
||||
var fileStream = new FileStream(FilePath, FileMode.Open, FileAccess.Read);
|
||||
FileDataPtr = (nint) NativeMemory.Alloc((nuint) fileStream.Length);
|
||||
var fileDataSpan = new Span<byte>((void*) FileDataPtr, (int) fileStream.Length);
|
||||
fileStream.ReadExactly(fileDataSpan);
|
||||
fileStream.Close();
|
||||
|
||||
QoaHandle = FAudio.qoa_open_from_memory((char*) FileDataPtr, (uint) fileDataSpan.Length, 0);
|
||||
if (QoaHandle == IntPtr.Zero)
|
||||
{
|
||||
NativeMemory.Free((void*) FileDataPtr);
|
||||
Logger.LogError("Error opening QOA file!");
|
||||
throw new AudioLoadException("Error opening QOA file!");
|
||||
}
|
||||
}
|
||||
|
||||
public override unsafe void Unload()
|
||||
{
|
||||
if (Loaded)
|
||||
{
|
||||
FAudio.qoa_close(QoaHandle);
|
||||
NativeMemory.Free((void*) FileDataPtr);
|
||||
|
||||
QoaHandle = IntPtr.Zero;
|
||||
FileDataPtr = IntPtr.Zero;
|
||||
}
|
||||
}
|
||||
|
||||
protected override unsafe void FillBuffer(
|
||||
void* buffer,
|
||||
int bufferLengthInBytes,
|
||||
out int filledLengthInBytes,
|
||||
out bool reachedEnd
|
||||
) {
|
||||
var lengthInShorts = bufferLengthInBytes / sizeof(short);
|
||||
|
||||
// NOTE: this function returns samples per channel!
|
||||
var samples = FAudio.qoa_decode_next_frame(QoaHandle, (short*) buffer);
|
||||
|
||||
var sampleCount = samples * Channels;
|
||||
reachedEnd = sampleCount < lengthInShorts;
|
||||
filledLengthInBytes = (int) (sampleCount * sizeof(short));
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
namespace MoonWorks.Audio
|
||||
{
|
||||
public abstract class StreamingSoundSeekable : StreamingSound
|
||||
{
|
||||
public bool Loop { get; set; }
|
||||
|
||||
protected StreamingSoundSeekable(
|
||||
AudioDevice device,
|
||||
ushort formatTag,
|
||||
ushort bitsPerSample,
|
||||
ushort blockAlign,
|
||||
ushort channels,
|
||||
uint samplesPerSecond,
|
||||
uint bufferSize,
|
||||
bool autoUpdate
|
||||
) : base(
|
||||
device,
|
||||
formatTag,
|
||||
bitsPerSample,
|
||||
blockAlign,
|
||||
channels,
|
||||
samplesPerSecond,
|
||||
bufferSize,
|
||||
autoUpdate
|
||||
) {
|
||||
|
||||
}
|
||||
|
||||
public abstract void Seek(uint sampleFrame);
|
||||
|
||||
protected override void OnReachedEnd()
|
||||
{
|
||||
if (Loop)
|
||||
{
|
||||
ConsumingBuffers = true;
|
||||
Seek(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,150 @@
using System;
using System.Runtime.InteropServices;

namespace MoonWorks.Audio
{
	/// <summary>
	/// Use in conjunction with an AudioDataStreamable object to play back streaming audio data.
	/// </summary>
	public class StreamingVoice : SourceVoice, IPoolable<StreamingVoice>
	{
		private const int BUFFER_COUNT = 3;
		private readonly IntPtr[] buffers;
		private int nextBufferIndex = 0;
		private uint BufferSize;

		public bool Loop { get; set; }

		public AudioDataStreamable AudioData { get; protected set; }

		public unsafe StreamingVoice(AudioDevice device, Format format) : base(device, format)
		{
			buffers = new IntPtr[BUFFER_COUNT];
		}

		public static StreamingVoice Create(AudioDevice device, Format format)
		{
			return new StreamingVoice(device, format);
		}

		/// <summary>
		/// Loads and prepares an AudioDataStreamable for streaming playback.
		/// This automatically calls Load on the given AudioDataStreamable.
		/// </summary>
		public void Load(AudioDataStreamable data)
		{
			lock (StateLock)
			{
				if (AudioData != null)
				{
					AudioData.Unload();
				}

				data.Load();
				AudioData = data;

				InitializeBuffers();
				QueueBuffers();
			}
		}

		/// <summary>
		/// Unloads AudioDataStreamable from this voice.
		/// This automatically calls Unload on the given AudioDataStreamable.
		/// </summary>
		public void Unload()
		{
			lock (StateLock)
			{
				if (AudioData != null)
				{
					Stop();
					AudioData.Unload();
					AudioData = null;
				}
			}
		}

		public override void Reset()
		{
			Unload();
			base.Reset();
		}

		public override void Update()
		{
			lock (StateLock)
			{
				if (AudioData == null || State != SoundState.Playing)
				{
					return;
				}

				QueueBuffers();
			}
		}

		private void QueueBuffers()
		{
			int buffersNeeded = BUFFER_COUNT - (int) BuffersQueued; // don't get got by uint underflow!
			for (int i = 0; i < buffersNeeded; i += 1)
			{
				AddBuffer();
			}
		}

		private unsafe void AddBuffer()
		{
			var buffer = buffers[nextBufferIndex];
			nextBufferIndex = (nextBufferIndex + 1) % BUFFER_COUNT;

			AudioData.Decode(
				(void*) buffer,
				(int) BufferSize,
				out int filledLengthInBytes,
				out bool reachedEnd
			);

			if (filledLengthInBytes > 0)
			{
				var buf = new FAudio.FAudioBuffer
				{
					AudioBytes = (uint) filledLengthInBytes,
					pAudioData = buffer,
					PlayLength = (
						(uint) (filledLengthInBytes /
						Format.Channels /
						(uint) (Format.BitsPerSample / 8))
					)
				};

				Submit(buf);
			}

			if (reachedEnd)
			{
				/* We have reached the end of the data, what do we do? */
				if (Loop)
				{
					AudioData.Seek(0);
					AddBuffer();
				}
			}
		}

		private unsafe void InitializeBuffers()
		{
			BufferSize = AudioData.DecodeBufferSize;

			for (int i = 0; i < BUFFER_COUNT; i += 1)
			{
				if (buffers[i] != IntPtr.Zero)
				{
					NativeMemory.Free((void*) buffers[i]);
				}

				buffers[i] = (IntPtr) NativeMemory.Alloc(BufferSize);
			}
		}
	}
}
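An end-to-end sketch of the new streaming path, assuming an AudioDevice named `device`, that AudioDataStreamable exposes its Format, and that SourceVoice provides Play(); those pieces are not part of this hunk, so treat the snippet as illustrative:

	// Stream an Ogg file through a StreamingVoice (hypothetical asset path).
	var ogg = new AudioDataOgg(device, "Content/Music/theme.ogg");
	var voice = new StreamingVoice(device, ogg.Format);
	voice.Load(ogg);    // calls ogg.Load() and primes the BUFFER_COUNT decode buffers
	voice.Loop = true;  // AddBuffer seeks back to 0 when the data runs out
	voice.Play();       // assumed SourceVoice API; Update() then keeps the queue fed

For reference, the PlayLength computed in AddBuffer is in sample frames: 16384 filled bytes of 16-bit stereo works out to 16384 / 2 / 2 = 4096 frames.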
@ -0,0 +1,31 @@
using System;

namespace MoonWorks.Audio
{
	/// <summary>
	/// SourceVoices can send audio to a SubmixVoice for convenient effects processing.
	/// Submixes process in order of processingStage, from lowest to highest.
	/// Therefore submixes early in a chain should have a low processingStage, and later in the chain they should have a higher one.
	/// </summary>
	public class SubmixVoice : Voice
	{
		public SubmixVoice(
			AudioDevice device,
			uint sourceChannelCount,
			uint sampleRate,
			uint processingStage
		) : base(device, sourceChannelCount, device.DeviceDetails.OutputFormat.Format.nChannels)
		{
			FAudio.FAudio_CreateSubmixVoice(
				device.Handle,
				out handle,
				sourceChannelCount,
				sampleRate,
				FAudio.FAUDIO_VOICE_USEFILTER,
				processingStage,
				IntPtr.Zero, // default sends to mastering voice
				IntPtr.Zero
			);
		}
	}
}
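A minimal sketch of the processingStage ordering described in the summary, assuming an existing AudioDevice `device` and a SourceVoice `voice` (names illustrative):

	// Two stereo submixes at 48000 Hz; stage 0 is processed before stage 1.
	var preFx  = new SubmixVoice(device, 2, 48000, 0);
	var master = new SubmixVoice(device, 2, 48000, 1);

	preFx.SetOutputVoice(master); // route the early submix into the later one
	voice.SetOutputVoice(preFx);  // source -> preFx -> master -> mastering voice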
@ -0,0 +1,29 @@
namespace MoonWorks.Audio
{
	/// <summary>
	/// TransientVoice is intended for playing one-off sound effects that don't have a long term reference.
	/// It will be automatically returned to the source voice pool once it is done playing back.
	/// </summary>
	public class TransientVoice : SourceVoice, IPoolable<TransientVoice>
	{
		static TransientVoice IPoolable<TransientVoice>.Create(AudioDevice device, Format format)
		{
			return new TransientVoice(device, format);
		}

		public TransientVoice(AudioDevice device, Format format) : base(device, format)
		{
		}

		public override void Update()
		{
			lock (StateLock)
			{
				if (PlaybackInitiated && BuffersQueued == 0)
				{
					Return();
				}
			}
		}
	}
}
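Illustrative fire-and-forget usage, assuming an AudioDevice `device`, a preloaded AudioBuffer `blipBuffer`, and that SourceVoice exposes Submit(AudioBuffer) and Play(); none of those members appear in this hunk, so the snippet sketches the intended flow rather than confirmed API:

	var voice = new TransientVoice(device, blipBuffer.Format);
	voice.Submit(blipBuffer); // assumed SourceVoice call for queueing the buffer
	voice.Play();             // assumed SourceVoice call
	// Once BuffersQueued reaches 0, Update() calls Return() and the voice goes
	// back to the pool; the caller does no further bookkeeping.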
@ -4,58 +4,60 @@ using EasingFunction = System.Func<float, float>;
|
|||
|
||||
namespace MoonWorks.Audio
|
||||
{
|
||||
public abstract class SoundInstance : AudioResource
|
||||
public abstract unsafe class Voice : AudioResource
|
||||
{
|
||||
internal IntPtr Voice;
|
||||
protected IntPtr handle;
|
||||
public IntPtr Handle => handle;
|
||||
|
||||
private FAudio.FAudioWaveFormatEx format;
|
||||
public FAudio.FAudioWaveFormatEx Format => format;
|
||||
|
||||
protected FAudio.F3DAUDIO_DSP_SETTINGS dspSettings;
|
||||
public uint SourceChannelCount { get; }
|
||||
public uint DestinationChannelCount { get; }
|
||||
|
||||
protected SubmixVoice OutputVoice;
|
||||
private ReverbEffect ReverbEffect;
|
||||
private FAudio.FAudioVoiceSends ReverbSends;
|
||||
|
||||
protected byte* pMatrixCoefficients;
|
||||
|
||||
public bool Is3D { get; protected set; }
|
||||
|
||||
public virtual SoundState State { get; protected set; }
|
||||
|
||||
private float pan = 0;
|
||||
public float Pan
|
||||
private float dopplerFactor;
|
||||
/// <summary>
|
||||
/// The strength of the doppler effect on this voice.
|
||||
/// </summary>
|
||||
public float DopplerFactor
|
||||
{
|
||||
get => pan;
|
||||
get => dopplerFactor;
|
||||
set
|
||||
{
|
||||
if (dopplerFactor != value)
|
||||
{
|
||||
dopplerFactor = value;
|
||||
UpdatePitch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private float volume = 1;
|
||||
/// <summary>
|
||||
/// The overall volume level for the voice.
|
||||
/// </summary>
|
||||
public float Volume
|
||||
{
|
||||
get => volume;
|
||||
internal set
|
||||
{
|
||||
value = Math.MathHelper.Clamp(value, -1f, 1f);
|
||||
if (pan != value)
|
||||
value = Math.MathHelper.Max(0, value);
|
||||
if (volume != value)
|
||||
{
|
||||
pan = value;
|
||||
|
||||
if (pan < -1f)
|
||||
{
|
||||
pan = -1f;
|
||||
}
|
||||
if (pan > 1f)
|
||||
{
|
||||
pan = 1f;
|
||||
}
|
||||
|
||||
if (Is3D) { return; }
|
||||
|
||||
SetPanMatrixCoefficients();
|
||||
FAudio.FAudioVoice_SetOutputMatrix(
|
||||
Voice,
|
||||
Device.MasteringVoice,
|
||||
dspSettings.SrcChannelCount,
|
||||
dspSettings.DstChannelCount,
|
||||
dspSettings.pMatrixCoefficients,
|
||||
0
|
||||
);
|
||||
volume = value;
|
||||
FAudio.FAudioVoice_SetVolume(Handle, volume, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private float pitch = 0;
|
||||
/// <summary>
|
||||
/// The pitch of the voice.
|
||||
/// </summary>
|
||||
public float Pitch
|
||||
{
|
||||
get => pitch;
|
||||
|
@ -70,21 +72,6 @@ namespace MoonWorks.Audio
|
|||
}
|
||||
}
|
||||
|
||||
private float volume = 1;
|
||||
public float Volume
|
||||
{
|
||||
get => volume;
|
||||
internal set
|
||||
{
|
||||
value = Math.MathHelper.Max(0, value);
|
||||
if (volume != value)
|
||||
{
|
||||
volume = value;
|
||||
FAudio.FAudioVoice_SetVolume(Voice, volume, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private const float MAX_FILTER_FREQUENCY = 1f;
|
||||
private const float MAX_FILTER_ONEOVERQ = 1.5f;
|
||||
|
||||
|
@ -95,6 +82,9 @@ namespace MoonWorks.Audio
|
|||
OneOverQ = 1f
|
||||
};
|
||||
|
||||
/// <summary>
|
||||
/// The frequency cutoff on the voice filter.
|
||||
/// </summary>
|
||||
public float FilterFrequency
|
||||
{
|
||||
get => filterParameters.Frequency;
|
||||
|
@ -106,7 +96,7 @@ namespace MoonWorks.Audio
|
|||
filterParameters.Frequency = value;
|
||||
|
||||
FAudio.FAudioVoice_SetFilterParameters(
|
||||
Voice,
|
||||
Handle,
|
||||
ref filterParameters,
|
||||
0
|
||||
);
|
||||
|
@ -114,6 +104,10 @@ namespace MoonWorks.Audio
|
|||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Reciprocal of Q factor.
|
||||
/// Controls how quickly frequencies beyond the filter frequency are dampened.
|
||||
/// </summary>
|
||||
public float FilterOneOverQ
|
||||
{
|
||||
get => filterParameters.OneOverQ;
|
||||
|
@ -125,7 +119,7 @@ namespace MoonWorks.Audio
|
|||
filterParameters.OneOverQ = value;
|
||||
|
||||
FAudio.FAudioVoice_SetFilterParameters(
|
||||
Voice,
|
||||
Handle,
|
||||
ref filterParameters,
|
||||
0
|
||||
);
|
||||
|
@ -134,6 +128,9 @@ namespace MoonWorks.Audio
|
|||
}
|
||||
|
||||
private FilterType filterType;
|
||||
/// <summary>
|
||||
/// The frequency filter that is applied to the voice.
|
||||
/// </summary>
|
||||
public FilterType FilterType
|
||||
{
|
||||
get => filterType;
|
||||
|
@ -170,7 +167,7 @@ namespace MoonWorks.Audio
|
|||
}
|
||||
|
||||
FAudio.FAudioVoice_SetFilterParameters(
|
||||
Voice,
|
||||
Handle,
|
||||
ref filterParameters,
|
||||
0
|
||||
);
|
||||
|
@ -178,7 +175,49 @@ namespace MoonWorks.Audio
|
|||
}
|
||||
}
|
||||
|
||||
protected float pan = 0;
|
||||
/// <summary>
|
||||
/// Left-right panning. -1 is hard left pan, 1 is hard right pan.
|
||||
/// </summary>
|
||||
public float Pan
|
||||
{
|
||||
get => pan;
|
||||
internal set
|
||||
{
|
||||
value = Math.MathHelper.Clamp(value, -1f, 1f);
|
||||
if (pan != value)
|
||||
{
|
||||
pan = value;
|
||||
|
||||
if (pan < -1f)
|
||||
{
|
||||
pan = -1f;
|
||||
}
|
||||
if (pan > 1f)
|
||||
{
|
||||
pan = 1f;
|
||||
}
|
||||
|
||||
if (Is3D) { return; }
|
||||
|
||||
SetPanMatrixCoefficients();
|
||||
FAudio.FAudioVoice_SetOutputMatrix(
|
||||
Handle,
|
||||
OutputVoice.Handle,
|
||||
SourceChannelCount,
|
||||
DestinationChannelCount,
|
||||
(nint) pMatrixCoefficients,
|
||||
0
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private float reverb;
|
||||
/// <summary>
|
||||
/// The wet-dry mix of the reverb effect.
|
||||
/// Has no effect if SetReverbEffectChain has not been called.
|
||||
/// </summary>
|
||||
public unsafe float Reverb
|
||||
{
|
||||
get => reverb;
|
||||
|
@ -191,19 +230,19 @@ namespace MoonWorks.Audio
|
|||
{
|
||||
reverb = value;
|
||||
|
||||
float* outputMatrix = (float*) dspSettings.pMatrixCoefficients;
|
||||
float* outputMatrix = (float*) pMatrixCoefficients;
|
||||
outputMatrix[0] = reverb;
|
||||
if (dspSettings.SrcChannelCount == 2)
|
||||
if (SourceChannelCount == 2)
|
||||
{
|
||||
outputMatrix[1] = reverb;
|
||||
}
|
||||
|
||||
FAudio.FAudioVoice_SetOutputMatrix(
|
||||
Voice,
|
||||
ReverbEffect.Voice,
|
||||
dspSettings.SrcChannelCount,
|
||||
Handle,
|
||||
ReverbEffect.Handle,
|
||||
SourceChannelCount,
|
||||
1,
|
||||
dspSettings.pMatrixCoefficients,
|
||||
(nint) pMatrixCoefficients,
|
||||
0
|
||||
);
|
||||
}
|
||||
|
@ -218,225 +257,236 @@ namespace MoonWorks.Audio
|
|||
}
|
||||
}
|
||||
|
||||
public unsafe SoundInstance(
|
||||
AudioDevice device,
|
||||
ushort formatTag,
|
||||
ushort bitsPerSample,
|
||||
ushort blockAlign,
|
||||
ushort channels,
|
||||
uint samplesPerSecond
|
||||
) : base(device)
|
||||
public Voice(AudioDevice device, uint sourceChannelCount, uint destinationChannelCount) : base(device)
|
||||
{
|
||||
format = new FAudio.FAudioWaveFormatEx
|
||||
{
|
||||
wFormatTag = formatTag,
|
||||
wBitsPerSample = bitsPerSample,
|
||||
nChannels = channels,
|
||||
nBlockAlign = blockAlign,
|
||||
nSamplesPerSec = samplesPerSecond,
|
||||
nAvgBytesPerSec = blockAlign * samplesPerSecond
|
||||
};
|
||||
|
||||
FAudio.FAudio_CreateSourceVoice(
|
||||
Device.Handle,
|
||||
out Voice,
|
||||
ref format,
|
||||
FAudio.FAUDIO_VOICE_USEFILTER,
|
||||
FAudio.FAUDIO_DEFAULT_FREQ_RATIO,
|
||||
IntPtr.Zero,
|
||||
IntPtr.Zero,
|
||||
IntPtr.Zero
|
||||
);
|
||||
|
||||
if (Voice == IntPtr.Zero)
|
||||
{
|
||||
Logger.LogError("SoundInstance failed to initialize!");
|
||||
return;
|
||||
}
|
||||
|
||||
InitDSPSettings(Format.nChannels);
|
||||
|
||||
State = SoundState.Stopped;
|
||||
}
|
||||
|
||||
public void Apply3D(AudioListener listener, AudioEmitter emitter)
|
||||
{
|
||||
Is3D = true;
|
||||
|
||||
emitter.emitterData.CurveDistanceScaler = Device.CurveDistanceScalar;
|
||||
emitter.emitterData.ChannelCount = dspSettings.SrcChannelCount;
|
||||
|
||||
FAudio.F3DAudioCalculate(
|
||||
Device.Handle3D,
|
||||
ref listener.listenerData,
|
||||
ref emitter.emitterData,
|
||||
FAudio.F3DAUDIO_CALCULATE_MATRIX | FAudio.F3DAUDIO_CALCULATE_DOPPLER,
|
||||
ref dspSettings
|
||||
);
|
||||
|
||||
UpdatePitch();
|
||||
FAudio.FAudioVoice_SetOutputMatrix(
|
||||
Voice,
|
||||
Device.MasteringVoice,
|
||||
dspSettings.SrcChannelCount,
|
||||
dspSettings.DstChannelCount,
|
||||
dspSettings.pMatrixCoefficients,
|
||||
0
|
||||
);
|
||||
}
|
||||
|
||||
public unsafe void ApplyReverb(ReverbEffect reverbEffect)
|
||||
{
|
||||
ReverbSends = new FAudio.FAudioVoiceSends();
|
||||
ReverbSends.SendCount = 2;
|
||||
ReverbSends.pSends = (nint) NativeMemory.Alloc((nuint) (2 * Marshal.SizeOf<FAudio.FAudioSendDescriptor>()));
|
||||
|
||||
FAudio.FAudioSendDescriptor* sendDesc = (FAudio.FAudioSendDescriptor*) ReverbSends.pSends;
|
||||
sendDesc[0].Flags = 0;
|
||||
sendDesc[0].pOutputVoice = Device.MasteringVoice;
|
||||
sendDesc[1].Flags = 0;
|
||||
sendDesc[1].pOutputVoice = reverbEffect.Voice;
|
||||
|
||||
FAudio.FAudioVoice_SetOutputVoices(
|
||||
Voice,
|
||||
ref ReverbSends
|
||||
);
|
||||
|
||||
ReverbEffect = reverbEffect;
|
||||
}
|
||||
|
||||
public void SetPan(float targetValue)
|
||||
{
|
||||
Pan = targetValue;
|
||||
Device.ClearTweens(this, AudioTweenProperty.Pan);
|
||||
}
|
||||
|
||||
public void SetPan(float targetValue, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Pan, easingFunction, Pan, targetValue, duration, 0);
|
||||
}
|
||||
|
||||
public void SetPan(float targetValue, float delayTime, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Pan, easingFunction, Pan, targetValue, duration, delayTime);
|
||||
SourceChannelCount = sourceChannelCount;
|
||||
DestinationChannelCount = destinationChannelCount;
|
||||
OutputVoice = device.MasteringVoice;
|
||||
nuint memsize = 4 * sourceChannelCount * destinationChannelCount;
|
||||
pMatrixCoefficients = (byte*) NativeMemory.AllocZeroed(memsize);
|
||||
SetPanMatrixCoefficients();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the pitch of the voice. Valid input range is -1f to 1f.
|
||||
/// </summary>
|
||||
public void SetPitch(float targetValue)
|
||||
{
|
||||
Pitch = targetValue;
|
||||
Device.ClearTweens(this, AudioTweenProperty.Pitch);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the pitch of the voice over a time duration in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public void SetPitch(float targetValue, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Pitch, easingFunction, Pan, targetValue, duration, 0);
|
||||
Device.CreateTween(this, AudioTweenProperty.Pitch, easingFunction, Pitch, targetValue, duration, 0);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the pitch of the voice over a time duration in seconds after a delay in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public void SetPitch(float targetValue, float delayTime, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Pitch, easingFunction, Pan, targetValue, duration, delayTime);
|
||||
Device.CreateTween(this, AudioTweenProperty.Pitch, easingFunction, Pitch, targetValue, duration, delayTime);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the volume of the voice. Minimum value is 0f.
|
||||
/// </summary>
|
||||
public void SetVolume(float targetValue)
|
||||
{
|
||||
Volume = targetValue;
|
||||
Device.ClearTweens(this, AudioTweenProperty.Volume);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the volume of the voice over a time duration in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public void SetVolume(float targetValue, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Volume, easingFunction, Volume, targetValue, duration, 0);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the volume of the voice over a time duration in seconds after a delay in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public void SetVolume(float targetValue, float delayTime, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Volume, easingFunction, Volume, targetValue, duration, delayTime);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the frequency cutoff on the voice filter. Valid range is 0.01f to 1f.
|
||||
/// </summary>
|
||||
public void SetFilterFrequency(float targetValue)
|
||||
{
|
||||
FilterFrequency = targetValue;
|
||||
Device.ClearTweens(this, AudioTweenProperty.FilterFrequency);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the frequency cutoff on the voice filter over a time duration in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public void SetFilterFrequency(float targetValue, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.FilterFrequency, easingFunction, FilterFrequency, targetValue, duration, 0);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the frequency cutoff on the voice filter over a time duration in seconds after a delay in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public void SetFilterFrequency(float targetValue, float delayTime, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.FilterFrequency, easingFunction, FilterFrequency, targetValue, duration, delayTime);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets reciprocal of Q factor on the frequency filter.
|
||||
/// Controls how quickly frequencies beyond the filter frequency are dampened.
|
||||
/// </summary>
|
||||
public void SetFilterOneOverQ(float targetValue)
|
||||
{
|
||||
FilterOneOverQ = targetValue;
|
||||
}
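// Illustrative tween usage for the filter setters above:
// voice.SetFilterFrequency(0.25f, 0.5f, t => t * t);
// sweeps the cutoff toward 0.25 over half a second with a quadratic ease
// (the lambda stands in for a MoonWorks.Math.Easing function; names illustrative).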
|
||||
|
||||
public void SetReverb(float targetValue)
|
||||
/// <summary>
|
||||
/// Sets a left-right panning value. -1f is hard left pan, 1f is hard right pan.
|
||||
/// </summary>
|
||||
public virtual void SetPan(float targetValue)
|
||||
{
|
||||
Pan = targetValue;
|
||||
Device.ClearTweens(this, AudioTweenProperty.Pan);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets a left-right panning value over a time duration in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public virtual void SetPan(float targetValue, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Pan, easingFunction, Pan, targetValue, duration, 0);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets a left-right panning value over a time duration in seconds after a delay in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public virtual void SetPan(float targetValue, float delayTime, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Pan, easingFunction, Pan, targetValue, duration, delayTime);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the wet-dry mix value of the reverb effect. Minimum value is 0f.
|
||||
/// </summary>
|
||||
public virtual void SetReverb(float targetValue)
|
||||
{
|
||||
Reverb = targetValue;
|
||||
Device.ClearTweens(this, AudioTweenProperty.Reverb);
|
||||
}
|
||||
|
||||
public void SetReverb(float targetValue, float duration, EasingFunction easingFunction)
|
||||
/// <summary>
|
||||
/// Sets the wet-dry mix value of the reverb effect over a time duration in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public virtual void SetReverb(float targetValue, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Reverb, easingFunction, Reverb, targetValue, duration, 0);
|
||||
}
|
||||
|
||||
public void SetReverb(float targetValue, float delayTime, float duration, EasingFunction easingFunction)
|
||||
/// <summary>
|
||||
/// Sets the wet-dry mix value of the reverb effect over a time duration in seconds after a delay in seconds.
|
||||
/// </summary>
|
||||
/// <param name="easingFunction">An easing function. See MoonWorks.Math.Easing.Function.Float</param>
|
||||
public virtual void SetReverb(float targetValue, float delayTime, float duration, EasingFunction easingFunction)
|
||||
{
|
||||
Device.CreateTween(this, AudioTweenProperty.Reverb, easingFunction, Reverb, targetValue, duration, delayTime);
|
||||
}
|
||||
|
||||
public abstract void Play();
|
||||
public abstract void QueueSyncPlay();
|
||||
public abstract void Pause();
|
||||
public abstract void Stop();
|
||||
public abstract void StopImmediate();
|
||||
|
||||
private unsafe void InitDSPSettings(uint srcChannels)
|
||||
/// <summary>
|
||||
/// Sets the output voice for this voice.
|
||||
/// </summary>
|
||||
/// <param name="send">Where the output should be sent.</param>
|
||||
public unsafe void SetOutputVoice(SubmixVoice send)
|
||||
{
|
||||
dspSettings = new FAudio.F3DAUDIO_DSP_SETTINGS();
|
||||
dspSettings.DopplerFactor = 1f;
|
||||
dspSettings.SrcChannelCount = srcChannels;
|
||||
dspSettings.DstChannelCount = Device.DeviceDetails.OutputFormat.Format.nChannels;
|
||||
OutputVoice = send;
|
||||
|
||||
nuint memsize = (
|
||||
4 *
|
||||
dspSettings.SrcChannelCount *
|
||||
dspSettings.DstChannelCount
|
||||
);
|
||||
|
||||
dspSettings.pMatrixCoefficients = (nint) NativeMemory.Alloc(memsize);
|
||||
byte* memPtr = (byte*) dspSettings.pMatrixCoefficients;
|
||||
for (uint i = 0; i < memsize; i += 1)
|
||||
if (ReverbEffect != null)
|
||||
{
|
||||
memPtr[i] = 0;
|
||||
}
|
||||
|
||||
SetPanMatrixCoefficients();
|
||||
}
|
||||
|
||||
private void UpdatePitch()
|
||||
{
|
||||
float doppler;
|
||||
float dopplerScale = Device.DopplerScale;
|
||||
if (!Is3D || dopplerScale == 0.0f)
|
||||
{
|
||||
doppler = 1.0f;
|
||||
SetReverbEffectChain(ReverbEffect);
|
||||
}
|
||||
else
|
||||
{
|
||||
doppler = dspSettings.DopplerFactor * dopplerScale;
|
||||
}
|
||||
FAudio.FAudioSendDescriptor* sendDesc = stackalloc FAudio.FAudioSendDescriptor[1];
|
||||
sendDesc[0].Flags = 0;
|
||||
sendDesc[0].pOutputVoice = send.Handle;
|
||||
|
||||
FAudio.FAudioSourceVoice_SetFrequencyRatio(
|
||||
Voice,
|
||||
(float) System.Math.Pow(2.0, pitch) * doppler,
|
||||
0
|
||||
var sends = new FAudio.FAudioVoiceSends();
|
||||
sends.SendCount = 1;
|
||||
sends.pSends = (nint) sendDesc;
|
||||
|
||||
FAudio.FAudioVoice_SetOutputVoices(
|
||||
Handle,
|
||||
ref sends
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Applies a reverb effect chain to this voice.
|
||||
/// </summary>
|
||||
public unsafe void SetReverbEffectChain(ReverbEffect reverbEffect)
|
||||
{
|
||||
var sendDesc = stackalloc FAudio.FAudioSendDescriptor[2];
|
||||
sendDesc[0].Flags = 0;
|
||||
sendDesc[0].pOutputVoice = OutputVoice.Handle;
|
||||
sendDesc[1].Flags = 0;
|
||||
sendDesc[1].pOutputVoice = reverbEffect.Handle;
|
||||
|
||||
var sends = new FAudio.FAudioVoiceSends();
|
||||
sends.SendCount = 2;
|
||||
sends.pSends = (nint) sendDesc;
|
||||
|
||||
FAudio.FAudioVoice_SetOutputVoices(
|
||||
Handle,
|
||||
ref sends
|
||||
);
|
||||
|
||||
ReverbEffect = reverbEffect;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Removes the reverb effect chain from this voice.
|
||||
/// </summary>
|
||||
public void RemoveReverbEffectChain()
|
||||
{
|
||||
if (ReverbEffect != null)
|
||||
{
|
||||
ReverbEffect = null;
|
||||
reverb = 0;
|
||||
SetOutputVoice(OutputVoice);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Resets all voice parameters to defaults.
|
||||
/// </summary>
|
||||
public virtual void Reset()
|
||||
{
|
||||
RemoveReverbEffectChain();
|
||||
Volume = 1;
|
||||
Pan = 0;
|
||||
Pitch = 0;
|
||||
FilterType = FilterType.None;
|
||||
SetOutputVoice(Device.MasteringVoice);
|
||||
}
|
||||
|
||||
// Taken from https://github.com/FNA-XNA/FNA/blob/master/src/Audio/SoundEffectInstance.cs
|
||||
|
@ -449,10 +499,10 @@ namespace MoonWorks.Audio
|
|||
* entire channel; the two channels are blended on each side.
|
||||
* -flibit
|
||||
*/
|
||||
float* outputMatrix = (float*) dspSettings.pMatrixCoefficients;
|
||||
if (dspSettings.SrcChannelCount == 1)
|
||||
float* outputMatrix = (float*) pMatrixCoefficients;
|
||||
if (SourceChannelCount == 1)
|
||||
{
|
||||
if (dspSettings.DstChannelCount == 1)
|
||||
if (DestinationChannelCount == 1)
|
||||
{
|
||||
outputMatrix[0] = 1.0f;
|
||||
}
|
||||
|
@ -464,7 +514,7 @@ namespace MoonWorks.Audio
|
|||
}
|
||||
else
|
||||
{
|
||||
if (dspSettings.DstChannelCount == 1)
|
||||
if (DestinationChannelCount == 1)
|
||||
{
|
||||
outputMatrix[0] = 1.0f;
|
||||
outputMatrix[1] = 1.0f;
|
||||
|
@ -493,16 +543,30 @@ namespace MoonWorks.Audio
|
|||
}
|
||||
}
|
||||
|
||||
protected void UpdatePitch()
|
||||
{
|
||||
float doppler;
|
||||
float dopplerScale = Device.DopplerScale;
|
||||
if (!Is3D || dopplerScale == 0.0f)
|
||||
{
|
||||
doppler = 1.0f;
|
||||
}
|
||||
else
|
||||
{
|
||||
doppler = DopplerFactor * dopplerScale;
|
||||
}
|
||||
|
||||
FAudio.FAudioSourceVoice_SetFrequencyRatio(
|
||||
Handle,
|
||||
(float) System.Math.Pow(2.0, pitch) * doppler,
|
||||
0
|
||||
);
|
||||
}
|
||||
|
||||
protected unsafe override void Destroy()
|
||||
{
|
||||
StopImmediate();
|
||||
FAudio.FAudioVoice_DestroyVoice(Voice);
|
||||
NativeMemory.Free((void*) dspSettings.pMatrixCoefficients);
|
||||
|
||||
if (ReverbEffect != null)
|
||||
{
|
||||
NativeMemory.Free((void*) ReverbSends.pSends);
|
||||
}
|
||||
NativeMemory.Free(pMatrixCoefficients);
|
||||
FAudio.FAudioVoice_DestroyVoice(Handle);
|
||||
}
|
||||
}
|
||||
}
|
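A closing sketch that exercises the reworked Voice surface from this diff, assuming a Voice `voice`, a SubmixVoice `submix`, and a ReverbEffect `reverbEffect` constructed elsewhere (only the calls themselves come from this changeset):

	voice.SetOutputVoice(submix);               // re-route output away from the mastering voice
	voice.SetReverbEffectChain(reverbEffect);   // adds a second send feeding the reverb submix
	voice.SetReverb(0.4f);                      // wet mix; does nothing without the effect chain
	voice.SetVolume(0f, 2f, t => t * (2f - t)); // fade out over two seconds with an ease-out curve
	voice.Reset();                              // back to defaults: no reverb, volume 1, mastering output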