forked from MoonsideGames/MoonWorks
Theora video support + audio improvements (#20)
- `SoundInstance.Play` no longer takes a loop parameter
- `SoundInstance.Stop` is split into `Stop` and `StopImmediate` instead of taking an immediate parameter
- Added `StreamingSoundSeekable` to better support streaming audio that does not support seeking
- `StreamingSound` no longer has a `Loop` property, but `StreamingSoundSeekable` does
- abstract `StreamingSound.AddBuffer` renamed to `FillBuffer`
- `FillBuffer` is now provided with a native buffer to avoid an extra data copy
- `StreamingSound` buffer implementation optimized to avoid repeated alloc/frees
- added `Video` class which can load and play Theora (.ogv) streaming video/audio

Reviewed-on: MoonsideGames/MoonWorks#20
parent 5a5fbc0c77
commit efb9893aef
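For orientation, here is a minimal usage sketch of the new `Video` class based only on the API added in this change; the `graphicsDevice`/`audioDevice` variables, the file path, and the surrounding game loop are placeholders, not part of this PR:

```csharp
// Sketch only: device setup and per-frame rendering are application-specific.
var video = new Video(graphicsDevice, audioDevice, "intro.ogv"); // path is a placeholder
video.Play(loop: false);

// Each frame, fetch the current frame of the video as a render-ready texture:
Texture frame = video.GetTexture();

// When finished (or on shutdown):
video.Stop();
video.Dispose();
```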
@@ -0,0 +1 @@
Subproject commit dd8c7fa69e678b6182cdaa71458ad08dd31c65da
@@ -0,0 +1,25 @@
namespace MoonWorks.Audio
{
	public abstract class StreamingSoundSeekable : StreamingSound
	{
		public bool Loop { get; set; }

		protected StreamingSoundSeekable(AudioDevice device, ushort formatTag, ushort bitsPerSample, ushort blockAlign, ushort channels, uint samplesPerSecond) : base(device, formatTag, bitsPerSample, blockAlign, channels, samplesPerSecond)
		{
		}

		public abstract void Seek(uint sampleFrame);

		protected override void OnReachedEnd()
		{
			if (Loop)
			{
				Seek(0);
			}
			else
			{
				Stop();
			}
		}
	}
}
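As a rough illustration of how the reworked audio API fits together: the `StreamingSoundOgg` type and its constructor below are hypothetical stand-ins for a concrete `StreamingSoundSeekable` subclass, while `Loop`, `Play`, `Stop`, and `StopImmediate` are the members introduced or changed in this PR.

```csharp
// "StreamingSoundOgg" is a hypothetical concrete subclass, used only for illustration.
var music = new StreamingSoundOgg(audioDevice, "music.ogg");

music.Loop = true; // looping now lives on the seekable stream...
music.Play();      // ...so Play() no longer takes a loop parameter

// Stop() and StopImmediate() replace the old bool "immediate" parameter on Stop().
music.Stop();
// music.StopImmediate();
```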
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,9 @@
#version 450

layout(location = 0) out vec2 outTexCoord;

void main()
{
	outTexCoord = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);
	gl_Position = vec4(outTexCoord * 2.0 - 1.0, 0.0, 1.0);
}
@@ -0,0 +1,38 @@
/*
 * This effect is based on the YUV-to-RGBA GLSL shader found in SDL.
 * Thus, it is also released under the zlib license:
 * http://libsdl.org/license.php
 */
#version 450

layout(location = 0) in vec2 TexCoord;

layout(location = 0) out vec4 FragColor;

layout(binding = 0, set = 1) uniform sampler2D YSampler;
layout(binding = 1, set = 1) uniform sampler2D USampler;
layout(binding = 2, set = 1) uniform sampler2D VSampler;

/* More info about colorspace conversion:
 * http://www.equasys.de/colorconversion.html
 * http://www.equasys.de/colorformat.html
 */

const vec3 offset = vec3(-0.0625, -0.5, -0.5);
const vec3 Rcoeff = vec3(1.164, 0.000, 1.793);
const vec3 Gcoeff = vec3(1.164, -0.213, -0.533);
const vec3 Bcoeff = vec3(1.164, 2.112, 0.000);

void main()
{
	vec3 yuv;
	yuv.x = texture(YSampler, TexCoord).r;
	yuv.y = texture(USampler, TexCoord).r;
	yuv.z = texture(VSampler, TexCoord).r;
	yuv += offset;

	FragColor.r = dot(yuv, Rcoeff);
	FragColor.g = dot(yuv, Gcoeff);
	FragColor.b = dot(yuv, Bcoeff);
	FragColor.a = 1.0;
}
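For reference, the constants above spell out a video-range YUV-to-RGB conversion; expanding the dot products, the shader computes:

R = 1.164 * (Y - 0.0625) + 1.793 * (V - 0.5)
G = 1.164 * (Y - 0.0625) - 0.213 * (U - 0.5) - 0.533 * (V - 0.5)
B = 1.164 * (Y - 0.0625) + 2.112 * (U - 0.5)

where Y, U, and V are the normalized samples read from the three single-channel textures.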
@@ -0,0 +1,357 @@
/* Heavily based on https://github.com/FNA-XNA/FNA/blob/master/src/Media/Xiph/VideoPlayer.cs */
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
using MoonWorks.Audio;
using MoonWorks.Graphics;

namespace MoonWorks.Video
{
	public enum VideoState
	{
		Playing,
		Paused,
		Stopped
	}

	public unsafe class Video : IDisposable
	{
		internal IntPtr Handle;

		public bool Loop { get; private set; }
		public float Volume {
			get => volume;
			set
			{
				volume = value;
				if (audioStream != null)
				{
					audioStream.Volume = value;
				}
			}
		}
		public float PlaybackSpeed { get; set; }
		public double FramesPerSecond => fps;
		private VideoState State = VideoState.Stopped;

		private double fps;
		private int yWidth;
		private int yHeight;
		private int uvWidth;
		private int uvHeight;

		private void* yuvData = null;
		private int yuvDataLength;
		private int currentFrame;

		private GraphicsDevice GraphicsDevice;
		private Texture RenderTexture = null;
		private Texture yTexture = null;
		private Texture uTexture = null;
		private Texture vTexture = null;
		private Sampler LinearSampler;

		private AudioDevice AudioDevice = null;
		private StreamingSoundTheora audioStream = null;
		private float volume = 1.0f;

		private Stopwatch timer;
		private double lastTimestamp;
		private double timeElapsed;

		private bool disposed;

		/* TODO: is there some way for us to load the data into memory? */
		public Video(GraphicsDevice graphicsDevice, AudioDevice audioDevice, string filename)
		{
			GraphicsDevice = graphicsDevice;
			AudioDevice = audioDevice;

			if (!System.IO.File.Exists(filename))
			{
				throw new ArgumentException("Video file not found!");
			}

			if (Theorafile.tf_fopen(filename, out Handle) < 0)
			{
				throw new ArgumentException("Invalid video file!");
			}

			Theorafile.th_pixel_fmt format;
			Theorafile.tf_videoinfo(
				Handle,
				out yWidth,
				out yHeight,
				out fps,
				out format
			);

			if (format == Theorafile.th_pixel_fmt.TH_PF_420)
			{
				uvWidth = yWidth / 2;
				uvHeight = yHeight / 2;
			}
			else if (format == Theorafile.th_pixel_fmt.TH_PF_422)
			{
				uvWidth = yWidth / 2;
				uvHeight = yHeight;
			}
			else if (format == Theorafile.th_pixel_fmt.TH_PF_444)
			{
				uvWidth = yWidth;
				uvHeight = yHeight;
			}
			else
			{
				throw new NotSupportedException("Unrecognized YUV format!");
			}

			yuvDataLength = (
				(yWidth * yHeight) +
				(uvWidth * uvHeight * 2)
			);

			yuvData = NativeMemory.Alloc((nuint) yuvDataLength);

			InitializeTheoraStream();

			if (Theorafile.tf_hasvideo(Handle) == 1)
			{
				RenderTexture = Texture.CreateTexture2D(
					GraphicsDevice,
					(uint) yWidth,
					(uint) yHeight,
					TextureFormat.R8G8B8A8,
					TextureUsageFlags.ColorTarget | TextureUsageFlags.Sampler
				);

				yTexture = Texture.CreateTexture2D(
					GraphicsDevice,
					(uint) yWidth,
					(uint) yHeight,
					TextureFormat.R8,
					TextureUsageFlags.Sampler
				);

				uTexture = Texture.CreateTexture2D(
					GraphicsDevice,
					(uint) uvWidth,
					(uint) uvHeight,
					TextureFormat.R8,
					TextureUsageFlags.Sampler
				);

				vTexture = Texture.CreateTexture2D(
					GraphicsDevice,
					(uint) uvWidth,
					(uint) uvHeight,
					TextureFormat.R8,
					TextureUsageFlags.Sampler
				);

				LinearSampler = new Sampler(GraphicsDevice, SamplerCreateInfo.LinearClamp);
			}

			timer = new Stopwatch();
		}

		public void Play(bool loop = false)
		{
			if (State == VideoState.Playing)
			{
				return;
			}

			Loop = loop;
			timer.Start();

			if (audioStream != null)
			{
				audioStream.Play();
			}

			State = VideoState.Playing;
		}

		public void Pause()
		{
			if (State != VideoState.Playing)
			{
				return;
			}

			timer.Stop();

			if (audioStream != null)
			{
				audioStream.Pause();
			}

			State = VideoState.Paused;
		}

		public void Stop()
		{
			if (State == VideoState.Stopped)
			{
				return;
			}

			timer.Stop();
			timer.Reset();

			Theorafile.tf_reset(Handle);
			lastTimestamp = 0;
			timeElapsed = 0;

			if (audioStream != null)
			{
				audioStream.StopImmediate();
				audioStream.Dispose();
				audioStream = null;
			}

			State = VideoState.Stopped;
		}

		public Texture GetTexture()
		{
			if (RenderTexture == null)
			{
				throw new InvalidOperationException();
			}

			if (State == VideoState.Stopped)
			{
				return RenderTexture;
			}

			timeElapsed += (timer.Elapsed.TotalMilliseconds - lastTimestamp) * PlaybackSpeed;
			lastTimestamp = timer.Elapsed.TotalMilliseconds;

			int thisFrame = ((int) (timeElapsed / (1000.0 / FramesPerSecond)));
			if (thisFrame > currentFrame)
			{
				if (Theorafile.tf_readvideo(
					Handle,
					(IntPtr) yuvData,
					thisFrame - currentFrame
				) == 1 || currentFrame == -1) {
					UpdateTexture();
				}

				currentFrame = thisFrame;
			}

			bool ended = Theorafile.tf_eos(Handle) == 1;
			if (ended)
			{
				timer.Stop();
				timer.Reset();

				if (audioStream != null)
				{
					audioStream.Stop();
					audioStream.Dispose();
					audioStream = null;
				}

				Theorafile.tf_reset(Handle);

				if (Loop)
				{
					// Start over!
					InitializeTheoraStream();

					timer.Start();
				}
				else
				{
					State = VideoState.Stopped;
				}
			}

			return RenderTexture;
		}

		private void UpdateTexture()
		{
			var commandBuffer = GraphicsDevice.AcquireCommandBuffer();

			commandBuffer.SetTextureDataYUV(
				yTexture,
				uTexture,
				vTexture,
				(IntPtr) yuvData,
				(uint) yuvDataLength
			);

			commandBuffer.BeginRenderPass(
				new ColorAttachmentInfo(RenderTexture, Color.Black)
			);

			commandBuffer.BindGraphicsPipeline(GraphicsDevice.VideoPipeline);
			commandBuffer.BindFragmentSamplers(
				new TextureSamplerBinding(yTexture, LinearSampler),
				new TextureSamplerBinding(uTexture, LinearSampler),
				new TextureSamplerBinding(vTexture, LinearSampler)
			);

			commandBuffer.DrawPrimitives(0, 1, 0, 0);

			commandBuffer.EndRenderPass();

			GraphicsDevice.Submit(commandBuffer);
		}

		private void InitializeTheoraStream()
		{
			// Grab the first video frame ASAP.
			while (Theorafile.tf_readvideo(Handle, (IntPtr) yuvData, 1) == 0);

			// Grab the first bit of audio. We're trying to start the decoding ASAP.
			if (AudioDevice != null && Theorafile.tf_hasaudio(Handle) == 1)
			{
				int channels, sampleRate;
				Theorafile.tf_audioinfo(Handle, out channels, out sampleRate);
				audioStream = new StreamingSoundTheora(AudioDevice, Handle, channels, (uint) sampleRate);
			}

			currentFrame = -1;
		}

		protected virtual void Dispose(bool disposing)
		{
			if (!disposed)
			{
				if (disposing)
				{
					// dispose managed state (managed objects)
					RenderTexture.Dispose();
					yTexture.Dispose();
					uTexture.Dispose();
					vTexture.Dispose();
				}

				// free unmanaged resources (unmanaged objects)
				Theorafile.tf_close(ref Handle);
				NativeMemory.Free(yuvData);

				disposed = true;
			}
		}

		~Video()
		{
			// Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
			Dispose(disposing: false);
		}

		public void Dispose()
		{
			// Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
			Dispose(disposing: true);
			GC.SuppressFinalize(this);
		}
	}
}