From 1c2b24f9844990a71f513f6e8c24e3cf5006af9b Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 30 Oct 2023 20:43:22 +0900 Subject: [PATCH 001/127] More abstract audio components --- osu.Framework/Audio/AudioManager.cs | 277 ++++-------------- osu.Framework/Audio/BassAudioManager.cs | 240 +++++++++++++++ osu.Framework/Audio/Sample/Sample.cs | 6 + osu.Framework/Audio/Sample/SampleBass.cs | 4 +- .../Audio/Sample/SampleBassFactory.cs | 46 +-- osu.Framework/Audio/Sample/SampleFactory.cs | 71 +++++ osu.Framework/Audio/Sample/SampleStore.cs | 15 +- osu.Framework/Audio/Track/TrackStore.cs | 14 +- 8 files changed, 400 insertions(+), 273 deletions(-) create mode 100644 osu.Framework/Audio/BassAudioManager.cs create mode 100644 osu.Framework/Audio/Sample/SampleFactory.cs diff --git a/osu.Framework/Audio/AudioManager.cs b/osu.Framework/Audio/AudioManager.cs index 7ddcb822e6..72fc7a09d2 100644 --- a/osu.Framework/Audio/AudioManager.cs +++ b/osu.Framework/Audio/AudioManager.cs @@ -6,55 +6,33 @@ using System; using System.Collections.Generic; using System.Collections.Immutable; -using System.Diagnostics; -using System.Linq; +using System.IO; using System.Threading; -using ManagedBass; -using ManagedBass.Fx; -using ManagedBass.Mix; using osu.Framework.Audio.Mixing; -using osu.Framework.Audio.Mixing.Bass; using osu.Framework.Audio.Sample; using osu.Framework.Audio.Track; using osu.Framework.Bindables; -using osu.Framework.Development; -using osu.Framework.Extensions.TypeExtensions; using osu.Framework.IO.Stores; -using osu.Framework.Logging; using osu.Framework.Threading; namespace osu.Framework.Audio { - public class AudioManager : AudioCollectionManager + public abstract class AudioManager : AudioCollectionManager { /// - /// The number of BASS audio devices preceding the first real audio device. - /// Consisting of and . + /// The thread audio operations (mainly Bass calls) are ran on. /// - protected const int BASS_INTERNAL_DEVICE_COUNT = 2; - - /// - /// The index of the BASS audio device denoting the OS default. - /// - /// - /// See http://www.un4seen.com/doc/#bass/BASS_CONFIG_DEV_DEFAULT.html for more information on the included device. - /// - private const int bass_default_device = 1; + private readonly AudioThread audioThread; /// /// The manager component responsible for audio tracks (e.g. songs). /// - public ITrackStore Tracks => globalTrackStore.Value; + public ITrackStore Tracks => AudioGlobalTrackStore.Value; /// /// The manager component responsible for audio samples (e.g. sound effects). /// - public ISampleStore Samples => globalSampleStore.Value; - - /// - /// The thread audio operations (mainly Bass calls) are ran on. - /// - private readonly AudioThread thread; + public ISampleStore Samples => AudioGlobalSampleStore.Value; /// /// The global mixer which all tracks are routed into by default. @@ -72,7 +50,7 @@ public class AudioManager : AudioCollectionManager /// /// This property does not contain the names of disabled audio devices. /// - public IEnumerable AudioDeviceNames => audioDeviceNames; + public IEnumerable AudioDeviceNames => DeviceNames; /// /// Is fired whenever a new audio device is discovered and provides its name. @@ -108,31 +86,25 @@ public class AudioManager : AudioCollectionManager MaxValue = 1 }; - public override bool IsLoaded => base.IsLoaded && - // bass default device is a null device (-1), not the actual system default. - Bass.CurrentDevice != Bass.DefaultDevice; - // Mutated by multiple threads, must be thread safe. 
- private ImmutableList audioDevices = ImmutableList.Empty; - private ImmutableList audioDeviceNames = ImmutableList.Empty; + protected ImmutableList DeviceNames = ImmutableList.Empty; - private Scheduler scheduler => thread.Scheduler; + private Scheduler scheduler => audioThread.Scheduler; private Scheduler eventScheduler => EventScheduler ?? scheduler; private readonly CancellationTokenSource cancelSource = new CancellationTokenSource(); - private readonly DeviceInfoUpdateComparer updateComparer = new DeviceInfoUpdateComparer(); /// /// The scheduler used for invoking publicly exposed delegate events. /// public Scheduler EventScheduler; - internal IBindableList ActiveMixers => activeMixers; - private readonly BindableList activeMixers = new BindableList(); + internal IBindableList ActiveMixers => AudioActiveMixers; + protected readonly BindableList AudioActiveMixers = new BindableList(); - private readonly Lazy globalTrackStore; - private readonly Lazy globalSampleStore; + private protected readonly Lazy AudioGlobalTrackStore; + private protected readonly Lazy AudioGlobalSampleStore; /// /// Constructs an AudioStore given a track resource store, and a sample resource store. @@ -140,28 +112,28 @@ public class AudioManager : AudioCollectionManager /// The host's audio thread. /// The resource store containing all audio tracks to be used in the future. /// The sample store containing all audio samples to be used in the future. - public AudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) + protected AudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) { - thread = audioThread; + this.audioThread = audioThread; - thread.RegisterManager(this); + this.audioThread.RegisterManager(this); AudioDevice.ValueChanged += onDeviceChanged; - AddItem(TrackMixer = createAudioMixer(null, nameof(TrackMixer))); - AddItem(SampleMixer = createAudioMixer(null, nameof(SampleMixer))); + AddItem(TrackMixer = AudioCreateAudioMixer(null, nameof(TrackMixer))); + AddItem(SampleMixer = AudioCreateAudioMixer(null, nameof(SampleMixer))); - globalTrackStore = new Lazy(() => + AudioGlobalTrackStore = new Lazy(() => { - var store = new TrackStore(trackStore, TrackMixer); + var store = new TrackStore(trackStore, TrackMixer, GetNewTrack); AddItem(store); store.AddAdjustment(AdjustableProperty.Volume, VolumeTrack); return store; }); - globalSampleStore = new Lazy(() => + AudioGlobalSampleStore = new Lazy(() => { - var store = new SampleStore(sampleStore, SampleMixer); + var store = new SampleStore(sampleStore, SampleMixer, GetSampleFactory); AddItem(store); store.AddAdjustment(AdjustableProperty.Volume, VolumeSample); return store; @@ -192,11 +164,15 @@ public AudioManager(AudioThread audioThread, ResourceStore trackStore, R }); } + internal abstract Track.Track GetNewTrack(Stream data, string name); + + internal abstract SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency); + protected override void Dispose(bool disposing) { cancelSource.Cancel(); - thread.UnregisterManager(this); + audioThread.UnregisterManager(this); OnNewDevice = null; OnLostDevice = null; @@ -206,7 +182,7 @@ protected override void Dispose(bool disposing) private void onDeviceChanged(ValueChangedEvent args) { - scheduler.Add(() => setAudioDevice(args.NewValue)); + scheduler.Add(() => SetAudioDevice(args.NewValue)); } private void onDevicesChanged() @@ -217,7 +193,7 @@ private void onDevicesChanged() return; if 
(!IsCurrentDeviceValid()) - setAudioDevice(); + SetAudioDevice(); }); } @@ -231,27 +207,22 @@ private void onDevicesChanged() /// /// An identifier displayed on the audio mixer visualiser. public AudioMixer CreateAudioMixer(string identifier = default) => - createAudioMixer(SampleMixer, !string.IsNullOrEmpty(identifier) ? identifier : $"user #{Interlocked.Increment(ref userMixerID)}"); + AudioCreateAudioMixer(SampleMixer, !string.IsNullOrEmpty(identifier) ? identifier : $"user #{Interlocked.Increment(ref userMixerID)}"); - private AudioMixer createAudioMixer(AudioMixer globalMixer, string identifier) - { - var mixer = new BassAudioMixer(globalMixer, identifier); - AddItem(mixer); - return mixer; - } + protected abstract AudioMixer AudioCreateAudioMixer(AudioMixer globalMixer, string identifier); protected override void ItemAdded(AudioComponent item) { base.ItemAdded(item); if (item is AudioMixer mixer) - activeMixers.Add(mixer); + AudioActiveMixers.Add(mixer); } protected override void ItemRemoved(AudioComponent item) { base.ItemRemoved(item); if (item is AudioMixer mixer) - activeMixers.Remove(mixer); + AudioActiveMixers.Remove(mixer); } /// @@ -262,10 +233,10 @@ protected override void ItemRemoved(AudioComponent item) /// The to use for tracks created by this store. Defaults to the global . public ITrackStore GetTrackStore(IResourceStore store = null, AudioMixer mixer = null) { - if (store == null) return globalTrackStore.Value; + if (store == null) return AudioGlobalTrackStore.Value; - TrackStore tm = new TrackStore(store, mixer ?? TrackMixer); - globalTrackStore.Value.AddItem(tm); + TrackStore tm = new TrackStore(store, mixer ?? TrackMixer, GetNewTrack); + AudioGlobalTrackStore.Value.AddItem(tm); return tm; } @@ -282,176 +253,40 @@ public ITrackStore GetTrackStore(IResourceStore store = null, AudioMixer /// The to use for samples created by this store. Defaults to the global . public ISampleStore GetSampleStore(IResourceStore store = null, AudioMixer mixer = null) { - if (store == null) return globalSampleStore.Value; + if (store == null) return AudioGlobalSampleStore.Value; - SampleStore sm = new SampleStore(store, mixer ?? SampleMixer); - globalSampleStore.Value.AddItem(sm); + SampleStore sm = new SampleStore(store, mixer ?? SampleMixer, GetSampleFactory); + AudioGlobalSampleStore.Value.AddItem(sm); return sm; } - /// - /// Sets the output audio device by its name. - /// This will automatically fall back to the system default device on failure. - /// - /// Name of the audio device, or null to use the configured device preference . - private bool setAudioDevice(string deviceName = null) - { - deviceName ??= AudioDevice.Value; - - // try using the specified device - int deviceIndex = audioDeviceNames.FindIndex(d => d == deviceName); - if (deviceIndex >= 0 && setAudioDevice(BASS_INTERNAL_DEVICE_COUNT + deviceIndex)) - return true; - - // try using the system default if there is any device present. - if (audioDeviceNames.Count > 0 && setAudioDevice(bass_default_device)) - return true; - - // no audio devices can be used, so try using Bass-provided "No sound" device as last resort. - if (setAudioDevice(Bass.NoSoundDevice)) - return true; - - //we're fucked. even "No sound" device won't initialise. 
- return false; - } - - private bool setAudioDevice(int deviceIndex) - { - var device = audioDevices.ElementAtOrDefault(deviceIndex); - - // device is invalid - if (!device.IsEnabled) - return false; + protected abstract bool SetAudioDevice(string deviceName = null); + protected abstract bool SetAudioDevice(int deviceIndex); - // we don't want bass initializing with real audio device on headless test runs. - if (deviceIndex != Bass.NoSoundDevice && DebugUtils.IsNUnitRunning) - return false; - - // initialize new device - bool initSuccess = InitBass(deviceIndex); - if (Bass.LastError != Errors.Already && BassUtils.CheckFaulted(false)) - return false; - - if (!initSuccess) - { - Logger.Log("BASS failed to initialize but did not provide an error code", level: LogLevel.Error); - return false; - } - - Logger.Log($@"🔈 BASS initialised - BASS version: {Bass.Version} - BASS FX version: {BassFx.Version} - BASS MIX version: {BassMix.Version} - Device: {device.Name} - Driver: {device.Driver} - Update period: {Bass.UpdatePeriod} ms - Device buffer length: {Bass.DeviceBufferLength} ms - Playback buffer length: {Bass.PlaybackBufferLength} ms"); - - //we have successfully initialised a new device. - UpdateDevice(deviceIndex); - - return true; - } - - /// - /// This method calls . - /// It can be overridden for unit testing. - /// - protected virtual bool InitBass(int device) - { - if (Bass.CurrentDevice == device) - return true; - - // this likely doesn't help us but also doesn't seem to cause any issues or any cpu increase. - Bass.UpdatePeriod = 5; - - // reduce latency to a known sane minimum. - Bass.DeviceBufferLength = 10; - Bass.PlaybackBufferLength = 100; - - // ensure there are no brief delays on audio operations (causing stream stalls etc.) after periods of silence. - Bass.DeviceNonStop = true; - - // without this, if bass falls back to directsound legacy mode the audio playback offset will be way off. - Bass.Configure(ManagedBass.Configuration.TruePlayPosition, 0); - - // For iOS devices, set the default audio policy to one that obeys the mute switch. - Bass.Configure(ManagedBass.Configuration.IOSMixAudio, 5); - - // Always provide a default device. This should be a no-op, but we have asserts for this behaviour. - Bass.Configure(ManagedBass.Configuration.IncludeDefaultDevice, true); - - // Enable custom BASS_CONFIG_MP3_OLDGAPS flag for backwards compatibility. - Bass.Configure((ManagedBass.Configuration)68, 1); - - // Disable BASS_CONFIG_DEV_TIMEOUT flag to keep BASS audio output from pausing on device processing timeout. - // See https://www.un4seen.com/forum/?topic=19601 for more information. - Bass.Configure((ManagedBass.Configuration)70, false); - - return AudioThread.InitDevice(device); - } + protected abstract bool IsDevicesUpdated(out ImmutableList newDevices, out ImmutableList lostDevices); private void syncAudioDevices() { - // audioDevices are updated if: - // - A new device is added - // - An existing device is Enabled/Disabled or set as Default - var updatedAudioDevices = EnumerateAllDevices().ToImmutableList(); - if (audioDevices.SequenceEqual(updatedAudioDevices, updateComparer)) - return; - - audioDevices = updatedAudioDevices; - - // Bass should always be providing "No sound" and "Default" device. 
- Trace.Assert(audioDevices.Count >= BASS_INTERNAL_DEVICE_COUNT, "Bass did not provide any audio devices."); - - var oldDeviceNames = audioDeviceNames; - var newDeviceNames = audioDeviceNames = audioDevices.Skip(BASS_INTERNAL_DEVICE_COUNT).Where(d => d.IsEnabled).Select(d => d.Name).ToImmutableList(); - - onDevicesChanged(); - - var newDevices = newDeviceNames.Except(oldDeviceNames).ToList(); - var lostDevices = oldDeviceNames.Except(newDeviceNames).ToList(); - - if (newDevices.Count > 0 || lostDevices.Count > 0) + if (IsDevicesUpdated(out ImmutableList newDevices, out ImmutableList lostDevices)) { - eventScheduler.Add(delegate + onDevicesChanged(); + + if (newDevices.Count > 0 || lostDevices.Count > 0) { - foreach (string d in newDevices) - OnNewDevice?.Invoke(d); - foreach (string d in lostDevices) - OnLostDevice?.Invoke(d); - }); + eventScheduler.Add(delegate + { + foreach (string d in newDevices) + OnNewDevice?.Invoke(d); + foreach (string d in lostDevices) + OnLostDevice?.Invoke(d); + }); + } } } - protected virtual IEnumerable EnumerateAllDevices() - { - int deviceCount = Bass.DeviceCount; - for (int i = 0; i < deviceCount; i++) - yield return Bass.GetDeviceInfo(i); - } - // The current device is considered valid if it is enabled, initialized, and not a fallback device. - protected virtual bool IsCurrentDeviceValid() - { - var device = audioDevices.ElementAtOrDefault(Bass.CurrentDevice); - bool isFallback = string.IsNullOrEmpty(AudioDevice.Value) ? !device.IsDefault : device.Name != AudioDevice.Value; - return device.IsEnabled && device.IsInitialized && !isFallback; - } - - public override string ToString() - { - string deviceName = audioDevices.ElementAtOrDefault(Bass.CurrentDevice).Name; - return $@"{GetType().ReadableName()} ({deviceName ?? "Unknown"})"; - } - - private class DeviceInfoUpdateComparer : IEqualityComparer - { - public bool Equals(DeviceInfo x, DeviceInfo y) => x.IsEnabled == y.IsEnabled && x.IsDefault == y.IsDefault; + protected abstract bool IsCurrentDeviceValid(); - public int GetHashCode(DeviceInfo obj) => obj.Name.GetHashCode(); - } + public abstract override string ToString(); } } diff --git a/osu.Framework/Audio/BassAudioManager.cs b/osu.Framework/Audio/BassAudioManager.cs new file mode 100644 index 0000000000..6afd611b12 --- /dev/null +++ b/osu.Framework/Audio/BassAudioManager.cs @@ -0,0 +1,240 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Diagnostics; +using System.IO; +using System.Linq; +using ManagedBass; +using ManagedBass.Fx; +using ManagedBass.Mix; +using osu.Framework.Audio.Mixing; +using osu.Framework.Audio.Mixing.Bass; +using osu.Framework.Audio.Sample; +using osu.Framework.Audio.Track; +using osu.Framework.Development; +using osu.Framework.Extensions; +using osu.Framework.Extensions.TypeExtensions; +using osu.Framework.IO.Stores; +using osu.Framework.Logging; +using osu.Framework.Threading; + +namespace osu.Framework.Audio +{ + public class BassAudioManager : AudioManager + { + /// + /// The number of BASS audio devices preceding the first real audio device. + /// Consisting of and . + /// + protected const int BASS_INTERNAL_DEVICE_COUNT = 2; + + /// + /// The index of the BASS audio device denoting the OS default. 
+ /// + /// + /// See http://www.un4seen.com/doc/#bass/BASS_CONFIG_DEV_DEFAULT.html for more information on the included device. + /// + private const int bass_default_device = 1; + + public override bool IsLoaded => base.IsLoaded && + // bass default device is a null device (-1), not the actual system default. + Bass.CurrentDevice != Bass.DefaultDevice; + + // Mutated by multiple threads, must be thread safe. + private ImmutableList audioDevices = ImmutableList.Empty; + + private readonly DeviceInfoUpdateComparer updateComparer = new DeviceInfoUpdateComparer(); + + /// + /// Constructs an AudioStore given a track resource store, and a sample resource store. + /// + /// The host's audio thread. + /// The resource store containing all audio tracks to be used in the future. + /// The sample store containing all audio samples to be used in the future. + public BassAudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) + : base(audioThread, trackStore, sampleStore) + { + } + + internal override Track.Track GetNewTrack(Stream data, string name) => new TrackBass(data, name); + + internal override SampleFactory GetSampleFactory(Stream stream, string name, AudioMixer mixer, int playbackConcurrency) + { + byte[] data; + + using (stream) + data = stream.ReadAllBytesToArray(); + + return new SampleBassFactory(data, name, (BassAudioMixer)mixer, playbackConcurrency); + } + + protected override AudioMixer AudioCreateAudioMixer(AudioMixer globalMixer, string identifier) + { + var mixer = new BassAudioMixer(globalMixer, identifier); + AddItem(mixer); + return mixer; + } + + /// + /// Sets the output audio device by its name. + /// This will automatically fall back to the system default device on failure. + /// + /// Name of the audio device, or null to use the configured device preference. + protected override bool SetAudioDevice(string deviceName = null) + { + deviceName ??= AudioDevice.Value; + + // try using the specified device + int deviceIndex = DeviceNames.FindIndex(d => d == deviceName); + if (deviceIndex >= 0 && SetAudioDevice(BASS_INTERNAL_DEVICE_COUNT + deviceIndex)) + return true; + + // try using the system default if there is any device present. + if (DeviceNames.Count > 0 && SetAudioDevice(bass_default_device)) + return true; + + // no audio devices can be used, so try using Bass-provided "No sound" device as last resort. + if (SetAudioDevice(Bass.NoSoundDevice)) + return true; + + //we're fucked. even "No sound" device won't initialise. + return false; + } + + protected override bool SetAudioDevice(int deviceIndex) + { + var device = audioDevices.ElementAtOrDefault(deviceIndex); + + // device is invalid + if (!device.IsEnabled) + return false; + + // we don't want bass initializing with real audio device on headless test runs. 
+ if (deviceIndex != Bass.NoSoundDevice && DebugUtils.IsNUnitRunning) + return false; + + // initialize new device + bool initSuccess = InitBass(deviceIndex); + if (Bass.LastError != Errors.Already && BassUtils.CheckFaulted(false)) + return false; + + if (!initSuccess) + { + Logger.Log("BASS failed to initialize but did not provide an error code", level: LogLevel.Error); + return false; + } + + Logger.Log($@"🔈 BASS initialised + BASS version: {Bass.Version} + BASS FX version: {BassFx.Version} + BASS MIX version: {BassMix.Version} + Device: {device.Name} + Driver: {device.Driver} + Update period: {Bass.UpdatePeriod} ms + Device buffer length: {Bass.DeviceBufferLength} ms + Playback buffer length: {Bass.PlaybackBufferLength} ms"); + + //we have successfully initialised a new device. + UpdateDevice(deviceIndex); + + return true; + } + + /// + /// This method calls . + /// It can be overridden for unit testing. + /// + protected virtual bool InitBass(int device) + { + if (Bass.CurrentDevice == device) + return true; + + // this likely doesn't help us but also doesn't seem to cause any issues or any cpu increase. + Bass.UpdatePeriod = 5; + + // reduce latency to a known sane minimum. + Bass.DeviceBufferLength = 10; + Bass.PlaybackBufferLength = 100; + + // ensure there are no brief delays on audio operations (causing stream stalls etc.) after periods of silence. + Bass.DeviceNonStop = true; + + // without this, if bass falls back to directsound legacy mode the audio playback offset will be way off. + Bass.Configure(ManagedBass.Configuration.TruePlayPosition, 0); + + // For iOS devices, set the default audio policy to one that obeys the mute switch. + Bass.Configure(ManagedBass.Configuration.IOSMixAudio, 5); + + // Always provide a default device. This should be a no-op, but we have asserts for this behaviour. + Bass.Configure(ManagedBass.Configuration.IncludeDefaultDevice, true); + + // Enable custom BASS_CONFIG_MP3_OLDGAPS flag for backwards compatibility. + Bass.Configure((ManagedBass.Configuration)68, 1); + + // Disable BASS_CONFIG_DEV_TIMEOUT flag to keep BASS audio output from pausing on device processing timeout. + // See https://www.un4seen.com/forum/?topic=19601 for more information. + Bass.Configure((ManagedBass.Configuration)70, false); + + return AudioThread.InitDevice(device); + } + + protected override bool IsDevicesUpdated(out ImmutableList newDevices, out ImmutableList lostDevices) + { + // audioDevices are updated if: + // - A new device is added + // - An existing device is Enabled/Disabled or set as Default + var updatedAudioDevices = EnumerateAllDevices().ToImmutableList(); + + if (audioDevices.SequenceEqual(updatedAudioDevices, updateComparer)) + { + newDevices = lostDevices = ImmutableList.Empty; + return false; + } + + audioDevices = updatedAudioDevices; + + // Bass should always be providing "No sound" and "Default" device. 
+ Trace.Assert(audioDevices.Count >= BASS_INTERNAL_DEVICE_COUNT, "Bass did not provide any audio devices."); + + var oldDeviceNames = DeviceNames; + var newDeviceNames = DeviceNames = audioDevices.Skip(BASS_INTERNAL_DEVICE_COUNT).Where(d => d.IsEnabled).Select(d => d.Name).ToImmutableList(); + + newDevices = newDeviceNames.Except(oldDeviceNames).ToImmutableList(); + lostDevices = oldDeviceNames.Except(newDeviceNames).ToImmutableList(); + return true; + } + + protected virtual IEnumerable EnumerateAllDevices() + { + int deviceCount = Bass.DeviceCount; + for (int i = 0; i < deviceCount; i++) + yield return Bass.GetDeviceInfo(i); + } + + // The current device is considered valid if it is enabled, initialized, and not a fallback device. + protected override bool IsCurrentDeviceValid() + { + var device = audioDevices.ElementAtOrDefault(Bass.CurrentDevice); + bool isFallback = string.IsNullOrEmpty(AudioDevice.Value) ? !device.IsDefault : device.Name != AudioDevice.Value; + return device.IsEnabled && device.IsInitialized && !isFallback; + } + + public override string ToString() + { + string deviceName = audioDevices.ElementAtOrDefault(Bass.CurrentDevice).Name; + return $@"{GetType().ReadableName()} ({deviceName ?? "Unknown"})"; + } + + private class DeviceInfoUpdateComparer : IEqualityComparer + { + public bool Equals(DeviceInfo x, DeviceInfo y) => x.IsEnabled == y.IsEnabled && x.IsDefault == y.IsDefault; + + public int GetHashCode(DeviceInfo obj) => obj.Name.GetHashCode(); + } + } +} diff --git a/osu.Framework/Audio/Sample/Sample.cs b/osu.Framework/Audio/Sample/Sample.cs index c0926bd6b4..02feb8fdbd 100644 --- a/osu.Framework/Audio/Sample/Sample.cs +++ b/osu.Framework/Audio/Sample/Sample.cs @@ -14,6 +14,12 @@ public abstract class Sample : AudioCollectionManager, ISample public string Name { get; } + internal Sample(SampleFactory factory, string name) + { + Name = name; + PlaybackConcurrency.BindTo(factory.PlaybackConcurrency); + } + protected Sample(string name) { Name = name; diff --git a/osu.Framework/Audio/Sample/SampleBass.cs b/osu.Framework/Audio/Sample/SampleBass.cs index 5052a2cd5e..995b72b6fc 100644 --- a/osu.Framework/Audio/Sample/SampleBass.cs +++ b/osu.Framework/Audio/Sample/SampleBass.cs @@ -15,12 +15,10 @@ internal sealed class SampleBass : Sample private readonly BassAudioMixer mixer; internal SampleBass(SampleBassFactory factory, BassAudioMixer mixer) - : base(factory.Name) + : base(factory, factory.Name) { this.factory = factory; this.mixer = mixer; - - PlaybackConcurrency.BindTo(factory.PlaybackConcurrency); } protected override SampleChannel CreateChannel() diff --git a/osu.Framework/Audio/Sample/SampleBassFactory.cs b/osu.Framework/Audio/Sample/SampleBassFactory.cs index 3b863a68b3..dd2e90e075 100644 --- a/osu.Framework/Audio/Sample/SampleBassFactory.cs +++ b/osu.Framework/Audio/Sample/SampleBassFactory.cs @@ -14,42 +14,26 @@ namespace osu.Framework.Audio.Sample /// /// A factory for objects sharing a common sample ID (and thus playback concurrency). /// - internal class SampleBassFactory : AudioCollectionManager + internal class SampleBassFactory : SampleFactory { - /// - /// A name identifying the sample to be created by this factory. - /// - public string Name { get; } - public int SampleId { get; private set; } public override bool IsLoaded => SampleId != 0; - public double Length { get; private set; } - - /// - /// Todo: Expose this to support per-sample playback concurrency once ManagedBass has been updated (https://github.com/ManagedBass/ManagedBass/pull/85). 
- /// - internal readonly Bindable PlaybackConcurrency = new Bindable(Sample.DEFAULT_CONCURRENCY); - private readonly BassAudioMixer mixer; private NativeMemoryTracker.NativeMemoryLease? memoryLease; + private byte[]? data; - public SampleBassFactory(byte[] data, string name, BassAudioMixer mixer) + public SampleBassFactory(byte[] data, string name, BassAudioMixer mixer, int playbackConcurrency) + : base(name, playbackConcurrency) { this.data = data; this.mixer = mixer; - - Name = name; - - EnqueueAction(loadSample); - - PlaybackConcurrency.BindValueChanged(updatePlaybackConcurrency); } - private void updatePlaybackConcurrency(ValueChangedEvent concurrency) + private protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) { EnqueueAction(() => { @@ -63,14 +47,7 @@ private void updatePlaybackConcurrency(ValueChangedEvent concurrency) }); } - internal override void UpdateDevice(int deviceIndex) - { - // The sample may not have already loaded if a device wasn't present in a previous load attempt. - if (!IsLoaded) - loadSample(); - } - - private void loadSample() + private protected override void LoadSample() { Debug.Assert(CanPerformInline); Debug.Assert(!IsLoaded); @@ -87,7 +64,6 @@ private void loadSample() if (Bass.LastError == Errors.Init) return; - // We've done as best as we could to init the sample. It may still have failed by some other cause (such as malformed data), but allow the GC to now clean up the locally-stored data. data = null; if (!IsLoaded) @@ -97,12 +73,9 @@ private void loadSample() memoryLease = NativeMemoryTracker.AddMemory(this, dataLength); } - public Sample CreateSample() => new SampleBass(this, mixer) { OnPlay = onPlay }; + public override Sample CreateSample() => new SampleBass(this, mixer) { OnPlay = SampleFactoryOnPlay }; - private void onPlay(Sample sample) - { - AddItem(sample); - } + private protected override void FreeSample() => Bass.SampleFree(SampleId); ~SampleBassFactory() { @@ -115,10 +88,7 @@ protected override void Dispose(bool disposing) return; if (IsLoaded) - { - Bass.SampleFree(SampleId); memoryLease?.Dispose(); - } base.Dispose(disposing); } diff --git a/osu.Framework/Audio/Sample/SampleFactory.cs b/osu.Framework/Audio/Sample/SampleFactory.cs new file mode 100644 index 0000000000..72b006ecf6 --- /dev/null +++ b/osu.Framework/Audio/Sample/SampleFactory.cs @@ -0,0 +1,71 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using osu.Framework.Bindables; + +namespace osu.Framework.Audio.Sample +{ + /// + /// A factory for objects sharing a common sample ID (and thus playback concurrency). + /// + internal abstract class SampleFactory : AudioCollectionManager + { + /// + /// A name identifying the sample to be created by this factory. + /// + public string Name { get; } + + public double Length { get; private protected set; } + + /// + /// Todo: Expose this to support per-sample playback concurrency once ManagedBass has been updated (https://github.com/ManagedBass/ManagedBass/pull/85). 
+ /// + internal readonly Bindable PlaybackConcurrency = new Bindable(Sample.DEFAULT_CONCURRENCY); + + protected SampleFactory(string name, int playbackConcurrency) + { + Name = name; + PlaybackConcurrency.Value = playbackConcurrency; + + EnqueueAction(LoadSample); + + PlaybackConcurrency.BindValueChanged(UpdatePlaybackConcurrency); + } + + private protected abstract void UpdatePlaybackConcurrency(ValueChangedEvent concurrency); + + internal override void UpdateDevice(int deviceIndex) + { + // The sample may not have already loaded if a device wasn't present in a previous load attempt. + if (!IsLoaded) + LoadSample(); + } + + private protected abstract void LoadSample(); + + public abstract Sample CreateSample(); + + private protected abstract void FreeSample(); + + protected void SampleFactoryOnPlay(Sample sample) + { + AddItem(sample); + } + + ~SampleFactory() + { + Dispose(false); + } + + protected override void Dispose(bool disposing) + { + if (IsDisposed) + return; + + if (IsLoaded) + FreeSample(); + + base.Dispose(disposing); + } + } +} diff --git a/osu.Framework/Audio/Sample/SampleStore.cs b/osu.Framework/Audio/Sample/SampleStore.cs index 5064b48d57..7e65123fb2 100644 --- a/osu.Framework/Audio/Sample/SampleStore.cs +++ b/osu.Framework/Audio/Sample/SampleStore.cs @@ -10,7 +10,6 @@ using System.Threading.Tasks; using JetBrains.Annotations; using osu.Framework.Audio.Mixing; -using osu.Framework.Audio.Mixing.Bass; using osu.Framework.IO.Stores; using osu.Framework.Statistics; @@ -20,15 +19,19 @@ internal class SampleStore : AudioCollectionManager, I { private readonly ResourceStore store; private readonly AudioMixer mixer; + private readonly GetSampleFactoryDelegate getSampleFactoryDelegate; - private readonly Dictionary factories = new Dictionary(); + private readonly Dictionary factories = new Dictionary(); + + public delegate SampleFactory GetSampleFactoryDelegate(Stream stream, string name, AudioMixer mixer, int playbackConcurrency); public int PlaybackConcurrency { get; set; } = Sample.DEFAULT_CONCURRENCY; - internal SampleStore([NotNull] IResourceStore store, [NotNull] AudioMixer mixer) + internal SampleStore([NotNull] IResourceStore store, [NotNull] AudioMixer mixer, [NotNull] GetSampleFactoryDelegate getSampleFactoryDelegate) { this.store = new ResourceStore(store); this.mixer = mixer; + this.getSampleFactoryDelegate = getSampleFactoryDelegate; AddExtension(@"wav"); AddExtension(@"mp3"); @@ -44,12 +47,12 @@ public Sample Get(string name) lock (factories) { - if (!factories.TryGetValue(name, out SampleBassFactory factory)) + if (!factories.TryGetValue(name, out SampleFactory factory)) { this.LogIfNonBackgroundThread(name); - byte[] data = store.Get(name); - factory = factories[name] = data == null ? null : new SampleBassFactory(data, name, (BassAudioMixer)mixer) { PlaybackConcurrency = { Value = PlaybackConcurrency } }; + Stream data = store.GetStream(name); + factory = factories[name] = data == null ? 
null : getSampleFactoryDelegate(data, name, mixer, PlaybackConcurrency); if (factory != null) AddItem(factory); diff --git a/osu.Framework/Audio/Track/TrackStore.cs b/osu.Framework/Audio/Track/TrackStore.cs index 83000a547d..22f2225d43 100644 --- a/osu.Framework/Audio/Track/TrackStore.cs +++ b/osu.Framework/Audio/Track/TrackStore.cs @@ -18,11 +18,15 @@ internal class TrackStore : AudioCollectionManager, IT { private readonly IResourceStore store; private readonly AudioMixer mixer; + private readonly GetNewTrackDelegate getNewTrackDelegate; - internal TrackStore([NotNull] IResourceStore store, [NotNull] AudioMixer mixer) + public delegate Track GetNewTrackDelegate(Stream dataStream, string name); + + internal TrackStore([NotNull] IResourceStore store, [NotNull] AudioMixer mixer, [NotNull] GetNewTrackDelegate getNewTrackDelegate) { this.store = store; this.mixer = mixer; + this.getNewTrackDelegate = getNewTrackDelegate; (store as ResourceStore)?.AddExtension(@"mp3"); } @@ -47,12 +51,12 @@ public Track Get(string name) if (dataStream == null) return null; - TrackBass trackBass = new TrackBass(dataStream, name); + Track track = getNewTrackDelegate(dataStream, name); - mixer.Add(trackBass); - AddItem(trackBass); + mixer.Add(track); + AddItem(track); - return trackBass; + return track; } public Task GetAsync(string name, CancellationToken cancellationToken = default) => From 525e22c11f36ecf0b80eabf5a630c822ced32eac Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 30 Oct 2023 20:43:53 +0900 Subject: [PATCH 002/127] Add SDL2 audio components --- osu.Framework/Audio/AudioDecoder.cs | 415 ++++++++++++++++++ .../Audio/Mixing/SDL2/ISDL2AudioChannel.cs | 33 ++ .../Audio/Mixing/SDL2/SDL2AudioMixer.cs | 265 +++++++++++ osu.Framework/Audio/ResamplingPlayer.cs | 131 ++++++ osu.Framework/Audio/SDL2AudioManager.cs | 242 ++++++++++ osu.Framework/Audio/SDL2AudioStream.cs | 145 ++++++ .../Audio/Sample/SampleChannelSDL2.cs | 83 ++++ osu.Framework/Audio/Sample/SampleSDL2.cs | 29 ++ .../Audio/Sample/SampleSDL2AudioPlayer.cs | 71 +++ .../Audio/Sample/SampleSDL2Factory.cs | 94 ++++ .../Audio/Track/TempoSDL2AudioPlayer.cs | 172 ++++++++ osu.Framework/Audio/Track/TrackSDL2.cs | 225 ++++++++++ .../Audio/Track/TrackSDL2AudioPlayer.cs | 241 ++++++++++ 13 files changed, 2146 insertions(+) create mode 100644 osu.Framework/Audio/AudioDecoder.cs create mode 100644 osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs create mode 100644 osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs create mode 100644 osu.Framework/Audio/ResamplingPlayer.cs create mode 100644 osu.Framework/Audio/SDL2AudioManager.cs create mode 100644 osu.Framework/Audio/SDL2AudioStream.cs create mode 100644 osu.Framework/Audio/Sample/SampleChannelSDL2.cs create mode 100644 osu.Framework/Audio/Sample/SampleSDL2.cs create mode 100644 osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs create mode 100644 osu.Framework/Audio/Sample/SampleSDL2Factory.cs create mode 100644 osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs create mode 100644 osu.Framework/Audio/Track/TrackSDL2.cs create mode 100644 osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs diff --git a/osu.Framework/Audio/AudioDecoder.cs b/osu.Framework/Audio/AudioDecoder.cs new file mode 100644 index 0000000000..b723372222 --- /dev/null +++ b/osu.Framework/Audio/AudioDecoder.cs @@ -0,0 +1,415 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. 
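// A minimal usage sketch of the decoder added in this file, assuming an already
// constructed AudioDecoder and hypothetical file names; only DecodeAudio,
// StartDecodingAsync and the PassDataDelegate shape are taken from the code below.
using System.IO;

internal static class AudioDecoderUsageSketch
{
    public static void Run(osu.Framework.Audio.AudioDecoder decoder)
    {
        // one-shot, blocking decode (the path used for samples);
        // pcm holds interleaved audio in the format given by the decoder's SDL_AudioSpec.
        byte[] pcm = decoder.DecodeAudio(File.OpenRead("hit.wav"));

        // chunked decode on the decoder thread (the path used for tracks);
        // the delegate keeps firing until `done` is true.
        decoder.StartDecodingAsync(File.OpenRead("song.mp3"), (data, userdata, job, done) =>
        {
            // append `data` to a playback queue here
        }, null);
    }
}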
+ +using System; +using System.IO; +using ManagedBass; +using osu.Framework.Audio.Callbacks; +using SDL2; +using System.Threading; +using osu.Framework.Logging; +using System.Collections.Generic; +using osu.Framework.Graphics.Video; + +namespace osu.Framework.Audio +{ + /// + /// Decodes audio from , and convert it to appropriate format. + /// + public class AudioDecoder : IDisposable + { + public class AudioDecoderData + { + internal readonly int Rate; + internal readonly int Channels; + internal readonly bool IsTrack; + internal readonly ushort Format; + internal readonly Stream Stream; + internal readonly PassDataDelegate Pass; + internal readonly object? UserData; + + internal int DecodeStream; + internal FileCallbacks? Callbacks; + internal SDL2AudioStream? Resampler; + + internal VideoDecoder? FFmpeg; + + private volatile int bitrate; + + public int Bitrate + { + get => bitrate; + set => Interlocked.Exchange(ref bitrate, value); + } + + private double length; + + public double Length + { + get => length; + set => Interlocked.Exchange(ref length, value); + } + + private long bytelength; + + public long ByteLength + { + get => Interlocked.Read(ref bytelength); + set => Interlocked.Exchange(ref bytelength, value); + } + + internal volatile bool StopJob; + internal volatile bool Loading; + + internal AudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate pass, object? userData) + { + Rate = rate; + Channels = channels; + IsTrack = isTrack; + Format = format; + Stream = stream; + Pass = pass; + UserData = userData; + } + + public void Stop() + { + StopJob = true; + } + + // Call this in lock + internal void Dispose() + { + if (DecodeStream != 0) + { + Bass.StreamFree(DecodeStream); + DecodeStream = 0; + } + + Stream.Dispose(); + Resampler?.Dispose(); + Callbacks?.Dispose(); + FFmpeg?.Dispose(); + } + } + + private readonly LinkedList jobs = new LinkedList(); + + /// + /// Decoder will call this delegate every time some amount of data is ready. + /// + /// Decoded audio data + /// + /// + public delegate void PassDataDelegate(byte[] data, object? userdata, AudioDecoderData decoderData, bool done); + + private readonly SDL.SDL_AudioSpec spec; + + private readonly Thread decoderThread; + + /// + /// Set up configuration and start a decoding thread. + /// + /// Resample format + public AudioDecoder(SDL.SDL_AudioSpec spec) + { + this.spec = spec; + + decoderThread = new Thread(() => loop(tokenSource.Token)) + { + IsBackground = true + }; + + decoderThread.Start(); + } + + private readonly CancellationTokenSource tokenSource = new CancellationTokenSource(); + + /// + /// Start decoding in the decoding thread. + /// + /// Data stream to read + /// Delegate to pass data to + /// Object to pass to the delegate + /// + public AudioDecoderData StartDecodingAsync(Stream stream, PassDataDelegate pass, object? userData) + { + AudioDecoderData data = new AudioDecoderData(spec.freq, spec.channels, true, spec.format, stream, pass, userData); + + lock (jobs) + { + jobs.AddFirst(data); + } + + return data; + } + + private void passAudioSync(byte[] data, object? userdata, AudioDecoderData decoderData, bool done) + { + if (userdata is TempDecodeData temp) + { + if (done && temp.Stream == null) + temp.DecodedAtOnce = data; + else + { + temp.Stream ??= new MemoryStream(); + temp.Stream.Write(data); + } + } + } + + private class TempDecodeData + { + internal MemoryStream? Stream; + internal byte[]? DecodedAtOnce; + } + + /// + /// Decodes audio from stream. 
It blocks until decoding is done. + /// + /// Data stream to read. + /// Decoded audio + public byte[] DecodeAudio(Stream stream) + { + TempDecodeData tempData = new TempDecodeData(); + AudioDecoderData data = new AudioDecoderData(spec.freq, spec.channels, false, spec.format, stream, passAudioSync, tempData); + + try + { + loadFromStream(data); + + if (tempData.DecodedAtOnce != null) + return tempData.DecodedAtOnce; + + while (data.Loading) + { + loadFromStream(data); + } + + return tempData.Stream?.ToArray() ?? Array.Empty(); + } + finally + { + tempData.Stream?.Dispose(); + tempData.Stream = null; + } + } + + private void loop(CancellationToken token) + { + while (!token.IsCancellationRequested) + { + if (jobs.Count == 0) + { + Thread.Sleep(50); + continue; + } + + lock (jobs) + { + var node = jobs.First; + + while (node != null) + { + var next = node.Next; + AudioDecoderData data = node.Value; + + if (data.StopJob) + { + data.Dispose(); + jobs.Remove(node); + } + else + loadFromStream(data); + + if (!data.Loading) + jobs.Remove(node); + + node = next; + } + } + } + } + + private int loadFromStream(AudioDecoderData job) + { + try + { + if (Bass.CurrentDevice > -1) + { + if (!job.Loading) + { + job.Callbacks = new FileCallbacks(new DataStreamFileProcedures(job.Stream)); + BassFlags bassFlags = BassFlags.Decode; + if (SDL.SDL_AUDIO_ISFLOAT(job.Format)) bassFlags |= BassFlags.Float; + if (job.IsTrack) bassFlags |= BassFlags.Prescan; + job.DecodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, job.Callbacks.Callbacks); + + if (job.DecodeStream == 0) + throw new FormatException($"Couldn't create stream: {Bass.LastError}"); + + bool infoAvail = Bass.ChannelGetInfo(job.DecodeStream, out var info); + + if (infoAvail) + { + job.ByteLength = Bass.ChannelGetLength(job.DecodeStream); + job.Length = Bass.ChannelBytes2Seconds(job.DecodeStream, job.ByteLength) * 1000; + job.Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(job.DecodeStream, ChannelAttribute.Bitrate)); + + ushort srcformat; + + switch (info.Resolution) + { + case Resolution.Byte: + srcformat = SDL.AUDIO_S8; + break; + + case Resolution.Short: + srcformat = SDL.AUDIO_S16; + break; + + case Resolution.Float: + default: + srcformat = SDL.AUDIO_F32; + break; + } + + if (info.Channels != job.Channels || srcformat != job.Format || info.Frequency != job.Rate) + job.Resampler = new SDL2AudioStream(srcformat, (byte)info.Channels, info.Frequency, job.Format, (byte)job.Channels, job.Rate); + } + else + { + if (job.IsTrack) + throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); + } + + job.Loading = true; + } + + if (job.Loading) + { + if (job.DecodeStream == 0) + throw new InvalidOperationException("BASS stream is not available"); + + int bufferLen = (int)(job.IsTrack ? 
Bass.ChannelSeconds2Bytes(job.DecodeStream, 8) : job.ByteLength); + + if (bufferLen <= 0) + bufferLen = 44100 * 2 * 4; + + byte[] buffer = new byte[bufferLen]; + int got = Bass.ChannelGetData(job.DecodeStream, buffer, bufferLen); + + if (got == -1) + { + job.Loading = false; + + if (Bass.LastError != Errors.Ended) + throw new FormatException($"Couldn't decode: {Bass.LastError}"); + } + + if (Bass.StreamGetFilePosition(job.DecodeStream, FileStreamPosition.End) <= Bass.StreamGetFilePosition(job.DecodeStream)) + job.Loading = false; + + if (job.Resampler == null) + { + if (got <= 0) buffer = Array.Empty(); + else if (got != bufferLen) Array.Resize(ref buffer, got); + + job.Pass(buffer, job.UserData, job, !job.Loading); + } + else + { + if (got > 0) + job.Resampler.Put(buffer, got); + + if (!job.Loading) + job.Resampler.Flush(); + + int avail = job.Resampler.GetPendingBytes(); + + byte[] resampled = avail > 0 ? new byte[avail] : Array.Empty(); + + if (avail > 0) + job.Resampler.Get(resampled, avail); + + job.Pass(resampled, job.UserData, job, !job.Loading); + } + } + } + else + { + if (job.FFmpeg == null) + { + job.FFmpeg = new VideoDecoder(job.Stream, job.Rate, job.Channels, SDL.SDL_AUDIO_ISFLOAT(job.Format), SDL.SDL_AUDIO_BITSIZE(job.Format), SDL.SDL_AUDIO_ISSIGNED(job.Format)); + + job.FFmpeg.PrepareDecoding(); + job.FFmpeg.RecreateCodecContext(); + + job.Bitrate = (int)job.FFmpeg.Bitrate; + job.Length = job.FFmpeg.Duration; + job.ByteLength = (long)Math.Ceiling(job.FFmpeg.Duration / 1000.0d * job.Rate) * job.Channels * SDL.SDL_AUDIO_BITSIZE(job.Format); // FIXME + + job.Loading = true; + } + + job.FFmpeg.DecodeNextAudioFrame(32, out byte[] decoded, !job.IsTrack); + + if (job.FFmpeg.State != VideoDecoder.DecoderState.Running) + job.Loading = false; + + job.Pass(decoded, job.UserData, job, !job.Loading); + } + } + catch (Exception e) + { + Logger.Log(e.Message, level: LogLevel.Important); + job.Loading = false; + } + finally + { + if (!job.Loading) + job.Dispose(); + } + + return 0; + } + + private bool disposedValue; + + protected virtual void Dispose(bool disposing) + { + if (!disposedValue) + { + if (disposing) + { + tokenSource.Cancel(); + tokenSource.Dispose(); + decoderThread.Join(); + } + + lock (jobs) + { + foreach (var job in jobs) + { + job.Dispose(); + } + + jobs.Clear(); + } + + disposedValue = true; + } + } + + ~AudioDecoder() + { + Dispose(false); + } + + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + } +} diff --git a/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs b/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs new file mode 100644 index 0000000000..1e58d93b34 --- /dev/null +++ b/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs @@ -0,0 +1,33 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +namespace osu.Framework.Audio.Mixing.SDL2 +{ + /// + /// Interface for audio channels that feed audio to . + /// + internal interface ISDL2AudioChannel : IAudioChannel + { + /// + /// Returns remaining audio samples. + /// + /// Audio data needs to be put here. Length of this determines how much data needs to be filled. + /// Sample count + int GetRemainingSamples(float[] data); + + /// + /// Mixer won't call if this returns false. + /// + bool Playing { get; } + + /// + /// Mixer uses this as volume, Value should be within 0 and 1. + /// + float Volume { get; } + + /// + /// Mixer uses this to adjust channel balance. 
Value should be within -1.0 and 1.0 + /// + double Balance { get; } + } +} diff --git a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs new file mode 100644 index 0000000000..ff741d7495 --- /dev/null +++ b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs @@ -0,0 +1,265 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using System.Collections.Generic; +using System.Collections.Specialized; +using System.Diagnostics; +using System.Linq; +using ManagedBass; +using ManagedBass.Fx; +using osu.Framework.Bindables; +using osu.Framework.Extensions.ObjectExtensions; +using osu.Framework.Statistics; +using NAudio.Dsp; + +namespace osu.Framework.Audio.Mixing.SDL2 +{ + /// + /// Mixes instances and applies effects on top of them. + /// + internal class SDL2AudioMixer : AudioMixer + { + private readonly object syncRoot = new object(); + + /// + /// List of instances that are active. + /// + private readonly LinkedList activeChannels = new LinkedList(); + + /// + /// Creates a new + /// + /// + /// An identifier displayed on the audio mixer visualiser. + public SDL2AudioMixer(AudioMixer? globalMixer, string identifier) + : base(globalMixer, identifier) + { + EnqueueAction(() => Effects.BindCollectionChanged(onEffectsChanged, true)); + } + + public override BindableList Effects { get; } = new BindableList(); + + protected override void AddInternal(IAudioChannel channel) + { + if (channel is not ISDL2AudioChannel sdlChannel) + return; + + lock (syncRoot) + activeChannels.AddLast(sdlChannel); + } + + protected override void RemoveInternal(IAudioChannel channel) + { + if (channel is not ISDL2AudioChannel sdlChannel) + return; + + lock (syncRoot) + activeChannels.Remove(sdlChannel); + } + + protected override void UpdateState() + { + FrameStatistics.Add(StatisticsCounterType.MixChannels, channelCount); + base.UpdateState(); + } + + // https://github.com/libsdl-org/SDL/blob/SDL2/src/audio/SDL_mixer.c#L292 + private const float max_vol = 3.402823466e+38F; + private const float min_vol = -3.402823466e+38F; + + private void mixAudio(float[] dst, float[] src, int samples, float left, float right) + { + if (left <= 0 && right <= 0) + return; + + for (int i = 0; i < samples; i++) + dst[i] = Math.Clamp(src[i] * ((i % 2) == 0 ? left : right) + dst[i], min_vol, max_vol); + } + + private float[]? ret; + + private volatile int channelCount; + + /// + /// Mix into a float array given as an argument. + /// + /// A float array that audio will be mixed into. + public void MixChannelsInto(float[] data) + { + lock (syncRoot) + { + int sampleCount = data.Length; + if (ret == null || sampleCount != ret.Length) + ret = new float[sampleCount]; + + bool useFilters = audioFilters.Count > 0; + float[] put = useFilters ? 
new float[sampleCount] : data; + + var node = activeChannels.First; + + while (node != null) + { + var next = node.Next; + var channel = node.Value; + + if (!(channel is AudioComponent ac && ac.IsAlive)) + { + activeChannels.Remove(node); + } + else if (channel.Playing) + { + int size = channel.GetRemainingSamples(ret); + float left = 1; + float right = 1; + + if (size > 0) + { + if (channel.Balance < 0) + right += (float)channel.Balance; + else if (channel.Balance > 0) + left -= (float)channel.Balance; + + right *= channel.Volume; + left *= channel.Volume; + + mixAudio(put, ret, size, left, right); + } + } + + node = next; + } + + channelCount = activeChannels.Count; + + if (useFilters) + { + for (int i = 0; i < sampleCount; i++) + { + foreach (var filter in audioFilters) + { + if (filter.BiQuadFilter != null) + put[i] = filter.BiQuadFilter.Transform(put[i]); + } + } + + mixAudio(data, put, sampleCount, 1, 1); + } + } + } + + private readonly List audioFilters = new List(); + + private void onEffectsChanged(object? sender, NotifyCollectionChangedEventArgs e) => EnqueueAction(() => + { + lock (syncRoot) + { + switch (e.Action) + { + case NotifyCollectionChangedAction.Add: + { + Debug.Assert(e.NewItems != null); + int startIndex = Math.Max(0, e.NewStartingIndex); + audioFilters.InsertRange(startIndex, e.NewItems.OfType().Select(eff => new EffectBox(eff))); + break; + } + + case NotifyCollectionChangedAction.Move: + { + EffectBox effect = audioFilters[e.OldStartingIndex]; + audioFilters.RemoveAt(e.OldStartingIndex); + audioFilters.Insert(e.NewStartingIndex, effect); + break; + } + + case NotifyCollectionChangedAction.Remove: + { + Debug.Assert(e.OldItems != null); + + audioFilters.RemoveRange(e.OldStartingIndex, e.OldItems.Count); + break; + } + + case NotifyCollectionChangedAction.Replace: + { + Debug.Assert(e.NewItems != null); + + EffectBox newFilter = new EffectBox((IEffectParameter)e.NewItems[0].AsNonNull()); + audioFilters[e.NewStartingIndex] = newFilter; + break; + } + + case NotifyCollectionChangedAction.Reset: + { + audioFilters.Clear(); + break; + } + } + } + }); + + internal class EffectBox + { + public readonly BiQuadFilter? 
BiQuadFilter; + + public EffectBox(IEffectParameter param) + { + // allowing non-bqf to keep index of list + if (param is BQFParameters bqfp) + BiQuadFilter = getFilter(44100, bqfp); + } + } + + private static BiQuadFilter getFilter(float freq, BQFParameters bqfp) + { + BiQuadFilter filter; + + switch (bqfp.lFilter) + { + case BQFType.LowPass: + filter = BiQuadFilter.LowPassFilter(freq, bqfp.fCenter, bqfp.fQ); + break; + + case BQFType.HighPass: + filter = BiQuadFilter.HighPassFilter(freq, bqfp.fCenter, bqfp.fQ); + break; + + case BQFType.BandPass: + filter = BiQuadFilter.BandPassFilterConstantPeakGain(freq, bqfp.fCenter, bqfp.fQ); + break; + + case BQFType.BandPassQ: + filter = BiQuadFilter.BandPassFilterConstantSkirtGain(freq, bqfp.fCenter, bqfp.fQ); + break; + + case BQFType.Notch: + filter = BiQuadFilter.NotchFilter(freq, bqfp.fCenter, bqfp.fQ); + break; + + case BQFType.PeakingEQ: + filter = BiQuadFilter.PeakingEQ(freq, bqfp.fCenter, bqfp.fQ, bqfp.fGain); + break; + + case BQFType.LowShelf: + filter = BiQuadFilter.LowShelf(freq, bqfp.fCenter, bqfp.fS, bqfp.fGain); + break; + + case BQFType.HighShelf: + filter = BiQuadFilter.HighShelf(freq, bqfp.fCenter, bqfp.fS, bqfp.fGain); + break; + + case BQFType.AllPass: + default: // NAudio BiQuadFilter covers all, this default is kind of meaningless + filter = BiQuadFilter.AllPassFilter(freq, bqfp.fCenter, bqfp.fQ); + break; + } + + return filter; + } + + public void StreamFree(IAudioChannel channel) + { + Remove(channel, false); + } + } +} diff --git a/osu.Framework/Audio/ResamplingPlayer.cs b/osu.Framework/Audio/ResamplingPlayer.cs new file mode 100644 index 0000000000..c20f19b79a --- /dev/null +++ b/osu.Framework/Audio/ResamplingPlayer.cs @@ -0,0 +1,131 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using NAudio.Dsp; + +namespace osu.Framework.Audio +{ + /// + /// Abstract class that's meant to be used with a real player implementation. + /// This class provides resampling on the fly for players. + /// + internal abstract class ResamplingPlayer + { + private double relativeRate = 1; + + /// + /// Represents current relative rate. + /// + public double RelativeRate + { + get => relativeRate; + set => setRate(value); + } + + private WdlResampler? resampler; + + protected readonly int SrcRate; + protected readonly byte SrcChannels; + + /// + /// Creates a new . + /// + /// Sampling rate of audio that's given from or + /// Channels of audio that's given from or + protected ResamplingPlayer(int srcRate, byte srcChannels) + { + SrcRate = srcRate; + SrcChannels = srcChannels; + } + + /// + /// Sets relative rate of audio. + /// + /// Rate that is relative to the original frequency. 1.0 is normal rate. + private void setRate(double relativeRate) + { + if (relativeRate == 0) + { + this.relativeRate = relativeRate; + return; + } + + if (relativeRate < 0 || this.relativeRate == relativeRate) + return; + + if (resampler == null) + { + resampler = new WdlResampler(); + resampler.SetMode(true, 2, false); + resampler.SetFilterParms(); + resampler.SetFeedMode(false); + } + + resampler.SetRates(SrcRate, SrcRate / relativeRate); + this.relativeRate = relativeRate; + } + + protected virtual double GetProcessingLatency() + { + if (resampler == null || RelativeRate == 1) + return 0; + + return resampler.GetCurrentLatency() * 1000.0d; + } + + /// + /// Returns rate adjusted audio samples. It calls a parent method if is 1. 
+ /// + /// An array to put samples in + /// The number of samples put into the array + public virtual int GetRemainingSamples(float[] data) + { + if (RelativeRate == 0) + return 0; + + if (resampler == null || RelativeRate == 1) + return GetRemainingRawFloats(data, 0, data.Length); + + int requested = data.Length / SrcChannels; + int needed = resampler.ResamplePrepare(requested, SrcChannels, out float[] inBuffer, out int inBufferOffset); + int rawGot = GetRemainingRawFloats(inBuffer, inBufferOffset, needed * SrcChannels); + + if (rawGot > 0) + { + int got = resampler.ResampleOut(data, 0, rawGot / SrcChannels, requested, SrcChannels); + return got * SrcChannels; + } + + return 0; + } + + // must implement either (preferably float one) + + private byte[]? bytes; + + protected virtual int GetRemainingRawFloats(float[] data, int offset, int needed) + { + if (bytes == null || needed * 4 != bytes.Length) + bytes = new byte[needed * 4]; + + int got = GetRemainingRawBytes(bytes); + + if (got > 0) Buffer.BlockCopy(bytes, 0, data, offset * 4, got); + return got / 4; + } + + private float[]? floats; + + protected virtual int GetRemainingRawBytes(byte[] data) + { + if (floats == null || data.Length / 4 != floats.Length) + floats = new float[data.Length / 4]; + + int got = GetRemainingRawFloats(floats, 0, floats.Length); + + if (got > 0) Buffer.BlockCopy(floats, 0, data, 0, got * 4); + return got * 4; + } + } +} diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs new file mode 100644 index 0000000000..333d7e7ba1 --- /dev/null +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -0,0 +1,242 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.IO; +using System.Linq; +using osu.Framework.Audio.Mixing; +using osu.Framework.Audio.Mixing.SDL2; +using osu.Framework.Audio.Sample; +using osu.Framework.Audio.Track; +using osu.Framework.Extensions.TypeExtensions; +using osu.Framework.IO.Stores; +using osu.Framework.Logging; +using osu.Framework.Threading; +using SDL2; + +namespace osu.Framework.Audio +{ + public class SDL2AudioManager : AudioManager + { + private volatile uint deviceId; + + private SDL.SDL_AudioSpec spec; + + private readonly AudioDecoder decoder; + + private readonly List sdlMixerList = new List(); + + /// + /// Creates a new . + /// + /// The host's audio thread. + /// The resource store containing all audio tracks to be used in the future. + /// The sample store containing all audio samples to be used in the future. + public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) + : base(audioThread, trackStore, sampleStore) + { + // Must not edit this except for samples, as components (especially mixer) expects this to match. 
+ spec = new SDL.SDL_AudioSpec + { + freq = 44100, + channels = 2, + format = SDL.AUDIO_F32, + callback = audioCallback, + samples = 256 // determines latency, this value can be changed but is already reasonably low + }; + + // comment below lines if you want to use FFmpeg to decode audio, AudioDecoder will use FFmpeg if no BASS device is available + EnqueueAction(() => + { + ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1); + AudioThread.InitDevice(0); + }); + + decoder = new AudioDecoder(spec); + } + + private string currentDeviceName = "Not loaded"; + + public override string ToString() + { + return $@"{GetType().ReadableName()} ({currentDeviceName})"; + } + + protected override AudioMixer AudioCreateAudioMixer(AudioMixer globalMixer, string identifier) + { + var mixer = new SDL2AudioMixer(globalMixer, identifier); + AddItem(mixer); + return mixer; + } + + protected override void ItemAdded(AudioComponent item) + { + base.ItemAdded(item); + + if (item is SDL2AudioMixer mixer) + { + try + { + if (deviceId != 0) + SDL.SDL_LockAudioDevice(deviceId); + + sdlMixerList.Add(mixer); + } + finally + { + if (deviceId != 0) + SDL.SDL_UnlockAudioDevice(deviceId); + } + } + } + + protected override void ItemRemoved(AudioComponent item) + { + base.ItemRemoved(item); + + if (item is SDL2AudioMixer mixer) + { + try + { + if (deviceId != 0) + SDL.SDL_LockAudioDevice(deviceId); + + sdlMixerList.Remove(mixer); + } + finally + { + if (deviceId != 0) + SDL.SDL_UnlockAudioDevice(deviceId); + } + } + } + + private void audioCallback(IntPtr userdata, IntPtr stream, int bufsize) + { + try + { + float[] main = new float[bufsize / 4]; + + foreach (var mixer in sdlMixerList) + { + if (mixer.IsAlive) + mixer.MixChannelsInto(main); + } + + unsafe + { + fixed (float* mainPtr = main) + Buffer.MemoryCopy(mainPtr, stream.ToPointer(), bufsize, bufsize); + } + } + catch (Exception e) + { + Logger.Error(e, "Error while pushing audio to SDL"); + } + } + + protected override bool IsDevicesUpdated(out ImmutableList newDevices, out ImmutableList lostDevices) + { + var updatedAudioDevices = EnumerateAllDevices().ToImmutableList(); + + if (DeviceNames.SequenceEqual(updatedAudioDevices)) + { + newDevices = lostDevices = ImmutableList.Empty; + return false; + } + + newDevices = updatedAudioDevices.Except(DeviceNames).ToImmutableList(); + lostDevices = DeviceNames.Except(updatedAudioDevices).ToImmutableList(); + + DeviceNames = updatedAudioDevices; + return true; + } + + protected virtual IEnumerable EnumerateAllDevices() + { + int deviceCount = SDL.SDL_GetNumAudioDevices(0); // it may return -1 if only default device is available (sound server) + for (int i = 0; i < deviceCount; i++) + yield return SDL.SDL_GetAudioDeviceName(i, 0); + } + + protected override bool SetAudioDevice(string deviceName = null) + { + if (!AudioDeviceNames.Contains(deviceName)) + deviceName = null; + + if (deviceId > 0) + SDL.SDL_CloseAudioDevice(deviceId); + + // Let audio driver adjust latency, this may set to a high value on Windows, but let's just be safe + const uint flag = SDL.SDL_AUDIO_ALLOW_SAMPLES_CHANGE; + deviceId = SDL.SDL_OpenAudioDevice(deviceName, 0, ref spec, out var outspec, (int)flag); + + if (deviceId == 0) + { + if (deviceName == null) + { + Logger.Log("SDL Audio init failed!", level: LogLevel.Error); + return false; + } + + Logger.Log("SDL Audio init failed, try using default device...", level: LogLevel.Important); + return SetAudioDevice(); + } + + spec = outspec; + + // Start playback + SDL.SDL_PauseAudioDevice(deviceId, 
0); + + currentDeviceName = deviceName ?? "Default"; + + Logger.Log($@"🔈 SDL Audio initialised + Driver: {SDL.SDL_GetCurrentAudioDriver()} + Device Name: {currentDeviceName} + Frequency: {spec.freq} hz + Channels: {spec.channels} + Format: {(SDL.SDL_AUDIO_ISSIGNED(spec.format) ? "" : "un")}signed {SDL.SDL_AUDIO_BITSIZE(spec.format)} bits{(SDL.SDL_AUDIO_ISFLOAT(spec.format) ? " (float)" : "")} + Samples: {spec.samples} samples + Buffer size: {spec.size} bytes"); + + return true; + } + + protected override bool SetAudioDevice(int deviceIndex) + { + if (deviceIndex < DeviceNames.Count && deviceIndex >= 0) + return SetAudioDevice(DeviceNames[deviceIndex]); + + return SetAudioDevice(); + } + + protected override bool IsCurrentDeviceValid() => SDL.SDL_GetAudioDeviceStatus(deviceId) != SDL.SDL_AudioStatus.SDL_AUDIO_STOPPED; + + internal override Track.Track GetNewTrack(Stream data, string name) + { + TrackSDL2 track = new TrackSDL2(name, spec.freq, spec.channels, spec.samples); + decoder.StartDecodingAsync(data, track.AddToQueue, null); + return track; + } + + internal override SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) + => new SampleSDL2Factory(data, name, (SDL2AudioMixer)mixer, playbackConcurrency, spec, decoder); + + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + + decoder.Dispose(); + + if (deviceId > 0) + { + SDL.SDL_CloseAudioDevice(deviceId); + deviceId = 0; + } + } + } +} diff --git a/osu.Framework/Audio/SDL2AudioStream.cs b/osu.Framework/Audio/SDL2AudioStream.cs new file mode 100644 index 0000000000..d73d46efac --- /dev/null +++ b/osu.Framework/Audio/SDL2AudioStream.cs @@ -0,0 +1,145 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using System.Runtime.InteropServices; +using SDL2; + +namespace osu.Framework.Audio +{ + /// + /// Wrapper for SDL_AudioStream, which is a built-in audio converter. + /// + public class SDL2AudioStream : AudioComponent + { + private IntPtr stream = IntPtr.Zero; + + public ushort SrcFormat { get; private set; } + public byte SrcChannels { get; private set; } + public int SrcRate { get; private set; } + + public ushort DstFormat { get; private set; } + public byte DstChannels { get; private set; } + public int DstRate { get; private set; } + + /// + /// Creates a new . + /// + /// Source SDL_AudioFormat + /// Source channels + /// Source sample rate + /// Destination SDL_AudioFormat + /// Destination Channels + /// Destination sample rate + /// Thrown if SDL refuses to create a stream. + public SDL2AudioStream(ushort srcFormat, byte srcChannels, int srcRate, ushort dstFormat, byte dstChannels, int dstRate) + { + SrcFormat = srcFormat; + SrcChannels = srcChannels; + SrcRate = srcRate; + + if (!UpdateStream(dstFormat, dstChannels, dstRate)) + throw new FormatException("Failed creating resampling stream"); + } + + /// + /// Recreates the stream. 
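+ /// For example (hypothetical values), retargeting the converter to a 48 kHz stereo float output:
+ ///   stream.UpdateStream(SDL.AUDIO_F32, 2, 48000);
+ /// The old SDL_AudioStream is freed first, so any converted samples still queued in it are discarded.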
+ /// + /// Destination SDL_AudioFormat + /// Destination Channels + /// Destination sample rate + /// False if failed + public bool UpdateStream(ushort dstFormat, byte dstChannels, int dstRate) + { + if (stream != IntPtr.Zero) + SDL.SDL_FreeAudioStream(stream); + + // SDL3 may support this in a better way + stream = SDL.SDL_NewAudioStream(SrcFormat, SrcChannels, SrcRate, dstFormat, dstChannels, dstRate); + + if (stream != IntPtr.Zero) + { + DstFormat = dstFormat; + DstChannels = dstChannels; + DstRate = dstRate; + return true; + } + + return false; + } + + /// + /// Returns available samples in bytes. + /// + public int GetPendingBytes() + { + return SDL.SDL_AudioStreamAvailable(stream); + } + + /// + /// Put samples in the stream. + /// + /// Data to put + /// Data length in bytes + /// False if failed + public unsafe bool Put(byte[] data, int len) + { + fixed (byte* p = data) + { + IntPtr ptr = new IntPtr(p); + return SDL.SDL_AudioStreamPut(stream, ptr, len) == 0; + } + } + + /// + /// Get samples from the stream. + /// + /// An array that stream will put data into + /// Maximum data length in bytes + /// Returned data length in bytes + public unsafe int Get(byte[] data, int len) + { + fixed (byte* p = data) + { + IntPtr ptr = new IntPtr(p); + return SDL.SDL_AudioStreamGet(stream, ptr, len); + } + } + + // it is not available in sdl2-cs, will make a pr in future + [DllImport("SDL2", CallingConvention = CallingConvention.Cdecl)] + private static extern void SDL_AudioStreamFlush(IntPtr stream); + + /// + /// Flushes the stream. + /// + public void Flush() + { + SDL_AudioStreamFlush(stream); + } + + /// + /// Clears the stream. + /// + public void Clear() + { + SDL.SDL_AudioStreamClear(stream); + } + + ~SDL2AudioStream() + { + Dispose(false); + } + + protected override void Dispose(bool disposing) + { + if (IsDisposed) + return; + + if (stream != IntPtr.Zero) + SDL.SDL_FreeAudioStream(stream); + + base.Dispose(disposing); + } + } +} diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs new file mode 100644 index 0000000000..9f2f8cda2f --- /dev/null +++ b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs @@ -0,0 +1,83 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. 
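+
+// Bridges a playing sample into SDL2AudioMixer: the channel is registered with the mixer by
+// SampleSDL2.CreateChannel, and audio is pulled through ISDL2AudioChannel.GetRemainingSamples
+// whenever the mixer fills the device buffer.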
+ +using osu.Framework.Audio.Mixing.SDL2; + +namespace osu.Framework.Audio.Sample +{ + internal sealed class SampleChannelSDL2 : SampleChannel, ISDL2AudioChannel + { + private readonly SampleSDL2AudioPlayer player; + + private volatile bool playing; + public override bool Playing => playing; + + public SampleChannelSDL2(SampleSDL2 sample, SampleSDL2AudioPlayer player) + : base(sample.Name) + { + this.player = player; + } + + public override void Play() + { + started = false; + playing = true; + base.Play(); + } + + public override void Stop() + { + playing = false; + started = false; + base.Stop(); + } + + private volatile bool started; + + int ISDL2AudioChannel.GetRemainingSamples(float[] data) + { + if (player.RelativeRate != AggregateFrequency.Value) + player.RelativeRate = AggregateFrequency.Value; + + if (player.Loop != Looping) + player.Loop = Looping; + + if (!started) + { + player.Reset(); + started = true; + } + + int ret = player.GetRemainingSamples(data); + + if (player.Done) + { + playing = false; + started = false; + } + + return ret; + } + + float ISDL2AudioChannel.Volume => (float)AggregateVolume.Value; + + bool ISDL2AudioChannel.Playing => playing; + + double ISDL2AudioChannel.Balance => AggregateBalance.Value; + + ~SampleChannelSDL2() + { + Dispose(false); + } + + protected override void Dispose(bool disposing) + { + if (IsDisposed) + return; + + (Mixer as SDL2AudioMixer)?.StreamFree(this); + + base.Dispose(disposing); + } + } +} diff --git a/osu.Framework/Audio/Sample/SampleSDL2.cs b/osu.Framework/Audio/Sample/SampleSDL2.cs new file mode 100644 index 0000000000..c10c7205ad --- /dev/null +++ b/osu.Framework/Audio/Sample/SampleSDL2.cs @@ -0,0 +1,29 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using osu.Framework.Audio.Mixing.SDL2; + +namespace osu.Framework.Audio.Sample +{ + internal sealed class SampleSDL2 : Sample + { + public override bool IsLoaded => factory.IsLoaded; + + private readonly SampleSDL2Factory factory; + private readonly SDL2AudioMixer mixer; + + public SampleSDL2(SampleSDL2Factory factory, SDL2AudioMixer mixer) + : base(factory, factory.Name) + { + this.factory = factory; + this.mixer = mixer; + } + + protected override SampleChannel CreateChannel() + { + var channel = new SampleChannelSDL2(this, factory.CreatePlayer()); + mixer.Add(channel); + return channel; + } + } +} diff --git a/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs b/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs new file mode 100644 index 0000000000..ab184e7fde --- /dev/null +++ b/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs @@ -0,0 +1,71 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. 
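+
+// Plays back a sample that has already been fully decoded into a float[] buffer, with optional
+// looping and rate adjustment via ResamplingPlayer; instances are created per channel by
+// SampleSDL2Factory.CreatePlayer().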
+ +using System; + +namespace osu.Framework.Audio.Sample +{ + internal class SampleSDL2AudioPlayer : ResamplingPlayer + { + private int position; + + private volatile bool done; + public bool Done => done; + + private readonly float[] audioData; + + public bool Loop { get; set; } + + public SampleSDL2AudioPlayer(float[] audioData, int rate, byte channels) + : base(rate, channels) + { + this.audioData = audioData; + } + + protected override int GetRemainingRawFloats(float[] data, int offset, int needed) + { + if (audioData.Length <= 0) + { + done = true; + return 0; + } + + int i = 0; + + for (; i < needed;) + { + int remaining = needed - i; + int put = audioData.Length - position; + if (remaining < put) + put = remaining; + + if (put > 0) + Array.Copy(audioData, position, data, offset + i, put); + + i += put; + position += put; + + // done playing + if (position >= audioData.Length) + { + if (Loop) // back to start if looping + position = 0; + else + { + done = true; + break; + } + } + } + + return i; + } + + public void Reset(bool resetIndex = true) + { + done = false; + if (resetIndex) + position = 0; + } + } +} diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs new file mode 100644 index 0000000000..0e7e2d558d --- /dev/null +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -0,0 +1,94 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using System.Diagnostics; +using System.IO; +using osu.Framework.Audio.Mixing.SDL2; +using osu.Framework.Bindables; +using SDL2; + +namespace osu.Framework.Audio.Sample +{ + internal class SampleSDL2Factory : SampleFactory + { + private bool isLoaded; + public override bool IsLoaded => isLoaded; + + private readonly SDL2AudioMixer mixer; + private readonly SDL.SDL_AudioSpec spec; + private readonly AudioDecoder decoder; + + public float[]? DecodedAudio { get; private set; } + + private Stream? stream; + + public SampleSDL2Factory(Stream stream, string name, SDL2AudioMixer mixer, int playbackConcurrency, SDL.SDL_AudioSpec spec, AudioDecoder decoder) + : base(name, playbackConcurrency) + { + this.stream = stream; + this.mixer = mixer; + this.spec = spec; + this.decoder = decoder; + } + + private protected override void LoadSample() + { + Debug.Assert(CanPerformInline); + Debug.Assert(!IsLoaded); + + if (stream == null) + return; + + try + { + byte[] audio = decoder.DecodeAudio(stream); + + if (audio.Length > 0) + { + DecodedAudio = new float[audio.Length / 4]; + Buffer.BlockCopy(audio, 0, DecodedAudio, 0, audio.Length); + } + + Length = audio.Length / 4.0d / spec.freq / spec.channels; + isLoaded = true; + } + finally + { + stream.Dispose(); + stream = null; + } + } + + public SampleSDL2AudioPlayer CreatePlayer() => new SampleSDL2AudioPlayer(DecodedAudio ?? Array.Empty(), spec.freq, spec.channels); + + public override Sample CreateSample() => new SampleSDL2(this, mixer) { OnPlay = SampleFactoryOnPlay }; + + private protected override void FreeSample() + { + // All players created by this factory have reference to this array. + // It removes its own reference to the array, but GC will clear it once all SampleAudioPlayers for this sample are gone. 
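+ // Note that any player created after this point is handed Array.Empty<float>() (see CreatePlayer
+ // above), so it simply produces no audio and reports Done on its first read.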
+ DecodedAudio = null; + } + + private protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) + { + } + + ~SampleSDL2Factory() + { + Dispose(false); + } + + protected override void Dispose(bool disposing) + { + if (IsDisposed) + return; + + stream?.Dispose(); + stream = null; + + base.Dispose(disposing); + } + } +} diff --git a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs new file mode 100644 index 0000000000..0f713be713 --- /dev/null +++ b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs @@ -0,0 +1,172 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using SoundTouch; + +namespace osu.Framework.Audio.Track +{ + internal class TempoSDL2AudioPlayer : TrackSDL2AudioPlayer + { + private SoundTouchProcessor? soundTouch; + + private double tempo = 1; + + /// + /// Represents current speed. + /// + public double Tempo + { + get => tempo; + set => setTempo(value); + } + + private readonly int samplesize; + + private bool doneFilling; + private bool donePlaying; + + public override bool Done => base.Done && (soundTouch == null || donePlaying); + + /// + /// Creates a new . + /// + /// + /// + /// will prepare this amount of samples (or more) on every update. + public TempoSDL2AudioPlayer(int rate, byte channels, int samples) + : base(rate, channels) + { + samplesize = samples; + } + + public void FillRequiredSamples() => fillSamples(samplesize); + + /// + /// Fills SoundTouch buffer until it has a specific amount of samples. + /// + /// Needed sample count + private void fillSamples(int samples) + { + if (soundTouch == null) + return; + + while (!base.Done && soundTouch.AvailableSamples < samples) + { + int getSamples = (int)Math.Ceiling((samples - soundTouch.AvailableSamples) * Tempo) * SrcChannels; + float[] src = new float[getSamples]; + getSamples = base.GetRemainingSamples(src); + if (getSamples <= 0) + break; + + soundTouch.PutSamples(src, getSamples / SrcChannels); + } + + if (!doneFilling && base.Done) + { + soundTouch.Flush(); + doneFilling = true; + } + } + + /// + /// Sets tempo. This initializes if it's set to some value else than 1.0, and once it's set again to 1.0, it disposes . + /// + /// New tempo value + private void setTempo(double tempo) + { + if (soundTouch == null && tempo == 1.0f) + return; + + if (soundTouch == null) + { + soundTouch = new SoundTouchProcessor + { + SampleRate = SrcRate, + Channels = SrcChannels + }; + soundTouch.SetSetting(SettingId.UseQuickSeek, 1); + soundTouch.SetSetting(SettingId.OverlapDurationMs, 4); + soundTouch.SetSetting(SettingId.SequenceDurationMs, 30); + } + + if (this.tempo != tempo) + { + this.tempo = tempo; + + if (Tempo == 1.0f) + { + if (AudioData != null) + { + int latency = GetTempoLatencyInSamples() * 4 * SrcChannels; + long temp = !ReversePlayback ? AudioData.Position - latency : AudioData.Position + latency; + + if (temp >= 0) + AudioData.Position = temp; + } + + Reset(false); + soundTouch = null; + return; + } + + double tempochange = Math.Clamp((Math.Abs(tempo) - 1.0d) * 100.0d, -95, 5000); + soundTouch.TempoChange = tempochange; + FillRequiredSamples(); + } + } + + /// + /// Returns tempo and rate adjusted audio samples. It calls a parent method if is 1. 
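+ /// When SoundTouch is active the data flow is effectively:
+ ///   base.GetRemainingSamples (rate adjustment) -> soundTouch.PutSamples -> soundTouch.ReceiveSamples,
+ /// so frequency and tempo adjustments can both apply to the same output buffer.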
+ /// + /// An array to put samples in + /// The number of samples put + public override int GetRemainingSamples(float[] ret) + { + if (soundTouch == null) + return base.GetRemainingSamples(ret); + + if (RelativeRate == 0) + return 0; + + int expected = ret.Length / SrcChannels; + + if (!doneFilling && soundTouch.AvailableSamples < expected) + { + fillSamples(expected); + } + + int got = soundTouch.ReceiveSamples(ret, expected); + + if (got == 0 && doneFilling) + donePlaying = true; + + return got * SrcChannels; + } + + public override void Reset(bool resetPosition = true) + { + base.Reset(resetPosition); + soundTouch?.Flush(); + doneFilling = false; + donePlaying = false; + } + + protected int GetTempoLatencyInSamples() + { + if (soundTouch == null) + return 0; + + return (int)(soundTouch.UnprocessedSampleCount + soundTouch.AvailableSamples * Tempo); + } + + protected override double GetProcessingLatency() => base.GetProcessingLatency() + (double)GetTempoLatencyInSamples() / SrcRate * 1000.0d; + + public override void Seek(double seek) + { + base.Seek(seek); + if (soundTouch != null) + FillRequiredSamples(); + } + } +} diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs new file mode 100644 index 0000000000..82d23583f5 --- /dev/null +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -0,0 +1,225 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using System.Threading; +using System.Threading.Tasks; +using osu.Framework.Audio.Mixing.SDL2; +using osu.Framework.Extensions; + +namespace osu.Framework.Audio.Track +{ + public sealed class TrackSDL2 : Track, ISDL2AudioChannel + { + private readonly TempoSDL2AudioPlayer player; + + public override bool IsDummyDevice => false; + + private volatile bool isLoaded; + public override bool IsLoaded => isLoaded; + + private double currentTime; + public override double CurrentTime => currentTime; + + private volatile bool isRunning; + public override bool IsRunning => isRunning; + + private volatile bool hasCompleted; + public override bool HasCompleted => hasCompleted; + + private volatile int bitrate; + public override int? Bitrate => bitrate; + + public TrackSDL2(string name, int rate, byte channels, int samples) + : base(name) + { + // SoundTouch limitation + const float tempo_minimum_supported = 0.05f; + AggregateTempo.ValueChanged += t => + { + if (t.NewValue < tempo_minimum_supported) + throw new ArgumentException($"{nameof(TrackSDL2)} does not support {nameof(Tempo)} specifications below {tempo_minimum_supported}. Use {nameof(Frequency)} instead."); + }; + + player = new TempoSDL2AudioPlayer(rate, channels, samples); + } + + private readonly object syncRoot = new object(); + + private AudioDecoder.AudioDecoderData? decodeData; + + internal void AddToQueue(byte[] audio, object? 
userdata, AudioDecoder.AudioDecoderData data, bool done) + { + if (IsDisposed) + return; + + lock (syncRoot) + { + if (!player.IsLoaded) + { + if (!player.IsLoading) + player.PrepareStream(data.ByteLength); + + player.PutSamplesInStream(audio); + + if (done) + { + player.DonePutting(); + decodeData = null; + } + } + } + + if (!isLoaded) + Interlocked.Exchange(ref decodeData, data); + } + + protected override void UpdateState() + { + base.UpdateState(); + + if (decodeData != null && !isLoaded) + { + Length = decodeData.Length; + bitrate = decodeData.Bitrate; + isLoaded = true; + } + + if (player.Done && isRunning) + { + if (Looping) + { + seekInternal(RestartPoint); + } + else + { + isRunning = false; + hasCompleted = true; + RaiseCompleted(); + } + } + + if (AggregateTempo.Value != 1 && isRunning) + { + lock (syncRoot) + player.FillRequiredSamples(); + } + } + + internal override void OnStateChanged() + { + base.OnStateChanged(); + + lock (syncRoot) + { + if (!player.ReversePlayback && AggregateFrequency.Value < 0) + player.ReversePlayback = true; + else if (player.ReversePlayback && AggregateFrequency.Value >= 0) + player.ReversePlayback = false; + + player.RelativeRate = Math.Abs(AggregateFrequency.Value); + player.Tempo = AggregateTempo.Value; + } + } + + public override bool Seek(double seek) => SeekAsync(seek).GetResultSafely(); + + public override async Task SeekAsync(double seek) + { + double conservativeLength = Length == 0 ? double.MaxValue : Length; + double conservativeClamped = Math.Clamp(seek, 0, conservativeLength); + + await EnqueueAction(() => seekInternal(seek)).ConfigureAwait(false); + + return conservativeClamped == seek; + } + + private void seekInternal(double seek) + { + lock (syncRoot) + { + player.Seek(seek); + + if (seek < Length) + { + player.Reset(false); + hasCompleted = false; + } + + Interlocked.Exchange(ref currentTime, player.GetCurrentTime()); + } + } + + public override void Start() + { + if (IsDisposed) + throw new ObjectDisposedException(ToString(), "Can not start disposed tracks."); + + StartAsync().WaitSafely(); + } + + public override Task StartAsync() => EnqueueAction(() => + { + lock (syncRoot) + player.Reset(false); + + isRunning = true; + hasCompleted = false; + }); + + public override void Stop() => StopAsync().WaitSafely(); + + public override Task StopAsync() => EnqueueAction(() => + { + isRunning = false; + }); + + int ISDL2AudioChannel.GetRemainingSamples(float[] data) + { + if (!IsLoaded) return 0; + + int ret; + + lock (syncRoot) + { + ret = player.GetRemainingSamples(data); + Interlocked.Exchange(ref currentTime, player.GetCurrentTime()); + } + + if (ret < 0) + { + EnqueueAction(RaiseFailed); + return 0; + } + + return ret; + } + + bool ISDL2AudioChannel.Playing => isRunning && !player.Done; + + float ISDL2AudioChannel.Volume => (float)AggregateVolume.Value; + + double ISDL2AudioChannel.Balance => AggregateBalance.Value; + + ~TrackSDL2() + { + Dispose(false); + } + + protected override void Dispose(bool disposing) + { + if (IsDisposed) + return; + + isRunning = false; + (Mixer as SDL2AudioMixer)?.StreamFree(this); + + decodeData?.Stop(); + + lock (syncRoot) + player.Dispose(); + + base.Dispose(disposing); + } + } +} diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs new file mode 100644 index 0000000000..cc71f44ebb --- /dev/null +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -0,0 +1,241 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. 
+// See the LICENCE file in the repository root for full licence text. + +using System; +using System.IO; +using osu.Framework.Logging; + +namespace osu.Framework.Audio.Track +{ + /// + /// Mainly returns audio data to . + /// + internal class TrackSDL2AudioPlayer : ResamplingPlayer, IDisposable + { + private volatile bool isLoaded; + public bool IsLoaded => isLoaded; + + private volatile bool isLoading; + public bool IsLoading => isLoading; + + private volatile bool done; + public virtual bool Done => done; + + /// + /// Returns a byte position converted into milliseconds with configuration set for this player. + /// + /// A byte position to convert + /// + public double GetMsFromBytes(long bytePos) => bytePos * 1000.0d / SrcRate / SrcChannels / 4; + + /// + /// Returns a position in milliseconds converted from a byte position with configuration set for this player. + /// + /// A position in milliseconds to convert + /// + public long GetBytesFromMs(double seconds) => (long)(seconds / 1000.0d * SrcRate) * SrcChannels * 4; + + /// + /// Stores raw audio data. + /// + protected MemoryStream? AudioData; + + public long AudioDataLength => AudioData?.Length ?? 0; + + /// + /// Play backwards if set to true. + /// + public bool ReversePlayback { get; set; } + + /// + /// Creates a new . Use if you want to adjust tempo. + /// + /// Sampling rate of audio + /// Channels of audio + public TrackSDL2AudioPlayer(int rate, byte channels) + : base(rate, channels) + { + isLoading = false; + isLoaded = false; + } + + internal void PrepareStream(long byteLength) + { + if (disposedValue) + return; + + if (AudioData == null) + { + int len = byteLength > int.MaxValue ? int.MaxValue : (int)byteLength; + AudioData = new MemoryStream(len); + } + + isLoading = true; + } + + internal void PutSamplesInStream(byte[] next) + { + if (disposedValue) + return; + + if (AudioData == null) + throw new InvalidOperationException($"Use {nameof(PrepareStream)} before calling this"); + + long save = AudioData.Position; + AudioData.Position = AudioData.Length; + AudioData.Write(next); + AudioData.Position = save; + } + + internal void DonePutting() + { + if (disposedValue) + return; + + // Saved seek was over data length + if (SaveSeek > AudioDataLength) + SaveSeek = 0; + + isLoading = false; + isLoaded = true; + } + + protected override int GetRemainingRawBytes(byte[] data) + { + if (AudioData == null) + return 0; + + if (AudioData.Length <= 0) + { + done = true; + return 0; + } + + if (SaveSeek > 0) + { + // set to 0 if position is over saved seek + if (AudioData.Position > SaveSeek) + SaveSeek = 0; + + // player now has audio data to play + if (AudioData.Length > SaveSeek) + { + AudioData.Position = SaveSeek; + SaveSeek = 0; + } + + // if player didn't reach the position, don't play + if (SaveSeek > 0) + return 0; + } + + int read = data.Length; + + if (ReversePlayback) + { + int frameSize = SrcChannels * 4; + + if (AudioData.Position < read) + read = (int)AudioData.Position; + + byte[] temp = new byte[read]; + + AudioData.Position -= read; + read = AudioData.Read(temp, 0, read); + AudioData.Position -= read; + + for (int e = 0; e < read / frameSize; e++) + { + Buffer.BlockCopy(temp, read - frameSize * (e + 1), data, frameSize * e, frameSize); + } + } + else + { + read = AudioData.Read(data, 0, read); + } + + if (read < data.Length && isLoading) + Logger.Log("Track underrun!"); + + if (ReversePlayback ? 
AudioData.Position <= 0 : AudioData.Position >= AudioData.Length && !isLoading) + done = true; + + return read; + } + + /// + /// Clears 'done' status. + /// + /// Goes back to the start if set to true. + public virtual void Reset(bool resetPosition = true) + { + done = false; + + if (resetPosition) + { + SaveSeek = 0; + + if (AudioData != null) + AudioData.Position = 0; + } + } + + /// + /// Returns current position converted into milliseconds. + /// + public double GetCurrentTime() + { + if (AudioData == null) + return 0; + + return !ReversePlayback + ? GetMsFromBytes(AudioData.Position) - GetProcessingLatency() + : GetMsFromBytes(AudioData.Position) + GetProcessingLatency(); + } + + protected long SaveSeek; + + /// + /// Sets the position of this player. + /// If the given value is over current , it will be saved and pause playback until decoding reaches the position. + /// However, if the value is still over after the decoding is over, it will be discarded. + /// + /// Position in milliseconds + public virtual void Seek(double seek) + { + long tmp = GetBytesFromMs(seek); + + if (!isLoaded && tmp > AudioDataLength) + { + SaveSeek = tmp; + } + else if (AudioData != null) + { + SaveSeek = 0; + AudioData.Position = Math.Clamp(tmp, 0, AudioDataLength - 1); + } + } + + private bool disposedValue; + + protected virtual void Dispose(bool disposing) + { + if (!disposedValue) + { + if (disposing) + { + AudioData?.Dispose(); + AudioData = null; + } + + disposedValue = true; + } + } + + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + } +} From 8aba5b9a921924eea1cc308db0f73cd74681b2e4 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 30 Oct 2023 20:44:25 +0900 Subject: [PATCH 003/127] Add FFmpeg audio decoding capability in VideoDecoder --- osu.Framework/Graphics/Video/FFmpegFuncs.cs | 27 ++ osu.Framework/Graphics/Video/VideoDecoder.cs | 246 +++++++++++++++++-- 2 files changed, 252 insertions(+), 21 deletions(-) diff --git a/osu.Framework/Graphics/Video/FFmpegFuncs.cs b/osu.Framework/Graphics/Video/FFmpegFuncs.cs index 91821e44bb..4b52860b69 100644 --- a/osu.Framework/Graphics/Video/FFmpegFuncs.cs +++ b/osu.Framework/Graphics/Video/FFmpegFuncs.cs @@ -87,6 +87,24 @@ public unsafe class FFmpegFuncs public delegate int SwsScaleDelegate(SwsContext* c, byte*[] srcSlice, int[] srcStride, int srcSliceY, int srcSliceH, byte*[] dst, int[] dstStride); + public delegate SwrContext* SwrAllocSetOptsDelegate(SwrContext* s, long outChLayout, AVSampleFormat outSampleFmt, int outSampleRate, long inChLayout, AVSampleFormat inSampleFmt, int inSampleRate, int logOffset, void* logCtx); + + public delegate int SwrInitDelegate(SwrContext* s); + + public delegate int SwrIsInitializedDelegate(SwrContext* s); + + public delegate void SwrFreeDelegate(SwrContext** s); + + public delegate void SwrCloseDelegate(SwrContext* s); + + public delegate int SwrConvertDelegate(SwrContext* s, byte** dst, int outCount, byte** src, int inCount); + + public delegate long SwrGetDelayDelegate(SwrContext* s, long value); + + public delegate int AvSamplesGetBufferSizeDelegate(int* linesize, int nbChannels, int nbSamples, AVSampleFormat sampleFmt, int align); + + public delegate long AvGetDefaultChannelLayoutDelegate(int nbChannels); + #endregion public AvFrameAllocDelegate av_frame_alloc; @@ -125,6 +143,15 @@ public unsafe class FFmpegFuncs public SwsFreeContextDelegate sws_freeContext; public SwsGetCachedContextDelegate sws_getCachedContext; public SwsScaleDelegate sws_scale; + public SwrAllocSetOptsDelegate 
swr_alloc_set_opts; + public SwrInitDelegate swr_init; + public SwrIsInitializedDelegate swr_is_initialized; + public SwrFreeDelegate swr_free; + public SwrCloseDelegate swr_close; + public SwrConvertDelegate swr_convert; + public SwrGetDelayDelegate swr_get_delay; + public AvSamplesGetBufferSizeDelegate av_samples_get_buffer_size; + public AvGetDefaultChannelLayoutDelegate av_get_default_channel_layout; // Touching AutoGen.ffmpeg or its LibraryLoader in any way on non-Desktop platforms // will cause it to throw in static constructor, which can't be bypassed. diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 3bdfaf8479..24c5ebc057 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -120,6 +120,7 @@ static VideoDecoder() Library.Load("libavcodec.so", Library.LoadFlags.RTLD_LAZY | Library.LoadFlags.RTLD_GLOBAL); Library.Load("libavformat.so", Library.LoadFlags.RTLD_LAZY | Library.LoadFlags.RTLD_GLOBAL); Library.Load("libswscale.so", Library.LoadFlags.RTLD_LAZY | Library.LoadFlags.RTLD_GLOBAL); + Library.Load("libswresample.so", Library.LoadFlags.RTLD_LAZY | Library.LoadFlags.RTLD_GLOBAL); } } @@ -133,17 +134,10 @@ public VideoDecoder(IRenderer renderer, string filename) { } - /// - /// Creates a new video decoder that decodes the given video stream. - /// - /// The renderer to display the video. - /// The stream that should be decoded. - public VideoDecoder(IRenderer renderer, Stream videoStream) + private VideoDecoder(Stream stream) { ffmpeg = CreateFuncs(); - - this.renderer = renderer; - this.videoStream = videoStream; + videoStream = stream; if (!videoStream.CanRead) throw new InvalidOperationException($"The given stream does not support reading. A stream used for a {nameof(VideoDecoder)} must support reading."); @@ -152,6 +146,17 @@ public VideoDecoder(IRenderer renderer, Stream videoStream) decoderCommands = new ConcurrentQueue(); availableTextures = new ConcurrentQueue(); // TODO: use "real" object pool when there's some public pool supporting disposables handle = new ObjectHandle(this, GCHandleType.Normal); + } + + /// + /// Creates a new video decoder that decodes the given video stream. + /// + /// The renderer to display the video. + /// The stream that should be decoded. 
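+ /// (Audio-only decoding uses the separate constructor further below, which takes no renderer and
+ /// explicitly disables hardware decoding.)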
+ public VideoDecoder(IRenderer renderer, Stream videoStream) + : this(videoStream) + { + this.renderer = renderer; TargetHardwareVideoDecoders.BindValueChanged(_ => { @@ -159,10 +164,46 @@ public VideoDecoder(IRenderer renderer, Stream videoStream) if (formatContext == null) return; - decoderCommands.Enqueue(recreateCodecContext); + decoderCommands.Enqueue(RecreateCodecContext); }); } + private readonly bool audio; + private readonly int audioRate; + private readonly int audioChannels; + private readonly int audioBits; + private readonly long audioChannelLayout; + private readonly AVSampleFormat audioFmt; + private SwrContext* swrContext; + + public long Bitrate => codecContext->bit_rate; + public long FrameCount => stream->nb_frames; + + // Audio mode + public VideoDecoder(Stream audioStream, int rate, int channels, bool isFloat, int bits, bool signed) + : this(audioStream) + { + audioRate = rate; + audioChannels = channels; + audioBits = bits; + + audio = true; + hwDecodingAllowed = false; + audioChannelLayout = ffmpeg.av_get_default_channel_layout(channels); + audioFmt = AVSampleFormat.AV_SAMPLE_FMT_FLT; + + if (isFloat) + audioFmt = AVSampleFormat.AV_SAMPLE_FMT_FLT; + else if (!signed && bits == 8) + audioFmt = AVSampleFormat.AV_SAMPLE_FMT_U8; + else if (signed && bits == 16) + audioFmt = AVSampleFormat.AV_SAMPLE_FMT_S16; + else if (signed && bits == 32) + audioFmt = AVSampleFormat.AV_SAMPLE_FMT_S32; + else + Logger.Log("libswresample doesn't support current format! using default format...", level: LogLevel.Important); + } + /// /// Seek the decoder to the given timestamp. This will fail if is false. /// @@ -207,8 +248,8 @@ public void StartDecoding() { try { - prepareDecoding(); - recreateCodecContext(); + PrepareDecoding(); + RecreateCodecContext(); } catch (Exception e) { @@ -328,7 +369,7 @@ private static long streamSeekCallbacks(void* opaque, long offset, int whence) } // sets up libavformat state: creates the AVFormatContext, the frames, etc. to start decoding, but does not actually start the decodingLoop - private void prepareDecoding() + internal void PrepareDecoding() { const int context_buffer_size = 4096; readPacketCallback = readPacket; @@ -353,9 +394,9 @@ private void prepareDecoding() if (findStreamInfoResult < 0) throw new InvalidOperationException($"Error finding stream info: {getErrorMessage(findStreamInfoResult)}"); - int streamIndex = ffmpeg.av_find_best_stream(formatContext, AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, null, 0); + int streamIndex = ffmpeg.av_find_best_stream(formatContext, audio ? 
AVMediaType.AVMEDIA_TYPE_AUDIO : AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, null, 0); if (streamIndex < 0) - throw new InvalidOperationException($"Couldn't find video stream: {getErrorMessage(streamIndex)}"); + throw new InvalidOperationException($"Couldn't find stream: {getErrorMessage(streamIndex)}"); stream = formatContext->streams[streamIndex]; timeBaseInSeconds = stream->time_base.GetValue(); @@ -366,7 +407,7 @@ private void prepareDecoding() Duration = formatContext->duration / (double)FFmpegFuncs.AV_TIME_BASE * 1000.0; } - private void recreateCodecContext() + internal void RecreateCodecContext() { if (stream == null) return; @@ -423,6 +464,12 @@ private void recreateCodecContext() continue; } + if (audio && !prepareResampler()) + { + Logger.Log("Error trying to prepare audio resampler"); + continue; + } + Logger.Log($"Successfully initialized decoder: {decoder.Name}"); openSuccessful = true; @@ -433,6 +480,32 @@ private void recreateCodecContext() throw new InvalidOperationException("No usable decoder found"); } + private bool prepareResampler() + { + long srcChLayout = ffmpeg.av_get_default_channel_layout(codecContext->channels); + AVSampleFormat srcAudioFmt = codecContext->sample_fmt; + int srcRate = codecContext->sample_rate; + + if (audioChannelLayout == srcChLayout && audioFmt == srcAudioFmt && audioRate == srcRate) + { + swrContext = null; + return true; + } + + swrContext = ffmpeg.swr_alloc_set_opts(null, audioChannelLayout, audioFmt, audioRate, + srcChLayout, srcAudioFmt, srcRate, 0, null); + + if (swrContext == null) + { + Logger.Log("Failed allocating memory for swresampler", level: LogLevel.Error); + return false; + } + + ffmpeg.swr_init(swrContext); + + return ffmpeg.swr_is_initialized(swrContext) > 0; + } + private void decodingLoop(CancellationToken cancellationToken) { var packet = ffmpeg.av_packet_alloc(); @@ -497,6 +570,52 @@ private void decodingLoop(CancellationToken cancellationToken) } } + private MemoryStream memoryStream; + + internal void DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool decodeUntilEnd = false) + { + if (!audio) + { + decodedAudio = Array.Empty(); + return; + } + + var packet = ffmpeg.av_packet_alloc(); + var receiveFrame = ffmpeg.av_frame_alloc(); + + memoryStream = new MemoryStream(); + + try + { + for (int i = 0; i < iteration; i++) + { + if (decodeUntilEnd) i--; // loop indefinitely to decode at once + + decodeNextFrame(packet, receiveFrame); + + if (State != DecoderState.Running) + { + resampleAndAppendToAudioStream(null); // flush resampler + break; + } + } + } + catch (Exception e) + { + Logger.Error(e, "VideoDecoder faulted while decoding audio"); + State = DecoderState.Faulted; + } + finally + { + ffmpeg.av_packet_free(&packet); + ffmpeg.av_frame_free(&receiveFrame); + + decodedAudio = memoryStream.ToArray(); + memoryStream?.Dispose(); + memoryStream = null; + } + } + private void decodeNextFrame(AVPacket* packet, AVFrame* receiveFrame) { // read data from input into AVPacket. @@ -604,7 +723,7 @@ private void readDecodedFrames(AVFrame* receiveFrame) // get final frame. FFmpegFrame frame; - if (((AVPixelFormat)receiveFrame->format).IsHardwarePixelFormat()) + if (!audio && ((AVPixelFormat)receiveFrame->format).IsHardwarePixelFormat()) { // transfer data from HW decoder to RAM. 
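+ // (The audio path never reaches this branch: the audio-mode constructor sets
+ // hwDecodingAllowed = false, and the !audio check above guards it as well.)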
if (!hwTransferFrames.TryDequeue(out var hwTransferFrame)) @@ -634,6 +753,13 @@ private void readDecodedFrames(AVFrame* receiveFrame) lastDecodedFrameTime = (float)frameTime; + if (audio) + { + resampleAndAppendToAudioStream(frame); + frame.Dispose(); + continue; + } + // Note: this is the pixel format that `VideoTexture` expects internally frame = ensureFramePixelFormat(frame, AVPixelFormat.AV_PIX_FMT_YUV420P); if (frame == null) @@ -650,6 +776,64 @@ private void readDecodedFrames(AVFrame* receiveFrame) } } + private void resampleAndAppendToAudioStream(FFmpegFrame frame) + { + if (memoryStream == null) + return; + + int sampleCount; + byte*[] source; + + if (swrContext != null) + { + sampleCount = (int)ffmpeg.swr_get_delay(swrContext, codecContext->sample_rate); + source = null; + + if (frame != null) + { + sampleCount = (int)Math.Ceiling((double)(sampleCount + frame.Pointer->nb_samples) * audioRate / codecContext->sample_rate); + source = frame.Pointer->data.ToArray(); + } + + // no frame, no remaining samples in resampler + if (sampleCount <= 0) + return; + } + else if (frame != null) + { + sampleCount = frame.Pointer->nb_samples; + source = frame.Pointer->data.ToArray(); + } + else // no frame, no resampler + { + return; + } + + int audioSize = ffmpeg.av_samples_get_buffer_size(null, audioChannels, sampleCount, audioFmt, 0); + byte[] audioDest = new byte[audioSize]; + int nbSamples = 0; + + if (swrContext != null) + { + fixed (byte** data = source) + fixed (byte* dest = audioDest) + nbSamples = ffmpeg.swr_convert(swrContext, &dest, sampleCount, data, frame != null ? frame.Pointer->nb_samples : 0); + } + else if (source != null) + { + // assuming that the destination and source are not planar as we never define planar in ctor + nbSamples = sampleCount; + + for (int i = 0; i < audioDest.Length; i++) + { + audioDest[i] = *(source[0] + i); + } + } + + if (nbSamples > 0) + memoryStream.Write(audioDest, 0, nbSamples * (audioBits / 8) * audioChannels); + } + private readonly ConcurrentQueue scalerFrames = new ConcurrentQueue(); private void returnScalerFrame(FFmpegFrame frame) => scalerFrames.Enqueue(frame); @@ -730,7 +914,7 @@ private void tryDisableHwDecoding(int errorCode) { Logger.Log("Disabling hardware decoding of the current video due to an unexpected error"); - decoderCommands.Enqueue(recreateCodecContext); + decoderCommands.Enqueue(RecreateCodecContext); } } @@ -869,7 +1053,16 @@ protected virtual FFmpegFuncs CreateFuncs() avio_context_free = FFmpeg.AutoGen.ffmpeg.avio_context_free, sws_freeContext = FFmpeg.AutoGen.ffmpeg.sws_freeContext, sws_getCachedContext = FFmpeg.AutoGen.ffmpeg.sws_getCachedContext, - sws_scale = FFmpeg.AutoGen.ffmpeg.sws_scale + sws_scale = FFmpeg.AutoGen.ffmpeg.sws_scale, + swr_alloc_set_opts = FFmpeg.AutoGen.ffmpeg.swr_alloc_set_opts, + swr_init = FFmpeg.AutoGen.ffmpeg.swr_init, + swr_is_initialized = FFmpeg.AutoGen.ffmpeg.swr_is_initialized, + swr_free = FFmpeg.AutoGen.ffmpeg.swr_free, + swr_close = FFmpeg.AutoGen.ffmpeg.swr_close, + swr_convert = FFmpeg.AutoGen.ffmpeg.swr_convert, + swr_get_delay = FFmpeg.AutoGen.ffmpeg.swr_get_delay, + av_samples_get_buffer_size = FFmpeg.AutoGen.ffmpeg.av_samples_get_buffer_size, + av_get_default_channel_layout = FFmpeg.AutoGen.ffmpeg.av_get_default_channel_layout }; } @@ -895,7 +1088,7 @@ protected virtual void Dispose(bool disposing) decoderCommands.Clear(); - StopDecodingAsync().ContinueWith(_ => + void freeFFmpeg() { if (formatContext != null && inputOpened) { @@ -928,6 +1121,12 @@ protected virtual void Dispose(bool 
disposing) if (swsContext != null) ffmpeg.sws_freeContext(swsContext); + if (swrContext != null) + { + fixed (SwrContext** ptr = &swrContext) + ffmpeg.swr_free(ptr); + } + while (decodedFrames.TryDequeue(out var f)) { f.Texture.FlushUploads(); @@ -944,7 +1143,12 @@ protected virtual void Dispose(bool disposing) sf.Dispose(); handle.Dispose(); - }); + } + + if (audio) + freeFFmpeg(); + else + StopDecodingAsync().ContinueWith(_ => freeFFmpeg()); } #endregion From 47001172f25e7173eb1a8ec53b31d1df9f5b3bb2 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 30 Oct 2023 20:44:38 +0900 Subject: [PATCH 004/127] Add needed dependencies for SDL2 Audio --- osu.Framework/osu.Framework.csproj | 2 ++ 1 file changed, 2 insertions(+) diff --git a/osu.Framework/osu.Framework.csproj b/osu.Framework/osu.Framework.csproj index 3ebc16d4f8..ab9a09985b 100644 --- a/osu.Framework/osu.Framework.csproj +++ b/osu.Framework/osu.Framework.csproj @@ -23,6 +23,7 @@ + @@ -37,6 +38,7 @@ + From e71183049c0ebdb69bf711ca4123fe8ae68a9d61 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 30 Oct 2023 20:45:00 +0900 Subject: [PATCH 005/127] Init SDL2 Audio in SDL_Init --- osu.Framework/Platform/SDL2Window.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Platform/SDL2Window.cs b/osu.Framework/Platform/SDL2Window.cs index 8ae1d72155..697d631d06 100644 --- a/osu.Framework/Platform/SDL2Window.cs +++ b/osu.Framework/Platform/SDL2Window.cs @@ -186,7 +186,7 @@ protected SDL2Window(GraphicsSurfaceType surfaceType) { ObjectHandle = new ObjectHandle(this, GCHandleType.Normal); - if (SDL.SDL_Init(SDL.SDL_INIT_VIDEO | SDL.SDL_INIT_GAMECONTROLLER) < 0) + if (SDL.SDL_Init(SDL.SDL_INIT_VIDEO | SDL.SDL_INIT_GAMECONTROLLER | SDL.SDL_INIT_AUDIO) < 0) { throw new InvalidOperationException($"Failed to initialise SDL: {SDL.SDL_GetError()}"); } From 949ba72b3dbd0fc2601982c6e0f6e0afab97c9f8 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 30 Oct 2023 20:45:23 +0900 Subject: [PATCH 006/127] Make SDL2 audio support through framework.ini --- osu.Framework/Configuration/AudioDriver.cs | 11 +++++++++++ .../Configuration/FrameworkConfigManager.cs | 2 ++ osu.Framework/Game.cs | 14 ++++++++++++-- 3 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 osu.Framework/Configuration/AudioDriver.cs diff --git a/osu.Framework/Configuration/AudioDriver.cs b/osu.Framework/Configuration/AudioDriver.cs new file mode 100644 index 0000000000..21e2ec9bb3 --- /dev/null +++ b/osu.Framework/Configuration/AudioDriver.cs @@ -0,0 +1,11 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. 
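+
+// The driver is selected through FrameworkSetting.AudioDriver (framework.ini); Game.cs constructs
+// either BassAudioManager or SDL2AudioManager based on this value, defaulting to BASS (see the
+// FrameworkConfigManager and Game changes later in this patch).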
+ +namespace osu.Framework.Configuration +{ + public enum AudioDriver + { + BASS, + SDL2 + } +} diff --git a/osu.Framework/Configuration/FrameworkConfigManager.cs b/osu.Framework/Configuration/FrameworkConfigManager.cs index c8498c53ae..8f7831273c 100644 --- a/osu.Framework/Configuration/FrameworkConfigManager.cs +++ b/osu.Framework/Configuration/FrameworkConfigManager.cs @@ -31,6 +31,7 @@ protected override void InitialiseDefaults() SetDefault(FrameworkSetting.WindowedPositionX, 0.5, -0.5, 1.5); SetDefault(FrameworkSetting.WindowedPositionY, 0.5, -0.5, 1.5); SetDefault(FrameworkSetting.LastDisplayDevice, DisplayIndex.Default); + SetDefault(FrameworkSetting.AudioDriver, AudioDriver.BASS); SetDefault(FrameworkSetting.AudioDevice, string.Empty); SetDefault(FrameworkSetting.VolumeUniversal, 1.0, 0.0, 1.0, 0.01); SetDefault(FrameworkSetting.VolumeMusic, 1.0, 0.0, 1.0, 0.01); @@ -77,6 +78,7 @@ public enum FrameworkSetting { ShowLogOverlay, + AudioDriver, AudioDevice, VolumeUniversal, VolumeEffect, diff --git a/osu.Framework/Game.cs b/osu.Framework/Game.cs index 2ed0647d39..12deb4feec 100644 --- a/osu.Framework/Game.cs +++ b/osu.Framework/Game.cs @@ -165,8 +165,18 @@ private void load(FrameworkConfigManager config) samples.AddStore(new NamespacedResourceStore(Resources, @"Samples")); samples.AddStore(new OnlineStore()); - Audio = new AudioManager(Host.AudioThread, tracks, samples) { EventScheduler = Scheduler }; - dependencies.Cache(Audio); + switch (config.Get(FrameworkSetting.AudioDriver)) + { + case AudioDriver.SDL2: + Audio = new SDL2AudioManager(Host.AudioThread, tracks, samples) { EventScheduler = Scheduler }; + break; + + default: + Audio = new BassAudioManager(Host.AudioThread, tracks, samples) { EventScheduler = Scheduler }; + break; + } + + dependencies.CacheAs(typeof(AudioManager), Audio); dependencies.CacheAs(Audio.Tracks); dependencies.CacheAs(Audio.Samples); From b4a6c5885fae92481b484a1bc91712f34191a157 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 30 Oct 2023 20:45:39 +0900 Subject: [PATCH 007/127] Fix broken BASS tests --- .../Audio/AudioManagerWithDeviceLoss.cs | 2 +- osu.Framework.Tests/Audio/BassTestComponents.cs | 13 +++++++++++-- .../Visual/Audio/TestSceneAudioManager.cs | 7 +++++++ osu.Framework/IO/Stores/ResourceStore.cs | 2 +- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/osu.Framework.Tests/Audio/AudioManagerWithDeviceLoss.cs b/osu.Framework.Tests/Audio/AudioManagerWithDeviceLoss.cs index d1f5977b35..d8cd315425 100644 --- a/osu.Framework.Tests/Audio/AudioManagerWithDeviceLoss.cs +++ b/osu.Framework.Tests/Audio/AudioManagerWithDeviceLoss.cs @@ -14,7 +14,7 @@ namespace osu.Framework.Tests.Audio /// that can simulate the loss of a device. /// This will NOT work without a physical audio device! 
/// - internal class AudioManagerWithDeviceLoss : AudioManager + internal class AudioManagerWithDeviceLoss : BassAudioManager { public AudioManagerWithDeviceLoss(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) : base(audioThread, trackStore, sampleStore) diff --git a/osu.Framework.Tests/Audio/BassTestComponents.cs b/osu.Framework.Tests/Audio/BassTestComponents.cs index e95713aaa5..cd93be5cd2 100644 --- a/osu.Framework.Tests/Audio/BassTestComponents.cs +++ b/osu.Framework.Tests/Audio/BassTestComponents.cs @@ -9,6 +9,7 @@ using osu.Framework.Audio.Sample; using osu.Framework.Audio.Track; using osu.Framework.Development; +using osu.Framework.Extensions; using osu.Framework.IO.Stores; using osu.Framework.Threading; @@ -36,8 +37,16 @@ public BassTestComponents(bool init = true) Mixer = CreateMixer(); Resources = new DllResourceStore(typeof(TrackBassTest).Assembly); - TrackStore = new TrackStore(Resources, Mixer); - SampleStore = new SampleStore(Resources, Mixer); + TrackStore = new TrackStore(Resources, Mixer, (data, name) => new TrackBass(data, name)); + SampleStore = new SampleStore(Resources, Mixer, (stream, name, mixer, playbackConcurrency) => + { + byte[] data; + + using (stream) + data = stream.ReadAllBytesToArray(); + + return new SampleBassFactory(data, name, (BassAudioMixer)mixer, playbackConcurrency); + }); Add(TrackStore, SampleStore); } diff --git a/osu.Framework.Tests/Visual/Audio/TestSceneAudioManager.cs b/osu.Framework.Tests/Visual/Audio/TestSceneAudioManager.cs index da0939c8dc..2877b5c352 100644 --- a/osu.Framework.Tests/Visual/Audio/TestSceneAudioManager.cs +++ b/osu.Framework.Tests/Visual/Audio/TestSceneAudioManager.cs @@ -2,6 +2,7 @@ // See the LICENCE file in the repository root for full licence text. using System.Collections.Generic; +using System.IO; using System.Linq; using System.Threading; using System.Threading.Tasks; @@ -65,6 +66,12 @@ public override byte[] Get(string name) return base.Get(name); } + public override Stream GetStream(string name) + { + attemptedLookups.Add(name); + return base.GetStream(name); + } + public override Task GetAsync(string name, CancellationToken cancellationToken = default) { attemptedLookups.Add(name); diff --git a/osu.Framework/IO/Stores/ResourceStore.cs b/osu.Framework/IO/Stores/ResourceStore.cs index b190b1c9e0..3e760dddce 100644 --- a/osu.Framework/IO/Stores/ResourceStore.cs +++ b/osu.Framework/IO/Stores/ResourceStore.cs @@ -125,7 +125,7 @@ public virtual T Get(string name) return default; } - public Stream GetStream(string name) + public virtual Stream GetStream(string name) { if (name == null) return null; From 23af0512ddc8f97dfb689337e4b8828791da9490 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 31 Oct 2023 22:44:20 +0900 Subject: [PATCH 008/127] Cleanup AudioDecoder --- osu.Framework/Audio/AudioDecoder.cs | 167 ++++++++---------- .../Audio/Sample/SampleSDL2Factory.cs | 2 +- 2 files changed, 76 insertions(+), 93 deletions(-) diff --git a/osu.Framework/Audio/AudioDecoder.cs b/osu.Framework/Audio/AudioDecoder.cs index b723372222..2123de5e1d 100644 --- a/osu.Framework/Audio/AudioDecoder.cs +++ b/osu.Framework/Audio/AudioDecoder.cs @@ -25,7 +25,7 @@ public class AudioDecoderData internal readonly bool IsTrack; internal readonly ushort Format; internal readonly Stream Stream; - internal readonly PassDataDelegate Pass; + internal readonly PassDataDelegate? Pass; internal readonly object? 
UserData; internal int DecodeStream; @@ -61,7 +61,7 @@ public long ByteLength internal volatile bool StopJob; internal volatile bool Loading; - internal AudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate pass, object? userData) + public AudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? userData = null) { Rate = rate; Channels = channels; @@ -137,61 +137,38 @@ public AudioDecoderData StartDecodingAsync(Stream stream, PassDataDelegate pass, AudioDecoderData data = new AudioDecoderData(spec.freq, spec.channels, true, spec.format, stream, pass, userData); lock (jobs) - { jobs.AddFirst(data); - } return data; } - private void passAudioSync(byte[] data, object? userdata, AudioDecoderData decoderData, bool done) - { - if (userdata is TempDecodeData temp) - { - if (done && temp.Stream == null) - temp.DecodedAtOnce = data; - else - { - temp.Stream ??= new MemoryStream(); - temp.Stream.Write(data); - } - } - } - - private class TempDecodeData - { - internal MemoryStream? Stream; - internal byte[]? DecodedAtOnce; - } - /// /// Decodes audio from stream. It blocks until decoding is done. /// /// Data stream to read. /// Decoded audio - public byte[] DecodeAudio(Stream stream) + public byte[] DecodeAudioInCurrentSpec(Stream stream) => DecodeAudio(spec.freq, spec.channels, spec.format, stream); + + public static byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream) { - TempDecodeData tempData = new TempDecodeData(); - AudioDecoderData data = new AudioDecoderData(spec.freq, spec.channels, false, spec.format, stream, passAudioSync, tempData); + AudioDecoderData data = new AudioDecoderData(freq, channels, false, format, stream); - try - { - loadFromStream(data); + LoadFromStream(data, out byte[] decoded); - if (tempData.DecodedAtOnce != null) - return tempData.DecodedAtOnce; + if (!data.Loading) + return decoded; + + using (MemoryStream memoryStream = new MemoryStream()) + { + memoryStream.Write(decoded); while (data.Loading) { - loadFromStream(data); + LoadFromStream(data, out decoded); + memoryStream.Write(decoded); } - return tempData.Stream?.ToArray() ?? Array.Empty(); - } - finally - { - tempData.Stream?.Dispose(); - tempData.Stream = null; + return memoryStream.ToArray(); } } @@ -220,7 +197,10 @@ private void loop(CancellationToken token) jobs.Remove(node); } else - loadFromStream(data); + { + LoadFromStream(data, out byte[] decoded); + data.Pass?.Invoke(decoded, data.UserData, data, !data.Loading); + } if (!data.Loading) jobs.Remove(node); @@ -231,65 +211,69 @@ private void loop(CancellationToken token) } } - private int loadFromStream(AudioDecoderData job) + private static readonly object bass_sync_lock = new object(); + + /// + /// Decodes and resamples audio from job.Stream, and pass it to decoded. 
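+ /// Callers are expected to keep invoking this while job.Loading remains true and concatenate the
+ /// returned chunks, as the static DecodeAudio helper above does.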
+ /// + /// Decode data + /// Decoded audio + public static void LoadFromStream(AudioDecoderData job, out byte[] decoded) { try { if (Bass.CurrentDevice > -1) { - if (!job.Loading) + lock (bass_sync_lock) { - job.Callbacks = new FileCallbacks(new DataStreamFileProcedures(job.Stream)); - BassFlags bassFlags = BassFlags.Decode; - if (SDL.SDL_AUDIO_ISFLOAT(job.Format)) bassFlags |= BassFlags.Float; - if (job.IsTrack) bassFlags |= BassFlags.Prescan; - job.DecodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, job.Callbacks.Callbacks); - - if (job.DecodeStream == 0) - throw new FormatException($"Couldn't create stream: {Bass.LastError}"); - - bool infoAvail = Bass.ChannelGetInfo(job.DecodeStream, out var info); - - if (infoAvail) + if (!job.Loading) { - job.ByteLength = Bass.ChannelGetLength(job.DecodeStream); - job.Length = Bass.ChannelBytes2Seconds(job.DecodeStream, job.ByteLength) * 1000; - job.Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(job.DecodeStream, ChannelAttribute.Bitrate)); + job.Callbacks = new FileCallbacks(new DataStreamFileProcedures(job.Stream)); + BassFlags bassFlags = BassFlags.Decode; + if (SDL.SDL_AUDIO_ISFLOAT(job.Format)) bassFlags |= BassFlags.Float; + if (job.IsTrack) bassFlags |= BassFlags.Prescan; + job.DecodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, job.Callbacks.Callbacks); - ushort srcformat; + if (job.DecodeStream == 0) + throw new FormatException($"Couldn't create stream: {Bass.LastError}"); - switch (info.Resolution) + bool infoAvail = Bass.ChannelGetInfo(job.DecodeStream, out var info); + + if (infoAvail) + { + job.ByteLength = Bass.ChannelGetLength(job.DecodeStream); + job.Length = Bass.ChannelBytes2Seconds(job.DecodeStream, job.ByteLength) * 1000; + job.Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(job.DecodeStream, ChannelAttribute.Bitrate)); + + ushort srcformat; + + switch (info.Resolution) + { + case Resolution.Byte: + srcformat = SDL.AUDIO_S8; + break; + + case Resolution.Short: + srcformat = SDL.AUDIO_S16; + break; + + case Resolution.Float: + default: + srcformat = SDL.AUDIO_F32; + break; + } + + if (info.Channels != job.Channels || srcformat != job.Format || info.Frequency != job.Rate) + job.Resampler = new SDL2AudioStream(srcformat, (byte)info.Channels, info.Frequency, job.Format, (byte)job.Channels, job.Rate); + } + else { - case Resolution.Byte: - srcformat = SDL.AUDIO_S8; - break; - - case Resolution.Short: - srcformat = SDL.AUDIO_S16; - break; - - case Resolution.Float: - default: - srcformat = SDL.AUDIO_F32; - break; + if (job.IsTrack) + throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); } - if (info.Channels != job.Channels || srcformat != job.Format || info.Frequency != job.Rate) - job.Resampler = new SDL2AudioStream(srcformat, (byte)info.Channels, info.Frequency, job.Format, (byte)job.Channels, job.Rate); + job.Loading = true; } - else - { - if (job.IsTrack) - throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); - } - - job.Loading = true; - } - - if (job.Loading) - { - if (job.DecodeStream == 0) - throw new InvalidOperationException("BASS stream is not available"); int bufferLen = (int)(job.IsTrack ? 
Bass.ChannelSeconds2Bytes(job.DecodeStream, 8) : job.ByteLength); @@ -315,7 +299,7 @@ private int loadFromStream(AudioDecoderData job) if (got <= 0) buffer = Array.Empty(); else if (got != bufferLen) Array.Resize(ref buffer, got); - job.Pass(buffer, job.UserData, job, !job.Loading); + decoded = buffer; } else { @@ -332,7 +316,7 @@ private int loadFromStream(AudioDecoderData job) if (avail > 0) job.Resampler.Get(resampled, avail); - job.Pass(resampled, job.UserData, job, !job.Loading); + decoded = resampled; } } } @@ -352,26 +336,25 @@ private int loadFromStream(AudioDecoderData job) job.Loading = true; } - job.FFmpeg.DecodeNextAudioFrame(32, out byte[] decoded, !job.IsTrack); + job.FFmpeg.DecodeNextAudioFrame(32, out byte[] audioData, !job.IsTrack); if (job.FFmpeg.State != VideoDecoder.DecoderState.Running) job.Loading = false; - job.Pass(decoded, job.UserData, job, !job.Loading); + decoded = audioData; } } catch (Exception e) { Logger.Log(e.Message, level: LogLevel.Important); job.Loading = false; + decoded = Array.Empty(); } finally { if (!job.Loading) job.Dispose(); } - - return 0; } private bool disposedValue; diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index 0e7e2d558d..441d58043e 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -42,7 +42,7 @@ private protected override void LoadSample() try { - byte[] audio = decoder.DecodeAudio(stream); + byte[] audio = decoder.DecodeAudioInCurrentSpec(stream); if (audio.Length > 0) { From 69c65a32f56cd0a04d6112612f9969e9b5ff1e90 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 10 Nov 2023 13:06:29 +0900 Subject: [PATCH 009/127] Separate BASS and FFmpeg AudioDecoder --- osu.Framework/Audio/AudioDecoder.cs | 246 +++++------------- osu.Framework/Audio/BassAudioDecoder.cs | 158 +++++++++++ osu.Framework/Audio/FFmpegAudioDecoder.cs | 64 +++++ osu.Framework/Audio/SDL2AudioManager.cs | 27 +- .../Audio/Sample/SampleSDL2Factory.cs | 6 +- osu.Framework/Audio/Track/TrackSDL2.cs | 3 + osu.Framework/Graphics/Video/FFmpegFrame.cs | 2 +- osu.Framework/Graphics/Video/VideoDecoder.cs | 75 +++--- 8 files changed, 350 insertions(+), 231 deletions(-) create mode 100644 osu.Framework/Audio/BassAudioDecoder.cs create mode 100644 osu.Framework/Audio/FFmpegAudioDecoder.cs diff --git a/osu.Framework/Audio/AudioDecoder.cs b/osu.Framework/Audio/AudioDecoder.cs index 2123de5e1d..668c01f919 100644 --- a/osu.Framework/Audio/AudioDecoder.cs +++ b/osu.Framework/Audio/AudioDecoder.cs @@ -3,22 +3,18 @@ using System; using System.IO; -using ManagedBass; -using osu.Framework.Audio.Callbacks; -using SDL2; using System.Threading; using osu.Framework.Logging; using System.Collections.Generic; -using osu.Framework.Graphics.Video; namespace osu.Framework.Audio { /// /// Decodes audio from , and convert it to appropriate format. /// - public class AudioDecoder : IDisposable + public abstract class AudioDecoder : IDisposable { - public class AudioDecoderData + public abstract class AudioDecoderData { internal readonly int Rate; internal readonly int Channels; @@ -28,12 +24,6 @@ public class AudioDecoderData internal readonly PassDataDelegate? Pass; internal readonly object? UserData; - internal int DecodeStream; - internal FileCallbacks? Callbacks; - internal SDL2AudioStream? Resampler; - - internal VideoDecoder? 
FFmpeg; - private volatile int bitrate; public int Bitrate @@ -61,7 +51,7 @@ public long ByteLength internal volatile bool StopJob; internal volatile bool Loading; - public AudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? userData = null) + protected AudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass, object? userData) { Rate = rate; Channels = channels; @@ -78,18 +68,8 @@ public void Stop() } // Call this in lock - internal void Dispose() + internal virtual void Dispose() { - if (DecodeStream != 0) - { - Bass.StreamFree(DecodeStream); - DecodeStream = 0; - } - - Stream.Dispose(); - Resampler?.Dispose(); - Callbacks?.Dispose(); - FFmpeg?.Dispose(); } } @@ -103,24 +83,23 @@ internal void Dispose() /// public delegate void PassDataDelegate(byte[] data, object? userdata, AudioDecoderData decoderData, bool done); - private readonly SDL.SDL_AudioSpec spec; + private readonly int rate; + private readonly int channels; + private readonly ushort format; - private readonly Thread decoderThread; + private Thread? decoderThread; /// /// Set up configuration and start a decoding thread. /// - /// Resample format - public AudioDecoder(SDL.SDL_AudioSpec spec) + /// Resample rate + /// Resample channels + /// Resample SDL audio format + protected AudioDecoder(int rate, int channels, ushort format) { - this.spec = spec; - - decoderThread = new Thread(() => loop(tokenSource.Token)) - { - IsBackground = true - }; - - decoderThread.Start(); + this.rate = rate; + this.channels = channels; + this.format = format; } private readonly CancellationTokenSource tokenSource = new CancellationTokenSource(); @@ -134,7 +113,17 @@ public AudioDecoder(SDL.SDL_AudioSpec spec) /// public AudioDecoderData StartDecodingAsync(Stream stream, PassDataDelegate pass, object? userData) { - AudioDecoderData data = new AudioDecoderData(spec.freq, spec.channels, true, spec.format, stream, pass, userData); + if (decoderThread == null) + { + decoderThread = new Thread(() => loop(tokenSource.Token)) + { + IsBackground = true + }; + + decoderThread.Start(); + } + + AudioDecoderData data = CreateDecoderData(rate, channels, true, format, stream, pass, userData); lock (jobs) jobs.AddFirst(data); @@ -147,17 +136,18 @@ public AudioDecoderData StartDecodingAsync(Stream stream, PassDataDelegate pass, /// /// Data stream to read. 
/// Decoded audio - public byte[] DecodeAudioInCurrentSpec(Stream stream) => DecodeAudio(spec.freq, spec.channels, spec.format, stream); + public byte[] DecodeAudioInCurrentSpec(Stream stream) => DecodeAudio(rate, channels, format, stream); - public static byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream) + public byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream) { - AudioDecoderData data = new AudioDecoderData(freq, channels, false, format, stream); + AudioDecoderData data = CreateDecoderData(freq, channels, false, format, stream); LoadFromStream(data, out byte[] decoded); if (!data.Loading) return decoded; + // fallback if it couldn't decode at once using (MemoryStream memoryStream = new MemoryStream()) { memoryStream.Write(decoded); @@ -176,173 +166,59 @@ private void loop(CancellationToken token) { while (!token.IsCancellationRequested) { - if (jobs.Count == 0) - { - Thread.Sleep(50); - continue; - } + int jobCount; lock (jobs) { - var node = jobs.First; + jobCount = jobs.Count; - while (node != null) + if (jobCount > 0) { - var next = node.Next; - AudioDecoderData data = node.Value; + var node = jobs.First; - if (data.StopJob) - { - data.Dispose(); - jobs.Remove(node); - } - else + while (node != null) { - LoadFromStream(data, out byte[] decoded); - data.Pass?.Invoke(decoded, data.UserData, data, !data.Loading); - } + var next = node.Next; + AudioDecoderData data = node.Value; - if (!data.Loading) - jobs.Remove(node); + if (data.StopJob) + { + data.Dispose(); + jobs.Remove(node); + } + else + { + LoadFromStream(data, out byte[] decoded); + data.Pass?.Invoke(decoded, data.UserData, data, !data.Loading); + } - node = next; + if (!data.Loading) + jobs.Remove(node); + + node = next; + } } } + + if (jobCount <= 0) + Thread.Sleep(50); } } - private static readonly object bass_sync_lock = new object(); + public abstract AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? userData = null); + + protected abstract void LoadFromStreamInternal(AudioDecoderData job, out byte[] decoded); /// /// Decodes and resamples audio from job.Stream, and pass it to decoded. 
/// /// Decode data /// Decoded audio - public static void LoadFromStream(AudioDecoderData job, out byte[] decoded) + public void LoadFromStream(AudioDecoderData job, out byte[] decoded) { try { - if (Bass.CurrentDevice > -1) - { - lock (bass_sync_lock) - { - if (!job.Loading) - { - job.Callbacks = new FileCallbacks(new DataStreamFileProcedures(job.Stream)); - BassFlags bassFlags = BassFlags.Decode; - if (SDL.SDL_AUDIO_ISFLOAT(job.Format)) bassFlags |= BassFlags.Float; - if (job.IsTrack) bassFlags |= BassFlags.Prescan; - job.DecodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, job.Callbacks.Callbacks); - - if (job.DecodeStream == 0) - throw new FormatException($"Couldn't create stream: {Bass.LastError}"); - - bool infoAvail = Bass.ChannelGetInfo(job.DecodeStream, out var info); - - if (infoAvail) - { - job.ByteLength = Bass.ChannelGetLength(job.DecodeStream); - job.Length = Bass.ChannelBytes2Seconds(job.DecodeStream, job.ByteLength) * 1000; - job.Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(job.DecodeStream, ChannelAttribute.Bitrate)); - - ushort srcformat; - - switch (info.Resolution) - { - case Resolution.Byte: - srcformat = SDL.AUDIO_S8; - break; - - case Resolution.Short: - srcformat = SDL.AUDIO_S16; - break; - - case Resolution.Float: - default: - srcformat = SDL.AUDIO_F32; - break; - } - - if (info.Channels != job.Channels || srcformat != job.Format || info.Frequency != job.Rate) - job.Resampler = new SDL2AudioStream(srcformat, (byte)info.Channels, info.Frequency, job.Format, (byte)job.Channels, job.Rate); - } - else - { - if (job.IsTrack) - throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); - } - - job.Loading = true; - } - - int bufferLen = (int)(job.IsTrack ? Bass.ChannelSeconds2Bytes(job.DecodeStream, 8) : job.ByteLength); - - if (bufferLen <= 0) - bufferLen = 44100 * 2 * 4; - - byte[] buffer = new byte[bufferLen]; - int got = Bass.ChannelGetData(job.DecodeStream, buffer, bufferLen); - - if (got == -1) - { - job.Loading = false; - - if (Bass.LastError != Errors.Ended) - throw new FormatException($"Couldn't decode: {Bass.LastError}"); - } - - if (Bass.StreamGetFilePosition(job.DecodeStream, FileStreamPosition.End) <= Bass.StreamGetFilePosition(job.DecodeStream)) - job.Loading = false; - - if (job.Resampler == null) - { - if (got <= 0) buffer = Array.Empty(); - else if (got != bufferLen) Array.Resize(ref buffer, got); - - decoded = buffer; - } - else - { - if (got > 0) - job.Resampler.Put(buffer, got); - - if (!job.Loading) - job.Resampler.Flush(); - - int avail = job.Resampler.GetPendingBytes(); - - byte[] resampled = avail > 0 ? 
new byte[avail] : Array.Empty(); - - if (avail > 0) - job.Resampler.Get(resampled, avail); - - decoded = resampled; - } - } - } - else - { - if (job.FFmpeg == null) - { - job.FFmpeg = new VideoDecoder(job.Stream, job.Rate, job.Channels, SDL.SDL_AUDIO_ISFLOAT(job.Format), SDL.SDL_AUDIO_BITSIZE(job.Format), SDL.SDL_AUDIO_ISSIGNED(job.Format)); - - job.FFmpeg.PrepareDecoding(); - job.FFmpeg.RecreateCodecContext(); - - job.Bitrate = (int)job.FFmpeg.Bitrate; - job.Length = job.FFmpeg.Duration; - job.ByteLength = (long)Math.Ceiling(job.FFmpeg.Duration / 1000.0d * job.Rate) * job.Channels * SDL.SDL_AUDIO_BITSIZE(job.Format); // FIXME - - job.Loading = true; - } - - job.FFmpeg.DecodeNextAudioFrame(32, out byte[] audioData, !job.IsTrack); - - if (job.FFmpeg.State != VideoDecoder.DecoderState.Running) - job.Loading = false; - - decoded = audioData; - } + LoadFromStreamInternal(job, out decoded); } catch (Exception e) { @@ -367,7 +243,7 @@ protected virtual void Dispose(bool disposing) { tokenSource.Cancel(); tokenSource.Dispose(); - decoderThread.Join(); + decoderThread?.Join(); } lock (jobs) diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs new file mode 100644 index 0000000000..27a82edb99 --- /dev/null +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -0,0 +1,158 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using System.IO; +using ManagedBass; +using osu.Framework.Audio.Callbacks; +using SDL2; + +namespace osu.Framework.Audio +{ + /// + /// This is only for using BASS as a decoder for SDL2 backend! + /// + internal class BassAudioDecoder : AudioDecoder + { + public BassAudioDecoder(int rate, int channels, ushort format) + : base(rate, channels, format) + { + } + + public class BassAudioDecoderData : AudioDecoderData + { + internal int DecodeStream; + internal FileCallbacks? Callbacks; + internal SDL2AudioStream? Resampler; + + public BassAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? 
userData = null) + : base(rate, channels, isTrack, format, stream, pass, userData) + { + } + + internal override void Dispose() + { + if (DecodeStream != 0) + { + Bass.StreamFree(DecodeStream); + DecodeStream = 0; + } + + Resampler?.Dispose(); + Callbacks?.Dispose(); + + base.Dispose(); + } + } + + private static readonly object bass_sync_lock = new object(); + + protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out byte[] decoded) + { + if (decodeData is not BassAudioDecoderData job) + throw new ArgumentException("Provide proper data"); + + if (Bass.CurrentDevice < 0) + throw new InvalidOperationException("Initialize a BASS device to decode audio"); + + lock (bass_sync_lock) + { + if (!job.Loading) + { + job.Callbacks = new FileCallbacks(new DataStreamFileProcedures(job.Stream)); + BassFlags bassFlags = BassFlags.Decode; + if (SDL.SDL_AUDIO_ISFLOAT(job.Format)) bassFlags |= BassFlags.Float; + if (job.IsTrack) bassFlags |= BassFlags.Prescan; + job.DecodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, job.Callbacks.Callbacks); + + if (job.DecodeStream == 0) + throw new FormatException($"Couldn't create stream: {Bass.LastError}"); + + bool infoAvail = Bass.ChannelGetInfo(job.DecodeStream, out var info); + + if (infoAvail) + { + job.ByteLength = Bass.ChannelGetLength(job.DecodeStream); + job.Length = Bass.ChannelBytes2Seconds(job.DecodeStream, job.ByteLength) * 1000; + job.Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(job.DecodeStream, ChannelAttribute.Bitrate)); + + ushort srcformat; + + switch (info.Resolution) + { + case Resolution.Byte: + srcformat = SDL.AUDIO_S8; + break; + + case Resolution.Short: + srcformat = SDL.AUDIO_S16; + break; + + case Resolution.Float: + default: + srcformat = SDL.AUDIO_F32; + break; + } + + if (info.Channels != job.Channels || srcformat != job.Format || info.Frequency != job.Rate) + job.Resampler = new SDL2AudioStream(srcformat, (byte)info.Channels, info.Frequency, job.Format, (byte)job.Channels, job.Rate); + } + else + { + if (job.IsTrack) + throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); + } + + job.Loading = true; + } + + int bufferLen = (int)(job.IsTrack ? Bass.ChannelSeconds2Bytes(job.DecodeStream, 1) : job.ByteLength); + + if (bufferLen <= 0) + bufferLen = 44100 * 2 * 1; + + byte[] buffer = new byte[bufferLen]; + int got = Bass.ChannelGetData(job.DecodeStream, buffer, bufferLen); + + if (got == -1) + { + job.Loading = false; + + if (Bass.LastError != Errors.Ended) + throw new FormatException($"Couldn't decode: {Bass.LastError}"); + } + + if (Bass.StreamGetFilePosition(job.DecodeStream, FileStreamPosition.End) <= Bass.StreamGetFilePosition(job.DecodeStream)) + job.Loading = false; + + if (job.Resampler == null) + { + if (got <= 0) buffer = Array.Empty(); + else if (got != bufferLen) Array.Resize(ref buffer, got); + + decoded = buffer; + } + else + { + if (got > 0) + job.Resampler.Put(buffer, got); + + if (!job.Loading) + job.Resampler.Flush(); + + int avail = job.Resampler.GetPendingBytes(); + + byte[] resampled = avail > 0 ? new byte[avail] : Array.Empty(); + + if (avail > 0) + job.Resampler.Get(resampled, avail); + + decoded = resampled; + } + } + } + + public override AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? 
userData = null) + => new BassAudioDecoderData(rate, channels, isTrack, format, stream, pass, userData); + } +} diff --git a/osu.Framework/Audio/FFmpegAudioDecoder.cs b/osu.Framework/Audio/FFmpegAudioDecoder.cs new file mode 100644 index 0000000000..c4b2653b56 --- /dev/null +++ b/osu.Framework/Audio/FFmpegAudioDecoder.cs @@ -0,0 +1,64 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using System.IO; +using osu.Framework.Graphics.Video; +using SDL2; + +namespace osu.Framework.Audio +{ + internal class FFmpegAudioDecoder : AudioDecoder + { + public FFmpegAudioDecoder(int rate, int channels, ushort format) + : base(rate, channels, format) + { + } + + public class FFmpegAudioDecoderData : AudioDecoderData + { + internal VideoDecoder? FFmpeg; + + public FFmpegAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass, object? userData) + : base(rate, channels, isTrack, format, stream, pass, userData) + { + } + + internal override void Dispose() + { + base.Dispose(); + FFmpeg?.Dispose(); + } + } + + protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out byte[] decoded) + { + if (decodeData is not FFmpegAudioDecoderData job) + throw new ArgumentException("Provide proper data"); + + if (job.FFmpeg == null) + { + job.FFmpeg = new VideoDecoder(job.Stream, job.Rate, job.Channels, SDL.SDL_AUDIO_ISFLOAT(job.Format), SDL.SDL_AUDIO_BITSIZE(job.Format), SDL.SDL_AUDIO_ISSIGNED(job.Format)); + + job.FFmpeg.PrepareDecoding(); + job.FFmpeg.RecreateCodecContext(); + + job.Bitrate = (int)job.FFmpeg.Bitrate; + job.Length = job.FFmpeg.Duration; + job.ByteLength = (long)Math.Ceiling(job.FFmpeg.Duration / 1000.0d * job.Rate) * job.Channels * SDL.SDL_AUDIO_BITSIZE(job.Format); // FIXME + + job.Loading = true; + } + + job.FFmpeg.DecodeNextAudioFrame(32, out byte[] audioData, !job.IsTrack); + + if (job.FFmpeg.State != VideoDecoder.DecoderState.Running) + job.Loading = false; + + decoded = audioData; + } + + public override AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? userData = null) + => new FFmpegAudioDecoderData(rate, channels, isTrack, format, stream, pass, userData); + } +} diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 333d7e7ba1..879940f544 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -22,11 +22,15 @@ namespace osu.Framework.Audio { public class SDL2AudioManager : AudioManager { + public const int AUDIO_FREQ = 44100; + public const byte AUDIO_CHANNELS = 2; + public const ushort AUDIO_FORMAT = SDL.AUDIO_F32; + private volatile uint deviceId; private SDL.SDL_AudioSpec spec; - private readonly AudioDecoder decoder; + private static AudioDecoder decoder; private readonly List sdlMixerList = new List(); @@ -42,9 +46,9 @@ public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStor // Must not edit this except for samples, as components (especially mixer) expects this to match. 
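        // Illustrative arithmetic only, assuming the constants above and the 256-sample buffer configured in the spec just below (a sketch, not stated in the patch itself):
        //   double callbackLengthMs = 256.0 / AUDIO_FREQ * 1000.0;             // ≈ 5.8 ms of audio per callback
        //   int bytesPerSecond = AUDIO_FREQ * AUDIO_CHANNELS * sizeof(float);  // 352,800 bytes of F32 audio per second (~345 KiB)
        // so a smaller `samples` value trades more frequent callbacks for lower output latency.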
spec = new SDL.SDL_AudioSpec { - freq = 44100, - channels = 2, - format = SDL.AUDIO_F32, + freq = AUDIO_FREQ, + channels = AUDIO_CHANNELS, + format = AUDIO_FORMAT, callback = audioCallback, samples = 256 // determines latency, this value can be changed but is already reasonably low }; @@ -55,8 +59,15 @@ public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStor ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1); AudioThread.InitDevice(0); }); + } + + public static AudioDecoder GetAudioDecoder() + { + decoder ??= ManagedBass.Bass.CurrentDevice >= 0 + ? new BassAudioDecoder(AUDIO_FREQ, AUDIO_CHANNELS, AUDIO_FORMAT) + : new FFmpegAudioDecoder(AUDIO_FREQ, AUDIO_CHANNELS, AUDIO_FORMAT); - decoder = new AudioDecoder(spec); + return decoder; } private string currentDeviceName = "Not loaded"; @@ -219,18 +230,18 @@ protected override bool SetAudioDevice(int deviceIndex) internal override Track.Track GetNewTrack(Stream data, string name) { TrackSDL2 track = new TrackSDL2(name, spec.freq, spec.channels, spec.samples); - decoder.StartDecodingAsync(data, track.AddToQueue, null); + EnqueueAction(() => GetAudioDecoder().StartDecodingAsync(data, track.AddToQueue, null)); return track; } internal override SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) - => new SampleSDL2Factory(data, name, (SDL2AudioMixer)mixer, playbackConcurrency, spec, decoder); + => new SampleSDL2Factory(data, name, (SDL2AudioMixer)mixer, playbackConcurrency, spec); protected override void Dispose(bool disposing) { base.Dispose(disposing); - decoder.Dispose(); + decoder?.Dispose(); if (deviceId > 0) { diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index 441d58043e..01d1ef1c17 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -17,19 +17,17 @@ internal class SampleSDL2Factory : SampleFactory private readonly SDL2AudioMixer mixer; private readonly SDL.SDL_AudioSpec spec; - private readonly AudioDecoder decoder; public float[]? DecodedAudio { get; private set; } private Stream? stream; - public SampleSDL2Factory(Stream stream, string name, SDL2AudioMixer mixer, int playbackConcurrency, SDL.SDL_AudioSpec spec, AudioDecoder decoder) + public SampleSDL2Factory(Stream stream, string name, SDL2AudioMixer mixer, int playbackConcurrency, SDL.SDL_AudioSpec spec) : base(name, playbackConcurrency) { this.stream = stream; this.mixer = mixer; this.spec = spec; - this.decoder = decoder; } private protected override void LoadSample() @@ -42,7 +40,7 @@ private protected override void LoadSample() try { - byte[] audio = decoder.DecodeAudioInCurrentSpec(stream); + byte[] audio = SDL2AudioManager.GetAudioDecoder().DecodeAudioInCurrentSpec(stream); if (audio.Length > 0) { diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index 82d23583f5..9900a0ba1e 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -65,6 +65,7 @@ internal void AddToQueue(byte[] audio, object? 
userdata, AudioDecoder.AudioDecod if (done) { player.DonePutting(); + data.Stream.Dispose(); decodeData = null; } } @@ -214,6 +215,8 @@ protected override void Dispose(bool disposing) isRunning = false; (Mixer as SDL2AudioMixer)?.StreamFree(this); + decodeData?.Stream.Dispose(); + decodeData?.Stop(); lock (syncRoot) diff --git a/osu.Framework/Graphics/Video/FFmpegFrame.cs b/osu.Framework/Graphics/Video/FFmpegFrame.cs index 1daa9b7a25..89f091b9b2 100644 --- a/osu.Framework/Graphics/Video/FFmpegFrame.cs +++ b/osu.Framework/Graphics/Video/FFmpegFrame.cs @@ -9,7 +9,7 @@ namespace osu.Framework.Graphics.Video { - internal sealed unsafe class FFmpegFrame : IDisposable + public sealed unsafe class FFmpegFrame : IDisposable { public readonly AVFrame* Pointer; diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 24c5ebc057..fecc4dccf4 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -24,6 +24,7 @@ using osu.Framework.Logging; using osu.Framework.Platform; using osu.Framework.Platform.Linux.Native; +using System.Buffers; namespace osu.Framework.Graphics.Video { @@ -587,10 +588,10 @@ internal void DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool try { - for (int i = 0; i < iteration; i++) - { - if (decodeUntilEnd) i--; // loop indefinitely to decode at once + int i = 0; + while (decodeUntilEnd || i++ < iteration) + { decodeNextFrame(packet, receiveFrame); if (State != DecoderState.Running) @@ -711,6 +712,12 @@ private void readDecodedFrames(AVFrame* receiveFrame) break; } + if (audio) + { + resampleAndAppendToAudioStream(receiveFrame); + continue; + } + // use `best_effort_timestamp` as it can be more accurate if timestamps from the source file (pts) are broken. // but some HW codecs don't set it in which case fallback to `pts` long frameTimestamp = receiveFrame->best_effort_timestamp != FFmpegFuncs.AV_NOPTS_VALUE ? receiveFrame->best_effort_timestamp : receiveFrame->pts; @@ -723,7 +730,7 @@ private void readDecodedFrames(AVFrame* receiveFrame) // get final frame. FFmpegFrame frame; - if (!audio && ((AVPixelFormat)receiveFrame->format).IsHardwarePixelFormat()) + if (((AVPixelFormat)receiveFrame->format).IsHardwarePixelFormat()) { // transfer data from HW decoder to RAM. 
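                        // Frames coming out of a hardware decoder use an opaque hardware pixel format and reference GPU/driver memory,
                        // so they can't be handed to swscale or uploaded as a texture directly; the usual FFmpeg route (and what the
                        // pooled transfer frames dequeued below are for) is to copy each frame into a plain software frame in system
                        // memory first, reusing frames to avoid per-frame allocations.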
if (!hwTransferFrames.TryDequeue(out var hwTransferFrame)) @@ -753,13 +760,6 @@ private void readDecodedFrames(AVFrame* receiveFrame) lastDecodedFrameTime = (float)frameTime; - if (audio) - { - resampleAndAppendToAudioStream(frame); - frame.Dispose(); - continue; - } - // Note: this is the pixel format that `VideoTexture` expects internally frame = ensureFramePixelFormat(frame, AVPixelFormat.AV_PIX_FMT_YUV420P); if (frame == null) @@ -776,7 +776,7 @@ private void readDecodedFrames(AVFrame* receiveFrame) } } - private void resampleAndAppendToAudioStream(FFmpegFrame frame) + private void resampleAndAppendToAudioStream(AVFrame* frame) { if (memoryStream == null) return; @@ -791,8 +791,8 @@ private void resampleAndAppendToAudioStream(FFmpegFrame frame) if (frame != null) { - sampleCount = (int)Math.Ceiling((double)(sampleCount + frame.Pointer->nb_samples) * audioRate / codecContext->sample_rate); - source = frame.Pointer->data.ToArray(); + sampleCount = (int)Math.Ceiling((double)(sampleCount + frame->nb_samples) * audioRate / codecContext->sample_rate); + source = frame->data.ToArray(); } // no frame, no remaining samples in resampler @@ -801,8 +801,8 @@ private void resampleAndAppendToAudioStream(FFmpegFrame frame) } else if (frame != null) { - sampleCount = frame.Pointer->nb_samples; - source = frame.Pointer->data.ToArray(); + sampleCount = frame->nb_samples; + source = frame->data.ToArray(); } else // no frame, no resampler { @@ -810,28 +810,35 @@ private void resampleAndAppendToAudioStream(FFmpegFrame frame) } int audioSize = ffmpeg.av_samples_get_buffer_size(null, audioChannels, sampleCount, audioFmt, 0); - byte[] audioDest = new byte[audioSize]; + byte[] audioDest = ArrayPool.Shared.Rent(audioSize); int nbSamples = 0; - if (swrContext != null) - { - fixed (byte** data = source) - fixed (byte* dest = audioDest) - nbSamples = ffmpeg.swr_convert(swrContext, &dest, sampleCount, data, frame != null ? frame.Pointer->nb_samples : 0); - } - else if (source != null) + try { - // assuming that the destination and source are not planar as we never define planar in ctor - nbSamples = sampleCount; - - for (int i = 0; i < audioDest.Length; i++) + if (swrContext != null) { - audioDest[i] = *(source[0] + i); + fixed (byte** data = source) + fixed (byte* dest = audioDest) + nbSamples = ffmpeg.swr_convert(swrContext, &dest, sampleCount, data, frame != null ? 
frame->nb_samples : 0); + } + else if (source != null) + { + // assuming that the destination and source are not planar as we never define planar in ctor + nbSamples = sampleCount; + + for (int i = 0; i < audioDest.Length; i++) + { + audioDest[i] = *(source[0] + i); + } } - } - if (nbSamples > 0) - memoryStream.Write(audioDest, 0, nbSamples * (audioBits / 8) * audioChannels); + if (nbSamples > 0) + memoryStream.Write(audioDest, 0, nbSamples * (audioBits / 8) * audioChannels); + } + finally + { + ArrayPool.Shared.Return(audioDest); + } } private readonly ConcurrentQueue scalerFrames = new ConcurrentQueue(); @@ -1115,7 +1122,9 @@ void freeFFmpeg() seekCallback = null; readPacketCallback = null; - videoStream.Dispose(); + if (!audio) + videoStream.Dispose(); + videoStream = null; if (swsContext != null) From 67b3ecf959c66b74b5741f04ad606fb7a69b8e2f Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 10 Nov 2023 13:06:44 +0900 Subject: [PATCH 010/127] Edit Waveform to use AudioDecoder --- osu.Framework/Audio/Track/Waveform.cs | 194 ++++++++++++-------------- 1 file changed, 90 insertions(+), 104 deletions(-) diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index 957c5138ca..9006c2b137 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -3,15 +3,13 @@ using System; using System.Buffers; -using System.Diagnostics; using System.IO; using System.Threading; using System.Threading.Tasks; -using ManagedBass; using osu.Framework.Utils; -using osu.Framework.Audio.Callbacks; using osu.Framework.Extensions; -using osu.Framework.Logging; +using NAudio.Dsp; +using System.Collections.Generic; namespace osu.Framework.Audio.Track { @@ -25,15 +23,10 @@ public class Waveform : IDisposable /// private const float resolution = 0.001f; - /// - /// The data stream is iteratively decoded to provide this many points per iteration so as to not exceed BASS's internal buffer size. - /// - private const int points_per_iteration = 1000; - /// /// FFT1024 gives ~40hz accuracy. /// - private const DataFlags fft_samples = DataFlags.FFT1024; + private const int fft_samples = 1024; /// /// Number of bins generated by the FFT. Must correspond to . @@ -67,8 +60,6 @@ public class Waveform : IDisposable private readonly Task readTask; - private FileCallbacks? fileCallbacks; - /// /// Constructs a new from provided audio data. /// @@ -80,129 +71,127 @@ public Waveform(Stream? data) if (data == null) return; - // for the time being, this code cannot run if there is no bass device available. - if (Bass.CurrentDevice < 0) - { - Logger.Log("Failed to read waveform as no bass device is available."); - return; - } + const int bytes_per_sample = 4; + const int sample_rate = 44100; - fileCallbacks = new FileCallbacks(new DataStreamFileProcedures(data)); + // Code below assumes stereo + channels = 2; - const int bytes_per_sample = 4; + // GetAudioDecoder returns BASS decoder if any BASS device (including No Sound) is available + AudioDecoder decoder = SDL2AudioManager.GetAudioDecoder(); - int decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, BassFlags.Decode | BassFlags.Float, fileCallbacks.Callbacks, fileCallbacks.Handle); + // AudioDecoder will resample data into specified sample rate and channels (44100hz 2ch float) + AudioDecoder.AudioDecoderData decoderData = decoder.CreateDecoderData(sample_rate, channels, true, SDL2.SDL.AUDIO_F32, data); - float[]? sampleBuffer = null; + Complex[]? 
complexBuffer = null; try { - Bass.ChannelGetInfo(decodeStream, out ChannelInfo info); - - long length = Bass.ChannelGetLength(decodeStream); - // Each "point" is generated from a number of samples, each sample contains a number of channels - int samplesPerPoint = (int)(info.Frequency * resolution * info.Channels); + int samplesPerPoint = (int)(sample_rate * resolution * channels); - int bytesPerPoint = samplesPerPoint * bytes_per_sample; + // Use List as entire length may be inaccurate + List pointList = new List(); - int pointCount = (int)(length / bytesPerPoint); + int fftPointIndex = 0; - points = new Point[pointCount]; + complexBuffer = ArrayPool.Shared.Rent(fft_samples); - // Each iteration pulls in several samples - int bytesPerIteration = bytesPerPoint * points_per_iteration; + int complexBufferIndex = 0; - sampleBuffer = ArrayPool.Shared.Rent(bytesPerIteration / bytes_per_sample); + Point point = new Point(); - int pointIndex = 0; + int pointSamples = 0; - // Read sample data - while (length > 0) + int m = (int)Math.Log(fft_samples, 2.0); + + do { - length = Bass.ChannelGetData(decodeStream, sampleBuffer, bytesPerIteration); - int samplesRead = (int)(length / bytes_per_sample); + decoder.LoadFromStream(decoderData, out byte[] currentBytes); + int sampleIndex = 0; - // Each point is composed of multiple samples - for (int i = 0; i < samplesRead && pointIndex < pointCount; i += samplesPerPoint) + unsafe { - // We assume one or more channels. - // For non-stereo tracks, we'll use the single track for both amplitudes. - // For anything above two tracks we'll use the first and second track. - Debug.Assert(info.Channels >= 1); - int secondChannelIndex = info.Channels > 1 ? 1 : 0; - - // Channels are interleaved in the sample data (data[0] -> channel0, data[1] -> channel1, data[2] -> channel0, etc) - // samplesPerPoint assumes this interleaving behaviour - var point = new Point(); - - for (int j = i; j < i + samplesPerPoint; j += info.Channels) + fixed (void* ptr = currentBytes) { - // Find the maximum amplitude for each channel in the point - point.AmplitudeLeft = Math.Max(point.AmplitudeLeft, Math.Abs(sampleBuffer[j])); - point.AmplitudeRight = Math.Max(point.AmplitudeRight, Math.Abs(sampleBuffer[j + secondChannelIndex])); + float* currentFloats = (float*)ptr; + int currentFloatsLength = currentBytes.Length / bytes_per_sample; + + while (sampleIndex < currentFloatsLength) + { + // Each point is composed of multiple samples + for (; pointSamples < samplesPerPoint && sampleIndex < currentFloatsLength; pointSamples += channels, sampleIndex += channels) + { + // Find the maximum amplitude for each channel in the point + float left = *(currentFloats + sampleIndex); + float right = *(currentFloats + sampleIndex + 1); + + point.AmplitudeLeft = Math.Max(point.AmplitudeLeft, Math.Abs(left)); + point.AmplitudeRight = Math.Max(point.AmplitudeRight, Math.Abs(right)); + + complexBuffer[complexBufferIndex].X = (left + right) * 0.5f; + complexBuffer[complexBufferIndex].Y = 0; + + if (++complexBufferIndex >= fft_samples) + { + complexBufferIndex = 0; + + FastFourierTransform.FFT(true, m, complexBuffer); + + point.LowIntensity = computeIntensity(sample_rate, complexBuffer, low_min, mid_min); + point.MidIntensity = computeIntensity(sample_rate, complexBuffer, mid_min, high_min); + point.HighIntensity = computeIntensity(sample_rate, complexBuffer, high_min, high_max); + + for (; fftPointIndex < pointList.Count; fftPointIndex++) + { + var prevPoint = pointList[fftPointIndex]; + prevPoint.LowIntensity = 
point.LowIntensity; + prevPoint.MidIntensity = point.MidIntensity; + prevPoint.HighIntensity = point.HighIntensity; + pointList[fftPointIndex] = prevPoint; + } + + fftPointIndex++; // current Point is going to be added + } + } + + if (pointSamples >= samplesPerPoint) + { + // There may be unclipped samples, so clip them ourselves + point.AmplitudeLeft = Math.Min(1, point.AmplitudeLeft); + point.AmplitudeRight = Math.Min(1, point.AmplitudeRight); + + pointList.Add(point); + + point = new Point(); + pointSamples = 0; + } + } } - - // BASS may provide unclipped samples, so clip them ourselves - point.AmplitudeLeft = Math.Min(1, point.AmplitudeLeft); - point.AmplitudeRight = Math.Min(1, point.AmplitudeRight); - - points[pointIndex++] = point; } - } - - Bass.ChannelSetPosition(decodeStream, 0); - length = Bass.ChannelGetLength(decodeStream); - - // Read FFT data - float[] bins = new float[fft_bins]; - int currentPoint = 0; - long currentByte = 0; - - while (length > 0) - { - length = Bass.ChannelGetData(decodeStream, bins, (int)fft_samples); - currentByte += length; + } while (decoderData.Loading); - float lowIntensity = computeIntensity(info, bins, low_min, mid_min); - float midIntensity = computeIntensity(info, bins, mid_min, high_min); - float highIntensity = computeIntensity(info, bins, high_min, high_max); - - // In general, the FFT function will read more data than the amount of data we have in one point - // so we'll be setting intensities for all points whose data fits into the amount read by the FFT - // We know that each data point required sampleDataPerPoint amount of data - for (; currentPoint < points.Length && currentPoint * bytesPerPoint < currentByte; currentPoint++) - { - var point = points[currentPoint]; - point.LowIntensity = lowIntensity; - point.MidIntensity = midIntensity; - point.HighIntensity = highIntensity; - points[currentPoint] = point; - } - } - - channels = info.Channels; + points = pointList.ToArray(); } finally { - Bass.StreamFree(decodeStream); - if (sampleBuffer != null) - ArrayPool.Shared.Return(sampleBuffer); + if (complexBuffer != null) + ArrayPool.Shared.Return(complexBuffer); } }, cancelSource.Token); } - private float computeIntensity(ChannelInfo info, float[] bins, float startFrequency, float endFrequency) + private float computeIntensity(int frequency, Complex[] bins, float startFrequency, float endFrequency) { - int startBin = (int)(fft_bins * 2 * startFrequency / info.Frequency); - int endBin = (int)(fft_bins * 2 * endFrequency / info.Frequency); + int startBin = (int)(fft_samples * startFrequency / frequency); + int endBin = (int)(fft_samples * endFrequency / frequency); - startBin = Math.Clamp(startBin, 0, bins.Length); - endBin = Math.Clamp(endBin, 0, bins.Length); + startBin = Math.Clamp(startBin, 0, fft_bins); + endBin = Math.Clamp(endBin, 0, fft_bins); float value = 0; for (int i = startBin; i < endBin; i++) - value += bins[i]; + value += (float)Math.Sqrt(bins[i].X * bins[i].X + bins[i].Y * bins[i].Y); return value; } @@ -349,9 +338,6 @@ protected virtual void Dispose(bool disposing) cancelSource.Cancel(); cancelSource.Dispose(); points = Array.Empty(); - - fileCallbacks?.Dispose(); - fileCallbacks = null; } #endregion From d4fbc8ed8c5a05220ea7c9358a70e99e510c2ed3 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 10 Nov 2023 20:21:14 +0900 Subject: [PATCH 011/127] Reduce allocations in VideoDecoder --- osu.Framework/Graphics/Video/VideoDecoder.cs | 42 ++++++++++++-------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git 
a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index fecc4dccf4..29a0174730 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -406,6 +406,9 @@ internal void PrepareDecoding() Duration = stream->duration * timeBaseInSeconds * 1000.0; else Duration = formatContext->duration / (double)FFmpegFuncs.AV_TIME_BASE * 1000.0; + + packet = ffmpeg.av_packet_alloc(); + receiveFrame = ffmpeg.av_frame_alloc(); } internal void RecreateCodecContext() @@ -507,11 +510,11 @@ private bool prepareResampler() return ffmpeg.swr_is_initialized(swrContext) > 0; } + private AVPacket* packet; + private AVFrame* receiveFrame; + private void decodingLoop(CancellationToken cancellationToken) { - var packet = ffmpeg.av_packet_alloc(); - var receiveFrame = ffmpeg.av_frame_alloc(); - const int max_pending_frames = 3; try @@ -563,9 +566,6 @@ private void decodingLoop(CancellationToken cancellationToken) } finally { - ffmpeg.av_packet_free(&packet); - ffmpeg.av_frame_free(&receiveFrame); - if (State != DecoderState.Faulted) State = DecoderState.Stopped; } @@ -581,10 +581,8 @@ internal void DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool return; } - var packet = ffmpeg.av_packet_alloc(); - var receiveFrame = ffmpeg.av_frame_alloc(); - - memoryStream = new MemoryStream(); + memoryStream ??= new MemoryStream(); + memoryStream.Position = 0; try { @@ -608,12 +606,10 @@ internal void DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool } finally { - ffmpeg.av_packet_free(&packet); - ffmpeg.av_frame_free(&receiveFrame); - - decodedAudio = memoryStream.ToArray(); - memoryStream?.Dispose(); - memoryStream = null; + decodedAudio = new byte[memoryStream.Position]; + memoryStream.Position = 0; + int read = memoryStream.Read(decodedAudio, 0, decodedAudio.Length); + Array.Resize(ref decodedAudio, read); } } @@ -1097,6 +1093,18 @@ protected virtual void Dispose(bool disposing) void freeFFmpeg() { + if (packet != null) + { + fixed (AVPacket** ptr = &packet) + ffmpeg.av_packet_free(ptr); + } + + if (receiveFrame != null) + { + fixed (AVFrame** ptr = &receiveFrame) + ffmpeg.av_frame_free(ptr); + } + if (formatContext != null && inputOpened) { fixed (AVFormatContext** ptr = &formatContext) @@ -1136,6 +1144,8 @@ void freeFFmpeg() ffmpeg.swr_free(ptr); } + memoryStream?.Dispose(); + while (decodedFrames.TryDequeue(out var f)) { f.Texture.FlushUploads(); From 9ade55afeddaff23984ac0115d14575130abbfc1 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 10 Nov 2023 20:53:21 +0900 Subject: [PATCH 012/127] Fix a possible race condition in TrackSDL2 --- osu.Framework/Audio/Track/TrackSDL2.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index 9900a0ba1e..f520f7ead6 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -215,10 +215,10 @@ protected override void Dispose(bool disposing) isRunning = false; (Mixer as SDL2AudioMixer)?.StreamFree(this); - decodeData?.Stream.Dispose(); - decodeData?.Stop(); + decodeData?.Stream.Dispose(); + lock (syncRoot) player.Dispose(); From 89a67aeb890418be89041097cd36e814cdfb52fd Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 11 Nov 2023 17:21:45 +0900 Subject: [PATCH 013/127] Use the sample rate const in SDL2AudioMixer --- osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs index ff741d7495..d9d33cda68 100644 --- a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs @@ -206,7 +206,7 @@ public EffectBox(IEffectParameter param) { // allowing non-bqf to keep index of list if (param is BQFParameters bqfp) - BiQuadFilter = getFilter(44100, bqfp); + BiQuadFilter = getFilter(SDL2AudioManager.AUDIO_FREQ, bqfp); } } From 1a3c8c792d0cb802729f2133851f91df8a462c48 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 11 Nov 2023 17:22:59 +0900 Subject: [PATCH 014/127] Add an option to dispose stream automatically in AudioDecoder --- osu.Framework/Audio/AudioDecoder.cs | 10 +++++++--- osu.Framework/Audio/BassAudioDecoder.cs | 10 +++++----- osu.Framework/Audio/FFmpegAudioDecoder.cs | 10 +++++----- osu.Framework/Audio/Track/TrackSDL2.cs | 2 -- osu.Framework/Audio/Track/Waveform.cs | 2 +- 5 files changed, 18 insertions(+), 16 deletions(-) diff --git a/osu.Framework/Audio/AudioDecoder.cs b/osu.Framework/Audio/AudioDecoder.cs index 668c01f919..927869484b 100644 --- a/osu.Framework/Audio/AudioDecoder.cs +++ b/osu.Framework/Audio/AudioDecoder.cs @@ -21,6 +21,7 @@ public abstract class AudioDecoderData internal readonly bool IsTrack; internal readonly ushort Format; internal readonly Stream Stream; + internal readonly bool AutoDisposeStream; internal readonly PassDataDelegate? Pass; internal readonly object? UserData; @@ -51,13 +52,14 @@ public long ByteLength internal volatile bool StopJob; internal volatile bool Loading; - protected AudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass, object? userData) + protected AudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass, object? userData) { Rate = rate; Channels = channels; IsTrack = isTrack; Format = format; Stream = stream; + AutoDisposeStream = autoDisposeStream; Pass = pass; UserData = userData; } @@ -70,6 +72,8 @@ public void Stop() // Call this in lock internal virtual void Dispose() { + if (AutoDisposeStream) + Stream.Dispose(); } } @@ -123,7 +127,7 @@ public AudioDecoderData StartDecodingAsync(Stream stream, PassDataDelegate pass, decoderThread.Start(); } - AudioDecoderData data = CreateDecoderData(rate, channels, true, format, stream, pass, userData); + AudioDecoderData data = CreateDecoderData(rate, channels, true, format, stream, true, pass, userData); lock (jobs) jobs.AddFirst(data); @@ -205,7 +209,7 @@ private void loop(CancellationToken token) } } - public abstract AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? userData = null); + public abstract AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? pass = null, object? userData = null); protected abstract void LoadFromStreamInternal(AudioDecoderData job, out byte[] decoded); diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index 27a82edb99..40fdc53288 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -25,8 +25,8 @@ public class BassAudioDecoderData : AudioDecoderData internal FileCallbacks? Callbacks; internal SDL2AudioStream? 
Resampler; - public BassAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? userData = null) - : base(rate, channels, isTrack, format, stream, pass, userData) + public BassAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass, object? userData) + : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData) { } @@ -109,7 +109,7 @@ protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out int bufferLen = (int)(job.IsTrack ? Bass.ChannelSeconds2Bytes(job.DecodeStream, 1) : job.ByteLength); if (bufferLen <= 0) - bufferLen = 44100 * 2 * 1; + bufferLen = 44100 * 2 * 4 * 1; byte[] buffer = new byte[bufferLen]; int got = Bass.ChannelGetData(job.DecodeStream, buffer, bufferLen); @@ -152,7 +152,7 @@ protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out } } - public override AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? userData = null) - => new BassAudioDecoderData(rate, channels, isTrack, format, stream, pass, userData); + public override AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? pass = null, object? userData = null) + => new BassAudioDecoderData(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData); } } diff --git a/osu.Framework/Audio/FFmpegAudioDecoder.cs b/osu.Framework/Audio/FFmpegAudioDecoder.cs index c4b2653b56..b10589e779 100644 --- a/osu.Framework/Audio/FFmpegAudioDecoder.cs +++ b/osu.Framework/Audio/FFmpegAudioDecoder.cs @@ -19,15 +19,15 @@ public class FFmpegAudioDecoderData : AudioDecoderData { internal VideoDecoder? FFmpeg; - public FFmpegAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass, object? userData) - : base(rate, channels, isTrack, format, stream, pass, userData) + public FFmpegAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass, object? userData) + : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData) { } internal override void Dispose() { - base.Dispose(); FFmpeg?.Dispose(); + base.Dispose(); } } @@ -58,7 +58,7 @@ protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out decoded = audioData; } - public override AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, PassDataDelegate? pass = null, object? userData = null) - => new FFmpegAudioDecoderData(rate, channels, isTrack, format, stream, pass, userData); + public override AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? pass = null, object? 
userData = null) + => new FFmpegAudioDecoderData(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData); } } diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index f520f7ead6..2738ac5935 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -217,8 +217,6 @@ protected override void Dispose(bool disposing) decodeData?.Stop(); - decodeData?.Stream.Dispose(); - lock (syncRoot) player.Dispose(); diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index 9006c2b137..f7710cc108 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -81,7 +81,7 @@ public Waveform(Stream? data) AudioDecoder decoder = SDL2AudioManager.GetAudioDecoder(); // AudioDecoder will resample data into specified sample rate and channels (44100hz 2ch float) - AudioDecoder.AudioDecoderData decoderData = decoder.CreateDecoderData(sample_rate, channels, true, SDL2.SDL.AUDIO_F32, data); + AudioDecoder.AudioDecoderData decoderData = decoder.CreateDecoderData(sample_rate, channels, true, SDL2.SDL.AUDIO_F32, data, false); Complex[]? complexBuffer = null; From 24dfb68bc938cf35bc76dd53484a4ad4f85c9c69 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 11 Nov 2023 17:23:24 +0900 Subject: [PATCH 015/127] Reset videoStream position in VideoDecoder --- osu.Framework/Graphics/Video/VideoDecoder.cs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 29a0174730..994933d079 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -372,6 +372,8 @@ private static long streamSeekCallbacks(void* opaque, long offset, int whence) // sets up libavformat state: creates the AVFormatContext, the frames, etc. 
to start decoding, but does not actually start the decodingLoop internal void PrepareDecoding() { + videoStream.Position = 0; + const int context_buffer_size = 4096; readPacketCallback = readPacket; seekCallback = streamSeekCallbacks; From 455af66aaf81c903d6d58aa4b8e55eb569367ccd Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 11 Nov 2023 17:23:53 +0900 Subject: [PATCH 016/127] Create MemoryStream in VideoDecoder if audio --- osu.Framework/Graphics/Video/VideoDecoder.cs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 994933d079..827f81cf1e 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -193,6 +193,8 @@ public VideoDecoder(Stream audioStream, int rate, int channels, bool isFloat, in audioChannelLayout = ffmpeg.av_get_default_channel_layout(channels); audioFmt = AVSampleFormat.AV_SAMPLE_FMT_FLT; + memoryStream = new MemoryStream(); + if (isFloat) audioFmt = AVSampleFormat.AV_SAMPLE_FMT_FLT; else if (!signed && bits == 8) @@ -583,7 +585,6 @@ internal void DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool return; } - memoryStream ??= new MemoryStream(); memoryStream.Position = 0; try @@ -1148,6 +1149,8 @@ void freeFFmpeg() memoryStream?.Dispose(); + memoryStream = null; + while (decodedFrames.TryDequeue(out var f)) { f.Texture.FlushUploads(); From a7f9bee2ad25ddaea71e4bcf8cf9ec0b95172055 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Wed, 15 Nov 2023 22:26:47 +0900 Subject: [PATCH 017/127] Let AudioDecoder dispose the stream --- osu.Framework/Audio/Track/TrackSDL2.cs | 1 - 1 file changed, 1 deletion(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index 2738ac5935..82d23583f5 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -65,7 +65,6 @@ internal void AddToQueue(byte[] audio, object? userdata, AudioDecoder.AudioDecod if (done) { player.DonePutting(); - data.Stream.Dispose(); decodeData = null; } } From 3ee5aec837dc34b6d4f050c992b115870f3c3cc6 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Wed, 15 Nov 2023 22:36:46 +0900 Subject: [PATCH 018/127] Flush when seeking in TrackSDL2Player --- osu.Framework/Audio/ResamplingPlayer.cs | 5 +++++ osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs | 7 ++++++- osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs | 5 ++--- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/osu.Framework/Audio/ResamplingPlayer.cs b/osu.Framework/Audio/ResamplingPlayer.cs index c20f19b79a..4e4adee3b0 100644 --- a/osu.Framework/Audio/ResamplingPlayer.cs +++ b/osu.Framework/Audio/ResamplingPlayer.cs @@ -74,6 +74,11 @@ protected virtual double GetProcessingLatency() return resampler.GetCurrentLatency() * 1000.0d; } + public virtual void Flush() + { + resampler?.Reset(); + } + /// /// Returns rate adjusted audio samples. It calls a parent method if is 1. 
/// diff --git a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs index 0f713be713..4164538b52 100644 --- a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs @@ -147,7 +147,6 @@ public override int GetRemainingSamples(float[] ret) public override void Reset(bool resetPosition = true) { base.Reset(resetPosition); - soundTouch?.Flush(); doneFilling = false; donePlaying = false; } @@ -162,6 +161,12 @@ protected int GetTempoLatencyInSamples() protected override double GetProcessingLatency() => base.GetProcessingLatency() + (double)GetTempoLatencyInSamples() / SrcRate * 1000.0d; + public override void Flush() + { + base.Flush(); + soundTouch?.Flush(); + } + public override void Seek(double seek) { base.Seek(seek); diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index cc71f44ebb..b914c29365 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -174,9 +174,7 @@ public virtual void Reset(bool resetPosition = true) if (resetPosition) { SaveSeek = 0; - - if (AudioData != null) - AudioData.Position = 0; + Seek(0); } } @@ -213,6 +211,7 @@ public virtual void Seek(double seek) { SaveSeek = 0; AudioData.Position = Math.Clamp(tmp, 0, AudioDataLength - 1); + Flush(); } } From 03a678718398db1aaf4599e3f7941e291d76ac71 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Wed, 15 Nov 2023 23:32:58 +0900 Subject: [PATCH 019/127] Let TempoSDL2Player use offset --- osu.Framework/Audio/ResamplingPlayer.cs | 14 +++--- .../Audio/Track/TempoSDL2AudioPlayer.cs | 47 ++++++++++++++----- 2 files changed, 43 insertions(+), 18 deletions(-) diff --git a/osu.Framework/Audio/ResamplingPlayer.cs b/osu.Framework/Audio/ResamplingPlayer.cs index 4e4adee3b0..0e87ef9a4d 100644 --- a/osu.Framework/Audio/ResamplingPlayer.cs +++ b/osu.Framework/Audio/ResamplingPlayer.cs @@ -84,21 +84,23 @@ public virtual void Flush() /// /// An array to put samples in /// The number of samples put into the array - public virtual int GetRemainingSamples(float[] data) + public virtual int GetRemainingSamples(float[] data) => GetRemainingSamples(data, 0, data.Length); + + public virtual int GetRemainingSamples(float[] data, int offset, int needed) { if (RelativeRate == 0) return 0; if (resampler == null || RelativeRate == 1) - return GetRemainingRawFloats(data, 0, data.Length); + return GetRemainingRawFloats(data, offset, needed); - int requested = data.Length / SrcChannels; - int needed = resampler.ResamplePrepare(requested, SrcChannels, out float[] inBuffer, out int inBufferOffset); - int rawGot = GetRemainingRawFloats(inBuffer, inBufferOffset, needed * SrcChannels); + int requested = needed / SrcChannels; + int neededFrames = resampler.ResamplePrepare(requested, SrcChannels, out float[] inBuffer, out int inBufferOffset); + int rawGot = GetRemainingRawFloats(inBuffer, inBufferOffset, neededFrames * SrcChannels); if (rawGot > 0) { - int got = resampler.ResampleOut(data, 0, rawGot / SrcChannels, requested, SrcChannels); + int got = resampler.ResampleOut(data, offset, rawGot / SrcChannels, requested, SrcChannels); return got * SrcChannels; } diff --git a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs index 4164538b52..609180ca1d 100644 --- a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs @@ -2,6 +2,7 @@ // See 
the LICENCE file in the repository root for full licence text. using System; +using System.Buffers; using SoundTouch; namespace osu.Framework.Audio.Track @@ -54,12 +55,22 @@ private void fillSamples(int samples) while (!base.Done && soundTouch.AvailableSamples < samples) { int getSamples = (int)Math.Ceiling((samples - soundTouch.AvailableSamples) * Tempo) * SrcChannels; - float[] src = new float[getSamples]; - getSamples = base.GetRemainingSamples(src); + float[] src = ArrayPool.Shared.Rent(getSamples); + + try + { + getSamples = base.GetRemainingRawFloats(src, 0, getSamples); + + if (getSamples > 0) + soundTouch.PutSamples(src, getSamples / SrcChannels); + } + finally + { + ArrayPool.Shared.Return(src); + } + if (getSamples <= 0) break; - - soundTouch.PutSamples(src, getSamples / SrcChannels); } if (!doneFilling && base.Done) @@ -121,27 +132,39 @@ private void setTempo(double tempo) /// /// An array to put samples in /// The number of samples put - public override int GetRemainingSamples(float[] ret) + protected override int GetRemainingRawFloats(float[] data, int offset, int needed) { if (soundTouch == null) - return base.GetRemainingSamples(ret); + return base.GetRemainingRawFloats(data, offset, needed); - if (RelativeRate == 0) - return 0; - - int expected = ret.Length / SrcChannels; + int expected = needed / SrcChannels; if (!doneFilling && soundTouch.AvailableSamples < expected) { fillSamples(expected); } - int got = soundTouch.ReceiveSamples(ret, expected); + float[] ret = offset == 0 ? data : ArrayPool.Shared.Rent(needed); + + int got = 0; + + try + { + got = soundTouch.ReceiveSamples(ret, expected) * SrcChannels; + + if (offset != 0 && got > 0) + Buffer.BlockCopy(ret, 0, data, offset * 4, got * 4); + } + finally + { + if (offset != 0) + ArrayPool.Shared.Return(ret); + } if (got == 0 && doneFilling) donePlaying = true; - return got * SrcChannels; + return got; } public override void Reset(bool resetPosition = true) From 9ec8d466021f892660c2b838621a79191eaf450a Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 18 Dec 2023 21:06:14 +0900 Subject: [PATCH 020/127] Revert "Let TempoSDL2Player use offset" This reverts commit 03a678718398db1aaf4599e3f7941e291d76ac71. 
--- osu.Framework/Audio/ResamplingPlayer.cs | 14 +++--- .../Audio/Track/TempoSDL2AudioPlayer.cs | 47 +++++-------------- 2 files changed, 18 insertions(+), 43 deletions(-) diff --git a/osu.Framework/Audio/ResamplingPlayer.cs b/osu.Framework/Audio/ResamplingPlayer.cs index 0e87ef9a4d..4e4adee3b0 100644 --- a/osu.Framework/Audio/ResamplingPlayer.cs +++ b/osu.Framework/Audio/ResamplingPlayer.cs @@ -84,23 +84,21 @@ public virtual void Flush() /// /// An array to put samples in /// The number of samples put into the array - public virtual int GetRemainingSamples(float[] data) => GetRemainingSamples(data, 0, data.Length); - - public virtual int GetRemainingSamples(float[] data, int offset, int needed) + public virtual int GetRemainingSamples(float[] data) { if (RelativeRate == 0) return 0; if (resampler == null || RelativeRate == 1) - return GetRemainingRawFloats(data, offset, needed); + return GetRemainingRawFloats(data, 0, data.Length); - int requested = needed / SrcChannels; - int neededFrames = resampler.ResamplePrepare(requested, SrcChannels, out float[] inBuffer, out int inBufferOffset); - int rawGot = GetRemainingRawFloats(inBuffer, inBufferOffset, neededFrames * SrcChannels); + int requested = data.Length / SrcChannels; + int needed = resampler.ResamplePrepare(requested, SrcChannels, out float[] inBuffer, out int inBufferOffset); + int rawGot = GetRemainingRawFloats(inBuffer, inBufferOffset, needed * SrcChannels); if (rawGot > 0) { - int got = resampler.ResampleOut(data, offset, rawGot / SrcChannels, requested, SrcChannels); + int got = resampler.ResampleOut(data, 0, rawGot / SrcChannels, requested, SrcChannels); return got * SrcChannels; } diff --git a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs index 609180ca1d..4164538b52 100644 --- a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs @@ -2,7 +2,6 @@ // See the LICENCE file in the repository root for full licence text. using System; -using System.Buffers; using SoundTouch; namespace osu.Framework.Audio.Track @@ -55,22 +54,12 @@ private void fillSamples(int samples) while (!base.Done && soundTouch.AvailableSamples < samples) { int getSamples = (int)Math.Ceiling((samples - soundTouch.AvailableSamples) * Tempo) * SrcChannels; - float[] src = ArrayPool.Shared.Rent(getSamples); - - try - { - getSamples = base.GetRemainingRawFloats(src, 0, getSamples); - - if (getSamples > 0) - soundTouch.PutSamples(src, getSamples / SrcChannels); - } - finally - { - ArrayPool.Shared.Return(src); - } - + float[] src = new float[getSamples]; + getSamples = base.GetRemainingSamples(src); if (getSamples <= 0) break; + + soundTouch.PutSamples(src, getSamples / SrcChannels); } if (!doneFilling && base.Done) @@ -132,39 +121,27 @@ private void setTempo(double tempo) /// /// An array to put samples in /// The number of samples put - protected override int GetRemainingRawFloats(float[] data, int offset, int needed) + public override int GetRemainingSamples(float[] ret) { if (soundTouch == null) - return base.GetRemainingRawFloats(data, offset, needed); + return base.GetRemainingSamples(ret); - int expected = needed / SrcChannels; + if (RelativeRate == 0) + return 0; + + int expected = ret.Length / SrcChannels; if (!doneFilling && soundTouch.AvailableSamples < expected) { fillSamples(expected); } - float[] ret = offset == 0 ? 
data : ArrayPool.Shared.Rent(needed); - - int got = 0; - - try - { - got = soundTouch.ReceiveSamples(ret, expected) * SrcChannels; - - if (offset != 0 && got > 0) - Buffer.BlockCopy(ret, 0, data, offset * 4, got * 4); - } - finally - { - if (offset != 0) - ArrayPool.Shared.Return(ret); - } + int got = soundTouch.ReceiveSamples(ret, expected); if (got == 0 && doneFilling) donePlaying = true; - return got; + return got * SrcChannels; } public override void Reset(bool resetPosition = true) From 41b65fada112123a70d6cc8700d7c51ef67d5c97 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 18 Dec 2023 21:54:18 +0900 Subject: [PATCH 021/127] Remove unused using --- osu.Framework.Tests/Audio/BassTestComponents.cs | 1 - 1 file changed, 1 deletion(-) diff --git a/osu.Framework.Tests/Audio/BassTestComponents.cs b/osu.Framework.Tests/Audio/BassTestComponents.cs index b7a7ff1c70..04fed5a07e 100644 --- a/osu.Framework.Tests/Audio/BassTestComponents.cs +++ b/osu.Framework.Tests/Audio/BassTestComponents.cs @@ -7,7 +7,6 @@ using osu.Framework.Audio.Mixing.Bass; using osu.Framework.Audio.Sample; using osu.Framework.Audio.Track; -using osu.Framework.Development; using osu.Framework.Extensions; using osu.Framework.IO.Stores; using osu.Framework.Threading; From 2535ec4f3a0059238ced2b3efa56376ec054acd8 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 18 Dec 2023 21:54:56 +0900 Subject: [PATCH 022/127] Fix wrong audio byteLength assumption --- osu.Framework/Audio/BassAudioDecoder.cs | 4 ++++ osu.Framework/Audio/FFmpegAudioDecoder.cs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index 40fdc53288..a5e720b831 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -95,7 +95,11 @@ protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out } if (info.Channels != job.Channels || srcformat != job.Format || info.Frequency != job.Rate) + { job.Resampler = new SDL2AudioStream(srcformat, (byte)info.Channels, info.Frequency, job.Format, (byte)job.Channels, job.Rate); + job.ByteLength = (long)Math.Ceiling(job.ByteLength / (double)info.Frequency / SDL.SDL_AUDIO_BITSIZE(srcformat) / info.Channels + * job.Rate * SDL.SDL_AUDIO_BITSIZE(job.Format) * job.Channels); + } } else { diff --git a/osu.Framework/Audio/FFmpegAudioDecoder.cs b/osu.Framework/Audio/FFmpegAudioDecoder.cs index b10589e779..22c301f4a3 100644 --- a/osu.Framework/Audio/FFmpegAudioDecoder.cs +++ b/osu.Framework/Audio/FFmpegAudioDecoder.cs @@ -45,7 +45,7 @@ protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out job.Bitrate = (int)job.FFmpeg.Bitrate; job.Length = job.FFmpeg.Duration; - job.ByteLength = (long)Math.Ceiling(job.FFmpeg.Duration / 1000.0d * job.Rate) * job.Channels * SDL.SDL_AUDIO_BITSIZE(job.Format); // FIXME + job.ByteLength = (long)Math.Ceiling(job.FFmpeg.Duration / 1000.0d) * job.Rate * job.Channels * (SDL.SDL_AUDIO_BITSIZE(job.Format) / 8); // FIXME job.Loading = true; } From 028c0ffef9f09b60970aa6b93c2ac40b99a507a2 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 19 Dec 2023 22:27:53 +0900 Subject: [PATCH 023/127] Use float for Balance and improve thread safety --- .../Audio/Mixing/SDL2/ISDL2AudioChannel.cs | 2 +- .../Audio/Mixing/SDL2/SDL2AudioMixer.cs | 9 ++-- .../Audio/Sample/SampleChannelSDL2.cs | 31 +++++++++++--- osu.Framework/Audio/Track/TrackSDL2.cs | 42 +++++++++++-------- 4 files changed, 55 insertions(+), 29 deletions(-) diff --git 
a/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs b/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs index 1e58d93b34..5d82e23dc9 100644 --- a/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs +++ b/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs @@ -28,6 +28,6 @@ internal interface ISDL2AudioChannel : IAudioChannel /// /// Mixer uses this to adjust channel balance. Value should be within -1.0 and 1.0 /// - double Balance { get; } + float Balance { get; } } } diff --git a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs index d9d33cda68..11bda979e7 100644 --- a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs @@ -110,15 +110,16 @@ public void MixChannelsInto(float[] data) else if (channel.Playing) { int size = channel.GetRemainingSamples(ret); - float left = 1; - float right = 1; if (size > 0) { + float left = 1; + float right = 1; + if (channel.Balance < 0) - right += (float)channel.Balance; + right += channel.Balance; else if (channel.Balance > 0) - left -= (float)channel.Balance; + left -= channel.Balance; right *= channel.Volume; left *= channel.Volume; diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs index 9f2f8cda2f..8432d60ad2 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs +++ b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs @@ -1,6 +1,7 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. +using System.Threading; using osu.Framework.Audio.Mixing.SDL2; namespace osu.Framework.Audio.Sample @@ -12,6 +13,9 @@ internal sealed class SampleChannelSDL2 : SampleChannel, ISDL2AudioChannel private volatile bool playing; public override bool Playing => playing; + private volatile bool looping; + public override bool Looping { get => looping; set => looping = value; } + public SampleChannelSDL2(SampleSDL2 sample, SampleSDL2AudioPlayer player) : base(sample.Name) { @@ -36,11 +40,11 @@ public override void Stop() int ISDL2AudioChannel.GetRemainingSamples(float[] data) { - if (player.RelativeRate != AggregateFrequency.Value) - player.RelativeRate = AggregateFrequency.Value; + if (player.RelativeRate != rate) + player.RelativeRate = rate; - if (player.Loop != Looping) - player.Loop = Looping; + if (player.Loop != looping) + player.Loop = looping; if (!started) { @@ -59,11 +63,26 @@ int ISDL2AudioChannel.GetRemainingSamples(float[] data) return ret; } - float ISDL2AudioChannel.Volume => (float)AggregateVolume.Value; + private volatile float volume = 1.0f; + private volatile float balance; + + private double rate = 1.0f; + + internal override void OnStateChanged() + { + base.OnStateChanged(); + + volume = (float)AggregateVolume.Value; + balance = (float)AggregateBalance.Value; + + Interlocked.Exchange(ref rate, AggregateFrequency.Value); + } + + float ISDL2AudioChannel.Volume => volume; bool ISDL2AudioChannel.Playing => playing; - double ISDL2AudioChannel.Balance => AggregateBalance.Value; + float ISDL2AudioChannel.Balance => balance; ~SampleChannelSDL2() { diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index 82d23583f5..db3500a125 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -106,22 +106,6 @@ protected override void UpdateState() } } - internal override void OnStateChanged() - { - base.OnStateChanged(); - - lock 
(syncRoot) - { - if (!player.ReversePlayback && AggregateFrequency.Value < 0) - player.ReversePlayback = true; - else if (player.ReversePlayback && AggregateFrequency.Value >= 0) - player.ReversePlayback = false; - - player.RelativeRate = Math.Abs(AggregateFrequency.Value); - player.Tempo = AggregateTempo.Value; - } - } - public override bool Seek(double seek) => SeekAsync(seek).GetResultSafely(); public override async Task SeekAsync(double seek) @@ -195,11 +179,33 @@ int ISDL2AudioChannel.GetRemainingSamples(float[] data) return ret; } + private volatile float volume = 1.0f; + private volatile float balance; + + internal override void OnStateChanged() + { + base.OnStateChanged(); + + lock (syncRoot) + { + if (!player.ReversePlayback && AggregateFrequency.Value < 0) + player.ReversePlayback = true; + else if (player.ReversePlayback && AggregateFrequency.Value >= 0) + player.ReversePlayback = false; + + player.RelativeRate = Math.Abs(AggregateFrequency.Value); + player.Tempo = AggregateTempo.Value; + } + + volume = (float)AggregateVolume.Value; + balance = (float)AggregateBalance.Value; + } + bool ISDL2AudioChannel.Playing => isRunning && !player.Done; - float ISDL2AudioChannel.Volume => (float)AggregateVolume.Value; + float ISDL2AudioChannel.Volume => volume; - double ISDL2AudioChannel.Balance => AggregateBalance.Value; + float ISDL2AudioChannel.Balance => balance; ~TrackSDL2() { From bd7c7c9a6063ed696bfa957d27ddb0ddb0c05281 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 19 Dec 2023 22:32:39 +0900 Subject: [PATCH 024/127] Reduce allocation when passing decoded audio --- osu.Framework/Audio/AudioDecoder.cs | 36 ++++++++++++------- osu.Framework/Audio/BassAudioDecoder.cs | 36 ++++++++++++------- osu.Framework/Audio/FFmpegAudioDecoder.cs | 14 +++++--- osu.Framework/Audio/Track/TrackSDL2.cs | 4 +-- .../Audio/Track/TrackSDL2AudioPlayer.cs | 4 +-- osu.Framework/Audio/Track/Waveform.cs | 6 ++-- osu.Framework/Graphics/Video/VideoDecoder.cs | 18 +++++----- 7 files changed, 73 insertions(+), 45 deletions(-) diff --git a/osu.Framework/Audio/AudioDecoder.cs b/osu.Framework/Audio/AudioDecoder.cs index 927869484b..d36eacd1bf 100644 --- a/osu.Framework/Audio/AudioDecoder.cs +++ b/osu.Framework/Audio/AudioDecoder.cs @@ -70,7 +70,7 @@ public void Stop() } // Call this in lock - internal virtual void Dispose() + internal virtual void Free() { if (AutoDisposeStream) Stream.Dispose(); @@ -85,13 +85,14 @@ internal virtual void Dispose() /// Decoded audio data /// /// - public delegate void PassDataDelegate(byte[] data, object? userdata, AudioDecoderData decoderData, bool done); + public delegate void PassDataDelegate(byte[] data, int length, object? userdata, AudioDecoderData decoderData, bool done); private readonly int rate; private readonly int channels; private readonly ushort format; private Thread? decoderThread; + private AutoResetEvent? decoderWaitHandle; /// /// Set up configuration and start a decoding thread. 
@@ -119,6 +120,8 @@ public AudioDecoderData StartDecodingAsync(Stream stream, PassDataDelegate pass, { if (decoderThread == null) { + decoderWaitHandle = new AutoResetEvent(false); + decoderThread = new Thread(() => loop(tokenSource.Token)) { IsBackground = true @@ -132,6 +135,8 @@ public AudioDecoderData StartDecodingAsync(Stream stream, PassDataDelegate pass, lock (jobs) jobs.AddFirst(data); + decoderWaitHandle?.Set(); + return data; } @@ -158,8 +163,8 @@ public byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream) while (data.Loading) { - LoadFromStream(data, out decoded); - memoryStream.Write(decoded); + int read = LoadFromStream(data, out decoded); + memoryStream.Write(decoded, 0, read); } return memoryStream.ToArray(); @@ -187,13 +192,13 @@ private void loop(CancellationToken token) if (data.StopJob) { - data.Dispose(); + data.Free(); jobs.Remove(node); } else { - LoadFromStream(data, out byte[] decoded); - data.Pass?.Invoke(decoded, data.UserData, data, !data.Loading); + int read = LoadFromStream(data, out byte[] decoded); + data.Pass?.Invoke(decoded, read, data.UserData, data, !data.Loading); } if (!data.Loading) @@ -205,24 +210,24 @@ private void loop(CancellationToken token) } if (jobCount <= 0) - Thread.Sleep(50); + decoderWaitHandle?.WaitOne(); } } public abstract AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? pass = null, object? userData = null); - protected abstract void LoadFromStreamInternal(AudioDecoderData job, out byte[] decoded); + protected abstract int LoadFromStreamInternal(AudioDecoderData job, out byte[] decoded); /// /// Decodes and resamples audio from job.Stream, and pass it to decoded. /// /// Decode data /// Decoded audio - public void LoadFromStream(AudioDecoderData job, out byte[] decoded) + public int LoadFromStream(AudioDecoderData job, out byte[] decoded) { try { - LoadFromStreamInternal(job, out decoded); + return LoadFromStreamInternal(job, out decoded); } catch (Exception e) { @@ -233,8 +238,10 @@ public void LoadFromStream(AudioDecoderData job, out byte[] decoded) finally { if (!job.Loading) - job.Dispose(); + job.Free(); } + + return -1; } private bool disposedValue; @@ -246,15 +253,18 @@ protected virtual void Dispose(bool disposing) if (disposing) { tokenSource.Cancel(); + decoderWaitHandle?.Set(); + tokenSource.Dispose(); decoderThread?.Join(); + decoderWaitHandle?.Dispose(); } lock (jobs) { foreach (var job in jobs) { - job.Dispose(); + job.Free(); } jobs.Clear(); diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index a5e720b831..a51fc7c668 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -25,12 +25,15 @@ public class BassAudioDecoderData : AudioDecoderData internal FileCallbacks? Callbacks; internal SDL2AudioStream? Resampler; + internal byte[]? DecodeData; + internal byte[]? ResampleData; + public BassAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass, object? 
userData) : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData) { } - internal override void Dispose() + internal override void Free() { if (DecodeStream != 0) { @@ -41,13 +44,16 @@ internal override void Dispose() Resampler?.Dispose(); Callbacks?.Dispose(); - base.Dispose(); + DecodeData = null; + ResampleData = null; + + base.Free(); } } private static readonly object bass_sync_lock = new object(); - protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out byte[] decoded) + protected override int LoadFromStreamInternal(AudioDecoderData decodeData, out byte[] decoded) { if (decodeData is not BassAudioDecoderData job) throw new ArgumentException("Provide proper data"); @@ -115,8 +121,12 @@ protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out if (bufferLen <= 0) bufferLen = 44100 * 2 * 4 * 1; - byte[] buffer = new byte[bufferLen]; - int got = Bass.ChannelGetData(job.DecodeStream, buffer, bufferLen); + if (job.DecodeData == null || job.DecodeData.Length < bufferLen) + { + job.DecodeData = new byte[bufferLen]; + } + + int got = Bass.ChannelGetData(job.DecodeStream, job.DecodeData, bufferLen); if (got == -1) { @@ -131,27 +141,27 @@ protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out if (job.Resampler == null) { - if (got <= 0) buffer = Array.Empty(); - else if (got != bufferLen) Array.Resize(ref buffer, got); - - decoded = buffer; + decoded = job.DecodeData; + return Math.Max(0, got); } else { if (got > 0) - job.Resampler.Put(buffer, got); + job.Resampler.Put(job.DecodeData, got); if (!job.Loading) job.Resampler.Flush(); int avail = job.Resampler.GetPendingBytes(); - byte[] resampled = avail > 0 ? new byte[avail] : Array.Empty(); + if (job.ResampleData == null || job.ResampleData.Length < avail) + job.ResampleData = new byte[avail]; if (avail > 0) - job.Resampler.Get(resampled, avail); + job.Resampler.Get(job.ResampleData, avail); - decoded = resampled; + decoded = job.ResampleData; + return avail; } } } diff --git a/osu.Framework/Audio/FFmpegAudioDecoder.cs b/osu.Framework/Audio/FFmpegAudioDecoder.cs index 22c301f4a3..c4ffeeb07a 100644 --- a/osu.Framework/Audio/FFmpegAudioDecoder.cs +++ b/osu.Framework/Audio/FFmpegAudioDecoder.cs @@ -18,20 +18,23 @@ public FFmpegAudioDecoder(int rate, int channels, ushort format) public class FFmpegAudioDecoderData : AudioDecoderData { internal VideoDecoder? FFmpeg; + internal byte[]? DecodeData; public FFmpegAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass, object? 
userData) : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData) { } - internal override void Dispose() + internal override void Free() { + DecodeData = null; + FFmpeg?.Dispose(); - base.Dispose(); + base.Free(); } } - protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out byte[] decoded) + protected override int LoadFromStreamInternal(AudioDecoderData decodeData, out byte[] decoded) { if (decodeData is not FFmpegAudioDecoderData job) throw new ArgumentException("Provide proper data"); @@ -50,12 +53,13 @@ protected override void LoadFromStreamInternal(AudioDecoderData decodeData, out job.Loading = true; } - job.FFmpeg.DecodeNextAudioFrame(32, out byte[] audioData, !job.IsTrack); + int got = job.FFmpeg.DecodeNextAudioFrame(32, ref job.DecodeData, !job.IsTrack); if (job.FFmpeg.State != VideoDecoder.DecoderState.Running) job.Loading = false; - decoded = audioData; + decoded = job.DecodeData; + return got; } public override AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? pass = null, object? userData = null) diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index db3500a125..c23f1983ad 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -48,7 +48,7 @@ public TrackSDL2(string name, int rate, byte channels, int samples) private AudioDecoder.AudioDecoderData? decodeData; - internal void AddToQueue(byte[] audio, object? userdata, AudioDecoder.AudioDecoderData data, bool done) + internal void AddToQueue(byte[] audio, int length, object? userdata, AudioDecoder.AudioDecoderData data, bool done) { if (IsDisposed) return; @@ -60,7 +60,7 @@ internal void AddToQueue(byte[] audio, object? userdata, AudioDecoder.AudioDecod if (!player.IsLoading) player.PrepareStream(data.ByteLength); - player.PutSamplesInStream(audio); + player.PutSamplesInStream(audio, length); if (done) { diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index b914c29365..8f18fcd5bb 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -73,7 +73,7 @@ internal void PrepareStream(long byteLength) isLoading = true; } - internal void PutSamplesInStream(byte[] next) + internal void PutSamplesInStream(byte[] next, int length) { if (disposedValue) return; @@ -83,7 +83,7 @@ internal void PutSamplesInStream(byte[] next) long save = AudioData.Position; AudioData.Position = AudioData.Length; - AudioData.Write(next); + AudioData.Write(next, 0, length); AudioData.Position = save; } diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index f7710cc108..1d88abbbf7 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -107,7 +107,7 @@ public Waveform(Stream? data) do { - decoder.LoadFromStream(decoderData, out byte[] currentBytes); + int read = decoder.LoadFromStream(decoderData, out byte[] currentBytes); int sampleIndex = 0; unsafe @@ -115,7 +115,7 @@ public Waveform(Stream? data) fixed (void* ptr = currentBytes) { float* currentFloats = (float*)ptr; - int currentFloatsLength = currentBytes.Length / bytes_per_sample; + int currentFloatsLength = read / bytes_per_sample; while (sampleIndex < currentFloatsLength) { @@ -177,6 +177,8 @@ public Waveform(Stream? 
data) { if (complexBuffer != null) ArrayPool.Shared.Return(complexBuffer); + + decoderData.Free(); } }, cancelSource.Token); } diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 36fca58a0b..e2af54ef53 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -577,12 +577,12 @@ private void decodingLoop(CancellationToken cancellationToken) private MemoryStream memoryStream; - internal void DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool decodeUntilEnd = false) + internal int DecodeNextAudioFrame(int iteration, ref byte[] decodedAudio, bool decodeUntilEnd = false) { if (!audio) { decodedAudio = Array.Empty(); - return; + return 0; } memoryStream.Position = 0; @@ -606,14 +606,16 @@ internal void DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool { Logger.Error(e, "VideoDecoder faulted while decoding audio"); State = DecoderState.Faulted; + return 0; } - finally - { + + if (decodedAudio == null || decodedAudio.Length < memoryStream.Position) decodedAudio = new byte[memoryStream.Position]; - memoryStream.Position = 0; - int read = memoryStream.Read(decodedAudio, 0, decodedAudio.Length); - Array.Resize(ref decodedAudio, read); - } + + int pos = (int)memoryStream.Position; + + memoryStream.Position = 0; + return memoryStream.Read(decodedAudio, 0, pos); } private void decodeNextFrame(AVPacket* packet, AVFrame* receiveFrame) From 9f2eff4adb5fe7de8d5411a0f6186705e33e2aa0 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Thu, 21 Dec 2023 17:31:40 +0900 Subject: [PATCH 025/127] Refactor AudioDecoder --- ...AudioDecoder.cs => AudioDecoderManager.cs} | 166 +++++++----------- osu.Framework/Audio/BassAudioDecoder.cs | 119 ++++++------- osu.Framework/Audio/FFmpegAudioDecoder.cs | 59 +++---- osu.Framework/Audio/SDL2AudioManager.cs | 15 +- .../Audio/Sample/SampleSDL2Factory.cs | 2 +- osu.Framework/Audio/Track/TrackSDL2.cs | 20 ++- osu.Framework/Audio/Track/Waveform.cs | 18 +- 7 files changed, 165 insertions(+), 234 deletions(-) rename osu.Framework/Audio/{AudioDecoder.cs => AudioDecoderManager.cs} (50%) diff --git a/osu.Framework/Audio/AudioDecoder.cs b/osu.Framework/Audio/AudioDecoderManager.cs similarity index 50% rename from osu.Framework/Audio/AudioDecoder.cs rename to osu.Framework/Audio/AudioDecoderManager.cs index d36eacd1bf..b135c9d4e7 100644 --- a/osu.Framework/Audio/AudioDecoder.cs +++ b/osu.Framework/Audio/AudioDecoderManager.cs @@ -12,18 +12,17 @@ namespace osu.Framework.Audio /// /// Decodes audio from , and convert it to appropriate format. /// - public abstract class AudioDecoder : IDisposable + public class AudioDecoderManager : IDisposable { - public abstract class AudioDecoderData + public abstract class AudioDecoder { - internal readonly int Rate; - internal readonly int Channels; - internal readonly bool IsTrack; - internal readonly ushort Format; - internal readonly Stream Stream; - internal readonly bool AutoDisposeStream; - internal readonly PassDataDelegate? Pass; - internal readonly object? UserData; + protected readonly int Rate; + protected readonly int Channels; + protected readonly bool IsTrack; + protected readonly ushort Format; + protected readonly Stream Stream; + protected readonly bool AutoDisposeStream; + protected readonly PassDataDelegate? 
Pass; private volatile int bitrate; @@ -52,7 +51,7 @@ public long ByteLength internal volatile bool StopJob; internal volatile bool Loading; - protected AudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass, object? userData) + protected AudioDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) { Rate = rate; Channels = channels; @@ -61,7 +60,6 @@ protected AudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream = stream; AutoDisposeStream = autoDisposeStream; Pass = pass; - UserData = userData; } public void Stop() @@ -69,54 +67,64 @@ public void Stop() StopJob = true; } - // Call this in lock + // Not using IDisposable since things must be handled in a specific thread internal virtual void Free() { if (AutoDisposeStream) Stream.Dispose(); } - } - private readonly LinkedList jobs = new LinkedList(); + protected abstract int LoadFromStreamInternal(out byte[] decoded); + + /// + /// Decodes and resamples audio from job.Stream, and pass it to decoded. + /// + /// Decoded audio + public int LoadFromStream(out byte[] decoded) + { + int read = 0; + + try + { + read = LoadFromStreamInternal(out decoded); + } + catch (Exception e) + { + Logger.Log(e.Message, level: LogLevel.Important); + Loading = false; + decoded = Array.Empty(); + } + finally + { + if (!Loading) + Free(); + } + + Pass?.Invoke(decoded, read, this, !Loading); + return read; + } + } - /// - /// Decoder will call this delegate every time some amount of data is ready. - /// - /// Decoded audio data - /// - /// - public delegate void PassDataDelegate(byte[] data, int length, object? userdata, AudioDecoderData decoderData, bool done); + private readonly LinkedList jobs = new LinkedList(); - private readonly int rate; - private readonly int channels; - private readonly ushort format; + public delegate void PassDataDelegate(byte[] data, int length, AudioDecoder decoderData, bool done); private Thread? decoderThread; private AutoResetEvent? decoderWaitHandle; - /// - /// Set up configuration and start a decoding thread. - /// - /// Resample rate - /// Resample channels - /// Resample SDL audio format - protected AudioDecoder(int rate, int channels, ushort format) + private readonly CancellationTokenSource tokenSource = new CancellationTokenSource(); + + internal static AudioDecoder CreateDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, + bool autoDisposeStream = true, PassDataDelegate? pass = null) { - this.rate = rate; - this.channels = channels; - this.format = format; - } + AudioDecoder decoder = ManagedBass.Bass.CurrentDevice >= 0 + ? new BassAudioDecoder(rate, channels, isTrack, format, stream, autoDisposeStream, pass) + : new FFmpegAudioDecoder(rate, channels, isTrack, format, stream, autoDisposeStream, pass); - private readonly CancellationTokenSource tokenSource = new CancellationTokenSource(); + return decoder; + } - /// - /// Start decoding in the decoding thread. - /// - /// Data stream to read - /// Delegate to pass data to - /// Object to pass to the delegate - /// - public AudioDecoderData StartDecodingAsync(Stream stream, PassDataDelegate pass, object? 
userData) + public AudioDecoder StartDecodingAsync(int rate, byte channels, ushort format, Stream stream, PassDataDelegate pass) { if (decoderThread == null) { @@ -130,30 +138,23 @@ public AudioDecoderData StartDecodingAsync(Stream stream, PassDataDelegate pass, decoderThread.Start(); } - AudioDecoderData data = CreateDecoderData(rate, channels, true, format, stream, true, pass, userData); + AudioDecoder decoder = CreateDecoder(rate, channels, true, format, stream, true, pass); lock (jobs) - jobs.AddFirst(data); + jobs.AddFirst(decoder); decoderWaitHandle?.Set(); - return data; + return decoder; } - /// - /// Decodes audio from stream. It blocks until decoding is done. - /// - /// Data stream to read. - /// Decoded audio - public byte[] DecodeAudioInCurrentSpec(Stream stream) => DecodeAudio(rate, channels, format, stream); - - public byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream) + public static byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream) { - AudioDecoderData data = CreateDecoderData(freq, channels, false, format, stream); + AudioDecoder decoder = CreateDecoder(freq, channels, false, format, stream); - LoadFromStream(data, out byte[] decoded); + decoder.LoadFromStream(out byte[] decoded); - if (!data.Loading) + if (!decoder.Loading) return decoded; // fallback if it couldn't decode at once @@ -161,9 +162,9 @@ public byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream) { memoryStream.Write(decoded); - while (data.Loading) + while (decoder.Loading) { - int read = LoadFromStream(data, out decoded); + int read = decoder.LoadFromStream(out decoded); memoryStream.Write(decoded, 0, read); } @@ -188,20 +189,19 @@ private void loop(CancellationToken token) while (node != null) { var next = node.Next; - AudioDecoderData data = node.Value; + AudioDecoder decoder = node.Value; - if (data.StopJob) + if (decoder.StopJob) { - data.Free(); + decoder.Free(); jobs.Remove(node); } else { - int read = LoadFromStream(data, out byte[] decoded); - data.Pass?.Invoke(decoded, read, data.UserData, data, !data.Loading); + decoder.LoadFromStream(out _); } - if (!data.Loading) + if (!decoder.Loading) jobs.Remove(node); node = next; @@ -214,36 +214,6 @@ private void loop(CancellationToken token) } } - public abstract AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? pass = null, object? userData = null); - - protected abstract int LoadFromStreamInternal(AudioDecoderData job, out byte[] decoded); - - /// - /// Decodes and resamples audio from job.Stream, and pass it to decoded. 
- /// - /// Decode data - /// Decoded audio - public int LoadFromStream(AudioDecoderData job, out byte[] decoded) - { - try - { - return LoadFromStreamInternal(job, out decoded); - } - catch (Exception e) - { - Logger.Log(e.Message, level: LogLevel.Important); - job.Loading = false; - decoded = Array.Empty(); - } - finally - { - if (!job.Loading) - job.Free(); - } - - return -1; - } - private bool disposedValue; protected virtual void Dispose(bool disposing) @@ -274,7 +244,7 @@ protected virtual void Dispose(bool disposing) } } - ~AudioDecoder() + ~AudioDecoderManager() { Dispose(false); } diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index a51fc7c668..45ff874557 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -6,6 +6,7 @@ using ManagedBass; using osu.Framework.Audio.Callbacks; using SDL2; +using static osu.Framework.Audio.AudioDecoderManager; namespace osu.Framework.Audio { @@ -14,73 +15,62 @@ namespace osu.Framework.Audio /// internal class BassAudioDecoder : AudioDecoder { - public BassAudioDecoder(int rate, int channels, ushort format) - : base(rate, channels, format) + private int decodeStream; + private FileCallbacks? callbacks; + private SDL2AudioStream? resampler; + + private byte[]? decodeData; + private byte[]? resampleData; + + public BassAudioDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) + : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) { } - public class BassAudioDecoderData : AudioDecoderData + internal override void Free() { - internal int DecodeStream; - internal FileCallbacks? Callbacks; - internal SDL2AudioStream? Resampler; - - internal byte[]? DecodeData; - internal byte[]? ResampleData; - - public BassAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass, object? 
userData) - : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData) + if (decodeStream != 0) { + Bass.StreamFree(decodeStream); + decodeStream = 0; } - internal override void Free() - { - if (DecodeStream != 0) - { - Bass.StreamFree(DecodeStream); - DecodeStream = 0; - } - - Resampler?.Dispose(); - Callbacks?.Dispose(); + resampler?.Dispose(); + callbacks?.Dispose(); - DecodeData = null; - ResampleData = null; + decodeData = null; + resampleData = null; - base.Free(); - } + base.Free(); } private static readonly object bass_sync_lock = new object(); - protected override int LoadFromStreamInternal(AudioDecoderData decodeData, out byte[] decoded) + protected override int LoadFromStreamInternal(out byte[] decoded) { - if (decodeData is not BassAudioDecoderData job) - throw new ArgumentException("Provide proper data"); - if (Bass.CurrentDevice < 0) throw new InvalidOperationException("Initialize a BASS device to decode audio"); lock (bass_sync_lock) { - if (!job.Loading) + if (!Loading) { - job.Callbacks = new FileCallbacks(new DataStreamFileProcedures(job.Stream)); + callbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); BassFlags bassFlags = BassFlags.Decode; - if (SDL.SDL_AUDIO_ISFLOAT(job.Format)) bassFlags |= BassFlags.Float; - if (job.IsTrack) bassFlags |= BassFlags.Prescan; - job.DecodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, job.Callbacks.Callbacks); + if (SDL.SDL_AUDIO_ISFLOAT(Format)) bassFlags |= BassFlags.Float; + if (IsTrack) bassFlags |= BassFlags.Prescan; + decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, callbacks.Callbacks); - if (job.DecodeStream == 0) + if (decodeStream == 0) throw new FormatException($"Couldn't create stream: {Bass.LastError}"); - bool infoAvail = Bass.ChannelGetInfo(job.DecodeStream, out var info); + bool infoAvail = Bass.ChannelGetInfo(decodeStream, out var info); if (infoAvail) { - job.ByteLength = Bass.ChannelGetLength(job.DecodeStream); - job.Length = Bass.ChannelBytes2Seconds(job.DecodeStream, job.ByteLength) * 1000; - job.Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(job.DecodeStream, ChannelAttribute.Bitrate)); + ByteLength = Bass.ChannelGetLength(decodeStream); + Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000; + Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(decodeStream, ChannelAttribute.Bitrate)); ushort srcformat; @@ -100,73 +90,70 @@ protected override int LoadFromStreamInternal(AudioDecoderData decodeData, out b break; } - if (info.Channels != job.Channels || srcformat != job.Format || info.Frequency != job.Rate) + if (info.Channels != Channels || srcformat != Format || info.Frequency != Rate) { - job.Resampler = new SDL2AudioStream(srcformat, (byte)info.Channels, info.Frequency, job.Format, (byte)job.Channels, job.Rate); - job.ByteLength = (long)Math.Ceiling(job.ByteLength / (double)info.Frequency / SDL.SDL_AUDIO_BITSIZE(srcformat) / info.Channels - * job.Rate * SDL.SDL_AUDIO_BITSIZE(job.Format) * job.Channels); + resampler = new SDL2AudioStream(srcformat, (byte)info.Channels, info.Frequency, Format, (byte)Channels, Rate); + ByteLength = (long)Math.Ceiling(ByteLength / (double)info.Frequency / SDL.SDL_AUDIO_BITSIZE(srcformat) / info.Channels + * Rate * SDL.SDL_AUDIO_BITSIZE(Format) * Channels); } } else { - if (job.IsTrack) + if (IsTrack) throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); } - job.Loading = true; + Loading = true; } - int bufferLen = (int)(job.IsTrack ? 
Bass.ChannelSeconds2Bytes(job.DecodeStream, 1) : job.ByteLength); + int bufferLen = (int)(IsTrack ? Bass.ChannelSeconds2Bytes(decodeStream, 1) : ByteLength); if (bufferLen <= 0) bufferLen = 44100 * 2 * 4 * 1; - if (job.DecodeData == null || job.DecodeData.Length < bufferLen) + if (decodeData == null || decodeData.Length < bufferLen) { - job.DecodeData = new byte[bufferLen]; + decodeData = new byte[bufferLen]; } - int got = Bass.ChannelGetData(job.DecodeStream, job.DecodeData, bufferLen); + int got = Bass.ChannelGetData(decodeStream, decodeData, bufferLen); if (got == -1) { - job.Loading = false; + Loading = false; if (Bass.LastError != Errors.Ended) throw new FormatException($"Couldn't decode: {Bass.LastError}"); } - if (Bass.StreamGetFilePosition(job.DecodeStream, FileStreamPosition.End) <= Bass.StreamGetFilePosition(job.DecodeStream)) - job.Loading = false; + if (Bass.StreamGetFilePosition(decodeStream, FileStreamPosition.End) <= Bass.StreamGetFilePosition(decodeStream)) + Loading = false; - if (job.Resampler == null) + if (resampler == null) { - decoded = job.DecodeData; + decoded = decodeData; return Math.Max(0, got); } else { if (got > 0) - job.Resampler.Put(job.DecodeData, got); + resampler.Put(decodeData, got); - if (!job.Loading) - job.Resampler.Flush(); + if (!Loading) + resampler.Flush(); - int avail = job.Resampler.GetPendingBytes(); + int avail = resampler.GetPendingBytes(); - if (job.ResampleData == null || job.ResampleData.Length < avail) - job.ResampleData = new byte[avail]; + if (resampleData == null || resampleData.Length < avail) + resampleData = new byte[avail]; if (avail > 0) - job.Resampler.Get(job.ResampleData, avail); + resampler.Get(resampleData, avail); - decoded = job.ResampleData; + decoded = resampleData; return avail; } } } - - public override AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? pass = null, object? userData = null) - => new BassAudioDecoderData(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData); } } diff --git a/osu.Framework/Audio/FFmpegAudioDecoder.cs b/osu.Framework/Audio/FFmpegAudioDecoder.cs index c4ffeeb07a..dd89a199da 100644 --- a/osu.Framework/Audio/FFmpegAudioDecoder.cs +++ b/osu.Framework/Audio/FFmpegAudioDecoder.cs @@ -5,64 +5,51 @@ using System.IO; using osu.Framework.Graphics.Video; using SDL2; +using static osu.Framework.Audio.AudioDecoderManager; namespace osu.Framework.Audio { internal class FFmpegAudioDecoder : AudioDecoder { - public FFmpegAudioDecoder(int rate, int channels, ushort format) - : base(rate, channels, format) + private VideoDecoder? ffmpeg; + private byte[]? decodeData; + + public FFmpegAudioDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) + : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) { } - public class FFmpegAudioDecoderData : AudioDecoderData + internal override void Free() { - internal VideoDecoder? FFmpeg; - internal byte[]? DecodeData; + decodeData = null; - public FFmpegAudioDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass, object? 
userData) - : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData) - { - } - - internal override void Free() - { - DecodeData = null; - - FFmpeg?.Dispose(); - base.Free(); - } + ffmpeg?.Dispose(); + base.Free(); } - protected override int LoadFromStreamInternal(AudioDecoderData decodeData, out byte[] decoded) + protected override int LoadFromStreamInternal(out byte[] decoded) { - if (decodeData is not FFmpegAudioDecoderData job) - throw new ArgumentException("Provide proper data"); - - if (job.FFmpeg == null) + if (ffmpeg == null) { - job.FFmpeg = new VideoDecoder(job.Stream, job.Rate, job.Channels, SDL.SDL_AUDIO_ISFLOAT(job.Format), SDL.SDL_AUDIO_BITSIZE(job.Format), SDL.SDL_AUDIO_ISSIGNED(job.Format)); + ffmpeg = new VideoDecoder(Stream, Rate, Channels, SDL.SDL_AUDIO_ISFLOAT(Format), SDL.SDL_AUDIO_BITSIZE(Format), SDL.SDL_AUDIO_ISSIGNED(Format)); - job.FFmpeg.PrepareDecoding(); - job.FFmpeg.RecreateCodecContext(); + ffmpeg.PrepareDecoding(); + ffmpeg.RecreateCodecContext(); - job.Bitrate = (int)job.FFmpeg.Bitrate; - job.Length = job.FFmpeg.Duration; - job.ByteLength = (long)Math.Ceiling(job.FFmpeg.Duration / 1000.0d) * job.Rate * job.Channels * (SDL.SDL_AUDIO_BITSIZE(job.Format) / 8); // FIXME + Bitrate = (int)ffmpeg.Bitrate; + Length = ffmpeg.Duration; + ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * Rate) * Channels * (SDL.SDL_AUDIO_BITSIZE(Format) / 8); // FIXME - job.Loading = true; + Loading = true; } - int got = job.FFmpeg.DecodeNextAudioFrame(32, ref job.DecodeData, !job.IsTrack); + int got = ffmpeg.DecodeNextAudioFrame(32, ref decodeData, !IsTrack); - if (job.FFmpeg.State != VideoDecoder.DecoderState.Running) - job.Loading = false; + if (ffmpeg.State != VideoDecoder.DecoderState.Running) + Loading = false; - decoded = job.DecodeData; + decoded = decodeData; return got; } - - public override AudioDecoderData CreateDecoderData(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? pass = null, object? userData = null) - => new FFmpegAudioDecoderData(rate, channels, isTrack, format, stream, autoDisposeStream, pass, userData); } } diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 879940f544..1170e54a6b 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -30,7 +30,7 @@ public class SDL2AudioManager : AudioManager private SDL.SDL_AudioSpec spec; - private static AudioDecoder decoder; + private static readonly AudioDecoderManager decoder = new AudioDecoderManager(); private readonly List sdlMixerList = new List(); @@ -57,19 +57,10 @@ public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStor EnqueueAction(() => { ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1); - AudioThread.InitDevice(0); + //AudioThread.InitDevice(0); }); } - public static AudioDecoder GetAudioDecoder() - { - decoder ??= ManagedBass.Bass.CurrentDevice >= 0 - ? 
new BassAudioDecoder(AUDIO_FREQ, AUDIO_CHANNELS, AUDIO_FORMAT) - : new FFmpegAudioDecoder(AUDIO_FREQ, AUDIO_CHANNELS, AUDIO_FORMAT); - - return decoder; - } - private string currentDeviceName = "Not loaded"; public override string ToString() @@ -230,7 +221,7 @@ protected override bool SetAudioDevice(int deviceIndex) internal override Track.Track GetNewTrack(Stream data, string name) { TrackSDL2 track = new TrackSDL2(name, spec.freq, spec.channels, spec.samples); - EnqueueAction(() => GetAudioDecoder().StartDecodingAsync(data, track.AddToQueue, null)); + EnqueueAction(() => decoder.StartDecodingAsync(AUDIO_FREQ, AUDIO_CHANNELS, AUDIO_FORMAT, data, track.AddToQueue)); return track; } diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index 01d1ef1c17..f7f53fd883 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -40,7 +40,7 @@ private protected override void LoadSample() try { - byte[] audio = SDL2AudioManager.GetAudioDecoder().DecodeAudioInCurrentSpec(stream); + byte[] audio = AudioDecoderManager.DecodeAudio(spec.freq, spec.channels, spec.format, stream); if (audio.Length > 0) { diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index c23f1983ad..d6f3ca4a5d 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -46,9 +46,9 @@ public TrackSDL2(string name, int rate, byte channels, int samples) private readonly object syncRoot = new object(); - private AudioDecoder.AudioDecoderData? decodeData; + private AudioDecoderManager.AudioDecoder? decodeData; - internal void AddToQueue(byte[] audio, int length, object? userdata, AudioDecoder.AudioDecoderData data, bool done) + internal void AddToQueue(byte[] audio, int length, AudioDecoderManager.AudioDecoder data, bool done) { if (IsDisposed) return; @@ -63,10 +63,7 @@ internal void AddToQueue(byte[] audio, int length, object? userdata, AudioDecode player.PutSamplesInStream(audio, length); if (done) - { player.DonePutting(); - decodeData = null; - } } } @@ -80,9 +77,16 @@ protected override void UpdateState() if (decodeData != null && !isLoaded) { - Length = decodeData.Length; - bitrate = decodeData.Bitrate; - isLoaded = true; + if (isLoaded) + { + decodeData = null; + } + else + { + Length = decodeData.Length; + bitrate = decodeData.Bitrate; + isLoaded = true; + } } if (player.Done && isRunning) diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index 1d88abbbf7..65061ec5c4 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -77,13 +77,10 @@ public Waveform(Stream? data) // Code below assumes stereo channels = 2; - // GetAudioDecoder returns BASS decoder if any BASS device (including No Sound) is available - AudioDecoder decoder = SDL2AudioManager.GetAudioDecoder(); - // AudioDecoder will resample data into specified sample rate and channels (44100hz 2ch float) - AudioDecoder.AudioDecoderData decoderData = decoder.CreateDecoderData(sample_rate, channels, true, SDL2.SDL.AUDIO_F32, data, false); + AudioDecoderManager.AudioDecoder decoder = AudioDecoderManager.CreateDecoder(sample_rate, channels, true, SDL2.SDL.AUDIO_F32, data, false); - Complex[]? complexBuffer = null; + Complex[] complexBuffer = ArrayPool.Shared.Rent(fft_samples); try { @@ -95,8 +92,6 @@ public Waveform(Stream? 
data) int fftPointIndex = 0; - complexBuffer = ArrayPool.Shared.Rent(fft_samples); - int complexBufferIndex = 0; Point point = new Point(); @@ -107,7 +102,7 @@ public Waveform(Stream? data) do { - int read = decoder.LoadFromStream(decoderData, out byte[] currentBytes); + int read = decoder.LoadFromStream(out byte[] currentBytes); int sampleIndex = 0; unsafe @@ -169,16 +164,13 @@ public Waveform(Stream? data) } } } - } while (decoderData.Loading); + } while (decoder.Loading); points = pointList.ToArray(); } finally { - if (complexBuffer != null) - ArrayPool.Shared.Return(complexBuffer); - - decoderData.Free(); + ArrayPool.Shared.Return(complexBuffer); } }, cancelSource.Token); } From 089fc0f51165a4a695c0a960915ced79f2f17b06 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Thu, 21 Dec 2023 22:43:41 +0900 Subject: [PATCH 026/127] Use BASS in SDL2AudioManager --- osu.Framework/Audio/SDL2AudioManager.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 1170e54a6b..81ac99db4c 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -57,7 +57,7 @@ public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStor EnqueueAction(() => { ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1); - //AudioThread.InitDevice(0); + AudioThread.InitDevice(0); }); } From 47669ea0e540447ec07a576ea8cdb7aae8711d21 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 23 Dec 2023 17:26:09 +0900 Subject: [PATCH 027/127] Undo an unneeded change --- osu.Framework/Graphics/Video/FFmpegFrame.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Graphics/Video/FFmpegFrame.cs b/osu.Framework/Graphics/Video/FFmpegFrame.cs index 89f091b9b2..1daa9b7a25 100644 --- a/osu.Framework/Graphics/Video/FFmpegFrame.cs +++ b/osu.Framework/Graphics/Video/FFmpegFrame.cs @@ -9,7 +9,7 @@ namespace osu.Framework.Graphics.Video { - public sealed unsafe class FFmpegFrame : IDisposable + internal sealed unsafe class FFmpegFrame : IDisposable { public readonly AVFrame* Pointer; From 4f341d2a43c02e383e7089b803f92d5c03aef386 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 23 Dec 2023 17:52:42 +0900 Subject: [PATCH 028/127] Fix SampleSDL2Factory Length and use private for decodedAudio --- osu.Framework/Audio/Sample/SampleSDL2Factory.cs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index f7f53fd883..30fab1c960 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -18,7 +18,7 @@ internal class SampleSDL2Factory : SampleFactory private readonly SDL2AudioMixer mixer; private readonly SDL.SDL_AudioSpec spec; - public float[]? DecodedAudio { get; private set; } + private float[] decodedAudio = Array.Empty(); private Stream? 
stream; @@ -44,11 +44,11 @@ private protected override void LoadSample() if (audio.Length > 0) { - DecodedAudio = new float[audio.Length / 4]; - Buffer.BlockCopy(audio, 0, DecodedAudio, 0, audio.Length); + decodedAudio = new float[audio.Length / 4]; + Buffer.BlockCopy(audio, 0, decodedAudio, 0, audio.Length); } - Length = audio.Length / 4.0d / spec.freq / spec.channels; + Length = audio.Length / 4d / spec.freq / spec.channels * 1000d; isLoaded = true; } finally @@ -58,7 +58,7 @@ private protected override void LoadSample() } } - public SampleSDL2AudioPlayer CreatePlayer() => new SampleSDL2AudioPlayer(DecodedAudio ?? Array.Empty(), spec.freq, spec.channels); + public SampleSDL2AudioPlayer CreatePlayer() => new SampleSDL2AudioPlayer(decodedAudio, spec.freq, spec.channels); public override Sample CreateSample() => new SampleSDL2(this, mixer) { OnPlay = SampleFactoryOnPlay }; @@ -66,7 +66,7 @@ private protected override void FreeSample() { // All players created by this factory have reference to this array. // It removes its own reference to the array, but GC will clear it once all SampleAudioPlayers for this sample are gone. - DecodedAudio = null; + decodedAudio = Array.Empty(); } private protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) From 89ab405b946bc377343eb11cf3998afd598e3085 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 24 Dec 2023 23:52:15 +0900 Subject: [PATCH 029/127] Let TrackSDL2Player use ArrayPool --- osu.Framework/Audio/ResamplingPlayer.cs | 33 +--- .../Audio/Track/TempoSDL2AudioPlayer.cs | 6 +- .../Audio/Track/TrackSDL2AudioPlayer.cs | 141 +++++++++++++----- 3 files changed, 107 insertions(+), 73 deletions(-) diff --git a/osu.Framework/Audio/ResamplingPlayer.cs b/osu.Framework/Audio/ResamplingPlayer.cs index 4e4adee3b0..1a4c659dbe 100644 --- a/osu.Framework/Audio/ResamplingPlayer.cs +++ b/osu.Framework/Audio/ResamplingPlayer.cs @@ -1,7 +1,6 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. -using System; using NAudio.Dsp; namespace osu.Framework.Audio @@ -31,8 +30,8 @@ public double RelativeRate /// /// Creates a new . /// - /// Sampling rate of audio that's given from or - /// Channels of audio that's given from or + /// Sampling rate of audio that's given from + /// Channels of audio that's given from protected ResamplingPlayer(int srcRate, byte srcChannels) { SrcRate = srcRate; @@ -105,32 +104,6 @@ public virtual int GetRemainingSamples(float[] data) return 0; } - // must implement either (preferably float one) - - private byte[]? bytes; - - protected virtual int GetRemainingRawFloats(float[] data, int offset, int needed) - { - if (bytes == null || needed * 4 != bytes.Length) - bytes = new byte[needed * 4]; - - int got = GetRemainingRawBytes(bytes); - - if (got > 0) Buffer.BlockCopy(bytes, 0, data, offset * 4, got); - return got / 4; - } - - private float[]? 
floats; - - protected virtual int GetRemainingRawBytes(byte[] data) - { - if (floats == null || data.Length / 4 != floats.Length) - floats = new float[data.Length / 4]; - - int got = GetRemainingRawFloats(floats, 0, floats.Length); - - if (got > 0) Buffer.BlockCopy(floats, 0, data, 0, got * 4); - return got * 4; - } + protected abstract int GetRemainingRawFloats(float[] data, int offset, int needed); } } diff --git a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs index 4164538b52..57d8b5dbc1 100644 --- a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs @@ -98,11 +98,11 @@ private void setTempo(double tempo) { if (AudioData != null) { - int latency = GetTempoLatencyInSamples() * 4 * SrcChannels; - long temp = !ReversePlayback ? AudioData.Position - latency : AudioData.Position + latency; + int latency = GetTempoLatencyInSamples() * SrcChannels; + long temp = !ReversePlayback ? AudioDataPosition - latency : AudioDataPosition + latency; if (temp >= 0) - AudioData.Position = temp; + AudioDataPosition = temp; } Reset(false); diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index 8f18fcd5bb..8d85f28aca 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -2,7 +2,7 @@ // See the LICENCE file in the repository root for full licence text. using System; -using System.IO; +using System.Buffers; using osu.Framework.Logging; namespace osu.Framework.Audio.Track @@ -26,21 +26,25 @@ internal class TrackSDL2AudioPlayer : ResamplingPlayer, IDisposable /// /// A byte position to convert /// - public double GetMsFromBytes(long bytePos) => bytePos * 1000.0d / SrcRate / SrcChannels / 4; + public double GetMsFromIndex(long bytePos) => bytePos * 1000.0d / SrcRate / SrcChannels; /// /// Returns a position in milliseconds converted from a byte position with configuration set for this player. /// /// A position in milliseconds to convert /// - public long GetBytesFromMs(double seconds) => (long)(seconds / 1000.0d * SrcRate) * SrcChannels * 4; + public long GetIndexFromMs(double seconds) => (long)(seconds / 1000.0d * SrcRate) * SrcChannels; /// /// Stores raw audio data. /// - protected MemoryStream? AudioData; + protected float[]? AudioData; - public long AudioDataLength => AudioData?.Length ?? 0; + protected long AudioDataPosition; + + private bool dataRented; + + public long AudioDataLength { get; private set; } /// /// Play backwards if set to true. 
@@ -59,16 +63,55 @@ public TrackSDL2AudioPlayer(int rate, byte channels) isLoaded = false; } + // To copy data with long offset with one method + private unsafe void copyData(float[] src, long srcOffset, float[] dst, long dstOffset, long length) + { + if (length <= 0) + return; + + fixed (float* srcPtr = src) + fixed (float* dstPtr = dst) + { + Buffer.MemoryCopy(srcPtr + srcOffset, dstPtr + dstOffset, (dst.LongLength - dstOffset) * sizeof(float), length * sizeof(float)); + } + } + + private void prepareArray(long wanted) + { + if (wanted <= AudioData?.LongLength) + return; + + float[] temp; + bool rent; + + if (wanted > int.MaxValue) + { + rent = false; + temp = new float[wanted]; + } + else + { + rent = true; + temp = ArrayPool.Shared.Rent((int)wanted); + } + + if (AudioData != null) + copyData(AudioData, 0, temp, 0, AudioDataLength); + + if (dataRented && AudioData != null) + ArrayPool.Shared.Return(AudioData); + + AudioData = temp; + dataRented = rent; + } + internal void PrepareStream(long byteLength) { if (disposedValue) return; if (AudioData == null) - { - int len = byteLength > int.MaxValue ? int.MaxValue : (int)byteLength; - AudioData = new MemoryStream(len); - } + prepareArray(byteLength / 4); isLoading = true; } @@ -81,10 +124,22 @@ internal void PutSamplesInStream(byte[] next, int length) if (AudioData == null) throw new InvalidOperationException($"Use {nameof(PrepareStream)} before calling this"); - long save = AudioData.Position; - AudioData.Position = AudioData.Length; - AudioData.Write(next, 0, length); - AudioData.Position = save; + int floatLen = length / sizeof(float); + + if (AudioDataLength + floatLen > AudioData.LongLength) + prepareArray(AudioDataLength + floatLen); + + unsafe // Most standard functions doesn't support long + { + fixed (float* dest = AudioData) + fixed (void* ptr = next) + { + float* src = (float*)ptr; + Buffer.MemoryCopy(src, dest + AudioDataLength, (AudioData.LongLength - AudioDataLength) * sizeof(float), length); + } + } + + AudioDataLength += floatLen; } internal void DonePutting() @@ -100,12 +155,12 @@ internal void DonePutting() isLoaded = true; } - protected override int GetRemainingRawBytes(byte[] data) + protected override int GetRemainingRawFloats(float[] data, int offset, int needed) { if (AudioData == null) return 0; - if (AudioData.Length <= 0) + if (AudioDataLength <= 0) { done = true; return 0; @@ -114,13 +169,13 @@ protected override int GetRemainingRawBytes(byte[] data) if (SaveSeek > 0) { // set to 0 if position is over saved seek - if (AudioData.Position > SaveSeek) + if (AudioDataPosition > SaveSeek) SaveSeek = 0; // player now has audio data to play - if (AudioData.Length > SaveSeek) + if (AudioDataLength > SaveSeek) { - AudioData.Position = SaveSeek; + AudioDataPosition = SaveSeek; SaveSeek = 0; } @@ -129,35 +184,34 @@ protected override int GetRemainingRawBytes(byte[] data) return 0; } - int read = data.Length; + int read; if (ReversePlayback) { - int frameSize = SrcChannels * 4; - - if (AudioData.Position < read) - read = (int)AudioData.Position; - - byte[] temp = new byte[read]; - - AudioData.Position -= read; - read = AudioData.Read(temp, 0, read); - AudioData.Position -= read; - - for (int e = 0; e < read / frameSize; e++) + for (read = 0; read < needed; read++) { - Buffer.BlockCopy(temp, read - frameSize * (e + 1), data, frameSize * e, frameSize); + if (AudioDataPosition < 0) + { + AudioDataPosition = 0; + break; + } + + data[read + offset] = AudioData[AudioDataPosition--]; } } else { - read = AudioData.Read(data, 0, read); 
+ long remain = AudioDataLength - AudioDataPosition; + read = remain > needed ? needed : (int)remain; + + copyData(AudioData, AudioDataPosition, data, offset, read); + AudioDataPosition += read; } - if (read < data.Length && isLoading) + if (read < needed && isLoading) Logger.Log("Track underrun!"); - if (ReversePlayback ? AudioData.Position <= 0 : AudioData.Position >= AudioData.Length && !isLoading) + if (ReversePlayback ? AudioDataPosition <= 0 : AudioDataPosition >= AudioDataLength && !isLoading) done = true; return read; @@ -187,8 +241,8 @@ public double GetCurrentTime() return 0; return !ReversePlayback - ? GetMsFromBytes(AudioData.Position) - GetProcessingLatency() - : GetMsFromBytes(AudioData.Position) + GetProcessingLatency(); + ? GetMsFromIndex(AudioDataPosition) - GetProcessingLatency() + : GetMsFromIndex(AudioDataPosition) + GetProcessingLatency(); } protected long SaveSeek; @@ -201,7 +255,7 @@ public double GetCurrentTime() /// Position in milliseconds public virtual void Seek(double seek) { - long tmp = GetBytesFromMs(seek); + long tmp = GetIndexFromMs(seek); if (!isLoaded && tmp > AudioDataLength) { @@ -210,7 +264,7 @@ public virtual void Seek(double seek) else if (AudioData != null) { SaveSeek = 0; - AudioData.Position = Math.Clamp(tmp, 0, AudioDataLength - 1); + AudioDataPosition = Math.Clamp(tmp, 0, AudioDataLength - 1); Flush(); } } @@ -223,7 +277,9 @@ protected virtual void Dispose(bool disposing) { if (disposing) { - AudioData?.Dispose(); + if (dataRented && AudioData != null) + ArrayPool.Shared.Return(AudioData); + AudioData = null; } @@ -231,6 +287,11 @@ protected virtual void Dispose(bool disposing) } } + ~TrackSDL2AudioPlayer() + { + Dispose(false); + } + public void Dispose() { Dispose(true); From 20e066ee9495bb26bcdcb0678e1a1c7fccf31d51 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 25 Dec 2023 18:21:50 +0900 Subject: [PATCH 030/127] Use a constant to init BASS No Sound in SDL2AudioManager --- osu.Framework/Audio/SDL2AudioManager.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index e6b9c542fb..fc3aa1abee 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -57,7 +57,7 @@ public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStor EnqueueAction(() => { ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1); - AudioThread.InitDevice(0); + AudioThread.InitDevice(ManagedBass.Bass.NoSoundDevice); }); } From e823423128552a70cdf773ea09444583edcf00ed Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 25 Dec 2023 18:22:25 +0900 Subject: [PATCH 031/127] Swap stereo channel in TrackSDL2Player ReversePlayback --- osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index 8d85f28aca..2289722016 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -188,7 +188,7 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede if (ReversePlayback) { - for (read = 0; read < needed; read++) + for (read = 0; read < needed; read += 2) { if (AudioDataPosition < 0) { @@ -196,6 +196,8 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede break; } + // swap stereo channel + data[read + 1 + offset] = AudioData[AudioDataPosition--]; 
data[read + offset] = AudioData[AudioDataPosition--]; } } From 9a5d36419ea675d56bd905bec2ef0cd5a9745177 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 25 Dec 2023 18:22:57 +0900 Subject: [PATCH 032/127] Use Log2 instead in Waveform --- osu.Framework/Audio/Track/Waveform.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index 65061ec5c4..cb0ccd2b0b 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -98,7 +98,7 @@ public Waveform(Stream? data) int pointSamples = 0; - int m = (int)Math.Log(fft_samples, 2.0); + int m = (int)Math.Log2(fft_samples); do { From e3689d21307a259096637b7ae783f89b7f279c7d Mon Sep 17 00:00:00 2001 From: hwsmm Date: Mon, 25 Dec 2023 18:23:29 +0900 Subject: [PATCH 033/127] Add a basic implementation of CurrentAmplitude in TrackSDL2 --- osu.Framework/Audio/ResamplingPlayer.cs | 4 +- osu.Framework/Audio/Track/TrackSDL2.cs | 63 +++++++++++++++++++ .../Audio/Track/TrackSDL2AudioPlayer.cs | 12 ++++ 3 files changed, 77 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/ResamplingPlayer.cs b/osu.Framework/Audio/ResamplingPlayer.cs index 1a4c659dbe..9575f5d1a6 100644 --- a/osu.Framework/Audio/ResamplingPlayer.cs +++ b/osu.Framework/Audio/ResamplingPlayer.cs @@ -24,8 +24,8 @@ public double RelativeRate private WdlResampler? resampler; - protected readonly int SrcRate; - protected readonly byte SrcChannels; + internal readonly int SrcRate; + internal readonly byte SrcChannels; /// /// Creates a new . diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index d6f3ca4a5d..3c23b3c300 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -4,8 +4,10 @@ using System; using System.Threading; using System.Threading.Tasks; +using NAudio.Dsp; using osu.Framework.Audio.Mixing.SDL2; using osu.Framework.Extensions; +using osu.Framework.Utils; namespace osu.Framework.Audio.Track { @@ -71,6 +73,59 @@ internal void AddToQueue(byte[] audio, int length, AudioDecoderManager.AudioDeco Interlocked.Exchange(ref decodeData, data); } + private volatile bool amplitudeRequested; + private double lastTime; + + private ChannelAmplitudes currentAmplitudes = ChannelAmplitudes.Empty; + private float[]? samples; + private Complex[]? fftSamples; + private float[]? fftResult; + + public override ChannelAmplitudes CurrentAmplitudes + { + get + { + if (!amplitudeRequested) + amplitudeRequested = true; + + return isRunning ? currentAmplitudes : ChannelAmplitudes.Empty; + } + } + + private void updateCurrentAmplitude() + { + samples ??= new float[(int)(player.SrcRate * (1f / 60)) * player.SrcChannels]; + fftSamples ??= new Complex[ChannelAmplitudes.AMPLITUDES_SIZE * 2]; + fftResult ??= new float[ChannelAmplitudes.AMPLITUDES_SIZE]; + + lock (syncRoot) + player.Peek(samples); + + float leftAmplitude = 0; + float rightAmplitude = 0; + int secondCh = player.SrcChannels < 2 ? 
0 : 1; + int fftIndex = 0; + + for (int i = 0; i < samples.Length; i += player.SrcChannels) + { + leftAmplitude = Math.Max(leftAmplitude, Math.Abs(samples[i])); + rightAmplitude = Math.Max(rightAmplitude, Math.Abs(samples[i + secondCh])); + + if (fftIndex < fftSamples.Length) + { + fftSamples[fftIndex].Y = 0; + fftSamples[fftIndex++].X = (samples[i] + samples[i + secondCh]) * 0.5f; + } + } + + FastFourierTransform.FFT(true, (int)Math.Log2(fftSamples.Length), fftSamples); + + for (int i = 0; i < fftResult.Length; i++) + fftResult[i] = (float)Math.Sqrt(fftSamples[i].X * fftSamples[i].X + fftSamples[i].Y + fftSamples[i].Y); + + currentAmplitudes = new ChannelAmplitudes(Math.Min(1f, leftAmplitude), Math.Min(1f, rightAmplitude), fftResult); + } + protected override void UpdateState() { base.UpdateState(); @@ -108,6 +163,14 @@ protected override void UpdateState() lock (syncRoot) player.FillRequiredSamples(); } + + // Not sure if I need to split this up to another class since this featrue is only exclusive to Track + if (amplitudeRequested && isRunning && Precision.DefinitelyBigger(currentTime, lastTime)) + { + lastTime = currentTime; + + updateCurrentAmplitude(); + } } public override bool Seek(double seek) => SeekAsync(seek).GetResultSafely(); diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index 2289722016..f15907dd2f 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -219,6 +219,18 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede return read; } + public bool Peek(float[] data) + { + if (AudioData == null) + return false; + + long remain = AudioDataLength - AudioDataPosition; + int read = remain > data.Length ? data.Length : (int)remain; + + copyData(AudioData, AudioDataPosition, data, 0, read); + return true; + } + /// /// Clears 'done' status. /// From cc9f19c9bd07ee0a5c460f01fea00bc4fbdc0fd6 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 26 Dec 2023 21:34:10 +0900 Subject: [PATCH 034/127] Use a better method name for passing audio data in TrackSDL2 --- osu.Framework/Audio/SDL2AudioManager.cs | 2 +- osu.Framework/Audio/Track/TrackSDL2.cs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index fc3aa1abee..7588a5a39e 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -221,7 +221,7 @@ protected override bool SetAudioDevice(int deviceIndex) internal override Track.Track GetNewTrack(Stream data, string name) { TrackSDL2 track = new TrackSDL2(name, spec.freq, spec.channels, spec.samples); - EnqueueAction(() => decoder.StartDecodingAsync(AUDIO_FREQ, AUDIO_CHANNELS, AUDIO_FORMAT, data, track.AddToQueue)); + EnqueueAction(() => decoder.StartDecodingAsync(AUDIO_FREQ, AUDIO_CHANNELS, AUDIO_FORMAT, data, track.ReceiveAudioData)); return track; } diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index 3c23b3c300..9b03d0de99 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -50,7 +50,7 @@ public TrackSDL2(string name, int rate, byte channels, int samples) private AudioDecoderManager.AudioDecoder? 
decodeData; - internal void AddToQueue(byte[] audio, int length, AudioDecoderManager.AudioDecoder data, bool done) + internal void ReceiveAudioData(byte[] audio, int length, AudioDecoderManager.AudioDecoder data, bool done) { if (IsDisposed) return; From fbac4cab517b414818dd77a9ab9025c1e7e1c31e Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 26 Dec 2023 21:34:52 +0900 Subject: [PATCH 035/127] Fix TrackSDL2 CurrentAmplitude not updating after looping --- osu.Framework/Audio/Track/TrackSDL2.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index 9b03d0de99..8af3c304a0 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -165,7 +165,7 @@ protected override void UpdateState() } // Not sure if I need to split this up to another class since this featrue is only exclusive to Track - if (amplitudeRequested && isRunning && Precision.DefinitelyBigger(currentTime, lastTime)) + if (amplitudeRequested && isRunning && currentTime != lastTime) { lastTime = currentTime; From 0fcb82109ddd8c9f1753ed0d474d7b51d7e3c692 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 26 Dec 2023 21:35:53 +0900 Subject: [PATCH 036/127] Add ComputeMagnitude extension for NAudio Complex --- osu.Framework/Audio/Track/TrackSDL2.cs | 2 +- osu.Framework/Audio/Track/Waveform.cs | 2 +- osu.Framework/Extensions/ExtensionMethods.cs | 9 +++++++++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index 8af3c304a0..dc6d1cc45b 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -121,7 +121,7 @@ private void updateCurrentAmplitude() FastFourierTransform.FFT(true, (int)Math.Log2(fftSamples.Length), fftSamples); for (int i = 0; i < fftResult.Length; i++) - fftResult[i] = (float)Math.Sqrt(fftSamples[i].X * fftSamples[i].X + fftSamples[i].Y + fftSamples[i].Y); + fftResult[i] = fftSamples[i].ComputeMagnitude(); currentAmplitudes = new ChannelAmplitudes(Math.Min(1f, leftAmplitude), Math.Min(1f, rightAmplitude), fftResult); } diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index cb0ccd2b0b..6fc912e40e 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -185,7 +185,7 @@ private float computeIntensity(int frequency, Complex[] bins, float startFrequen float value = 0; for (int i = startBin; i < endBin; i++) - value += (float)Math.Sqrt(bins[i].X * bins[i].X + bins[i].Y * bins[i].Y); + value += bins[i].ComputeMagnitude(); return value; } diff --git a/osu.Framework/Extensions/ExtensionMethods.cs b/osu.Framework/Extensions/ExtensionMethods.cs index e6e16f72a6..cc6d98b6dd 100644 --- a/osu.Framework/Extensions/ExtensionMethods.cs +++ b/osu.Framework/Extensions/ExtensionMethods.cs @@ -13,6 +13,8 @@ using System.Reflection; using System.Security.Cryptography; using System.Text; +using JetBrains.Annotations; +using NAudio.Dsp; using osu.Framework.Extensions.ObjectExtensions; using osu.Framework.Localisation; using osu.Framework.Platform; @@ -374,5 +376,12 @@ public static bool CheckIsValidUrl(this string url) || url.StartsWith("http://", StringComparison.Ordinal) || url.StartsWith("mailto:", StringComparison.Ordinal); } + + /// + /// Computes magnitude of a given . + /// + /// NAudio Complex number + /// Magnitude (Absolute number) of a given complex. 
+ public static float ComputeMagnitude(this Complex complex) => (float)Math.Sqrt(complex.X * complex.X + complex.Y * complex.Y); } } From d4acdcbe2d2a726cab8e6394693d4a38eaa17624 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 26 Dec 2023 21:44:01 +0900 Subject: [PATCH 037/127] Remove unused usings --- osu.Framework/Audio/Track/TrackSDL2.cs | 1 - osu.Framework/Extensions/ExtensionMethods.cs | 1 - 2 files changed, 2 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index dc6d1cc45b..95b17f9767 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -7,7 +7,6 @@ using NAudio.Dsp; using osu.Framework.Audio.Mixing.SDL2; using osu.Framework.Extensions; -using osu.Framework.Utils; namespace osu.Framework.Audio.Track { diff --git a/osu.Framework/Extensions/ExtensionMethods.cs b/osu.Framework/Extensions/ExtensionMethods.cs index cc6d98b6dd..5f490e625a 100644 --- a/osu.Framework/Extensions/ExtensionMethods.cs +++ b/osu.Framework/Extensions/ExtensionMethods.cs @@ -13,7 +13,6 @@ using System.Reflection; using System.Security.Cryptography; using System.Text; -using JetBrains.Annotations; using NAudio.Dsp; using osu.Framework.Extensions.ObjectExtensions; using osu.Framework.Localisation; From 63652b2cc79b1d8047cac1177f0bd4d0ae49ad06 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 26 Dec 2023 21:50:07 +0900 Subject: [PATCH 038/127] Remove a reference to AudioDecoder once the player has finished receiving data --- osu.Framework/Audio/Track/TrackSDL2.cs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index 95b17f9767..ad3a3b10ea 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -129,18 +129,17 @@ protected override void UpdateState() { base.UpdateState(); - if (decodeData != null && !isLoaded) + if (decodeData != null) { - if (isLoaded) - { - decodeData = null; - } - else + if (!isLoaded) { Length = decodeData.Length; bitrate = decodeData.Bitrate; isLoaded = true; } + + if (player.IsLoaded) + decodeData = null; } if (player.Done && isRunning) From 64caee93384d1990c07eb710e73fea7e45cda3ac Mon Sep 17 00:00:00 2001 From: hwsmm Date: Wed, 27 Dec 2023 19:45:59 +0900 Subject: [PATCH 039/127] Use Array.Copy instead of Buffer.MemoryCopy --- .../Audio/Track/TrackSDL2AudioPlayer.cs | 27 ++++++------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index f15907dd2f..3d4a6c9673 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -63,19 +63,6 @@ public TrackSDL2AudioPlayer(int rate, byte channels) isLoaded = false; } - // To copy data with long offset with one method - private unsafe void copyData(float[] src, long srcOffset, float[] dst, long dstOffset, long length) - { - if (length <= 0) - return; - - fixed (float* srcPtr = src) - fixed (float* dstPtr = dst) - { - Buffer.MemoryCopy(srcPtr + srcOffset, dstPtr + dstOffset, (dst.LongLength - dstOffset) * sizeof(float), length * sizeof(float)); - } - } - private void prepareArray(long wanted) { if (wanted <= AudioData?.LongLength) @@ -96,10 +83,12 @@ private void prepareArray(long wanted) } if (AudioData != null) - copyData(AudioData, 0, temp, 0, AudioDataLength); + { + Array.Copy(AudioData, 0, temp, 0, AudioDataLength); - if 
(dataRented && AudioData != null) - ArrayPool.Shared.Return(AudioData); + if (dataRented) + ArrayPool.Shared.Return(AudioData); + } AudioData = temp; dataRented = rent; @@ -129,7 +118,7 @@ internal void PutSamplesInStream(byte[] next, int length) if (AudioDataLength + floatLen > AudioData.LongLength) prepareArray(AudioDataLength + floatLen); - unsafe // Most standard functions doesn't support long + unsafe // To directly put bytes as float in array { fixed (float* dest = AudioData) fixed (void* ptr = next) @@ -206,7 +195,7 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede long remain = AudioDataLength - AudioDataPosition; read = remain > needed ? needed : (int)remain; - copyData(AudioData, AudioDataPosition, data, offset, read); + Array.Copy(AudioData, AudioDataPosition, data, offset, read); AudioDataPosition += read; } @@ -227,7 +216,7 @@ public bool Peek(float[] data) long remain = AudioDataLength - AudioDataPosition; int read = remain > data.Length ? data.Length : (int)remain; - copyData(AudioData, AudioDataPosition, data, 0, read); + Array.Copy(AudioData, AudioDataPosition, data, 0, read); return true; } From fc4ebd8f367dae8343ebf92383cf56418740b4c8 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Wed, 27 Dec 2023 19:46:24 +0900 Subject: [PATCH 040/127] Better message in SDL2AudioManager --- osu.Framework/Audio/SDL2AudioManager.cs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 7588a5a39e..1d7bba2c78 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -173,7 +173,7 @@ protected override bool SetAudioDevice(string deviceName = null) if (deviceId > 0) SDL.SDL_CloseAudioDevice(deviceId); - // Let audio driver adjust latency, this may set to a high value on Windows, but let's just be safe + // Let audio driver adjust latency, this may set to a high value on Windows (but usually around 10ms), but let's just be safe const uint flag = SDL.SDL_AUDIO_ALLOW_SAMPLES_CHANGE; deviceId = SDL.SDL_OpenAudioDevice(deviceName, 0, ref spec, out var outspec, (int)flag); @@ -199,11 +199,9 @@ protected override bool SetAudioDevice(string deviceName = null) Logger.Log($@"🔈 SDL Audio initialised Driver: {SDL.SDL_GetCurrentAudioDriver()} Device Name: {currentDeviceName} - Frequency: {spec.freq} hz - Channels: {spec.channels} - Format: {(SDL.SDL_AUDIO_ISSIGNED(spec.format) ? "" : "un")}signed {SDL.SDL_AUDIO_BITSIZE(spec.format)} bits{(SDL.SDL_AUDIO_ISFLOAT(spec.format) ? " (float)" : "")} - Samples: {spec.samples} samples - Buffer size: {spec.size} bytes"); + Format: {spec.freq}hz {spec.channels}ch + Resolution: {(SDL.SDL_AUDIO_ISUNSIGNED(spec.format) ? "unsigned " : "")}{SDL.SDL_AUDIO_BITSIZE(spec.format)}bit{(SDL.SDL_AUDIO_ISFLOAT(spec.format) ? 
" float" : "")} + Samples: {spec.samples} samples"); return true; } From c96d1e9669cbb11f5a720b590181f5cefbc27b44 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Thu, 28 Dec 2023 21:07:43 +0900 Subject: [PATCH 041/127] Abstract a bit more in SampleFactory --- .../Audio/Sample/SampleBassFactory.cs | 24 +++++++------------ osu.Framework/Audio/Sample/SampleFactory.cs | 7 ------ 2 files changed, 9 insertions(+), 22 deletions(-) diff --git a/osu.Framework/Audio/Sample/SampleBassFactory.cs b/osu.Framework/Audio/Sample/SampleBassFactory.cs index dd2e90e075..47c2d86c77 100644 --- a/osu.Framework/Audio/Sample/SampleBassFactory.cs +++ b/osu.Framework/Audio/Sample/SampleBassFactory.cs @@ -72,25 +72,19 @@ private protected override void LoadSample() Length = Bass.ChannelBytes2Seconds(SampleId, dataLength) * 1000; memoryLease = NativeMemoryTracker.AddMemory(this, dataLength); } - - public override Sample CreateSample() => new SampleBass(this, mixer) { OnPlay = SampleFactoryOnPlay }; - - private protected override void FreeSample() => Bass.SampleFree(SampleId); - - ~SampleBassFactory() + internal override void UpdateDevice(int deviceIndex) { - Dispose(false); + // The sample may not have already loaded if a device wasn't present in a previous load attempt. + if (!IsLoaded) + LoadSample(); } - protected override void Dispose(bool disposing) - { - if (IsDisposed) - return; - - if (IsLoaded) - memoryLease?.Dispose(); + public override Sample CreateSample() => new SampleBass(this, mixer) { OnPlay = SampleFactoryOnPlay }; - base.Dispose(disposing); + private protected override void FreeSample() + { + Bass.SampleFree(SampleId); + memoryLease?.Dispose(); } } } diff --git a/osu.Framework/Audio/Sample/SampleFactory.cs b/osu.Framework/Audio/Sample/SampleFactory.cs index 72b006ecf6..87bd763d16 100644 --- a/osu.Framework/Audio/Sample/SampleFactory.cs +++ b/osu.Framework/Audio/Sample/SampleFactory.cs @@ -34,13 +34,6 @@ protected SampleFactory(string name, int playbackConcurrency) private protected abstract void UpdatePlaybackConcurrency(ValueChangedEvent concurrency); - internal override void UpdateDevice(int deviceIndex) - { - // The sample may not have already loaded if a device wasn't present in a previous load attempt. 
- if (!IsLoaded) - LoadSample(); - } - private protected abstract void LoadSample(); public abstract Sample CreateSample(); From 8472acb923baf06a9868c017f73a2268e1302492 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Thu, 28 Dec 2023 22:40:04 +0900 Subject: [PATCH 042/127] Use Math.Min instead in SDL2 AudioPlayers --- osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs | 5 +---- osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs b/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs index ab184e7fde..2f6a869770 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs @@ -34,10 +34,7 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede for (; i < needed;) { - int remaining = needed - i; - int put = audioData.Length - position; - if (remaining < put) - put = remaining; + int put = Math.Min(needed - i, audioData.Length - position); if (put > 0) Array.Copy(audioData, position, data, offset + i, put); diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index 3d4a6c9673..9eb6b7f08b 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -193,7 +193,7 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede else { long remain = AudioDataLength - AudioDataPosition; - read = remain > needed ? needed : (int)remain; + read = (int)Math.Min(needed, remain); Array.Copy(AudioData, AudioDataPosition, data, offset, read); AudioDataPosition += read; From 2f53ffaebff59e8471c2d9fb342ae2a8e9b3bfb6 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Thu, 28 Dec 2023 22:40:22 +0900 Subject: [PATCH 043/127] Better TrackSDL2Player Peek --- osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index 9eb6b7f08b..d5267fe280 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -208,15 +208,20 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede return read; } + /// + /// Puts recently played audio samples into data. Mostly used to calculate amplitude of a track. + /// + /// A float array to put data in + /// True if succeeded public bool Peek(float[] data) { if (AudioData == null) return false; - long remain = AudioDataLength - AudioDataPosition; - int read = remain > data.Length ? 
data.Length : (int)remain; + long start = Math.Max(0, AudioDataPosition - data.Length); // To get most recently 'used' audio data + long remain = AudioDataLength - start; - Array.Copy(AudioData, AudioDataPosition, data, 0, read); + Array.Copy(AudioData, start, data, 0, Math.Min(data.Length, remain)); return true; } From 9dcecd8b6e25296d5537c75f455876d513abb9c4 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 29 Dec 2023 00:29:17 +0900 Subject: [PATCH 044/127] Satisfy InspectCode --- osu.Framework/Audio/Sample/SampleBassFactory.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/osu.Framework/Audio/Sample/SampleBassFactory.cs b/osu.Framework/Audio/Sample/SampleBassFactory.cs index 47c2d86c77..2d90a622bc 100644 --- a/osu.Framework/Audio/Sample/SampleBassFactory.cs +++ b/osu.Framework/Audio/Sample/SampleBassFactory.cs @@ -72,6 +72,7 @@ private protected override void LoadSample() Length = Bass.ChannelBytes2Seconds(SampleId, dataLength) * 1000; memoryLease = NativeMemoryTracker.AddMemory(this, dataLength); } + internal override void UpdateDevice(int deviceIndex) { // The sample may not have already loaded if a device wasn't present in a previous load attempt. From d31fbd7e12b454e3141f654c10ec5d2681a21266 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 29 Dec 2023 00:29:32 +0900 Subject: [PATCH 045/127] Extra safety check in TrackSDL2Player --- osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index d5267fe280..33f41e4fc1 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -272,7 +272,7 @@ public virtual void Seek(double seek) else if (AudioData != null) { SaveSeek = 0; - AudioDataPosition = Math.Clamp(tmp, 0, AudioDataLength - 1); + AudioDataPosition = Math.Clamp(tmp, 0, Math.Max(0, AudioDataLength - 1)); Flush(); } } From 06551a9fec1e64c115e2ce7f3c820f09f8498ac1 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 29 Dec 2023 00:29:59 +0900 Subject: [PATCH 046/127] Use BASSMix to resample instead of SDL2 --- osu.Framework/Audio/BassAudioDecoder.cs | 120 ++++++++++++------------ 1 file changed, 62 insertions(+), 58 deletions(-) diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index 45ff874557..ba6003f9e1 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -4,6 +4,7 @@ using System; using System.IO; using ManagedBass; +using ManagedBass.Mix; using osu.Framework.Audio.Callbacks; using SDL2; using static osu.Framework.Audio.AudioDecoderManager; @@ -17,10 +18,29 @@ internal class BassAudioDecoder : AudioDecoder { private int decodeStream; private FileCallbacks? callbacks; - private SDL2AudioStream? resampler; + + private int resampler; private byte[]? decodeData; - private byte[]? resampleData; + + private Resolution resolution + { + get + { + switch (Format) + { + case SDL.AUDIO_S8: + return Resolution.Byte; + + case SDL.AUDIO_S16: + return Resolution.Short; + + case SDL.AUDIO_F32: + default: + return Resolution.Float; + } + } + } public BassAudioDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? 
pass) : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) @@ -29,17 +49,21 @@ public BassAudioDecoder(int rate, int channels, bool isTrack, ushort format, Str internal override void Free() { + if (resampler != 0) + { + Bass.StreamFree(resampler); + resampler = 0; + } + if (decodeStream != 0) { Bass.StreamFree(decodeStream); decodeStream = 0; } - resampler?.Dispose(); callbacks?.Dispose(); decodeData = null; - resampleData = null; base.Free(); } @@ -56,9 +80,9 @@ protected override int LoadFromStreamInternal(out byte[] decoded) if (!Loading) { callbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); - BassFlags bassFlags = BassFlags.Decode; - if (SDL.SDL_AUDIO_ISFLOAT(Format)) bassFlags |= BassFlags.Float; + BassFlags bassFlags = BassFlags.Decode | resolution.ToBassFlag(); if (IsTrack) bassFlags |= BassFlags.Prescan; + decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, callbacks.Callbacks); if (decodeStream == 0) @@ -72,29 +96,34 @@ protected override int LoadFromStreamInternal(out byte[] decoded) Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000; Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(decodeStream, ChannelAttribute.Bitrate)); - ushort srcformat; - - switch (info.Resolution) + if (info.Channels != Channels || info.Resolution != resolution || info.Frequency != Rate) { - case Resolution.Byte: - srcformat = SDL.AUDIO_S8; - break; - - case Resolution.Short: - srcformat = SDL.AUDIO_S16; - break; - - case Resolution.Float: - default: - srcformat = SDL.AUDIO_F32; - break; - } + resampler = BassMix.CreateMixerStream(Rate, Channels, BassFlags.MixerEnd | BassFlags.Decode | resolution.ToBassFlag()); - if (info.Channels != Channels || srcformat != Format || info.Frequency != Rate) - { - resampler = new SDL2AudioStream(srcformat, (byte)info.Channels, info.Frequency, Format, (byte)Channels, Rate); - ByteLength = (long)Math.Ceiling(ByteLength / (double)info.Frequency / SDL.SDL_AUDIO_BITSIZE(srcformat) / info.Channels - * Rate * SDL.SDL_AUDIO_BITSIZE(Format) * Channels); + if (resampler == 0) + throw new FormatException($"Failed to create BASS Mixer: {Bass.LastError}"); + + Bass.ChannelSetAttribute(resampler, ChannelAttribute.Buffer, 0); + + if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanBuffer | BassFlags.MixerChanNoRampin)) + throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); + + ushort srcBits = 32; // float by default + + switch (info.Resolution) + { + case Resolution.Byte: + srcBits = 8; + break; + + case Resolution.Short: + srcBits = 16; + break; + } + + ByteLength = (long)Math.Ceiling((decimal)ByteLength / info.Frequency * Rate); + ByteLength /= info.Channels * (srcBits / 8); + ByteLength *= Channels * (SDL.SDL_AUDIO_BITSIZE(Format) / 8); } } else @@ -106,17 +135,17 @@ protected override int LoadFromStreamInternal(out byte[] decoded) Loading = true; } - int bufferLen = (int)(IsTrack ? Bass.ChannelSeconds2Bytes(decodeStream, 1) : ByteLength); + int handle = resampler == 0 ? decodeStream : resampler; + + int bufferLen = (int)(IsTrack ? 
Bass.ChannelSeconds2Bytes(handle, 1) : ByteLength); if (bufferLen <= 0) bufferLen = 44100 * 2 * 4 * 1; if (decodeData == null || decodeData.Length < bufferLen) - { decodeData = new byte[bufferLen]; - } - int got = Bass.ChannelGetData(decodeStream, decodeData, bufferLen); + int got = Bass.ChannelGetData(handle, decodeData, bufferLen); if (got == -1) { @@ -126,33 +155,8 @@ protected override int LoadFromStreamInternal(out byte[] decoded) throw new FormatException($"Couldn't decode: {Bass.LastError}"); } - if (Bass.StreamGetFilePosition(decodeStream, FileStreamPosition.End) <= Bass.StreamGetFilePosition(decodeStream)) - Loading = false; - - if (resampler == null) - { - decoded = decodeData; - return Math.Max(0, got); - } - else - { - if (got > 0) - resampler.Put(decodeData, got); - - if (!Loading) - resampler.Flush(); - - int avail = resampler.GetPendingBytes(); - - if (resampleData == null || resampleData.Length < avail) - resampleData = new byte[avail]; - - if (avail > 0) - resampler.Get(resampleData, avail); - - decoded = resampleData; - return avail; - } + decoded = decodeData; + return Math.Max(0, got); } } } From 626a64172843748976791f128b90efdc01ced143 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 29 Dec 2023 19:58:05 +0900 Subject: [PATCH 047/127] No longer make use of bits in BassAudioDecoder --- osu.Framework/Audio/BassAudioDecoder.cs | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index ba6003f9e1..cb013e0c35 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -108,22 +108,9 @@ protected override int LoadFromStreamInternal(out byte[] decoded) if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanBuffer | BassFlags.MixerChanNoRampin)) throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); - ushort srcBits = 32; // float by default - - switch (info.Resolution) - { - case Resolution.Byte: - srcBits = 8; - break; - - case Resolution.Short: - srcBits = 16; - break; - } - ByteLength = (long)Math.Ceiling((decimal)ByteLength / info.Frequency * Rate); - ByteLength /= info.Channels * (srcBits / 8); - ByteLength *= Channels * (SDL.SDL_AUDIO_BITSIZE(Format) / 8); + ByteLength /= info.Channels; + ByteLength *= Channels; } } else From dbef4669eca06fbd76a1a177fd81b6778e450097 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 29 Dec 2023 19:58:27 +0900 Subject: [PATCH 048/127] Remove SDL2AudioStream accordingly --- osu.Framework/Audio/SDL2AudioStream.cs | 145 ------------------------- 1 file changed, 145 deletions(-) delete mode 100644 osu.Framework/Audio/SDL2AudioStream.cs diff --git a/osu.Framework/Audio/SDL2AudioStream.cs b/osu.Framework/Audio/SDL2AudioStream.cs deleted file mode 100644 index d73d46efac..0000000000 --- a/osu.Framework/Audio/SDL2AudioStream.cs +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. -// See the LICENCE file in the repository root for full licence text. - -using System; -using System.Runtime.InteropServices; -using SDL2; - -namespace osu.Framework.Audio -{ - /// - /// Wrapper for SDL_AudioStream, which is a built-in audio converter. 
- /// - public class SDL2AudioStream : AudioComponent - { - private IntPtr stream = IntPtr.Zero; - - public ushort SrcFormat { get; private set; } - public byte SrcChannels { get; private set; } - public int SrcRate { get; private set; } - - public ushort DstFormat { get; private set; } - public byte DstChannels { get; private set; } - public int DstRate { get; private set; } - - /// - /// Creates a new . - /// - /// Source SDL_AudioFormat - /// Source channels - /// Source sample rate - /// Destination SDL_AudioFormat - /// Destination Channels - /// Destination sample rate - /// Thrown if SDL refuses to create a stream. - public SDL2AudioStream(ushort srcFormat, byte srcChannels, int srcRate, ushort dstFormat, byte dstChannels, int dstRate) - { - SrcFormat = srcFormat; - SrcChannels = srcChannels; - SrcRate = srcRate; - - if (!UpdateStream(dstFormat, dstChannels, dstRate)) - throw new FormatException("Failed creating resampling stream"); - } - - /// - /// Recreates the stream. - /// - /// Destination SDL_AudioFormat - /// Destination Channels - /// Destination sample rate - /// False if failed - public bool UpdateStream(ushort dstFormat, byte dstChannels, int dstRate) - { - if (stream != IntPtr.Zero) - SDL.SDL_FreeAudioStream(stream); - - // SDL3 may support this in a better way - stream = SDL.SDL_NewAudioStream(SrcFormat, SrcChannels, SrcRate, dstFormat, dstChannels, dstRate); - - if (stream != IntPtr.Zero) - { - DstFormat = dstFormat; - DstChannels = dstChannels; - DstRate = dstRate; - return true; - } - - return false; - } - - /// - /// Returns available samples in bytes. - /// - public int GetPendingBytes() - { - return SDL.SDL_AudioStreamAvailable(stream); - } - - /// - /// Put samples in the stream. - /// - /// Data to put - /// Data length in bytes - /// False if failed - public unsafe bool Put(byte[] data, int len) - { - fixed (byte* p = data) - { - IntPtr ptr = new IntPtr(p); - return SDL.SDL_AudioStreamPut(stream, ptr, len) == 0; - } - } - - /// - /// Get samples from the stream. - /// - /// An array that stream will put data into - /// Maximum data length in bytes - /// Returned data length in bytes - public unsafe int Get(byte[] data, int len) - { - fixed (byte* p = data) - { - IntPtr ptr = new IntPtr(p); - return SDL.SDL_AudioStreamGet(stream, ptr, len); - } - } - - // it is not available in sdl2-cs, will make a pr in future - [DllImport("SDL2", CallingConvention = CallingConvention.Cdecl)] - private static extern void SDL_AudioStreamFlush(IntPtr stream); - - /// - /// Flushes the stream. - /// - public void Flush() - { - SDL_AudioStreamFlush(stream); - } - - /// - /// Clears the stream. 
- /// - public void Clear() - { - SDL.SDL_AudioStreamClear(stream); - } - - ~SDL2AudioStream() - { - Dispose(false); - } - - protected override void Dispose(bool disposing) - { - if (IsDisposed) - return; - - if (stream != IntPtr.Zero) - SDL.SDL_FreeAudioStream(stream); - - base.Dispose(disposing); - } - } -} From 7dc9e98283aac3e73ac6e3ad858e26e00217a058 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 29 Dec 2023 19:58:42 +0900 Subject: [PATCH 049/127] Wait until sample gets loaded in SampleSDL2 --- osu.Framework/Audio/Sample/SampleFactory.cs | 9 ++++++++- osu.Framework/Audio/Sample/SampleSDL2Factory.cs | 8 +++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/Sample/SampleFactory.cs b/osu.Framework/Audio/Sample/SampleFactory.cs index 87bd763d16..0adf28f333 100644 --- a/osu.Framework/Audio/Sample/SampleFactory.cs +++ b/osu.Framework/Audio/Sample/SampleFactory.cs @@ -1,6 +1,7 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. +using System.Threading.Tasks; using osu.Framework.Bindables; namespace osu.Framework.Audio.Sample @@ -22,12 +23,18 @@ internal abstract class SampleFactory : AudioCollectionManager internal readonly Bindable PlaybackConcurrency = new Bindable(Sample.DEFAULT_CONCURRENCY); + protected Task? LoadSampleTask; + protected SampleFactory(string name, int playbackConcurrency) { Name = name; PlaybackConcurrency.Value = playbackConcurrency; - EnqueueAction(LoadSample); + LoadSampleTask = EnqueueAction(() => + { + LoadSample(); + LoadSampleTask = null; + }); PlaybackConcurrency.BindValueChanged(UpdatePlaybackConcurrency); } diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index 30fab1c960..0e6349b566 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -6,6 +6,7 @@ using System.IO; using osu.Framework.Audio.Mixing.SDL2; using osu.Framework.Bindables; +using osu.Framework.Extensions; using SDL2; namespace osu.Framework.Audio.Sample @@ -58,7 +59,12 @@ private protected override void LoadSample() } } - public SampleSDL2AudioPlayer CreatePlayer() => new SampleSDL2AudioPlayer(decodedAudio, spec.freq, spec.channels); + public SampleSDL2AudioPlayer CreatePlayer() + { + LoadSampleTask?.WaitSafely(); + + return new SampleSDL2AudioPlayer(decodedAudio, spec.freq, spec.channels); + } public override Sample CreateSample() => new SampleSDL2(this, mixer) { OnPlay = SampleFactoryOnPlay }; From 9c265875509e5a59f63c016d25d7796f221884b7 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 29 Dec 2023 20:10:49 +0900 Subject: [PATCH 050/127] Dispose SampleFactory properly --- .../Audio/Sample/SampleBassFactory.cs | 10 +++++++++- osu.Framework/Audio/Sample/SampleFactory.cs | 18 ------------------ .../Audio/Sample/SampleSDL2Factory.cs | 9 ++------- 3 files changed, 11 insertions(+), 26 deletions(-) diff --git a/osu.Framework/Audio/Sample/SampleBassFactory.cs b/osu.Framework/Audio/Sample/SampleBassFactory.cs index 2d90a622bc..c37a06ae4a 100644 --- a/osu.Framework/Audio/Sample/SampleBassFactory.cs +++ b/osu.Framework/Audio/Sample/SampleBassFactory.cs @@ -82,8 +82,16 @@ internal override void UpdateDevice(int deviceIndex) public override Sample CreateSample() => new SampleBass(this, mixer) { OnPlay = SampleFactoryOnPlay }; - private protected override void FreeSample() + ~SampleBassFactory() { + Dispose(false); + } + + protected override void Dispose(bool 
disposing) + { + if (IsDisposed) + return; + Bass.SampleFree(SampleId); memoryLease?.Dispose(); } diff --git a/osu.Framework/Audio/Sample/SampleFactory.cs b/osu.Framework/Audio/Sample/SampleFactory.cs index 0adf28f333..9f9a35f58f 100644 --- a/osu.Framework/Audio/Sample/SampleFactory.cs +++ b/osu.Framework/Audio/Sample/SampleFactory.cs @@ -45,27 +45,9 @@ protected SampleFactory(string name, int playbackConcurrency) public abstract Sample CreateSample(); - private protected abstract void FreeSample(); - protected void SampleFactoryOnPlay(Sample sample) { AddItem(sample); } - - ~SampleFactory() - { - Dispose(false); - } - - protected override void Dispose(bool disposing) - { - if (IsDisposed) - return; - - if (IsLoaded) - FreeSample(); - - base.Dispose(disposing); - } } } diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index 0e6349b566..a3565d1ab2 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -68,13 +68,6 @@ public SampleSDL2AudioPlayer CreatePlayer() public override Sample CreateSample() => new SampleSDL2(this, mixer) { OnPlay = SampleFactoryOnPlay }; - private protected override void FreeSample() - { - // All players created by this factory have reference to this array. - // It removes its own reference to the array, but GC will clear it once all SampleAudioPlayers for this sample are gone. - decodedAudio = Array.Empty(); - } - private protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) { } @@ -92,6 +85,8 @@ protected override void Dispose(bool disposing) stream?.Dispose(); stream = null; + decodedAudio = Array.Empty(); + base.Dispose(disposing); } } From 83b641d9961fe4348c174a8bb56ee97b95ba6d6b Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 29 Dec 2023 20:59:24 +0900 Subject: [PATCH 051/127] Revert "No longer make use of bits in BassAudioDecoder" This reverts commit 626a64172843748976791f128b90efdc01ced143. 
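The reverted change dropped the source resolution from the length estimate, but the byte length BASS reports is in source-format bytes: it has to be divided by the source frame size (channels times bytes per sample), rescaled by the sample-rate ratio, and multiplied by the destination frame size. A rough sketch of that conversion, with illustrative names rather than the PR's fields:

    using System;

    internal static class LengthConversionExample
    {
        public static long ConvertByteLength(long srcBytes, int srcRate, int srcChannels, int srcBits,
                                             int dstRate, int dstChannels, int dstBits)
        {
            // bytes -> sample frames in the source format
            long srcFrames = srcBytes / (srcChannels * (srcBits / 8));

            // frames scale with the sample-rate ratio
            long dstFrames = (long)Math.Ceiling((decimal)srcFrames * dstRate / srcRate);

            // frames -> bytes in the destination format
            return dstFrames * dstChannels * (dstBits / 8);
        }
    }

    // e.g. 10 seconds of 44.1 kHz / stereo / 16-bit input (1,764,000 bytes) decoded to
    // 48 kHz / stereo / 32-bit float comes out at roughly 3,840,000 bytes.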
--- osu.Framework/Audio/BassAudioDecoder.cs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index cb013e0c35..ba6003f9e1 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -108,9 +108,22 @@ protected override int LoadFromStreamInternal(out byte[] decoded) if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanBuffer | BassFlags.MixerChanNoRampin)) throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); + ushort srcBits = 32; // float by default + + switch (info.Resolution) + { + case Resolution.Byte: + srcBits = 8; + break; + + case Resolution.Short: + srcBits = 16; + break; + } + ByteLength = (long)Math.Ceiling((decimal)ByteLength / info.Frequency * Rate); - ByteLength /= info.Channels; - ByteLength *= Channels; + ByteLength /= info.Channels * (srcBits / 8); + ByteLength *= Channels * (SDL.SDL_AUDIO_BITSIZE(Format) / 8); } } else From d26a3304fbdaf332f0c31015421486537b79f703 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 30 Dec 2023 22:09:35 +0900 Subject: [PATCH 052/127] Simplify BassAudioDecoder --- osu.Framework/Audio/AudioDecoderManager.cs | 12 +++-- osu.Framework/Audio/BassAudioDecoder.cs | 54 +++++++++---------- .../Audio/Sample/SampleSDL2Factory.cs | 10 ++-- 3 files changed, 39 insertions(+), 37 deletions(-) diff --git a/osu.Framework/Audio/AudioDecoderManager.cs b/osu.Framework/Audio/AudioDecoderManager.cs index b135c9d4e7..8f8802a145 100644 --- a/osu.Framework/Audio/AudioDecoderManager.cs +++ b/osu.Framework/Audio/AudioDecoderManager.cs @@ -148,26 +148,30 @@ public AudioDecoder StartDecodingAsync(int rate, byte channels, ushort format, S return decoder; } - public static byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream) + public static byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream, out int size) { AudioDecoder decoder = CreateDecoder(freq, channels, false, format, stream); - decoder.LoadFromStream(out byte[] decoded); + int read = decoder.LoadFromStream(out byte[] decoded); if (!decoder.Loading) + { + size = read; return decoded; + } // fallback if it couldn't decode at once using (MemoryStream memoryStream = new MemoryStream()) { - memoryStream.Write(decoded); + memoryStream.Write(decoded, 0, read); while (decoder.Loading) { - int read = decoder.LoadFromStream(out decoded); + read = decoder.LoadFromStream(out decoded); memoryStream.Write(decoded, 0, read); } + size = (int)memoryStream.Length; return memoryStream.ToArray(); } } diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index ba6003f9e1..e6fb8a3f45 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -17,7 +17,9 @@ namespace osu.Framework.Audio internal class BassAudioDecoder : AudioDecoder { private int decodeStream; - private FileCallbacks? callbacks; + private FileCallbacks? fileCallbacks; + + private SyncCallback? syncCallback; private int resampler; @@ -42,6 +44,8 @@ private Resolution resolution } } + private ushort bits => SDL.SDL_AUDIO_BITSIZE(Format); + public BassAudioDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? 
pass) : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) { @@ -61,7 +65,11 @@ internal override void Free() decodeStream = 0; } - callbacks?.Dispose(); + fileCallbacks?.Dispose(); + syncCallback?.Dispose(); + + fileCallbacks = null; + syncCallback = null; decodeData = null; @@ -79,51 +87,39 @@ protected override int LoadFromStreamInternal(out byte[] decoded) { if (!Loading) { - callbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); + fileCallbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); + syncCallback = new SyncCallback((_, _, _, _) => + { + Loading = false; + }); + BassFlags bassFlags = BassFlags.Decode | resolution.ToBassFlag(); if (IsTrack) bassFlags |= BassFlags.Prescan; - decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, callbacks.Callbacks); + decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, fileCallbacks.Callbacks); if (decodeStream == 0) throw new FormatException($"Couldn't create stream: {Bass.LastError}"); - bool infoAvail = Bass.ChannelGetInfo(decodeStream, out var info); - - if (infoAvail) + if (Bass.ChannelGetInfo(decodeStream, out var info)) { ByteLength = Bass.ChannelGetLength(decodeStream); - Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000; + Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000.0d; Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(decodeStream, ChannelAttribute.Bitrate)); - if (info.Channels != Channels || info.Resolution != resolution || info.Frequency != Rate) + if (info.Channels != Channels || info.Frequency != Rate) { resampler = BassMix.CreateMixerStream(Rate, Channels, BassFlags.MixerEnd | BassFlags.Decode | resolution.ToBassFlag()); if (resampler == 0) throw new FormatException($"Failed to create BASS Mixer: {Bass.LastError}"); - Bass.ChannelSetAttribute(resampler, ChannelAttribute.Buffer, 0); - - if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanBuffer | BassFlags.MixerChanNoRampin)) + if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanNoRampin | BassFlags.MixerChanLimit)) throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); - ushort srcBits = 32; // float by default - - switch (info.Resolution) - { - case Resolution.Byte: - srcBits = 8; - break; - - case Resolution.Short: - srcBits = 16; - break; - } - + ByteLength /= info.Channels * (bits / 8); ByteLength = (long)Math.Ceiling((decimal)ByteLength / info.Frequency * Rate); - ByteLength /= info.Channels * (srcBits / 8); - ByteLength *= Channels * (SDL.SDL_AUDIO_BITSIZE(Format) / 8); + ByteLength *= Channels * (bits / 8); } } else @@ -132,12 +128,14 @@ protected override int LoadFromStreamInternal(out byte[] decoded) throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); } + Bass.ChannelSetSync(resampler == 0 ? decodeStream : resampler, SyncFlags.End | SyncFlags.Onetime, 0, syncCallback.Callback, syncCallback.Handle); + Loading = true; } int handle = resampler == 0 ? decodeStream : resampler; - int bufferLen = (int)(IsTrack ? 
Bass.ChannelSeconds2Bytes(handle, 1) : ByteLength); + int bufferLen = (int)Bass.ChannelSeconds2Bytes(handle, 1); if (bufferLen <= 0) bufferLen = 44100 * 2 * 4 * 1; diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index a3565d1ab2..6f1163387d 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -41,15 +41,15 @@ private protected override void LoadSample() try { - byte[] audio = AudioDecoderManager.DecodeAudio(spec.freq, spec.channels, spec.format, stream); + byte[] audio = AudioDecoderManager.DecodeAudio(spec.freq, spec.channels, spec.format, stream, out int size); - if (audio.Length > 0) + if (size > 0) { - decodedAudio = new float[audio.Length / 4]; - Buffer.BlockCopy(audio, 0, decodedAudio, 0, audio.Length); + decodedAudio = new float[size / 4]; + Buffer.BlockCopy(audio, 0, decodedAudio, 0, size); } - Length = audio.Length / 4d / spec.freq / spec.channels * 1000d; + Length = size / 4d / spec.freq / spec.channels * 1000d; isLoaded = true; } finally From 50e32b3bf5d63b2f68e72fe1774268e9ce88afeb Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 30 Dec 2023 22:09:48 +0900 Subject: [PATCH 053/127] Implement SDL2AudioCallback properly --- .../Audio/Callbacks/SDL2AudioCallback.cs | 31 +++++++++++++++++++ osu.Framework/Audio/SDL2AudioManager.cs | 12 +++++-- 2 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 osu.Framework/Audio/Callbacks/SDL2AudioCallback.cs diff --git a/osu.Framework/Audio/Callbacks/SDL2AudioCallback.cs b/osu.Framework/Audio/Callbacks/SDL2AudioCallback.cs new file mode 100644 index 0000000000..c33bca8aeb --- /dev/null +++ b/osu.Framework/Audio/Callbacks/SDL2AudioCallback.cs @@ -0,0 +1,31 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using System.Runtime.CompilerServices; +using osu.Framework.Allocation; +using osu.Framework.Platform; +using SDL2; + +namespace osu.Framework.Audio.Callbacks +{ + internal class SDL2AudioCallback : BassCallback + { + public SDL.SDL_AudioCallback Callback => RuntimeFeature.IsDynamicCodeSupported ? AudioCallback : audioCallback; + + public readonly SDL.SDL_AudioCallback AudioCallback; + + public SDL2AudioCallback(SDL.SDL_AudioCallback callback) + { + AudioCallback = callback; + } + + [MonoPInvokeCallback(typeof(SDL.SDL_AudioCallback))] + private static void audioCallback(IntPtr userdata, IntPtr stream, int len) + { + var ptr = new ObjectHandle(userdata); + if (ptr.GetTarget(out var target)) + target.AudioCallback(userdata, stream, len); + } + } +} diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 0edcef1bc5..41ffeeb47f 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -8,6 +8,7 @@ using System.Collections.Immutable; using System.IO; using System.Linq; +using osu.Framework.Audio.Callbacks; using osu.Framework.Audio.Mixing; using osu.Framework.Audio.Mixing.SDL2; using osu.Framework.Audio.Sample; @@ -34,6 +35,8 @@ public class SDL2AudioManager : AudioManager private readonly List sdlMixerList = new List(); + private readonly SDL2AudioCallback audioCallback; + /// /// Creates a new . 
/// @@ -43,13 +46,16 @@ public class SDL2AudioManager : AudioManager public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) : base(audioThread, trackStore, sampleStore) { + audioCallback = new SDL2AudioCallback((_, stream, size) => internalAudioCallback(stream, size)); + // Must not edit this except for samples, as components (especially mixer) expects this to match. spec = new SDL.SDL_AudioSpec { freq = AUDIO_FREQ, channels = AUDIO_CHANNELS, format = AUDIO_FORMAT, - callback = audioCallback, + callback = audioCallback.Callback, + userdata = audioCallback.Handle, samples = 256 // determines latency, this value can be changed but is already reasonably low }; @@ -117,7 +123,7 @@ protected override void ItemRemoved(AudioComponent item) } } - private void audioCallback(IntPtr userdata, IntPtr stream, int bufsize) + private void internalAudioCallback(IntPtr stream, int bufsize) { try { @@ -237,6 +243,8 @@ protected override void Dispose(bool disposing) SDL.SDL_CloseAudioDevice(deviceId); deviceId = 0; } + + audioCallback?.Dispose(); } } } From cad1fa2320547c821568e06b4e22d037d687f995 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 31 Dec 2023 17:29:44 +0900 Subject: [PATCH 054/127] Remove sync at end in BassAudioDecoder in code --- osu.Framework/Audio/BassAudioDecoder.cs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index e6fb8a3f45..bcf0d34b89 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -19,6 +19,7 @@ internal class BassAudioDecoder : AudioDecoder private int decodeStream; private FileCallbacks? fileCallbacks; + private int syncHandle; private SyncCallback? syncCallback; private int resampler; @@ -53,6 +54,17 @@ public BassAudioDecoder(int rate, int channels, bool isTrack, ushort format, Str internal override void Free() { + if (syncHandle != 0) + Bass.ChannelRemoveSync(resampler == 0 ? decodeStream : resampler, syncHandle); + + fileCallbacks?.Dispose(); + syncCallback?.Dispose(); + + fileCallbacks = null; + syncCallback = null; + + decodeData = null; + if (resampler != 0) { Bass.StreamFree(resampler); @@ -65,14 +77,6 @@ internal override void Free() decodeStream = 0; } - fileCallbacks?.Dispose(); - syncCallback?.Dispose(); - - fileCallbacks = null; - syncCallback = null; - - decodeData = null; - base.Free(); } @@ -128,7 +132,7 @@ protected override int LoadFromStreamInternal(out byte[] decoded) throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); } - Bass.ChannelSetSync(resampler == 0 ? decodeStream : resampler, SyncFlags.End | SyncFlags.Onetime, 0, syncCallback.Callback, syncCallback.Handle); + syncHandle = Bass.ChannelSetSync(resampler == 0 ? 
decodeStream : resampler, SyncFlags.End | SyncFlags.Onetime, 0, syncCallback.Callback, syncCallback.Handle); Loading = true; } From c65479b0c0d0081704f2a85f0c8b3a4f66894560 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 31 Dec 2023 17:30:01 +0900 Subject: [PATCH 055/127] Fix SampleFactory docs summary --- osu.Framework/Audio/Sample/SampleFactory.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Sample/SampleFactory.cs b/osu.Framework/Audio/Sample/SampleFactory.cs index 9f9a35f58f..fe7cab98a1 100644 --- a/osu.Framework/Audio/Sample/SampleFactory.cs +++ b/osu.Framework/Audio/Sample/SampleFactory.cs @@ -7,7 +7,7 @@ namespace osu.Framework.Audio.Sample { /// - /// A factory for objects sharing a common sample ID (and thus playback concurrency). + /// A factory for objects sharing a common sample ID (and thus playback concurrency). /// internal abstract class SampleFactory : AudioCollectionManager { From e8f1cbabcb52d218a414389d64f02d3264fc1657 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 31 Dec 2023 17:30:32 +0900 Subject: [PATCH 056/127] No lock in TrackSDL2 Peek --- osu.Framework/Audio/Track/TrackSDL2.cs | 14 +++++-- .../Audio/Track/TrackSDL2AudioPlayer.cs | 41 +++++++++++-------- 2 files changed, 33 insertions(+), 22 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index ad3a3b10ea..bb862a5da0 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -97,8 +97,7 @@ private void updateCurrentAmplitude() fftSamples ??= new Complex[ChannelAmplitudes.AMPLITUDES_SIZE * 2]; fftResult ??= new float[ChannelAmplitudes.AMPLITUDES_SIZE]; - lock (syncRoot) - player.Peek(samples); + player.Peek(samples, lastTime); float leftAmplitude = 0; float rightAmplitude = 0; @@ -185,6 +184,8 @@ public override async Task SeekAsync(double seek) private void seekInternal(double seek) { + double time; + lock (syncRoot) { player.Seek(seek); @@ -195,8 +196,10 @@ private void seekInternal(double seek) hasCompleted = false; } - Interlocked.Exchange(ref currentTime, player.GetCurrentTime()); + time = player.GetCurrentTime(); } + + Interlocked.Exchange(ref currentTime, time); } public override void Start() @@ -228,13 +231,16 @@ int ISDL2AudioChannel.GetRemainingSamples(float[] data) if (!IsLoaded) return 0; int ret; + double time; lock (syncRoot) { + time = player.GetCurrentTime(); ret = player.GetRemainingSamples(data); - Interlocked.Exchange(ref currentTime, player.GetCurrentTime()); } + Interlocked.Exchange(ref currentTime, time); + if (ret < 0) { EnqueueAction(RaiseFailed); diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index 33f41e4fc1..b6d48691e6 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -3,6 +3,7 @@ using System; using System.Buffers; +using System.Threading; using osu.Framework.Logging; namespace osu.Framework.Audio.Track @@ -44,7 +45,7 @@ internal class TrackSDL2AudioPlayer : ResamplingPlayer, IDisposable private bool dataRented; - public long AudioDataLength { get; private set; } + private long audioDataLength; /// /// Play backwards if set to true. 
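Dropping the lock from the amplitude path leans on atomic access to the shared 64-bit state: the track now computes the time inside the lock but publishes it with Interlocked.Exchange outside, and the player reads audioDataLength through Interlocked.Read further down, so a reader on another thread can never observe a torn value (which matters on 32-bit targets, where plain long reads and writes are not atomic). A minimal sketch of the single-writer/single-reader pattern, with illustrative names rather than the PR's fields:

    using System.Threading;

    internal class SharedLengthExample
    {
        private long length;

        // writer side (decoder thread): publish the new length only after the
        // samples themselves are already in place
        public void Publish(long newLength) => Interlocked.Exchange(ref length, newLength);

        // reader side (amplitude/peek thread): a tear-free 64-bit read
        public long Read() => Interlocked.Read(ref length);
    }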
@@ -84,7 +85,7 @@ private void prepareArray(long wanted) if (AudioData != null) { - Array.Copy(AudioData, 0, temp, 0, AudioDataLength); + Array.Copy(AudioData, 0, temp, 0, audioDataLength); if (dataRented) ArrayPool.Shared.Return(AudioData); @@ -115,8 +116,8 @@ internal void PutSamplesInStream(byte[] next, int length) int floatLen = length / sizeof(float); - if (AudioDataLength + floatLen > AudioData.LongLength) - prepareArray(AudioDataLength + floatLen); + if (audioDataLength + floatLen > AudioData.LongLength) + prepareArray(audioDataLength + floatLen); unsafe // To directly put bytes as float in array { @@ -124,11 +125,11 @@ internal void PutSamplesInStream(byte[] next, int length) fixed (void* ptr = next) { float* src = (float*)ptr; - Buffer.MemoryCopy(src, dest + AudioDataLength, (AudioData.LongLength - AudioDataLength) * sizeof(float), length); + Buffer.MemoryCopy(src, dest + audioDataLength, (AudioData.LongLength - audioDataLength) * sizeof(float), length); } } - AudioDataLength += floatLen; + audioDataLength += floatLen; } internal void DonePutting() @@ -137,7 +138,7 @@ internal void DonePutting() return; // Saved seek was over data length - if (SaveSeek > AudioDataLength) + if (SaveSeek > audioDataLength) SaveSeek = 0; isLoading = false; @@ -149,7 +150,7 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede if (AudioData == null) return 0; - if (AudioDataLength <= 0) + if (audioDataLength <= 0) { done = true; return 0; @@ -162,7 +163,7 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede SaveSeek = 0; // player now has audio data to play - if (AudioDataLength > SaveSeek) + if (audioDataLength > SaveSeek) { AudioDataPosition = SaveSeek; SaveSeek = 0; @@ -192,7 +193,7 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede } else { - long remain = AudioDataLength - AudioDataPosition; + long remain = audioDataLength - AudioDataPosition; read = (int)Math.Min(needed, remain); Array.Copy(AudioData, AudioDataPosition, data, offset, read); @@ -202,7 +203,7 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede if (read < needed && isLoading) Logger.Log("Track underrun!"); - if (ReversePlayback ? AudioDataPosition <= 0 : AudioDataPosition >= AudioDataLength && !isLoading) + if (ReversePlayback ? AudioDataPosition <= 0 : AudioDataPosition >= audioDataLength && !isLoading) done = true; return read; @@ -212,14 +213,18 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede /// Puts recently played audio samples into data. Mostly used to calculate amplitude of a track. /// /// A float array to put data in + /// /// True if succeeded - public bool Peek(float[] data) + public bool Peek(float[] data, double posMs) { if (AudioData == null) return false; - long start = Math.Max(0, AudioDataPosition - data.Length); // To get most recently 'used' audio data - long remain = AudioDataLength - start; + long pos = GetIndexFromMs(posMs); + long len = Interlocked.Read(ref audioDataLength); + + long start = Math.Max(0, pos); // To get most recently 'used' audio data + long remain = len - start; Array.Copy(AudioData, start, data, 0, Math.Min(data.Length, remain)); return true; @@ -257,22 +262,22 @@ public double GetCurrentTime() /// /// Sets the position of this player. - /// If the given value is over current , it will be saved and pause playback until decoding reaches the position. 
- /// However, if the value is still over after the decoding is over, it will be discarded. + /// If the given value is over current , it will be saved and pause playback until decoding reaches the position. + /// However, if the value is still over after the decoding is over, it will be discarded. /// /// Position in milliseconds public virtual void Seek(double seek) { long tmp = GetIndexFromMs(seek); - if (!isLoaded && tmp > AudioDataLength) + if (!isLoaded && tmp > audioDataLength) { SaveSeek = tmp; } else if (AudioData != null) { SaveSeek = 0; - AudioDataPosition = Math.Clamp(tmp, 0, Math.Max(0, AudioDataLength - 1)); + AudioDataPosition = Math.Clamp(tmp, 0, Math.Max(0, audioDataLength - 1)); Flush(); } } From 3fc1b0bb01f1ee50e6d64b99625cc98625b52dc5 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 31 Dec 2023 17:38:29 +0900 Subject: [PATCH 057/127] Clear syncHandle in BassAudioDecoder --- osu.Framework/Audio/BassAudioDecoder.cs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index bcf0d34b89..d168f7bcd1 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -55,7 +55,10 @@ public BassAudioDecoder(int rate, int channels, bool isTrack, ushort format, Str internal override void Free() { if (syncHandle != 0) + { Bass.ChannelRemoveSync(resampler == 0 ? decodeStream : resampler, syncHandle); + syncHandle = 0; + } fileCallbacks?.Dispose(); syncCallback?.Dispose(); From a2681a5054e75d6cba93e9440ce6296d41c17910 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 2 Jan 2024 15:45:15 +0900 Subject: [PATCH 058/127] Properly clamp TrackSDL2Player Peek position --- osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index b6d48691e6..e000d1d9cd 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -223,7 +223,7 @@ public bool Peek(float[] data, double posMs) long pos = GetIndexFromMs(posMs); long len = Interlocked.Read(ref audioDataLength); - long start = Math.Max(0, pos); // To get most recently 'used' audio data + long start = Math.Clamp(pos, 0, len); long remain = len - start; Array.Copy(AudioData, start, data, 0, Math.Min(data.Length, remain)); From 5e73ed8a71811f3641ca5d7a1c2076bfeab8a80d Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 2 Jan 2024 15:46:07 +0900 Subject: [PATCH 059/127] Use Clear instead of Flush in TrackSDL2Players --- osu.Framework/Audio/ResamplingPlayer.cs | 2 +- .../Audio/Track/TempoSDL2AudioPlayer.cs | 26 +++++++++---------- .../Audio/Track/TrackSDL2AudioPlayer.cs | 18 +++++-------- 3 files changed, 19 insertions(+), 27 deletions(-) diff --git a/osu.Framework/Audio/ResamplingPlayer.cs b/osu.Framework/Audio/ResamplingPlayer.cs index 9575f5d1a6..488e5fe911 100644 --- a/osu.Framework/Audio/ResamplingPlayer.cs +++ b/osu.Framework/Audio/ResamplingPlayer.cs @@ -73,7 +73,7 @@ protected virtual double GetProcessingLatency() return resampler.GetCurrentLatency() * 1000.0d; } - public virtual void Flush() + public virtual void Clear() { resampler?.Reset(); } diff --git a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs index 57d8b5dbc1..9008753d5b 100644 --- a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs @@ -48,7 
+48,7 @@ public TempoSDL2AudioPlayer(int rate, byte channels, int samples) /// Needed sample count private void fillSamples(int samples) { - if (soundTouch == null) + if (soundTouch == null || tempo == 1.0f) return; while (!base.Done && soundTouch.AvailableSamples < samples) @@ -104,15 +104,14 @@ private void setTempo(double tempo) if (temp >= 0) AudioDataPosition = temp; } - - Reset(false); - soundTouch = null; - return; + } + else + { + double tempochange = Math.Clamp((Math.Abs(tempo) - 1.0d) * 100.0d, -95, 5000); + soundTouch.TempoChange = tempochange; } - double tempochange = Math.Clamp((Math.Abs(tempo) - 1.0d) * 100.0d, -95, 5000); - soundTouch.TempoChange = tempochange; - FillRequiredSamples(); + Clear(); } } @@ -123,7 +122,7 @@ private void setTempo(double tempo) /// The number of samples put public override int GetRemainingSamples(float[] ret) { - if (soundTouch == null) + if (soundTouch == null || tempo == 1.0f) return base.GetRemainingSamples(ret); if (RelativeRate == 0) @@ -132,9 +131,7 @@ public override int GetRemainingSamples(float[] ret) int expected = ret.Length / SrcChannels; if (!doneFilling && soundTouch.AvailableSamples < expected) - { fillSamples(expected); - } int got = soundTouch.ReceiveSamples(ret, expected); @@ -147,6 +144,7 @@ public override int GetRemainingSamples(float[] ret) public override void Reset(bool resetPosition = true) { base.Reset(resetPosition); + doneFilling = false; donePlaying = false; } @@ -161,10 +159,10 @@ protected int GetTempoLatencyInSamples() protected override double GetProcessingLatency() => base.GetProcessingLatency() + (double)GetTempoLatencyInSamples() / SrcRate * 1000.0d; - public override void Flush() + public override void Clear() { - base.Flush(); - soundTouch?.Flush(); + base.Clear(); + soundTouch?.Clear(); } public override void Seek(double seek) diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index e000d1d9cd..f6d902caef 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -4,7 +4,6 @@ using System; using System.Buffers; using System.Threading; -using osu.Framework.Logging; namespace osu.Framework.Audio.Track { @@ -200,9 +199,6 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede AudioDataPosition += read; } - if (read < needed && isLoading) - Logger.Log("Track underrun!"); - if (ReversePlayback ? 
AudioDataPosition <= 0 : AudioDataPosition >= audioDataLength && !isLoading) done = true; @@ -243,6 +239,8 @@ public virtual void Reset(bool resetPosition = true) SaveSeek = 0; Seek(0); } + + Clear(); } /// @@ -278,7 +276,7 @@ public virtual void Seek(double seek) { SaveSeek = 0; AudioDataPosition = Math.Clamp(tmp, 0, Math.Max(0, audioDataLength - 1)); - Flush(); + Clear(); } } @@ -288,14 +286,10 @@ protected virtual void Dispose(bool disposing) { if (!disposedValue) { - if (disposing) - { - if (dataRented && AudioData != null) - ArrayPool.Shared.Return(AudioData); - - AudioData = null; - } + if (dataRented && AudioData != null) + ArrayPool.Shared.Return(AudioData); + AudioData = null; disposedValue = true; } } From e0300986b2a0e5bc80078c31a93fc43015c5e3a5 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 2 Jan 2024 15:59:29 +0900 Subject: [PATCH 060/127] Use while instead --- osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs b/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs index 2f6a869770..6aedd177aa 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs @@ -32,7 +32,7 @@ protected override int GetRemainingRawFloats(float[] data, int offset, int neede int i = 0; - for (; i < needed;) + while (i < needed) { int put = Math.Min(needed - i, audioData.Length - position); From 6afa85a4624517d3f81c1732c2f09f7422a995aa Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 12 Jan 2024 22:13:51 +0900 Subject: [PATCH 061/127] Add the dispose logic in SDL2AudioMixer --- osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs index 11bda979e7..6507bd8be8 100644 --- a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs @@ -258,6 +258,15 @@ private static BiQuadFilter getFilter(float freq, BQFParameters bqfp) return filter; } + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + + // Move all contained channels back to the default mixer. 
+ foreach (var channel in activeChannels.ToArray()) + Remove(channel); + } + public void StreamFree(IAudioChannel channel) { Remove(channel, false); From b330078acc963edac8268199ead283ef13e828ad Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 12 Jan 2024 22:14:35 +0900 Subject: [PATCH 062/127] Handle SDL2 Audio device event --- osu.Framework/Platform/SDL2Window.cs | 32 ++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/osu.Framework/Platform/SDL2Window.cs b/osu.Framework/Platform/SDL2Window.cs index 0baa19e53e..aa05ef0fdb 100644 --- a/osu.Framework/Platform/SDL2Window.cs +++ b/osu.Framework/Platform/SDL2Window.cs @@ -579,6 +579,28 @@ protected virtual void HandleEvent(SDL.SDL_Event e) case SDL.SDL_EventType.SDL_DROPCOMPLETE: handleDropEvent(e.drop); break; + + case SDL.SDL_EventType.SDL_AUDIODEVICEADDED: + case SDL.SDL_EventType.SDL_AUDIODEVICEREMOVED: + handleAudioDeviceEvent(e.adevice); + break; + } + } + + private void handleAudioDeviceEvent(SDL.SDL_AudioDeviceEvent evtAudioDevice) + { + if (evtAudioDevice.iscapture != 0) // capture device + return; + + switch ((SDL.SDL_EventType)evtAudioDevice.type) + { + case SDL.SDL_EventType.SDL_AUDIODEVICEADDED: + AudioDeviceAdded?.Invoke(evtAudioDevice.which); + break; + + case SDL.SDL_EventType.SDL_AUDIODEVICEREMOVED: + AudioDeviceRemoved?.Invoke(evtAudioDevice.which); + break; } } @@ -652,6 +674,16 @@ internal virtual void SetIconFromGroup(IconGroup iconGroup) /// public event Action? DragDrop; + /// + /// Invoked when a new audio device is added, only when using SDL2 audio + /// + public event Action? AudioDeviceAdded; + + /// + /// Invoked when a new audio device is removed, only when using SDL2 audio + /// + public event Action? AudioDeviceRemoved; + #endregion public void Dispose() From fa715c215b5476cc5088325598d01126dc3931bb Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 12 Jan 2024 22:15:17 +0900 Subject: [PATCH 063/127] Prepare to invoke audio events in SDL2AudioManager --- osu.Framework/Game.cs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Game.cs b/osu.Framework/Game.cs index 8944f1db66..520b62268e 100644 --- a/osu.Framework/Game.cs +++ b/osu.Framework/Game.cs @@ -167,9 +167,12 @@ private void load(FrameworkConfigManager config) switch (config.Get(FrameworkSetting.AudioDriver)) { - case AudioDriver.SDL2: - Audio = new SDL2AudioManager(Host.AudioThread, tracks, samples) { EventScheduler = Scheduler }; + case AudioDriver.SDL2 when Host.Window is SDL2Window sdl2Window: + { + SDL2AudioManager sdl2Audio = new SDL2AudioManager(Host.AudioThread, tracks, samples) { EventScheduler = Scheduler }; + Audio = sdl2Audio; break; + } default: Audio = new BassAudioManager(Host.AudioThread, tracks, samples) { EventScheduler = Scheduler }; From 679c4fe0b04f062ccfb1a915e01217779a7f01cd Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 13 Jan 2024 00:24:43 +0900 Subject: [PATCH 064/127] Move syncAudioDevices to Bass entirely and let SDL2AudioManager receive events --- osu.Framework/Audio/AudioManager.cs | 71 ++----------- osu.Framework/Audio/BassAudioManager.cs | 127 ++++++++++++++++++------ osu.Framework/Audio/SDL2AudioManager.cs | 54 +++++++--- osu.Framework/Game.cs | 2 + osu.Framework/Platform/SDL2Window.cs | 8 +- 5 files changed, 151 insertions(+), 111 deletions(-) diff --git a/osu.Framework/Audio/AudioManager.cs b/osu.Framework/Audio/AudioManager.cs index 0fb9c7fbeb..a6fccdcd61 100644 --- a/osu.Framework/Audio/AudioManager.cs +++ b/osu.Framework/Audio/AudioManager.cs @@ -57,11 +57,17 @@ 
public abstract class AudioManager : AudioCollectionManager /// public event Action OnNewDevice; + // workaround as c# doesn't allow actions to get invoked outside of this class + protected void InvokeOnNewDevice(string deviceName) => OnNewDevice?.Invoke(deviceName); + /// /// Is fired whenever an audio device is lost and provides its name. /// public event Action OnLostDevice; + // same as above + protected void InvokeOnLostDevice(string deviceName) => OnLostDevice?.Invoke(deviceName); + /// /// The preferred audio device we should use. A value of /// denotes the OS default. @@ -98,9 +104,7 @@ public abstract class AudioManager : AudioCollectionManager private Scheduler scheduler => CurrentAudioThread.Scheduler; - private Scheduler eventScheduler => EventScheduler ?? scheduler; - - private readonly CancellationTokenSource cancelSource = new CancellationTokenSource(); + protected readonly CancellationTokenSource CancelSource = new CancellationTokenSource(); /// /// The scheduler used for invoking publicly exposed delegate events. @@ -145,32 +149,6 @@ protected AudioManager(AudioThread audioThread, ResourceStore trackStore store.AddAdjustment(AdjustableProperty.Volume, VolumeSample); return store; }); - - CancellationToken token = cancelSource.Token; - - syncAudioDevices(); - scheduler.AddDelayed(() => - { - // sync audioDevices every 1000ms - new Thread(() => - { - while (!token.IsCancellationRequested) - { - try - { - if (CheckForDeviceChanges(audioDevices)) - syncAudioDevices(); - Thread.Sleep(1000); - } - catch - { - } - } - }) - { - IsBackground = true - }.Start(); - }, 1000); } internal abstract Track.Track GetNewTrack(Stream data, string name); @@ -179,7 +157,7 @@ protected AudioManager(AudioThread audioThread, ResourceStore trackStore protected override void Dispose(bool disposing) { - cancelSource.Cancel(); + CancelSource.Cancel(); CurrentAudioThread.UnregisterManager(this); @@ -194,18 +172,6 @@ protected void OnDeviceChanged() scheduler.Add(() => SetAudioDevice(AudioDevice.Value)); } - private void onDevicesChanged() - { - scheduler.Add(() => - { - if (cancelSource.IsCancellationRequested) - return; - - if (!IsCurrentDeviceValid()) - SetAudioDevice(); - }); - } - private static int userMixerID; /// @@ -272,27 +238,6 @@ public ISampleStore GetSampleStore(IResourceStore store = null, AudioMix protected abstract bool SetAudioDevice(string deviceName = null); protected abstract bool SetAudioDevice(int deviceIndex); - protected abstract bool IsDevicesUpdated(out ImmutableList newDevices, out ImmutableList lostDevices); - - private void syncAudioDevices() - { - if (IsDevicesUpdated(out ImmutableList newDevices, out ImmutableList lostDevices)) - { - onDevicesChanged(); - - if (newDevices.Count > 0 || lostDevices.Count > 0) - { - eventScheduler.Add(delegate - { - foreach (string d in newDevices) - OnNewDevice?.Invoke(d); - foreach (string d in lostDevices) - OnLostDevice?.Invoke(d); - }); - } - } - } - // The current device is considered valid if it is enabled, initialized, and not a fallback device. 
protected abstract bool IsCurrentDeviceValid(); diff --git a/osu.Framework/Audio/BassAudioManager.cs b/osu.Framework/Audio/BassAudioManager.cs index 2e1f0b966c..41a3507720 100644 --- a/osu.Framework/Audio/BassAudioManager.cs +++ b/osu.Framework/Audio/BassAudioManager.cs @@ -4,11 +4,11 @@ #nullable disable using System; -using System.Collections.Generic; using System.Collections.Immutable; using System.Diagnostics; using System.IO; using System.Linq; +using System.Threading; using ManagedBass; using ManagedBass.Fx; using ManagedBass.Mix; @@ -63,9 +63,9 @@ public class BassAudioManager : AudioManager Bass.CurrentDevice != Bass.DefaultDevice; // Mutated by multiple threads, must be thread safe. - private ImmutableList audioDevices = ImmutableList.Empty; + private ImmutableArray audioDevices = ImmutableArray.Empty; - private readonly DeviceInfoUpdateComparer updateComparer = new DeviceInfoUpdateComparer(); + private Scheduler eventScheduler => EventScheduler ?? CurrentAudioThread.Scheduler; /// /// Constructs an AudioStore given a track resource store, and a sample resource store. @@ -81,6 +81,44 @@ public BassAudioManager(AudioThread audioThread, ResourceStore trackStor OnDeviceChanged(); UsingGlobalMixer.Value = handle.NewValue.HasValue; }; + + CancellationToken token = CancelSource.Token; + + syncAudioDevices(); + eventScheduler.AddDelayed(() => + { + // sync audioDevices every 1000ms + new Thread(() => + { + while (!token.IsCancellationRequested) + { + try + { + if (CheckForDeviceChanges(audioDevices)) + syncAudioDevices(); + Thread.Sleep(1000); + } + catch + { + } + } + }) + { + IsBackground = true + }.Start(); + }, 1000); + } + + protected void OnDevicesChanged() + { + eventScheduler.Add(() => + { + if (CancelSource.IsCancellationRequested) + return; + + if (!IsCurrentDeviceValid()) + SetAudioDevice(); + }); } internal override Track.Track GetNewTrack(Stream data, string name) => new TrackBass(data, name); @@ -208,37 +246,77 @@ protected virtual bool InitBass(int device) return true; } - protected override bool IsDevicesUpdated(out ImmutableList newDevices, out ImmutableList lostDevices) + private void syncAudioDevices() { - // audioDevices are updated if: - // - A new device is added - // - An existing device is Enabled/Disabled or set as Default - var updatedAudioDevices = EnumerateAllDevices().ToImmutableList(); + audioDevices = GetAllDevices(); - if (audioDevices.SequenceEqual(updatedAudioDevices, updateComparer)) + // Bass should always be providing "No sound" and "Default" device. + Trace.Assert(audioDevices.Length >= BASS_INTERNAL_DEVICE_COUNT, "Bass did not provide any audio devices."); + + var oldDeviceNames = DeviceNames; + var newDeviceNames = DeviceNames = audioDevices.Skip(BASS_INTERNAL_DEVICE_COUNT).Where(d => d.IsEnabled).Select(d => d.Name).ToImmutableList(); + + OnDevicesChanged(); + + var newDevices = newDeviceNames.Except(oldDeviceNames).ToList(); + var lostDevices = oldDeviceNames.Except(newDeviceNames).ToList(); + + if (newDevices.Count > 0 || lostDevices.Count > 0) { - newDevices = lostDevices = ImmutableList.Empty; - return false; + eventScheduler.Add(delegate + { + foreach (string d in newDevices) + InvokeOnNewDevice(d); + foreach (string d in lostDevices) + InvokeOnLostDevice(d); + }); } + } + + /// + /// Check whether any audio device changes have occurred. 
+ /// + /// Changes supported are: + /// - A new device is added + /// - An existing device is Enabled/Disabled or set as Default + /// + /// + /// This method is optimised to incur the lowest overhead possible. + /// + /// The previous audio devices array. + /// Whether a change was detected. + protected virtual bool CheckForDeviceChanges(ImmutableArray previousDevices) + { + int deviceCount = Bass.DeviceCount; - audioDevices = updatedAudioDevices; + if (previousDevices.Length != deviceCount) + return true; - // Bass should always be providing "No sound" and "Default" device. - Trace.Assert(audioDevices.Count >= BASS_INTERNAL_DEVICE_COUNT, "Bass did not provide any audio devices."); + for (int i = 0; i < deviceCount; i++) + { + var prevInfo = previousDevices[i]; - var oldDeviceNames = DeviceNames; - var newDeviceNames = DeviceNames = audioDevices.Skip(BASS_INTERNAL_DEVICE_COUNT).Where(d => d.IsEnabled).Select(d => d.Name).ToImmutableList(); + Bass.GetDeviceInfo(i, out var info); - newDevices = newDeviceNames.Except(oldDeviceNames).ToImmutableList(); - lostDevices = oldDeviceNames.Except(newDeviceNames).ToImmutableList(); - return true; + if (info.IsEnabled != prevInfo.IsEnabled) + return true; + + if (info.IsDefault != prevInfo.IsDefault) + return true; + } + + return false; } - protected virtual IEnumerable EnumerateAllDevices() + protected virtual ImmutableArray GetAllDevices() { int deviceCount = Bass.DeviceCount; + + var devices = ImmutableArray.CreateBuilder(deviceCount); for (int i = 0; i < deviceCount; i++) - yield return Bass.GetDeviceInfo(i); + devices.Add(Bass.GetDeviceInfo(i)); + + return devices.MoveToImmutable(); } // The current device is considered valid if it is enabled, initialized, and not a fallback device. @@ -254,12 +332,5 @@ public override string ToString() string deviceName = audioDevices.ElementAtOrDefault(Bass.CurrentDevice).Name; return $@"{GetType().ReadableName()} ({deviceName ?? "Unknown"})"; } - - private class DeviceInfoUpdateComparer : IEqualityComparer - { - public bool Equals(DeviceInfo x, DeviceInfo y) => x.IsEnabled == y.IsEnabled && x.IsDefault == y.IsDefault; - - public int GetHashCode(DeviceInfo obj) => obj.Name.GetHashCode(); - } } } diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 41ffeeb47f..f5a07e15f2 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -7,7 +7,6 @@ using System.Collections.Generic; using System.Collections.Immutable; using System.IO; -using System.Linq; using osu.Framework.Audio.Callbacks; using osu.Framework.Audio.Mixing; using osu.Framework.Audio.Mixing.SDL2; @@ -37,6 +36,8 @@ public class SDL2AudioManager : AudioManager private readonly SDL2AudioCallback audioCallback; + private Scheduler eventScheduler => EventScheduler ?? CurrentAudioThread.Scheduler; + /// /// Creates a new . 
         ///
@@ -59,9 +60,11 @@ public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStor
                 samples = 256 // determines latency, this value can be changed but is already reasonably low
             };
 
-            // comment below lines if you want to use FFmpeg to decode audio, AudioDecoder will use FFmpeg if no BASS device is available
-            EnqueueAction(() =>
+            eventScheduler.Add(() =>
             {
+                updateDeviceNames();
+
+                // comment below lines if you want to use FFmpeg to decode audio, AudioDecoder will use FFmpeg if no BASS device is available
                 ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1);
                 audioThread.InitDevice(ManagedBass.Bass.NoSoundDevice);
             });
@@ -147,23 +150,40 @@ private void internalAudioCallback(IntPtr stream, int bufsize)
             }
         }
 
-        protected override bool IsDevicesUpdated(out ImmutableList newDevices, out ImmutableList lostDevices)
+        internal void OnNewDeviceEvent(int addedDeviceIndex)
         {
-            var updatedAudioDevices = EnumerateAllDevices().ToImmutableList();
-
-            if (DeviceNames.SequenceEqual(updatedAudioDevices))
+            eventScheduler.Add(() =>
             {
-                newDevices = lostDevices = ImmutableList.Empty;
-                return false;
-            }
+                // the index is only valid until next SDL_GetNumAudioDevices call, so get the name first.
+                string name = SDL.SDL_GetAudioDeviceName(addedDeviceIndex, 0);
 
-            newDevices = updatedAudioDevices.Except(DeviceNames).ToImmutableList();
-            lostDevices = DeviceNames.Except(updatedAudioDevices).ToImmutableList();
+                updateDeviceNames();
+                InvokeOnNewDevice(name);
+            });
+        }
 
-            DeviceNames = updatedAudioDevices;
-            return true;
+        internal void OnLostDeviceEvent(uint removedDeviceId)
+        {
+            eventScheduler.Add(() =>
+            {
+                // SDL doesn't retain information about removed device.
+                updateDeviceNames();
+
+                if (deviceId == removedDeviceId) // current device lost
+                {
+                    InvokeOnLostDevice(currentDeviceName);
+                    SetAudioDevice();
+                }
+                else
+                {
+                    // we can probably guess the name by comparing the old list and the new one, but it won't be reliable
+                    InvokeOnLostDevice(string.Empty);
+                }
+            });
         }
 
+        private void updateDeviceNames() => DeviceNames = EnumerateAllDevices().ToImmutableList();
+
         protected virtual IEnumerable EnumerateAllDevices()
         {
             int deviceCount = SDL.SDL_GetNumAudioDevices(0); // it may return -1 if only default device is available (sound server)
@@ -173,12 +193,14 @@ protected virtual IEnumerable EnumerateAllDevices()
 
         protected override bool SetAudioDevice(string deviceName = null)
         {
-            if (!AudioDeviceNames.Contains(deviceName))
+            if (!DeviceNames.Contains(deviceName))
                 deviceName = null;
 
             if (deviceId > 0)
                 SDL.SDL_CloseAudioDevice(deviceId);
 
+            Logger.Log("Trying this device: " + deviceName);
+
             // Let audio driver adjust latency, this may set to a high value on Windows (but usually around 10ms), but let's just be safe
             const uint flag = SDL.SDL_AUDIO_ALLOW_SAMPLES_CHANGE;
             deviceId = SDL.SDL_OpenAudioDevice(deviceName, 0, ref spec, out var outspec, (int)flag);
@@ -187,7 +209,7 @@ protected override bool SetAudioDevice(string deviceName = null)
             {
                 if (deviceName == null)
                 {
-                    Logger.Log("SDL Audio init failed!", level: LogLevel.Error);
+                    Logger.Log("No audio device can be used! 
Check your audio system.", level: LogLevel.Error); return false; } diff --git a/osu.Framework/Game.cs b/osu.Framework/Game.cs index 520b62268e..6bb9d2bcd8 100644 --- a/osu.Framework/Game.cs +++ b/osu.Framework/Game.cs @@ -170,6 +170,8 @@ private void load(FrameworkConfigManager config) case AudioDriver.SDL2 when Host.Window is SDL2Window sdl2Window: { SDL2AudioManager sdl2Audio = new SDL2AudioManager(Host.AudioThread, tracks, samples) { EventScheduler = Scheduler }; + sdl2Window.AudioDeviceAdded += sdl2Audio.OnNewDeviceEvent; + sdl2Window.AudioDeviceRemoved += sdl2Audio.OnLostDeviceEvent; Audio = sdl2Audio; break; } diff --git a/osu.Framework/Platform/SDL2Window.cs b/osu.Framework/Platform/SDL2Window.cs index 7d59068f07..54b573ab34 100644 --- a/osu.Framework/Platform/SDL2Window.cs +++ b/osu.Framework/Platform/SDL2Window.cs @@ -601,14 +601,14 @@ private void handleAudioDeviceEvent(SDL.SDL_AudioDeviceEvent evtAudioDevice) if (evtAudioDevice.iscapture != 0) // capture device return; - switch ((SDL.SDL_EventType)evtAudioDevice.type) + switch (evtAudioDevice.type) { case SDL.SDL_EventType.SDL_AUDIODEVICEADDED: - AudioDeviceAdded?.Invoke(evtAudioDevice.which); + AudioDeviceAdded?.Invoke((int)evtAudioDevice.which); break; case SDL.SDL_EventType.SDL_AUDIODEVICEREMOVED: - AudioDeviceRemoved?.Invoke(evtAudioDevice.which); + AudioDeviceRemoved?.Invoke(evtAudioDevice.which); // it is only uint if a device is removed break; } } @@ -686,7 +686,7 @@ internal virtual void SetIconFromGroup(IconGroup iconGroup) /// /// Invoked when a new audio device is added, only when using SDL2 audio /// - public event Action? AudioDeviceAdded; + public event Action? AudioDeviceAdded; /// /// Invoked when a new audio device is removed, only when using SDL2 audio From 6da52542e034b440a2d3c4a0d431c304ff171776 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 13 Jan 2024 00:49:06 +0900 Subject: [PATCH 065/127] Fix BASS device changing thread safety issue --- osu.Framework/Audio/AudioManager.cs | 8 ++++---- osu.Framework/Audio/BassAudioManager.cs | 4 ++-- osu.Framework/Audio/SDL2AudioManager.cs | 10 +++++++--- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/osu.Framework/Audio/AudioManager.cs b/osu.Framework/Audio/AudioManager.cs index a6fccdcd61..acef0cbdb1 100644 --- a/osu.Framework/Audio/AudioManager.cs +++ b/osu.Framework/Audio/AudioManager.cs @@ -58,7 +58,7 @@ public abstract class AudioManager : AudioCollectionManager public event Action OnNewDevice; // workaround as c# doesn't allow actions to get invoked outside of this class - protected void InvokeOnNewDevice(string deviceName) => OnNewDevice?.Invoke(deviceName); + protected virtual void InvokeOnNewDevice(string deviceName) => OnNewDevice?.Invoke(deviceName); /// /// Is fired whenever an audio device is lost and provides its name. @@ -66,7 +66,7 @@ public abstract class AudioManager : AudioCollectionManager public event Action OnLostDevice; // same as above - protected void InvokeOnLostDevice(string deviceName) => OnLostDevice?.Invoke(deviceName); + protected virtual void InvokeOnLostDevice(string deviceName) => OnLostDevice?.Invoke(deviceName); /// /// The preferred audio device we should use. A value of @@ -102,7 +102,7 @@ public abstract class AudioManager : AudioCollectionManager // Mutated by multiple threads, must be thread safe. 
protected ImmutableList DeviceNames = ImmutableList.Empty;
 
-        private Scheduler scheduler => CurrentAudioThread.Scheduler;
+        protected Scheduler AudioScheduler => CurrentAudioThread.Scheduler;
 
         protected readonly CancellationTokenSource CancelSource = new CancellationTokenSource();
 
@@ -169,7 +169,7 @@ protected override void Dispose(bool disposing)
 
         protected void OnDeviceChanged()
         {
-            scheduler.Add(() => SetAudioDevice(AudioDevice.Value));
+            AudioScheduler.Add(() => SetAudioDevice(AudioDevice.Value));
         }
 
         private static int userMixerID;
diff --git a/osu.Framework/Audio/BassAudioManager.cs b/osu.Framework/Audio/BassAudioManager.cs
index 41a3507720..5e08ef2124 100644
--- a/osu.Framework/Audio/BassAudioManager.cs
+++ b/osu.Framework/Audio/BassAudioManager.cs
@@ -85,7 +85,7 @@ public BassAudioManager(AudioThread audioThread, ResourceStore trackStor
             CancellationToken token = CancelSource.Token;
 
             syncAudioDevices();
-            eventScheduler.AddDelayed(() =>
+            AudioScheduler.AddDelayed(() =>
             {
                 // sync audioDevices every 1000ms
                 new Thread(() =>
@@ -111,7 +111,7 @@ public BassAudioManager(AudioThread audioThread, ResourceStore trackStor
 
         protected void OnDevicesChanged()
         {
-            eventScheduler.Add(() =>
+            AudioScheduler.Add(() =>
             {
                 if (CancelSource.IsCancellationRequested)
                     return;
diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs
index f5a07e15f2..8ebb0ace17 100644
--- a/osu.Framework/Audio/SDL2AudioManager.cs
+++ b/osu.Framework/Audio/SDL2AudioManager.cs
@@ -38,6 +38,10 @@ public class SDL2AudioManager : AudioManager
 
         private Scheduler eventScheduler => EventScheduler ?? CurrentAudioThread.Scheduler;
 
+        protected override void InvokeOnNewDevice(string deviceName) => eventScheduler.Add(() => base.InvokeOnNewDevice(deviceName));
+
+        protected override void InvokeOnLostDevice(string deviceName) => eventScheduler.Add(() => base.InvokeOnLostDevice(deviceName));
+
         ///
         /// Creates a new .
         ///
@@ -60,7 +64,7 @@ public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStor
             samples = 256 // determines latency, this value can be changed but is already reasonably low
         };
 
-            eventScheduler.Add(() =>
+            AudioScheduler.Add(() =>
             {
                 updateDeviceNames();
 
@@ -152,7 +156,7 @@ private void internalAudioCallback(IntPtr stream, int bufsize)
 
         internal void OnNewDeviceEvent(int addedDeviceIndex)
         {
-            eventScheduler.Add(() =>
+            AudioScheduler.Add(() =>
             {
                 // the index is only valid until next SDL_GetNumAudioDevices call, so get the name first.
                 string name = SDL.SDL_GetAudioDeviceName(addedDeviceIndex, 0);
@@ -164,7 +168,7 @@ internal void OnNewDeviceEvent(int addedDeviceIndex)
 
         internal void OnLostDeviceEvent(uint removedDeviceId)
         {
-            eventScheduler.Add(() =>
+            AudioScheduler.Add(() =>
             {
                 // SDL doesn't retain information about removed device. 
updateDeviceNames(); From b47f745719d39629c8c9c6438d1d5710a53be48d Mon Sep 17 00:00:00 2001 From: hwsmm Date: Wed, 24 Jan 2024 21:06:17 +0900 Subject: [PATCH 066/127] Remove unneeded log in SDL2AudioManager --- osu.Framework/Audio/SDL2AudioManager.cs | 2 -- 1 file changed, 2 deletions(-) diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 8ebb0ace17..965a752b83 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -203,8 +203,6 @@ protected override bool SetAudioDevice(string deviceName = null) if (deviceId > 0) SDL.SDL_CloseAudioDevice(deviceId); - Logger.Log("Trying this device: " + deviceName); - // Let audio driver adjust latency, this may set to a high value on Windows (but usually around 10ms), but let's just be safe const uint flag = SDL.SDL_AUDIO_ALLOW_SAMPLES_CHANGE; deviceId = SDL.SDL_OpenAudioDevice(deviceName, 0, ref spec, out var outspec, (int)flag); From 0850312a51244d75d33101070ad315bd0e89e037 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 28 Jan 2024 16:40:37 +0900 Subject: [PATCH 067/127] Implement basic sample concurrency in SDL2 Audio backend --- .../Audio/Sample/SampleBassFactory.cs | 4 +-- .../Audio/Sample/SampleChannelSDL2.cs | 10 +++++++ osu.Framework/Audio/Sample/SampleFactory.cs | 4 +-- osu.Framework/Audio/Sample/SampleSDL2.cs | 27 +++++++++++++++++++ .../Audio/Sample/SampleSDL2Factory.cs | 4 +-- 5 files changed, 43 insertions(+), 6 deletions(-) diff --git a/osu.Framework/Audio/Sample/SampleBassFactory.cs b/osu.Framework/Audio/Sample/SampleBassFactory.cs index c37a06ae4a..84a3bfbb27 100644 --- a/osu.Framework/Audio/Sample/SampleBassFactory.cs +++ b/osu.Framework/Audio/Sample/SampleBassFactory.cs @@ -33,7 +33,7 @@ public SampleBassFactory(byte[] data, string name, BassAudioMixer mixer, int pla this.mixer = mixer; } - private protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) + protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) { EnqueueAction(() => { @@ -47,7 +47,7 @@ private protected override void UpdatePlaybackConcurrency(ValueChangedEvent }); } - private protected override void LoadSample() + protected override void LoadSample() { Debug.Assert(CanPerformInline); Debug.Assert(!IsLoaded); diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs index 8432d60ad2..24a94a9338 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs +++ b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs @@ -10,6 +10,8 @@ internal sealed class SampleChannelSDL2 : SampleChannel, ISDL2AudioChannel { private readonly SampleSDL2AudioPlayer player; + private readonly SampleSDL2 sample; + private volatile bool playing; public override bool Playing => playing; @@ -20,10 +22,15 @@ public SampleChannelSDL2(SampleSDL2 sample, SampleSDL2AudioPlayer player) : base(sample.Name) { this.player = player; + this.sample = sample; } public override void Play() { + // Don't play if samples has enough concurrent channels playing. + if (!sample.StartPlayingChannel()) + return; + started = false; playing = true; base.Play(); @@ -58,6 +65,9 @@ int ISDL2AudioChannel.GetRemainingSamples(float[] data) { playing = false; started = false; + + // Let sample know that it has finished playing. 
+ sample.DonePlayingChannel(); } return ret; diff --git a/osu.Framework/Audio/Sample/SampleFactory.cs b/osu.Framework/Audio/Sample/SampleFactory.cs index fe7cab98a1..bee223cb3e 100644 --- a/osu.Framework/Audio/Sample/SampleFactory.cs +++ b/osu.Framework/Audio/Sample/SampleFactory.cs @@ -39,9 +39,9 @@ protected SampleFactory(string name, int playbackConcurrency) PlaybackConcurrency.BindValueChanged(UpdatePlaybackConcurrency); } - private protected abstract void UpdatePlaybackConcurrency(ValueChangedEvent concurrency); + protected abstract void UpdatePlaybackConcurrency(ValueChangedEvent concurrency); - private protected abstract void LoadSample(); + protected abstract void LoadSample(); public abstract Sample CreateSample(); diff --git a/osu.Framework/Audio/Sample/SampleSDL2.cs b/osu.Framework/Audio/Sample/SampleSDL2.cs index c10c7205ad..ec42fd050a 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2.cs @@ -1,7 +1,9 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. +using System.Threading; using osu.Framework.Audio.Mixing.SDL2; +using osu.Framework.Bindables; namespace osu.Framework.Audio.Sample { @@ -17,8 +19,33 @@ public SampleSDL2(SampleSDL2Factory factory, SDL2AudioMixer mixer) { this.factory = factory; this.mixer = mixer; + + maxConcurrentCount = PlaybackConcurrency.Value; + PlaybackConcurrency.BindValueChanged(updatePlaybackConcurrency); + } + + private volatile int maxConcurrentCount; + + private volatile int concurrentCount; + + private void updatePlaybackConcurrency(ValueChangedEvent concurrency) + { + maxConcurrentCount = concurrency.NewValue; } + internal bool StartPlayingChannel() + { + if (Interlocked.Increment(ref concurrentCount) > maxConcurrentCount) + { + Interlocked.Decrement(ref concurrentCount); + return false; + } + + return true; + } + + internal void DonePlayingChannel() => Interlocked.Decrement(ref concurrentCount); + protected override SampleChannel CreateChannel() { var channel = new SampleChannelSDL2(this, factory.CreatePlayer()); diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index 6f1163387d..026886ba1e 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -31,7 +31,7 @@ public SampleSDL2Factory(Stream stream, string name, SDL2AudioMixer mixer, int p this.spec = spec; } - private protected override void LoadSample() + protected override void LoadSample() { Debug.Assert(CanPerformInline); Debug.Assert(!IsLoaded); @@ -68,7 +68,7 @@ public SampleSDL2AudioPlayer CreatePlayer() public override Sample CreateSample() => new SampleSDL2(this, mixer) { OnPlay = SampleFactoryOnPlay }; - private protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) + protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) { } From 5745cf747751faae58d885a0d14aa975ba86767b Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 28 Jan 2024 21:23:52 +0900 Subject: [PATCH 068/127] Move sample concurrency to factory --- osu.Framework/Audio/Sample/Sample.cs | 4 +-- osu.Framework/Audio/Sample/SampleBass.cs | 2 +- osu.Framework/Audio/Sample/SampleSDL2.cs | 29 ++----------------- .../Audio/Sample/SampleSDL2Factory.cs | 19 ++++++++++++ 4 files changed, 25 insertions(+), 29 deletions(-) diff --git a/osu.Framework/Audio/Sample/Sample.cs b/osu.Framework/Audio/Sample/Sample.cs index 02feb8fdbd..8098663d55 
100644 --- a/osu.Framework/Audio/Sample/Sample.cs +++ b/osu.Framework/Audio/Sample/Sample.cs @@ -14,9 +14,9 @@ public abstract class Sample : AudioCollectionManager, ISample public string Name { get; } - internal Sample(SampleFactory factory, string name) + internal Sample(SampleFactory factory) + : this(factory.Name) { - Name = name; PlaybackConcurrency.BindTo(factory.PlaybackConcurrency); } diff --git a/osu.Framework/Audio/Sample/SampleBass.cs b/osu.Framework/Audio/Sample/SampleBass.cs index 995b72b6fc..0f80ceabec 100644 --- a/osu.Framework/Audio/Sample/SampleBass.cs +++ b/osu.Framework/Audio/Sample/SampleBass.cs @@ -15,7 +15,7 @@ internal sealed class SampleBass : Sample private readonly BassAudioMixer mixer; internal SampleBass(SampleBassFactory factory, BassAudioMixer mixer) - : base(factory, factory.Name) + : base(factory) { this.factory = factory; this.mixer = mixer; diff --git a/osu.Framework/Audio/Sample/SampleSDL2.cs b/osu.Framework/Audio/Sample/SampleSDL2.cs index ec42fd050a..663056ba3c 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2.cs @@ -1,9 +1,7 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. -using System.Threading; using osu.Framework.Audio.Mixing.SDL2; -using osu.Framework.Bindables; namespace osu.Framework.Audio.Sample { @@ -15,36 +13,15 @@ internal sealed class SampleSDL2 : Sample private readonly SDL2AudioMixer mixer; public SampleSDL2(SampleSDL2Factory factory, SDL2AudioMixer mixer) - : base(factory, factory.Name) + : base(factory) { this.factory = factory; this.mixer = mixer; - - maxConcurrentCount = PlaybackConcurrency.Value; - PlaybackConcurrency.BindValueChanged(updatePlaybackConcurrency); } - private volatile int maxConcurrentCount; - - private volatile int concurrentCount; - - private void updatePlaybackConcurrency(ValueChangedEvent concurrency) - { - maxConcurrentCount = concurrency.NewValue; - } - - internal bool StartPlayingChannel() - { - if (Interlocked.Increment(ref concurrentCount) > maxConcurrentCount) - { - Interlocked.Decrement(ref concurrentCount); - return false; - } - - return true; - } + internal bool StartPlayingChannel() => factory.IncreaseConcurrentCount(); - internal void DonePlayingChannel() => Interlocked.Decrement(ref concurrentCount); + internal void DonePlayingChannel() => factory.DecreaseConcurrentCount(); protected override SampleChannel CreateChannel() { diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index 026886ba1e..8ae3791aa1 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -4,6 +4,7 @@ using System; using System.Diagnostics; using System.IO; +using System.Threading; using osu.Framework.Audio.Mixing.SDL2; using osu.Framework.Bindables; using osu.Framework.Extensions; @@ -23,6 +24,10 @@ internal class SampleSDL2Factory : SampleFactory private Stream? 
stream; + private volatile int concurrentCount; + + private volatile int maxConcurrentCount = Sample.DEFAULT_CONCURRENCY; + public SampleSDL2Factory(Stream stream, string name, SDL2AudioMixer mixer, int playbackConcurrency, SDL.SDL_AudioSpec spec) : base(name, playbackConcurrency) { @@ -70,8 +75,22 @@ public SampleSDL2AudioPlayer CreatePlayer() protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) { + maxConcurrentCount = concurrency.NewValue; } + internal bool IncreaseConcurrentCount() + { + if (Interlocked.Increment(ref concurrentCount) > maxConcurrentCount) + { + Interlocked.Decrement(ref concurrentCount); + return false; + } + + return true; + } + + internal void DecreaseConcurrentCount() => Interlocked.Decrement(ref concurrentCount); + ~SampleSDL2Factory() { Dispose(false); From 884276190fdab01fbd341a8b67d282c3c42038b0 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 28 Jan 2024 21:28:37 +0900 Subject: [PATCH 069/127] Remove sample concurrency for now Result doesn't sound so good with default value (2) --- .../Audio/Sample/SampleChannelSDL2.cs | 10 ---------- osu.Framework/Audio/Sample/SampleSDL2.cs | 4 ---- .../Audio/Sample/SampleSDL2Factory.cs | 19 ------------------- 3 files changed, 33 deletions(-) diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs index 24a94a9338..8432d60ad2 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs +++ b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs @@ -10,8 +10,6 @@ internal sealed class SampleChannelSDL2 : SampleChannel, ISDL2AudioChannel { private readonly SampleSDL2AudioPlayer player; - private readonly SampleSDL2 sample; - private volatile bool playing; public override bool Playing => playing; @@ -22,15 +20,10 @@ public SampleChannelSDL2(SampleSDL2 sample, SampleSDL2AudioPlayer player) : base(sample.Name) { this.player = player; - this.sample = sample; } public override void Play() { - // Don't play if samples has enough concurrent channels playing. - if (!sample.StartPlayingChannel()) - return; - started = false; playing = true; base.Play(); @@ -65,9 +58,6 @@ int ISDL2AudioChannel.GetRemainingSamples(float[] data) { playing = false; started = false; - - // Let sample know that it has finished playing. - sample.DonePlayingChannel(); } return ret; diff --git a/osu.Framework/Audio/Sample/SampleSDL2.cs b/osu.Framework/Audio/Sample/SampleSDL2.cs index 663056ba3c..f4f6f021cd 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2.cs @@ -19,10 +19,6 @@ public SampleSDL2(SampleSDL2Factory factory, SDL2AudioMixer mixer) this.mixer = mixer; } - internal bool StartPlayingChannel() => factory.IncreaseConcurrentCount(); - - internal void DonePlayingChannel() => factory.DecreaseConcurrentCount(); - protected override SampleChannel CreateChannel() { var channel = new SampleChannelSDL2(this, factory.CreatePlayer()); diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs index 8ae3791aa1..026886ba1e 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL2Factory.cs @@ -4,7 +4,6 @@ using System; using System.Diagnostics; using System.IO; -using System.Threading; using osu.Framework.Audio.Mixing.SDL2; using osu.Framework.Bindables; using osu.Framework.Extensions; @@ -24,10 +23,6 @@ internal class SampleSDL2Factory : SampleFactory private Stream? 
stream; - private volatile int concurrentCount; - - private volatile int maxConcurrentCount = Sample.DEFAULT_CONCURRENCY; - public SampleSDL2Factory(Stream stream, string name, SDL2AudioMixer mixer, int playbackConcurrency, SDL.SDL_AudioSpec spec) : base(name, playbackConcurrency) { @@ -75,22 +70,8 @@ public SampleSDL2AudioPlayer CreatePlayer() protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) { - maxConcurrentCount = concurrency.NewValue; } - internal bool IncreaseConcurrentCount() - { - if (Interlocked.Increment(ref concurrentCount) > maxConcurrentCount) - { - Interlocked.Decrement(ref concurrentCount); - return false; - } - - return true; - } - - internal void DecreaseConcurrentCount() => Interlocked.Decrement(ref concurrentCount); - ~SampleSDL2Factory() { Dispose(false); From 153b1ee1f9f9a4af28ce413d5f590c45f1dd7488 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 30 Jan 2024 15:39:11 +0900 Subject: [PATCH 070/127] Start decoding thread in Manager constructor --- osu.Framework/Audio/AudioDecoderManager.cs | 43 ++++++++++------------ 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/osu.Framework/Audio/AudioDecoderManager.cs b/osu.Framework/Audio/AudioDecoderManager.cs index 8f8802a145..e825bd6fbd 100644 --- a/osu.Framework/Audio/AudioDecoderManager.cs +++ b/osu.Framework/Audio/AudioDecoderManager.cs @@ -109,10 +109,9 @@ public int LoadFromStream(out byte[] decoded) public delegate void PassDataDelegate(byte[] data, int length, AudioDecoder decoderData, bool done); - private Thread? decoderThread; - private AutoResetEvent? decoderWaitHandle; - - private readonly CancellationTokenSource tokenSource = new CancellationTokenSource(); + private readonly Thread decoderThread; + private readonly AutoResetEvent decoderWaitHandle; + private readonly CancellationTokenSource tokenSource; internal static AudioDecoder CreateDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? 
pass = null) @@ -124,26 +123,27 @@ internal static AudioDecoder CreateDecoder(int rate, int channels, bool isTrack, return decoder; } - public AudioDecoder StartDecodingAsync(int rate, byte channels, ushort format, Stream stream, PassDataDelegate pass) + public AudioDecoderManager() { - if (decoderThread == null) - { - decoderWaitHandle = new AutoResetEvent(false); + tokenSource = new CancellationTokenSource(); + decoderWaitHandle = new AutoResetEvent(false); - decoderThread = new Thread(() => loop(tokenSource.Token)) - { - IsBackground = true - }; + decoderThread = new Thread(() => loop(tokenSource.Token)) + { + IsBackground = true + }; - decoderThread.Start(); - } + decoderThread.Start(); + } + public AudioDecoder StartDecodingAsync(int rate, byte channels, ushort format, Stream stream, PassDataDelegate pass) + { AudioDecoder decoder = CreateDecoder(rate, channels, true, format, stream, true, pass); lock (jobs) jobs.AddFirst(decoder); - decoderWaitHandle?.Set(); + decoderWaitHandle.Set(); return decoder; } @@ -224,15 +224,12 @@ protected virtual void Dispose(bool disposing) { if (!disposedValue) { - if (disposing) - { - tokenSource.Cancel(); - decoderWaitHandle?.Set(); + tokenSource.Cancel(); + decoderWaitHandle.Set(); - tokenSource.Dispose(); - decoderThread?.Join(); - decoderWaitHandle?.Dispose(); - } + tokenSource.Dispose(); + decoderThread.Join(); + decoderWaitHandle.Dispose(); lock (jobs) { From 61af01e24fb534b62cec101fdbad821280515f1b Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 6 Feb 2024 21:55:01 +0900 Subject: [PATCH 071/127] Dispose Stream in generic Waveform --- osu.Framework/Audio/Track/Waveform.cs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index 9accd4c02a..17a61a59da 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -60,6 +60,8 @@ public class Waveform : IDisposable private readonly Task readTask; + private Stream? data; + /// /// Constructs a new from provided audio data. /// @@ -74,6 +76,8 @@ public Waveform(Stream? data) readTask = Task.Run(() => { + this.data = null; + if (data == null) return; @@ -177,6 +181,9 @@ public Waveform(Stream? data) finally { ArrayPool.Shared.Return(complexBuffer); + + data.Dispose(); + data = null; } }, cancelSource.Token); } @@ -338,6 +345,10 @@ protected virtual void Dispose(bool disposing) cancelSource.Cancel(); cancelSource.Dispose(); points = Array.Empty(); + + // Try disposing the stream again in case the task was not started. 
+ data?.Dispose(); + data = null; } #endregion From aef3aabd6c12cc78967f6d48b854b44298037ab1 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 6 Feb 2024 22:27:04 +0900 Subject: [PATCH 072/127] Fix code quality issues --- osu.Framework/Audio/SDL2AudioManager.cs | 2 +- osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 965a752b83..8fdd7af918 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -249,7 +249,7 @@ protected override bool SetAudioDevice(int deviceIndex) internal override Track.Track GetNewTrack(Stream data, string name) { TrackSDL2 track = new TrackSDL2(name, spec.freq, spec.channels, spec.samples); - EnqueueAction(() => decoder.StartDecodingAsync(AUDIO_FREQ, AUDIO_CHANNELS, AUDIO_FORMAT, data, track.ReceiveAudioData)); + EnqueueAction(() => decoder.StartDecodingAsync(spec.freq, spec.channels, spec.samples, data, track.ReceiveAudioData)); return track; } diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index f6d902caef..4df5c76949 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -24,9 +24,9 @@ internal class TrackSDL2AudioPlayer : ResamplingPlayer, IDisposable /// /// Returns a byte position converted into milliseconds with configuration set for this player. /// - /// A byte position to convert + /// Position to convert /// - public double GetMsFromIndex(long bytePos) => bytePos * 1000.0d / SrcRate / SrcChannels; + public double GetMsFromIndex(long pos) => pos * 1000.0d / SrcRate / SrcChannels; /// /// Returns a position in milliseconds converted from a byte position with configuration set for this player. 
From 5be11f3a2e7e8dd6e268e91d47ecb5add89aadca Mon Sep 17 00:00:00 2001 From: hwsmm Date: Wed, 7 Feb 2024 05:43:57 +0900 Subject: [PATCH 073/127] Fix typo --- osu.Framework/Audio/SDL2AudioManager.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 8fdd7af918..23a143c0ae 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -249,7 +249,7 @@ protected override bool SetAudioDevice(int deviceIndex) internal override Track.Track GetNewTrack(Stream data, string name) { TrackSDL2 track = new TrackSDL2(name, spec.freq, spec.channels, spec.samples); - EnqueueAction(() => decoder.StartDecodingAsync(spec.freq, spec.channels, spec.samples, data, track.ReceiveAudioData)); + EnqueueAction(() => decoder.StartDecodingAsync(spec.freq, spec.channels, spec.format, data, track.ReceiveAudioData)); return track; } From ff9fa11ea749b6bf6800f4b217cfb4bec00b8cf7 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 23 Mar 2024 01:31:50 +0900 Subject: [PATCH 074/127] Make channel volume as tuple --- osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs | 7 +------ osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs | 11 +---------- osu.Framework/Audio/Sample/SampleChannelSDL2.cs | 11 ++++------- osu.Framework/Audio/Track/TrackSDL2.cs | 11 ++++------- 4 files changed, 10 insertions(+), 30 deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs b/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs index 5d82e23dc9..817b7c0bf2 100644 --- a/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs +++ b/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs @@ -23,11 +23,6 @@ internal interface ISDL2AudioChannel : IAudioChannel /// /// Mixer uses this as volume, Value should be within 0 and 1. /// - float Volume { get; } - - /// - /// Mixer uses this to adjust channel balance. Value should be within -1.0 and 1.0 - /// - float Balance { get; } + (float left, float right) Volume { get; } } } diff --git a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs index 6507bd8be8..586c6fe384 100644 --- a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs @@ -113,16 +113,7 @@ public void MixChannelsInto(float[] data) if (size > 0) { - float left = 1; - float right = 1; - - if (channel.Balance < 0) - right += channel.Balance; - else if (channel.Balance > 0) - left -= channel.Balance; - - right *= channel.Volume; - left *= channel.Volume; + var (left, right) = channel.Volume; mixAudio(put, ret, size, left, right); } diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs index 8432d60ad2..180d2cfa26 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs +++ b/osu.Framework/Audio/Sample/SampleChannelSDL2.cs @@ -63,8 +63,7 @@ int ISDL2AudioChannel.GetRemainingSamples(float[] data) return ret; } - private volatile float volume = 1.0f; - private volatile float balance; + private (float, float) volume = (1.0f, 1.0f); private double rate = 1.0f; @@ -72,18 +71,16 @@ internal override void OnStateChanged() { base.OnStateChanged(); - volume = (float)AggregateVolume.Value; - balance = (float)AggregateBalance.Value; + double balance = AggregateBalance.Value; + volume = ((float)(AggregateVolume.Value * (balance > 0 ? balance : 1.0)), (float)(AggregateVolume.Value * (balance < 0 ? 
-balance : 1.0))); Interlocked.Exchange(ref rate, AggregateFrequency.Value); } - float ISDL2AudioChannel.Volume => volume; + (float, float) ISDL2AudioChannel.Volume => volume; bool ISDL2AudioChannel.Playing => playing; - float ISDL2AudioChannel.Balance => balance; - ~SampleChannelSDL2() { Dispose(false); diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL2.cs index bb862a5da0..2c8894e955 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL2.cs @@ -250,8 +250,7 @@ int ISDL2AudioChannel.GetRemainingSamples(float[] data) return ret; } - private volatile float volume = 1.0f; - private volatile float balance; + private (float, float) volume = (1.0f, 1.0f); internal override void OnStateChanged() { @@ -268,15 +267,13 @@ internal override void OnStateChanged() player.Tempo = AggregateTempo.Value; } - volume = (float)AggregateVolume.Value; - balance = (float)AggregateBalance.Value; + double balance = AggregateBalance.Value; + volume = ((float)(AggregateVolume.Value * (balance > 0 ? balance : 1.0)), (float)(AggregateVolume.Value * (balance < 0 ? -balance : 1.0))); } bool ISDL2AudioChannel.Playing => isRunning && !player.Done; - float ISDL2AudioChannel.Volume => volume; - - float ISDL2AudioChannel.Balance => balance; + (float, float) ISDL2AudioChannel.Volume => volume; ~TrackSDL2() { From 744f6654902421e1bba80db785c6cce693307598 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 23 Mar 2024 01:42:49 +0900 Subject: [PATCH 075/127] Let mixing no longer create a new array on every callback --- .../Audio/Mixing/SDL2/SDL2AudioMixer.cs | 55 ++++++++++++++----- osu.Framework/Audio/SDL2AudioManager.cs | 14 ++--- 2 files changed, 48 insertions(+), 21 deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs index 586c6fe384..f8dd034541 100644 --- a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs @@ -64,37 +64,47 @@ protected override void UpdateState() base.UpdateState(); } - // https://github.com/libsdl-org/SDL/blob/SDL2/src/audio/SDL_mixer.c#L292 - private const float max_vol = 3.402823466e+38F; - private const float min_vol = -3.402823466e+38F; - - private void mixAudio(float[] dst, float[] src, int samples, float left, float right) + private unsafe void mixAudio(float* dst, float* src, ref int filled, int samples, float left, float right) { if (left <= 0 && right <= 0) return; for (int i = 0; i < samples; i++) - dst[i] = Math.Clamp(src[i] * ((i % 2) == 0 ? left : right) + dst[i], min_vol, max_vol); + *(dst + i) = (*(src + i) * ((i % 2) == 0 ? left : right)) + (i < filled ? *(dst + i) : 0); + + if (samples > filled) + filled = samples; } private float[]? ret; + private float[]? filterArray; + private volatile int channelCount; /// /// Mix into a float array given as an argument. /// /// A float array that audio will be mixed into. - public void MixChannelsInto(float[] data) + /// Size of data + /// Count of usable audio samples in data + public unsafe void MixChannelsInto(float* data, int sampleCount, ref int filledSamples) { lock (syncRoot) { - int sampleCount = data.Length; if (ret == null || sampleCount != ret.Length) + { ret = new float[sampleCount]; + } bool useFilters = audioFilters.Count > 0; - float[] put = useFilters ? 
new float[sampleCount] : data; + + if (useFilters && (filterArray == null || filterArray.Length != sampleCount)) + { + filterArray = new float[sampleCount]; + } + + int filterArrayFilled = 0; var node = activeChannels.First; @@ -115,7 +125,21 @@ public void MixChannelsInto(float[] data) { var (left, right) = channel.Volume; - mixAudio(put, ret, size, left, right); + if (!useFilters) + { + fixed (float* retPtr = ret) + { + mixAudio(data, retPtr, ref filledSamples, size, left, right); + } + } + else + { + fixed (float* filterArrPtr = filterArray) + fixed (float* retPtr = ret) + { + mixAudio(filterArrPtr, retPtr, ref filterArrayFilled, size, left, right); + } + } } } @@ -126,16 +150,21 @@ public void MixChannelsInto(float[] data) if (useFilters) { - for (int i = 0; i < sampleCount; i++) + for (int i = 0; i < filterArrayFilled; i++) { foreach (var filter in audioFilters) { if (filter.BiQuadFilter != null) - put[i] = filter.BiQuadFilter.Transform(put[i]); + { + filterArray![i] = filter.BiQuadFilter.Transform(filterArray[i]); + } } } - mixAudio(data, put, sampleCount, 1, 1); + fixed (float* filterArrPtr = filterArray) + { + mixAudio(data, filterArrPtr, ref filledSamples, filterArrayFilled, 1, 1); + } } } } diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs index 23a143c0ae..9c8b419704 100644 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ b/osu.Framework/Audio/SDL2AudioManager.cs @@ -130,23 +130,21 @@ protected override void ItemRemoved(AudioComponent item) } } - private void internalAudioCallback(IntPtr stream, int bufsize) + private unsafe void internalAudioCallback(IntPtr stream, int bufsize) { try { - float[] main = new float[bufsize / 4]; + float* main = (float*)stream.ToPointer(); + int filled = 0; foreach (var mixer in sdlMixerList) { if (mixer.IsAlive) - mixer.MixChannelsInto(main); + mixer.MixChannelsInto(main, bufsize / 4, ref filled); } - unsafe - { - fixed (float* mainPtr = main) - Buffer.MemoryCopy(mainPtr, stream.ToPointer(), bufsize, bufsize); - } + for (; filled < bufsize / 4; filled++) + *(main + filled) = 0; } catch (Exception e) { From 71f0b2fc726e2cbe02a1f9747e83de0e3f0f8cdd Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 31 Mar 2024 17:17:47 +0900 Subject: [PATCH 076/127] Revert unneeded changes in SampleBassFactory --- .../Audio/Sample/SampleBassFactory.cs | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/osu.Framework/Audio/Sample/SampleBassFactory.cs b/osu.Framework/Audio/Sample/SampleBassFactory.cs index 84a3bfbb27..5274c4ac36 100644 --- a/osu.Framework/Audio/Sample/SampleBassFactory.cs +++ b/osu.Framework/Audio/Sample/SampleBassFactory.cs @@ -47,6 +47,13 @@ protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurr }); } + internal override void UpdateDevice(int deviceIndex) + { + // The sample may not have already loaded if a device wasn't present in a previous load attempt. + if (!IsLoaded) + LoadSample(); + } + protected override void LoadSample() { Debug.Assert(CanPerformInline); @@ -73,13 +80,6 @@ protected override void LoadSample() memoryLease = NativeMemoryTracker.AddMemory(this, dataLength); } - internal override void UpdateDevice(int deviceIndex) - { - // The sample may not have already loaded if a device wasn't present in a previous load attempt. 
- if (!IsLoaded) - LoadSample(); - } - public override Sample CreateSample() => new SampleBass(this, mixer) { OnPlay = SampleFactoryOnPlay }; ~SampleBassFactory() @@ -92,8 +92,13 @@ protected override void Dispose(bool disposing) if (IsDisposed) return; - Bass.SampleFree(SampleId); - memoryLease?.Dispose(); + if (IsLoaded) + { + Bass.SampleFree(SampleId); + memoryLease?.Dispose(); + } + + base.Dispose(disposing); } } } From 3cbd9dff60b260cb7ce25ab2fdf49e0926c2d576 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sun, 31 Mar 2024 17:25:14 +0900 Subject: [PATCH 077/127] No longer uses ArrayPool for tracks --- .../Audio/Track/TrackSDL2AudioPlayer.cs | 26 +------------------ 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs index 4df5c76949..e6150d7a69 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs @@ -2,7 +2,6 @@ // See the LICENCE file in the repository root for full licence text. using System; -using System.Buffers; using System.Threading; namespace osu.Framework.Audio.Track @@ -42,8 +41,6 @@ internal class TrackSDL2AudioPlayer : ResamplingPlayer, IDisposable protected long AudioDataPosition; - private bool dataRented; - private long audioDataLength; /// @@ -68,30 +65,12 @@ private void prepareArray(long wanted) if (wanted <= AudioData?.LongLength) return; - float[] temp; - bool rent; - - if (wanted > int.MaxValue) - { - rent = false; - temp = new float[wanted]; - } - else - { - rent = true; - temp = ArrayPool.Shared.Rent((int)wanted); - } + float[] temp = new float[wanted]; if (AudioData != null) - { Array.Copy(AudioData, 0, temp, 0, audioDataLength); - if (dataRented) - ArrayPool.Shared.Return(AudioData); - } - AudioData = temp; - dataRented = rent; } internal void PrepareStream(long byteLength) @@ -286,9 +265,6 @@ protected virtual void Dispose(bool disposing) { if (!disposedValue) { - if (dataRented && AudioData != null) - ArrayPool.Shared.Return(AudioData); - AudioData = null; disposedValue = true; } From be8c1b03f8da0aca98460ae026d40afc8fcd5a7c Mon Sep 17 00:00:00 2001 From: hwsmm Date: Wed, 10 Apr 2024 02:00:21 +0900 Subject: [PATCH 078/127] SDL3 Audio migration (first pass) --- osu.Framework/Audio/AudioDecoderManager.cs | 11 +- osu.Framework/Audio/BassAudioDecoder.cs | 26 +- .../Audio/Callbacks/SDL2AudioCallback.cs | 31 -- osu.Framework/Audio/FFmpegAudioDecoder.cs | 8 +- .../ISDL3AudioChannel.cs} | 6 +- .../SDL3AudioMixer.cs} | 20 +- osu.Framework/Audio/ResamplingPlayer.cs | 4 +- osu.Framework/Audio/SDL2AudioManager.cs | 272 --------------- osu.Framework/Audio/SDL3AudioManager.cs | 321 ++++++++++++++++++ ...pleChannelSDL2.cs => SampleChannelSDL3.cs} | 18 +- .../Sample/{SampleSDL2.cs => SampleSDL3.cs} | 12 +- ...udioPlayer.cs => SampleSDL3AudioPlayer.cs} | 4 +- ...pleSDL2Factory.cs => SampleSDL3Factory.cs} | 20 +- ...AudioPlayer.cs => TempoSDL3AudioPlayer.cs} | 8 +- .../Track/{TrackSDL2.cs => TrackSDL3.cs} | 22 +- ...AudioPlayer.cs => TrackSDL3AudioPlayer.cs} | 10 +- osu.Framework/Audio/Track/Waveform.cs | 2 +- osu.Framework/Configuration/AudioDriver.cs | 2 +- osu.Framework/Game.cs | 10 +- osu.Framework/Platform/SDL3Window.cs | 12 +- 20 files changed, 416 insertions(+), 403 deletions(-) delete mode 100644 osu.Framework/Audio/Callbacks/SDL2AudioCallback.cs rename osu.Framework/Audio/Mixing/{SDL2/ISDL2AudioChannel.cs => SDL3/ISDL3AudioChannel.cs} (88%) rename osu.Framework/Audio/Mixing/{SDL2/SDL2AudioMixer.cs 
=> SDL3/SDL3AudioMixer.cs} (93%) delete mode 100644 osu.Framework/Audio/SDL2AudioManager.cs create mode 100644 osu.Framework/Audio/SDL3AudioManager.cs rename osu.Framework/Audio/Sample/{SampleChannelSDL2.cs => SampleChannelSDL3.cs} (80%) rename osu.Framework/Audio/Sample/{SampleSDL2.cs => SampleSDL3.cs} (63%) rename osu.Framework/Audio/Sample/{SampleSDL2AudioPlayer.cs => SampleSDL3AudioPlayer.cs} (91%) rename osu.Framework/Audio/Sample/{SampleSDL2Factory.cs => SampleSDL3Factory.cs} (78%) rename osu.Framework/Audio/Track/{TempoSDL2AudioPlayer.cs => TempoSDL3AudioPlayer.cs} (95%) rename osu.Framework/Audio/Track/{TrackSDL2.cs => TrackSDL3.cs} (93%) rename osu.Framework/Audio/Track/{TrackSDL2AudioPlayer.cs => TrackSDL3AudioPlayer.cs} (96%) diff --git a/osu.Framework/Audio/AudioDecoderManager.cs b/osu.Framework/Audio/AudioDecoderManager.cs index e825bd6fbd..2a2f13cf68 100644 --- a/osu.Framework/Audio/AudioDecoderManager.cs +++ b/osu.Framework/Audio/AudioDecoderManager.cs @@ -6,6 +6,7 @@ using System.Threading; using osu.Framework.Logging; using System.Collections.Generic; +using SDL; namespace osu.Framework.Audio { @@ -19,7 +20,7 @@ public abstract class AudioDecoder protected readonly int Rate; protected readonly int Channels; protected readonly bool IsTrack; - protected readonly ushort Format; + protected readonly SDL_AudioFormat Format; protected readonly Stream Stream; protected readonly bool AutoDisposeStream; protected readonly PassDataDelegate? Pass; @@ -51,7 +52,7 @@ public long ByteLength internal volatile bool StopJob; internal volatile bool Loading; - protected AudioDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) + protected AudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) { Rate = rate; Channels = channels; @@ -113,7 +114,7 @@ public int LoadFromStream(out byte[] decoded) private readonly AutoResetEvent decoderWaitHandle; private readonly CancellationTokenSource tokenSource; - internal static AudioDecoder CreateDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, + internal static AudioDecoder CreateDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream = true, PassDataDelegate? 
pass = null) { AudioDecoder decoder = ManagedBass.Bass.CurrentDevice >= 0 @@ -136,7 +137,7 @@ public AudioDecoderManager() decoderThread.Start(); } - public AudioDecoder StartDecodingAsync(int rate, byte channels, ushort format, Stream stream, PassDataDelegate pass) + public AudioDecoder StartDecodingAsync(int rate, int channels, SDL_AudioFormat format, Stream stream, PassDataDelegate pass) { AudioDecoder decoder = CreateDecoder(rate, channels, true, format, stream, true, pass); @@ -148,7 +149,7 @@ public AudioDecoder StartDecodingAsync(int rate, byte channels, ushort format, S return decoder; } - public static byte[] DecodeAudio(int freq, int channels, ushort format, Stream stream, out int size) + public static byte[] DecodeAudio(int freq, int channels, SDL_AudioFormat format, Stream stream, out int size) { AudioDecoder decoder = CreateDecoder(freq, channels, false, format, stream); diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index d168f7bcd1..7999d79ea8 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -6,13 +6,13 @@ using ManagedBass; using ManagedBass.Mix; using osu.Framework.Audio.Callbacks; -using SDL2; +using SDL; using static osu.Framework.Audio.AudioDecoderManager; namespace osu.Framework.Audio { /// - /// This is only for using BASS as a decoder for SDL2 backend! + /// This is only for using BASS as a decoder for SDL3 backend! /// internal class BassAudioDecoder : AudioDecoder { @@ -30,24 +30,18 @@ private Resolution resolution { get { - switch (Format) - { - case SDL.AUDIO_S8: - return Resolution.Byte; - - case SDL.AUDIO_S16: - return Resolution.Short; - - case SDL.AUDIO_F32: - default: - return Resolution.Float; - } + if ((int)Format == SDL3.SDL_AUDIO_S8) + return Resolution.Byte; + else if (Format == SDL3.SDL_AUDIO_S16) + return Resolution.Short; + else + return Resolution.Float; } } - private ushort bits => SDL.SDL_AUDIO_BITSIZE(Format); + private ushort bits => (ushort)SDL3.SDL_AUDIO_BITSIZE(Format); - public BassAudioDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) + public BassAudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) { } diff --git a/osu.Framework/Audio/Callbacks/SDL2AudioCallback.cs b/osu.Framework/Audio/Callbacks/SDL2AudioCallback.cs deleted file mode 100644 index c33bca8aeb..0000000000 --- a/osu.Framework/Audio/Callbacks/SDL2AudioCallback.cs +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. -// See the LICENCE file in the repository root for full licence text. - -using System; -using System.Runtime.CompilerServices; -using osu.Framework.Allocation; -using osu.Framework.Platform; -using SDL2; - -namespace osu.Framework.Audio.Callbacks -{ - internal class SDL2AudioCallback : BassCallback - { - public SDL.SDL_AudioCallback Callback => RuntimeFeature.IsDynamicCodeSupported ? 
AudioCallback : audioCallback; - - public readonly SDL.SDL_AudioCallback AudioCallback; - - public SDL2AudioCallback(SDL.SDL_AudioCallback callback) - { - AudioCallback = callback; - } - - [MonoPInvokeCallback(typeof(SDL.SDL_AudioCallback))] - private static void audioCallback(IntPtr userdata, IntPtr stream, int len) - { - var ptr = new ObjectHandle(userdata); - if (ptr.GetTarget(out var target)) - target.AudioCallback(userdata, stream, len); - } - } -} diff --git a/osu.Framework/Audio/FFmpegAudioDecoder.cs b/osu.Framework/Audio/FFmpegAudioDecoder.cs index dd89a199da..3d8e24aa17 100644 --- a/osu.Framework/Audio/FFmpegAudioDecoder.cs +++ b/osu.Framework/Audio/FFmpegAudioDecoder.cs @@ -4,7 +4,7 @@ using System; using System.IO; using osu.Framework.Graphics.Video; -using SDL2; +using SDL; using static osu.Framework.Audio.AudioDecoderManager; namespace osu.Framework.Audio @@ -14,7 +14,7 @@ internal class FFmpegAudioDecoder : AudioDecoder private VideoDecoder? ffmpeg; private byte[]? decodeData; - public FFmpegAudioDecoder(int rate, int channels, bool isTrack, ushort format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) + public FFmpegAudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) { } @@ -31,14 +31,14 @@ protected override int LoadFromStreamInternal(out byte[] decoded) { if (ffmpeg == null) { - ffmpeg = new VideoDecoder(Stream, Rate, Channels, SDL.SDL_AUDIO_ISFLOAT(Format), SDL.SDL_AUDIO_BITSIZE(Format), SDL.SDL_AUDIO_ISSIGNED(Format)); + ffmpeg = new VideoDecoder(Stream, Rate, Channels, SDL3.SDL_AUDIO_ISFLOAT(Format), SDL3.SDL_AUDIO_BITSIZE(Format), SDL3.SDL_AUDIO_ISSIGNED(Format)); ffmpeg.PrepareDecoding(); ffmpeg.RecreateCodecContext(); Bitrate = (int)ffmpeg.Bitrate; Length = ffmpeg.Duration; - ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * Rate) * Channels * (SDL.SDL_AUDIO_BITSIZE(Format) / 8); // FIXME + ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * Rate) * Channels * (SDL3.SDL_AUDIO_BITSIZE(Format) / 8); // FIXME Loading = true; } diff --git a/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs b/osu.Framework/Audio/Mixing/SDL3/ISDL3AudioChannel.cs similarity index 88% rename from osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs rename to osu.Framework/Audio/Mixing/SDL3/ISDL3AudioChannel.cs index 817b7c0bf2..c2f2132abb 100644 --- a/osu.Framework/Audio/Mixing/SDL2/ISDL2AudioChannel.cs +++ b/osu.Framework/Audio/Mixing/SDL3/ISDL3AudioChannel.cs @@ -1,12 +1,12 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. -namespace osu.Framework.Audio.Mixing.SDL2 +namespace osu.Framework.Audio.Mixing.SDL3 { /// - /// Interface for audio channels that feed audio to . + /// Interface for audio channels that feed audio to . /// - internal interface ISDL2AudioChannel : IAudioChannel + internal interface ISDL3AudioChannel : IAudioChannel { /// /// Returns remaining audio samples. 
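ISDL3AudioChannel keeps the pull model of its SDL2 predecessor: the mixer hands each playing channel a scratch buffer, the channel writes however many interleaved stereo float samples it currently has ready and returns that count, and the mixer then scales the result by the channel's (left, right) volume tuple (left applied to even-indexed samples, right to odd-indexed ones, as the mixer's mixAudio does). A simplified, self-contained sketch of a channel with that shape follows; it is an illustration under these assumptions, not the framework's implementation and not an implementation of the real interface:

    using System;

    // Toy stand-in mirroring the members ISDL3AudioChannel exposes to the mixer (illustrative only).
    internal class ToyChannel
    {
        private int remaining = 44100 * 2; // one second of interleaved stereo float samples

        public bool Playing => remaining > 0;

        public (float left, float right) Volume => (1f, 1f);

        // Writes up to data.Length samples into data and reports how many were written;
        // a real channel would copy decoded audio here instead of clearing to silence.
        public int GetRemainingSamples(float[] data)
        {
            int count = Math.Min(data.Length, remaining);
            Array.Clear(data, 0, count);
            remaining -= count;
            return count;
        }
    }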
diff --git a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs similarity index 93% rename from osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs rename to osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index f8dd034541..6a98c78bf8 100644 --- a/osu.Framework/Audio/Mixing/SDL2/SDL2AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -13,26 +13,26 @@ using osu.Framework.Statistics; using NAudio.Dsp; -namespace osu.Framework.Audio.Mixing.SDL2 +namespace osu.Framework.Audio.Mixing.SDL3 { /// - /// Mixes instances and applies effects on top of them. + /// Mixes instances and applies effects on top of them. /// - internal class SDL2AudioMixer : AudioMixer + internal class SDL3AudioMixer : AudioMixer { private readonly object syncRoot = new object(); /// - /// List of instances that are active. + /// List of instances that are active. /// - private readonly LinkedList activeChannels = new LinkedList(); + private readonly LinkedList activeChannels = new LinkedList(); /// - /// Creates a new + /// Creates a new /// /// /// An identifier displayed on the audio mixer visualiser. - public SDL2AudioMixer(AudioMixer? globalMixer, string identifier) + public SDL3AudioMixer(AudioMixer? globalMixer, string identifier) : base(globalMixer, identifier) { EnqueueAction(() => Effects.BindCollectionChanged(onEffectsChanged, true)); @@ -42,7 +42,7 @@ public SDL2AudioMixer(AudioMixer? globalMixer, string identifier) protected override void AddInternal(IAudioChannel channel) { - if (channel is not ISDL2AudioChannel sdlChannel) + if (channel is not ISDL3AudioChannel sdlChannel) return; lock (syncRoot) @@ -51,7 +51,7 @@ protected override void AddInternal(IAudioChannel channel) protected override void RemoveInternal(IAudioChannel channel) { - if (channel is not ISDL2AudioChannel sdlChannel) + if (channel is not ISDL3AudioChannel sdlChannel) return; lock (syncRoot) @@ -227,7 +227,7 @@ public EffectBox(IEffectParameter param) { // allowing non-bqf to keep index of list if (param is BQFParameters bqfp) - BiQuadFilter = getFilter(SDL2AudioManager.AUDIO_FREQ, bqfp); + BiQuadFilter = getFilter(SDL3AudioManager.AUDIO_FREQ, bqfp); } } diff --git a/osu.Framework/Audio/ResamplingPlayer.cs b/osu.Framework/Audio/ResamplingPlayer.cs index 488e5fe911..1dba8b9235 100644 --- a/osu.Framework/Audio/ResamplingPlayer.cs +++ b/osu.Framework/Audio/ResamplingPlayer.cs @@ -25,14 +25,14 @@ public double RelativeRate private WdlResampler? resampler; internal readonly int SrcRate; - internal readonly byte SrcChannels; + internal readonly int SrcChannels; /// /// Creates a new . /// /// Sampling rate of audio that's given from /// Channels of audio that's given from - protected ResamplingPlayer(int srcRate, byte srcChannels) + protected ResamplingPlayer(int srcRate, int srcChannels) { SrcRate = srcRate; SrcChannels = srcChannels; diff --git a/osu.Framework/Audio/SDL2AudioManager.cs b/osu.Framework/Audio/SDL2AudioManager.cs deleted file mode 100644 index 9c8b419704..0000000000 --- a/osu.Framework/Audio/SDL2AudioManager.cs +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. -// See the LICENCE file in the repository root for full licence text. 
- -#nullable disable - -using System; -using System.Collections.Generic; -using System.Collections.Immutable; -using System.IO; -using osu.Framework.Audio.Callbacks; -using osu.Framework.Audio.Mixing; -using osu.Framework.Audio.Mixing.SDL2; -using osu.Framework.Audio.Sample; -using osu.Framework.Audio.Track; -using osu.Framework.Extensions.TypeExtensions; -using osu.Framework.IO.Stores; -using osu.Framework.Logging; -using osu.Framework.Threading; -using SDL2; - -namespace osu.Framework.Audio -{ - public class SDL2AudioManager : AudioManager - { - public const int AUDIO_FREQ = 44100; - public const byte AUDIO_CHANNELS = 2; - public const ushort AUDIO_FORMAT = SDL.AUDIO_F32; - - private volatile uint deviceId; - - private SDL.SDL_AudioSpec spec; - - private static readonly AudioDecoderManager decoder = new AudioDecoderManager(); - - private readonly List sdlMixerList = new List(); - - private readonly SDL2AudioCallback audioCallback; - - private Scheduler eventScheduler => EventScheduler ?? CurrentAudioThread.Scheduler; - - protected override void InvokeOnNewDevice(string deviceName) => eventScheduler.Add(() => base.InvokeOnNewDevice(deviceName)); - - protected override void InvokeOnLostDevice(string deviceName) => eventScheduler.Add(() => base.InvokeOnLostDevice(deviceName)); - - /// - /// Creates a new . - /// - /// The host's audio thread. - /// The resource store containing all audio tracks to be used in the future. - /// The sample store containing all audio samples to be used in the future. - public SDL2AudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) - : base(audioThread, trackStore, sampleStore) - { - audioCallback = new SDL2AudioCallback((_, stream, size) => internalAudioCallback(stream, size)); - - // Must not edit this except for samples, as components (especially mixer) expects this to match. 
- spec = new SDL.SDL_AudioSpec - { - freq = AUDIO_FREQ, - channels = AUDIO_CHANNELS, - format = AUDIO_FORMAT, - callback = audioCallback.Callback, - userdata = audioCallback.Handle, - samples = 256 // determines latency, this value can be changed but is already reasonably low - }; - - AudioScheduler.Add(() => - { - updateDeviceNames(); - - // comment below lines if you want to use FFmpeg to decode audio, AudioDecoder will use FFmpeg if no BASS device is available - ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1); - audioThread.InitDevice(ManagedBass.Bass.NoSoundDevice); - }); - } - - private string currentDeviceName = "Not loaded"; - - public override string ToString() - { - return $@"{GetType().ReadableName()} ({currentDeviceName})"; - } - - protected override AudioMixer AudioCreateAudioMixer(AudioMixer fallbackMixer, string identifier) - { - var mixer = new SDL2AudioMixer(fallbackMixer, identifier); - AddItem(mixer); - return mixer; - } - - protected override void ItemAdded(AudioComponent item) - { - base.ItemAdded(item); - - if (item is SDL2AudioMixer mixer) - { - try - { - if (deviceId != 0) - SDL.SDL_LockAudioDevice(deviceId); - - sdlMixerList.Add(mixer); - } - finally - { - if (deviceId != 0) - SDL.SDL_UnlockAudioDevice(deviceId); - } - } - } - - protected override void ItemRemoved(AudioComponent item) - { - base.ItemRemoved(item); - - if (item is SDL2AudioMixer mixer) - { - try - { - if (deviceId != 0) - SDL.SDL_LockAudioDevice(deviceId); - - sdlMixerList.Remove(mixer); - } - finally - { - if (deviceId != 0) - SDL.SDL_UnlockAudioDevice(deviceId); - } - } - } - - private unsafe void internalAudioCallback(IntPtr stream, int bufsize) - { - try - { - float* main = (float*)stream.ToPointer(); - int filled = 0; - - foreach (var mixer in sdlMixerList) - { - if (mixer.IsAlive) - mixer.MixChannelsInto(main, bufsize / 4, ref filled); - } - - for (; filled < bufsize / 4; filled++) - *(main + filled) = 0; - } - catch (Exception e) - { - Logger.Error(e, "Error while pushing audio to SDL"); - } - } - - internal void OnNewDeviceEvent(int addedDeviceIndex) - { - AudioScheduler.Add(() => - { - // the index is only vaild until next SDL_GetNumAudioDevices call, so get the name first. - string name = SDL.SDL_GetAudioDeviceName(addedDeviceIndex, 0); - - updateDeviceNames(); - InvokeOnNewDevice(name); - }); - } - - internal void OnLostDeviceEvent(uint removedDeviceId) - { - AudioScheduler.Add(() => - { - // SDL doesn't retain information about removed device. 
- updateDeviceNames(); - - if (deviceId == removedDeviceId) // current device lost - { - InvokeOnLostDevice(currentDeviceName); - SetAudioDevice(); - } - else - { - // we can probably guess the name by comparing the old list and the new one, but it won't be reliable - InvokeOnLostDevice(string.Empty); - } - }); - } - - private void updateDeviceNames() => DeviceNames = EnumerateAllDevices().ToImmutableList(); - - protected virtual IEnumerable EnumerateAllDevices() - { - int deviceCount = SDL.SDL_GetNumAudioDevices(0); // it may return -1 if only default device is available (sound server) - for (int i = 0; i < deviceCount; i++) - yield return SDL.SDL_GetAudioDeviceName(i, 0); - } - - protected override bool SetAudioDevice(string deviceName = null) - { - if (!DeviceNames.Contains(deviceName)) - deviceName = null; - - if (deviceId > 0) - SDL.SDL_CloseAudioDevice(deviceId); - - // Let audio driver adjust latency, this may set to a high value on Windows (but usually around 10ms), but let's just be safe - const uint flag = SDL.SDL_AUDIO_ALLOW_SAMPLES_CHANGE; - deviceId = SDL.SDL_OpenAudioDevice(deviceName, 0, ref spec, out var outspec, (int)flag); - - if (deviceId == 0) - { - if (deviceName == null) - { - Logger.Log("No audio device can be used! Check your audio system.", level: LogLevel.Error); - return false; - } - - Logger.Log("SDL Audio init failed, try using default device...", level: LogLevel.Important); - return SetAudioDevice(); - } - - spec = outspec; - - // Start playback - SDL.SDL_PauseAudioDevice(deviceId, 0); - - currentDeviceName = deviceName ?? "Default"; - - Logger.Log($@"🔈 SDL Audio initialised - Driver: {SDL.SDL_GetCurrentAudioDriver()} - Device Name: {currentDeviceName} - Format: {spec.freq}hz {spec.channels}ch - Resolution: {(SDL.SDL_AUDIO_ISUNSIGNED(spec.format) ? "unsigned " : "")}{SDL.SDL_AUDIO_BITSIZE(spec.format)}bit{(SDL.SDL_AUDIO_ISFLOAT(spec.format) ? " float" : "")} - Samples: {spec.samples} samples"); - - return true; - } - - protected override bool SetAudioDevice(int deviceIndex) - { - if (deviceIndex < DeviceNames.Count && deviceIndex >= 0) - return SetAudioDevice(DeviceNames[deviceIndex]); - - return SetAudioDevice(); - } - - protected override bool IsCurrentDeviceValid() => SDL.SDL_GetAudioDeviceStatus(deviceId) != SDL.SDL_AudioStatus.SDL_AUDIO_STOPPED; - - internal override Track.Track GetNewTrack(Stream data, string name) - { - TrackSDL2 track = new TrackSDL2(name, spec.freq, spec.channels, spec.samples); - EnqueueAction(() => decoder.StartDecodingAsync(spec.freq, spec.channels, spec.format, data, track.ReceiveAudioData)); - return track; - } - - internal override SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) - => new SampleSDL2Factory(data, name, (SDL2AudioMixer)mixer, playbackConcurrency, spec); - - protected override void Dispose(bool disposing) - { - base.Dispose(disposing); - - decoder?.Dispose(); - - if (deviceId > 0) - { - SDL.SDL_CloseAudioDevice(deviceId); - deviceId = 0; - } - - audioCallback?.Dispose(); - } - } -} diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs new file mode 100644 index 0000000000..a30869cd12 --- /dev/null +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -0,0 +1,321 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. 
+ +#nullable disable + +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.IO; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using osu.Framework.Allocation; +using osu.Framework.Audio.Mixing; +using osu.Framework.Audio.Mixing.SDL3; +using osu.Framework.Audio.Sample; +using osu.Framework.Audio.Track; +using osu.Framework.Extensions.TypeExtensions; +using osu.Framework.IO.Stores; +using osu.Framework.Logging; +using osu.Framework.Threading; +using SDL; + +namespace osu.Framework.Audio +{ + public unsafe class SDL3AudioManager : AudioManager + { + public static readonly int AUDIO_FREQ = 44100; + public static readonly int AUDIO_CHANNELS = 2; + public static readonly SDL_AudioFormat AUDIO_FORMAT = SDL3.SDL_AUDIO_F32; + + private volatile SDL_AudioDeviceID deviceId; + private volatile SDL_AudioStream* deviceStream; + + private SDL_AudioSpec spec; + private int bufferSize = (int)(44100 * 0.01); + + private static readonly AudioDecoderManager decoder = new AudioDecoderManager(); + + private readonly List sdlMixerList = new List(); + + private ImmutableArray deviceIdArray = ImmutableArray.Empty; + + protected ObjectHandle ObjectHandle { get; private set; } + + private Scheduler eventScheduler => EventScheduler ?? CurrentAudioThread.Scheduler; + + protected override void InvokeOnNewDevice(string deviceName) => eventScheduler.Add(() => base.InvokeOnNewDevice(deviceName)); + + protected override void InvokeOnLostDevice(string deviceName) => eventScheduler.Add(() => base.InvokeOnLostDevice(deviceName)); + + /// + /// Creates a new . + /// + /// The host's audio thread. + /// The resource store containing all audio tracks to be used in the future. + /// The sample store containing all audio samples to be used in the future. + public SDL3AudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) + : base(audioThread, trackStore, sampleStore) + { + ObjectHandle = new ObjectHandle(this, GCHandleType.Normal); + + // Must not edit this except for samples, as components (especially mixer) expects this to match. 
+ spec = new SDL_AudioSpec + { + freq = AUDIO_FREQ, + channels = AUDIO_CHANNELS, + format = AUDIO_FORMAT + }; + + AudioScheduler.Add(() => + { + updateDeviceList(); + + // comment below lines if you want to use FFmpeg to decode audio, AudioDecoder will use FFmpeg if no BASS device is available + ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1); + audioThread.InitDevice(ManagedBass.Bass.NoSoundDevice); + }); + } + + private string currentDeviceName = "Not loaded"; + + public override string ToString() + { + return $@"{GetType().ReadableName()} ({currentDeviceName})"; + } + + protected override AudioMixer AudioCreateAudioMixer(AudioMixer fallbackMixer, string identifier) + { + var mixer = new SDL3AudioMixer(fallbackMixer, identifier); + AddItem(mixer); + return mixer; + } + + protected override void ItemAdded(AudioComponent item) + { + base.ItemAdded(item); + + if (item is SDL3AudioMixer mixer) + { + try + { + if (deviceId != 0) + SDL3.SDL_LockAudioStream(deviceStream); + + sdlMixerList.Add(mixer); + } + finally + { + if (deviceId != 0) + SDL3.SDL_UnlockAudioStream(deviceStream); + } + } + } + + protected override void ItemRemoved(AudioComponent item) + { + base.ItemRemoved(item); + + if (item is SDL3AudioMixer mixer) + { + try + { + if (deviceId != 0) + SDL3.SDL_LockAudioStream(deviceStream); + + sdlMixerList.Remove(mixer); + } + finally + { + if (deviceId != 0) + SDL3.SDL_UnlockAudioStream(deviceStream); + } + } + } + + [UnmanagedCallersOnly(CallConvs = new[] { typeof(CallConvCdecl) })] + private static void audioCallback(IntPtr userdata, SDL_AudioStream* stream, int additionalAmount, int totalAmount) + { + var handle = new ObjectHandle(userdata); + if (handle.GetTarget(out SDL3AudioManager audioManager)) + audioManager.internalAudioCallback(stream, additionalAmount); + } + + private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount) + { + float[] buf = ArrayPool.Shared.Rent(additionalAmount / 4); + + try + { + fixed (float* main = buf) + { + int filled = 0; + + foreach (var mixer in sdlMixerList) + { + if (mixer.IsAlive) + mixer.MixChannelsInto(main, additionalAmount / 4, ref filled); + } + + for (; filled < additionalAmount / 4; filled++) + *(main + filled) = 0; + + SDL3.SDL_PutAudioStreamData(stream, (IntPtr)main, filled * 4); + } + } + catch (Exception e) + { + Logger.Error(e, "Error while pushing audio to SDL"); + } + finally + { + ArrayPool.Shared.Return(buf); + } + } + + internal void OnNewDeviceEvent(SDL_AudioDeviceID addedDeviceIndex) + { + AudioScheduler.Add(() => + { + // the index is only vaild until next SDL_GetNumAudioDevices call, so get the name first. + string name = SDL3.SDL_GetAudioDeviceName(addedDeviceIndex); + + updateDeviceList(); + InvokeOnNewDevice(name); + }); + } + + internal void OnLostDeviceEvent(SDL_AudioDeviceID removedDeviceId) + { + AudioScheduler.Add(() => + { + // SDL doesn't retain information about removed device. 
+ updateDeviceList(); + + if (!IsCurrentDeviceValid()) // current device lost + { + InvokeOnLostDevice(currentDeviceName); + SetAudioDevice(); + } + else + { + // we can probably guess the name by comparing the old list and the new one, but it won't be reliable + InvokeOnLostDevice(string.Empty); + } + }); + } + + private void updateDeviceList() + { + int count = 0; + SDL_AudioDeviceID* idArrayPtr = SDL3.SDL_GetAudioOutputDevices(&count); + + var idArray = ImmutableArray.CreateBuilder(count); + var nameArray = ImmutableArray.CreateBuilder(count); + + for (int i = 0; i < count; i++) + { + SDL_AudioDeviceID id = *(idArrayPtr + i); + idArray.Add(id); + nameArray.Add(SDL3.SDL_GetAudioDeviceName(id)); + + Logger.Log($"audio update {id} : {nameArray[i]}"); + } + + deviceIdArray = idArray.ToImmutableArray(); + DeviceNames = nameArray.ToImmutableList(); + } + + private bool setAudioDevice(SDL_AudioDeviceID targetId) + { + if (deviceStream != null) + { + SDL3.SDL_DestroyAudioStream(deviceStream); + deviceStream = null; + } + + fixed (SDL_AudioSpec* ptr = &spec) + { + deviceStream = SDL3.SDL_OpenAudioDeviceStream(targetId, ptr, &audioCallback, ObjectHandle.Handle); + + if (deviceStream != null) + { + deviceId = SDL3.SDL_GetAudioStreamDevice(deviceStream); + + int sampleFrameSize = 0; + SDL_AudioSpec temp; // this has 'real' device info which is useless since SDL converts audio according to the spec we provided + if (SDL3.SDL_GetAudioDeviceFormat(deviceId, &temp, &sampleFrameSize) == 0) + bufferSize = sampleFrameSize * (int)Math.Ceiling((double)spec.freq / temp.freq); + } + } + + if (deviceStream == null) + { + if (targetId == SDL3.SDL_AUDIO_DEVICE_DEFAULT_OUTPUT) + return false; + + return setAudioDevice(SDL3.SDL_AUDIO_DEVICE_DEFAULT_OUTPUT); + } + + SDL3.SDL_ResumeAudioDevice(deviceId); + + currentDeviceName = SDL3.SDL_GetAudioDeviceName(targetId); + + Logger.Log($@"🔈 SDL Audio initialised + Driver: {SDL3.SDL_GetCurrentAudioDriver()} + Device Name: {currentDeviceName} + Format: {spec.freq}hz {spec.channels}ch + Sample size: {bufferSize}"); + + return true; + } + + protected override bool SetAudioDevice(string deviceName = null) + { + deviceName ??= AudioDevice.Value; + + int deviceIndex = DeviceNames.FindIndex(d => d == deviceName); + if (deviceIndex >= 0) + return setAudioDevice(deviceIdArray[deviceIndex]); + + return setAudioDevice(SDL3.SDL_AUDIO_DEVICE_DEFAULT_OUTPUT); + } + + protected override bool SetAudioDevice(int deviceIndex) + { + if (deviceIndex < deviceIdArray.Length && deviceIndex >= 0) + return setAudioDevice(deviceIdArray[deviceIndex]); + + return SetAudioDevice(); + } + + protected override bool IsCurrentDeviceValid() => deviceId > 0 && SDL3.SDL_AudioDevicePaused(deviceId) == SDL3.SDL_FALSE; + + internal override Track.Track GetNewTrack(Stream data, string name) + { + TrackSDL3 track = new TrackSDL3(name, spec.freq, spec.channels, bufferSize); + EnqueueAction(() => decoder.StartDecodingAsync(spec.freq, spec.channels, spec.format, data, track.ReceiveAudioData)); + return track; + } + + internal override SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) + => new SampleSDL3Factory(data, name, (SDL3AudioMixer)mixer, playbackConcurrency, spec); + + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + + decoder?.Dispose(); + + if (deviceStream != null) + { + SDL3.SDL_DestroyAudioStream(deviceStream); + deviceStream = null; + deviceId = 0; // Destroying audio stream will close audio device + } + + 
ObjectHandle.Dispose(); + } + } +} diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs similarity index 80% rename from osu.Framework/Audio/Sample/SampleChannelSDL2.cs rename to osu.Framework/Audio/Sample/SampleChannelSDL3.cs index 180d2cfa26..d86dc162bb 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL2.cs +++ b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs @@ -2,13 +2,13 @@ // See the LICENCE file in the repository root for full licence text. using System.Threading; -using osu.Framework.Audio.Mixing.SDL2; +using osu.Framework.Audio.Mixing.SDL3; namespace osu.Framework.Audio.Sample { - internal sealed class SampleChannelSDL2 : SampleChannel, ISDL2AudioChannel + internal sealed class SampleChannelSDL3 : SampleChannel, ISDL3AudioChannel { - private readonly SampleSDL2AudioPlayer player; + private readonly SampleSDL3AudioPlayer player; private volatile bool playing; public override bool Playing => playing; @@ -16,7 +16,7 @@ internal sealed class SampleChannelSDL2 : SampleChannel, ISDL2AudioChannel private volatile bool looping; public override bool Looping { get => looping; set => looping = value; } - public SampleChannelSDL2(SampleSDL2 sample, SampleSDL2AudioPlayer player) + public SampleChannelSDL3(SampleSDL3 sample, SampleSDL3AudioPlayer player) : base(sample.Name) { this.player = player; @@ -38,7 +38,7 @@ public override void Stop() private volatile bool started; - int ISDL2AudioChannel.GetRemainingSamples(float[] data) + int ISDL3AudioChannel.GetRemainingSamples(float[] data) { if (player.RelativeRate != rate) player.RelativeRate = rate; @@ -77,11 +77,11 @@ internal override void OnStateChanged() Interlocked.Exchange(ref rate, AggregateFrequency.Value); } - (float, float) ISDL2AudioChannel.Volume => volume; + (float, float) ISDL3AudioChannel.Volume => volume; - bool ISDL2AudioChannel.Playing => playing; + bool ISDL3AudioChannel.Playing => playing; - ~SampleChannelSDL2() + ~SampleChannelSDL3() { Dispose(false); } @@ -91,7 +91,7 @@ protected override void Dispose(bool disposing) if (IsDisposed) return; - (Mixer as SDL2AudioMixer)?.StreamFree(this); + (Mixer as SDL3AudioMixer)?.StreamFree(this); base.Dispose(disposing); } diff --git a/osu.Framework/Audio/Sample/SampleSDL2.cs b/osu.Framework/Audio/Sample/SampleSDL3.cs similarity index 63% rename from osu.Framework/Audio/Sample/SampleSDL2.cs rename to osu.Framework/Audio/Sample/SampleSDL3.cs index f4f6f021cd..a8edac0c05 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3.cs @@ -1,18 +1,18 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. 
-using osu.Framework.Audio.Mixing.SDL2; +using osu.Framework.Audio.Mixing.SDL3; namespace osu.Framework.Audio.Sample { - internal sealed class SampleSDL2 : Sample + internal sealed class SampleSDL3 : Sample { public override bool IsLoaded => factory.IsLoaded; - private readonly SampleSDL2Factory factory; - private readonly SDL2AudioMixer mixer; + private readonly SampleSDL3Factory factory; + private readonly SDL3AudioMixer mixer; - public SampleSDL2(SampleSDL2Factory factory, SDL2AudioMixer mixer) + public SampleSDL3(SampleSDL3Factory factory, SDL3AudioMixer mixer) : base(factory) { this.factory = factory; @@ -21,7 +21,7 @@ public SampleSDL2(SampleSDL2Factory factory, SDL2AudioMixer mixer) protected override SampleChannel CreateChannel() { - var channel = new SampleChannelSDL2(this, factory.CreatePlayer()); + var channel = new SampleChannelSDL3(this, factory.CreatePlayer()); mixer.Add(channel); return channel; } diff --git a/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs b/osu.Framework/Audio/Sample/SampleSDL3AudioPlayer.cs similarity index 91% rename from osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs rename to osu.Framework/Audio/Sample/SampleSDL3AudioPlayer.cs index 6aedd177aa..c75f1c59dd 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3AudioPlayer.cs @@ -5,7 +5,7 @@ namespace osu.Framework.Audio.Sample { - internal class SampleSDL2AudioPlayer : ResamplingPlayer + internal class SampleSDL3AudioPlayer : ResamplingPlayer { private int position; @@ -16,7 +16,7 @@ internal class SampleSDL2AudioPlayer : ResamplingPlayer public bool Loop { get; set; } - public SampleSDL2AudioPlayer(float[] audioData, int rate, byte channels) + public SampleSDL3AudioPlayer(float[] audioData, int rate, int channels) : base(rate, channels) { this.audioData = audioData; diff --git a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs similarity index 78% rename from osu.Framework/Audio/Sample/SampleSDL2Factory.cs rename to osu.Framework/Audio/Sample/SampleSDL3Factory.cs index 026886ba1e..b1e3647671 100644 --- a/osu.Framework/Audio/Sample/SampleSDL2Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs @@ -4,26 +4,26 @@ using System; using System.Diagnostics; using System.IO; -using osu.Framework.Audio.Mixing.SDL2; +using osu.Framework.Audio.Mixing.SDL3; using osu.Framework.Bindables; using osu.Framework.Extensions; -using SDL2; +using SDL; namespace osu.Framework.Audio.Sample { - internal class SampleSDL2Factory : SampleFactory + internal class SampleSDL3Factory : SampleFactory { private bool isLoaded; public override bool IsLoaded => isLoaded; - private readonly SDL2AudioMixer mixer; - private readonly SDL.SDL_AudioSpec spec; + private readonly SDL3AudioMixer mixer; + private readonly SDL_AudioSpec spec; private float[] decodedAudio = Array.Empty(); private Stream? 
stream; - public SampleSDL2Factory(Stream stream, string name, SDL2AudioMixer mixer, int playbackConcurrency, SDL.SDL_AudioSpec spec) + public SampleSDL3Factory(Stream stream, string name, SDL3AudioMixer mixer, int playbackConcurrency, SDL_AudioSpec spec) : base(name, playbackConcurrency) { this.stream = stream; @@ -59,20 +59,20 @@ protected override void LoadSample() } } - public SampleSDL2AudioPlayer CreatePlayer() + public SampleSDL3AudioPlayer CreatePlayer() { LoadSampleTask?.WaitSafely(); - return new SampleSDL2AudioPlayer(decodedAudio, spec.freq, spec.channels); + return new SampleSDL3AudioPlayer(decodedAudio, spec.freq, spec.channels); } - public override Sample CreateSample() => new SampleSDL2(this, mixer) { OnPlay = SampleFactoryOnPlay }; + public override Sample CreateSample() => new SampleSDL3(this, mixer) { OnPlay = SampleFactoryOnPlay }; protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) { } - ~SampleSDL2Factory() + ~SampleSDL3Factory() { Dispose(false); } diff --git a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs similarity index 95% rename from osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs rename to osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs index 9008753d5b..2a314873be 100644 --- a/osu.Framework/Audio/Track/TempoSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs @@ -6,7 +6,7 @@ namespace osu.Framework.Audio.Track { - internal class TempoSDL2AudioPlayer : TrackSDL2AudioPlayer + internal class TempoSDL3AudioPlayer : TrackSDL3AudioPlayer { private SoundTouchProcessor? soundTouch; @@ -29,12 +29,12 @@ public double Tempo public override bool Done => base.Done && (soundTouch == null || donePlaying); /// - /// Creates a new . + /// Creates a new . /// /// /// - /// will prepare this amount of samples (or more) on every update. - public TempoSDL2AudioPlayer(int rate, byte channels, int samples) + /// will prepare this amount of samples (or more) on every update. + public TempoSDL3AudioPlayer(int rate, int channels, int samples) : base(rate, channels) { samplesize = samples; diff --git a/osu.Framework/Audio/Track/TrackSDL2.cs b/osu.Framework/Audio/Track/TrackSDL3.cs similarity index 93% rename from osu.Framework/Audio/Track/TrackSDL2.cs rename to osu.Framework/Audio/Track/TrackSDL3.cs index 2c8894e955..e5871306b5 100644 --- a/osu.Framework/Audio/Track/TrackSDL2.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -5,14 +5,14 @@ using System.Threading; using System.Threading.Tasks; using NAudio.Dsp; -using osu.Framework.Audio.Mixing.SDL2; +using osu.Framework.Audio.Mixing.SDL3; using osu.Framework.Extensions; namespace osu.Framework.Audio.Track { - public sealed class TrackSDL2 : Track, ISDL2AudioChannel + public sealed class TrackSDL3 : Track, ISDL3AudioChannel { - private readonly TempoSDL2AudioPlayer player; + private readonly TempoSDL3AudioPlayer player; public override bool IsDummyDevice => false; @@ -31,7 +31,7 @@ public sealed class TrackSDL2 : Track, ISDL2AudioChannel private volatile int bitrate; public override int? 
Bitrate => bitrate; - public TrackSDL2(string name, int rate, byte channels, int samples) + public TrackSDL3(string name, int rate, int channels, int samples) : base(name) { // SoundTouch limitation @@ -39,10 +39,10 @@ public TrackSDL2(string name, int rate, byte channels, int samples) AggregateTempo.ValueChanged += t => { if (t.NewValue < tempo_minimum_supported) - throw new ArgumentException($"{nameof(TrackSDL2)} does not support {nameof(Tempo)} specifications below {tempo_minimum_supported}. Use {nameof(Frequency)} instead."); + throw new ArgumentException($"{nameof(TrackSDL3)} does not support {nameof(Tempo)} specifications below {tempo_minimum_supported}. Use {nameof(Frequency)} instead."); }; - player = new TempoSDL2AudioPlayer(rate, channels, samples); + player = new TempoSDL3AudioPlayer(rate, channels, samples); } private readonly object syncRoot = new object(); @@ -226,7 +226,7 @@ public override Task StopAsync() => EnqueueAction(() => isRunning = false; }); - int ISDL2AudioChannel.GetRemainingSamples(float[] data) + int ISDL3AudioChannel.GetRemainingSamples(float[] data) { if (!IsLoaded) return 0; @@ -271,11 +271,11 @@ internal override void OnStateChanged() volume = ((float)(AggregateVolume.Value * (balance > 0 ? balance : 1.0)), (float)(AggregateVolume.Value * (balance < 0 ? -balance : 1.0))); } - bool ISDL2AudioChannel.Playing => isRunning && !player.Done; + bool ISDL3AudioChannel.Playing => isRunning && !player.Done; - (float, float) ISDL2AudioChannel.Volume => volume; + (float, float) ISDL3AudioChannel.Volume => volume; - ~TrackSDL2() + ~TrackSDL3() { Dispose(false); } @@ -286,7 +286,7 @@ protected override void Dispose(bool disposing) return; isRunning = false; - (Mixer as SDL2AudioMixer)?.StreamFree(this); + (Mixer as SDL3AudioMixer)?.StreamFree(this); decodeData?.Stop(); diff --git a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs similarity index 96% rename from osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs rename to osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs index e6150d7a69..b4b4a55ff2 100644 --- a/osu.Framework/Audio/Track/TrackSDL2AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs @@ -7,9 +7,9 @@ namespace osu.Framework.Audio.Track { /// - /// Mainly returns audio data to . + /// Mainly returns audio data to . /// - internal class TrackSDL2AudioPlayer : ResamplingPlayer, IDisposable + internal class TrackSDL3AudioPlayer : ResamplingPlayer, IDisposable { private volatile bool isLoaded; public bool IsLoaded => isLoaded; @@ -49,11 +49,11 @@ internal class TrackSDL2AudioPlayer : ResamplingPlayer, IDisposable public bool ReversePlayback { get; set; } /// - /// Creates a new . Use if you want to adjust tempo. + /// Creates a new . Use if you want to adjust tempo. /// /// Sampling rate of audio /// Channels of audio - public TrackSDL2AudioPlayer(int rate, byte channels) + public TrackSDL3AudioPlayer(int rate, int channels) : base(rate, channels) { isLoading = false; @@ -270,7 +270,7 @@ protected virtual void Dispose(bool disposing) } } - ~TrackSDL2AudioPlayer() + ~TrackSDL3AudioPlayer() { Dispose(false); } diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index 17a61a59da..df5dd9e57e 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -88,7 +88,7 @@ public Waveform(Stream? 
data) channels = 2; // AudioDecoder will resample data into specified sample rate and channels (44100hz 2ch float) - AudioDecoderManager.AudioDecoder decoder = AudioDecoderManager.CreateDecoder(sample_rate, channels, true, SDL2.SDL.AUDIO_F32, data, false); + AudioDecoderManager.AudioDecoder decoder = AudioDecoderManager.CreateDecoder(sample_rate, channels, true, SDL.SDL3.SDL_AUDIO_F32, data, false); Complex[] complexBuffer = ArrayPool.Shared.Rent(fft_samples); diff --git a/osu.Framework/Configuration/AudioDriver.cs b/osu.Framework/Configuration/AudioDriver.cs index 21e2ec9bb3..41a619f08d 100644 --- a/osu.Framework/Configuration/AudioDriver.cs +++ b/osu.Framework/Configuration/AudioDriver.cs @@ -6,6 +6,6 @@ namespace osu.Framework.Configuration public enum AudioDriver { BASS, - SDL2 + SDL3 } } diff --git a/osu.Framework/Game.cs b/osu.Framework/Game.cs index 6bb9d2bcd8..47ed832048 100644 --- a/osu.Framework/Game.cs +++ b/osu.Framework/Game.cs @@ -167,12 +167,12 @@ private void load(FrameworkConfigManager config) switch (config.Get(FrameworkSetting.AudioDriver)) { - case AudioDriver.SDL2 when Host.Window is SDL2Window sdl2Window: + case AudioDriver.SDL3 when Host.Window is SDL3Window sdl3Window: { - SDL2AudioManager sdl2Audio = new SDL2AudioManager(Host.AudioThread, tracks, samples) { EventScheduler = Scheduler }; - sdl2Window.AudioDeviceAdded += sdl2Audio.OnNewDeviceEvent; - sdl2Window.AudioDeviceRemoved += sdl2Audio.OnLostDeviceEvent; - Audio = sdl2Audio; + SDL3AudioManager sdl3Audio = new SDL3AudioManager(Host.AudioThread, tracks, samples) { EventScheduler = Scheduler }; + sdl3Window.AudioDeviceAdded += sdl3Audio.OnNewDeviceEvent; + sdl3Window.AudioDeviceRemoved += sdl3Audio.OnLostDeviceEvent; + Audio = sdl3Audio; break; } diff --git a/osu.Framework/Platform/SDL3Window.cs b/osu.Framework/Platform/SDL3Window.cs index 9c35b57a98..0cccd4c0db 100644 --- a/osu.Framework/Platform/SDL3Window.cs +++ b/osu.Framework/Platform/SDL3Window.cs @@ -567,7 +567,7 @@ protected virtual void HandleEvent(SDL_Event e) } } - private void handleAudioDeviceEvent(SDL.SDL_AudioDeviceEvent evtAudioDevice) + private void handleAudioDeviceEvent(SDL_AudioDeviceEvent evtAudioDevice) { if (evtAudioDevice.iscapture != 0) // capture device return; @@ -575,7 +575,7 @@ private void handleAudioDeviceEvent(SDL.SDL_AudioDeviceEvent evtAudioDevice) switch (evtAudioDevice.type) { case SDL_EventType.SDL_EVENT_AUDIO_DEVICE_ADDED: - AudioDeviceAdded?.Invoke((int)evtAudioDevice.which); + AudioDeviceAdded?.Invoke(evtAudioDevice.which); break; case SDL_EventType.SDL_EVENT_AUDIO_DEVICE_REMOVED: @@ -658,14 +658,14 @@ internal virtual void SetIconFromGroup(IconGroup iconGroup) public event Action? DragDrop; /// - /// Invoked when a new audio device is added, only when using SDL2 audio + /// Invoked when a new audio device is added, only when using SDL3 audio /// - public event Action? AudioDeviceAdded; + public event Action? AudioDeviceAdded; /// - /// Invoked when a new audio device is removed, only when using SDL2 audio + /// Invoked when a new audio device is removed, only when using SDL3 audio /// - public event Action? AudioDeviceRemoved; + public event Action? 
AudioDeviceRemoved; #endregion From 32ad8fca4a7ba95235c3199690bc4f9359ac3e7c Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 12 Apr 2024 23:09:36 +0900 Subject: [PATCH 079/127] Ignore SDL audio devices with empty name --- osu.Framework/Audio/SDL3AudioManager.cs | 33 ++++++++++++++----------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index a30869cd12..f8b8db32f0 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -33,7 +33,7 @@ public unsafe class SDL3AudioManager : AudioManager private volatile SDL_AudioStream* deviceStream; private SDL_AudioSpec spec; - private int bufferSize = (int)(44100 * 0.01); + private int bufferSize = (int)(AUDIO_FREQ * 0.01); // 10ms, will be calculated later when opening audio device, it works as a base value until then. private static readonly AudioDecoderManager decoder = new AudioDecoderManager(); @@ -60,7 +60,7 @@ public SDL3AudioManager(AudioThread audioThread, ResourceStore trackStor { ObjectHandle = new ObjectHandle(this, GCHandleType.Normal); - // Must not edit this except for samples, as components (especially mixer) expects this to match. + // Must not edit this, as components (especially mixer) expects this to match. spec = new SDL_AudioSpec { freq = AUDIO_FREQ, @@ -70,7 +70,7 @@ public SDL3AudioManager(AudioThread audioThread, ResourceStore trackStor AudioScheduler.Add(() => { - updateDeviceList(); + syncAudioDevices(); // comment below lines if you want to use FFmpeg to decode audio, AudioDecoder will use FFmpeg if no BASS device is available ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1); @@ -144,7 +144,8 @@ private static void audioCallback(IntPtr userdata, SDL_AudioStream* stream, int private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount) { - float[] buf = ArrayPool.Shared.Rent(additionalAmount / 4); + additionalAmount /= 4; + float[] buf = ArrayPool.Shared.Rent(additionalAmount); try { @@ -155,12 +156,9 @@ private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount foreach (var mixer in sdlMixerList) { if (mixer.IsAlive) - mixer.MixChannelsInto(main, additionalAmount / 4, ref filled); + mixer.MixChannelsInto(main, additionalAmount, ref filled); } - for (; filled < additionalAmount / 4; filled++) - *(main + filled) = 0; - SDL3.SDL_PutAudioStreamData(stream, (IntPtr)main, filled * 4); } } @@ -181,7 +179,7 @@ internal void OnNewDeviceEvent(SDL_AudioDeviceID addedDeviceIndex) // the index is only vaild until next SDL_GetNumAudioDevices call, so get the name first. string name = SDL3.SDL_GetAudioDeviceName(addedDeviceIndex); - updateDeviceList(); + syncAudioDevices(); InvokeOnNewDevice(name); }); } @@ -191,7 +189,7 @@ internal void OnLostDeviceEvent(SDL_AudioDeviceID removedDeviceId) AudioScheduler.Add(() => { // SDL doesn't retain information about removed device. 
- updateDeviceList(); + syncAudioDevices(); if (!IsCurrentDeviceValid()) // current device lost { @@ -206,7 +204,7 @@ internal void OnLostDeviceEvent(SDL_AudioDeviceID removedDeviceId) }); } - private void updateDeviceList() + private void syncAudioDevices() { int count = 0; SDL_AudioDeviceID* idArrayPtr = SDL3.SDL_GetAudioOutputDevices(&count); @@ -217,14 +215,19 @@ private void updateDeviceList() for (int i = 0; i < count; i++) { SDL_AudioDeviceID id = *(idArrayPtr + i); - idArray.Add(id); - nameArray.Add(SDL3.SDL_GetAudioDeviceName(id)); + string name = SDL3.SDL_GetAudioDeviceName(id); + + if (name == null || name.Length == 0) + continue; - Logger.Log($"audio update {id} : {nameArray[i]}"); + idArray.Add(id); + nameArray.Add(name); } - deviceIdArray = idArray.ToImmutableArray(); + deviceIdArray = idArray.ToImmutable(); DeviceNames = nameArray.ToImmutableList(); + + Logger.Log($"count {count} , id {deviceIdArray.Length} , names {DeviceNames.Count}"); } private bool setAudioDevice(SDL_AudioDeviceID targetId) From 685db63b372507ce40396a94249ae40352af3166 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 13 Apr 2024 01:22:32 +0900 Subject: [PATCH 080/127] No longer uses ArrayPool and pointers in SDL audio callback --- .../Audio/Mixing/SDL3/SDL3AudioMixer.cs | 29 +++++++-------- osu.Framework/Audio/SDL3AudioManager.cs | 35 ++++++++++--------- 2 files changed, 31 insertions(+), 33 deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index 6a98c78bf8..0fe1528b07 100644 --- a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -64,13 +64,18 @@ protected override void UpdateState() base.UpdateState(); } - private unsafe void mixAudio(float* dst, float* src, ref int filled, int samples, float left, float right) + private unsafe void mixAudio(float[] dst, float[] src, ref int filled, int samples, float left, float right) { if (left <= 0 && right <= 0) return; - for (int i = 0; i < samples; i++) - *(dst + i) = (*(src + i) * ((i % 2) == 0 ? left : right)) + (i < filled ? *(dst + i) : 0); + int i = 0; + + for (; i < filled; i++) + dst[i] += src[i] * ((i % 2) == 0 ? left : right); + + for (; i < samples; i++) + dst[i] = src[i] * ((i % 2) == 0 ? left : right); if (samples > filled) filled = samples; @@ -88,7 +93,7 @@ private unsafe void mixAudio(float* dst, float* src, ref int filled, int samples /// A float array that audio will be mixed into. 
/// Size of data /// Count of usable audio samples in data - public unsafe void MixChannelsInto(float* data, int sampleCount, ref int filledSamples) + public unsafe void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples) { lock (syncRoot) { @@ -127,18 +132,11 @@ public unsafe void MixChannelsInto(float* data, int sampleCount, ref int filledS if (!useFilters) { - fixed (float* retPtr = ret) - { - mixAudio(data, retPtr, ref filledSamples, size, left, right); - } + mixAudio(data, ret, ref filledSamples, size, left, right); } else { - fixed (float* filterArrPtr = filterArray) - fixed (float* retPtr = ret) - { - mixAudio(filterArrPtr, retPtr, ref filterArrayFilled, size, left, right); - } + mixAudio(filterArray!, ret, ref filterArrayFilled, size, left, right); } } } @@ -161,10 +159,7 @@ public unsafe void MixChannelsInto(float* data, int sampleCount, ref int filledS } } - fixed (float* filterArrPtr = filterArray) - { - mixAudio(data, filterArrPtr, ref filledSamples, filterArrayFilled, 1, 1); - } + mixAudio(data, filterArray!, ref filledSamples, filterArrayFilled, 1, 1); } } } diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index f8b8db32f0..571c123458 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -68,6 +68,9 @@ public SDL3AudioManager(AudioThread audioThread, ResourceStore trackStor format = AUDIO_FORMAT }; + // determines latency, but some audio servers may not make use of this hint + SDL3.SDL_SetHint(SDL3.SDL_HINT_AUDIO_DEVICE_SAMPLE_FRAMES, "256"u8); + AudioScheduler.Add(() => { syncAudioDevices(); @@ -142,34 +145,32 @@ private static void audioCallback(IntPtr userdata, SDL_AudioStream* stream, int audioManager.internalAudioCallback(stream, additionalAmount); } + private float[] audioBuffer; + private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount) { additionalAmount /= 4; - float[] buf = ArrayPool.Shared.Rent(additionalAmount); + + if (audioBuffer == null || audioBuffer.Length < additionalAmount) + audioBuffer = new float[additionalAmount]; try { - fixed (float* main = buf) - { - int filled = 0; - - foreach (var mixer in sdlMixerList) - { - if (mixer.IsAlive) - mixer.MixChannelsInto(main, additionalAmount, ref filled); - } + int filled = 0; - SDL3.SDL_PutAudioStreamData(stream, (IntPtr)main, filled * 4); + foreach (var mixer in sdlMixerList) + { + if (mixer.IsAlive) + mixer.MixChannelsInto(audioBuffer, additionalAmount, ref filled); } + + fixed (float* ptr = audioBuffer) + SDL3.SDL_PutAudioStreamData(stream, (IntPtr)ptr, filled * 4); } catch (Exception e) { Logger.Error(e, "Error while pushing audio to SDL"); } - finally - { - ArrayPool.Shared.Return(buf); - } } internal void OnNewDeviceEvent(SDL_AudioDeviceID addedDeviceIndex) @@ -315,7 +316,9 @@ protected override void Dispose(bool disposing) { SDL3.SDL_DestroyAudioStream(deviceStream); deviceStream = null; - deviceId = 0; // Destroying audio stream will close audio device + deviceId = 0; + // Destroying audio stream will close audio device because we use SDL3 OpenAudioDeviceStream + // won't use multiple AudioStream for now since it's barely useful } ObjectHandle.Dispose(); From 3ea2a720801acb855fcb5035de3f5f797f9275f0 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 13 Apr 2024 01:32:03 +0900 Subject: [PATCH 081/127] Satisfy InspectCode --- osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs | 4 ++-- osu.Framework/Audio/SDL3AudioManager.cs | 3 +-- 2 files changed, 3 insertions(+), 4 
deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index 0fe1528b07..56478d16c4 100644 --- a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -64,7 +64,7 @@ protected override void UpdateState() base.UpdateState(); } - private unsafe void mixAudio(float[] dst, float[] src, ref int filled, int samples, float left, float right) + private void mixAudio(float[] dst, float[] src, ref int filled, int samples, float left, float right) { if (left <= 0 && right <= 0) return; @@ -93,7 +93,7 @@ private unsafe void mixAudio(float[] dst, float[] src, ref int filled, int sampl /// A float array that audio will be mixed into. /// Size of data /// Count of usable audio samples in data - public unsafe void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples) + public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples) { lock (syncRoot) { diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index 571c123458..e156ae807a 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -4,7 +4,6 @@ #nullable disable using System; -using System.Buffers; using System.Collections.Generic; using System.Collections.Immutable; using System.IO; @@ -218,7 +217,7 @@ private void syncAudioDevices() SDL_AudioDeviceID id = *(idArrayPtr + i); string name = SDL3.SDL_GetAudioDeviceName(id); - if (name == null || name.Length == 0) + if (string.IsNullOrEmpty(name)) continue; idArray.Add(id); From e9dfbe90f7fc7bc27ba434d7bc7323d0c597bf92 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 13 Apr 2024 17:39:08 +0900 Subject: [PATCH 082/127] Remove unneeded lines in SDL3AudioManager --- osu.Framework/Audio/SDL3AudioManager.cs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index e156ae807a..7f2f442e92 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -67,9 +67,6 @@ public SDL3AudioManager(AudioThread audioThread, ResourceStore trackStor format = AUDIO_FORMAT }; - // determines latency, but some audio servers may not make use of this hint - SDL3.SDL_SetHint(SDL3.SDL_HINT_AUDIO_DEVICE_SAMPLE_FRAMES, "256"u8); - AudioScheduler.Add(() => { syncAudioDevices(); @@ -226,8 +223,6 @@ private void syncAudioDevices() deviceIdArray = idArray.ToImmutable(); DeviceNames = nameArray.ToImmutableList(); - - Logger.Log($"count {count} , id {deviceIdArray.Length} , names {DeviceNames.Count}"); } private bool setAudioDevice(SDL_AudioDeviceID targetId) From 4c867b6364e0e91f5c4f2780aebb1b0f46bcb01b Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 10 May 2024 23:27:21 +0900 Subject: [PATCH 083/127] Fix channel balance in SDL3 audio --- .../Audio/AggregateAdjustmentExtensions.cs | 19 +++++++++++++++++++ .../Audio/Sample/SampleChannelSDL3.cs | 3 +-- osu.Framework/Audio/Track/TrackSDL3.cs | 3 +-- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/osu.Framework/Audio/AggregateAdjustmentExtensions.cs b/osu.Framework/Audio/AggregateAdjustmentExtensions.cs index 3dda8ef660..bbbeebd531 100644 --- a/osu.Framework/Audio/AggregateAdjustmentExtensions.cs +++ b/osu.Framework/Audio/AggregateAdjustmentExtensions.cs @@ -34,5 +34,24 @@ public static IBindable GetAggregate(this IAggregateAudioAdjustment adju throw new ArgumentOutOfRangeException(nameof(type), "Invalid 
adjustable property type."); } } + + /// + /// Get aggregated stereo volume by decreasing the opponent channel. + /// + /// The audio adjustments to return from. + /// Aggregated stereo volume. + internal static (double, double) GetAggregatedStereoVolume(this IAggregateAudioAdjustment adjustment) + { + double volume = adjustment.AggregateVolume.Value; + double balance = adjustment.AggregateBalance.Value; + double balanceAbs = 1.0 - Math.Abs(balance); + + if (balance < 0) + return (volume, volume * balanceAbs); + else if (balance > 0) + return (volume * balanceAbs, volume); + + return (volume, volume); + } } } diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs index d86dc162bb..3b62c94434 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs +++ b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs @@ -71,8 +71,7 @@ internal override void OnStateChanged() { base.OnStateChanged(); - double balance = AggregateBalance.Value; - volume = ((float)(AggregateVolume.Value * (balance > 0 ? balance : 1.0)), (float)(AggregateVolume.Value * (balance < 0 ? -balance : 1.0))); + volume = ((float, float))Adjustments.GetAggregatedStereoVolume(); Interlocked.Exchange(ref rate, AggregateFrequency.Value); } diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index e5871306b5..f0773bfcc1 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -267,8 +267,7 @@ internal override void OnStateChanged() player.Tempo = AggregateTempo.Value; } - double balance = AggregateBalance.Value; - volume = ((float)(AggregateVolume.Value * (balance > 0 ? balance : 1.0)), (float)(AggregateVolume.Value * (balance < 0 ? -balance : 1.0))); + volume = ((float, float))Adjustments.GetAggregatedStereoVolume(); } bool ISDL3AudioChannel.Playing => isRunning && !player.Done; From 8deadc647377a73fb8981aca1d4c3c9d04c5d0f5 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Tue, 18 Jun 2024 21:32:53 +0900 Subject: [PATCH 084/127] intermediate --- .../Audio/Mixing/SDL3/SDL3AudioMixer.cs | 100 ++++++++---------- 1 file changed, 44 insertions(+), 56 deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index 56478d16c4..6a42109d12 100644 --- a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -8,7 +8,6 @@ using System.Linq; using ManagedBass; using ManagedBass.Fx; -using osu.Framework.Bindables; using osu.Framework.Extensions.ObjectExtensions; using osu.Framework.Statistics; using NAudio.Dsp; @@ -35,11 +34,8 @@ internal class SDL3AudioMixer : AudioMixer public SDL3AudioMixer(AudioMixer? 
globalMixer, string identifier) : base(globalMixer, identifier) { - EnqueueAction(() => Effects.BindCollectionChanged(onEffectsChanged, true)); } - public override BindableList Effects { get; } = new BindableList(); - protected override void AddInternal(IAudioChannel channel) { if (channel is not ISDL3AudioChannel sdlChannel) @@ -102,7 +98,7 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples ret = new float[sampleCount]; } - bool useFilters = audioFilters.Count > 0; + bool useFilters = activeEffects.Count > 0; if (useFilters && (filterArray == null || filterArray.Length != sampleCount)) { @@ -150,7 +146,7 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples { for (int i = 0; i < filterArrayFilled; i++) { - foreach (var filter in audioFilters) + foreach (var filter in activeEffects.Values) { if (filter.BiQuadFilter != null) { @@ -164,65 +160,18 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples } } - private readonly List audioFilters = new List(); - - private void onEffectsChanged(object? sender, NotifyCollectionChangedEventArgs e) => EnqueueAction(() => - { - lock (syncRoot) - { - switch (e.Action) - { - case NotifyCollectionChangedAction.Add: - { - Debug.Assert(e.NewItems != null); - int startIndex = Math.Max(0, e.NewStartingIndex); - audioFilters.InsertRange(startIndex, e.NewItems.OfType().Select(eff => new EffectBox(eff))); - break; - } - - case NotifyCollectionChangedAction.Move: - { - EffectBox effect = audioFilters[e.OldStartingIndex]; - audioFilters.RemoveAt(e.OldStartingIndex); - audioFilters.Insert(e.NewStartingIndex, effect); - break; - } - - case NotifyCollectionChangedAction.Remove: - { - Debug.Assert(e.OldItems != null); - - audioFilters.RemoveRange(e.OldStartingIndex, e.OldItems.Count); - break; - } - - case NotifyCollectionChangedAction.Replace: - { - Debug.Assert(e.NewItems != null); - - EffectBox newFilter = new EffectBox((IEffectParameter)e.NewItems[0].AsNonNull()); - audioFilters[e.NewStartingIndex] = newFilter; - break; - } - - case NotifyCollectionChangedAction.Reset: - { - audioFilters.Clear(); - break; - } - } - } - }); - internal class EffectBox { public readonly BiQuadFilter? BiQuadFilter; + public readonly IEffectParameter EffectParameter; public EffectBox(IEffectParameter param) { // allowing non-bqf to keep index of list if (param is BQFParameters bqfp) BiQuadFilter = getFilter(SDL3AudioManager.AUDIO_FREQ, bqfp); + + EffectParameter = param; } } @@ -286,5 +235,44 @@ public void StreamFree(IAudioChannel channel) { Remove(channel, false); } + + private readonly SortedDictionary activeEffects = new SortedDictionary(); + + public override void AddEffect(IEffectParameter effect, int priority = 0) => EnqueueAction(() => + { + lock (syncRoot) + { + if (activeEffects.ContainsKey(priority)) + return; + + activeEffects[priority] = new EffectBox(effect); + } + }); + + public override void RemoveEffect(IEffectParameter effect) => EnqueueAction(() => + { + lock (syncRoot) + { + bool found = false; + + do + { + foreach (KeyValuePair pair in activeEffects) + { + if (pair.Value.EffectParameter == effect) + { + activeEffects.Remove(pair.Key); // cannot move forward because we removed it! 
+ found = true; + break; + } + } + } + while (found); + } + }); + + public override void UpdateEffect(IEffectParameter effect) => EnqueueAction(() => + { + }); } } From 21a01eca09280b9addcc2200cab5445a3adf1e34 Mon Sep 17 00:00:00 2001 From: hwsmm Date: Wed, 19 Jun 2024 00:16:56 +0900 Subject: [PATCH 085/127] Cleanup and adapt to master --- .../Audio/Mixing/SDL3/SDL3AudioMixer.cs | 63 +++++++------------ osu.Framework/Game.cs | 1 + 2 files changed, 25 insertions(+), 39 deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index 6a42109d12..ceb66e07fa 100644 --- a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -1,14 +1,10 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. -using System; using System.Collections.Generic; -using System.Collections.Specialized; -using System.Diagnostics; using System.Linq; using ManagedBass; using ManagedBass.Fx; -using osu.Framework.Extensions.ObjectExtensions; using osu.Framework.Statistics; using NAudio.Dsp; @@ -94,16 +90,12 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples lock (syncRoot) { if (ret == null || sampleCount != ret.Length) - { ret = new float[sampleCount]; - } bool useFilters = activeEffects.Count > 0; if (useFilters && (filterArray == null || filterArray.Length != sampleCount)) - { filterArray = new float[sampleCount]; - } int filterArrayFilled = 0; @@ -127,13 +119,9 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples var (left, right) = channel.Volume; if (!useFilters) - { mixAudio(data, ret, ref filledSamples, size, left, right); - } else - { mixAudio(filterArray!, ret, ref filterArrayFilled, size, left, right); - } } } @@ -149,9 +137,7 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples foreach (var filter in activeEffects.Values) { if (filter.BiQuadFilter != null) - { filterArray![i] = filter.BiQuadFilter.Transform(filterArray[i]); - } } } @@ -162,16 +148,18 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples internal class EffectBox { - public readonly BiQuadFilter? BiQuadFilter; - public readonly IEffectParameter EffectParameter; + public BiQuadFilter? BiQuadFilter; public EffectBox(IEffectParameter param) + { + Update(param); + } + + public void Update(IEffectParameter param) { // allowing non-bqf to keep index of list if (param is BQFParameters bqfp) BiQuadFilter = getFilter(SDL3AudioManager.AUDIO_FREQ, bqfp); - - EffectParameter = param; } } @@ -236,43 +224,40 @@ public void StreamFree(IAudioChannel channel) Remove(channel, false); } + // Would like something like BiMap in Java, but I cannot write the whole collection here. private readonly SortedDictionary activeEffects = new SortedDictionary(); + private readonly Dictionary parameterDict = new Dictionary(); + // This would overwrite a filter with same priority, does it need to get fixed? 
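
(For context on the API this commit introduces: a hypothetical consumer-side use of AddEffect/UpdateEffect/RemoveEffect is sketched below. Only the method signatures and the BQFParameters/BQFType types come from the patch; the mixer variable, the priority and the filter values are assumptions for illustration.)

    // Register a low-pass filter with the mixer at a given priority.
    var lowPass = new BQFParameters
    {
        lFilter = BQFType.LowPass,
        fCenter = 600, // cutoff in Hz (arbitrary example value)
        fQ = 0.7f
    };

    mixer.AddEffect(lowPass, priority: 1);

    // Mutate the parameters, then ask the mixer to rebuild its internal filter state.
    lowPass.fCenter = 300;
    mixer.UpdateEffect(lowPass);

    // Removal is resolved through the effect -> priority reverse lookup.
    mixer.RemoveEffect(lowPass);
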
public override void AddEffect(IEffectParameter effect, int priority = 0) => EnqueueAction(() => { - lock (syncRoot) - { - if (activeEffects.ContainsKey(priority)) - return; + if (parameterDict.ContainsKey(effect)) + return; + lock (syncRoot) activeEffects[priority] = new EffectBox(effect); - } + + parameterDict[effect] = priority; }); public override void RemoveEffect(IEffectParameter effect) => EnqueueAction(() => { + if (!parameterDict.TryGetValue(effect, out int index)) + return; + lock (syncRoot) - { - bool found = false; + activeEffects.Remove(index); - do - { - foreach (KeyValuePair pair in activeEffects) - { - if (pair.Value.EffectParameter == effect) - { - activeEffects.Remove(pair.Key); // cannot move forward because we removed it! - found = true; - break; - } - } - } - while (found); - } + parameterDict.Remove(effect); }); public override void UpdateEffect(IEffectParameter effect) => EnqueueAction(() => { + if (!parameterDict.TryGetValue(effect, out int index)) + return; + + lock (syncRoot) + activeEffects[index].Update(effect); }); } } diff --git a/osu.Framework/Game.cs b/osu.Framework/Game.cs index 47ed832048..36b9c0b798 100644 --- a/osu.Framework/Game.cs +++ b/osu.Framework/Game.cs @@ -24,6 +24,7 @@ using osu.Framework.IO.Stores; using osu.Framework.Localisation; using osu.Framework.Platform; +using osu.Framework.Platform.SDL3; using osuTK; namespace osu.Framework From ca971bb14b31a58d140c012f7195ef1c9bce3ebc Mon Sep 17 00:00:00 2001 From: hwsmm Date: Fri, 21 Jun 2024 23:10:46 +0900 Subject: [PATCH 086/127] Cleanup BiQuad filter application --- .../Audio/Mixing/SDL3/SDL3AudioMixer.cs | 96 ++++++++----------- 1 file changed, 41 insertions(+), 55 deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index ceb66e07fa..3990f6e6fe 100644 --- a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -132,13 +132,10 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples if (useFilters) { - for (int i = 0; i < filterArrayFilled; i++) + foreach (var filter in activeEffects.Values) { - foreach (var filter in activeEffects.Values) - { - if (filter.BiQuadFilter != null) - filterArray![i] = filter.BiQuadFilter.Transform(filterArray[i]); - } + for (int i = 0; i < filterArrayFilled; i++) + filterArray![i] = filter.Transform(filterArray[i]); } mixAudio(data, filterArray!, ref filledSamples, filterArrayFilled, 1, 1); @@ -146,68 +143,53 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples } } - internal class EffectBox - { - public BiQuadFilter? BiQuadFilter; - - public EffectBox(IEffectParameter param) - { - Update(param); - } - - public void Update(IEffectParameter param) - { - // allowing non-bqf to keep index of list - if (param is BQFParameters bqfp) - BiQuadFilter = getFilter(SDL3AudioManager.AUDIO_FREQ, bqfp); - } - } - - private static BiQuadFilter getFilter(float freq, BQFParameters bqfp) + private static BiQuadFilter updateFilter(BiQuadFilter? 
filter, float freq, BQFParameters bqfp) { - BiQuadFilter filter; - switch (bqfp.lFilter) { case BQFType.LowPass: - filter = BiQuadFilter.LowPassFilter(freq, bqfp.fCenter, bqfp.fQ); - break; + if (filter == null) + return BiQuadFilter.LowPassFilter(freq, bqfp.fCenter, bqfp.fQ); + else + filter.SetLowPassFilter(freq, bqfp.fCenter, bqfp.fQ); + + return filter; case BQFType.HighPass: - filter = BiQuadFilter.HighPassFilter(freq, bqfp.fCenter, bqfp.fQ); - break; + if (filter == null) + return BiQuadFilter.HighPassFilter(freq, bqfp.fCenter, bqfp.fQ); + else + filter.SetHighPassFilter(freq, bqfp.fCenter, bqfp.fQ); + + return filter; + + case BQFType.PeakingEQ: + if (filter == null) + return BiQuadFilter.PeakingEQ(freq, bqfp.fCenter, bqfp.fQ, bqfp.fGain); + else + filter.SetPeakingEq(freq, bqfp.fCenter, bqfp.fQ, bqfp.fGain); + + return filter; case BQFType.BandPass: - filter = BiQuadFilter.BandPassFilterConstantPeakGain(freq, bqfp.fCenter, bqfp.fQ); - break; + return BiQuadFilter.BandPassFilterConstantPeakGain(freq, bqfp.fCenter, bqfp.fQ); case BQFType.BandPassQ: - filter = BiQuadFilter.BandPassFilterConstantSkirtGain(freq, bqfp.fCenter, bqfp.fQ); - break; + return BiQuadFilter.BandPassFilterConstantSkirtGain(freq, bqfp.fCenter, bqfp.fQ); case BQFType.Notch: - filter = BiQuadFilter.NotchFilter(freq, bqfp.fCenter, bqfp.fQ); - break; - - case BQFType.PeakingEQ: - filter = BiQuadFilter.PeakingEQ(freq, bqfp.fCenter, bqfp.fQ, bqfp.fGain); - break; + return BiQuadFilter.NotchFilter(freq, bqfp.fCenter, bqfp.fQ); case BQFType.LowShelf: - filter = BiQuadFilter.LowShelf(freq, bqfp.fCenter, bqfp.fS, bqfp.fGain); - break; + return BiQuadFilter.LowShelf(freq, bqfp.fCenter, bqfp.fS, bqfp.fGain); case BQFType.HighShelf: - filter = BiQuadFilter.HighShelf(freq, bqfp.fCenter, bqfp.fS, bqfp.fGain); - break; + return BiQuadFilter.HighShelf(freq, bqfp.fCenter, bqfp.fS, bqfp.fGain); case BQFType.AllPass: - default: // NAudio BiQuadFilter covers all, this default is kind of meaningless - filter = BiQuadFilter.AllPassFilter(freq, bqfp.fCenter, bqfp.fQ); - break; + default: + return BiQuadFilter.AllPassFilter(freq, bqfp.fCenter, bqfp.fQ); } - - return filter; } protected override void Dispose(bool disposing) @@ -225,17 +207,21 @@ public void StreamFree(IAudioChannel channel) } // Would like something like BiMap in Java, but I cannot write the whole collection here. - private readonly SortedDictionary activeEffects = new SortedDictionary(); + private readonly SortedDictionary activeEffects = new SortedDictionary(); private readonly Dictionary parameterDict = new Dictionary(); - // This would overwrite a filter with same priority, does it need to get fixed? 
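
(A minimal standalone sketch of how a filter produced by updateFilter above is applied, assuming NAudio's BiQuadFilter API; the class and method names below are illustrative only. Transform() carries state between samples, which is why the mixer keeps one BiQuadFilter instance per registered effect instead of rebuilding it every audio callback.)

    using NAudio.Dsp;

    internal static class BiQuadExample
    {
        // Applies an already-constructed filter to a buffer in place, the same way
        // MixChannelsInto walks filterArray before mixing it into the output.
        public static void ApplyInPlace(BiQuadFilter filter, float[] buffer, int filledSamples)
        {
            for (int i = 0; i < filledSamples; i++)
                buffer[i] = filter.Transform(buffer[i]);
        }

        public static void Demo(float[] buffer)
        {
            // e.g. a 600 Hz low-pass at 44100 Hz with Q = 0.7 (arbitrary example values).
            BiQuadFilter lowPass = BiQuadFilter.LowPassFilter(44100, 600, 0.7f);
            ApplyInPlace(lowPass, buffer, buffer.Length);
        }
    }
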
public override void AddEffect(IEffectParameter effect, int priority = 0) => EnqueueAction(() => { - if (parameterDict.ContainsKey(effect)) + if (parameterDict.ContainsKey(effect) || effect is not BQFParameters bqfp) return; + while (activeEffects.ContainsKey(priority)) + priority++; + + BiQuadFilter filter = updateFilter(null, SDL3AudioManager.AUDIO_FREQ, bqfp); + lock (syncRoot) - activeEffects[priority] = new EffectBox(effect); + activeEffects[priority] = filter; parameterDict[effect] = priority; }); @@ -253,11 +239,11 @@ public override void RemoveEffect(IEffectParameter effect) => EnqueueAction(() = public override void UpdateEffect(IEffectParameter effect) => EnqueueAction(() => { - if (!parameterDict.TryGetValue(effect, out int index)) + if (!parameterDict.TryGetValue(effect, out int index) || effect is not BQFParameters bqfp) return; lock (syncRoot) - activeEffects[index].Update(effect); + activeEffects[index] = updateFilter(activeEffects[index], SDL3AudioManager.AUDIO_FREQ, bqfp); }); } } From 60437dda208f6a16182b01398a0fea55bc5e32fa Mon Sep 17 00:00:00 2001 From: hwsmm Date: Sat, 22 Jun 2024 14:03:15 +0900 Subject: [PATCH 087/127] Clamp volume --- .../Audio/Mixing/SDL3/SDL3AudioMixer.cs | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index 3990f6e6fe..edf6fa6d18 100644 --- a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -61,13 +61,29 @@ private void mixAudio(float[] dst, float[] src, ref int filled, int samples, flo if (left <= 0 && right <= 0) return; - int i = 0; + for (int e = 0; e < samples; e += 2) + { + if (e < filled) + { + dst[e] += src[e] * left; + dst[e + 1] += src[e + 1] * right; + } + else + { + dst[e] = src[e] * left; + dst[e + 1] = src[e + 1] * right; + } - for (; i < filled; i++) - dst[i] += src[i] * ((i % 2) == 0 ? left : right); + if (dst[e] < -1f) + dst[e] = -1f; + else if (dst[e] > 1f) + dst[e] = 1f; - for (; i < samples; i++) - dst[i] = src[i] * ((i % 2) == 0 ? 
left : right); + if (dst[e + 1] < -1f) + dst[e + 1] = -1f; + else if (dst[e + 1] > 1f) + dst[e + 1] = 1f; + } if (samples > filled) filled = samples; From f82d46c49d07896a8cd435d25839d42b2519b34b Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sat, 20 Jul 2024 17:58:56 +0900 Subject: [PATCH 088/127] Apply SDL3 audio upstream changes --- osu.Framework/Audio/BassAudioDecoder.cs | 4 ++-- osu.Framework/Audio/SDL3AudioManager.cs | 8 ++++---- osu.Framework/Platform/SDL3/SDL3Window.cs | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs index 7999d79ea8..e89cfb8996 100644 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ b/osu.Framework/Audio/BassAudioDecoder.cs @@ -30,9 +30,9 @@ private Resolution resolution { get { - if ((int)Format == SDL3.SDL_AUDIO_S8) + if (Format == SDL_AudioFormat.SDL_AUDIO_S8) return Resolution.Byte; - else if (Format == SDL3.SDL_AUDIO_S16) + else if (Format == SDL3.SDL_AUDIO_S16) // uses constant due to endian return Resolution.Short; else return Resolution.Float; diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index 7f2f442e92..2498389bd3 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -204,7 +204,7 @@ internal void OnLostDeviceEvent(SDL_AudioDeviceID removedDeviceId) private void syncAudioDevices() { int count = 0; - SDL_AudioDeviceID* idArrayPtr = SDL3.SDL_GetAudioOutputDevices(&count); + SDL_AudioDeviceID* idArrayPtr = SDL3.SDL_GetAudioPlaybackDevices(&count); var idArray = ImmutableArray.CreateBuilder(count); var nameArray = ImmutableArray.CreateBuilder(count); @@ -250,10 +250,10 @@ private bool setAudioDevice(SDL_AudioDeviceID targetId) if (deviceStream == null) { - if (targetId == SDL3.SDL_AUDIO_DEVICE_DEFAULT_OUTPUT) + if (targetId == SDL3.SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK) return false; - return setAudioDevice(SDL3.SDL_AUDIO_DEVICE_DEFAULT_OUTPUT); + return setAudioDevice(SDL3.SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK); } SDL3.SDL_ResumeAudioDevice(deviceId); @@ -277,7 +277,7 @@ protected override bool SetAudioDevice(string deviceName = null) if (deviceIndex >= 0) return setAudioDevice(deviceIdArray[deviceIndex]); - return setAudioDevice(SDL3.SDL_AUDIO_DEVICE_DEFAULT_OUTPUT); + return setAudioDevice(SDL3.SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK); } protected override bool SetAudioDevice(int deviceIndex) diff --git a/osu.Framework/Platform/SDL3/SDL3Window.cs b/osu.Framework/Platform/SDL3/SDL3Window.cs index 2aa3047de2..a9b28768f5 100644 --- a/osu.Framework/Platform/SDL3/SDL3Window.cs +++ b/osu.Framework/Platform/SDL3/SDL3Window.cs @@ -582,7 +582,7 @@ protected virtual void HandleEvent(SDL_Event e) private void handleAudioDeviceEvent(SDL_AudioDeviceEvent evtAudioDevice) { - if (evtAudioDevice.iscapture != 0) // capture device + if (evtAudioDevice.recording != 0) // recording device return; switch (evtAudioDevice.type) From 76e9fab172c48a2eaba44406e537b0d69ac834b2 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sat, 20 Jul 2024 18:11:34 +0900 Subject: [PATCH 089/127] Clamp later in SDL3 mixer --- .../Audio/Mixing/SDL3/SDL3AudioMixer.cs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index edf6fa6d18..a5da4d17c8 100644 --- a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs +++ 
b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -73,16 +73,6 @@ private void mixAudio(float[] dst, float[] src, ref int filled, int samples, flo dst[e] = src[e] * left; dst[e + 1] = src[e + 1] * right; } - - if (dst[e] < -1f) - dst[e] = -1f; - else if (dst[e] > 1f) - dst[e] = 1f; - - if (dst[e + 1] < -1f) - dst[e + 1] = -1f; - else if (dst[e + 1] > 1f) - dst[e + 1] = 1f; } if (samples > filled) @@ -157,6 +147,14 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples mixAudio(data, filterArray!, ref filledSamples, filterArrayFilled, 1, 1); } } + + for (int i = 0; i < filledSamples; i++) + { + if (data[i] > 1.0f) + data[i] = 1.0f; + else if (data[i] < -1.0f) + data[i] = -1.0f; + } } private static BiQuadFilter updateFilter(BiQuadFilter? filter, float freq, BQFParameters bqfp) From 8ed0a59efa72ef0eb539b20381738b5ca7062981 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Tue, 6 Aug 2024 14:47:52 +0900 Subject: [PATCH 090/127] Merge decoders into one file and use decoder thread for samples --- osu.Framework/Audio/AudioDecoderManager.cs | 260 ---------- osu.Framework/Audio/BassAudioDecoder.cs | 162 ------ osu.Framework/Audio/FFmpegAudioDecoder.cs | 55 --- .../Audio/SDL3AudioDecoderManager.cs | 462 ++++++++++++++++++ osu.Framework/Audio/SDL3AudioManager.cs | 7 +- .../Audio/Sample/SampleBassFactory.cs | 6 +- osu.Framework/Audio/Sample/SampleFactory.cs | 11 - .../Audio/Sample/SampleSDL3Factory.cs | 50 +- osu.Framework/Audio/Track/TrackSDL3.cs | 12 +- osu.Framework/Audio/Track/Waveform.cs | 2 +- 10 files changed, 500 insertions(+), 527 deletions(-) delete mode 100644 osu.Framework/Audio/AudioDecoderManager.cs delete mode 100644 osu.Framework/Audio/BassAudioDecoder.cs delete mode 100644 osu.Framework/Audio/FFmpegAudioDecoder.cs create mode 100644 osu.Framework/Audio/SDL3AudioDecoderManager.cs diff --git a/osu.Framework/Audio/AudioDecoderManager.cs b/osu.Framework/Audio/AudioDecoderManager.cs deleted file mode 100644 index 2a2f13cf68..0000000000 --- a/osu.Framework/Audio/AudioDecoderManager.cs +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. -// See the LICENCE file in the repository root for full licence text. - -using System; -using System.IO; -using System.Threading; -using osu.Framework.Logging; -using System.Collections.Generic; -using SDL; - -namespace osu.Framework.Audio -{ - /// - /// Decodes audio from , and convert it to appropriate format. - /// - public class AudioDecoderManager : IDisposable - { - public abstract class AudioDecoder - { - protected readonly int Rate; - protected readonly int Channels; - protected readonly bool IsTrack; - protected readonly SDL_AudioFormat Format; - protected readonly Stream Stream; - protected readonly bool AutoDisposeStream; - protected readonly PassDataDelegate? Pass; - - private volatile int bitrate; - - public int Bitrate - { - get => bitrate; - set => Interlocked.Exchange(ref bitrate, value); - } - - private double length; - - public double Length - { - get => length; - set => Interlocked.Exchange(ref length, value); - } - - private long bytelength; - - public long ByteLength - { - get => Interlocked.Read(ref bytelength); - set => Interlocked.Exchange(ref bytelength, value); - } - - internal volatile bool StopJob; - internal volatile bool Loading; - - protected AudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? 
pass) - { - Rate = rate; - Channels = channels; - IsTrack = isTrack; - Format = format; - Stream = stream; - AutoDisposeStream = autoDisposeStream; - Pass = pass; - } - - public void Stop() - { - StopJob = true; - } - - // Not using IDisposable since things must be handled in a specific thread - internal virtual void Free() - { - if (AutoDisposeStream) - Stream.Dispose(); - } - - protected abstract int LoadFromStreamInternal(out byte[] decoded); - - /// - /// Decodes and resamples audio from job.Stream, and pass it to decoded. - /// - /// Decoded audio - public int LoadFromStream(out byte[] decoded) - { - int read = 0; - - try - { - read = LoadFromStreamInternal(out decoded); - } - catch (Exception e) - { - Logger.Log(e.Message, level: LogLevel.Important); - Loading = false; - decoded = Array.Empty(); - } - finally - { - if (!Loading) - Free(); - } - - Pass?.Invoke(decoded, read, this, !Loading); - return read; - } - } - - private readonly LinkedList jobs = new LinkedList(); - - public delegate void PassDataDelegate(byte[] data, int length, AudioDecoder decoderData, bool done); - - private readonly Thread decoderThread; - private readonly AutoResetEvent decoderWaitHandle; - private readonly CancellationTokenSource tokenSource; - - internal static AudioDecoder CreateDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, - bool autoDisposeStream = true, PassDataDelegate? pass = null) - { - AudioDecoder decoder = ManagedBass.Bass.CurrentDevice >= 0 - ? new BassAudioDecoder(rate, channels, isTrack, format, stream, autoDisposeStream, pass) - : new FFmpegAudioDecoder(rate, channels, isTrack, format, stream, autoDisposeStream, pass); - - return decoder; - } - - public AudioDecoderManager() - { - tokenSource = new CancellationTokenSource(); - decoderWaitHandle = new AutoResetEvent(false); - - decoderThread = new Thread(() => loop(tokenSource.Token)) - { - IsBackground = true - }; - - decoderThread.Start(); - } - - public AudioDecoder StartDecodingAsync(int rate, int channels, SDL_AudioFormat format, Stream stream, PassDataDelegate pass) - { - AudioDecoder decoder = CreateDecoder(rate, channels, true, format, stream, true, pass); - - lock (jobs) - jobs.AddFirst(decoder); - - decoderWaitHandle.Set(); - - return decoder; - } - - public static byte[] DecodeAudio(int freq, int channels, SDL_AudioFormat format, Stream stream, out int size) - { - AudioDecoder decoder = CreateDecoder(freq, channels, false, format, stream); - - int read = decoder.LoadFromStream(out byte[] decoded); - - if (!decoder.Loading) - { - size = read; - return decoded; - } - - // fallback if it couldn't decode at once - using (MemoryStream memoryStream = new MemoryStream()) - { - memoryStream.Write(decoded, 0, read); - - while (decoder.Loading) - { - read = decoder.LoadFromStream(out decoded); - memoryStream.Write(decoded, 0, read); - } - - size = (int)memoryStream.Length; - return memoryStream.ToArray(); - } - } - - private void loop(CancellationToken token) - { - while (!token.IsCancellationRequested) - { - int jobCount; - - lock (jobs) - { - jobCount = jobs.Count; - - if (jobCount > 0) - { - var node = jobs.First; - - while (node != null) - { - var next = node.Next; - AudioDecoder decoder = node.Value; - - if (decoder.StopJob) - { - decoder.Free(); - jobs.Remove(node); - } - else - { - decoder.LoadFromStream(out _); - } - - if (!decoder.Loading) - jobs.Remove(node); - - node = next; - } - } - } - - if (jobCount <= 0) - decoderWaitHandle?.WaitOne(); - } - } - - private bool disposedValue; - - 
protected virtual void Dispose(bool disposing) - { - if (!disposedValue) - { - tokenSource.Cancel(); - decoderWaitHandle.Set(); - - tokenSource.Dispose(); - decoderThread.Join(); - decoderWaitHandle.Dispose(); - - lock (jobs) - { - foreach (var job in jobs) - { - job.Free(); - } - - jobs.Clear(); - } - - disposedValue = true; - } - } - - ~AudioDecoderManager() - { - Dispose(false); - } - - public void Dispose() - { - Dispose(true); - GC.SuppressFinalize(this); - } - } -} diff --git a/osu.Framework/Audio/BassAudioDecoder.cs b/osu.Framework/Audio/BassAudioDecoder.cs deleted file mode 100644 index e89cfb8996..0000000000 --- a/osu.Framework/Audio/BassAudioDecoder.cs +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. -// See the LICENCE file in the repository root for full licence text. - -using System; -using System.IO; -using ManagedBass; -using ManagedBass.Mix; -using osu.Framework.Audio.Callbacks; -using SDL; -using static osu.Framework.Audio.AudioDecoderManager; - -namespace osu.Framework.Audio -{ - /// - /// This is only for using BASS as a decoder for SDL3 backend! - /// - internal class BassAudioDecoder : AudioDecoder - { - private int decodeStream; - private FileCallbacks? fileCallbacks; - - private int syncHandle; - private SyncCallback? syncCallback; - - private int resampler; - - private byte[]? decodeData; - - private Resolution resolution - { - get - { - if (Format == SDL_AudioFormat.SDL_AUDIO_S8) - return Resolution.Byte; - else if (Format == SDL3.SDL_AUDIO_S16) // uses constant due to endian - return Resolution.Short; - else - return Resolution.Float; - } - } - - private ushort bits => (ushort)SDL3.SDL_AUDIO_BITSIZE(Format); - - public BassAudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) - : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) - { - } - - internal override void Free() - { - if (syncHandle != 0) - { - Bass.ChannelRemoveSync(resampler == 0 ? 
decodeStream : resampler, syncHandle); - syncHandle = 0; - } - - fileCallbacks?.Dispose(); - syncCallback?.Dispose(); - - fileCallbacks = null; - syncCallback = null; - - decodeData = null; - - if (resampler != 0) - { - Bass.StreamFree(resampler); - resampler = 0; - } - - if (decodeStream != 0) - { - Bass.StreamFree(decodeStream); - decodeStream = 0; - } - - base.Free(); - } - - private static readonly object bass_sync_lock = new object(); - - protected override int LoadFromStreamInternal(out byte[] decoded) - { - if (Bass.CurrentDevice < 0) - throw new InvalidOperationException("Initialize a BASS device to decode audio"); - - lock (bass_sync_lock) - { - if (!Loading) - { - fileCallbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); - syncCallback = new SyncCallback((_, _, _, _) => - { - Loading = false; - }); - - BassFlags bassFlags = BassFlags.Decode | resolution.ToBassFlag(); - if (IsTrack) bassFlags |= BassFlags.Prescan; - - decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, fileCallbacks.Callbacks); - - if (decodeStream == 0) - throw new FormatException($"Couldn't create stream: {Bass.LastError}"); - - if (Bass.ChannelGetInfo(decodeStream, out var info)) - { - ByteLength = Bass.ChannelGetLength(decodeStream); - Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000.0d; - Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(decodeStream, ChannelAttribute.Bitrate)); - - if (info.Channels != Channels || info.Frequency != Rate) - { - resampler = BassMix.CreateMixerStream(Rate, Channels, BassFlags.MixerEnd | BassFlags.Decode | resolution.ToBassFlag()); - - if (resampler == 0) - throw new FormatException($"Failed to create BASS Mixer: {Bass.LastError}"); - - if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanNoRampin | BassFlags.MixerChanLimit)) - throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); - - ByteLength /= info.Channels * (bits / 8); - ByteLength = (long)Math.Ceiling((decimal)ByteLength / info.Frequency * Rate); - ByteLength *= Channels * (bits / 8); - } - } - else - { - if (IsTrack) - throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); - } - - syncHandle = Bass.ChannelSetSync(resampler == 0 ? decodeStream : resampler, SyncFlags.End | SyncFlags.Onetime, 0, syncCallback.Callback, syncCallback.Handle); - - Loading = true; - } - - int handle = resampler == 0 ? decodeStream : resampler; - - int bufferLen = (int)Bass.ChannelSeconds2Bytes(handle, 1); - - if (bufferLen <= 0) - bufferLen = 44100 * 2 * 4 * 1; - - if (decodeData == null || decodeData.Length < bufferLen) - decodeData = new byte[bufferLen]; - - int got = Bass.ChannelGetData(handle, decodeData, bufferLen); - - if (got == -1) - { - Loading = false; - - if (Bass.LastError != Errors.Ended) - throw new FormatException($"Couldn't decode: {Bass.LastError}"); - } - - decoded = decodeData; - return Math.Max(0, got); - } - } - } -} diff --git a/osu.Framework/Audio/FFmpegAudioDecoder.cs b/osu.Framework/Audio/FFmpegAudioDecoder.cs deleted file mode 100644 index 3d8e24aa17..0000000000 --- a/osu.Framework/Audio/FFmpegAudioDecoder.cs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. -// See the LICENCE file in the repository root for full licence text. 
- -using System; -using System.IO; -using osu.Framework.Graphics.Video; -using SDL; -using static osu.Framework.Audio.AudioDecoderManager; - -namespace osu.Framework.Audio -{ - internal class FFmpegAudioDecoder : AudioDecoder - { - private VideoDecoder? ffmpeg; - private byte[]? decodeData; - - public FFmpegAudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) - : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) - { - } - - internal override void Free() - { - decodeData = null; - - ffmpeg?.Dispose(); - base.Free(); - } - - protected override int LoadFromStreamInternal(out byte[] decoded) - { - if (ffmpeg == null) - { - ffmpeg = new VideoDecoder(Stream, Rate, Channels, SDL3.SDL_AUDIO_ISFLOAT(Format), SDL3.SDL_AUDIO_BITSIZE(Format), SDL3.SDL_AUDIO_ISSIGNED(Format)); - - ffmpeg.PrepareDecoding(); - ffmpeg.RecreateCodecContext(); - - Bitrate = (int)ffmpeg.Bitrate; - Length = ffmpeg.Duration; - ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * Rate) * Channels * (SDL3.SDL_AUDIO_BITSIZE(Format) / 8); // FIXME - - Loading = true; - } - - int got = ffmpeg.DecodeNextAudioFrame(32, ref decodeData, !IsTrack); - - if (ffmpeg.State != VideoDecoder.DecoderState.Running) - Loading = false; - - decoded = decodeData; - return got; - } - } -} diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs new file mode 100644 index 0000000000..3961eae444 --- /dev/null +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -0,0 +1,462 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using System.IO; +using System.Threading; +using osu.Framework.Logging; +using System.Collections.Generic; +using SDL; +using ManagedBass.Mix; +using ManagedBass; +using osu.Framework.Audio.Callbacks; +using osu.Framework.Graphics.Video; + +namespace osu.Framework.Audio +{ + /// + /// Decodes audio from , and convert it to appropriate format. + /// It needs a lot of polishing... + /// + public class SDL3AudioDecoderManager : IDisposable + { + private readonly LinkedList jobs = new LinkedList(); + + public delegate void PassDataDelegate(byte[] data, int length, AudioDecoder decoderData, bool done); + + private readonly Thread decoderThread; + private readonly AutoResetEvent decoderWaitHandle; + private readonly CancellationTokenSource tokenSource; + + internal static AudioDecoder CreateDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, + bool autoDisposeStream = true, PassDataDelegate? pass = null) + { + AudioDecoder decoder = Bass.CurrentDevice >= 0 + ? 
new BassAudioDecoder(rate, channels, isTrack, format, stream, autoDisposeStream, pass) + : new FFmpegAudioDecoder(rate, channels, isTrack, format, stream, autoDisposeStream, pass); + + return decoder; + } + + public SDL3AudioDecoderManager() + { + tokenSource = new CancellationTokenSource(); + decoderWaitHandle = new AutoResetEvent(false); + + decoderThread = new Thread(() => loop(tokenSource.Token)) + { + IsBackground = true + }; + + decoderThread.Start(); + } + + public AudioDecoder StartDecodingAsync(int rate, int channels, SDL_AudioFormat format, Stream stream, PassDataDelegate pass, bool isTrack) + { + if (disposedValue) + throw new InvalidOperationException($"Cannot start decoding on disposed {nameof(SDL3AudioDecoderManager)}"); + + AudioDecoder decoder = CreateDecoder(rate, channels, isTrack, format, stream, true, pass); + + lock (jobs) + jobs.AddFirst(decoder); + + decoderWaitHandle.Set(); + + return decoder; + } + + private void loop(CancellationToken token) + { + while (!token.IsCancellationRequested) + { + int jobCount; + + lock (jobs) + { + jobCount = jobs.Count; + + if (jobCount > 0) + { + var node = jobs.First; + + while (node != null) + { + var next = node.Next; + AudioDecoder decoder = node.Value; + + if (decoder.StopJob) + { + decoder.Free(); + jobs.Remove(node); + } + else + { + int read = decodeAudio(decoder, out byte[] decoded); + decoder.Pass?.Invoke(decoded, read, decoder, !decoder.Loading); + } + + if (!decoder.Loading) + jobs.Remove(node); + + node = next; + } + } + } + + if (jobCount <= 0) + decoderWaitHandle.WaitOne(); + } + } + + private bool disposedValue; + + protected virtual void Dispose(bool disposing) + { + if (!disposedValue) + { + tokenSource.Cancel(); + decoderWaitHandle.Set(); + + decoderThread.Join(); + tokenSource.Dispose(); + decoderWaitHandle.Dispose(); + + lock (jobs) + { + foreach (var job in jobs) + { + job.Free(); + } + + jobs.Clear(); + } + + disposedValue = true; + } + } + + ~SDL3AudioDecoderManager() + { + Dispose(false); + } + + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + + private static int decodeAudio(AudioDecoder decoder, out byte[] decoded) + { + int read = decoder.LoadFromStream(out byte[] temp); + + if (!decoder.Loading || decoder.IsTrack) + { + decoded = temp; + return read; + } + + // fallback if it couldn't decode at once + using (MemoryStream memoryStream = new MemoryStream()) + { + memoryStream.Write(temp, 0, read); + + while (decoder.Loading) + { + read = decoder.LoadFromStream(out temp); + memoryStream.Write(temp, 0, read); + } + + decoded = memoryStream.ToArray(); + return (int)memoryStream.Length; + } + } + + public abstract class AudioDecoder + { + internal readonly int Rate; + internal readonly int Channels; + internal readonly bool IsTrack; + internal readonly SDL_AudioFormat Format; + internal readonly Stream Stream; + internal readonly bool AutoDisposeStream; + internal readonly PassDataDelegate? 
Pass; + + private int bitrate; + + public int Bitrate + { + get => bitrate; + set => Interlocked.Exchange(ref bitrate, value); + } + + private double length; + + public double Length + { + get => length; + set => Interlocked.Exchange(ref length, value); + } + + private long byteLength; + + public long ByteLength + { + get => byteLength; + set => Interlocked.Exchange(ref byteLength, value); + } + + internal volatile bool StopJob; + + private volatile bool loading; + public bool Loading { get => loading; protected set => loading = value; } + + protected AudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) + { + Rate = rate; + Channels = channels; + IsTrack = isTrack; + Format = format; + Stream = stream; + AutoDisposeStream = autoDisposeStream; + Pass = pass; + } + + public void Stop() + { + StopJob = true; + } + + // Not using IDisposable since things must be handled in a decoder thread + internal virtual void Free() + { + if (AutoDisposeStream) + Stream.Dispose(); + } + + protected abstract int LoadFromStreamInternal(out byte[] decoded); + + /// + /// Decodes and resamples audio from job.Stream, and pass it to decoded. + /// You may need to run this multiple times. + /// + /// Decoded audio + public int LoadFromStream(out byte[] decoded) + { + int read = 0; + + try + { + read = LoadFromStreamInternal(out decoded); + } + catch (Exception e) + { + Logger.Log(e.Message, level: LogLevel.Important); + Loading = false; + decoded = Array.Empty(); + } + finally + { + if (!Loading) + Free(); + } + + return read; + } + } + + /// + /// This is only for using BASS as a decoder for SDL3 backend! + /// + internal class BassAudioDecoder : AudioDecoder + { + private int decodeStream; + private FileCallbacks? fileCallbacks; + + private int syncHandle; + private SyncCallback? syncCallback; + + private int resampler; + + private byte[]? decodeData; + + private Resolution resolution + { + get + { + if (Format == SDL_AudioFormat.SDL_AUDIO_S8) + return Resolution.Byte; + else if (Format == SDL3.SDL_AUDIO_S16) // uses constant due to endian + return Resolution.Short; + else + return Resolution.Float; + } + } + + private ushort bits => (ushort)SDL3.SDL_AUDIO_BITSIZE(Format); + + public BassAudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) + : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) + { + } + + internal override void Free() + { + if (syncHandle != 0) + { + Bass.ChannelRemoveSync(resampler == 0 ? 
decodeStream : resampler, syncHandle); + syncHandle = 0; + } + + fileCallbacks?.Dispose(); + syncCallback?.Dispose(); + + fileCallbacks = null; + syncCallback = null; + + decodeData = null; + + if (resampler != 0) + { + Bass.StreamFree(resampler); + resampler = 0; + } + + if (decodeStream != 0) + { + Bass.StreamFree(decodeStream); + decodeStream = 0; + } + + base.Free(); + } + + private static readonly object bass_sync_lock = new object(); + + protected override int LoadFromStreamInternal(out byte[] decoded) + { + if (Bass.CurrentDevice < 0) + throw new InvalidOperationException("Initialize a BASS device to decode audio"); + + lock (bass_sync_lock) + { + if (!Loading) + { + fileCallbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); + syncCallback = new SyncCallback((_, _, _, _) => + { + Loading = false; + }); + + BassFlags bassFlags = BassFlags.Decode | resolution.ToBassFlag(); + if (IsTrack) bassFlags |= BassFlags.Prescan; + + decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, fileCallbacks.Callbacks); + + if (decodeStream == 0) + throw new FormatException($"Couldn't create stream: {Bass.LastError}"); + + if (Bass.ChannelGetInfo(decodeStream, out var info)) + { + ByteLength = Bass.ChannelGetLength(decodeStream); + Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000.0d; + Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(decodeStream, ChannelAttribute.Bitrate)); + + if (info.Channels != Channels || info.Frequency != Rate) + { + resampler = BassMix.CreateMixerStream(Rate, Channels, BassFlags.MixerEnd | BassFlags.Decode | resolution.ToBassFlag()); + + if (resampler == 0) + throw new FormatException($"Failed to create BASS Mixer: {Bass.LastError}"); + + if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanNoRampin | BassFlags.MixerChanLimit)) + throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); + + ByteLength /= info.Channels * (bits / 8); + ByteLength = (long)Math.Ceiling((decimal)ByteLength / info.Frequency * Rate); + ByteLength *= Channels * (bits / 8); + } + } + else + { + if (IsTrack) + throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); + } + + syncHandle = Bass.ChannelSetSync(resampler == 0 ? decodeStream : resampler, SyncFlags.End | SyncFlags.Onetime, 0, syncCallback.Callback, syncCallback.Handle); + + Loading = true; + } + + int handle = resampler == 0 ? decodeStream : resampler; + + int bufferLen = (int)Bass.ChannelSeconds2Bytes(handle, 1); + + if (bufferLen <= 0) + bufferLen = 44100 * 2 * 4 * 1; + + if (decodeData == null || decodeData.Length < bufferLen) + decodeData = new byte[bufferLen]; + + int got = Bass.ChannelGetData(handle, decodeData, bufferLen); + + if (got == -1) + { + Loading = false; + + if (Bass.LastError != Errors.Ended) + throw new FormatException($"Couldn't decode: {Bass.LastError}"); + } + + decoded = decodeData; + return Math.Max(0, got); + } + } + } + + internal class FFmpegAudioDecoder : AudioDecoder + { + private VideoDecoder? ffmpeg; + private byte[]? decodeData; + + public FFmpegAudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? 
pass) + : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) + { + } + + internal override void Free() + { + decodeData = null; + + ffmpeg?.Dispose(); + base.Free(); + } + + protected override int LoadFromStreamInternal(out byte[] decoded) + { + if (ffmpeg == null) + { + ffmpeg = new VideoDecoder(Stream, Rate, Channels, SDL3.SDL_AUDIO_ISFLOAT(Format), SDL3.SDL_AUDIO_BITSIZE(Format), SDL3.SDL_AUDIO_ISSIGNED(Format)); + + ffmpeg.PrepareDecoding(); + ffmpeg.RecreateCodecContext(); + + Bitrate = (int)ffmpeg.Bitrate; + Length = ffmpeg.Duration; + ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * Rate) * Channels * (SDL3.SDL_AUDIO_BITSIZE(Format) / 8); // FIXME + + Loading = true; + } + + int got = ffmpeg.DecodeNextAudioFrame(32, ref decodeData, !IsTrack); + + if (ffmpeg.State != VideoDecoder.DecoderState.Running) + Loading = false; + + decoded = decodeData; + return got; + } + } + } +} diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index 2498389bd3..36ba27f756 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -34,7 +34,7 @@ public unsafe class SDL3AudioManager : AudioManager private SDL_AudioSpec spec; private int bufferSize = (int)(AUDIO_FREQ * 0.01); // 10ms, will be calculated later when opening audio device, it works as a base value until then. - private static readonly AudioDecoderManager decoder = new AudioDecoderManager(); + internal static SDL3AudioDecoderManager DecoderManager { get; } = new SDL3AudioDecoderManager(); private readonly List sdlMixerList = new List(); @@ -292,8 +292,7 @@ protected override bool SetAudioDevice(int deviceIndex) internal override Track.Track GetNewTrack(Stream data, string name) { - TrackSDL3 track = new TrackSDL3(name, spec.freq, spec.channels, bufferSize); - EnqueueAction(() => decoder.StartDecodingAsync(spec.freq, spec.channels, spec.format, data, track.ReceiveAudioData)); + TrackSDL3 track = new TrackSDL3(name, data, spec, bufferSize); return track; } @@ -304,7 +303,7 @@ protected override void Dispose(bool disposing) { base.Dispose(disposing); - decoder?.Dispose(); + DecoderManager?.Dispose(); if (deviceStream != null) { diff --git a/osu.Framework/Audio/Sample/SampleBassFactory.cs b/osu.Framework/Audio/Sample/SampleBassFactory.cs index 5274c4ac36..12f0fc8531 100644 --- a/osu.Framework/Audio/Sample/SampleBassFactory.cs +++ b/osu.Framework/Audio/Sample/SampleBassFactory.cs @@ -31,6 +31,8 @@ public SampleBassFactory(byte[] data, string name, BassAudioMixer mixer, int pla { this.data = data; this.mixer = mixer; + + EnqueueAction(loadSample); } protected override void UpdatePlaybackConcurrency(ValueChangedEvent concurrency) @@ -51,10 +53,10 @@ internal override void UpdateDevice(int deviceIndex) { // The sample may not have already loaded if a device wasn't present in a previous load attempt. if (!IsLoaded) - LoadSample(); + loadSample(); } - protected override void LoadSample() + private void loadSample() { Debug.Assert(CanPerformInline); Debug.Assert(!IsLoaded); diff --git a/osu.Framework/Audio/Sample/SampleFactory.cs b/osu.Framework/Audio/Sample/SampleFactory.cs index bee223cb3e..a868e71df8 100644 --- a/osu.Framework/Audio/Sample/SampleFactory.cs +++ b/osu.Framework/Audio/Sample/SampleFactory.cs @@ -1,7 +1,6 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. 
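
(Stepping back from the individual hunks: the SDL3AudioDecoderManager introduced earlier in this commit is built around a single worker thread that drains a job list and parks on an AutoResetEvent when idle. The stripped-down sketch below shows only that queue shape; the WorkQueue/Enqueue names are placeholders and not part of the framework's API.)

    using System;
    using System.Collections.Generic;
    using System.Threading;

    internal class WorkQueue : IDisposable
    {
        private readonly LinkedList<Action> jobs = new LinkedList<Action>();
        private readonly AutoResetEvent wake = new AutoResetEvent(false);
        private readonly CancellationTokenSource cts = new CancellationTokenSource();
        private readonly Thread worker;

        public WorkQueue()
        {
            worker = new Thread(() => loop(cts.Token)) { IsBackground = true };
            worker.Start();
        }

        public void Enqueue(Action job)
        {
            lock (jobs)
                jobs.AddFirst(job); // newest work is picked up first, as in the decoder manager

            wake.Set(); // wake the worker if it is parked in WaitOne()
        }

        private void loop(CancellationToken token)
        {
            while (!token.IsCancellationRequested)
            {
                int count;

                lock (jobs)
                {
                    count = jobs.Count;
                    var node = jobs.First;

                    while (node != null)
                    {
                        var next = node.Next;
                        node.Value(); // run one unit of work (the real manager re-queues unfinished decodes)
                        jobs.Remove(node);
                        node = next;
                    }
                }

                if (count == 0)
                    wake.WaitOne(); // sleep until Enqueue() signals new work
            }
        }

        public void Dispose()
        {
            cts.Cancel();
            wake.Set();
            worker.Join();
            cts.Dispose();
            wake.Dispose();
        }
    }
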
-using System.Threading.Tasks; using osu.Framework.Bindables; namespace osu.Framework.Audio.Sample @@ -23,26 +22,16 @@ internal abstract class SampleFactory : AudioCollectionManager internal readonly Bindable PlaybackConcurrency = new Bindable(Sample.DEFAULT_CONCURRENCY); - protected Task? LoadSampleTask; - protected SampleFactory(string name, int playbackConcurrency) { Name = name; PlaybackConcurrency.Value = playbackConcurrency; - LoadSampleTask = EnqueueAction(() => - { - LoadSample(); - LoadSampleTask = null; - }); - PlaybackConcurrency.BindValueChanged(UpdatePlaybackConcurrency); } protected abstract void UpdatePlaybackConcurrency(ValueChangedEvent concurrency); - protected abstract void LoadSample(); - public abstract Sample CreateSample(); protected void SampleFactoryOnPlay(Sample sample) diff --git a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs index b1e3647671..0ef6018edf 100644 --- a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs @@ -2,18 +2,17 @@ // See the LICENCE file in the repository root for full licence text. using System; -using System.Diagnostics; using System.IO; +using System.Threading; using osu.Framework.Audio.Mixing.SDL3; using osu.Framework.Bindables; -using osu.Framework.Extensions; using SDL; namespace osu.Framework.Audio.Sample { internal class SampleSDL3Factory : SampleFactory { - private bool isLoaded; + private volatile bool isLoaded; public override bool IsLoaded => isLoaded; private readonly SDL3AudioMixer mixer; @@ -21,47 +20,42 @@ internal class SampleSDL3Factory : SampleFactory private float[] decodedAudio = Array.Empty(); - private Stream? stream; + private readonly AutoResetEvent completion = new AutoResetEvent(false); + + private SDL3AudioDecoderManager.AudioDecoder? 
decoder; public SampleSDL3Factory(Stream stream, string name, SDL3AudioMixer mixer, int playbackConcurrency, SDL_AudioSpec spec) : base(name, playbackConcurrency) { - this.stream = stream; this.mixer = mixer; this.spec = spec; + + decoder = SDL3AudioManager.DecoderManager.StartDecodingAsync(spec.freq, spec.channels, spec.format, stream, ReceiveAudioData, false); } - protected override void LoadSample() + internal void ReceiveAudioData(byte[] audio, int byteLen, SDL3AudioDecoderManager.AudioDecoder data, bool done) { - Debug.Assert(CanPerformInline); - Debug.Assert(!IsLoaded); - - if (stream == null) + if (IsDisposed) return; - try - { - byte[] audio = AudioDecoderManager.DecodeAudio(spec.freq, spec.channels, spec.format, stream, out int size); + decoder = null; - if (size > 0) - { - decodedAudio = new float[size / 4]; - Buffer.BlockCopy(audio, 0, decodedAudio, 0, size); - } - - Length = size / 4d / spec.freq / spec.channels * 1000d; - isLoaded = true; - } - finally + if (byteLen > 0) { - stream.Dispose(); - stream = null; + decodedAudio = new float[byteLen / 4]; + Buffer.BlockCopy(audio, 0, decodedAudio, 0, byteLen); } + + Length = byteLen / 4d / spec.freq / spec.channels * 1000d; + isLoaded = true; + + completion.Set(); } public SampleSDL3AudioPlayer CreatePlayer() { - LoadSampleTask?.WaitSafely(); + if (!isLoaded) + completion.WaitOne(10); return new SampleSDL3AudioPlayer(decodedAudio, spec.freq, spec.channels); } @@ -82,11 +76,11 @@ protected override void Dispose(bool disposing) if (IsDisposed) return; - stream?.Dispose(); - stream = null; + decoder?.Stop(); decodedAudio = Array.Empty(); + completion.Dispose(); base.Dispose(disposing); } } diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index f0773bfcc1..b48ce5b892 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -2,11 +2,13 @@ // See the LICENCE file in the repository root for full licence text. using System; +using System.IO; using System.Threading; using System.Threading.Tasks; using NAudio.Dsp; using osu.Framework.Audio.Mixing.SDL3; using osu.Framework.Extensions; +using SDL; namespace osu.Framework.Audio.Track { @@ -31,9 +33,11 @@ public sealed class TrackSDL3 : Track, ISDL3AudioChannel private volatile int bitrate; public override int? Bitrate => bitrate; - public TrackSDL3(string name, int rate, int channels, int samples) + public TrackSDL3(string name, Stream data, SDL_AudioSpec spec, int samples) : base(name) { + EnqueueAction(() => SDL3AudioManager.DecoderManager.StartDecodingAsync(spec.freq, spec.channels, spec.format, data, ReceiveAudioData, true)); + // SoundTouch limitation const float tempo_minimum_supported = 0.05f; AggregateTempo.ValueChanged += t => @@ -42,14 +46,14 @@ public TrackSDL3(string name, int rate, int channels, int samples) throw new ArgumentException($"{nameof(TrackSDL3)} does not support {nameof(Tempo)} specifications below {tempo_minimum_supported}. Use {nameof(Frequency)} instead."); }; - player = new TempoSDL3AudioPlayer(rate, channels, samples); + player = new TempoSDL3AudioPlayer(spec.freq, spec.channels, samples); } private readonly object syncRoot = new object(); - private AudioDecoderManager.AudioDecoder? decodeData; + private SDL3AudioDecoderManager.AudioDecoder? 
decodeData; - internal void ReceiveAudioData(byte[] audio, int length, AudioDecoderManager.AudioDecoder data, bool done) + internal void ReceiveAudioData(byte[] audio, int length, SDL3AudioDecoderManager.AudioDecoder data, bool done) { if (IsDisposed) return; diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index df5dd9e57e..e9efe42f88 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -88,7 +88,7 @@ public Waveform(Stream? data) channels = 2; // AudioDecoder will resample data into specified sample rate and channels (44100hz 2ch float) - AudioDecoderManager.AudioDecoder decoder = AudioDecoderManager.CreateDecoder(sample_rate, channels, true, SDL.SDL3.SDL_AUDIO_F32, data, false); + SDL3AudioDecoderManager.AudioDecoder decoder = SDL3AudioDecoderManager.CreateDecoder(sample_rate, channels, true, SDL.SDL3.SDL_AUDIO_F32, data, false); Complex[] complexBuffer = ArrayPool.Shared.Rent(fft_samples); From d43b68cfdfe4bd0b6128f565e9a48f055b2fa6aa Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Tue, 6 Aug 2024 21:11:02 +0900 Subject: [PATCH 091/127] Polish SDL3AudioDecoder a bit --- .../Audio/SDL3AudioDecoderManager.cs | 364 ++++++++++-------- .../Audio/Sample/SampleSDL3Factory.cs | 6 +- osu.Framework/Audio/Track/TrackSDL3.cs | 6 +- osu.Framework/Audio/Track/Waveform.cs | 10 +- 4 files changed, 229 insertions(+), 157 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index 3961eae444..69226127e4 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -11,6 +11,7 @@ using ManagedBass; using osu.Framework.Audio.Callbacks; using osu.Framework.Graphics.Video; +using static osu.Framework.Audio.SDL3AudioDecoderManager; namespace osu.Framework.Audio { @@ -20,24 +21,44 @@ namespace osu.Framework.Audio /// public class SDL3AudioDecoderManager : IDisposable { - private readonly LinkedList jobs = new LinkedList(); + private readonly LinkedList jobs = new LinkedList(); - public delegate void PassDataDelegate(byte[] data, int length, AudioDecoder decoderData, bool done); + /// + /// Delegate to get decoded audio data from the decoder. + /// + /// Decoded audio. The format depends on you specified, + /// so you may need to actual data format. + /// This may be used by decoder later to reduce allocation, so you need to copy the data before exiting from this delegate, otherwise you may end up with wrong data. + /// Length in byte of decoded audio. Use this instead of data.Length + /// Associated . + /// Whether if this is the last data or not. + public delegate void PassDataDelegate(byte[] data, int length, SDL3AudioDecoder decoderData, bool done); private readonly Thread decoderThread; private readonly AutoResetEvent decoderWaitHandle; private readonly CancellationTokenSource tokenSource; - internal static AudioDecoder CreateDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, - bool autoDisposeStream = true, PassDataDelegate? pass = null) + /// + /// Creates a new decoder that is not managed by the decoder thread. + /// + /// Refer to + /// Refer to + /// Refer to + /// Refer to + /// Refer to + /// A new instance. + internal static SDL3AudioDecoder CreateDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream = true, PassDataDelegate? pass = null) { - AudioDecoder decoder = Bass.CurrentDevice >= 0 - ? 
new BassAudioDecoder(rate, channels, isTrack, format, stream, autoDisposeStream, pass) - : new FFmpegAudioDecoder(rate, channels, isTrack, format, stream, autoDisposeStream, pass); + SDL3AudioDecoder decoder = Bass.CurrentDevice >= 0 + ? new SDL3AudioDecoder.BassAudioDecoder(stream, audioSpec, isTrack, autoDisposeStream, pass) + : new SDL3AudioDecoder.FFmpegAudioDecoder(stream, audioSpec, isTrack, autoDisposeStream, pass); return decoder; } + /// + /// Starts a decoder thread. + /// public SDL3AudioDecoderManager() { tokenSource = new CancellationTokenSource(); @@ -51,12 +72,20 @@ public SDL3AudioDecoderManager() decoderThread.Start(); } - public AudioDecoder StartDecodingAsync(int rate, int channels, SDL_AudioFormat format, Stream stream, PassDataDelegate pass, bool isTrack) + /// + /// Creates a new decoder, and adds it to the job list of a decoder thread. + /// + /// Refer to + /// Refer to + /// Refer to + /// Refer to + /// A new instance. + public SDL3AudioDecoder StartDecodingAsync(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, PassDataDelegate pass) { if (disposedValue) throw new InvalidOperationException($"Cannot start decoding on disposed {nameof(SDL3AudioDecoderManager)}"); - AudioDecoder decoder = CreateDecoder(rate, channels, isTrack, format, stream, true, pass); + SDL3AudioDecoder decoder = CreateDecoder(stream, audioSpec, isTrack, true, pass); lock (jobs) jobs.AddFirst(decoder); @@ -83,7 +112,7 @@ private void loop(CancellationToken token) while (node != null) { var next = node.Next; - AudioDecoder decoder = node.Value; + SDL3AudioDecoder decoder = node.Value; if (decoder.StopJob) { @@ -147,7 +176,7 @@ public void Dispose() GC.SuppressFinalize(this); } - private static int decodeAudio(AudioDecoder decoder, out byte[] decoded) + private static int decodeAudio(SDL3AudioDecoder decoder, out byte[] decoded) { int read = decoder.LoadFromStream(out byte[] temp); @@ -172,104 +201,143 @@ private static int decodeAudio(AudioDecoder decoder, out byte[] decoded) return (int)memoryStream.Length; } } + } + + /// + /// Contains decoder information, and perform the actual decoding. + /// + public abstract class SDL3AudioDecoder + { + /// + /// Decoder will decode audio data from this. + /// It accepts most formats. (e.g. MP3, OGG, WAV and so on...) + /// + internal readonly Stream Stream; - public abstract class AudioDecoder + /// + /// Decoder will convert audio data according to this spec if needed. + /// + internal readonly SDL_AudioSpec AudioSpec; + + /// + /// Decoder will call multiple times with partial data if true. + /// It's a receiver's job to combine the data in this case. Otherwise, It will call only once with the entirely decoded data if false. + /// + internal readonly bool IsTrack; + + /// + /// It will automatically dispose once decoding is done/failed. + /// + internal readonly bool AutoDisposeStream; + + /// + /// Decoder will call this once or more to pass the decoded audio data. + /// + internal readonly PassDataDelegate? Pass; + + private int bitrate; + + /// + /// Audio bitrate. Decoder may fill this in after the first call of . + /// + public int Bitrate { - internal readonly int Rate; - internal readonly int Channels; - internal readonly bool IsTrack; - internal readonly SDL_AudioFormat Format; - internal readonly Stream Stream; - internal readonly bool AutoDisposeStream; - internal readonly PassDataDelegate? 
Pass; + get => bitrate; + set => Interlocked.Exchange(ref bitrate, value); + } - private int bitrate; + private double length; - public int Bitrate - { - get => bitrate; - set => Interlocked.Exchange(ref bitrate, value); - } + /// + /// Audio length in miliseconds. Decoder may fill this in after the first call of . + /// + public double Length + { + get => length; + set => Interlocked.Exchange(ref length, value); + } - private double length; + private long byteLength; - public double Length - { - get => length; - set => Interlocked.Exchange(ref length, value); - } + /// + /// Audio length in byte. Note that this may not be accurate. You cannot depend on this value entirely. + /// You can find out the actual byte length by summing up byte counts you received once decoding is done. + /// Decoder may fill this in after the first call of . + /// + public long ByteLength + { + get => byteLength; + set => Interlocked.Exchange(ref byteLength, value); + } - private long byteLength; + internal volatile bool StopJob; - public long ByteLength - { - get => byteLength; - set => Interlocked.Exchange(ref byteLength, value); - } + private volatile bool loading; - internal volatile bool StopJob; + /// + /// Whether it is decoding or not. + /// + public bool Loading { get => loading; protected set => loading = value; } - private volatile bool loading; - public bool Loading { get => loading; protected set => loading = value; } + protected SDL3AudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, PassDataDelegate? pass) + { + Stream = stream; + AudioSpec = audioSpec; + IsTrack = isTrack; + AutoDisposeStream = autoDisposeStream; + Pass = pass; + } - protected AudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) - { - Rate = rate; - Channels = channels; - IsTrack = isTrack; - Format = format; - Stream = stream; - AutoDisposeStream = autoDisposeStream; - Pass = pass; - } + /// + /// Add a flag to stop decoding in the next loop of decoder thread. + /// + public void Stop() + { + StopJob = true; + } + + // Not using IDisposable since things must be handled in a decoder thread + internal virtual void Free() + { + if (AutoDisposeStream) + Stream.Dispose(); + } - public void Stop() + protected abstract int LoadFromStreamInternal(out byte[] decoded); + + /// + /// Decodes and resamples audio from job.Stream, and pass it to decoded. + /// You may need to run this multiple times. + /// Don't call this yourself if this decoder is in the decoder thread job list. + /// + /// Decoded audio + public int LoadFromStream(out byte[] decoded) + { + int read = 0; + + try { - StopJob = true; + read = LoadFromStreamInternal(out decoded); } - - // Not using IDisposable since things must be handled in a decoder thread - internal virtual void Free() + catch (Exception e) { - if (AutoDisposeStream) - Stream.Dispose(); + Logger.Log(e.Message, level: LogLevel.Important); + Loading = false; + decoded = Array.Empty(); } - - protected abstract int LoadFromStreamInternal(out byte[] decoded); - - /// - /// Decodes and resamples audio from job.Stream, and pass it to decoded. - /// You may need to run this multiple times. 
- /// - /// Decoded audio - public int LoadFromStream(out byte[] decoded) + finally { - int read = 0; - - try - { - read = LoadFromStreamInternal(out decoded); - } - catch (Exception e) - { - Logger.Log(e.Message, level: LogLevel.Important); - Loading = false; - decoded = Array.Empty(); - } - finally - { - if (!Loading) - Free(); - } - - return read; + if (!Loading) + Free(); } + + return read; } /// /// This is only for using BASS as a decoder for SDL3 backend! /// - internal class BassAudioDecoder : AudioDecoder + internal class BassAudioDecoder : SDL3AudioDecoder { private int decodeStream; private FileCallbacks? fileCallbacks; @@ -285,19 +353,19 @@ private Resolution resolution { get { - if (Format == SDL_AudioFormat.SDL_AUDIO_S8) + if (AudioSpec.format == SDL_AudioFormat.SDL_AUDIO_S8) return Resolution.Byte; - else if (Format == SDL3.SDL_AUDIO_S16) // uses constant due to endian + else if (AudioSpec.format == SDL3.SDL_AUDIO_S16) // uses constant due to endian return Resolution.Short; else return Resolution.Float; } } - private ushort bits => (ushort)SDL3.SDL_AUDIO_BITSIZE(Format); + private ushort bits => (ushort)SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format); - public BassAudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) - : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) + public BassAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, PassDataDelegate? pass) + : base(stream, audioSpec, isTrack, autoDisposeStream, pass) { } @@ -332,96 +400,91 @@ internal override void Free() base.Free(); } - private static readonly object bass_sync_lock = new object(); - protected override int LoadFromStreamInternal(out byte[] decoded) { if (Bass.CurrentDevice < 0) throw new InvalidOperationException("Initialize a BASS device to decode audio"); - lock (bass_sync_lock) + if (!Loading) { - if (!Loading) + fileCallbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); + syncCallback = new SyncCallback((_, _, _, _) => { - fileCallbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); - syncCallback = new SyncCallback((_, _, _, _) => - { - Loading = false; - }); + Loading = false; + }); - BassFlags bassFlags = BassFlags.Decode | resolution.ToBassFlag(); - if (IsTrack) bassFlags |= BassFlags.Prescan; + BassFlags bassFlags = BassFlags.Decode | resolution.ToBassFlag(); + if (IsTrack) bassFlags |= BassFlags.Prescan; - decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, fileCallbacks.Callbacks); + decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, fileCallbacks.Callbacks); - if (decodeStream == 0) - throw new FormatException($"Couldn't create stream: {Bass.LastError}"); + if (decodeStream == 0) + throw new FormatException($"Couldn't create stream: {Bass.LastError}"); - if (Bass.ChannelGetInfo(decodeStream, out var info)) - { - ByteLength = Bass.ChannelGetLength(decodeStream); - Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000.0d; - Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(decodeStream, ChannelAttribute.Bitrate)); + if (Bass.ChannelGetInfo(decodeStream, out var info)) + { + ByteLength = Bass.ChannelGetLength(decodeStream); + Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000.0d; + Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(decodeStream, ChannelAttribute.Bitrate)); - if (info.Channels != Channels || info.Frequency != Rate) - { - resampler = 
BassMix.CreateMixerStream(Rate, Channels, BassFlags.MixerEnd | BassFlags.Decode | resolution.ToBassFlag()); + if (info.Channels != AudioSpec.channels || info.Frequency != AudioSpec.freq) + { + resampler = BassMix.CreateMixerStream(AudioSpec.freq, AudioSpec.channels, BassFlags.MixerEnd | BassFlags.Decode | resolution.ToBassFlag()); - if (resampler == 0) - throw new FormatException($"Failed to create BASS Mixer: {Bass.LastError}"); + if (resampler == 0) + throw new FormatException($"Failed to create BASS Mixer: {Bass.LastError}"); - if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanNoRampin | BassFlags.MixerChanLimit)) - throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); + if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanNoRampin | BassFlags.MixerChanLimit)) + throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); - ByteLength /= info.Channels * (bits / 8); - ByteLength = (long)Math.Ceiling((decimal)ByteLength / info.Frequency * Rate); - ByteLength *= Channels * (bits / 8); - } - } - else - { - if (IsTrack) - throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); + ByteLength /= info.Channels * (bits / 8); + ByteLength = (long)Math.Ceiling((decimal)ByteLength / info.Frequency * AudioSpec.freq); + ByteLength *= AudioSpec.channels * (bits / 8); } - - syncHandle = Bass.ChannelSetSync(resampler == 0 ? decodeStream : resampler, SyncFlags.End | SyncFlags.Onetime, 0, syncCallback.Callback, syncCallback.Handle); - - Loading = true; + } + else + { + if (IsTrack) + throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); } - int handle = resampler == 0 ? decodeStream : resampler; + syncHandle = Bass.ChannelSetSync(resampler == 0 ? decodeStream : resampler, SyncFlags.End | SyncFlags.Onetime, 0, syncCallback.Callback, syncCallback.Handle); - int bufferLen = (int)Bass.ChannelSeconds2Bytes(handle, 1); + Loading = true; + } - if (bufferLen <= 0) - bufferLen = 44100 * 2 * 4 * 1; + int handle = resampler == 0 ? decodeStream : resampler; - if (decodeData == null || decodeData.Length < bufferLen) - decodeData = new byte[bufferLen]; + int bufferLen = (int)Bass.ChannelSeconds2Bytes(handle, 1); - int got = Bass.ChannelGetData(handle, decodeData, bufferLen); + if (bufferLen <= 0) + bufferLen = 44100 * 2 * 4 * 1; - if (got == -1) - { - Loading = false; + if (decodeData == null || decodeData.Length < bufferLen) + decodeData = new byte[bufferLen]; - if (Bass.LastError != Errors.Ended) - throw new FormatException($"Couldn't decode: {Bass.LastError}"); - } + int got = Bass.ChannelGetData(handle, decodeData, bufferLen); + + if (got == -1) + { + Loading = false; - decoded = decodeData; - return Math.Max(0, got); + if (Bass.LastError != Errors.Ended) + throw new FormatException($"Couldn't decode: {Bass.LastError}"); } + + decoded = decodeData; + return Math.Max(0, got); } } - internal class FFmpegAudioDecoder : AudioDecoder + internal class FFmpegAudioDecoder : SDL3AudioDecoder { private VideoDecoder? ffmpeg; private byte[]? decodeData; - public FFmpegAudioDecoder(int rate, int channels, bool isTrack, SDL_AudioFormat format, Stream stream, bool autoDisposeStream, PassDataDelegate? pass) - : base(rate, channels, isTrack, format, stream, autoDisposeStream, pass) + public FFmpegAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, PassDataDelegate? 
pass) + : base(stream, audioSpec, isTrack, autoDisposeStream, pass) { } @@ -437,14 +500,15 @@ protected override int LoadFromStreamInternal(out byte[] decoded) { if (ffmpeg == null) { - ffmpeg = new VideoDecoder(Stream, Rate, Channels, SDL3.SDL_AUDIO_ISFLOAT(Format), SDL3.SDL_AUDIO_BITSIZE(Format), SDL3.SDL_AUDIO_ISSIGNED(Format)); + ffmpeg = new VideoDecoder(Stream, AudioSpec.freq, AudioSpec.channels, + SDL3.SDL_AUDIO_ISFLOAT(AudioSpec.format), SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format), SDL3.SDL_AUDIO_ISSIGNED(AudioSpec.format)); ffmpeg.PrepareDecoding(); ffmpeg.RecreateCodecContext(); Bitrate = (int)ffmpeg.Bitrate; Length = ffmpeg.Duration; - ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * Rate) * Channels * (SDL3.SDL_AUDIO_BITSIZE(Format) / 8); // FIXME + ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * AudioSpec.freq) * AudioSpec.channels * (SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format) / 8); // FIXME Loading = true; } diff --git a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs index 0ef6018edf..d8060e7b79 100644 --- a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs @@ -22,7 +22,7 @@ internal class SampleSDL3Factory : SampleFactory private readonly AutoResetEvent completion = new AutoResetEvent(false); - private SDL3AudioDecoderManager.AudioDecoder? decoder; + private SDL3AudioDecoder? decoder; public SampleSDL3Factory(Stream stream, string name, SDL3AudioMixer mixer, int playbackConcurrency, SDL_AudioSpec spec) : base(name, playbackConcurrency) @@ -30,10 +30,10 @@ public SampleSDL3Factory(Stream stream, string name, SDL3AudioMixer mixer, int p this.mixer = mixer; this.spec = spec; - decoder = SDL3AudioManager.DecoderManager.StartDecodingAsync(spec.freq, spec.channels, spec.format, stream, ReceiveAudioData, false); + decoder = SDL3AudioManager.DecoderManager.StartDecodingAsync(stream, spec, false, ReceiveAudioData); } - internal void ReceiveAudioData(byte[] audio, int byteLen, SDL3AudioDecoderManager.AudioDecoder data, bool done) + internal void ReceiveAudioData(byte[] audio, int byteLen, SDL3AudioDecoder data, bool done) { if (IsDisposed) return; diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index b48ce5b892..9455e52519 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -36,7 +36,7 @@ public sealed class TrackSDL3 : Track, ISDL3AudioChannel public TrackSDL3(string name, Stream data, SDL_AudioSpec spec, int samples) : base(name) { - EnqueueAction(() => SDL3AudioManager.DecoderManager.StartDecodingAsync(spec.freq, spec.channels, spec.format, data, ReceiveAudioData, true)); + EnqueueAction(() => SDL3AudioManager.DecoderManager.StartDecodingAsync(data, spec, true, ReceiveAudioData)); // SoundTouch limitation const float tempo_minimum_supported = 0.05f; @@ -51,9 +51,9 @@ public TrackSDL3(string name, Stream data, SDL_AudioSpec spec, int samples) private readonly object syncRoot = new object(); - private SDL3AudioDecoderManager.AudioDecoder? decodeData; + private SDL3AudioDecoder? 
decodeData; - internal void ReceiveAudioData(byte[] audio, int length, SDL3AudioDecoderManager.AudioDecoder data, bool done) + internal void ReceiveAudioData(byte[] audio, int length, SDL3AudioDecoder data, bool done) { if (IsDisposed) return; diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index e9efe42f88..3c3e70fb39 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -10,6 +10,7 @@ using osu.Framework.Extensions; using NAudio.Dsp; using System.Collections.Generic; +using SDL; namespace osu.Framework.Audio.Track { @@ -87,8 +88,15 @@ public Waveform(Stream? data) // Code below assumes stereo channels = 2; + SDL_AudioSpec spec = new SDL_AudioSpec() + { + freq = sample_rate, + channels = channels, + format = SDL3.SDL_AUDIO_F32 + }; + // AudioDecoder will resample data into specified sample rate and channels (44100hz 2ch float) - SDL3AudioDecoderManager.AudioDecoder decoder = SDL3AudioDecoderManager.CreateDecoder(sample_rate, channels, true, SDL.SDL3.SDL_AUDIO_F32, data, false); + SDL3AudioDecoder decoder = SDL3AudioDecoderManager.CreateDecoder(data, spec, true, false); Complex[] complexBuffer = ArrayPool.Shared.Rent(fft_samples); From 3889ec942a2531d366cf37a2b8ce13f9a27bdc3e Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Tue, 6 Aug 2024 21:50:20 +0900 Subject: [PATCH 092/127] SDL3 Audio visual test fixes --- .../Visual/Audio/TestSceneTrackAmplitudes.cs | 18 +++++++++--------- .../Audio/Sample/SampleChannelSDL3.cs | 4 +++- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/osu.Framework.Tests/Visual/Audio/TestSceneTrackAmplitudes.cs b/osu.Framework.Tests/Visual/Audio/TestSceneTrackAmplitudes.cs index 2741c1cbca..d98e1b5f9d 100644 --- a/osu.Framework.Tests/Visual/Audio/TestSceneTrackAmplitudes.cs +++ b/osu.Framework.Tests/Visual/Audio/TestSceneTrackAmplitudes.cs @@ -15,24 +15,24 @@ namespace osu.Framework.Tests.Visual.Audio { public partial class TestSceneTrackAmplitudes : FrameworkTestScene { - private DrawableTrack track; + private DrawableTrack drawableTrack; private Box leftChannel; private Box rightChannel; - private TrackBass bassTrack; + private Track track; private Container amplitudeBoxes; [BackgroundDependencyLoader] private void load(ITrackStore tracks) { - bassTrack = (TrackBass)tracks.Get("sample-track.mp3"); - int length = bassTrack.CurrentAmplitudes.FrequencyAmplitudes.Length; + track = tracks.Get("sample-track.mp3"); + int length = track.CurrentAmplitudes.FrequencyAmplitudes.Length; Children = new Drawable[] { - track = new DrawableTrack(bassTrack), + drawableTrack = new DrawableTrack(track), new GridContainer { RelativeSizeAxes = Axes.Both, @@ -87,16 +87,16 @@ protected override void LoadComplete() { base.LoadComplete(); - track.Looping = true; - AddStep("start track", () => track.Start()); - AddStep("stop track", () => track.Stop()); + drawableTrack.Looping = true; + AddStep("start track", () => drawableTrack.Start()); + AddStep("stop track", () => drawableTrack.Stop()); } protected override void Update() { base.Update(); - var amplitudes = bassTrack.CurrentAmplitudes; + var amplitudes = track.CurrentAmplitudes; rightChannel.Width = amplitudes.RightChannel * 0.5f; leftChannel.Width = amplitudes.LeftChannel * 0.5f; diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs index 3b62c94434..12e2044677 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs +++ 
b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs @@ -24,6 +24,9 @@ public SampleChannelSDL3(SampleSDL3 sample, SampleSDL3AudioPlayer player) public override void Play() { + if (started) + return; + started = false; playing = true; base.Play(); @@ -57,7 +60,6 @@ int ISDL3AudioChannel.GetRemainingSamples(float[] data) if (player.Done) { playing = false; - started = false; } return ret; From 7cb61607f635e3c9e86bb8aedcbed9aff5567ed6 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Thu, 8 Aug 2024 00:56:29 +0900 Subject: [PATCH 093/127] Update TrackSDL3 CurrentAmplitude less frequently --- osu.Framework/Audio/Track/TrackSDL3.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index 9455e52519..aaa39df1d3 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -166,7 +166,7 @@ protected override void UpdateState() } // Not sure if I need to split this up to another class since this featrue is only exclusive to Track - if (amplitudeRequested && isRunning && currentTime != lastTime) + if (amplitudeRequested && isRunning && Math.Abs(currentTime - lastTime) > 1000.0 / 60.0) { lastTime = currentTime; From d29123b4ee5489a69e527ff952430a3d3f931831 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Thu, 8 Aug 2024 02:56:42 +0900 Subject: [PATCH 094/127] Don't divide by 2 when doing FFT in TrackSDL3/Waveform --- osu.Framework/Audio/Track/TrackSDL3.cs | 2 +- osu.Framework/Audio/Track/Waveform.cs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index aaa39df1d3..030c4efce1 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -116,7 +116,7 @@ private void updateCurrentAmplitude() if (fftIndex < fftSamples.Length) { fftSamples[fftIndex].Y = 0; - fftSamples[fftIndex++].X = (samples[i] + samples[i + secondCh]) * 0.5f; + fftSamples[fftIndex++].X = samples[i] + samples[i + secondCh]; } } diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index 3c3e70fb39..8f5196867f 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -142,7 +142,7 @@ public Waveform(Stream? data) point.AmplitudeLeft = Math.Max(point.AmplitudeLeft, Math.Abs(left)); point.AmplitudeRight = Math.Max(point.AmplitudeRight, Math.Abs(right)); - complexBuffer[complexBufferIndex].X = (left + right) * 0.5f; + complexBuffer[complexBufferIndex].X = left + right; complexBuffer[complexBufferIndex].Y = 0; if (++complexBufferIndex >= fft_samples) From d36e569a3123271c0d62b77a741a1d1cc691a7a8 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Thu, 8 Aug 2024 03:04:13 +0900 Subject: [PATCH 095/127] Satisfy InspectCode --- osu.Framework/Audio/Track/Waveform.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index 8f5196867f..b2a3436cd3 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -88,7 +88,7 @@ public Waveform(Stream? 
data) // Code below assumes stereo channels = 2; - SDL_AudioSpec spec = new SDL_AudioSpec() + SDL_AudioSpec spec = new SDL_AudioSpec { freq = sample_rate, channels = channels, From 3f7a7db66eb9d0d7c52101846baa7729d06f07ce Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 9 Aug 2024 00:04:52 +0900 Subject: [PATCH 096/127] Init Bass in SDL3AudioDecoder --- .../Audio/SDL3AudioDecoderManager.cs | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index 69226127e4..2a5103b14b 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -56,6 +56,8 @@ internal static SDL3AudioDecoder CreateDecoder(Stream stream, SDL_AudioSpec audi return decoder; } + private bool bassInit; + /// /// Starts a decoder thread. /// @@ -69,6 +71,11 @@ public SDL3AudioDecoderManager() IsBackground = true }; + Bass.Configure((ManagedBass.Configuration)68, 1); + + if (Bass.CurrentDevice < 0) + bassInit = Bass.Init(Bass.NoSoundDevice); + decoderThread.Start(); } @@ -161,6 +168,12 @@ protected virtual void Dispose(bool disposing) jobs.Clear(); } + if (bassInit) + { + Bass.CurrentDevice = Bass.NoSoundDevice; + Bass.Free(); + } + disposedValue = true; } } @@ -233,7 +246,7 @@ public abstract class SDL3AudioDecoder /// /// Decoder will call this once or more to pass the decoded audio data. /// - internal readonly PassDataDelegate? Pass; + internal PassDataDelegate? Pass { get; private set; } private int bitrate; @@ -299,6 +312,9 @@ public void Stop() // Not using IDisposable since things must be handled in a decoder thread internal virtual void Free() { + // Pass = null; + // Remove reference to the receiver + if (AutoDisposeStream) Stream.Dispose(); } @@ -403,7 +419,7 @@ internal override void Free() protected override int LoadFromStreamInternal(out byte[] decoded) { if (Bass.CurrentDevice < 0) - throw new InvalidOperationException("Initialize a BASS device to decode audio"); + throw new InvalidOperationException($"Initialize a BASS device to decode audio: {Bass.LastError}"); if (!Loading) { From bf24b0f405cfa786baa21c55540d6acd0a0e5be6 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 9 Aug 2024 00:05:27 +0900 Subject: [PATCH 097/127] Make Sample/Track/Mixer tests compatible with SDL3 Audio --- osu.Framework.Tests/Audio/AudioMixerTest.cs | 385 ++++++++++++++++++ .../Audio/AudioTestComponents.cs | 91 +++++ .../Audio/BassAudioMixerTest.cs | 271 ------------ .../Audio/BassTestComponents.cs | 73 +--- .../Audio/SDL3AudioTestComponents.cs | 67 +++ osu.Framework.Tests/Audio/SampleBassTest.cs | 119 ------ osu.Framework.Tests/Audio/SampleTest.cs | 176 ++++++++ .../Audio/{TrackBassTest.cs => TrackTest.cs} | 298 +++++++++----- osu.Framework/Audio/SDL3AudioManager.cs | 310 +++++++------- .../Audio/Sample/SampleChannelSDL3.cs | 1 - .../Audio/Sample/SampleSDL3Factory.cs | 2 +- osu.Framework/Audio/Track/TrackSDL3.cs | 14 +- .../Audio/Track/TrackSDL3AudioPlayer.cs | 5 + 13 files changed, 1122 insertions(+), 690 deletions(-) create mode 100644 osu.Framework.Tests/Audio/AudioMixerTest.cs create mode 100644 osu.Framework.Tests/Audio/AudioTestComponents.cs delete mode 100644 osu.Framework.Tests/Audio/BassAudioMixerTest.cs create mode 100644 osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs delete mode 100644 osu.Framework.Tests/Audio/SampleBassTest.cs create mode 100644 
osu.Framework.Tests/Audio/SampleTest.cs rename osu.Framework.Tests/Audio/{TrackBassTest.cs => TrackTest.cs} (55%) diff --git a/osu.Framework.Tests/Audio/AudioMixerTest.cs b/osu.Framework.Tests/Audio/AudioMixerTest.cs new file mode 100644 index 0000000000..05d6b05576 --- /dev/null +++ b/osu.Framework.Tests/Audio/AudioMixerTest.cs @@ -0,0 +1,385 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +#nullable disable + +using System; +using System.Threading; +using ManagedBass; +using ManagedBass.Mix; +using NUnit.Framework; +using osu.Framework.Audio.Mixing; +using osu.Framework.Audio.Mixing.Bass; +using osu.Framework.Audio.Mixing.SDL3; +using osu.Framework.Audio.Sample; +using osu.Framework.Audio.Track; +using osu.Framework.Extensions; + +namespace osu.Framework.Tests.Audio +{ + [TestFixture] + public class AudioMixerTest + { + private BassTestComponents bass; + private BassAudioMixer mixerBass => (BassAudioMixer)bass.Mixer; + private TrackBass trackBass; + private SampleBass sampleBass; + + private SDL3AudioTestComponents sdl3; + private SDL3AudioMixer mixerSDL3 => (SDL3AudioMixer)sdl3.Mixer; + private TrackSDL3 trackSDL3; + private SampleSDL3 sampleSDL3; + + private AudioTestComponents.Type type; + private AudioTestComponents audio; + private AudioMixer mixer; + private Track track; + private Sample sample; + + [SetUp] + public void Setup() + { + bass = new BassTestComponents(); + trackBass = (TrackBass)bass.GetTrack(); + sampleBass = (SampleBass)bass.GetSample(); + + sdl3 = new SDL3AudioTestComponents(); + trackSDL3 = (TrackSDL3)sdl3.GetTrack(); + sampleSDL3 = (SampleSDL3)sdl3.GetSample(); + + // TrackSDL3 doesn't have data readily available right away after constructed. 
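+ // Pump the SDL3 pipeline until background decoding finishes, so every test starts from a fully loaded track.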
+ while (!trackSDL3.IsCompletelyLoaded) + { + sdl3.Update(); + Thread.Sleep(10); + } + + bass.Update(); + sdl3.Update(); + } + + [TearDown] + public void Teardown() + { + bass?.Dispose(); + sdl3?.Dispose(); + } + + private void setupBackend(AudioTestComponents.Type id) + { + type = id; + + if (id == AudioTestComponents.Type.BASS) + { + audio = bass; + mixer = mixerBass; + track = trackBass; + sample = sampleBass; + } + else if (id == AudioTestComponents.Type.SDL3) + { + audio = sdl3; + mixer = mixerSDL3; + track = trackSDL3; + sample = sampleSDL3; + } + else + { + throw new InvalidOperationException("not a supported id"); + } + } + + private void assertThatMixerContainsChannel(AudioMixer mixer, IAudioChannel channel) + { + TestContext.WriteLine($"{channel.Mixer.GetHashCode()} ({channel.Mixer.Identifier}) and {mixer.GetHashCode()} ({mixer.Identifier})"); + + if (type == AudioTestComponents.Type.BASS) + Assert.That(BassMix.ChannelGetMixer(((IBassAudioChannel)channel).Handle), Is.EqualTo(((BassAudioMixer)mixer).Handle)); + else + Assert.That(channel.Mixer == mixer, Is.True); + } + + [Test] + public void TestMixerInitialised() + { + Assert.That(mixerBass.Handle, Is.Not.Zero); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestAddedToGlobalMixerByDefault(AudioTestComponents.Type id) + { + setupBackend(id); + + assertThatMixerContainsChannel(mixer, track); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestCannotBeRemovedFromGlobalMixerBass(AudioTestComponents.Type id) + { + setupBackend(id); + + mixer.Remove(track); + audio.Update(); + + assertThatMixerContainsChannel(mixer, track); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestTrackIsMovedBetweenMixers(AudioTestComponents.Type id) + { + setupBackend(id); + + var secondMixer = audio.CreateMixer(); + + secondMixer.Add(track); + audio.Update(); + + assertThatMixerContainsChannel(secondMixer, track); + + mixer.Add(track); + audio.Update(); + + assertThatMixerContainsChannel(mixer, track); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestMovedToGlobalMixerWhenRemovedFromMixer(AudioTestComponents.Type id) + { + setupBackend(id); + + var secondMixer = audio.CreateMixer(); + + secondMixer.Add(track); + secondMixer.Remove(track); + audio.Update(); + + assertThatMixerContainsChannel(mixer, track); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestVirtualTrackCanBeAddedAndRemoved(AudioTestComponents.Type id) + { + setupBackend(id); + + var secondMixer = audio.CreateMixer(); + var virtualTrack = audio.TrackStore.GetVirtual(); + + secondMixer.Add(virtualTrack); + audio.Update(); + + secondMixer.Remove(virtualTrack); + audio.Update(); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestFreedChannelRemovedFromDefault(AudioTestComponents.Type id) + { + setupBackend(id); + + track.Dispose(); + audio.Update(); + + if (id == AudioTestComponents.Type.BASS) + Assert.That(BassMix.ChannelGetMixer(((IBassAudioChannel)trackBass).Handle), Is.Zero); + else + Assert.That(((IAudioChannel)track).Mixer, Is.Null); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestChannelMovedToGlobalMixerAfterDispose(AudioTestComponents.Type id) + { + 
setupBackend(id); + + var secondMixer = audio.CreateMixer(); + + secondMixer.Add(track); + audio.Update(); + + secondMixer.Dispose(); + audio.Update(); + + assertThatMixerContainsChannel(mixer, track); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestPlayPauseStop(AudioTestComponents.Type id) + { + setupBackend(id); + + Assert.That(!track.IsRunning); + + audio.RunOnAudioThread(() => track.Start()); + audio.Update(); + + Assert.That(track.IsRunning); + + audio.RunOnAudioThread(() => track.Stop()); + audio.Update(); + + Assert.That(!track.IsRunning); + + audio.RunOnAudioThread(() => + { + track.Seek(track.Length - 1000); + track.Start(); + }); + + audio.Update(); + + Assert.That(() => + { + audio.Update(); + return !track.IsRunning; + }, Is.True.After(3000)); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestChannelRetainsPlayingStateWhenMovedBetweenMixers(AudioTestComponents.Type id) + { + setupBackend(id); + + var secondMixer = audio.CreateMixer(); + + secondMixer.Add(track); + audio.Update(); + + Assert.That(!track.IsRunning); + + audio.RunOnAudioThread(() => track.Start()); + audio.Update(); + + Assert.That(track.IsRunning); + + mixer.Add(track); + audio.Update(); + + Assert.That(track.IsRunning); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestTrackReferenceLostWhenTrackIsDisposed(AudioTestComponents.Type id) + { + setupBackend(id); + + var trackReference = testDisposeTrackWithoutReference(); + + // The first update disposes the track, the second one removes the track from the TrackStore. + audio.Update(); + audio.Update(); + + GC.Collect(); + GC.WaitForPendingFinalizers(); + + Assert.That(!trackReference.TryGetTarget(out _)); + } + + private WeakReference testDisposeTrackWithoutReference() + { + var weakRef = new WeakReference(track); + + track.Dispose(); + track = null; + + if (type == AudioTestComponents.Type.BASS) + trackBass = null; + else if (type == AudioTestComponents.Type.SDL3) + trackSDL3 = null; + + return weakRef; + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestSampleChannelReferenceLostWhenSampleChannelIsDisposed(AudioTestComponents.Type id) + { + setupBackend(id); + + var channelReference = runTest(sample); + + // The first update disposes the track, the second one removes the track from the TrackStore. + audio.Update(); + audio.Update(); + + GC.Collect(); + GC.WaitForPendingFinalizers(); + + Assert.That(!channelReference.TryGetTarget(out _)); + + static WeakReference runTest(Sample sample) + { + var channel = sample.GetChannel(); + + channel.Play(); // Creates the handle/adds to mixer. 
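+ // Stop and dispose straight away so the weak reference returned below is the only thing left referencing the channel.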
+ channel.Stop(); + channel.Dispose(); + + return new WeakReference(channel); + } + } + + private void assertIfTrackIsPlaying() + { + if (type == AudioTestComponents.Type.BASS) + Assert.That(mixerBass.ChannelIsActive(trackBass), Is.Not.EqualTo(PlaybackState.Playing)); + else + Assert.That(track.IsRunning, Is.Not.True); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestChannelDoesNotPlayIfReachedEndAndSeekedBackwards(AudioTestComponents.Type id) + { + setupBackend(id); + + audio.RunOnAudioThread(() => + { + track.Seek(track.Length - 1); + track.Start(); + }); + + Thread.Sleep(50); + audio.Update(); + + assertIfTrackIsPlaying(); + + audio.RunOnAudioThread(() => track.SeekAsync(0).WaitSafely()); + audio.Update(); + + assertIfTrackIsPlaying(); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestChannelDoesNotPlayIfReachedEndAndMovedMixers(AudioTestComponents.Type id) + { + setupBackend(id); + + audio.RunOnAudioThread(() => + { + track.Seek(track.Length - 1); + track.Start(); + }); + + Thread.Sleep(50); + audio.Update(); + + assertIfTrackIsPlaying(); + + var secondMixer = audio.CreateMixer(); + secondMixer.Add(track); + audio.Update(); + + assertIfTrackIsPlaying(); + } + } +} diff --git a/osu.Framework.Tests/Audio/AudioTestComponents.cs b/osu.Framework.Tests/Audio/AudioTestComponents.cs new file mode 100644 index 0000000000..1395ed8643 --- /dev/null +++ b/osu.Framework.Tests/Audio/AudioTestComponents.cs @@ -0,0 +1,91 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using osu.Framework.Audio.Mixing; +using osu.Framework.Audio.Sample; +using osu.Framework.Audio.Track; +using osu.Framework.Audio; +using System.IO; +using osu.Framework.IO.Stores; + +namespace osu.Framework.Tests.Audio +{ + public abstract class AudioTestComponents : IDisposable + { + public enum Type + { + BASS, + SDL3 + } + + internal readonly AudioMixer Mixer; + public readonly DllResourceStore Resources; + internal readonly TrackStore TrackStore; + internal readonly SampleStore SampleStore; + + protected readonly AudioCollectionManager AllComponents = new AudioCollectionManager(); + protected readonly AudioCollectionManager MixerComponents = new AudioCollectionManager(); + + protected AudioTestComponents(bool init) + { + Prepare(); + + if (init) + Init(); + + AllComponents.AddItem(MixerComponents); + + Mixer = CreateMixer(); + Resources = new DllResourceStore(typeof(TrackTest).Assembly); + TrackStore = new TrackStore(Resources, Mixer, CreateTrack); + SampleStore = new SampleStore(Resources, Mixer, CreateSampleFactory); + + Add(TrackStore, SampleStore); + } + + protected virtual void Prepare() + { + } + + internal abstract Track CreateTrack(Stream data, string name); + + internal abstract SampleFactory CreateSampleFactory(Stream stream, string name, AudioMixer mixer, int playbackConcurrency); + + public abstract void Init(); + + public virtual void Add(params AudioComponent[] component) + { + foreach (var c in component) + AllComponents.AddItem(c); + } + + public abstract AudioMixer CreateMixer(); + + public virtual void Update() + { + RunOnAudioThread(AllComponents.Update); + } + + /// + /// Runs an on a newly created audio thread, and blocks until it has been run to completion. + /// + /// The action to run on the audio thread. 
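+ /// Virtual so that backend-specific test components can substitute their own threading behaviour if needed.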
+ public virtual void RunOnAudioThread(Action action) => AudioTestHelper.RunOnAudioThread(action); + + internal Track GetTrack() => TrackStore.Get("Resources.Tracks.sample-track.mp3"); + internal Sample GetSample() => SampleStore.Get("Resources.Tracks.sample-track.mp3"); + + public void Dispose() => RunOnAudioThread(() => + { + AllComponents.Dispose(); + AllComponents.Update(); // Actually runs the disposal. + + DisposeInternal(); + }); + + public virtual void DisposeInternal() + { + } + } +} diff --git a/osu.Framework.Tests/Audio/BassAudioMixerTest.cs b/osu.Framework.Tests/Audio/BassAudioMixerTest.cs deleted file mode 100644 index 23b5a10a75..0000000000 --- a/osu.Framework.Tests/Audio/BassAudioMixerTest.cs +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. -// See the LICENCE file in the repository root for full licence text. - -#nullable disable - -using System; -using System.Threading; -using ManagedBass; -using ManagedBass.Mix; -using NUnit.Framework; -using osu.Framework.Audio.Mixing.Bass; -using osu.Framework.Audio.Sample; -using osu.Framework.Audio.Track; -using osu.Framework.Extensions; - -namespace osu.Framework.Tests.Audio -{ - [TestFixture] - public class BassAudioMixerTest - { - private BassTestComponents bass; - private TrackBass track; - private SampleBass sample; - - [SetUp] - public void Setup() - { - bass = new BassTestComponents(); - track = bass.GetTrack(); - sample = bass.GetSample(); - - bass.Update(); - } - - [TearDown] - public void Teardown() - { - bass?.Dispose(); - } - - [Test] - public void TestMixerInitialised() - { - Assert.That(bass.Mixer.Handle, Is.Not.Zero); - } - - [Test] - public void TestAddedToGlobalMixerByDefault() - { - Assert.That(BassMix.ChannelGetMixer(getHandle()), Is.EqualTo(bass.Mixer.Handle)); - } - - [Test] - public void TestCannotBeRemovedFromGlobalMixer() - { - bass.Mixer.Remove(track); - bass.Update(); - - Assert.That(BassMix.ChannelGetMixer(getHandle()), Is.EqualTo(bass.Mixer.Handle)); - } - - [Test] - public void TestTrackIsMovedBetweenMixers() - { - var secondMixer = bass.CreateMixer(); - - secondMixer.Add(track); - bass.Update(); - - Assert.That(BassMix.ChannelGetMixer(getHandle()), Is.EqualTo(secondMixer.Handle)); - - bass.Mixer.Add(track); - bass.Update(); - - Assert.That(BassMix.ChannelGetMixer(getHandle()), Is.EqualTo(bass.Mixer.Handle)); - } - - [Test] - public void TestMovedToGlobalMixerWhenRemovedFromMixer() - { - var secondMixer = bass.CreateMixer(); - - secondMixer.Add(track); - secondMixer.Remove(track); - bass.Update(); - - Assert.That(BassMix.ChannelGetMixer(getHandle()), Is.EqualTo(bass.Mixer.Handle)); - } - - [Test] - public void TestVirtualTrackCanBeAddedAndRemoved() - { - var secondMixer = bass.CreateMixer(); - var virtualTrack = bass.TrackStore.GetVirtual(); - - secondMixer.Add(virtualTrack); - bass.Update(); - - secondMixer.Remove(virtualTrack); - bass.Update(); - } - - [Test] - public void TestFreedChannelRemovedFromDefault() - { - track.Dispose(); - bass.Update(); - - Assert.That(BassMix.ChannelGetMixer(getHandle()), Is.Zero); - } - - [Test] - public void TestChannelMovedToGlobalMixerAfterDispose() - { - var secondMixer = bass.CreateMixer(); - - secondMixer.Add(track); - bass.Update(); - - secondMixer.Dispose(); - bass.Update(); - - Assert.That(BassMix.ChannelGetMixer(getHandle()), Is.EqualTo(bass.Mixer.Handle)); - } - - [Test] - public void TestPlayPauseStop() - { - Assert.That(!track.IsRunning); - - bass.RunOnAudioThread(() => track.Start()); - bass.Update(); - - 
Assert.That(track.IsRunning); - - bass.RunOnAudioThread(() => track.Stop()); - bass.Update(); - - Assert.That(!track.IsRunning); - - bass.RunOnAudioThread(() => - { - track.Seek(track.Length - 1000); - track.Start(); - }); - - bass.Update(); - - Assert.That(() => - { - bass.Update(); - return !track.IsRunning; - }, Is.True.After(3000)); - } - - [Test] - public void TestChannelRetainsPlayingStateWhenMovedBetweenMixers() - { - var secondMixer = bass.CreateMixer(); - - secondMixer.Add(track); - bass.Update(); - - Assert.That(!track.IsRunning); - - bass.RunOnAudioThread(() => track.Start()); - bass.Update(); - - Assert.That(track.IsRunning); - - bass.Mixer.Add(track); - bass.Update(); - - Assert.That(track.IsRunning); - } - - [Test] - public void TestTrackReferenceLostWhenTrackIsDisposed() - { - var trackReference = testDisposeTrackWithoutReference(); - - // The first update disposes the track, the second one removes the track from the TrackStore. - bass.Update(); - bass.Update(); - - GC.Collect(); - GC.WaitForPendingFinalizers(); - - Assert.That(!trackReference.TryGetTarget(out _)); - } - - private WeakReference testDisposeTrackWithoutReference() - { - var weakRef = new WeakReference(track); - - track.Dispose(); - track = null; - - return weakRef; - } - - [Test] - public void TestSampleChannelReferenceLostWhenSampleChannelIsDisposed() - { - var channelReference = runTest(sample); - - // The first update disposes the track, the second one removes the track from the TrackStore. - bass.Update(); - bass.Update(); - - GC.Collect(); - GC.WaitForPendingFinalizers(); - - Assert.That(!channelReference.TryGetTarget(out _)); - - static WeakReference runTest(SampleBass sample) - { - var channel = sample.GetChannel(); - - channel.Play(); // Creates the handle/adds to mixer. - channel.Stop(); - channel.Dispose(); - - return new WeakReference(channel); - } - } - - [Test] - public void TestChannelDoesNotPlayIfReachedEndAndSeekedBackwards() - { - bass.RunOnAudioThread(() => - { - track.Seek(track.Length - 1); - track.Start(); - }); - - Thread.Sleep(50); - bass.Update(); - - Assert.That(bass.Mixer.ChannelIsActive(track), Is.Not.EqualTo(PlaybackState.Playing)); - - bass.RunOnAudioThread(() => track.SeekAsync(0).WaitSafely()); - bass.Update(); - - Assert.That(bass.Mixer.ChannelIsActive(track), Is.Not.EqualTo(PlaybackState.Playing)); - } - - [Test] - public void TestChannelDoesNotPlayIfReachedEndAndMovedMixers() - { - bass.RunOnAudioThread(() => - { - track.Seek(track.Length - 1); - track.Start(); - }); - - Thread.Sleep(50); - bass.Update(); - - Assert.That(bass.Mixer.ChannelIsActive(track), Is.Not.EqualTo(PlaybackState.Playing)); - - var secondMixer = bass.CreateMixer(); - secondMixer.Add(track); - bass.Update(); - - Assert.That(secondMixer.ChannelIsActive(track), Is.Not.EqualTo(PlaybackState.Playing)); - } - - private int getHandle() => ((IBassAudioChannel)track).Handle; - } -} diff --git a/osu.Framework.Tests/Audio/BassTestComponents.cs b/osu.Framework.Tests/Audio/BassTestComponents.cs index 2e24301511..0e9b131f31 100644 --- a/osu.Framework.Tests/Audio/BassTestComponents.cs +++ b/osu.Framework.Tests/Audio/BassTestComponents.cs @@ -2,13 +2,13 @@ // See the LICENCE file in the repository root for full licence text. 
using System; +using System.IO; using ManagedBass; -using osu.Framework.Audio; +using osu.Framework.Audio.Mixing; using osu.Framework.Audio.Mixing.Bass; using osu.Framework.Audio.Sample; using osu.Framework.Audio.Track; using osu.Framework.Extensions; -using osu.Framework.IO.Stores; using osu.Framework.Threading; namespace osu.Framework.Tests.Audio @@ -16,40 +16,14 @@ namespace osu.Framework.Tests.Audio /// /// Provides a BASS audio pipeline to be used for testing audio components. /// - public class BassTestComponents : IDisposable + public class BassTestComponents : AudioTestComponents, IDisposable { - internal readonly BassAudioMixer Mixer; - public readonly DllResourceStore Resources; - internal readonly TrackStore TrackStore; - internal readonly SampleStore SampleStore; - - private readonly AudioCollectionManager allComponents = new AudioCollectionManager(); - private readonly AudioCollectionManager mixerComponents = new AudioCollectionManager(); - public BassTestComponents(bool init = true) + : base(init) { - if (init) - Init(); - - allComponents.AddItem(mixerComponents); - - Mixer = CreateMixer(); - Resources = new DllResourceStore(typeof(TrackBassTest).Assembly); - TrackStore = new TrackStore(Resources, Mixer, (data, name) => new TrackBass(data, name)); - SampleStore = new SampleStore(Resources, Mixer, (stream, name, mixer, playbackConcurrency) => - { - byte[] data; - - using (stream) - data = stream.ReadAllBytesToArray(); - - return new SampleBassFactory(data, name, (BassAudioMixer)mixer, playbackConcurrency); - }); - - Add(TrackStore, SampleStore); } - public void Init() + public override void Init() { AudioThread.PreloadBass(); @@ -58,38 +32,29 @@ public void Init() Bass.Init(0); } - public void Add(params AudioComponent[] component) - { - foreach (var c in component) - allComponents.AddItem(c); - } - - internal BassAudioMixer CreateMixer() + public override AudioMixer CreateMixer() { var mixer = new BassAudioMixer(null, Mixer, "Test mixer"); - mixerComponents.AddItem(mixer); + MixerComponents.AddItem(mixer); return mixer; } - public void Update() + public override void DisposeInternal() { - RunOnAudioThread(() => allComponents.Update()); + base.DisposeInternal(); + Bass.Free(); } - /// - /// Runs an on a newly created audio thread, and blocks until it has been run to completion. - /// - /// The action to run on the audio thread. - public void RunOnAudioThread(Action action) => AudioTestHelper.RunOnAudioThread(action); - - internal TrackBass GetTrack() => (TrackBass)TrackStore.Get("Resources.Tracks.sample-track.mp3"); - internal SampleBass GetSample() => (SampleBass)SampleStore.Get("Resources.Tracks.sample-track.mp3"); + internal override Track CreateTrack(Stream data, string name) => new TrackBass(data, name); - public void Dispose() => RunOnAudioThread(() => + internal override SampleFactory CreateSampleFactory(Stream stream, string name, AudioMixer mixer, int playbackConcurrency) { - allComponents.Dispose(); - allComponents.Update(); // Actually runs the disposal. - Bass.Free(); - }); + byte[] data; + + using (stream) + data = stream.ReadAllBytesToArray(); + + return new SampleBassFactory(data, name, (BassAudioMixer)mixer, playbackConcurrency); + } } } diff --git a/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs new file mode 100644 index 0000000000..70f0efa888 --- /dev/null +++ b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs @@ -0,0 +1,67 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. 
+// See the LICENCE file in the repository root for full licence text. + +using System; +using System.IO; +using System.Linq; +using osu.Framework.Audio; +using osu.Framework.Audio.Mixing; +using osu.Framework.Audio.Mixing.SDL3; +using osu.Framework.Audio.Sample; +using osu.Framework.Audio.Track; +using SDL; +using static SDL.SDL3; + +namespace osu.Framework.Tests.Audio +{ + /// + /// Provides a SDL3 audio pipeline to be used for testing audio components. + /// + public class SDL3AudioTestComponents : AudioTestComponents, IDisposable + { + private SDL3BaseAudioManager baseManager = null!; + + public SDL3AudioTestComponents(bool init = true) + : base(init) + { + } + + protected override void Prepare() + { + base.Prepare(); + baseManager = new SDL3BaseAudioManager(MixerComponents.Items.OfType); + } + + public override void Init() + { + SDL_SetHint(SDL_HINT_AUDIO_DRIVER, "dummy"u8); + + if (SDL_Init(SDL_InitFlags.SDL_INIT_AUDIO) < 0) + throw new InvalidOperationException($"Failed to initialise SDL: {SDL_GetError()}"); + + if (!baseManager.SetAudioDevice(SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK)) + throw new InvalidOperationException($"Failed to open SDL3 audio device: {SDL_GetError()}"); + } + + public override AudioMixer CreateMixer() + { + var mixer = new SDL3AudioMixer(Mixer, "Test mixer"); + baseManager.RunWhileLockingAudioStream(() => MixerComponents.AddItem(mixer)); + + return mixer; + } + + public override void DisposeInternal() + { + base.DisposeInternal(); + baseManager.Dispose(); + + SDL_Quit(); + } + + internal override Track CreateTrack(Stream data, string name) => new TrackSDL3(name, data, baseManager.AudioSpec, 441); + + internal override SampleFactory CreateSampleFactory(Stream stream, string name, AudioMixer mixer, int playbackConcurrency) + => new SampleSDL3Factory(stream, name, (SDL3AudioMixer)mixer, playbackConcurrency, baseManager.AudioSpec); + } +} diff --git a/osu.Framework.Tests/Audio/SampleBassTest.cs b/osu.Framework.Tests/Audio/SampleBassTest.cs deleted file mode 100644 index ed0155b18e..0000000000 --- a/osu.Framework.Tests/Audio/SampleBassTest.cs +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. -// See the LICENCE file in the repository root for full licence text. 
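CreateMixer above adds the new mixer to MixerComponents inside RunWhileLockingAudioStream so the mutation cannot race the SDL audio callback, which iterates the mixers while the stream is locked. The same idea in miniature, with a plain .NET lock standing in for SDL_LockAudioStream / SDL_UnlockAudioStream (hypothetical types, not part of this patch):

using System;
using System.Collections.Generic;

public class CallbackSafeList<T>
{
    private readonly object callbackLock = new object();
    private readonly List<T> items = new List<T>();

    // The "audio callback" side: iterate while holding the lock.
    public void ForEach(Action<T> action)
    {
        lock (callbackLock)
        {
            foreach (var item in items)
                action(item);
        }
    }

    // Any other thread: mutate under the same lock so the callback never
    // observes the list mid-change.
    public void Add(T item)
    {
        lock (callbackLock)
            items.Add(item);
    }

    public void Remove(T item)
    {
        lock (callbackLock)
            items.Remove(item);
    }
}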
- -#nullable disable - -using System; -using System.Threading; -using NUnit.Framework; -using osu.Framework.Audio.Sample; - -namespace osu.Framework.Tests.Audio -{ - [TestFixture] - public class SampleBassTest - { - private BassTestComponents bass; - private Sample sample; - private SampleChannel channel; - - [SetUp] - public void Setup() - { - bass = new BassTestComponents(); - sample = bass.GetSample(); - - bass.Update(); - } - - [TearDown] - public void Teardown() - { - bass?.Dispose(); - } - - [Test] - public void TestGetChannelOnDisposed() - { - sample.Dispose(); - - sample.Update(); - - Assert.Throws(() => sample.GetChannel()); - Assert.Throws(() => sample.Play()); - } - - [Test] - public void TestStart() - { - channel = sample.Play(); - bass.Update(); - - Thread.Sleep(50); - - bass.Update(); - - Assert.IsTrue(channel.Playing); - } - - [Test] - public void TestStop() - { - channel = sample.Play(); - bass.Update(); - - channel.Stop(); - bass.Update(); - - Assert.IsFalse(channel.Playing); - } - - [Test] - public void TestStopBeforeLoadFinished() - { - channel = sample.Play(); - channel.Stop(); - - bass.Update(); - - Assert.IsFalse(channel.Playing); - } - - [Test] - public void TestStopsWhenFactoryDisposed() - { - channel = sample.Play(); - bass.Update(); - - bass.SampleStore.Dispose(); - bass.Update(); - - Assert.IsFalse(channel.Playing); - } - - /// - /// Tests the case where a play call can be run inline due to already being on the audio thread. - /// Because it's immediately executed, a `Bass.Update()` call is not required before the channel's state is updated. - /// - [Test] - public void TestPlayingUpdatedAfterInlinePlay() - { - bass.RunOnAudioThread(() => channel = sample.Play()); - Assert.That(channel.Playing, Is.True); - } - - /// - /// Tests the case where a stop call can be run inline due to already being on the audio thread. - /// Because it's immediately executed, a `Bass.Update()` call is not required before the channel's state is updated. - /// - [Test] - public void TestPlayingUpdatedAfterInlineStop() - { - channel = sample.Play(); - bass.Update(); - - bass.RunOnAudioThread(() => channel.Stop()); - Assert.That(channel.Playing, Is.False); - } - } -} diff --git a/osu.Framework.Tests/Audio/SampleTest.cs b/osu.Framework.Tests/Audio/SampleTest.cs new file mode 100644 index 0000000000..5267f583f7 --- /dev/null +++ b/osu.Framework.Tests/Audio/SampleTest.cs @@ -0,0 +1,176 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. 
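Several of the parameterised tests that follow rely on the framework's threading model: work queued from the test thread only runs when the audio components are updated, calls made via RunOnAudioThread execute immediately, and disposal needs two updates because the first pump schedules follow-up work (as the comments in these tests note). A stripped-down illustration of that pump pattern — not the framework's Scheduler, just the shape of the idea:

using System;
using System.Collections.Concurrent;

public class PumpedActionQueue
{
    private readonly ConcurrentQueue<Action> pending = new ConcurrentQueue<Action>();

    // Called from any thread; nothing runs yet.
    public void Enqueue(Action action) => pending.Enqueue(action);

    // Called on the owning thread, typically once per Update().
    public void Pump()
    {
        // Only drain what was queued before this pump started, so work
        // enqueued by the actions themselves runs on the next pump.
        int count = pending.Count;

        for (int i = 0; i < count; i++)
        {
            if (pending.TryDequeue(out var action))
                action();
        }
    }
}

// Why some tests call Update() twice: the first pump can enqueue follow-up
// work (e.g. disposing a track schedules its removal from the store), which
// only executes on the next pump.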
+ +#nullable disable + +using System; +using System.Threading; +using NUnit.Framework; +using osu.Framework.Audio.Sample; + +namespace osu.Framework.Tests.Audio +{ + [TestFixture] + public class SampleTest + { + private BassTestComponents bass; + private Sample sampleBass; + + private SDL3AudioTestComponents sdl3; + private Sample sampleSDL3; + + private SampleChannel channel; + + [SetUp] + public void Setup() + { + bass = new BassTestComponents(); + sampleBass = bass.GetSample(); + + sdl3 = new SDL3AudioTestComponents(); + sampleSDL3 = sdl3.GetSample(); + + bass.Update(); + sdl3.Update(); + } + + [TearDown] + public void Teardown() + { + bass?.Dispose(); + sdl3?.Dispose(); + } + + private Sample getSample(AudioTestComponents.Type id) + { + if (id == AudioTestComponents.Type.BASS) + return sampleBass; + else if (id == AudioTestComponents.Type.SDL3) + return sampleSDL3; + else + throw new InvalidOperationException("not a supported id"); + } + + private AudioTestComponents getTestComponents(AudioTestComponents.Type id) + { + if (id == AudioTestComponents.Type.BASS) + return bass; + else if (id == AudioTestComponents.Type.SDL3) + return sdl3; + else + throw new InvalidOperationException("not a supported id"); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestGetChannelOnDisposed(AudioTestComponents.Type id) + { + var sample = getSample(id); + + sample.Dispose(); + + sample.Update(); + + Assert.Throws(() => sample.GetChannel()); + Assert.Throws(() => sample.Play()); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestStart(AudioTestComponents.Type id) + { + var sample = getSample(id); + var audio = getTestComponents(id); + + channel = sample.Play(); + + audio.Update(); + + Thread.Sleep(50); + + audio.Update(); + + Assert.IsTrue(channel.Playing); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestStop(AudioTestComponents.Type id) + { + var sample = getSample(id); + var audio = getTestComponents(id); + + channel = sample.Play(); + audio.Update(); + + channel.Stop(); + audio.Update(); + + Assert.IsFalse(channel.Playing); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestStopBeforeLoadFinished(AudioTestComponents.Type id) + { + var sample = getSample(id); + var audio = getTestComponents(id); + + channel = sample.Play(); + channel.Stop(); + + audio.Update(); + + Assert.IsFalse(channel.Playing); + } + + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestStopsWhenFactoryDisposed(AudioTestComponents.Type id) + { + var sample = getSample(id); + var audio = getTestComponents(id); + + channel = sample.Play(); + audio.Update(); + + audio.SampleStore.Dispose(); + audio.Update(); + + Assert.IsFalse(channel.Playing); + } + + /// + /// Tests the case where a play call can be run inline due to already being on the audio thread. + /// Because it's immediately executed, a `Bass.Update()` call is not required before the channel's state is updated. 
+ /// + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestPlayingUpdatedAfterInlinePlay(AudioTestComponents.Type id) + { + var sample = getSample(id); + var audio = getTestComponents(id); + + audio.RunOnAudioThread(() => channel = sample.Play()); + Assert.That(channel.Playing, Is.True); + } + + /// + /// Tests the case where a stop call can be run inline due to already being on the audio thread. + /// Because it's immediately executed, a `Bass.Update()` call is not required before the channel's state is updated. + /// + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestPlayingUpdatedAfterInlineStop(AudioTestComponents.Type id) + { + var sample = getSample(id); + var audio = getTestComponents(id); + + var channel = sample.Play(); + audio.Update(); + + audio.RunOnAudioThread(() => channel.Stop()); + Assert.That(channel.Playing, Is.False); + } + } +} diff --git a/osu.Framework.Tests/Audio/TrackBassTest.cs b/osu.Framework.Tests/Audio/TrackTest.cs similarity index 55% rename from osu.Framework.Tests/Audio/TrackBassTest.cs rename to osu.Framework.Tests/Audio/TrackTest.cs index 49b711428e..6f5d11878f 100644 --- a/osu.Framework.Tests/Audio/TrackBassTest.cs +++ b/osu.Framework.Tests/Audio/TrackTest.cs @@ -15,48 +15,90 @@ namespace osu.Framework.Tests.Audio { [TestFixture] - public class TrackBassTest + public class TrackTest { private BassTestComponents bass; - private TrackBass track; + private TrackBass trackBass; + + private SDL3AudioTestComponents sdl3; + private TrackSDL3 trackSDL3; + + private AudioTestComponents audio; + private Track track; [SetUp] public void Setup() { bass = new BassTestComponents(); - track = bass.GetTrack(); + trackBass = (TrackBass)bass.GetTrack(); + + sdl3 = new SDL3AudioTestComponents(); + trackSDL3 = (TrackSDL3)sdl3.GetTrack(); + + // TrackSDL3 doesn't have data readily available right away after constructed. 
+ while (!trackSDL3.IsCompletelyLoaded) + { + sdl3.Update(); + Thread.Sleep(10); + } bass.Update(); + sdl3.Update(); } [TearDown] public void Teardown() { bass?.Dispose(); + sdl3?.Dispose(); + } + + private void setupBackend(AudioTestComponents.Type id) + { + if (id == AudioTestComponents.Type.BASS) + { + audio = bass; + track = trackBass; + } + else if (id == AudioTestComponents.Type.SDL3) + { + audio = sdl3; + track = trackSDL3; + } + else + { + throw new InvalidOperationException("not a supported id"); + } } - [Test] - public void TestStart() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestStart(AudioTestComponents.Type id) { + setupBackend(id); + track.StartAsync(); - bass.Update(); + audio.Update(); Thread.Sleep(50); - bass.Update(); + audio.Update(); Assert.IsTrue(track.IsRunning); Assert.Greater(track.CurrentTime, 0); } - [Test] - public void TestStop() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestStop(AudioTestComponents.Type id) { + setupBackend(id); + track.StartAsync(); - bass.Update(); + audio.Update(); track.StopAsync(); - bass.Update(); + audio.Update(); Assert.IsFalse(track.IsRunning); @@ -66,20 +108,23 @@ public void TestStop() Assert.AreEqual(expectedTime, track.CurrentTime); } - [Test] - public void TestStopWhenDisposed() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestStopWhenDisposed(AudioTestComponents.Type id) { + setupBackend(id); + track.StartAsync(); - bass.Update(); + audio.Update(); Thread.Sleep(50); - bass.Update(); + audio.Update(); Assert.IsTrue(track.IsAlive); Assert.IsTrue(track.IsRunning); track.Dispose(); - bass.Update(); + audio.Update(); Assert.IsFalse(track.IsAlive); Assert.IsFalse(track.IsRunning); @@ -90,42 +135,51 @@ public void TestStopWhenDisposed() Assert.AreEqual(expectedTime, track.CurrentTime); } - [Test] - public void TestStopAtEnd() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestStopAtEnd(AudioTestComponents.Type id) { + setupBackend(id); + startPlaybackAt(track.Length - 1); Thread.Sleep(50); - bass.Update(); + audio.Update(); track.StopAsync(); - bass.Update(); + audio.Update(); Assert.IsFalse(track.IsRunning); Assert.AreEqual(track.Length, track.CurrentTime); } - [Test] - public void TestSeek() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestSeek(AudioTestComponents.Type id) { + setupBackend(id); + track.SeekAsync(1000); - bass.Update(); + audio.Update(); Assert.IsFalse(track.IsRunning); Assert.AreEqual(1000, track.CurrentTime); } - [Test] - public void TestSeekWhileRunning() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestSeekWhileRunning(AudioTestComponents.Type id) { + setupBackend(id); + track.StartAsync(); - bass.Update(); + audio.Update(); track.SeekAsync(1000); - bass.Update(); + audio.Update(); Thread.Sleep(50); - bass.Update(); + audio.Update(); Assert.IsTrue(track.IsRunning); Assert.GreaterOrEqual(track.CurrentTime, 1000); @@ -134,41 +188,49 @@ public void TestSeekWhileRunning() /// /// Bass does not allow seeking to the end of the track. It should fail and the current time should not change. /// - [Test] - public void TestSeekToEndFails() + [TestCase(AudioTestComponents.Type.BASS)] + public void TestSeekToEndFails(AudioTestComponents.Type id) { + setupBackend(id); + bool? 
success = null; - bass.RunOnAudioThread(() => { success = track.Seek(track.Length); }); - bass.Update(); + audio.RunOnAudioThread(() => { success = track.Seek(track.Length); }); + audio.Update(); Assert.AreEqual(0, track.CurrentTime); Assert.IsFalse(success); } - [Test] - public void TestSeekBackToSamePosition() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestSeekBackToSamePosition(AudioTestComponents.Type id) { + setupBackend(id); + track.SeekAsync(1000); track.SeekAsync(0); - bass.Update(); + audio.Update(); Thread.Sleep(50); - bass.Update(); + audio.Update(); Assert.GreaterOrEqual(track.CurrentTime, 0); Assert.Less(track.CurrentTime, 1000); } - [Test] - public void TestPlaybackToEnd() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestPlaybackToEnd(AudioTestComponents.Type id) { + setupBackend(id); + startPlaybackAt(track.Length - 1); Thread.Sleep(50); - bass.Update(); + audio.Update(); Assert.IsFalse(track.IsRunning); Assert.AreEqual(track.Length, track.CurrentTime); @@ -178,51 +240,63 @@ public void TestPlaybackToEnd() /// Bass restarts the track from the beginning if Start is called when the track has been completed. /// This is blocked locally in , so this test expects the track to not restart. /// - [Test] - public void TestStartFromEndDoesNotRestart() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestStartFromEndDoesNotRestart(AudioTestComponents.Type id) { + setupBackend(id); + startPlaybackAt(track.Length - 1); Thread.Sleep(50); - bass.Update(); + audio.Update(); track.StartAsync(); - bass.Update(); + audio.Update(); Assert.AreEqual(track.Length, track.CurrentTime); } - [Test] - public void TestRestart() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestRestart(AudioTestComponents.Type id) { + setupBackend(id); + startPlaybackAt(1000); Thread.Sleep(50); - bass.Update(); + audio.Update(); restartTrack(); Assert.IsTrue(track.IsRunning); Assert.Less(track.CurrentTime, 1000); } - [Test] - public void TestRestartAtEnd() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestRestartAtEnd(AudioTestComponents.Type id) { + setupBackend(id); + startPlaybackAt(track.Length - 1); Thread.Sleep(50); - bass.Update(); + audio.Update(); restartTrack(); Assert.IsTrue(track.IsRunning); Assert.LessOrEqual(track.CurrentTime, 1000); } - [Test] - public void TestRestartFromRestartPoint() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestRestartFromRestartPoint(AudioTestComponents.Type id) { + setupBackend(id); + track.RestartPoint = 1000; startPlaybackAt(3000); @@ -233,10 +307,14 @@ public void TestRestartFromRestartPoint() Assert.Less(track.CurrentTime, 3000); } - [TestCase(0)] - [TestCase(1000)] - public void TestLoopingRestart(double restartPoint) + [TestCase(AudioTestComponents.Type.BASS, 0)] + [TestCase(AudioTestComponents.Type.SDL3, 0)] + [TestCase(AudioTestComponents.Type.BASS, 1000)] + [TestCase(AudioTestComponents.Type.SDL3, 1000)] + public void TestLoopingRestart(AudioTestComponents.Type id, double restartPoint) { + setupBackend(id); + track.Looping = true; track.RestartPoint = restartPoint; @@ -246,12 +324,12 @@ public void TestLoopingRestart(double restartPoint) // In a perfect world the track will be running after the update above, but during testing it's 
possible that the track is in // a stalled state due to updates running on Bass' own thread, so we'll loop until the track starts running again - // Todo: This should be fixed in the future if/when we invoke Bass.Update() ourselves + // Todo: This should be fixed in the future if/when we invoke audio.Update() ourselves int loopCount = 0; while (++loopCount < 50 && !track.IsRunning) { - bass.Update(); + audio.Update(); Thread.Sleep(10); } @@ -262,9 +340,12 @@ public void TestLoopingRestart(double restartPoint) Assert.LessOrEqual(track.CurrentTime, restartPoint + 1000); } - [Test] - public void TestSetTempoNegative() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestSetTempoNegative(AudioTestComponents.Type id) { + setupBackend(id); + Assert.Throws(() => track.Tempo.Value = -1); Assert.Throws(() => track.Tempo.Value = 0.04f); @@ -276,16 +357,22 @@ public void TestSetTempoNegative() Assert.AreEqual(0.05f, track.Tempo.Value); } - [Test] - public void TestRateWithAggregateAdjustments() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestRateWithAggregateAdjustments(AudioTestComponents.Type id) { + setupBackend(id); + track.AddAdjustment(AdjustableProperty.Frequency, new BindableDouble(1.5f)); Assert.AreEqual(1.5, track.Rate); } - [Test] - public void TestLoopingTrackDoesntSetCompleted() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestLoopingTrackDoesntSetCompleted(AudioTestComponents.Type id) { + setupBackend(id); + bool completedEvent = false; track.Completed += () => completedEvent = true; @@ -296,14 +383,17 @@ public void TestLoopingTrackDoesntSetCompleted() Assert.IsFalse(track.HasCompleted); Assert.IsFalse(completedEvent); - bass.Update(); + audio.Update(); Assert.IsTrue(track.IsRunning); } - [Test] - public void TestHasCompletedResetsOnSeekBack() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestHasCompletedResetsOnSeekBack(AudioTestComponents.Type id) { + setupBackend(id); + // start playback and wait for completion. startPlaybackAt(track.Length - 1); takeEffectsAndUpdateAfter(50); @@ -312,20 +402,23 @@ public void TestHasCompletedResetsOnSeekBack() // ensure seeking to end doesn't reset completed state. track.SeekAsync(track.Length); - bass.Update(); + audio.Update(); Assert.IsTrue(track.HasCompleted); // seeking back reset completed state. track.SeekAsync(track.Length - 1); - bass.Update(); + audio.Update(); Assert.IsFalse(track.HasCompleted); } - [Test] - public void TestZeroFrequencyHandling() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestZeroFrequencyHandling(AudioTestComponents.Type id) { + setupBackend(id); + // start track. track.StartAsync(); takeEffectsAndUpdateAfter(50); @@ -336,13 +429,13 @@ public void TestZeroFrequencyHandling() // now set to zero frequency and update track to take effects. track.Frequency.Value = 0; - bass.Update(); + audio.Update(); double currentTime = track.CurrentTime; // assert time is frozen after 50ms sleep and didn't change with full precision, but "IsRunning" is still true. 
Thread.Sleep(50); - bass.Update(); + audio.Update(); Assert.IsTrue(track.IsRunning); Assert.AreEqual(currentTime, track.CurrentTime); @@ -360,9 +453,12 @@ public void TestZeroFrequencyHandling() /// /// Ensure setting a paused (or not yet played) track's frequency from zero to one doesn't resume / play it. /// - [Test] - public void TestZeroFrequencyDoesntResumeTrack() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestZeroFrequencyDoesntResumeTrack(AudioTestComponents.Type id) { + setupBackend(id); + // start at zero frequency and wait a bit. track.Frequency.Value = 0; track.StartAsync(); @@ -374,7 +470,7 @@ public void TestZeroFrequencyDoesntResumeTrack() // stop track and update. track.StopAsync(); - bass.Update(); + audio.Update(); Assert.IsFalse(track.IsRunning); @@ -387,71 +483,83 @@ public void TestZeroFrequencyDoesntResumeTrack() Assert.AreEqual(0, track.CurrentTime); } - [Test] - public void TestBitrate() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestBitrate(AudioTestComponents.Type id) { + setupBackend(id); + Assert.Greater(track.Bitrate, 0); } /// /// Tests the case where a start call can be run inline due to already being on the audio thread. - /// Because it's immediately executed, a `Bass.Update()` call is not required before the channel's state is updated. + /// Because it's immediately executed, a `audio.Update()` call is not required before the channel's state is updated. /// - [Test] - public void TestIsRunningUpdatedAfterInlineStart() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestIsRunningUpdatedAfterInlineStart(AudioTestComponents.Type id) { - bass.RunOnAudioThread(() => track.Start()); + setupBackend(id); + + audio.RunOnAudioThread(() => track.Start()); Assert.That(track.IsRunning, Is.True); } /// /// Tests the case where a stop call can be run inline due to already being on the audio thread. - /// Because it's immediately executed, a `Bass.Update()` call is not required before the channel's state is updated. + /// Because it's immediately executed, a `audio.Update()` call is not required before the channel's state is updated. /// - [Test] - public void TestIsRunningUpdatedAfterInlineStop() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestIsRunningUpdatedAfterInlineStop(AudioTestComponents.Type id) { + setupBackend(id); + track.StartAsync(); - bass.Update(); + audio.Update(); - bass.RunOnAudioThread(() => track.Stop()); + audio.RunOnAudioThread(() => track.Stop()); Assert.That(track.IsRunning, Is.False); } /// /// Tests the case where a seek call can be run inline due to already being on the audio thread. - /// Because it's immediately executed, a `Bass.Update()` call is not required before the channel's state is updated. + /// Because it's immediately executed, a `audio.Update()` call is not required before the channel's state is updated. 
/// - [Test] - public void TestCurrentTimeUpdatedAfterInlineSeek() + [TestCase(AudioTestComponents.Type.BASS)] + [TestCase(AudioTestComponents.Type.SDL3)] + public void TestCurrentTimeUpdatedAfterInlineSeek(AudioTestComponents.Type id) { + setupBackend(id); + track.StartAsync(); - bass.Update(); + audio.Update(); - bass.RunOnAudioThread(() => track.Seek(20000)); + audio.RunOnAudioThread(() => track.Seek(20000)); Assert.That(track.CurrentTime, Is.EqualTo(20000).Within(100)); } private void takeEffectsAndUpdateAfter(int after) { - bass.Update(); + audio.Update(); Thread.Sleep(after); - bass.Update(); + audio.Update(); } private void startPlaybackAt(double time) { track.SeekAsync(time); track.StartAsync(); - bass.Update(); + audio.Update(); } private void restartTrack() { - bass.RunOnAudioThread(() => + audio.RunOnAudioThread(() => { track.Restart(); - bass.Update(); + audio.Update(); }); } } diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index 36ba27f756..d23043cb93 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -19,35 +19,31 @@ using osu.Framework.Logging; using osu.Framework.Threading; using SDL; +using static SDL.SDL3; namespace osu.Framework.Audio { - public unsafe class SDL3AudioManager : AudioManager + public class SDL3AudioManager : AudioManager { public static readonly int AUDIO_FREQ = 44100; public static readonly int AUDIO_CHANNELS = 2; - public static readonly SDL_AudioFormat AUDIO_FORMAT = SDL3.SDL_AUDIO_F32; - - private volatile SDL_AudioDeviceID deviceId; - private volatile SDL_AudioStream* deviceStream; - - private SDL_AudioSpec spec; - private int bufferSize = (int)(AUDIO_FREQ * 0.01); // 10ms, will be calculated later when opening audio device, it works as a base value until then. + public static readonly SDL_AudioFormat AUDIO_FORMAT = SDL_AUDIO_F32; + // it is currently in global static... need to do something internal static SDL3AudioDecoderManager DecoderManager { get; } = new SDL3AudioDecoderManager(); private readonly List sdlMixerList = new List(); private ImmutableArray deviceIdArray = ImmutableArray.Empty; - protected ObjectHandle ObjectHandle { get; private set; } - private Scheduler eventScheduler => EventScheduler ?? CurrentAudioThread.Scheduler; protected override void InvokeOnNewDevice(string deviceName) => eventScheduler.Add(() => base.InvokeOnNewDevice(deviceName)); protected override void InvokeOnLostDevice(string deviceName) => eventScheduler.Add(() => base.InvokeOnLostDevice(deviceName)); + private readonly SDL3BaseAudioManager baseManager; + /// /// Creates a new . /// @@ -57,31 +53,14 @@ public unsafe class SDL3AudioManager : AudioManager public SDL3AudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) : base(audioThread, trackStore, sampleStore) { - ObjectHandle = new ObjectHandle(this, GCHandleType.Normal); - - // Must not edit this, as components (especially mixer) expects this to match. 
- spec = new SDL_AudioSpec - { - freq = AUDIO_FREQ, - channels = AUDIO_CHANNELS, - format = AUDIO_FORMAT - }; - - AudioScheduler.Add(() => - { - syncAudioDevices(); + baseManager = new SDL3BaseAudioManager(() => sdlMixerList); - // comment below lines if you want to use FFmpeg to decode audio, AudioDecoder will use FFmpeg if no BASS device is available - ManagedBass.Bass.Configure((ManagedBass.Configuration)68, 1); - audioThread.InitDevice(ManagedBass.Bass.NoSoundDevice); - }); + AudioScheduler.Add(syncAudioDevices); } - private string currentDeviceName = "Not loaded"; - public override string ToString() { - return $@"{GetType().ReadableName()} ({currentDeviceName})"; + return $@"{GetType().ReadableName()} ({baseManager.DeviceName})"; } protected override AudioMixer AudioCreateAudioMixer(AudioMixer fallbackMixer, string identifier) @@ -96,20 +75,7 @@ protected override void ItemAdded(AudioComponent item) base.ItemAdded(item); if (item is SDL3AudioMixer mixer) - { - try - { - if (deviceId != 0) - SDL3.SDL_LockAudioStream(deviceStream); - - sdlMixerList.Add(mixer); - } - finally - { - if (deviceId != 0) - SDL3.SDL_UnlockAudioStream(deviceStream); - } - } + baseManager.RunWhileLockingAudioStream(() => sdlMixerList.Add(mixer)); } protected override void ItemRemoved(AudioComponent item) @@ -117,56 +83,7 @@ protected override void ItemRemoved(AudioComponent item) base.ItemRemoved(item); if (item is SDL3AudioMixer mixer) - { - try - { - if (deviceId != 0) - SDL3.SDL_LockAudioStream(deviceStream); - - sdlMixerList.Remove(mixer); - } - finally - { - if (deviceId != 0) - SDL3.SDL_UnlockAudioStream(deviceStream); - } - } - } - - [UnmanagedCallersOnly(CallConvs = new[] { typeof(CallConvCdecl) })] - private static void audioCallback(IntPtr userdata, SDL_AudioStream* stream, int additionalAmount, int totalAmount) - { - var handle = new ObjectHandle(userdata); - if (handle.GetTarget(out SDL3AudioManager audioManager)) - audioManager.internalAudioCallback(stream, additionalAmount); - } - - private float[] audioBuffer; - - private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount) - { - additionalAmount /= 4; - - if (audioBuffer == null || audioBuffer.Length < additionalAmount) - audioBuffer = new float[additionalAmount]; - - try - { - int filled = 0; - - foreach (var mixer in sdlMixerList) - { - if (mixer.IsAlive) - mixer.MixChannelsInto(audioBuffer, additionalAmount, ref filled); - } - - fixed (float* ptr = audioBuffer) - SDL3.SDL_PutAudioStreamData(stream, (IntPtr)ptr, filled * 4); - } - catch (Exception e) - { - Logger.Error(e, "Error while pushing audio to SDL"); - } + baseManager.RunWhileLockingAudioStream(() => sdlMixerList.Remove(mixer)); } internal void OnNewDeviceEvent(SDL_AudioDeviceID addedDeviceIndex) @@ -174,7 +91,7 @@ internal void OnNewDeviceEvent(SDL_AudioDeviceID addedDeviceIndex) AudioScheduler.Add(() => { // the index is only vaild until next SDL_GetNumAudioDevices call, so get the name first. 
- string name = SDL3.SDL_GetAudioDeviceName(addedDeviceIndex); + string name = SDL_GetAudioDeviceName(addedDeviceIndex); syncAudioDevices(); InvokeOnNewDevice(name); @@ -190,7 +107,7 @@ internal void OnLostDeviceEvent(SDL_AudioDeviceID removedDeviceId) if (!IsCurrentDeviceValid()) // current device lost { - InvokeOnLostDevice(currentDeviceName); + InvokeOnLostDevice(baseManager.DeviceName); SetAudioDevice(); } else @@ -201,10 +118,10 @@ internal void OnLostDeviceEvent(SDL_AudioDeviceID removedDeviceId) }); } - private void syncAudioDevices() + private unsafe void syncAudioDevices() { int count = 0; - SDL_AudioDeviceID* idArrayPtr = SDL3.SDL_GetAudioPlaybackDevices(&count); + SDL_AudioDeviceID* idArrayPtr = SDL_GetAudioPlaybackDevices(&count); var idArray = ImmutableArray.CreateBuilder(count); var nameArray = ImmutableArray.CreateBuilder(count); @@ -212,7 +129,7 @@ private void syncAudioDevices() for (int i = 0; i < count; i++) { SDL_AudioDeviceID id = *(idArrayPtr + i); - string name = SDL3.SDL_GetAudioDeviceName(id); + string name = SDL_GetAudioDeviceName(id); if (string.IsNullOrEmpty(name)) continue; @@ -227,46 +144,18 @@ private void syncAudioDevices() private bool setAudioDevice(SDL_AudioDeviceID targetId) { - if (deviceStream != null) + if (baseManager.SetAudioDevice(targetId)) { - SDL3.SDL_DestroyAudioStream(deviceStream); - deviceStream = null; - } - - fixed (SDL_AudioSpec* ptr = &spec) - { - deviceStream = SDL3.SDL_OpenAudioDeviceStream(targetId, ptr, &audioCallback, ObjectHandle.Handle); - - if (deviceStream != null) - { - deviceId = SDL3.SDL_GetAudioStreamDevice(deviceStream); + Logger.Log($@"🔈 SDL Audio initialised + Driver: {SDL_GetCurrentAudioDriver()} + Device Name: {baseManager.DeviceName} + Format: {baseManager.AudioSpec.freq}hz {baseManager.AudioSpec.channels}ch + Sample size: {baseManager.BufferSize}"); - int sampleFrameSize = 0; - SDL_AudioSpec temp; // this has 'real' device info which is useless since SDL converts audio according to the spec we provided - if (SDL3.SDL_GetAudioDeviceFormat(deviceId, &temp, &sampleFrameSize) == 0) - bufferSize = sampleFrameSize * (int)Math.Ceiling((double)spec.freq / temp.freq); - } + return true; } - if (deviceStream == null) - { - if (targetId == SDL3.SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK) - return false; - - return setAudioDevice(SDL3.SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK); - } - - SDL3.SDL_ResumeAudioDevice(deviceId); - - currentDeviceName = SDL3.SDL_GetAudioDeviceName(targetId); - - Logger.Log($@"🔈 SDL Audio initialised - Driver: {SDL3.SDL_GetCurrentAudioDriver()} - Device Name: {currentDeviceName} - Format: {spec.freq}hz {spec.channels}ch - Sample size: {bufferSize}"); - - return true; + return false; } protected override bool SetAudioDevice(string deviceName = null) @@ -277,7 +166,7 @@ protected override bool SetAudioDevice(string deviceName = null) if (deviceIndex >= 0) return setAudioDevice(deviceIdArray[deviceIndex]); - return setAudioDevice(SDL3.SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK); + return setAudioDevice(SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK); } protected override bool SetAudioDevice(int deviceIndex) @@ -288,33 +177,158 @@ protected override bool SetAudioDevice(int deviceIndex) return SetAudioDevice(); } - protected override bool IsCurrentDeviceValid() => deviceId > 0 && SDL3.SDL_AudioDevicePaused(deviceId) == SDL3.SDL_FALSE; + protected override bool IsCurrentDeviceValid() => baseManager.DeviceId > 0 && SDL_AudioDevicePaused(baseManager.DeviceId) == SDL_FALSE; - internal override Track.Track GetNewTrack(Stream data, string name) - { - 
TrackSDL3 track = new TrackSDL3(name, data, spec, bufferSize); - return track; - } + internal override Track.Track GetNewTrack(Stream data, string name) => new TrackSDL3(name, data, baseManager.AudioSpec, baseManager.BufferSize); internal override SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) - => new SampleSDL3Factory(data, name, (SDL3AudioMixer)mixer, playbackConcurrency, spec); + => new SampleSDL3Factory(data, name, (SDL3AudioMixer)mixer, playbackConcurrency, baseManager.AudioSpec); protected override void Dispose(bool disposing) { base.Dispose(disposing); - DecoderManager?.Dispose(); + baseManager.Dispose(); + } + } + + + /// + /// To share basic playback logic with audio tests. + /// + internal unsafe class SDL3BaseAudioManager : IDisposable + { + internal SDL_AudioSpec AudioSpec { get; private set; } + + internal SDL_AudioDeviceID DeviceId { get; private set; } + internal SDL_AudioStream* DeviceStream { get; private set; } + + internal int BufferSize { get; private set; } = (int)(SDL3AudioManager.AUDIO_FREQ * 0.01); + + internal string DeviceName { get; private set; } = "Not loaded"; + + private readonly Func> mixerIterator; + + private ObjectHandle objectHandle; + + internal SDL3BaseAudioManager(Func> mixerIterator) + { + this.mixerIterator = mixerIterator; + objectHandle = new ObjectHandle(this, GCHandleType.Normal); + AudioSpec = new SDL_AudioSpec + { + freq = SDL3AudioManager.AUDIO_FREQ, + channels = SDL3AudioManager.AUDIO_CHANNELS, + format = SDL3AudioManager.AUDIO_FORMAT + }; + } + + internal void RunWhileLockingAudioStream(Action action) + { + SDL_AudioStream* stream = DeviceStream; + + if (stream != null) + SDL_LockAudioStream(stream); + try + { + action(); + } + finally + { + if (stream != null) + SDL_UnlockAudioStream(stream); + } + } + + internal bool SetAudioDevice(SDL_AudioDeviceID targetId) + { + if (DeviceStream != null) + { + SDL_DestroyAudioStream(DeviceStream); + DeviceStream = null; + } + + SDL_AudioSpec spec = AudioSpec; + + SDL_AudioStream* deviceStream = SDL_OpenAudioDeviceStream(targetId, &spec, &audioCallback, objectHandle.Handle); if (deviceStream != null) { - SDL3.SDL_DestroyAudioStream(deviceStream); - deviceStream = null; - deviceId = 0; + SDL_DestroyAudioStream(DeviceStream); + DeviceStream = deviceStream; + AudioSpec = spec; + + DeviceId = SDL_GetAudioStreamDevice(deviceStream); + + int sampleFrameSize = 0; + SDL_AudioSpec temp; // this has 'real' device info which is useless since SDL converts audio according to the spec we provided + if (SDL_GetAudioDeviceFormat(DeviceId, &temp, &sampleFrameSize) == 0) + BufferSize = sampleFrameSize * (int)Math.Ceiling((double)spec.freq / temp.freq); + } + + if (deviceStream == null) + { + if (targetId == SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK) + return false; + + return SetAudioDevice(SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK); + } + + SDL_ResumeAudioDevice(DeviceId); + + DeviceName = SDL_GetAudioDeviceName(targetId); + + return true; + } + + [UnmanagedCallersOnly(CallConvs = new[] { typeof(CallConvCdecl) })] + private static void audioCallback(IntPtr userdata, SDL_AudioStream* stream, int additionalAmount, int totalAmount) + { + var handle = new ObjectHandle(userdata); + if (handle.GetTarget(out SDL3BaseAudioManager audioManager)) + audioManager.internalAudioCallback(stream, additionalAmount); + } + + private float[] audioBuffer; + + private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount) + { + additionalAmount /= 4; + + if (audioBuffer == null || 
audioBuffer.Length < additionalAmount) + audioBuffer = new float[additionalAmount]; + + try + { + int filled = 0; + + foreach (var mixer in mixerIterator()) + { + if (mixer.IsAlive) + mixer.MixChannelsInto(audioBuffer, additionalAmount, ref filled); + } + + fixed (float* ptr = audioBuffer) + SDL_PutAudioStreamData(stream, (IntPtr)ptr, filled * 4); + } + catch (Exception e) + { + Logger.Error(e, "Error while pushing audio to SDL"); + } + } + + public void Dispose() + { + if (DeviceStream != null) + { + SDL_DestroyAudioStream(DeviceStream); + DeviceStream = null; + DeviceId = 0; // Destroying audio stream will close audio device because we use SDL3 OpenAudioDeviceStream // won't use multiple AudioStream for now since it's barely useful } - ObjectHandle.Dispose(); + objectHandle.Dispose(); } } } diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs index 12e2044677..b656a12990 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs +++ b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs @@ -27,7 +27,6 @@ public override void Play() if (started) return; - started = false; playing = true; base.Play(); } diff --git a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs index d8060e7b79..1d879d6bea 100644 --- a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs @@ -55,7 +55,7 @@ internal void ReceiveAudioData(byte[] audio, int byteLen, SDL3AudioDecoder data, public SampleSDL3AudioPlayer CreatePlayer() { if (!isLoaded) - completion.WaitOne(10); + completion.WaitOne(); // may cause deadlock in bad situation, but needed to get tests passed return new SampleSDL3AudioPlayer(decodedAudio, spec.freq, spec.channels); } diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index 030c4efce1..92a3a6f067 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -21,6 +21,13 @@ public sealed class TrackSDL3 : Track, ISDL3AudioChannel private volatile bool isLoaded; public override bool IsLoaded => isLoaded; + private volatile bool isCompletelyLoaded; + + /// + /// Audio can be played without interrupt once it's set to true. means that it at least has 'some' data to play. 
+ /// + public bool IsCompletelyLoaded => isCompletelyLoaded; + private double currentTime; public override double CurrentTime => currentTime; @@ -142,7 +149,11 @@ protected override void UpdateState() } if (player.IsLoaded) + { + Length = player.AudioLength; + isCompletelyLoaded = true; decodeData = null; + } } if (player.Done && isRunning) @@ -239,8 +250,8 @@ int ISDL3AudioChannel.GetRemainingSamples(float[] data) lock (syncRoot) { - time = player.GetCurrentTime(); ret = player.GetRemainingSamples(data); + time = player.GetCurrentTime(); } Interlocked.Exchange(ref currentTime, time); @@ -292,6 +303,7 @@ protected override void Dispose(bool disposing) (Mixer as SDL3AudioMixer)?.StreamFree(this); decodeData?.Stop(); + decodeData = null; lock (syncRoot) player.Dispose(); diff --git a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs index b4b4a55ff2..e39275c39a 100644 --- a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs @@ -43,6 +43,8 @@ internal class TrackSDL3AudioPlayer : ResamplingPlayer, IDisposable private long audioDataLength; + public double AudioLength => GetMsFromIndex(audioDataLength); + /// /// Play backwards if set to true. /// @@ -227,6 +229,9 @@ public virtual void Reset(bool resetPosition = true) /// public double GetCurrentTime() { + if (SaveSeek > 0) + return GetMsFromIndex(SaveSeek); + if (AudioData == null) return 0; From 4f8fb15123567a93536b92cdf3c8d92414cf05e3 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 9 Aug 2024 02:22:33 +0900 Subject: [PATCH 098/127] Use interface to receive audio data in SDL3 Audio --- .../Audio/SDL3AudioDecoderManager.cs | 62 ++++++++++--------- .../Audio/Sample/SampleSDL3Factory.cs | 14 +---- osu.Framework/Audio/Track/TrackSDL3.cs | 8 +-- 3 files changed, 39 insertions(+), 45 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index 2a5103b14b..65563781a4 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -11,20 +11,13 @@ using ManagedBass; using osu.Framework.Audio.Callbacks; using osu.Framework.Graphics.Video; -using static osu.Framework.Audio.SDL3AudioDecoderManager; namespace osu.Framework.Audio { - /// - /// Decodes audio from , and convert it to appropriate format. - /// It needs a lot of polishing... - /// - public class SDL3AudioDecoderManager : IDisposable + public interface ISDL3AudioDataReceiver { - private readonly LinkedList jobs = new LinkedList(); - /// - /// Delegate to get decoded audio data from the decoder. + /// Interface to get decoded audio data from the decoder. /// /// Decoded audio. The format depends on you specified, /// so you may need to actual data format. @@ -32,7 +25,16 @@ public class SDL3AudioDecoderManager : IDisposable /// Length in byte of decoded audio. Use this instead of data.Length /// Associated . /// Whether if this is the last data or not. - public delegate void PassDataDelegate(byte[] data, int length, SDL3AudioDecoder decoderData, bool done); + void GetData(byte[] data, int length, SDL3AudioDecoder decoderData, bool done); + } + + /// + /// Decodes audio from , and convert it to appropriate format. + /// It needs a lot of polishing... 
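The new ISDL3AudioDataReceiver above replaces the PassDataDelegate callback: GetData can be invoked more than once, only the first `length` bytes of `data` are valid, and `done` marks the final chunk. A hypothetical receiver that simply buffers whatever it is given, shown only to illustrate the contract (SampleSDL3Factory and TrackSDL3 below are the real implementations; adjust usings if SDL3AudioDecoder resolves as a nested type):

using System.IO;
using osu.Framework.Audio;

internal class BufferingAudioReceiver : ISDL3AudioDataReceiver
{
    private readonly MemoryStream buffer = new MemoryStream();

    public bool Complete { get; private set; }

    public void GetData(byte[] data, int length, SDL3AudioDecoder decoderData, bool done)
    {
        // Use `length` rather than data.Length, as the array can be longer
        // than the decoded content.
        buffer.Write(data, 0, length);

        if (done)
            Complete = true;
    }
}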
+ /// + public class SDL3AudioDecoderManager : IDisposable + { + private readonly LinkedList jobs = new LinkedList(); private readonly Thread decoderThread; private readonly AutoResetEvent decoderWaitHandle; @@ -47,7 +49,7 @@ public class SDL3AudioDecoderManager : IDisposable /// Refer to /// Refer to /// A new instance. - internal static SDL3AudioDecoder CreateDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream = true, PassDataDelegate? pass = null) + internal static SDL3AudioDecoder CreateDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream = true, ISDL3AudioDataReceiver? pass = null) { SDL3AudioDecoder decoder = Bass.CurrentDevice >= 0 ? new SDL3AudioDecoder.BassAudioDecoder(stream, audioSpec, isTrack, autoDisposeStream, pass) @@ -87,7 +89,7 @@ public SDL3AudioDecoderManager() /// Refer to /// Refer to /// A new instance. - public SDL3AudioDecoder StartDecodingAsync(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, PassDataDelegate pass) + public SDL3AudioDecoder StartDecodingAsync(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, ISDL3AudioDataReceiver pass) { if (disposedValue) throw new InvalidOperationException($"Cannot start decoding on disposed {nameof(SDL3AudioDecoderManager)}"); @@ -123,17 +125,20 @@ private void loop(CancellationToken token) if (decoder.StopJob) { - decoder.Free(); + decoder.Dispose(); jobs.Remove(node); } else { int read = decodeAudio(decoder, out byte[] decoded); - decoder.Pass?.Invoke(decoded, read, decoder, !decoder.Loading); + decoder.Pass?.GetData(decoded, read, decoder, !decoder.Loading); } if (!decoder.Loading) + { + decoder.RemoveReferenceToReceiver(); // cannot do in Decoder.Dispose since Pass needs to be used later. jobs.Remove(node); + } node = next; } @@ -162,7 +167,7 @@ protected virtual void Dispose(bool disposing) { foreach (var job in jobs) { - job.Free(); + job.Dispose(); } jobs.Clear(); @@ -244,9 +249,9 @@ public abstract class SDL3AudioDecoder internal readonly bool AutoDisposeStream; /// - /// Decoder will call this once or more to pass the decoded audio data. + /// Decoder will call or more to pass the decoded audio data. /// - internal PassDataDelegate? Pass { get; private set; } + internal ISDL3AudioDataReceiver? Pass { get; private set; } private int bitrate; @@ -292,7 +297,7 @@ public long ByteLength /// public bool Loading { get => loading; protected set => loading = value; } - protected SDL3AudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, PassDataDelegate? pass) + protected SDL3AudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? pass) { Stream = stream; AudioSpec = audioSpec; @@ -310,15 +315,14 @@ public void Stop() } // Not using IDisposable since things must be handled in a decoder thread - internal virtual void Free() + internal virtual void Dispose() { - // Pass = null; - // Remove reference to the receiver - if (AutoDisposeStream) Stream.Dispose(); } + internal void RemoveReferenceToReceiver() => Pass = null; + protected abstract int LoadFromStreamInternal(out byte[] decoded); /// @@ -344,7 +348,7 @@ public int LoadFromStream(out byte[] decoded) finally { if (!Loading) - Free(); + Dispose(); } return read; @@ -380,12 +384,12 @@ private Resolution resolution private ushort bits => (ushort)SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format); - public BassAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, PassDataDelegate? 
pass) + public BassAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? pass) : base(stream, audioSpec, isTrack, autoDisposeStream, pass) { } - internal override void Free() + internal override void Dispose() { if (syncHandle != 0) { @@ -413,7 +417,7 @@ internal override void Free() decodeStream = 0; } - base.Free(); + base.Dispose(); } protected override int LoadFromStreamInternal(out byte[] decoded) @@ -499,17 +503,17 @@ internal class FFmpegAudioDecoder : SDL3AudioDecoder private VideoDecoder? ffmpeg; private byte[]? decodeData; - public FFmpegAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, PassDataDelegate? pass) + public FFmpegAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? pass) : base(stream, audioSpec, isTrack, autoDisposeStream, pass) { } - internal override void Free() + internal override void Dispose() { decodeData = null; ffmpeg?.Dispose(); - base.Free(); + base.Dispose(); } protected override int LoadFromStreamInternal(out byte[] decoded) diff --git a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs index 1d879d6bea..d11a31d423 100644 --- a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs @@ -10,7 +10,7 @@ namespace osu.Framework.Audio.Sample { - internal class SampleSDL3Factory : SampleFactory + internal class SampleSDL3Factory : SampleFactory, ISDL3AudioDataReceiver { private volatile bool isLoaded; public override bool IsLoaded => isLoaded; @@ -22,24 +22,18 @@ internal class SampleSDL3Factory : SampleFactory private readonly AutoResetEvent completion = new AutoResetEvent(false); - private SDL3AudioDecoder? decoder; - - public SampleSDL3Factory(Stream stream, string name, SDL3AudioMixer mixer, int playbackConcurrency, SDL_AudioSpec spec) + public SampleSDL3Factory(string name, SDL3AudioMixer mixer, int playbackConcurrency, SDL_AudioSpec spec) : base(name, playbackConcurrency) { this.mixer = mixer; this.spec = spec; - - decoder = SDL3AudioManager.DecoderManager.StartDecodingAsync(stream, spec, false, ReceiveAudioData); } - internal void ReceiveAudioData(byte[] audio, int byteLen, SDL3AudioDecoder data, bool done) + void ISDL3AudioDataReceiver.GetData(byte[] audio, int byteLen, SDL3AudioDecoder data, bool done) { if (IsDisposed) return; - decoder = null; - if (byteLen > 0) { decodedAudio = new float[byteLen / 4]; @@ -76,8 +70,6 @@ protected override void Dispose(bool disposing) if (IsDisposed) return; - decoder?.Stop(); - decodedAudio = Array.Empty(); completion.Dispose(); diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index 92a3a6f067..ed40c93b8a 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -12,7 +12,7 @@ namespace osu.Framework.Audio.Track { - public sealed class TrackSDL3 : Track, ISDL3AudioChannel + public sealed class TrackSDL3 : Track, ISDL3AudioChannel, ISDL3AudioDataReceiver { private readonly TempoSDL3AudioPlayer player; @@ -40,11 +40,9 @@ public sealed class TrackSDL3 : Track, ISDL3AudioChannel private volatile int bitrate; public override int? 
Bitrate => bitrate; - public TrackSDL3(string name, Stream data, SDL_AudioSpec spec, int samples) + public TrackSDL3(string name, SDL_AudioSpec spec, int samples) : base(name) { - EnqueueAction(() => SDL3AudioManager.DecoderManager.StartDecodingAsync(data, spec, true, ReceiveAudioData)); - // SoundTouch limitation const float tempo_minimum_supported = 0.05f; AggregateTempo.ValueChanged += t => @@ -60,7 +58,7 @@ public TrackSDL3(string name, Stream data, SDL_AudioSpec spec, int samples) private SDL3AudioDecoder? decodeData; - internal void ReceiveAudioData(byte[] audio, int length, SDL3AudioDecoder data, bool done) + void ISDL3AudioDataReceiver.GetData(byte[] audio, int length, SDL3AudioDecoder data, bool done) { if (IsDisposed) return; From fbcbd5fdc38fb45d0208fb5261ffde89ad3e9364 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 9 Aug 2024 02:23:07 +0900 Subject: [PATCH 099/127] SDL3 Audio text fixes --- osu.Framework.Tests/Audio/AudioMixerTest.cs | 90 +++++++------------ .../Audio/SDL3AudioTestComponents.cs | 15 +++- osu.Framework.Tests/Audio/SampleTest.cs | 67 +++++--------- osu.Framework.Tests/Audio/TrackTest.cs | 62 +++++-------- osu.Framework/Audio/SDL3AudioManager.cs | 24 +++-- 5 files changed, 105 insertions(+), 153 deletions(-) diff --git a/osu.Framework.Tests/Audio/AudioMixerTest.cs b/osu.Framework.Tests/Audio/AudioMixerTest.cs index 05d6b05576..c31c9387dd 100644 --- a/osu.Framework.Tests/Audio/AudioMixerTest.cs +++ b/osu.Framework.Tests/Audio/AudioMixerTest.cs @@ -10,7 +10,6 @@ using NUnit.Framework; using osu.Framework.Audio.Mixing; using osu.Framework.Audio.Mixing.Bass; -using osu.Framework.Audio.Mixing.SDL3; using osu.Framework.Audio.Sample; using osu.Framework.Audio.Track; using osu.Framework.Extensions; @@ -20,79 +19,48 @@ namespace osu.Framework.Tests.Audio [TestFixture] public class AudioMixerTest { - private BassTestComponents bass; - private BassAudioMixer mixerBass => (BassAudioMixer)bass.Mixer; - private TrackBass trackBass; - private SampleBass sampleBass; - - private SDL3AudioTestComponents sdl3; - private SDL3AudioMixer mixerSDL3 => (SDL3AudioMixer)sdl3.Mixer; - private TrackSDL3 trackSDL3; - private SampleSDL3 sampleSDL3; - private AudioTestComponents.Type type; private AudioTestComponents audio; private AudioMixer mixer; private Track track; private Sample sample; - [SetUp] - public void Setup() - { - bass = new BassTestComponents(); - trackBass = (TrackBass)bass.GetTrack(); - sampleBass = (SampleBass)bass.GetSample(); - - sdl3 = new SDL3AudioTestComponents(); - trackSDL3 = (TrackSDL3)sdl3.GetTrack(); - sampleSDL3 = (SampleSDL3)sdl3.GetSample(); - - // TrackSDL3 doesn't have data readily available right away after constructed. 
- while (!trackSDL3.IsCompletelyLoaded) - { - sdl3.Update(); - Thread.Sleep(10); - } - - bass.Update(); - sdl3.Update(); - } - [TearDown] public void Teardown() { - bass?.Dispose(); - sdl3?.Dispose(); + audio?.Dispose(); } - private void setupBackend(AudioTestComponents.Type id) + private void setupBackend(AudioTestComponents.Type id, bool loadTrack = false) { type = id; if (id == AudioTestComponents.Type.BASS) { - audio = bass; - mixer = mixerBass; - track = trackBass; - sample = sampleBass; + audio = new BassTestComponents(); + track = audio.GetTrack(); + sample = audio.GetSample(); } else if (id == AudioTestComponents.Type.SDL3) { - audio = sdl3; - mixer = mixerSDL3; - track = trackSDL3; - sample = sampleSDL3; + audio = new SDL3AudioTestComponents(); + track = audio.GetTrack(); + sample = audio.GetSample(); + + if (loadTrack) + ((SDL3AudioTestComponents)audio).WaitUntilTrackIsLoaded((TrackSDL3)track); } else { throw new InvalidOperationException("not a supported id"); } + + audio.Update(); + mixer = audio.Mixer; } private void assertThatMixerContainsChannel(AudioMixer mixer, IAudioChannel channel) { - TestContext.WriteLine($"{channel.Mixer.GetHashCode()} ({channel.Mixer.Identifier}) and {mixer.GetHashCode()} ({mixer.Identifier})"); - if (type == AudioTestComponents.Type.BASS) Assert.That(BassMix.ChannelGetMixer(((IBassAudioChannel)channel).Handle), Is.EqualTo(((BassAudioMixer)mixer).Handle)); else @@ -102,7 +70,9 @@ private void assertThatMixerContainsChannel(AudioMixer mixer, IAudioChannel chan [Test] public void TestMixerInitialised() { - Assert.That(mixerBass.Handle, Is.Not.Zero); + setupBackend(AudioTestComponents.Type.BASS); + + Assert.That(((BassAudioMixer)mixer).Handle, Is.Not.Zero); } [TestCase(AudioTestComponents.Type.BASS)] @@ -186,7 +156,7 @@ public void TestFreedChannelRemovedFromDefault(AudioTestComponents.Type id) audio.Update(); if (id == AudioTestComponents.Type.BASS) - Assert.That(BassMix.ChannelGetMixer(((IBassAudioChannel)trackBass).Handle), Is.Zero); + Assert.That(BassMix.ChannelGetMixer(((IBassAudioChannel)track).Handle), Is.Zero); else Assert.That(((IAudioChannel)track).Mixer, Is.Null); } @@ -212,7 +182,7 @@ public void TestChannelMovedToGlobalMixerAfterDispose(AudioTestComponents.Type i [TestCase(AudioTestComponents.Type.SDL3)] public void TestPlayPauseStop(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); Assert.That(!track.IsRunning); @@ -265,11 +235,11 @@ public void TestChannelRetainsPlayingStateWhenMovedBetweenMixers(AudioTestCompon Assert.That(track.IsRunning); } - [TestCase(AudioTestComponents.Type.BASS)] [TestCase(AudioTestComponents.Type.SDL3)] + [TestCase(AudioTestComponents.Type.BASS)] public void TestTrackReferenceLostWhenTrackIsDisposed(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); var trackReference = testDisposeTrackWithoutReference(); @@ -277,6 +247,13 @@ public void TestTrackReferenceLostWhenTrackIsDisposed(AudioTestComponents.Type i audio.Update(); audio.Update(); + // Workaround for SDL3, it needs decoder running on another thread to go away. 
+ if (id == AudioTestComponents.Type.SDL3) + { + audio.Dispose(); + audio = null; + } + GC.Collect(); GC.WaitForPendingFinalizers(); @@ -290,11 +267,6 @@ private WeakReference testDisposeTrackWithoutReference() track.Dispose(); track = null; - if (type == AudioTestComponents.Type.BASS) - trackBass = null; - else if (type == AudioTestComponents.Type.SDL3) - trackSDL3 = null; - return weakRef; } @@ -330,7 +302,7 @@ static WeakReference runTest(Sample sample) private void assertIfTrackIsPlaying() { if (type == AudioTestComponents.Type.BASS) - Assert.That(mixerBass.ChannelIsActive(trackBass), Is.Not.EqualTo(PlaybackState.Playing)); + Assert.That(((BassAudioMixer)mixer).ChannelIsActive((TrackBass)track), Is.Not.EqualTo(PlaybackState.Playing)); else Assert.That(track.IsRunning, Is.Not.True); } @@ -339,7 +311,7 @@ private void assertIfTrackIsPlaying() [TestCase(AudioTestComponents.Type.SDL3)] public void TestChannelDoesNotPlayIfReachedEndAndSeekedBackwards(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); audio.RunOnAudioThread(() => { @@ -362,7 +334,7 @@ public void TestChannelDoesNotPlayIfReachedEndAndSeekedBackwards(AudioTestCompon [TestCase(AudioTestComponents.Type.SDL3)] public void TestChannelDoesNotPlayIfReachedEndAndMovedMixers(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); audio.RunOnAudioThread(() => { diff --git a/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs index 70f0efa888..e9fa156294 100644 --- a/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs +++ b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs @@ -4,6 +4,7 @@ using System; using System.IO; using System.Linq; +using System.Threading; using osu.Framework.Audio; using osu.Framework.Audio.Mixing; using osu.Framework.Audio.Mixing.SDL3; @@ -51,6 +52,16 @@ public override AudioMixer CreateMixer() return mixer; } + public void WaitUntilTrackIsLoaded(TrackSDL3 track) + { + // TrackSDL3 doesn't have data readily available right away after constructed. 
+ while (!track.IsCompletelyLoaded) + { + Update(); + Thread.Sleep(10); + } + } + public override void DisposeInternal() { base.DisposeInternal(); @@ -59,9 +70,9 @@ public override void DisposeInternal() SDL_Quit(); } - internal override Track CreateTrack(Stream data, string name) => new TrackSDL3(name, data, baseManager.AudioSpec, 441); + internal override Track CreateTrack(Stream data, string name) => baseManager.GetNewTrack(data, name); internal override SampleFactory CreateSampleFactory(Stream stream, string name, AudioMixer mixer, int playbackConcurrency) - => new SampleSDL3Factory(stream, name, (SDL3AudioMixer)mixer, playbackConcurrency, baseManager.AudioSpec); + => baseManager.GetSampleFactory(stream, name, mixer, playbackConcurrency); } } diff --git a/osu.Framework.Tests/Audio/SampleTest.cs b/osu.Framework.Tests/Audio/SampleTest.cs index 5267f583f7..b773f89c04 100644 --- a/osu.Framework.Tests/Audio/SampleTest.cs +++ b/osu.Framework.Tests/Audio/SampleTest.cs @@ -13,59 +13,42 @@ namespace osu.Framework.Tests.Audio [TestFixture] public class SampleTest { - private BassTestComponents bass; - private Sample sampleBass; - - private SDL3AudioTestComponents sdl3; - private Sample sampleSDL3; + private AudioTestComponents audio; + private Sample sample; private SampleChannel channel; - [SetUp] - public void Setup() - { - bass = new BassTestComponents(); - sampleBass = bass.GetSample(); - - sdl3 = new SDL3AudioTestComponents(); - sampleSDL3 = sdl3.GetSample(); - - bass.Update(); - sdl3.Update(); - } - [TearDown] public void Teardown() { - bass?.Dispose(); - sdl3?.Dispose(); + audio?.Dispose(); } - private Sample getSample(AudioTestComponents.Type id) + private void setupBackend(AudioTestComponents.Type id) { if (id == AudioTestComponents.Type.BASS) - return sampleBass; + { + audio = new BassTestComponents(); + sample = audio.GetSample(); + } else if (id == AudioTestComponents.Type.SDL3) - return sampleSDL3; + { + audio = new SDL3AudioTestComponents(); + sample = audio.GetSample(); + } else + { throw new InvalidOperationException("not a supported id"); - } + } - private AudioTestComponents getTestComponents(AudioTestComponents.Type id) - { - if (id == AudioTestComponents.Type.BASS) - return bass; - else if (id == AudioTestComponents.Type.SDL3) - return sdl3; - else - throw new InvalidOperationException("not a supported id"); + audio.Update(); } [TestCase(AudioTestComponents.Type.BASS)] [TestCase(AudioTestComponents.Type.SDL3)] public void TestGetChannelOnDisposed(AudioTestComponents.Type id) { - var sample = getSample(id); + setupBackend(id); sample.Dispose(); @@ -79,8 +62,7 @@ public void TestGetChannelOnDisposed(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestStart(AudioTestComponents.Type id) { - var sample = getSample(id); - var audio = getTestComponents(id); + setupBackend(id); channel = sample.Play(); @@ -97,8 +79,7 @@ public void TestStart(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestStop(AudioTestComponents.Type id) { - var sample = getSample(id); - var audio = getTestComponents(id); + setupBackend(id); channel = sample.Play(); audio.Update(); @@ -113,8 +94,7 @@ public void TestStop(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestStopBeforeLoadFinished(AudioTestComponents.Type id) { - var sample = getSample(id); - var audio = getTestComponents(id); + setupBackend(id); channel = sample.Play(); channel.Stop(); @@ -128,8 +108,7 @@ public void 
TestStopBeforeLoadFinished(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestStopsWhenFactoryDisposed(AudioTestComponents.Type id) { - var sample = getSample(id); - var audio = getTestComponents(id); + setupBackend(id); channel = sample.Play(); audio.Update(); @@ -148,8 +127,7 @@ public void TestStopsWhenFactoryDisposed(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestPlayingUpdatedAfterInlinePlay(AudioTestComponents.Type id) { - var sample = getSample(id); - var audio = getTestComponents(id); + setupBackend(id); audio.RunOnAudioThread(() => channel = sample.Play()); Assert.That(channel.Playing, Is.True); @@ -163,8 +141,7 @@ public void TestPlayingUpdatedAfterInlinePlay(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestPlayingUpdatedAfterInlineStop(AudioTestComponents.Type id) { - var sample = getSample(id); - var audio = getTestComponents(id); + setupBackend(id); var channel = sample.Play(); audio.Update(); diff --git a/osu.Framework.Tests/Audio/TrackTest.cs b/osu.Framework.Tests/Audio/TrackTest.cs index 6f5d11878f..8aaf46135a 100644 --- a/osu.Framework.Tests/Audio/TrackTest.cs +++ b/osu.Framework.Tests/Audio/TrackTest.cs @@ -17,65 +17,43 @@ namespace osu.Framework.Tests.Audio [TestFixture] public class TrackTest { - private BassTestComponents bass; - private TrackBass trackBass; - - private SDL3AudioTestComponents sdl3; - private TrackSDL3 trackSDL3; - private AudioTestComponents audio; private Track track; - [SetUp] - public void Setup() - { - bass = new BassTestComponents(); - trackBass = (TrackBass)bass.GetTrack(); - - sdl3 = new SDL3AudioTestComponents(); - trackSDL3 = (TrackSDL3)sdl3.GetTrack(); - - // TrackSDL3 doesn't have data readily available right away after constructed. 
- while (!trackSDL3.IsCompletelyLoaded) - { - sdl3.Update(); - Thread.Sleep(10); - } - - bass.Update(); - sdl3.Update(); - } - [TearDown] public void Teardown() { - bass?.Dispose(); - sdl3?.Dispose(); + audio?.Dispose(); } - private void setupBackend(AudioTestComponents.Type id) + private void setupBackend(AudioTestComponents.Type id, bool loadTrack = false) { if (id == AudioTestComponents.Type.BASS) { - audio = bass; - track = trackBass; + audio = new BassTestComponents(); + track = audio.GetTrack(); } else if (id == AudioTestComponents.Type.SDL3) { - audio = sdl3; - track = trackSDL3; + audio = new SDL3AudioTestComponents(); + track = audio.GetTrack(); + + if (loadTrack) + ((SDL3AudioTestComponents)audio).WaitUntilTrackIsLoaded((TrackSDL3)track); } else { throw new InvalidOperationException("not a supported id"); } + + audio.Update(); } [TestCase(AudioTestComponents.Type.BASS)] [TestCase(AudioTestComponents.Type.SDL3)] public void TestStart(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); track.StartAsync(); audio.Update(); @@ -139,7 +117,7 @@ public void TestStopWhenDisposed(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestStopAtEnd(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); startPlaybackAt(track.Length - 1); @@ -206,7 +184,7 @@ public void TestSeekToEndFails(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestSeekBackToSamePosition(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); track.SeekAsync(1000); track.SeekAsync(0); @@ -224,7 +202,7 @@ public void TestSeekBackToSamePosition(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestPlaybackToEnd(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); startPlaybackAt(track.Length - 1); @@ -244,7 +222,7 @@ public void TestPlaybackToEnd(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestStartFromEndDoesNotRestart(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); startPlaybackAt(track.Length - 1); @@ -313,7 +291,7 @@ public void TestRestartFromRestartPoint(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3, 1000)] public void TestLoopingRestart(AudioTestComponents.Type id, double restartPoint) { - setupBackend(id); + setupBackend(id, true); track.Looping = true; track.RestartPoint = restartPoint; @@ -392,7 +370,7 @@ public void TestLoopingTrackDoesntSetCompleted(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestHasCompletedResetsOnSeekBack(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); // start playback and wait for completion. startPlaybackAt(track.Length - 1); @@ -417,7 +395,7 @@ public void TestHasCompletedResetsOnSeekBack(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestZeroFrequencyHandling(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); // start track. 
track.StartAsync(); @@ -487,7 +465,7 @@ public void TestZeroFrequencyDoesntResumeTrack(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestBitrate(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); Assert.Greater(track.Bitrate, 0); } diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index d23043cb93..addaf99c92 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -29,9 +29,6 @@ public class SDL3AudioManager : AudioManager public static readonly int AUDIO_CHANNELS = 2; public static readonly SDL_AudioFormat AUDIO_FORMAT = SDL_AUDIO_F32; - // it is currently in global static... need to do something - internal static SDL3AudioDecoderManager DecoderManager { get; } = new SDL3AudioDecoderManager(); - private readonly List sdlMixerList = new List(); private ImmutableArray deviceIdArray = ImmutableArray.Empty; @@ -179,10 +176,10 @@ protected override bool SetAudioDevice(int deviceIndex) protected override bool IsCurrentDeviceValid() => baseManager.DeviceId > 0 && SDL_AudioDevicePaused(baseManager.DeviceId) == SDL_FALSE; - internal override Track.Track GetNewTrack(Stream data, string name) => new TrackSDL3(name, data, baseManager.AudioSpec, baseManager.BufferSize); + internal override Track.Track GetNewTrack(Stream data, string name) => baseManager.GetNewTrack(data, name); internal override SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) - => new SampleSDL3Factory(data, name, (SDL3AudioMixer)mixer, playbackConcurrency, baseManager.AudioSpec); + => baseManager.GetSampleFactory(data, name, mixer, playbackConcurrency); protected override void Dispose(bool disposing) { @@ -211,6 +208,8 @@ internal unsafe class SDL3BaseAudioManager : IDisposable private ObjectHandle objectHandle; + private readonly SDL3AudioDecoderManager decoderManager = new SDL3AudioDecoderManager(); + internal SDL3BaseAudioManager(Func> mixerIterator) { this.mixerIterator = mixerIterator; @@ -317,6 +316,20 @@ private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount } } + internal Track.Track GetNewTrack(Stream data, string name) + { + TrackSDL3 track = new TrackSDL3(name, AudioSpec, BufferSize); + decoderManager.StartDecodingAsync(data, AudioSpec, true, track); + return track; + } + + internal SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) + { + SampleSDL3Factory sampleFactory = new SampleSDL3Factory(name, (SDL3AudioMixer)mixer, playbackConcurrency, AudioSpec); + decoderManager.StartDecodingAsync(data, AudioSpec, false, sampleFactory); + return sampleFactory; + } + public void Dispose() { if (DeviceStream != null) @@ -329,6 +342,7 @@ public void Dispose() } objectHandle.Dispose(); + decoderManager.Dispose(); } } } From 509b0752c22603a4f2da85545d5d5ec22a5d4557 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 9 Aug 2024 02:36:27 +0900 Subject: [PATCH 100/127] Satisfy InspectCode --- osu.Framework.Tests/Audio/BassTestComponents.cs | 3 +-- osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs | 2 +- osu.Framework.Tests/Audio/SampleTest.cs | 2 +- osu.Framework/Audio/SDL3AudioDecoderManager.cs | 4 ++-- osu.Framework/Audio/SDL3AudioManager.cs | 4 ++-- osu.Framework/Audio/Sample/SampleSDL3Factory.cs | 1 - osu.Framework/Audio/Track/TrackSDL3.cs | 3 +-- 7 files changed, 8 insertions(+), 11 deletions(-) diff --git 
a/osu.Framework.Tests/Audio/BassTestComponents.cs b/osu.Framework.Tests/Audio/BassTestComponents.cs index 0e9b131f31..2bf02e0148 100644 --- a/osu.Framework.Tests/Audio/BassTestComponents.cs +++ b/osu.Framework.Tests/Audio/BassTestComponents.cs @@ -1,7 +1,6 @@ // Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. // See the LICENCE file in the repository root for full licence text. -using System; using System.IO; using ManagedBass; using osu.Framework.Audio.Mixing; @@ -16,7 +15,7 @@ namespace osu.Framework.Tests.Audio /// /// Provides a BASS audio pipeline to be used for testing audio components. /// - public class BassTestComponents : AudioTestComponents, IDisposable + public class BassTestComponents : AudioTestComponents { public BassTestComponents(bool init = true) : base(init) diff --git a/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs index e9fa156294..5195b1bbd5 100644 --- a/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs +++ b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs @@ -18,7 +18,7 @@ namespace osu.Framework.Tests.Audio /// /// Provides a SDL3 audio pipeline to be used for testing audio components. /// - public class SDL3AudioTestComponents : AudioTestComponents, IDisposable + public class SDL3AudioTestComponents : AudioTestComponents { private SDL3BaseAudioManager baseManager = null!; diff --git a/osu.Framework.Tests/Audio/SampleTest.cs b/osu.Framework.Tests/Audio/SampleTest.cs index b773f89c04..ee32dcf745 100644 --- a/osu.Framework.Tests/Audio/SampleTest.cs +++ b/osu.Framework.Tests/Audio/SampleTest.cs @@ -143,7 +143,7 @@ public void TestPlayingUpdatedAfterInlineStop(AudioTestComponents.Type id) { setupBackend(id); - var channel = sample.Play(); + channel = sample.Play(); audio.Update(); audio.RunOnAudioThread(() => channel.Stop()); diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index 65563781a4..0803ffc392 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -58,7 +58,7 @@ internal static SDL3AudioDecoder CreateDecoder(Stream stream, SDL_AudioSpec audi return decoder; } - private bool bassInit; + private readonly bool bassInit; /// /// Starts a decoder thread. @@ -267,7 +267,7 @@ public int Bitrate private double length; /// - /// Audio length in miliseconds. Decoder may fill this in after the first call of . + /// Audio length in milliseconds. Decoder may fill this in after the first call of . /// public double Length { diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index addaf99c92..b7faabd52c 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -87,7 +87,7 @@ internal void OnNewDeviceEvent(SDL_AudioDeviceID addedDeviceIndex) { AudioScheduler.Add(() => { - // the index is only vaild until next SDL_GetNumAudioDevices call, so get the name first. + // the index is only valid until next SDL_GetNumAudioDevices call, so get the name first. string name = SDL_GetAudioDeviceName(addedDeviceIndex); syncAudioDevices(); @@ -189,7 +189,6 @@ protected override void Dispose(bool disposing) } } - /// /// To share basic playback logic with audio tests. 
/// @@ -251,6 +250,7 @@ internal bool SetAudioDevice(SDL_AudioDeviceID targetId) SDL_AudioSpec spec = AudioSpec; SDL_AudioStream* deviceStream = SDL_OpenAudioDeviceStream(targetId, &spec, &audioCallback, objectHandle.Handle); + if (deviceStream != null) { SDL_DestroyAudioStream(DeviceStream); diff --git a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs index d11a31d423..ddd24ae6e7 100644 --- a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs @@ -2,7 +2,6 @@ // See the LICENCE file in the repository root for full licence text. using System; -using System.IO; using System.Threading; using osu.Framework.Audio.Mixing.SDL3; using osu.Framework.Bindables; diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index ed40c93b8a..9e2b1dc386 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -2,7 +2,6 @@ // See the LICENCE file in the repository root for full licence text. using System; -using System.IO; using System.Threading; using System.Threading.Tasks; using NAudio.Dsp; @@ -174,7 +173,7 @@ protected override void UpdateState() player.FillRequiredSamples(); } - // Not sure if I need to split this up to another class since this featrue is only exclusive to Track + // Not sure if I need to split this up to another class since this feature is only exclusive to Track if (amplitudeRequested && isRunning && Math.Abs(currentTime - lastTime) > 1000.0 / 60.0) { lastTime = currentTime; From 68c31bfe0e20939f2a5c1532a757e554eeddd3cb Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 9 Aug 2024 03:00:17 +0900 Subject: [PATCH 101/127] Revert test class name changes to reduce diff --- osu.Framework.Tests/Audio/AudioTestComponents.cs | 2 +- .../Audio/{AudioMixerTest.cs => BassAudioMixerTest.cs} | 2 +- osu.Framework.Tests/Audio/{SampleTest.cs => SampleBassTest.cs} | 2 +- osu.Framework.Tests/Audio/{TrackTest.cs => TrackBassTest.cs} | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) rename osu.Framework.Tests/Audio/{AudioMixerTest.cs => BassAudioMixerTest.cs} (99%) rename osu.Framework.Tests/Audio/{SampleTest.cs => SampleBassTest.cs} (99%) rename osu.Framework.Tests/Audio/{TrackTest.cs => TrackBassTest.cs} (99%) diff --git a/osu.Framework.Tests/Audio/AudioTestComponents.cs b/osu.Framework.Tests/Audio/AudioTestComponents.cs index 1395ed8643..39796d2ed1 100644 --- a/osu.Framework.Tests/Audio/AudioTestComponents.cs +++ b/osu.Framework.Tests/Audio/AudioTestComponents.cs @@ -37,7 +37,7 @@ protected AudioTestComponents(bool init) AllComponents.AddItem(MixerComponents); Mixer = CreateMixer(); - Resources = new DllResourceStore(typeof(TrackTest).Assembly); + Resources = new DllResourceStore(typeof(TrackBassTest).Assembly); TrackStore = new TrackStore(Resources, Mixer, CreateTrack); SampleStore = new SampleStore(Resources, Mixer, CreateSampleFactory); diff --git a/osu.Framework.Tests/Audio/AudioMixerTest.cs b/osu.Framework.Tests/Audio/BassAudioMixerTest.cs similarity index 99% rename from osu.Framework.Tests/Audio/AudioMixerTest.cs rename to osu.Framework.Tests/Audio/BassAudioMixerTest.cs index c31c9387dd..285e5836c3 100644 --- a/osu.Framework.Tests/Audio/AudioMixerTest.cs +++ b/osu.Framework.Tests/Audio/BassAudioMixerTest.cs @@ -17,7 +17,7 @@ namespace osu.Framework.Tests.Audio { [TestFixture] - public class AudioMixerTest + public class BassAudioMixerTest { private AudioTestComponents.Type 
type; private AudioTestComponents audio; diff --git a/osu.Framework.Tests/Audio/SampleTest.cs b/osu.Framework.Tests/Audio/SampleBassTest.cs similarity index 99% rename from osu.Framework.Tests/Audio/SampleTest.cs rename to osu.Framework.Tests/Audio/SampleBassTest.cs index ee32dcf745..c11abdff4b 100644 --- a/osu.Framework.Tests/Audio/SampleTest.cs +++ b/osu.Framework.Tests/Audio/SampleBassTest.cs @@ -11,7 +11,7 @@ namespace osu.Framework.Tests.Audio { [TestFixture] - public class SampleTest + public class SampleBassTest { private AudioTestComponents audio; private Sample sample; diff --git a/osu.Framework.Tests/Audio/TrackTest.cs b/osu.Framework.Tests/Audio/TrackBassTest.cs similarity index 99% rename from osu.Framework.Tests/Audio/TrackTest.cs rename to osu.Framework.Tests/Audio/TrackBassTest.cs index 8aaf46135a..49554e70a4 100644 --- a/osu.Framework.Tests/Audio/TrackTest.cs +++ b/osu.Framework.Tests/Audio/TrackBassTest.cs @@ -15,7 +15,7 @@ namespace osu.Framework.Tests.Audio { [TestFixture] - public class TrackTest + public class TrackBassTest { private AudioTestComponents audio; private Track track; From b11b249ebe8b49150003959146618d787da35b7d Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 9 Aug 2024 03:08:01 +0900 Subject: [PATCH 102/127] Init baseManager first in SDL3AudioManager --- osu.Framework/Audio/AudioManager.cs | 6 ++++++ osu.Framework/Audio/SDL3AudioManager.cs | 9 ++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/osu.Framework/Audio/AudioManager.cs b/osu.Framework/Audio/AudioManager.cs index acef0cbdb1..cb5e895aad 100644 --- a/osu.Framework/Audio/AudioManager.cs +++ b/osu.Framework/Audio/AudioManager.cs @@ -125,6 +125,8 @@ public abstract class AudioManager : AudioCollectionManager /// The sample store containing all audio samples to be used in the future. protected AudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) { + Prepare(); + CurrentAudioThread = audioThread; CurrentAudioThread.RegisterManager(this); @@ -151,6 +153,10 @@ protected AudioManager(AudioThread audioThread, ResourceStore trackStore }); } + protected virtual void Prepare() + { + } + internal abstract Track.Track GetNewTrack(Stream data, string name); internal abstract SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency); diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index b7faabd52c..c8b1d2c782 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -39,7 +39,7 @@ public class SDL3AudioManager : AudioManager protected override void InvokeOnLostDevice(string deviceName) => eventScheduler.Add(() => base.InvokeOnLostDevice(deviceName)); - private readonly SDL3BaseAudioManager baseManager; + private SDL3BaseAudioManager baseManager; /// /// Creates a new . 
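// A minimal sketch (hypothetical Base/Derived names, not part of this patch) of the ordering that
// Prepare() relies on: a virtual call made from the base constructor dispatches to the derived
// override before the derived constructor body runs. Presumably that is why baseManager is now
// created in Prepare(), so the SDL3 backend exists before anything else the base AudioManager
// constructor does, rather than being assigned only after base(...) has finished.
abstract class Base
{
    protected Base()
    {
        Prepare();       // the virtual call dispatches to Derived.Prepare() first
        CreateMixer();   // safe: the backend already exists at this point
    }

    protected virtual void Prepare() { }

    protected abstract void CreateMixer();
}

class Derived : Base
{
    private object backend = null!;   // stands in for SDL3BaseAudioManager

    protected override void Prepare() => backend = new object();

    protected override void CreateMixer() => _ = backend.GetHashCode();
}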
@@ -50,11 +50,14 @@ public class SDL3AudioManager : AudioManager public SDL3AudioManager(AudioThread audioThread, ResourceStore trackStore, ResourceStore sampleStore) : base(audioThread, trackStore, sampleStore) { - baseManager = new SDL3BaseAudioManager(() => sdlMixerList); - AudioScheduler.Add(syncAudioDevices); } + protected override void Prepare() + { + baseManager = new SDL3BaseAudioManager(() => sdlMixerList); + } + public override string ToString() { return $@"{GetType().ReadableName()} ({baseManager.DeviceName})"; From 96003b38d456bf3110e70f6ed8dc4ca89afa9cf1 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 9 Aug 2024 18:32:42 +0900 Subject: [PATCH 103/127] Different workaround for an SDL3 Audio test --- .../Audio/BassAudioMixerTest.cs | 9 +-- .../Audio/SDL3AudioDecoderManager.cs | 68 +++++++++---------- osu.Framework/Audio/SDL3AudioManager.cs | 37 +++++++++- .../Audio/Sample/SampleSDL3Factory.cs | 6 +- osu.Framework/Audio/Track/TrackSDL3.cs | 50 +++++++------- .../Audio/Track/TrackSDL3AudioPlayer.cs | 2 +- 6 files changed, 101 insertions(+), 71 deletions(-) diff --git a/osu.Framework.Tests/Audio/BassAudioMixerTest.cs b/osu.Framework.Tests/Audio/BassAudioMixerTest.cs index 285e5836c3..89d3883440 100644 --- a/osu.Framework.Tests/Audio/BassAudioMixerTest.cs +++ b/osu.Framework.Tests/Audio/BassAudioMixerTest.cs @@ -239,7 +239,7 @@ public void TestChannelRetainsPlayingStateWhenMovedBetweenMixers(AudioTestCompon [TestCase(AudioTestComponents.Type.BASS)] public void TestTrackReferenceLostWhenTrackIsDisposed(AudioTestComponents.Type id) { - setupBackend(id, true); + setupBackend(id); var trackReference = testDisposeTrackWithoutReference(); @@ -247,13 +247,6 @@ public void TestTrackReferenceLostWhenTrackIsDisposed(AudioTestComponents.Type i audio.Update(); audio.Update(); - // Workaround for SDL3, it needs decoder running on another thread to go away. - if (id == AudioTestComponents.Type.SDL3) - { - audio.Dispose(); - audio = null; - } - GC.Collect(); GC.WaitForPendingFinalizers(); diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index 0803ffc392..8123503634 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -23,9 +23,10 @@ public interface ISDL3AudioDataReceiver /// so you may need to actual data format. /// This may be used by decoder later to reduce allocation, so you need to copy the data before exiting from this delegate, otherwise you may end up with wrong data. /// Length in byte of decoded audio. Use this instead of data.Length - /// Associated . /// Whether if this is the last data or not. 
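        // GetMetaData appears to have been split out so that bitrate, length and byteLength arrive once,
        // ahead of any sample data; TrackSDL3 uses that to size its buffer via PrepareStream(byteLength)
        // instead of only learning the length after decoding has finished.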
- void GetData(byte[] data, int length, SDL3AudioDecoder decoderData, bool done); + void GetData(byte[] data, int length, bool done); + + void GetMetaData(int bitrate, double length, long byteLength); } /// @@ -123,20 +124,32 @@ private void loop(CancellationToken token) var next = node.Next; SDL3AudioDecoder decoder = node.Value; - if (decoder.StopJob) - { - decoder.Dispose(); - jobs.Remove(node); - } - else + if (!decoder.StopJob) { - int read = decodeAudio(decoder, out byte[] decoded); - decoder.Pass?.GetData(decoded, read, decoder, !decoder.Loading); + try + { + int read = decodeAudio(decoder, out byte[] decoded); + + if (!decoder.MetadataSended) + { + decoder.MetadataSended = true; + decoder.Pass?.GetMetaData(decoder.Bitrate, decoder.Length, decoder.ByteLength); + } + + decoder.Pass?.GetData(decoded, read, !decoder.Loading); + } + catch (ObjectDisposedException) + { + decoder.StopJob = true; + } + + if (!decoder.Loading) + jobs.Remove(node); } - if (!decoder.Loading) + if (decoder.StopJob) { - decoder.RemoveReferenceToReceiver(); // cannot do in Decoder.Dispose since Pass needs to be used later. + decoder.Dispose(); jobs.Remove(node); } @@ -249,9 +262,9 @@ public abstract class SDL3AudioDecoder internal readonly bool AutoDisposeStream; /// - /// Decoder will call or more to pass the decoded audio data. + /// Decoder will call once or more to pass the decoded audio data. /// - internal ISDL3AudioDataReceiver? Pass { get; private set; } + internal readonly ISDL3AudioDataReceiver? Pass; private int bitrate; @@ -288,6 +301,8 @@ public long ByteLength set => Interlocked.Exchange(ref byteLength, value); } + internal bool MetadataSended; + internal volatile bool StopJob; private volatile bool loading; @@ -321,8 +336,6 @@ internal virtual void Dispose() Stream.Dispose(); } - internal void RemoveReferenceToReceiver() => Pass = null; - protected abstract int LoadFromStreamInternal(out byte[] decoded); /// @@ -362,9 +375,6 @@ internal class BassAudioDecoder : SDL3AudioDecoder private int decodeStream; private FileCallbacks? fileCallbacks; - private int syncHandle; - private SyncCallback? syncCallback; - private int resampler; private byte[]? decodeData; @@ -391,17 +401,8 @@ public BassAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bo internal override void Dispose() { - if (syncHandle != 0) - { - Bass.ChannelRemoveSync(resampler == 0 ? decodeStream : resampler, syncHandle); - syncHandle = 0; - } - fileCallbacks?.Dispose(); - syncCallback?.Dispose(); - fileCallbacks = null; - syncCallback = null; decodeData = null; @@ -428,10 +429,6 @@ protected override int LoadFromStreamInternal(out byte[] decoded) if (!Loading) { fileCallbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); - syncCallback = new SyncCallback((_, _, _, _) => - { - Loading = false; - }); BassFlags bassFlags = BassFlags.Decode | resolution.ToBassFlag(); if (IsTrack) bassFlags |= BassFlags.Prescan; @@ -468,8 +465,6 @@ protected override int LoadFromStreamInternal(out byte[] decoded) throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); } - syncHandle = Bass.ChannelSetSync(resampler == 0 ? 
decodeStream : resampler, SyncFlags.End | SyncFlags.Onetime, 0, syncCallback.Callback, syncCallback.Handle); - Loading = true; } @@ -492,6 +487,11 @@ protected override int LoadFromStreamInternal(out byte[] decoded) if (Bass.LastError != Errors.Ended) throw new FormatException($"Couldn't decode: {Bass.LastError}"); } + else if (got < bufferLen) + { + // originally used synchandle to detect end, but it somehow created strong handle + Loading = false; + } decoded = decodeData; return Math.Max(0, got); diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index c8b1d2c782..cdc6c9c91d 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -215,6 +215,7 @@ internal unsafe class SDL3BaseAudioManager : IDisposable internal SDL3BaseAudioManager(Func> mixerIterator) { this.mixerIterator = mixerIterator; + objectHandle = new ObjectHandle(this, GCHandleType.Normal); AudioSpec = new SDL_AudioSpec { @@ -319,17 +320,49 @@ private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount } } + /// + /// With how decoders work, we need this to get test passed + /// I don't want this either... otherwise we have to dispose decoder in tests + /// + private class ReceiverGCWrapper : ISDL3AudioDataReceiver + { + private readonly WeakReference channelWeakReference; + + internal ReceiverGCWrapper(WeakReference channel) + { + channelWeakReference = channel; + } + + void ISDL3AudioDataReceiver.GetData(byte[] data, int length, bool done) + { + if (channelWeakReference.TryGetTarget(out ISDL3AudioDataReceiver r)) + r.GetData(data, length, done); + else + throw new ObjectDisposedException("channel is already disposed"); + } + + void ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLength) + { + if (channelWeakReference.TryGetTarget(out ISDL3AudioDataReceiver r)) + r.GetMetaData(bitrate, length, byteLength); + else + throw new ObjectDisposedException("channel is already disposed"); + } + } + internal Track.Track GetNewTrack(Stream data, string name) { TrackSDL3 track = new TrackSDL3(name, AudioSpec, BufferSize); - decoderManager.StartDecodingAsync(data, AudioSpec, true, track); + ReceiverGCWrapper receiverGC = new ReceiverGCWrapper(new WeakReference(track)); + decoderManager.StartDecodingAsync(data, AudioSpec, true, receiverGC); return track; } internal SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) { SampleSDL3Factory sampleFactory = new SampleSDL3Factory(name, (SDL3AudioMixer)mixer, playbackConcurrency, AudioSpec); - decoderManager.StartDecodingAsync(data, AudioSpec, false, sampleFactory); + ReceiverGCWrapper receiverGC = new ReceiverGCWrapper(new WeakReference(sampleFactory)); + decoderManager.StartDecodingAsync(data, AudioSpec, false, receiverGC); return sampleFactory; } diff --git a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs index ddd24ae6e7..670713cae9 100644 --- a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs @@ -28,7 +28,7 @@ public SampleSDL3Factory(string name, SDL3AudioMixer mixer, int playbackConcurre this.spec = spec; } - void ISDL3AudioDataReceiver.GetData(byte[] audio, int byteLen, SDL3AudioDecoder data, bool done) + void ISDL3AudioDataReceiver.GetData(byte[] audio, int byteLen, bool done) { if (IsDisposed) return; @@ -74,5 +74,9 @@ protected override void Dispose(bool disposing) completion.Dispose(); 
base.Dispose(disposing); } + + void ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLength) + { + } // not needed } } diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index 9e2b1dc386..27871844f0 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -7,6 +7,7 @@ using NAudio.Dsp; using osu.Framework.Audio.Mixing.SDL3; using osu.Framework.Extensions; +using osu.Framework.Logging; using SDL; namespace osu.Framework.Audio.Track @@ -55,9 +56,7 @@ public TrackSDL3(string name, SDL_AudioSpec spec, int samples) private readonly object syncRoot = new object(); - private SDL3AudioDecoder? decodeData; - - void ISDL3AudioDataReceiver.GetData(byte[] audio, int length, SDL3AudioDecoder data, bool done) + void ISDL3AudioDataReceiver.GetData(byte[] audio, int length, bool done) { if (IsDisposed) return; @@ -67,17 +66,38 @@ void ISDL3AudioDataReceiver.GetData(byte[] audio, int length, SDL3AudioDecoder d if (!player.IsLoaded) { if (!player.IsLoading) - player.PrepareStream(data.ByteLength); + { + Logger.Log("GetMetaData should be called first, falling back to default buffer size", level: LogLevel.Important); + player.PrepareStream(); + } player.PutSamplesInStream(audio, length); if (done) + { player.DonePutting(); + Length = player.AudioLength; + isCompletelyLoaded = true; + } } } + } + void ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLength) + { if (!isLoaded) - Interlocked.Exchange(ref decodeData, data); + { + Length = length; + this.bitrate = bitrate; + + lock (syncRoot) + { + if (!player.IsLoading) + player.PrepareStream(byteLength); + } + + isLoaded = true; + } } private volatile bool amplitudeRequested; @@ -136,23 +156,6 @@ protected override void UpdateState() { base.UpdateState(); - if (decodeData != null) - { - if (!isLoaded) - { - Length = decodeData.Length; - bitrate = decodeData.Bitrate; - isLoaded = true; - } - - if (player.IsLoaded) - { - Length = player.AudioLength; - isCompletelyLoaded = true; - decodeData = null; - } - } - if (player.Done && isRunning) { if (Looping) @@ -299,9 +302,6 @@ protected override void Dispose(bool disposing) isRunning = false; (Mixer as SDL3AudioMixer)?.StreamFree(this); - decodeData?.Stop(); - decodeData = null; - lock (syncRoot) player.Dispose(); diff --git a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs index e39275c39a..67c50c1a84 100644 --- a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs @@ -75,7 +75,7 @@ private void prepareArray(long wanted) AudioData = temp; } - internal void PrepareStream(long byteLength) + internal void PrepareStream(long byteLength = 3 * 60 * 44100 * 2 * 4) { if (disposedValue) return; From 32d675d48be7bf722ecf51b803e406b34949c57a Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 9 Aug 2024 19:16:42 +0900 Subject: [PATCH 104/127] Process 10ms more in advance in TempoSDL3AudioPlayer --- osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs b/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs index 2a314873be..80fb49d094 100644 --- a/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs @@ -53,7 +53,8 @@ private void fillSamples(int samples) while (!base.Done && 
soundTouch.AvailableSamples < samples) { - int getSamples = (int)Math.Ceiling((samples - soundTouch.AvailableSamples) * Tempo) * SrcChannels; + // process 10ms more to reduce overhead + int getSamples = (int)Math.Ceiling((samples + (int)(SrcRate * 0.01) - soundTouch.AvailableSamples) * Tempo) * SrcChannels; float[] src = new float[getSamples]; getSamples = base.GetRemainingSamples(src); if (getSamples <= 0) From 462560e9a9be7d751a594907fc880a7a615d3bdf Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 16 Aug 2024 20:02:39 +0900 Subject: [PATCH 105/127] Fix incorrect volume at start in SDL audio single thread mode --- osu.Framework/Audio/Sample/SampleChannelSDL3.cs | 3 +++ osu.Framework/Audio/Track/TrackSDL3.cs | 3 +++ 2 files changed, 6 insertions(+) diff --git a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs index b656a12990..30b064b364 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs +++ b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs @@ -27,6 +27,9 @@ public override void Play() if (started) return; + // ensure state is correct before starting. + InvalidateState(); + playing = true; base.Play(); } diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index 27871844f0..45e03ae2aa 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -227,6 +227,9 @@ public override void Start() public override Task StartAsync() => EnqueueAction(() => { + // ensure state is correct before starting. + InvalidateState(); + lock (syncRoot) player.Reset(false); From 5b7e2930a4db594ee8cfc3c155f5fea8f21978ad Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sat, 17 Aug 2024 00:20:27 +0900 Subject: [PATCH 106/127] Remove a node later in SDL3AudioDecoder --- osu.Framework/Audio/SDL3AudioDecoderManager.cs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index 8123503634..f5b650c3fb 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -146,8 +146,7 @@ private void loop(CancellationToken token) if (!decoder.Loading) jobs.Remove(node); } - - if (decoder.StopJob) + else { decoder.Dispose(); jobs.Remove(node); From 4f5cd48d6ee5941a7c5f99010a660172e4f2175f Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Tue, 20 Aug 2024 20:30:44 +0900 Subject: [PATCH 107/127] Fix test function name --- osu.Framework.Tests/Audio/BassAudioMixerTest.cs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/osu.Framework.Tests/Audio/BassAudioMixerTest.cs b/osu.Framework.Tests/Audio/BassAudioMixerTest.cs index 89d3883440..775d4e5bdd 100644 --- a/osu.Framework.Tests/Audio/BassAudioMixerTest.cs +++ b/osu.Framework.Tests/Audio/BassAudioMixerTest.cs @@ -292,7 +292,7 @@ static WeakReference runTest(Sample sample) } } - private void assertIfTrackIsPlaying() + private void assertThatTrackIsPlaying() { if (type == AudioTestComponents.Type.BASS) Assert.That(((BassAudioMixer)mixer).ChannelIsActive((TrackBass)track), Is.Not.EqualTo(PlaybackState.Playing)); @@ -315,12 +315,12 @@ public void TestChannelDoesNotPlayIfReachedEndAndSeekedBackwards(AudioTestCompon Thread.Sleep(50); audio.Update(); - assertIfTrackIsPlaying(); + assertThatTrackIsPlaying(); audio.RunOnAudioThread(() => 
track.SeekAsync(0).WaitSafely()); audio.Update(); - assertIfTrackIsPlaying(); + assertThatTrackIsPlaying(); } [TestCase(AudioTestComponents.Type.BASS)] @@ -338,13 +338,13 @@ public void TestChannelDoesNotPlayIfReachedEndAndMovedMixers(AudioTestComponents Thread.Sleep(50); audio.Update(); - assertIfTrackIsPlaying(); + assertThatTrackIsPlaying(); var secondMixer = audio.CreateMixer(); secondMixer.Add(track); audio.Update(); - assertIfTrackIsPlaying(); + assertThatTrackIsPlaying(); } } } From 7ea5f36d820a9c737b59522bff83389c6fe32bb7 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Tue, 20 Aug 2024 21:03:22 +0900 Subject: [PATCH 108/127] Make ffmpeg null after finishing audio decoding --- osu.Framework/Audio/SDL3AudioDecoderManager.cs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index f5b650c3fb..e5d48ce8a7 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -512,6 +512,8 @@ internal override void Dispose() decodeData = null; ffmpeg?.Dispose(); + ffmpeg = null; + base.Dispose(); } From b4929823e1cba0eba9ee8baeddaf107fce520c9a Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 23 Aug 2024 00:19:34 +0900 Subject: [PATCH 109/127] Init SDL Audio later --- osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs | 10 ++-------- osu.Framework/Audio/SDL3AudioManager.cs | 7 +++++++ osu.Framework/Platform/SDL3/SDL3Window.cs | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs index 5195b1bbd5..051e034e3c 100644 --- a/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs +++ b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs @@ -10,7 +10,6 @@ using osu.Framework.Audio.Mixing.SDL3; using osu.Framework.Audio.Sample; using osu.Framework.Audio.Track; -using SDL; using static SDL.SDL3; namespace osu.Framework.Tests.Audio @@ -30,16 +29,13 @@ public SDL3AudioTestComponents(bool init = true) protected override void Prepare() { base.Prepare(); + + SDL_SetHint(SDL_HINT_AUDIO_DRIVER, "dummy"u8); baseManager = new SDL3BaseAudioManager(MixerComponents.Items.OfType); } public override void Init() { - SDL_SetHint(SDL_HINT_AUDIO_DRIVER, "dummy"u8); - - if (SDL_Init(SDL_InitFlags.SDL_INIT_AUDIO) < 0) - throw new InvalidOperationException($"Failed to initialise SDL: {SDL_GetError()}"); - if (!baseManager.SetAudioDevice(SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK)) throw new InvalidOperationException($"Failed to open SDL3 audio device: {SDL_GetError()}"); } @@ -66,8 +62,6 @@ public override void DisposeInternal() { base.DisposeInternal(); baseManager.Dispose(); - - SDL_Quit(); } internal override Track CreateTrack(Stream data, string name) => baseManager.GetNewTrack(data, name); diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index cdc6c9c91d..1cc044c7ca 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -214,6 +214,11 @@ internal unsafe class SDL3BaseAudioManager : IDisposable internal SDL3BaseAudioManager(Func> mixerIterator) { + if (SDL_InitSubSystem(SDL_InitFlags.SDL_INIT_AUDIO) < 0) + { + throw new InvalidOperationException($"Failed to initialise SDL Audio: {SDL_GetError()}"); + } + this.mixerIterator = mixerIterator; objectHandle = new ObjectHandle(this, 
GCHandleType.Normal); @@ -379,6 +384,8 @@ public void Dispose() objectHandle.Dispose(); decoderManager.Dispose(); + + SDL_QuitSubSystem(SDL_InitFlags.SDL_INIT_AUDIO); } } } diff --git a/osu.Framework/Platform/SDL3/SDL3Window.cs b/osu.Framework/Platform/SDL3/SDL3Window.cs index d25301653a..a75cb82079 100644 --- a/osu.Framework/Platform/SDL3/SDL3Window.cs +++ b/osu.Framework/Platform/SDL3/SDL3Window.cs @@ -156,7 +156,7 @@ protected SDL3Window(GraphicsSurfaceType surfaceType, string appName) SDL_SetHint(SDL_HINT_APP_NAME, appName); - if (SDL_Init(SDL_InitFlags.SDL_INIT_VIDEO | SDL_InitFlags.SDL_INIT_GAMEPAD | SDL_InitFlags.SDL_INIT_AUDIO) < 0) + if (SDL_Init(SDL_InitFlags.SDL_INIT_VIDEO | SDL_InitFlags.SDL_INIT_GAMEPAD) < 0) { throw new InvalidOperationException($"Failed to initialise SDL: {SDL_GetError()}"); } From e844ea12310f820246c08ea2eaef7ab9c9fd8b70 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 6 Sep 2024 22:33:09 +0900 Subject: [PATCH 110/127] Separate AmplitudeProcessor from TrackSDL3 --- .../Audio/Mixing/SDL3/ISDL3AudioChannel.cs | 2 +- osu.Framework/Audio/SDL3AmplitudeProcessor.cs | 52 +++++++++++++++++ osu.Framework/Audio/Track/TrackSDL3.cs | 57 ++----------------- 3 files changed, 59 insertions(+), 52 deletions(-) create mode 100644 osu.Framework/Audio/SDL3AmplitudeProcessor.cs diff --git a/osu.Framework/Audio/Mixing/SDL3/ISDL3AudioChannel.cs b/osu.Framework/Audio/Mixing/SDL3/ISDL3AudioChannel.cs index c2f2132abb..f8c7ea4476 100644 --- a/osu.Framework/Audio/Mixing/SDL3/ISDL3AudioChannel.cs +++ b/osu.Framework/Audio/Mixing/SDL3/ISDL3AudioChannel.cs @@ -11,7 +11,7 @@ internal interface ISDL3AudioChannel : IAudioChannel /// /// Returns remaining audio samples. /// - /// Audio data needs to be put here. Length of this determines how much data needs to be filled. + /// Channel puts audio in this array. Length of this determines how much data needs to be filled. /// Sample count int GetRemainingSamples(float[] data); diff --git a/osu.Framework/Audio/SDL3AmplitudeProcessor.cs b/osu.Framework/Audio/SDL3AmplitudeProcessor.cs new file mode 100644 index 0000000000..400beeaaca --- /dev/null +++ b/osu.Framework/Audio/SDL3AmplitudeProcessor.cs @@ -0,0 +1,52 @@ +// Copyright (c) ppy Pty Ltd . Licensed under the MIT Licence. +// See the LICENCE file in the repository root for full licence text. + +using System; +using NAudio.Dsp; +using osu.Framework.Audio.Track; +using osu.Framework.Extensions; + +namespace osu.Framework.Audio +{ + internal class SDL3AmplitudeProcessor + { + /// + /// The most recent amplitude data. Note that this is updated on an ongoing basis and there is no guarantee it is in a consistent (single sample) state. + /// If you need consistent data, make a copy of FrequencyAmplitudes while on the audio thread. + /// + public ChannelAmplitudes CurrentAmplitudes { get; private set; } = ChannelAmplitudes.Empty; + + private Complex[] fftSamples = new Complex[ChannelAmplitudes.AMPLITUDES_SIZE * 2]; + private float[] fftResult = new float[ChannelAmplitudes.AMPLITUDES_SIZE]; + + public void Update(float[] samples, int channels) + { + if (samples.Length / channels < ChannelAmplitudes.AMPLITUDES_SIZE) + return; // not enough data + + float leftAmplitude = 0; + float rightAmplitude = 0; + int secondCh = channels < 2 ? 
0 : 1; + int fftIndex = 0; + + for (int i = 0; i < samples.Length; i += channels) + { + leftAmplitude = Math.Max(leftAmplitude, Math.Abs(samples[i])); + rightAmplitude = Math.Max(rightAmplitude, Math.Abs(samples[i + secondCh])); + + if (fftIndex < fftSamples.Length) + { + fftSamples[fftIndex].Y = 0; + fftSamples[fftIndex++].X = samples[i] + samples[i + secondCh]; + } + } + + FastFourierTransform.FFT(true, (int)Math.Log2(fftSamples.Length), fftSamples); + + for (int i = 0; i < fftResult.Length; i++) + fftResult[i] = fftSamples[i].ComputeMagnitude(); + + CurrentAmplitudes = new ChannelAmplitudes(Math.Min(1f, leftAmplitude), Math.Min(1f, rightAmplitude), fftResult); + } + } +} diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index 45e03ae2aa..0391f61692 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -4,7 +4,6 @@ using System; using System.Threading; using System.Threading.Tasks; -using NAudio.Dsp; using osu.Framework.Audio.Mixing.SDL3; using osu.Framework.Extensions; using osu.Framework.Logging; @@ -100,57 +99,12 @@ void ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLen } } - private volatile bool amplitudeRequested; private double lastTime; - - private ChannelAmplitudes currentAmplitudes = ChannelAmplitudes.Empty; private float[]? samples; - private Complex[]? fftSamples; - private float[]? fftResult; - - public override ChannelAmplitudes CurrentAmplitudes - { - get - { - if (!amplitudeRequested) - amplitudeRequested = true; - - return isRunning ? currentAmplitudes : ChannelAmplitudes.Empty; - } - } - - private void updateCurrentAmplitude() - { - samples ??= new float[(int)(player.SrcRate * (1f / 60)) * player.SrcChannels]; - fftSamples ??= new Complex[ChannelAmplitudes.AMPLITUDES_SIZE * 2]; - fftResult ??= new float[ChannelAmplitudes.AMPLITUDES_SIZE]; - - player.Peek(samples, lastTime); - float leftAmplitude = 0; - float rightAmplitude = 0; - int secondCh = player.SrcChannels < 2 ? 0 : 1; - int fftIndex = 0; + private SDL3AmplitudeProcessor? 
amplitudeProcessor; - for (int i = 0; i < samples.Length; i += player.SrcChannels) - { - leftAmplitude = Math.Max(leftAmplitude, Math.Abs(samples[i])); - rightAmplitude = Math.Max(rightAmplitude, Math.Abs(samples[i + secondCh])); - - if (fftIndex < fftSamples.Length) - { - fftSamples[fftIndex].Y = 0; - fftSamples[fftIndex++].X = samples[i] + samples[i + secondCh]; - } - } - - FastFourierTransform.FFT(true, (int)Math.Log2(fftSamples.Length), fftSamples); - - for (int i = 0; i < fftResult.Length; i++) - fftResult[i] = fftSamples[i].ComputeMagnitude(); - - currentAmplitudes = new ChannelAmplitudes(Math.Min(1f, leftAmplitude), Math.Min(1f, rightAmplitude), fftResult); - } + public override ChannelAmplitudes CurrentAmplitudes => (amplitudeProcessor ??= new SDL3AmplitudeProcessor()).CurrentAmplitudes; protected override void UpdateState() { @@ -176,12 +130,13 @@ protected override void UpdateState() player.FillRequiredSamples(); } - // Not sure if I need to split this up to another class since this feature is only exclusive to Track - if (amplitudeRequested && isRunning && Math.Abs(currentTime - lastTime) > 1000.0 / 60.0) + if (amplitudeProcessor != null && isRunning && Math.Abs(currentTime - lastTime) > 1000.0 / 60.0) { lastTime = currentTime; + samples ??= new float[(int)(player.SrcRate * (1f / 60)) * player.SrcChannels]; + player.Peek(samples, lastTime); - updateCurrentAmplitude(); + amplitudeProcessor.Update(samples, player.SrcChannels); } } From f70455792c3067d4b871a0571c8c321ecf48d7df Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 6 Sep 2024 23:00:58 +0900 Subject: [PATCH 111/127] Satisfy InspectCode --- osu.Framework/Audio/SDL3AmplitudeProcessor.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/SDL3AmplitudeProcessor.cs b/osu.Framework/Audio/SDL3AmplitudeProcessor.cs index 400beeaaca..a828f9050a 100644 --- a/osu.Framework/Audio/SDL3AmplitudeProcessor.cs +++ b/osu.Framework/Audio/SDL3AmplitudeProcessor.cs @@ -16,8 +16,8 @@ internal class SDL3AmplitudeProcessor /// public ChannelAmplitudes CurrentAmplitudes { get; private set; } = ChannelAmplitudes.Empty; - private Complex[] fftSamples = new Complex[ChannelAmplitudes.AMPLITUDES_SIZE * 2]; - private float[] fftResult = new float[ChannelAmplitudes.AMPLITUDES_SIZE]; + private readonly Complex[] fftSamples = new Complex[ChannelAmplitudes.AMPLITUDES_SIZE * 2]; + private readonly float[] fftResult = new float[ChannelAmplitudes.AMPLITUDES_SIZE]; public void Update(float[] samples, int channels) { From fe296909e802277ca097d81ebabf00ed068fdac4 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 6 Sep 2024 23:16:45 +0900 Subject: [PATCH 112/127] Potential fix for a flaky test --- osu.Framework.Tests/Audio/TrackBassTest.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework.Tests/Audio/TrackBassTest.cs b/osu.Framework.Tests/Audio/TrackBassTest.cs index 49554e70a4..63794294fd 100644 --- a/osu.Framework.Tests/Audio/TrackBassTest.cs +++ b/osu.Framework.Tests/Audio/TrackBassTest.cs @@ -135,7 +135,7 @@ public void TestStopAtEnd(AudioTestComponents.Type id) [TestCase(AudioTestComponents.Type.SDL3)] public void TestSeek(AudioTestComponents.Type id) { - setupBackend(id); + setupBackend(id, true); track.SeekAsync(1000); audio.Update(); From be1452874728e7a35585a5bb1e65d55df2b6d37e Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sat, 14 Sep 2024 11:14:26 +0900 Subject: [PATCH 
113/127] Satisfy CodeFactor and simplify mixing --- .../Audio/SDL3AudioTestComponents.cs | 4 +- .../Audio/Mixing/SDL3/SDL3AudioMixer.cs | 51 +- .../Audio/SDL3AudioDecoderManager.cs | 504 +++++++++--------- osu.Framework/Audio/SDL3AudioManager.cs | 292 +++++----- .../Audio/Sample/SampleSDL3Factory.cs | 6 +- .../Audio/Track/TempoSDL3AudioPlayer.cs | 4 +- osu.Framework/Audio/Track/TrackSDL3.cs | 6 +- osu.Framework/Audio/Track/Waveform.cs | 2 +- osu.Framework/Extensions/ExtensionMethods.cs | 2 +- 9 files changed, 424 insertions(+), 447 deletions(-) diff --git a/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs index 051e034e3c..041ee1f9f7 100644 --- a/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs +++ b/osu.Framework.Tests/Audio/SDL3AudioTestComponents.cs @@ -19,7 +19,7 @@ namespace osu.Framework.Tests.Audio /// public class SDL3AudioTestComponents : AudioTestComponents { - private SDL3BaseAudioManager baseManager = null!; + private SDL3AudioManager.SDL3BaseAudioManager baseManager = null!; public SDL3AudioTestComponents(bool init = true) : base(init) @@ -31,7 +31,7 @@ protected override void Prepare() base.Prepare(); SDL_SetHint(SDL_HINT_AUDIO_DRIVER, "dummy"u8); - baseManager = new SDL3BaseAudioManager(MixerComponents.Items.OfType); + baseManager = new SDL3AudioManager.SDL3BaseAudioManager(MixerComponents.Items.OfType); } public override void Init() diff --git a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index a5da4d17c8..134e0cfa2c 100644 --- a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -7,6 +7,7 @@ using ManagedBass.Fx; using osu.Framework.Statistics; using NAudio.Dsp; +using System; namespace osu.Framework.Audio.Mixing.SDL3 { @@ -56,27 +57,13 @@ protected override void UpdateState() base.UpdateState(); } - private void mixAudio(float[] dst, float[] src, ref int filled, int samples, float left, float right) + private void mixAudio(float[] dst, float[] src, int samples, float left, float right) { if (left <= 0 && right <= 0) return; - for (int e = 0; e < samples; e += 2) - { - if (e < filled) - { - dst[e] += src[e] * left; - dst[e + 1] += src[e + 1] * right; - } - else - { - dst[e] = src[e] * left; - dst[e + 1] = src[e + 1] * right; - } - } - - if (samples > filled) - filled = samples; + for (int i = 0; i < samples; i++) + dst[i] = Math.Clamp(dst[i] + src[i] * (i % 2 == 0 ? left : right), -1.0f, 1.0f); } private float[]? ret; @@ -90,8 +77,7 @@ private void mixAudio(float[] dst, float[] src, ref int filled, int samples, flo /// /// A float array that audio will be mixed into. 
/// Size of data - /// Count of usable audio samples in data - public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples) + public void MixChannelsInto(float[] data, int sampleCount) { lock (syncRoot) { @@ -100,10 +86,13 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples bool useFilters = activeEffects.Count > 0; - if (useFilters && (filterArray == null || filterArray.Length != sampleCount)) - filterArray = new float[sampleCount]; + if (useFilters) + { + if (filterArray == null || filterArray.Length != sampleCount) + filterArray = new float[sampleCount]; - int filterArrayFilled = 0; + Array.Fill(filterArray, 0); + } var node = activeChannels.First; @@ -123,11 +112,7 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples if (size > 0) { var (left, right) = channel.Volume; - - if (!useFilters) - mixAudio(data, ret, ref filledSamples, size, left, right); - else - mixAudio(filterArray!, ret, ref filterArrayFilled, size, left, right); + mixAudio(useFilters ? filterArray! : data, ret, size, left, right); } } @@ -140,21 +125,13 @@ public void MixChannelsInto(float[] data, int sampleCount, ref int filledSamples { foreach (var filter in activeEffects.Values) { - for (int i = 0; i < filterArrayFilled; i++) + for (int i = 0; i < sampleCount; i++) filterArray![i] = filter.Transform(filterArray[i]); } - mixAudio(data, filterArray!, ref filledSamples, filterArrayFilled, 1, 1); + mixAudio(data, filterArray!, sampleCount, 1, 1); } } - - for (int i = 0; i < filledSamples; i++) - { - if (data[i] > 1.0f) - data[i] = 1.0f; - else if (data[i] < -1.0f) - data[i] = -1.0f; - } } private static BiQuadFilter updateFilter(BiQuadFilter? filter, float freq, BQFParameters bqfp) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index e5d48ce8a7..20f88dee88 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -14,27 +14,27 @@ namespace osu.Framework.Audio { - public interface ISDL3AudioDataReceiver - { - /// - /// Interface to get decoded audio data from the decoder. - /// - /// Decoded audio. The format depends on you specified, - /// so you may need to actual data format. - /// This may be used by decoder later to reduce allocation, so you need to copy the data before exiting from this delegate, otherwise you may end up with wrong data. - /// Length in byte of decoded audio. Use this instead of data.Length - /// Whether if this is the last data or not. - void GetData(byte[] data, int length, bool done); - - void GetMetaData(int bitrate, double length, long byteLength); - } - /// /// Decodes audio from , and convert it to appropriate format. /// It needs a lot of polishing... /// public class SDL3AudioDecoderManager : IDisposable { + public interface ISDL3AudioDataReceiver + { + /// + /// Interface to get decoded audio data from the decoder. + /// + /// Decoded audio. The format depends on you specified, + /// so you may need to actual data format. + /// This may be used by decoder later to reduce allocation, so you need to copy the data before exiting from this delegate, otherwise you may end up with wrong data. + /// Length in byte of decoded audio. Use this instead of data.Length + /// Whether if this is the last data or not. 
+ void GetData(byte[] data, int length, bool done); + + void GetMetaData(int bitrate, double length, long byteLength); + } + private readonly LinkedList jobs = new LinkedList(); private readonly Thread decoderThread; @@ -231,316 +231,316 @@ private static int decodeAudio(SDL3AudioDecoder decoder, out byte[] decoded) return (int)memoryStream.Length; } } - } - - /// - /// Contains decoder information, and perform the actual decoding. - /// - public abstract class SDL3AudioDecoder - { - /// - /// Decoder will decode audio data from this. - /// It accepts most formats. (e.g. MP3, OGG, WAV and so on...) - /// - internal readonly Stream Stream; - - /// - /// Decoder will convert audio data according to this spec if needed. - /// - internal readonly SDL_AudioSpec AudioSpec; - - /// - /// Decoder will call multiple times with partial data if true. - /// It's a receiver's job to combine the data in this case. Otherwise, It will call only once with the entirely decoded data if false. - /// - internal readonly bool IsTrack; - - /// - /// It will automatically dispose once decoding is done/failed. - /// - internal readonly bool AutoDisposeStream; - - /// - /// Decoder will call once or more to pass the decoded audio data. - /// - internal readonly ISDL3AudioDataReceiver? Pass; - - private int bitrate; - - /// - /// Audio bitrate. Decoder may fill this in after the first call of . - /// - public int Bitrate - { - get => bitrate; - set => Interlocked.Exchange(ref bitrate, value); - } - - private double length; /// - /// Audio length in milliseconds. Decoder may fill this in after the first call of . + /// Contains decoder information, and perform the actual decoding. /// - public double Length + public abstract class SDL3AudioDecoder { - get => length; - set => Interlocked.Exchange(ref length, value); - } - - private long byteLength; - - /// - /// Audio length in byte. Note that this may not be accurate. You cannot depend on this value entirely. - /// You can find out the actual byte length by summing up byte counts you received once decoding is done. - /// Decoder may fill this in after the first call of . - /// - public long ByteLength - { - get => byteLength; - set => Interlocked.Exchange(ref byteLength, value); - } - - internal bool MetadataSended; + /// + /// Decoder will decode audio data from this. + /// It accepts most formats. (e.g. MP3, OGG, WAV and so on...) + /// + internal readonly Stream Stream; + + /// + /// Decoder will convert audio data according to this spec if needed. + /// + internal readonly SDL_AudioSpec AudioSpec; + + /// + /// Decoder will call multiple times with partial data if true. + /// It's a receiver's job to combine the data in this case. Otherwise, It will call only once with the entirely decoded data if false. + /// + internal readonly bool IsTrack; + + /// + /// It will automatically dispose once decoding is done/failed. + /// + internal readonly bool AutoDisposeStream; + + /// + /// Decoder will call once or more to pass the decoded audio data. + /// + internal readonly ISDL3AudioDataReceiver? Pass; + + private int bitrate; + + /// + /// Audio bitrate. Decoder may fill this in after the first call of . + /// + public int Bitrate + { + get => bitrate; + set => Interlocked.Exchange(ref bitrate, value); + } - internal volatile bool StopJob; + private double length; - private volatile bool loading; + /// + /// Audio length in milliseconds. Decoder may fill this in after the first call of . 
+ /// + public double Length + { + get => length; + set => Interlocked.Exchange(ref length, value); + } - /// - /// Whether it is decoding or not. - /// - public bool Loading { get => loading; protected set => loading = value; } + private long byteLength; - protected SDL3AudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? pass) - { - Stream = stream; - AudioSpec = audioSpec; - IsTrack = isTrack; - AutoDisposeStream = autoDisposeStream; - Pass = pass; - } + /// + /// Audio length in byte. Note that this may not be accurate. You cannot depend on this value entirely. + /// You can find out the actual byte length by summing up byte counts you received once decoding is done. + /// Decoder may fill this in after the first call of . + /// + public long ByteLength + { + get => byteLength; + set => Interlocked.Exchange(ref byteLength, value); + } - /// - /// Add a flag to stop decoding in the next loop of decoder thread. - /// - public void Stop() - { - StopJob = true; - } + internal bool MetadataSended; - // Not using IDisposable since things must be handled in a decoder thread - internal virtual void Dispose() - { - if (AutoDisposeStream) - Stream.Dispose(); - } + internal volatile bool StopJob; - protected abstract int LoadFromStreamInternal(out byte[] decoded); + private volatile bool loading; - /// - /// Decodes and resamples audio from job.Stream, and pass it to decoded. - /// You may need to run this multiple times. - /// Don't call this yourself if this decoder is in the decoder thread job list. - /// - /// Decoded audio - public int LoadFromStream(out byte[] decoded) - { - int read = 0; + /// + /// Whether it is decoding or not. + /// + public bool Loading { get => loading; protected set => loading = value; } - try + protected SDL3AudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? pass) { - read = LoadFromStreamInternal(out decoded); + Stream = stream; + AudioSpec = audioSpec; + IsTrack = isTrack; + AutoDisposeStream = autoDisposeStream; + Pass = pass; } - catch (Exception e) + + /// + /// Add a flag to stop decoding in the next loop of decoder thread. + /// + public void Stop() { - Logger.Log(e.Message, level: LogLevel.Important); - Loading = false; - decoded = Array.Empty(); + StopJob = true; } - finally + + // Not using IDisposable since things must be handled in a decoder thread + internal virtual void Dispose() { - if (!Loading) - Dispose(); + if (AutoDisposeStream) + Stream.Dispose(); } - return read; - } + protected abstract int LoadFromStreamInternal(out byte[] decoded); - /// - /// This is only for using BASS as a decoder for SDL3 backend! - /// - internal class BassAudioDecoder : SDL3AudioDecoder - { - private int decodeStream; - private FileCallbacks? fileCallbacks; - - private int resampler; - - private byte[]? decodeData; - - private Resolution resolution + /// + /// Decodes and resamples audio from job.Stream, and pass it to decoded. + /// You may need to run this multiple times. + /// Don't call this yourself if this decoder is in the decoder thread job list. 
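[Editor's note] When a decoder is not queued on the decoder thread, the loop described here is driven manually; the Waveform change later in this series does exactly that. A minimal sketch, assuming a decoder obtained from SDL3AudioDecoderManager.CreateDecoder and a using for osu.Framework.Audio:

    using System.IO;

    static byte[] DecodeAll(SDL3AudioDecoderManager.SDL3AudioDecoder decoder)
    {
        using (var pcm = new MemoryStream())
        {
            do
            {
                int read = decoder.LoadFromStream(out byte[] chunk);
                pcm.Write(chunk, 0, read); // copy: `chunk` may be reused by the next call
            }
            while (decoder.Loading);

            return pcm.ToArray();
        }
    }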
+ /// + /// Decoded audio + public int LoadFromStream(out byte[] decoded) { - get + int read = 0; + + try { - if (AudioSpec.format == SDL_AudioFormat.SDL_AUDIO_S8) - return Resolution.Byte; - else if (AudioSpec.format == SDL3.SDL_AUDIO_S16) // uses constant due to endian - return Resolution.Short; - else - return Resolution.Float; + read = LoadFromStreamInternal(out decoded); + } + catch (Exception e) + { + Logger.Log(e.Message, level: LogLevel.Important); + Loading = false; + decoded = Array.Empty(); + } + finally + { + if (!Loading) + Dispose(); } - } - - private ushort bits => (ushort)SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format); - public BassAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? pass) - : base(stream, audioSpec, isTrack, autoDisposeStream, pass) - { + return read; } - internal override void Dispose() + /// + /// This is only for using BASS as a decoder for SDL3 backend! + /// + internal class BassAudioDecoder : SDL3AudioDecoder { - fileCallbacks?.Dispose(); - fileCallbacks = null; + private int decodeStream; + private FileCallbacks? fileCallbacks; - decodeData = null; + private int resampler; - if (resampler != 0) + private byte[]? decodeData; + + private Resolution resolution { - Bass.StreamFree(resampler); - resampler = 0; + get + { + if (AudioSpec.format == SDL_AudioFormat.SDL_AUDIO_S8) + return Resolution.Byte; + else if (AudioSpec.format == SDL3.SDL_AUDIO_S16) // uses constant due to endian + return Resolution.Short; + else + return Resolution.Float; + } } - if (decodeStream != 0) + private ushort bits => (ushort)SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format); + + public BassAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? 
pass) + : base(stream, audioSpec, isTrack, autoDisposeStream, pass) { - Bass.StreamFree(decodeStream); - decodeStream = 0; } - base.Dispose(); - } + internal override void Dispose() + { + fileCallbacks?.Dispose(); + fileCallbacks = null; - protected override int LoadFromStreamInternal(out byte[] decoded) - { - if (Bass.CurrentDevice < 0) - throw new InvalidOperationException($"Initialize a BASS device to decode audio: {Bass.LastError}"); + decodeData = null; - if (!Loading) - { - fileCallbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); + if (resampler != 0) + { + Bass.StreamFree(resampler); + resampler = 0; + } - BassFlags bassFlags = BassFlags.Decode | resolution.ToBassFlag(); - if (IsTrack) bassFlags |= BassFlags.Prescan; + if (decodeStream != 0) + { + Bass.StreamFree(decodeStream); + decodeStream = 0; + } - decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, fileCallbacks.Callbacks); + base.Dispose(); + } - if (decodeStream == 0) - throw new FormatException($"Couldn't create stream: {Bass.LastError}"); + protected override int LoadFromStreamInternal(out byte[] decoded) + { + if (Bass.CurrentDevice < 0) + throw new InvalidOperationException($"Initialize a BASS device to decode audio: {Bass.LastError}"); - if (Bass.ChannelGetInfo(decodeStream, out var info)) + if (!Loading) { - ByteLength = Bass.ChannelGetLength(decodeStream); - Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000.0d; - Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(decodeStream, ChannelAttribute.Bitrate)); + fileCallbacks = new FileCallbacks(new DataStreamFileProcedures(Stream)); + + BassFlags bassFlags = BassFlags.Decode | resolution.ToBassFlag(); + if (IsTrack) bassFlags |= BassFlags.Prescan; + + decodeStream = Bass.CreateStream(StreamSystem.NoBuffer, bassFlags, fileCallbacks.Callbacks); - if (info.Channels != AudioSpec.channels || info.Frequency != AudioSpec.freq) + if (decodeStream == 0) + throw new FormatException($"Couldn't create stream: {Bass.LastError}"); + + if (Bass.ChannelGetInfo(decodeStream, out var info)) { - resampler = BassMix.CreateMixerStream(AudioSpec.freq, AudioSpec.channels, BassFlags.MixerEnd | BassFlags.Decode | resolution.ToBassFlag()); + ByteLength = Bass.ChannelGetLength(decodeStream); + Length = Bass.ChannelBytes2Seconds(decodeStream, ByteLength) * 1000.0d; + Bitrate = (int)Math.Round(Bass.ChannelGetAttribute(decodeStream, ChannelAttribute.Bitrate)); + + if (info.Channels != AudioSpec.channels || info.Frequency != AudioSpec.freq) + { + resampler = BassMix.CreateMixerStream(AudioSpec.freq, AudioSpec.channels, BassFlags.MixerEnd | BassFlags.Decode | resolution.ToBassFlag()); - if (resampler == 0) - throw new FormatException($"Failed to create BASS Mixer: {Bass.LastError}"); + if (resampler == 0) + throw new FormatException($"Failed to create BASS Mixer: {Bass.LastError}"); - if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanNoRampin | BassFlags.MixerChanLimit)) - throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); + if (!BassMix.MixerAddChannel(resampler, decodeStream, BassFlags.MixerChanNoRampin | BassFlags.MixerChanLimit)) + throw new FormatException($"Failed to add a channel to BASS Mixer: {Bass.LastError}"); - ByteLength /= info.Channels * (bits / 8); - ByteLength = (long)Math.Ceiling((decimal)ByteLength / info.Frequency * AudioSpec.freq); - ByteLength *= AudioSpec.channels * (bits / 8); + ByteLength /= info.Channels * (bits / 8); + ByteLength = (long)Math.Ceiling((decimal)ByteLength / 
info.Frequency * AudioSpec.freq); + ByteLength *= AudioSpec.channels * (bits / 8); + } } - } - else - { - if (IsTrack) - throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); + else + { + if (IsTrack) + throw new FormatException($"Couldn't get channel info: {Bass.LastError}"); + } + + Loading = true; } - Loading = true; - } + int handle = resampler == 0 ? decodeStream : resampler; - int handle = resampler == 0 ? decodeStream : resampler; + int bufferLen = (int)Bass.ChannelSeconds2Bytes(handle, 1); - int bufferLen = (int)Bass.ChannelSeconds2Bytes(handle, 1); + if (bufferLen <= 0) + bufferLen = 44100 * 2 * 4 * 1; - if (bufferLen <= 0) - bufferLen = 44100 * 2 * 4 * 1; + if (decodeData == null || decodeData.Length < bufferLen) + decodeData = new byte[bufferLen]; - if (decodeData == null || decodeData.Length < bufferLen) - decodeData = new byte[bufferLen]; + int got = Bass.ChannelGetData(handle, decodeData, bufferLen); - int got = Bass.ChannelGetData(handle, decodeData, bufferLen); + if (got == -1) + { + Loading = false; - if (got == -1) - { - Loading = false; + if (Bass.LastError != Errors.Ended) + throw new FormatException($"Couldn't decode: {Bass.LastError}"); + } + else if (got < bufferLen) + { + // originally used synchandle to detect end, but it somehow created strong handle + Loading = false; + } - if (Bass.LastError != Errors.Ended) - throw new FormatException($"Couldn't decode: {Bass.LastError}"); - } - else if (got < bufferLen) - { - // originally used synchandle to detect end, but it somehow created strong handle - Loading = false; + decoded = decodeData; + return Math.Max(0, got); } - - decoded = decodeData; - return Math.Max(0, got); } - } - internal class FFmpegAudioDecoder : SDL3AudioDecoder - { - private VideoDecoder? ffmpeg; - private byte[]? decodeData; - - public FFmpegAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? pass) - : base(stream, audioSpec, isTrack, autoDisposeStream, pass) + internal class FFmpegAudioDecoder : SDL3AudioDecoder { - } + private VideoDecoder? ffmpeg; + private byte[]? decodeData; - internal override void Dispose() - { - decodeData = null; + public FFmpegAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? 
pass) + : base(stream, audioSpec, isTrack, autoDisposeStream, pass) + { + } - ffmpeg?.Dispose(); - ffmpeg = null; + internal override void Dispose() + { + decodeData = null; - base.Dispose(); - } + ffmpeg?.Dispose(); + ffmpeg = null; - protected override int LoadFromStreamInternal(out byte[] decoded) - { - if (ffmpeg == null) + base.Dispose(); + } + + protected override int LoadFromStreamInternal(out byte[] decoded) { - ffmpeg = new VideoDecoder(Stream, AudioSpec.freq, AudioSpec.channels, - SDL3.SDL_AUDIO_ISFLOAT(AudioSpec.format), SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format), SDL3.SDL_AUDIO_ISSIGNED(AudioSpec.format)); + if (ffmpeg == null) + { + ffmpeg = new VideoDecoder(Stream, AudioSpec.freq, AudioSpec.channels, + SDL3.SDL_AUDIO_ISFLOAT(AudioSpec.format), SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format), SDL3.SDL_AUDIO_ISSIGNED(AudioSpec.format)); - ffmpeg.PrepareDecoding(); - ffmpeg.RecreateCodecContext(); + ffmpeg.PrepareDecoding(); + ffmpeg.RecreateCodecContext(); - Bitrate = (int)ffmpeg.Bitrate; - Length = ffmpeg.Duration; - ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * AudioSpec.freq) * AudioSpec.channels * (SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format) / 8); // FIXME + Bitrate = (int)ffmpeg.Bitrate; + Length = ffmpeg.Duration; + ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * AudioSpec.freq) * AudioSpec.channels * (SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format) / 8); // FIXME - Loading = true; - } + Loading = true; + } - int got = ffmpeg.DecodeNextAudioFrame(32, ref decodeData, !IsTrack); + int got = ffmpeg.DecodeNextAudioFrame(32, ref decodeData, !IsTrack); - if (ffmpeg.State != VideoDecoder.DecoderState.Running) - Loading = false; + if (ffmpeg.State != VideoDecoder.DecoderState.Running) + Loading = false; - decoded = decodeData; - return got; + decoded = decodeData; + return got; + } } } } diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index 1cc044c7ca..d7eafa1c42 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -190,202 +190,202 @@ protected override void Dispose(bool disposing) baseManager.Dispose(); } - } - - /// - /// To share basic playback logic with audio tests. - /// - internal unsafe class SDL3BaseAudioManager : IDisposable - { - internal SDL_AudioSpec AudioSpec { get; private set; } - internal SDL_AudioDeviceID DeviceId { get; private set; } - internal SDL_AudioStream* DeviceStream { get; private set; } - - internal int BufferSize { get; private set; } = (int)(SDL3AudioManager.AUDIO_FREQ * 0.01); + /// + /// To share basic playback logic with audio tests. 
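[Editor's note] Both the BASS and FFmpeg decoders estimate ByteLength from the reported duration rather than from the stream itself, which is why the FIXME above calls it approximate. A worked example with illustrative numbers (3 seconds of 44.1 kHz stereo 32-bit float):

    // bytes ≈ ceil(duration_ms / 1000 * freq) * channels * (bits / 8)
    double durationMs = 3000;
    int freq = 44100;          // target AudioSpec.freq
    int channels = 2;          // target AudioSpec.channels
    int bytesPerSample = 4;    // 32-bit float

    long byteLength = (long)System.Math.Ceiling(durationMs / 1000.0 * freq) * channels * bytesPerSample;
    // 132,300 sample frames * 2 channels * 4 bytes = 1,058,400 bytes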
+ /// + internal unsafe class SDL3BaseAudioManager : IDisposable + { + internal SDL_AudioSpec AudioSpec { get; private set; } - internal string DeviceName { get; private set; } = "Not loaded"; + internal SDL_AudioDeviceID DeviceId { get; private set; } + internal SDL_AudioStream* DeviceStream { get; private set; } - private readonly Func> mixerIterator; + internal int BufferSize { get; private set; } = (int)(AUDIO_FREQ * 0.01); - private ObjectHandle objectHandle; + internal string DeviceName { get; private set; } = "Not loaded"; - private readonly SDL3AudioDecoderManager decoderManager = new SDL3AudioDecoderManager(); + private readonly Func> mixerIterator; - internal SDL3BaseAudioManager(Func> mixerIterator) - { - if (SDL_InitSubSystem(SDL_InitFlags.SDL_INIT_AUDIO) < 0) - { - throw new InvalidOperationException($"Failed to initialise SDL Audio: {SDL_GetError()}"); - } + private ObjectHandle objectHandle; - this.mixerIterator = mixerIterator; + private readonly SDL3AudioDecoderManager decoderManager = new SDL3AudioDecoderManager(); - objectHandle = new ObjectHandle(this, GCHandleType.Normal); - AudioSpec = new SDL_AudioSpec + internal SDL3BaseAudioManager(Func> mixerIterator) { - freq = SDL3AudioManager.AUDIO_FREQ, - channels = SDL3AudioManager.AUDIO_CHANNELS, - format = SDL3AudioManager.AUDIO_FORMAT - }; - } - - internal void RunWhileLockingAudioStream(Action action) - { - SDL_AudioStream* stream = DeviceStream; + if (SDL_InitSubSystem(SDL_InitFlags.SDL_INIT_AUDIO) < 0) + { + throw new InvalidOperationException($"Failed to initialise SDL Audio: {SDL_GetError()}"); + } - if (stream != null) - SDL_LockAudioStream(stream); + this.mixerIterator = mixerIterator; - try - { - action(); + objectHandle = new ObjectHandle(this, GCHandleType.Normal); + AudioSpec = new SDL_AudioSpec + { + freq = AUDIO_FREQ, + channels = AUDIO_CHANNELS, + format = AUDIO_FORMAT + }; } - finally + + internal void RunWhileLockingAudioStream(Action action) { + SDL_AudioStream* stream = DeviceStream; + if (stream != null) - SDL_UnlockAudioStream(stream); + SDL_LockAudioStream(stream); + + try + { + action(); + } + finally + { + if (stream != null) + SDL_UnlockAudioStream(stream); + } } - } - internal bool SetAudioDevice(SDL_AudioDeviceID targetId) - { - if (DeviceStream != null) + internal bool SetAudioDevice(SDL_AudioDeviceID targetId) { - SDL_DestroyAudioStream(DeviceStream); - DeviceStream = null; - } + if (DeviceStream != null) + { + SDL_DestroyAudioStream(DeviceStream); + DeviceStream = null; + } - SDL_AudioSpec spec = AudioSpec; + SDL_AudioSpec spec = AudioSpec; - SDL_AudioStream* deviceStream = SDL_OpenAudioDeviceStream(targetId, &spec, &audioCallback, objectHandle.Handle); + SDL_AudioStream* deviceStream = SDL_OpenAudioDeviceStream(targetId, &spec, &audioCallback, objectHandle.Handle); - if (deviceStream != null) - { - SDL_DestroyAudioStream(DeviceStream); - DeviceStream = deviceStream; - AudioSpec = spec; + if (deviceStream != null) + { + SDL_DestroyAudioStream(DeviceStream); + DeviceStream = deviceStream; + AudioSpec = spec; - DeviceId = SDL_GetAudioStreamDevice(deviceStream); + DeviceId = SDL_GetAudioStreamDevice(deviceStream); - int sampleFrameSize = 0; - SDL_AudioSpec temp; // this has 'real' device info which is useless since SDL converts audio according to the spec we provided - if (SDL_GetAudioDeviceFormat(DeviceId, &temp, &sampleFrameSize) == 0) - BufferSize = sampleFrameSize * (int)Math.Ceiling((double)spec.freq / temp.freq); - } + int sampleFrameSize = 0; + SDL_AudioSpec temp; // this has 'real' device 
info which is useless since SDL converts audio according to the spec we provided + if (SDL_GetAudioDeviceFormat(DeviceId, &temp, &sampleFrameSize) == 0) + BufferSize = sampleFrameSize * (int)Math.Ceiling((double)spec.freq / temp.freq); + } - if (deviceStream == null) - { - if (targetId == SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK) - return false; + if (deviceStream == null) + { + if (targetId == SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK) + return false; - return SetAudioDevice(SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK); - } + return SetAudioDevice(SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK); + } - SDL_ResumeAudioDevice(DeviceId); + SDL_ResumeAudioDevice(DeviceId); - DeviceName = SDL_GetAudioDeviceName(targetId); + DeviceName = SDL_GetAudioDeviceName(targetId); - return true; - } + return true; + } - [UnmanagedCallersOnly(CallConvs = new[] { typeof(CallConvCdecl) })] - private static void audioCallback(IntPtr userdata, SDL_AudioStream* stream, int additionalAmount, int totalAmount) - { - var handle = new ObjectHandle(userdata); - if (handle.GetTarget(out SDL3BaseAudioManager audioManager)) - audioManager.internalAudioCallback(stream, additionalAmount); - } + [UnmanagedCallersOnly(CallConvs = new[] { typeof(CallConvCdecl) })] + private static void audioCallback(IntPtr userdata, SDL_AudioStream* stream, int additionalAmount, int totalAmount) + { + var handle = new ObjectHandle(userdata); + if (handle.GetTarget(out SDL3BaseAudioManager audioManager)) + audioManager.internalAudioCallback(stream, additionalAmount); + } - private float[] audioBuffer; + private float[] audioBuffer; - private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount) - { - additionalAmount /= 4; + private void internalAudioCallback(SDL_AudioStream* stream, int additionalAmount) + { + additionalAmount /= 4; - if (audioBuffer == null || audioBuffer.Length < additionalAmount) - audioBuffer = new float[additionalAmount]; + if (audioBuffer == null || audioBuffer.Length < additionalAmount) + audioBuffer = new float[additionalAmount]; - try - { - int filled = 0; + Array.Fill(audioBuffer, 0); - foreach (var mixer in mixerIterator()) + try { - if (mixer.IsAlive) - mixer.MixChannelsInto(audioBuffer, additionalAmount, ref filled); + foreach (var mixer in mixerIterator()) + { + if (mixer.IsAlive) + mixer.MixChannelsInto(audioBuffer, additionalAmount); + } + + fixed (float* ptr = audioBuffer) + SDL_PutAudioStreamData(stream, (IntPtr)ptr, additionalAmount * 4); + } + catch (Exception e) + { + Logger.Error(e, "Error while pushing audio to SDL"); } - - fixed (float* ptr = audioBuffer) - SDL_PutAudioStreamData(stream, (IntPtr)ptr, filled * 4); } - catch (Exception e) + + /// + /// With how decoders work, we need this to get test passed + /// I don't want this either... otherwise we have to dispose decoder in tests + /// + private class ReceiverGCWrapper : SDL3AudioDecoderManager.ISDL3AudioDataReceiver { - Logger.Error(e, "Error while pushing audio to SDL"); - } - } + private readonly WeakReference channelWeakReference; - /// - /// With how decoders work, we need this to get test passed - /// I don't want this either... 
otherwise we have to dispose decoder in tests - /// - private class ReceiverGCWrapper : ISDL3AudioDataReceiver - { - private readonly WeakReference channelWeakReference; + internal ReceiverGCWrapper(WeakReference channel) + { + channelWeakReference = channel; + } - internal ReceiverGCWrapper(WeakReference channel) - { - channelWeakReference = channel; + void SDL3AudioDecoderManager.ISDL3AudioDataReceiver.GetData(byte[] data, int length, bool done) + { + if (channelWeakReference.TryGetTarget(out SDL3AudioDecoderManager.ISDL3AudioDataReceiver r)) + r.GetData(data, length, done); + else + throw new ObjectDisposedException("channel is already disposed"); + } + + void SDL3AudioDecoderManager.ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLength) + { + if (channelWeakReference.TryGetTarget(out SDL3AudioDecoderManager.ISDL3AudioDataReceiver r)) + r.GetMetaData(bitrate, length, byteLength); + else + throw new ObjectDisposedException("channel is already disposed"); + } } - void ISDL3AudioDataReceiver.GetData(byte[] data, int length, bool done) + internal Track.Track GetNewTrack(Stream data, string name) { - if (channelWeakReference.TryGetTarget(out ISDL3AudioDataReceiver r)) - r.GetData(data, length, done); - else - throw new ObjectDisposedException("channel is already disposed"); + TrackSDL3 track = new TrackSDL3(name, AudioSpec, BufferSize); + ReceiverGCWrapper receiverGC = new ReceiverGCWrapper(new WeakReference(track)); + decoderManager.StartDecodingAsync(data, AudioSpec, true, receiverGC); + return track; } - void ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLength) + internal SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) { - if (channelWeakReference.TryGetTarget(out ISDL3AudioDataReceiver r)) - r.GetMetaData(bitrate, length, byteLength); - else - throw new ObjectDisposedException("channel is already disposed"); + SampleSDL3Factory sampleFactory = new SampleSDL3Factory(name, (SDL3AudioMixer)mixer, playbackConcurrency, AudioSpec); + ReceiverGCWrapper receiverGC = new ReceiverGCWrapper(new WeakReference(sampleFactory)); + decoderManager.StartDecodingAsync(data, AudioSpec, false, receiverGC); + return sampleFactory; } - } - internal Track.Track GetNewTrack(Stream data, string name) - { - TrackSDL3 track = new TrackSDL3(name, AudioSpec, BufferSize); - ReceiverGCWrapper receiverGC = new ReceiverGCWrapper(new WeakReference(track)); - decoderManager.StartDecodingAsync(data, AudioSpec, true, receiverGC); - return track; - } - - internal SampleFactory GetSampleFactory(Stream data, string name, AudioMixer mixer, int playbackConcurrency) - { - SampleSDL3Factory sampleFactory = new SampleSDL3Factory(name, (SDL3AudioMixer)mixer, playbackConcurrency, AudioSpec); - ReceiverGCWrapper receiverGC = new ReceiverGCWrapper(new WeakReference(sampleFactory)); - decoderManager.StartDecodingAsync(data, AudioSpec, false, receiverGC); - return sampleFactory; - } - - public void Dispose() - { - if (DeviceStream != null) + public void Dispose() { - SDL_DestroyAudioStream(DeviceStream); - DeviceStream = null; - DeviceId = 0; - // Destroying audio stream will close audio device because we use SDL3 OpenAudioDeviceStream - // won't use multiple AudioStream for now since it's barely useful - } + if (DeviceStream != null) + { + SDL_DestroyAudioStream(DeviceStream); + DeviceStream = null; + DeviceId = 0; + // Destroying audio stream will close audio device because we use SDL3 OpenAudioDeviceStream + // won't use multiple 
AudioStream for now since it's barely useful + } - objectHandle.Dispose(); - decoderManager.Dispose(); + objectHandle.Dispose(); + decoderManager.Dispose(); - SDL_QuitSubSystem(SDL_InitFlags.SDL_INIT_AUDIO); + SDL_QuitSubSystem(SDL_InitFlags.SDL_INIT_AUDIO); + } } } } diff --git a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs index 670713cae9..0d1d59341b 100644 --- a/osu.Framework/Audio/Sample/SampleSDL3Factory.cs +++ b/osu.Framework/Audio/Sample/SampleSDL3Factory.cs @@ -9,7 +9,7 @@ namespace osu.Framework.Audio.Sample { - internal class SampleSDL3Factory : SampleFactory, ISDL3AudioDataReceiver + internal class SampleSDL3Factory : SampleFactory, SDL3AudioDecoderManager.ISDL3AudioDataReceiver { private volatile bool isLoaded; public override bool IsLoaded => isLoaded; @@ -28,7 +28,7 @@ public SampleSDL3Factory(string name, SDL3AudioMixer mixer, int playbackConcurre this.spec = spec; } - void ISDL3AudioDataReceiver.GetData(byte[] audio, int byteLen, bool done) + void SDL3AudioDecoderManager.ISDL3AudioDataReceiver.GetData(byte[] audio, int byteLen, bool done) { if (IsDisposed) return; @@ -75,7 +75,7 @@ protected override void Dispose(bool disposing) base.Dispose(disposing); } - void ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLength) + void SDL3AudioDecoderManager.ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLength) { } // not needed } diff --git a/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs b/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs index 80fb49d094..6472cac0f0 100644 --- a/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TempoSDL3AudioPlayer.cs @@ -155,10 +155,10 @@ protected int GetTempoLatencyInSamples() if (soundTouch == null) return 0; - return (int)(soundTouch.UnprocessedSampleCount + soundTouch.AvailableSamples * Tempo); + return (int)(soundTouch.UnprocessedSampleCount + (soundTouch.AvailableSamples * Tempo)); } - protected override double GetProcessingLatency() => base.GetProcessingLatency() + (double)GetTempoLatencyInSamples() / SrcRate * 1000.0d; + protected override double GetProcessingLatency() => base.GetProcessingLatency() + (GetTempoLatencyInSamples() * 1000.0 / SrcRate); public override void Clear() { diff --git a/osu.Framework/Audio/Track/TrackSDL3.cs b/osu.Framework/Audio/Track/TrackSDL3.cs index 0391f61692..3f5b233a3a 100644 --- a/osu.Framework/Audio/Track/TrackSDL3.cs +++ b/osu.Framework/Audio/Track/TrackSDL3.cs @@ -11,7 +11,7 @@ namespace osu.Framework.Audio.Track { - public sealed class TrackSDL3 : Track, ISDL3AudioChannel, ISDL3AudioDataReceiver + public sealed class TrackSDL3 : Track, ISDL3AudioChannel, SDL3AudioDecoderManager.ISDL3AudioDataReceiver { private readonly TempoSDL3AudioPlayer player; @@ -55,7 +55,7 @@ public TrackSDL3(string name, SDL_AudioSpec spec, int samples) private readonly object syncRoot = new object(); - void ISDL3AudioDataReceiver.GetData(byte[] audio, int length, bool done) + void SDL3AudioDecoderManager.ISDL3AudioDataReceiver.GetData(byte[] audio, int length, bool done) { if (IsDisposed) return; @@ -82,7 +82,7 @@ void ISDL3AudioDataReceiver.GetData(byte[] audio, int length, bool done) } } - void ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLength) + void SDL3AudioDecoderManager.ISDL3AudioDataReceiver.GetMetaData(int bitrate, double length, long byteLength) { if (!isLoaded) { diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs 
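[Editor's note] The TempoSDL3AudioPlayer change above converts SoundTouch's buffered sample count into milliseconds of extra latency. A worked example with purely illustrative numbers (SoundTouch reports the real counts at runtime):

    // latency_ms = (unprocessed + available * tempo) * 1000 / SrcRate
    int unprocessedSamples = 512;   // still queued inside SoundTouch
    int availableSamples = 1024;    // processed output not yet read back
    double tempo = 1.5;
    int srcRate = 44100;

    int tempoLatencySamples = (int)(unprocessedSamples + (availableSamples * tempo)); // 512 + 1536 = 2048
    double tempoLatencyMs = tempoLatencySamples * 1000.0 / srcRate;                   // ≈ 46.4 ms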
index b2a3436cd3..428ec5ae36 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -96,7 +96,7 @@ public Waveform(Stream? data) }; // AudioDecoder will resample data into specified sample rate and channels (44100hz 2ch float) - SDL3AudioDecoder decoder = SDL3AudioDecoderManager.CreateDecoder(data, spec, true, false); + SDL3AudioDecoderManager.SDL3AudioDecoder decoder = SDL3AudioDecoderManager.CreateDecoder(data, spec, true, false); Complex[] complexBuffer = ArrayPool.Shared.Rent(fft_samples); diff --git a/osu.Framework/Extensions/ExtensionMethods.cs b/osu.Framework/Extensions/ExtensionMethods.cs index cb8b00f209..4add4d1e61 100644 --- a/osu.Framework/Extensions/ExtensionMethods.cs +++ b/osu.Framework/Extensions/ExtensionMethods.cs @@ -343,6 +343,6 @@ public static bool CheckIsValidUrl(this string url) /// /// NAudio Complex number /// Magnitude (Absolute number) of a given complex. - public static float ComputeMagnitude(this Complex complex) => (float)Math.Sqrt(complex.X * complex.X + complex.Y * complex.Y); + public static float ComputeMagnitude(this Complex complex) => (float)Math.Sqrt((complex.X * complex.X) + (complex.Y * complex.Y)); } } From 642b625076ec2bbc74c838b76a473dad641c4335 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Mon, 16 Sep 2024 22:22:11 +0900 Subject: [PATCH 114/127] Use simple logic in SDL3 mixing and adjust parameter --- osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs | 9 ++++++++- osu.Framework/Audio/ResamplingPlayer.cs | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs index 134e0cfa2c..9034b73f11 100644 --- a/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs +++ b/osu.Framework/Audio/Mixing/SDL3/SDL3AudioMixer.cs @@ -63,7 +63,14 @@ private void mixAudio(float[] dst, float[] src, int samples, float left, float r return; for (int i = 0; i < samples; i++) - dst[i] = Math.Clamp(dst[i] + src[i] * (i % 2 == 0 ? left : right), -1.0f, 1.0f); + { + dst[i] += src[i] * (i % 2 == 0 ? left : right); + + if (dst[i] > 1.0f) + dst[i] = 1.0f; + else if (dst[i] < -1.0f) + dst[i] = -1.0f; + } } private float[]? 
ret; diff --git a/osu.Framework/Audio/ResamplingPlayer.cs b/osu.Framework/Audio/ResamplingPlayer.cs index 1dba8b9235..64a80062db 100644 --- a/osu.Framework/Audio/ResamplingPlayer.cs +++ b/osu.Framework/Audio/ResamplingPlayer.cs @@ -56,7 +56,7 @@ private void setRate(double relativeRate) if (resampler == null) { resampler = new WdlResampler(); - resampler.SetMode(true, 2, false); + resampler.SetMode(true, 1, false); resampler.SetFilterParms(); resampler.SetFeedMode(false); } From a3263afc203cc45b9f77dad9410f396e5ebf339a Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Mon, 16 Sep 2024 22:39:50 +0900 Subject: [PATCH 115/127] Apply a change for SDL3-CS update --- osu.Framework/Audio/SDL3AudioManager.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/SDL3AudioManager.cs b/osu.Framework/Audio/SDL3AudioManager.cs index d7eafa1c42..7f1479f5e9 100644 --- a/osu.Framework/Audio/SDL3AudioManager.cs +++ b/osu.Framework/Audio/SDL3AudioManager.cs @@ -213,7 +213,7 @@ internal unsafe class SDL3BaseAudioManager : IDisposable internal SDL3BaseAudioManager(Func> mixerIterator) { - if (SDL_InitSubSystem(SDL_InitFlags.SDL_INIT_AUDIO) < 0) + if (SDL_InitSubSystem(SDL_InitFlags.SDL_INIT_AUDIO) == SDL_bool.SDL_FALSE) { throw new InvalidOperationException($"Failed to initialise SDL Audio: {SDL_GetError()}"); } From 15df04220820bec67552089e0debe9631995169d Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Wed, 18 Sep 2024 16:06:56 +0900 Subject: [PATCH 116/127] Potential fix for TestRestartFromRestartPoint failure --- osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs index 67c50c1a84..ff3a2767f6 100644 --- a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs @@ -32,7 +32,7 @@ internal class TrackSDL3AudioPlayer : ResamplingPlayer, IDisposable /// /// A position in milliseconds to convert /// - public long GetIndexFromMs(double seconds) => (long)(seconds / 1000.0d * SrcRate) * SrcChannels; + public long GetIndexFromMs(double seconds) => (long)Math.Ceiling(seconds / 1000.0d * SrcRate) * SrcChannels; /// /// Stores raw audio data. From d97986b38d52f8cc1bf63afdbd40ad4b92bb4803 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sat, 21 Sep 2024 10:09:14 +0900 Subject: [PATCH 117/127] Fix comment --- osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs index ff3a2767f6..4f71a5d0aa 100644 --- a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs @@ -21,7 +21,7 @@ internal class TrackSDL3AudioPlayer : ResamplingPlayer, IDisposable public virtual bool Done => done; /// - /// Returns a byte position converted into milliseconds with configuration set for this player. + /// Returns a data position converted into milliseconds with configuration set for this player. 
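[Editor's note] As a concrete illustration of the conversion documented here, using the GetIndexFromMs change from earlier in this group (numbers are illustrative; the reverse direction shown is the assumed inverse):

    // 1,000 ms into 44.1 kHz stereo float data
    double positionMs = 1000;
    int srcRate = 44100;
    int srcChannels = 2;

    long index = (long)System.Math.Ceiling(positionMs / 1000.0 * srcRate) * srcChannels; // 88,200 floats into AudioData
    double backToMs = index / (double)srcChannels / srcRate * 1000.0;                    // ≈ 1,000 ms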
/// /// Position to convert /// From 6ae2d8aff1ddf37b78fa352c7d59ea782f1ce222 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sat, 21 Sep 2024 13:58:53 +0900 Subject: [PATCH 118/127] Convert bytes instead of unsafe --- .../Audio/Track/TrackSDL3AudioPlayer.cs | 12 +-- osu.Framework/Audio/Track/Waveform.cs | 83 +++++++++---------- 2 files changed, 40 insertions(+), 55 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs index 4f71a5d0aa..53822554b9 100644 --- a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs @@ -99,17 +99,11 @@ internal void PutSamplesInStream(byte[] next, int length) if (audioDataLength + floatLen > AudioData.LongLength) prepareArray(audioDataLength + floatLen); - unsafe // To directly put bytes as float in array + for (int i = 0; i < floatLen; i++) { - fixed (float* dest = AudioData) - fixed (void* ptr = next) - { - float* src = (float*)ptr; - Buffer.MemoryCopy(src, dest + audioDataLength, (AudioData.LongLength - audioDataLength) * sizeof(float), length); - } + float src = BitConverter.ToSingle(next, i * sizeof(float)); + AudioData[audioDataLength++] = src; } - - audioDataLength += floatLen; } internal void DonePutting() diff --git a/osu.Framework/Audio/Track/Waveform.cs b/osu.Framework/Audio/Track/Waveform.cs index 428ec5ae36..fc51063306 100644 --- a/osu.Framework/Audio/Track/Waveform.cs +++ b/osu.Framework/Audio/Track/Waveform.cs @@ -120,66 +120,57 @@ public Waveform(Stream? data) do { - int read = decoder.LoadFromStream(out byte[] currentBytes); + int read = decoder.LoadFromStream(out byte[] currentBytes) / bytes_per_sample; int sampleIndex = 0; - unsafe + while (sampleIndex < read) { - fixed (void* ptr = currentBytes) + // Each point is composed of multiple samples + for (; pointSamples < samplesPerPoint && sampleIndex < read; pointSamples += channels, sampleIndex += channels) { - float* currentFloats = (float*)ptr; - int currentFloatsLength = read / bytes_per_sample; + // Find the maximum amplitude for each channel in the point + float left = BitConverter.ToSingle(currentBytes, sampleIndex * bytes_per_sample); + float right = BitConverter.ToSingle(currentBytes, (sampleIndex + 1) * bytes_per_sample); - while (sampleIndex < currentFloatsLength) - { - // Each point is composed of multiple samples - for (; pointSamples < samplesPerPoint && sampleIndex < currentFloatsLength; pointSamples += channels, sampleIndex += channels) - { - // Find the maximum amplitude for each channel in the point - float left = *(currentFloats + sampleIndex); - float right = *(currentFloats + sampleIndex + 1); - - point.AmplitudeLeft = Math.Max(point.AmplitudeLeft, Math.Abs(left)); - point.AmplitudeRight = Math.Max(point.AmplitudeRight, Math.Abs(right)); - - complexBuffer[complexBufferIndex].X = left + right; - complexBuffer[complexBufferIndex].Y = 0; + point.AmplitudeLeft = Math.Max(point.AmplitudeLeft, Math.Abs(left)); + point.AmplitudeRight = Math.Max(point.AmplitudeRight, Math.Abs(right)); - if (++complexBufferIndex >= fft_samples) - { - complexBufferIndex = 0; + complexBuffer[complexBufferIndex].X = left + right; + complexBuffer[complexBufferIndex].Y = 0; - FastFourierTransform.FFT(true, m, complexBuffer); + if (++complexBufferIndex >= fft_samples) + { + complexBufferIndex = 0; - point.LowIntensity = computeIntensity(sample_rate, complexBuffer, low_min, mid_min); - point.MidIntensity = computeIntensity(sample_rate, 
complexBuffer, mid_min, high_min); - point.HighIntensity = computeIntensity(sample_rate, complexBuffer, high_min, high_max); + FastFourierTransform.FFT(true, m, complexBuffer); - for (; fftPointIndex < pointList.Count; fftPointIndex++) - { - var prevPoint = pointList[fftPointIndex]; - prevPoint.LowIntensity = point.LowIntensity; - prevPoint.MidIntensity = point.MidIntensity; - prevPoint.HighIntensity = point.HighIntensity; - pointList[fftPointIndex] = prevPoint; - } + point.LowIntensity = computeIntensity(sample_rate, complexBuffer, low_min, mid_min); + point.MidIntensity = computeIntensity(sample_rate, complexBuffer, mid_min, high_min); + point.HighIntensity = computeIntensity(sample_rate, complexBuffer, high_min, high_max); - fftPointIndex++; // current Point is going to be added - } + for (; fftPointIndex < pointList.Count; fftPointIndex++) + { + var prevPoint = pointList[fftPointIndex]; + prevPoint.LowIntensity = point.LowIntensity; + prevPoint.MidIntensity = point.MidIntensity; + prevPoint.HighIntensity = point.HighIntensity; + pointList[fftPointIndex] = prevPoint; } - if (pointSamples >= samplesPerPoint) - { - // There may be unclipped samples, so clip them ourselves - point.AmplitudeLeft = Math.Min(1, point.AmplitudeLeft); - point.AmplitudeRight = Math.Min(1, point.AmplitudeRight); + fftPointIndex++; // current Point is going to be added + } + } + + if (pointSamples >= samplesPerPoint) + { + // There may be unclipped samples, so clip them ourselves + point.AmplitudeLeft = Math.Min(1, point.AmplitudeLeft); + point.AmplitudeRight = Math.Min(1, point.AmplitudeRight); - pointList.Add(point); + pointList.Add(point); - point = new Point(); - pointSamples = 0; - } - } + point = new Point(); + pointSamples = 0; } } } while (decoder.Loading); From eefb8255cd9359ca39b34fda8003abf1f0af4c86 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sat, 21 Sep 2024 17:39:59 +0900 Subject: [PATCH 119/127] Rename a variable --- osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs index 53822554b9..94aa965907 100644 --- a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs @@ -77,7 +77,7 @@ private void prepareArray(long wanted) internal void PrepareStream(long byteLength = 3 * 60 * 44100 * 2 * 4) { - if (disposedValue) + if (isDisposed) return; if (AudioData == null) @@ -88,7 +88,7 @@ internal void PrepareStream(long byteLength = 3 * 60 * 44100 * 2 * 4) internal void PutSamplesInStream(byte[] next, int length) { - if (disposedValue) + if (isDisposed) return; if (AudioData == null) @@ -108,7 +108,7 @@ internal void PutSamplesInStream(byte[] next, int length) internal void DonePutting() { - if (disposedValue) + if (isDisposed) return; // Saved seek was over data length @@ -258,14 +258,14 @@ public virtual void Seek(double seek) } } - private bool disposedValue; + private volatile bool isDisposed; protected virtual void Dispose(bool disposing) { - if (!disposedValue) + if (!isDisposed) { AudioData = null; - disposedValue = true; + isDisposed = true; } } From 9781021c497dfd627f6ffb79c51df0580d3cdf24 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sat, 21 Sep 2024 17:41:12 +0900 Subject: [PATCH 120/127] Basic refactor to let VideoDecoder decoder Audio/Video at the same time --- .../Audio/SDL3AudioDecoderManager.cs 
| 4 +- osu.Framework/Graphics/Video/FFmpegFuncs.cs | 3 + osu.Framework/Graphics/Video/VideoDecoder.cs | 342 +++++++++++------- 3 files changed, 218 insertions(+), 131 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index 20f88dee88..cfb9228360 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -524,9 +524,9 @@ protected override int LoadFromStreamInternal(out byte[] decoded) SDL3.SDL_AUDIO_ISFLOAT(AudioSpec.format), SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format), SDL3.SDL_AUDIO_ISSIGNED(AudioSpec.format)); ffmpeg.PrepareDecoding(); - ffmpeg.RecreateCodecContext(); + ffmpeg.OpenAudioStream(); - Bitrate = (int)ffmpeg.Bitrate; + Bitrate = (int)ffmpeg.AudioBitrate; Length = ffmpeg.Duration; ByteLength = (long)Math.Ceiling(ffmpeg.Duration / 1000.0d * AudioSpec.freq) * AudioSpec.channels * (SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format) / 8); // FIXME diff --git a/osu.Framework/Graphics/Video/FFmpegFuncs.cs b/osu.Framework/Graphics/Video/FFmpegFuncs.cs index 84b44571a5..c7fcab153f 100644 --- a/osu.Framework/Graphics/Video/FFmpegFuncs.cs +++ b/osu.Framework/Graphics/Video/FFmpegFuncs.cs @@ -110,6 +110,8 @@ public unsafe class FFmpegFuncs public delegate long AvGetDefaultChannelLayoutDelegate(int nbChannels); + public delegate AVCodec* AvCodecFindDecoderDelegate(AVCodecID id); + #endregion [CanBeNull] @@ -163,6 +165,7 @@ public unsafe class FFmpegFuncs public SwrGetDelayDelegate swr_get_delay; public AvSamplesGetBufferSizeDelegate av_samples_get_buffer_size; public AvGetDefaultChannelLayoutDelegate av_get_default_channel_layout; + public AvCodecFindDecoderDelegate avcodec_find_decoder; // Touching AutoGen.ffmpeg or its LibraryLoader in any way on non-Desktop platforms // will cause it to throw in static constructor, which can't be bypassed. diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 33340ee9be..557e24d337 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -55,12 +55,12 @@ public unsafe class VideoDecoder : IDisposable /// /// The frame rate of the video stream this decoder is decoding. /// - public double FrameRate => stream == null ? 0 : stream->avg_frame_rate.GetValue(); + public double FrameRate => videoStream == null ? 0 : videoStream->avg_frame_rate.GetValue(); /// /// True if the decoder can seek, false otherwise. Determined by the stream this decoder was created with. /// - public bool CanSeek => videoStream?.CanSeek == true; + public bool CanSeek => dataStream?.CanSeek == true; /// /// The current state of the decoding process. 
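[Editor's note] For readers following the FFmpegFuncs change above: surfacing a new libav export is a three-step pattern, condensed here from the hunks in this patch (the declarations live inside the already-unsafe FFmpegFuncs class, and the final assignment appears in CreateFuncs later in this patch):

    // 1. a delegate matching the libav prototype
    public delegate AVCodec* AvCodecFindDecoderDelegate(AVCodecID id);

    // 2. a public field that decoding code calls through
    public AvCodecFindDecoderDelegate avcodec_find_decoder;

    // 3. bound to the AutoGen export when the function table is built:
    //    avcodec_find_decoder = FFmpeg.AutoGen.ffmpeg.avcodec_find_decoder,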
@@ -75,19 +75,25 @@ public unsafe class VideoDecoder : IDisposable // libav-context-related private AVFormatContext* formatContext; private AVIOContext* ioContext; - private AVStream* stream; - private AVCodecContext* codecContext; + + private AVStream* videoStream; + private AVCodecContext* videoCodecContext; private SwsContext* swsContext; + private AVStream* audioStream; + private AVCodecContext* audioCodecContext => audioStream->codec; + private SwrContext* swrContext; + private avio_alloc_context_read_packet readPacketCallback; private avio_alloc_context_seek seekCallback; private bool inputOpened; private bool isDisposed; private bool hwDecodingAllowed = true; - private Stream videoStream; + private Stream dataStream; - private double timeBaseInSeconds; + private double videoTimeBaseInSeconds; + private double audioTimeBaseInSeconds; // active decoder state private volatile float lastDecodedFrameTime; @@ -143,14 +149,12 @@ public VideoDecoder(IRenderer renderer, string filename) private VideoDecoder(Stream stream) { ffmpeg = CreateFuncs(); - videoStream = stream; - if (!videoStream.CanRead) + dataStream = stream; + if (!dataStream.CanRead) throw new InvalidOperationException($"The given stream does not support reading. A stream used for a {nameof(VideoDecoder)} must support reading."); State = DecoderState.Ready; - decodedFrames = new ConcurrentQueue(); decoderCommands = new ConcurrentQueue(); - availableTextures = new ConcurrentQueue(); // TODO: use "real" object pool when there's some public pool supporting disposables handle = new ObjectHandle(this, GCHandleType.Normal); } @@ -164,6 +168,11 @@ public VideoDecoder(IRenderer renderer, Stream videoStream) { this.renderer = renderer; + decodedFrames = new ConcurrentQueue(); + availableTextures = new ConcurrentQueue(); // TODO: use "real" object pool when there's some public pool supporting disposables + scalerFrames = new ConcurrentQueue(); + hwTransferFrames = new ConcurrentQueue(); + TargetHardwareVideoDecoders.BindValueChanged(_ => { // ignore if decoding wasn't initialized yet. 
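[Editor's note] A usage sketch of the two construction paths after this refactor (placeholder parameters, osu.Framework usings omitted); the audio-only overload is the one FFmpegAudioDecoder calls earlier in this series:

    static void CreateDecoders(IRenderer renderer, Stream videoData, Stream audioData)
    {
        // video path: decoded frames are uploaded to textures created through the renderer
        var videoDecoder = new VideoDecoder(renderer, videoData);

        // audio-only path: 44.1 kHz, stereo, signed 32-bit float samples
        var audioDecoder = new VideoDecoder(audioData, 44100, 2, isFloat: true, bits: 32, signed: true);
    }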
@@ -174,27 +183,33 @@ public VideoDecoder(IRenderer renderer, Stream videoStream) }); } - private readonly bool audio; - private readonly int audioRate; - private readonly int audioChannels; - private readonly int audioBits; - private readonly long audioChannelLayout; - private readonly AVSampleFormat audioFmt; - private SwrContext* swrContext; + private readonly bool audioOnly; - public long Bitrate => codecContext->bit_rate; - public long FrameCount => stream->nb_frames; + private bool audio; + private int audioRate; + private int audioChannels; + private int audioBits; + private long audioChannelLayout; + private AVSampleFormat audioFmt; + + public long AudioBitrate => audioCodecContext->bit_rate; + public long AudioFrameCount => audioStream->nb_frames; // Audio mode public VideoDecoder(Stream audioStream, int rate, int channels, bool isFloat, int bits, bool signed) : this(audioStream) + { + audioOnly = true; + EnableAudioDecoding(rate, channels, isFloat, bits, signed); + } + + public void EnableAudioDecoding(int rate, int channels, bool isFloat, int bits, bool signed) { audioRate = rate; audioChannels = channels; audioBits = bits; audio = true; - hwDecodingAllowed = false; audioChannelLayout = ffmpeg.av_get_default_channel_layout(channels); audioFmt = AVSampleFormat.AV_SAMPLE_FMT_FLT; @@ -209,7 +224,7 @@ public VideoDecoder(Stream audioStream, int rate, int channels, bool isFloat, in else if (signed && bits == 32) audioFmt = AVSampleFormat.AV_SAMPLE_FMT_S32; else - Logger.Log("libswresample doesn't support current format! using default format...", level: LogLevel.Important); + throw new InvalidOperationException("swresample doesn't support provided format!"); } /// @@ -223,8 +238,18 @@ public void Seek(double targetTimestamp) decoderCommands.Enqueue(() => { - ffmpeg.avcodec_flush_buffers(codecContext); - ffmpeg.av_seek_frame(formatContext, stream->index, (long)(targetTimestamp / timeBaseInSeconds / 1000.0), FFmpegFuncs.AVSEEK_FLAG_BACKWARD); + if (!audioOnly) + { + ffmpeg.avcodec_flush_buffers(videoCodecContext); + ffmpeg.av_seek_frame(formatContext, videoStream->index, (long)(targetTimestamp / videoTimeBaseInSeconds / 1000.0), FFmpegFuncs.AVSEEK_FLAG_BACKWARD); + } + + if (audio) + { + ffmpeg.avcodec_flush_buffers(audioCodecContext); + ffmpeg.av_seek_frame(formatContext, audioStream->index, (long)(targetTimestamp / videoTimeBaseInSeconds / 1000.0), FFmpegFuncs.AVSEEK_FLAG_BACKWARD); + } + skipOutputUntilTime = targetTimestamp; State = DecoderState.Ready; }); @@ -309,10 +334,10 @@ public IEnumerable GetDecodedFrames() // https://en.wikipedia.org/wiki/YCbCr public Matrix3 GetConversionMatrix() { - if (codecContext == null) + if (videoCodecContext == null) return Matrix3.Zero; - switch (codecContext->colorspace) + switch (videoCodecContext->colorspace) { case AVColorSpace.AVCOL_SPC_BT709: return new Matrix3(1.164f, 1.164f, 1.164f, @@ -337,7 +362,7 @@ private static int readPacket(void* opaque, byte* bufferPtr, int bufferSize) return 0; var span = new Span(bufferPtr, bufferSize); - int bytesRead = decoder.videoStream.Read(span); + int bytesRead = decoder.dataStream.Read(span); return bytesRead != 0 ? 
bytesRead : FFmpegFuncs.AVERROR_EOF; } @@ -349,37 +374,37 @@ private static long streamSeekCallbacks(void* opaque, long offset, int whence) if (!handle.GetTarget(out VideoDecoder decoder)) return -1; - if (!decoder.videoStream.CanSeek) + if (!decoder.dataStream.CanSeek) throw new InvalidOperationException("Tried seeking on a video sourced by a non-seekable stream."); switch (whence) { case StdIo.SEEK_CUR: - decoder.videoStream.Seek(offset, SeekOrigin.Current); + decoder.dataStream.Seek(offset, SeekOrigin.Current); break; case StdIo.SEEK_END: - decoder.videoStream.Seek(offset, SeekOrigin.End); + decoder.dataStream.Seek(offset, SeekOrigin.End); break; case StdIo.SEEK_SET: - decoder.videoStream.Seek(offset, SeekOrigin.Begin); + decoder.dataStream.Seek(offset, SeekOrigin.Begin); break; case FFmpegFuncs.AVSEEK_SIZE: - return decoder.videoStream.Length; + return decoder.dataStream.Length; default: return -1; } - return decoder.videoStream.Position; + return decoder.dataStream.Position; } // sets up libavformat state: creates the AVFormatContext, the frames, etc. to start decoding, but does not actually start the decodingLoop internal void PrepareDecoding() { - videoStream.Position = 0; + dataStream.Position = 0; const int context_buffer_size = 4096; readPacketCallback = readPacket; @@ -409,50 +434,87 @@ internal void PrepareDecoding() if (findStreamInfoResult < 0) throw new InvalidOperationException($"Error finding stream info: {getErrorMessage(findStreamInfoResult)}"); - int streamIndex = ffmpeg.av_find_best_stream(formatContext, audio ? AVMediaType.AVMEDIA_TYPE_AUDIO : AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, null, 0); - if (streamIndex < 0) - throw new InvalidOperationException($"Couldn't find stream: {getErrorMessage(streamIndex)}"); + int streamIndex = -1; - stream = formatContext->streams[streamIndex]; - timeBaseInSeconds = stream->time_base.GetValue(); + if (!audioOnly) + { + streamIndex = ffmpeg.av_find_best_stream(formatContext, AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, null, 0); + if (streamIndex < 0) + throw new InvalidOperationException($"Couldn't find stream: {getErrorMessage(streamIndex)}"); - if (stream->duration > 0) - Duration = stream->duration * timeBaseInSeconds * 1000.0; - else - Duration = formatContext->duration / (double)FFmpegFuncs.AV_TIME_BASE * 1000.0; + videoStream = formatContext->streams[streamIndex]; + videoTimeBaseInSeconds = videoStream->time_base.GetValue(); + + if (videoStream->duration > 0) + Duration = videoStream->duration * videoTimeBaseInSeconds * 1000.0; + else + Duration = formatContext->duration / (double)FFmpegFuncs.AV_TIME_BASE * 1000.0; + } + + if (audio) + { + streamIndex = ffmpeg.av_find_best_stream(formatContext, AVMediaType.AVMEDIA_TYPE_AUDIO, -1, streamIndex, null, 0); + if (streamIndex < 0 && audioOnly) + throw new InvalidOperationException($"Couldn't find stream: {getErrorMessage(streamIndex)}"); + + audioStream = formatContext->streams[streamIndex]; + audioTimeBaseInSeconds = audioStream->time_base.GetValue(); + + if (audioOnly) + { + if (audioStream->duration > 0) + Duration = audioStream->duration * audioTimeBaseInSeconds * 1000.0; + else + Duration = formatContext->duration / (double)FFmpegFuncs.AV_TIME_BASE * 1000.0; + } + } packet = ffmpeg.av_packet_alloc(); receiveFrame = ffmpeg.av_frame_alloc(); } + internal void OpenAudioStream() + { + if (audioStream == null) + return; + + int result = ffmpeg.avcodec_open2(audioStream->codec, ffmpeg.avcodec_find_decoder(audioStream->codec->codec_id), null); + + if (result < 0) + throw new 
InvalidDataException($"Error trying to open audio codec: {getErrorMessage(result)}"); + + if (!prepareResampler()) + throw new InvalidDataException("Error trying to prepare audio resampler"); + } + internal void RecreateCodecContext() { - if (stream == null) + if (videoStream == null) return; - var codecParams = *stream->codecpar; + var codecParams = *videoStream->codecpar; var targetHwDecoders = hwDecodingAllowed ? TargetHardwareVideoDecoders.Value : HardwareVideoDecoder.None; bool openSuccessful = false; foreach (var (decoder, hwDeviceType) in GetAvailableDecoders(formatContext->iformat, codecParams.codec_id, targetHwDecoders)) { // free context in case it was allocated in a previous iteration or recreate call. - if (codecContext != null) + if (videoCodecContext != null) { - fixed (AVCodecContext** ptr = &codecContext) + fixed (AVCodecContext** ptr = &videoCodecContext) ffmpeg.avcodec_free_context(ptr); } - codecContext = ffmpeg.avcodec_alloc_context3(decoder.Pointer); - codecContext->pkt_timebase = stream->time_base; + videoCodecContext = ffmpeg.avcodec_alloc_context3(decoder.Pointer); + videoCodecContext->pkt_timebase = videoStream->time_base; - if (codecContext == null) + if (videoCodecContext == null) { Logger.Log($"Couldn't allocate codec context. Codec: {decoder.Name}"); continue; } - int paramCopyResult = ffmpeg.avcodec_parameters_to_context(codecContext, &codecParams); + int paramCopyResult = ffmpeg.avcodec_parameters_to_context(videoCodecContext, &codecParams); if (paramCopyResult < 0) { @@ -463,7 +525,7 @@ internal void RecreateCodecContext() // initialize hardware decode context. if (hwDeviceType != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE) { - int hwDeviceCreateResult = ffmpeg.av_hwdevice_ctx_create(&codecContext->hw_device_ctx, hwDeviceType, null, null, 0); + int hwDeviceCreateResult = ffmpeg.av_hwdevice_ctx_create(&videoCodecContext->hw_device_ctx, hwDeviceType, null, null, 0); if (hwDeviceCreateResult < 0) { @@ -474,7 +536,7 @@ internal void RecreateCodecContext() Logger.Log($"Successfully opened hardware video decoder context {hwDeviceType} for codec {decoder.Name}"); } - int openCodecResult = ffmpeg.avcodec_open2(codecContext, decoder.Pointer, null); + int openCodecResult = ffmpeg.avcodec_open2(videoCodecContext, decoder.Pointer, null); if (openCodecResult < 0) { @@ -496,13 +558,15 @@ internal void RecreateCodecContext() if (!openSuccessful) throw new InvalidOperationException($"No usable decoder found for codec ID {codecParams.codec_id}"); + + OpenAudioStream(); } private bool prepareResampler() { - long srcChLayout = ffmpeg.av_get_default_channel_layout(codecContext->channels); - AVSampleFormat srcAudioFmt = codecContext->sample_fmt; - int srcRate = codecContext->sample_rate; + long srcChLayout = ffmpeg.av_get_default_channel_layout(audioCodecContext->channels); + AVSampleFormat srcAudioFmt = audioCodecContext->sample_fmt; + int srcRate = audioCodecContext->sample_rate; if (audioChannelLayout == srcChLayout && audioFmt == srcAudioFmt && audioRate == srcRate) { @@ -606,10 +670,7 @@ internal int DecodeNextAudioFrame(int iteration, ref byte[] decodedAudio, bool d decodeNextFrame(packet, receiveFrame); if (State != DecoderState.Running) - { - resampleAndAppendToAudioStream(null); // flush resampler break; - } } } catch (Exception e) @@ -642,9 +703,13 @@ private void decodeNextFrame(AVPacket* packet, AVFrame* receiveFrame) bool unrefPacket = true; - if (packet->stream_index == stream->index) + AVCodecContext* codecContext = + !audioOnly && packet->stream_index == 
videoStream->index ? videoCodecContext + : audio && packet->stream_index == audioStream->index ? audioCodecContext : null; + + if (codecContext != null) { - int sendPacketResult = sendPacket(receiveFrame, packet); + int sendPacketResult = sendPacket(codecContext, receiveFrame, packet); // keep the packet data for next frame if we didn't send it successfully. if (sendPacketResult == -FFmpegFuncs.EAGAIN) @@ -659,7 +724,14 @@ private void decodeNextFrame(AVPacket* packet, AVFrame* receiveFrame) else if (readFrameResult == FFmpegFuncs.AVERROR_EOF) { // Flush decoder. - sendPacket(receiveFrame, null); + if (!audioOnly) + sendPacket(videoCodecContext, receiveFrame, null); + + if (audio) + { + sendPacket(audioCodecContext, receiveFrame, null); + resampleAndAppendToAudioStream(null); // flush audio resampler + } if (Looping) { @@ -683,7 +755,7 @@ private void decodeNextFrame(AVPacket* packet, AVFrame* receiveFrame) } } - private int sendPacket(AVFrame* receiveFrame, AVPacket* packet) + private int sendPacket(AVCodecContext* codecContext, AVFrame* receiveFrame, AVPacket* packet) { // send the packet for decoding. int sendPacketResult = ffmpeg.avcodec_send_packet(codecContext, packet); @@ -692,7 +764,7 @@ private int sendPacket(AVFrame* receiveFrame, AVPacket* packet) // otherwise we would get stuck in an infinite loop. if (sendPacketResult == 0 || sendPacketResult == -FFmpegFuncs.EAGAIN) { - readDecodedFrames(receiveFrame); + readDecodedFrames(codecContext, receiveFrame); } else { @@ -703,10 +775,10 @@ private int sendPacket(AVFrame* receiveFrame, AVPacket* packet) return sendPacketResult; } - private readonly ConcurrentQueue hwTransferFrames = new ConcurrentQueue(); + private readonly ConcurrentQueue hwTransferFrames; private void returnHwTransferFrame(FFmpegFrame frame) => hwTransferFrames.Enqueue(frame); - private void readDecodedFrames(AVFrame* receiveFrame) + private void readDecodedFrames(AVCodecContext* codecContext, AVFrame* receiveFrame) { while (true) { @@ -723,67 +795,75 @@ private void readDecodedFrames(AVFrame* receiveFrame) break; } - if (audio) - { - resampleAndAppendToAudioStream(receiveFrame); - continue; - } - // use `best_effort_timestamp` as it can be more accurate if timestamps from the source file (pts) are broken. // but some HW codecs don't set it in which case fallback to `pts` long frameTimestamp = receiveFrame->best_effort_timestamp != FFmpegFuncs.AV_NOPTS_VALUE ? receiveFrame->best_effort_timestamp : receiveFrame->pts; - double frameTime = (frameTimestamp - stream->start_time) * timeBaseInSeconds * 1000; + double frameTime = 0.0; - if (skipOutputUntilTime > frameTime) - continue; + if (audio && codecContext->codec_type == AVMediaType.AVMEDIA_TYPE_AUDIO) + { + frameTime = (frameTimestamp - audioStream->start_time) * audioTimeBaseInSeconds * 1000; - // get final frame. - FFmpegFrame frame; + if (skipOutputUntilTime > frameTime) + continue; - if (((AVPixelFormat)receiveFrame->format).IsHardwarePixelFormat()) + resampleAndAppendToAudioStream(receiveFrame); + } + else if (!audioOnly && codecContext->codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO) { - // transfer data from HW decoder to RAM. - if (!hwTransferFrames.TryDequeue(out var hwTransferFrame)) - hwTransferFrame = new FFmpegFrame(ffmpeg, returnHwTransferFrame); + frameTime = (frameTimestamp - videoStream->start_time) * videoTimeBaseInSeconds * 1000; - // WARNING: frames from `av_hwframe_transfer_data` have their timestamps set to AV_NOPTS_VALUE instead of real values. 
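[Editor's note] The frame-time computation above is the same for audio and video, just with each stream's own time base and start time. A worked example with illustrative numbers:

    // pts → milliseconds: (pts - start_time) * time_base_in_seconds * 1000
    long frameTimestamp = 96000;              // pts in stream time-base units
    long startTime = 0;
    double timeBaseInSeconds = 1.0 / 48000;   // e.g. an AVRational of 1/48000 for a 48 kHz audio stream

    double frameTimeMs = (frameTimestamp - startTime) * timeBaseInSeconds * 1000; // 2,000 ms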
- // if you need to use them later, take them from `receiveFrame`. - int transferResult = ffmpeg.av_hwframe_transfer_data(hwTransferFrame.Pointer, receiveFrame, 0); + if (skipOutputUntilTime > frameTime) + continue; - if (transferResult < 0) + // get final frame. + FFmpegFrame frame; + + if (((AVPixelFormat)receiveFrame->format).IsHardwarePixelFormat()) { - Logger.Log($"Failed to transfer frame from HW decoder: {getErrorMessage(transferResult)}"); - tryDisableHwDecoding(transferResult); + // transfer data from HW decoder to RAM. + if (!hwTransferFrames.TryDequeue(out var hwTransferFrame)) + hwTransferFrame = new FFmpegFrame(ffmpeg, returnHwTransferFrame); - hwTransferFrame.Dispose(); - continue; - } + // WARNING: frames from `av_hwframe_transfer_data` have their timestamps set to AV_NOPTS_VALUE instead of real values. + // if you need to use them later, take them from `receiveFrame`. + int transferResult = ffmpeg.av_hwframe_transfer_data(hwTransferFrame.Pointer, receiveFrame, 0); - frame = hwTransferFrame; - } - else - { - // copy data to a new AVFrame so that `receiveFrame` can be reused. - frame = new FFmpegFrame(ffmpeg); - ffmpeg.av_frame_move_ref(frame.Pointer, receiveFrame); - } + if (transferResult < 0) + { + Logger.Log($"Failed to transfer frame from HW decoder: {getErrorMessage(transferResult)}"); + tryDisableHwDecoding(transferResult); - lastDecodedFrameTime = (float)frameTime; + hwTransferFrame.Dispose(); + continue; + } - // Note: this is the pixel format that `VideoTexture` expects internally - frame = ensureFramePixelFormat(frame, AVPixelFormat.AV_PIX_FMT_YUV420P); - if (frame == null) - continue; + frame = hwTransferFrame; + } + else + { + // copy data to a new AVFrame so that `receiveFrame` can be reused. + frame = new FFmpegFrame(ffmpeg); + ffmpeg.av_frame_move_ref(frame.Pointer, receiveFrame); + } - if (!availableTextures.TryDequeue(out var tex)) - tex = renderer.CreateVideoTexture(frame.Pointer->width, frame.Pointer->height); + // Note: this is the pixel format that `VideoTexture` expects internally + frame = ensureFramePixelFormat(frame, AVPixelFormat.AV_PIX_FMT_YUV420P); + if (frame == null) + continue; - var upload = new VideoTextureUpload(frame); + if (!availableTextures.TryDequeue(out var tex)) + tex = renderer.CreateVideoTexture(frame.Pointer->width, frame.Pointer->height); - // We do not support videos with transparency at this point, so the upload's opacity as well as the texture's opacity is always opaque. - tex.SetData(upload, Opacity.Opaque); - decodedFrames.Enqueue(new DecodedFrame { Time = frameTime, Texture = tex }); + var upload = new VideoTextureUpload(frame); + + // We do not support videos with transparency at this point, so the upload's opacity as well as the texture's opacity is always opaque. 
+ tex.SetData(upload, Opacity.Opaque); + decodedFrames.Enqueue(new DecodedFrame { Time = frameTime, Texture = tex }); + } + + lastDecodedFrameTime = (float)frameTime; } } @@ -797,12 +877,12 @@ private void resampleAndAppendToAudioStream(AVFrame* frame) if (swrContext != null) { - sampleCount = (int)ffmpeg.swr_get_delay(swrContext, codecContext->sample_rate); + sampleCount = (int)ffmpeg.swr_get_delay(swrContext, audioCodecContext->sample_rate); source = null; if (frame != null) { - sampleCount = (int)Math.Ceiling((double)(sampleCount + frame->nb_samples) * audioRate / codecContext->sample_rate); + sampleCount = (int)Math.Ceiling((double)(sampleCount + frame->nb_samples) * audioRate / audioCodecContext->sample_rate); source = frame->data.ToArray(); } @@ -852,7 +932,7 @@ private void resampleAndAppendToAudioStream(AVFrame* frame) } } - private readonly ConcurrentQueue scalerFrames = new ConcurrentQueue(); + private readonly ConcurrentQueue scalerFrames; private void returnScalerFrame(FFmpegFrame frame) => scalerFrames.Enqueue(frame); [CanBeNull] @@ -916,7 +996,7 @@ private FFmpegFrame ensureFramePixelFormat(FFmpegFrame frame, AVPixelFormat targ private void tryDisableHwDecoding(int errorCode) { - if (!hwDecodingAllowed || TargetHardwareVideoDecoders.Value == HardwareVideoDecoder.None || codecContext == null || codecContext->hw_device_ctx == null) + if (!hwDecodingAllowed || TargetHardwareVideoDecoders.Value == HardwareVideoDecoder.None || videoCodecContext == null || videoCodecContext->hw_device_ctx == null) return; hwDecodingAllowed = false; @@ -1085,7 +1165,8 @@ protected virtual FFmpegFuncs CreateFuncs() swr_convert = FFmpeg.AutoGen.ffmpeg.swr_convert, swr_get_delay = FFmpeg.AutoGen.ffmpeg.swr_get_delay, av_samples_get_buffer_size = FFmpeg.AutoGen.ffmpeg.av_samples_get_buffer_size, - av_get_default_channel_layout = FFmpeg.AutoGen.ffmpeg.av_get_default_channel_layout + av_get_default_channel_layout = FFmpeg.AutoGen.ffmpeg.av_get_default_channel_layout, + avcodec_find_decoder = FFmpeg.AutoGen.ffmpeg.avcodec_find_decoder }; } @@ -1141,19 +1222,19 @@ void freeFFmpeg() ffmpeg.avio_context_free(ptr); } - if (codecContext != null) + if (videoCodecContext != null) { - fixed (AVCodecContext** ptr = &codecContext) + fixed (AVCodecContext** ptr = &videoCodecContext) ffmpeg.avcodec_free_context(ptr); } seekCallback = null; readPacketCallback = null; - if (!audio) - videoStream.Dispose(); + if (!audioOnly) + dataStream.Dispose(); - videoStream = null; + dataStream = null; if (swsContext != null) ffmpeg.sws_freeContext(swsContext); @@ -1168,25 +1249,28 @@ void freeFFmpeg() memoryStream = null; - while (decodedFrames.TryDequeue(out var f)) + if (!audioOnly) { - f.Texture.FlushUploads(); - f.Texture.Dispose(); - } + while (decodedFrames.TryDequeue(out var f)) + { + f.Texture.FlushUploads(); + f.Texture.Dispose(); + } - while (availableTextures.TryDequeue(out var t)) - t.Dispose(); + while (availableTextures.TryDequeue(out var t)) + t.Dispose(); - while (hwTransferFrames.TryDequeue(out var hwF)) - hwF.Dispose(); + while (hwTransferFrames.TryDequeue(out var hwF)) + hwF.Dispose(); - while (scalerFrames.TryDequeue(out var sf)) - sf.Dispose(); + while (scalerFrames.TryDequeue(out var sf)) + sf.Dispose(); + } handle.Dispose(); } - if (audio) + if (audioOnly) freeFFmpeg(); else StopDecodingAsync().ContinueWith(_ => freeFFmpeg()); From 24814acce2d2ecb28104828ed6ffafb16031be2b Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:34:22 +0900 Subject: [PATCH 
121/127] Check if audioStream is not null in VideoDecoder --- osu.Framework/Graphics/Video/VideoDecoder.cs | 25 +++++++------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 557e24d337..59adf30009 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -183,9 +183,9 @@ public VideoDecoder(IRenderer renderer, Stream videoStream) }); } + private bool isAudioEnabled; private readonly bool audioOnly; - private bool audio; private int audioRate; private int audioChannels; private int audioBits; @@ -209,9 +209,8 @@ public void EnableAudioDecoding(int rate, int channels, bool isFloat, int bits, audioChannels = channels; audioBits = bits; - audio = true; + isAudioEnabled = true; audioChannelLayout = ffmpeg.av_get_default_channel_layout(channels); - audioFmt = AVSampleFormat.AV_SAMPLE_FMT_FLT; memoryStream = new MemoryStream(); @@ -244,7 +243,7 @@ public void Seek(double targetTimestamp) ffmpeg.av_seek_frame(formatContext, videoStream->index, (long)(targetTimestamp / videoTimeBaseInSeconds / 1000.0), FFmpegFuncs.AVSEEK_FLAG_BACKWARD); } - if (audio) + if (audioStream != null) { ffmpeg.avcodec_flush_buffers(audioCodecContext); ffmpeg.av_seek_frame(formatContext, audioStream->index, (long)(targetTimestamp / videoTimeBaseInSeconds / 1000.0), FFmpegFuncs.AVSEEK_FLAG_BACKWARD); @@ -451,7 +450,7 @@ internal void PrepareDecoding() Duration = formatContext->duration / (double)FFmpegFuncs.AV_TIME_BASE * 1000.0; } - if (audio) + if (isAudioEnabled) { streamIndex = ffmpeg.av_find_best_stream(formatContext, AVMediaType.AVMEDIA_TYPE_AUDIO, -1, streamIndex, null, 0); if (streamIndex < 0 && audioOnly) @@ -544,12 +543,6 @@ internal void RecreateCodecContext() continue; } - if (audio && !prepareResampler()) - { - Logger.Log("Error trying to prepare audio resampler"); - continue; - } - Logger.Log($"Successfully initialized decoder: {decoder.Name}"); openSuccessful = true; @@ -653,7 +646,7 @@ private void decodingLoop(CancellationToken cancellationToken) internal int DecodeNextAudioFrame(int iteration, ref byte[] decodedAudio, bool decodeUntilEnd = false) { - if (!audio) + if (audioStream == null) { decodedAudio = Array.Empty(); return 0; @@ -705,7 +698,7 @@ private void decodeNextFrame(AVPacket* packet, AVFrame* receiveFrame) AVCodecContext* codecContext = !audioOnly && packet->stream_index == videoStream->index ? videoCodecContext - : audio && packet->stream_index == audioStream->index ? audioCodecContext : null; + : audioStream != null && packet->stream_index == audioStream->index ? 
audioCodecContext : null; if (codecContext != null) { @@ -727,7 +720,7 @@ private void decodeNextFrame(AVPacket* packet, AVFrame* receiveFrame) if (!audioOnly) sendPacket(videoCodecContext, receiveFrame, null); - if (audio) + if (audioStream != null) { sendPacket(audioCodecContext, receiveFrame, null); resampleAndAppendToAudioStream(null); // flush audio resampler @@ -801,7 +794,7 @@ private void readDecodedFrames(AVCodecContext* codecContext, AVFrame* receiveFra double frameTime = 0.0; - if (audio && codecContext->codec_type == AVMediaType.AVMEDIA_TYPE_AUDIO) + if (audioStream != null && codecContext->codec_type == AVMediaType.AVMEDIA_TYPE_AUDIO) { frameTime = (frameTimestamp - audioStream->start_time) * audioTimeBaseInSeconds * 1000; @@ -869,7 +862,7 @@ private void readDecodedFrames(AVCodecContext* codecContext, AVFrame* receiveFra private void resampleAndAppendToAudioStream(AVFrame* frame) { - if (memoryStream == null) + if (memoryStream == null || audioStream == null) return; int sampleCount; From 8ecfd07cb0cdb79ceeec738fd85ea5ac8d2cf7d7 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Mon, 23 Sep 2024 22:08:16 +0900 Subject: [PATCH 122/127] Return MemoryStream buffer in VideoDecoder for audio --- osu.Framework/Audio/SDL3AudioDecoderManager.cs | 6 +----- osu.Framework/Graphics/Video/VideoDecoder.cs | 13 +++++-------- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index cfb9228360..ae58077ee1 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -499,7 +499,6 @@ protected override int LoadFromStreamInternal(out byte[] decoded) internal class FFmpegAudioDecoder : SDL3AudioDecoder { private VideoDecoder? ffmpeg; - private byte[]? decodeData; public FFmpegAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, bool autoDisposeStream, ISDL3AudioDataReceiver? 
pass) : base(stream, audioSpec, isTrack, autoDisposeStream, pass) @@ -508,8 +507,6 @@ public FFmpegAudioDecoder(Stream stream, SDL_AudioSpec audioSpec, bool isTrack, internal override void Dispose() { - decodeData = null; - ffmpeg?.Dispose(); ffmpeg = null; @@ -533,12 +530,11 @@ protected override int LoadFromStreamInternal(out byte[] decoded) Loading = true; } - int got = ffmpeg.DecodeNextAudioFrame(32, ref decodeData, !IsTrack); + int got = ffmpeg.DecodeNextAudioFrame(32, out decoded, !IsTrack); if (ffmpeg.State != VideoDecoder.DecoderState.Running) Loading = false; - decoded = decodeData; return got; } } diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 59adf30009..06f4c3cb06 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -644,7 +644,7 @@ private void decodingLoop(CancellationToken cancellationToken) private MemoryStream memoryStream; - internal int DecodeNextAudioFrame(int iteration, ref byte[] decodedAudio, bool decodeUntilEnd = false) + internal int DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool decodeUntilEnd = false) { if (audioStream == null) { @@ -670,16 +670,13 @@ internal int DecodeNextAudioFrame(int iteration, ref byte[] decodedAudio, bool d { Logger.Error(e, "VideoDecoder faulted while decoding audio"); State = DecoderState.Faulted; + decodedAudio = Array.Empty(); return 0; } - if (decodedAudio == null || decodedAudio.Length < memoryStream.Position) - decodedAudio = new byte[memoryStream.Position]; - - int pos = (int)memoryStream.Position; + decodedAudio = memoryStream.GetBuffer(); - memoryStream.Position = 0; - return memoryStream.Read(decodedAudio, 0, pos); + return (int)memoryStream.Position; } private void decodeNextFrame(AVPacket* packet, AVFrame* receiveFrame) @@ -910,7 +907,7 @@ private void resampleAndAppendToAudioStream(AVFrame* frame) // assuming that the destination and source are not planar as we never define planar in ctor nbSamples = sampleCount; - for (int i = 0; i < audioDest.Length; i++) + for (int i = 0; i < audioSize; i++) { audioDest[i] = *(source[0] + i); } From df07df87e47b322d9308d5b72a9ed86192c442d8 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sat, 28 Sep 2024 20:02:42 +0900 Subject: [PATCH 123/127] Minor things --- osu.Framework/Audio/SDL3AudioDecoderManager.cs | 4 ++-- .../Audio/Track/TrackSDL3AudioPlayer.cs | 9 ++++++--- osu.Framework/Graphics/Video/VideoDecoder.cs | 16 ++++++++-------- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index ae58077ee1..55bd992d72 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -444,7 +444,7 @@ protected override int LoadFromStreamInternal(out byte[] decoded) if (info.Channels != AudioSpec.channels || info.Frequency != AudioSpec.freq) { - resampler = BassMix.CreateMixerStream(AudioSpec.freq, AudioSpec.channels, BassFlags.MixerEnd | BassFlags.Decode | resolution.ToBassFlag()); + resampler = BassMix.CreateMixerStream(AudioSpec.freq, AudioSpec.channels, BassFlags.Decode | resolution.ToBassFlag()); if (resampler == 0) throw new FormatException($"Failed to create BASS Mixer: {Bass.LastError}"); @@ -530,7 +530,7 @@ protected override int LoadFromStreamInternal(out byte[] decoded) Loading = true; } - int got = ffmpeg.DecodeNextAudioFrame(32, out decoded, !IsTrack); + 
int got = ffmpeg.DecodeNextAudioFrame(out decoded, !IsTrack); if (ffmpeg.State != VideoDecoder.DecoderState.Running) Loading = false; diff --git a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs index 94aa965907..03b73a4036 100644 --- a/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs +++ b/osu.Framework/Audio/Track/TrackSDL3AudioPlayer.cs @@ -95,15 +95,18 @@ internal void PutSamplesInStream(byte[] next, int length) throw new InvalidOperationException($"Use {nameof(PrepareStream)} before calling this"); int floatLen = length / sizeof(float); + long currentLen = audioDataLength; - if (audioDataLength + floatLen > AudioData.LongLength) - prepareArray(audioDataLength + floatLen); + if (currentLen + floatLen > AudioData.LongLength) + prepareArray(currentLen + floatLen); for (int i = 0; i < floatLen; i++) { float src = BitConverter.ToSingle(next, i * sizeof(float)); - AudioData[audioDataLength++] = src; + AudioData[currentLen++] = src; } + + Interlocked.Exchange(ref audioDataLength, currentLen); } internal void DonePutting() diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 06f4c3cb06..633893a3f3 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -200,6 +200,7 @@ public VideoDecoder(Stream audioStream, int rate, int channels, bool isFloat, in : this(audioStream) { audioOnly = true; + hwDecodingAllowed = false; EnableAudioDecoding(rate, channels, isFloat, bits, signed); } @@ -246,7 +247,7 @@ public void Seek(double targetTimestamp) if (audioStream != null) { ffmpeg.avcodec_flush_buffers(audioCodecContext); - ffmpeg.av_seek_frame(formatContext, audioStream->index, (long)(targetTimestamp / videoTimeBaseInSeconds / 1000.0), FFmpegFuncs.AVSEEK_FLAG_BACKWARD); + ffmpeg.av_seek_frame(formatContext, audioStream->index, (long)(targetTimestamp / audioTimeBaseInSeconds / 1000.0), FFmpegFuncs.AVSEEK_FLAG_BACKWARD); } skipOutputUntilTime = targetTimestamp; @@ -433,6 +434,9 @@ internal void PrepareDecoding() if (findStreamInfoResult < 0) throw new InvalidOperationException($"Error finding stream info: {getErrorMessage(findStreamInfoResult)}"); + packet = ffmpeg.av_packet_alloc(); + receiveFrame = ffmpeg.av_frame_alloc(); + int streamIndex = -1; if (!audioOnly) @@ -467,9 +471,6 @@ internal void PrepareDecoding() Duration = formatContext->duration / (double)FFmpegFuncs.AV_TIME_BASE * 1000.0; } } - - packet = ffmpeg.av_packet_alloc(); - receiveFrame = ffmpeg.av_frame_alloc(); } internal void OpenAudioStream() @@ -644,7 +645,7 @@ private void decodingLoop(CancellationToken cancellationToken) private MemoryStream memoryStream; - internal int DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool decodeUntilEnd = false) + internal int DecodeNextAudioFrame(out byte[] decodedAudio, bool decodeUntilEnd = false) { if (audioStream == null) { @@ -656,15 +657,14 @@ internal int DecodeNextAudioFrame(int iteration, out byte[] decodedAudio, bool d try { - int i = 0; - - while (decodeUntilEnd || i++ < iteration) + do { decodeNextFrame(packet, receiveFrame); if (State != DecoderState.Running) break; } + while (decodeUntilEnd); } catch (Exception e) { From d66f35334997daa62627daa637f8ac6445a5f951 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 4 Oct 2024 01:11:11 +0900 Subject: [PATCH 124/127] Minor improvements in handling audio in VideoDecoder --- .../Audio/SDL3AudioDecoderManager.cs | 2 +- 
osu.Framework/Graphics/Video/FFmpegFuncs.cs | 4 +- osu.Framework/Graphics/Video/VideoDecoder.cs | 53 +++++++++---------- 3 files changed, 29 insertions(+), 30 deletions(-) diff --git a/osu.Framework/Audio/SDL3AudioDecoderManager.cs b/osu.Framework/Audio/SDL3AudioDecoderManager.cs index 55bd992d72..4a9563e30c 100644 --- a/osu.Framework/Audio/SDL3AudioDecoderManager.cs +++ b/osu.Framework/Audio/SDL3AudioDecoderManager.cs @@ -521,7 +521,7 @@ protected override int LoadFromStreamInternal(out byte[] decoded) SDL3.SDL_AUDIO_ISFLOAT(AudioSpec.format), SDL3.SDL_AUDIO_BITSIZE(AudioSpec.format), SDL3.SDL_AUDIO_ISSIGNED(AudioSpec.format)); ffmpeg.PrepareDecoding(); - ffmpeg.OpenAudioStream(); + ffmpeg.RecreateCodecContext(); Bitrate = (int)ffmpeg.AudioBitrate; Length = ffmpeg.Duration; diff --git a/osu.Framework/Graphics/Video/FFmpegFuncs.cs b/osu.Framework/Graphics/Video/FFmpegFuncs.cs index c7fcab153f..674b2e7385 100644 --- a/osu.Framework/Graphics/Video/FFmpegFuncs.cs +++ b/osu.Framework/Graphics/Video/FFmpegFuncs.cs @@ -110,7 +110,7 @@ public unsafe class FFmpegFuncs public delegate long AvGetDefaultChannelLayoutDelegate(int nbChannels); - public delegate AVCodec* AvCodecFindDecoderDelegate(AVCodecID id); + public delegate int SwrGetOutSamplesDelegate(SwrContext* s, int inSamples); #endregion @@ -165,7 +165,7 @@ public unsafe class FFmpegFuncs public SwrGetDelayDelegate swr_get_delay; public AvSamplesGetBufferSizeDelegate av_samples_get_buffer_size; public AvGetDefaultChannelLayoutDelegate av_get_default_channel_layout; - public AvCodecFindDecoderDelegate avcodec_find_decoder; + public SwrGetOutSamplesDelegate swr_get_out_samples; // Touching AutoGen.ffmpeg or its LibraryLoader in any way on non-Desktop platforms // will cause it to throw in static constructor, which can't be bypassed. diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 633893a3f3..32c786bbec 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -81,7 +81,7 @@ public unsafe class VideoDecoder : IDisposable private SwsContext* swsContext; private AVStream* audioStream; - private AVCodecContext* audioCodecContext => audioStream->codec; + private AVCodecContext* audioCodecContext; private SwrContext* swrContext; private avio_alloc_context_read_packet readPacketCallback; @@ -473,48 +473,43 @@ internal void PrepareDecoding() } } - internal void OpenAudioStream() + internal void RecreateCodecContext() { - if (audioStream == null) - return; - - int result = ffmpeg.avcodec_open2(audioStream->codec, ffmpeg.avcodec_find_decoder(audioStream->codec->codec_id), null); + RecreateCodecContext(ref videoStream, ref videoCodecContext, hwDecodingAllowed); + RecreateCodecContext(ref audioStream, ref audioCodecContext, false); - if (result < 0) - throw new InvalidDataException($"Error trying to open audio codec: {getErrorMessage(result)}"); - - if (!prepareResampler()) + if (audioCodecContext != null && !prepareResampler()) throw new InvalidDataException("Error trying to prepare audio resampler"); } - internal void RecreateCodecContext() + internal void RecreateCodecContext(ref AVStream* stream, ref AVCodecContext* codecContext, bool allowHwDecoding) { - if (videoStream == null) + if (stream == null) return; - var codecParams = *videoStream->codecpar; - var targetHwDecoders = hwDecodingAllowed ? 
TargetHardwareVideoDecoders.Value : HardwareVideoDecoder.None; + var codecParams = *stream->codecpar; + var targetHwDecoders = allowHwDecoding ? TargetHardwareVideoDecoders.Value : HardwareVideoDecoder.None; bool openSuccessful = false; foreach (var (decoder, hwDeviceType) in GetAvailableDecoders(formatContext->iformat, codecParams.codec_id, targetHwDecoders)) { // free context in case it was allocated in a previous iteration or recreate call. - if (videoCodecContext != null) + if (codecContext != null) { - fixed (AVCodecContext** ptr = &videoCodecContext) + fixed (AVCodecContext** ptr = &codecContext) ffmpeg.avcodec_free_context(ptr); } - videoCodecContext = ffmpeg.avcodec_alloc_context3(decoder.Pointer); - videoCodecContext->pkt_timebase = videoStream->time_base; + codecContext = ffmpeg.avcodec_alloc_context3(decoder.Pointer); + codecContext->pkt_timebase = stream->time_base; - if (videoCodecContext == null) + if (codecContext == null) { Logger.Log($"Couldn't allocate codec context. Codec: {decoder.Name}"); continue; } - int paramCopyResult = ffmpeg.avcodec_parameters_to_context(videoCodecContext, &codecParams); + int paramCopyResult = ffmpeg.avcodec_parameters_to_context(codecContext, &codecParams); if (paramCopyResult < 0) { @@ -525,7 +520,7 @@ internal void RecreateCodecContext() // initialize hardware decode context. if (hwDeviceType != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE) { - int hwDeviceCreateResult = ffmpeg.av_hwdevice_ctx_create(&videoCodecContext->hw_device_ctx, hwDeviceType, null, null, 0); + int hwDeviceCreateResult = ffmpeg.av_hwdevice_ctx_create(&codecContext->hw_device_ctx, hwDeviceType, null, null, 0); if (hwDeviceCreateResult < 0) { @@ -536,7 +531,7 @@ internal void RecreateCodecContext() Logger.Log($"Successfully opened hardware video decoder context {hwDeviceType} for codec {decoder.Name}"); } - int openCodecResult = ffmpeg.avcodec_open2(videoCodecContext, decoder.Pointer, null); + int openCodecResult = ffmpeg.avcodec_open2(codecContext, decoder.Pointer, null); if (openCodecResult < 0) { @@ -552,8 +547,6 @@ internal void RecreateCodecContext() if (!openSuccessful) throw new InvalidOperationException($"No usable decoder found for codec ID {codecParams.codec_id}"); - - OpenAudioStream(); } private bool prepareResampler() @@ -867,12 +860,12 @@ private void resampleAndAppendToAudioStream(AVFrame* frame) if (swrContext != null) { - sampleCount = (int)ffmpeg.swr_get_delay(swrContext, audioCodecContext->sample_rate); + sampleCount = (int)ffmpeg.swr_get_delay(swrContext, audioRate); source = null; if (frame != null) { - sampleCount = (int)Math.Ceiling((double)(sampleCount + frame->nb_samples) * audioRate / audioCodecContext->sample_rate); + sampleCount = ffmpeg.swr_get_out_samples(swrContext, frame->nb_samples); source = frame->data.ToArray(); } @@ -1156,7 +1149,7 @@ protected virtual FFmpegFuncs CreateFuncs() swr_get_delay = FFmpeg.AutoGen.ffmpeg.swr_get_delay, av_samples_get_buffer_size = FFmpeg.AutoGen.ffmpeg.av_samples_get_buffer_size, av_get_default_channel_layout = FFmpeg.AutoGen.ffmpeg.av_get_default_channel_layout, - avcodec_find_decoder = FFmpeg.AutoGen.ffmpeg.avcodec_find_decoder + swr_get_out_samples = FFmpeg.AutoGen.ffmpeg.swr_get_out_samples, }; } @@ -1218,6 +1211,12 @@ void freeFFmpeg() ffmpeg.avcodec_free_context(ptr); } + if (audioCodecContext != null) + { + fixed (AVCodecContext** ptr = &audioCodecContext) + ffmpeg.avcodec_free_context(ptr); + } + seekCallback = null; readPacketCallback = null; From d11e6193f6a8b48afe5cc2350e9a5aae82f10197 Mon Sep 17 00:00:00 
2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 4 Oct 2024 01:35:44 +0900 Subject: [PATCH 125/127] Satisfy InspectCode --- osu.Framework/Graphics/Video/VideoDecoder.cs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 32c786bbec..18f5dede55 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -655,9 +655,8 @@ internal int DecodeNextAudioFrame(out byte[] decodedAudio, bool decodeUntilEnd = decodeNextFrame(packet, receiveFrame); if (State != DecoderState.Running) - break; - } - while (decodeUntilEnd); + decodeUntilEnd = false; + } while (decodeUntilEnd); } catch (Exception e) { From 197787eb8bb975dc5e26c741e82a51e57ac091b4 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Sun, 6 Oct 2024 16:08:24 +0900 Subject: [PATCH 126/127] Calculate sample count instead of using ffmpeg function --- osu.Framework/Graphics/Video/FFmpegFuncs.cs | 3 --- osu.Framework/Graphics/Video/VideoDecoder.cs | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/osu.Framework/Graphics/Video/FFmpegFuncs.cs b/osu.Framework/Graphics/Video/FFmpegFuncs.cs index 674b2e7385..9bcf50aeb1 100644 --- a/osu.Framework/Graphics/Video/FFmpegFuncs.cs +++ b/osu.Framework/Graphics/Video/FFmpegFuncs.cs @@ -106,8 +106,6 @@ public unsafe class FFmpegFuncs public delegate long SwrGetDelayDelegate(SwrContext* s, long value); - public delegate int AvSamplesGetBufferSizeDelegate(int* linesize, int nbChannels, int nbSamples, AVSampleFormat sampleFmt, int align); - public delegate long AvGetDefaultChannelLayoutDelegate(int nbChannels); public delegate int SwrGetOutSamplesDelegate(SwrContext* s, int inSamples); @@ -163,7 +161,6 @@ public unsafe class FFmpegFuncs public SwrCloseDelegate swr_close; public SwrConvertDelegate swr_convert; public SwrGetDelayDelegate swr_get_delay; - public AvSamplesGetBufferSizeDelegate av_samples_get_buffer_size; public AvGetDefaultChannelLayoutDelegate av_get_default_channel_layout; public SwrGetOutSamplesDelegate swr_get_out_samples; diff --git a/osu.Framework/Graphics/Video/VideoDecoder.cs b/osu.Framework/Graphics/Video/VideoDecoder.cs index 18f5dede55..11a967e1ab 100644 --- a/osu.Framework/Graphics/Video/VideoDecoder.cs +++ b/osu.Framework/Graphics/Video/VideoDecoder.cs @@ -882,7 +882,7 @@ private void resampleAndAppendToAudioStream(AVFrame* frame) return; } - int audioSize = ffmpeg.av_samples_get_buffer_size(null, audioChannels, sampleCount, audioFmt, 0); + int audioSize = sampleCount * audioChannels * (audioBits / 8); byte[] audioDest = ArrayPool.Shared.Rent(audioSize); int nbSamples = 0; @@ -1146,7 +1146,6 @@ protected virtual FFmpegFuncs CreateFuncs() swr_close = FFmpeg.AutoGen.ffmpeg.swr_close, swr_convert = FFmpeg.AutoGen.ffmpeg.swr_convert, swr_get_delay = FFmpeg.AutoGen.ffmpeg.swr_get_delay, - av_samples_get_buffer_size = FFmpeg.AutoGen.ffmpeg.av_samples_get_buffer_size, av_get_default_channel_layout = FFmpeg.AutoGen.ffmpeg.av_get_default_channel_layout, swr_get_out_samples = FFmpeg.AutoGen.ffmpeg.swr_get_out_samples, }; From ac451bd3ef1ef9187360e7a41b3af18ed3191793 Mon Sep 17 00:00:00 2001 From: hwsmm <9151706+hwsmm@users.noreply.github.com> Date: Fri, 18 Oct 2024 00:47:14 +0900 Subject: [PATCH 127/127] Remove unneeded finalizer in SampleChannelSDL3 --- osu.Framework/Audio/Sample/SampleChannelSDL3.cs | 5 ----- 1 file changed, 5 deletions(-) diff --git 
a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs index 30b064b364..76ebaefa62 100644 --- a/osu.Framework/Audio/Sample/SampleChannelSDL3.cs +++ b/osu.Framework/Audio/Sample/SampleChannelSDL3.cs @@ -84,11 +84,6 @@ internal override void OnStateChanged() bool ISDL3AudioChannel.Playing => playing; - ~SampleChannelSDL3() - { - Dispose(false); - } - protected override void Dispose(bool disposing) { if (IsDisposed)
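
Aside on the finalizer removal in the patch above: dropping ~SampleChannelSDL3() is safe when the base audio component already owns the finalization path, or when the channel holds no unmanaged state of its own. The sketch below is illustrative only; the class names are hypothetical and this is not code from osu.Framework. It assumes a base type that declares the standard IDisposable pattern, and shows why a derived type that only overrides Dispose(bool) gains nothing from re-declaring a finalizer.

using System;

// Minimal sketch of the dispose pattern relevant to the change above.
// Names are illustrative and do not correspond to real osu.Framework types.
public class BaseAudioComponent : IDisposable
{
    protected bool IsDisposed { get; private set; }

    public void Dispose()
    {
        Dispose(true);
        // Suppresses finalization for the whole object, including derived types.
        GC.SuppressFinalize(this);
    }

    protected virtual void Dispose(bool disposing)
    {
        IsDisposed = true;
    }

    // The inherited finalizer already routes through Dispose(false),
    // so derived classes do not need to re-declare one.
    ~BaseAudioComponent() => Dispose(false);
}

public class ExampleSampleChannel : BaseAudioComponent
{
    protected override void Dispose(bool disposing)
    {
        if (IsDisposed)
            return;

        // release managed playback state here when disposing is true ...
        base.Dispose(disposing);
    }

    // No ~ExampleSampleChannel() needed: adding one would only put the object
    // back on the finalization queue without freeing anything extra.
}
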