diff --git a/Builds/LinuxMakefile/Makefile b/Builds/LinuxMakefile/Makefile
index e857246f..9a2bcf3a 100644
--- a/Builds/LinuxMakefile/Makefile
+++ b/Builds/LinuxMakefile/Makefile
@@ -35,7 +35,7 @@ ifeq ($(CONFIG),Debug)
TARGET_ARCH :=
endif
- JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DDEBUG=1" "-D_DEBUG=1" "-DPIP_JUCE_EXAMPLES_DIRECTORY=QzpcdG9vbHNcSlVDRVxleGFtcGxlcw==" "-DSAMPLER_SKIP_UI" "-DJUCE_MODAL_LOOPS_PERMITTED" "-DHAVE_LIBSAMPLERATE" "-DUSE_BUILTIN_FFT" "-DUSE_PTHREADS" "-DBUILD_DAWDREAMER_FAUST" "-DBUILD_DAWDREAMER_RUBBERBAND" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=0.6.5" "-DJUCE_APP_VERSION_HEX=0x605" $(shell pkg-config --cflags alsa freetype2) -pthread -I../../JuceLibraryCode/modules/juce_audio_processors/format_types/VST3_SDK -I../../thirdparty/JUCE/modules/juce_audio_processors/format_types/VST3_SDK -I../../JuceLibraryCode -I../../JuceLibraryCode/modules -I../../thirdparty/pybind11/include -I../../thirdparty/faust/architecture -I../../thirdparty/faust/compiler -I../../thirdparty/faust/compiler/utils -I../../thirdparty/libsamplerate/src -I../../thirdparty/libsamplerate/include -I../../thirdparty/rubberband -I../../thirdparty/rubberband/rubberband -I../../thirdparty/rubberband/src/kissfft -I../../thirdparty/rubberband/src -I../../thirdparty/portable_endian/include $(CPPFLAGS)
+ JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DDEBUG=1" "-D_DEBUG=1" "-DPIP_JUCE_EXAMPLES_DIRECTORY=QzpcdG9vbHNcSlVDRVxleGFtcGxlcw==" "-DSAMPLER_SKIP_UI" "-DJUCE_MODAL_LOOPS_PERMITTED" "-DHAVE_LIBSAMPLERATE" "-DUSE_BUILTIN_FFT" "-DUSE_PTHREADS" "-DBUILD_DAWDREAMER_FAUST" "-DBUILD_DAWDREAMER_RUBBERBAND" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=0.6.6" "-DJUCE_APP_VERSION_HEX=0x606" $(shell pkg-config --cflags alsa freetype2) -pthread -I../../JuceLibraryCode/modules/juce_audio_processors/format_types/VST3_SDK -I../../thirdparty/JUCE/modules/juce_audio_processors/format_types/VST3_SDK -I../../JuceLibraryCode -I../../JuceLibraryCode/modules -I../../thirdparty/pybind11/include -I../../thirdparty/faust/architecture -I../../thirdparty/faust/compiler -I../../thirdparty/faust/compiler/utils -I../../thirdparty/libsamplerate/src -I../../thirdparty/libsamplerate/include -I../../thirdparty/rubberband -I../../thirdparty/rubberband/rubberband -I../../thirdparty/rubberband/src/kissfft -I../../thirdparty/rubberband/src -I../../thirdparty/portable_endian/include $(CPPFLAGS)
JUCE_CPPFLAGS_DYNAMIC_LIBRARY := "-DJucePlugin_Build_VST=0" "-DJucePlugin_Build_VST3=0" "-DJucePlugin_Build_AU=0" "-DJucePlugin_Build_AUv3=0" "-DJucePlugin_Build_RTAS=0" "-DJucePlugin_Build_AAX=0" "-DJucePlugin_Build_Standalone=0" "-DJucePlugin_Build_Unity=0"
JUCE_CFLAGS_DYNAMIC_LIBRARY := -fPIC -fvisibility=hidden
JUCE_LDFLAGS_DYNAMIC_LIBRARY := -shared
@@ -58,7 +58,7 @@ ifeq ($(CONFIG),Release)
TARGET_ARCH :=
endif
- JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DNDEBUG=1" "-DPIP_JUCE_EXAMPLES_DIRECTORY=QzpcdG9vbHNcSlVDRVxleGFtcGxlcw==" "-DSAMPLER_SKIP_UI" "-DJUCE_MODAL_LOOPS_PERMITTED" "-DHAVE_LIBSAMPLERATE" "-DUSE_BUILTIN_FFT" "-DUSE_PTHREADS" "-DBUILD_DAWDREAMER_FAUST" "-DBUILD_DAWDREAMER_RUBBERBAND" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=0.6.5" "-DJUCE_APP_VERSION_HEX=0x605" $(shell pkg-config --cflags alsa freetype2) -pthread -I../../JuceLibraryCode/modules/juce_audio_processors/format_types/VST3_SDK -I../../thirdparty/JUCE/modules/juce_audio_processors/format_types/VST3_SDK -I../../JuceLibraryCode -I../../JuceLibraryCode/modules -I../../thirdparty/pybind11/include -I../../thirdparty/faust/architecture -I../../thirdparty/faust/compiler -I../../thirdparty/faust/compiler/utils -I../../thirdparty/libsamplerate/src -I../../thirdparty/libsamplerate/include -I../../thirdparty/rubberband -I../../thirdparty/rubberband/rubberband -I../../thirdparty/rubberband/src/kissfft -I../../thirdparty/rubberband/src -I../../thirdparty/portable_endian/include $(CPPFLAGS)
+ JUCE_CPPFLAGS := $(DEPFLAGS) "-DLINUX=1" "-DNDEBUG=1" "-DPIP_JUCE_EXAMPLES_DIRECTORY=QzpcdG9vbHNcSlVDRVxleGFtcGxlcw==" "-DSAMPLER_SKIP_UI" "-DJUCE_MODAL_LOOPS_PERMITTED" "-DHAVE_LIBSAMPLERATE" "-DUSE_BUILTIN_FFT" "-DUSE_PTHREADS" "-DBUILD_DAWDREAMER_FAUST" "-DBUILD_DAWDREAMER_RUBBERBAND" "-DJUCER_LINUX_MAKE_6D53C8B4=1" "-DJUCE_APP_VERSION=0.6.6" "-DJUCE_APP_VERSION_HEX=0x606" $(shell pkg-config --cflags alsa freetype2) -pthread -I../../JuceLibraryCode/modules/juce_audio_processors/format_types/VST3_SDK -I../../thirdparty/JUCE/modules/juce_audio_processors/format_types/VST3_SDK -I../../JuceLibraryCode -I../../JuceLibraryCode/modules -I../../thirdparty/pybind11/include -I../../thirdparty/faust/architecture -I../../thirdparty/faust/compiler -I../../thirdparty/faust/compiler/utils -I../../thirdparty/libsamplerate/src -I../../thirdparty/libsamplerate/include -I../../thirdparty/rubberband -I../../thirdparty/rubberband/rubberband -I../../thirdparty/rubberband/src/kissfft -I../../thirdparty/rubberband/src -I../../thirdparty/portable_endian/include $(CPPFLAGS)
JUCE_CPPFLAGS_DYNAMIC_LIBRARY := "-DJucePlugin_Build_VST=0" "-DJucePlugin_Build_VST3=0" "-DJucePlugin_Build_AU=0" "-DJucePlugin_Build_AUv3=0" "-DJucePlugin_Build_RTAS=0" "-DJucePlugin_Build_AAX=0" "-DJucePlugin_Build_Standalone=0" "-DJucePlugin_Build_Unity=0"
JUCE_CFLAGS_DYNAMIC_LIBRARY := -fPIC -fvisibility=hidden
JUCE_LDFLAGS_DYNAMIC_LIBRARY := -shared
@@ -101,8 +101,10 @@ OBJECTS_DYNAMIC_LIBRARY := \
$(JUCE_OBJDIR)/SamplerAudioProcessorEditor_39db550d.o \
$(JUCE_OBJDIR)/FaustProcessor_2056a3e0.o \
$(JUCE_OBJDIR)/FilterProcessor_8755f5b.o \
+ $(JUCE_OBJDIR)/PlaybackWarpProcessor_eedba470.o \
$(JUCE_OBJDIR)/PluginProcessor_a059e380.o \
$(JUCE_OBJDIR)/ProcessorBase_65bf9464.o \
+ $(JUCE_OBJDIR)/CustomParameters_5970e144.o \
$(JUCE_OBJDIR)/custom_pybind_wrappers_ac32b5b4.o \
$(JUCE_OBJDIR)/RenderEngine_d1c4d401.o \
$(JUCE_OBJDIR)/RenderEngineWrapper_9b21dedc.o \
@@ -281,6 +283,11 @@ $(JUCE_OBJDIR)/FilterProcessor_8755f5b.o: ../../Source/FilterProcessor.cpp
@echo "Compiling FilterProcessor.cpp"
$(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_DYNAMIC_LIBRARY) $(JUCE_CFLAGS_DYNAMIC_LIBRARY) -o "$@" -c "$<"
+$(JUCE_OBJDIR)/PlaybackWarpProcessor_eedba470.o: ../../Source/PlaybackWarpProcessor.cpp
+ -$(V_AT)mkdir -p $(JUCE_OBJDIR)
+ @echo "Compiling PlaybackWarpProcessor.cpp"
+ $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_DYNAMIC_LIBRARY) $(JUCE_CFLAGS_DYNAMIC_LIBRARY) -o "$@" -c "$<"
+
$(JUCE_OBJDIR)/PluginProcessor_a059e380.o: ../../Source/PluginProcessor.cpp
-$(V_AT)mkdir -p $(JUCE_OBJDIR)
@echo "Compiling PluginProcessor.cpp"
@@ -291,6 +298,11 @@ $(JUCE_OBJDIR)/ProcessorBase_65bf9464.o: ../../Source/ProcessorBase.cpp
@echo "Compiling ProcessorBase.cpp"
$(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_DYNAMIC_LIBRARY) $(JUCE_CFLAGS_DYNAMIC_LIBRARY) -o "$@" -c "$<"
+$(JUCE_OBJDIR)/CustomParameters_5970e144.o: ../../Source/CustomParameters.cpp
+ -$(V_AT)mkdir -p $(JUCE_OBJDIR)
+ @echo "Compiling CustomParameters.cpp"
+ $(V_AT)$(CXX) $(JUCE_CXXFLAGS) $(JUCE_CPPFLAGS_DYNAMIC_LIBRARY) $(JUCE_CFLAGS_DYNAMIC_LIBRARY) -o "$@" -c "$<"
+
$(JUCE_OBJDIR)/custom_pybind_wrappers_ac32b5b4.o: ../../Source/custom_pybind_wrappers.cpp
-$(V_AT)mkdir -p $(JUCE_OBJDIR)
@echo "Compiling custom_pybind_wrappers.cpp"
diff --git a/Builds/MacOSX/DawDreamer.xcodeproj/project.pbxproj b/Builds/MacOSX/DawDreamer.xcodeproj/project.pbxproj
index b7dafce9..86a8fe3b 100644
--- a/Builds/MacOSX/DawDreamer.xcodeproj/project.pbxproj
+++ b/Builds/MacOSX/DawDreamer.xcodeproj/project.pbxproj
@@ -11,6 +11,8 @@
03E7579908465C2C4C83596E /* include_juce_dsp.mm */ = {isa = PBXBuildFile; fileRef = 2AA8CBD57F5CC5B7912A192F; };
0C01D9069A27A2ECB0B24253 /* StretcherChannelData.cpp */ = {isa = PBXBuildFile; fileRef = 5558FF156E977213C6DE9C14; };
158D137D07F738CDB6AD2028 /* Accelerate.framework */ = {isa = PBXBuildFile; fileRef = 553C0A3501582EE95328719B; };
+ 1B14253412716097BBB38A9B /* CustomParameters.cpp */ = {isa = PBXBuildFile; fileRef = C5A12760CCD5CE6DD270F92F; };
+ 210F38443F9D77354A4B28EB /* PlaybackWarpProcessor.cpp */ = {isa = PBXBuildFile; fileRef = AB39C0017B2EE44B9E510D89; };
21FB7BCF683F51D403A7475D /* StretcherProcess.cpp */ = {isa = PBXBuildFile; fileRef = 1B00487B34E2B8C9A26AFA49; };
24E83A217028D2AEF88FF90E /* include_juce_data_structures.mm */ = {isa = PBXBuildFile; fileRef = DE2959972547E114EB1E1FCD; };
2706724BA659554B6F4EE85E /* include_juce_audio_formats.mm */ = {isa = PBXBuildFile; fileRef = 03BD258F06159505E43073E1; };
@@ -172,7 +174,6 @@
8A778A5F6395BCBDE3BE5982 /* Scavenger.h */ /* Scavenger.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = Scavenger.h; path = ../../thirdparty/rubberband/src/base/Scavenger.h; sourceTree = SOURCE_ROOT; };
8B852E83502C66F8B990F051 /* SilentAudioCurve.h */ /* SilentAudioCurve.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = SilentAudioCurve.h; path = ../../thirdparty/rubberband/src/audiocurves/SilentAudioCurve.h; sourceTree = SOURCE_ROOT; };
8D80555F3F275D566655DEC2 /* getopt.c */ /* getopt.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; name = getopt.c; path = ../../thirdparty/rubberband/src/getopt/getopt.c; sourceTree = SOURCE_ROOT; };
- 8D88F06BD34407D802DFB597 /* AllProcessors.h */ /* AllProcessors.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = AllProcessors.h; path = ../../Source/AllProcessors.h; sourceTree = SOURCE_ROOT; };
8F78B14DAA164DE1331AD3FD /* Main.cpp */ /* Main.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = Main.cpp; path = ../../Source/Sampler/Source/Main.cpp; sourceTree = SOURCE_ROOT; };
8F7B057EB4717EDC7E8E8048 /* StretcherImpl.h */ /* StretcherImpl.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = StretcherImpl.h; path = ../../thirdparty/rubberband/src/StretcherImpl.h; sourceTree = SOURCE_ROOT; };
94091A9630E5C9649E958A16 /* kiss_fftr.h */ /* kiss_fftr.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = kiss_fftr.h; path = ../../thirdparty/rubberband/src/kissfft/kiss_fftr.h; sourceTree = SOURCE_ROOT; };
@@ -195,6 +196,7 @@
A64A3D2EF69F5E968839751A /* sysutils.h */ /* sysutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = sysutils.h; path = ../../thirdparty/rubberband/src/system/sysutils.h; sourceTree = SOURCE_ROOT; };
A997E92A77CDFA70A6536BBD /* MPESettingsDataModel.cpp */ /* MPESettingsDataModel.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = MPESettingsDataModel.cpp; path = ../../Source/Sampler/Source/DataModels/MPESettingsDataModel.cpp; sourceTree = SOURCE_ROOT; };
AA2C7D7E9590EB6623631A35 /* CommandFifo.h */ /* CommandFifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = CommandFifo.h; path = ../../Source/Sampler/Source/CommandFifo.h; sourceTree = SOURCE_ROOT; };
+ AB39C0017B2EE44B9E510D89 /* PlaybackWarpProcessor.cpp */ /* PlaybackWarpProcessor.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = PlaybackWarpProcessor.cpp; path = ../../Source/PlaybackWarpProcessor.cpp; sourceTree = SOURCE_ROOT; };
AD248A6B37034D215326CB7F /* juce_audio_processors */ /* juce_audio_processors */ = {isa = PBXFileReference; lastKnownFileType = folder; name = juce_audio_processors; path = ../../JuceLibraryCode/modules/juce_audio_processors; sourceTree = SOURCE_ROOT; };
B1FAEBBC0B8073BAE84C5665 /* PlaybackProcessor.h */ /* PlaybackProcessor.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = PlaybackProcessor.h; path = ../../Source/PlaybackProcessor.h; sourceTree = SOURCE_ROOT; };
B2CCBF7C3614EE8B865D41C0 /* include_juce_graphics.mm */ /* include_juce_graphics.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; name = include_juce_graphics.mm; path = ../../JuceLibraryCode/include_juce_graphics.mm; sourceTree = SOURCE_ROOT; };
@@ -208,6 +210,7 @@
C379050966C094B7C05FEED8 /* Resampler.h */ /* Resampler.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = Resampler.h; path = ../../thirdparty/rubberband/src/dsp/Resampler.h; sourceTree = SOURCE_ROOT; };
C503A9D9F3009D679DBFC556 /* FilterProcessor.h */ /* FilterProcessor.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = FilterProcessor.h; path = ../../Source/FilterProcessor.h; sourceTree = SOURCE_ROOT; };
C519012CDDE2CA92FF58903A /* include_juce_video.mm */ /* include_juce_video.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; name = include_juce_video.mm; path = ../../JuceLibraryCode/include_juce_video.mm; sourceTree = SOURCE_ROOT; };
+ C5A12760CCD5CE6DD270F92F /* CustomParameters.cpp */ /* CustomParameters.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = CustomParameters.cpp; path = ../../Source/CustomParameters.cpp; sourceTree = SOURCE_ROOT; };
C5E04D72E37F04C0663E5EA6 /* CoreMIDI.framework */ /* CoreMIDI.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreMIDI.framework; path = System/Library/Frameworks/CoreMIDI.framework; sourceTree = SDKROOT; };
C6695EB398FC46133B7FAC70 /* PlaybackPositionOverlay.h */ /* PlaybackPositionOverlay.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = PlaybackPositionOverlay.h; path = ../../Source/Sampler/Source/Components/PlaybackPositionOverlay.h; sourceTree = SOURCE_ROOT; };
C713F7F77E549E8C08771E49 /* AbletonClipInfo.h */ /* AbletonClipInfo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = AbletonClipInfo.h; path = ../../Source/AbletonClipInfo.h; sourceTree = SOURCE_ROOT; };
@@ -410,6 +413,7 @@
561257822B09113041A6909C,
E553BE5C53ED6D87F71CC515,
7C90363A6D8BA8EA0D91A985,
+ C5A12760CCD5CE6DD270F92F,
3319047264FBD43706A45A08,
A2CE988FDEA16DA46477FB4B,
094BECA334087BBDDA1CA022,
@@ -570,13 +574,13 @@
C20139B2E8CF4DC6B19AC0A6,
8038F1D9B3CEBD503DE7AD1F,
4915A3C580F0DAA69FAAED98,
- 8D88F06BD34407D802DFB597,
9ABCCF2DBAA01DE762D8BD60,
BFD142EBB8AFBADC8AB04A8A,
7332613820553661C4FA3CD8,
C503A9D9F3009D679DBFC556,
1799B9631459052DD116F71A,
F7EDC034912036BE18A95734,
+ AB39C0017B2EE44B9E510D89,
B1FAEBBC0B8073BAE84C5665,
EE65D34C5FCFBF1D5F6C12D8,
2F962B6F6EA479E59F083A16,
@@ -736,8 +740,10 @@
B942805B7846AE297800EF37,
E358C00D8D92D35AFBC9944C,
7C6656C8A61187E3C9113001,
+ 210F38443F9D77354A4B28EB,
92F6A21BA484A258A4EF5F23,
BD1166D575CE2FE6886E0DFC,
+ 1B14253412716097BBB38A9B,
9D34C1F40CAD6A7617F1B60B,
48CA111A7776D753A8BFF02E,
F69DC2F3034023CA38AFFC92,
@@ -788,8 +794,8 @@
"BUILD_DAWDREAMER_FAUST",
"BUILD_DAWDREAMER_RUBBERBAND",
"JUCER_XCODE_MAC_F6D2F4CF=1",
- "JUCE_APP_VERSION=0.6.5",
- "JUCE_APP_VERSION_HEX=0x605",
+ "JUCE_APP_VERSION=0.6.6",
+ "JUCE_APP_VERSION_HEX=0x606",
"JucePlugin_Build_VST=0",
"JucePlugin_Build_VST3=0",
"JucePlugin_Build_AU=0",
@@ -914,8 +920,8 @@
"BUILD_DAWDREAMER_FAUST",
"BUILD_DAWDREAMER_RUBBERBAND",
"JUCER_XCODE_MAC_F6D2F4CF=1",
- "JUCE_APP_VERSION=0.6.5",
- "JUCE_APP_VERSION_HEX=0x605",
+ "JUCE_APP_VERSION=0.6.6",
+ "JUCE_APP_VERSION_HEX=0x606",
"JucePlugin_Build_VST=0",
"JucePlugin_Build_VST3=0",
"JucePlugin_Build_AU=0",
diff --git a/Builds/VisualStudio2019/DawDreamer_DynamicLibrary.vcxproj b/Builds/VisualStudio2019/DawDreamer_DynamicLibrary.vcxproj
index 8e6b5716..10485d3c 100644
--- a/Builds/VisualStudio2019/DawDreamer_DynamicLibrary.vcxproj
+++ b/Builds/VisualStudio2019/DawDreamer_DynamicLibrary.vcxproj
@@ -66,7 +66,7 @@
Disabled
ProgramDatabase
..\..\JuceLibraryCode\modules\juce_audio_processors\format_types\VST3_SDK;..\..\thirdparty\JUCE\modules\juce_audio_processors\format_types\VST3_SDK;..\..\JuceLibraryCode;..\..\JuceLibraryCode\modules;$(pythonLocation)\include;..\..\thirdparty\pybind11\include;..\..\thirdparty\faust\architecture;..\..\thirdparty\faust\compiler;..\..\thirdparty\faust\compiler\utils;..\..\thirdparty\libsamplerate\src;..\..\thirdparty\libsamplerate\include;..\..\thirdparty\rubberband;..\..\thirdparty\rubberband\rubberband;..\..\thirdparty\rubberband\src\kissfft;..\..\thirdparty\rubberband\src;..\..\thirdparty\portable_endian\include;%(AdditionalIncludeDirectories)
- _CRT_SECURE_NO_WARNINGS;WIN32;_WINDOWS;DEBUG;_DEBUG;PIP_JUCE_EXAMPLES_DIRECTORY=QzpcdG9vbHNcSlVDRVxleGFtcGxlcw==;SAMPLER_SKIP_UI;JUCE_MODAL_LOOPS_PERMITTED;_WIN32;__SSE__;__SSE2__;BUILD_DAWDREAMER_FAUST;BUILD_DAWDREAMER_RUBBERBAND;NOMINMAX;HAVE_LIBSAMPLERATE;HAVE_KISSFFT;JUCER_VS2019_78A5026=1;JUCE_APP_VERSION=0.6.5;JUCE_APP_VERSION_HEX=0x605;JucePlugin_Build_VST=0;JucePlugin_Build_VST3=0;JucePlugin_Build_AU=0;JucePlugin_Build_AUv3=0;JucePlugin_Build_RTAS=0;JucePlugin_Build_AAX=0;JucePlugin_Build_Standalone=0;JucePlugin_Build_Unity=0;_LIB;%(PreprocessorDefinitions)
+ _CRT_SECURE_NO_WARNINGS;WIN32;_WINDOWS;DEBUG;_DEBUG;PIP_JUCE_EXAMPLES_DIRECTORY=QzpcdG9vbHNcSlVDRVxleGFtcGxlcw==;SAMPLER_SKIP_UI;JUCE_MODAL_LOOPS_PERMITTED;_WIN32;__SSE__;__SSE2__;BUILD_DAWDREAMER_FAUST;BUILD_DAWDREAMER_RUBBERBAND;NOMINMAX;HAVE_LIBSAMPLERATE;HAVE_KISSFFT;JUCER_VS2019_78A5026=1;JUCE_APP_VERSION=0.6.6;JUCE_APP_VERSION_HEX=0x606;JucePlugin_Build_VST=0;JucePlugin_Build_VST3=0;JucePlugin_Build_AU=0;JucePlugin_Build_AUv3=0;JucePlugin_Build_RTAS=0;JucePlugin_Build_AAX=0;JucePlugin_Build_Standalone=0;JucePlugin_Build_Unity=0;_LIB;%(PreprocessorDefinitions)
MultiThreadedDebugDLL
true
NotUsing
@@ -116,7 +116,7 @@ copy "..\..\thirdparty\libfaust\win-x64\Debug\bin\faust.dll" "$(p
Full
..\..\JuceLibraryCode\modules\juce_audio_processors\format_types\VST3_SDK;..\..\thirdparty\JUCE\modules\juce_audio_processors\format_types\VST3_SDK;..\..\JuceLibraryCode;..\..\JuceLibraryCode\modules;$(pythonLocation)\include;..\..\thirdparty\pybind11\include;..\..\thirdparty\faust\architecture;..\..\thirdparty\faust\compiler;..\..\thirdparty\faust\compiler\utils;..\..\thirdparty\libsamplerate\src;..\..\thirdparty\libsamplerate\include;..\..\thirdparty\rubberband;..\..\thirdparty\rubberband\rubberband;..\..\thirdparty\rubberband\src\kissfft;..\..\thirdparty\rubberband\src;..\..\thirdparty\portable_endian\include;%(AdditionalIncludeDirectories)
- _CRT_SECURE_NO_WARNINGS;WIN32;_WINDOWS;NDEBUG;PIP_JUCE_EXAMPLES_DIRECTORY=QzpcdG9vbHNcSlVDRVxleGFtcGxlcw==;SAMPLER_SKIP_UI;JUCE_MODAL_LOOPS_PERMITTED;_WIN32;__SSE__;__SSE2__;BUILD_DAWDREAMER_FAUST;BUILD_DAWDREAMER_RUBBERBAND;NOMINMAX;HAVE_LIBSAMPLERATE;HAVE_KISSFFT;JUCER_VS2019_78A5026=1;JUCE_APP_VERSION=0.6.5;JUCE_APP_VERSION_HEX=0x605;JucePlugin_Build_VST=0;JucePlugin_Build_VST3=0;JucePlugin_Build_AU=0;JucePlugin_Build_AUv3=0;JucePlugin_Build_RTAS=0;JucePlugin_Build_AAX=0;JucePlugin_Build_Standalone=0;JucePlugin_Build_Unity=0;_LIB;%(PreprocessorDefinitions)
+ _CRT_SECURE_NO_WARNINGS;WIN32;_WINDOWS;NDEBUG;PIP_JUCE_EXAMPLES_DIRECTORY=QzpcdG9vbHNcSlVDRVxleGFtcGxlcw==;SAMPLER_SKIP_UI;JUCE_MODAL_LOOPS_PERMITTED;_WIN32;__SSE__;__SSE2__;BUILD_DAWDREAMER_FAUST;BUILD_DAWDREAMER_RUBBERBAND;NOMINMAX;HAVE_LIBSAMPLERATE;HAVE_KISSFFT;JUCER_VS2019_78A5026=1;JUCE_APP_VERSION=0.6.6;JUCE_APP_VERSION_HEX=0x606;JucePlugin_Build_VST=0;JucePlugin_Build_VST3=0;JucePlugin_Build_AU=0;JucePlugin_Build_AUv3=0;JucePlugin_Build_RTAS=0;JucePlugin_Build_AAX=0;JucePlugin_Build_Standalone=0;JucePlugin_Build_Unity=0;_LIB;%(PreprocessorDefinitions)
MultiThreadedDLL
true
NotUsing
@@ -200,8 +200,10 @@ copy "..\..\thirdparty\libfaust\win-x64\Release\bin\faust.dll" "$
+
+
@@ -2349,7 +2351,6 @@ copy "..\..\thirdparty\libfaust\win-x64\Release\bin\faust.dll" "$
-
diff --git a/Builds/VisualStudio2019/DawDreamer_DynamicLibrary.vcxproj.filters b/Builds/VisualStudio2019/DawDreamer_DynamicLibrary.vcxproj.filters
index a986a562..52729c88 100644
--- a/Builds/VisualStudio2019/DawDreamer_DynamicLibrary.vcxproj.filters
+++ b/Builds/VisualStudio2019/DawDreamer_DynamicLibrary.vcxproj.filters
@@ -631,12 +631,18 @@
DawDreamer\Processors
+
+ DawDreamer\Processors
+
DawDreamer\Processors
DawDreamer\Processors
+
+ DawDreamer
+
DawDreamer
@@ -3078,9 +3084,6 @@
DawDreamer\Processors
-
- DawDreamer\Processors
-
DawDreamer\Processors
diff --git a/Builds/VisualStudio2019/resources.rc b/Builds/VisualStudio2019/resources.rc
index ea211781..eb91b857 100644
--- a/Builds/VisualStudio2019/resources.rc
+++ b/Builds/VisualStudio2019/resources.rc
@@ -9,16 +9,16 @@
#include
VS_VERSION_INFO VERSIONINFO
-FILEVERSION 0,6,5,0
+FILEVERSION 0,6,6,0
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904E4"
BEGIN
VALUE "FileDescription", "DawDreamer\0"
- VALUE "FileVersion", "0.6.5\0"
+ VALUE "FileVersion", "0.6.6\0"
VALUE "ProductName", "DawDreamer\0"
- VALUE "ProductVersion", "0.6.5\0"
+ VALUE "ProductVersion", "0.6.6\0"
END
END
diff --git a/DawDreamer.jucer b/DawDreamer.jucer
index 060da481..bfe13e43 100644
--- a/DawDreamer.jucer
+++ b/DawDreamer.jucer
@@ -1,6 +1,6 @@
-
-
+
+
0).astype(np.float32)
+synth.set_automation(1, automation, ppqn=960)
+
+# Load a MIDI file and convert the timing to absolute seconds. Changes to the Render Engine's BPM
+# won't affect the timing. The kwargs below are defaults.
+synth.load_midi("C:/path/to/song.mid", clear_previous=True, convert_to_sec=True, all_events=True)
+
+# Load a MIDI file and keep the timing in units of beats. Changes to the Render Engine's BPM
+# will affect the timing.
+synth.load_midi("C:/path/to/song.mid", convert_to_sec=False)
+
+# We can also add one note at a time, specifying a start time and duration, both in seconds
+synth.add_midi_note(60, 127, 0.5, .25) # (MIDI note, velocity, start, duration)
-synth.load_midi("C:/path/to/song.mid")
-# We can also add notes one at a time.
-synth.add_midi_note(67, 127, 0.5, .25) # (MIDI note, velocity, start sec, duration sec)
+# With `convert_to_sec=False`, we can use beats as the unit for the start time and duration.
+# Rest for a beat and then play a note for a half beat.
+synth.add_midi_note(67, 127, 1, .5, convert_to_sec=False)
# For any processor type, we can get the number of inputs and outputs
print("synth num inputs: ", synth.get_num_input_channels())
@@ -153,7 +178,7 @@ filter_processor = engine.make_filter_processor("filter", "high", 7000.0, .5, 1.
filter_processor.freq = 7123. # Some parameters can be get/set like this.
freq_automation = make_sine(.5, DURATION)*5000. + 7000. # 0.5 Hz sine wave centered at 7000 w/ amp 5000.
filter_processor.set_automation("freq", freq_automation) # argument is single channel numpy array.
-freq_automation = filter_processor.get_automation("freq") # You can get automation of most processor parameters.
+freq_automation = filter_processor.get_automation("freq") # Get automation of most processor parameters.
filter_processor.record = True # This will allow us to access the filter processor's audio after a render.
# A graph is a meaningfully ordered list of tuples.
@@ -164,7 +189,7 @@ filter_processor.record = True # This will allow us to access the filter proces
# The audio from the last tuple's processor will be accessed automatically later by engine.get_audio()
graph = [
(synth, []), # synth takes no inputs, so we give an empty list.
- (engine.make_reverb_processor("reverb"), [synth.get_name()]), # Apply JUCE reverb to the synth named earlier
+ (engine.make_reverb_processor("reverb"), [synth.get_name()]), # Apply JUCE reverb to synth from earlier
(engine.make_plugin_processor("more_reverb", REVERB_PLUGIN), ["reverb"]), # Apply VST reverb
(engine.make_playback_processor("vocals", vocals), []), # Playback has no inputs.
(filter_processor, ["vocals"]), # High-pass filter with automation set earlier.
@@ -216,7 +241,7 @@ faust_processor = engine.make_faust_processor("faust")
faust_processor.set_dsp(DSP_PATH) # You can do this anytime.
# Using compile() isn't necessary, but it's an early warning check.
-faust_processor.compile()
+faust_processor.compile() # throws a catchable Python Runtime Error for bad Faust code
print(faust_processor.get_parameters_description())
diff --git a/Source/AllProcessors.h b/Source/AllProcessors.h
deleted file mode 100644
index c11d03ac..00000000
--- a/Source/AllProcessors.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#pragma once
-
-#include "ProcessorBase.h"
-#include "AddProcessor.h"
-#include "CompressorProcessor.h"
-#include "DelayProcessor.h"
-#include "FaustProcessor.h"
-#include "FilterProcessor.h"
-#include "OscillatorProcessor.h"
-#include "PlaybackProcessor.h"
-#include "PlaybackWarpProcessor.h"
-#include "PluginProcessor.h"
-#include "ReverbProcessor.h"
-#include "PannerProcessor.h"
-#include "SamplerProcessor.h"
diff --git a/Source/CompressorProcessor.h b/Source/CompressorProcessor.h
index 079b6114..76bd9410 100644
--- a/Source/CompressorProcessor.h
+++ b/Source/CompressorProcessor.h
@@ -42,10 +42,10 @@ class CompressorProcessor : public ProcessorBase
AudioPlayHead::CurrentPositionInfo posInfo;
getPlayHead()->getCurrentPosition(posInfo);
- *myThreshold = getAutomationVal("threshold", posInfo.timeInSamples);
- *myRatio = getAutomationVal("ratio", posInfo.timeInSamples);
- *myAttack = getAutomationVal("attack", posInfo.timeInSamples);
- *myRelease = getAutomationVal("release", posInfo.timeInSamples);
+ *myThreshold = getAutomationVal("threshold", posInfo);
+ *myRatio = getAutomationVal("ratio", posInfo);
+ *myAttack = getAutomationVal("attack", posInfo);
+ *myRelease = getAutomationVal("release", posInfo);
updateParameters();
}
@@ -56,16 +56,16 @@ class CompressorProcessor : public ProcessorBase
const juce::String getName() { return "CompressorProcessor"; };
void setThreshold(float threshold) { setAutomationVal("threshold", threshold); }
- float getThreshold() { return getAutomationVal("threshold", 0); }
+ float getThreshold() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("threshold", posInfo); }
void setRatio(float ratio) { setAutomationVal("ratio", ratio); }
- float getRatio() { return getAutomationVal("ratio", 0); }
+ float getRatio() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("ratio", posInfo); }
void setAttack(float attack) { setAutomationVal("attack", attack); }
- float getAttack() { return getAutomationVal("attack", 0); }
+ float getAttack() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("attack", posInfo); }
void setRelease(float release) { setAutomationVal("release", release); }
- float getRelease() { return getAutomationVal("release", 0); }
+ float getRelease() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("release", posInfo); }
private:
diff --git a/Source/CustomParameters.cpp b/Source/CustomParameters.cpp
new file mode 100644
index 00000000..1bd918ef
--- /dev/null
+++ b/Source/CustomParameters.cpp
@@ -0,0 +1,59 @@
+#include "CustomParameters.h"
+#include "ProcessorBase.h"
+
+#include
+
+bool
+AutomateParameter::setAutomation(py::array_t input, std::uint32_t newPPQN) {
+
+ if (newPPQN < 0) {
+ throw std::runtime_error("The PPQN must be greater than or equal to zero. Received: " + std::to_string(newPPQN));
+ }
+
+ m_ppqn = newPPQN;
+
+ try
+ {
+ myAutomation.clear();
+
+ auto numSamples = input.shape(0);
+
+ myAutomation = std::vector(numSamples, 0.f);
+
+ memcpy(myAutomation.data(), (float*)input.data(), numSamples * sizeof(float));
+ }
+ catch (const std::exception& e)
+ {
+ throw std::runtime_error(std::string("Error: setAutomation: ") + e.what());
+ return false;
+ }
+
+ return true;
+}
+
+void
+AutomateParameter::setAutomation(const float val) {
+ myAutomation.clear();
+ myAutomation.push_back(val);
+}
+
+std::vector
+AutomateParameter::getAutomation() {
+ return myAutomation;
+}
+
+float
+AutomateParameter::sample(juce::AudioPlayHead::CurrentPositionInfo& posInfo) {
+
+ size_t i;
+ if (m_ppqn > 0) {
+ i = std::min(myAutomation.size() - 1, size_t(posInfo.ppqPosition * m_ppqn));
+ }
+ else {
+ i = std::min(myAutomation.size() - 1, size_t(posInfo.timeInSamples));
+ }
+
+ i = std::max((size_t)0, i);
+ return myAutomation.at(i);
+
+}
\ No newline at end of file
diff --git a/Source/CustomParameters.h b/Source/CustomParameters.h
index bb3fe540..2adaa667 100644
--- a/Source/CustomParameters.h
+++ b/Source/CustomParameters.h
@@ -1,6 +1,7 @@
#pragma once
#include "../JuceLibraryCode/JuceHeader.h"
+#include "custom_pybind_wrappers.h"
using juce::ADSR;
using juce::AbstractFifo;
@@ -112,6 +113,7 @@ using juce::roundFloatToInt;
using juce::roundToInt;
using juce::uint8;
+
class AutomateParameter
{
@@ -119,51 +121,20 @@ class AutomateParameter
AutomateParameter() {}
- bool setAutomation(py::array_t input) {
-
- try
- {
- float* input_ptr = (float*)input.data();
- myAutomation.clear();
-
- myAutomation = std::vector(input.shape(0), 0.f);
+ bool setAutomation(py::array_t input, std::uint32_t newPPQN);
- for (int x = 0; x < input.shape(0); x++) {
- myAutomation[x] = *(input_ptr++);
- }
+ void setAutomation(const float val);
- }
- catch (const std::exception& e)
- {
- throw std::runtime_error(std::string("Error: setAutomation: ") + e.what());
- return false;
- }
+ std::vector getAutomation();
- return true;
- }
+ float sample(juce::AudioPlayHead::CurrentPositionInfo& posInfo);
- void setAutomation(const float val) {
- myAutomation.clear();
- myAutomation.push_back(val);
- }
-
- std::vector getAutomation() {
- return myAutomation;
- }
-
- float sample(size_t index) {
- auto i = std::min(myAutomation.size() - 1, index);
- i = std::max((size_t)0, i);
- return myAutomation.at(i);
- }
+ ~AutomateParameter() {}
protected:
std::vector myAutomation;
-
- ~AutomateParameter()
- {
- }
+ std::uint32_t m_ppqn = 0;
};
diff --git a/Source/DelayProcessor.h b/Source/DelayProcessor.h
index 91324b04..ef82d805 100644
--- a/Source/DelayProcessor.h
+++ b/Source/DelayProcessor.h
@@ -50,8 +50,8 @@ class DelayProcessor : public ProcessorBase
AudioPlayHead::CurrentPositionInfo posInfo;
getPlayHead()->getCurrentPosition(posInfo);
- *myWetLevel = getAutomationVal("wet_level", posInfo.timeInSamples);
- *myDelaySize = getAutomationVal("delay", posInfo.timeInSamples);
+ *myWetLevel = getAutomationVal("wet_level", posInfo);
+ *myDelaySize = getAutomationVal("delay", posInfo);
updateParameters();
}
@@ -62,10 +62,10 @@ class DelayProcessor : public ProcessorBase
const juce::String getName() { return "DelayProcessor"; };
void setDelay(float newDelaySize) { setAutomationVal("delay", newDelaySize); }
- float getDelay() { return getAutomationVal("delay", 0); }
+ float getDelay() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("delay", posInfo); }
void setWet(float newWet) { setAutomationVal("wet_level", newWet); }
- float getWet() { return getAutomationVal("wet_level", 0); }
+ float getWet() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("wet_level", posInfo); }
private:
diff --git a/Source/FaustProcessor.cpp b/Source/FaustProcessor.cpp
index 13a996a0..969116cb 100644
--- a/Source/FaustProcessor.cpp
+++ b/Source/FaustProcessor.cpp
@@ -41,7 +41,17 @@ FaustProcessor::FaustProcessor(std::string newUniqueName, double sampleRate, int
FaustProcessor::~FaustProcessor() {
clear();
- delete myMidiIterator;
+ delete myMidiIteratorQN;
+ delete myMidiIteratorSec;
+}
+
+bool
+FaustProcessor::setAutomation(std::string parameterName, py::array input, std::uint32_t ppqn) {
+
+ if (!m_isCompiled) {
+ this->compile();
+ }
+ return ProcessorBase::setAutomation(parameterName, input, ppqn);
}
bool
@@ -52,6 +62,9 @@ FaustProcessor::canApplyBusesLayout(const juce::AudioProcessor::BusesLayout& lay
void
FaustProcessor::prepareToPlay(double sampleRate, int samplesPerBlock)
{
+ if (!m_isCompiled) {
+ this->compile();
+ }
}
void
@@ -63,45 +76,70 @@ FaustProcessor::processBlock(juce::AudioSampleBuffer& buffer, juce::MidiBuffer&
getPlayHead()->getCurrentPosition(posInfo);
if (!m_isCompiled) {
- ProcessorBase::processBlock(buffer, midiBuffer);
- return;
+ throw std::runtime_error("Faust Processor called processBlock but it wasn't compiled.");
}
if (m_nvoices < 1) {
if (m_dsp != NULL) {
m_dsp->compute(buffer.getNumSamples(), (float**)buffer.getArrayOfReadPointers(), buffer.getArrayOfWritePointers());
}
+ else {
+ throw std::runtime_error("Faust Processor: m_dsp is null");
+ }
}
else if (m_dsp_poly != NULL) {
- long long int start = posInfo.timeInSamples;
+ auto start = posInfo.timeInSamples;
+
+ auto pulseStart = std::floor(posInfo.ppqPosition * PPQN);
+ auto pulseStep = (posInfo.bpm * PPQN) / (mySampleRate * 60.);
- // render one sample at a time
+ // render one sample at a time because we want accurate timing of keyOn/keyOff.
+
+ auto oneSampReadPtrs = (float**)oneSampleInBuffer.getArrayOfReadPointers();
+ auto oneSampWritePtrs = (float**)oneSampleOutBuffer.getArrayOfWritePointers();
+ const int midiChannel = 0;
for (size_t i = 0; i < buffer.getNumSamples(); i++)
{
- myIsMessageBetween = myMidiMessagePosition >= start && myMidiMessagePosition < start + 1;
- do {
- if (myIsMessageBetween) {
+ {
+ myIsMessageBetweenSec = myMidiMessagePositionSec >= start && myMidiMessagePositionSec < start + 1;
+ while (myIsMessageBetweenSec && myMidiEventsDoRemainSec) {
- int midiChannel = 0;
- if (myMidiMessage.isNoteOn()) {
- m_dsp_poly->keyOn(midiChannel, myMidiMessage.getNoteNumber(), myMidiMessage.getVelocity());
+ if (myMidiMessageSec.isNoteOn()) {
+ m_dsp_poly->keyOn(midiChannel, myMidiMessageSec.getNoteNumber(), myMidiMessageSec.getVelocity());
}
- else if (myMidiMessage.isNoteOff()) {
- m_dsp_poly->keyOff(midiChannel, myMidiMessage.getNoteNumber(), myMidiMessage.getVelocity());
+ else if (myMidiMessageSec.isNoteOff()) {
+ m_dsp_poly->keyOff(midiChannel, myMidiMessageSec.getNoteNumber(), myMidiMessageSec.getVelocity());
}
- myMidiEventsDoRemain = myMidiIterator->getNextEvent(myMidiMessage, myMidiMessagePosition);
- myIsMessageBetween = myMidiMessagePosition >= start && myMidiMessagePosition < start + 1;
+ myMidiEventsDoRemainSec = myMidiIteratorSec->getNextEvent(myMidiMessageSec, myMidiMessagePositionSec);
+ myIsMessageBetweenSec = myMidiMessagePositionSec >= start && myMidiMessagePositionSec < start + 1;
}
- } while (myIsMessageBetween && myMidiEventsDoRemain);
+ }
+
+ {
+ myIsMessageBetweenQN = myMidiMessagePositionQN >= pulseStart && myMidiMessagePositionQN < pulseStart + 1;
+ while (myIsMessageBetweenQN && myMidiEventsDoRemainQN) {
+
+ if (myMidiMessageQN.isNoteOn()) {
+ m_dsp_poly->keyOn(midiChannel, myMidiMessageQN.getNoteNumber(), myMidiMessageQN.getVelocity());
+ }
+ else if (myMidiMessageQN.isNoteOff()) {
+ m_dsp_poly->keyOff(midiChannel, myMidiMessageQN.getNoteNumber(), myMidiMessageQN.getVelocity());
+ }
+
+ myMidiEventsDoRemainQN = myMidiIteratorQN->getNextEvent(myMidiMessageQN, myMidiMessagePositionQN);
+
+ myIsMessageBetweenQN = myMidiMessagePositionQN >= pulseStart && myMidiMessagePositionQN < pulseStart + 1;
+ }
+ }
for (size_t chan = 0; chan < m_numInputChannels; chan++)
{
oneSampleInBuffer.setSample(chan, 0, buffer.getSample(chan, i));
}
- m_dsp_poly->compute(1, (float**)oneSampleInBuffer.getArrayOfReadPointers(), (float**)oneSampleOutBuffer.getArrayOfWritePointers());
+ m_dsp_poly->compute(1, oneSampReadPtrs, oneSampWritePtrs);
for (size_t chan = 0; chan < m_numOutputChannels; chan++)
{
@@ -109,8 +147,12 @@ FaustProcessor::processBlock(juce::AudioSampleBuffer& buffer, juce::MidiBuffer&
}
start += 1;
+ pulseStart += pulseStep;
}
}
+ else {
+ throw std::runtime_error("Faust Processor: m_dsp_poly is null");
+ }
ProcessorBase::processBlock(buffer, midiBuffer);
}
@@ -147,7 +189,7 @@ FaustProcessor::automateParameters() {
int faustIndex = m_map_juceIndex_to_faustIndex[i];
if (theParameter) {
- m_ui->setParamValue(faustIndex, theParameter->sample(posInfo.timeInSamples));
+ m_ui->setParamValue(faustIndex, theParameter->sample(posInfo));
}
else {
auto theName = this->getParameterName(i);
@@ -172,6 +214,7 @@ FaustProcessor::automateParameters() {
void
FaustProcessor::reset()
{
+
if (m_dsp) {
m_dsp->instanceClear();
}
@@ -180,9 +223,13 @@ FaustProcessor::reset()
m_dsp_poly->instanceClear();
}
- delete myMidiIterator;
- myMidiIterator = new MidiBuffer::Iterator(myMidiBuffer); // todo: deprecated.
- myMidiEventsDoRemain = myMidiIterator->getNextEvent(myMidiMessage, myMidiMessagePosition);
+ delete myMidiIteratorQN;
+ myMidiIteratorQN = new MidiBuffer::Iterator(myMidiBufferQN); // todo: deprecated.
+ myMidiEventsDoRemainQN = myMidiIteratorQN->getNextEvent(myMidiMessageQN, myMidiMessagePositionQN);
+
+ delete myMidiIteratorSec;
+ myMidiIteratorSec = new MidiBuffer::Iterator(myMidiBufferSec); // todo: deprecated.
+ myMidiEventsDoRemainSec = myMidiIteratorSec->getNextEvent(myMidiMessageSec, myMidiMessagePositionSec);
if (!m_isCompiled) {
this->compile();
@@ -250,8 +297,6 @@ FaustProcessor::setDSPString(const std::string& code)
return true;
}
-#define FAUSTPROCESSOR_FAIL_COMPILE clear(); return false;
-
bool
FaustProcessor::compile()
{
@@ -266,16 +311,16 @@ FaustProcessor::compile()
auto pathToFaustLibraries = getPathToFaustLibraries();
- int argc = 0;
- const char** argv = new const char* [256];
- if (pathToFaustLibraries.compare(std::string("")) != 0) {
- argv[argc++] = "-I";
- argv[argc++] = pathToFaustLibraries.c_str();
- }
- else {
+ if (pathToFaustLibraries.compare(std::string("")) == 0) {
throw std::runtime_error("FaustProcessor::compile(): Error for path for faust libraries: " + pathToFaustLibraries);
}
+ int argc = 0;
+ const char** argv = new const char* [256];
+
+ argv[argc++] = "-I";
+ argv[argc++] = pathToFaustLibraries.c_str();
+
if (m_faustLibrariesPath.compare(std::string("")) != 0) {
argv[argc++] = "-I";
argv[argc++] = m_faustLibrariesPath.c_str();
@@ -308,11 +353,17 @@ FaustProcessor::compile()
argc, argv, target.c_str(), m_errorString, optimize);
}
+ for (int i = 0; i < argc; i++) {
+ argv[i] = NULL;
+ }
+ delete[] argv;
+ argv = nullptr;
+
// check for error
if (m_errorString != "") {
// output error
+ clear();
throw std::runtime_error("FaustProcessor::compile(): " + m_errorString + ". Check the faustlibraries path: " + pathToFaustLibraries);
- FAUSTPROCESSOR_FAIL_COMPILE
}
//// print where faustlib is looking for stdfaust.lib and the other lib files.
@@ -327,18 +378,12 @@ FaustProcessor::compile()
// std::cout << name << "\n" << std::endl;
//}
- for (int i = 0; i < argc; i++) {
- argv[i] = NULL;
- }
- delete[] argv;
- argv = nullptr;
-
if (is_polyphonic) {
// (false, true) works
m_dsp_poly = m_poly_factory->createPolyDSPInstance(m_nvoices, true, m_groupVoices);
if (!m_dsp_poly) {
+ clear();
throw std::runtime_error("FaustProcessor::compile(): Cannot create Poly DSP instance.");
- FAUSTPROCESSOR_FAIL_COMPILE
}
m_dsp_poly->setReleaseLength(m_releaseLengthSec);
}
@@ -346,8 +391,8 @@ FaustProcessor::compile()
// create DSP instance
m_dsp = m_factory->createDSPInstance();
if (!m_dsp) {
+ clear();
throw std::runtime_error("FaustProcessor::compile(): Cannot create DSP instance.");
- FAUSTPROCESSOR_FAIL_COMPILE
}
}
@@ -395,6 +440,11 @@ bool
FaustProcessor::setDSPFile(const std::string& path)
{
m_isCompiled = false;
+
+ if (!std::filesystem::exists(path.c_str())) {
+ throw std::runtime_error("File not found: " + path);
+ }
+
if (std::strcmp(path.c_str(), "") == 0) {
throw std::runtime_error("Path to DSP file is empty.");
}
@@ -459,7 +509,9 @@ FaustProcessor::getParamWithIndex(const int index)
auto& parAddress = it->second;
- return this->getAutomationVal(parAddress, 0);
+ AudioPlayHead::CurrentPositionInfo posInfo;
+
+ return this->getAutomationVal(parAddress, posInfo);
}
float
@@ -471,7 +523,9 @@ FaustProcessor::getParamWithPath(const std::string& n)
}
if (!m_ui) return 0; // todo: better handling
- return this->getAutomationVal(n, 0);
+ AudioPlayHead::CurrentPositionInfo posInfo;
+
+ return this->getAutomationVal(n, posInfo);
}
std::string
@@ -589,7 +643,8 @@ FaustProcessor::getPluginParametersDescription()
myDictionary["min"] = m_ui->getParamMin(faustIndex);
myDictionary["max"] = m_ui->getParamMax(faustIndex);
myDictionary["step"] = m_ui->getParamStep(faustIndex);
- myDictionary["value"] = this->getAutomationVal(theName, 0);
+ AudioPlayHead::CurrentPositionInfo posInfo;
+ myDictionary["value"] = this->getAutomationVal(theName, posInfo);
myList.append(myDictionary);
}
@@ -605,25 +660,54 @@ FaustProcessor::getPluginParametersDescription()
int
FaustProcessor::getNumMidiEvents()
{
- return myMidiBuffer.getNumEvents();
+ return myMidiBufferSec.getNumEvents() + myMidiBufferQN.getNumEvents();
};
bool
-FaustProcessor::loadMidi(const std::string& path)
+FaustProcessor::loadMidi(const std::string& path, bool clearPrevious, bool convertToSeconds, bool allEvents)
{
+ if (!std::filesystem::exists(path.c_str())) {
+ throw std::runtime_error("File not found: " + path);
+ }
+
File file = File(path);
FileInputStream fileStream(file);
MidiFile midiFile;
midiFile.readFrom(fileStream);
- midiFile.convertTimestampTicksToSeconds();
- myMidiBuffer.clear();
-
- for (int t = 0; t < midiFile.getNumTracks(); t++) {
- const MidiMessageSequence* track = midiFile.getTrack(t);
- for (int i = 0; i < track->getNumEvents(); i++) {
- MidiMessage& m = track->getEventPointer(i)->message;
- int sampleOffset = (int)(mySampleRate * m.getTimeStamp());
- myMidiBuffer.addEvent(m, sampleOffset);
+
+ if (clearPrevious) {
+ myMidiBufferSec.clear();
+ myMidiBufferQN.clear();
+ }
+
+ if (convertToSeconds) {
+ midiFile.convertTimestampTicksToSeconds();
+
+ for (int t = 0; t < midiFile.getNumTracks(); t++) {
+ const MidiMessageSequence* track = midiFile.getTrack(t);
+ for (int i = 0; i < track->getNumEvents(); i++) {
+ MidiMessage& m = track->getEventPointer(i)->message;
+ int sampleOffset = (int)(mySampleRate * m.getTimeStamp());
+ if (allEvents || m.isNoteOff() || m.isNoteOn()) {
+ myMidiBufferSec.addEvent(m, sampleOffset);
+ }
+ }
+ }
+ }
+ else {
+ auto timeFormat = midiFile.getTimeFormat(); // the ppqn (Ableton makes midi files with 96 ppqn)
+
+ for (int t = 0; t < midiFile.getNumTracks(); t++) {
+ const MidiMessageSequence* track = midiFile.getTrack(t);
+ for (int i = 0; i < track->getNumEvents(); i++) {
+ MidiMessage& m = track->getEventPointer(i)->message;
+
+ if (allEvents || m.isNoteOff() || m.isNoteOn()) {
+ // convert timestamp from its original time format to our high resolution PPQN
+ auto timeStamp = m.getTimeStamp() * PPQN / timeFormat;
+ myMidiBufferQN.addEvent(m, timeStamp);
+ }
+ }
}
}
@@ -632,14 +716,16 @@ FaustProcessor::loadMidi(const std::string& path)
void
FaustProcessor::clearMidi() {
- myMidiBuffer.clear();
+ myMidiBufferSec.clear();
+ myMidiBufferQN.clear();
}
bool
FaustProcessor::addMidiNote(uint8 midiNote,
uint8 midiVelocity,
const double noteStart,
- const double noteLength) {
+ const double noteLength,
+ bool convert_to_sec) {
if (midiNote > 255) midiNote = 255;
if (midiNote < 0) midiNote = 0;
@@ -658,11 +744,20 @@ FaustProcessor::addMidiNote(uint8 midiNote,
midiNote,
midiVelocity);
- auto startTime = noteStart * mySampleRate;
- onMessage.setTimeStamp(startTime);
- offMessage.setTimeStamp(startTime + noteLength * mySampleRate);
- myMidiBuffer.addEvent(onMessage, (int)onMessage.getTimeStamp());
- myMidiBuffer.addEvent(offMessage, (int)offMessage.getTimeStamp());
+ if (convert_to_sec) {
+ auto startTime = noteStart * mySampleRate;
+ onMessage.setTimeStamp(startTime);
+ offMessage.setTimeStamp(startTime + noteLength * mySampleRate);
+ myMidiBufferSec.addEvent(onMessage, (int)onMessage.getTimeStamp());
+ myMidiBufferSec.addEvent(offMessage, (int)offMessage.getTimeStamp());
+ }
+ else {
+ auto startTime = noteStart * PPQN;
+ onMessage.setTimeStamp(startTime);
+ offMessage.setTimeStamp(startTime + noteLength * PPQN);
+ myMidiBufferQN.addEvent(onMessage, (int)onMessage.getTimeStamp());
+ myMidiBufferQN.addEvent(offMessage, (int)offMessage.getTimeStamp());
+ }
return true;
}
@@ -754,7 +849,6 @@ FaustProcessor::getPathToFaustLibraries() {
catch (...) {
throw std::runtime_error("Error getting path to faustlibraries.");
}
- return "";
}
using myaudiotype = py::array_t<float, py::array::c_style | py::array::forcecast>;
diff --git a/Source/FaustProcessor.h b/Source/FaustProcessor.h
index 553c78f8..a6801eb7 100644
--- a/Source/FaustProcessor.h
+++ b/Source/FaustProcessor.h
@@ -117,6 +117,8 @@ class FaustProcessor : public ProcessorBase
const juce::String getName() const { return "FaustProcessor"; }
+ bool setAutomation(std::string parameterName, py::array input, std::uint32_t ppqn);
+
// faust stuff
void clear();
bool compile();
@@ -139,7 +141,7 @@ class FaustProcessor : public ProcessorBase
void setAutoImport(const std::string& s) { m_autoImport = s; }
std::string getAutoImport() { return m_autoImport; }
- bool loadMidi(const std::string& path);
+ bool loadMidi(const std::string& path, bool clearPrevious, bool convertToSeconds, bool allEvents);
void clearMidi();
@@ -148,7 +150,8 @@ class FaustProcessor : public ProcessorBase
bool addMidiNote(const uint8 midiNote,
const uint8 midiVelocity,
const double noteStart,
- const double noteLength);
+ const double noteLength,
+ bool convert_to_sec);
void setSoundfiles(py::dict);
@@ -199,12 +202,23 @@ class FaustProcessor : public ProcessorBase
bool m_groupVoices = true;
bool m_isCompiled = false;
- MidiBuffer myMidiBuffer;
- MidiMessage myMidiMessage;
- int myMidiMessagePosition = -1;
- MidiBuffer::Iterator* myMidiIterator = nullptr;
- bool myIsMessageBetween = false;
- bool myMidiEventsDoRemain = false;
+ MidiBuffer myMidiBufferQN;
+ MidiBuffer myMidiBufferSec;
+
+ MidiMessage myMidiMessageQN;
+ MidiMessage myMidiMessageSec;
+
+ int myMidiMessagePositionQN = -1;
+ int myMidiMessagePositionSec = -1;
+
+ MidiBuffer::Iterator* myMidiIteratorQN = nullptr;
+ MidiBuffer::Iterator* myMidiIteratorSec = nullptr;
+
+ bool myIsMessageBetweenQN = false;
+ bool myIsMessageBetweenSec = false;
+
+ bool myMidiEventsDoRemainQN = false;
+ bool myMidiEventsDoRemainSec = false;
juce::AudioSampleBuffer oneSampleInBuffer;
juce::AudioSampleBuffer oneSampleOutBuffer;
diff --git a/Source/FilterProcessor.cpp b/Source/FilterProcessor.cpp
index bb4eaca1..3ce0679c 100644
--- a/Source/FilterProcessor.cpp
+++ b/Source/FilterProcessor.cpp
@@ -48,9 +48,9 @@ void FilterProcessor::automateParameters() {
AudioPlayHead::CurrentPositionInfo posInfo;
getPlayHead()->getCurrentPosition(posInfo);
- *myFreq = getAutomationVal("freq", posInfo.timeInSamples);
- *myQ = getAutomationVal("q", posInfo.timeInSamples);
- *myGain = getAutomationVal("gain", posInfo.timeInSamples);
+ *myFreq = getAutomationVal("freq", posInfo);
+ *myQ = getAutomationVal("q", posInfo);
+ *myGain = getAutomationVal("gain", posInfo);
switch (myMode)
{
@@ -160,14 +160,14 @@ FilterProcessor::getMode() {
void
FilterProcessor::setFrequency(float freq) { setAutomationVal("freq", freq);}
float
-FilterProcessor::getFrequency() { return getAutomationVal("freq", 0); }
+FilterProcessor::getFrequency() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("freq", posInfo); }
void
FilterProcessor::setQ(float q) { setAutomationVal("q", q);}
float
-FilterProcessor::getQ() { return getAutomationVal("q", 0); }
+FilterProcessor::getQ() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("q", posInfo); }
void
FilterProcessor::setGain(float gain) { setAutomationVal("gain", gain);}
float
-FilterProcessor::getGain() { return getAutomationVal("gain", 0);}
+FilterProcessor::getGain() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("gain", posInfo);}
diff --git a/Source/PannerProcessor.h b/Source/PannerProcessor.h
index 1228a0f6..3ba2c902 100644
--- a/Source/PannerProcessor.h
+++ b/Source/PannerProcessor.h
@@ -37,7 +37,7 @@ class PannerProcessor : public ProcessorBase
AudioPlayHead::CurrentPositionInfo posInfo;
getPlayHead()->getCurrentPosition(posInfo);
- *myPan = getAutomationVal("pan", posInfo.timeInSamples);
+ *myPan = getAutomationVal("pan", posInfo);
updateParameters();
}
@@ -48,7 +48,7 @@ class PannerProcessor : public ProcessorBase
const juce::String getName() { return "PannerProcessor"; };
void setPan(float newPanVal) { setAutomationVal("pan", newPanVal); }
- float getPan() { return getAutomationVal("pan", 0); }
+ float getPan() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("pan", posInfo); }
void setRule(std::string newRule) {
myRule = stringToRule(newRule);
diff --git a/Source/PlaybackWarpProcessor.cpp b/Source/PlaybackWarpProcessor.cpp
new file mode 100644
index 00000000..67d0b41b
--- /dev/null
+++ b/Source/PlaybackWarpProcessor.cpp
@@ -0,0 +1,404 @@
+#include "PlaybackWarpProcessor.h"
+#ifdef BUILD_DAWDREAMER_RUBBERBAND
+
+PlaybackWarpProcessor::PlaybackWarpProcessor(std::string newUniqueName, std::vector<std::vector<float>> inputData, double sr) : ProcessorBase{ createParameterLayout, newUniqueName }
+{
+ m_numChannels = (int)inputData.size();
+ setMainBusInputsAndOutputs(0, m_numChannels);
+ const int numSamples = (int)inputData.at(0).size();
+
+ myPlaybackData.setSize(m_numChannels, numSamples);
+ for (int chan = 0; chan < m_numChannels; chan++) {
+ myPlaybackData.copyFrom(chan, 0, inputData.at(chan).data(), numSamples);
+ }
+
+ init(sr);
+}
+
+PlaybackWarpProcessor::PlaybackWarpProcessor(std::string newUniqueName, py::array_t<float> input, double sr) : ProcessorBase{ createParameterLayout, newUniqueName }
+{
+ setData(input);
+ init(sr);
+}
+
+void
+PlaybackWarpProcessor::init(double sr) {
+ m_sample_rate = sr;
+ setAutomationVal("transpose", 0.);
+ myTranspose = myParameters.getRawParameterValue("transpose");
+ setupRubberband(sr, m_numChannels);
+ setClipPositionsDefault();
+}
+
+
+void
+PlaybackWarpProcessor::setClipPositionsDefault() {
+
+ std::vector<std::tuple<float, float, float>> positions;
+
+ positions.push_back(std::tuple(0.f, 65536.f, 0.f));
+
+ setClipPositions(positions);
+}
+
+void
+PlaybackWarpProcessor::prepareToPlay(double, int) {
+
+}
+
+void
+PlaybackWarpProcessor::automateParameters() {
+
+ AudioPlayHead::CurrentPositionInfo posInfo;
+ getPlayHead()->getCurrentPosition(posInfo);
+
+ *myTranspose = getAutomationVal("transpose", posInfo);
+ double scale = std::pow(2., *myTranspose / 12.);
+ m_rbstretcher->setPitchScale(scale);
+}
+
+
+py::array_t<double>
+PlaybackWarpProcessor::getWarpMarkers() {
+
+ py::array_t<double> arr({ (int)m_clipInfo.warp_markers.size(), 2 });
+
+ auto ra = arr.mutable_unchecked();
+
+ int i = 0;
+ for (auto& warp_marker : m_clipInfo.warp_markers) {
+ ra(i, 0) = warp_marker.first; // time in seconds in the audio
+ ra(i, 1) = warp_marker.second; // time in beats in the audio, relative to 1.1.1
+ i++;
+ }
+
+ return arr;
+}
+
+void
+PlaybackWarpProcessor::resetWarpMarkers(double bpm) {
+ m_clipInfo.warp_markers.clear();
+
+ m_clipInfo.warp_markers.push_back(std::make_pair(0, 0));
+ double numSamples = 128;
+ double beats = bpm / (60. * numSamples / m_sample_rate);
+ m_clipInfo.warp_markers.push_back(std::make_pair(numSamples, beats));
+}
+
+void
+PlaybackWarpProcessor::setWarpMarkers(py::array_t<float> input) {
+
+ if (input.ndim() != 2) {
+ throw std::runtime_error("The warp markers must be two-dimensional and shaped (num_markers, 2).");
+ return;
+ }
+
+ const int numPairs = (int)input.shape(0);
+
+ if (numPairs < 2) {
+ throw std::runtime_error("The number of warp markers must be greater than one.");
+ return;
+ }
+
+ if (input.shape(1) != 2) {
+ throw std::runtime_error("The dimensions of the passed warp markers are incorrect.");
+ return;
+ }
+
+ std::vector<std::pair<double, double>> warp_markers;
+
+ double beat, new_beat;
+ double pos, new_pos;
+ beat = new_beat = pos = new_pos = -999999.;
+
+ float* input_ptr = (float*)input.data();
+
+ for (int pair_i = 0; pair_i < numPairs; pair_i++) {
+
+ new_pos = *input_ptr++;
+ new_beat = *input_ptr++;
+
+ if (new_beat <= beat || new_pos <= pos) {
+ throw std::runtime_error("The warp markers must be monotonically increasing. new_beat: " + std::to_string(new_beat) + " beat: " + std::to_string(beat) + " new_pos: " + std::to_string(new_pos) + " pos: " + std::to_string(pos));
+ }
+
+ pos = new_pos;
+ beat = new_beat;
+
+ warp_markers.push_back(std::make_pair(pos, beat));
+ }
+
+ m_clipInfo.warp_markers = warp_markers;
+}
+
+
+bool
+PlaybackWarpProcessor::setClipPositions(std::vector<std::tuple<float, float, float>> positions) {
+
+ // a position is a (clip start, clip end, clip offset)
+ // clip start: The position in beats relative to the engine's timeline where the clip starts
+ // clip end: The position in beats relative to the engine's timeline where the clip ends
+ // clip offset: A clip's first sample is determined by the "start marker" in the ASD file.
+ // This is an offset to that start marker.
+
+ m_clips.clear();
+
+ for (auto& position : positions) {
+
+ Clip clip = Clip((double)std::get<0>(position), (double)std::get<1>(position), (double)std::get<2>(position));
+ m_clips.push_back(clip);
+ }
+
+ return true;
+}
+
+void
+PlaybackWarpProcessor::processBlock(juce::AudioSampleBuffer& buffer, juce::MidiBuffer& midiBuffer)
+{
+ AudioPlayHead::CurrentPositionInfo posInfo;
+ getPlayHead()->getCurrentPosition(posInfo);
+
+ automateParameters();
+
+ if (m_clips.size() == 0) {
+ ProcessorBase::processBlock(buffer, midiBuffer);
+ return;
+ }
+
+ if (m_clipIndex >= m_clips.size()) {
+ // we've already passed the last clip.
+ ProcessorBase::processBlock(buffer, midiBuffer);
+ return;
+ }
+
+ double movingPPQ = posInfo.ppqPosition;
+
+ double nextPPQ = posInfo.ppqPosition + (double(buffer.getNumSamples()) / m_sample_rate) * posInfo.bpm / 60.;
+
+ std::uint32_t numAvailable = 0;
+ const std::uint32_t numSamplesNeeded = buffer.getNumSamples();
+
+ std::uint32_t numWritten = 0;
+ std::uint64_t numToRetrieve = 0;
+
+ while (numWritten < numSamplesNeeded) {
+ // In this loop, figure out just one sample at a time.
+ // There are a lot of things to juggle including:
+ // The rubberband stretcher: does it have samples available? what samples should we tell it to process?
+ // The global clip position: are we inside a region that should be producing any kind of audio at all or silence?
+ // The local clip position: Do we have samples for the requested sample index, or do we need to loop to another position, or fake zeros?
+ // The clip info: is warping enabled, is looping enabled.
+
+ numAvailable = m_rbstretcher->available();
+
+ numToRetrieve = std::min(numAvailable, numSamplesNeeded - numWritten);
+ numToRetrieve = std::min(numToRetrieve, (std::uint64_t)(std::ceil((m_currentClip.end_pos - movingPPQ) / (posInfo.bpm) * 60. * m_sample_rate)));
+
+ if (numToRetrieve > 0) {
+ m_nonInterleavedBuffer.setSize(m_numChannels, numToRetrieve);
+ numToRetrieve = m_rbstretcher->retrieve(m_nonInterleavedBuffer.getArrayOfWritePointers(), numToRetrieve);
+
+ for (int chan = 0; chan < m_numChannels; chan++) {
+ auto chanPtr = m_nonInterleavedBuffer.getReadPointer(chan);
+ buffer.copyFrom(chan, numWritten, chanPtr, numToRetrieve);
+ }
+
+ numWritten += numToRetrieve;
+ movingPPQ += double(numToRetrieve) * posInfo.bpm / (m_sample_rate * 60.);
+ continue;
+ }
+
+ while (movingPPQ >= m_currentClip.end_pos) {
+ m_clipIndex += 1;
+ if (m_clipIndex < m_clips.size()) {
+ m_currentClip = m_clips.at(m_clipIndex);
+ setupRubberband(m_sample_rate, m_numChannels);
+ if (m_clipInfo.warp_on) {
+ sampleReadIndex = m_clipInfo.beat_to_sample(m_clipInfo.start_marker + m_currentClip.start_marker_offset, m_sample_rate);
+ }
+ else {
+ sampleReadIndex = 0;
+ }
+ }
+ else {
+ ProcessorBase::processBlock(buffer, midiBuffer);
+ return;
+ }
+ }
+
+ if (nextPPQ < m_currentClip.start_pos || movingPPQ < m_currentClip.start_pos) {
+ // write some zeros into the output
+ for (int chan = 0; chan < m_numChannels; chan++) {
+ buffer.setSample(chan, numWritten, 0.f);
+ }
+ numWritten += 1;
+ movingPPQ += posInfo.bpm / (m_sample_rate * 60.);
+ continue;
+ }
+
+ double ppqPosition = movingPPQ - m_currentClip.start_pos + m_currentClip.start_marker_offset;
+
+ bool past_end_marker_and_loop_off = ppqPosition > m_clipInfo.end_marker && !m_clipInfo.loop_on;
+ if (past_end_marker_and_loop_off || movingPPQ > m_currentClip.end_pos) {
+ m_clipIndex += 1;
+ if (m_clipIndex < m_clips.size()) {
+ // Use the next clip position.
+ m_currentClip = m_clips.at(m_clipIndex);
+ setupRubberband(m_sample_rate, m_numChannels);
+ if (m_clipInfo.warp_on) {
+ sampleReadIndex = m_clipInfo.beat_to_sample(m_clipInfo.start_marker + m_currentClip.start_marker_offset, m_sample_rate);
+ }
+ else {
+ sampleReadIndex = 0;
+ }
+ continue;
+ }
+ else {
+ for (int chan = 0; chan < m_numChannels; chan++) {
+ buffer.setSample(chan, numWritten, 0.f);
+ }
+ numWritten += 1;
+ movingPPQ += posInfo.bpm / (m_sample_rate * 60.);
+ continue;
+ }
+ }
+
+ if (m_clipInfo.warp_on) {
+ // todo: if the playback data sample rate is different than the engine's sr
+ // then that would affect the call to setTimeRatio.
+
+ double instant_bpm;
+ double _;
+ m_clipInfo.beat_to_seconds(ppqPosition, _, instant_bpm);
+ m_rbstretcher->setTimeRatio(instant_bpm / posInfo.bpm);
+ }
+ else {
+ m_rbstretcher->setTimeRatio(m_time_ratio_if_warp_off);
+ }
+
+ if (m_clipInfo.loop_on) {
+ int loop_end_sample = m_clipInfo.beat_to_sample(m_clipInfo.loop_end, m_sample_rate);
+ if (sampleReadIndex > loop_end_sample) {
+ int loop_start_sample = m_clipInfo.beat_to_sample(m_clipInfo.loop_start, m_sample_rate);
+ sampleReadIndex = loop_start_sample;
+ }
+ }
+ else {
+ int end_marker_sample = myPlaybackData.getNumSamples() - 1;
+ if (sampleReadIndex > end_marker_sample) {
+ continue;
+ }
+ }
+
+ m_nonInterleavedBuffer.setSize(m_numChannels, 1);
+
+ // can we read from the playback data or are we out of bounds and we need to pass zeros to rubberband?
+ const int last_sample = myPlaybackData.getNumSamples() - 1;
+ if (sampleReadIndex > -1 && sampleReadIndex <= last_sample) {
+ for (int chan = 0; chan < m_numChannels; chan++) {
+ m_nonInterleavedBuffer.copyFrom(chan, 0, myPlaybackData, chan, sampleReadIndex, 1);
+ }
+ }
+ else {
+ // pass zeros because the requested clip loop parameters are asking for out of bounds samples.
+ m_nonInterleavedBuffer.clear();
+ }
+
+ m_rbstretcher->process(m_nonInterleavedBuffer.getArrayOfReadPointers(), m_nonInterleavedBuffer.getNumSamples(), false);
+
+ sampleReadIndex += 1;
+ }
+
+ ProcessorBase::processBlock(buffer, midiBuffer);
+}
+
+void
+PlaybackWarpProcessor::reset() {
+
+ setupRubberband(m_sample_rate, m_numChannels);
+
+ m_clipIndex = 0;
+ sampleReadIndex = 0;
+
+ if (m_clipIndex < m_clips.size()) {
+ m_currentClip = m_clips.at(0);
+ if (m_clipInfo.warp_on) {
+ sampleReadIndex = m_clipInfo.beat_to_sample(m_clipInfo.start_marker + m_currentClip.start_marker_offset, m_sample_rate);
+ }
+ else {
+ sampleReadIndex = 0;
+ }
+ }
+}
+
+void
+PlaybackWarpProcessor::setData(py::array_t<float> input) {
+ float* input_ptr = (float*)input.data();
+
+ m_numChannels = (int)input.shape(0);
+ setMainBusInputsAndOutputs(0, m_numChannels);
+ const int numSamples = (int)input.shape(1);
+
+ myPlaybackData.setSize(m_numChannels, numSamples);
+ for (int chan = 0; chan < m_numChannels; chan++) {
+ myPlaybackData.copyFrom(chan, 0, input_ptr, numSamples);
+ input_ptr += numSamples;
+ }
+}
+
+bool
+PlaybackWarpProcessor::loadAbletonClipInfo(const char* filepath) {
+ return m_clipInfo.readWarpFile(filepath);
+}
+
+void
+PlaybackWarpProcessor::setupRubberband(float sr, int numChannels) {
+ // Note that we call this instead of calling m_rbstretcher->reset() because
+ // that method doesn't seem to work correctly.
+ // It's better to just create a whole new stretcher object.
+ using namespace RubberBand;
+
+ RubberBandStretcher::Options options = 0;
+ options |= RubberBandStretcher::OptionProcessRealTime;
+ options |= RubberBandStretcher::OptionStretchPrecise;
+ //options |= RubberBandStretcher::OptionPhaseIndependent;
+ //options |= RubberBandStretcher::OptionWindowLong;
+ //options |= RubberBandStretcher::OptionWindowShort;
+ //options |= RubberBandStretcher::OptionSmoothingOn;
+ //options |= RubberBandStretcher::OptionFormantPreserved;
+ options |= RubberBandStretcher::OptionPitchHighQuality;
+ options |= RubberBandStretcher::OptionChannelsTogether; // enabling this is NOT the default
+
+ // Pick one of these:
+ //options |= RubberBandStretcher::OptionThreadingAuto;
+ options |= RubberBandStretcher::OptionThreadingNever;
+ //options |= RubberBandStretcher::OptionThreadingAlways;
+
+ // Pick one of these:
+ options |= RubberBandStretcher::OptionTransientsSmooth;
+ //options |= RubberBandStretcher::OptionTransientsMixed;
+ //options |= RubberBandStretcher::OptionTransientsCrisp;
+
+ // Pick one of these:
+ options |= RubberBandStretcher::OptionDetectorCompound;
+ //options |= RubberBandStretcher::OptionDetectorPercussive;
+ //options |= RubberBandStretcher::OptionDetectorSoft;
+
+ m_rbstretcher = std::make_unique<RubberBandStretcher>(
+ sr,
+ numChannels,
+ options,
+ 1.,
+ 1.);
+}
+
+juce::AudioProcessorValueTreeState::ParameterLayout
+PlaybackWarpProcessor::createParameterLayout()
+{
+ juce::AudioProcessorValueTreeState::ParameterLayout params;
+
+ params.add(std::make_unique<juce::AudioParameterFloat>("transpose", "transpose", NormalisableRange<float>(-96.f, 96.f), 0.f));
+ return params;
+}
+
+#endif
\ No newline at end of file
diff --git a/Source/PlaybackWarpProcessor.h b/Source/PlaybackWarpProcessor.h
index 152c79c0..f55adc7a 100644
--- a/Source/PlaybackWarpProcessor.h
+++ b/Source/PlaybackWarpProcessor.h
@@ -11,69 +11,27 @@
class PlaybackWarpProcessor : public ProcessorBase
{
public:
- PlaybackWarpProcessor(std::string newUniqueName, std::vector<std::vector<float>> inputData, double sr) : ProcessorBase{ createParameterLayout, newUniqueName }
- {
- m_numChannels = (int) inputData.size();
- setMainBusInputsAndOutputs(0, m_numChannels);
- const int numSamples = (int)inputData.at(0).size();
-
- myPlaybackData.setSize(m_numChannels, numSamples);
- for (int chan = 0; chan < m_numChannels; chan++) {
- myPlaybackData.copyFrom(chan, 0, inputData.at(chan).data(), numSamples);
- }
-
- init(sr);
- }
-
- PlaybackWarpProcessor(std::string newUniqueName, py::array_t<float> input, double sr) : ProcessorBase{ createParameterLayout, newUniqueName }
- {
- setData(input);
- init(sr);
- }
+ PlaybackWarpProcessor(std::string newUniqueName, std::vector<std::vector<float>> inputData, double sr);
-private:
- void init(double sr) {
- m_sample_rate = sr;
- setAutomationVal("transpose", 0.);
- myTranspose = myParameters.getRawParameterValue("transpose");
- setupRubberband(sr, m_numChannels);
- setClipPositionsDefault();
- }
-
- void setClipPositionsDefault() {
-
- std::vector<std::tuple<float, float, float>> positions;
-
- positions.push_back(std::tuple(0.f, 65536.f, 0.f));
+ PlaybackWarpProcessor(std::string newUniqueName, py::array_t<float> input, double sr);
- setClipPositions(positions);
- }
+ void prepareToPlay(double, int);
-public:
- void
- prepareToPlay(double, int) {
+ void automateParameters();
- }
+ void processBlock(juce::AudioSampleBuffer& buffer, juce::MidiBuffer& midiBuffer);
- void setTimeRatio(double ratio) {
- m_time_ratio_if_warp_off = ratio;
- }
- double getTimeRatio() {
- return m_time_ratio_if_warp_off;
- }
+ void reset();
- void automateParameters() {
+ const juce::String getName() const { return "PlaybackWarpProcessor"; }
- AudioPlayHead::CurrentPositionInfo posInfo;
- getPlayHead()->getCurrentPosition(posInfo);
+ void setData(py::array_t<float> input);
- *myTranspose = getAutomationVal("transpose", posInfo.timeInSamples);
- double scale = std::pow(2., *myTranspose / 12.);
- m_rbstretcher->setPitchScale(scale);
- }
+ void setTimeRatio(double ratio) { m_time_ratio_if_warp_off = ratio; }
+ double getTimeRatio() { return m_time_ratio_if_warp_off; }
void setTranspose(float newVal) { setAutomationVal("transpose", newVal); }
- float getTranspose() { return getAutomationVal("transpose", 0); }
+ float getTranspose() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("transpose", posInfo); }
bool getWarpOn() { return m_clipInfo.warp_on; }
void setWarpOn(bool warpOn) { m_clipInfo.warp_on = warpOn; }
@@ -89,308 +47,30 @@ class PlaybackWarpProcessor : public ProcessorBase
double getEndMarker() { return m_clipInfo.end_marker; }
void setEndMarker(double endMarker) { m_clipInfo.end_marker = endMarker;}
- py::array_t getWarpMarkers() {
-
- py::array_t arr({ (int)m_clipInfo.warp_markers.size(), 2 });
-
- auto ra = arr.mutable_unchecked();
-
- int i = 0;
- for (auto& warp_marker : m_clipInfo.warp_markers) {
- ra(i, 0) = warp_marker.first; // time in seconds in the audio
- ra(i, 1) = warp_marker.second; // time in beats in the audio, relative to 1.1.1
- i++;
- }
-
- return arr;
- }
-
- void resetWarpMarkers(double bpm) {
- m_clipInfo.warp_markers.clear();
-
- m_clipInfo.warp_markers.push_back(std::make_pair(0, 0));
- double numSamples = 128;
- double beats = bpm / (60. * numSamples / m_sample_rate);
- m_clipInfo.warp_markers.push_back(std::make_pair(numSamples, beats));
- }
-
- void setWarpMarkers(py::array_t input) {
-
- if (input.ndim() != 2) {
- throw std::runtime_error("The warp markers must be two-dimensional and shaped (num_markers, 2).");
- return;
- }
-
- const int numPairs = (int)input.shape(0);
-
- if (numPairs < 2) {
- throw std::runtime_error("The number of warp markers must be greater than one.");
- return;
- }
+ py::array_t getWarpMarkers();
- if (input.shape(1) != 2) {
- throw std::runtime_error("The dimensions of the passed warp markers are incorrect.");
- return;
- }
+ void resetWarpMarkers(double bpm);
- std::vector> warp_markers;
+ void setWarpMarkers(py::array_t input);
- double beat, new_beat;
- double pos, new_pos;
- beat = new_beat = pos = new_pos = -999999.;
+ bool setClipPositions(std::vector> positions);
- float* input_ptr = (float*)input.data();
-
- for (int pair_i = 0; pair_i < numPairs; pair_i++) {
-
- new_pos = *input_ptr++;
- new_beat = *input_ptr++;
-
- if (new_beat <= beat || new_pos <= pos) {
- throw std::runtime_error("The warp markers must be monotonically increasing. new_beat: " + std::to_string(new_beat) + " beat: " + std::to_string(beat) + " new_pos: " + std::to_string(new_pos) + " pos: " + std::to_string(pos));
- }
-
- pos = new_pos;
- beat = new_beat;
-
- warp_markers.push_back(std::make_pair(pos, beat));
- }
-
- m_clipInfo.warp_markers = warp_markers;
- }
+ bool loadAbletonClipInfo(const char* filepath);
private:
- class Clip {
- public:
- Clip(double startPos, double endPos, double startMarkerOffset) : start_pos{ startPos }, end_pos{ endPos }, start_marker_offset{ startMarkerOffset } {};
- Clip() : start_pos { 0. }, end_pos{ 4. }, start_marker_offset{ 0. } {};
- double start_pos = 0.;
- double end_pos = 4.;
- double start_marker_offset = 0.;
- };
-public:
- bool setClipPositions(std::vector> positions) {
-
- // a position is a (clip start, clip end, clip offset)
- // clip start: The position in beats relative to the engine's timeline where the clip starts
- // clip end: The position in beats relative to the engine's timeline where the clip ends
- // clip offset: A clip's first sample is determined by the "start marker" in the ASD file.
- // This is an offset to that start marker.
-
- m_clips.clear();
-
- for (auto& position : positions) {
-
- Clip clip = Clip((double)std::get<0>(position), (double)std::get<1>(position), (double)std::get<2>(position));
- m_clips.push_back(clip);
- }
-
- return true;
- }
-
- void
- processBlock(juce::AudioSampleBuffer& buffer, juce::MidiBuffer& midiBuffer)
- {
- AudioPlayHead::CurrentPositionInfo posInfo;
- getPlayHead()->getCurrentPosition(posInfo);
-
- automateParameters();
-
- if (m_clips.size() == 0) {
- ProcessorBase::processBlock(buffer, midiBuffer);
- return;
- }
-
- if (m_clipIndex >= m_clips.size()) {
- // we've already passed the last clip.
- ProcessorBase::processBlock(buffer, midiBuffer);
- return;
- }
-
- double movingPPQ = posInfo.ppqPosition;
-
- double nextPPQ = posInfo.ppqPosition + (double(buffer.getNumSamples())/ m_sample_rate) * posInfo.bpm / 60.;
-
- int numAvailable = 0;
- const int numSamplesNeeded = buffer.getNumSamples();
-
- int numWritten = 0;
-
- while (numWritten < numSamplesNeeded) {
- // In this loop, figure out just one sample at a time.
- // There are a lot of things to juggle including:
- // The rubberband stretcher: does it have samples available? what samples should we tell it to process?
- // The global clip position: are we inside a region that should be producing any kind of audio at all or silence?
- // The local clip position: Do we have samples for the requested sample index, or do we need to loop to another position, or fake zeros?
- // The clip info: is warping enabled, is looping enabled.
-
- numAvailable = m_rbstretcher->available();
-
- int numToRetrieve = std::min(numAvailable, numSamplesNeeded - numWritten);
- numToRetrieve = std::min(numToRetrieve,int(std::ceil( (m_currentClip.end_pos-movingPPQ)/(posInfo.bpm)*60.*m_sample_rate)));
-
- if (numToRetrieve > 0) {
- m_nonInterleavedBuffer.setSize(m_numChannels, numToRetrieve);
- numToRetrieve = m_rbstretcher->retrieve(m_nonInterleavedBuffer.getArrayOfWritePointers(), numToRetrieve);
-
- for (int chan = 0; chan < m_numChannels; chan++) {
- auto chanPtr = m_nonInterleavedBuffer.getReadPointer(chan);
- buffer.copyFrom(chan, numWritten, chanPtr, numToRetrieve);
- }
-
- numWritten += numToRetrieve;
- movingPPQ += (double)(numToRetrieve)*posInfo.bpm / (m_sample_rate * 60.);
- continue;
- }
-
- while (movingPPQ >= m_currentClip.end_pos) {
- m_clipIndex += 1;
- if (m_clipIndex < m_clips.size()) {
- m_currentClip = m_clips.at(m_clipIndex);
- setupRubberband(m_sample_rate, m_numChannels);
- if (m_clipInfo.warp_on) {
- sampleReadIndex = m_clipInfo.beat_to_sample(m_clipInfo.start_marker + m_currentClip.start_marker_offset, m_sample_rate);
- }
- else {
- sampleReadIndex = 0;
- }
- }
- else {
- ProcessorBase::processBlock(buffer, midiBuffer);
- return;
- }
- }
-
- if (nextPPQ < m_currentClip.start_pos || movingPPQ < m_currentClip.start_pos) {
- // write some zeros into the output
- for (int chan = 0; chan < m_numChannels; chan++) {
- buffer.setSample(chan, numWritten, 0.f);
- }
- numWritten += 1;
- movingPPQ += posInfo.bpm / (m_sample_rate * 60.);
-
- continue;
- }
-
- double ppqPosition = movingPPQ - m_currentClip.start_pos + m_currentClip.start_marker_offset;
-
- bool past_end_marker_and_loop_off = ppqPosition > m_clipInfo.end_marker && !m_clipInfo.loop_on;
- if (past_end_marker_and_loop_off || movingPPQ > m_currentClip.end_pos) {
- m_clipIndex += 1;
- if (m_clipIndex < m_clips.size()) {
- // Use the next clip position.
- m_currentClip = m_clips.at(m_clipIndex);
- setupRubberband(m_sample_rate, m_numChannels);
- if (m_clipInfo.warp_on) {
- sampleReadIndex = m_clipInfo.beat_to_sample(m_clipInfo.start_marker + m_currentClip.start_marker_offset, m_sample_rate);
- }
- else {
- sampleReadIndex = 0;
- }
-
- continue;
- }
- else {
- for (int chan = 0; chan < m_numChannels; chan++) {
- buffer.setSample(chan, numWritten, 0.f);
- }
- numWritten += 1;
- movingPPQ += posInfo.bpm / (m_sample_rate * 60.);
-
- continue;
- }
- }
-
- if (m_clipInfo.warp_on) {
- // todo: if the playback data sample rate is different than the engine's sr
- // then that would affect the call to setTimeRatio.
-
- double instant_bpm;
- double _;
- m_clipInfo.beat_to_seconds(ppqPosition, _, instant_bpm);
- m_rbstretcher->setTimeRatio(instant_bpm / posInfo.bpm);
- }
- else {
- m_rbstretcher->setTimeRatio(m_time_ratio_if_warp_off);
- }
-
- if (m_clipInfo.loop_on) {
- int loop_end_sample = m_clipInfo.beat_to_sample(m_clipInfo.loop_end, m_sample_rate);
- if (sampleReadIndex > loop_end_sample) {
- int loop_start_sample = m_clipInfo.beat_to_sample(m_clipInfo.loop_start, m_sample_rate);
- sampleReadIndex = loop_start_sample;
- }
- }
- else {
- int end_marker_sample = myPlaybackData.getNumSamples() - 1;
- if (sampleReadIndex > end_marker_sample) {
- continue;
- }
- }
-
- m_nonInterleavedBuffer.setSize(m_numChannels, 1);
-
- // can we read from the playback data or are we out of bounds and we need to pass zeros to rubberband?
- const int last_sample = myPlaybackData.getNumSamples() - 1;
- if (sampleReadIndex > -1 && sampleReadIndex <= last_sample) {
- for (int chan = 0; chan < m_numChannels; chan++) {
- m_nonInterleavedBuffer.copyFrom(chan, 0, myPlaybackData, chan, sampleReadIndex, 1);
- }
- }
- else {
- // pass zeros because the requested clip loop parameters are asking for out of bounds samples.
- m_nonInterleavedBuffer.clear();
- }
-
- m_rbstretcher->process(m_nonInterleavedBuffer.getArrayOfReadPointers(), m_nonInterleavedBuffer.getNumSamples(), false);
-
- sampleReadIndex += 1;
- }
-
- ProcessorBase::processBlock(buffer, midiBuffer);
- }
-
- void
- reset() {
-
- setupRubberband(m_sample_rate, m_numChannels);
-
- m_clipIndex = 0;
- sampleReadIndex = 0;
-
- if (m_clipIndex < m_clips.size()) {
- m_currentClip = m_clips.at(0);
- if (m_clipInfo.warp_on) {
- sampleReadIndex = m_clipInfo.beat_to_sample(m_clipInfo.start_marker + m_currentClip.start_marker_offset, m_sample_rate);
- }
- else {
- sampleReadIndex = 0;
- }
- }
- }
+ void init(double sr);
- const juce::String getName() const { return "PlaybackWarpProcessor"; }
-
- void setData(py::array_t input) {
- float* input_ptr = (float*)input.data();
-
- m_numChannels = (int) input.shape(0);
- setMainBusInputsAndOutputs(0, m_numChannels);
- const int numSamples = (int) input.shape(1);
-
- myPlaybackData.setSize(m_numChannels, numSamples);
- for (int chan = 0; chan < m_numChannels; chan++) {
- myPlaybackData.copyFrom(chan, 0, input_ptr, numSamples);
- input_ptr += numSamples;
- }
- }
-
- bool loadAbletonClipInfo(const char* filepath) {
- return m_clipInfo.readWarpFile(filepath);;
- }
+ void setClipPositionsDefault();
-private:
+ class Clip {
+ public:
+ Clip(double startPos, double endPos, double startMarkerOffset) : start_pos{ startPos }, end_pos{ endPos }, start_marker_offset{ startMarkerOffset } {};
+ Clip() : start_pos { 0. }, end_pos{ 4. }, start_marker_offset{ 0. } {};
+ double start_pos = 0.;
+ double end_pos = 4.;
+ double start_marker_offset = 0.;
+ };
juce::AudioSampleBuffer myPlaybackData;
@@ -411,53 +91,9 @@ class PlaybackWarpProcessor : public ProcessorBase
int m_clipIndex = 0;
Clip m_currentClip;
- // Note that we call this instead of calling m_rbstretcher->reset() because
- // that method doesn't seem to work correctly.
- // It's better to just create a whole new stretcher object.
- void setupRubberband(float sr, int numChannels) {
- using namespace RubberBand;
-
- RubberBandStretcher::Options options = 0;
- options |= RubberBandStretcher::OptionProcessRealTime;
- options |= RubberBandStretcher::OptionStretchPrecise;
- //options |= RubberBandStretcher::OptionPhaseIndependent;
- //options |= RubberBandStretcher::OptionWindowLong;
- //options |= RubberBandStretcher::OptionWindowShort;
- //options |= RubberBandStretcher::OptionSmoothingOn;
- //options |= RubberBandStretcher::OptionFormantPreserved;
- options |= RubberBandStretcher::OptionPitchHighQuality;
- options |= RubberBandStretcher::OptionChannelsTogether; // enabling this is NOT the default
-
- // Pick one of these:
- options |= RubberBandStretcher::OptionThreadingAuto;
- //options |= RubberBandStretcher::OptionThreadingNever;
- //options |= RubberBandStretcher::OptionThreadingAlways;
-
- // Pick one of these:
- options |= RubberBandStretcher::OptionTransientsSmooth;
- //options |= RubberBandStretcher::OptionTransientsMixed;
- //options |= RubberBandStretcher::OptionTransientsCrisp;
-
- // Pick one of these:
- options |= RubberBandStretcher::OptionDetectorCompound;
- //options |= RubberBandStretcher::OptionDetectorPercussive;
- //options |= RubberBandStretcher::OptionDetectorSoft;
-
- m_rbstretcher = std::make_unique(
- sr,
- numChannels,
- options,
- 1.,
- 1.);
- }
-
- static juce::AudioProcessorValueTreeState::ParameterLayout createParameterLayout()
- {
- juce::AudioProcessorValueTreeState::ParameterLayout params;
-
- params.add(std::make_unique("transpose", "transpose", NormalisableRange(-96.f, 96.f), 0.f));
- return params;
- }
+ void setupRubberband(float sr, int numChannels);
+
+ static juce::AudioProcessorValueTreeState::ParameterLayout createParameterLayout();
};
#endif
diff --git a/Source/PluginProcessor.cpp b/Source/PluginProcessor.cpp
index ff4f7851..7aa0fd29 100644
--- a/Source/PluginProcessor.cpp
+++ b/Source/PluginProcessor.cpp
@@ -130,7 +130,8 @@ PluginProcessor::~PluginProcessor() {
std::lock_guard lock(PLUGIN_INSTANCE_MUTEX);
myPlugin.reset();
}
- delete myMidiIterator;
+ delete myMidiIteratorQN;
+ delete myMidiIteratorSec;
}
void PluginProcessor::setPlayHead(AudioPlayHead* newPlayHead)
@@ -174,28 +175,35 @@ PluginProcessor::processBlock(juce::AudioSampleBuffer& buffer, juce::MidiBuffer&
AudioPlayHead::CurrentPositionInfo posInfo;
getPlayHead()->getCurrentPosition(posInfo);
myRenderMidiBuffer.clear();
-
+
if (!myPlugin.get() || !isLoaded) {
- buffer.clear();
-
- if (posInfo.ppqPosition == 0) {
- throw std::runtime_error("Error: no plugin was processed for processor named " + this->getUniqueName());
- }
- return;
+ throw std::runtime_error("Error: no plugin was processed for processor named " + this->getUniqueName());
}
-
+
automateParameters();
- long long int start = posInfo.timeInSamples;
- long long int end = start + buffer.getNumSamples();
- myIsMessageBetween = myMidiMessagePosition >= start && myMidiMessagePosition < end;
- do {
- if (myIsMessageBetween) {
- myRenderMidiBuffer.addEvent(myMidiMessage, int(myMidiMessagePosition - start));
- myMidiEventsDoRemain = myMidiIterator->getNextEvent(myMidiMessage, myMidiMessagePosition);
- myIsMessageBetween = myMidiMessagePosition >= start && myMidiMessagePosition < end;
+ {
+ auto start = posInfo.timeInSamples;
+ auto end = start + buffer.getNumSamples();
+ myIsMessageBetweenSec = myMidiMessagePositionSec >= start && myMidiMessagePositionSec < end;
+ while (myIsMessageBetweenSec && myMidiEventsDoRemainSec) {
+ myRenderMidiBuffer.addEvent(myMidiMessageSec, int(myMidiMessagePositionSec - start));
+ myMidiEventsDoRemainSec = myMidiIteratorSec->getNextEvent(myMidiMessageSec, myMidiMessagePositionSec);
+ myIsMessageBetweenSec = myMidiMessagePositionSec >= start && myMidiMessagePositionSec < end;
+ }
+ }
+
+ {
+ auto pulseStart = std::floor(posInfo.ppqPosition * PPQN);
+ auto pulseEnd = pulseStart + buffer.getNumSamples() * (posInfo.bpm * PPQN) / (mySampleRate * 60.);
+
+ myIsMessageBetweenQN = myMidiMessagePositionQN >= pulseStart && myMidiMessagePositionQN < pulseEnd;
+ while (myIsMessageBetweenQN && myMidiEventsDoRemainQN) {
+ myRenderMidiBuffer.addEvent(myMidiMessageQN, int(myMidiMessagePositionQN - pulseStart));
+ myMidiEventsDoRemainQN = myMidiIteratorQN->getNextEvent(myMidiMessageQN, myMidiMessagePositionQN);
+ myIsMessageBetweenQN = myMidiMessagePositionQN >= pulseStart && myMidiMessagePositionQN < pulseEnd;
}
- } while (myIsMessageBetween && myMidiEventsDoRemain);
+ }
myPlugin->processBlock(buffer, myRenderMidiBuffer);
@@ -217,7 +225,7 @@ PluginProcessor::automateParameters() {
auto theParameter = ((AutomateParameterFloat*)myParameters.getParameter(paramID));
if (theParameter) {
// todo: change to setParameterNotifyingHost?
- myPlugin->setParameter(i, theParameter->sample(posInfo.timeInSamples));
+ myPlugin->setParameter(i, theParameter->sample(posInfo));
}
else {
throw std::runtime_error("Error automateParameters: " + myPlugin->getParameterName(i).toStdString());
@@ -233,10 +241,16 @@ PluginProcessor::reset()
myPlugin->reset();
}
- delete myMidiIterator;
- myMidiIterator = new MidiBuffer::Iterator(myMidiBuffer); // todo: deprecated.
+ delete myMidiIteratorSec;
+ myMidiIteratorSec = new MidiBuffer::Iterator(myMidiBufferSec); // todo: deprecated.
+
+ myMidiEventsDoRemainSec = myMidiIteratorSec->getNextEvent(myMidiMessageSec, myMidiMessagePositionSec);
+
+ delete myMidiIteratorQN;
+ myMidiIteratorQN = new MidiBuffer::Iterator(myMidiBufferQN); // todo: deprecated.
+
+ myMidiEventsDoRemainQN = myMidiIteratorQN->getNextEvent(myMidiMessageQN, myMidiMessagePositionQN);
- myMidiEventsDoRemain = myMidiIterator->getNextEvent(myMidiMessage, myMidiMessagePosition);
myRenderMidiBuffer.clear();
}
@@ -434,6 +448,8 @@ PluginProcessor::getPatch() {
params.clear();
params.reserve(myPlugin->getNumParameters());
+
+ AudioPlayHead::CurrentPositionInfo posInfo;
for (int i = 0; i < myPlugin->AudioProcessor::getNumParameters(); i++) {
auto theName = myPlugin->getParameterName(i);
@@ -444,7 +460,7 @@ PluginProcessor::getPatch() {
auto parameter = ((AutomateParameterFloat*)myParameters.getParameter(theName));
if (parameter) {
- float val = parameter->sample(0);
+ float val = parameter->sample(posInfo);
if (parameter) {
params.push_back(std::make_pair(i, val));
}
@@ -476,26 +492,52 @@ PluginProcessor::getPluginParameterSize()
int
PluginProcessor::getNumMidiEvents() {
- return myMidiBuffer.getNumEvents();
+ return myMidiBufferSec.getNumEvents() + myMidiBufferQN.getNumEvents();
};
bool
-PluginProcessor::loadMidi(const std::string& path, bool allEvents)
+PluginProcessor::loadMidi(const std::string& path, bool clearPrevious, bool convertToSeconds, bool allEvents)
{
+ if (!std::filesystem::exists(path.c_str())) {
+ throw std::runtime_error("File not found: " + path);
+ }
+
File file = File(path);
FileInputStream fileStream(file);
MidiFile midiFile;
midiFile.readFrom(fileStream);
- midiFile.convertTimestampTicksToSeconds();
- myMidiBuffer.clear();
-
- for (int t = 0; t < midiFile.getNumTracks(); t++) {
- const MidiMessageSequence* track = midiFile.getTrack(t);
- for (int i = 0; i < track->getNumEvents(); i++) {
- MidiMessage& m = track->getEventPointer(i)->message;
- int sampleOffset = (int)(mySampleRate * m.getTimeStamp());
- if (allEvents || m.isNoteOff() || m.isNoteOn()) {
- myMidiBuffer.addEvent(m, sampleOffset);
+
+ if (clearPrevious) {
+ myMidiBufferSec.clear();
+ myMidiBufferQN.clear();
+ }
+
+ if (convertToSeconds) {
+ midiFile.convertTimestampTicksToSeconds();
+
+ for (int t = 0; t < midiFile.getNumTracks(); t++) {
+ const MidiMessageSequence* track = midiFile.getTrack(t);
+ for (int i = 0; i < track->getNumEvents(); i++) {
+ MidiMessage& m = track->getEventPointer(i)->message;
+ int sampleOffset = (int)(mySampleRate * m.getTimeStamp());
+ if (allEvents || m.isNoteOff() || m.isNoteOn()) {
+ myMidiBufferSec.addEvent(m, sampleOffset);
+ }
+ }
+ }
+ }
+ else {
+ auto timeFormat = midiFile.getTimeFormat(); // the ppqn (Ableton makes midi files with 96 ppqn)
+
+ for (int t = 0; t < midiFile.getNumTracks(); t++) {
+ const MidiMessageSequence* track = midiFile.getTrack(t);
+ for (int i = 0; i < track->getNumEvents(); i++) {
+ MidiMessage& m = track->getEventPointer(i)->message;
+ if (allEvents || m.isNoteOff() || m.isNoteOn()) {
+ // convert timestamp from its original time format to our high resolution PPQN
+ auto timeStamp = m.getTimeStamp() * PPQN / timeFormat;
+ myMidiBufferQN.addEvent(m, timeStamp);
+ }
}
}
}
@@ -505,14 +547,16 @@ PluginProcessor::loadMidi(const std::string& path, bool allEvents)
void
PluginProcessor::clearMidi() {
- myMidiBuffer.clear();
+ myMidiBufferSec.clear();
+ myMidiBufferQN.clear();
}
bool
PluginProcessor::addMidiNote(uint8 midiNote,
uint8 midiVelocity,
const double noteStart,
- const double noteLength) {
+ const double noteLength,
+ bool convert_to_sec) {
if (midiNote > 255) midiNote = 255;
if (midiNote < 0) midiNote = 0;
@@ -531,11 +575,20 @@ PluginProcessor::addMidiNote(uint8 midiNote,
midiNote,
midiVelocity);
- auto startTime = noteStart * mySampleRate;
- onMessage.setTimeStamp(startTime);
- offMessage.setTimeStamp(startTime + noteLength * mySampleRate);
- myMidiBuffer.addEvent(onMessage, (int)onMessage.getTimeStamp());
- myMidiBuffer.addEvent(offMessage, (int)offMessage.getTimeStamp());
+ if (convert_to_sec) {
+ auto startTime = noteStart * mySampleRate;
+ onMessage.setTimeStamp(startTime);
+ offMessage.setTimeStamp(startTime + noteLength * mySampleRate);
+ myMidiBufferSec.addEvent(onMessage, (int)onMessage.getTimeStamp());
+ myMidiBufferSec.addEvent(offMessage, (int)offMessage.getTimeStamp());
+ }
+ else {
+ auto startTime = noteStart * PPQN;
+ onMessage.setTimeStamp(startTime);
+ offMessage.setTimeStamp(startTime + noteLength * PPQN);
+ myMidiBufferQN.addEvent(onMessage, (int)onMessage.getTimeStamp());
+ myMidiBufferQN.addEvent(offMessage, (int)offMessage.getTimeStamp());
+ }
return true;
}
@@ -573,7 +626,9 @@ PluginProcessorWrapper::wrapperGetParameter(int parameterIndex)
return 0.;
}
- return ProcessorBase::getAutomationVal(std::to_string(parameterIndex), 0);
+ AudioPlayHead::CurrentPositionInfo posInfo;
+
+ return ProcessorBase::getAutomationVal(std::to_string(parameterIndex), posInfo);
}
std::string
@@ -595,8 +650,8 @@ PluginProcessorWrapper::wrapperSetParameter(int parameter, float value)
}
bool
-PluginProcessorWrapper::wrapperSetAutomation(int parameterIndex, py::array input) {
- return PluginProcessorWrapper::setAutomation(std::to_string(parameterIndex), input);
+PluginProcessorWrapper::wrapperSetAutomation(int parameterIndex, py::array input, std::uint32_t ppqn) {
+ return PluginProcessorWrapper::setAutomation(std::to_string(parameterIndex), input, ppqn);
}
int
diff --git a/Source/PluginProcessor.h b/Source/PluginProcessor.h
index 8a9150b8..cf2c06f0 100644
--- a/Source/PluginProcessor.h
+++ b/Source/PluginProcessor.h
@@ -64,7 +64,7 @@ class PluginProcessor : public ProcessorBase
const juce::String getName() const { return "PluginProcessor"; }
- bool loadMidi(const std::string& path, bool allEvents);
+ bool loadMidi(const std::string& path, bool clearPrevious, bool convertToSeconds, bool allEvents);
void clearMidi();
@@ -73,7 +73,8 @@ class PluginProcessor : public ProcessorBase
bool addMidiNote(const uint8 midiNote,
const uint8 midiVelocity,
const double noteStart,
- const double noteLength);
+ const double noteLength,
+ bool convert_to_sec);
void setPlayHead(AudioPlayHead* newPlayHead);
@@ -91,13 +92,25 @@ class PluginProcessor : public ProcessorBase
std::string myPluginPath;
double mySampleRate;
- MidiBuffer myMidiBuffer;
+ MidiBuffer myMidiBufferQN;
+ MidiBuffer myMidiBufferSec;
+
MidiBuffer myRenderMidiBuffer;
- MidiMessage myMidiMessage;
- int myMidiMessagePosition = -1;
- MidiBuffer::Iterator* myMidiIterator = nullptr;
- bool myIsMessageBetween = false;
- bool myMidiEventsDoRemain = false;
+
+ MidiMessage myMidiMessageQN;
+ MidiMessage myMidiMessageSec;
+
+ int myMidiMessagePositionQN = -1;
+ int myMidiMessagePositionSec = -1;
+
+ MidiBuffer::Iterator* myMidiIteratorQN = nullptr;
+ MidiBuffer::Iterator* myMidiIteratorSec = nullptr;
+
+ bool myIsMessageBetweenQN = false;
+ bool myIsMessageBetweenSec = false;
+
+ bool myMidiEventsDoRemainQN = false;
+ bool myMidiEventsDoRemainSec = false;
void automateParameters();
@@ -124,7 +137,7 @@ class PluginProcessorWrapper : public PluginProcessor
bool wrapperSetParameter(int parameter, float value);
- bool wrapperSetAutomation(int parameterIndex, py::array input);
+ bool wrapperSetAutomation(int parameterIndex, py::array input, std::uint32_t ppqn);
int wrapperGetPluginParameterSize();
diff --git a/Source/ProcessorBase.cpp b/Source/ProcessorBase.cpp
index 505408b9..01c331b8 100644
--- a/Source/ProcessorBase.cpp
+++ b/Source/ProcessorBase.cpp
@@ -23,14 +23,14 @@ ProcessorBase::setStateInformation(const void* data, int sizeInBytes)
myParameters.replaceState(juce::ValueTree::fromXml(*xmlState));
}
-bool ProcessorBase::setAutomation(std::string parameterName, py::array input) {
+bool ProcessorBase::setAutomation(std::string parameterName, py::array input, std::uint32_t ppqn) {
try
{
auto parameter = (AutomateParameterFloat*)myParameters.getParameter(parameterName); // todo: why do we have to cast to AutomateParameterFloat instead of AutomateParameter
if (parameter) {
- return parameter->setAutomation(input);
+ return parameter->setAutomation(input, ppqn);
}
else {
throw std::runtime_error("Failed to find parameter: " + parameterName);
@@ -75,11 +75,11 @@ std::vector ProcessorBase::getAutomation(std::string parameterName) {
}
}
-float ProcessorBase::getAutomationVal(std::string parameterName, int index) {
+float ProcessorBase::getAutomationVal(std::string parameterName, AudioPlayHead::CurrentPositionInfo& posInfo) {
auto parameter = (AutomateParameterFloat*)myParameters.getParameter(parameterName); // todo: why do we have to cast to AutomateParameterFloat instead of AutomateParameter
if (parameter) {
- return parameter->sample(index);
+ return parameter->sample(posInfo);
}
else {
throw std::runtime_error("Failed to get automation value for parameter: " + parameterName);
diff --git a/Source/ProcessorBase.h b/Source/ProcessorBase.h
index 4c04a372..901302f9 100644
--- a/Source/ProcessorBase.h
+++ b/Source/ProcessorBase.h
@@ -29,7 +29,7 @@ class ProcessorBase : public juce::AudioProcessor
if (!m_recordEnable) {
return;
}
- AudioPlayHead::CurrentPositionInfo posInfo;
+ juce::AudioPlayHead::CurrentPositionInfo posInfo;
getPlayHead()->getCurrentPosition(posInfo);
const int numberChannels = myRecordBuffer.getNumChannels();
@@ -71,11 +71,11 @@ class ProcessorBase : public juce::AudioProcessor
void getStateInformation(juce::MemoryBlock&);
void setStateInformation(const void*, int);
- bool setAutomation(std::string parameterName, py::array input);
+ bool setAutomation(std::string parameterName, py::array input, std::uint32_t ppqn);
- bool setAutomationVal(std::string parameterName, float val);
+ virtual bool setAutomationVal(std::string parameterName, float val);
- float getAutomationVal(std::string parameterName, int index);
+ float getAutomationVal(std::string parameterName, juce::AudioPlayHead::CurrentPositionInfo& posInfo);
std::vector getAutomation(std::string parameterName);
py::array_t getAutomationNumpy(std::string parameterName);
@@ -148,7 +148,6 @@ class ProcessorBase : public juce::AudioProcessor
}
else {
throw std::invalid_argument(this->getUniqueName() + " CANNOT ApplyBusesLayout inputs: " + std::to_string(inputs) + " outputs: " + std::to_string(outputs));
- return false;
}
}
@@ -157,6 +156,18 @@ class ProcessorBase : public juce::AudioProcessor
return this->canApplyBusesLayout(busesLayout);
}
+ // todo: this is not a good thing to hard-code.
+ // Ableton saves MIDI at a PPQN of 96, which is somewhat low.
+ // To be easily compatible with higher resolution PPQN MIDI files that
+ // are loaded with `load_midi`, we use an internal high rate PPQN of 960.
+ // It's easy to "upsample" to this higher resolution, as we do in `load_midi` and `add_midi_note`.
+ // Another bad design of the code is that FaustProcessor, PluginProcessor, and SamplerProcessor
+ // have a lot of common code related to MIDIBuffers.
+ // There's one set of variables for keeping track of everything in absolute time (when
+ // `convert_to_sec` is True), and another for relative-to-tempo timing (when note start
+ // times and durations are measured in "quarter notes" (QN)).
+ const static std::uint32_t PPQN = 3840;
+
private:
//==============================================================================
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR(ProcessorBase)
diff --git a/Source/RenderEngine.cpp b/Source/RenderEngine.cpp
index d1e7c34a..d62f6e13 100644
--- a/Source/RenderEngine.cpp
+++ b/Source/RenderEngine.cpp
@@ -1,5 +1,4 @@
#include "RenderEngine.h"
-#include "AllProcessors.h"
#include
@@ -10,6 +9,19 @@ RenderEngine::RenderEngine(double sr, int bs) :
{
myMainProcessorGraph->setNonRealtime(true);
myMainProcessorGraph->setPlayHead(this);
+
+ bpmAutomation.setSize(1, 1);
+ bpmAutomation.setSample(0, 0, 120.); // default 120 bpm
+}
+
+bool
+RenderEngine::removeProcessor(const std::string& name) {
+ if (m_UniqueNameToNodeID.find(name) != m_UniqueNameToNodeID.end()) {
+ myMainProcessorGraph->removeNode(m_UniqueNameToNodeID[name]);
+ m_UniqueNameToNodeID.erase(name);
+ return true;
+ }
+ return false;
}
bool
@@ -40,9 +52,6 @@ RenderEngine::loadGraph(DAG inDagNodes) {
);
}
- myMainProcessorGraph->enableAllBuses();
- // NB: don't enableAllBuses on all the nodes in the graph because
- // it will actually mess them up (FaustProcessor)
return success;
}
@@ -88,6 +97,7 @@ RenderEngine::connectGraph() {
}
processor->setPlayHead(this);
+ processor->prepareToPlay(mySampleRate, myBufferSize);
processor->automateParameters();
int numOutputAudioChans = processor->getMainBusNumOutputChannels();
int expectedInputChannels = processor->getMainBusNumInputChannels();
@@ -128,23 +138,29 @@ RenderEngine::connectGraph() {
myMainProcessorGraph->setPlayConfigDetails(0, 0, mySampleRate, myBufferSize);
myMainProcessorGraph->prepareToPlay(mySampleRate, myBufferSize);
- for (auto entry : m_stringDag) {
- auto node = myMainProcessorGraph->getNodeForId(m_UniqueNameToNodeID[entry.first]);
- node->getProcessor()->prepareToPlay(mySampleRate, myBufferSize);
- }
return true;
}
+float RenderEngine::getBPM(double ppqPosition) {
+
+ int index = int(myBPMPPQN * ppqPosition);
+ index = std::min(bpmAutomation.getNumSamples() - 1, index);
+
+ auto bpm = bpmAutomation.getSample(0, index);
+
+ return bpm;
+}
+
bool
RenderEngine::render(const double renderLength) {
- int numRenderedSamples = renderLength * mySampleRate;
+ std::uint64_t numRenderedSamples = renderLength * mySampleRate;
if (numRenderedSamples <= 0) {
throw std::runtime_error("Render length must be greater than zero.");
}
- int numberOfBuffers = myBufferSize == 1 ? numRenderedSamples : int(std::ceil((numRenderedSamples -1.) / myBufferSize));
+ std::uint64_t numberOfBuffers = myBufferSize == 1 ? numRenderedSamples : std::uint64_t(std::ceil((numRenderedSamples -1.) / myBufferSize));
bool graphIsConnected = true;
int audioBufferNumChans = 0;
@@ -168,26 +184,21 @@ RenderEngine::render(const double renderLength) {
else {
throw std::runtime_error("Unable to cast to Processor Base during render.");
}
-
- auto faustProcessor = dynamic_cast (processor);
- if (faustProcessor && (!faustProcessor->isCompiled())) {
- if (!faustProcessor->compile()) {
- return false;
- }
- }
}
myMainProcessorGraph->reset();
myMainProcessorGraph->setPlayHead(this);
myCurrentPositionInfo.resetToDefault();
- myCurrentPositionInfo.bpm = myBPM;
+ myCurrentPositionInfo.ppqPosition = 0.;
myCurrentPositionInfo.isPlaying = true;
myCurrentPositionInfo.isRecording = true;
+ myCurrentPositionInfo.timeInSeconds = 0;
myCurrentPositionInfo.timeInSamples = 0;
myCurrentPositionInfo.timeSigNumerator = 4;
myCurrentPositionInfo.timeSigDenominator = 4;
myCurrentPositionInfo.isLooping = false;
+ myCurrentPositionInfo.bpm = getBPM(myCurrentPositionInfo.ppqPosition);
if (!graphIsConnected) {
bool result = connectGraph();
@@ -197,6 +208,8 @@ RenderEngine::render(const double renderLength) {
}
AudioSampleBuffer audioBuffer(audioBufferNumChans, myBufferSize);
+
+ bool lastProcessorRecordEnable = false;
for (auto entry : m_stringDag) {
auto node = myMainProcessorGraph->getNodeForId(m_UniqueNameToNodeID[entry.first]);
@@ -205,6 +218,7 @@ RenderEngine::render(const double renderLength) {
if (processor) {
if (entry == m_stringDag.at(m_stringDag.size()-1)) {
// Always force the last processor to record.
+ lastProcessorRecordEnable = processor->getRecordEnable();
processor->setRecordEnable(true);
}
processor->setRecorderLength(processor->getRecordEnable() ? numRenderedSamples : 0);
@@ -215,17 +229,33 @@ RenderEngine::render(const double renderLength) {
}
MidiBuffer renderMidiBuffer;
+
+ auto stepInSeconds = double(myBufferSize) / mySampleRate;
- for (long long int i = 0; i < numberOfBuffers; ++i)
+ for (std::uint64_t i = 0; i < numberOfBuffers; ++i)
{
+ myCurrentPositionInfo.bpm = getBPM(myCurrentPositionInfo.ppqPosition);
+
myMainProcessorGraph->processBlock(audioBuffer, renderMidiBuffer);
myCurrentPositionInfo.timeInSamples += myBufferSize;
- myCurrentPositionInfo.ppqPosition = (myCurrentPositionInfo.timeInSamples / (mySampleRate * 60.)) * myBPM;
+ myCurrentPositionInfo.timeInSeconds += stepInSeconds;
+ myCurrentPositionInfo.ppqPosition += (stepInSeconds / 60.) * myCurrentPositionInfo.bpm;
}
myCurrentPositionInfo.isPlaying = false;
myCurrentPositionInfo.isRecording = false;
+
+ // restore the record-enable of the last processor.
+ if (m_stringDag.size()) {
+
+ auto node = myMainProcessorGraph->getNodeForId(m_UniqueNameToNodeID[m_stringDag.at(m_stringDag.size() - 1).first]);
+ auto processor = dynamic_cast (node->getProcessor());
+
+ if (processor) {
+ processor->setRecordEnable(lastProcessorRecordEnable);
+ }
+ }
return true;
}
@@ -235,7 +265,25 @@ void RenderEngine::setBPM(double bpm) {
throw std::runtime_error("BPM must be positive.");
return;
}
- myBPM = bpm;
+ bpmAutomation.setSize(1, 1);
+ bpmAutomation.setSample(0, 0, bpm);
+}
+
+bool RenderEngine::setBPMwithPPQN(py::array_t input, std::uint32_t ppqn) {
+
+ if (ppqn <= 0) {
+ throw std::runtime_error("The BPM's PPQN cannot be less than or equal to zero.");
+ }
+
+ myBPMPPQN = ppqn;
+
+ auto numSamples = input.shape(0);
+
+ bpmAutomation.setSize(1, numSamples);
+
+ bpmAutomation.copyFrom(0, 0, (float*)input.data(), numSamples);
+
+ return true;
}
py::array_t
diff --git a/Source/RenderEngine.h b/Source/RenderEngine.h
index 7b183574..4b98df5f 100644
--- a/Source/RenderEngine.h
+++ b/Source/RenderEngine.h
@@ -3,14 +3,15 @@
#include "../JuceLibraryCode/JuceHeader.h"
#include "custom_pybind_wrappers.h"
#include "CustomParameters.h"
+
+#include "ProcessorBase.h"
+
#include
#include
#include
#include
#include
-class ProcessorBase;
-
class DAGNode {
public:
ProcessorBase* processorBase;
@@ -28,11 +29,15 @@ class RenderEngine : AudioPlayHead
RenderEngine(double sr, int bs);
bool loadGraph(DAG dagNodes);
+
+ bool removeProcessor(const std::string& name);
bool render (const double renderLength);
void setBPM(double bpm);
+ bool setBPMwithPPQN(py::array_t input, std::uint32_t ppqn);
+
py::array_t getAudioFrames();
py::array_t getAudioFramesForName(std::string& name);
@@ -47,7 +52,6 @@ class RenderEngine : AudioPlayHead
double mySampleRate;
int myBufferSize;
- double myBPM = 120.;
std::unordered_map m_UniqueNameToNodeID;
bool connectGraph();
@@ -59,4 +63,8 @@ class RenderEngine : AudioPlayHead
private:
CurrentPositionInfo myCurrentPositionInfo;
+ AudioSampleBuffer bpmAutomation;
+ std::uint32_t myBPMPPQN = 960;
+
+ float getBPM(double ppqPosition);
};
diff --git a/Source/RenderEngineWrapper.cpp b/Source/RenderEngineWrapper.cpp
index f85b6d00..c43af770 100644
--- a/Source/RenderEngineWrapper.cpp
+++ b/Source/RenderEngineWrapper.cpp
@@ -10,10 +10,9 @@ RenderEngineWrapper::RenderEngineWrapper(double sr, int bs) :
void
RenderEngineWrapper::prepareProcessor(ProcessorBase* processor, const std::string& name)
{
- if (m_UniqueNameToNodeID.find(name) != m_UniqueNameToNodeID.end()) {
- myMainProcessorGraph->removeNode(m_UniqueNameToNodeID[name]);
- m_UniqueNameToNodeID.erase(name);
- }
+ if (this->removeProcessor(name)) {
+ // todo: maybe warn the user that a processor was removed.
+ };
auto node = myMainProcessorGraph->addNode((std::unique_ptr)(processor));
m_UniqueNameToNodeID[name] = node->nodeID;
diff --git a/Source/RenderEngineWrapper.h b/Source/RenderEngineWrapper.h
index ffc9e310..94e184b8 100644
--- a/Source/RenderEngineWrapper.h
+++ b/Source/RenderEngineWrapper.h
@@ -1,9 +1,22 @@
#pragma once
#include "RenderEngine.h"
-#include "AllProcessors.h"
#include "custom_pybind_wrappers.h"
+#include "ProcessorBase.h"
+#include "AddProcessor.h"
+#include "CompressorProcessor.h"
+#include "DelayProcessor.h"
+#include "FaustProcessor.h"
+#include "FilterProcessor.h"
+#include "OscillatorProcessor.h"
+#include "PlaybackProcessor.h"
+#include "PlaybackWarpProcessor.h"
+#include "PluginProcessor.h"
+#include "ReverbProcessor.h"
+#include "PannerProcessor.h"
+#include "SamplerProcessor.h"
+
class RenderEngineWrapper : public RenderEngine
{
public:
diff --git a/Source/ReverbProcessor.h b/Source/ReverbProcessor.h
index 902a0a5c..e52c9d00 100644
--- a/Source/ReverbProcessor.h
+++ b/Source/ReverbProcessor.h
@@ -43,11 +43,11 @@ class ReverbProcessor : public ProcessorBase
AudioPlayHead::CurrentPositionInfo posInfo;
getPlayHead()->getCurrentPosition(posInfo);
- *myRoomSize = getAutomationVal("room_size", posInfo.timeInSamples);
- *myDamping = getAutomationVal("damping", posInfo.timeInSamples);
- *myDryLevel = getAutomationVal("dry_level", posInfo.timeInSamples);
- *myWetLevel = getAutomationVal("wet_level", posInfo.timeInSamples);
- *myWidth = getAutomationVal("width", posInfo.timeInSamples);
+ *myRoomSize = getAutomationVal("room_size", posInfo);
+ *myDamping = getAutomationVal("damping", posInfo);
+ *myDryLevel = getAutomationVal("dry_level", posInfo);
+ *myWetLevel = getAutomationVal("wet_level", posInfo);
+ *myWidth = getAutomationVal("width", posInfo);
updateParameters();
}
@@ -59,19 +59,19 @@ class ReverbProcessor : public ProcessorBase
const juce::String getName() { return "ReverbProcessor"; };
void setRoomSize(float roomSize) { setAutomationVal("room_size", roomSize); }
- float getRoomSize() { return getAutomationVal("room_size", 0); }
+ float getRoomSize() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("room_size", posInfo); }
void setDamping(float damping) { setAutomationVal("damping", damping); }
- float getDamping() { return getAutomationVal("damping", 0); }
+ float getDamping() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("damping", posInfo); }
void setWetLevel(float wetLevel) { setAutomationVal("wet_level", wetLevel); }
- float getWetLevel() { return getAutomationVal("wet_level", 0); }
+ float getWetLevel() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("wet_level", posInfo); }
void setDryLevel(float dryLevel) { setAutomationVal("dry_level", dryLevel); }
- float getDryLevel() { return getAutomationVal("dry_level", 0); }
+ float getDryLevel() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("dry_level", posInfo); }
void setWidth(float width) { setAutomationVal("width", width); }
- float getWidth() { return getAutomationVal("width", 0); }
+ float getWidth() { AudioPlayHead::CurrentPositionInfo posInfo; return getAutomationVal("width", posInfo); }
private:
diff --git a/Source/SamplerProcessor.h b/Source/SamplerProcessor.h
index 28556fcf..1dd9f8bb 100644
--- a/Source/SamplerProcessor.h
+++ b/Source/SamplerProcessor.h
@@ -2,6 +2,7 @@
#include "ProcessorBase.h"
#include "../Source/Sampler/Source/SamplerAudioProcessor.h"
+#include
class SamplerProcessor : public ProcessorBase
{
@@ -22,7 +23,10 @@ class SamplerProcessor : public ProcessorBase
setMainBusInputsAndOutputs(0, input.shape(0));
}
- ~SamplerProcessor() {}
+ ~SamplerProcessor() {
+ delete myMidiIteratorSec;
+ delete myMidiIteratorQN;
+ }
bool acceptsMidi() const { return true; }
bool producesMidi() const { return true; }
@@ -37,7 +41,9 @@ class SamplerProcessor : public ProcessorBase
auto parameterName = sampler.getParameterName(parameterIndex);
- return ProcessorBase::getAutomationVal(parameterName.toStdString(), 0);
+ juce::AudioPlayHead::CurrentPositionInfo posInfo;
+
+ return ProcessorBase::getAutomationVal(parameterName.toStdString(), posInfo);
}
void
@@ -69,12 +75,16 @@ class SamplerProcessor : public ProcessorBase
void reset() {
sampler.reset();
- if (myMidiIterator) {
- delete myMidiIterator;
- }
+ delete myMidiIteratorSec;
+ myMidiIteratorSec = new MidiBuffer::Iterator(myMidiBufferSec); // todo: deprecated.
+
+ myMidiEventsDoRemainSec = myMidiIteratorSec->getNextEvent(myMidiMessageSec, myMidiMessagePositionSec);
+
+ delete myMidiIteratorQN;
+ myMidiIteratorQN = new MidiBuffer::Iterator(myMidiBufferQN); // todo: deprecated.
+
+ myMidiEventsDoRemainQN = myMidiIteratorQN->getNextEvent(myMidiMessageQN, myMidiMessagePositionQN);
- myMidiIterator = new MidiBuffer::Iterator(myMidiBuffer); // todo: deprecated.
- myMidiEventsDoRemain = myMidiIterator->getNextEvent(myMidiMessage, myMidiMessagePosition);
myRenderMidiBuffer.clear();
}
@@ -88,23 +98,34 @@ class SamplerProcessor : public ProcessorBase
buffer.clear(); // todo: why did this become necessary?
midiBuffer.clear();
+ myRenderMidiBuffer.clear();
- long long int start = posInfo.timeInSamples;
- long long int end = start + buffer.getNumSamples();
+ {
+ auto start = posInfo.timeInSamples;
+ auto end = start + buffer.getNumSamples();
+
+ myIsMessageBetweenSec = myMidiMessagePositionSec >= start && myMidiMessagePositionSec < end;
+ while (myIsMessageBetweenSec && myMidiEventsDoRemainSec) {
+ myRenderMidiBuffer.addEvent(myMidiMessageSec, int(myMidiMessagePositionSec - start));
+ myMidiEventsDoRemainSec = myMidiIteratorSec->getNextEvent(myMidiMessageSec, myMidiMessagePositionSec);
+ myIsMessageBetweenSec = myMidiMessagePositionSec >= start && myMidiMessagePositionSec < end;
+ }
+ }
- myIsMessageBetween = myMidiMessagePosition >= start && myMidiMessagePosition < end;
- do {
- if (myIsMessageBetween) {
- myRenderMidiBuffer.addEvent(myMidiMessage, int(myMidiMessagePosition - start));
- myMidiEventsDoRemain = myMidiIterator->getNextEvent(myMidiMessage, myMidiMessagePosition);
- myIsMessageBetween = myMidiMessagePosition >= start && myMidiMessagePosition < end;
+ {
+ auto pulseStart = std::floor(posInfo.ppqPosition * PPQN);
+ auto pulseEnd = pulseStart + buffer.getNumSamples() * (posInfo.bpm * PPQN) / (mySampleRate * 60.);
+
+ myIsMessageBetweenQN = myMidiMessagePositionQN >= pulseStart && myMidiMessagePositionQN < pulseEnd;
+ while (myIsMessageBetweenQN && myMidiEventsDoRemainQN) {
+ myRenderMidiBuffer.addEvent(myMidiMessageQN, int(myMidiMessagePositionQN - pulseStart));
+ myMidiEventsDoRemainQN = myMidiIteratorQN->getNextEvent(myMidiMessageQN, myMidiMessagePositionQN);
+ myIsMessageBetweenQN = myMidiMessagePositionQN >= pulseStart && myMidiMessagePositionQN < pulseEnd;
}
- } while (myIsMessageBetween && myMidiEventsDoRemain);
+ }
sampler.processBlock(buffer, myRenderMidiBuffer);
- myRenderMidiBuffer.clear();
-
ProcessorBase::processBlock(buffer, midiBuffer);
}
@@ -128,25 +149,54 @@ class SamplerProcessor : public ProcessorBase
int
getNumMidiEvents()
{
- return myMidiBuffer.getNumEvents();
+ return myMidiBufferSec.getNumEvents() + myMidiBufferQN.getNumEvents();
};
bool
- loadMidi(const std::string& path)
+ loadMidi(const std::string& path, bool clearPrevious, bool convertToSeconds, bool allEvents)
{
+
+ if (!std::filesystem::exists(path.c_str())) {
+ throw std::runtime_error("File not found: " + path);
+ }
+
File file = File(path);
FileInputStream fileStream(file);
MidiFile midiFile;
midiFile.readFrom(fileStream);
- midiFile.convertTimestampTicksToSeconds();
- myMidiBuffer.clear();
-
- for (int t = 0; t < midiFile.getNumTracks(); t++) {
- const MidiMessageSequence* track = midiFile.getTrack(t);
- for (int i = 0; i < track->getNumEvents(); i++) {
- MidiMessage& m = track->getEventPointer(i)->message;
- int sampleOffset = (int)(mySampleRate * m.getTimeStamp());
- myMidiBuffer.addEvent(m, sampleOffset);
+
+ if (clearPrevious) {
+ myMidiBufferSec.clear();
+ myMidiBufferQN.clear();
+ }
+
+ if (convertToSeconds) {
+ midiFile.convertTimestampTicksToSeconds();
+
+ for (int t = 0; t < midiFile.getNumTracks(); t++) {
+ const MidiMessageSequence* track = midiFile.getTrack(t);
+ for (int i = 0; i < track->getNumEvents(); i++) {
+ MidiMessage& m = track->getEventPointer(i)->message;
+ int sampleOffset = (int)(mySampleRate * m.getTimeStamp());
+ if (allEvents || m.isNoteOff() || m.isNoteOn()) {
+ myMidiBufferSec.addEvent(m, sampleOffset);
+ }
+ }
+ }
+ }
+ else {
+ auto timeFormat = midiFile.getTimeFormat(); // the ppqn (Ableton makes midi files with 96 ppqn)
+ for (int t = 0; t < midiFile.getNumTracks(); t++) {
+ const MidiMessageSequence* track = midiFile.getTrack(t);
+ for (int i = 0; i < track->getNumEvents(); i++) {
+ MidiMessage& m = track->getEventPointer(i)->message;
+
+ if (allEvents || m.isNoteOff() || m.isNoteOn()) {
+ // convert timestamp from its original time format to our high resolution PPQN
+ auto timeStamp = m.getTimeStamp() * PPQN / timeFormat;
+ myMidiBufferQN.addEvent(m, timeStamp);
+ }
+ }
}
}
@@ -155,14 +205,16 @@ class SamplerProcessor : public ProcessorBase
void
clearMidi() {
- myMidiBuffer.clear();
+ myMidiBufferSec.clear();
+ myMidiBufferQN.clear();
}
bool
addMidiNote(uint8 midiNote,
uint8 midiVelocity,
const double noteStart,
- const double noteLength) {
+ const double noteLength,
+ bool convert_to_sec) {
if (midiNote > 255) midiNote = 255;
if (midiNote < 0) midiNote = 0;
@@ -170,7 +222,6 @@ class SamplerProcessor : public ProcessorBase
if (midiVelocity < 0) midiVelocity = 0;
if (noteLength <= 0) {
throw std::runtime_error("The note length must be greater than zero.");
- return false;
}
// Get the note on midiBuffer.
@@ -182,11 +233,20 @@ class SamplerProcessor : public ProcessorBase
midiNote,
midiVelocity);
- auto startTime = noteStart * mySampleRate;
- onMessage.setTimeStamp(startTime);
- offMessage.setTimeStamp(startTime + noteLength * mySampleRate);
- myMidiBuffer.addEvent(onMessage, (int)onMessage.getTimeStamp());
- myMidiBuffer.addEvent(offMessage, (int)offMessage.getTimeStamp());
+ if (convert_to_sec) {
+ auto startTime = noteStart * mySampleRate;
+ onMessage.setTimeStamp(startTime);
+ offMessage.setTimeStamp(startTime + noteLength * mySampleRate);
+ myMidiBufferSec.addEvent(onMessage, (int)onMessage.getTimeStamp());
+ myMidiBufferSec.addEvent(offMessage, (int)offMessage.getTimeStamp());
+ }
+ else {
+ auto startTime = noteStart * PPQN;
+ onMessage.setTimeStamp(startTime);
+ offMessage.setTimeStamp(startTime + noteLength * PPQN);
+ myMidiBufferQN.addEvent(onMessage, (int)onMessage.getTimeStamp());
+ myMidiBufferQN.addEvent(offMessage, (int)offMessage.getTimeStamp());
+ }
return true;
}
@@ -270,7 +330,7 @@ class SamplerProcessor : public ProcessorBase
auto theParameter = ((AutomateParameterFloat*)myParameters.getParameter(theName));
if (theParameter) {
- sampler.setParameterRawNotifyingHost(i, theParameter->sample(posInfo.timeInSamples));
+ sampler.setParameterRawNotifyingHost(i, theParameter->sample(posInfo));
}
else {
std::cerr << "Error automateParameters: " << theName << std::endl;
@@ -286,11 +346,23 @@ class SamplerProcessor : public ProcessorBase
SamplerAudioProcessor sampler;
- MidiBuffer myMidiBuffer;
+ MidiBuffer myMidiBufferQN;
+ MidiBuffer myMidiBufferSec;
+
MidiBuffer myRenderMidiBuffer;
- MidiMessage myMidiMessage;
- int myMidiMessagePosition = -1;
- MidiBuffer::Iterator* myMidiIterator = nullptr;
- bool myIsMessageBetween = false;
- bool myMidiEventsDoRemain = false;
+
+ MidiMessage myMidiMessageQN;
+ MidiMessage myMidiMessageSec;
+
+ int myMidiMessagePositionQN = -1;
+ int myMidiMessagePositionSec = -1;
+
+ MidiBuffer::Iterator* myMidiIteratorQN = nullptr;
+ MidiBuffer::Iterator* myMidiIteratorSec = nullptr;
+
+ bool myIsMessageBetweenQN = false;
+ bool myIsMessageBetweenSec = false;
+
+ bool myMidiEventsDoRemainQN = false;
+ bool myMidiEventsDoRemainSec = false;
};
diff --git a/Source/source.cpp b/Source/source.cpp
index c95f944c..ca1e96da 100644
--- a/Source/source.cpp
+++ b/Source/source.cpp
@@ -30,15 +30,17 @@ PYBIND11_MODULE(dawdreamer, m)
)pbdoc";
py::class_(m, "ProcessorBase")
- .def("set_automation", &ProcessorBase::setAutomation, arg("parameter_name"), arg("data"), R"pbdoc(
+ .def("set_automation", &ProcessorBase::setAutomation, arg("parameter_name"), arg("data"), kw_only(), arg("ppqn")=0, R"pbdoc(
Set a parameter's automation with a numpy array.
Parameters
----------
parameter_name : str
The name of the parameter.
- data : str
+ data : np.array
An array of data for the parameter automation.
+ ppqn : integer
+ If specified, it is the pulses-per-quarter-note rate of the automation data. If not specified or zero, the data will be interpreted at audio rate.
Returns
-------
@@ -156,6 +158,9 @@ but the filter mode cannot under automation.";
"A list of gain levels to apply to the corresponding inputs.")
.doc() = "An Add Processor adds one or more stereo inputs with corresponding gain parameters.";
+ auto add_midi_description = "Add a single MIDI note whose note and velocity are integers between 0 and 127. By default, `convert_to_sec` is True, so the start_time and duration are measured in seconds. If `convert_to_sec` is False, they are measured in beats.";
+ auto load_midi_description = "Load MIDI from a file. If `all_events` is True, then all events (not just Note On/Off) will be loaded. By default, `convert_to_sec` is True, so notes will be converted to absolute times and will not be affected by the Render Engine's BPM. By default `clear_previous` is True.";
+
py::class_(m, "PluginProcessor")
.def("can_set_bus", &PluginProcessorWrapper::canApplyBusInputsAndOutputs, arg("inputs"), arg("outputs"), "Return bool for whether this combination of input and output channels can be set.")
.def("set_bus", &PluginProcessorWrapper::setMainBusInputsAndOutputs, arg("inputs"), arg("outputs"), "Set the number of input and output channels. An error will be thrown for an unaccepted option.")
@@ -171,18 +176,17 @@ but the filter mode cannot under automation.";
.def("get_parameter_text", &PluginProcessorWrapper::getParameterAsText, arg("index"), "Get a parameter's value as text.")
.def("set_parameter", &PluginProcessorWrapper::wrapperSetParameter, arg("index"), arg("value"),
"Set a parameter's value to a constant.")
- .def("set_automation", &PluginProcessorWrapper::wrapperSetAutomation, arg("parameter_index"), arg("data"),
+ .def("set_automation", &PluginProcessorWrapper::wrapperSetAutomation, arg("parameter_index"), arg("data"), kw_only(), arg("ppqn")=0,
"Set the automation based on its index.")
.def("get_plugin_parameter_size", &PluginProcessorWrapper::wrapperGetPluginParameterSize, "Get the number of parameters.")
.def("get_plugin_parameters_description", &PluginProcessorWrapper::getPluginParametersDescription,
"Get a list of dictionaries describing the plugin's parameters.")
.def_property_readonly("n_midi_events", &PluginProcessorWrapper::getNumMidiEvents, "The number of MIDI events stored in the buffer. \
Note that note-ons and note-offs are counted separately.")
- .def("load_midi", &PluginProcessorWrapper::loadMidi, arg("filepath"), kw_only(), arg("all_events")=true, "Load MIDI from a file. If `all_events` is True, then all events (not just Note On/Off) will be loaded.")
+ .def("load_midi", &PluginProcessorWrapper::loadMidi, arg("filepath"), kw_only(), arg("clear_previous")=true, arg("convert_to_sec")=true, arg("all_events")=true, load_midi_description)
.def("clear_midi", &PluginProcessorWrapper::clearMidi, "Remove all MIDI notes.")
.def("add_midi_note", &PluginProcessorWrapper::addMidiNote,
- arg("note"), arg("velocity"), arg("start_time"), arg("duration"),
- "Add a single MIDI note whose note and velocity are integers between 0 and 127.")
+ arg("note"), arg("velocity"), arg("start_time"), arg("duration"), kw_only(), arg("convert_to_sec")=true, add_midi_description)
.doc() = "A Plugin Processor can load VST \".dll\" and \".vst3\" files on Windows. It can load \".vst\", \".vst3\", and \".component\" files on macOS. The files can be for either instruments \
or effects. Some plugins such as ones that do sidechain compression can accept two inputs when loading a graph.";
@@ -198,11 +202,10 @@ or effects. Some plugins such as ones that do sidechain compression can accept t
"Get a list of dictionaries describing the plugin's parameters.")
.def_property_readonly("n_midi_events", &SamplerProcessor::getNumMidiEvents, "The number of MIDI events stored in the buffer. \
Note that note-ons and note-offs are counted separately.")
- .def("load_midi", &SamplerProcessor::loadMidi, arg("filepath"), "Load MIDI from a file.")
+ .def("load_midi", &SamplerProcessor::loadMidi, arg("filepath"), kw_only(), arg("clear_previous")=true, arg("convert_to_sec")=true, arg("all_events")=true, load_midi_description)
.def("clear_midi", &SamplerProcessor::clearMidi, "Remove all MIDI notes.")
.def("add_midi_note", &SamplerProcessor::addMidiNote,
- arg("note"), arg("velocity"), arg("start_time"), arg("duration"),
- "Add a single MIDI note whose note and velocity are integers between 0 and 127.")
+ arg("note"), arg("velocity"), arg("start_time"), arg("duration"), kw_only(), arg("convert_to_sec")=true, add_midi_description)
.doc() = "The Sampler Processor works like a basic Sampler instrument. It takes a typically short audio sample and can play it back \
at different pitches and speeds. It has parameters for an ADSR envelope controlling the amplitude and another for controlling a low-pass filter cutoff. \
Unlike a VST, the parameters don't need to be between 0 and 1. For example, you can set an envelope attack parameter to 50 to represent 50 milliseconds.";
@@ -220,6 +223,7 @@ Unlike a VST, the parameters don't need to be between 0 and 1. For example, you
.def("get_parameter", &FaustProcessor::getParamWithPath, arg("parameter_path"))
.def("set_parameter", &FaustProcessor::setParamWithIndex, arg("parameter_index"), arg("value"))
.def("set_parameter", &FaustProcessor::setAutomationVal, arg("parameter_path"), arg("value"))
+ .def("set_automation", &FaustProcessor::setAutomation, arg("parameter_name"), arg("data"), kw_only(), arg("ppqn") = 0)
.def_property_readonly("compiled", &FaustProcessor::isCompiled, "Did the most recent DSP code compile?")
.def_property_readonly("code", &FaustProcessor::code, "Get the most recently compiled Faust DSP code.")
.def_property("num_voices", &FaustProcessor::getNumVoices, &FaustProcessor::setNumVoices, "The number of voices for polyphony. Set to zero to disable polyphony. One or more enables polyphony.")
@@ -228,10 +232,10 @@ Unlike a VST, the parameters don't need to be between 0 and 1. For example, you
.def_property("faust_libraries_path", &FaustProcessor::getFaustLibrariesPath, &FaustProcessor::setFaustLibrariesPath, "Absolute path to directory containing your custom \".lib\" files containing Faust code.")
.def_property_readonly("n_midi_events", &FaustProcessor::getNumMidiEvents, "The number of MIDI events stored in the buffer. \
Note that note-ons and note-offs are counted separately.")
- .def("load_midi", &FaustProcessor::loadMidi, arg("filepath"), "Load MIDI from a file.")
+ .def("load_midi", &FaustProcessor::loadMidi, arg("filepath"), kw_only(), arg("clear_previous")=true, arg("convert_to_sec")=true, arg("all_events")=true, load_midi_description)
.def("clear_midi", &FaustProcessor::clearMidi, "Remove all MIDI notes.")
- .def("add_midi_note", &FaustProcessor::addMidiNote, arg("note"), arg("velocity"), arg("start_time"), arg("duration"),
- "Add a single MIDI note whose note and velocity are integers between 0 and 127.")
+ .def("add_midi_note", &FaustProcessor::addMidiNote,
+ arg("note"), arg("velocity"), arg("start_time"), arg("duration"), kw_only(), arg("convert_to_sec") = true, add_midi_description)
.def("set_soundfiles", &FaustProcessor::setSoundfiles, arg("soundfile_dict"), "Set the audio data that the FaustProcessor can use with the `soundfile` primitive.")
.doc() = "A Faust Processor can compile and execute FAUST code. See https://faust.grame.fr for more information.";
#endif
@@ -243,9 +247,11 @@ Note that note-ons and note-offs are counted separately.")
py::class_(m, "RenderEngine", "A Render Engine loads and runs a graph of audio processors.")
.def(py::init(), arg("sample_rate"), arg("block_size"))
.def("render", &RenderEngineWrapper::render, arg("seconds"), "Render the most recently loaded graph.")
- .def("set_bpm", &RenderEngineWrapper::setBPM, arg("bpm"), "Set the beats-per-minute of the engine.")
+ .def("set_bpm", &RenderEngineWrapper::setBPM, arg("bpm"), "Set the beats-per-minute of the engine as a constant rate.")
+ .def("set_bpm", &RenderEngineWrapper::setBPMwithPPQN, arg("bpm"), arg("ppqn"), "Set the beats-per-minute of the engine using a 1D numpy array and a constant PPQN. If the values in the array suddenly change every PPQN samples, the tempo change will occur \"on-the-beat.\"")
.def("get_audio", &RenderEngine::getAudioFrames, "Get the most recently rendered audio as a numpy array.")
.def("get_audio", &RenderEngine::getAudioFramesForName, arg("name"), "Get the most recently rendered audio for a specific processor.")
+ .def("remove_processor", &RenderEngine::removeProcessor, arg("name"), "Remove a processor based on its unique name. Existing Python references to the processor will become invalid.")
.def("load_graph", &RenderEngineWrapper::loadGraphWrapper, arg("dag"), "Load a directed acyclic graph of processors.")
.def("make_oscillator_processor", &RenderEngineWrapper::makeOscillatorProcessor, arg("name"), arg("frequency"),
"Make an Oscillator Processor", returnPolicy)
diff --git a/tests/dawdreamer_utils.py b/tests/dawdreamer_utils.py
index 100f27de..fbe4bd4e 100644
--- a/tests/dawdreamer_utils.py
+++ b/tests/dawdreamer_utils.py
@@ -2,6 +2,7 @@
from os import getenv
from os.path import abspath, isfile, isdir, basename, splitext
from pathlib import Path
+from itertools import product
import pathlib
import platform
import random
diff --git a/tests/test_faust_poly.py b/tests/test_faust_poly.py
index 33fec92c..1d3a2e15 100644
--- a/tests/test_faust_poly.py
+++ b/tests/test_faust_poly.py
@@ -1,124 +1,146 @@
from dawdreamer_utils import *
def _test_faust_poly(file_path, group_voices=True, num_voices=8, buffer_size=1, cutoff=None,
- automation=False, decay=None):
+ automation=False, decay=None):
- engine = daw.RenderEngine(SAMPLE_RATE, buffer_size)
+ engine = daw.RenderEngine(SAMPLE_RATE, buffer_size)
- dsp_path = abspath(FAUST_DSP / "polyphonic.dsp")
- faust_processor = engine.make_faust_processor("faust")
- faust_processor.set_dsp(dsp_path)
-
- # Group voices will affect the number of parameters.
- # True will result in fewer parameters because all voices will
- # share the same parameters.
- faust_processor.group_voices = group_voices
- faust_processor.num_voices = num_voices
-
- faust_processor.compile()
+ dsp_path = abspath(FAUST_DSP / "polyphonic.dsp")
+ faust_processor = engine.make_faust_processor("faust")
+ faust_processor.set_dsp(dsp_path)
+
+ # Group voices will affect the number of parameters.
+ # True will result in fewer parameters because all voices will
+ # share the same parameters.
+ faust_processor.group_voices = group_voices
+ faust_processor.num_voices = num_voices
+
+ faust_processor.compile()
- # for par in faust_processor.get_parameters_description():
- # print(par)
+ # for par in faust_processor.get_parameters_description():
+ # print(par)
- # (MIDI note, velocity, start sec, duration sec)
- faust_processor.add_midi_note(60, 60, 0.0, .25)
- faust_processor.add_midi_note(64, 80, 0.5, .5)
- faust_processor.add_midi_note(67, 127, 0.75, .5)
+ # (MIDI note, velocity, start sec, duration sec)
+ faust_processor.add_midi_note(60, 60, 0.0, .25)
+ faust_processor.add_midi_note(64, 80, 0.5, .5)
+ faust_processor.add_midi_note(67, 127, 0.75, .5)
- assert(faust_processor.n_midi_events == 3*2) # multiply by 2 because of the off-notes.
+ assert(faust_processor.n_midi_events == 3*2) # multiply by 2 because of the off-notes.
- if cutoff is not None:
- faust_processor.set_parameter("/Sequencer/DSP2/MyInstrument/cutoff", cutoff)
- elif automation:
- faust_processor.set_automation("/Sequencer/DSP2/MyInstrument/cutoff", 5000+4900*make_sine(30, 10.))
+ if cutoff is not None:
+ faust_processor.set_parameter("/Sequencer/DSP2/MyInstrument/cutoff", cutoff)
+ elif automation:
+ faust_processor.set_automation("/Sequencer/DSP2/MyInstrument/cutoff", 5000+4900*make_sine(30., 10.))
- if decay is not None:
- if group_voices:
- faust_processor.set_parameter("/Sequencer/DSP1/Polyphonic/Voices/MyInstrument/decay", decay)
- else:
- for i in range(1, num_voices+1):
- faust_processor.set_parameter(f"/Sequencer/DSP1/Polyphonic/V{i}/MyInstrument/decay", decay)
+ if decay is not None:
+ if group_voices:
+ faust_processor.set_parameter("/Sequencer/DSP1/Polyphonic/Voices/MyInstrument/decay", decay)
+ else:
+ for i in range(1, num_voices+1):
+ faust_processor.set_parameter(f"/Sequencer/DSP1/Polyphonic/V{i}/MyInstrument/decay", decay)
- # for par in faust_processor.get_parameters_description():
- # print(par)
+ # for par in faust_processor.get_parameters_description():
+ # print(par)
- graph = [
- (faust_processor, [])
- ]
+ graph = [
+ (faust_processor, [])
+ ]
- engine.load_graph(graph)
+ engine.load_graph(graph)
- render(engine, file_path=file_path, duration=3.)
+ render(engine, file_path=file_path, duration=3.)
- return engine.get_audio()
+ return engine.get_audio()
def test_faust_poly():
- audio1 = _test_faust_poly(OUTPUT / 'test_faust_poly_grouped.wav', group_voices=True)
- audio2 = _test_faust_poly(OUTPUT / 'test_faust_poly_ungrouped.wav', group_voices=False)
+ audio1 = _test_faust_poly(OUTPUT / 'test_faust_poly_grouped.wav', group_voices=True)
+ audio2 = _test_faust_poly(OUTPUT / 'test_faust_poly_ungrouped.wav', group_voices=False)
- assert np.allclose(audio1, audio2)
+ assert np.allclose(audio1, audio2)
- audio1 = _test_faust_poly(OUTPUT / 'test_faust_poly_2k_cutoff_grouped.wav', group_voices=True, cutoff=2000)
- audio2 = _test_faust_poly(OUTPUT / 'test_faust_poly_2k_cutoff_ungrouped.wav', group_voices=False, cutoff=2000)
+ audio1 = _test_faust_poly(OUTPUT / 'test_faust_poly_2k_cutoff_grouped.wav', group_voices=True, cutoff=2000)
+ audio2 = _test_faust_poly(OUTPUT / 'test_faust_poly_2k_cutoff_ungrouped.wav', group_voices=False, cutoff=2000)
- assert np.allclose(audio1, audio2)
+ assert np.allclose(audio1, audio2)
- audio1 = _test_faust_poly(OUTPUT / 'test_faust_poly_automation_cutoff_grouped.wav', group_voices=True, automation=True)
- audio2 = _test_faust_poly(OUTPUT / 'test_faust_poly_automation_cutoff_ungrouped.wav', group_voices=False, automation=True)
+ audio1 = _test_faust_poly(OUTPUT / 'test_faust_poly_automation_cutoff_grouped.wav', group_voices=True, automation=True)
+ audio2 = _test_faust_poly(OUTPUT / 'test_faust_poly_automation_cutoff_ungrouped.wav', group_voices=False, automation=True)
- assert np.allclose(audio1, audio2)
+ assert np.allclose(audio1, audio2)
- audio1 = _test_faust_poly(OUTPUT / 'test_faust_poly_automation_decay_grouped.wav', group_voices=True, decay=.5)
- audio2 = _test_faust_poly(OUTPUT / 'test_faust_poly_automation_decay_ungrouped.wav', group_voices=False, decay=.5)
+ audio1 = _test_faust_poly(OUTPUT / 'test_faust_poly_decay_grouped.wav', group_voices=True, decay=.5)
+ audio2 = _test_faust_poly(OUTPUT / 'test_faust_poly_decay_ungrouped.wav', group_voices=False, decay=.5)
- assert np.allclose(audio1, audio2)
+ assert np.allclose(audio1, audio2)
-@pytest.mark.parametrize("midi_path", [abspath(ASSETS / 'MIDI-Unprocessed_SMF_02_R1_2004_01-05_ORIG_MID--AUDIO_02_R1_2004_05_Track05_wav.midi')])
-def test_faust_sine(midi_path: str, buffer_size=1):
+@pytest.mark.parametrize("midi_path,bpm_automation,convert_to_sec,buffer_size",
+ product(
+ [abspath(ASSETS / 'MIDI-Unprocessed_SMF_02_R1_2004_01-05_ORIG_MID--AUDIO_02_R1_2004_05_Track05_wav.midi')],
+ [False, True],
+ [False, True],
+ [1, 128]
+ )
+)
+def test_faust_sine(midi_path: str, bpm_automation: bool, convert_to_sec: bool, buffer_size: int):
engine = daw.RenderEngine(SAMPLE_RATE, buffer_size)
+ duration = 10.
+
+ ppqn = 960
+
+ if bpm_automation:
+ bpm_data = 120.+60.*make_sine(1./3., duration*10., sr=ppqn)
+ engine.set_bpm(bpm_data, ppqn=ppqn)
+
faust_processor = engine.make_faust_processor("faust")
faust_processor.num_voices = 16
faust_processor.group_voices = True
faust_processor.release_length = .5 # note that this is the maximum of the "release" hslider below
faust_processor.set_dsp_string(
- f"""
- declare name "MyInstrument";
+ f"""
+ declare name "MyInstrument";
- declare options "[nvoices:8]"; // FaustProcessor has a property which will override this.
- import("stdfaust.lib");
+ declare options "[nvoices:8]"; // FaustProcessor has a property which will override this.
+ import("stdfaust.lib");
- freq = hslider("freq",200,50,1000,0.01); // note pitch
- gain = hslider("gain",0.1,0,1,0.01); // note velocity
- gate = button("gate"); // note on/off
+ freq = hslider("freq",200,50,1000,0.01); // note pitch
+ gain = hslider("gain",0.1,0,1,0.01); // note velocity
+ gate = button("gate"); // note on/off
- attack = hslider("attack", .002, 0.001, 10., 0.);
- decay = hslider("decay", .05, 0.001, 10., 0.);
- sustain = hslider("sustain", 1.0, 0.0, 1., 0.);
- release = hslider("release", .05, 0.001, {faust_processor.release_length}, 0.);
+ attack = hslider("attack", .002, 0.001, 10., 0.);
+ decay = hslider("decay", .05, 0.001, 10., 0.);
+ sustain = hslider("sustain", 1.0, 0.0, 1., 0.);
+ release = hslider("release", .05, 0.001, {faust_processor.release_length}, 0.);
- envVol = 0.35*gain*en.adsr(attack, decay, sustain, release, gate);
+ envVol = 0.35*gain*en.adsr(attack, decay, sustain, release, gate);
- process = os.osc(freq)*envVol <: _, _;
- effect = _, _;
- """
- )
+ process = os.osc(freq)*envVol <: _, _;
+ effect = _, _;
+ """
+ )
faust_processor.compile()
# desc = faust_processor.get_parameters_description()
# for par in desc:
# print(par)
- faust_processor.load_midi(midi_path)
+ faust_processor.load_midi(midi_path, convert_to_sec=convert_to_sec, clear_previous=True, all_events=False)
graph = [
(faust_processor, [])
]
engine.load_graph(graph)
- render(engine, file_path=OUTPUT / ('test_faust_sine_' + splitext(basename(midi_path))[0] + '.wav'), duration=10.)
+ file_path = OUTPUT / ''.join([
+ 'test_faust_sine_',
+ 'bpm_' if bpm_automation else '',
+ 'conv_' if convert_to_sec else '',
+ f'bs_{buffer_size}_',
+ splitext(basename(midi_path))[0],
+ '.wav'])
+ render(engine, file_path=file_path, duration=duration)
audio = engine.get_audio()
assert(np.mean(np.abs(audio)) > .0001)
diff --git a/tests/test_faust_poly_sampler.py b/tests/test_faust_poly_sampler.py
index 08888e35..0faa284f 100644
--- a/tests/test_faust_poly_sampler.py
+++ b/tests/test_faust_poly_sampler.py
@@ -1,17 +1,20 @@
from dawdreamer_utils import *
-BUFFER_SIZE = 1024
+BUFFER_SIZE = 128
-@pytest.mark.parametrize("sample_seq,output_path", [
- (load_audio_file(ASSETS / "60988__folktelemetry__crash-fast-14.wav"), "test_faust_poly_sampler_cymbal.wav")
+@pytest.mark.parametrize("audio_file_path,output_path,convert_to_sec", [
+ (ASSETS / "60988__folktelemetry__crash-fast-14.wav", "test_faust_poly_sampler_cymbal.wav", False),
+ (ASSETS / "60988__folktelemetry__crash-fast-14.wav", "test_faust_poly_sampler_cymbal_convert_to_sec.wav", True)
])
-def test_faust_poly_sampler(sample_seq, output_path, lagrange_order=4):
+def test_faust_poly_sampler(audio_file_path: str, output_path: str, convert_to_sec: bool, lagrange_order=4):
engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
faust_processor = engine.make_faust_processor("faust")
faust_processor.num_voices = 8
+ sample_seq = load_audio_file(audio_file_path)
+
# set_soundfiles
if sample_seq.ndim == 1:
sample_seq = sample_seq.reshape(1, -1)
@@ -37,20 +40,16 @@ def test_faust_poly_sampler(sample_seq, output_path, lagrange_order=4):
# print(par)
# (MIDI note, velocity, start sec, duration sec)
- faust_processor.add_midi_note(60, 60, 0.0, .25)
- faust_processor.add_midi_note(64, 80, 0.5, .5)
- faust_processor.add_midi_note(67, 127, 0.75, .5)
-
- assert(faust_processor.n_midi_events == 3*2) # multiply by 2 because of the off-notes.
+ faust_processor.add_midi_note(60, 60, 0.0, .25, convert_to_sec=convert_to_sec)
+ faust_processor.add_midi_note(64, 80, 0.5, .5, convert_to_sec=convert_to_sec)
+ faust_processor.add_midi_note(67, 127, 0.75, .5, convert_to_sec=convert_to_sec)
graph = [
(faust_processor, [])
]
engine.load_graph(graph)
-
render(engine, file_path=OUTPUT / output_path, duration=3.)
-
# check that it's non-silent
audio = engine.get_audio()
- assert(np.mean(np.abs(audio)) > .01)
\ No newline at end of file
+ assert(np.mean(np.abs(audio)) > .001)
\ No newline at end of file
diff --git a/tests/test_faust_processor.py b/tests/test_faust_processor.py
index 481f79b8..8ce7cb8f 100644
--- a/tests/test_faust_processor.py
+++ b/tests/test_faust_processor.py
@@ -55,6 +55,33 @@ def test_faust_passthrough(duration, buffer_size):
assert np.allclose(data, audio, atol=1e-07)
+def test_faust_automation_tempo():
+
+ duration = 10.
+
+ engine = daw.RenderEngine(SAMPLE_RATE, 128)
+
+ faust_processor = engine.make_faust_processor("faust")
+ faust_processor.set_dsp_string(
+ """
+ declare name "MyInstrument";
+ import("stdfaust.lib"); process = hslider("freq", 440., 0., 15000., 0.) : os.osc : _*.4 <: _, _;
+ """)
+
+ ppqn = 960
+ automation = make_sine(4, duration, sr=ppqn)
+ automation = 220.+220.*(automation > 0).astype(np.float32)
+ faust_processor.set_automation("/MyInstrument/freq", automation, ppqn=ppqn)
+ # print(faust_processor.get_parameters_description())
+
+ graph = [
+ (faust_processor, [])
+ ]
+
+ engine.load_graph(graph)
+
+ render(engine, file_path=OUTPUT / 'test_faust_automation_tempo.wav', duration=duration)
+
@pytest.mark.parametrize("duration,buffer_size", product(
[(44099.5/44100), (44100.5/44100), 1., (4./44100)],
[1, 2, 4, 8, 16, 128, 2048]))
diff --git a/tests/test_playbackwarp_processor.py b/tests/test_playbackwarp_processor.py
index f26cdd7f..325463e9 100644
--- a/tests/test_playbackwarp_processor.py
+++ b/tests/test_playbackwarp_processor.py
@@ -1,14 +1,19 @@
from dawdreamer_utils import *
-BUFFER_SIZE = 1
-def test_playbackwarp_processor1():
+@pytest.mark.parametrize("buffer_size", [1, 2048])
+def test_playbackwarp_processor1(buffer_size: int):
- DURATION = 10.
+ DURATION = 15.
+
+ engine = daw.RenderEngine(SAMPLE_RATE, buffer_size)
- engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
+ ppqn = 960
- engine.set_bpm(140.)
+ full_measure = ppqn * 4 # four beats is a measure
+ bpm = np.concatenate([140*np.ones(full_measure), 70.*np.ones(full_measure)])
+ bpm = np.tile(bpm, (100))
+ engine.set_bpm(bpm, ppqn=ppqn)
drums = engine.make_playbackwarp_processor("drums",
load_audio_file(ASSETS / "Music Delta - Disco" / "drums.wav", duration=DURATION))
@@ -54,28 +59,26 @@ def test_playbackwarp_processor1():
]
engine.load_graph(graph)
-
- render(engine, file_path=OUTPUT / 'test_playbackwarp_processor1a.wav')
+ render(engine, file_path=OUTPUT / 'test_playbackwarp_processor1a.wav', duration=DURATION)
assert(np.mean(np.abs(engine.get_audio())) > .01)
other.transpose = 2.
-
- render(engine, file_path=OUTPUT / 'test_playbackwarp_processor1b.wav')
+ render(engine, file_path=OUTPUT / 'test_playbackwarp_processor1b.wav', duration=DURATION)
assert(np.mean(np.abs(engine.get_audio())) > .01)
other.set_automation('transpose', make_sine(1., DURATION))
-
- render(engine, file_path=OUTPUT / 'test_playbackwarp_processor1c.wav')
+ render(engine, file_path=OUTPUT / 'test_playbackwarp_processor1c.wav', duration=DURATION)
assert(np.mean(np.abs(engine.get_audio())) > .01)
-def test_playbackwarp_processor2():
+@pytest.mark.parametrize("buffer_size", [1])
+def test_playbackwarp_processor2(buffer_size: int):
DURATION = 10.
- engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
+ engine = daw.RenderEngine(SAMPLE_RATE, buffer_size)
# Pick 120 because it's easy to analyze for timing in Audacity.
# 1 second is two quarter notes.
@@ -120,8 +123,8 @@ def test_playbackwarp_processor2():
assert(np.mean(np.abs(engine.get_audio())) > .01)
-
-def test_playbackwarp_processor3():
+@pytest.mark.parametrize("buffer_size", [1])
+def test_playbackwarp_processor3(buffer_size: int):
"""
Test using the playback warp processor without a clip file and therefore without warping.
@@ -132,7 +135,7 @@ def test_playbackwarp_processor3():
DURATION = 3.
- engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
+ engine = daw.RenderEngine(SAMPLE_RATE, buffer_size)
drum_audio = load_audio_file(ASSETS / "575854__yellowtree__d-b-funk-loop.wav")
drum_audio = drum_audio[:, int(SAMPLE_RATE*.267):] # manually trim to beginning
@@ -169,8 +172,8 @@ def test_playbackwarp_processor3():
assert(np.mean(np.abs(audio)) > .01)
-
-def test_playbackwarp_processor4():
+@pytest.mark.parametrize("buffer_size", [1])
+def test_playbackwarp_processor4(buffer_size: int):
"""
Test using the playback warp processor without a clip file and therefore without warping.
@@ -180,7 +183,7 @@ def test_playbackwarp_processor4():
DURATION = 10.
- engine = daw.RenderEngine(SAMPLE_RATE, BUFFER_SIZE)
+ engine = daw.RenderEngine(SAMPLE_RATE, buffer_size)
drum_audio = load_audio_file(ASSETS / "Music Delta - Disco" / "drums.wav")
drum_audio = drum_audio[:, int(SAMPLE_RATE*.267):] # manually trim to beginning
diff --git a/tests/test_plugins.py b/tests/test_plugins.py
index c2d0f8c4..c029af37 100644
--- a/tests/test_plugins.py
+++ b/tests/test_plugins.py
@@ -264,9 +264,10 @@ def test_plugin_upright_piano():
@pytest.mark.parametrize("plugin_path",
[
+ "C:/VSTPlugIns/TAL-NoiseMaker-64.vst3",
"C:/VSTPlugIns/LABS (64 Bit).dll",
- # "C:/VSTPlugIns/TAL-NoiseMaker-64.vst3",
- # "C:/VSTPlugIns/sparta/sparta_ambiBIN.dll",
+ "C:/VSTPlugIns/Kontakt.dll",
+ # "C:/VSTPlugIns/Kontakt.vst3", # not working
]
)
def test_plugin_editor(plugin_path: str):
@@ -299,20 +300,25 @@ def load_help(processor, filepath: str):
DURATION = 5.
- plugin_basename = splitext(basename(plugin_path))[0]
+ plugin_basename = basename(plugin_path)
engine = daw.RenderEngine(SAMPLE_RATE, 128)
synth = engine.make_plugin_processor("synth", plugin_path)
plat_system = platform.system()
- state_file_path = abspath(OUTPUT / (f'state_test_plugin_{plat_system}_{plugin_basename}'))
+ state_file_path = abspath(OUTPUT / (f'state_test_plugin_{plat_system}_{plugin_basename}.bin'))
+
+ # from time import sleep
+ # sleep(.5)
load_help(synth, state_file_path)
+ # sleep(.5)
+
# print(synth.get_plugin_parameters_description())
- print('inputs: ', synth.get_num_input_channels(), ' outputs: ', synth.get_num_output_channels())
+ print('synth: ', plugin_basename, ' inputs: ', synth.get_num_input_channels(), ' outputs: ', synth.get_num_output_channels())
# assert(synth.get_num_input_channels() == 0)
# assert(synth.get_num_output_channels() == 2)
@@ -321,10 +327,23 @@ def load_help(processor, filepath: str):
synth.add_midi_note(60, 60, 0.0, .25)
synth.add_midi_note(64, 80, 0.5, .5)
synth.add_midi_note(67, 127, 0.75, .5)
+ synth.add_midi_note(48, 80, 1.5, .5)
+ synth.add_midi_note(36, 80, 2.0, .5)
- assert(synth.n_midi_events == 3*2) # multiply by 2 because of the off-notes.
+ assert(synth.n_midi_events == 5*2) # multiply by 2 because of the off-notes.
- engine.load_graph([(synth, [])])
+ graph = [(synth, [])]
+
+ num_outputs = synth.get_num_output_channels()
+
+ if num_outputs > 2:
+ # Use faust to "fan-in" the number of channels from something larger than 2 to 2.
+ faust_processor = engine.make_faust_processor("faust")
+ # num_outputs was already computed above; no need to query the synth again.
+ faust_processor.set_dsp_string(f"process = si.bus({num_outputs}) :> si.bus(2);")
+ graph.append((faust_processor, ["synth"]))
+
+ engine.load_graph(graph)
render(engine, file_path=OUTPUT / (f'test_plugin_{plugin_basename}.wav'), duration=DURATION)
@@ -347,9 +366,9 @@ def test_plugin_iem(plugin_path1="C:/VSTPlugIns/IEMPluginSuite/VST2/IEM/MultiEnc
ambisonics_encoder.record = True
ambisonics_decoder.record = True
- plugin_basename = splitext(basename(plugin_path1))[0]
+ plugin_basename = basename(plugin_path1)
- state_file_path = abspath(OUTPUT / (f'state_test_plugin_{plugin_basename}'))
+ state_file_path = abspath(OUTPUT / (f'state_test_plugin_{plugin_basename}.bin'))
if isfile(state_file_path):
ambisonics_encoder.load_state(state_file_path)
@@ -375,8 +394,8 @@ def test_plugin_iem(plugin_path1="C:/VSTPlugIns/IEMPluginSuite/VST2/IEM/MultiEnc
# print(ambisonics_encoder.get_plugin_parameters_description())
# print('inputs: ', ambisonics_encoder.get_num_input_channels(), ' outputs: ', ambisonics_encoder.get_num_output_channels())
- plugin_basename = splitext(basename(plugin_path2))[0]
- state_file_path = abspath(OUTPUT / (f'state_test_plugin_{plugin_basename}'))
+ plugin_basename = basename(plugin_path2)
+ state_file_path = abspath(OUTPUT / (f'state_test_plugin_{plugin_basename}.bin'))
if isfile(state_file_path):
ambisonics_decoder.load_state(state_file_path)
diff --git a/thirdparty/rubberband b/thirdparty/rubberband
index a78dcaf4..4cff1741 160000
--- a/thirdparty/rubberband
+++ b/thirdparty/rubberband
@@ -1 +1 @@
-Subproject commit a78dcaf4c8efe6bb95432b5dfc2567c9018fd5a1
+Subproject commit 4cff1741664918791474da9a9d41f48cc3119229