Merge branch 'pre-production' into production
tremblap committed Jan 7, 2025
2 parents 60c944d + f89e3e4 commit 7dfcc22
Showing 15 changed files with 85 additions and 61 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -26,7 +26,7 @@ if(hasParent)
 endif()
 
 if(APPLE)
-set(CMAKE_OSX_DEPLOYMENT_TARGET "10.8" CACHE STRING "")
+set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9" CACHE STRING "")
 #A consequence of targetting 10.8. Needs to be set globally from 10.15 onwards in order for the test program to compile successfully during configure
 string(APPEND CMAKE_CXX_FLAGS " -stdlib=libc++")
 endif()
2 changes: 1 addition & 1 deletion FlucomaVersion.cmake
@@ -14,7 +14,7 @@ find_package(Git REQUIRED)
 
 set(flucoma_VERSION_MAJOR 1)
 set(flucoma_VERSION_MINOR 0)
-set(flucoma_VERSION_PATCH 7)
+set(flucoma_VERSION_PATCH 8)
 set(flucoma_VERSION_SUFFIX "")
 
 function(make_flucoma_version_string output_variable)
1 change: 1 addition & 0 deletions docs/requirements.txt
@@ -2,3 +2,4 @@ sphinx
 breathe
 myst-parser
 sphinx-book-theme
+schema
6 changes: 3 additions & 3 deletions include/algorithms/public/SKMeans.hpp
@@ -33,7 +33,8 @@ class SKMeans : public KMeans
 using namespace Eigen;
 using namespace _impl;
 assert(!mTrained || (dataset.pointSize() == mDims && mK == k));
-MatrixXd dataPoints = asEigen<Matrix>(dataset.getData());
+MatrixXd dataPoints =
+asEigen<Matrix>(dataset.getData()).rowwise().normalized();
 MatrixXd dataPointsT = dataPoints.transpose();
 if (mTrained) { mAssignments = assignClusters(dataPointsT);}
 else
@@ -87,9 +88,8 @@ class SKMeans : public KMeans
 {
 for (index i = 0; i < mAssignments.cols(); i++)
 {
-double val = mEmbedding(mAssignments(i), i);
 mEmbedding.col(i).setZero();
-mEmbedding(mAssignments(i), i) = val;
+mEmbedding(mAssignments(i), i) = 1.0;
 }
 }
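Note on the SKMeans change: the input is now L2-normalised row by row before training or assignment, and each point's column in the embedding becomes a hard one-hot assignment (1.0 at the winning cluster) instead of keeping whatever value was previously stored there. A minimal Eigen sketch of that encoding step, with invented names rather than the FluCoMa classes:

#include <Eigen/Dense>

// Given raw data (N x dims) and centroids (K x dims, assumed unit-norm),
// return a K x N one-hot embedding: 1.0 in the row of the nearest centroid
// by cosine similarity, 0.0 elsewhere.
Eigen::MatrixXd sphericalOneHot(const Eigen::MatrixXd& data,
                                const Eigen::MatrixXd& centroids)
{
  Eigen::MatrixXd unit = data.rowwise().normalized();   // project points onto the unit sphere
  Eigen::MatrixXd sims = centroids * unit.transpose();  // cosine similarities, K x N
  Eigen::MatrixXd embedding = Eigen::MatrixXd::Zero(sims.rows(), sims.cols());
  for (Eigen::Index i = 0; i < sims.cols(); ++i)
  {
    Eigen::Index best = 0;
    sims.col(i).maxCoeff(&best);   // winning cluster for point i
    embedding(best, i) = 1.0;      // hard assignment, as in the diff above
  }
  return embedding;
}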
25 changes: 17 additions & 8 deletions include/algorithms/util/FFT.hpp
@@ -14,6 +14,7 @@ under the European Union’s Horizon 2020 research and innovation programme
 #include "../../data/FluidMemory.hpp"
 #include <Eigen/Core>
 #include <fft/fft.hpp>
+#include <optional>
 
 namespace fluid {
 namespace algorithm {
@@ -38,8 +39,8 @@ class FFTSetup
 FFTSetup(FFTSetup const&) = delete;
 FFTSetup& operator=(FFTSetup const&) = delete;
 
-FFTSetup(FFTSetup&& other) { *this = std::move(other); };
-FFTSetup& operator=(FFTSetup&& other)
+FFTSetup(FFTSetup&& other) noexcept { *this = std::move(other); };
+FFTSetup& operator=(FFTSetup&& other) noexcept
 {
 using std::swap;
 swap(mMaxSize, other.mMaxSize);
@@ -62,13 +63,13 @@ class FFT
 public:
 using MapXcd = Eigen::Map<Eigen::ArrayXcd>;
 
-static void setup() { getFFTSetup(); }
 
 FFT() = delete;
 
 FFT(index size, Allocator& alloc = FluidDefaultAllocator()) noexcept
 : mMaxSize(size), mSize(size), mFrameSize(size / 2 + 1),
-mLog2Size(static_cast<index>(std::log2(size))), mSetup(getFFTSetup()),
+mLog2Size(static_cast<index>(std::log2(size))),
+mSetup(getFFTSetup(*this, size)),
 mRealBuffer(asUnsigned(mFrameSize), alloc),
 mImagBuffer(asUnsigned(mFrameSize), alloc),
 mOutputBuffer(asUnsigned(mFrameSize), alloc)
@@ -106,11 +107,18 @@ class FFT
 return {mOutputBuffer.data(), mFrameSize};
 }
 
+private:
+std::optional<impl::FFTSetup> mLocalSetup;
 protected:
-static htl::setup_type<double> getFFTSetup()
-{
-static const impl::FFTSetup static_setup(65536);
-return static_setup();
+static constexpr index default_max = 65536;
+static htl::setup_type<double> getFFTSetup(FFT& _this, index size)
+{
+static const impl::FFTSetup static_setup(default_max);
+if(size <= default_max) return static_setup();
+else {
+_this.mLocalSetup = std::optional<impl::FFTSetup>(size);
+return _this.mLocalSetup->operator()();
+}
 }
 
 index mMaxSize{16384};
@@ -127,6 +135,7 @@
 rt::vector<std::complex<double>> mOutputBuffer;
 };
 
+
 class IFFT : public FFT
 {
 
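The FFT.hpp change swaps a single fixed-size shared setup for a shared default (64k) plus a lazily created per-instance fallback when a larger transform is requested; it also marks FFTSetup's move operations noexcept. A simplified sketch of the shared-default-plus-fallback pattern, using stand-in types (Setup, Transform, defaultMax) rather than the htl/impl ones in the repository:

#include <cstddef>
#include <optional>

// Stand-in for impl::FFTSetup; the name and contents are invented for the sketch.
struct Setup
{
  explicit Setup(std::size_t maxSize) : max(maxSize) {}
  std::size_t max;
};

class Transform
{
public:
  explicit Transform(std::size_t size) : mSetup(getSetup(*this, size)) {}

  std::size_t setupMax() const { return mSetup.max; }

private:
  static constexpr std::size_t defaultMax = 65536;

  // One shared setup serves every instance whose size fits the default maximum;
  // larger sizes lazily build a private fallback owned by the instance itself.
  static const Setup& getSetup(Transform& self, std::size_t size)
  {
    static const Setup shared(defaultMax);
    if (size <= defaultMax) return shared;
    self.mLocal.emplace(size);
    return *self.mLocal;
  }

  std::optional<Setup> mLocal; // declared before mSetup so it already exists when getSetup runs
  const Setup&         mSetup;
};

The optional member only holds a value for instances that exceeded the default maximum, so the common small-size case keeps sharing one setup across all instances.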
21 changes: 13 additions & 8 deletions include/algorithms/util/TruePeak.hpp
@@ -35,10 +35,12 @@ class Interpolator
 };
 
 public:
-Interpolator(index maxtaps, index maxfactor, Allocator& alloc)
-: mMaxTaps{maxtaps}, mMaxFactor{maxfactor},
-mMaxLatency{(mMaxTaps + mMaxFactor - 1) / mMaxFactor},
-mBuffer(asUnsigned(mMaxLatency), alloc), mCount(asUnsigned(mMaxFactor), alloc),
+Interpolator(index maxtaps, index maxfactor, index minFactor,
+Allocator& alloc)
+: mMaxTaps{maxtaps}, mMaxFactor{maxfactor}, mMinFactor{minFactor},
+mMaxLatency{(mMaxTaps + mMinFactor - 1) / minFactor},
+mBuffer(asUnsigned(mMaxLatency), alloc),
+mCount(asUnsigned(mMaxFactor), alloc),
 mFilters(mMaxFactor, mMaxLatency, alloc),
 mIndex(mMaxFactor, mMaxLatency, alloc)
 {}
@@ -47,6 +49,7 @@
 {
 assert(taps <= mMaxTaps);
 assert(factor <= mMaxFactor);
+assert(factor >= mMinFactor);
 assert(factor > 0);
 assert(taps > 0);
 
@@ -121,7 +124,8 @@
 
 index mMaxTaps;
 index mMaxFactor;
-index mMaxLatency;
+index mMinFactor;
+index mMaxLatency;
 
 index mTaps;
 index mFactor;
@@ -142,17 +146,18 @@ class TruePeak
 {
 static constexpr index nTaps = 49;
 static constexpr index maxFactor = 4;
+static constexpr index minFactor = 2;
 
 public:
 TruePeak(index maxSize, Allocator& alloc)
-: mInterpolator(nTaps, maxFactor, alloc), mBuffer{maxFactor * maxSize,
-alloc}
+: mInterpolator(nTaps, maxFactor, minFactor, alloc),
+mBuffer{maxFactor * maxSize, alloc}
 {}
 
 void init(index /*size*/, double sampleRate, Allocator&)
 {
 mSampleRate = sampleRate;
-mFactor = sampleRate < (2 * 44100) ? 4 : 2;
+mFactor = sampleRate < (2 * 44100) ? maxFactor : minFactor;
 mInterpolator.init(nTaps, mFactor);
 }
 
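On TruePeak.hpp: the Interpolator is now also told the smallest oversampling factor it may be asked to run at, and derives its worst-case latency from that minimum (ceil(taps / factor) grows as the factor shrinks) rather than from the maximum factor. init() keeps choosing 4x oversampling below 88.2 kHz and 2x at or above it, now spelled with the named constants. A standalone illustration of that selection (not FluCoMa code):

#include <cstdio>

int main()
{
  constexpr int nTaps     = 49;
  constexpr int maxFactor = 4; // chosen below 88.2 kHz
  constexpr int minFactor = 2; // chosen at 88.2 kHz and above

  const double rates[] = {44100.0, 48000.0, 88200.0, 96000.0};
  for (double sr : rates)
  {
    int factor       = sr < (2 * 44100) ? maxFactor : minFactor;
    int perPhaseTaps = (nTaps + factor - 1) / factor; // ceil(taps / factor)
    std::printf("%8.0f Hz -> oversample x%d, taps per polyphase branch %d\n",
                sr, factor, perPhaseTaps);
  }
  return 0;
}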
6 changes: 3 additions & 3 deletions include/clients/nrt/DataSetQueryClient.hpp
@@ -144,10 +144,10 @@ class DataSetQueryClient : public FluidBaseClient, OfflineIn, OfflineOut
 return OK();
 }
 
-MessageResult<void> limit(index rows)
+MessageResult<void> limit(index points)
 {
-if (rows <= 0) return Error("invalid value");
-mAlgorithm.limit(rows);
+if (points <= 0) return Error("invalid limit on the number of points");
+mAlgorithm.limit(points);
 return OK();
 }
 
23 changes: 13 additions & 10 deletions include/clients/nrt/KNNClassifierClient.hpp
@@ -22,11 +22,11 @@ namespace knnclassifier {
 
 struct KNNClassifierData
 {
-algorithm::KDTree tree{0};
+algorithm::KDTree tree{algorithm::KDTree()};
 FluidDataSet<std::string, std::string, 1> labels{1};
-index size() const { return labels.size(); }
-index dims() const { return tree.dims(); }
-void clear()
+index size() const { return labels.size(); }
+index dims() const { return tree.dims(); }
+void clear()
 {
 labels = FluidDataSet<std::string, std::string, 1>(1);
 tree.clear();
@@ -43,7 +43,10 @@ void to_json(nlohmann::json& j, const KNNClassifierData& data)
 bool check_json(const nlohmann::json& j, const KNNClassifierData&)
 {
 return fluid::check_json(j, {"tree", "labels"},
-{JSONTypes::OBJECT, JSONTypes::OBJECT});
+{JSONTypes::OBJECT, JSONTypes::OBJECT}) &&
+fluid::algorithm::check_json(j.at("tree"), algorithm::KDTree()) &&
+fluid::check_json(j.at("labels"),
+FluidDataSet<std::string, std::string, 1>());
 }
 
 void from_json(const nlohmann::json& j, KNNClassifierData& data)
@@ -132,14 +135,14 @@ class KNNClassifierClient : public FluidBaseClient,
 algorithm::KNNClassifier classifier;
 RealVector point(mAlgorithm.tree.dims());
 point <<= BufferAdaptor::ReadAccess(data.get())
-.samps(0, mAlgorithm.tree.dims(), 0);
+.samps(0, mAlgorithm.tree.dims(), 0);
 std::string result = classifier.predict(mAlgorithm.tree, point,
 mAlgorithm.labels, k, weight);
 return result;
 }
 
-MessageResult<void> predict(InputDataSetClientRef source,
-LabelSetClientRef dest) const
+MessageResult<void> predict(InputDataSetClientRef source,
+LabelSetClientRef dest) const
 {
 index k = get<kNumNeighbors>();
 bool weight = get<kWeight>() != 0;
@@ -163,7 +166,7 @@
 {
 RealVectorView point = data.row(i);
 StringVector label = {classifier.predict(mAlgorithm.tree, point,
-mAlgorithm.labels, k, weight)};
+mAlgorithm.labels, k, weight)};
 result.add(ids(i), label);
 }
 destPtr->setLabelSet(result);
@@ -186,7 +189,7 @@
 makeMessage("read", &KNNClassifierClient::read));
 }
 
-index encodeIndex(std::string const& label) const
+index encodeIndex(std::string const& label) const
 {
 return mLabelSetEncoder.encodeIndex(label);
 }
28 changes: 17 additions & 11 deletions include/clients/nrt/KNNRegressorClient.hpp
@@ -20,7 +20,7 @@ namespace knnregressor {
 
 struct KNNRegressorData
 {
-algorithm::KDTree tree{0};
+algorithm::KDTree tree{algorithm::KDTree()};
 FluidDataSet<std::string, double, 1> target{1};
 index size() const { return target.size(); }
 index dims() const { return tree.dims(); }
@@ -41,7 +41,10 @@ void to_json(nlohmann::json& j, const KNNRegressorData& data)
 bool check_json(const nlohmann::json& j, const KNNRegressorData&)
 {
 return fluid::check_json(j, {"tree", "target"},
-{JSONTypes::OBJECT, JSONTypes::OBJECT});
+{JSONTypes::OBJECT, JSONTypes::OBJECT}) &&
+fluid::algorithm::check_json(j.at("tree"), algorithm::KDTree()) &&
+fluid::check_json(j.at("labels"),
+FluidDataSet<std::string, std::string, 1>());
 }
 
 void from_json(const nlohmann::json& j, KNNRegressorData& data)
@@ -128,24 +131,25 @@ class KNNRegressorClient : public FluidBaseClient,
 if (mAlgorithm.tree.size() < k) return Error(NotEnoughData);
 
 InBufferCheck bufCheck(mAlgorithm.tree.dims());
-if (!bufCheck.checkInputs(in.get()))
-return Error(bufCheck.error());
+if (!bufCheck.checkInputs(in.get())) return Error(bufCheck.error());
 BufferAdaptor::ReadAccess inBuf(in.get());
-BufferAdaptor::Access outBuf(out.get());
+BufferAdaptor::Access outBuf(out.get());
 if (!outBuf.exists()) return Error(InvalidBuffer);
-Result resizeResult = outBuf.resize(mAlgorithm.target.dims(), 1, inBuf.sampleRate());
+Result resizeResult =
+outBuf.resize(mAlgorithm.target.dims(), 1, inBuf.sampleRate());
 if (!resizeResult.ok()) return Error(BufferAlloc);
 algorithm::KNNRegressor regressor;
 RealVector input(mAlgorithm.tree.dims());
 RealVector output(mAlgorithm.target.dims());
 input <<= inBuf.samps(0, mAlgorithm.tree.dims(), 0);
-regressor.predict(mAlgorithm.tree, mAlgorithm.target, input, output, k, weight);
+regressor.predict(mAlgorithm.tree, mAlgorithm.target, input, output, k,
+weight);
 outBuf.samps(0) <<= output;
 return OK();
 }
 
 MessageResult<void> predict(InputDataSetClientRef source,
-DataSetClientRef dest) const
+DataSetClientRef dest) const
 {
 index k = get<kNumNeighbors>();
 bool weight = get<kWeight>() != 0;
@@ -169,7 +173,8 @@
 for (index i = 0; i < dataSet.size(); i++)
 {
 RealVectorView point = data.row(i);
-regressor.predict(mAlgorithm.tree, mAlgorithm.target, point, prediction, k, weight);
+regressor.predict(mAlgorithm.tree, mAlgorithm.target, point, prediction,
+k, weight);
 result.add(ids(i), prediction);
 }
 destPtr->setDataSet(result);
@@ -262,9 +267,10 @@ class KNNRegressorQuery : public FluidBaseClient, ControlIn, ControlOut
 RealVector output(algorithm.target.dims(), c.allocator());
 
 input <<= BufferAdaptor::ReadAccess(get<kInputBuffer>().get())
-.samps(0, algorithm.tree.dims(), 0);
+.samps(0, algorithm.tree.dims(), 0);
 
-regressor.predict(algorithm.tree, algorithm.target, input, output, k, weight, c.allocator());
+regressor.predict(algorithm.tree, algorithm.target, input, output, k,
+weight, c.allocator());
 outBuf.samps(0) <<= output;
 }
 }
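Both KNN clients tighten check_json in the same way: after confirming that the top-level keys exist and hold JSON objects, they now also hand the nested tree and dataset payloads to their own validators before from_json runs. A generic sketch of that pattern in plain nlohmann::json; the helper names and nested fields below are invented for illustration and are not the FluCoMa schema:

#include <nlohmann/json.hpp>
#include <string>
#include <vector>

using nlohmann::json;

// Step 1: the shallow check -- are the expected keys present, and are they objects?
bool keysAreObjects(const json& j, const std::vector<std::string>& keys)
{
  for (const auto& key : keys)
    if (!j.contains(key) || !j.at(key).is_object()) return false;
  return true;
}

// Step 2: per-payload validators, standing in for the nested check_json calls.
bool looksLikeTree(const json& tree)    { return tree.contains("dims"); }
bool looksLikeDataSet(const json& data) { return data.contains("data"); }

bool checkModelJson(const json& j)
{
  return keysAreObjects(j, {"tree", "labels"}) &&
         looksLikeTree(j.at("tree")) &&
         looksLikeDataSet(j.at("labels"));
}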
2 changes: 1 addition & 1 deletion include/clients/nrt/NMFCrossClient.hpp
@@ -40,7 +40,7 @@ constexpr auto NMFCrossParams = defineParameters(
 InputBufferParam("target", "Target Buffer"),
 BufferParam("output", "Output Buffer"),
 LongParam("timeSparsity", "Time Sparsity", 7, Min(1), Odd()),
-LongParam("polyphony", "Polyphony", 10, Min(1), Odd(),
+LongParam("polyphony", "Polyphony", 11, Min(1), Odd(),
 FrameSizeUpperLimit<kFFT>()),
 LongParam("continuity", "Continuity", 7, Min(1), Odd()),
 LongParam("iterations", "Number of Iterations", 50, Min(1)),
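The polyphony default moves from 10 to 11, presumably because the parameter also carries an Odd() constraint, so the shipped default should itself satisfy it.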
4 changes: 2 additions & 2 deletions include/clients/nrt/UMAPClient.hpp
@@ -79,8 +79,8 @@ class UMAPClient : public FluidBaseClient,
 auto src = srcPtr->getDataSet();
 auto dest = destPtr->getDataSet();
 if (src.size() == 0) return Error(EmptyDataSet);
-if (get<kNumNeighbors>() > src.size())
-return Error("Number of Neighbours is larger than dataset");
+if (get<kNumNeighbors>() >= src.size())
+return Error("Number of Neighbours is greater or equal to the size of the the dataset");
 FluidDataSet<string, double, 1> result;
 try
 {
2 changes: 1 addition & 1 deletion include/data/FluidDataSet.hpp
@@ -170,7 +170,7 @@ class FluidDataSet
 using namespace std;
 if (size() == 0) return "{}";
 ostringstream result;
-result << endl << "rows: " << size() << " cols: " << pointSize() << endl;
+result << endl << "points:" << size() << " cols:" << pointSize() << endl;
 if (size() < maxRows)
 {
 for (index r = 0; r < size(); r++)
20 changes: 10 additions & 10 deletions tests/data/TestFluidTensor.cpp
@@ -20,7 +20,7 @@ TEST_CASE("FluidTensor can be created from a list of dimenions","[FluidTensor]")
 
 REQUIRE(x.size() == 3);
 REQUIRE(x.rows() == 3);
-REQUIRE(x.cols() == 1);
+// REQUIRE(x.cols() == 1);
 REQUIRE(std::distance(x.begin(),x.end()) == x.size());
 }
 SECTION("2D creation reports correct sizes"){
@@ -31,15 +31,15 @@ TEST_CASE("FluidTensor can be created from a list of dimenions","[FluidTensor]")
 REQUIRE(x.cols() == 2);
 REQUIRE(std::distance(x.begin(),x.end()) == x.size());
 }
-// SECTION("3D creation reports correct sizes"){
-// const FluidTensor<int,3> x(fluid::FluidDefaultAllocator(), 3,2,5);
+SECTION("3D creation reports correct sizes"){
+const FluidTensor<int,3> x(3,2,5);
 
-// REQUIRE(x.size() == (3 * 2 * 5));
-// REQUIRE(x.rows() == 3);
-// REQUIRE(x.cols() == 2);
-// REQUIRE(x.descriptor().extents[2] == 5);
-// REQUIRE(std::distance(x.begin(),x.end()) == x.size());
-// }
+REQUIRE(x.size() == (3 * 2 * 5));
+REQUIRE(x.rows() == 3);
+REQUIRE(x.cols() == 2);
+REQUIRE(x.descriptor().extents[2] == 5);
+REQUIRE(std::distance(x.begin(),x.end()) == x.size());
+}
 }
 
 TEST_CASE("FluidTensor can be initialized from initializer lists","[FluidTensor]"){
@@ -62,7 +62,7 @@ TEST_CASE("FluidTensor can be initialized from initializer lists","[FluidTensor]
 4};
 const std::array<int, 5> y{0,1,2,3,4};
 REQUIRE(x.rows() == 5);
-REQUIRE(x.cols() == 1);
+// REQUIRE(x.cols() == 1);
 REQUIRE(std::distance(x.begin(),x.end()) == x.size());
 REQUIRE(std::equal(x.begin(),x.end(), y.begin()));
 }
2 changes: 1 addition & 1 deletion (file name not shown in this view)
@@ -1,5 +1,5 @@
 
-rows: 100 cols: 100
+points:100 cols:100
 0 0 1 2 ... 97 98 99
 0 100 101 102 ... 197 198 199
 0 200 201 202 ... 297 298 299