diff --git a/src/backend/CMakeLists.txt b/src/backend/CMakeLists.txt index 0aa8ca1..f295a8d 100644 --- a/src/backend/CMakeLists.txt +++ b/src/backend/CMakeLists.txt @@ -6,7 +6,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") add_library(backend STATIC parsing_utils.cpp backend.cpp) target_link_libraries(backend "${TORCH_LIBRARIES}") -set_property(TARGET backend PROPERTY CXX_STANDARD 14) +set_property(TARGET backend PROPERTY CXX_STANDARD 17) if(MSVC) set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") diff --git a/src/backend/backend.cpp b/src/backend/backend.cpp index afa4fdc..f243bdb 100644 --- a/src/backend/backend.cpp +++ b/src/backend/backend.cpp @@ -18,6 +18,9 @@ void Backend::perform(std::vector in_buffer, c10::InferenceMode guard; auto params = get_method_params(method); + // std::cout << "in_buffer length : " << in_buffer.size() << std::endl; + // std::cout << "out_buffer length : " << out_buffer.size() << std::endl; + if (!params.size()) return; @@ -31,13 +34,20 @@ void Backend::perform(std::vector in_buffer, // COPY BUFFER INTO A TENSOR std::vector tensor_in; - for (auto buf : in_buffer) - tensor_in.push_back(torch::from_blob(buf, {1, 1, n_vec})); + // for (auto buf : in_buffer) + for (int i(0); i < in_buffer.size(); i++) { + tensor_in.push_back(torch::from_blob(in_buffer[i], {1, 1, n_vec})); + // std::cout << i << " : " << tensor_in[i].min().item() << std::endl; + } auto cat_tensor_in = torch::cat(tensor_in, 1); cat_tensor_in = cat_tensor_in.reshape({in_dim, n_batches, -1, in_ratio}); cat_tensor_in = cat_tensor_in.select(-1, -1); cat_tensor_in = cat_tensor_in.permute({1, 0, 2}); + // std::cout << cat_tensor_in.size(0) << ";" << cat_tensor_in.size(1) << ";" << cat_tensor_in.size(2) << std::endl; + // for (int i = 0; i < cat_tensor_in.size(1); i++ ) + // std::cout << cat_tensor_in[0][i][0] << ";"; + // std::cout << std::endl; // SEND TENSOR TO DEVICE std::unique_lock model_lock(m_model_mutex); @@ -59,6 +69,12 @@ void 
Backend::perform(std::vector in_buffer, int out_batches(tensor_out.size(0)), out_channels(tensor_out.size(1)), out_n_vec(tensor_out.size(2)); + // for (int b(0); b < out_batches; b++) { + // for (int c(0); c < out_channels; c++) { + // std::cout << b << ";" << c << ";" << tensor_out[b][c].min().item() << std::endl; + // } + // } + // CHECKS ON TENSOR SHAPE if (out_batches * out_channels != out_buffer.size()) { std::cout << "bad out_buffer size, expected " << out_batches * out_channels diff --git a/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp b/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp index d90cfc9..e873a12 100644 --- a/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp +++ b/src/frontend/maxmsp/mc.nn_tilde/mc.nn_tilde.cpp @@ -175,6 +175,22 @@ void model_perform(mc_nn_tilde *mc_nn_instance) { mc_nn_instance->m_method, mc_nn_instance->get_batches()); } +void check_loop_buffers(mc_nn_tilde *mc_nn_instance, std::vector &in_model, std::vector &out_model) { + if (mc_nn_instance->m_in_model.size() != in_model.size()) + { + in_model.clear(); + for (auto &ptr : mc_nn_instance->m_in_model) + in_model.push_back(ptr.get()); + + } + if (mc_nn_instance->m_out_model.size() != out_model.size()) + { + out_model.clear(); + for (auto &ptr : mc_nn_instance->m_out_model) + out_model.push_back(ptr.get()); + } +} + void model_perform_loop(mc_nn_tilde *mc_nn_instance) { std::vector in_model, out_model; @@ -185,6 +201,7 @@ void model_perform_loop(mc_nn_tilde *mc_nn_instance) { out_model.push_back(ptr.get()); while (!mc_nn_instance->m_should_stop_perform_thread) { + check_loop_buffers(mc_nn_instance, in_model, out_model); if (mc_nn_instance->m_data_available_lock.try_acquire_for( std::chrono::milliseconds(200))) { mc_nn_instance->m_model->perform( @@ -275,7 +292,7 @@ mc_nn_tilde::mc_nn_tilde(const atoms &args) // CREATE INLETS, OUTLETS and BUFFERS m_in_buffer = std::make_unique[]>( m_in_dim * get_batches()); - for (int i(0); i < m_in_dim; i++) { + for (int i(0); i < m_in_dim * 
get_batches(); i++) { std::string input_label = ""; try { input_label = m_model->get_model() @@ -294,7 +294,7 @@ mc_nn_tilde::mc_nn_tilde(const atoms &args) m_out_buffer = std::make_unique[]>( m_out_dim * get_batches()); - for (int i(0); i < m_out_dim; i++) { + for (int i(0); i < m_out_dim * get_batches(); i++) { std::string output_label = ""; try { output_label = m_model->get_model() @@ -404,7 +421,7 @@ void mc_nn_tilde::perform(audio_bundle input, audio_bundle output) { if (m_in_buffer[0].full()) { // BUFFER IS FULL if (!m_use_thread) { // TRANSFER MEMORY BETWEEN INPUT CIRCULAR BUFFER AND MODEL BUFFER - for (int c(0); c < m_in_dim; c++) + for (int c(0); c < m_in_dim * get_batches(); c++) m_in_buffer[c].get(m_in_model[c].get(), m_buffer_size); // CALL MODEL PERFORM IN CURRENT THREAD diff --git a/src/frontend/maxmsp/mcs.nn_tilde/CMakeLists.txt b/src/frontend/maxmsp/mcs.nn_tilde/CMakeLists.txt index 33844eb..17fd8f8 100755 --- a/src/frontend/maxmsp/mcs.nn_tilde/CMakeLists.txt +++ b/src/frontend/maxmsp/mcs.nn_tilde/CMakeLists.txt @@ -44,7 +44,7 @@ add_library( ) target_link_libraries(${PROJECT_NAME} PRIVATE backend) -set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 14) +set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 17) if (APPLE) # SEARCH FOR TORCH DYLIB IN THE LOADER FOLDER diff --git a/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp b/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp index a84664f..ddfe3c3 100644 --- a/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp +++ b/src/frontend/maxmsp/mcs.nn_tilde/mcs.nn_tilde.cpp @@ -386,13 +386,14 @@ void mc_bnn_tilde::perform(audio_bundle input, audio_bundle output) { for (int d(0); d < m_in_dim; d++) { auto in = input.samples(b * m_in_dim + d); m_in_buffer[d * get_batches() + b].put(in, vec_size); + // std::cout << "populate batch " << b << "; channel " << d << " into buffer" << d * get_batches() + b << "; value : " << in[0] << std::endl; } } if (m_in_buffer[0].full()) { // BUFFER IS FULL if 
(!m_use_thread) { // TRANSFER MEMORY BETWEEN INPUT CIRCULAR BUFFER AND MODEL BUFFER - for (int c(0); c < m_in_dim; c++) + for (int c(0); c < m_in_dim * get_batches(); c++) m_in_buffer[c].get(m_in_model[c].get(), m_buffer_size); // CALL MODEL PERFORM IN CURRENT THREAD @@ -404,11 +405,11 @@ void mc_bnn_tilde::perform(audio_bundle input, audio_bundle output) { } else if (m_result_available_lock.try_acquire()) { // TRANSFER MEMORY BETWEEN INPUT CIRCULAR BUFFER AND MODEL BUFFER - for (int c(0); c < m_in_dim; c++) + for (int c(0); c < m_in_dim * get_batches(); c++) m_in_buffer[c].get(m_in_model[c].get(), m_buffer_size); // TRANSFER MEMORY BETWEEN OUTPUT CIRCULAR BUFFER AND MODEL BUFFER - for (int c(0); c < m_out_dim; c++) + for (int c(0); c < m_out_dim * get_batches(); c++) m_out_buffer[c].put(m_out_model[c].get(), m_buffer_size); // SIGNAL PERFORM THREAD THAT DATA IS AVAILABLE diff --git a/src/frontend/puredata/nn_tilde/CMakeLists.txt b/src/frontend/puredata/nn_tilde/CMakeLists.txt index d3f465c..8c55db0 100644 --- a/src/frontend/puredata/nn_tilde/CMakeLists.txt +++ b/src/frontend/puredata/nn_tilde/CMakeLists.txt @@ -70,4 +70,4 @@ target_link_libraries(nn PRIVATE backend) if (MSVC) target_link_libraries(nn PRIVATE "${PUREDATA_BIN_DIR}/pd.lib") endif() -set_property(TARGET nn PROPERTY CXX_STANDARD 14) \ No newline at end of file +set_property(TARGET nn PROPERTY CXX_STANDARD 17) \ No newline at end of file