diff --git a/.github/workflows/clang-benchmarks-linux-nix-check.yml b/.github/workflows/clang-benchmarks-linux-nix-check.yml
new file mode 100644
index 0000000000..c214d00c13
--- /dev/null
+++ b/.github/workflows/clang-benchmarks-linux-nix-check.yml
@@ -0,0 +1,44 @@
+name: Build and Test benchmark tests on Linux with clang
+
+on:
+  workflow_call:
+
+jobs:
+  build-and-test:
+    name: "Build and test benchmark tests on Linux with clang"
+    runs-on: [self-hosted, Linux, X64, aws_autoscaling]
+    continue-on-error: true
+    steps:
+      # https://github.com/actions/checkout/issues/1552
+      - name: Clean up after previous checkout
+        run: chmod +w -R ${GITHUB_WORKSPACE}; rm -rf ${GITHUB_WORKSPACE}/*;
+
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Run checks
+        run: |
+          nix build -L .?#checks.x86_64-linux.all-clang-benchmarks
+          mkdir results
+          ls -l -a
+          ls -l -a ./result
+          ls -l -a ./result/test-logs/
+          cat ./result/test-logs/test_errors.txt
+          cp result/test-logs/*_test.xml results/
+          cp result/test-logs/*_benchmark.xml results/
+          ls -l -a results/
+        continue-on-error: true
+        env:
+          NIX_CONFIG: |
+            cores = 8
+
+      - name: Publish Benchmarks Test Results
+        uses: EnricoMi/publish-unit-test-result-action/linux@v2
+        with:
+          check_name: "Benchmarks Test Results"
+          files: "results/*.xml"
+          comment_mode: ${{ github.event.pull_request.head.repo.fork && 'off' || 'always' }} # Don't create PR comment from fork runs
+          action_fail_on_inconclusive: true # fail, if no reports
+
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index b7accb798f..b499fbd0e4 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -1,8 +1,14 @@
 name: Nightly Testing
 
+#on:
+#  schedule:
+#    - cron: '0 0 * * *'
 on:
-  schedule:
-    - cron: '0 0 * * *'
+  pull_request:
+  merge_group:
+  push:
+    branches:
+      - master
 
 jobs:
   test-linux-sanitizers:
@@ -12,11 +18,19 @@ jobs:
       always() && !cancelled()
     secrets: inherit
 
+  test-linux-benchmarks:
+    name: Linux placeholder benchmark testing with clang
+    uses: ./.github/workflows/clang-benchmarks-linux-nix-check.yml
+    if: |
+      always() && !cancelled()
+    secrets: inherit
+
   post-telemetry:
     name: Post test results in Open Telemetry format
     runs-on: [self-hosted, Linux, X64, aws_autoscaling]
     needs:
       - test-linux-sanitizers
+      - test-linux-benchmarks
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -27,7 +41,6 @@ jobs:
         run: |
           ls -l -a
           nix build -L .?#checks.x86_64-linux.all-clang-sanitize
-          cat ./result/test-logs/test_errors.txt
           export UndefinedBehaviorSanitizer=$(grep UndefinedBehaviorSanitizer result/test-logs/test_errors.txt | wc -l)
           export AddressSanitizer=$(grep AddressSanitizer result/test-logs/test_errors.txt | wc -l)
           export LeakSanitizer=$(grep LeakSanitizer result/test-logs/test_errors.txt | wc -l)
@@ -43,3 +56,11 @@ jobs:
             --service_name nightly-build \
             python3 ./parse_tests.py
 
+          nix build -L .?#checks.x86_64-linux.all-clang-benchmarks
+          /home/ec2-user/.local/bin/opentelemetry-instrument \
+            --traces_exporter console,otlp \
+            --metrics_exporter console,otlp \
+            --logs_exporter console,otlp \
+            --service_name nightly-build \
+            python3 ./parse_benchmarks.py
+
diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml
deleted file mode 100644
index 91ac921fab..0000000000
--- a/.github/workflows/pull-request.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-name: PR Testing
-
-on:
-  pull_request:
-  merge_group:
-  push:
-    branches:
-      - master
-
-concurrency:
-  # In master we want to run for every commit, in other branches — only for the last one
-  group: ${{
-    ( github.ref == 'refs/heads/master' && format('{0}/{1}/{2}', github.workflow, github.ref, github.sha) )
-    ||
-    format('{0}/{1}', github.workflow, github.ref) }}
-  cancel-in-progress: true
-
-jobs:
-  test-linux-gcc:
-    name: Linux placeholder testing with gcc
-    uses: ./.github/workflows/gcc-linux-nix-check.yml
-    if: |
-      always() && !cancelled()
-    secrets: inherit
-
-  test-linux-clang:
-    name: Linux placeholder testing with clang
-    uses: ./.github/workflows/clang-linux-nix-check.yml
-    if: |
-      always() && !cancelled()
-    secrets: inherit
-
-  check-proof-producer:
-    name: Check Proof Producer
-    uses: ./.github/workflows/check-proof-producer.yml
-    if: |
-      always() && !cancelled()
-    secrets: inherit
-
-  verify-circuit-proof:
-    name: Verify Circuit Proof
-    uses: ./.github/workflows/verify-circuit-proof.yml
-    if: |
-      always() && !cancelled()
-    secrets: inherit
diff --git a/crypto3.nix b/crypto3.nix
index cb7ba532e2..bf014ff37a 100644
--- a/crypto3.nix
+++ b/crypto3.nix
@@ -39,7 +39,7 @@ in stdenv.mkDerivation {
     ];
 
   cmakeBuildType = if enableDebug then "Debug" else "Release";
-  doCheck = runTests; # tests are inside crypto3-tests derivation
+  doCheck = runTests || benchmarkTests;
 
   checkPhase = ''
     # JUNIT file without explicit file name is generated after the name of the master test suite inside `CMAKE_CURRENT_SOURCE_DIR`
@@ -50,6 +50,7 @@ in stdenv.mkDerivation {
     cd ..
     mkdir -p ${placeholder "out"}/test-logs
     find .. -type f -name '*_test.xml' -exec cp {} ${placeholder "out"}/test-logs \;
+    find .. -type f -name '*_benchmark.xml' -exec cp {} ${placeholder "out"}/test-logs \;
     cp crypto3/test_errors.txt ${placeholder "out"}/test-logs \
   '';
 
diff --git a/crypto3/benchmarks/CMakeLists.txt b/crypto3/benchmarks/CMakeLists.txt
index c1f19f6dde..4e1c9528c1 100644
--- a/crypto3/benchmarks/CMakeLists.txt
+++ b/crypto3/benchmarks/CMakeLists.txt
@@ -60,7 +60,7 @@ set(BENCHMARK_NAMES
 
     "multiprecision/modular_adaptor_fixed"
 
-    "zk/lpc"
+    #"zk/lpc"
     "zk/pedersen"
 )
 
diff --git a/crypto3/benchmarks/algebra/fields.cpp b/crypto3/benchmarks/algebra/fields.cpp
index 0a7861a90b..90a67fe128 100644
--- a/crypto3/benchmarks/algebra/fields.cpp
+++ b/crypto3/benchmarks/algebra/fields.cpp
@@ -25,7 +25,7 @@
 // SOFTWARE.
 //---------------------------------------------------------------------------//
 
-#define BOOST_TEST_MODULE algebra_fields_bench_test
+#define BOOST_TEST_MODULE algebra_fields_bench_benchmark
 
 #include <ostream>
 #include <fstream>
diff --git a/crypto3/benchmarks/algebra/multiexp.cpp b/crypto3/benchmarks/algebra/multiexp.cpp
index 07fa5c1c2d..e2823b3678 100644
--- a/crypto3/benchmarks/algebra/multiexp.cpp
+++ b/crypto3/benchmarks/algebra/multiexp.cpp
@@ -23,7 +23,7 @@
 // SOFTWARE.
 //---------------------------------------------------------------------------//
 
-#define BOOST_TEST_MODULE multiexpr_test
+#define BOOST_TEST_MODULE multiexpr_benchmark
 
 #include <boost/test/unit_test.hpp>
 #include <boost/test/data/test_case.hpp>
diff --git a/crypto3/benchmarks/algebra_benchmark.xml b/crypto3/benchmarks/algebra_benchmark.xml
new file mode 100644
index 0000000000..9d0cf1acff
--- /dev/null
+++ b/crypto3/benchmarks/algebra_benchmark.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuite tests="6" skipped="0" errors="0" failures="0" id="0" name="algebra_benchmark" time="1268.28">
+<testcase assertions="0" classname="curves_benchmark" name="pallas" time="40.1403">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case curves_benchmark/pallas did not check any assertions
+
+]]></system-out>
+</testcase>
+<testcase assertions="0" classname="curves_benchmark" name="vesta" time="40.2609">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case curves_benchmark/vesta did not check any assertions
+
+]]></system-out>
+</testcase>
+<testcase assertions="0" classname="curves_benchmark" name="bls12_381" time="250.289">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case curves_benchmark/bls12_381 did not check any assertions
+
+]]></system-out>
+</testcase>
+<testcase assertions="0" classname="curves_benchmark" name="bls12_377" time="224.689">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case curves_benchmark/bls12_377 did not check any assertions
+
+]]></system-out>
+</testcase>
+<testcase assertions="0" classname="curves_benchmark" name="mnt4_298" time="248.203">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case curves_benchmark/mnt4_298 did not check any assertions
+
+]]></system-out>
+</testcase>
+<testcase assertions="0" classname="curves_benchmark" name="mnt6_298" time="464.699">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case curves_benchmark/mnt6_298 did not check any assertions
+
+]]></system-out>
+</testcase>
+</testsuite>
\ No newline at end of file
diff --git a/crypto3/benchmarks/algebra_benchmark_0.xml b/crypto3/benchmarks/algebra_benchmark_0.xml
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/crypto3/benchmarks/algebra_fields_bench_benchmark.xml b/crypto3/benchmarks/algebra_fields_bench_benchmark.xml
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/crypto3/benchmarks/math/polynomial_dfs.cpp b/crypto3/benchmarks/math/polynomial_dfs.cpp
index 96fef13cf5..b6fe70f568 100644
--- a/crypto3/benchmarks/math/polynomial_dfs.cpp
+++ b/crypto3/benchmarks/math/polynomial_dfs.cpp
@@ -22,7 +22,7 @@
 // SOFTWARE.
 //---------------------------------------------------------------------------//
 
-#define BOOST_TEST_MODULE polynomial_dfs_benchmark_test
+#define BOOST_TEST_MODULE polynomial_dfs_benchmark_benchmark
 
 #include <algorithm>
 #include <cctype>
diff --git a/crypto3/benchmarks/modular_fixed_multiprecision_benchmark.xml b/crypto3/benchmarks/modular_fixed_multiprecision_benchmark.xml
new file mode 100644
index 0000000000..bac32eedd9
--- /dev/null
+++ b/crypto3/benchmarks/modular_fixed_multiprecision_benchmark.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuite tests="5" skipped="0" errors="0" failures="0" id="0" name="modular_fixed_multiprecision_benchmark" time="16.1268">
+<testcase assertions="0" classname="runtime_tests" name="modular_adaptor_montgomery_mult_perf_test" time="3.2846">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case runtime_tests/modular_adaptor_montgomery_mult_perf_test did not check any assertions
+
+]]></system-out>
+</testcase>
+<testcase assertions="0" classname="runtime_tests" name="modular_adaptor_backend_sub_perf_test" time="3.15055">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case runtime_tests/modular_adaptor_backend_sub_perf_test did not check any assertions
+
+]]></system-out>
+</testcase>
+<testcase assertions="0" classname="runtime_tests" name="modular_adaptor_backend_add_perf_test" time="3.13206">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case runtime_tests/modular_adaptor_backend_add_perf_test did not check any assertions
+
+]]></system-out>
+</testcase>
+<testcase assertions="0" classname="runtime_tests" name="modular_adaptor_backend_mult_perf_test" time="3.26774">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case runtime_tests/modular_adaptor_backend_mult_perf_test did not check any assertions
+
+]]></system-out>
+</testcase>
+<testcase assertions="0" classname="runtime_tests" name="modular_adaptor_number_mult_perf_test" time="3.29176">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case runtime_tests/modular_adaptor_number_mult_perf_test did not check any assertions
+
+]]></system-out>
+</testcase>
+</testsuite>
\ No newline at end of file
diff --git a/crypto3/benchmarks/multiexpr_benchmark.xml b/crypto3/benchmarks/multiexpr_benchmark.xml
new file mode 100644
index 0000000000..0daed79921
--- /dev/null
+++ b/crypto3/benchmarks/multiexpr_benchmark.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuite tests="1" skipped="0" errors="0" failures="0" id="0" name="multiexpr_benchmark" time="179.731">
+<testcase assertions="0" classname="multiexp_test_suite" name="multiexp_test_case" time="179.731">
+<system-out><![CDATA[MESSAGE:
+- file   : boost.test framework
+- line   : 0
+- message: Test case multiexp_test_suite/multiexp_test_case did not check any assertions
+
+]]></system-out>
+</testcase>
+</testsuite>
\ No newline at end of file
diff --git a/crypto3/benchmarks/multiprecision/modular_adaptor_fixed.cpp b/crypto3/benchmarks/multiprecision/modular_adaptor_fixed.cpp
index 8c3e8cb13d..1fd813d2fa 100644
--- a/crypto3/benchmarks/multiprecision/modular_adaptor_fixed.cpp
+++ b/crypto3/benchmarks/multiprecision/modular_adaptor_fixed.cpp
@@ -7,7 +7,7 @@
 // http://www.boost.org/LICENSE_1_0.txt
 //---------------------------------------------------------------------------//
 
-#define BOOST_TEST_MODULE modular_fixed_multiprecision_test
+#define BOOST_TEST_MODULE modular_fixed_multiprecision_benchmark
 
 #define TEST_CPP_INT
 
diff --git a/crypto3/benchmarks/pedersen_benchmark.xml b/crypto3/benchmarks/pedersen_benchmark.xml
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/crypto3/benchmarks/polynomial_dfs_benchmark_benchmark.xml b/crypto3/benchmarks/polynomial_dfs_benchmark_benchmark.xml
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/crypto3/benchmarks/zk/lpc.cpp b/crypto3/benchmarks/zk/lpc.cpp
index 77b8d440aa..8940cad790 100644
--- a/crypto3/benchmarks/zk/lpc.cpp
+++ b/crypto3/benchmarks/zk/lpc.cpp
@@ -24,7 +24,7 @@
 // SOFTWARE.
 //---------------------------------------------------------------------------//
 
-#define BOOST_TEST_MODULE lpc_test
+#define BOOST_TEST_MODULE lpc_benchmark
 
 // Do it manually for all performance tests
 #define PROFILING_ENABLED
diff --git a/crypto3/benchmarks/zk/pedersen.cpp b/crypto3/benchmarks/zk/pedersen.cpp
index d066c97c57..e537405140 100644
--- a/crypto3/benchmarks/zk/pedersen.cpp
+++ b/crypto3/benchmarks/zk/pedersen.cpp
@@ -25,7 +25,7 @@
 // SOFTWARE.
 //---------------------------------------------------------------------------//
 
-#define BOOST_TEST_MODULE pedersen_test
+#define BOOST_TEST_MODULE pedersen_benchmark
 
 #include <vector>
 #include <iostream>
diff --git a/flake.nix b/flake.nix
index 9e1ff9bd7d..38eb30b638 100644
--- a/flake.nix
+++ b/flake.nix
@@ -41,7 +41,8 @@
             sanitize = true;
           });
           crypto3-clang-bench = (pkgs.callPackage ./crypto3.nix {
-            runTests = true;
+            stdenv = pkgs.llvmPackages_19.stdenv;
+            runTests = false;
             enableDebug = false;
             benchmarkTests = true;
           });
@@ -60,7 +61,8 @@
             enableDebug = false;
           });
           parallel-crypto3-clang-bench = (pkgs.callPackage ./parallel-crypto3.nix {
-            runTests = true;
+            stdenv = pkgs.llvmPackages_19.stdenv;
+            runTests = false;
             enableDebug = false;
             benchmarkTests = true;
           });
@@ -112,7 +114,7 @@
             enableDebug = false;
           });
           crypto3-gcc-bench = (pkgs.callPackage ./crypto3.nix {
-            runTests = true;
+            runTests = false;
             enableDebug = false;
             benchmarkTests = true;
           });
@@ -129,7 +131,7 @@
           });
           crypto3-clang-bench = (pkgs.callPackage ./crypto3.nix {
             stdenv = pkgs.llvmPackages_19.stdenv;
-            runTests = true;
+            runTests = false;
             enableDebug = false;
             benchmarkTests = true;
           });
@@ -137,11 +139,11 @@
           parallel-crypto3-gcc = (pkgs.callPackage ./parallel-crypto3.nix {
             runTests = true;
             enableDebug = false;
-            benchmarkTests = true;
           });
           parallel-crypto3-gcc-bench = (pkgs.callPackage ./parallel-crypto3.nix {
-            runTests = true;
+            runTests = false;
             enableDebug = false;
+            benchmarkTests = true;
           });
           parallel-crypto3-clang = (pkgs.callPackage ./parallel-crypto3.nix {
             stdenv = pkgs.llvmPackages_19.stdenv;
@@ -155,7 +157,7 @@
           });
           parallel-crypto3-clang-bench = (pkgs.callPackage ./parallel-crypto3.nix {
             stdenv = pkgs.llvmPackages_19.stdenv;
-            runTests = true;
+            runTests = false;
             enableDebug = false;
             benchmarkTests = true;
           });
@@ -184,6 +186,10 @@
             name = "all";
             paths = [ crypto3-clang-sanitize parallel-crypto3-clang-sanitize proof-producer-clang-sanitize ];
           };
+          all-clang-benchmarks = pkgs.symlinkJoin {
+            name = "all";
+            paths = [ crypto3-clang-bench parallel-crypto3-clang-bench ];
+          };
           all-gcc = pkgs.symlinkJoin {
             name = "all";
             paths = [ crypto3-gcc parallel-crypto3-gcc proof-producer-gcc ];
diff --git a/parallel-crypto3.nix b/parallel-crypto3.nix
index d29be8d457..1b53644f07 100644
--- a/parallel-crypto3.nix
+++ b/parallel-crypto3.nix
@@ -36,19 +36,23 @@ in stdenv.mkDerivation {
       (if sanitize then "-DSANITIZE=ON" else "-DSANITIZE=OFF")
       (if benchmarkTests then "-DENABLE_BENCHMARKS=ON" else "-DENABLE_BENCHMARKS=OFF")
       "-DPARALLEL_CRYPTO3_ENABLE=TRUE"
+      "-G Ninja"
     ];
 
   cmakeBuildType = if enableDebug then "Debug" else "Release";
-  doCheck = runTests; # tests are inside parallel-crypto3-tests derivation
+  doCheck = runTests || benchmarkTests;
 
   checkPhase = ''
     # JUNIT file without explicit file name is generated after the name of the master test suite inside `CMAKE_CURRENT_SOURCE_DIR`
     export BOOST_TEST_LOGGER=JUNIT:HRF
     cd parallel-crypto3
-    ctest --verbose --output-on-failure -R
+    # remove || true after all benchmark failures are fixed:
+    ctest --verbose --output-on-failure > test_errors.txt || true
     cd ..
     mkdir -p ${placeholder "out"}/test-logs
     find .. -type f -name '*_test.xml' -exec cp {} ${placeholder "out"}/test-logs \;
+    find .. -type f -name '*_benchmark.xml' -exec cp {} ${placeholder "out"}/test-logs \;
+    cp parallel-crypto3/test_errors.txt ${placeholder "out"}/test-logs
   '';
 
   shellHook = ''
diff --git a/parallel-crypto3/CMakeLists.txt b/parallel-crypto3/CMakeLists.txt
index 2837852927..481878b48b 100644
--- a/parallel-crypto3/CMakeLists.txt
+++ b/parallel-crypto3/CMakeLists.txt
@@ -31,6 +31,9 @@ add_subdirectory(libs/parallelization-utils)
 add_subdirectory(libs/parallel-containers)
 add_subdirectory(libs/parallel-math)
 add_subdirectory(libs/parallel-zk)
+if(ENABLE_BENCHMARKS)
+    add_subdirectory(benchmarks)
+endif()
 
 add_library(${PROJECT_NAME}_all INTERFACE)
 add_library(${PROJECT_NAME}::all ALIAS ${PROJECT_NAME}_all)
diff --git a/parallel-crypto3/benchmarks/CMakeLists.txt b/parallel-crypto3/benchmarks/CMakeLists.txt
new file mode 100644
index 0000000000..ffb551e2a7
--- /dev/null
+++ b/parallel-crypto3/benchmarks/CMakeLists.txt
@@ -0,0 +1,55 @@
+#---------------------------------------------------------------------------#
+# Copyright (c) 2018-2020 Mikhail Komarov <nemo@nil.foundation>
+#
+# Distributed under the Boost Software License, Version 1.0
+# See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt
+#---------------------------------------------------------------------------#
+
+include(CMTest)
+
+find_package(Boost REQUIRED COMPONENTS
+    timer
+    unit_test_framework
+)
+
+macro(define_bench_test name)
+    set(test_name "parallel_crypto3_${name}_bench")
+
+    set(additional_args "")
+    if(ENABLE_JUNIT_TEST_OUTPUT)
+        set(TEST_RESULTS_DIR "${CMAKE_CURRENT_BINARY_DIR}/junit_results")
+        set(TEST_LOGS_DIR "${TEST_RESULTS_DIR}/logs")
+        file(MAKE_DIRECTORY ${TEST_LOGS_DIR})
+
+        set(additional_args "--log_format=JUNIT"
+                            "--log_sink=${TEST_LOGS_DIR}/${test_name}.xml")
+    endif()
+
+    cm_test(NAME ${test_name} SOURCES ${name}.cpp ARGS ${additional_args})
+
+    target_include_directories(${test_name} PRIVATE
+                               "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
+                               "$<BUILD_INTERFACE:${CMAKE_BINARY_DIR}/include>"
+
+                               ${Boost_INCLUDE_DIRS})
+
+    set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17)
+
+endmacro()
+
+cm_test_link_libraries(
+    crypto3::algebra
+    crypto3::math
+    crypto3::multiprecision
+    crypto3::random
+    ${Boost_LIBRARIES}
+)
+
+set(TESTS_NAMES
+    "polynomial_dfs_benchmark"
+)
+
+foreach(TEST_NAME ${TESTS_NAMES})
+    define_bench_test(${TEST_NAME})
+endforeach()
diff --git a/parallel-crypto3/libs/parallel-math/test/benchmarks/polynomial_dfs_benchmark.cpp b/parallel-crypto3/benchmarks/polynomial_dfs_benchmark.cpp
similarity index 99%
rename from parallel-crypto3/libs/parallel-math/test/benchmarks/polynomial_dfs_benchmark.cpp
rename to parallel-crypto3/benchmarks/polynomial_dfs_benchmark.cpp
index 96fef13cf5..db7e056a1b 100644
--- a/parallel-crypto3/libs/parallel-math/test/benchmarks/polynomial_dfs_benchmark.cpp
+++ b/parallel-crypto3/benchmarks/polynomial_dfs_benchmark.cpp
@@ -22,7 +22,7 @@
 // SOFTWARE.
 //---------------------------------------------------------------------------//
 
-#define BOOST_TEST_MODULE polynomial_dfs_benchmark_test
+#define BOOST_TEST_MODULE polynomial_dfs_benchmark
 
 #include <algorithm>
 #include <cctype>
diff --git a/parallel-crypto3/libs/parallel-math/test/CMakeLists.txt b/parallel-crypto3/libs/parallel-math/test/CMakeLists.txt
index 525e1f6b90..d88e18ea21 100644
--- a/parallel-crypto3/libs/parallel-math/test/CMakeLists.txt
+++ b/parallel-crypto3/libs/parallel-math/test/CMakeLists.txt
@@ -52,7 +52,3 @@ set(TESTS_NAMES
 foreach(TEST_NAME ${TESTS_NAMES})
     define_math_test(${TEST_NAME})
 endforeach()
-
-if(ENABLE_BENCHMARKS)
-    add_subdirectory(benchmarks)
-endif()
diff --git a/parallel-crypto3/libs/parallel-math/test/benchmarks/CMakeLists.txt b/parallel-crypto3/libs/parallel-math/test/benchmarks/CMakeLists.txt
deleted file mode 100644
index 374894a767..0000000000
--- a/parallel-crypto3/libs/parallel-math/test/benchmarks/CMakeLists.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-#---------------------------------------------------------------------------#
-# Copyright (c) 2018-2020 Mikhail Komarov <nemo@nil.foundation>
-#
-# Distributed under the Boost Software License, Version 1.0
-# See accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt
-#---------------------------------------------------------------------------#
-
-find_package(Boost REQUIRED COMPONENTS
-    timer
-    unit_test_framework
-)
-
-cm_test_link_libraries(${CMAKE_WORKSPACE_NAME}_${CURRENT_PROJECT_NAME}
-
-    crypto3::algebra
-    crypto3::multiprecision
-    crypto3::random
-
-    ${Boost_LIBRARIES}
-)
-
-set(TESTS_NAMES
-    "polynomial_dfs_benchmark"
-)
-
-foreach(TEST_NAME ${TESTS_NAMES})
-    define_math_test(${TEST_NAME})
-endforeach()
-
-
-#get_target_property(my_include_dirs math_polynomial_dfs_benchmark_test INCLUDE_DIRECTORIES)
-#message(include dirs: ${my_include_dirs})
diff --git a/parse_benchmarks.py b/parse_benchmarks.py
new file mode 100644
index 0000000000..6e587fab7c
--- /dev/null
+++ b/parse_benchmarks.py
@@ -0,0 +1,40 @@
+import logging, json
+from junitparser import JUnitXml
+import glob, os
+from opentelemetry import trace
+
+# Per-suite timings are collected first, then merged with the aggregate summary.
+result = {}
+
+aggregated_test_results = JUnitXml()
+for file in glob.glob("result/test-logs/*_benchmark.xml"):
+    try:
+        test_result = JUnitXml.fromfile(file)
+        result[test_result.name] = test_result.time
+        aggregated_test_results.append(test_result)
+    except Exception as ex:
+        print("Error processing {}".format(file))
+        print(ex)
+
+succeeded = aggregated_test_results.tests - \
+            aggregated_test_results.failures - \
+            aggregated_test_results.errors - \
+            aggregated_test_results.skipped
+
+result.update({
+    "benchmark_tests"          : aggregated_test_results.tests,
+    "benchmark_failures"       : aggregated_test_results.failures,
+    "benchmark_errors"         : aggregated_test_results.errors,
+    "benchmark_skipped"        : aggregated_test_results.skipped,
+    "benchmark_succeeded"      : succeeded,
+    "benchmark_execution_time" : aggregated_test_results.time,
+})
+
+print("Resulting JSON: {}".format(json.dumps(result)))
+
+tracer = trace.get_tracer_provider().get_tracer(__name__)
+with tracer.start_as_current_span("nightly_span"):
+    current_span = trace.get_current_span()
+    current_span.add_event("Nightly benchmarks build finished")
+    logging.getLogger().error(json.dumps(result))
+
diff --git a/parse_tests.py b/parse_tests.py
index 20339e810d..008f3bd368 100644
--- a/parse_tests.py
+++ b/parse_tests.py
@@ -3,12 +3,12 @@
 import glob, os
 from opentelemetry import trace
 
-undefined_behavior_sanitizer=os.environ['UndefinedBehaviorSanitizer']
-address_sanitizer=os.environ['AddressSanitizer']
-leak_sanitizer=os.environ['LeakSanitizer']
+undefined_behavior_sanitizer=os.getenv('UndefinedBehaviorSanitizer', -1)
+address_sanitizer=os.getenv('AddressSanitizer', -1)
+leak_sanitizer=os.getenv('LeakSanitizer', -1)
 
 aggregated_test_results = JUnitXml();
-for file in glob.glob("result/test-logs/*.xml"):
+for file in glob.glob("result/test-logs/*_test.xml"):
     try:
         aggregated_test_results.append(JUnitXml.fromfile(file))
     except Exception as ex:
@@ -32,11 +32,20 @@
     "leak_sanitizer"               : int(leak_sanitizer),
 }
 
+for file in glob.glob("result/test-logs/*_benchmark.xml"):
+    try:
+        test_result = JUnitXml.fromfile(file)
+        result[test_result.name]=test_result.time
+
+    except Exception as ex:
+        print("Error processing {}".format(file))
+        print(ex)
+
 print("Resulting JSON: {}".format(json.dumps(result)))
 
 tracer = trace.get_tracer_provider().get_tracer(__name__)
 with tracer.start_as_current_span("nightly_span"):
     current_span = trace.get_current_span()
-    current_span.add_event("Nightly build finished")
+    current_span.add_event("Nightly sanitizers build finished")
     logging.getLogger().error(json.dumps(result))