diff --git a/jolt-core/benches/binding.rs b/jolt-core/benches/binding.rs
index 33e3158d6..057bb1906 100644
--- a/jolt-core/benches/binding.rs
+++ b/jolt-core/benches/binding.rs
@@ -157,19 +157,19 @@ fn main() {
     benchmark_sparse_interleaved::(&mut criterion, 64, 21, 0.1);
     benchmark_sparse_interleaved::(&mut criterion, 128, 21, 0.1);
 
-    // benchmark_dense::(&mut criterion, 20);
-    // benchmark_dense::(&mut criterion, 22);
-    // benchmark_dense::(&mut criterion, 24);
+    benchmark_dense::(&mut criterion, 20);
+    benchmark_dense::(&mut criterion, 22);
+    benchmark_dense::(&mut criterion, 24);
 
-    // benchmark_dense_interleaved::(&mut criterion, 22);
+    benchmark_dense_interleaved::(&mut criterion, 22);
     // benchmark_dense_interleaved::(&mut criterion, 23);
-    // benchmark_dense_interleaved::(&mut criterion, 24);
+    benchmark_dense_interleaved::(&mut criterion, 24);
     // benchmark_dense_interleaved::(&mut criterion, 25);
 
-    // benchmark_dense_batch::(&mut criterion, 20, 4);
-    // benchmark_dense_batch::(&mut criterion, 20, 8);
-    // benchmark_dense_batch::(&mut criterion, 20, 16);
-    // benchmark_dense_batch::(&mut criterion, 20, 32);
+    benchmark_dense_batch::(&mut criterion, 20, 4);
+    benchmark_dense_batch::(&mut criterion, 20, 8);
+    benchmark_dense_batch::(&mut criterion, 20, 16);
+    benchmark_dense_batch::(&mut criterion, 20, 32);
 
     criterion.final_summary();
 }
diff --git a/jolt-core/benches/commit.rs b/jolt-core/benches/commit.rs
index 03a6b3834..86a2e6a70 100644
--- a/jolt-core/benches/commit.rs
+++ b/jolt-core/benches/commit.rs
@@ -5,6 +5,7 @@ use jolt_core::poly::commitment::commitment_scheme::{BatchType, CommitShape, Com
 use jolt_core::poly::commitment::hyperkzg::HyperKZG;
 use jolt_core::poly::commitment::kzg::CommitMode;
 use jolt_core::poly::commitment::zeromorph::Zeromorph;
+use jolt_core::poly::multilinear_polynomial::MultilinearPolynomial;
 use jolt_core::utils::transcript::{KeccakTranscript, Transcript};
 use rand_chacha::ChaCha20Rng;
 use rand_core::{RngCore, SeedableRng};
@@ -77,8 +78,8 @@ fn benchmark_commit(
     let (leaves, setup, _) = setup_bench::(num_layer, layer_size, threshold);
 
     let leaves = leaves
-        .iter()
-        .map(|layer| layer.as_slice())
+        .into_iter()
+        .map(|layer| MultilinearPolynomial::from(layer))
         .collect::>();
     let mode = match batch_type {
         BatchType::GrandProduct => CommitMode::GrandProduct,
@@ -88,7 +89,11 @@ fn benchmark_commit(
         &format!("{} Commit(mode:{:?}): {}% Ones", name, mode, threshold),
         |b| {
             b.iter(|| {
-                PCS::batch_commit(&leaves, &setup, batch_type.clone());
+                PCS::batch_commit(
+                    &leaves.iter().collect::>(),
+                    &setup,
+                    batch_type.clone(),
+                );
             });
         },
     );
diff --git a/jolt-core/benches/compute_cubic.rs b/jolt-core/benches/compute_cubic.rs
index 0262087a0..a2dcad02d 100644
--- a/jolt-core/benches/compute_cubic.rs
+++ b/jolt-core/benches/compute_cubic.rs
@@ -3,13 +3,11 @@ use ark_std::{rand::Rng, test_rng};
 use criterion::Criterion;
 use jolt_core::field::JoltField;
 use jolt_core::poly::dense_interleaved_poly::DenseInterleavedPolynomial;
-use jolt_core::poly::dense_mlpoly::DensePolynomial;
 use jolt_core::poly::sparse_interleaved_poly::{SparseCoefficient, SparseInterleavedPolynomial};
 use jolt_core::poly::split_eq_poly::SplitEqPolynomial;
-use jolt_core::subprotocols::sumcheck::{BatchedCubicSumcheck, Bindable};
+use jolt_core::subprotocols::sumcheck::BatchedCubicSumcheck;
 use jolt_core::utils::math::Math;
 use jolt_core::utils::transcript::KeccakTranscript;
-use rayon::prelude::*;
 
 fn random_dense_coeffs(rng: &mut impl Rng, num_vars: usize) -> Vec {
     std::iter::repeat_with(|| F::random(rng))
@@ -110,11 +108,11 @@ fn main() {
         .configure_from_args()
         .warm_up_time(std::time::Duration::from_secs(5));
 
-    // benchmark_dense_interleaved::(&mut criterion, 20);
+    benchmark_dense_interleaved::(&mut criterion, 20);
     // benchmark_dense_interleaved::(&mut criterion, 21);
-    // benchmark_dense_interleaved::(&mut criterion, 22);
+    benchmark_dense_interleaved::(&mut criterion, 22);
     // benchmark_dense_interleaved::(&mut criterion, 23);
-    // benchmark_dense_interleaved::(&mut criterion, 24);
+    benchmark_dense_interleaved::(&mut criterion, 24);
     // benchmark_dense_interleaved::(&mut criterion, 25);
 
     benchmark_sparse_interleaved::(&mut criterion, 64, 20, 0.1);
diff --git a/jolt-core/benches/iai.rs b/jolt-core/benches/iai.rs
index 0535f9177..eb77ba15f 100644
--- a/jolt-core/benches/iai.rs
+++ b/jolt-core/benches/iai.rs
@@ -1,20 +1,9 @@
-use ark_bn254::{Fr, G1Projective};
-use ark_ec::CurveGroup;
-use ark_std::{test_rng, UniformRand};
+use ark_bn254::Fr;
+use ark_std::test_rng;
 use iai_callgrind::{library_benchmark, library_benchmark_group, main};
-use jolt_core::{field::JoltField, msm::VariableBaseMSM, poly::dense_mlpoly::DensePolynomial};
+use jolt_core::{field::JoltField, poly::dense_mlpoly::DensePolynomial};
 use std::hint::black_box;
 
-fn msm_setup(num_points: usize) -> (Vec, Vec) {
-    let mut rng = test_rng();
-
-    // Generate a vector of random affine points on the curve.
-    (
-        vec![G::rand(&mut rng); num_points],
-        vec![G::ScalarField::rand(&mut rng); num_points],
-    )
-}
-
 fn bound_poly_setup(size: usize) -> (DensePolynomial, F) {
     let mut rng = test_rng();
 
@@ -32,12 +21,6 @@ fn eval_poly_setup(size: usize) -> (DensePolynomial, Vec) {
     (poly, points)
 }
 
-#[library_benchmark]
-#[bench::long(msm_setup::(4096))]
-fn bench_msm(input: (Vec, Vec)) -> G {
-    black_box(VariableBaseMSM::msm(&G::normalize_batch(&input.0), None, &input.1).unwrap())
-}
-
 #[library_benchmark]
 #[bench::long(bound_poly_setup::(4096))]
 fn bench_polynomial_binding(input: (DensePolynomial, F)) {
@@ -54,7 +37,7 @@ fn bench_polynomial_evaluate(input: (DensePolynomial, Vec))
 
 library_benchmark_group!(
     name = jolt_core_ops;
-    benchmarks = bench_msm, bench_polynomial_binding, bench_polynomial_evaluate
+    benchmarks = bench_polynomial_binding, bench_polynomial_evaluate
 );
 
 main!(library_benchmark_groups = jolt_core_ops);
diff --git a/jolt-core/benches/msm.rs b/jolt-core/benches/msm.rs
index e2a4ede9f..f314d7302 100644
--- a/jolt-core/benches/msm.rs
+++ b/jolt-core/benches/msm.rs
@@ -1,29 +1,26 @@
 use ark_bn254::{Bn254, Fr, G1Affine, G1Projective};
-use ark_ff::{BigInteger, PrimeField};
-use ark_std::rand::Rng;
 use ark_std::UniformRand;
-use ark_std::{One, Zero};
 use criterion::Criterion;
 use jolt_core::field::JoltField;
 #[cfg(feature = "icicle")]
 use jolt_core::msm::Icicle;
-use jolt_core::msm::{icicle_init, GpuBaseType, MsmType, VariableBaseMSM};
+use jolt_core::msm::{icicle_init, GpuBaseType, VariableBaseMSM};
 use jolt_core::poly::commitment::commitment_scheme::CommitmentScheme;
 use jolt_core::poly::commitment::zeromorph::Zeromorph;
+use jolt_core::poly::multilinear_polynomial::MultilinearPolynomial;
 use jolt_core::utils::transcript::{KeccakTranscript, Transcript};
 use rand_chacha::ChaCha20Rng;
 use rand_core::{RngCore, SeedableRng};
-use rayon::prelude::*;
 
 const SRS_SIZE: usize = 1 << 20;
 
 // Sets up the benchmark
 fn setup_bench(
-    msm_type: MsmType,
+    max_num_bits: usize,
 ) -> (
     Vec,
     Option>>,
-    Vec,
+    MultilinearPolynomial,
 )
 where
     F: JoltField,
     PCS: CommitmentScheme,
     ProofTranscript: Transcript,
 {
     let mut rng = ChaCha20Rng::seed_from_u64(SRS_SIZE as u64);
-
-    let scalars = match msm_type {
-        MsmType::Zero => {
-            vec![Fr::zero(); SRS_SIZE]
-        }
-        MsmType::One => {
-            vec![Fr::one(); SRS_SIZE]
-        }
-        MsmType::Small(_) => (0..SRS_SIZE)
-            .into_iter()
-            .map(|_| {
-                let i = rng.gen_range(0..(1 << 10));
-                ::from_u64(i).unwrap()
-            })
-            .collect(),
-        MsmType::Medium(_) => (0..SRS_SIZE)
-            .into_iter()
-            .map(|_| {
-                let i = rng.next_u64();
-                ::from_u64(i).unwrap()
-            })
-            .collect(),
-        MsmType::Large(_) => (0..SRS_SIZE)
-            .into_iter()
-            .map(|_| Fr::random(&mut rng))
-            .collect(),
+    let poly = match max_num_bits {
+        0 => MultilinearPolynomial::from(vec![0u8; SRS_SIZE]),
+        1..=8 => MultilinearPolynomial::from(
+            (0..SRS_SIZE)
+                .into_iter()
+                .map(|_| (rng.next_u32() & ((1 << max_num_bits) - 1)) as u8)
+                .collect::>(),
+        ),
+        9..=16 => MultilinearPolynomial::from(
+            (0..SRS_SIZE)
+                .into_iter()
+                .map(|_| (rng.next_u32() & ((1 << max_num_bits) - 1)) as u16)
+                .collect::>(),
+        ),
+        17..=32 => MultilinearPolynomial::from(
+            (0..SRS_SIZE)
+                .into_iter()
+                .map(|_| (rng.next_u64() & ((1 << max_num_bits) - 1)) as u32)
+                .collect::>(),
+        ),
+        33..=64 => MultilinearPolynomial::from(
+            (0..SRS_SIZE)
+                .into_iter()
+                .map(|_| rng.next_u64() & ((1 << max_num_bits) - 1))
+                .collect::>(),
+        ),
+        _ => MultilinearPolynomial::from(
+            (0..SRS_SIZE)
+                .into_iter()
+                .map(|_| Fr::random(&mut rng))
+                .collect::>(),
+        ),
     };
 
     let bases: Vec = std::iter::repeat_with(|| G1Affine::rand(&mut rng))
         .take(SRS_SIZE)
         .collect::>();
 
     #[cfg(feature = "icicle")]
     let gpu_bases = Some(
         bases
             .par_iter()
             .map(|base| GpuBaseType::::from(*base))
             .collect::>(),
     );
     #[cfg(not(feature = "icicle"))]
     let gpu_bases = None;
 
-    let max_num_bits = scalars
-        .par_iter()
-        .map(|s| s.clone().into_bigint().num_bits())
-        .max()
-        .unwrap();
-    println!("Using max num bits: {}", max_num_bits);
-    (bases, gpu_bases, scalars)
+    (bases, gpu_bases, poly)
 }
 
-fn benchmark_msm(c: &mut Criterion, name: &str, msm_type: MsmType)
+fn benchmark_msm(c: &mut Criterion, name: &str, max_num_bits: usize)
 where
     F: JoltField,
     PCS: CommitmentScheme,
     ProofTranscript: Transcript,
 {
-    let (bases, gpu_bases, scalars) = setup_bench::(msm_type);
+    let (bases, gpu_bases, poly) = setup_bench::(max_num_bits);
     icicle_init();
     #[cfg(feature = "icicle")]
     let id = format!("{} [mode:Icicle]", name);
@@ -97,7 +94,7 @@ where
     c.bench_function(&id, |b| {
         b.iter(|| {
             let msm =
-                ::msm(&bases, gpu_bases.as_deref(), &scalars);
+                ::msm(&bases, gpu_bases.as_deref(), &poly, None);
             let _ = msm.expect("MSM failed");
         });
     });
@@ -110,18 +107,33 @@ fn main() {
         .warm_up_time(std::time::Duration::from_secs(5));
     benchmark_msm::, Fr, KeccakTranscript>(
         &mut criterion,
-        "VariableBaseMSM::msm(Large)",
-        MsmType::Large(0 /* unused */),
+        "VariableBaseMSM::msm(256 bit scalars)",
+        256,
+    );
+    benchmark_msm::, Fr, KeccakTranscript>(
+        &mut criterion,
+        "VariableBaseMSM::msm(64 bit scalars)",
+        64,
+    );
+    benchmark_msm::, Fr, KeccakTranscript>(
+        &mut criterion,
+        "VariableBaseMSM::msm(32 bit scalars)",
+        32,
+    );
+    benchmark_msm::, Fr, KeccakTranscript>(
+        &mut criterion,
+        "VariableBaseMSM::msm(16 bit scalars)",
+        16,
     );
     benchmark_msm::, Fr, KeccakTranscript>(
         &mut criterion,
-        "VariableBaseMSM::msm(Medium)",
-        MsmType::Medium(0 /* unused */),
+        "VariableBaseMSM::msm(8 bit scalars)",
+        8,
     );
     benchmark_msm::, Fr, KeccakTranscript>(
         &mut criterion,
-        "VariableBaseMSM::msm(Small)",
-        MsmType::Small(0 /* unused */),
+        "VariableBaseMSM::msm(1 bit scalars)",
+        1,
     );
     criterion.final_summary();
 }
diff --git a/jolt-core/benches/msm_batch.rs b/jolt-core/benches/msm_batch.rs
index 22070a05d..3d94edc0f 100644
--- a/jolt-core/benches/msm_batch.rs
+++ b/jolt-core/benches/msm_batch.rs
@@ -1,16 +1,14 @@
 use ark_bn254::{Bn254, Fr, G1Affine, G1Projective};
-use ark_ff::BigInteger;
 use ark_std::rand::seq::SliceRandom;
-use ark_std::rand::Rng;
 use ark_std::UniformRand;
-use ark_std::{One, Zero};
 use criterion::Criterion;
 use jolt_core::field::JoltField;
 #[cfg(feature = "icicle")]
 use jolt_core::msm::Icicle;
-use jolt_core::msm::{icicle_init, GpuBaseType, MsmType, VariableBaseMSM};
+use jolt_core::msm::{icicle_init, GpuBaseType, VariableBaseMSM};
 use jolt_core::poly::commitment::commitment_scheme::CommitmentScheme;
 use jolt_core::poly::commitment::zeromorph::Zeromorph;
+use jolt_core::poly::multilinear_polynomial::MultilinearPolynomial;
 use jolt_core::utils::transcript::{KeccakTranscript, Transcript};
 use rand_chacha::ChaCha20Rng;
 use rand_core::{RngCore, SeedableRng};
@@ -21,11 +19,11 @@ const SRS_SIZE: usize = 1 << 14;
 
 // Sets up the benchmark
 fn setup_bench(
-    batch_config: BatchConfig,
+    max_num_bits: Vec,
 ) -> (
     Vec,
     Option>>,
-    Vec>,
+    Vec>,
 )
 where
     F: JoltField,
     PCS: CommitmentScheme,
     ProofTranscript: Transcript,
 {
     let mut rng = ChaCha20Rng::seed_from_u64(SRS_SIZE as u64);
-    // For each type in the batch config create a vector of scalars
-    let mut scalar_batches: Vec> = vec![];
-
-    (0..batch_config.small)
-        .into_iter()
-        .for_each(|_| scalar_batches.push(get_scalars(MsmType::Small(0 /* unused */), SRS_SIZE)));
-    (0..batch_config.medium)
+    // For each `max_num_bits` value, create a polynomial
+    let mut polys: Vec<_> = max_num_bits
         .into_iter()
-        .for_each(|_| scalar_batches.push(get_scalars(MsmType::Medium(0 /* unused */), SRS_SIZE)));
-    (0..batch_config.large)
-        .into_iter()
-        .for_each(|_| scalar_batches.push(get_scalars(MsmType::Large(0 /* unused */), SRS_SIZE)));
-    scalar_batches.shuffle(&mut rng);
+        .map(|num_bits| random_poly(num_bits, SRS_SIZE))
+        .collect();
+
+    polys.shuffle(&mut rng);
 
     let bases: Vec = std::iter::repeat_with(|| G1Affine::rand(&mut rng))
         .take(SRS_SIZE)
         .collect::>();
@@ -59,55 +51,58 @@ where
     );
     #[cfg(not(feature = "icicle"))]
     let gpu_bases = None;
-    (bases, gpu_bases, scalar_batches)
+    (bases, gpu_bases, polys)
 }
 
-fn get_scalars(msm_type: MsmType, size: usize) -> Vec {
-    let mut rng = ChaCha20Rng::seed_from_u64(size as u64);
-    match msm_type {
-        MsmType::Zero => {
-            vec![Fr::zero(); size]
-        }
-        MsmType::One => {
-            vec![Fr::one(); size]
-        }
-        MsmType::Small(_) => (0..size)
-            .into_iter()
-            .map(|_| {
-                let i = rng.gen_range(0..(1 << 10));
-                ::from_u64(i).unwrap()
-            })
-            .collect(),
-        MsmType::Medium(_) => (0..size)
-            .into_iter()
-            .map(|_| {
-                let i = rng.next_u64();
-                ::from_u64(i).unwrap()
-            })
-            .collect(),
-        MsmType::Large(_) => (0..size)
-            .into_iter()
-            .map(|_| Fr::random(&mut rng))
-            .collect(),
+fn random_poly(max_num_bits: usize, len: usize) -> MultilinearPolynomial {
+    let mut rng = ChaCha20Rng::seed_from_u64(len as u64);
+    match max_num_bits {
+        0 => MultilinearPolynomial::from(vec![0u8; len]),
+        1..=8 => MultilinearPolynomial::from(
+            (0..len)
+                .into_iter()
+                .map(|_| (rng.next_u32() & ((1 << max_num_bits) - 1)) as u8)
+                .collect::>(),
+        ),
+        9..=16 => MultilinearPolynomial::from(
+            (0..len)
+                .into_iter()
+                .map(|_| (rng.next_u32() & ((1 << max_num_bits) - 1)) as u16)
+                .collect::>(),
+        ),
+        17..=32 => MultilinearPolynomial::from(
+            (0..len)
+                .into_iter()
+                .map(|_| (rng.next_u64() & ((1 << max_num_bits) - 1)) as u32)
+                .collect::>(),
+        ),
+        33..=64 => MultilinearPolynomial::from(
+            (0..len)
+                .into_iter()
+                .map(|_| rng.next_u64() & ((1 << max_num_bits) - 1))
+                .collect::>(),
+        ),
+        _ => MultilinearPolynomial::from(
+            (0..len)
+                .into_iter()
+                .map(|_| Fr::random(&mut rng))
+                .collect::>(),
+        ),
     }
 }
 
 fn benchmark_msm_batch(
     c: &mut Criterion,
     name: &str,
-    batch_config: BatchConfig,
+    max_num_bits: Vec,
 ) where
     F: JoltField,
     PCS: CommitmentScheme,
     ProofTranscript: Transcript,
 {
-    let (bases, gpu_bases, scalar_batches) = setup_bench::(batch_config);
-    let scalar_batches_ref: Vec<_> = scalar_batches
-        .iter()
-        .map(|inner_vec| inner_vec.as_slice())
-        .collect();
+    let (bases, gpu_bases, polys) = setup_bench::(max_num_bits);
+    let polys_ref: Vec<_> = polys.iter().collect();
     icicle_init();
-    println!("Running benchmark for {:?}", batch_config);
     #[cfg(feature = "icicle")]
     let id = format!("{} [mode:Icicle]", name);
     #[cfg(not(feature = "icicle"))]
@@ -117,51 +112,38 @@ fn benchmark_msm_batch(
             let msm = ::batch_msm(
                 &bases,
                 gpu_bases.as_deref(),
-                &scalar_batches_ref,
+                &polys_ref,
             );
-            assert_eq!(msm.len(), scalar_batches.len());
+            assert_eq!(msm.len(), polys.len());
         });
     });
 }
 
-#[derive(Debug, Clone, Copy)]
-struct BatchConfig {
-    small: usize,
-    medium: usize,
-    large: usize,
-}
-
 fn main() {
     let mut criterion = Criterion::default()
         .configure_from_args()
        .sample_size(10)
        .warm_up_time(std::time::Duration::from_secs(10));
+
+    let max_num_bits = [vec![8; 100], vec![32; 100], vec![256; 300]].concat();
     benchmark_msm_batch::, Fr, KeccakTranscript>(
         &mut criterion,
         "VariableBaseMSM::msm_batch(bias: Large)",
-        BatchConfig {
-            small: 100,
-            medium: 100,
-            large: 300,
-        },
+        max_num_bits,
     );
+
+    let max_num_bits = [vec![8; 100], vec![32; 300], vec![256; 100]].concat();
     benchmark_msm_batch::, Fr, KeccakTranscript>(
         &mut criterion,
         "VariableBaseMSM::msm_batch(bias: Medium)",
-        BatchConfig {
-            small: 100,
-            medium: 300,
-            large: 100,
-        },
+        max_num_bits,
     );
+
+    let max_num_bits = [vec![8; 300], vec![32; 100], vec![256; 100]].concat();
     benchmark_msm_batch::, Fr, KeccakTranscript>(
         &mut criterion,
         "VariableBaseMSM::msm_batch(bias: Small)",
-        BatchConfig {
-            small: 300,
-            medium: 100,
-            large: 100,
-        },
+        max_num_bits,
     );
     criterion.final_summary();
 }
diff --git a/jolt-evm-verifier/script/src/bin/hyperkzg_batch_example.rs b/jolt-evm-verifier/script/src/bin/hyperkzg_batch_example.rs
deleted file mode 100644
index c8e2c66c0..000000000
--- a/jolt-evm-verifier/script/src/bin/hyperkzg_batch_example.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-use alloy_primitives::{hex, U256};
-use alloy_sol_types::{sol, SolType};
-
-use ark_bn254::Bn254;
-use ark_ec::pairing::Pairing;
-use ark_ff::BigInteger;
-use ark_ff::PrimeField;
-use ark_std::UniformRand;
-use jolt_core::poly::commitment::commitment_scheme::{BatchType, CommitmentScheme};
-use jolt_core::poly::commitment::hyperkzg::*;
-use jolt_core::poly::dense_mlpoly::DensePolynomial;
-use jolt_core::utils::transcript::{KeccakTranscript, Transcript};
-use rand_core::SeedableRng;
-
-use jolt_core::utils::sol_types::{HyperKZGProofSol, VK};
-
-fn main() {
-    // Testing 2^12 ie 4096 elements
-    // We replicate the behavior of the standard rust tests, but output
-    // the proof and verification key to ensure it is verified in sol as well.
-
-    let ell = 12;
-    let mut rng = rand_chacha::ChaCha20Rng::seed_from_u64(ell as u64);
-
-    let n = 1 << ell; // n = 2^ell
-
-    let srs = HyperKZGSRS::setup(&mut rng, n);
-    let (pk, vk): (HyperKZGProverKey, HyperKZGVerifierKey) = srs.trim(n);
-
-    let point = (0..ell)
-        .map(|_| ::ScalarField::rand(&mut rng))
-        .collect::>();
-
-    let mut polys = vec![];
-    let mut evals = vec![];
-    let mut commitments = vec![];
-    let mut borrowed = vec![];
-    for _ in 0..8 {
-        let poly = DensePolynomial::new(
-            (0..n)
-                .map(|_| ::ScalarField::rand(&mut rng))
-                .collect::>(),
-        );
-        let eval = poly.evaluate(&point);
-        commitments.push(HyperKZG::<_, KeccakTranscript>::commit(&pk, &poly).unwrap());
-        polys.push(poly);
-        evals.push(eval);
-    }
-
-    for poly in polys.iter() {
-        borrowed.push(poly);
-    }
-
-    // prove an evaluation
-    let mut prover_transcript = KeccakTranscript::new(b"TestEval");
-    let proof: HyperKZGProof = HyperKZG::batch_prove(
-        &(pk, vk),
-        borrowed.as_slice(),
-        &point,
-        &evals,
-        BatchType::Big,
-        &mut prover_transcript,
-    );
-
-    sol!(struct BatchedExample {
-        VK vk;
-        HyperKZGProofSol proof;
-        uint256[] commitments;
-        uint256[] point;
-        uint256[] claims;
-    });
-
-    let vk_sol = (&vk).into();
-    let proof_sol = (&proof).into();
-
-    let mut encoded_commitments = vec![];
-    for point in commitments.iter() {
-        let x = U256::from_be_slice(&point.0.x.into_bigint().to_bytes_be());
-        let y = U256::from_be_slice(&point.0.y.into_bigint().to_bytes_be());
-        encoded_commitments.push(x);
-        encoded_commitments.push(y);
-    }
-
-    let point_encoded = point
-        .iter()
-        .map(|i| U256::from_be_slice(i.into_bigint().to_bytes_be().as_slice()))
-        .collect();
-    let mut evals_encoded = vec![];
-    for eval in evals.iter() {
-        evals_encoded.push(U256::from_be_slice(&eval.into_bigint().to_bytes_be()));
-    }
-
-    let example = BatchedExample {
-        proof: proof_sol,
-        vk: vk_sol,
-        commitments: encoded_commitments,
-        point: point_encoded,
-        claims: evals_encoded,
-    };
-
-    print!("{}", hex::encode(BatchedExample::abi_encode(&example)));
-}
diff --git a/jolt-evm-verifier/script/src/bin/hyperkzg_example.rs b/jolt-evm-verifier/script/src/bin/hyperkzg_example.rs
index bf142e883..af45811a3 100644
--- a/jolt-evm-verifier/script/src/bin/hyperkzg_example.rs
+++ b/jolt-evm-verifier/script/src/bin/hyperkzg_example.rs
@@ -7,7 +7,7 @@ use ark_ff::BigInteger;
 use ark_ff::PrimeField;
 use ark_std::UniformRand;
 use jolt_core::poly::commitment::hyperkzg::*;
-use jolt_core::poly::dense_mlpoly::DensePolynomial;
+use jolt_core::poly::multilinear_polynomial::{MultilinearPolynomial, PolynomialEvaluation};
 use jolt_core::utils::transcript::{KeccakTranscript, Transcript};
 use rand_core::SeedableRng;
@@ -26,7 +26,7 @@ fn main() {
     let srs = HyperKZGSRS::setup(&mut rng, n);
     let (pk, vk): (HyperKZGProverKey, HyperKZGVerifierKey) = srs.trim(n);
 
-    let poly = DensePolynomial::new(
+    let poly = MultilinearPolynomial::from(
         (0..n)
             .map(|_| ::ScalarField::rand(&mut rng))
             .collect::>(),
diff --git a/jolt-evm-verifier/src/subprotocols/HyperKZG.sol b/jolt-evm-verifier/src/subprotocols/HyperKZG.sol
index dd2f6c6d5..5a7cc4c7f 100644
--- a/jolt-evm-verifier/src/subprotocols/HyperKZG.sol
+++ b/jolt-evm-verifier/src/subprotocols/HyperKZG.sol
@@ -35,38 +35,6 @@ contract HyperKZG {
     uint256 immutable VK_beta_g2_y_c0;
     uint256 immutable VK_beta_g2_y_c1;
 
-    /// Implements a batching protocol to verify multiple polynomial openings to the same point
-    /// using hyper kzg and a random linear combination.
-    /// @param commitments The polynomial commitment points in a vector arranged with x in the even
-    /// and y in the odd positions
-    /// @param point The point which is opened
-    /// @param p_of_x The vector of claimed evaluations
-    /// @param pi The proof of the opening, passed into the rlc verify
-    /// @param transcript The fiat shamair transcript we are sourcing deterministic randoms from
-    /// TODO - WARN - YOU MUST WRITE COMMITMENTS TO TRANSCRIPT BEFORE CALLING
-    /// TODO - Affine and calldata pointer versions of this, to save gas on point rep
-    function batch_verify(
-        uint256[] memory commitments,
-        uint256[] memory point,
-        uint256[] memory p_of_x,
-        HyperKZGProof memory pi,
-        Transcript memory transcript
-    ) public view returns (bool) {
-        // Load a rho from transcript
-        Fr rho = Fr.wrap(transcript.challenge_scalar(MODULUS));
-        (uint256 running_x, uint256 running_y) = (commitments[0], commitments[1]);
-        Fr running_eval = Fr.wrap(p_of_x[0]);
-        Fr scalar = rho;
-        for (uint256 i = 2; i < commitments.length; i += 2) {
-            (uint256 next_x, uint256 next_y) = ec_scalar_mul(commitments[i], commitments[i + 1], scalar.unwrap());
-            (running_x, running_y) = ec_add(running_x, running_y, next_x, next_y);
-            running_eval = running_eval + Fr.wrap(mulmod(p_of_x[i / 2], scalar.unwrap(), MODULUS));
-            scalar = scalar * rho;
-        }
-        // Pass the RLC into the singular verify function
-        return (verify(running_x, running_y, point, running_eval.unwrap(), pi, transcript));
-    }
-
     /// Implements the version multilinear hyper kzg verification as in the rust code at
     /// https://github.com/a16z/jolt/blob/main/jolt-core/src/poly/commitment/hyperkzg.rs
     /// @param c_x The x coordinate of the commitment to the multilinear polynomial
@@ -93,16 +61,27 @@ contract HyperKZG {
         // now for the consistency checks
         uint256 ell = point.length;
-        require(pi.v_y.length == ell && pi.v_yneg.length == ell && pi.v_ypos.length == ell, "bad length");
+        require(
+            pi.v_y.length == ell &&
+                pi.v_yneg.length == ell &&
+                pi.v_ypos.length == ell,
+            "bad length"
+        );
 
         for (uint256 i = 0; i < ell; i++) {
             uint256 y_i = i == ell - 1 ? p_of_x : pi.v_y[i + 1];
             Fr left = Fr.wrap(2) * Fr.wrap(r) * FrLib.from(y_i);
             Fr x_minus = FrLib.from(point[ell - i - 1]);
-            Fr ypos_sub_yneg = FrLib.from(pi.v_ypos[i]) - FrLib.from(pi.v_yneg[i]);
-            Fr ypos_plus_yneg = FrLib.from(pi.v_ypos[i]) + FrLib.from(pi.v_yneg[i]);
+            Fr ypos_sub_yneg = FrLib.from(pi.v_ypos[i]) -
+                FrLib.from(pi.v_yneg[i]);
+            Fr ypos_plus_yneg = FrLib.from(pi.v_ypos[i]) +
+                FrLib.from(pi.v_yneg[i]);
 
             // Get the other side of the equality
-            Fr right = Fr.wrap(r) * (Fr.wrap(1) - x_minus) * ypos_plus_yneg + x_minus * ypos_sub_yneg;
+            Fr right = Fr.wrap(r) *
+                (Fr.wrap(1) - x_minus) *
+                ypos_plus_yneg +
+                x_minus *
+                ypos_sub_yneg;
             require(left == right, "bad construction");
         }
 
@@ -168,13 +147,20 @@ contract HyperKZG {
         // NOTE - This is gas inefficient and grows with log of the proof size so we might want
         // to move to a pippenger window algo with much smaller MSMs which we might save gas on.
         // Our first value is the c_x c_y as this would be the first entry of com in rust.
-        (uint256 L_x, uint256 L_y) = ec_scalar_mul(c_x, c_y, q_powers[0].unwrap());
+        (uint256 L_x, uint256 L_y) = ec_scalar_mul(
+            c_x,
+            c_y,
+            q_powers[0].unwrap()
+        );
 
         // Now we do a running sum over the points in com
         for (uint256 i = 0; i < pi.com.length; i += 2) {
             // First the scalar mult then the add
-            (uint256 temp_x_loop, uint256 temp_y_loop) =
-                ec_scalar_mul(pi.com[i], pi.com[i + 1], q_powers[i / 2 + 1].unwrap());
+            (uint256 temp_x_loop, uint256 temp_y_loop) = ec_scalar_mul(
+                pi.com[i],
+                pi.com[i + 1],
+                q_powers[i / 2 + 1].unwrap()
+            );
             (L_x, L_y) = ec_add(L_x, L_y, temp_x_loop, temp_y_loop);
         }
 
@@ -182,13 +168,22 @@ contract HyperKZG {
         (uint256 temp_x, uint256 temp_y) = ec_scalar_mul(pi.w[0], pi.w[1], r);
         (L_x, L_y) = ec_add(L_x, L_y, temp_x, temp_y);
         // U[1] = -r * d_0
-        (temp_x, temp_y) = ec_scalar_mul(pi.w[2], pi.w[3], mulmod(MODULUS - r, d_0.unwrap(), MODULUS));
+        (temp_x, temp_y) = ec_scalar_mul(
+            pi.w[2],
+            pi.w[3],
+            mulmod(MODULUS - r, d_0.unwrap(), MODULUS)
+        );
         (L_x, L_y) = ec_add(L_x, L_y, temp_x, temp_y);
         // U[2] = r*r * d_1
-        (temp_x, temp_y) = ec_scalar_mul(pi.w[4], pi.w[5], mulmod(mulmod(r, r, MODULUS), d_1.unwrap(), MODULUS));
+        (temp_x, temp_y) = ec_scalar_mul(
+            pi.w[4],
+            pi.w[5],
+            mulmod(mulmod(r, r, MODULUS), d_1.unwrap(), MODULUS)
+        );
         (L_x, L_y) = ec_add(L_x, L_y, temp_x, temp_y);
         // -(B_u[0] + d_0 * B_u[1] + d_1 * B_u[2])
-        uint256 b_u = MODULUS - (B_u_ypos + d_0 * B_u_yneg + d_1 * B_u_y).unwrap();
+        uint256 b_u = MODULUS -
+            (B_u_ypos + d_0 * B_u_yneg + d_1 * B_u_y).unwrap();
         // Add in to the msm b_u Vk_g1
         (temp_x, temp_y) = ec_scalar_mul(VK_g1_x, VK_g1_y, b_u);
         (L_x, L_y) = ec_add(L_x, L_y, temp_x, temp_y);
@@ -210,7 +205,11 @@ contract HyperKZG {
     /// @param p_x The x of the point Q
     /// @param p_y The y of the point Q
     /// @param n The scalar
-    function ec_scalar_mul(uint256 p_x, uint256 p_y, uint256 n) internal view returns (uint256 x_new, uint256 y_new) {
+    function ec_scalar_mul(
+        uint256 p_x,
+        uint256 p_y,
+        uint256 n
+    ) internal view returns (uint256 x_new, uint256 y_new) {
         bool success;
         assembly ("memory-safe") {
             let prev_frm := mload(0x40)
@@ -231,11 +230,12 @@ contract HyperKZG {
     /// @param p_y The y of the point P
     /// @param q_x The x of the point P
     /// @param q_y The y of the point P
-    function ec_add(uint256 p_x, uint256 p_y, uint256 q_x, uint256 q_y)
-        internal
-        view
-        returns (uint256 x_new, uint256 y_new)
-    {
+    function ec_add(
+        uint256 p_x,
+        uint256 p_y,
+        uint256 q_x,
+        uint256 q_y
+    ) internal view returns (uint256 x_new, uint256 y_new) {
         bool success;
         assembly ("memory-safe") {
             let prev_frm := mload(0x40)
@@ -260,7 +260,12 @@ contract HyperKZG {
     /// @param L_y The y of the point L
     /// @param R_x The x of the point R
     /// @param R_y The y of the point R
-    function pairing(uint256 L_x, uint256 L_y, uint256 R_x, uint256 R_y) internal view returns (bool valid) {
+    function pairing(
+        uint256 L_x,
+        uint256 L_y,
+        uint256 R_x,
+        uint256 R_y
+    ) internal view returns (bool valid) {
         // put the immutables into local
         uint256 vk_g2_x_c0 = VK_g2_x_c0;
         uint256 vk_g2_x_c1 = VK_g2_x_c1;
diff --git a/jolt-evm-verifier/test/TestHyperKZG.sol b/jolt-evm-verifier/test/TestHyperKZG.sol
index f4263846e..3c38fb96e 100644
--- a/jolt-evm-verifier/test/TestHyperKZG.sol
+++ b/jolt-evm-verifier/test/TestHyperKZG.sol
@@ -57,26 +57,19 @@ contract TestHyperKZG is TestBase {
         HyperKZG verifier = new DeployableHyperKZG(data.vk);
         // We build a transcript in memory
         bytes32 start_string = "TestEval";
-        Transcript memory transcript = FiatShamirTranscript.new_transcript(start_string, 3);
+        Transcript memory transcript = FiatShamirTranscript.new_transcript(
+            start_string,
+            3
+        );
         // We call into the verifier contract
-        bool passes =
-            verifier.verify(data.commitment_x, data.commitment_y, data.point, data.claim, data.proof, transcript);
-        require(passes, "does not verify a valid proof");
-    }
-
-    function testHyperKZGBatchPasses() public {
-        // Invoke the rust to get a non trivial example proof
-        string[] memory cmds = new string[](1);
-        cmds[0] = "./script/target/release/hyperkzg_batch_example";
-        bytes memory result = vm.ffi(cmds);
-        BatchedExample memory data = abi.decode(result, (BatchedExample));
-        // Now deploy a verifier with the key inited
-        HyperKZG verifier = new DeployableHyperKZG(data.vk);
-        // We build a transcript in memory
-        bytes32 start_string = "TestEval";
-        Transcript memory transcript = FiatShamirTranscript.new_transcript(start_string, 3);
-        // We call into the verifier contract
-        bool passes = verifier.batch_verify(data.commitments, data.point, data.claims, data.proof, transcript);
+        bool passes = verifier.verify(
+            data.commitment_x,
+            data.commitment_y,
+            data.point,
+            data.claim,
+            data.proof,
+            transcript
+        );
         require(passes, "does not verify a valid proof");
     }
 }