From 2973e7a81ede79f050a8ac6699ea731d78f78941 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Antonio=20Mej=C3=ADas=20Gil?= Date: Mon, 25 Mar 2024 14:37:18 +0100 Subject: [PATCH] Add Hyrax multilinear PCS (#130) * added hyrax PCS * adapt the scheme to https://github.com/arkworks-rs/algebra/issues/691 * remove unused code in hyrax * expanded on Future Optimisations section * Remove Prepared data types from `PolynomialCommitment` trait impl * added necessary dependencies overwritten by previous merge commit * fixed hashbrown version * pulled * created separate benchmark files * fixed duplicate dependency to match other branches * patched bn254 dep * restructured benchmark macros to accept ML schemes; benches working * Hyrax fix bench (#42) * fix bench call * set num vars from 12-20 * Hyrax parallel `commit` (#39) * Enable parallel commitment in hyrax amend * make `rand` optional * remove dead code * Make Hyrax hiding again (#43) * removed evaluation randomness from proof and ignored claimed value in check to make scheme hiding * fmt * removed unnecessary usage of argument in check, added _ * Delete `IOPTranscript`, update with master (#50) (aka Hyrax++) * Add the trait bounds * Add `CommitmentState` * Update benches for the new type * Fix the name of local variable * Merge `PCCommitmentState` with `PCRandomness` * Update `README.md` * Fix a bug * Change `Randomness` to `CommitmentState` * Maybe `empty` not return `Self` * Make `empty` return `Self` * Rename `rand` to `state` * Partially integrate the new design into Hyrax * Update Hyrax with the shared state * Rename nonnative to emulated, as in `r1cs-std` (#137) * Rename nonnative to emulated, as in `r1cs-std` * Run `fmt` * Temporarily change `Cargo.toml` * Revert `Cargo.toml` * Refactor `FoldedPolynomialStream` partially * Substitute `ChallengeGenerator` by the generic sponge (#139) * Rename nonnative to emulated, as in `r1cs-std` * Run `fmt` * Temporarily change `Cargo.toml` * Substitute `ChallengeGenerator` with the 
generic sponge * Run `fmt` * Remove the extra file * Update modules * Delete the unnecessary loop * Revert `Cargo.toml` * Refactor `FoldedPolynomialStream` partially * Update README * Make the diff more readable * Bring the whitespace back * Make diff more readable, 2 * Fix according to breaking changes in `ark-ec` (#141) * Fix for KZG10 * Fix the breaking changes in `ark-ec` * Remove the extra loop * Fix the loop range * re-use the preprocessing table * also re-use the preprocessing table for multilinear_pc --------- Co-authored-by: mmagician * Auxiliary opening data (#134) * Add the trait bounds * Add `CommitmentState` * Update benches for the new type * Fix the name of local variable * Merge `PCCommitmentState` with `PCRandomness` * Update `README.md` * Fix a bug * Put `Randomness` in `CommitmentState` * Add a comment * Remove the extra loop * Update the comment for `CommitmentState` Co-authored-by: Marcin * cargo fmt --------- Co-authored-by: Marcin * `batch_mul_with_preprocessing` no longer takes `self` as argument (#142) * batch_mul_with_preprocessing no longer takes `self` as argument * Apply suggestions from code review Co-authored-by: Pratyush Mishra * fix variable name --------- Co-authored-by: Pratyush Mishra * Remove ChallengeGenerator for Ligero (#56) * Squash and merge `delete-chalgen` onto here * Fix for `ChallengeGenerator` * Delete `IOPTranscript` for Hyrax (#55) * Use the sponge generic and rearrange `use`s * Use sponge instead of `IOPTransript` * Fix benches * Remove the extra loop --------- Co-authored-by: mmagician Co-authored-by: Pratyush Mishra * Add a few comments and update `Cargo.toml` * Remove extra `cfg_iter!` Co-authored-by: Pratyush Mishra * Change `pedersen_commit` and add `cfg_into_iter!` * Hash and absorb --------- Co-authored-by: mmagician Co-authored-by: Hossein Moghaddas Co-authored-by: Pratyush Mishra --- Cargo.toml | 7 +- README.md | 10 + bench-templates/src/lib.rs | 39 +- poly-commit/Cargo.toml | 19 +- poly-commit/README.md | 
10 + poly-commit/benches/hyrax_times.rs | 28 + poly-commit/benches/{pcs.rs => ipa_times.rs} | 6 +- poly-commit/src/error.rs | 65 +++ poly-commit/src/hyrax/data_structures.rs | 132 +++++ poly-commit/src/hyrax/mod.rs | 521 ++++++++++++++++++ poly-commit/src/hyrax/tests.rs | 213 +++++++ poly-commit/src/hyrax/utils.rs | 38 ++ poly-commit/src/lib.rs | 13 + poly-commit/src/multilinear_pc/mod.rs | 8 +- .../src/streaming_kzg/data_structures.rs | 3 +- poly-commit/src/streaming_kzg/mod.rs | 6 - poly-commit/src/streaming_kzg/space.rs | 3 +- poly-commit/src/utils.rs | 129 +++++ 18 files changed, 1217 insertions(+), 33 deletions(-) create mode 100644 poly-commit/benches/hyrax_times.rs rename poly-commit/benches/{pcs.rs => ipa_times.rs} (85%) create mode 100644 poly-commit/src/hyrax/data_structures.rs create mode 100644 poly-commit/src/hyrax/mod.rs create mode 100644 poly-commit/src/hyrax/tests.rs create mode 100644 poly-commit/src/hyrax/utils.rs create mode 100644 poly-commit/src/utils.rs diff --git a/Cargo.toml b/Cargo.toml index bc7f3243..bde57bf6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,8 +31,11 @@ debug = true ark-ff = { git = "https://github.com/arkworks-rs/algebra/" } ark-ec = { git = "https://github.com/arkworks-rs/algebra/" } ark-serialize = { git = "https://github.com/arkworks-rs/algebra/" } +ark-poly = { git = "https://github.com/arkworks-rs/algebra/" } + ark-crypto-primitives = { git = "https://github.com/arkworks-rs/crypto-primitives" } ark-r1cs-std = { git = "https://github.com/arkworks-rs/r1cs-std/" } -ark-bls12-377 = { git = "https://github.com/arkworks-rs/curves/" } -ark-bls12-381 = { git = "https://github.com/arkworks-rs/curves/" } +ark-bls12-377 = { git = "https://github.com/arkworks-rs/algebra/" } +ark-bls12-381 = { git = "https://github.com/arkworks-rs/algebra/" } +ark-bn254 = { git = "https://github.com/arkworks-rs/algebra/" } diff --git a/README.md b/README.md index 82c3e9a6..64c2ea2f 100644 --- a/README.md +++ b/README.md @@ -184,6 +184,8 @@ Unless 
you explicitly state otherwise, any contribution that you submit to this [aurora-light]: https://ia.cr/2019/601 [pcd-acc]: https://ia.cr/2020/499 [pst]: https://ia.cr/2011/587 +[ligero]: https://ia.cr/2022/1608 +[hyrax]: https://eprint.iacr.org/2017/1132 ## Reference papers @@ -211,6 +213,14 @@ TCC 2020 Charalampos Papamanthou, Elaine Shi, Roberto Tamassia TCC 2013 +[Ligero: Lightweight Sublinear Arguments Without a Trusted Setup][ligero] +Scott Ames, Carmit Hazay, Yuval Ishai, Muthuramakrishnan Venkitasubramaniam +CCS 2017 + +[Doubly-efficient zkSNARKs without trusted setup][hyrax] +Riad S. Wahby, Ioanna Tzialla, abhi shelat, Justin Thaler, Michael Walfish +2018 IEEE Symposium on Security and Privacy + ## Acknowledgements This work was supported by: an Engineering and Physical Sciences Research Council grant; a Google Faculty Award; the RISELab at UC Berkeley; and donations from the Ethereum Foundation and the Interchain Foundation. diff --git a/bench-templates/src/lib.rs b/bench-templates/src/lib.rs index 9451c313..952a36e4 100644 --- a/bench-templates/src/lib.rs +++ b/bench-templates/src/lib.rs @@ -16,27 +16,32 @@ use ark_poly_commit::{LabeledPolynomial, PolynomialCommitment}; pub use criterion::*; pub use paste::paste; -/// Measure the time cost of {commit/open/verify} across a range of num_vars +/// Measure the time cost of `method` (i.e., commit/open/verify) of a +/// multilinear PCS for all `num_vars` specified in `nv_list`. +/// `rand_poly` is a function that outputs a random multilinear polynomial. +/// `rand_point` is a function that outputs a random point in the domain of polynomial. 
pub fn bench_pcs_method< F: PrimeField, P: Polynomial, PCS: PolynomialCommitment>, >( c: &mut Criterion, - range: Vec, + nv_list: Vec, msg: &str, method: impl Fn( &PCS::CommitterKey, &PCS::VerifierKey, usize, fn(usize, &mut ChaCha20Rng) -> P, + fn(usize, &mut ChaCha20Rng) -> P::Point, ) -> Duration, rand_poly: fn(usize, &mut ChaCha20Rng) -> P, + rand_point: fn(usize, &mut ChaCha20Rng) -> P::Point, ) { let mut group = c.benchmark_group(msg); let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); - for num_vars in range { + for num_vars in nv_list { let pp = PCS::setup(num_vars, Some(num_vars), rng).unwrap(); let (ck, vk) = PCS::trim(&pp, num_vars, num_vars, None).unwrap(); @@ -44,7 +49,13 @@ pub fn bench_pcs_method< BenchmarkId::from_parameter(num_vars), &num_vars, |b, num_vars| { - b.iter(|| method(&ck, &vk, *num_vars, rand_poly)); + b.iter_custom(|i| { + let mut time = Duration::from_nanos(0); + for _ in 0..i { + time += method(&ck, &vk, *num_vars, rand_poly, rand_point); + } + time + }); }, ); } @@ -62,6 +73,7 @@ pub fn commit< _vk: &PCS::VerifierKey, num_vars: usize, rand_poly: fn(usize, &mut ChaCha20Rng) -> P, + _rand_point: fn(usize, &mut ChaCha20Rng) -> P::Point, ) -> Duration { let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); @@ -102,12 +114,12 @@ pub fn open( _vk: &PCS::VerifierKey, num_vars: usize, rand_poly: fn(usize, &mut ChaCha20Rng) -> P, + rand_point: fn(usize, &mut ChaCha20Rng) -> P::Point, ) -> Duration where F: PrimeField, P: Polynomial, PCS: PolynomialCommitment>, - P::Point: UniformRand, { let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); @@ -115,7 +127,7 @@ where LabeledPolynomial::new("test".to_string(), rand_poly(num_vars, rng), None, None); let (coms, states) = PCS::commit(&ck, [&labeled_poly], Some(rng)).unwrap(); - let point = P::Point::rand(rng); + let point = rand_point(num_vars, rng); let start = Instant::now(); let _ = PCS::open( @@ -173,12 +185,12 @@ pub fn verify( vk: &PCS::VerifierKey, num_vars: usize, 
rand_poly: fn(usize, &mut ChaCha20Rng) -> P, + rand_point: fn(usize, &mut ChaCha20Rng) -> P::Point, ) -> Duration where F: PrimeField, P: Polynomial, PCS: PolynomialCommitment>, - P::Point: UniformRand, { let rng = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); @@ -186,7 +198,7 @@ where LabeledPolynomial::new("test".to_string(), rand_poly(num_vars, rng), None, None); let (coms, states) = PCS::commit(&ck, [&labeled_poly], Some(rng)).unwrap(); - let point = P::Point::rand(rng); + let point = rand_point(num_vars, rng); let claimed_eval = labeled_poly.evaluate(&point); let proof = PCS::open( &ck, @@ -243,7 +255,7 @@ fn test_sponge() -> PoseidonSponge { #[macro_export] macro_rules! bench_method { - ($c:expr, $method:ident, $scheme_type:ty, $rand_poly:ident) => { + ($c:expr, $method:ident, $scheme_type:ty, $rand_poly:ident, $rand_point:ident) => { let scheme_type_str = stringify!($scheme_type); let bench_name = format!("{} {}", stringify!($method), scheme_type_str); bench_pcs_method::<_, _, $scheme_type>( @@ -252,6 +264,7 @@ macro_rules! bench_method { &bench_name, $method::<_, _, $scheme_type>, $rand_poly::<_>, + $rand_point::<_>, ); }; } @@ -259,12 +272,12 @@ macro_rules! bench_method { #[macro_export] macro_rules! 
bench { ( - $scheme_type:ty, $rand_poly:ident + $scheme_type:ty, $rand_poly:ident, $rand_point:ident ) => { fn bench_pcs(c: &mut Criterion) { - bench_method!(c, commit, $scheme_type, $rand_poly); - bench_method!(c, open, $scheme_type, $rand_poly); - bench_method!(c, verify, $scheme_type, $rand_poly); + bench_method!(c, commit, $scheme_type, $rand_poly, $rand_point); + bench_method!(c, open, $scheme_type, $rand_poly, $rand_point); + bench_method!(c, verify, $scheme_type, $rand_poly, $rand_point); } criterion_group!(benches, bench_pcs); diff --git a/poly-commit/Cargo.toml b/poly-commit/Cargo.toml index 19098ce0..c7a28579 100644 --- a/poly-commit/Cargo.toml +++ b/poly-commit/Cargo.toml @@ -15,18 +15,24 @@ ark-ec = { version = "^0.4.0", default-features = false } ark-poly = {version = "^0.4.0", default-features = false } ark-crypto-primitives = {version = "^0.4.0", default-features = false, features = ["sponge", "merkle_tree"] } ark-std = { version = "^0.4.0", default-features = false } - +blake2 = { version = "0.10", default-features = false } +rand = { version = "0.8.0", optional = true } ark-relations = { version = "^0.4.0", default-features = false, optional = true } ark-r1cs-std = { version = "^0.4.0", default-features = false, optional = true } -hashbrown = { version = "0.13", default-features = false, optional = true } digest = "0.10" derivative = { version = "2", features = [ "use_core" ] } rayon = { version = "1", optional = true } +hashbrown = { version = "0.14", default-features = false, optional = true } [[bench]] -name = "pcs" -path = "benches/pcs.rs" +name = "ipa_times" +path = "benches/ipa_times.rs" +harness = false + +[[bench]] +name = "hyrax_times" +path = "benches/hyrax_times.rs" harness = false [[bench]] @@ -38,7 +44,8 @@ harness = false ark-ed-on-bls12-381 = { version = "^0.4.0", default-features = false } ark-bls12-381 = { version = "^0.4.0", default-features = false, features = [ "curve" ] } ark-bls12-377 = { version = "^0.4.0", default-features = 
false, features = [ "curve" ] } -blake2 = { version = "0.10", default-features = false } +ark-bn254 = { version = "^0.4.0", default-features = false, features = [ "curve" ] } + rand_chacha = { version = "0.3.0", default-features = false } ark-pcs-bench-templates = { path = "../bench-templates" } @@ -47,4 +54,4 @@ default = [ "std", "parallel" ] std = [ "ark-ff/std", "ark-ec/std", "ark-poly/std", "ark-std/std", "ark-relations/std", "ark-serialize/std", "ark-crypto-primitives/std"] r1cs = [ "ark-relations", "ark-r1cs-std", "hashbrown", "ark-crypto-primitives/r1cs"] print-trace = [ "ark-std/print-trace" ] -parallel = [ "std", "ark-ff/parallel", "ark-ec/parallel", "ark-poly/parallel", "ark-std/parallel", "rayon" ] +parallel = [ "std", "ark-ff/parallel", "ark-ec/parallel", "ark-poly/parallel", "ark-std/parallel", "rayon", "rand" ] diff --git a/poly-commit/README.md b/poly-commit/README.md index ec22e02a..e2e3d0fc 100644 --- a/poly-commit/README.md +++ b/poly-commit/README.md @@ -56,6 +56,16 @@ EUROCRYPT 2020 Aniket Kate, Gregory M. Zaverucha, Ian Goldberg ASIACRYPT 2010 +### Hyrax multilinear PC + +Polynomial commitment scheme introduced together with the Hyrax zkSNARK (in [this](https://eprint.iacr.org/2017/1132) article). It is based on Pedersen commitments and therefore relies on the difficulty of the discrete logarithm problem in order to provide a hiding PCS. + +[Doubly-efficient zkSNARKs without trusted setup][hyrax] +Riad S. Wahby, Ioanna Tzialla, abhi shelat, Justin Thaler, Michael Walfish +2018 IEEE Symposium on Security and Privacy + +[hyrax]: https://eprint.iacr.org/2017/1132 + ### Marlin variant of the Papamanthou-Shi-Tamassia multivariate PC Multivariate polynomial commitment based on the construction in the Papamanthou-Shi-Tamassia construction with batching and (optional) hiding property inspired by the univariate scheme in Marlin. 
diff --git a/poly-commit/benches/hyrax_times.rs b/poly-commit/benches/hyrax_times.rs new file mode 100644 index 00000000..c76753df --- /dev/null +++ b/poly-commit/benches/hyrax_times.rs @@ -0,0 +1,28 @@ +use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; +use ark_pcs_bench_templates::*; +use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; + +use ark_bn254::{Fr, G1Affine}; +use ark_ff::PrimeField; +use ark_poly_commit::hyrax::HyraxPC; + +use rand_chacha::ChaCha20Rng; + +// Hyrax PCS over BN254 +type Hyrax254 = HyraxPC, PoseidonSponge>; + +fn rand_poly_hyrax( + num_vars: usize, + rng: &mut ChaCha20Rng, +) -> DenseMultilinearExtension { + DenseMultilinearExtension::rand(num_vars, rng) +} + +fn rand_point_hyrax(num_vars: usize, rng: &mut ChaCha20Rng) -> Vec { + (0..num_vars).map(|_| F::rand(rng)).collect() +} + +const MIN_NUM_VARS: usize = 12; +const MAX_NUM_VARS: usize = 22; + +bench!(Hyrax254, rand_poly_hyrax, rand_point_hyrax); diff --git a/poly-commit/benches/pcs.rs b/poly-commit/benches/ipa_times.rs similarity index 85% rename from poly-commit/benches/pcs.rs rename to poly-commit/benches/ipa_times.rs index 77ab04f7..27b4d3ba 100644 --- a/poly-commit/benches/pcs.rs +++ b/poly-commit/benches/ipa_times.rs @@ -22,7 +22,11 @@ fn rand_poly_ipa_pc(degree: usize, rng: &mut ChaCha20Rng) -> Dens DenseUnivariatePoly::rand(degree, rng) } +fn rand_point_ipa_pc(_: usize, rng: &mut ChaCha20Rng) -> F { + F::rand(rng) +} + const MIN_NUM_VARS: usize = 10; const MAX_NUM_VARS: usize = 20; -bench!(IPA_JubJub, rand_poly_ipa_pc); +bench!(IPA_JubJub, rand_poly_ipa_pc, rand_point_ipa_pc); diff --git a/poly-commit/src/error.rs b/poly-commit/src/error.rs index de7091eb..3712d2b2 100644 --- a/poly-commit/src/error.rs +++ b/poly-commit/src/error.rs @@ -93,6 +93,49 @@ pub enum Error { /// Index of the offending polynomial. label: String, }, + + /// This means a failure in verifying the commitment or the opening. 
+ InvalidCommitment, + + /// This means during opening or verification, a commitment of incorrect + /// size (for example, with an insufficient number of entries) was + /// encountered + IncorrectCommitmentSize { + /// Encountered commitment size + encountered: usize, + /// Expected commitment size + expected: usize, + }, + + /// For PCS which rely on Fiat-Shamir to be rendered non-interactive, + /// these are errors that result from incorrect transcript manipulation. + TranscriptError, + + /// This means the required soundness error bound is inherently impossible. + /// E.g., the field is not big enough. + InvalidParameters(String), + + /// Error resulting from hashing in linear code - based PCS. + HashingError, + + /// This means a commitment with a certain label was matched with a + /// a polynomial which has a different label - which shouldn't happen + MismatchedLabels { + /// The label of the commitment + commitment_label: String, + /// The label of the polynomial + polynomial_label: String, + }, + + /// This means multivariate polynomial with a certain number of variables + /// was matched (for instance, during commitment, opening or verification) + /// to a point with a different number of variables. 
+ MismatchedNumVars { + /// The number of variables of the polynomial + poly_nv: usize, + /// The number of variables of the point + point_nv: usize, + }, } impl core::fmt::Display for Error { @@ -179,6 +222,28 @@ impl core::fmt::Display for Error { support up to degree ({:?})", label, poly_degree, supported_degree ), Error::IncorrectInputLength(err) => write!(f, "{}", err), + Error::InvalidCommitment => write!(f, "Failed to verify the commitment"), + Error::IncorrectCommitmentSize { + encountered, + expected, + } => write!( + f, + "the commitment has size {}, but size {} was expected", + encountered, expected + ), + Error::TranscriptError => write!(f, "Incorrect transcript manipulation"), + Error::InvalidParameters(err) => write!(f, "{}", err), + Error::HashingError => write!(f, "Error resulting from hashing"), + Error::MismatchedLabels { commitment_label, polynomial_label } => + write!(f, "Mismatched labels: commitment label: {}, polynomial label: {}", + commitment_label, + polynomial_label + ), + Error::MismatchedNumVars { poly_nv, point_nv } => + write!(f, "Mismatched number of variables: polynomial has {}, point has {}", + poly_nv, + point_nv, + ), } } } diff --git a/poly-commit/src/hyrax/data_structures.rs b/poly-commit/src/hyrax/data_structures.rs new file mode 100644 index 00000000..aa58b7cf --- /dev/null +++ b/poly-commit/src/hyrax/data_structures.rs @@ -0,0 +1,132 @@ +use ark_ec::AffineRepr; +use ark_ff::PrimeField; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{rand::RngCore, vec::Vec}; + +use crate::{ + utils::Matrix, PCCommitment, PCCommitmentState, PCCommitterKey, PCUniversalParams, + PCVerifierKey, +}; + +/// `UniversalParams` amounts to a Pederson commitment key of sufficient length +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct HyraxUniversalParams { + /// A list of generators of the group. 
+ pub com_key: Vec, + /// A generator of the group. + pub h: G, +} + +impl PCUniversalParams for HyraxUniversalParams { + fn max_degree(&self) -> usize { + // Only MLEs are supported + 1 + } +} + +/// The committer key, which coincides with the universal parameters +pub type HyraxCommitterKey = HyraxUniversalParams; + +/// The verifier key, which coincides with the committer key +pub type HyraxVerifierKey = HyraxCommitterKey; + +impl PCCommitterKey for HyraxCommitterKey { + fn max_degree(&self) -> usize { + // Only MLEs are supported + 1 + } + fn supported_degree(&self) -> usize { + // Only MLEs are supported + 1 + } +} + +impl PCVerifierKey for HyraxVerifierKey { + // Only MLEs are supported + fn max_degree(&self) -> usize { + 1 + } + // Only MLEs are supported + fn supported_degree(&self) -> usize { + 1 + } +} + +/// Hyrax commitment to a polynomial consisting of one multi-commit per row of +/// the coefficient matrix +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct HyraxCommitment { + /// A list of multi-commits to each row of the matrix representing the + /// polynomial. 
+ pub row_coms: Vec, +} + +impl PCCommitment for HyraxCommitment { + #[inline] + fn empty() -> Self { + HyraxCommitment { + row_coms: Vec::new(), + } + } + + // The degree bound is always 1, since only multilinear polynomials are + // supported + fn has_degree_bound(&self) -> bool { + true + } +} + +pub(crate) type HyraxRandomness = Vec; + +/// Hyrax Commitment State blah blah blah blah +/// blah blah blah blah +/// blah blah blah blah +/// blah blah blah blah +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct HyraxCommitmentState +where + F: PrimeField, +{ + pub(crate) randomness: HyraxRandomness, + pub(crate) mat: Matrix, +} + +/// A vector of scalars, each of which multiplies the distinguished group +/// element in the Pederson commitment key for a different commitment +impl PCCommitmentState for HyraxCommitmentState { + type Randomness = HyraxRandomness; + fn empty() -> Self { + unimplemented!() + } + + fn rand( + num_queries: usize, + _has_degree_bound: bool, + _num_vars: Option, + rng: &mut R, + ) -> Self::Randomness { + (0..num_queries).map(|_| F::rand(rng)).collect() + } +} + +/// Proof of a Hyrax opening, containing various commitments +/// and auxiliary values generated randomly during the opening +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct HyraxProof { + /// Commitment to the evaluation of the polynomial at the requested point + pub com_eval: G, + /// Commitment to auxiliary random vector `d` + pub com_d: G, + /// Commitment to auxiliary random scalar `b` + pub com_b: G, + /// Auxiliary random vector + pub z: Vec, + /// Auxiliary random scalar + pub z_d: G::ScalarField, + /// Auxiliary random scalar + pub z_b: G::ScalarField, +} diff --git a/poly-commit/src/hyrax/mod.rs b/poly-commit/src/hyrax/mod.rs new file mode 100644 index 00000000..21e66cb6 --- 
/dev/null +++ b/poly-commit/src/hyrax/mod.rs @@ -0,0 +1,521 @@ +use crate::hyrax::utils::tensor_prime; +use crate::utils::{inner_product, scalar_by_vector, vector_sum, Matrix}; +use crate::{ + hyrax::utils::flat_to_matrix_column_major, Error, LabeledCommitment, LabeledPolynomial, + PolynomialCommitment, +}; +use ark_crypto_primitives::sponge::{Absorb, CryptographicSponge}; +use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; +use ark_ff::PrimeField; +use ark_poly::MultilinearExtension; +use ark_serialize::serialize_to_vec; +use ark_std::{marker::PhantomData, rand::RngCore, string::ToString, vec::Vec, UniformRand}; +use blake2::Blake2s256; +use digest::Digest; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +mod data_structures; +pub use data_structures::*; +#[cfg(test)] +mod tests; +mod utils; +/// String of bytes used to seed the randomness during the setup function. +/// Note that the latter should never be used in production environments. +pub const PROTOCOL_NAME: &'static [u8] = b"Hyrax protocol"; + +/// Hyrax polynomial committment scheme: +/// A polynomial commitment scheme based on the hardness of the +/// discrete logarithm problem in prime-order groups. This is a +/// Fiat-Shamired version of the PCS described in the Hyrax paper +/// [[WTsTW17]][hyrax]. +/// +/// [hyrax]: https://eprint.iacr.org/2017/1132.pdf +/// +/// ### Future optimisations +/// +/// - Add parallelisation. There is at least one natural place where +/// parallelisation could bring performance gains: in essence, the prover +/// commits to the polynomial by expressing it as an evaluation matrix and +/// Pederson-multi-committing to each row. Each of this commitments can be +/// computed independently from the rest, and therefore, in parallel. It is +/// still to be seen how much of an improvement this would entail, since each +/// Pederson multi-commitment boils down to a multi-exponentiation and this +/// operation is itself parallelised. 
+/// - Due to the homomorphic nature of Pedersen commitments, it is likely +/// some of the following methods can be designed more efficiently than their +/// default implementations: `batch_open`, `batch_check`, +/// `open_combinations`, `check_combinations`. This is not discussed in the +/// reference article, but the IPA and KZG modules might be a good starting +/// point. +/// - On a related note to the previous point, there might be a more +/// efficient way to open several polynomials at a single point (this is the +/// functionality of the `open` method) than the currently implemented +/// technique, where only the computation of the vectors `L` and `R` is +/// shared across polynomials. +/// - The cited article proposes an optimisation in the section _Reducing the +/// cost of proof-of-dot-prod_. It allows for non-square matrices (and hence +/// removes the requirement for the number of variables to be even) and +/// introduces a tradeoff between proof size and verifier time. It is +/// probably worth pursuing. + +pub struct HyraxPC< + // The elliptic curve used for Pedersen commitments (only EC groups are + // supported as of now). + G: AffineRepr, + // A polynomial type representing multilinear polynomials + P: MultilinearExtension, + // The sponge used in the protocol as random oracle + S: CryptographicSponge, +> { + _phantom: PhantomData<(G, P, S)>, +} + +impl HyraxPC +where + G: AffineRepr, + P: MultilinearExtension, + S: CryptographicSponge, +{ + /// Pedersen commitment to a vector of scalars as described in appendix A.1 + /// of the reference article. + /// The function does not add handle hiding term `h * r`. + /// It is only a wrapper around MSM. 
+ /// + /// # Panics + /// + /// Panics if `key` and `scalars` do not have the same length + fn pedersen_commit(key: &[G], scalars: &[G::ScalarField]) -> G::Group { + assert_eq!(key.len(), scalars.len()); + let scalars_bigint = ark_std::cfg_iter!(scalars) + .map(|s| s.into_bigint()) + .collect::>(); + // Multi-exponentiation in the group of points of the EC + ::msm_bigint(&key, &scalars_bigint) + } +} + +impl PolynomialCommitment for HyraxPC +where + G: AffineRepr, + G::ScalarField: Absorb, + P: MultilinearExtension, + S: CryptographicSponge, +{ + type UniversalParams = HyraxUniversalParams; + type CommitterKey = HyraxCommitterKey; + type VerifierKey = HyraxVerifierKey; + type Commitment = HyraxCommitment; + type CommitmentState = HyraxCommitmentState; + type Proof = Vec>; + type BatchProof = Vec; + type Error = Error; + + /// Outputs mock universal parameters for the Hyrax polynomial commitment + /// scheme. It does *not* return random keys across calls and should never + /// be used in settings where security is required - it is only useful for + /// testing. + /// + /// # Panics + /// + /// Panics if `num_vars` is None or contains an odd value. + fn setup( + _max_degree: usize, + num_vars: Option, + _rng: &mut R, + ) -> Result { + if num_vars.is_none() { + return Err(Error::InvalidNumberOfVariables); + } + + let n = num_vars.unwrap(); + + if n % 2 == 1 { + // Only polynomials with an even number of variables are + // supported in this implementation + return Err(Error::InvalidNumberOfVariables); + } + + // Number of rows (or, equivalently, colums) of a square matrix + // containing the coefficients of an n-variate ML polynomial + let dim = 1 << n / 2; + + // The following block of code is largely taking from the IPA module + // in this crate. 
It generates random points (not guaranteed to be + // generators, since the point at infinity should theoretically occur) + let points: Vec<_> = ark_std::cfg_into_iter!(0u64..dim + 1) + .map(|i| { + let mut hash = + Blake2s256::digest([PROTOCOL_NAME, &i.to_le_bytes()].concat().as_slice()); + let mut p = G::from_random_bytes(&hash); + let mut j = 0u64; + while p.is_none() { + let mut bytes = PROTOCOL_NAME.to_vec(); + bytes.extend(i.to_le_bytes()); + bytes.extend(j.to_le_bytes()); + hash = Blake2s256::digest(bytes.as_slice()); + p = G::from_random_bytes(&hash); + j += 1; + } + let point = p.unwrap(); + point.mul_by_cofactor_to_group() + }) + .collect(); + + // Converting from projective to affine representation + let mut points = G::Group::normalize_batch(&points); + + let h: G = points.pop().unwrap(); + + Ok(HyraxUniversalParams { com_key: points, h }) + } + + /// Trims a key into a prover key and a verifier key. This should only + /// amount to discarding some of the points in said key if the prover + /// and verifier only wish to commit to polynomials with fewer variables + /// than the key can support. Since the number of variables is not + /// considered in the prototype, this function currently simply clones the + /// key. + fn trim( + pp: &Self::UniversalParams, + _supported_degree: usize, + _supported_hiding_bound: usize, + _enforced_degree_bounds: Option<&[usize]>, + ) -> Result<(Self::CommitterKey, Self::VerifierKey), Self::Error> { + Ok((pp.clone(), pp.clone())) + } + + /// Produces a list of commitments to the passed polynomials. Cf. the + /// section "Square-root commitment scheme" from the reference article. 
+ /// + /// # Panics + /// + /// Panics if `rng` is None, since Hyrax requires randomness in order to + /// commit to a polynomial + #[allow(unused_variables)] + fn commit<'a>( + ck: &Self::CommitterKey, + polynomials: impl IntoIterator>, + rng: Option<&mut dyn RngCore>, + ) -> Result< + ( + Vec>, + Vec, + ), + Self::Error, + > + where + P: 'a, + { + let mut coms = Vec::new(); + let mut states = Vec::new(); + + #[cfg(not(feature = "parallel"))] + let rng_inner = rng.expect("Committing to polynomials requires a random generator"); + + for l_poly in polynomials { + let label = l_poly.label(); + let poly = l_poly.polynomial(); + + let n = poly.num_vars(); + let dim = 1 << n / 2; + + if n % 2 == 1 { + // Only polynomials with an even number of variables are + // supported in this implementation + return Err(Error::InvalidNumberOfVariables); + } + + if n > ck.com_key.len() { + return Err(Error::InvalidNumberOfVariables); + } + + let m = flat_to_matrix_column_major(&poly.to_evaluations(), dim, dim); + + // Commiting to the matrix with one multi-commitment per row + let (row_coms, com_rands): (Vec<_>, Vec<_>) = cfg_iter!(m) + .map(|row| { + #[cfg(not(feature = "parallel"))] + let r = G::ScalarField::rand(rng_inner); + #[cfg(feature = "parallel")] + let r = G::ScalarField::rand(&mut rand::thread_rng()); + let c = (Self::pedersen_commit(&ck.com_key, row) + ck.h * r).into(); + (c, r) + }) + .unzip(); + + let com = HyraxCommitment { row_coms }; + let l_comm = LabeledCommitment::new(label.to_string(), com, Some(1)); + + coms.push(l_comm); + states.push(HyraxCommitmentState { + randomness: com_rands, + mat: Matrix::new_from_rows(m), + }); + } + + Ok((coms, states)) + } + + /// Opens a list of polynomial commitments at a desired point. This + /// requires the list of original polynomials (`labeled_polynomials`) as + /// well as the random values using by the Pedersen multi-commits during + /// the commitment phase (`randomness`). Cf. 
sections "Square-root + /// commitment scheme" and appendix A.2 from the reference article. + /// + /// # Panics + /// + /// Panics if + /// - `rng` is None, since Hyrax requires randomness in order to + /// open the commitment to a polynomial. + /// - The point doesn't have an even number of variables. + /// - The labels of a commitment doesn't match that of the corresponding + /// polynomial. + /// - The number of variables of a polynomial doesn't match that of the + /// point. + fn open<'a>( + ck: &Self::CommitterKey, + labeled_polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + point: &'a P::Point, + sponge: &mut S, + states: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a, + Self::CommitmentState: 'a, + P: 'a, + { + let n = point.len(); + + if n % 2 == 1 { + // Only polynomials with an even number of variables are + // supported in this implementation + return Err(Error::InvalidNumberOfVariables); + } + + let dim = 1 << n / 2; + + // Reversing the point is necessary because the MLE interface returns + // evaluations in little-endian order + let point_rev: Vec = point.iter().rev().cloned().collect(); + + let point_lower = &point_rev[n / 2..]; + let point_upper = &point_rev[..n / 2]; + + // Deriving the tensors which result in the evaluation of the polynomial + // when they are multiplied by the coefficient matrix. 
+ let l = tensor_prime(point_lower); + let r = tensor_prime(point_upper); + + let mut proofs = Vec::new(); + + let rng_inner = rng.expect("Opening polynomials requires randomness"); + + for (l_poly, (l_com, state)) in labeled_polynomials + .into_iter() + .zip(commitments.into_iter().zip(states.into_iter())) + { + let label = l_poly.label(); + if label != l_com.label() { + return Err(Error::MismatchedLabels { + commitment_label: l_com.label().to_string(), + polynomial_label: label.to_string(), + }); + } + + let poly = l_poly.polynomial(); + let com = l_com.commitment(); + + if poly.num_vars() != n { + return Err(Error::MismatchedNumVars { + poly_nv: poly.num_vars(), + point_nv: n, + }); + } + + // Absorbing public parameters + sponge.absorb( + &Blake2s256::digest(serialize_to_vec!(*ck).map_err(|_| Error::TranscriptError)?) + .as_slice(), + ); + + // Absorbing the commitment to the polynomial + sponge.absorb(&serialize_to_vec!(com.row_coms).map_err(|_| Error::TranscriptError)?); + + // Absorbing the point + sponge.absorb(point); + + // Committing to the matrix formed by the polynomial coefficients + let t = &state.mat; + + let lt = t.row_mul(&l); + + // t_prime coincides with the Pedersen commitment to lt with the + // randomness r_lt computed here + let r_lt = cfg_iter!(l) + .zip(&state.randomness) + .map(|(l, r)| *l * r) + .sum::(); + + let eval = inner_product(&lt, &r); + + // Singleton commit + let (com_eval, r_eval) = { + let r = G::ScalarField::rand(rng_inner); + ((ck.com_key[0] * eval + ck.h * r).into(), r) + }; + + // ******** Dot product argument ******** + // Appendix A.2 in the reference article + + let d: Vec = + (0..dim).map(|_| G::ScalarField::rand(rng_inner)).collect(); + + let b = inner_product(&r, &d); + + // Multi-commit + let r_d = G::ScalarField::rand(rng_inner); + let com_d = (Self::pedersen_commit(&ck.com_key, &d) + ck.h * r_d).into(); + + // Singleton commit + let r_b = G::ScalarField::rand(rng_inner); + let com_b = (ck.com_key[0] * b + ck.h * 
r_b).into(); + + // Absorbing the commitment to the evaluation + sponge.absorb(&serialize_to_vec!(com_eval).map_err(|_| Error::TranscriptError)?); + + // Absorbing the two auxiliary commitments + sponge.absorb(&serialize_to_vec!(com_d).map_err(|_| Error::TranscriptError)?); + sponge.absorb(&serialize_to_vec!(com_b).map_err(|_| Error::TranscriptError)?); + + // Receive the random challenge c from the verifier, i.e. squeeze + // it from the transcript. + let c = sponge.squeeze_field_elements(1)[0]; + + let z = vector_sum(&d, &scalar_by_vector(c, &lt)); + let z_d = c * r_lt + r_d; + let z_b = c * r_eval + r_b; + + proofs.push(HyraxProof { + com_eval, + com_d, + com_b, + z, + z_d, + z_b, + }); + } + + Ok(proofs) + } + + /// Verifies a list of opening proofs and confirms the evaluation of the + /// committed polynomials at the desired point. + /// + /// # Panics + /// - If the point doesn't have an even number of variables. + /// - If the length of a commitment does not correspond to the length of the + /// point (specifically, commitment length should be 2^(point-length/2)). + /// + /// # Disregarded arguments + /// - `rng` + fn check<'a>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + point: &'a P::Point, + _values: impl IntoIterator, + proof: &Self::Proof, + sponge: &mut S, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a, + { + let n = point.len(); + + if n % 2 == 1 { + // Only polynomials with an even number of variables are + // supported in this implementation + return Err(Error::InvalidNumberOfVariables); + } + + // Reversing the point is necessary because the MLE interface returns + // evaluations in little-endian order + let point_rev: Vec = point.iter().rev().cloned().collect(); + + let point_lower = &point_rev[n / 2..]; + let point_upper = &point_rev[..n / 2]; + + // Deriving the tensors which result in the evaluation of the polynomial + // when they are multiplied by the coefficient matrix. 
+ let l = tensor_prime(point_lower); + let r = tensor_prime(point_upper); + + for (com, h_proof) in commitments.into_iter().zip(proof.iter()) { + let row_coms = &com.commitment().row_coms; + + // extract each field from h_proof + let HyraxProof { + com_eval, + com_d, + com_b, + z, + z_d, + z_b, + } = h_proof; + + if row_coms.len() != 1 << n / 2 { + return Err(Error::IncorrectCommitmentSize { + encountered: row_coms.len(), + expected: 1 << n / 2, + }); + } + + // Computing t_prime with a multi-exponentiation + let l_bigint = cfg_iter!(l) + .map(|chi| chi.into_bigint()) + .collect::>(); + let t_prime: G = ::msm_bigint(&row_coms, &l_bigint).into(); + + // Absorbing public parameters + sponge.absorb( + &Blake2s256::digest(serialize_to_vec!(*vk).map_err(|_| Error::TranscriptError)?) + .as_slice(), + ); + + // Absorbing the commitment to the polynomial + sponge.absorb(&serialize_to_vec!(*row_coms).map_err(|_| Error::TranscriptError)?); + + // Absorbing the point + sponge.absorb(point); + + // Absorbing the commitment to the evaluation + sponge.absorb(&serialize_to_vec!(*com_eval).map_err(|_| Error::TranscriptError)?); + + // Absorbing the two auxiliary commitments + sponge.absorb(&serialize_to_vec!(*com_d).map_err(|_| Error::TranscriptError)?); + sponge.absorb(&serialize_to_vec!(*com_b).map_err(|_| Error::TranscriptError)?); + + // Receive the random challenge c from the verifier, i.e. squeeze + // it from the transcript. 
+ let c: G::ScalarField = sponge.squeeze_field_elements(1)[0]; + + // First check + let com_z_zd = (Self::pedersen_commit(&vk.com_key, z) + vk.h * z_d).into(); + if com_z_zd != (t_prime.mul(c) + com_d).into() { + return Ok(false); + } + + // Second check + let com_dp = (vk.com_key[0] * inner_product(&r, z) + vk.h * z_b).into(); + if com_dp != (com_eval.mul(c) + com_b).into() { + return Ok(false); + } + } + + Ok(true) + } +} diff --git a/poly-commit/src/hyrax/tests.rs b/poly-commit/src/hyrax/tests.rs new file mode 100644 index 00000000..713dd7f3 --- /dev/null +++ b/poly-commit/src/hyrax/tests.rs @@ -0,0 +1,213 @@ +use crate::hyrax::HyraxPC; +use crate::tests::*; +use crate::utils::test_sponge; +use crate::{LabeledPolynomial, PolynomialCommitment}; +use ark_bls12_377::G1Affine; +use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; +use ark_ec::AffineRepr; +use ark_ed_on_bls12_381::EdwardsAffine; +use ark_ff::PrimeField; +use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; +use ark_std::test_rng; +use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; + +// The test structure is largely taken from the multilinear_ligero module +// inside this crate + +// ****************** types ****************** + +type Fq = ::ScalarField; +type Hyrax377 = HyraxPC, PoseidonSponge>; + +type Fr = ::ScalarField; +type Hyrax381 = HyraxPC, PoseidonSponge>; + +// ******** auxiliary test functions ******** + +fn rand_poly( + _: usize, // degree: unused + num_vars: Option, + rng: &mut ChaCha20Rng, +) -> DenseMultilinearExtension { + match num_vars { + Some(n) => DenseMultilinearExtension::rand(n, rng), + None => panic!("Must specify the number of variables"), + } +} + +fn constant_poly( + _: usize, // degree: unused + num_vars: Option, + rng: &mut ChaCha20Rng, +) -> DenseMultilinearExtension { + match num_vars { + Some(0) => DenseMultilinearExtension::rand(0, rng), + _ => panic!("Must specify the number of variables: 0"), + } +} + +fn rand_point(num_vars: Option, rng: 
&mut ChaCha20Rng) -> Vec { + match num_vars { + Some(n) => (0..n).map(|_| F::rand(rng)).collect(), + None => panic!("Must specify the number of variables"), + } +} + +// ****************** tests ****************** + +#[test] +fn test_hyrax_construction() { + // Desired number of variables (must be even!) + let n = 8; + + let chacha = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); + + let pp = Hyrax381::setup(1, Some(n), chacha).unwrap(); + + let (ck, vk) = Hyrax381::trim(&pp, 1, 1, None).unwrap(); + + let l_poly = LabeledPolynomial::new( + "test_poly".to_string(), + rand_poly::(0, Some(n), chacha), + None, + None, + ); + + let (c, rands) = Hyrax381::commit(&ck, &[l_poly.clone()], Some(chacha)).unwrap(); + + let point: Vec = rand_point(Some(n), chacha); + let value = l_poly.evaluate(&point); + + // Dummy argument + let mut test_sponge = test_sponge::(); + + let proof = Hyrax381::open( + &ck, + &[l_poly], + &c, + &point, + &mut (test_sponge.clone()), + &rands, + Some(chacha), + ) + .unwrap(); + + assert!(Hyrax381::check( + &vk, + &c, + &point, + [value], + &proof, + &mut test_sponge, + Some(chacha), + ) + .unwrap()); +} + +#[test] +fn hyrax_single_poly_test() { + single_poly_test::<_, _, Hyrax377, _>( + Some(10), + rand_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, Hyrax381, _>( + Some(10), + rand_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-381"); +} + +#[test] +fn hyrax_constant_poly_test() { + single_poly_test::<_, _, Hyrax377, _>( + Some(0), + constant_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, Hyrax381, _>( + Some(0), + constant_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-381"); +} + +#[test] +fn hyrax_full_end_to_end_test() { + full_end_to_end_test::<_, _, Hyrax377, _>( + Some(8), + rand_poly, + rand_point, + 
poseidon_sponge_for_test, + ) + .expect("test failed for bls12-377"); + full_end_to_end_test::<_, _, Hyrax381, _>( + Some(10), + rand_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-381"); +} + +#[test] +fn hyrax_single_equation_test() { + single_equation_test::<_, _, Hyrax377, _>( + Some(6), + rand_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-377"); + single_equation_test::<_, _, Hyrax381, _>( + Some(6), + rand_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-381"); +} + +#[test] +fn hyrax_two_equation_test() { + two_equation_test::<_, _, Hyrax377, _>( + Some(10), + rand_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-377"); + two_equation_test::<_, _, Hyrax381, _>( + Some(10), + rand_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-381"); +} + +#[test] +fn hyrax_full_end_to_end_equation_test() { + full_end_to_end_equation_test::<_, _, Hyrax377, _>( + Some(8), + rand_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-377"); + full_end_to_end_equation_test::<_, _, Hyrax381, _>( + Some(8), + rand_poly, + rand_point, + poseidon_sponge_for_test, + ) + .expect("test failed for bls12-381"); +} diff --git a/poly-commit/src/hyrax/utils.rs b/poly-commit/src/hyrax/utils.rs new file mode 100644 index 00000000..74879a9e --- /dev/null +++ b/poly-commit/src/hyrax/utils.rs @@ -0,0 +1,38 @@ +use ark_ff::Field; +use ark_std::vec::Vec; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Transforms a flat vector into a n*m matrix in column-major order. The +/// latter is given as a list of rows. +/// +/// For example, if flat = [1, 2, 3, 4, 5, 6] and n = 3, m = 2, then +/// the output is [[1, 3, 5], [2, 4, 6]]. 
+pub(crate) fn flat_to_matrix_column_major(flat: &[T], n: usize, m: usize) -> Vec> { + assert_eq!(flat.len(), n * m, "n * m should coincide with flat.len()"); + let mut res = Vec::new(); + + for row in 0..n { + res.push((0..m).map(|col| flat[col * n + row]).collect()) + } + res +} + +// This function computes all evaluations of the MLE EQ(i, values) for i +// between 0...0 and 1...1 (n-bit strings). This results in essentially +// the same as the tensor_vec function in the `linear_codes/utils.rs`, +// the difference being the endianness of the order of the output. +pub(crate) fn tensor_prime(values: &[F]) -> Vec { + if values.is_empty() { + return vec![F::one()]; + } + + let tail = tensor_prime(&values[1..]); + let val = values[0]; + + cfg_iter!(tail) + .map(|v| *v * (F::one() - val)) + .chain(cfg_iter!(tail).map(|v| *v * val)) + .collect() +} diff --git a/poly-commit/src/lib.rs b/poly-commit/src/lib.rs index 8ebb9710..0e1587ee 100644 --- a/poly-commit/src/lib.rs +++ b/poly-commit/src/lib.rs @@ -34,6 +34,9 @@ use ark_std::{ pub mod data_structures; pub use data_structures::*; +/// Useful functions +pub(crate) mod utils; + /// R1CS constraints for polynomial constraints. #[cfg(feature = "r1cs")] mod constraints; @@ -123,6 +126,16 @@ pub use marlin::marlin_pst13_pc; /// [bdfg]: https://eprint.iacr.org/2020/081.pdf pub mod streaming_kzg; +/// A polynomial commitment scheme based on the hardness of the +/// discrete logarithm problem in prime-order groups. This is a +/// Fiat-Shamired version of the PCS described in the Hyrax paper +/// [[WTsTW17]][hyrax], with the difference that, unlike in the +/// cited reference, the evaluation of the polynomial at the point +/// of interest is indeed revealed to the verifier at the end. +/// +/// [hyrax]: https://eprint.iacr.org/2017/1132.pdf +pub mod hyrax; + /// `QuerySet` is the set of queries that are to be made to a set of labeled polynomials/equations /// `p` that have previously been committed to. 
Each element of a `QuerySet` is a pair of /// `(label, (point_label, point))`, where `label` is the label of a polynomial in `p`, diff --git a/poly-commit/src/multilinear_pc/mod.rs b/poly-commit/src/multilinear_pc/mod.rs index cff20eb5..0973e822 100644 --- a/poly-commit/src/multilinear_pc/mod.rs +++ b/poly-commit/src/multilinear_pc/mod.rs @@ -241,7 +241,9 @@ mod tests { use crate::multilinear_pc::MultilinearPC; use ark_bls12_381::Bls12_381; use ark_ec::pairing::Pairing; - use ark_poly::{DenseMultilinearExtension, MultilinearExtension, SparseMultilinearExtension}; + use ark_poly::{ + DenseMultilinearExtension, MultilinearExtension, Polynomial, SparseMultilinearExtension, + }; use ark_std::rand::RngCore; use ark_std::test_rng; use ark_std::vec::Vec; @@ -260,7 +262,7 @@ mod tests { let com = MultilinearPC::commit(&ck, poly); let proof = MultilinearPC::open(&ck, poly, &point); - let value = poly.evaluate(&point).unwrap(); + let value = poly.evaluate(&point); let result = MultilinearPC::check(&vk, &com, &point, value, &proof); assert!(result); } @@ -308,7 +310,7 @@ mod tests { let com = MultilinearPC::commit(&ck, &poly); let proof = MultilinearPC::open(&ck, &poly, &point); - let value = poly.evaluate(&point).unwrap(); + let value = poly.evaluate(&point); let result = MultilinearPC::check(&vk, &com, &point, value + &(1u16.into()), &proof); assert!(!result); } diff --git a/poly-commit/src/streaming_kzg/data_structures.rs b/poly-commit/src/streaming_kzg/data_structures.rs index 0dc68e87..c8b19c83 100644 --- a/poly-commit/src/streaming_kzg/data_structures.rs +++ b/poly-commit/src/streaming_kzg/data_structures.rs @@ -2,9 +2,10 @@ use ark_ff::Field; use ark_std::borrow::Borrow; use ark_std::vec::Vec; -use crate::streaming_kzg::ceil_div; use ark_std::iterable::Iterable; +use crate::utils::ceil_div; + /// A `Streamer` folding a vector of coefficients /// with the given challenges, and producing a stream of items /// `(i, v)` where `i` indicates the depth, and `v` is the next 
coefficient. diff --git a/poly-commit/src/streaming_kzg/mod.rs b/poly-commit/src/streaming_kzg/mod.rs index e3bdb2af..8fd494e2 100644 --- a/poly-commit/src/streaming_kzg/mod.rs +++ b/poly-commit/src/streaming_kzg/mod.rs @@ -284,12 +284,6 @@ pub(crate) fn vanishing_polynomial(points: &[F]) -> DensePolynomial .fold(one, |x, y| x.naive_mul(&y)) } -/// Return ceil(x / y). -pub(crate) fn ceil_div(x: usize, y: usize) -> usize { - // XXX. warning: this expression can overflow. - (x + y - 1) / y -} - /// Compute a linear combination of the polynomials `polynomials` with the given challenges. pub(crate) fn linear_combination( polynomials: &[PP], diff --git a/poly-commit/src/streaming_kzg/space.rs b/poly-commit/src/streaming_kzg/space.rs index ab50adfd..cc1d36d2 100644 --- a/poly-commit/src/streaming_kzg/space.rs +++ b/poly-commit/src/streaming_kzg/space.rs @@ -6,7 +6,8 @@ use ark_std::borrow::Borrow; use ark_std::collections::VecDeque; use ark_std::vec::Vec; -use crate::streaming_kzg::{ceil_div, vanishing_polynomial, FoldedPolynomialTree}; +use crate::streaming_kzg::{vanishing_polynomial, FoldedPolynomialTree}; +use crate::utils::ceil_div; use ark_ec::scalar_mul::variable_base::{ChunkedPippenger, HashMapPippenger, VariableBaseMSM}; use ark_std::iterable::{Iterable, Reverse}; diff --git a/poly-commit/src/utils.rs b/poly-commit/src/utils.rs new file mode 100644 index 00000000..fcb62ad9 --- /dev/null +++ b/poly-commit/src/utils.rs @@ -0,0 +1,129 @@ +use ark_ff::Field; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::vec::Vec; + +#[cfg(feature = "parallel")] +use rayon::{ + iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, + prelude::IndexedParallelIterator, +}; + +/// Return ceil(x / y). +pub(crate) fn ceil_div(x: usize, y: usize) -> usize { + // XXX. warning: this expression can overflow. 
+ (x + y - 1) / y +} + +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub(crate) struct Matrix { + pub(crate) n: usize, + pub(crate) m: usize, + entries: Vec>, +} + +impl Matrix { + /// Returns a Matrix given a list of its rows, each in turn represented as a list of field elements. + /// + /// # Panics + /// Panics if the sub-lists do not all have the same length. + pub(crate) fn new_from_rows(row_list: Vec>) -> Self { + let m = row_list[0].len(); + + for row in row_list.iter().skip(1) { + assert_eq!( + row.len(), + m, + "Invalid matrix construction: not all rows have the same length" + ); + } + + Self { + n: row_list.len(), + m, + entries: row_list, + } + } + + /// Returns the product v * self, where v is interpreted as a row vector. In other words, + /// it returns a linear combination of the rows of self with coefficients given by v. + /// + /// Panics if the length of v is different from the number of rows of self. + pub(crate) fn row_mul(&self, v: &[F]) -> Vec { + assert_eq!( + v.len(), + self.n, + "Invalid row multiplication: vector has {} elements whereas each matrix column has {}", + v.len(), + self.n + ); + + cfg_into_iter!(0..self.m) + .map(|col| { + inner_product( + v, + &cfg_into_iter!(0..self.m) + .map(|row| self.entries[row][col]) + .collect::>(), + ) + }) + .collect() + } +} + +#[inline] +pub(crate) fn inner_product(v1: &[F], v2: &[F]) -> F { + ark_std::cfg_iter!(v1) + .zip(v2) + .map(|(li, ri)| *li * ri) + .sum() +} + +#[inline] +pub(crate) fn scalar_by_vector(s: F, v: &[F]) -> Vec { + ark_std::cfg_iter!(v).map(|x| *x * s).collect() +} + +#[inline] +pub(crate) fn vector_sum(v1: &[F], v2: &[F]) -> Vec { + ark_std::cfg_iter!(v1) + .zip(v2) + .map(|(li, ri)| *li + ri) + .collect() +} + +// TODO: replace by https://github.com/arkworks-rs/crypto-primitives/issues/112. 
+#[cfg(test)] +use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; +#[cfg(test)] +use ark_ff::PrimeField; + +#[cfg(test)] +pub(crate) fn test_sponge() -> PoseidonSponge { + use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, CryptographicSponge}; + use ark_std::test_rng; + + let full_rounds = 8; + let partial_rounds = 31; + let alpha = 17; + + let mds = vec![ + vec![F::one(), F::zero(), F::one()], + vec![F::one(), F::one(), F::zero()], + vec![F::zero(), F::one(), F::one()], + ]; + + let mut v = Vec::new(); + let mut ark_rng = test_rng(); + + for _ in 0..(full_rounds + partial_rounds) { + let mut res = Vec::new(); + + for _ in 0..3 { + res.push(F::rand(&mut ark_rng)); + } + v.push(res); + } + let config = PoseidonConfig::new(full_rounds, partial_rounds, alpha, mds, v, 2, 1); + PoseidonSponge::new(&config) +}