diff --git a/.github/workflows/rust-ci.yml b/.github/workflows/rust-ci.yml
new file mode 100644
index 00000000..63316e7b
--- /dev/null
+++ b/.github/workflows/rust-ci.yml
@@ -0,0 +1,71 @@
+name: Rust CI
+
+on: [push, pull_request]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+
+      - name: Uninstall pre-installed tools
+        run: |
+          rm -f ~/.cargo/bin/rust-analyzer
+          rm -f ~/.cargo/bin/rustfmt
+          rm -f ~/.cargo/bin/cargo-fmt
+
+      - name: Update Rust toolchain and components
+        run: |
+          rustup update
+          rustup component add rustfmt
+          rustup component add clippy
+
+      # Caches must be restored before the build step to be of any use.
+      - name: Cache Cargo registry
+        uses: actions/cache@v3
+        with:
+          path: ~/.cargo/registry
+          key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-cargo-registry-
+
+      - name: Cache Cargo build
+        uses: actions/cache@v3
+        with:
+          path: caledonia/target
+          key: ${{ runner.os }}-cargo-build-${{ hashFiles('**/Cargo.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-cargo-build-
+
+      - name: Build
+        run: cargo build --verbose
+        working-directory: caledonia
+
+      - name: Verify target directory exists
+        run: |
+          echo "Checking if target directory exists and is not empty"
+          ls -la caledonia/target
+
+      - name: Run tests
+        run: cargo test --verbose
+        working-directory: caledonia
+
+# - name: Run Clippy
+#   run: cargo clippy -- -D warnings
+#   working-directory: caledonia
+
+      - name: Check format
+        run: cargo fmt -- --check
+        working-directory: caledonia
+
+      - name: Build and test documentation
+        run: cargo doc --no-deps --verbose
+        working-directory: caledonia
diff --git a/caledonia/.gitignore b/caledonia/.gitignore
new file mode 100644
index 00000000..9f970225
--- /dev/null
+++ b/caledonia/.gitignore
@@ -0,0 +1 @@
+target/
\ No newline at end of file
diff --git a/caledonia/Cargo.toml b/caledonia/Cargo.toml
new file mode 100644
index 00000000..4872fa2e
--- /dev/null
+++ b/caledonia/Cargo.toml
@@ -0,0 +1,36 @@
+[package]
+name = "caledonia"
+version = "0.1.0"
+edition = "2021"
+description = "A Rust implementation of Approximate Lower Bound Arguments (ALBAs)."
+categories = ["cryptography"] +include = ["**/*.rs", "Cargo.toml", "README.md", ".gitignore"] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +vrf_dalek = {path = "./vrf"} +blake2 = "0.10.6" +rand_core = "0.6.4" +rayon = "1.10.0" + +[dev-dependencies] +rand = "0.8.5" +rand_chacha = "0.3.1" +criterion = { version = "0.5.1", features = ["html_reports"] } + +[[bench]] +name = "bounded_time" +harness = false + +[[bench]] +name = "bounded_step" +harness = false + +[[bench]] +name = "decentralised_time" +harness = false + +[[bench]] +name = "weighted_time" +harness = false diff --git a/caledonia/benches/bounded_repetition.rs b/caledonia/benches/bounded_repetition.rs new file mode 100644 index 00000000..d4ff9797 --- /dev/null +++ b/caledonia/benches/bounded_repetition.rs @@ -0,0 +1,76 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use rand_chacha::ChaCha20Rng; +use rand_core::SeedableRng; +use std::time::Duration; + +use caledonia::bounded::Proof; + +pub mod utils; + +fn prepetitions( + c: &mut Criterion, + lambdas: &[usize], + s_p: &[usize], + n_p: &[usize], + _hash_size: usize, +) { + let mut group = c.benchmark_group("Alba Bounded".to_string()); + + fn prove_repetitions(l: usize, sp: usize, np: usize, truncate_size: usize, n: u64) -> u64 { + let mut rng = ChaCha20Rng::from_entropy(); + let mut total_repetitions = 0; + for _ in 0..n { + // Setup + let (mut dataset, bench_setup) = utils::setup_bounded_wrapper(&mut rng, l, sp, np); + dataset.truncate(truncate_size); + // Bench + black_box({ + let (_, r, _) = Proof::bench(&bench_setup, &dataset); + total_repetitions += 1 + r; + }); + } + total_repetitions as u64 + } + + for &l in lambdas { + for &sp in s_p { + for &np in n_p { + // Bench with all of Sp + let high = sp; + + // Bench with all of np% of Sp + let low = (high * np).div_ceil(100); + + // Bench with (100+np)/2 percent of Sp + let mean = (100 + np).div_ceil(2); + let mid = (high + low).div_ceil(2); + + group.bench_function( + utils::bench_id("Proving repetitions", np, l, sp, np), + move |b| b.iter_custom(|n| prove_repetitions(l, sp, np, low, n)), + ); + group.bench_function( + utils::bench_id("Proving repetitions", mean, l, sp, np), + move |b| b.iter_custom(|n| prove_repetitions(l, sp, np, mid, n)), + ); + group.bench_function( + utils::bench_id("Proving repetitions", 100, l, sp, np), + move |b| b.iter_custom(|n| prove_repetitions(l, sp, np, high, n)), + ); + } + } + } + group.finish(); +} + +fn prove_step_benches(c: &mut Criterion) { + prepetitions(c, &[50], &[100], &[60, 66, 80], 256); +} + +criterion_group!(name = benches; + config = Criterion::default().sample_size(500).nresamples(1000).measurement_time(Duration::from_secs(30)).with_measurement(utils::Repetitions); + targets = + prove_step_benches +); + +criterion_main!(benches); diff --git a/caledonia/benches/bounded_step.rs b/caledonia/benches/bounded_step.rs new file mode 100644 index 00000000..f51e0ad6 --- /dev/null +++ b/caledonia/benches/bounded_step.rs @@ -0,0 +1,74 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use rand_chacha::ChaCha20Rng; +use rand_core::SeedableRng; + +use caledonia::bounded::Proof; + +pub mod utils; + +fn psteps( + c: &mut Criterion, + lambdas: &[usize], + s_p: &[usize], + n_p: &[usize], + _hash_size: usize, +) { + let mut group = c.benchmark_group("Alba Bounded".to_string()); + + fn prove_steps(l: usize, sp: usize, np: usize, truncate_size: usize, n: u64) -> u64 { + let mut 
rng = ChaCha20Rng::from_entropy(); + let mut total_steps = 0; + for _ in 0..n { + // Setup + let (mut dataset, bench_setup) = utils::setup_bounded_wrapper(&mut rng, l, sp, np); + dataset.truncate(truncate_size); + // Bench + black_box({ + let (steps, _, _) = Proof::bench(&bench_setup, &dataset); + total_steps += steps; + }); + } + total_steps as u64 + } + + for &l in lambdas { + for &sp in s_p { + for &np in n_p { + // Bench with all of Sp + let high = sp; + + // Bench with all of np% of Sp + let low = (high * np).div_ceil(100); + + // Bench with (100+np)/2 percent of Sp + let mean = (100 + np).div_ceil(2); + let mid = (high + low).div_ceil(2); + + group.bench_function(utils::bench_id("Proving steps", np, l, sp, np), move |b| { + b.iter_custom(|n| prove_steps(l, sp, np, low, n)) + }); + group.bench_function( + utils::bench_id("Proving steps", mean, l, sp, np), + move |b| b.iter_custom(|n| prove_steps(l, sp, np, mid, n)), + ); + group.bench_function(utils::bench_id("Proving steps", 100, l, sp, np), move |b| { + b.iter_custom(|n| prove_steps(l, sp, np, high, n)) + }); + } + } + } + group.finish(); +} + +fn prove_step_benches(c: &mut Criterion) { + // prove(c, &[128], &[1000, 5000], &[60, 66, 80], 256); + psteps(c, &[10], &[1000], &[60, 66, 80], 256); +} + +criterion_group!(name = benches; + config = Criterion::default().nresamples(1000).with_measurement(utils::Steps); + targets = + prove_step_benches +); + +criterion_main!(benches); diff --git a/caledonia/benches/bounded_time.rs b/caledonia/benches/bounded_time.rs new file mode 100644 index 00000000..0b71507b --- /dev/null +++ b/caledonia/benches/bounded_time.rs @@ -0,0 +1,127 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use rand_chacha::ChaCha20Rng; +use rand_core::SeedableRng; +use std::time::{Duration, Instant}; + +use caledonia::bounded::Proof; + +pub mod utils; + +fn prove(c: &mut Criterion, lambdas: &[usize], s_p: &[usize], n_p: &[usize], _hash_size: usize) { + let mut group = c.benchmark_group("Alba Bounded".to_string()); + + fn prove_duration(l: usize, sp: usize, np: usize, truncate_size: usize, n: u64) -> Duration { + let mut rng = ChaCha20Rng::from_entropy(); + let mut total_duration = Duration::ZERO; + for _ in 0..n { + // Setup + let (mut dataset, bench_setup) = utils::setup_bounded_wrapper(&mut rng, l, sp, np); + dataset.truncate(truncate_size); + // Bench + let start = Instant::now(); + black_box({ + Proof::prove(&bench_setup, &dataset); + }); + total_duration = total_duration.saturating_add(start.elapsed()); + } + total_duration + } + + for &l in lambdas { + for &sp in s_p { + for &np in n_p { + // Bench with all of Sp + let high = sp; + + // Bench with all of np% of Sp + let low = (high * np).div_ceil(100); + + // Bench with (100+np)/2 percent of Sp + let mean = (100 + np).div_ceil(2); + let mid = (high + low).div_ceil(2); + + group.bench_function(utils::bench_id("Proving time", np, l, sp, np), move |b| { + b.iter_custom(|n| prove_duration(l, sp, np, low, n)) + }); + group.bench_function(utils::bench_id("Proving time", mean, l, sp, np), move |b| { + b.iter_custom(|n| prove_duration(l, sp, np, mid, n)) + }); + group.bench_function(utils::bench_id("Proving time", 100, l, sp, np), move |b| { + b.iter_custom(|n| prove_duration(l, sp, np, high, n)) + }); + } + } + } + group.finish(); +} + +fn verify(c: &mut Criterion, lambdas: &[usize], s_p: &[usize], n_p: &[usize], _hash_size: usize) { + let mut group = c.benchmark_group("Alba".to_string()); + + fn verify_duration(l: usize, sp: usize, np: usize, 
truncate_size: usize, n: u64) -> Duration { + let mut rng = ChaCha20Rng::from_entropy(); + let mut total_duration = Duration::ZERO; + for _ in 0..n { + // Setup + let (mut dataset, bench_setup) = utils::setup_bounded_wrapper(&mut rng, l, sp, np); + dataset.truncate(truncate_size); + // Prove + let proof = Proof::prove(&bench_setup, &dataset); + // Bench + let start = Instant::now(); + black_box({ + Proof::verify(&bench_setup, proof); + }); + total_duration = total_duration.saturating_add(start.elapsed()); + } + total_duration + } + + for &l in lambdas { + for &sp in s_p { + for &np in n_p { + // Bench with all of Sp + let high = sp; + + // Bench with all of np% of Sp + let low = (high * np).div_ceil(100); + + // Bench with (100+np)/2 percent of Sp + let mean = (100 + np).div_ceil(2); + let mid = (high + low).div_ceil(2); + + group.bench_function( + utils::bench_id("Verification time", np, l, sp, np), + move |b| b.iter_custom(|n| verify_duration(l, sp, np, low, n)), + ); + group.bench_function( + utils::bench_id("Verification time", mean, l, sp, np), + move |b| b.iter_custom(|n| verify_duration(l, sp, np, mid, n)), + ); + group.bench_function( + utils::bench_id("Verification time", 100, l, sp, np), + move |b| b.iter_custom(|n| verify_duration(l, sp, np, high, n)), + ); + } + } + } + group.finish(); +} + +fn prove_benches(c: &mut Criterion) { + // prove(c, &[128], &[1000, 5000], &[60, 66, 80], 256); + prove(c, &[128], &[1_000_000], &[60, 66, 80], 256); +} + +fn verify_benches(c: &mut Criterion) { + verify(c, &[10], &[1000], &[60, 66, 80], 256); +} + +criterion_group!(name = benches; + config = Criterion::default().nresamples(1000); + targets = + prove_benches, + verify_benches +); + +criterion_main!(benches); diff --git a/caledonia/benches/decentralised_time.rs b/caledonia/benches/decentralised_time.rs new file mode 100644 index 00000000..30e56056 --- /dev/null +++ b/caledonia/benches/decentralised_time.rs @@ -0,0 +1,129 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use rand_chacha::ChaCha20Rng; +use rand_core::SeedableRng; +use std::time::{Duration, Instant}; + +use caledonia::decentralised::Proof; + +pub mod utils; + +fn prove(c: &mut Criterion, lambdas: &[usize], s_p: &[usize], n_p: &[usize], _hash_size: usize) { + let mut group = c.benchmark_group("Alba decentralised".to_string()); + + fn prove_duration(l: usize, sp: usize, np: usize, truncate_size: usize, n: u64) -> Duration { + let mut rng = ChaCha20Rng::from_entropy(); + let mut total_duration = Duration::ZERO; + for _ in 0..n { + // Setup + let (mut dataset, bench_setup) = + utils::setup_decentralised_wrapper(&mut rng, l, sp, np); + dataset.truncate(truncate_size); + // Bench + let start = Instant::now(); + black_box({ + Proof::prove(&bench_setup, &dataset); + }); + total_duration = total_duration.saturating_add(start.elapsed()); + } + total_duration + } + + for &l in lambdas { + for &sp in s_p { + for &np in n_p { + // Bench with all of Sp + let high = sp; + + // Bench with all of np% of Sp + let low = (high * np).div_ceil(100); + + // Bench with (100+np)/2 percent of Sp + let mean = (100 + np).div_ceil(2); + let mid = (high + low).div_ceil(2); + + group.bench_function(utils::bench_id("Proving time", np, l, sp, np), move |b| { + b.iter_custom(|n| prove_duration(l, sp, np, low, n)) + }); + group.bench_function(utils::bench_id("Proving time", mean, l, sp, np), move |b| { + b.iter_custom(|n| prove_duration(l, sp, np, mid, n)) + }); + group.bench_function(utils::bench_id("Proving time", 100, l, sp, np), 
move |b| { + b.iter_custom(|n| prove_duration(l, sp, np, high, n)) + }); + } + } + } + group.finish(); +} + +fn verify(c: &mut Criterion, lambdas: &[usize], s_p: &[usize], n_p: &[usize], _hash_size: usize) { + let mut group = c.benchmark_group("Alba decentralised".to_string()); + + fn verify_duration(l: usize, sp: usize, np: usize, truncate_size: usize, n: u64) -> Duration { + let mut rng = ChaCha20Rng::from_entropy(); + let mut total_duration = Duration::ZERO; + for _ in 0..n { + // Setup + let (mut dataset, bench_setup) = + utils::setup_decentralised_wrapper(&mut rng, l, sp, np); + dataset.truncate(truncate_size); + // Prove + let proof = Proof::prove(&bench_setup, &dataset); + // Bench + let start = Instant::now(); + black_box({ + Proof::verify(&bench_setup, proof); + }); + total_duration = total_duration.saturating_add(start.elapsed()); + } + total_duration + } + + for &l in lambdas { + for &sp in s_p { + for &np in n_p { + // Bench with all of Sp + let high = sp; + + // Bench with all of np% of Sp + let low = (high * np).div_ceil(100); + + // Bench with (100+np)/2 percent of Sp + let mean = (100 + np).div_ceil(2); + let mid = (high + low).div_ceil(2); + + group.bench_function( + utils::bench_id("Verification time", np, l, sp, np), + move |b| b.iter_custom(|n| verify_duration(l, sp, np, low, n)), + ); + group.bench_function( + utils::bench_id("Verification time", mean, l, sp, np), + move |b| b.iter_custom(|n| verify_duration(l, sp, np, mid, n)), + ); + group.bench_function( + utils::bench_id("Verification time", 100, l, sp, np), + move |b| b.iter_custom(|n| verify_duration(l, sp, np, high, n)), + ); + } + } + } + group.finish(); +} + +fn prove_benches(c: &mut Criterion) { + // prove(c, &[128], &[1_000, 5_000], &[60, 66, 80], 256); + prove(c, &[128], &[100_000], &[60, 66, 80], 256); +} + +fn verify_benches(c: &mut Criterion) { + verify(c, &[10], &[1_000], &[60, 66, 80], 256); +} + +criterion_group!(name = benches; + config = Criterion::default().nresamples(1000); + targets = + prove_benches, + verify_benches +); + +criterion_main!(benches); diff --git a/caledonia/benches/utils.rs b/caledonia/benches/utils.rs new file mode 100644 index 00000000..bafb19d3 --- /dev/null +++ b/caledonia/benches/utils.rs @@ -0,0 +1,287 @@ +use criterion::{ + measurement::{Measurement, ValueFormatter}, + BenchmarkId, Throughput, +}; + +use rand_chacha::ChaCha20Rng; +use rand_core::{RngCore, SeedableRng}; + +use caledonia::{ + utils::{gen_items, gen_weighted_items}, + weighted_decentralised::VerifiableData, +}; +use vrf_dalek::vrf::{PublicKey, SecretKey}; + +// Helper functions +pub fn setup_bounded_wrapper( + rng: &mut ChaCha20Rng, + l: usize, + sp: usize, + np: usize, +) -> (Vec<[u8; 32]>, caledonia::bounded::Setup) { + use caledonia::bounded::*; + let seed_u32 = rng.next_u32(); + let seed = seed_u32.to_ne_bytes().to_vec(); + let dataset: Vec<[u8; 32]> = gen_items(seed, sp); + let params = Params { + lambda_sec: l, + lambda_rel: l, + n_p: (np * sp).div_ceil(100), + n_f: ((100 - np) * sp).div_ceil(100), + }; + (dataset, Setup::new(¶ms)) +} + +pub fn setup_decentralised_wrapper( + rng: &mut ChaCha20Rng, + l: usize, + sp: usize, + np: usize, +) -> (Vec<[u8; 32]>, caledonia::decentralised::Setup) { + use caledonia::decentralised::*; + let seed_u32 = rng.next_u32(); + let seed = seed_u32.to_ne_bytes().to_vec(); + let params = Params::new( + l, + l, + (np * sp).div_ceil(100), + ((100 - np) * sp).div_ceil(100), + ); + let dataset = gen_items(seed, sp) + .iter() + .filter_map(|&s| Proof::lottery(params.n_p, 
params.mu, s).then(|| s)) + .collect(); + (dataset, Setup::new(¶ms)) +} + +pub fn setup_weighted_wrapper( + rng: &mut ChaCha20Rng, + l: usize, + sp: usize, + voters: usize, + np: usize, +) -> ( + Vec, + caledonia::weighted_decentralised::Setup, +) { + use caledonia::weighted_decentralised::*; + let seed_u32 = rng.next_u32(); + let seed = seed_u32.to_ne_bytes().to_vec(); + let dataset = gen_weighted_items(seed, sp, voters); + let params = Params::new( + l, + l, + (np * sp).div_ceil(100), + ((100 - np) * sp).div_ceil(100), + ); + let setup = Setup::new(¶ms); + + let mut verifiable_set = Vec::new(); + let mut rng = ChaCha20Rng::from_seed([0u8; 32]); + for spi in dataset { + let ski = SecretKey::generate(&mut rng); + let pki = PublicKey::from(&ski); + let (data, stake) = spi; + let votes = Proof::prove_lottery(setup.n_p_lottery, setup.mu, data, &ski, &pki, stake); + for v in votes { + verifiable_set.push(v); + } + } + + let params = Params::new( + l, + l, + (np * sp).div_ceil(100), + ((100 - np) * sp).div_ceil(100), + ); + (verifiable_set, Setup::new(¶ms)) +} + +pub fn bench_id(bench_name: &str, pc: usize, l: usize, sp: usize, np: usize) -> BenchmarkId { + BenchmarkId::new( + bench_name, + format!("Security parameter: {l}, Sp:{sp} ({pc}%), n_p:{np}"), + ) +} + +pub fn bench_id_users( + bench_name: &str, + pc: usize, + l: usize, + sp: usize, + users: usize, + np: usize, +) -> BenchmarkId { + BenchmarkId::new( + bench_name, + format!("Security parameter: {l}, Sp:{sp} (users {users}, {pc}%), n_p:{np}"), + ) +} +// Measurements + +/// Nb of DFS call per proof +pub struct Steps; +impl Measurement for Steps { + type Intermediate = u64; + type Value = u64; + + fn start(&self) -> Self::Intermediate { + 0 + } + + fn end(&self, _i: Self::Intermediate) -> Self::Value { + 0 + } + + fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value { + v1 + v2 + } + + fn zero(&self) -> Self::Value { + 0 + } + + fn to_f64(&self, value: &Self::Value) -> f64 { + *value as f64 + } + + fn formatter(&self) -> &dyn ValueFormatter { + &StepsFormatter + } +} + +struct StepsFormatter; + +impl ValueFormatter for StepsFormatter { + fn format_value(&self, value: f64) -> String { + format!("{:.4} steps", value) + } + + fn format_throughput(&self, throughput: &Throughput, value: f64) -> String { + match throughput { + Throughput::Bytes(b) => format!("{:.4} spb", value / *b as f64), + Throughput::Elements(b) => format!("{:.4} steps/{}", value, b), + Throughput::BytesDecimal(b) => format!("{:.4} spb (decimal)", value / *b as f64), + } + } + + fn scale_values(&self, _typical_value: f64, _values: &mut [f64]) -> &'static str { + "steps" + } + + fn scale_throughputs( + &self, + _typical_value: f64, + throughput: &Throughput, + values: &mut [f64], + ) -> &'static str { + match throughput { + Throughput::Bytes(n) => { + for val in values { + *val /= *n as f64; + } + "spb" + } + Throughput::Elements(n) => { + for val in values { + *val /= *n as f64; + } + "spe" + } + Throughput::BytesDecimal(n) => { + for val in values { + *val /= *n as f64; + } + "spb (decimal)" + } + } + } + + fn scale_for_machines(&self, _values: &mut [f64]) -> &'static str { + "steps" + } +} + +/// Nb of repet, times prove_index was called, per proof +/// +pub struct Repetitions; +impl Measurement for Repetitions { + type Intermediate = u64; + type Value = u64; + + fn start(&self) -> Self::Intermediate { + 0 + } + + fn end(&self, _i: Self::Intermediate) -> Self::Value { + 0 + } + + fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value { + v1 + v2 + } 
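+
+    // Note: `start`/`end` are effectively unused for this measurement: the
+    // benches drive it through `Bencher::iter_custom`, whose closure returns
+    // the accumulated repetition count for the requested iterations directly.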
+ + fn zero(&self) -> Self::Value { + 0 + } + + fn to_f64(&self, value: &Self::Value) -> f64 { + *value as f64 + } + + fn formatter(&self) -> &dyn ValueFormatter { + &RepetitionsFormatter + } +} + +struct RepetitionsFormatter; + +impl ValueFormatter for RepetitionsFormatter { + fn format_value(&self, value: f64) -> String { + format!("{:.4} repet", value) + } + + fn format_throughput(&self, throughput: &Throughput, value: f64) -> String { + match throughput { + Throughput::Bytes(b) => format!("{:.4} rpb", value / *b as f64), + Throughput::Elements(b) => format!("{:.4} repet/{}", value, b), + Throughput::BytesDecimal(b) => format!("{:.4} rpb (decimal)", value / *b as f64), + } + } + + fn scale_values(&self, _typical_value: f64, _values: &mut [f64]) -> &'static str { + "repet" + } + + fn scale_throughputs( + &self, + _typical_value: f64, + throughput: &Throughput, + values: &mut [f64], + ) -> &'static str { + match throughput { + Throughput::Bytes(n) => { + for val in values { + *val /= *n as f64; + } + "rpb" + } + Throughput::Elements(n) => { + for val in values { + *val /= *n as f64; + } + "rpe" + } + Throughput::BytesDecimal(n) => { + for val in values { + *val /= *n as f64; + } + "rpb (decimal)" + } + } + } + + fn scale_for_machines(&self, _values: &mut [f64]) -> &'static str { + "repet" + } +} diff --git a/caledonia/benches/weighted_time.rs b/caledonia/benches/weighted_time.rs new file mode 100644 index 00000000..ea4ec3c8 --- /dev/null +++ b/caledonia/benches/weighted_time.rs @@ -0,0 +1,164 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use rand_chacha::ChaCha20Rng; +use rand_core::SeedableRng; +use std::time::{Duration, Instant}; + +use caledonia::weighted_decentralised::Proof; + +pub mod utils; + +fn prove( + c: &mut Criterion, + lambdas: &[usize], + s_p: &[usize], + s_u: &[usize], + n_p: &[usize], + _hash_size: usize, +) { + let mut group = c.benchmark_group("Alba weighted decentralised".to_string()); + + fn prove_duration( + l: usize, + sp: usize, + users: usize, + np: usize, + truncate_size: usize, + n: u64, + ) -> Duration { + let mut rng = ChaCha20Rng::from_entropy(); + let mut total_duration = Duration::ZERO; + for _ in 0..n { + // Setup + let (mut dataset, bench_setup) = + utils::setup_weighted_wrapper(&mut rng, l, sp, users, np); + dataset.truncate(truncate_size); + // Bench + let start = Instant::now(); + black_box({ + Proof::prove(&bench_setup, &dataset); + }); + total_duration = total_duration.saturating_add(start.elapsed()); + } + total_duration + } + + for &l in lambdas { + for &sp in s_p { + for &users in s_u { + for &np in n_p { + // Bench with all of Sp + let high = sp; + + // Bench with all of np% of Sp + let low = (high * np).div_ceil(100); + + // Bench with (100+np)/2 percent of Sp + let mean = (100 + np).div_ceil(2); + let mid = (high + low).div_ceil(2); + + group.bench_function( + utils::bench_id_users("Proving time", np, l, sp, users, np), + move |b| b.iter_custom(|n| prove_duration(l, sp, users, np, low, n)), + ); + group.bench_function( + utils::bench_id_users("Proving time", mean, l, sp, users, np), + move |b| b.iter_custom(|n| prove_duration(l, sp, users, np, mid, n)), + ); + group.bench_function( + utils::bench_id_users("Proving time", 100, l, sp, users, np), + move |b| b.iter_custom(|n| prove_duration(l, sp, users, np, high, n)), + ); + } + } + } + } + group.finish(); +} + +fn verify( + c: &mut Criterion, + lambdas: &[usize], + s_p: &[usize], + s_u: &[usize], + n_p: &[usize], + _hash_size: usize, +) { + let mut group = 
c.benchmark_group("Alba weighted decentralised".to_string()); + + fn verify_duration( + l: usize, + sp: usize, + users: usize, + np: usize, + truncate_size: usize, + n: u64, + ) -> Duration { + let mut rng = ChaCha20Rng::from_entropy(); + let mut total_duration = Duration::ZERO; + for _ in 0..n { + // Setup + let (mut dataset, bench_setup) = + utils::setup_weighted_wrapper(&mut rng, l, sp, users, np); + dataset.truncate(truncate_size); + // Prove + let proof = Proof::prove(&bench_setup, &dataset); + // Bench + let start = Instant::now(); + black_box({ + Proof::verify(&bench_setup, proof); + }); + total_duration = total_duration.saturating_add(start.elapsed()); + } + total_duration + } + + for &l in lambdas { + for &sp in s_p { + for &users in s_u { + for &np in n_p { + // Bench with all of Sp + let high = sp; + + // Bench with all of np% of Sp + let low = (high * np).div_ceil(100); + + // Bench with (100+np)/2 percent of Sp + let mean = (100 + np).div_ceil(2); + let mid = (high + low).div_ceil(2); + + group.bench_function( + utils::bench_id_users("Verification time", np, l, sp, users, np), + move |b| b.iter_custom(|n| verify_duration(l, sp, users, np, low, n)), + ); + group.bench_function( + utils::bench_id_users("Verification time", mean, l, sp, users, np), + move |b| b.iter_custom(|n| verify_duration(l, sp, users, np, mid, n)), + ); + group.bench_function( + utils::bench_id_users("Verification time", 100, l, sp, users, np), + move |b| b.iter_custom(|n| verify_duration(l, sp, users, np, high, n)), + ); + } + } + } + } + group.finish(); +} + +fn prove_benches(c: &mut Criterion) { + // prove(c, &[128], &[1_000, 5_000], &[1_000, 1_000], &[60, 66, 80], 256); + prove(c, &[128], &[100_000], &[1_000], &[60, 66, 80], 256); +} + +fn verify_benches(c: &mut Criterion) { + verify(c, &[10], &[1_000], &[100], &[60, 66, 80], 256); +} + +criterion_group!(name = benches; + config = Criterion::default().nresamples(1000); + targets = + prove_benches, + verify_benches +); + +criterion_main!(benches); diff --git a/caledonia/src/bounded.rs b/caledonia/src/bounded.rs new file mode 100644 index 00000000..3770561c --- /dev/null +++ b/caledonia/src/bounded.rs @@ -0,0 +1,505 @@ +//! Rust implementation of ALBA's bounded DFS scheme using Blake2b as hash +//! function. 
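+//!
+//! # Example (sketch)
+//!
+//! A minimal usage sketch, assuming the `Params`/`Setup`/`Proof` API defined
+//! below and the `utils::gen_items` test helper; the parameters mirror this
+//! module's tests rather than production-grade security levels.
+//!
+//! ```ignore
+//! use caledonia::bounded::{Params, Proof, Setup};
+//!
+//! // Toy set of 1_000 32-byte items derived from a seed.
+//! let set = caledonia::utils::gen_items::<32>(vec![0u8], 1_000);
+//! // Prove a lower bound of n_f = 20 elements given n_p = 80.
+//! let params = Params { lambda_sec: 10, lambda_rel: 10, n_p: 80, n_f: 20 };
+//! let setup = Setup::new(&params);
+//! let proof = Proof::prove(&setup, &set);
+//! assert!(Proof::verify(&setup, proof));
+//! ```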
+use rayon::prelude::*; +use std::sync::atomic::{self, AtomicUsize}; + +extern crate core; +use crate::utils; + +use std::f64::consts::E; +use std::sync::Arc; + +const DATA_LENGTH: usize = 32; +const DIGEST_SIZE: usize = 32; + +type Data = [u8; DATA_LENGTH]; +type Hash = [u8; DIGEST_SIZE]; + +/// Setup input parameters +#[derive(Debug, Clone)] +pub struct Params { + /// Soundness security parameter + pub lambda_sec: usize, + /// Completeness security parameter + pub lambda_rel: usize, + /// Approximate size of set Sp to lower bound + pub n_p: usize, + /// Target lower bound + pub n_f: usize, +} +pub enum Cases { + /// Case where u =< λ^2 + Small, + /// Case where λ^2 < u < λ^3 + Mid, + /// Case where u >= λ^3 + High, +} + +impl Params { + /// Returns information on which case corresponds some parameter + pub fn which_case(&self) -> (Cases, usize) { + let lsec = self.lambda_sec as f64; + let lrel = self.lambda_rel as f64; + let np = self.n_p as f64; + let nf = self.n_f as f64; + let loge = E.log2(); + + let lognpnf = (np / nf).log2(); + let u_f64 = (lsec + lrel.log2() + 5.0 - loge.log2()) / lognpnf; + let u = u_f64.ceil() as u64; + + let ratio = 9.0 * np * loge / ((17 * u).pow(2) as f64); + let s1 = ratio - 7.0; + let s2 = ratio - 2.0; + + if s1 < 1.0 || s2 < 1.0 { + return (Cases::Small, u as usize); + } + + let lrel2 = lrel.min(s2); + if (u as f64) < lrel2 { + return (Cases::Mid, u as usize); + } else { + return (Cases::High, u as usize); + } + } +} + +/// Setup output parameters +#[derive(Debug, Clone)] +pub struct Setup { + /// Approximate size of set Sp to lower bound + pub n_p: usize, + /// Proof size (in Sp elements) + pub u: usize, + /// Proof max counter + pub r: usize, + /// Proof max 2nd counter + pub d: usize, + /// Inverse of probability p_q + pub q: usize, + /// Computation bound + pub b: usize, +} +impl Setup { + /// Setup algorithm taking a Params as input and returning setup parameters (u,d,q) + pub fn new(params: &Params) -> Self { + let loge = E.log2(); + fn compute_w(u: f64, l: f64) -> f64 { + fn factorial_check(w: f64, l: f64) -> bool { + let bound = 0.5f64.powf(l); + let factors: Vec = (1..=(w as u64 + 1)).rev().collect(); + let mut ratio = (14.0 * w * w * (w + 2.0) * E.powf((w + 1.0) / w)) + / (E * (w + 2.0 - E.powf(1.0 / w))); + + for f in factors { + ratio /= f as f64; + if ratio <= bound { + return true; + } + } + return false; + } + let mut w: f64 = u; + while !factorial_check(w, l) { + w += 1.0; + } + w + } + + let n_p_f64 = params.n_p as f64; + let n_f_f64 = params.n_f as f64; + let lognpnf = (n_p_f64 / n_f_f64).log2(); + let lambda_rel = params.lambda_rel as f64; + let logrel = lambda_rel.log2(); + let lambda_sec = (params.lambda_sec as f64) + logrel; + + let u_f64 = ((lambda_sec + logrel + 5.0 - loge.log2()) / lognpnf).ceil(); + let u = u_f64 as usize; + + let ratio = 9.0 * n_p_f64 * loge / ((17 * u).pow(2) as f64); + let s1 = ratio - 7.0; + let s2 = ratio - 2.0; + + if s1 < 1.0 || s2 < 1.0 { + // Small case, ie n_p <= λ^2 + let ln12 = (12f64).ln(); + let d = (32.0 * ln12 * u_f64).ceil(); + return Setup { + n_p: params.n_p, + u, + r: params.lambda_rel, + d: d as usize, + q: (2.0 * ln12 / d).recip().ceil() as usize, + b: (8.0 * (u_f64 + 1.0) * d / ln12).floor() as usize, + }; + } + let lambda_rel2 = lambda_rel.min(s2); + if u_f64 < lambda_rel2 { + // Case 3, Theorem 14, ie n_p >= λ^3 + let d = (16.0 * u_f64 * (lambda_rel2 + 2.0) / loge).ceil(); + assert!(n_p_f64 >= d * d * loge / (9.0 * (lambda_rel2 + 2.0))); + return Setup { + n_p: params.n_p, + u, + r: 
(lambda_rel / lambda_rel2).ceil() as usize, + d: d as usize, + q: (2.0 * (lambda_rel2 + 2.0) / (d * loge)).recip().ceil() as usize, + b: (((lambda_rel2 + 2.0 + u_f64.log2()) / (lambda_rel2 + 2.0)) + * (3.0 * u_f64 * d / 4.0) + + d + + u_f64) + .floor() as usize, + }; + } else { + // Case 2, Theorem 13, ie λ^3 > n_p > λ^2 + let lambda_rel1 = lambda_rel.min(s1); + let lbar = (lambda_rel1 + 7.0) / loge; + let d = (16.0 * u_f64 * lbar).ceil(); + assert!(n_p_f64 >= d * d / (9.0 * lbar)); + + let w = compute_w(u_f64, lambda_rel1); + return Setup { + n_p: params.n_p, + u, + r: (lambda_rel / lambda_rel1).ceil() as usize, + d: d as usize, + q: (2.0 * lbar / d).recip().ceil() as usize, + b: (((w * lbar) / d + 1.0) + * E.powf(2.0 * u_f64 * w * lbar / n_p_f64 + 7.0 * u_f64 / w) + * d + * u_f64 + + d) + .floor() as usize, + }; + } + } +} + +/// Round parameters +#[derive(Debug, Clone)] +pub struct Round { + /// Proof counter + v: usize, + /// Proof 2nd counter + t: usize, + // Round candidate tuple + s_list: Vec, + /// Round candidate hash + h: Hash, + /// Round candidate hash mapped to [1, n_p] + h_usize: usize, + /// Approximate size of set Sp to lower bound + n_p: usize, +} + +impl Round { + /// Oracle producing a uniformly random value in [1, n_p] used for round candidates + /// We also return hash(data) to follow the optimization presented in Section 3.3 + fn h1(data: Vec>, n_p: usize) -> (Hash, usize) { + let digest = utils::combine_hashes::(data); + return (digest, utils::oracle(&digest, n_p)); + } + + /// Output a round from a proof counter and n_p + /// Initilialises the hash with H1(t) and random value as oracle(H1(t), n_p) + pub fn new(v: usize, t: usize, n_p: usize) -> Round { + let mut data = Vec::new(); + data.push(v.to_ne_bytes().to_vec()); + data.push(t.to_ne_bytes().to_vec()); + let (h, h_usize) = Round::h1(data, n_p); + Round { + v, + t, + s_list: Vec::new(), + h: h, + h_usize, + n_p, + } + } + + /// Updates a round with an element of S_p + /// Replaces the hash $h$ with $h' = H1(h, s)$ and the random value as oracle(h', n_p) + pub fn update(r: &Round, s: Data) -> Round { + let mut s_list = r.s_list.clone(); + s_list.push(s); + let mut data = Vec::new(); + data.push(r.h.clone().to_vec()); + data.push(s.to_vec()); + let (h, h_usize) = Round::h1(data, r.n_p); + Round { + v: r.v, + t: r.t, + s_list, + h: h, + h_usize, + n_p: r.n_p, + } + } +} + +#[derive(Debug, Clone)] +/// Alba proof +pub struct Proof { + /// Proof counter + r: usize, + /// Proof 2nd counter + d: usize, + /// Proof tuple + items: Vec, +} + +impl Proof { + /// Returns a new proof + fn new() -> Self { + Proof { + r: 0, + d: 0, + items: Vec::new(), + } + } + + /// Oracle producing a uniformly random value in [1, n_p] used for prehashing S_p + fn h0(setup: &Setup, v: usize, s: Data) -> usize { + let mut data = Vec::new(); + data.push(v.to_ne_bytes().to_vec()); + data.push(s.to_vec()); + let digest = utils::combine_hashes::(data); + return utils::oracle(&digest, setup.n_p); + } + + /// Oracle defined as Bernoulli(q) returning 1 with probability q and 0 otherwise + fn h2(setup: &Setup, r: &Round) -> bool { + let mut data = Vec::new(); + data.push(r.v.to_ne_bytes().to_vec()); + data.push(r.t.to_ne_bytes().to_vec()); + for s in &r.s_list { + data.push(s.clone().to_vec()); + } + let digest = utils::combine_hashes::(data); + return utils::oracle(&digest, setup.q) == 0; + } + + /// Depth-first search which goes through all potential round candidates + /// and returns first round candidate Round{t, x_1, ..., x_u)} such that: + /// 
- for all i ∈ [0, u-1], H0(x_i+1) ∈ bins[H1(t, x_1, ..., x_i)] + /// - H2(t, x_0, ..., x_u) = true + fn dfs( + setup: &Setup, + bins: &Vec>, + round: &Round, + // nb_steps: Arc>, + nb_steps: Arc, + ) -> Option { + if round.s_list.len() == setup.u { + if Proof::h2(setup, round) { + let r = round.v; + let d = round.t; + let items = round.s_list.clone(); + return Some(Proof { r, d, items }); + } else { + return None; + } + } + let result = bins[round.h_usize].par_iter().find_map_first(|&s| { + // if *nb_steps.lock().unwrap() == setup.b { + if nb_steps.load(atomic::Ordering::Relaxed) == setup.d { + return None; + } + + // *nb_steps.lock().unwrap() += 1; + nb_steps.fetch_add(1, atomic::Ordering::Relaxed); + Self::dfs(setup, bins, &Round::update(round, s), nb_steps.clone()) + }); + return result; + } + + /// Indexed proving algorithm, returns an empty proof if no suitable + /// candidate is found within the setup.b steps. + fn prove_index(setup: &Setup, set: &Vec, v: usize) -> (usize, Option) { + let mut bins: Vec> = Vec::new(); + for _ in 1..(setup.n_p + 1) { + bins.push(Vec::new()); + } + for &s in set.iter() { + bins[Proof::h0(setup, v, s)].push(s); + } + // let nb_steps = Arc::new(Mutex::new(0usize)); + let nb_steps = Arc::new(AtomicUsize::new(0)); + for t in 1..(setup.d + 1) { + // if *nb_steps.lock().unwrap() + if nb_steps.load(atomic::Ordering::Relaxed) == setup.b { + return (0, None); + } + // *nb_steps.lock().unwrap() += 1; + nb_steps.fetch_add(1, atomic::Ordering::Relaxed); + let round = Round::new(v, t, setup.n_p); + let res = Proof::dfs(setup, &bins, &round, nb_steps.clone()); + if res.is_some() { + // return (*nb_steps.lock().unwrap(), res); + return (nb_steps.load(atomic::Ordering::Relaxed), res); + } + } + // return (*nb_steps.lock().unwrap(), None); + return (nb_steps.load(atomic::Ordering::Relaxed), None); + } + + /// Alba's proving algorithm, based on a depth-first search algorithm. + /// Calls up to setup.r times the prove_index function and returns an empty + /// proof if no suitable candidate is found. + pub fn prove(setup: &Setup, set: &Vec) -> Self { + for v in 0..setup.r { + if let (_, Some(proof)) = Proof::prove_index(setup, set, v) { + return proof; + } + } + return Proof::new(); + } + + /// Alba's proving algorithm used for benchmarking, returning a proof as + /// well as the number of steps ran to find it. + pub fn bench(setup: &Setup, set: &Vec) -> (usize, usize, Self) { + let mut nb_steps = 0; + for v in 0..setup.r { + let (steps, opt) = Proof::prove_index(setup, set, v); + nb_steps += steps; + if let Some(proof) = opt { + return (nb_steps, proof.r, proof); + } + } + return (nb_steps, setup.r, Proof::new()); + } + + /// Alba's verification algorithm, follows proving algorithm by running the + /// same depth-first search algorithm. 
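+    /// In practice no search is needed: verification replays the single path
+    /// given by the proof, recomputing the H1 hash chain over its items,
+    /// checking each item falls in the expected bin (H0) and that the final
+    /// H2 test passes.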
+ pub fn verify(setup: &Setup, proof: Proof) -> bool { + if proof.d == 0 || proof.d > setup.d || proof.r > setup.r || proof.items.len() != setup.u { + return false; + } + let r0 = Round::new(proof.r, proof.d, setup.n_p); + let (b, round) = proof.items.iter().fold((true, r0), |(b, r), &s| { + ( + b && r.h_usize == Proof::h0(setup, proof.r, s), + Round::update(&r, s), + ) + }); + return b && Proof::h2(setup, &round); + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use rand_chacha::ChaCha20Rng; + use rand_core::{RngCore, SeedableRng}; + + #[test] + fn test_params() { + let lambdas = [10, 80, 100, 128]; + let pows: Vec = (2..10).collect(); + let sps: Vec = pows.iter().map(|&i| 10_u32.pow(i) as usize).collect(); + let ratios = [60, 66, 80, 95, 99]; + let mut params = Vec::new(); + for l in lambdas { + for &sp in &sps { + for r in ratios { + params.push(Params { + lambda_sec: l, + lambda_rel: l, + n_p: (sp * r) / 100, + n_f: (sp * (100 - r)) / 100, + }) + } + } + } + + let mut smalls = Vec::new(); + let mut mids = Vec::new(); + let mut highs = Vec::new(); + for p in params { + match Params::which_case(&p) { + (Cases::Small, u) => smalls.push((p.clone(), u)), + (Cases::Mid, u) => mids.push((p.clone(), u)), + (Cases::High, u) => highs.push((p.clone(), u)), + } + } + + println!("------------ Small cases"); + for s in smalls { + println!("{:?}", s); + } + println!("\n------------ Mid cases"); + for s in mids { + println!("{:?}", s); + } + println!("\n------------ High cases"); + for s in highs { + println!("{:?}", s); + } + } + + #[test] + fn test_verify() { + let mut rng = ChaCha20Rng::from_seed(Default::default()); + let nb_tests = 1_000; + let set_size = 1_000; + for _t in 0..nb_tests { + let seed = rng.next_u32().to_ne_bytes().to_vec(); + let s_p = utils::gen_items::(seed, set_size); + let params = Params { + lambda_sec: 10, + lambda_rel: 10, + n_p: 80, + n_f: 20, + }; + let setup = Setup::new(¶ms); + let proof = Proof::prove(&setup, &s_p); + assert!(Proof::verify(&setup, proof.clone())); + let proof_0 = Proof { + r: proof.r, + d: 0, + items: proof.items.clone(), + }; + assert!(!Proof::verify(&setup, proof_0)); + let proof_d = Proof { + r: proof.r, + d: proof.d.wrapping_add(1), + items: proof.items.clone(), + }; + assert!(!Proof::verify(&setup, proof_d)); + let proof_r = Proof { + r: proof.r.wrapping_add(1), + d: proof.d, + items: proof.items.clone(), + }; + assert!(!Proof::verify(&setup, proof_r)); + let proof_item = Proof { + r: proof.r, + d: proof.d, + items: Vec::new(), + }; + assert!(!Proof::verify(&setup, proof_item)); + let mut wrong_items = proof.items.clone(); + let last_item = wrong_items.pop().unwrap(); + let mut penultimate_item = wrong_items.pop().unwrap(); + let proof_itembis = Proof { + r: proof.r, + d: proof.d, + items: wrong_items.clone(), + }; + assert!(!Proof::verify(&setup, proof_itembis)); + // Modifying the penultimate item to check correctness of H1 check and not H2 + penultimate_item[0] = penultimate_item[0].wrapping_add(42u8); + wrong_items.push(penultimate_item); + wrong_items.push(last_item); + let proof_itembis = Proof { + r: proof.r, + d: proof.d, + items: wrong_items.clone(), + }; + assert!(!Proof::verify(&setup, proof_itembis)); + } + } +} diff --git a/caledonia/src/decentralised.rs b/caledonia/src/decentralised.rs new file mode 100644 index 00000000..9057f019 --- /dev/null +++ b/caledonia/src/decentralised.rs @@ -0,0 +1,477 @@ +//! Rust implementation of ALBA's Telescope scheme using Blake2b as hash +//! function. 
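+//!
+//! # Example (sketch)
+//!
+//! A minimal usage sketch, assuming the API defined below and the
+//! `utils::gen_items` test helper; each participant runs the lottery locally
+//! and the aggregator proves over the winning items. Parameters mirror this
+//! module's tests.
+//!
+//! ```ignore
+//! use caledonia::decentralised::{Params, Proof, Setup};
+//!
+//! let set_size = 100_000;
+//! let params = Params::new(80, 80, set_size * 60 / 100, set_size * 40 / 100);
+//! let winners: Vec<[u8; 32]> = caledonia::utils::gen_items::<32>(vec![0u8], set_size)
+//!     .into_iter()
+//!     .filter(|s| Proof::lottery(params.n_p, params.mu, *s))
+//!     .collect();
+//! let setup = Setup::new(&params);
+//! let proof = Proof::prove(&setup, &winners);
+//! assert!(Proof::verify(&setup, proof));
+//! ```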
+
+use crate::utils;
+extern crate core;
+use std::f64::consts::E;
+
+const DATA_LENGTH: usize = 32;
+const DIGEST_SIZE: usize = 32;
+
+type Data = [u8; DATA_LENGTH];
+
+type Hash = [u8; DIGEST_SIZE];
+
+/// Setup input parameters
+#[derive(Debug, Clone)]
+pub struct Params {
+    /// Soundness security parameter
+    pub lambda_sec: usize,
+    /// Completeness security parameter
+    pub lambda_rel: usize,
+    /// Approximate size of set Sp to lower bound
+    pub n_p: usize,
+    /// Target lower bound
+    pub n_f: usize,
+    /// Expected number of participants
+    pub mu: usize,
+}
+pub enum Cases {
+    /// Case where u <= λ^2
+    Small,
+    /// Case where λ^2 < u < λ^3
+    Mid,
+    /// Case where u >= λ^3
+    High,
+}
+
+impl Params {
+    /// Returns the minimum mu, found by dichotomic search,
+    /// so that the soundness error is <= 2^-lambda_sec
+    pub fn min_mu(lambda_sec: usize, lambda_rel: usize, n_p: usize, n_f: usize) -> usize {
+        fn compute_bounds(lsec: f64, lrel: f64, np: f64, nf: f64, mu: f64) -> bool {
+            let npnf = np / nf;
+            let lognpnf = npnf.log2();
+            let loge = E.log2();
+            let ln12 = (12f64).ln();
+
+            let bound_completeness = 2f64 * (lrel + 1f64) / loge;
+
+            let delta = (bound_completeness / mu).sqrt();
+            let rho = ((1f64 - delta) * mu).ceil();
+            let rhomu = rho / mu;
+            let logrhomu = rhomu.log2();
+            let u_f64 = ((lsec + (lrel + 1f64).log2() + 1f64 + loge + ln12.log2())
+                / (lognpnf + logrhomu))
+                .ceil();
+
+            // Soundness check
+            let bound_soundness = npnf * u_f64 * u_f64;
+            return (mu > bound_completeness) && (mu >= bound_soundness) && (npnf * rhomu > 1f64);
+        }
+        let (np, nf) = (n_p as f64, n_f as f64);
+        let (mut lower, mut upper) = (0.0, np);
+        let mut mu = 0.5 * np;
+        while mu != np {
+            let b = compute_bounds(lambda_sec as f64, lambda_rel as f64, np, nf, mu);
+            if b {
+                upper = mu;
+            } else {
+                lower = mu;
+            }
+            let new_mu = ((lower + upper) / 2.0).ceil();
+            if mu == new_mu && b {
+                return mu as usize;
+            }
+            mu = new_mu;
+        }
+        return mu as usize;
+    }
+
+    pub fn new(lambda_sec: usize, lambda_rel: usize, n_p: usize, n_f: usize) -> Params {
+        let mu = Params::min_mu(lambda_sec, lambda_rel, n_p, n_f);
+        return Params {
+            lambda_sec,
+            lambda_rel,
+            n_p,
+            n_f,
+            mu,
+        };
+    }
+}
+
+/// Setup output parameters
+#[derive(Debug, Clone)]
+pub struct Setup {
+    /// Approximate size of set Sp to lower bound
+    pub n_p_lottery: usize,
+    /// Post-lottery Alba n_p
+    pub n_p: usize,
+    /// Expected number of participants
+    pub mu: usize,
+    /// Proof size (in Sp elements)
+    pub u: usize,
+    /// Proof max counter
+    pub r: usize,
+    /// Proof max 2nd counter
+    pub d: usize,
+    /// Inverse of probability p_q
+    pub q: usize,
+    /// Computation bound
+    pub b: usize,
+}
+impl Setup {
+    /// Setup algorithm taking a Params as input and returning setup parameters (u,d,q)
+    /// Follows Theorems 16 and 17
+    pub fn new(params: &Params) -> Self {
+        let n_p_f64 = params.n_p as f64;
+        let n_f_f64 = params.n_f as f64;
+        let npnf = n_p_f64 / n_f_f64;
+        let lognpnf = npnf.log2();
+        let lambda_rel = params.lambda_rel as f64;
+        let lambda_sec = params.lambda_sec as f64;
+        let mu = params.mu as f64;
+        let loge = E.log2();
+        let ln12 = (12f64).ln();
+
+        // Completeness check
+        let mu_completeness = 2f64 * (lambda_rel + 1f64) / loge;
+        assert!(mu == n_p_f64 || mu > mu_completeness);
+
+        let delta = (mu_completeness / mu).sqrt();
+        let rho = ((1f64 - delta) * mu).ceil();
+        let rhomu = rho / mu;
+        let logrhomu = rhomu.log2();
+        let u_f64 = (lambda_sec + (lambda_rel + 1f64).log2() + 1f64 + loge + ln12.log2())
+            / (lognpnf + logrhomu);
+        let d =
(32.0 * ln12 * u_f64).ceil(); + let q = (2.0 * ln12 / d).recip().ceil(); + + // Soudness check + let mu_soundness = npnf * u_f64 * u_f64; + assert!(mu == n_p_f64 || mu >= mu_soundness); + assert!(mu == n_p_f64 || npnf * rhomu > 1f64); + + // let soundness_error = q + // * d + // * (lambda_rel + 1.0) + // * (npnf.recip() * rhomu.recip()).powf(u_f64) + // * E.powf(u_f64 * u_f64 * npnf / mu); + // println!("Soundness error: {}", soundness_error); + + return Setup { + n_p_lottery: params.n_p, + n_p: rho as usize, + mu: params.mu, + u: u_f64 as usize, + r: params.lambda_rel + 1, + d: d as usize, + q: q as usize, + b: (8.0 * (u_f64 + 1.0) * d / ln12).floor() as usize, + }; + } +} + +/// Round parameters +#[derive(Debug, Clone)] +pub struct Round { + /// Proof counter + v: usize, + /// Proof 2nd counter + t: usize, + // Round candidate tuple + s_list: Vec, + /// Round candidate hash + h: Hash, + /// Round candidate hash mapped to [1, n_p] + h_usize: usize, + /// Approximate size of set Sp to lower bound + n_p: usize, +} + +impl Round { + /// Oracle producing a uniformly random value in [1, n_p] used for round candidates + /// We also return hash(data) to follow the optimization presented in Section 3.3 + fn h1(data: Vec>, n_p: usize) -> (Hash, usize) { + let digest = utils::combine_hashes::(data); + return (digest, utils::oracle(&digest, n_p)); + } + + /// Output a round from a proof counter and n_p + /// Initilialises the hash with H1(t) and random value as oracle(H1(t), n_p) + pub fn new(v: usize, t: usize, n_p: usize) -> Round { + let mut data = Vec::new(); + data.push(v.to_ne_bytes().to_vec()); + data.push(t.to_ne_bytes().to_vec()); + let (h, h_usize) = Round::h1(data, n_p); + Round { + v, + t, + s_list: Vec::new(), + h: h, + h_usize, + n_p, + } + } + + /// Updates a round with an element of S_p + /// Replaces the hash $h$ with $h' = H1(h, s)$ and the random value as oracle(h', n_p) + pub fn update(r: &Round, s: Data) -> Round { + let mut s_list = r.s_list.clone(); + s_list.push(s); + let mut data = Vec::new(); + data.push(r.h.clone().to_vec()); + data.push(s.to_vec()); + let (h, h_usize) = Round::h1(data, r.n_p); + Round { + v: r.v, + t: r.t, + s_list, + h: h, + h_usize, + n_p: r.n_p, + } + } +} + +#[derive(Debug, Clone)] +/// Alba proof +pub struct Proof { + /// Proof counter + r: usize, + /// Proof 2nd counter + d: usize, + /// Proof tuple + items: Vec, +} + +impl Proof { + /// Returns a new proof + fn new() -> Self { + Proof { + r: 0, + d: 0, + items: Vec::new(), + } + } + + /// Lottery (Section 4.1 of ALBA paper) scheme using oracle outputing 1 + /// with probability p = mu / np where mu (resp. np) is the expected (resp. 
+ /// total) number of participants + pub fn lottery(np: usize, mu: usize, s: Data) -> bool { + let mut data = Vec::new(); + data.push(s.to_vec()); + let digest = utils::combine_hashes::(data); + let proba: f64 = mu as f64 / np as f64; + let inverse_proba = proba.recip().ceil() as usize; + return utils::oracle(&digest, inverse_proba) == 0; + } + + /// Oracle producing a uniformly random value in [1, n_p] used for prehashing S_p + fn h0(setup: &Setup, v: usize, s: Data) -> usize { + let mut data = Vec::new(); + data.push(v.to_ne_bytes().to_vec()); + data.push(s.to_vec()); + let digest = utils::combine_hashes::(data); + return utils::oracle(&digest, setup.n_p); + } + + /// Oracle defined as Bernoulli(q) returning 1 with probability q and 0 otherwise + fn h2(setup: &Setup, r: &Round) -> bool { + let mut data = Vec::new(); + data.push(r.v.to_ne_bytes().to_vec()); + data.push(r.t.to_ne_bytes().to_vec()); + for s in &r.s_list { + data.push(s.clone().to_vec()); + } + let digest = utils::combine_hashes::(data); + return utils::oracle(&digest, setup.q) == 0; + } + + /// Depth-first search which goes through all potential round candidates + /// and returns first round candidate Round{t, x_1, ..., x_u)} such that: + /// - for all i ∈ [0, u-1], H0(x_i+1) ∈ bins[H1(t, x_1, ..., x_i)] + /// - H2(t, x_0, ..., x_u) = true + fn dfs( + setup: &Setup, + bins: &Vec>, + round: &Round, + nb_steps: &mut usize, + ) -> Option { + if round.s_list.len() == setup.u { + if Proof::h2(setup, round) { + let r = round.v; + let d = round.t; + let items = round.s_list.clone(); + return Some(Proof { r, d, items }); + } else { + return None; + } + } + let result = bins[round.h_usize].iter().find_map(|&s| { + if *nb_steps == setup.b { + return None; + } + *nb_steps += 1; + Self::dfs(setup, bins, &Round::update(round, s), nb_steps) + }); + return result; + } + + /// Indexed proving algorithm, returns an empty proof if no suitable + /// candidate is found within the setup.b steps. + fn prove_index(setup: &Setup, set: &Vec, v: usize) -> (usize, Option) { + let mut bins: Vec> = Vec::new(); + for _ in 1..(setup.n_p + 1) { + bins.push(Vec::new()); + } + for &s in set.iter() { + bins[Proof::h0(setup, v, s)].push(s); + } + let mut nb_steps = 0; + for t in 1..(setup.d + 1) { + if nb_steps == setup.b { + return (0, None); + } + nb_steps += 1; + let round = Round::new(v, t, setup.n_p); + let res = Proof::dfs(setup, &bins, &round, &mut nb_steps); + if res.is_some() { + return (nb_steps, res); + } + } + return (nb_steps, None); + } + + /// Alba's proving algorithm, based on a depth-first search algorithm. + /// Calls up to setup.r times the prove_index function and returns an empty + /// proof if no suitable candidate is found. + pub fn prove(setup: &Setup, set: &Vec) -> Self { + // Lottery must be done by each participant on its own, we have here + // the aggregator running it another time for robustness. + let winner_set = set + .iter() + .filter_map(|&s| Proof::lottery(setup.n_p_lottery, setup.mu, s).then(|| s)) + .collect(); + + for v in 0..setup.r { + if let (_, Some(proof)) = Proof::prove_index(setup, &winner_set, v) { + return proof; + } + } + return Proof::new(); + } + + /// Alba's proving algorithm used for benchmarking, returning a proof as + /// well as the number of steps ran to find it. 
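+    /// Unlike `prove`, this does not re-run the lottery: callers are expected
+    /// to pass the post-lottery winner set directly.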
+ pub fn bench(setup: &Setup, set: &Vec) -> (usize, Self) { + let mut nb_steps = 0; + for v in 0..setup.r { + let (steps, opt) = Proof::prove_index(setup, set, v); + nb_steps += steps; + if let Some(proof) = opt { + return (nb_steps, proof); + } + } + return (nb_steps, Proof::new()); + } + + /// Alba's verification algorithm, follows proving algorithm by running the + /// same depth-first search algorithm. + pub fn verify(setup: &Setup, proof: Proof) -> bool { + if proof.d == 0 || proof.d > setup.d || proof.r > setup.r || proof.items.len() != setup.u { + return false; + } + + let r0 = Round::new(proof.r, proof.d, setup.n_p); + let (b, round) = proof.items.iter().fold((true, r0), |(b, r), &s| { + ( + b && r.h_usize == Proof::h0(setup, proof.r, s) + && Proof::lottery(setup.n_p_lottery, setup.mu, s), + Round::update(&r, s), + ) + }); + return b && Proof::h2(setup, &round); + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use rand_chacha::ChaCha20Rng; + use rand_core::{RngCore, SeedableRng}; + + #[test] + fn test_params() { + let lambdas = [80, 100, 128]; + let pows: Vec = (2..10).collect(); + let sps: Vec = pows.iter().map(|&i| 10_u32.pow(i) as usize).collect(); + let ratios = [60, 66, 80, 95, 99]; + let mut params = Vec::new(); + for l in lambdas { + for &sp in &sps { + for r in ratios { + let n_p = (sp * r) / 100; + let n_f = (sp * (100 - r)) / 100; + let p = Params::new(l, l, n_p, n_f); + params.push(p); + } + } + } + } + + #[test] + fn test_verify() { + let mut rng = ChaCha20Rng::from_seed(Default::default()); + let nb_tests = 100; + let set_size = 100_000; + let lambda = 80; + let np = 60; + let nf = 100 - np; + for _t in 0..nb_tests { + let seed = rng.next_u32().to_ne_bytes().to_vec(); + let params = Params::new(lambda, lambda, set_size * np / 100, set_size * nf / 100); + let s_p = utils::gen_items::(seed, set_size) + .iter() + .filter_map(|&s| Proof::lottery(params.n_p, params.mu, s).then(|| s)) + .collect(); + let setup = Setup::new(¶ms); + let proof = Proof::prove(&setup, &s_p); + assert!(Proof::verify(&setup, proof.clone())); + let proof_0 = Proof { + r: proof.r, + d: 0, + items: proof.items.clone(), + }; + assert!(!Proof::verify(&setup, proof_0)); + let proof_d = Proof { + r: proof.r, + d: proof.d.wrapping_add(1), + items: proof.items.clone(), + }; + assert!(!Proof::verify(&setup, proof_d)); + let proof_r = Proof { + r: proof.r.wrapping_add(1), + d: proof.d, + items: proof.items.clone(), + }; + assert!(!Proof::verify(&setup, proof_r)); + let proof_item = Proof { + r: proof.r, + d: proof.d, + items: Vec::new(), + }; + assert!(!Proof::verify(&setup, proof_item)); + let mut wrong_items = proof.items.clone(); + let last_item = wrong_items.pop().unwrap(); + let mut penultimate_item = wrong_items.pop().unwrap(); + let proof_itembis = Proof { + r: proof.r, + d: proof.d, + items: wrong_items.clone(), + }; + assert!(!Proof::verify(&setup, proof_itembis)); + // Modifying the penultimate item to check correctness of H1 check and not H2 + penultimate_item[0] = penultimate_item[0].wrapping_add(42u8); + wrong_items.push(penultimate_item); + wrong_items.push(last_item); + let proof_itembis = Proof { + r: proof.r, + d: proof.d, + items: wrong_items.clone(), + }; + assert!(!Proof::verify(&setup, proof_itembis)); + } + } +} diff --git a/caledonia/src/lib.rs b/caledonia/src/lib.rs new file mode 100644 index 00000000..150a3a41 --- /dev/null +++ b/caledonia/src/lib.rs @@ -0,0 +1,19 @@ +//! Approximate Lower Bound Arguments (ALBA, ) +//! +//! 
Alba is a generic protocol for succinctly proving a lower bound on the size
+//! of a, potentially weighted, set. Say we have a set Sp of size |Sp| >= $n_p$,
+//! and a lower bound $n_f$ < $n_p$ that we want to prove. Alba gives us a
+//! method to generate a proof of knowledge of this bound by finding a subset
+//! of Sp of size $u$ that convinces a verifier.
+//! The paper presents several schemes and optimizations. The basic scheme is
+//! enhanced in the "prehashed" version, which pre-sorts Sp into bins with a
+//! balls-and-bins technique, reducing the number of hashes done per round. A
+//! lottery scheme is also introduced to support Alba in a decentralised
+//! setting, as well as a modification using a PRF in the CRS setting instead
+//! of the ROM.
+pub mod utils;
+
+pub mod bounded;
+pub mod decentralised;
+pub mod prehashed;
+pub mod weighted_decentralised;
diff --git a/caledonia/src/prehashed.rs b/caledonia/src/prehashed.rs
new file mode 100644
index 00000000..749b46d2
--- /dev/null
+++ b/caledonia/src/prehashed.rs
@@ -0,0 +1,274 @@
+//! Rust implementation of ALBA's prehashed scheme using Blake2b as hash
+//! function, working for big sets.
+
+extern crate core;
+use crate::utils;
+
+use std::f64::consts::E;
+
+const DATA_LENGTH: usize = 32;
+const DIGEST_SIZE: usize = 32;
+
+type Data = [u8; DATA_LENGTH];
+type Hash = [u8; DIGEST_SIZE];
+
+/// Setup input parameters
+#[derive(Debug, Clone)]
+pub struct Params {
+    /// Soundness security parameter
+    pub lambda_sec: usize,
+    /// Completeness security parameter
+    pub lambda_rel: usize,
+    /// Approximate size of set Sp to lower bound
+    pub n_p: usize,
+    /// Target lower bound
+    pub n_f: usize,
+}
+/// Setup output parameters
+#[derive(Debug, Clone)]
+pub struct Setup {
+    /// Approximate size of set Sp to lower bound
+    pub n_p: usize,
+    /// Proof size (in Sp elements)
+    pub u: usize,
+    /// Proof max counter
+    pub d: usize,
+    /// Inverse of probability p_q
+    pub q: usize,
+}
+impl Setup {
+    /// Setup algorithm taking a Params as input and returning setup parameters (u,d,q)
+    pub fn new(params: &Params) -> Self {
+        let e = E;
+        let log_2 = |x: f64| x.log2();
+        let loge = log_2(e);
+        let logloge = log_2(loge);
+        let log3 = log_2(3.0);
+
+        let n_p_f64 = params.n_p as f64;
+        let n_f_f64 = params.n_f as f64;
+        let lognpnf = log_2(n_p_f64 / n_f_f64);
+        let lambda_sec = params.lambda_sec as f64;
+        let lambda_rel = params.lambda_rel as f64;
+
+        // We define the parameters according to Section 3.2, Corollary 2
+        let u_f64 = (lambda_sec + log_2(lambda_rel + log3) + 1.0 - logloge) / lognpnf;
+        let u = u_f64.ceil() as usize;
+        let d_f64 = 16.0 * u_f64 * (lambda_rel + log3) / loge;
+        let d = d_f64.ceil() as usize;
+        let q = (2.0 * (lambda_rel + log3) / (d_f64 * loge)).recip().ceil() as usize;
+
+        let check = ((d_f64 * d_f64 * loge) / (9.0 * (lambda_rel + log3))).ceil() as usize;
+        assert!(params.n_p >= check);
+
+        Setup {
+            n_p: params.n_p,
+            u,
+            d,
+            q,
+        }
+    }
+}
+
+/// Round parameters
+#[derive(Debug, Clone)]
+pub struct Round {
+    /// Proof counter
+    t: usize,
+    // Round candidate tuple
+    s_list: Vec<Data>,
+    /// Round candidate hash
+    h: Vec<u8>,
+    /// Round candidate hash mapped to [1, n_p]
+    h_usize: usize,
+    /// Approximate size of set Sp to lower bound
+    n_p: usize,
+}
+
+impl Round {
+    /// Oracle producing a uniformly random value in [1, n_p] used for round candidates
+    /// We also return hash(data) to follow the optimization presented in Section 3.3
+    fn h1(data: Vec<Vec<u8>>, n_p: usize) -> (Hash, usize) {
+        let digest =
utils::combine_hashes(data); + return (digest, utils::oracle(&digest, n_p)); + } + + /// Output a round from a proof counter and n_p + /// Initilialises the hash with H1(t) and random value as oracle(H1(t), n_p) + pub fn new(t: usize, n_p: usize) -> Round { + let data = [t.to_ne_bytes().to_vec()].to_vec(); + let (h, h_usize) = Round::h1(data, n_p); + Round { + t, + s_list: Vec::new(), + h: h.to_vec(), + h_usize, + n_p, + } + } + + /// Updates a round with an element of S_p + /// Replaces the hash $h$ with $h' = H1(h, s)$ and the random value as oracle(h', n_p) + pub fn update(r: &Round, s: Data) -> Round { + let mut s_list = r.s_list.clone(); + s_list.push(s); + let mut data = Vec::new(); + data.push(r.h.clone()); + data.push(s.to_vec()); + let (h, h_usize) = Round::h1(data, r.n_p); + Round { + t: r.t, + s_list, + h: h.to_vec(), + h_usize, + n_p: r.n_p, + } + } +} + +#[derive(Debug, Clone)] +/// Alba proof +pub struct Proof { + /// Proof counter + d: usize, + /// Proof tuple + items: Vec, +} + +impl Proof { + /// Oracle producing a uniformly random value in [1, n_p] used for prehashing S_p + // TODO: We also return hash(data) to follow the optimization presented in Section 3.3 + fn h0(setup: &Setup, s: Data) -> usize { + let mut data = Vec::new(); + data.push(s.to_vec()); + let digest = utils::combine_hashes::(data); + return utils::oracle(&digest, setup.n_p); + } + + /// Oracle defined as Bernoulli(q) returning 1 with probability q and 0 otherwise + fn h2(setup: &Setup, r: &Round) -> bool { + let mut data = Vec::new(); + data.push(r.t.to_ne_bytes().to_vec()); + for s in &r.s_list { + data.push(s.clone().to_vec()); + } + let digest = utils::combine_hashes::(data); + return utils::oracle(&digest, setup.q) == 0; + } + + /// Depth-first search which goes through all potential round candidates + /// and returns first round candidate Round{t, x_1, ..., x_u)} such that: + /// - for all i ∈ [0, u-1], H0(x_i+1) ∈ bins[H1(t, x_1, ..., x_i)] + /// - H2(t, x_0, ..., x_u) = true + fn dfs(setup: &Setup, bins: &Vec>, round: &Round) -> Option { + if round.s_list.len() == setup.u { + if Proof::h2(setup, round) { + let d = round.t; + let items = round.s_list.clone(); + return Some(Proof { d, items }); + } else { + return None; + } + } + let result = bins[round.h_usize] + .iter() + .find_map(|&s| Self::dfs(setup, bins, &Round::update(round, s))); + return result; + } + + /// Alba's proving algorithm, based on a depth-first search algorithm. + /// Returns an empty proof if no suitable candidate is found. + pub fn prove(setup: &Setup, set: &Vec) -> Self { + let mut bins: Vec> = Vec::new(); + for _ in 1..(setup.n_p + 1) { + bins.push(Vec::new()); + } + for &s in set.iter() { + bins[Proof::h0(setup, s)].push(s); + } + + for t in 1..(setup.d + 1) { + let round = Round::new(t, setup.n_p); + if let Some(proof) = Proof::dfs(setup, &bins, &round) { + return proof; + }; + } + + return Proof { + d: 0, + items: Vec::new(), + }; + } + + /// Alba's verification algorithm, follows proving algorithm by running the + /// same depth-first search algorithm. 
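+    /// As in the bounded scheme, verification replays the single path given
+    /// by the proof: it recomputes the H1 hash chain over the items, checks
+    /// each item falls in the expected bin (H0), and checks the final H2 test.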
+    pub fn verify(setup: &Setup, proof: Proof) -> bool {
+        if proof.d == 0 || proof.d > setup.d || proof.items.len() != setup.u {
+            return false;
+        }
+        let r0 = Round::new(proof.d, setup.n_p);
+        let (b, round) = proof.items.iter().fold((true, r0), |(b, r), &s| {
+            (b && r.h_usize == Proof::h0(setup, s), Round::update(&r, s))
+        });
+        return b && Proof::h2(setup, &round);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use rand_chacha::ChaCha20Rng;
+    use rand_core::{RngCore, SeedableRng};
+
+    #[test]
+    fn test_verify() {
+        let mut rng = ChaCha20Rng::from_seed(Default::default());
+        let nb_tests = 1_000;
+        let set_size = 1_000;
+        for _t in 0..nb_tests {
+            let seed = rng.next_u32().to_ne_bytes().to_vec();
+            let s_p = utils::gen_items::<DATA_LENGTH>(seed, set_size);
+            let params = Params {
+                lambda_sec: 10,
+                lambda_rel: 10,
+                n_p: 800,
+                n_f: 2,
+            };
+            let setup = Setup::new(&params);
+            let proof = Proof::prove(&setup, &s_p);
+            assert!(Proof::verify(&setup, proof.clone()));
+            let proof_0 = Proof {
+                d: 0,
+                items: proof.items.clone(),
+            };
+            assert!(!Proof::verify(&setup, proof_0));
+            let proof_d = Proof {
+                d: proof.d.wrapping_add(1),
+                items: proof.items.clone(),
+            };
+            assert!(!Proof::verify(&setup, proof_d));
+            let proof_item = Proof {
+                d: proof.d,
+                items: Vec::new(),
+            };
+            assert!(!Proof::verify(&setup, proof_item));
+            let mut wrong_items = proof.items.clone();
+            let last_item = wrong_items.pop().unwrap();
+            let mut penultimate_item = wrong_items.pop().unwrap();
+            let proof_itembis = Proof {
+                d: proof.d,
+                items: wrong_items.clone(),
+            };
+            assert!(!Proof::verify(&setup, proof_itembis));
+            // Modifying the penultimate item to check correctness of the H1 check and not H2
+            penultimate_item[0] = penultimate_item[0].wrapping_add(1);
+            wrong_items.push(penultimate_item);
+            wrong_items.push(last_item);
+            let proof_itembis = Proof {
+                d: proof.d,
+                items: wrong_items,
+            };
+            assert!(!Proof::verify(&setup, proof_itembis));
+        }
+    }
+}
diff --git a/caledonia/src/utils.rs b/caledonia/src/utils.rs
new file mode 100644
index 00000000..e1e12ddd
--- /dev/null
+++ b/caledonia/src/utils.rs
@@ -0,0 +1,205 @@
+use blake2::digest::{Update, VariableOutput};
+use blake2::Blake2bVar;
+use std::cmp::min;
+
+// Helper functions
+fn mod_non_power_of_2(hash: &[u8], n: usize) -> usize {
+    let epsilon_fail: usize = 1 << 40; // roughly 1 in a trillion
+    let k: usize = log_base2(n * epsilon_fail);
+    let k_prime: usize = 1 << k;
+    let d: usize = k_prime.div_ceil(n);
+
+    let i = mod_power_of_2(hash, k_prime);
+
+    if i >= d * n {
+        panic!("failed: i = {}, d = {}, n = {}, k = {}", i, d, n, k);
+    } else {
+        i % n
+    }
+}
+
+fn mod_power_of_2(hash: &[u8], n: usize) -> usize {
+    let r = from_bytes_le(hash);
+    (n - 1) & r
+}
+
+fn log_base2(x: usize) -> usize {
+    usize::BITS as usize - x.leading_zeros() as usize - 1
+}
+
+fn from_bytes_le(bytes: &[u8]) -> usize {
+    let mut array = [0u8; 8];
+    let bytes = &bytes[..min(8, bytes.len())];
+    array[..bytes.len()].copy_from_slice(bytes);
+    usize::from_le_bytes(array)
+}
+
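+// Example (added, illustrative): `from_bytes_le` reads the first 8 bytes of a
+// hash as a little-endian integer, so the low bits come from the first byte.
+// With n = 8 (a power of two), a hash whose first byte is 0x0d maps to
+// 0x0d & 7 = 5 under `mod_power_of_2`.
+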
+/// Return an N-byte Blake2b hash of the given data
+pub fn hash_bytes<const N: usize>(data: &[u8]) -> [u8; N] {
+    let mut hasher = Blake2bVar::new(N).expect("Failed to construct hasher!");
+    hasher.update(data);
+    let mut buf = [0u8; N];
+    hasher
+        .finalize_variable(&mut buf)
+        .expect("Failed to finalize hashing");
+    buf
+}
+
+/// Return an N-byte Blake2b hash of the given list of data
+pub fn combine_hashes<const N: usize>(hash_list: Vec<Vec<u8>>) -> [u8; N] {
+    let mut hasher = Blake2bVar::new(N).expect("Failed to construct hasher!");
+    for data in hash_list.iter() {
+        hasher.update(data);
+    }
+    let mut buf = [0u8; N];
+    hasher
+        .finalize_variable(&mut buf)
+        .expect("Failed to finalize hashing");
+    buf
+}
+
+/// Map a hash to an integer uniformly distributed in [0, n)
+pub fn oracle(hash: &[u8], n: usize) -> usize {
+    if n.is_power_of_two() {
+        mod_power_of_2(hash, n)
+    } else {
+        mod_non_power_of_2(hash, n)
+    }
+}
+
+/// Generate a set of items given the set size and a seed
+/// Items are generated by hashing the current index
+pub fn gen_items<const N: usize>(seed: Vec<u8>, set_size: usize) -> Vec<[u8; N]> {
+    let mut s_p = Vec::with_capacity(set_size);
+    for b in 0..set_size {
+        let mut data = Vec::new();
+        data.push(seed.clone());
+        data.push(b.to_ne_bytes().to_vec());
+        let item = combine_hashes::<N>(data);
+        s_p.push(item);
+    }
+    s_p
+}
+
+/// Generate a set of weighted items given the total weight, the number of items and a seed
+/// Items are generated by hashing the current index
+pub fn gen_weighted_items<const N: usize>(
+    seed: Vec<u8>,
+    total_weight: usize,
+    set_size: usize,
+) -> Vec<([u8; N], usize)> {
+    assert!(set_size <= total_weight);
+    let mut s_p = Vec::with_capacity(set_size);
+    let mut s_n = Vec::with_capacity(set_size);
+    let mut sum: u64 = 0;
+    // Initialising items with random weights
+    for b in 0..set_size {
+        let mut data = Vec::new();
+        data.push(seed.clone());
+        data.push(b.to_ne_bytes().to_vec());
+        let item = combine_hashes::<N>(data);
+        let weight = u32::from_be_bytes(hash_bytes::<4>(&item));
+        sum += weight as u64;
+        s_p.push(item);
+        s_n.push(weight);
+    }
+    // Updating weights to add up to around total_weight, with a minimum weight of 1
+    let denominator = sum as f32 / (total_weight - set_size) as f32;
+    let mut new_sum: u64 = 0;
+    s_n.iter_mut().for_each(|w| {
+        *w = 1 + (*w as f32 / denominator).ceil() as u32;
+        new_sum += *w as u64;
+    });
+    // Fixing the ceiling error by spreading the difference across items
+    let total_weight = total_weight as u64;
+    let b = total_weight < new_sum;
+    let mut delta = if b {
+        new_sum - total_weight
+    } else {
+        total_weight - new_sum
+    };
+
+    while delta != 0 {
+        s_n = s_n
+            .iter()
+            .map(|&w| match (delta, b) {
+                (0, _) => w,
+                (_, true) => {
+                    delta -= 1;
+                    w - if w > 1 { 1 } else { 0 }
+                }
+                (_, false) => {
+                    delta -= 1;
+                    w + 1
+                }
+            })
+            .collect();
+    }
+    let mut result: Vec<([u8; N], usize)> = Vec::new();
+    for i in 0..set_size {
+        result.push((s_p[i], s_n[i] as usize));
+    }
+    return result;
+}
+
+pub fn format_time(nanos: u128) -> String {
+    let mut time = nanos;
+    let bounds = [1000, 1000, 1000, 60, 60, 60];
+    let units = ["ns", "μs", "ms", "s", "min", "h"];
+    for (&bound, &unit) in bounds.iter().zip(units.iter()) {
+        if time < bound {
+            return time.to_string() + unit;
+        }
+        time = time / bound;
+    }
+    (time * 60).to_string() + "h"
+}
+
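+// Worked example (added, illustrative): for B(n = 2, p = 0.5), the PMF below
+// gives bin_pmf(2, 1, 0.5) = C(2,1) * 0.5 * 0.5 = 0.5, and the CDF gives
+// bin_cdf(2, 1, 0.5) = bin_pmf(2, 0, 0.5) + bin_pmf(2, 1, 0.5) = 0.75.
+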
+/// Compute the Probability Mass Function (PMF) of the Binomial Distribution B(n,p)
+pub fn bin_pmf(n: usize, k: usize, p: f64) -> f64 {
+    // Compute the binomial coefficient (k out of n)
+    fn bin_coeff(n: usize, k: usize) -> usize {
+        if k == 0 {
+            return 1;
+        };
+        ((n as u128 * bin_coeff(n - 1, k - 1) as u128) / k as u128) as usize
+    }
+    let coeff = bin_coeff(n, k) as f64;
+    coeff * p.powi(k as i32) * (1f64 - p).powi((n - k) as i32)
+}
+
+/// Compute the discrete Cumulative Distribution Function (CDF) of the Binomial Distribution B(n,p)
+pub fn bin_cdf(n: usize, k: usize, p: f64) -> f64 {
+    if k == n {
+        return 1.0;
+    };
+    (0..=k).map(|i| bin_pmf(n, i, p)).sum()
+}
+
+/// Format a number with `_` as thousands separator, e.g. 1234567 -> "1_234_567"
+pub fn format_nb(x: usize) -> String {
+    let mut y = x;
+    let mut s = String::new();
+    let mut b = true;
+    while y / 1000 != 0 {
+        let to_add = (y % 1000).to_string();
+        let prepend = "0".repeat(3 - to_add.len()) + &to_add;
+        let append = if b { "" } else { &("_".to_string() + &s) };
+        s = prepend + append;
+        b = false;
+        y = y / 1000;
+    }
+    if b {
+        y.to_string()
+    } else {
+        (y % 1000).to_string() + "_" + &s
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    #[test]
+    fn test_oracle() {
+        // TODO: test the distribution of `oracle`
+        assert!(true);
+    }
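+
+    // Added sanity checks (illustrative, hand-checked values): exercise the
+    // range guarantee of `oracle` and the formatting/binomial helpers above.
+    #[test]
+    fn test_helpers() {
+        use super::*;
+        for n in [1usize, 2, 10, 256, 1000] {
+            let h = hash_bytes::<32>(&n.to_ne_bytes());
+            assert!(oracle(&h, n) < n);
+        }
+        assert_eq!(format_nb(1_234_567), "1_234_567");
+        assert_eq!(format_time(1_500), "1μs");
+        assert!((bin_pmf(2, 1, 0.5) - 0.5).abs() < 1e-9);
+        assert!((bin_cdf(2, 1, 0.5) - 0.75).abs() < 1e-9);
+    }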
+}
diff --git a/caledonia/src/weighted_decentralised.rs b/caledonia/src/weighted_decentralised.rs
new file mode 100644
index 00000000..213ac417
--- /dev/null
+++ b/caledonia/src/weighted_decentralised.rs
@@ -0,0 +1,622 @@
+//! Rust implementation of ALBA's Telescope weighted scheme using Blake2b as hash
+//! function and Sortition as lottery.
+
+use crate::utils;
+extern crate core;
+use std::f64::consts::E;
+use vrf_dalek::vrf::{PublicKey, SecretKey, VrfProof};
+
+const DATA_LENGTH: usize = 32;
+const DIGEST_SIZE: usize = 32;
+
+type Data = [u8; DATA_LENGTH];
+
+#[derive(Clone, Copy, Debug)]
+pub struct IndexedData {
+    /// Data, e.g. hash of a vote
+    pub data: Data,
+    /// Lottery index of the data, must be strictly lower than the voter's VotingWeight
+    pub index: usize,
+}
+
+impl IndexedData {
+    pub fn to_hash(self) -> Data {
+        let mut data = Vec::new();
+        data.push(self.data.to_vec());
+        data.push(self.index.to_be_bytes().to_vec());
+        return utils::combine_hashes::<DATA_LENGTH>(data);
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct VerifiableData {
+    /// Data, e.g. hash of a vote, with its lottery index
+    pub indexed_data: IndexedData,
+    pub pk: PublicKey,
+    pub pi: VrfProof,
+    pub stake: usize,
+}
+
+type Hash = [u8; DIGEST_SIZE];
+
+type VotingWeight = usize;
+
+/// Compute a vote's weight based on the binomial distribution of the
+/// possible weights.
+//
+// This function is directly stolen from Algorand's codebase
+// (https://github.com/algorand/sortition/blob/main/sortition.cpp)
+// It computes the actual voting weight through a dichotomic search in the
+// binomial distribution $B(n,p)$ where $n$ is the voter's stake (in ADA) and
+// $p$ is the ratio of the target committee size over the total stake. The
+// value searched for is the given `ratio`, which is compared to quantiles of
+// the distribution.
+// NOTE: While this is significantly faster, by several orders of magnitude,
+// than the lottery drawing process (see `isLotteryWinner`), the soundness of
+// this process has not been researched, so it should be treated with some
+// caution.
+pub fn sortition_binomial_cdf_walk(n: usize, p: f64, ratio: f64, weight: usize) -> VotingWeight {
+    let mut bound = 0f64;
+    for i in 0..weight {
+        bound += utils::bin_pmf(n, i, p);
+
+        if ratio <= bound {
+            return i;
+        }
+    }
+    return weight;
+}
+
+pub fn prove_sortition(
+    public_key: &PublicKey,
+    secret_key: &SecretKey,
+    msg: &[u8],
+    voter_stake: usize,
+    total_stake: usize,
+    expected_size: usize,
+) -> (VotingWeight, VrfProof) {
+    let proof = VrfProof::generate(public_key, secret_key, &msg);
+    // We hash the VRF output down to 8 bytes (64 bits) since it is mapped into an f64
+    let hash = utils::hash_bytes::<8>(&proof.verify(public_key, &msg).unwrap());
+    let i = sortition_binomial_cdf_walk(
+        voter_stake,
+        expected_size as f64 / total_stake as f64,
+        u64::from_be_bytes(hash) as f64 / (u64::MAX as f64),
+        voter_stake,
+    );
+    return (i, proof);
+}
+
+pub fn verify_sortition(
+    public_key: &PublicKey,
+    proof: VrfProof,
+    msg: &[u8],
+    voter_stake: usize,
+    total_stake: usize,
+    expected_size: usize,
+) -> VotingWeight {
+    let hash = match proof.verify(public_key, &msg) {
+        Ok(h) => utils::hash_bytes::<8>(&h),
+        Err(_) => return 0,
+    };
+    return sortition_binomial_cdf_walk(
+        voter_stake,
+        expected_size as f64 / total_stake as f64,
+        u64::from_be_bytes(hash) as f64 / (u64::MAX as f64),
+        voter_stake,
+    );
+}
+
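+// A hedged usage sketch of the sortition lottery above (key generation as in
+// the tests below; the stake and committee numbers are illustrative):
+//
+//     let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
+//     let sk = SecretKey::generate(&mut rng);
+//     let pk = PublicKey::from(&sk);
+//     // A voter with 1_000 of 100_000 total stake, targeting an expected
+//     // committee size of 2_000, wins `w` lottery indices [0, w).
+//     let (w, pi) = prove_sortition(&pk, &sk, b"msg", 1_000, 100_000, 2_000);
+//     assert_eq!(verify_sortition(&pk, pi, b"msg", 1_000, 100_000, 2_000), w);
+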
+/// Setup input parameters
+#[derive(Debug, Clone)]
+pub struct Params {
+    /// Soundness security parameter
+    pub lambda_sec: usize,
+    /// Completeness security parameter
+    pub lambda_rel: usize,
+    /// Approximate size of set Sp to lower bound
+    pub n_p: usize,
+    /// Target lower bound
+    pub n_f: usize,
+    /// Expected number of participants
+    pub mu: usize,
+}
+pub enum Cases {
+    /// Case where u <= λ^2
+    Small,
+    /// Case where λ^2 < u < λ^3
+    Mid,
+    /// Case where u >= λ^3
+    High,
+}
+
+impl Params {
+    /// Returns the minimum mu, found by dichotomic search,
+    /// so that the soundness error is <= 2^-lambda_sec
+    pub fn min_mu(lambda_sec: usize, lambda_rel: usize, n_p: usize, n_f: usize) -> usize {
+        fn compute_bounds(lsec: f64, lrel: f64, np: f64, nf: f64, mu: f64) -> bool {
+            let npnf = np / nf;
+            let lognpnf = npnf.log2();
+            let loge = E.log2();
+            let ln12 = (12f64).ln();
+
+            let bound_completeness = 2f64 * (lrel + 1f64) / loge;
+
+            let delta = (bound_completeness / mu).sqrt();
+            let rho = ((1f64 - delta) * mu).ceil();
+            let rhomu = rho / mu;
+            let logrhomu = rhomu.log2();
+            let u_f64 = ((lsec + (lrel + 1f64).log2() + 1f64 + loge + ln12.log2())
+                / (lognpnf + logrhomu))
+                .ceil();
+
+            // Soundness check
+            let bound_soundness = npnf * u_f64 * u_f64;
+            return (mu > bound_completeness) && (mu >= bound_soundness) && (npnf * rhomu > 1f64);
+        }
+        let (np, nf) = (n_p as f64, n_f as f64);
+        let (mut lower, mut upper) = (0.0, np);
+        let mut mu = 0.5 * np;
+        while mu != np {
+            let b = compute_bounds(lambda_sec as f64, lambda_rel as f64, np, nf, mu);
+            if b {
+                upper = mu;
+            } else {
+                lower = mu;
+            }
+            let new_mu = ((lower + upper) / 2.0).ceil();
+            if mu == new_mu && b {
+                return mu as usize;
+            }
+            mu = new_mu;
+        }
+        return mu as usize;
+    }
+
+    pub fn new(lambda_sec: usize, lambda_rel: usize, n_p: usize, n_f: usize) -> Params {
+        let mu = Params::min_mu(lambda_sec, lambda_rel, n_p, n_f);
+        return Params {
+            lambda_sec,
+            lambda_rel,
+            n_p,
+            n_f,
+            mu,
+        };
+    }
+}
+
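+// Example (added, illustrative, mirroring the tests below):
+// Params::new(80, 80, 60_000, 40_000) searches for the smallest mu satisfying
+// both the completeness bound mu > 2(λ_rel + 1)/log2(e) and the soundness
+// bound mu >= (n_p/n_f) * u^2 checked in `compute_bounds` above.
+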
+/// Setup output parameters
+#[derive(Debug, Clone)]
+pub struct Setup {
+    /// Approximate size of set Sp to lower bound, before the lottery
+    pub n_p_lottery: usize,
+    /// Post-lottery Alba n_p
+    pub n_p: usize,
+    /// Expected number of participants
+    pub mu: usize,
+    /// Proof size (in Sp elements)
+    pub u: usize,
+    /// Proof max counter
+    pub r: usize,
+    /// Proof max 2nd counter
+    pub d: usize,
+    /// Inverse of probability p_q
+    pub q: usize,
+    /// Computation bound
+    pub b: usize,
+}
+impl Setup {
+    /// Setup algorithm taking a Params as input and returning setup parameters (u, r, d, q, b)
+    /// Follows Theorems 16 and 17
+    pub fn new(params: &Params) -> Self {
+        let n_p_f64 = params.n_p as f64;
+        let n_f_f64 = params.n_f as f64;
+        let npnf = n_p_f64 / n_f_f64;
+        let lognpnf = npnf.log2();
+        let lambda_rel = params.lambda_rel as f64;
+        let lambda_sec = params.lambda_sec as f64;
+        let mu = params.mu as f64;
+        let loge = E.log2();
+        let ln12 = (12f64).ln();
+
+        // Completeness check
+        let mu_completeness = 2f64 * (lambda_rel + 1f64) / loge;
+        assert!(mu == n_p_f64 || mu > mu_completeness);
+
+        let delta = (mu_completeness / mu).sqrt();
+        let rho = ((1f64 - delta) * mu).ceil();
+        let rhomu = rho / mu;
+        let logrhomu = rhomu.log2();
+        let u_f64 = (lambda_sec + (lambda_rel + 1f64).log2() + 1f64 + loge + ln12.log2())
+            / (lognpnf + logrhomu);
+        let d = (32.0 * ln12 * u_f64).ceil();
+        let q = (2.0 * ln12 / d).recip().ceil();
+
+        // Soundness check
+        let mu_soundness = npnf * u_f64 * u_f64;
+        assert!(mu == n_p_f64 || mu >= mu_soundness);
+        assert!(mu == n_p_f64 || npnf * rhomu > 1f64);
+
+        // let soundness_error = q
+        //     * d
+        //     * (lambda_rel + 1.0)
+        //     * (npnf.recip() * rhomu.recip()).powf(u_f64)
+        //     * E.powf(u_f64 * u_f64 * npnf / mu);
+        // println!("Soundness error: {}", soundness_error);
+
+        return Setup {
+            n_p_lottery: params.n_p,
+            n_p: rho as usize,
+            mu: params.mu,
+            u: u_f64 as usize,
+            r: params.lambda_rel + 1,
+            d: d as usize,
+            q: q as usize,
+            b: (8.0 * (u_f64 + 1.0) * d / ln12).floor() as usize,
+        };
+    }
+}
+
+/// Round parameters
+#[derive(Debug, Clone)]
+pub struct Round {
+    /// Proof counter
+    v: usize,
+    /// Proof 2nd counter
+    t: usize,
+    /// Round candidate tuple
+    s_list: Vec<VerifiableData>,
+    /// Round candidate hash
+    h: Hash,
+    /// Round candidate hash mapped to [1, n_p]
+    h_usize: usize,
+    /// Approximate size of set Sp to lower bound
+    n_p: usize,
+}
+
+impl Round {
+    /// Oracle producing a uniformly random value in [1, n_p] used for round candidates
+    /// We also return hash(data) to follow the optimization presented in Section 3.3
+    fn h1(data: Vec<Vec<u8>>, n_p: usize) -> (Hash, usize) {
+        let digest = utils::combine_hashes::<DIGEST_SIZE>(data);
+        return (digest, utils::oracle(&digest, n_p));
+    }
+
+    /// Output a round from the proof counters and n_p
+    /// Initialises the hash with H1(v, t) and the random value with oracle(H1(v, t), n_p)
+    pub fn new(v: usize, t: usize, n_p: usize) -> Round {
+        let mut data = Vec::new();
+        data.push(v.to_ne_bytes().to_vec());
+        data.push(t.to_ne_bytes().to_vec());
+        let (h, h_usize) = Round::h1(data, n_p);
+        Round {
+            v,
+            t,
+            s_list: Vec::new(),
+            h,
+            h_usize,
+            n_p,
+        }
+    }
+
+    /// Updates a round with an element of S_p
+    /// Replaces the hash $h$ with $h' = H1(h, s)$ and the random value with oracle(h', n_p)
+    pub fn update(r: &Round, vd: VerifiableData) -> Round {
+        let indexed_data = vd.indexed_data.clone();
+        let mut s_list = r.s_list.clone();
+        s_list.push(vd);
+        let mut data = Vec::new();
+        data.push(r.h.clone().to_vec());
+        data.push(indexed_data.data.to_vec());
+        data.push(indexed_data.index.to_be_bytes().to_vec());
+        let (h, h_usize) = Round::h1(data, r.n_p);
+        Round {
+            v: r.v,
+            t: r.t,
+            s_list,
+            h,
+            h_usize,
+            n_p: r.n_p,
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+/// Alba proof
+pub struct Proof {
+    /// Proof counter
+    r: usize,
+    /// Proof 2nd counter
+    d: usize,
+    /// Proof tuple
+    items: Vec<VerifiableData>,
+}
+
+impl Proof {
+    /// Returns a new empty proof
+    fn new() -> Self {
+        Proof {
+            r: 0,
+            d: 0,
+            items: Vec::new(),
+        }
+    }
+
+    /// Sortition-based lottery proving scheme (Section 5 of the ALBA paper) using the
+    /// Binomial distribution with probability p = mu / np, where mu is the expected
+    /// number of participants and np the total weight
+    pub fn prove_lottery(
+        np: usize,
+        mu: usize,
+        s: Data,
+        sk: &SecretKey,
+        pk: &PublicKey,
+        stake: usize,
+    ) -> Vec<VerifiableData> {
+        let (i, pi) = prove_sortition(pk, sk, &s, stake, np, mu);
+        let mut winners = Vec::new();
+        for j in 0..i {
+            let indexed_data = IndexedData { data: s, index: j };
+            winners.push(VerifiableData {
+                indexed_data,
+                pi: pi.clone(),
+                pk: *pk,
+                stake,
+            });
+        }
+        return winners;
+    }
+
+    /// Sortition-based lottery verification scheme (Section 5 of the ALBA paper) using the
+    /// Binomial distribution with probability p = mu / np, where mu is the expected
+    /// number of participants and np the total weight
+    pub fn verify_lottery(np_lottery: usize, mu: usize, vd: &VerifiableData) -> bool {
+        let w = verify_sortition(
+            &vd.pk,
+            vd.pi.clone(),
+            &vd.indexed_data.data,
+            vd.stake,
+            np_lottery,
+            mu,
+        );
+        return vd.indexed_data.index < w;
+    }
+
+    /// Oracle producing a uniformly random value in [1, n_p] used for prehashing S_p
+    fn h0(setup: &Setup, v: usize, is: IndexedData) -> usize {
+        let mut data = Vec::new();
+        data.push(v.to_ne_bytes().to_vec());
+        data.push(is.data.to_vec());
+        data.push(is.index.to_be_bytes().to_vec());
+        let digest = utils::combine_hashes::<DIGEST_SIZE>(data);
+        return utils::oracle(&digest, setup.n_p);
+    }
+
+    /// Oracle defined as Bernoulli(q) returning 1 with probability q and 0 otherwise
+    fn h2(setup: &Setup, r: &Round) -> bool {
+        let mut data = Vec::new();
+        data.push(r.v.to_ne_bytes().to_vec());
+        data.push(r.t.to_ne_bytes().to_vec());
+        for vd in &r.s_list {
+            data.push(vd.indexed_data.data.to_vec());
+            data.push(vd.indexed_data.index.to_be_bytes().to_vec());
+        }
+        let digest = utils::combine_hashes::<DIGEST_SIZE>(data);
+        return utils::oracle(&digest, setup.q) == 0;
+    }
+
+    /// Depth-first search which goes through all potential round candidates
+    /// and returns the first round candidate Round{v, t, x_1, ..., x_u} such that:
+    /// - for all i ∈ [0, u-1], H0(x_i+1) ∈ bins[H1(v, t, x_1, ..., x_i)]
+    /// - H2(v, t, x_1, ..., x_u) = true
+    fn dfs(
+        setup: &Setup,
+        bins: &Vec<Vec<VerifiableData>>,
+        round: &Round,
+        nb_steps: &mut usize,
+    ) -> Option<Proof> {
+        if round.s_list.len() == setup.u {
+            if Proof::h2(setup, round) {
+                let r = round.v;
+                let d = round.t;
+                let items = round.s_list.clone();
+                return Some(Proof { r, d, items });
+            } else {
+                return None;
+            }
+        }
+        let result = bins[round.h_usize].iter().find_map(|&vd| {
+            if *nb_steps == setup.b {
+                return None;
+            }
+            *nb_steps += 1;
+            Self::dfs(setup, bins, &Round::update(round, vd), nb_steps)
+        });
+        return result;
+    }
+
+    /// Indexed proving algorithm, returns an empty proof if no suitable
+    /// candidate is found within the setup.b steps.
+    fn prove_index(setup: &Setup, set: &Vec<VerifiableData>, v: usize) -> (usize, Option<Proof>) {
+        let mut bins: Vec<Vec<VerifiableData>> = Vec::new();
+        for _ in 1..(setup.n_p + 1) {
+            bins.push(Vec::new());
+        }
+        for &vd in set.iter() {
+            bins[Proof::h0(setup, v, vd.indexed_data)].push(vd);
+        }
+        let mut nb_steps = 0;
+        for t in 1..(setup.d + 1) {
+            if nb_steps == setup.b {
+                return (0, None);
+            }
+            nb_steps += 1;
+            let round = Round::new(v, t, setup.n_p);
+            let res = Proof::dfs(setup, &bins, &round, &mut nb_steps);
+            if res.is_some() {
+                return (nb_steps, res);
+            }
+        }
+        return (nb_steps, None);
+    }
+
+    /// Alba's proving algorithm, based on a depth-first search algorithm.
+    /// Calls the prove_index function up to setup.r times and returns an
+    /// empty proof if no suitable candidate is found.
+    pub fn prove(setup: &Setup, set: &Vec<VerifiableData>) -> Self {
+        // The lottery must be run by each participant on their own; here the
+        // aggregator runs it another time for robustness.
+        let winner_set = set
+            .iter()
+            .filter_map(|&vd| Proof::verify_lottery(setup.n_p_lottery, setup.mu, &vd).then(|| vd))
+            .collect();
+
+        for v in 0..setup.r {
+            if let (_, Some(proof)) = Proof::prove_index(setup, &winner_set, v) {
+                return proof;
+            }
+        }
+        return Proof::new();
+    }
+
+    /// Alba's proving algorithm used for benchmarking, returning a proof as
+    /// well as the number of steps run to find it.
+    pub fn bench(setup: &Setup, set: &Vec<VerifiableData>) -> (usize, Self) {
+        let mut nb_steps = 0;
+        for v in 0..setup.r {
+            let (steps, opt) = Proof::prove_index(setup, set, v);
+            nb_steps += steps;
+            if let Some(proof) = opt {
+                return (nb_steps, proof);
+            }
+        }
+        return (nb_steps, Proof::new());
+    }
+
+    /// Alba's verification algorithm, follows the proving algorithm by running
+    /// the same depth-first search algorithm.
+    pub fn verify(setup: &Setup, proof: Proof) -> bool {
+        if proof.d == 0 || proof.d > setup.d || proof.r > setup.r || proof.items.len() != setup.u {
+            return false;
+        }
+
+        let r0 = Round::new(proof.r, proof.d, setup.n_p);
+        let (b, round) = proof.items.iter().fold((true, r0), |(b, r), &vd| {
+            (
+                b && r.h_usize == Proof::h0(setup, proof.r, vd.indexed_data)
+                    && Proof::verify_lottery(setup.n_p_lottery, setup.mu, &vd),
+                Round::update(&r, vd),
+            )
+        });
+        return b && Proof::h2(setup, &round);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+    use rand_chacha::ChaCha20Rng;
+    use rand_core::{RngCore, SeedableRng};
+
+    #[test]
+    fn test_params() {
+        let lambdas = [80, 100, 128];
+        let pows: Vec<u32> = (2..10).collect();
+        let sps: Vec<usize> = pows.iter().map(|&i| 10_u32.pow(i) as usize).collect();
+        let ratios = [60, 66, 80, 95, 99];
+        let mut params = Vec::new();
+        for l in lambdas {
+            for &sp in &sps {
+                for r in ratios {
+                    let n_p = (sp * r) / 100;
+                    let n_f = (sp * (100 - r)) / 100;
+                    let p = Params::new(l, l, n_p, n_f);
+                    params.push(p);
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn test_verify() {
+        let mut rng = ChaCha20Rng::from_seed(Default::default());
+        let nb_tests = 100;
+        let (total_weight, set_size) = (100_000, 1_000);
+        let lambda = 80;
+        let np = 60;
+        let nf = 100 - np;
+        for _t in 0..nb_tests {
+            let seed = rng.next_u32().to_ne_bytes().to_vec();
+
+            let s_p = utils::gen_weighted_items::<DATA_LENGTH>(seed, total_weight, set_size);
+            let params = Params::new(
+                lambda,
+                lambda,
+                total_weight * np / 100,
+                total_weight * nf / 100,
+            );
+            let setup = Setup::new(&params);
+            println!("\n{:?}", setup);
+
+            let mut verifiable_set = Vec::new();
+            let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
+            for spi in s_p {
+                let ski = SecretKey::generate(&mut rng);
+                let pki = PublicKey::from(&ski);
+                let (data, stake) = spi;
+                let votes =
+                    Proof::prove_lottery(setup.n_p_lottery, setup.mu, data, &ski, &pki, stake);
+                for v in votes {
+                    verifiable_set.push(v);
+                }
+            }
+            let proof = Proof::prove(&setup, &verifiable_set);
+            assert!(Proof::verify(&setup, proof.clone()));
+            let proof_0 = Proof {
+                r: proof.r,
+                d: 0,
+                items: proof.items.clone(),
+            };
+            assert!(!Proof::verify(&setup, proof_0));
+            let proof_d = Proof {
+                r: proof.r,
+                d: proof.d.wrapping_add(1),
+                items: proof.items.clone(),
+            };
+            assert!(!Proof::verify(&setup, proof_d));
+            let proof_r = Proof {
+                r: proof.r.wrapping_add(1),
+                d: proof.d,
+                items: proof.items.clone(),
+            };
+            assert!(!Proof::verify(&setup, proof_r));
+            let proof_item = Proof {
+                r: proof.r,
+                d: proof.d,
+                items: Vec::new(),
+            };
+            assert!(!Proof::verify(&setup, proof_item));
+            let mut wrong_items = proof.items.clone();
+            let last_item = wrong_items.pop().unwrap();
+            let mut penultimate_item = wrong_items.pop().unwrap();
+            let proof_itembis = Proof {
+                r: proof.r,
+                d: proof.d,
+                items: wrong_items.clone(),
+            };
+            assert!(!Proof::verify(&setup, proof_itembis));
+            // Modifying the penultimate item to check correctness of the H1 check and not H2
+            penultimate_item.indexed_data.data[0] =
+                penultimate_item.indexed_data.data[0].wrapping_add(42u8);
+            wrong_items.push(penultimate_item);
+            wrong_items.push(last_item);
+            let proof_itembis = Proof {
+                r: proof.r,
+                d: proof.d,
+                items: wrong_items.clone(),
+            };
+            assert!(!Proof::verify(&setup, proof_itembis));
+        }
+    }
+}
diff --git a/caledonia/vrf b/caledonia/vrf
new file mode 160000
index 00000000..4fd3a423
--- /dev/null
+++ b/caledonia/vrf
@@ -0,0 +1 @@
+Subproject commit 4fd3a42330598ec179e6cb06c4da77e3fdd9ca91