diff --git a/crates/proof-of-sql/src/base/commitment/column_bounds.rs b/crates/proof-of-sql/src/base/commitment/column_bounds.rs
index 753fb7609..bd56bf0c0 100644
--- a/crates/proof-of-sql/src/base/commitment/column_bounds.rs
+++ b/crates/proof-of-sql/src/base/commitment/column_bounds.rs
@@ -107,9 +107,8 @@ where
             (Bounds::Sharp(bounds_a), Bounds::Sharp(bounds_b)) => {
                 Bounds::Sharp(bounds_a.union(bounds_b))
             }
-            (Bounds::Bounded(bounds_a), Bounds::Bounded(bounds_b))
-            | (Bounds::Bounded(bounds_a), Bounds::Sharp(bounds_b))
-            | (Bounds::Sharp(bounds_a), Bounds::Bounded(bounds_b)) => {
+            (Bounds::Bounded(bounds_a) | Bounds::Sharp(bounds_a), Bounds::Bounded(bounds_b))
+            | (Bounds::Bounded(bounds_a), Bounds::Sharp(bounds_b)) => {
                 Bounds::Bounded(bounds_a.union(bounds_b))
             }
             (bounds, Bounds::Empty) | (Bounds::Empty, bounds) => bounds,
@@ -128,14 +127,13 @@ where
         match (self, other) {
             (Bounds::Empty, _) => Bounds::Empty,
             (bounds, Bounds::Empty) => bounds,
-            (Bounds::Sharp(bounds_a), Bounds::Sharp(bounds_b))
-            | (Bounds::Sharp(bounds_a), Bounds::Bounded(bounds_b))
+            (Bounds::Sharp(bounds_a), Bounds::Sharp(bounds_b) | Bounds::Bounded(bounds_b))
                 if bounds_a.max() < bounds_b.min() || bounds_b.max() < bounds_a.min() =>
             {
                 // source collections must be disjoint, so no rows are removed
                 Bounds::Sharp(bounds_a)
             }
-            (Bounds::Bounded(bounds), _) | (Bounds::Sharp(bounds), _) => Bounds::Bounded(bounds),
+            (Bounds::Bounded(bounds) | Bounds::Sharp(bounds), _) => Bounds::Bounded(bounds),
         }
     }
diff --git a/crates/proof-of-sql/src/base/commitment/column_commitment_metadata_map.rs b/crates/proof-of-sql/src/base/commitment/column_commitment_metadata_map.rs
index 4d5f17b7d..4a72620ad 100644
--- a/crates/proof-of-sql/src/base/commitment/column_commitment_metadata_map.rs
+++ b/crates/proof-of-sql/src/base/commitment/column_commitment_metadata_map.rs
@@ -102,7 +102,7 @@ impl ColumnCommitmentMetadataMapExt for ColumnCommitmentMetadataMap {
                 Err(ColumnCommitmentsMismatch::Identifier {
                     id_a: identifier_a.to_string(),
                     id_b: identifier_b.to_string(),
-                })?
+                })?;
             }

             Ok((identifier_a, metadata_a.try_union(metadata_b)?))
@@ -125,7 +125,7 @@ impl ColumnCommitmentMetadataMapExt for ColumnCommitmentMetadataMap {
                 Err(ColumnCommitmentsMismatch::Identifier {
                     id_a: identifier_a.to_string(),
                     id_b: identifier_b.to_string(),
-                })?
+ })?; } Ok((identifier_a, metadata_a.try_difference(metadata_b)?)) diff --git a/crates/proof-of-sql/src/base/commitment/committable_column.rs b/crates/proof-of-sql/src/base/commitment/committable_column.rs index 44050a83a..0a5257d3d 100644 --- a/crates/proof-of-sql/src/base/commitment/committable_column.rs +++ b/crates/proof-of-sql/src/base/commitment/committable_column.rs @@ -428,7 +428,7 @@ mod tests { ); // non-empty case - let timestamps = [1625072400, 1625076000, 1625083200]; + let timestamps = [1_625_072_400, 1_625_076_000, 1_625_083_200]; let from_borrowed_column = CommittableColumn::from(&Column::::TimestampTZ( PoSQLTimeUnit::Second, @@ -610,7 +610,7 @@ mod tests { ); // non-empty case - let timestamps = vec![1625072400, 1625076000, 1625083200]; + let timestamps = vec![1_625_072_400, 1_625_076_000, 1_625_083_200]; let owned_column = OwnedColumn::::TimestampTZ( PoSQLTimeUnit::Second, PoSQLTimeZone::Utc, @@ -937,7 +937,7 @@ mod tests { assert_eq!(commitment_buffer[0], CompressedRistretto::default()); // Non-empty case - let timestamps = [1625072400, 1625076000, 1625083200]; + let timestamps = [1_625_072_400, 1_625_076_000, 1_625_083_200]; let committable_column = CommittableColumn::TimestampTZ(PoSQLTimeUnit::Second, PoSQLTimeZone::Utc, ×tamps); diff --git a/crates/proof-of-sql/src/base/commitment/vec_commitment_ext.rs b/crates/proof-of-sql/src/base/commitment/vec_commitment_ext.rs index 42f5e6dfc..2080ab074 100644 --- a/crates/proof-of-sql/src/base/commitment/vec_commitment_ext.rs +++ b/crates/proof-of-sql/src/base/commitment/vec_commitment_ext.rs @@ -135,7 +135,7 @@ impl VecCommitmentExt for Vec { ) where COL: Into>, { - self.extend(Self::from_columns_with_offset(columns, offset, setup)) + self.extend(Self::from_columns_with_offset(columns, offset, setup)); } fn try_add(self, other: Self) -> Result diff --git a/crates/proof-of-sql/src/base/database/arrow_array_to_column_conversion.rs b/crates/proof-of-sql/src/base/database/arrow_array_to_column_conversion.rs index baf618a69..f33485f97 100644 --- a/crates/proof-of-sql/src/base/database/arrow_array_to_column_conversion.rs +++ b/crates/proof-of-sql/src/base/database/arrow_array_to_column_conversion.rs @@ -385,7 +385,7 @@ mod tests { #[test] fn we_can_convert_timestamp_array_normal_range() { let alloc = Bump::new(); - let data = vec![1625072400, 1625076000, 1625083200]; // Example Unix timestamps + let data = vec![1_625_072_400, 1_625_076_000, 1_625_083_200]; // Example Unix timestamps let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt( data.clone().into(), Some("Z"), @@ -401,7 +401,7 @@ mod tests { #[test] fn we_can_build_an_empty_column_from_an_empty_range_timestamp() { let alloc = Bump::new(); - let data = vec![1625072400, 1625076000]; // Example Unix timestamps + let data = vec![1_625_072_400, 1_625_076_000]; // Example Unix timestamps let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt( data.into(), Some("+00:00"), @@ -419,7 +419,7 @@ mod tests { #[test] fn we_can_convert_timestamp_array_empty_range() { let alloc = Bump::new(); - let data = vec![1625072400, 1625076000, 1625083200]; // Example Unix timestamps + let data = vec![1_625_072_400, 1_625_076_000, 1_625_083_200]; // Example Unix timestamps let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt( data.into(), Some("+0:00"), @@ -435,7 +435,7 @@ mod tests { #[test] fn we_cannot_convert_timestamp_array_oob_range() { let alloc = Bump::new(); - let data = vec![1625072400, 1625076000, 1625083200]; + let data = 
vec![1_625_072_400, 1_625_076_000, 1_625_083_200]; let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt( data.into(), Some("Utc"), @@ -451,7 +451,7 @@ mod tests { #[test] fn we_can_convert_timestamp_array_with_nulls() { let alloc = Bump::new(); - let data = vec![Some(1625072400), None, Some(1625083200)]; + let data = vec![Some(1_625_072_400), None, Some(1_625_083_200)]; let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt( data.into(), Some("00:00"), @@ -903,7 +903,10 @@ mod tests { #[test] fn we_can_build_an_empty_column_from_an_empty_range_decimal128() { let alloc = Bump::new(); - let decimal_values = vec![12345678901234567890_i128, -12345678901234567890_i128]; + let decimal_values = vec![ + 12_345_678_901_234_567_890_i128, + -12_345_678_901_234_567_890_i128, + ]; let array: ArrayRef = Arc::new( Decimal128Array::from(decimal_values) .with_precision_and_scale(38, 0) @@ -1010,7 +1013,7 @@ mod tests { #[test] fn we_can_convert_valid_timestamp_array_refs_into_valid_columns() { let alloc = Bump::new(); - let data = vec![1625072400, 1625076000]; // Example Unix timestamps + let data = vec![1_625_072_400, 1_625_076_000]; // Example Unix timestamps let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt( data.clone().into(), Some("UTC"), @@ -1072,7 +1075,7 @@ mod tests { fn we_can_convert_valid_timestamp_array_refs_into_valid_columns_using_ranges_smaller_than_arrays( ) { let alloc = Bump::new(); - let data = vec![1625072400, 1625076000, 1625083200]; // Example Unix timestamps + let data = vec![1_625_072_400, 1_625_076_000, 1_625_083_200]; // Example Unix timestamps let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt( data.clone().into(), Some("Utc"), @@ -1131,7 +1134,7 @@ mod tests { #[test] fn we_can_convert_valid_timestamp_array_refs_into_valid_columns_using_ranges_with_zero_size() { let alloc = Bump::new(); - let data = vec![1625072400, 1625076000]; // Example Unix timestamps + let data = vec![1_625_072_400, 1_625_076_000]; // Example Unix timestamps let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt( data.clone().into(), Some("Utc"), @@ -1160,7 +1163,7 @@ mod tests { #[test] fn we_can_convert_valid_timestamp_array_refs_into_valid_vec_scalars() { - let data = vec![1625072400, 1625076000]; // Example Unix timestamps + let data = vec![1_625_072_400, 1_625_076_000]; // Example Unix timestamps let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt( data.clone().into(), Some("Utc"), diff --git a/crates/proof-of-sql/src/base/database/column_operation.rs b/crates/proof-of-sql/src/base/database/column_operation.rs index 7e14fb85f..eb5773121 100644 --- a/crates/proof-of-sql/src/base/database/column_operation.rs +++ b/crates/proof-of-sql/src/base/database/column_operation.rs @@ -1427,7 +1427,7 @@ mod test { .into_iter() .map(Curve25519Scalar::from) .collect::>(); - let rhs = [71_i64, 150000, -20000] + let rhs = [71_i64, 150_000, -20000] .into_iter() .map(Curve25519Scalar::from) .collect::>(); @@ -1438,7 +1438,7 @@ mod test { assert_eq!(expected, actual); // lhs is decimal with nonnegative scale and rhs is decimal with negative scale - let lhs = [71_i64, 150000, -20000] + let lhs = [71_i64, 150_000, -20000] .into_iter() .map(Curve25519Scalar::from) .collect::>(); @@ -1457,7 +1457,7 @@ mod test { .into_iter() .map(Curve25519Scalar::from) .collect::>(); - let rhs = [71_i64, 150000, -20000] + let rhs = [71_i64, 150_000, -20000] .into_iter() .map(Curve25519Scalar::from) .collect::>(); @@ -1548,7 +1548,7 
@@ mod test { .into_iter() .map(Curve25519Scalar::from) .collect::>(); - let rhs = [71_i64, 150000, -30000] + let rhs = [71_i64, 150_000, -30000] .into_iter() .map(Curve25519Scalar::from) .collect::>(); @@ -1559,7 +1559,7 @@ mod test { assert_eq!(expected, actual); // lhs is decimal with nonnegative scale and rhs is decimal with negative scale - let lhs = [71_i64, 150000, -19000] + let lhs = [71_i64, 150_000, -19000] .into_iter() .map(Curve25519Scalar::from) .collect::>(); @@ -1578,7 +1578,7 @@ mod test { .into_iter() .map(Curve25519Scalar::from) .collect::>(); - let rhs = [71000_i64, 150000, -21000] + let rhs = [71000_i64, 150_000, -21000] .into_iter() .map(Curve25519Scalar::from) .collect::>(); @@ -1669,7 +1669,7 @@ mod test { .into_iter() .map(Curve25519Scalar::from) .collect::>(); - let rhs = [71_i64, 150000, -30000] + let rhs = [71_i64, 150_000, -30000] .into_iter() .map(Curve25519Scalar::from) .collect::>(); @@ -1680,7 +1680,7 @@ mod test { assert_eq!(expected, actual); // lhs is decimal with nonnegative scale and rhs is decimal with negative scale - let lhs = [71_i64, 150000, -19000] + let lhs = [71_i64, 150_000, -19000] .into_iter() .map(Curve25519Scalar::from) .collect::>(); @@ -1699,7 +1699,7 @@ mod test { .into_iter() .map(Curve25519Scalar::from) .collect::>(); - let rhs = [71000_i64, 150000, -21000] + let rhs = [71000_i64, 150_000, -21000] .into_iter() .map(Curve25519Scalar::from) .collect::>(); @@ -1838,9 +1838,9 @@ mod test { let actual: (Precision, i8, Vec) = try_add_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap(); let expected_scalars = vec![ - Curve25519Scalar::from(400071), - Curve25519Scalar::from(1499918), - Curve25519Scalar::from(-199977), + Curve25519Scalar::from(400_071), + Curve25519Scalar::from(1_499_918), + Curve25519Scalar::from(-199_977), ]; let expected = (Precision::new(75).unwrap(), 3, expected_scalars); assert_eq!(expected, actual); @@ -2000,9 +2000,9 @@ mod test { let actual: (Precision, i8, Vec) = try_subtract_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap(); let expected_scalars = vec![ - Curve25519Scalar::from(399929), - Curve25519Scalar::from(1500082), - Curve25519Scalar::from(-200023), + Curve25519Scalar::from(399_929), + Curve25519Scalar::from(1_500_082), + Curve25519Scalar::from(-200_023), ]; let expected = (Precision::new(75).unwrap(), 3, expected_scalars); assert_eq!(expected, actual); @@ -2246,8 +2246,8 @@ mod test { try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap(); let expected_scalars = vec![ Curve25519Scalar::from(0_i64), - Curve25519Scalar::from(40000000_i64), - Curve25519Scalar::from(150000000_i64), + Curve25519Scalar::from(40_000_000_i64), + Curve25519Scalar::from(150_000_000_i64), ]; let expected = (Precision::new(13).unwrap(), 6, expected_scalars); assert_eq!(expected, actual); @@ -2263,9 +2263,9 @@ mod test { let actual: (Precision, i8, Vec) = try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap(); let expected_scalars = vec![ - Curve25519Scalar::from(5633802), - Curve25519Scalar::from(-18292682), - Curve25519Scalar::from(-8695652), + Curve25519Scalar::from(5_633_802), + Curve25519Scalar::from(-18_292_682), + Curve25519Scalar::from(-8_695_652), ]; let expected = (Precision::new(18).unwrap(), 6, expected_scalars); assert_eq!(expected, actual); @@ -2284,9 +2284,9 @@ mod test { let actual: (Precision, i8, Vec) = try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap(); let expected_scalars = vec![ - 
Curve25519Scalar::from(1333333), - Curve25519Scalar::from(-400000), - Curve25519Scalar::from(-285714), + Curve25519Scalar::from(1_333_333), + Curve25519Scalar::from(-400_000), + Curve25519Scalar::from(-285_714), ]; let expected = (Precision::new(10).unwrap(), 6, expected_scalars); assert_eq!(expected, actual); @@ -2305,9 +2305,9 @@ mod test { let actual: (Precision, i8, Vec) = try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap(); let expected_scalars = vec![ - Curve25519Scalar::from(5633802816_i128), - Curve25519Scalar::from(-18292682926_i128), - Curve25519Scalar::from(-8695652173_i128), + Curve25519Scalar::from(5_633_802_816_i128), + Curve25519Scalar::from(-18_292_682_926_i128), + Curve25519Scalar::from(-8_695_652_173_i128), ]; let expected = (Precision::new(13).unwrap(), 6, expected_scalars); assert_eq!(expected, actual); @@ -2326,9 +2326,9 @@ mod test { let actual: (Precision, i8, Vec) = try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap(); let expected_scalars = vec![ - Curve25519Scalar::from(563380), - Curve25519Scalar::from(-1829268), - Curve25519Scalar::from(-869565), + Curve25519Scalar::from(563_380), + Curve25519Scalar::from(-1_829_268), + Curve25519Scalar::from(-869_565), ]; let expected = (Precision::new(9).unwrap(), 6, expected_scalars); assert_eq!(expected, actual); diff --git a/crates/proof-of-sql/src/base/database/expression_evaluation_test.rs b/crates/proof-of-sql/src/base/database/expression_evaluation_test.rs index 9bb2ddaaf..a75fcc79c 100644 --- a/crates/proof-of-sql/src/base/database/expression_evaluation_test.rs +++ b/crates/proof-of-sql/src/base/database/expression_evaluation_test.rs @@ -37,7 +37,7 @@ fn we_can_evaluate_a_simple_literal() { )); let actual_column = table.evaluate(&expr).unwrap(); // UNIX timestamp for 2022-03-01T00:00:00Z - let actual_timestamp = 1646092800; + let actual_timestamp = 1_646_092_800; let expected_column = OwnedColumn::TimestampTZ( PoSQLTimeUnit::Second, PoSQLTimeZone::Utc, @@ -187,7 +187,7 @@ fn we_can_evaluate_an_arithmetic_expression() { col("int128s"), ); let actual_column = table.evaluate(&expr).unwrap(); - let expected_scalars = [-16000000, -7960000, 80000, 8120000, 16160000] + let expected_scalars = [-16_000_000, -7_960_000, 80000, 8_120_000, 16_160_000] .iter() .map(|&x| x.into()) .collect(); diff --git a/crates/proof-of-sql/src/base/database/owned_column_operation.rs b/crates/proof-of-sql/src/base/database/owned_column_operation.rs index 345c959ed..be1603396 100644 --- a/crates/proof-of-sql/src/base/database/owned_column_operation.rs +++ b/crates/proof-of-sql/src/base/database/owned_column_operation.rs @@ -1677,7 +1677,7 @@ mod test { let rhs = OwnedColumn::::Decimal75(Precision::new(5).unwrap(), 2, rhs_scalars); let result = (lhs / rhs).unwrap(); - let expected_scalars = [-400000000_i128, 250000000, 75000000] + let expected_scalars = [-400_000_000_i128, 250_000_000, 75_000_000] .iter() .map(Curve25519Scalar::from) .collect(); @@ -1696,7 +1696,7 @@ mod test { let rhs = OwnedColumn::::Decimal75(Precision::new(3).unwrap(), 2, rhs_scalars); let result = (lhs / rhs).unwrap(); - let expected_scalars = [-400000000, 250000000, 100000000] + let expected_scalars = [-400_000_000, 250_000_000, 100_000_000] .iter() .map(Curve25519Scalar::from) .collect(); diff --git a/crates/proof-of-sql/src/base/database/owned_table_test.rs b/crates/proof-of-sql/src/base/database/owned_table_test.rs index 3932298b9..93a91182e 100644 --- a/crates/proof-of-sql/src/base/database/owned_table_test.rs 
+++ b/crates/proof-of-sql/src/base/database/owned_table_test.rs @@ -158,7 +158,7 @@ fn we_get_inequality_between_tables_with_differing_data() { "time_stamp", PoSQLTimeUnit::Second, PoSQLTimeZone::Utc, - [1625072400], + [1_625_072_400], ), ]); let owned_table_b: OwnedTable = owned_table([ @@ -170,7 +170,7 @@ fn we_get_inequality_between_tables_with_differing_data() { "time_stamp", PoSQLTimeUnit::Second, PoSQLTimeZone::Utc, - [1625076000], + [1_625_076_000], ), ]); assert_ne!(owned_table_a, owned_table_b); diff --git a/crates/proof-of-sql/src/base/database/scalar_and_i256_conversions.rs b/crates/proof-of-sql/src/base/database/scalar_and_i256_conversions.rs index 5052c10de..59d7b8265 100644 --- a/crates/proof-of-sql/src/base/database/scalar_and_i256_conversions.rs +++ b/crates/proof-of-sql/src/base/database/scalar_and_i256_conversions.rs @@ -2,12 +2,12 @@ use crate::base::scalar::Scalar; use arrow::datatypes::i256; const MIN_SUPPORTED_I256: i256 = i256::from_parts( - 326411208032252286695448638536326387210, - -10633823966279326983230456482242756609, + 326_411_208_032_252_286_695_448_638_536_326_387_210, + -10_633_823_966_279_326_983_230_456_482_242_756_609, ); const MAX_SUPPORTED_I256: i256 = i256::from_parts( - 13871158888686176767925968895441824246, - 10633823966279326983230456482242756608, + 13_871_158_888_686_176_767_925_968_895_441_824_246, + 10_633_823_966_279_326_983_230_456_482_242_756_608, ); /// Converts a type implementing [Scalar] into an arrow i256 diff --git a/crates/proof-of-sql/src/base/encode/scalar_varint.rs b/crates/proof-of-sql/src/base/encode/scalar_varint.rs index 8acf15131..2ab7e0b97 100644 --- a/crates/proof-of-sql/src/base/encode/scalar_varint.rs +++ b/crates/proof-of-sql/src/base/encode/scalar_varint.rs @@ -22,20 +22,20 @@ pub fn write_u256_varint(buf: &mut [u8], mut zig_x: U256) -> usize { // we keep writing until we get a value that has the MSB not set. // a MSB not set implies that we have reached the end of the number. - while zig_x.high != 0 || zig_x.low >= 0b10000000 { + while zig_x.high != 0 || zig_x.low >= 0b1000_0000 { // we read the next 7 bits from `zig_x` casting to u8 and setting // the 8-th bit to 1 to indicate that we still need to write more bytes to buf - buf[pos] = (zig_x.low as u8) | 0b10000000; + buf[pos] = (zig_x.low as u8) | 0b1000_0000; pos += 1; // we shift the whole `zig_x` number 7 bits to right - zig_x.low = (zig_x.low >> 7) | ((zig_x.high & 0b01111111) << 121); + zig_x.low = (zig_x.low >> 7) | ((zig_x.high & 0b0111_1111) << 121); zig_x.high >>= 7; } // we write the last byte to buf with the MSB not set. // that indicates that the number has no continuation. 
- buf[pos] = (zig_x.low & 0b01111111) as u8; + buf[pos] = (zig_x.low & 0b0111_1111) as u8; pos + 1 } @@ -75,13 +75,13 @@ pub fn read_u256_varint(buf: &[u8]) -> Option<(U256, usize)> { // we write the `next 7 bits` at the [shift_amount..shift_amount + 7) // bit positions of val u256 number match shift_amount.cmp(&126_u32) { - Ordering::Less => val.low |= ((*next_byte & 0b01111111) as u128) << shift_amount, + Ordering::Less => val.low |= ((*next_byte & 0b0111_1111) as u128) << shift_amount, Ordering::Equal => { - val.low |= ((*next_byte & 0b00000011) as u128) << shift_amount; - val.high |= ((*next_byte & 0b01111100) as u128) >> 2; + val.low |= ((*next_byte & 0b0000_0011) as u128) << shift_amount; + val.high |= ((*next_byte & 0b0111_1100) as u128) >> 2; } Ordering::Greater => { - val.high |= ((*next_byte & 0b01111111) as u128) << (shift_amount - 128) + val.high |= ((*next_byte & 0b0111_1111) as u128) << (shift_amount - 128); } } diff --git a/crates/proof-of-sql/src/base/encode/scalar_varint_test.rs b/crates/proof-of-sql/src/base/encode/scalar_varint_test.rs index 1da16e446..ccf54ba31 100644 --- a/crates/proof-of-sql/src/base/encode/scalar_varint_test.rs +++ b/crates/proof-of-sql/src/base/encode/scalar_varint_test.rs @@ -33,8 +33,8 @@ fn big_scalars_that_are_smaller_than_their_additive_inverses_are_encoded_as_posi // x = (p - 1) / 10 (p is the ristretto group order) // y = -x = (p + 1) / 10 let val: Curve25519Scalar = (&U256::from_words( - 0x9bafe5c976b25c7bd59b704f6fb22eca, - 0x1999999999999999999999999999999, + 0x9baf_e5c9_76b2_5c7b_d59b_704f_6fb2_2eca, + 0x0199_9999_9999_9999_9999_9999_9999_9999, )) .into(); assert!(scalar_varint_size(&val) == 36); @@ -46,8 +46,8 @@ fn big_additive_inverses_that_are_smaller_than_the_input_scalars_are_encoded_as_ // x = (p + 1) / 10 (p is the ristretto group order) // y = -x = (p - 1) / 10 let val: Curve25519Scalar = (&U256::from_words( - 0x9bafe5c976b25c7bd59b704f6fb22ecb, - 0x1999999999999999999999999999999, + 0x9baf_e5c9_76b2_5c7b_d59b_704f_6fb2_2ecb, + 0x0199_9999_9999_9999_9999_9999_9999_9999, )) .into(); assert!(scalar_varint_size(&val) == 36); @@ -59,8 +59,8 @@ fn the_maximum_positive_and_negative_encoded_scalars_consume_the_maximum_amount_ // x = (p + 1) / 2 (p is the ristretto group order) // y = -x = (p - 1) / 2 let val: Curve25519Scalar = (&U256::from_words( - 0xa6f7cef517bce6b2c09318d2e7ae9f7, - 0x8000000000000000000000000000000, + 0x0a6f_7cef_517b_ce6b_2c09_318d_2e7a_e9f7, + 0x0800_0000_0000_0000_0000_0000_0000_0000, )) .into(); assert!(scalar_varint_size(&val) == 37); @@ -69,8 +69,8 @@ fn the_maximum_positive_and_negative_encoded_scalars_consume_the_maximum_amount_ // x = (p - 1) / 2 (p is the ristretto group order) // y = -x = (p + 1) / 2 let val: Curve25519Scalar = (&U256::from_words( - 0xa6f7cef517bce6b2c09318d2e7ae9f6, - 0x8000000000000000000000000000000, + 0x0a6f_7cef_517b_ce6b_2c09_318d_2e7a_e9f6, + 0x0800_0000_0000_0000_0000_0000_0000_0000, )) .into(); @@ -84,8 +84,8 @@ fn scalar_slices_consumes_the_correct_amount_of_bytes() { // x = (p + 1) / 2 let val2: Curve25519Scalar = (&U256::from_words( - 0xa6f7cef517bce6b2c09318d2e7ae9f7, - 0x8000000000000000000000000000000, + 0x0a6f_7cef_517b_ce6b_2c09_318d_2e7a_e9f7, + 0x0800_0000_0000_0000_0000_0000_0000_0000, )) .into(); @@ -152,8 +152,8 @@ fn big_scalars_that_are_smaller_than_their_additive_inverses_are_correctly_encod // (p - 1) / 2 (p is the ristretto group order) // y = -x = (p + 1) / 2 (which is bigger than x) let val: Curve25519Scalar = (&U256::from_words( - 
0xa6f7cef517bce6b2c09318d2e7ae9f6, - 0x8000000000000000000000000000000, + 0x0a6f_7cef_517b_ce6b_2c09_318d_2e7a_e9f6, + 0x0800_0000_0000_0000_0000_0000_0000_0000, )) .into(); assert!(write_scalar_varint(&mut buf[..], &val) == 37); @@ -171,8 +171,8 @@ fn big_additive_inverses_that_are_smaller_than_the_input_scalars_are_correctly_e // x = (p + 1) / 2 (p is the group order) // y = -x = (p - 1) / 2 (which is smaller than x) let val: Curve25519Scalar = (&U256::from_words( - 0xa6f7cef517bce6b2c09318d2e7ae9f7, - 0x8000000000000000000000000000000, + 0x0a6f_7cef_517b_ce6b_2c09_318d_2e7a_e9f7, + 0x0800_0000_0000_0000_0000_0000_0000_0000, )) .into(); @@ -186,18 +186,18 @@ fn big_additive_inverses_that_are_smaller_than_the_input_scalars_are_correctly_e #[test] fn valid_varint_encoded_input_that_map_to_curve25519_scalars_smaller_than_the_p_field_order_in_the_read_scalar_will_not_wrap_around_p( ) { - let mut buf = [0b11111111_u8; 36]; + let mut buf = [0b1111_1111_u8; 36]; // 252 bits set is fine (252 bits = 36 * 7 as // each byte can hold only 7 bits in the varint encoding) - buf[35] = 0b01111111_u8; + buf[35] = 0b0111_1111_u8; // buf represents the number 2^252 - 1 // removing the varint encoding, we would have y = ((2^252 - 1) // 2 + 1) % p // since we want x, we would have x = -y let expected_x = -Curve25519Scalar::from(&U256::from_words( - 0x00000000000000000000000000000000, - 0x8000000000000000000000000000000, + 0x0000_0000_0000_0000_0000_0000_0000_0000, + 0x0800_0000_0000_0000_0000_0000_0000_0000, )); assert!(read_scalar_varint(&buf[..]).unwrap() == (expected_x, 36)); @@ -206,38 +206,38 @@ fn valid_varint_encoded_input_that_map_to_curve25519_scalars_smaller_than_the_p_ #[test] fn valid_varint_encoded_input_that_map_to_curve25519_scalars_bigger_than_the_p_field_order_in_the_read_scalar_will_wrap_around_p( ) { - let mut buf = [0b11111111_u8; 37]; + let mut buf = [0b1111_1111_u8; 37]; // we set the first bit to 0 so that we have a positive varint encoding - buf[0] = 0b11111110; + buf[0] = 0b1111_1110; // we set the last byte to 31, so that we have 256 bits set, and the MST equal 0 - buf[36] = 0b00001111; // buf has 256 bit-length + buf[36] = 0b0000_1111; // buf has 256 bit-length // at this point, buf represents the number 2^256 - 2, // which has 256 bit-length, where 255 bits are set to 1 // also, `expected_val` is simply x = ((2^256 - 2) >> 1) % p let expected_val: Curve25519Scalar = (&U256::from_words( - 0x6de72ae98b3ab623977f4a4775473484, - 0xfffffffffffffffffffffffffffffff, + 0x6de7_2ae9_8b3a_b623_977f_4a47_7547_3484, + 0x0fff_ffff_ffff_ffff_ffff_ffff_ffff_ffff, )) .into(); assert!(read_scalar_varint(&buf[..]).unwrap() == (expected_val, 37)); // even though we are able to read varint numbers of up to 259 bits-length, // we can only represent a number up to 256 bits-length. 
Bits 257 to 259 are ignored - buf[36] = 0b00011111; // buf has 257 bit-length + buf[36] = 0b0001_1111; // buf has 257 bit-length assert!(read_scalar_varint(&buf[..]).unwrap() == (expected_val, 37)); - buf[36] = 0b00111111; // buf has 258 bit-length + buf[36] = 0b0011_1111; // buf has 258 bit-length assert!(read_scalar_varint(&buf[..]).unwrap() == (expected_val, 37)); - buf[36] = 0b01111111; // buf has 259 bit-length + buf[36] = 0b0111_1111; // buf has 259 bit-length assert!(read_scalar_varint(&buf[..]).unwrap() == (expected_val, 37)); } #[test] fn varint_encoded_values_that_never_ends_will_make_the_read_scalar_to_error_out() { - let buf = [0b11111111_u8; 5]; + let buf = [0b1111_1111_u8; 5]; // varint numbers that do not terminate will fail out assert!((read_scalar_varint(&buf[..]) as Option<(Curve25519Scalar, _)>).is_none()); @@ -246,16 +246,16 @@ fn varint_encoded_values_that_never_ends_will_make_the_read_scalar_to_error_out( #[test] fn valid_varint_encoded_input_that_has_length_bigger_than_259_bits_will_make_the_read_scalar_to_error_out( ) { - let mut buf = [0b11111111_u8; 38]; + let mut buf = [0b1111_1111_u8; 38]; // a varint with 260 bit-length will fail (260 bits = 37 * 7 + 1 as // each byte can hold only 7 bits in the varint encoding) - buf[37] = 0b00000001_u8; + buf[37] = 0b0000_0001_u8; assert!((read_scalar_varint(&buf[..37]) as Option<(Curve25519Scalar, _)>).is_none()); // a varint with 266 bit-length will fail (266 bits = 38 * 7 as // each byte can hold only 7 bits in the varint encoding) - buf[37] = 0b01111111_u8; + buf[37] = 0b0111_1111_u8; assert!((read_scalar_varint(&buf[..38]) as Option<(Curve25519Scalar, _)>).is_none()); } diff --git a/crates/proof-of-sql/src/base/encode/varint_trait_test.rs b/crates/proof-of-sql/src/base/encode/varint_trait_test.rs index 6e8002a4e..94ff26e37 100644 --- a/crates/proof-of-sql/src/base/encode/varint_trait_test.rs +++ b/crates/proof-of-sql/src/base/encode/varint_trait_test.rs @@ -24,14 +24,14 @@ fn test_required_space() { assert_eq!(1_u32.required_space(), 1); assert_eq!(128_u32.required_space(), 2); assert_eq!(16384_u32.required_space(), 3); - assert_eq!(2097151_u32.required_space(), 3); - assert_eq!(2097152_u32.required_space(), 4); + assert_eq!(2_097_151_u32.required_space(), 3); + assert_eq!(2_097_152_u32.required_space(), 4); } #[test] fn test_encode_u64() { - assert_eq!(0_u32.encode_var_vec(), vec![0b00000000]); - assert_eq!(300_u32.encode_var_vec(), vec![0b10101100, 0b00000010]); + assert_eq!(0_u32.encode_var_vec(), vec![0b0000_0000]); + assert_eq!(300_u32.encode_var_vec(), vec![0b1010_1100, 0b0000_0010]); } #[test] @@ -65,8 +65,8 @@ fn test_encode_i64() { assert_eq!(150_i64.encode_var_vec(), 300_u32.encode_var_vec()); assert_eq!((-150_i64).encode_var_vec(), 299_u32.encode_var_vec()); assert_eq!( - (-2147483648_i64).encode_var_vec(), - 4294967295_u64.encode_var_vec() + (-2_147_483_648_i64).encode_var_vec(), + 4_294_967_295_u64.encode_var_vec() ); assert_eq!( i64::MAX.encode_var_vec(), @@ -150,7 +150,7 @@ fn test_decode_extra_bytes_i64() { #[test] fn test_regression_22() { - let encoded: Vec = 0x112233_u64.encode_var_vec(); + let encoded: Vec = 0x0011_2233_u64.encode_var_vec(); assert!(i8::decode_var(&encoded).is_none()); } @@ -319,8 +319,17 @@ fn we_can_encode_and_decode_large_positive_u128() { let value: u128 = 0b110_0010101_1111111_1111111_1111111_1111111_1111111_1111111_1111111_1111111_0011100; let expected_result: &[u8] = &[ - 0b10011100, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, - 0b11111111, 
0b11111111, 0b10010101, 0b00000110, + 0b1001_1100, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1001_0101, + 0b0000_0110, ]; let result: &mut [u8] = &mut [0; 11]; assert_eq!(value.required_space(), 11); @@ -335,8 +344,17 @@ fn we_can_encode_and_decode_large_positive_i128() { let value: i128 = 0b110_0010101_1111111_1111111_1111111_1111111_1111111_1111111_1111111_1111111_001110; let expected_result: &[u8] = &[ - 0b10011100, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, - 0b11111111, 0b11111111, 0b10010101, 0b00000110, + 0b1001_1100, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1001_0101, + 0b0000_0110, ]; let result: &mut [u8] = &mut [0; 11]; assert_eq!(value.required_space(), 11); @@ -351,8 +369,17 @@ fn we_can_encode_and_decode_large_negative_i128() { let value: i128 = -1 - 0b110_0010101_1111111_1111111_1111111_1111111_1111111_1111111_1111111_1111111_001110; let expected_result: &[u8] = &[ - 0b10011101, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, - 0b11111111, 0b11111111, 0b10010101, 0b00000110, + 0b1001_1101, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1111_1111, + 0b1001_0101, + 0b0000_0110, ]; let result: &mut [u8] = &mut [0; 11]; assert_eq!(value.required_space(), 11); diff --git a/crates/proof-of-sql/src/base/encode/zigzag_test.rs b/crates/proof-of-sql/src/base/encode/zigzag_test.rs index 4f9ac6705..026f9502d 100644 --- a/crates/proof-of-sql/src/base/encode/zigzag_test.rs +++ b/crates/proof-of-sql/src/base/encode/zigzag_test.rs @@ -21,7 +21,7 @@ fn small_scalars_are_encoded_as_positive_zigzag_values() { // since x < y, where x + y = 0, the ZigZag value is encoded as 2 * x assert!( Curve25519Scalar::from(u128::MAX).zigzag() - == U256::from_words(0xfffffffffffffffffffffffffffffffe, 0x1) + == U256::from_words(0xffff_ffff_ffff_ffff_ffff_ffff_ffff_fffe, 0x1) ); for x in 1..1000_u128 { @@ -53,16 +53,16 @@ fn big_scalars_that_are_smaller_than_their_additive_inverses_are_encoded_as_posi { // x = (p - 1) / 2 (p is the ristretto group order) let val: Curve25519Scalar = (&U256::from_words( - 0xa6f7cef517bce6b2c09318d2e7ae9f6, - 0x8000000000000000000000000000000, + 0x0a6f_7cef_517b_ce6b_2c09_318d_2e7a_e9f6, + 0x0800_0000_0000_0000_0000_0000_0000_0000, )) .into(); // since x < y, where x + y = 0, the ZigZag value is encoded as 2 * x assert!( val.zigzag() == U256::from_words( - 27742317777372353535851937790883648492, - 21267647932558653966460912964485513216 + 27_742_317_777_372_353_535_851_937_790_883_648_492, + 21_267_647_932_558_653_966_460_912_964_485_513_216 ) ); } @@ -72,8 +72,8 @@ fn big_additive_inverses_that_are_smaller_than_the_input_scalars_are_encoded_as_ ) { // x = (p + 1) / 2 (p is the ristretto group order) let val: Curve25519Scalar = (&U256::from_words( - 0xa6f7cef517bce6b2c09318d2e7ae9f7, - 0x8000000000000000000000000000000, + 0x0a6f_7cef_517b_ce6b_2c09_318d_2e7a_e9f7, + 0x0800_0000_0000_0000_0000_0000_0000_0000, )) .into(); @@ -82,8 +82,8 @@ fn big_additive_inverses_that_are_smaller_than_the_input_scalars_are_encoded_as_ assert!( val.zigzag() == U256::from_words( - 27742317777372353535851937790883648491, - 21267647932558653966460912964485513216 + 27_742_317_777_372_353_535_851_937_790_883_648_491, + 21_267_647_932_558_653_966_460_912_964_485_513_216 ) ); @@ -94,5 +94,8 @@ fn 
big_additive_inverses_that_are_smaller_than_the_input_scalars_are_encoded_as_ high: 0x1_u128, }) .into(); - assert!((-val).zigzag() == U256::from_words(0xffffffffffffffffffffffffffffffff_u128, 0x1_u128)); + assert!( + (-val).zigzag() + == U256::from_words(0xffff_ffff_ffff_ffff_ffff_ffff_ffff_ffff_u128, 0x1_u128) + ); } diff --git a/crates/proof-of-sql/src/base/math/log.rs b/crates/proof-of-sql/src/base/math/log.rs index 2909ab8a0..b18d3305e 100644 --- a/crates/proof-of-sql/src/base/math/log.rs +++ b/crates/proof-of-sql/src/base/math/log.rs @@ -83,7 +83,7 @@ mod tests { ); assert_eq!( log2_up_bytes(&[0, 0, 0, 1]), - 16777216f32.log2().ceil() as usize + 16_777_216_f32.log2().ceil() as usize ); // Bytes are non-trivial powers of 2 @@ -111,11 +111,11 @@ mod tests { ); assert_eq!( log2_up_bytes(&[6, 5, 3, 0]), - 197894f32.log2().ceil() as usize + 197_894_f32.log2().ceil() as usize ); assert_eq!( log2_up_bytes(&[255, 255, 255, 255]), - 4294967295f32.log2().ceil() as usize + 4_294_967_295_f32.log2().ceil() as usize ); } } diff --git a/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs b/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs index 7547c7d19..d7dde920b 100644 --- a/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs +++ b/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs @@ -78,7 +78,7 @@ impl CompositePolynomial { for m in product { let m_ptr: *const Vec = Rc::as_ptr(&m); if let Some(index) = self.raw_pointers_lookup_table.get(&m_ptr) { - indexed_product.push(*index) + indexed_product.push(*index); } else { let curr_index = self.flattened_ml_extensions.len(); self.flattened_ml_extensions.push(m.clone()); diff --git a/crates/proof-of-sql/src/base/polynomial/lagrange_basis_evaluation_test.rs b/crates/proof-of-sql/src/base/polynomial/lagrange_basis_evaluation_test.rs index 5d1306edc..0dbd46fb7 100644 --- a/crates/proof-of-sql/src/base/polynomial/lagrange_basis_evaluation_test.rs +++ b/crates/proof-of-sql/src/base/polynomial/lagrange_basis_evaluation_test.rs @@ -211,7 +211,7 @@ fn compute_truncated_lagrange_basis_inner_product_gives_correct_values_with_3_va ]; assert_eq!( compute_truncated_lagrange_basis_inner_product(8, &a, &b), - Curve25519Scalar::from(123880u32) + Curve25519Scalar::from(123_880_u32) ); assert_eq!( compute_truncated_lagrange_basis_inner_product(7, &a, &b), @@ -262,7 +262,7 @@ fn compute_truncated_lagrange_basis_inner_product_gives_correct_values_with_3_va ]; assert_eq!( compute_truncated_lagrange_basis_inner_product(8, &a, &b), - Curve25519Scalar::from(123880u32) + Curve25519Scalar::from(123_880_u32) ); assert_eq!( compute_truncated_lagrange_basis_inner_product(7, &a, &b), @@ -305,7 +305,7 @@ fn compute_truncated_lagrange_basis_inner_product_gives_correct_values_with_3_va let b: Vec = vec![3, 11, 13]; assert_eq!( compute_truncated_lagrange_basis_inner_product(8, &a, &b), - 123880 + 123_880 ); assert_eq!( compute_truncated_lagrange_basis_inner_product(7, &a, &b), diff --git a/crates/proof-of-sql/src/base/proof/keccak256_transcript.rs b/crates/proof-of-sql/src/base/proof/keccak256_transcript.rs index 3459be1b8..89b5b4824 100644 --- a/crates/proof-of-sql/src/base/proof/keccak256_transcript.rs +++ b/crates/proof-of-sql/src/base/proof/keccak256_transcript.rs @@ -21,7 +21,7 @@ impl TranscriptCore for Keccak256Transcript { Self(Keccak::v256()) } fn raw_append(&mut self, message: &[u8]) { - self.0.update(message) + self.0.update(message); } fn raw_challenge(&mut self) -> [u8; 32] { let mut result = [0; 32]; diff --git 
a/crates/proof-of-sql/src/base/proof/merlin_transcript_core.rs b/crates/proof-of-sql/src/base/proof/merlin_transcript_core.rs index 62c266003..b2ff61597 100644 --- a/crates/proof-of-sql/src/base/proof/merlin_transcript_core.rs +++ b/crates/proof-of-sql/src/base/proof/merlin_transcript_core.rs @@ -3,7 +3,7 @@ impl super::transcript_core::TranscriptCore for merlin::Transcript { merlin::Transcript::new(b"TranscriptCore::new") } fn raw_append(&mut self, message: &[u8]) { - self.append_message(b"TranscriptCore::raw_append", message) + self.append_message(b"TranscriptCore::raw_append", message); } fn raw_challenge(&mut self) -> [u8; 32] { let mut result = [0u8; 32]; diff --git a/crates/proof-of-sql/src/base/proof/transcript_core.rs b/crates/proof-of-sql/src/base/proof/transcript_core.rs index 57c9549b4..a7e34035c 100644 --- a/crates/proof-of-sql/src/base/proof/transcript_core.rs +++ b/crates/proof-of-sql/src/base/proof/transcript_core.rs @@ -30,8 +30,8 @@ impl Transcript for T { messages.into_iter().for_each(|mut message| { let bytes = message.as_bytes_mut(); bytes.reverse(); - self.raw_append(bytes) - }) + self.raw_append(bytes); + }); } fn extend_as_le_from_refs<'a, M: AsBytes + 'a + ?Sized>( &mut self, @@ -39,13 +39,13 @@ impl Transcript for T { ) { messages .into_iter() - .for_each(|message| self.raw_append(message.as_bytes())) + .for_each(|message| self.raw_append(message.as_bytes())); } fn extend_scalars_as_be<'a, S: Scalar + 'a>( &mut self, messages: impl IntoIterator, ) { - self.extend_as_be::<[u64; 4]>(messages.into_iter().map(RefInto::ref_into)) + self.extend_as_be::<[u64; 4]>(messages.into_iter().map(RefInto::ref_into)); } fn scalar_challenge_as_be(&mut self) -> S { receive_challenge_as_be::<[u64; 4]>(self).into() diff --git a/crates/proof-of-sql/src/base/scalar/mont_scalar.rs b/crates/proof-of-sql/src/base/scalar/mont_scalar.rs index 81c264e9c..c87d3802f 100644 --- a/crates/proof-of-sql/src/base/scalar/mont_scalar.rs +++ b/crates/proof-of-sql/src/base/scalar/mont_scalar.rs @@ -105,7 +105,7 @@ impl> Debug for MontScalar { impl> Eq for MontScalar {} impl> Hash for MontScalar { fn hash(&self, state: &mut H) { - self.0.hash(state) + self.0.hash(state); } } impl> Ord for MontScalar { diff --git a/crates/proof-of-sql/src/base/scalar/mont_scalar_from.rs b/crates/proof-of-sql/src/base/scalar/mont_scalar_from.rs index a52ef5eb7..7951cb871 100644 --- a/crates/proof-of-sql/src/base/scalar/mont_scalar_from.rs +++ b/crates/proof-of-sql/src/base/scalar/mont_scalar_from.rs @@ -21,7 +21,7 @@ impl> From<&[u8]> for MontScalar { let hash = blake3::hash(x); let mut bytes: [u8; 32] = hash.into(); - bytes[31] &= 0b00001111_u8; + bytes[31] &= 0b0000_1111_u8; Self::from_le_bytes_mod_order(&bytes) } diff --git a/crates/proof-of-sql/src/base/scalar/mont_scalar_test.rs b/crates/proof-of-sql/src/base/scalar/mont_scalar_test.rs index d1502ee7c..4dd18cdf2 100644 --- a/crates/proof-of-sql/src/base/scalar/mont_scalar_test.rs +++ b/crates/proof-of-sql/src/base/scalar/mont_scalar_test.rs @@ -53,7 +53,7 @@ fn test_curve25519_scalar_serialization() { Curve25519Scalar::from(12345), Curve25519Scalar::from(2357), Curve25519Scalar::from(999), - Curve25519Scalar::from(123456789), + Curve25519Scalar::from(123_456_789), ]; let serialized = serde_json::to_string(&s).unwrap(); let deserialized: [Curve25519Scalar; 10] = serde_json::from_str(&serialized).unwrap(); @@ -64,35 +64,35 @@ fn test_curve25519_scalar_serialization() { fn test_curve25519_scalar_display() { assert_eq!( 
"0000000000000000000000000000000000000000000000000000000000ABC123", - format!("{}", Curve25519Scalar::from(0xABC123)) + format!("{}", Curve25519Scalar::from(0x00AB_C123)) ); assert_eq!( "1000000000000000000000000000000014DEF9DEA2F79CD65812631A5C4A12CA", - format!("{}", Curve25519Scalar::from(-0xABC123)) + format!("{}", Curve25519Scalar::from(-0x00AB_C123)) ); assert_eq!( "0x0000...C123", - format!("{:#}", Curve25519Scalar::from(0xABC123)) + format!("{:#}", Curve25519Scalar::from(0x00AB_C123)) ); assert_eq!( "0x1000...12CA", - format!("{:#}", Curve25519Scalar::from(-0xABC123)) + format!("{:#}", Curve25519Scalar::from(-0x00AB_C123)) ); assert_eq!( "+0000000000000000000000000000000000000000000000000000000000ABC123", - format!("{:+}", Curve25519Scalar::from(0xABC123)) + format!("{:+}", Curve25519Scalar::from(0x00AB_C123)) ); assert_eq!( "-0000000000000000000000000000000000000000000000000000000000ABC123", - format!("{:+}", Curve25519Scalar::from(-0xABC123)) + format!("{:+}", Curve25519Scalar::from(-0x00AB_C123)) ); assert_eq!( "+0x0000...C123", - format!("{:+#}", Curve25519Scalar::from(0xABC123)) + format!("{:+#}", Curve25519Scalar::from(0x00AB_C123)) ); assert_eq!( "-0x0000...C123", - format!("{:+#}", Curve25519Scalar::from(-0xABC123)) + format!("{:+#}", Curve25519Scalar::from(-0x00AB_C123)) ); } diff --git a/crates/proof-of-sql/src/base/slice_ops/batch_inverse.rs b/crates/proof-of-sql/src/base/slice_ops/batch_inverse.rs index 0a8c5fefb..378ccbdd5 100644 --- a/crates/proof-of-sql/src/base/slice_ops/batch_inverse.rs +++ b/crates/proof-of-sql/src/base/slice_ops/batch_inverse.rs @@ -48,10 +48,10 @@ where // Batch invert in parallel, without copying the vector v.par_chunks_mut(num_elem_per_thread).for_each(|chunk| { serial_batch_inversion_and_mul(chunk, coeff); - }) + }); }, serial_batch_inversion_and_mul(v, coeff) - ) + ); } fn serial_batch_inversion_and_mul(v: &mut [F], coeff: F) diff --git a/crates/proof-of-sql/src/base/slice_ops/mul_add_assign.rs b/crates/proof-of-sql/src/base/slice_ops/mul_add_assign.rs index 4f73277a3..ff020a9e2 100644 --- a/crates/proof-of-sql/src/base/slice_ops/mul_add_assign.rs +++ b/crates/proof-of-sql/src/base/slice_ops/mul_add_assign.rs @@ -19,5 +19,5 @@ where .zip(to_mul_add) .for_each(|(res_i, &data_i)| { *res_i += multiplier * data_i.into(); - }) + }); } diff --git a/crates/proof-of-sql/src/proof_primitive/dory/deferred_msm.rs b/crates/proof-of-sql/src/proof_primitive/dory/deferred_msm.rs index 4c1a5abd9..c1cc2c9eb 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/deferred_msm.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/deferred_msm.rs @@ -42,7 +42,7 @@ impl From for DeferredMSM { } impl AddAssign for DeferredMSM { fn add_assign(&mut self, rhs: G) { - self.pairs.push((rhs, None)) + self.pairs.push((rhs, None)); } } impl MulAssign for DeferredMSM { @@ -50,7 +50,7 @@ impl MulAssign for DeferredMSM { self.pairs.iter_mut().for_each(|(_, f)| match f { Some(i) => *i *= rhs, None => *f = Some(rhs), - }) + }); } } impl Mul for DeferredMSM { @@ -62,7 +62,7 @@ impl Mul for DeferredMSM { } impl AddAssign> for DeferredMSM { fn add_assign(&mut self, rhs: DeferredMSM) { - self.pairs.extend(rhs.pairs) + self.pairs.extend(rhs.pairs); } } impl Add> for DeferredMSM { diff --git a/crates/proof-of-sql/src/proof_primitive/dory/dory_messages.rs b/crates/proof-of-sql/src/proof_primitive/dory/dory_messages.rs index 32897f0ac..9adff61ad 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/dory_messages.rs +++ 
b/crates/proof-of-sql/src/proof_primitive/dory/dory_messages.rs @@ -93,7 +93,7 @@ impl DoryMessages { pub(super) fn verifier_F_message(&mut self, transcript: &mut impl Transcript) -> (F, F) { let mut message = F::zero(); while message.is_zero() { - message = transcript.scalar_challenge_as_be::().0 + message = transcript.scalar_challenge_as_be::().0; } let message_inv = message.inverse().unwrap(); (message, message_inv) diff --git a/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_standard_basis_helper.rs b/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_standard_basis_helper.rs index a2f8a3e8c..08aa3c3bf 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_standard_basis_helper.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_standard_basis_helper.rs @@ -30,7 +30,7 @@ pub(super) fn compute_dynamic_standard_basis_vecs(point: &[F], lo_vec: &mut [F], point.iter().skip(1).enumerate().for_each(|(i, v)| { let p = i / 2; let o = 2 + i % 2; - (o << p..(o + 1) << p).for_each(|k| hi_vec[k] *= v) + (o << p..(o + 1) << p).for_each(|k| hi_vec[k] *= v); }); } diff --git a/crates/proof-of-sql/src/proof_primitive/dory/extended_dory_inner_product.rs b/crates/proof-of-sql/src/proof_primitive/dory/extended_dory_inner_product.rs index 749c8ac92..8c1895773 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/extended_dory_inner_product.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/extended_dory_inner_product.rs @@ -25,7 +25,7 @@ pub fn extended_dory_inner_product_prove( extended_dory_reduce_prove(messages, transcript, &mut state, setup); } let base_state = fold_scalars_0_prove(messages, transcript, state, setup); - scalar_product_prove(messages, transcript, base_state) + scalar_product_prove(messages, transcript, base_state); } /// This is the verifier side of the extended Dory-Innerproduct algorithm in section 4.3 of https://eprint.iacr.org/2020/1274.pdf. 
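Aside from literal formatting, the dominant change in the transcript, scalar, MSM, and Dory hunks is appending a semicolon to a unit-returning call in tail position, which is the form clippy's `semicolon_if_nothing_returned` lint asks for. A minimal, self-contained sketch of that pattern follows; the `Counter` type and its methods are illustrative only and not part of this crate.

struct Counter {
    total: u64,
}

impl Counter {
    fn record(&mut self, amount: u64) {
        self.total += amount;
    }

    // Without the trailing semicolon, the unit value of `record` is the tail
    // expression of the function. It compiles, but reads like a return value.
    fn add_implicit(&mut self, amount: u64) {
        self.record(amount)
    }

    // With the semicolon, the call is an ordinary statement. This is the form
    // the patch converts these call sites to.
    fn add(&mut self, amount: u64) {
        self.record(amount);
    }
}

fn main() {
    let mut counter = Counter { total: 0 };
    counter.add_implicit(2);
    counter.add(3);
    assert_eq!(counter.total, 5);
}

Both forms compile identically; the trailing semicolon only makes explicit that the call is a statement rather than the function's return value.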
diff --git a/crates/proof-of-sql/src/proof_primitive/dory/setup.rs b/crates/proof-of-sql/src/proof_primitive/dory/setup.rs index 9b067d2a7..1d509f89a 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/setup.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/setup.rs @@ -72,7 +72,7 @@ impl<'a> ProverSetup<'a> { element_num_bytes: u32, scalars: &[u8], ) { - self.blitzar_handle.msm(res, element_num_bytes, scalars) + self.blitzar_handle.msm(res, element_num_bytes, scalars); } #[cfg(feature = "blitzar")] @@ -84,7 +84,7 @@ impl<'a> ProverSetup<'a> { scalars: &[u8], ) { self.blitzar_handle - .packed_msm(res, output_bit_table, scalars) + .packed_msm(res, output_bit_table, scalars); } } diff --git a/crates/proof-of-sql/src/sql/postprocessing/order_by_postprocessing_test.rs b/crates/proof-of-sql/src/sql/postprocessing/order_by_postprocessing_test.rs index 84c7a134f..23e8997b8 100644 --- a/crates/proof-of-sql/src/sql/postprocessing/order_by_postprocessing_test.rs +++ b/crates/proof-of-sql/src/sql/postprocessing/order_by_postprocessing_test.rs @@ -138,7 +138,7 @@ fn we_can_use_int128_columns_inside_order_by_in_asc_order() { #[test] fn we_can_do_order_by_with_random_i128_data() { let mut rng = rand::thread_rng(); - let range: Vec = (-300000..300000).collect(); + let range: Vec = (-300_000..300_000).collect(); let table: Vec = range .iter() .map(|_| rng.gen_range(i128::MIN..i128::MAX)) diff --git a/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder.rs b/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder.rs index db81338d3..0da553564 100644 --- a/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder.rs +++ b/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder.rs @@ -102,11 +102,11 @@ impl CompositePolynomialBuilder { for (mult, terms) in self.fr_multiplicands_rest.iter() { let fr_iter = iter::once(self.fr.clone()); let terms_iter = terms.iter().cloned(); - res.add_product(fr_iter.chain(terms_iter), *mult) + res.add_product(fr_iter.chain(terms_iter), *mult); } for (mult, terms) in self.zerosum_multiplicands.iter() { let terms_iter = terms.iter().cloned(); - res.add_product(terms_iter, *mult) + res.add_product(terms_iter, *mult); } res.annotate_trace(); diff --git a/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder_test.rs b/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder_test.rs index a7d940576..5f0b25b54 100644 --- a/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder_test.rs +++ b/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder_test.rs @@ -13,7 +13,7 @@ fn we_combine_single_degree_fr_multiplicands() { let p = builder.make_composite_polynomial(); assert_eq!(p.products.len(), 1); assert_eq!(p.flattened_ml_extensions.len(), 2); - let pt = [Curve25519Scalar::from(9268764u64)]; + let pt = [Curve25519Scalar::from(9_268_764_u64)]; let m0 = Curve25519Scalar::one() - pt[0]; let m1 = pt[0]; let eval1 = Curve25519Scalar::from(mle1[0]) * m0 + Curve25519Scalar::from(mle1[1]) * m1; @@ -34,7 +34,7 @@ fn we_dont_duplicate_repeated_mles() { let p = builder.make_composite_polynomial(); assert_eq!(p.products.len(), 3); assert_eq!(p.flattened_ml_extensions.len(), 4); - let pt = [Curve25519Scalar::from(9268764u64)]; + let pt = [Curve25519Scalar::from(9_268_764_u64)]; let m0 = Curve25519Scalar::one() - pt[0]; let m1 = pt[0]; let eval1 = Curve25519Scalar::from(mle1[0]) * m0 + Curve25519Scalar::from(mle1[1]) * m1; @@ -60,7 +60,7 @@ fn we_can_combine_identity_with_zero_sum_polynomials() { let p = 
builder.make_composite_polynomial(); assert_eq!(p.products.len(), 3); //1 for the linear term, 1 for the fr multiplicand, 1 for the zerosum multiplicand assert_eq!(p.flattened_ml_extensions.len(), 6); //1 for fr, 1 for the linear term, and 4 for mle1-4 - let pt = [Curve25519Scalar::from(9268764u64)]; + let pt = [Curve25519Scalar::from(9_268_764_u64)]; let m0 = Curve25519Scalar::one() - pt[0]; let m1 = pt[0]; let eval1 = Curve25519Scalar::from(mle1[0]) * m0 + Curve25519Scalar::from(mle1[1]) * m1; @@ -80,7 +80,7 @@ fn we_can_handle_only_an_empty_fr_multiplicand() { let p = builder.make_composite_polynomial(); assert_eq!(p.products.len(), 1); //1 for the fr multiplicand assert_eq!(p.flattened_ml_extensions.len(), 2); //1 for fr, 1 for the linear term - let pt = [Curve25519Scalar::from(9268764u64)]; + let pt = [Curve25519Scalar::from(9_268_764_u64)]; let m0 = Curve25519Scalar::one() - pt[0]; let m1 = pt[0]; let eval1 = (m0 + m1) * Curve25519Scalar::from(17); @@ -106,7 +106,7 @@ fn we_can_handle_empty_terms_with_other_terms() { let p = builder.make_composite_polynomial(); assert_eq!(p.products.len(), 3); //1 for the linear term, 1 for the fr multiplicand, 1 for the zerosum multiplicand assert_eq!(p.flattened_ml_extensions.len(), 6); //1 for fr, 1 for the linear term, and 4 for mle1-4 - let pt = [Curve25519Scalar::from(9268764u64)]; + let pt = [Curve25519Scalar::from(9_268_764_u64)]; let m0 = Curve25519Scalar::one() - pt[0]; let m1 = pt[0]; let eval1 = Curve25519Scalar::from(mle1[0]) * m0 + Curve25519Scalar::from(mle1[1]) * m1; diff --git a/crates/proof-of-sql/src/sql/proof/proof_builder_test.rs b/crates/proof-of-sql/src/sql/proof/proof_builder_test.rs index 6586556c9..a74fb4130 100644 --- a/crates/proof-of-sql/src/sql/proof/proof_builder_test.rs +++ b/crates/proof-of-sql/src/sql/proof/proof_builder_test.rs @@ -126,7 +126,7 @@ fn we_can_form_an_aggregated_sumcheck_polynomial() { ); let random_point = [ Curve25519Scalar::from(123u64), - Curve25519Scalar::from(101112u64), + Curve25519Scalar::from(101_112_u64), ]; let eval = poly.evaluate(&random_point); let expected_eval = expected_poly.evaluate(&random_point); diff --git a/crates/proof-of-sql/src/sql/proof/provable_query_result_test.rs b/crates/proof-of-sql/src/sql/proof/provable_query_result_test.rs index 220fbc816..7a9040fba 100644 --- a/crates/proof-of-sql/src/sql/proof/provable_query_result_test.rs +++ b/crates/proof-of-sql/src/sql/proof/provable_query_result_test.rs @@ -256,9 +256,9 @@ fn evaluation_fails_if_the_result_cant_be_decoded() { let mut res = ProvableQueryResult::new_from_raw_data( 1, Indexes::Sparse(vec![0]), - vec![0b11111111_u8; 38], + vec![0b1111_1111_u8; 38], ); - res.data_mut()[37] = 0b00000001_u8; + res.data_mut()[37] = 0b0000_0001_u8; let evaluation_point = [ Curve25519Scalar::from(10u64), Curve25519Scalar::from(100u64), diff --git a/crates/proof-of-sql/src/sql/proof/result_element_serialization.rs b/crates/proof-of-sql/src/sql/proof/result_element_serialization.rs index 726b8c35c..84af864fa 100644 --- a/crates/proof-of-sql/src/sql/proof/result_element_serialization.rs +++ b/crates/proof-of-sql/src/sql/proof/result_element_serialization.rs @@ -417,7 +417,7 @@ mod tests { assert!(::decode(&out[..]).is_ok()); - out[..].clone_from_slice(&vec![0b11111111; value.required_bytes()]); + out[..].clone_from_slice(&vec![0b1111_1111; value.required_bytes()]); assert!(::decode(&out[..]).is_err()); } diff --git a/crates/proof-of-sql/src/sql/proof/sumcheck_subpolynomial.rs b/crates/proof-of-sql/src/sql/proof/sumcheck_subpolynomial.rs 
index ef66ca664..db8fdb4a0 100644 --- a/crates/proof-of-sql/src/sql/proof/sumcheck_subpolynomial.rs +++ b/crates/proof-of-sql/src/sql/proof/sumcheck_subpolynomial.rs @@ -48,7 +48,7 @@ impl<'a, S: Scalar> SumcheckSubpolynomial<'a, S> { for (mult, term) in self.terms.iter() { match self.subpolynomial_type { SumcheckSubpolynomialType::Identity => { - composite_polynomial.produce_fr_multiplicand(&(*mult * group_multiplier), term) + composite_polynomial.produce_fr_multiplicand(&(*mult * group_multiplier), term); } SumcheckSubpolynomialType::ZeroSum => composite_polynomial .produce_zerosum_multiplicand(&(*mult * group_multiplier), term), diff --git a/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs b/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs index 478fcb547..ea3b0e6e6 100644 --- a/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs +++ b/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs @@ -44,7 +44,10 @@ pub fn exercise_verification( // try changing intermediate commitments let commit_p = RistrettoPoint::compute_commitments( - &[CommittableColumn::BigInt(&[353453245i64, 93402346i64])], + &[CommittableColumn::BigInt(&[ + 353_453_245_i64, + 93_402_346_i64, + ])], 0_usize, &(), )[0]; diff --git a/crates/proof-of-sql/src/sql/proof_exprs/aggregate_expr.rs b/crates/proof-of-sql/src/sql/proof_exprs/aggregate_expr.rs index 8bb6f9b21..ddeb08b77 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/aggregate_expr.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/aggregate_expr.rs @@ -71,6 +71,6 @@ impl ProofExpr for AggregateExpr { } fn get_column_references(&self, columns: &mut IndexSet) { - self.expr.get_column_references(columns) + self.expr.get_column_references(columns); } } diff --git a/crates/proof-of-sql/src/sql/proof_exprs/multiply_expr_test.rs b/crates/proof-of-sql/src/sql/proof_exprs/multiply_expr_test.rs index 882de6244..d76c62098 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/multiply_expr_test.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/multiply_expr_test.rs @@ -191,12 +191,18 @@ fn result_expr_can_overflow_more() { #[test] fn where_clause_can_wrap_around() { let data = owned_table([ - bigint("a", [2357878470324616199_i64, 2657439699204141, 884]), - bigint("b", [31194601778911687_i64, 1644425323726039, 884]), - bigint("c", [500213946116239_i64, 1570568673569987, 884]), - bigint("d", [211980999383887_i64, 1056107792886999, 884]), - bigint("e", [927908842441_i64, 998426626609497, 884]), - bigint("res", [-20_i64, 50, 539835356263424]), + bigint( + "a", + [2_357_878_470_324_616_199_i64, 2_657_439_699_204_141, 884], + ), + bigint( + "b", + [31_194_601_778_911_687_i64, 1_644_425_323_726_039, 884], + ), + bigint("c", [500_213_946_116_239_i64, 1_570_568_673_569_987, 884]), + bigint("d", [211_980_999_383_887_i64, 1_056_107_792_886_999, 884]), + bigint("e", [927_908_842_441_i64, 998_426_626_609_497, 884]), + bigint("res", [-20_i64, 50, 539_835_356_263_424]), ]); let t = "sxt.t".parse().unwrap(); let accessor = OwnedTableTestAccessor::::new_from_table(t, data, 0, ()); @@ -222,12 +228,18 @@ fn where_clause_can_wrap_around() { exercise_verification(&verifiable_res, &ast, &accessor, t); let res = verifiable_res.verify(&ast, &accessor, &()).unwrap().table; let expected_res = owned_table([ - bigint("a", [2357878470324616199_i64, 2657439699204141, 884]), - bigint("b", [31194601778911687_i64, 1644425323726039, 884]), - bigint("c", [500213946116239_i64, 1570568673569987, 884]), - bigint("d", 
[211980999383887_i64, 1056107792886999, 884]), - bigint("e", [927908842441_i64, 998426626609497, 884]), - bigint("res", [-20_i64, 50, 539835356263424]), + bigint( + "a", + [2_357_878_470_324_616_199_i64, 2_657_439_699_204_141, 884], + ), + bigint( + "b", + [31_194_601_778_911_687_i64, 1_644_425_323_726_039, 884], + ), + bigint("c", [500_213_946_116_239_i64, 1_570_568_673_569_987, 884]), + bigint("d", [211_980_999_383_887_i64, 1_056_107_792_886_999, 884]), + bigint("e", [927_908_842_441_i64, 998_426_626_609_497, 884]), + bigint("res", [-20_i64, 50, 539_835_356_263_424]), ]); assert_eq!(res, expected_res); }
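The remaining churn in the test tables above is the underscore-separator rewrite of long literals, the style clippy's `unreadable_literal` lint suggests, while the folding of the `Bounds` match arms at the top of the patch matches what `unnested_or_patterns` suggests. A short sketch with a hypothetical `Bound` enum, not taken from this crate, showing that both rewrites are purely syntactic:

enum Bound {
    Sharp(i64),
    Bounded(i64),
    Empty,
}

// The or-pattern is nested inside the tuple pattern, so a single arm covers
// cases that previously needed separate arms with identical bodies, the same
// shape as the rewritten `Bounds` arms in column_bounds.rs.
fn lower_bound(bounds: (Bound, Bound)) -> Option<i64> {
    match bounds {
        (Bound::Sharp(value) | Bound::Bounded(value), _) => Some(value),
        (Bound::Empty, _) => None,
    }
}

fn main() {
    // Underscore separators change only how a literal reads, never its value.
    assert_eq!(1_625_072_400_i64, 1625072400);
    assert_eq!(0b0111_1111_u8, 0b01111111);
    assert_eq!(
        lower_bound((Bound::Sharp(1_625_072_400), Bound::Empty)),
        Some(1_625_072_400)
    );
}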