chore: resolve `clippy::semicolon_if_nothing_returned`, `clippy::unnested_or_patterns` and `clippy::unreadable_literal` lints in proof-of-sql (#215)

# Rationale for this change

We have `cargo clippy` running in our CI to enforce code quality. To
raise the bar further, we should enable the `clippy::pedantic` lint
group.
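
For reference, one way to opt in is at the crate root. This is a sketch of the general mechanism, not necessarily how this repository wires it up (it may use CI flags or Cargo lint tables instead):

```rust
// lib.rs -- turn the pedantic group into warnings, then carve out any
// exceptions the codebase deliberately keeps (hypothetical example).
#![warn(clippy::pedantic)]
#![allow(clippy::module_name_repetitions)]
```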

# What changes are included in this PR?

This PR fixes `clippy::semicolon_if_nothing_returned`,
`clippy::unnested_or_patterns`, and `clippy::unreadable_literal`
warnings for the `proof-of-sql` lib.
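
A minimal sketch of what each lint flags (illustrative only, not code from this diff):

```rust
fn demo(v: &mut Vec<u32>, x: Option<u32>) {
    // clippy::unnested_or_patterns: prefer `Some(0 | 1)` over
    // `Some(0) | Some(1)`.
    if let Some(0 | 1) = x {
        // clippy::unreadable_literal: long literals want digit separators.
        v.push(1_625_072_400); // was: 1625072400
    }
    // clippy::semicolon_if_nothing_returned: a final expression that
    // evaluates to `()` should be written as a statement.
    v.push(0); // was: `v.push(0)` with no trailing semicolon
}

fn main() {
    let mut v = Vec::new();
    demo(&mut v, Some(1));
    assert_eq!(v, [1_625_072_400, 0]);
}
```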

# Are these changes tested?

Yes.
JayWhite2357 authored Oct 4, 2024
2 parents c845139 + 1850787 commit 258789c
Showing 40 changed files with 230 additions and 184 deletions.
10 changes: 4 additions & 6 deletions crates/proof-of-sql/src/base/commitment/column_bounds.rs
@@ -107,9 +107,8 @@ where
             (Bounds::Sharp(bounds_a), Bounds::Sharp(bounds_b)) => {
                 Bounds::Sharp(bounds_a.union(bounds_b))
             }
-            (Bounds::Bounded(bounds_a), Bounds::Bounded(bounds_b))
-            | (Bounds::Bounded(bounds_a), Bounds::Sharp(bounds_b))
-            | (Bounds::Sharp(bounds_a), Bounds::Bounded(bounds_b)) => {
+            (Bounds::Bounded(bounds_a) | Bounds::Sharp(bounds_a), Bounds::Bounded(bounds_b))
+            | (Bounds::Bounded(bounds_a), Bounds::Sharp(bounds_b)) => {
                 Bounds::Bounded(bounds_a.union(bounds_b))
             }
             (bounds, Bounds::Empty) | (Bounds::Empty, bounds) => bounds,
@@ -128,14 +127,13 @@ where
         match (self, other) {
             (Bounds::Empty, _) => Bounds::Empty,
             (bounds, Bounds::Empty) => bounds,
-            (Bounds::Sharp(bounds_a), Bounds::Sharp(bounds_b))
-            | (Bounds::Sharp(bounds_a), Bounds::Bounded(bounds_b))
+            (Bounds::Sharp(bounds_a), Bounds::Sharp(bounds_b) | Bounds::Bounded(bounds_b))
                 if bounds_a.max() < bounds_b.min() || bounds_b.max() < bounds_a.min() =>
             {
                 // source collections must be disjoint, so no rows are removed
                 Bounds::Sharp(bounds_a)
             }
-            (Bounds::Bounded(bounds), _) | (Bounds::Sharp(bounds), _) => Bounds::Bounded(bounds),
+            (Bounds::Bounded(bounds) | Bounds::Sharp(bounds), _) => Bounds::Bounded(bounds),
         }
     }
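
The nested arms above accept exactly the same cases as the unnested arms they replace; `clippy::unnested_or_patterns` only factors out shared structure. A toy model of the same rewrite (hypothetical two-variant enum, not this crate's `Bounds`):

```rust
enum B {
    X(i32),
    Y(i32),
}

fn label(pair: (B, B)) -> &'static str {
    match pair {
        // Before the lint fix this would be three unnested arms:
        //   (B::X(_), B::X(_)) | (B::Y(_), B::X(_)) | (B::X(_), B::Y(_)) => ...
        // Nesting collapses the two arms that share a right-hand `B::X`.
        (B::X(_) | B::Y(_), B::X(_)) | (B::X(_), B::Y(_)) => "at least one X",
        (B::Y(_), B::Y(_)) => "both Y",
    }
}

fn main() {
    assert_eq!(label((B::Y(1), B::X(2))), "at least one X");
    assert_eq!(label((B::Y(1), B::Y(2))), "both Y");
}
```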
@@ -102,7 +102,7 @@ impl ColumnCommitmentMetadataMapExt for ColumnCommitmentMetadataMap
                 Err(ColumnCommitmentsMismatch::Identifier {
                     id_a: identifier_a.to_string(),
                     id_b: identifier_b.to_string(),
-                })?
+                })?;
             }

             Ok((identifier_a, metadata_a.try_union(metadata_b)?))
@@ -125,7 +125,7 @@ impl ColumnCommitmentMetadataMapExt for ColumnCommitmentMetadataMap
                 Err(ColumnCommitmentsMismatch::Identifier {
                     id_a: identifier_a.to_string(),
                     id_b: identifier_b.to_string(),
-                })?
+                })?;
             }

             Ok((identifier_a, metadata_a.try_difference(metadata_b)?))
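
Both `})?` → `})?;` changes are `clippy::semicolon_if_nothing_returned` at work: the `Err(...)?` exists only to return early, and since the surrounding block evaluates to `()`, the lint wants it written as a statement. A standalone sketch with hypothetical names:

```rust
fn ensure_same(id_a: &str, id_b: &str) -> Result<(), String> {
    if id_a != id_b {
        // `?` on an `Err` always propagates, so this line is purely an
        // early return; the trailing semicolon makes it a statement.
        Err(format!("identifier mismatch: {id_a} vs {id_b}"))?;
    }
    Ok(())
}

fn main() {
    assert!(ensure_same("a", "a").is_ok());
    assert!(ensure_same("a", "b").is_err());
}
```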
6 changes: 3 additions & 3 deletions crates/proof-of-sql/src/base/commitment/committable_column.rs
@@ -428,7 +428,7 @@ mod tests {
         );

         // non-empty case
-        let timestamps = [1625072400, 1625076000, 1625083200];
+        let timestamps = [1_625_072_400, 1_625_076_000, 1_625_083_200];
         let from_borrowed_column =
             CommittableColumn::from(&Column::<Curve25519Scalar>::TimestampTZ(
                 PoSQLTimeUnit::Second,
@@ -610,7 +610,7 @@ mod tests {
         );

         // non-empty case
-        let timestamps = vec![1625072400, 1625076000, 1625083200];
+        let timestamps = vec![1_625_072_400, 1_625_076_000, 1_625_083_200];
         let owned_column = OwnedColumn::<Curve25519Scalar>::TimestampTZ(
             PoSQLTimeUnit::Second,
             PoSQLTimeZone::Utc,
@@ -937,7 +937,7 @@ mod tests {
         assert_eq!(commitment_buffer[0], CompressedRistretto::default());

         // Non-empty case
-        let timestamps = [1625072400, 1625076000, 1625083200];
+        let timestamps = [1_625_072_400, 1_625_076_000, 1_625_083_200];
         let committable_column =
             CommittableColumn::TimestampTZ(PoSQLTimeUnit::Second, PoSQLTimeZone::Utc, &timestamps);
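
Every timestamp fixture in this file gets the same `clippy::unreadable_literal` treatment. The underscores are purely lexical; they change nothing about the value, only how easily a reader can parse its magnitude:

```rust
fn main() {
    // Identical values; the second reads at a glance as ~1.6 billion
    // seconds since the Unix epoch.
    assert_eq!(1625072400_i64, 1_625_072_400_i64);
}
```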
@@ -135,7 +135,7 @@ impl<C: Commitment> VecCommitmentExt for Vec<C> {
     ) where
         COL: Into<CommittableColumn<'a>>,
     {
-        self.extend(Self::from_columns_with_offset(columns, offset, setup))
+        self.extend(Self::from_columns_with_offset(columns, offset, setup));
     }

     fn try_add(self, other: Self) -> Result<Self, NumColumnsMismatch>
@@ -385,7 +385,7 @@ mod tests {
     #[test]
     fn we_can_convert_timestamp_array_normal_range() {
         let alloc = Bump::new();
-        let data = vec![1625072400, 1625076000, 1625083200]; // Example Unix timestamps
+        let data = vec![1_625_072_400, 1_625_076_000, 1_625_083_200]; // Example Unix timestamps
         let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt(
             data.clone().into(),
             Some("Z"),
@@ -401,7 +401,7 @@ mod tests {
     #[test]
     fn we_can_build_an_empty_column_from_an_empty_range_timestamp() {
         let alloc = Bump::new();
-        let data = vec![1625072400, 1625076000]; // Example Unix timestamps
+        let data = vec![1_625_072_400, 1_625_076_000]; // Example Unix timestamps
         let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt(
             data.into(),
             Some("+00:00"),
@@ -419,7 +419,7 @@ mod tests {
     #[test]
     fn we_can_convert_timestamp_array_empty_range() {
         let alloc = Bump::new();
-        let data = vec![1625072400, 1625076000, 1625083200]; // Example Unix timestamps
+        let data = vec![1_625_072_400, 1_625_076_000, 1_625_083_200]; // Example Unix timestamps
         let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt(
             data.into(),
             Some("+0:00"),
@@ -435,7 +435,7 @@ mod tests {
     #[test]
     fn we_cannot_convert_timestamp_array_oob_range() {
         let alloc = Bump::new();
-        let data = vec![1625072400, 1625076000, 1625083200];
+        let data = vec![1_625_072_400, 1_625_076_000, 1_625_083_200];
         let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt(
             data.into(),
             Some("Utc"),
@@ -451,7 +451,7 @@ mod tests {
     #[test]
     fn we_can_convert_timestamp_array_with_nulls() {
         let alloc = Bump::new();
-        let data = vec![Some(1625072400), None, Some(1625083200)];
+        let data = vec![Some(1_625_072_400), None, Some(1_625_083_200)];
         let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt(
             data.into(),
             Some("00:00"),
@@ -903,7 +903,10 @@ mod tests {
     #[test]
     fn we_can_build_an_empty_column_from_an_empty_range_decimal128() {
         let alloc = Bump::new();
-        let decimal_values = vec![12345678901234567890_i128, -12345678901234567890_i128];
+        let decimal_values = vec![
+            12_345_678_901_234_567_890_i128,
+            -12_345_678_901_234_567_890_i128,
+        ];
         let array: ArrayRef = Arc::new(
             Decimal128Array::from(decimal_values)
                 .with_precision_and_scale(38, 0)
@@ -1010,7 +1013,7 @@ mod tests {
     #[test]
     fn we_can_convert_valid_timestamp_array_refs_into_valid_columns() {
         let alloc = Bump::new();
-        let data = vec![1625072400, 1625076000]; // Example Unix timestamps
+        let data = vec![1_625_072_400, 1_625_076_000]; // Example Unix timestamps
         let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt(
             data.clone().into(),
             Some("UTC"),
@@ -1072,7 +1075,7 @@ mod tests {
     fn we_can_convert_valid_timestamp_array_refs_into_valid_columns_using_ranges_smaller_than_arrays(
     ) {
         let alloc = Bump::new();
-        let data = vec![1625072400, 1625076000, 1625083200]; // Example Unix timestamps
+        let data = vec![1_625_072_400, 1_625_076_000, 1_625_083_200]; // Example Unix timestamps
         let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt(
             data.clone().into(),
             Some("Utc"),
@@ -1131,7 +1134,7 @@ mod tests {
     #[test]
     fn we_can_convert_valid_timestamp_array_refs_into_valid_columns_using_ranges_with_zero_size() {
         let alloc = Bump::new();
-        let data = vec![1625072400, 1625076000]; // Example Unix timestamps
+        let data = vec![1_625_072_400, 1_625_076_000]; // Example Unix timestamps
         let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt(
             data.clone().into(),
             Some("Utc"),
@@ -1160,7 +1163,7 @@ mod tests {

     #[test]
     fn we_can_convert_valid_timestamp_array_refs_into_valid_vec_scalars() {
-        let data = vec![1625072400, 1625076000]; // Example Unix timestamps
+        let data = vec![1_625_072_400, 1_625_076_000]; // Example Unix timestamps
         let array: ArrayRef = Arc::new(TimestampSecondArray::with_timezone_opt(
             data.clone().into(),
             Some("Utc"),
58 changes: 29 additions & 29 deletions crates/proof-of-sql/src/base/database/column_operation.rs
@@ -1427,7 +1427,7 @@ mod test {
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
-        let rhs = [71_i64, 150000, -20000]
+        let rhs = [71_i64, 150_000, -20000]
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
@@ -1438,7 +1438,7 @@ mod test {
         assert_eq!(expected, actual);

         // lhs is decimal with nonnegative scale and rhs is decimal with negative scale
-        let lhs = [71_i64, 150000, -20000]
+        let lhs = [71_i64, 150_000, -20000]
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
@@ -1457,7 +1457,7 @@ mod test {
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
-        let rhs = [71_i64, 150000, -20000]
+        let rhs = [71_i64, 150_000, -20000]
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
@@ -1548,7 +1548,7 @@ mod test {
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
-        let rhs = [71_i64, 150000, -30000]
+        let rhs = [71_i64, 150_000, -30000]
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
@@ -1559,7 +1559,7 @@ mod test {
         assert_eq!(expected, actual);

         // lhs is decimal with nonnegative scale and rhs is decimal with negative scale
-        let lhs = [71_i64, 150000, -19000]
+        let lhs = [71_i64, 150_000, -19000]
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
@@ -1578,7 +1578,7 @@ mod test {
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
-        let rhs = [71000_i64, 150000, -21000]
+        let rhs = [71000_i64, 150_000, -21000]
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
@@ -1669,7 +1669,7 @@ mod test {
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
-        let rhs = [71_i64, 150000, -30000]
+        let rhs = [71_i64, 150_000, -30000]
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
@@ -1680,7 +1680,7 @@ mod test {
         assert_eq!(expected, actual);

         // lhs is decimal with nonnegative scale and rhs is decimal with negative scale
-        let lhs = [71_i64, 150000, -19000]
+        let lhs = [71_i64, 150_000, -19000]
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
@@ -1699,7 +1699,7 @@ mod test {
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
-        let rhs = [71000_i64, 150000, -21000]
+        let rhs = [71000_i64, 150_000, -21000]
            .into_iter()
            .map(Curve25519Scalar::from)
            .collect::<Vec<_>>();
@@ -1838,9 +1838,9 @@ mod test {
         let actual: (Precision, i8, Vec<Curve25519Scalar>) =
             try_add_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap();
         let expected_scalars = vec![
-            Curve25519Scalar::from(400071),
-            Curve25519Scalar::from(1499918),
-            Curve25519Scalar::from(-199977),
+            Curve25519Scalar::from(400_071),
+            Curve25519Scalar::from(1_499_918),
+            Curve25519Scalar::from(-199_977),
         ];
         let expected = (Precision::new(75).unwrap(), 3, expected_scalars);
         assert_eq!(expected, actual);
@@ -2000,9 +2000,9 @@ mod test {
         let actual: (Precision, i8, Vec<Curve25519Scalar>) =
             try_subtract_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap();
         let expected_scalars = vec![
-            Curve25519Scalar::from(399929),
-            Curve25519Scalar::from(1500082),
-            Curve25519Scalar::from(-200023),
+            Curve25519Scalar::from(399_929),
+            Curve25519Scalar::from(1_500_082),
+            Curve25519Scalar::from(-200_023),
         ];
         let expected = (Precision::new(75).unwrap(), 3, expected_scalars);
         assert_eq!(expected, actual);
@@ -2246,8 +2246,8 @@ mod test {
             try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap();
         let expected_scalars = vec![
             Curve25519Scalar::from(0_i64),
-            Curve25519Scalar::from(40000000_i64),
-            Curve25519Scalar::from(150000000_i64),
+            Curve25519Scalar::from(40_000_000_i64),
+            Curve25519Scalar::from(150_000_000_i64),
         ];
         let expected = (Precision::new(13).unwrap(), 6, expected_scalars);
         assert_eq!(expected, actual);
@@ -2263,9 +2263,9 @@ mod test {
         let actual: (Precision, i8, Vec<Curve25519Scalar>) =
             try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap();
         let expected_scalars = vec![
-            Curve25519Scalar::from(5633802),
-            Curve25519Scalar::from(-18292682),
-            Curve25519Scalar::from(-8695652),
+            Curve25519Scalar::from(5_633_802),
+            Curve25519Scalar::from(-18_292_682),
+            Curve25519Scalar::from(-8_695_652),
         ];
         let expected = (Precision::new(18).unwrap(), 6, expected_scalars);
         assert_eq!(expected, actual);
@@ -2284,9 +2284,9 @@ mod test {
         let actual: (Precision, i8, Vec<Curve25519Scalar>) =
             try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap();
         let expected_scalars = vec![
-            Curve25519Scalar::from(1333333),
-            Curve25519Scalar::from(-400000),
-            Curve25519Scalar::from(-285714),
+            Curve25519Scalar::from(1_333_333),
+            Curve25519Scalar::from(-400_000),
+            Curve25519Scalar::from(-285_714),
         ];
         let expected = (Precision::new(10).unwrap(), 6, expected_scalars);
         assert_eq!(expected, actual);
@@ -2305,9 +2305,9 @@ mod test {
         let actual: (Precision, i8, Vec<Curve25519Scalar>) =
             try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap();
         let expected_scalars = vec![
-            Curve25519Scalar::from(5633802816_i128),
-            Curve25519Scalar::from(-18292682926_i128),
-            Curve25519Scalar::from(-8695652173_i128),
+            Curve25519Scalar::from(5_633_802_816_i128),
+            Curve25519Scalar::from(-18_292_682_926_i128),
+            Curve25519Scalar::from(-8_695_652_173_i128),
         ];
         let expected = (Precision::new(13).unwrap(), 6, expected_scalars);
         assert_eq!(expected, actual);
@@ -2326,9 +2326,9 @@ mod test {
         let actual: (Precision, i8, Vec<Curve25519Scalar>) =
             try_divide_decimal_columns(&lhs, &rhs, left_column_type, right_column_type).unwrap();
         let expected_scalars = vec![
-            Curve25519Scalar::from(563380),
-            Curve25519Scalar::from(-1829268),
-            Curve25519Scalar::from(-869565),
+            Curve25519Scalar::from(563_380),
+            Curve25519Scalar::from(-1_829_268),
+            Curve25519Scalar::from(-869_565),
         ];
         let expected = (Precision::new(9).unwrap(), 6, expected_scalars);
         assert_eq!(expected, actual);
@@ -37,7 +37,7 @@ fn we_can_evaluate_a_simple_literal() {
     ));
     let actual_column = table.evaluate(&expr).unwrap();
     // UNIX timestamp for 2022-03-01T00:00:00Z
-    let actual_timestamp = 1646092800;
+    let actual_timestamp = 1_646_092_800;
     let expected_column = OwnedColumn::TimestampTZ(
         PoSQLTimeUnit::Second,
         PoSQLTimeZone::Utc,
@@ -187,7 +187,7 @@ fn we_can_evaluate_an_arithmetic_expression() {
         col("int128s"),
     );
     let actual_column = table.evaluate(&expr).unwrap();
-    let expected_scalars = [-16000000, -7960000, 80000, 8120000, 16160000]
+    let expected_scalars = [-16_000_000, -7_960_000, 80000, 8_120_000, 16_160_000]
         .iter()
         .map(|&x| x.into())
         .collect();
@@ -1677,7 +1677,7 @@ mod test {
         let rhs =
             OwnedColumn::<Curve25519Scalar>::Decimal75(Precision::new(5).unwrap(), 2, rhs_scalars);
         let result = (lhs / rhs).unwrap();
-        let expected_scalars = [-400000000_i128, 250000000, 75000000]
+        let expected_scalars = [-400_000_000_i128, 250_000_000, 75_000_000]
            .iter()
            .map(Curve25519Scalar::from)
            .collect();
@@ -1696,7 +1696,7 @@ mod test {
         let rhs =
             OwnedColumn::<Curve25519Scalar>::Decimal75(Precision::new(3).unwrap(), 2, rhs_scalars);
         let result = (lhs / rhs).unwrap();
-        let expected_scalars = [-400000000, 250000000, 100000000]
+        let expected_scalars = [-400_000_000, 250_000_000, 100_000_000]
            .iter()
            .map(Curve25519Scalar::from)
            .collect();
4 changes: 2 additions & 2 deletions crates/proof-of-sql/src/base/database/owned_table_test.rs
@@ -158,7 +158,7 @@ fn we_get_inequality_between_tables_with_differing_data() {
             "time_stamp",
             PoSQLTimeUnit::Second,
             PoSQLTimeZone::Utc,
-            [1625072400],
+            [1_625_072_400],
         ),
     ]);
     let owned_table_b: OwnedTable<DoryScalar> = owned_table([
@@ -170,7 +170,7 @@ fn we_get_inequality_between_tables_with_differing_data() {
             "time_stamp",
             PoSQLTimeUnit::Second,
             PoSQLTimeZone::Utc,
-            [1625076000],
+            [1_625_076_000],
         ),
     ]);
     assert_ne!(owned_table_a, owned_table_b);