Commit fd3a339

chore: resolved `clippy::cast_possible_wrap` & `clippy::cast_possible_truncation` lint warnings (#270)

# Rationale for this change
We have cargo clippy running in our CI in order to enforce code quality.
To raise our standards further, we should enable the `clippy::pedantic` lint group.

# What changes are included in this PR?
Resolved the following lint warnings:

`too_many_lines`
`cast_possible_wrap`
`cast_sign_loss`
`cast_possible_truncation`

# Are these changes tested?
Yes.
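As background (not part of the commit message itself), the casts these lints guard against can silently change values. A minimal sketch of the failure modes, using made-up numbers:

```rust
fn main() {
    // cast_possible_truncation: high bits are silently dropped.
    assert_eq!(1000_u64 as u8, 232);

    // cast_possible_wrap: a large unsigned value reappears as a negative one.
    assert_eq!(u64::MAX as i64, -1);

    // cast_sign_loss: a negative value reappears as a huge unsigned one.
    assert_eq!(-1_i64 as u64, u64::MAX);
}
```

Denying these lints forces each such cast to be either replaced with a checked `try_from`/`try_into` conversion or explicitly acknowledged with `#[allow(...)]`, which is the split visible in the diff below.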
mehulmathur16 authored Oct 19, 2024
1 parent 5b4b962 commit fd3a339
Showing 35 changed files with 104 additions and 17 deletions.
4 changes: 4 additions & 0 deletions Cargo.toml
@@ -105,3 +105,7 @@ trivially_copy_pass_by_ref = "deny"
unnecessary_wraps = "deny"
should_panic_without_expect = "deny"
needless_pass_by_value = "deny"
too_many_lines = "deny"
cast_possible_wrap = "deny"
cast_sign_loss = "deny"
cast_possible_truncation = "deny"
1 change: 1 addition & 0 deletions crates/proof-of-sql/benches/scaffold/querys.rs
@@ -1,3 +1,4 @@
#![allow(clippy::cast_possible_wrap)]
use super::OptionalRandBound;
use proof_of_sql::base::database::ColumnType;

1 change: 1 addition & 0 deletions crates/proof-of-sql/benches/scaffold/random_util.rs
@@ -12,6 +12,7 @@ pub type OptionalRandBound = Option<fn(usize) -> i64>;
/// Will panic if:
/// - The provided identifier cannot be parsed into an `Identifier` type.
/// - An unsupported `ColumnType` is encountered, triggering a panic in the `todo!()` macro.
#[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
pub fn generate_random_columns<'a, S: Scalar>(
alloc: &'a Bump,
rng: &mut impl Rng,
1 change: 1 addition & 0 deletions crates/proof-of-sql/examples/posql_db/main.rs
@@ -146,6 +146,7 @@ fn end_timer(instant: Instant) {
/// - **Proof Verification Failure**: Panics if the proof verification process fails.
/// - **Serialization/Deserialization Failure**: Panics if the proof cannot be serialized or deserialized.
/// - **Record Batch Conversion Failure**: Panics if the query result cannot be converted into a `RecordBatch`.
#[allow(clippy::too_many_lines)]
fn main() {
let args = CliArgs::parse();
println!("Warming up GPU...");
@@ -791,6 +791,7 @@ mod tests {
);
}

#[allow(clippy::too_many_lines)]
#[test]
fn we_cannot_perform_arithmetic_on_mismatched_metadata() {
let boolean_metadata = ColumnCommitmentMetadata {
@@ -196,6 +196,7 @@ impl ArrayRefExt for ArrayRef {
///
/// # Panics
/// - When any range is OOB, i.e. indexing 3..6 or 5..5 on array of size 2.
#[allow(clippy::too_many_lines)]
fn to_column<'a, S: Scalar>(
&'a self,
alloc: &'a Bump,
1 change: 1 addition & 0 deletions crates/proof-of-sql/src/base/database/column.rs
@@ -390,6 +390,7 @@ impl ColumnType {
}
}

#[allow(clippy::cast_possible_truncation)]
/// Returns the bit size of the column type.
#[must_use]
pub fn bit_size(&self) -> u32 {
4 changes: 4 additions & 0 deletions crates/proof-of-sql/src/base/database/column_operation.rs
@@ -2016,6 +2016,7 @@ mod test {
));
}

#[allow(clippy::too_many_lines)]
#[test]
fn we_can_try_add_decimal_columns() {
// lhs is integer and rhs is decimal with nonnegative scale
@@ -2195,6 +2196,7 @@ ));
));
}

#[allow(clippy::too_many_lines)]
#[test]
fn we_can_try_subtract_decimal_columns() {
// lhs is integer and rhs is decimal with nonnegative scale
@@ -2355,6 +2357,7 @@ ));
));
}

#[allow(clippy::too_many_lines)]
#[test]
fn we_can_try_multiply_decimal_columns() {
// lhs is integer and rhs is decimal with nonnegative scale
@@ -2535,6 +2538,7 @@ ));
));
}

#[allow(clippy::too_many_lines)]
#[test]
fn we_can_try_divide_decimal_columns() {
// lhs is integer and rhs is decimal with nonnegative scale
7 changes: 6 additions & 1 deletion crates/proof-of-sql/src/base/database/group_by_util.rs
@@ -36,6 +36,7 @@ pub enum AggregateColumnsError {
ColumnLengthMismatch,
}

#[allow(clippy::missing_panics_doc)]
/// This is a function that gives the result of a group by query similar to the following:
/// ```sql
/// SELECT <group_by[0]>, <group_by[1]>, ..., SUM(<sum_columns[0]>), SUM(<sum_columns[1]>), ...,
@@ -124,7 +125,11 @@ pub fn aggregate_columns<'a, S: Scalar>(
.collect();

// Cast the counts to something compatible with BigInt.
let count_column_out = alloc.alloc_slice_fill_iter(counts.into_iter().map(|c| c as i64));
let count_column_out = alloc.alloc_slice_fill_iter(
counts
.into_iter()
.map(|c| c.try_into().expect("Count should fit within i64")),
);

Ok(AggregatedColumns {
group_by_columns: group_by_columns_out,
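The checked conversion introduced above panics with a clear message instead of silently wrapping if a count ever fell outside the `i64` range. A minimal, self-contained sketch of the same pattern (hypothetical values, not code from the crate):

```rust
fn main() {
    let counts: Vec<usize> = vec![3, 5, 2];

    // Mirrors the change above: each count is converted with `try_into`,
    // so an out-of-range value would panic instead of wrapping negative.
    let as_i64: Vec<i64> = counts
        .iter()
        .map(|&c| c.try_into().expect("Count should fit within i64"))
        .collect();

    assert_eq!(as_i64, vec![3, 5, 2]);
}
```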
@@ -111,6 +111,7 @@ fn we_can_aggregate_columns_with_empty_group_by() {
assert_eq!(aggregate_result.min_columns, expected_min_result);
}

#[allow(clippy::too_many_lines)]
#[test]
fn we_can_aggregate_columns() {
let slice_a = &[3, 3, 3, 2, 2, 1, 1, 2, 2, 3, 3, 3];
@@ -140,6 +140,8 @@ impl<S: Scalar> TryFrom<ArrayRef> for OwnedColumn<S> {
}
impl<S: Scalar> TryFrom<&ArrayRef> for OwnedColumn<S> {
type Error = OwnedArrowConversionError;

#[allow(clippy::too_many_lines)]
/// # Panics
///
/// Will panic if downcasting fails for the following types:
@@ -67,6 +67,7 @@ impl<S: Scalar> OwnedColumn<S> {
}

/// Element-wise equality check for two columns
#[allow(clippy::too_many_lines)]
pub fn element_wise_eq(&self, rhs: &Self) -> ColumnOperationResult<Self> {
if self.len() != rhs.len() {
return Err(ColumnOperationError::DifferentColumnLength {
@@ -248,6 +249,7 @@ impl<S: Scalar> OwnedColumn<S> {
}

/// Element-wise <= check for two columns
#[allow(clippy::too_many_lines)]
pub fn element_wise_le(&self, rhs: &Self) -> ColumnOperationResult<Self> {
if self.len() != rhs.len() {
return Err(ColumnOperationError::DifferentColumnLength {
@@ -428,6 +430,7 @@ impl<S: Scalar> OwnedColumn<S> {
}

/// Element-wise >= check for two columns
#[allow(clippy::too_many_lines)]
pub fn element_wise_ge(&self, rhs: &Self) -> ColumnOperationResult<Self> {
if self.len() != rhs.len() {
return Err(ColumnOperationError::DifferentColumnLength {
@@ -611,6 +614,7 @@ impl<S: Scalar> OwnedColumn<S> {
impl<S: Scalar> Add for OwnedColumn<S> {
type Output = ColumnOperationResult<Self>;

#[allow(clippy::too_many_lines)]
fn add(self, rhs: Self) -> Self::Output {
if self.len() != rhs.len() {
return Err(ColumnOperationError::DifferentColumnLength {
@@ -804,6 +808,7 @@ impl<S: Scalar> Add for OwnedColumn<S> {
impl<S: Scalar> Sub for OwnedColumn<S> {
type Output = ColumnOperationResult<Self>;

#[allow(clippy::too_many_lines)]
fn sub(self, rhs: Self) -> Self::Output {
if self.len() != rhs.len() {
return Err(ColumnOperationError::DifferentColumnLength {
@@ -1001,6 +1006,7 @@ impl<S: Scalar> Sub for OwnedColumn<S> {
impl<S: Scalar> Mul for OwnedColumn<S> {
type Output = ColumnOperationResult<Self>;

#[allow(clippy::too_many_lines)]
fn mul(self, rhs: Self) -> Self::Output {
if self.len() != rhs.len() {
return Err(ColumnOperationError::DifferentColumnLength {
@@ -1198,6 +1204,7 @@ impl<S: Scalar> Mul for OwnedColumn<S> {
impl<S: Scalar> Div for OwnedColumn<S> {
type Output = ColumnOperationResult<Self>;

#[allow(clippy::too_many_lines)]
fn div(self, rhs: Self) -> Self::Output {
if self.len() != rhs.len() {
return Err(ColumnOperationError::DifferentColumnLength {
@@ -27,6 +27,7 @@ pub fn convert_scalar_to_i256<S: Scalar>(val: &S) -> i256 {
}
}

#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
/// Converts an arrow i256 into limbed representation and then
/// into a type implementing [Scalar]
#[must_use]
@@ -48,7 +48,7 @@ impl Default for RandomTestAccessorDescriptor {
/// - When calling `.unwrap()` on the result of `RecordBatch::try_new(schema, columns)`, which
/// will panic if the schema and columns do not align correctly or if there are any other
/// underlying errors.
#[allow(dead_code)]
#[allow(dead_code, clippy::too_many_lines)]
pub fn make_random_test_accessor_data(
rng: &mut StdRng,
cols: &[(&str, ColumnType)],
2 changes: 2 additions & 0 deletions crates/proof-of-sql/src/base/encode/scalar_varint.rs
@@ -17,6 +17,8 @@ use core::cmp::{max, Ordering};
pub fn write_scalar_varint<T: MontConfig<4>>(buf: &mut [u8], x: &MontScalar<T>) -> usize {
write_u256_varint(buf, x.zigzag())
}

#[allow(clippy::cast_possible_truncation)]
pub fn write_u256_varint(buf: &mut [u8], mut zig_x: U256) -> usize {
let mut pos = 0;

9 changes: 7 additions & 2 deletions crates/proof-of-sql/src/base/encode/varint_trait.rs
@@ -53,6 +53,7 @@ pub trait VarInt: Sized + Copy {
}
}

#[allow(clippy::cast_sign_loss)]
#[inline]
fn zigzag_encode(from: i64) -> u64 {
((from << 1) ^ (from >> 63)) as u64
@@ -61,6 +62,7 @@ fn zigzag_encode(from: i64) -> u64 {
// see: http://stackoverflow.com/a/2211086/56332
// casting required because operations like unary negation
// cannot be performed on unsigned integers
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
#[inline]
fn zigzag_decode(from: u64) -> i64 {
((from >> 1) ^ (-((from & 1) as i64)) as u64) as i64
@@ -75,7 +77,7 @@ macro_rules! impl_varint {
(self as u64).required_space()
}

#[allow(clippy::cast_lossless)]
#[allow(clippy::cast_lossless, clippy::cast_possible_truncation)]
fn decode_var(src: &[u8]) -> Option<(Self, usize)> {
let (n, s) = u64::decode_var(src)?;
// This check is required to ensure that we actually return `None` when `src` has a value that would overflow `Self`.
@@ -99,7 +101,7 @@ macro_rules! impl_varint {
(self as i64).required_space()
}

#[allow(clippy::cast_lossless)]
#[allow(clippy::cast_lossless, clippy::cast_possible_truncation)]
fn decode_var(src: &[u8]) -> Option<(Self, usize)> {
let (n, s) = i64::decode_var(src)?;
// This check is required to ensure that we actually return `None` when `src` has a value that would overflow `Self`.
@@ -185,6 +187,7 @@ impl VarInt for u64 {
}
}

#[allow(clippy::cast_possible_truncation)]
#[inline]
fn encode_var(self, dst: &mut [u8]) -> usize {
assert!(dst.len() >= self.required_space());
@@ -247,6 +250,7 @@ impl VarInt for u128 {
}

// Adapted from integer-encoding-rs. See third_party/license/integer-encoding.LICENSE
#[allow(clippy::cast_sign_loss)]
#[inline]
fn zigzag_encode_i128(from: i128) -> u128 {
((from << 1) ^ (from >> 127)) as u128
@@ -255,6 +259,7 @@ fn zigzag_encode_i128(from: i128) -> u128 {
// see: http://stackoverflow.com/a/2211086/56332
// casting required because operations like unary negation
// cannot be performed on unsigned integers
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
#[inline]
fn zigzag_decode_i128(from: u128) -> i128 {
((from >> 1) ^ (-((from & 1) as i128)) as u128) as i128
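The zigzag helpers above cast between signed and unsigned integers on purpose, which is why these lints are allowed rather than the code rewritten. A standalone sketch using the same formulas as the diff, with a round-trip check:

```rust
// ZigZag maps small-magnitude signed integers to small unsigned integers:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
fn zigzag_encode(from: i64) -> u64 {
    ((from << 1) ^ (from >> 63)) as u64
}

fn zigzag_decode(from: u64) -> i64 {
    ((from >> 1) ^ (-((from & 1) as i64)) as u64) as i64
}

fn main() {
    assert_eq!(zigzag_encode(0), 0);
    assert_eq!(zigzag_encode(-1), 1);
    assert_eq!(zigzag_encode(1), 2);
    assert_eq!(zigzag_encode(-2), 3);

    // The casts wrap by design; the encoding still round-trips at the extremes.
    for v in [0, 1, -1, 42, -42, i64::MAX, i64::MIN] {
        assert_eq!(zigzag_decode(zigzag_encode(v)), v);
    }
}
```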
16 changes: 10 additions & 6 deletions crates/proof-of-sql/src/base/math/decimal.rs
@@ -126,6 +126,7 @@ impl<S: Scalar> Decimal<S> {
}

/// Scale the decimal to the new scale factor. Negative scaling and overflow error out.
#[allow(clippy::cast_sign_loss)]
pub fn with_precision_and_scale(
&self,
new_precision: Precision,
@@ -142,6 +143,7 @@ impl<S: Scalar> Decimal<S> {
}

/// Get a decimal with given precision and scale from an i64
#[allow(clippy::cast_sign_loss)]
pub fn from_i64(value: i64, precision: Precision, scale: i8) -> DecimalResult<Self> {
const MINIMAL_PRECISION: u8 = 19;
let raw_precision = precision.value();
@@ -160,6 +162,7 @@ impl<S: Scalar> Decimal<S> {
}

/// Get a decimal with given precision and scale from an i128
#[allow(clippy::cast_sign_loss)]
pub fn from_i128(value: i128, precision: Precision, scale: i8) -> DecimalResult<Self> {
const MINIMAL_PRECISION: u8 = 39;
let raw_precision = precision.value();
@@ -243,7 +246,7 @@ mod scale_adjust_test {

assert!(try_into_to_scalar::<Curve25519Scalar>(
&decimal,
Precision::new(decimal.value().digits() as u8).unwrap(),
Precision::new(u8::try_from(decimal.value().digits()).unwrap_or(u8::MAX)).unwrap(),
target_scale
)
.is_err());
@@ -282,7 +285,7 @@ mod scale_adjust_test {

let limbs = try_into_to_scalar::<Curve25519Scalar>(
&decimal,
Precision::new(decimal.value().digits() as u8).unwrap(),
Precision::new(u8::try_from(decimal.value().digits()).unwrap_or(u8::MAX)).unwrap(),
target_scale,
)
.unwrap();
@@ -297,13 +300,14 @@ mod scale_adjust_test {
let expected_limbs = [12345, 0, 0, 0];
let limbs = try_into_to_scalar::<Curve25519Scalar>(
&decimal,
Precision::new(decimal.value().digits() as u8).unwrap(),
Precision::new(u8::try_from(decimal.value().digits()).unwrap_or(u8::MAX)).unwrap(),
target_scale,
)
.unwrap();
assert_eq!(limbs, -Curve25519Scalar::from(expected_limbs));
}

#[allow(clippy::cast_possible_wrap)]
#[test]
fn we_can_match_decimals_at_extrema() {
// a big decimal cannot scale up past the supported precision
@@ -313,7 +317,7 @@
let target_scale = 6; // now precision exceeds maximum
assert!(try_into_to_scalar::<Curve25519Scalar>(
&decimal,
Precision::new(decimal.value().digits() as u8,).unwrap(),
Precision::new(u8::try_from(decimal.value().digits()).unwrap_or(u8::MAX),).unwrap(),
target_scale
)
.is_err());
@@ -352,7 +356,7 @@
let target_scale = MAX_SUPPORTED_PRECISION as i8;
assert!(try_into_to_scalar::<Curve25519Scalar>(
&decimal,
Precision::new(decimal.value().digits() as u8,).unwrap(),
Precision::new(u8::try_from(decimal.value().digits()).unwrap_or(u8::MAX),).unwrap(),
target_scale
)
.is_ok());
@@ -372,7 +376,7 @@
let target_scale = 75;
assert!(try_into_to_scalar::<Curve25519Scalar>(
&decimal,
Precision::new(decimal.value().digits() as u8,).unwrap(),
Precision::new(u8::try_from(decimal.value().digits()).unwrap_or(u8::MAX),).unwrap(),
target_scale
)
.is_err());
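The test changes above swap `digits() as u8` for a saturating checked conversion. A small illustration (made-up value, not code from the crate) of why the two differ once the digit count exceeds `u8::MAX`:

```rust
fn main() {
    let digits: u64 = 300;

    // The old cast silently truncates modulo 256...
    assert_eq!(digits as u8, 44);

    // ...while `try_from(...).unwrap_or(u8::MAX)` saturates, so an oversized
    // digit count can no longer masquerade as a small precision.
    assert_eq!(u8::try_from(digits).unwrap_or(u8::MAX), 255);
}
```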
1 change: 1 addition & 0 deletions crates/proof-of-sql/src/base/math/log.rs
@@ -71,6 +71,7 @@ mod tests {
assert_eq!(log2_up(4u32), 2);
}

#[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
#[test]
fn test_log2_bytes_ceil() {
// 0-1 edge cases
7 changes: 6 additions & 1 deletion crates/proof-of-sql/src/base/polynomial/interpolate.rs
@@ -76,7 +76,11 @@
/// Let `d` be `evals.len() - 1` and let `f` be the polynomial such that `f(i) = evals[i]`.
/// The output of this function is the vector of coefficients of `f`, with the leading coefficient first.
/// That is, `f(x) = evals[j] * x^(d - j)`.
#[allow(clippy::missing_panics_doc)]
#[allow(
clippy::missing_panics_doc,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap
)]
// This function is guaranteed not to panic because:
// - The product in `inv()` will never be zero, as the numbers being multiplied are all non-zero by construction.
// - If there are no elements to reduce, `unwrap_or(vec![])` provides an empty vector as a safe fallback.
@@ -90,6 +94,7 @@
+ Inv<Output = Option<S>>
+ Product,
{
assert!(i32::try_from(evals.len()).is_ok());
let n = evals.len().max(1) - 1;
evals
.iter()
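The new `assert!` above records, once, the length bound that keeps the function's allowed casts from wrapping. A minimal sketch of the same guard pattern around a hypothetical helper (not the crate's API):

```rust
// Assert the length bound up front so later casts to a narrower integer
// type, permitted via `#[allow(...)]`, cannot silently wrap.
#[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)]
fn indices_as_i32(values: &[f64]) -> Vec<i32> {
    assert!(
        i32::try_from(values.len()).is_ok(),
        "too many values for i32 indexing"
    );
    (0..values.len()).map(|i| i as i32).collect()
}

fn main() {
    let v = [1.0, 2.0, 3.0];
    assert_eq!(indices_as_i32(&v), vec![0, 1, 2]);
}
```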
5 changes: 4 additions & 1 deletion crates/proof-of-sql/src/base/polynomial/interpolate_test.rs
@@ -89,7 +89,10 @@ fn interpolate_uni_poly_gives_correct_value_for_known_evaluation() {
];
for i in 0..evaluations.len() {
assert_eq!(
interpolate_uni_poly(&evaluations, Curve25519Scalar::from(i as u32)),
interpolate_uni_poly(
&evaluations,
Curve25519Scalar::from(u32::try_from(i).unwrap_or(u32::MAX))
),
evaluations[i]
);
}
2 changes: 2 additions & 0 deletions crates/proof-of-sql/src/base/scalar/mont_scalar.rs
@@ -562,6 +562,8 @@ where
MontScalar<T>: Scalar,
{
type Error = ScalarConversionError;

#[allow(clippy::cast_possible_wrap)]
fn try_from(value: MontScalar<T>) -> Result<Self, Self::Error> {
let (sign, abs): (i128, [u64; 4]) = if value > <MontScalar<T>>::MAX_SIGNED {
(-1, (-value).into())