From a60761b65de8e9c24b26cea7fc0eb41ba99f36a0 Mon Sep 17 00:00:00 2001
From: Gengar
Date: Fri, 1 Aug 2025 21:32:55 +0300
Subject: [PATCH 1/5] Update scalar.rs

---
 p256/src/arithmetic/scalar.rs | 86 ++++++++++++++++++++++++++++++-----
 1 file changed, 75 insertions(+), 11 deletions(-)

diff --git a/p256/src/arithmetic/scalar.rs b/p256/src/arithmetic/scalar.rs
index 7ef2ca0bc..dd9a3c4b8 100644
--- a/p256/src/arithmetic/scalar.rs
+++ b/p256/src/arithmetic/scalar.rs
@@ -13,7 +13,7 @@ use core::{
 };
 use elliptic_curve::{
     Curve,
-    bigint::{Limb, U256, prelude::*},
+    bigint::{Limb, U256, U384, prelude::*},
     group::ff::{self, Field, PrimeField},
     ops::{Invert, Reduce, ReduceNonZero},
     rand_core::TryRngCore,
@@ -634,6 +634,31 @@ impl Reduce<U256> for Scalar {
     }
 }
 
+impl Reduce<U384> for Scalar {
+    type Bytes = [u8; 48];
+
+    fn reduce(w: U384) -> Self {
+        // Convert U384 to U512 for use with barrett_reduce, which expects 512 bits total
+        let w_bytes = w.to_be_bytes();
+        let mut lo_bytes = [0u8; 32];
+        let mut hi_bytes = [0u8; 32];
+
+        // Copy the lower 256 bits (bytes 16-47)
+        lo_bytes.copy_from_slice(&w_bytes[16..48]);
+        // Copy the upper 128 bits to the lower part of hi (bytes 0-15 -> 16-31 of hi)
+        hi_bytes[16..32].copy_from_slice(&w_bytes[0..16]);
+
+        let lo = U256::from_be_byte_array(lo_bytes.into());
+        let hi = U256::from_be_byte_array(hi_bytes.into());
+        let w_reduced = barrett_reduce(lo, hi);
+        Self(w_reduced)
+    }
+
+    fn reduce_bytes(bytes: &[u8; 48]) -> Self {
+        Self::reduce(U384::from_be_byte_array((*bytes).into()))
+    }
+}
+
 impl ReduceNonZero<U256> for Scalar {
     fn reduce_nonzero(w: U256) -> Self {
         const ORDER_MINUS_ONE: U256 = NistP256::ORDER.wrapping_sub(&U256::ONE);
@@ -647,6 +672,21 @@ impl ReduceNonZero<U256> for Scalar {
     }
 }
 
+impl ReduceNonZero<U384> for Scalar {
+    fn reduce_nonzero(w: U384) -> Self {
+        // Reduce U384 to U256 first, then apply non-zero reduction
+        let reduced = <Self as Reduce<U384>>::reduce(w);
+        const ORDER_MINUS_ONE: U256 = NistP256::ORDER.wrapping_sub(&U256::ONE);
+        let (r, underflow) = reduced.0.borrowing_sub(&ORDER_MINUS_ONE, Limb::ZERO);
+        let underflow = Choice::from((underflow.0 >> (Limb::BITS - 1)) as u8);
+        Self(U256::conditional_select(&reduced.0, &r, !underflow).wrapping_add(&U256::ONE))
+    }
+
+    fn reduce_nonzero_bytes(bytes: &[u8; 48]) -> Self {
+        Self::reduce_nonzero(U384::from_be_byte_array((*bytes).into()))
+    }
+}
+
 impl Sum for Scalar {
     fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
         iter.reduce(Add::add).unwrap_or(Self::ZERO)
     }
 }
@@ -781,37 +821,61 @@ mod tests {
 
     #[test]
     fn reduce_nonzero() {
-        assert_eq!(Scalar::reduce_nonzero_bytes(&Array::default()).0, U256::ONE,);
-        assert_eq!(Scalar::reduce_nonzero(U256::ONE).0, U256::from_u8(2),);
-        assert_eq!(Scalar::reduce_nonzero(U256::from_u8(2)).0, U256::from_u8(3),);
+        assert_eq!(<Scalar as ReduceNonZero<U256>>::reduce_nonzero_bytes(&Array::default()).0, U256::ONE,);
+        assert_eq!(<Scalar as ReduceNonZero<U256>>::reduce_nonzero(U256::ONE).0, U256::from_u8(2),);
+        assert_eq!(<Scalar as ReduceNonZero<U256>>::reduce_nonzero(U256::from_u8(2)).0, U256::from_u8(3),);
 
-        assert_eq!(Scalar::reduce_nonzero(NistP256::ORDER).0, U256::from_u8(2),);
+        assert_eq!(<Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER).0, U256::from_u8(2),);
         assert_eq!(
-            Scalar::reduce_nonzero(NistP256::ORDER.wrapping_sub(&U256::from_u8(1))).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_sub(&U256::from_u8(1))).0,
             U256::ONE,
         );
         assert_eq!(
-            Scalar::reduce_nonzero(NistP256::ORDER.wrapping_sub(&U256::from_u8(2))).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_sub(&U256::from_u8(2))).0,
            NistP256::ORDER.wrapping_sub(&U256::ONE),
         );
         assert_eq!(
-            Scalar::reduce_nonzero(NistP256::ORDER.wrapping_sub(&U256::from_u8(3))).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_sub(&U256::from_u8(3))).0,
             NistP256::ORDER.wrapping_sub(&U256::from_u8(2)),
         );
         assert_eq!(
-            Scalar::reduce_nonzero(NistP256::ORDER.wrapping_add(&U256::ONE)).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_add(&U256::ONE)).0,
             U256::from_u8(3),
         );
         assert_eq!(
-            Scalar::reduce_nonzero(NistP256::ORDER.wrapping_add(&U256::from_u8(2))).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_add(&U256::from_u8(2))).0,
             U256::from_u8(4),
         );
     }
 
+    #[test]
+    fn reduce_nonzero_u384() {
+        use elliptic_curve::bigint::{U384, ArrayEncoding};
+
+        // Test with 48 zero bytes (384 bits)
+        let zero_bytes = [0u8; 48];
+        assert_eq!(<Scalar as ReduceNonZero<U384>>::reduce_nonzero_bytes(&zero_bytes).0, U256::ONE);
+
+        // Test with small values
+        let mut bytes = [0u8; 48];
+        bytes[47] = 1; // Set the least significant byte to 1
+        assert_eq!(<Scalar as ReduceNonZero<U384>>::reduce_nonzero(U384::from_be_byte_array(bytes.into())).0, U256::from_u8(2));
+
+        bytes[47] = 2;
+        assert_eq!(<Scalar as ReduceNonZero<U384>>::reduce_nonzero(U384::from_be_byte_array(bytes.into())).0, U256::from_u8(3));
+
+        // Test with a value that spans the full 384 bits
+        let large_value = U384::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
+        let reduced = <Scalar as ReduceNonZero<U384>>::reduce_nonzero(large_value);
+        // The result should be non-zero and within the field
+        assert_ne!(reduced.0, U256::ZERO);
+        assert!(reduced.0 < NistP256::ORDER);
+    }
+
     prop_compose! {
         fn non_zero_scalar()(bytes in any::<[u8; 32]>()) -> NonZeroScalar {
-            NonZeroScalar::reduce_nonzero_bytes(&bytes.into())
+            <NonZeroScalar as ReduceNonZero<U256>>::reduce_nonzero_bytes(&bytes.into())
         }
     }

From f8e9ac6ac8ee9c3269214a639ed775a95c14008c Mon Sep 17 00:00:00 2001
From: Gengar
Date: Wed, 13 Aug 2025 15:38:26 +0300
Subject: [PATCH 2/5] Update scalar.rs

---
 p256/src/arithmetic/scalar.rs | 52 ++++++++++++++++++++++++++---------
 1 file changed, 39 insertions(+), 13 deletions(-)

diff --git a/p256/src/arithmetic/scalar.rs b/p256/src/arithmetic/scalar.rs
index dd9a3c4b8..a3e4eb928 100644
--- a/p256/src/arithmetic/scalar.rs
+++ b/p256/src/arithmetic/scalar.rs
@@ -13,7 +13,7 @@ use core::{
 };
 use elliptic_curve::{
     Curve,
-    bigint::{Limb, U256, U384, prelude::*},
+    bigint::{Limb, U128, U256, U384, U512, prelude::*},
     group::ff::{self, Field, PrimeField},
     ops::{Invert, Reduce, ReduceNonZero},
     rand_core::TryRngCore,
@@ -634,19 +634,19 @@ impl Reduce<U256> for Scalar {
     }
 }
 
-impl Reduce<U384> for Scalar {
-    type Bytes = [u8; 48];
+impl Reduce<U512> for Scalar {
+    type Bytes = [u8; 64];
 
-    fn reduce(w: U384) -> Self {
-        // Convert U384 to U512 for use with barrett_reduce, which expects 512 bits total
+    fn reduce(w: U512) -> Self {
+        // Convert U512 to two U256s for use with barrett_reduce
         let w_bytes = w.to_be_bytes();
         let mut lo_bytes = [0u8; 32];
         let mut hi_bytes = [0u8; 32];
 
-        // Copy the lower 256 bits (bytes 16-47)
-        lo_bytes.copy_from_slice(&w_bytes[16..48]);
-        // Copy the upper 128 bits to the lower part of hi (bytes 0-15 -> 16-31 of hi)
-        hi_bytes[16..32].copy_from_slice(&w_bytes[0..16]);
+        // Copy the lower 256 bits (bytes 32-63)
+        lo_bytes.copy_from_slice(&w_bytes[32..64]);
+        // Copy the upper 256 bits (bytes 0-31)
+        hi_bytes.copy_from_slice(&w_bytes[0..32]);
 
         let lo = U256::from_be_byte_array(lo_bytes.into());
         let hi = U256::from_be_byte_array(hi_bytes.into());
@@ -654,6 +654,20 @@ impl Reduce<U384> for Scalar {
         Self(w_reduced)
     }
 
+    fn reduce_bytes(bytes: &[u8; 64]) -> Self {
+        Self::reduce(U512::from_be_byte_array((*bytes).into()))
+    }
+}
+
+impl Reduce<U384> for Scalar {
+    type Bytes = [u8; 48];
+
+    fn reduce(w: U384) -> Self {
+        // Use U512 reduction by concatenating U384 with U128::ZERO
+        let w512 = w.concat(&U128::ZERO);
+        <Self as Reduce<U512>>::reduce(w512)
+    }
+
     fn reduce_bytes(bytes: &[u8; 48]) -> Self {
         Self::reduce(U384::from_be_byte_array((*bytes).into()))
     }
@@ -672,16 +686,28 @@ impl ReduceNonZero<U256> for Scalar {
 }
 
-impl ReduceNonZero<U384> for Scalar {
-    fn reduce_nonzero(w: U384) -> Self {
-        // Reduce U384 to U256 first, then apply non-zero reduction
-        let reduced = <Self as Reduce<U384>>::reduce(w);
+impl ReduceNonZero<U512> for Scalar {
+    fn reduce_nonzero(w: U512) -> Self {
+        // Reduce U512 to U256 first, then apply non-zero reduction
+        let reduced = <Self as Reduce<U512>>::reduce(w);
         const ORDER_MINUS_ONE: U256 = NistP256::ORDER.wrapping_sub(&U256::ONE);
         let (r, underflow) = reduced.0.borrowing_sub(&ORDER_MINUS_ONE, Limb::ZERO);
         let underflow = Choice::from((underflow.0 >> (Limb::BITS - 1)) as u8);
         Self(U256::conditional_select(&reduced.0, &r, !underflow).wrapping_add(&U256::ONE))
     }
 
-    fn reduce_nonzero_bytes(bytes: &[u8; 48]) -> Self {
-        Self::reduce_nonzero(U384::from_be_byte_array((*bytes).into()))
+    fn reduce_nonzero_bytes(bytes: &[u8; 64]) -> Self {
+        Self::reduce_nonzero(U512::from_be_byte_array((*bytes).into()))
+    }
+}
+
+impl ReduceNonZero<U384> for Scalar {
+    fn reduce_nonzero(w: U384) -> Self {
+        // Use U512 non-zero reduction by concatenating U384 with U128::ZERO
+        let w512 = w.concat(&U128::ZERO);
+        <Self as ReduceNonZero<U512>>::reduce_nonzero(w512)
+    }
+
     fn reduce_nonzero_bytes(bytes: &[u8; 48]) -> Self {
         Self::reduce_nonzero(U384::from_be_byte_array((*bytes).into()))
     }

From 32909843de8dbb317314d913964bd778ff697c10 Mon Sep 17 00:00:00 2001
From: Gengar
Date: Wed, 13 Aug 2025 16:53:47 +0300
Subject: [PATCH 3/5] Update scalar.rs

---
 p256/src/arithmetic/scalar.rs | 133 +++++++++++++++++++++-------------
 1 file changed, 81 insertions(+), 52 deletions(-)

diff --git a/p256/src/arithmetic/scalar.rs b/p256/src/arithmetic/scalar.rs
index 786545f2a..f83a5056c 100644
--- a/p256/src/arithmetic/scalar.rs
+++ b/p256/src/arithmetic/scalar.rs
@@ -13,7 +13,7 @@ use core::{
 };
 use elliptic_curve::{
     Curve,
-    bigint::{Limb, U128, U256, U384, U512, prelude::*},
+    bigint::{Limb, U256, U384, U512, prelude::*},
     group::ff::{self, Field, PrimeField},
     ops::{Invert, Reduce, ReduceNonZero},
     rand_core::TryRngCore,
@@ -636,41 +636,33 @@ impl Reduce<U256> for Scalar {
 }
 
 impl Reduce<U512> for Scalar {
-    type Bytes = [u8; 64];
-
-    fn reduce(w: U512) -> Self {
+    fn reduce(w: &U512) -> Self {
         // Convert U512 to two U256s for use with barrett_reduce
         let w_bytes = w.to_be_bytes();
         let mut lo_bytes = [0u8; 32];
         let mut hi_bytes = [0u8; 32];
-
+
         // Copy the lower 256 bits (bytes 32-63)
         lo_bytes.copy_from_slice(&w_bytes[32..64]);
         // Copy the upper 256 bits (bytes 0-31)
         hi_bytes.copy_from_slice(&w_bytes[0..32]);
-
+
         let lo = U256::from_be_byte_array(lo_bytes.into());
         let hi = U256::from_be_byte_array(hi_bytes.into());
         let w_reduced = barrett_reduce(lo, hi);
         Self(w_reduced)
     }
-
-    fn reduce_bytes(bytes: &[u8; 64]) -> Self {
-        Self::reduce(U512::from_be_byte_array((*bytes).into()))
-    }
 }
 
 impl Reduce<U384> for Scalar {
-    type Bytes = [u8; 48];
-
-    fn reduce(w: U384) -> Self {
-        // Use U512 reduction by concatenating U384 with U128::ZERO
-        let w512 = w.concat(&U128::ZERO);
-        <Self as Reduce<U512>>::reduce(w512)
-    }
-
-    fn reduce_bytes(bytes: &[u8; 48]) -> Self {
-        Self::reduce(U384::from_be_byte_array((*bytes).into()))
-    }
+    fn reduce(w: &U384) -> Self {
+        // Convert U384 to U512 by zero-padding the high bits
+        let w_bytes = w.to_be_bytes();
+        let mut w512_bytes = [0u8; 64];
+        // Copy U384 bytes to the lower part of U512 (384 bits = 48 bytes)
+        w512_bytes[16..64].copy_from_slice(&w_bytes);
+        let w512 = U512::from_be_byte_array(w512_bytes.into());
+        <Self as Reduce<U512>>::reduce(&w512)
+    }
 }
 
@@ -691,7 +683,7 @@ impl ReduceNonZero<U256> for Scalar {
 }
 
 impl ReduceNonZero<U512> for Scalar {
-    fn reduce_nonzero(w: U512) -> Self {
+    fn reduce_nonzero(w: &U512) -> Self {
         // Reduce U512 to U256 first, then apply non-zero reduction
         let reduced = <Self as Reduce<U512>>::reduce(w);
         const ORDER_MINUS_ONE: U256 = NistP256::ORDER.wrapping_sub(&U256::ONE);
         let (r, underflow) = reduced.0.borrowing_sub(&ORDER_MINUS_ONE, Limb::ZERO);
         let underflow = Choice::from((underflow.0 >> (Limb::BITS - 1)) as u8);
         Self(U256::conditional_select(&reduced.0, &r, !underflow).wrapping_add(&U256::ONE))
     }
-
-    fn reduce_nonzero_bytes(bytes: &[u8; 64]) -> Self {
-        Self::reduce_nonzero(U512::from_be_byte_array((*bytes).into()))
-    }
 }
 
 impl ReduceNonZero<U384> for Scalar {
-    fn reduce_nonzero(w: U384) -> Self {
-        // Use U512 non-zero reduction by concatenating U384 with U128::ZERO
-        let w512 = w.concat(&U128::ZERO);
-        <Self as ReduceNonZero<U512>>::reduce_nonzero(w512)
-    }
-
-    fn reduce_nonzero_bytes(bytes: &[u8; 48]) -> Self {
-        Self::reduce_nonzero(U384::from_be_byte_array((*bytes).into()))
-    }
+    fn reduce_nonzero(w: &U384) -> Self {
+        // Convert U384 to U512 by zero-padding the high bits
+        let w_bytes = w.to_be_bytes();
+        let mut w512_bytes = [0u8; 64];
+        // Copy U384 bytes to the lower part of U512 (384 bits = 48 bytes)
+        w512_bytes[16..64].copy_from_slice(&w_bytes);
+        let w512 = U512::from_be_byte_array(w512_bytes.into());
+        <Self as ReduceNonZero<U512>>::reduce_nonzero(&w512)
+    }
 }
 
@@ -785,7 +773,7 @@ mod tests {
     use crate::{FieldBytes, NistP256, NonZeroScalar, SecretKey};
     use elliptic_curve::{
         Curve,
-        array::Array,
+        bigint::ArrayEncoding,
         group::ff::{Field, PrimeField},
         ops::{BatchInvert, ReduceNonZero},
     };
@@ -851,53 +839,93 @@
 
     #[test]
     fn reduce_nonzero() {
-        assert_eq!(<Scalar as ReduceNonZero<U256>>::reduce_nonzero_bytes(&Array::default()).0, U256::ONE,);
-        assert_eq!(<Scalar as ReduceNonZero<U256>>::reduce_nonzero(U256::ONE).0, U256::from_u8(2),);
-        assert_eq!(<Scalar as ReduceNonZero<U256>>::reduce_nonzero(U256::from_u8(2)).0, U256::from_u8(3),);
+        assert_eq!(
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(&U256::ZERO).0,
+            U256::ONE,
+        );
+        assert_eq!(
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(&U256::ONE).0,
+            U256::from_u8(2),
+        );
+        assert_eq!(
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(&U256::from_u8(2)).0,
+            U256::from_u8(3),
+        );
 
-        assert_eq!(<Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER).0, U256::from_u8(2),);
         assert_eq!(
-            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_sub(&U256::from_u8(1))).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(&NistP256::ORDER).0,
+            U256::from_u8(2),
+        );
+        assert_eq!(
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(
+                &NistP256::ORDER.wrapping_sub(&U256::from_u8(1))
+            )
+            .0,
             U256::ONE,
         );
         assert_eq!(
-            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_sub(&U256::from_u8(2))).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(
+                &NistP256::ORDER.wrapping_sub(&U256::from_u8(2))
+            )
+            .0,
             NistP256::ORDER.wrapping_sub(&U256::ONE),
         );
         assert_eq!(
-            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_sub(&U256::from_u8(3))).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(
+                &NistP256::ORDER.wrapping_sub(&U256::from_u8(3))
+            )
+            .0,
             NistP256::ORDER.wrapping_sub(&U256::from_u8(2)),
         );
         assert_eq!(
-            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_add(&U256::ONE)).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(
+                &NistP256::ORDER.wrapping_add(&U256::ONE)
+            )
+            .0,
             U256::from_u8(3),
         );
         assert_eq!(
-            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(NistP256::ORDER.wrapping_add(&U256::from_u8(2))).0,
+            <Scalar as ReduceNonZero<U256>>::reduce_nonzero(
+                &NistP256::ORDER.wrapping_add(&U256::from_u8(2))
+            )
+            .0,
             U256::from_u8(4),
         );
     }
 
     #[test]
     fn reduce_nonzero_u384() {
-        use elliptic_curve::bigint::{U384, ArrayEncoding};
-
+        use elliptic_curve::bigint::{ArrayEncoding, U384};
+
         // Test with 48 zero bytes (384 bits)
-        let zero_bytes = [0u8; 48];
-        assert_eq!(<Scalar as ReduceNonZero<U384>>::reduce_nonzero_bytes(&zero_bytes).0, U256::ONE);
+        let zero_u384 = U384::ZERO;
+        assert_eq!(
+            <Scalar as ReduceNonZero<U384>>::reduce_nonzero(&zero_u384).0,
+            U256::ONE
+        );
 
         // Test with small values
         let mut bytes = [0u8; 48];
         bytes[47] = 1; // Set the least significant byte to 1
-        assert_eq!(<Scalar as ReduceNonZero<U384>>::reduce_nonzero(U384::from_be_byte_array(bytes.into())).0, U256::from_u8(2));
+        let u384_val = U384::from_be_byte_array(bytes.into());
+        assert_eq!(
+            <Scalar as ReduceNonZero<U384>>::reduce_nonzero(&u384_val).0,
+            U256::from_u8(2)
+        );
 
         bytes[47] = 2;
-        assert_eq!(<Scalar as ReduceNonZero<U384>>::reduce_nonzero(U384::from_be_byte_array(bytes.into())).0, U256::from_u8(3));
+        let u384_val2 = U384::from_be_byte_array(bytes.into());
+        assert_eq!(
+            <Scalar as ReduceNonZero<U384>>::reduce_nonzero(&u384_val2).0,
+            U256::from_u8(3)
+        );
 
         // Test with a value that spans the full 384 bits
-        let large_value = U384::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
-        let reduced = <Scalar as ReduceNonZero<U384>>::reduce_nonzero(large_value);
+        let large_value = U384::from_be_hex(
+            "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+        );
+        let reduced = <Scalar as ReduceNonZero<U384>>::reduce_nonzero(&large_value);
         // The result should be non-zero and within the field
         assert_ne!(reduced.0, U256::ZERO);
         assert!(reduced.0 < NistP256::ORDER);
     }
 
     prop_compose! {
         fn non_zero_scalar()(bytes in any::<[u8; 32]>()) -> NonZeroScalar {
-            <NonZeroScalar as ReduceNonZero<U256>>::reduce_nonzero_bytes(&bytes.into())
+            let uint = U256::from_be_byte_array(bytes.into());
+            <NonZeroScalar as ReduceNonZero<U256>>::reduce_nonzero(&uint)
         }
     }

From d57beaac7ea47a7d5044fe50bb55f1c7126cc473 Mon Sep 17 00:00:00 2001
From: Gengar
Date: Wed, 13 Aug 2025 18:37:38 +0300
Subject: [PATCH 4/5] Update p256/src/arithmetic/scalar.rs

Co-authored-by: Tony Arcieri
---
 p256/src/arithmetic/scalar.rs | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/p256/src/arithmetic/scalar.rs b/p256/src/arithmetic/scalar.rs
index f83a5056c..bb2ed4974 100644
--- a/p256/src/arithmetic/scalar.rs
+++ b/p256/src/arithmetic/scalar.rs
@@ -637,18 +637,7 @@ impl Reduce<U256> for Scalar {
 
 impl Reduce<U512> for Scalar {
     fn reduce(w: &U512) -> Self {
-        // Convert U512 to two U256s for use with barrett_reduce
-        let w_bytes = w.to_be_bytes();
-        let mut lo_bytes = [0u8; 32];
-        let mut hi_bytes = [0u8; 32];
-
-        // Copy the lower 256 bits (bytes 32-63)
-        lo_bytes.copy_from_slice(&w_bytes[32..64]);
-        // Copy the upper 256 bits (bytes 0-31)
-        hi_bytes.copy_from_slice(&w_bytes[0..32]);
-
-        let lo = U256::from_be_byte_array(lo_bytes.into());
-        let hi = U256::from_be_byte_array(hi_bytes.into());
+        let (lo, hi) = w.split();
         let w_reduced = barrett_reduce(lo, hi);
         Self(w_reduced)
     }

From 4fc59c2563f3427761dcabb3abd9e48c0dd06c68 Mon Sep 17 00:00:00 2001
From: Gengar
Date: Wed, 13 Aug 2025 18:37:46 +0300
Subject: [PATCH 5/5] Update p256/src/arithmetic/scalar.rs

Co-authored-by: Tony Arcieri
---
 p256/src/arithmetic/scalar.rs | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/p256/src/arithmetic/scalar.rs b/p256/src/arithmetic/scalar.rs
index bb2ed4974..3d741bdeb 100644
--- a/p256/src/arithmetic/scalar.rs
+++ b/p256/src/arithmetic/scalar.rs
@@ -645,13 +645,7 @@ impl Reduce<U512> for Scalar {
 
 impl Reduce<U384> for Scalar {
     fn reduce(w: &U384) -> Self {
-        // Convert U384 to U512 by zero-padding the high bits
-        let w_bytes = w.to_be_bytes();
-        let mut w512_bytes = [0u8; 64];
-        // Copy U384 bytes to the lower part of U512 (384 bits = 48 bytes)
-        w512_bytes[16..64].copy_from_slice(&w_bytes);
-        let w512 = U512::from_be_byte_array(w512_bytes.into());
-        <Self as Reduce<U512>>::reduce(&w512)
+        <Self as Reduce<U512>>::reduce(&w.concat(&U128::ZERO))
     }
 }
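
Reviewer note (sketch, not part of the patch series): a minimal downstream
consumer of the final API from PATCH 3-5 might look like the snippet below,
turning 48 bytes of wide hash output into a P-256 scalar. Reducing a 384-bit
value modulo the 256-bit order keeps the bias of the result negligible, which
is the usual motivation for wide-reduction impls like these. The helper name
`scalar_from_wide_bytes` is hypothetical, and the trait paths assume the
pre-release `elliptic-curve` API these patches target.

    use p256::{
        Scalar,
        elliptic_curve::{
            bigint::{ArrayEncoding, U384},
            ops::Reduce,
        },
    };

    /// Hypothetical helper: interpret 48 big-endian bytes as a 384-bit
    /// integer and reduce it modulo the P-256 group order n.
    fn scalar_from_wide_bytes(wide: &[u8; 48]) -> Scalar {
        let w = U384::from_be_byte_array((*wide).into());
        // After PATCH 3, `Reduce::reduce` takes its input by reference.
        <Scalar as Reduce<U384>>::reduce(&w)
    }

Under the same assumptions, the `w.concat(&U128::ZERO)` form from PATCH 5 and
the explicit byte-copy zero-padding it replaced produce the same 512-bit
value, so the simplifications in PATCH 4 and 5 change no behavior.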