diff --git a/.github/workflows/chacha20.yml b/.github/workflows/chacha20.yml
index c54a0004..281510e6 100644
--- a/.github/workflows/chacha20.yml
+++ b/.github/workflows/chacha20.yml
@@ -25,7 +25,7 @@ jobs:
     strategy:
       matrix:
         rust:
-          - 1.49.0 # MSRV
+          - 1.51.0 # MSRV
           - stable
         target:
           - thumbv7em-none-eabi
@@ -53,7 +53,7 @@ jobs:
         include:
           # 32-bit Linux
           - target: i686-unknown-linux-gnu
-            rust: 1.49.0 # MSRV
+            rust: 1.51.0 # MSRV
             deps: sudo apt update && sudo apt install gcc-multilib
           - target: i686-unknown-linux-gnu
             rust: stable
@@ -61,7 +61,7 @@ jobs:
 
           # 64-bit Linux
           - target: x86_64-unknown-linux-gnu
-            rust: 1.49.0 # MSRV
+            rust: 1.51.0 # MSRV
           - target: x86_64-unknown-linux-gnu
             rust: stable
     steps:
@@ -90,7 +90,7 @@ jobs:
         include:
           # 32-bit Linux
           - target: i686-unknown-linux-gnu
-            rust: 1.49.0 # MSRV
+            rust: 1.51.0 # MSRV
             deps: sudo apt update && sudo apt install gcc-multilib
           - target: i686-unknown-linux-gnu
             rust: stable
@@ -98,7 +98,7 @@ jobs:
 
           # 64-bit Linux
           - target: x86_64-unknown-linux-gnu
-            rust: 1.49.0 # MSRV
+            rust: 1.51.0 # MSRV
           - target: x86_64-unknown-linux-gnu
             rust: stable
     steps:
@@ -126,7 +126,7 @@ jobs:
         include:
           # 32-bit Linux
           - target: i686-unknown-linux-gnu
-            rust: 1.49.0 # MSRV
+            rust: 1.51.0 # MSRV
             deps: sudo apt update && sudo apt install gcc-multilib
           - target: i686-unknown-linux-gnu
             rust: stable
@@ -134,7 +134,7 @@ jobs:
 
           # 64-bit Linux
           - target: x86_64-unknown-linux-gnu
-            rust: 1.49.0 # MSRV
+            rust: 1.51.0 # MSRV
           - target: x86_64-unknown-linux-gnu
             rust: stable
     steps:
@@ -160,13 +160,13 @@ jobs:
         include:
           # ARM64
           - target: aarch64-unknown-linux-gnu
-            rust: 1.49.0 # MSRV
+            rust: 1.51.0 # MSRV
           - target: aarch64-unknown-linux-gnu
             rust: stable
 
           # PPC32
           - target: powerpc-unknown-linux-gnu
-            rust: 1.49.0 # MSRV
+            rust: 1.51.0 # MSRV
           - target: powerpc-unknown-linux-gnu
             rust: stable
 
diff --git a/.github/workflows/workspace.yml b/.github/workflows/workspace.yml
index d3aa8545..817ccbd0 100644
--- a/.github/workflows/workspace.yml
+++ b/.github/workflows/workspace.yml
@@ -16,7 +16,7 @@ jobs:
       - uses: actions/checkout@v1
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: 1.49.0 # MSRV (highest in repo)
+          toolchain: 1.51.0 # MSRV (highest in repo)
           components: clippy
           override: true
           profile: minimal
diff --git a/chacha20/README.md b/chacha20/README.md
index 86d4016e..4137c950 100644
--- a/chacha20/README.md
+++ b/chacha20/README.md
@@ -62,7 +62,7 @@ stream cipher itself) are designed to execute in constant time.
 
 ## Minimum Supported Rust Version
 
-Rust **1.49** or higher.
+Rust **1.51** or higher.
 
 Minimum supported Rust version can be changed in the future, but it will be
 done with a minor version bump.
@@ -94,7 +94,7 @@ dual licensed as above, without any additional terms or conditions.
 [docs-image]: https://docs.rs/chacha20/badge.svg
 [docs-link]: https://docs.rs/chacha20/
 [license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg
-[rustc-image]: https://img.shields.io/badge/rustc-1.49+-blue.svg
+[rustc-image]: https://img.shields.io/badge/rustc-1.51+-blue.svg
 [chat-image]: https://img.shields.io/badge/zulip-join_chat-blue.svg
 [chat-link]: https://rustcrypto.zulipchat.com/#narrow/stream/260049-stream-ciphers
 [build-image]: https://github.com/RustCrypto/stream-ciphers/workflows/chacha20/badge.svg?branch=master&event=push
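Note: the MSRV bump to 1.51 lines up with the stabilization of `min_const_generics` in that release, which the reworked AVX2 backend below appears to rely on for helpers like `rol::<BY, REST>` (this linkage is an inference from the diff, not stated in it). A scalar sketch of the same const-generic shift/XOR rotation pattern:

```rust
// Hypothetical standalone sketch: a rotate-left built from the shift/XOR
// pattern used by `StateWord::rol::<BY, REST>` below; requires Rust 1.51+.
fn rol32<const BY: u32, const REST: u32>(x: u32) -> u32 {
    (x << BY) ^ (x >> REST)
}

fn main() {
    // Rotating left by 12 is a left shift by 12 XORed with a right shift by 20.
    assert_eq!(rol32::<12, 20>(0xDEAD_BEEF), 0xDEAD_BEEF_u32.rotate_left(12));
}
```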
diff --git a/chacha20/src/backend/autodetect.rs b/chacha20/src/backend/autodetect.rs
index beb91cd2..1d5f7950 100644
--- a/chacha20/src/backend/autodetect.rs
+++ b/chacha20/src/backend/autodetect.rs
@@ -8,7 +8,9 @@ use core::mem::ManuallyDrop;
 
 /// Size of buffers passed to `generate` and `apply_keystream` for this
 /// backend, which operates on two blocks in parallel for optimal performance.
-pub(crate) const BUFFER_SIZE: usize = BLOCK_SIZE * 2;
+/// The backend consumes four blocks at a time, so that the AVX2 implementation
+/// can additionally pipeline the two pairs of blocks for better ILP.
+pub(crate) const BUFFER_SIZE: usize = BLOCK_SIZE * 4;
 
 cpufeatures::new!(avx2_cpuid, "avx2");
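The `StateWord` union in the AVX2 backend below gives the same state bytes two views: four 128-bit blocks or two 256-bit lanes. A portable model of that aliasing, with plain integer arrays standing in for `__m128i`/`__m256i` (illustration only, not the crate's code):

```rust
// Two interpretations of the same 512 bits of state, mirroring
// `StateWord { blocks: [__m128i; 4], avx: [__m256i; 2] }`.
#[repr(C)]
union Lanes {
    halves: [[u64; 2]; 4], // four 128-bit views ("blocks")
    wides: [[u64; 4]; 2],  // two 256-bit views ("avx")
}

fn main() {
    let l = Lanes {
        halves: [[1, 2], [3, 4], [5, 6], [7, 8]],
    };
    // Reading the other variant is sound here: every bit pattern is valid
    // for both interpretations, and both cover the same 64 bytes.
    let w = unsafe { l.wides };
    assert_eq!(w, [[1, 2, 3, 4], [5, 6, 7, 8]]);
}
```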
diff --git a/chacha20/src/backend/avx2.rs b/chacha20/src/backend/avx2.rs
index a5bed3dd..94b6908f 100644
--- a/chacha20/src/backend/avx2.rs
+++ b/chacha20/src/backend/avx2.rs
@@ -17,15 +17,106 @@ use core::arch::x86::*;
 #[cfg(target_arch = "x86_64")]
 use core::arch::x86_64::*;
 
+/// The number of blocks processed per invocation by this backend.
+const BLOCKS: usize = 4;
+
 /// Helper union for accessing per-block state.
 ///
 /// ChaCha20 block state is stored in four 128-bit words, so we can process two blocks in
 /// parallel. We store the state words as a union to enable cheap transformations between
 /// their interpretations.
+///
+/// Additionally, we process four blocks at a time to take advantage of ILP.
 #[derive(Clone, Copy)]
 union StateWord {
-    blocks: [__m128i; 2],
-    avx: __m256i,
+    blocks: [__m128i; BLOCKS],
+    avx: [__m256i; BLOCKS / 2],
+}
+
+impl StateWord {
+    #[inline]
+    #[target_feature(enable = "avx2")]
+    unsafe fn add_assign_epi32(&mut self, rhs: &Self) {
+        self.avx = [
+            _mm256_add_epi32(self.avx[0], rhs.avx[0]),
+            _mm256_add_epi32(self.avx[1], rhs.avx[1]),
+        ];
+    }
+
+    #[inline]
+    #[target_feature(enable = "avx2")]
+    unsafe fn xor_assign(&mut self, rhs: &Self) {
+        self.avx = [
+            _mm256_xor_si256(self.avx[0], rhs.avx[0]),
+            _mm256_xor_si256(self.avx[1], rhs.avx[1]),
+        ];
+    }
+
+    #[inline]
+    #[target_feature(enable = "avx2")]
+    unsafe fn shuffle_epi32<const MASK: i32>(&mut self) {
+        self.avx = [
+            _mm256_shuffle_epi32(self.avx[0], MASK),
+            _mm256_shuffle_epi32(self.avx[1], MASK),
+        ];
+    }
+
+    #[inline]
+    #[target_feature(enable = "avx2")]
+    unsafe fn rol<const BY: i32, const REST: i32>(&mut self) {
+        self.avx = [
+            _mm256_xor_si256(
+                _mm256_slli_epi32(self.avx[0], BY),
+                _mm256_srli_epi32(self.avx[0], REST),
+            ),
+            _mm256_xor_si256(
+                _mm256_slli_epi32(self.avx[1], BY),
+                _mm256_srli_epi32(self.avx[1], REST),
+            ),
+        ];
+    }
+
+    #[inline]
+    #[target_feature(enable = "avx2")]
+    unsafe fn rol_8(&mut self) {
+        self.avx = [
+            _mm256_shuffle_epi8(
+                self.avx[0],
+                _mm256_set_epi8(
+                    14, 13, 12, 15, 10, 9, 8, 11, 6, 5, 4, 7, 2, 1, 0, 3, 14, 13, 12, 15, 10, 9, 8,
+                    11, 6, 5, 4, 7, 2, 1, 0, 3,
+                ),
+            ),
+            _mm256_shuffle_epi8(
+                self.avx[1],
+                _mm256_set_epi8(
+                    14, 13, 12, 15, 10, 9, 8, 11, 6, 5, 4, 7, 2, 1, 0, 3, 14, 13, 12, 15, 10, 9, 8,
+                    11, 6, 5, 4, 7, 2, 1, 0, 3,
+                ),
+            ),
+        ];
+    }
+
+    #[inline]
+    #[target_feature(enable = "avx2")]
+    unsafe fn rol_16(&mut self) {
+        self.avx = [
+            _mm256_shuffle_epi8(
+                self.avx[0],
+                _mm256_set_epi8(
+                    13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2, 13, 12, 15, 14, 9, 8, 11,
+                    10, 5, 4, 7, 6, 1, 0, 3, 2,
+                ),
+            ),
+            _mm256_shuffle_epi8(
+                self.avx[1],
+                _mm256_set_epi8(
+                    13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2, 13, 12, 15, 14, 9, 8, 11,
+                    10, 5, 4, 7, 6, 1, 0, 3, 2,
+                ),
+            ),
+        ];
+    }
+}
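For reference, the helper methods above are lane-wise forms of the scalar ChaCha quarter-round operations from RFC 8439, §2.1 (`add_assign_epi32` is the wrapping add, `xor_assign` the XOR, and `rol`/`rol_8`/`rol_16` the rotations):

```rust
// Scalar ChaCha quarter-round (RFC 8439, §2.1); the AVX2 helpers apply these
// same four steps to eight 32-bit lanes per vector at once.
fn quarter_round(a: &mut u32, b: &mut u32, c: &mut u32, d: &mut u32) {
    *a = a.wrapping_add(*b); *d ^= *a; *d = d.rotate_left(16);
    *c = c.wrapping_add(*d); *b ^= *c; *b = b.rotate_left(12);
    *a = a.wrapping_add(*b); *d ^= *a; *d = d.rotate_left(8);
    *c = c.wrapping_add(*d); *b ^= *c; *b = b.rotate_left(7);
}
```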
 
 /// The ChaCha20 core function (AVX2 accelerated implementation for x86/x86_64)
@@ -63,7 +154,7 @@ impl Core {
         unsafe {
             let (mut v0, mut v1, mut v2) = (self.v0, self.v1, self.v2);
             let mut v3 = iv_setup(self.iv, counter);
-            self.rounds(&mut v0.avx, &mut v1.avx, &mut v2.avx, &mut v3.avx);
+            self.rounds(&mut v0, &mut v1, &mut v2, &mut v3);
             store(v0, v1, v2, v3, output);
         }
     }
@@ -77,24 +168,17 @@ impl Core {
         unsafe {
             let (mut v0, mut v1, mut v2) = (self.v0, self.v1, self.v2);
             let mut v3 = iv_setup(self.iv, counter);
-            self.rounds(&mut v0.avx, &mut v1.avx, &mut v2.avx, &mut v3.avx);
-
-            for (chunk, a) in output[..BLOCK_SIZE]
-                .chunks_mut(0x10)
-                .zip([v0, v1, v2, v3].iter().map(|s| s.blocks[0]))
-            {
-                let b = _mm_loadu_si128(chunk.as_ptr() as *const __m128i);
-                let out = _mm_xor_si128(a, b);
-                _mm_storeu_si128(chunk.as_mut_ptr() as *mut __m128i, out);
-            }
+            self.rounds(&mut v0, &mut v1, &mut v2, &mut v3);
 
-            for (chunk, a) in output[BLOCK_SIZE..]
-                .chunks_mut(0x10)
-                .zip([v0, v1, v2, v3].iter().map(|s| s.blocks[1]))
-            {
-                let b = _mm_loadu_si128(chunk.as_ptr() as *const __m128i);
-                let out = _mm_xor_si128(a, b);
-                _mm_storeu_si128(chunk.as_mut_ptr() as *mut __m128i, out);
+            for i in 0..BLOCKS {
+                for (chunk, a) in output[i * BLOCK_SIZE..(i + 1) * BLOCK_SIZE]
+                    .chunks_mut(0x10)
+                    .zip([v0, v1, v2, v3].iter().map(|s| s.blocks[i]))
+                {
+                    let b = _mm_loadu_si128(chunk.as_ptr() as *const __m128i);
+                    let out = _mm_xor_si128(a, b);
+                    _mm_storeu_si128(chunk.as_mut_ptr() as *mut __m128i, out);
+                }
             }
         }
     }
@@ -103,10 +187,10 @@ impl Core {
     #[inline]
     #[target_feature(enable = "avx2")]
     unsafe fn rounds(
         &self,
-        v0: &mut __m256i,
-        v1: &mut __m256i,
-        v2: &mut __m256i,
-        v3: &mut __m256i,
+        v0: &mut StateWord,
+        v1: &mut StateWord,
+        v2: &mut StateWord,
+        v3: &mut StateWord,
     ) {
         let v3_orig = *v3;
 
@@ -114,10 +198,10 @@ impl Core {
             double_quarter_round(v0, v1, v2, v3);
         }
 
-        *v0 = _mm256_add_epi32(*v0, self.v0.avx);
-        *v1 = _mm256_add_epi32(*v1, self.v1.avx);
-        *v2 = _mm256_add_epi32(*v2, self.v2.avx);
-        *v3 = _mm256_add_epi32(*v3, v3_orig);
+        v0.add_assign_epi32(&self.v0);
+        v1.add_assign_epi32(&self.v1);
+        v2.add_assign_epi32(&self.v2);
+        v3.add_assign_epi32(&v3_orig);
     }
 }
 
@@ -130,9 +214,15 @@ unsafe fn key_setup(key: &[u8; KEY_SIZE]) -> (StateWord, StateWord, StateWord) {
     let v2 = _mm_loadu_si128(key.as_ptr().offset(0x10) as *const __m128i);
 
     (
-        StateWord { blocks: [v0, v0] },
-        StateWord { blocks: [v1, v1] },
-        StateWord { blocks: [v2, v2] },
+        StateWord {
+            blocks: [v0, v0, v0, v0],
+        },
+        StateWord {
+            blocks: [v1, v1, v1, v1],
+        },
+        StateWord {
+            blocks: [v2, v2, v2, v2],
+        },
     )
 }
 
@@ -147,7 +237,12 @@ unsafe fn iv_setup(iv: [i32; 2], counter: u64) -> StateWord {
     );
 
     StateWord {
-        blocks: [s3, _mm_add_epi64(s3, _mm_set_epi64x(0, 1))],
+        blocks: [
+            s3,
+            _mm_add_epi64(s3, _mm_set_epi64x(0, 1)),
+            _mm_add_epi64(s3, _mm_set_epi64x(0, 2)),
+            _mm_add_epi64(s3, _mm_set_epi64x(0, 3)),
+        ],
     }
 }
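A quick model of what `iv_setup` now produces: each `_mm_add_epi64(s3, _mm_set_epi64x(0, n))` adds `n` to the low 64-bit lane of `s3`, which holds the block counter, so one buffer covers four consecutive counters (scalar sketch, not the crate's code):

```rust
// Scalar model of the counter seeding done by `iv_setup` above.
fn block_counters(counter: u64) -> [u64; 4] {
    [
        counter,
        counter.wrapping_add(1),
        counter.wrapping_add(2),
        counter.wrapping_add(3),
    ]
}

fn main() {
    assert_eq!(block_counters(7), [7, 8, 9, 10]);
}
```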
@@ -157,24 +252,24 @@ unsafe fn iv_setup(iv: [i32; 2], counter: u64) -> StateWord {
 unsafe fn store(v0: StateWord, v1: StateWord, v2: StateWord, v3: StateWord, output: &mut [u8]) {
     debug_assert_eq!(output.len(), BUFFER_SIZE);
 
-    for (chunk, v) in output[..BLOCK_SIZE]
-        .chunks_mut(0x10)
-        .zip([v0, v1, v2, v3].iter().map(|s| s.blocks[0]))
-    {
-        _mm_storeu_si128(chunk.as_mut_ptr() as *mut __m128i, v);
-    }
-
-    for (chunk, v) in output[BLOCK_SIZE..]
-        .chunks_mut(0x10)
-        .zip([v0, v1, v2, v3].iter().map(|s| s.blocks[1]))
-    {
-        _mm_storeu_si128(chunk.as_mut_ptr() as *mut __m128i, v);
+    for i in 0..BLOCKS {
+        for (chunk, v) in output[i * BLOCK_SIZE..(i + 1) * BLOCK_SIZE]
+            .chunks_mut(0x10)
+            .zip([v0, v1, v2, v3].iter().map(|s| s.blocks[i]))
+        {
+            _mm_storeu_si128(chunk.as_mut_ptr() as *mut __m128i, v);
+        }
     }
 }
 
 #[inline]
 #[target_feature(enable = "avx2")]
-unsafe fn double_quarter_round(a: &mut __m256i, b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) {
+unsafe fn double_quarter_round(
+    a: &mut StateWord,
+    b: &mut StateWord,
+    c: &mut StateWord,
+    d: &mut StateWord,
+) {
     add_xor_rot(a, b, c, d);
     rows_to_cols(a, b, c, d);
     add_xor_rot(a, b, c, d);
@@ -218,11 +313,16 @@ unsafe fn double_quarter_round(a: &mut __m256i, b: &mut __m256i, c: &mut __m256i
 /// - https://github.com/floodyberry/chacha-opt/blob/0ab65cb99f5016633b652edebaf3691ceb4ff753/chacha_blocks_ssse3-64.S#L639-L643
 #[inline]
 #[target_feature(enable = "avx2")]
-unsafe fn rows_to_cols(a: &mut __m256i, _b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) {
+unsafe fn rows_to_cols(
+    a: &mut StateWord,
+    _b: &mut StateWord,
+    c: &mut StateWord,
+    d: &mut StateWord,
+) {
     // c = ROR256_B(c); d = ROR256_C(d); a = ROR256_D(a);
-    *c = _mm256_shuffle_epi32(*c, 0b_00_11_10_01); // _MM_SHUFFLE(0, 3, 2, 1)
-    *d = _mm256_shuffle_epi32(*d, 0b_01_00_11_10); // _MM_SHUFFLE(1, 0, 3, 2)
-    *a = _mm256_shuffle_epi32(*a, 0b_10_01_00_11); // _MM_SHUFFLE(2, 1, 0, 3)
+    c.shuffle_epi32::<0b_00_11_10_01>(); // _MM_SHUFFLE(0, 3, 2, 1)
+    d.shuffle_epi32::<0b_01_00_11_10>(); // _MM_SHUFFLE(1, 0, 3, 2)
+    a.shuffle_epi32::<0b_10_01_00_11>(); // _MM_SHUFFLE(2, 1, 0, 3)
 }
 
 /// The goal of this function is to transform the state words from:
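The `_MM_SHUFFLE` comments decode the masks: each two-bit field of the immediate selects a source lane for one destination lane. A scalar model of `_mm256_shuffle_epi32` acting on one 128-bit half (illustration only):

```rust
// Scalar model of `_mm_shuffle_epi32`/`_mm256_shuffle_epi32` per 128-bit lane:
// destination lane i takes source lane `(MASK >> 2*i) & 0b11`.
fn shuffle_epi32<const MASK: i32>(v: [u32; 4]) -> [u32; 4] {
    let sel = |i: u32| v[((MASK as u32 >> (2 * i)) & 0b11) as usize];
    [sel(0), sel(1), sel(2), sel(3)]
}

fn main() {
    let v = [10, 20, 30, 40];
    // 0b_00_11_10_01 == _MM_SHUFFLE(0, 3, 2, 1): rotate lanes left by one.
    assert_eq!(shuffle_epi32::<0b_00_11_10_01>(v), [20, 30, 40, 10]);
}
```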
@@ -244,45 +344,38 @@
 /// reversing the transformation of [`rows_to_cols`].
 #[inline]
 #[target_feature(enable = "avx2")]
-unsafe fn cols_to_rows(a: &mut __m256i, _b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) {
+unsafe fn cols_to_rows(
+    a: &mut StateWord,
+    _b: &mut StateWord,
+    c: &mut StateWord,
+    d: &mut StateWord,
+) {
     // c = ROR256_D(c); d = ROR256_C(d); a = ROR256_B(a);
-    *c = _mm256_shuffle_epi32(*c, 0b_10_01_00_11); // _MM_SHUFFLE(2, 1, 0, 3)
-    *d = _mm256_shuffle_epi32(*d, 0b_01_00_11_10); // _MM_SHUFFLE(1, 0, 3, 2)
-    *a = _mm256_shuffle_epi32(*a, 0b_00_11_10_01); // _MM_SHUFFLE(0, 3, 2, 1)
+    c.shuffle_epi32::<0b_10_01_00_11>(); // _MM_SHUFFLE(2, 1, 0, 3)
+    d.shuffle_epi32::<0b_01_00_11_10>(); // _MM_SHUFFLE(1, 0, 3, 2)
+    a.shuffle_epi32::<0b_00_11_10_01>(); // _MM_SHUFFLE(0, 3, 2, 1)
 }
 
 #[inline]
 #[target_feature(enable = "avx2")]
-unsafe fn add_xor_rot(a: &mut __m256i, b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) {
+unsafe fn add_xor_rot(a: &mut StateWord, b: &mut StateWord, c: &mut StateWord, d: &mut StateWord) {
     // a = ADD256_32(a,b); d = XOR256(d,a); d = ROL256_16(d);
-    *a = _mm256_add_epi32(*a, *b);
-    *d = _mm256_xor_si256(*d, *a);
-    *d = _mm256_shuffle_epi8(
-        *d,
-        _mm256_set_epi8(
-            13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2, 13, 12, 15, 14, 9, 8, 11, 10, 5,
-            4, 7, 6, 1, 0, 3, 2,
-        ),
-    );
+    a.add_assign_epi32(b);
+    d.xor_assign(a);
+    d.rol_16();
 
     // c = ADD256_32(c,d); b = XOR256(b,c); b = ROL256_12(b);
-    *c = _mm256_add_epi32(*c, *d);
-    *b = _mm256_xor_si256(*b, *c);
-    *b = _mm256_xor_si256(_mm256_slli_epi32(*b, 12), _mm256_srli_epi32(*b, 20));
+    c.add_assign_epi32(d);
+    b.xor_assign(c);
+    b.rol::<12, 20>();
 
     // a = ADD256_32(a,b); d = XOR256(d,a); d = ROL256_8(d);
-    *a = _mm256_add_epi32(*a, *b);
-    *d = _mm256_xor_si256(*d, *a);
-    *d = _mm256_shuffle_epi8(
-        *d,
-        _mm256_set_epi8(
-            14, 13, 12, 15, 10, 9, 8, 11, 6, 5, 4, 7, 2, 1, 0, 3, 14, 13, 12, 15, 10, 9, 8, 11, 6,
-            5, 4, 7, 2, 1, 0, 3,
-        ),
-    );
+    a.add_assign_epi32(b);
+    d.xor_assign(a);
+    d.rol_8();
 
     // c = ADD256_32(c,d); b = XOR256(b,c); b = ROL256_7(b);
-    *c = _mm256_add_epi32(*c, *d);
-    *b = _mm256_xor_si256(*b, *c);
-    *b = _mm256_xor_si256(_mm256_slli_epi32(*b, 7), _mm256_srli_epi32(*b, 25));
+    c.add_assign_epi32(d);
+    b.xor_assign(c);
+    b.rol::<7, 25>();
 }
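Why `rol_8` and `rol_16` use `_mm256_shuffle_epi8` while the other rotations use shift+XOR: rotating a 32-bit word by a multiple of 8 bits is a pure byte permutation, so it costs a single shuffle per vector. A scalar demonstration of the equivalence (illustration only):

```rust
// Byte-permutation rotate-left by 8, matching the low four mask entries
// `2, 1, 0, 3` in `rol_8` above: destination byte 0 takes source byte 3,
// byte 1 takes byte 0, byte 2 takes byte 1, byte 3 takes byte 2.
fn rol8_via_bytes(x: u32) -> u32 {
    let b = x.to_le_bytes();
    u32::from_le_bytes([b[3], b[0], b[1], b[2]])
}

fn main() {
    assert_eq!(rol8_via_bytes(0x1122_3344), 0x1122_3344_u32.rotate_left(8));
}
```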
diff --git a/chacha20/src/chacha.rs b/chacha20/src/chacha.rs
index 8f5e47ef..3ae29fb4 100644
--- a/chacha20/src/chacha.rs
+++ b/chacha20/src/chacha.rs
@@ -70,7 +70,7 @@ pub struct ChaCha {
     buffer: Buffer,
 
     /// Position within buffer, or `None` if the buffer is not in use
-    buffer_pos: u8,
+    buffer_pos: u16,
 
     /// Current counter value relative to the start of the keystream
     counter: u64,
@@ -121,7 +121,7 @@ impl StreamCipher for ChaCha {
         if data.len() < BUFFER_SIZE - pos {
             let n = pos + data.len();
             xor(data, &self.buffer[pos..n]);
-            self.buffer_pos = n as u8;
+            self.buffer_pos = n as u16;
             return Ok(());
         } else {
             let (l, r) = data.split_at_mut(BUFFER_SIZE - pos);
@@ -129,7 +129,7 @@
             if let Some(new_ctr) = counter.checked_add(COUNTER_INCR) {
                 counter = new_ctr;
             } else if data.is_empty() {
-                self.buffer_pos = BUFFER_SIZE as u8;
+                self.buffer_pos = BUFFER_SIZE as u16;
             } else {
                 return Err(LoopError);
             }
@@ -137,7 +137,7 @@
             }
         }
 
-        if self.buffer_pos == BUFFER_SIZE as u8 {
+        if self.buffer_pos == BUFFER_SIZE as u16 {
             if data.is_empty() {
                 return Ok(());
             } else {
@@ -154,7 +154,7 @@
         }
 
        let rem = chunks.into_remainder();
-        self.buffer_pos = rem.len() as u8;
+        self.buffer_pos = rem.len() as u16;
         self.counter = counter;
         if !rem.is_empty() {
             self.generate_block(counter);
@@ -168,24 +168,27 @@ impl StreamCipherSeek for ChaCha {
     fn try_current_pos<T: SeekNum>(&self) -> Result<T, OverflowError> {
         // quick and dirty fix, until ctr-like parallel block processing will be added
-        let (counter, pos) = if self.buffer_pos < BLOCK_SIZE as u8 {
-            (self.counter, self.buffer_pos)
-        } else {
-            (
-                self.counter.checked_add(1).ok_or(OverflowError)?,
-                self.buffer_pos - BLOCK_SIZE as u8,
-            )
+        let (counter, pos) = {
+            let mut counter = self.counter;
+            let mut pos = self.buffer_pos;
+
+            while pos >= BLOCK_SIZE as u16 {
+                counter = counter.checked_add(1).ok_or(OverflowError)?;
+                pos -= BLOCK_SIZE as u16;
+            }
+
+            (counter, pos)
         };
-        T::from_block_byte(counter, pos, BLOCK_SIZE as u8)
+        T::from_block_byte(counter, pos as u8, BLOCK_SIZE as u8)
     }
 
     fn try_seek<T: SeekNum>(&mut self, pos: T) -> Result<(), LoopError> {
-        let res: (u64, u8) = pos.to_block_byte(BUFFER_SIZE as u8)?;
+        let res: (u64, u8) = pos.to_block_byte(BLOCK_SIZE as u8)?;
 
         let old_counter = self.counter;
         let old_buffer_pos = self.buffer_pos;
 
-        self.counter = res.0.checked_mul(COUNTER_INCR).ok_or(LoopError)?;
-        self.buffer_pos = res.1;
+        self.counter = res.0;
+        self.buffer_pos = res.1 as u16;
 
         if let Err(e) = self.check_data_len(&[0]) {
             self.counter = old_counter;
diff --git a/chacha20/src/rng.rs b/chacha20/src/rng.rs
index 0a1df5df..672f65f6 100644
--- a/chacha20/src/rng.rs
+++ b/chacha20/src/rng.rs
@@ -12,6 +12,28 @@ use crate::{
 };
 use core::convert::TryInto;
 
+/// Array wrapper used for `BlockRngCore::Results` associated types.
+#[repr(transparent)]
+pub struct BlockRngResults([u32; BUFFER_SIZE / 4]);
+
+impl Default for BlockRngResults {
+    fn default() -> Self {
+        BlockRngResults([u32::default(); BUFFER_SIZE / 4])
+    }
+}
+
+impl AsRef<[u32]> for BlockRngResults {
+    fn as_ref(&self) -> &[u32] {
+        &self.0
+    }
+}
+
+impl AsMut<[u32]> for BlockRngResults {
+    fn as_mut(&mut self) -> &mut [u32] {
+        &mut self.0
+    }
+}
+
 macro_rules! impl_chacha_rng {
     ($name:ident, $core:ident, $rounds:ident, $doc:expr) => {
         #[doc = $doc]
@@ -71,7 +93,7 @@ macro_rules! impl_chacha_rng {
 
         impl BlockRngCore for $core {
             type Item = u32;
-            type Results = [u32; BUFFER_SIZE / 4];
+            type Results = BlockRngResults;
 
             fn generate(&mut self, results: &mut Self::Results) {
                 // is this necessary?
@@ -83,7 +105,7 @@ macro_rules! impl_chacha_rng {
                 let mut buffer = [0u8; BUFFER_SIZE];
                 self.block.generate(self.counter, &mut buffer);
 
-                for (n, chunk) in results.iter_mut().zip(buffer.chunks_exact(4)) {
+                for (n, chunk) in results.as_mut().iter_mut().zip(buffer.chunks_exact(4)) {
                     *n = u32::from_le_bytes(chunk.try_into().unwrap());
                 }
 
diff --git a/ctr/src/lib.rs b/ctr/src/lib.rs
index 8c6abddb..292e2008 100644
--- a/ctr/src/lib.rs
+++ b/ctr/src/lib.rs
@@ -43,6 +43,7 @@
     html_root_url = "https://docs.rs/ctr/0.8.0"
 )]
 #![warn(missing_docs, rust_2018_idioms)]
+#![allow(clippy::upper_case_acronyms)]
 
 pub use cipher;
 use cipher::{
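Back in `chacha20/src/chacha.rs`, the new `while` loop in `try_current_pos` normalizes a buffer-relative position into a (block counter, in-block byte) pair; it is equivalent to a checked div/mod by `BLOCK_SIZE`, assuming `counter` counts 64-byte blocks while `buffer_pos` may point anywhere in the four-block buffer. A sketch under that reading (names here are illustrative, not the crate's API):

```rust
// Equivalent of the position-normalization loop in `try_current_pos` above.
const BLOCK_SIZE: u16 = 64;

fn normalize(counter: u64, buffer_pos: u16) -> Option<(u64, u8)> {
    let blocks = u64::from(buffer_pos / BLOCK_SIZE);
    let byte = (buffer_pos % BLOCK_SIZE) as u8;
    // `checked_add` mirrors the `ok_or(OverflowError)?` in the real code.
    Some((counter.checked_add(blocks)?, byte))
}

fn main() {
    // 197 bytes into the buffer generated at counter 8 = block 11, byte 5.
    assert_eq!(normalize(8, 197), Some((11, 5)));
}
```

Relatedly, the `BlockRngResults` newtype in `rng.rs` appears necessary because the buffer now holds `BUFFER_SIZE / 4 == 64` words, and `Default` is only implemented for arrays of up to 32 elements, so a bare array can no longer satisfy the `BlockRngCore::Results` bounds.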