diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs
index a42eaaf2baf..1a4e0e1213f 100644
--- a/esp-hal/src/dma/buffers.rs
+++ b/esp-hal/src/dma/buffers.rs
@@ -194,14 +194,28 @@ impl ExternalBurstConfig {
 }
 
 impl InternalBurstConfig {
-    pub(super) fn is_burst_enabled(self) -> bool {
+    pub(super) const fn is_burst_enabled(self) -> bool {
         !matches!(self, Self::Disabled)
     }
 
+    // Size and address alignment as those come in pairs on current hardware.
     const fn min_dram_alignment(self, direction: TransferDirection) -> usize {
-        // IN transfers must be word aligned
         if matches!(direction, TransferDirection::In) {
-            4
+            // NOTE(danielb): commenting this check is incorrect as per TRM, but works.
+            // we'll need to restore this once peripherals can read a
+            // different amount of data than what is configured in the
+            // buffer.
+            // if cfg!(esp32) {
+            //     // NOTE: The size must be word-aligned.
+            //     // NOTE: The buffer address must be word-aligned
+            //     1
+            // }
+            if self.is_burst_enabled() {
+                // As described in "Accessing Internal Memory" paragraphs in the various TRMs.
+                4
+            } else {
+                1
+            }
         } else {
             // OUT transfers have no alignment requirements, except for ESP32, which is
             // described below.
diff --git a/hil-test/tests/spi_full_duplex.rs b/hil-test/tests/spi_full_duplex.rs
index df4c44466dc..35179c81749 100644
--- a/hil-test/tests/spi_full_duplex.rs
+++ b/hil-test/tests/spi_full_duplex.rs
@@ -191,7 +191,8 @@ mod tests {
     #[timeout(3)]
     #[cfg(pcnt)]
     fn test_dma_read_dma_write_pcnt(ctx: Context) {
-        const DMA_BUFFER_SIZE: usize = 5;
+        const DMA_BUFFER_SIZE: usize = 8;
+        const TRANSFER_SIZE: usize = 5;
         let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
         let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
         let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
@@ -207,20 +208,20 @@ mod tests {
         dma_tx_buf.as_mut_slice().fill(0b0110_1010);
 
         for i in 1..4 {
-            dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]);
+            dma_rx_buf.as_mut_slice()[..TRANSFER_SIZE].copy_from_slice(&[5; TRANSFER_SIZE]);
             let transfer = spi
-                .read(dma_rx_buf.len(), dma_rx_buf)
+                .read(TRANSFER_SIZE, dma_rx_buf)
                 .map_err(|e| e.0)
                 .unwrap();
             (spi, dma_rx_buf) = transfer.wait();
-            assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]);
+            assert_eq!(&dma_rx_buf.as_slice()[..TRANSFER_SIZE], &[0; TRANSFER_SIZE]);
 
             let transfer = spi
-                .write(dma_tx_buf.len(), dma_tx_buf)
+                .write(TRANSFER_SIZE, dma_tx_buf)
                 .map_err(|e| e.0)
                 .unwrap();
             (spi, dma_tx_buf) = transfer.wait();
-            assert_eq!(unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _);
+            assert_eq!(unit.value(), (i * 3 * TRANSFER_SIZE) as _);
         }
     }
 
@@ -228,7 +229,8 @@ mod tests {
     #[timeout(3)]
     #[cfg(pcnt)]
     fn test_dma_read_dma_transfer_pcnt(ctx: Context) {
-        const DMA_BUFFER_SIZE: usize = 5;
+        const DMA_BUFFER_SIZE: usize = 8;
+        const TRANSFER_SIZE: usize = 5;
         let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
         let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
         let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
@@ -244,20 +246,20 @@ mod tests {
         dma_tx_buf.as_mut_slice().fill(0b0110_1010);
 
         for i in 1..4 {
-            dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]);
+            dma_rx_buf.as_mut_slice()[..TRANSFER_SIZE].copy_from_slice(&[5; TRANSFER_SIZE]);
             let transfer = spi
-                .read(dma_rx_buf.len(), dma_rx_buf)
+                .read(TRANSFER_SIZE, dma_rx_buf)
                 .map_err(|e| e.0)
                 .unwrap();
             (spi, dma_rx_buf) = transfer.wait();
-            assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]);
+            assert_eq!(&dma_rx_buf.as_slice()[..TRANSFER_SIZE], &[0; TRANSFER_SIZE]);
 
             let transfer = spi
-                .transfer(dma_rx_buf.len(), dma_rx_buf, dma_tx_buf.len(), dma_tx_buf)
+                .transfer(TRANSFER_SIZE, dma_rx_buf, TRANSFER_SIZE, dma_tx_buf)
                 .map_err(|e| e.0)
                 .unwrap();
             (spi, (dma_rx_buf, dma_tx_buf)) = transfer.wait();
-            assert_eq!(unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _);
+            assert_eq!(unit.value(), (i * 3 * TRANSFER_SIZE) as _);
         }
     }
 
@@ -294,7 +296,9 @@ mod tests {
     #[test]
     #[timeout(3)]
     fn test_asymmetric_dma_transfer(ctx: Context) {
-        let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(2, 4);
+        const WRITE_SIZE: usize = 4;
+        const READ_SIZE: usize = 2;
+        let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(4, 4);
         let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
         let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
 
@@ -302,22 +306,28 @@ mod tests {
 
         let spi = ctx.spi.with_dma(ctx.dma_channel);
         let transfer = spi
-            .transfer(dma_rx_buf.len(), dma_rx_buf, dma_tx_buf.len(), dma_tx_buf)
+            .transfer(READ_SIZE, dma_rx_buf, WRITE_SIZE, dma_tx_buf)
             .map_err(|e| e.0)
             .unwrap();
         let (spi, (dma_rx_buf, mut dma_tx_buf)) = transfer.wait();
-        assert_eq!(dma_tx_buf.as_slice()[0..2], dma_rx_buf.as_slice()[0..2]);
+        assert_eq!(
+            dma_tx_buf.as_slice()[0..READ_SIZE],
+            dma_rx_buf.as_slice()[0..READ_SIZE]
+        );
 
         // Try transfer again to make sure DMA isn't in a broken state.
 
         dma_tx_buf.fill(&[0xaa, 0xdd, 0xef, 0xbe]);
 
         let transfer = spi
-            .transfer(dma_rx_buf.len(), dma_rx_buf, dma_tx_buf.len(), dma_tx_buf)
+            .transfer(READ_SIZE, dma_rx_buf, WRITE_SIZE, dma_tx_buf)
            .map_err(|e| e.0)
             .unwrap();
         let (_, (dma_rx_buf, dma_tx_buf)) = transfer.wait();
-        assert_eq!(dma_tx_buf.as_slice()[0..2], dma_rx_buf.as_slice()[0..2]);
+        assert_eq!(
+            dma_tx_buf.as_slice()[0..READ_SIZE],
+            dma_rx_buf.as_slice()[0..READ_SIZE]
+        );
     }
 
     #[test]
@@ -386,7 +396,8 @@ mod tests {
     #[timeout(3)]
     #[cfg(pcnt)]
     async fn test_async_dma_read_dma_write_pcnt(ctx: Context) {
-        const DMA_BUFFER_SIZE: usize = 5;
+        const DMA_BUFFER_SIZE: usize = 8;
+        const TRANSFER_SIZE: usize = 5;
         let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
         let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
         let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
@@ -401,18 +412,18 @@ mod tests {
             .channel0
             .set_input_mode(EdgeMode::Hold, EdgeMode::Increment);
 
-        let mut receive = [0; DMA_BUFFER_SIZE];
+        let mut receive = [0; TRANSFER_SIZE];
 
         // Fill the buffer where each byte has 3 pos edges.
-        let transmit = [0b0110_1010; DMA_BUFFER_SIZE];
+        let transmit = [0b0110_1010; TRANSFER_SIZE];
 
         for i in 1..4 {
-            receive.copy_from_slice(&[5, 5, 5, 5, 5]);
+            receive.copy_from_slice(&[5; TRANSFER_SIZE]);
             SpiBusAsync::read(&mut spi, &mut receive).await.unwrap();
-            assert_eq!(receive, [0, 0, 0, 0, 0]);
+            assert_eq!(receive, [0; TRANSFER_SIZE]);
 
             SpiBusAsync::write(&mut spi, &transmit).await.unwrap();
-            assert_eq!(ctx.pcnt_unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _);
+            assert_eq!(ctx.pcnt_unit.value(), (i * 3 * TRANSFER_SIZE) as _);
         }
     }
 
@@ -420,7 +431,8 @@ mod tests {
    #[timeout(3)]
     #[cfg(pcnt)]
     async fn test_async_dma_read_dma_transfer_pcnt(ctx: Context) {
-        const DMA_BUFFER_SIZE: usize = 5;
+        const DMA_BUFFER_SIZE: usize = 8;
+        const TRANSFER_SIZE: usize = 5;
         let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
         let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
         let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
@@ -435,10 +447,10 @@ mod tests {
             .channel0
             .set_input_mode(EdgeMode::Hold, EdgeMode::Increment);
 
-        let mut receive = [0; DMA_BUFFER_SIZE];
+        let mut receive = [0; TRANSFER_SIZE];
 
         // Fill the buffer where each byte has 3 pos edges.
-        let transmit = [0b0110_1010; DMA_BUFFER_SIZE];
+        let transmit = [0b0110_1010; TRANSFER_SIZE];
 
         for i in 1..4 {
             receive.copy_from_slice(&[5, 5, 5, 5, 5]);
@@ -448,7 +460,7 @@ mod tests {
             SpiBusAsync::transfer(&mut spi, &mut receive, &transmit)
                 .await
                 .unwrap();
-            assert_eq!(ctx.pcnt_unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _);
+            assert_eq!(ctx.pcnt_unit.value(), (i * 3 * TRANSFER_SIZE) as _);
         }
     }
 