From 7e2db7fbff4e05deb58f97b4558c42790440cfd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Fri, 15 Nov 2024 10:39:25 +0100 Subject: [PATCH 01/13] Implement burst configuration --- esp-hal/src/dma/buffers.rs | 566 ++++++++++++------ esp-hal/src/dma/gdma.rs | 10 + esp-hal/src/dma/mod.rs | 161 +++-- esp-hal/src/dma/pdma.rs | 48 ++ esp-hal/src/soc/esp32s2/mod.rs | 30 + esp-metadata/devices/esp32s2.toml | 3 + esp-metadata/devices/esp32s3.toml | 4 + examples/src/bin/spi_loopback_dma_psram.rs | 15 +- hil-test/tests/spi_half_duplex_write_psram.rs | 28 +- 9 files changed, 594 insertions(+), 271 deletions(-) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index 8724342777d..6e0e3b26946 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -4,25 +4,252 @@ use core::{ }; use super::*; -use crate::soc::is_slice_in_dram; -#[cfg(esp32s3)] -use crate::soc::is_slice_in_psram; +#[cfg(psram_dma)] +use crate::soc::is_valid_psram_address; +use crate::soc::{is_slice_in_dram, is_slice_in_psram}; + +cfg_if::cfg_if! { + if #[cfg(psram_dma)] { + /// PSRAM access burst size. + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + #[cfg_attr(feature = "defmt", derive(defmt::Format))] + pub enum ExternalBurstSize { + /// 16 bytes + Size16 = 16, + + /// 32 bytes + Size32 = 32, + + /// 64 bytes + Size64 = 64, + } -/// Burst transfer configuration. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -pub enum BurstConfig { - /// Burst mode is disabled. - Disabled, + impl ExternalBurstSize { + /// The default external memory burst length. + pub const DEFAULT: Self = Self::Size16; + } + + impl Default for ExternalBurstSize { + fn default() -> Self { + Self::DEFAULT + } + } + + /// Internal memory access burst mode. + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + #[cfg_attr(feature = "defmt", derive(defmt::Format))] + pub enum InternalBurstTransfer { + /// Burst mode is disabled. + Disabled, + + /// Burst mode is enabled. + Enabled, + } + + impl InternalBurstTransfer { + /// The default internal burst mode configuration. + pub const DEFAULT: Self = Self::Disabled; + } + + impl Default for InternalBurstTransfer { + fn default() -> Self { + Self::DEFAULT + } + } + + /// Burst transfer configuration. + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + #[cfg_attr(feature = "defmt", derive(defmt::Format))] + pub struct BurstConfig { + /// Configures the burst size for PSRAM transfers. + /// + /// Burst mode is always enabled for PSRAM transfers. + pub external: ExternalBurstSize, + + /// Enables or disables the burst mode for internal memory transfers. + /// + /// The burst size is not configurable. + pub internal: InternalBurstTransfer, + } + + impl BurstConfig { + /// The default burst mode configuration. + pub const DEFAULT: Self = Self { + external: ExternalBurstSize::DEFAULT, + internal: InternalBurstTransfer::DEFAULT, + }; + } + + impl Default for BurstConfig { + fn default() -> Self { + Self::DEFAULT + } + } + } else { + /// Burst transfer configuration. + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + #[cfg_attr(feature = "defmt", derive(defmt::Format))] + pub enum BurstConfig { + /// Burst mode is disabled. + Disabled, + + /// Burst mode is enabled. + Enabled, + } - /// Burst mode is enabled. - Enabled, + impl BurstConfig { + /// The default burst mode configuration. 
+ pub const DEFAULT: Self = Self::Disabled; + } + + impl Default for BurstConfig { + fn default() -> Self { + Self::DEFAULT + } + } + + type InternalBurstTransfer = BurstConfig; + } } -impl BurstConfig { +#[cfg(psram_dma)] +impl ExternalBurstSize { + const fn min_psram_alignment(self, direction: TransferDirection) -> usize { + // S2: Specifically, size and buffer address pointer in receive descriptors + // should be 16-byte, 32-byte or 64-byte aligned. For data frame whose + // length is not a multiple of 16 bytes, 32 bytes, or 64 bytes, EDMA adds + // padding bytes to the end. + + // S3: Size and Address for IN transfers must be block aligned. For receive + // descriptors, if the data length received are not aligned with block size, + // GDMA will pad the data received with 0 until they are aligned to + // initiate burst transfer. You can read the length field in receive descriptors + // to obtain the length of valid data received + if matches!(direction, TransferDirection::In) { + self as usize + } else { + // S2: Size, length and buffer address pointer in transmit descriptors are not + // necessarily aligned with block size. + + // S3: Size, length, and buffer address pointer in transmit descriptors do not + // need to be aligned. + 1 + } + } +} + +impl InternalBurstTransfer { pub(super) fn is_burst_enabled(self) -> bool { !matches!(self, Self::Disabled) } + + const fn min_dram_alignment(self, direction: TransferDirection) -> usize { + // IN transfers must be word aligned + if matches!(direction, TransferDirection::In) { + 4 + } else { + // OUT transfers have no alignment requirements, except for ESP32, which is + // described below. + if cfg!(esp32) { + // SPI DMA: Burst transmission is supported. The data size for + // a single transfer must be four bytes aligned. + // I2S DMA: Burst transfer is supported. However, unlike the + // SPI DMA channels, the data size for a single transfer is + // one word, or four bytes. + 4 + } else { + 1 + } + } + } +} + +const fn max(a: usize, b: usize) -> usize { + if a > b { + a + } else { + b + } +} + +impl BurstConfig { + delegate::delegate! { + #[cfg(psram_dma)] + to self.internal { + pub(super) const fn min_dram_alignment(self, direction: TransferDirection) -> usize; + pub(super) fn is_burst_enabled(self) -> bool; + } + } + + /// Calculates an alignment that is compatible with the current burst + /// configuration. + /// + /// This is an over-estimation so that Descriptors can be safely used with + /// any DMA channel in any direction. + pub const fn min_compatible_alignment(self) -> usize { + let in_alignment = self.min_dram_alignment(TransferDirection::In); + let out_alignment = self.min_dram_alignment(TransferDirection::Out); + let alignment = max(in_alignment, out_alignment); + + #[cfg(psram_dma)] + let alignment = max(alignment, self.external as usize); + + alignment + } + + /// Calculates a chunk size that is compatible with the current burst + /// configuration's alignment requirements. + /// + /// This is an over-estimation so that Descriptors can be safely used with + /// any DMA channel in any direction. + pub const fn max_compatible_chunk_size(self) -> usize { + 4096 - self.min_compatible_alignment() + } + + fn min_alignment(self, _buffer: &[u8], direction: TransferDirection) -> usize { + let alignment = self.min_dram_alignment(direction); + + cfg_if::cfg_if! 
{ + if #[cfg(psram_dma)] { + let mut alignment = alignment; + if is_valid_psram_address(_buffer.as_ptr() as usize) { + alignment = max(alignment, self.external.min_psram_alignment(direction)); + } + } + } + + alignment + } + + // Note: this function ignores address alignment as we assume the buffers are + // aligned. + fn max_chunk_size_for(self, buffer: &[u8], direction: TransferDirection) -> usize { + 4096 - self.min_alignment(buffer, direction) + } + + fn is_buffer_aligned(self, buffer: &[u8], direction: TransferDirection) -> bool { + let alignment = self.min_alignment(buffer, direction); + buffer.as_ptr() as usize % alignment == 0 + } + + fn ensure_buffer_compatible( + self, + buffer: &[u8], + direction: TransferDirection, + ) -> Result<(), DmaBufError> { + // buffer can be either DRAM or PSRAM (if supported) + let is_in_dram = is_slice_in_dram(buffer); + let is_in_psram = cfg!(psram_dma) && is_slice_in_psram(buffer); + if !(is_in_dram || is_in_psram) { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + + if !self.is_buffer_aligned(buffer, direction) { + return Err(DmaBufError::InvalidAlignment); + } + + Ok(()) + } } /// The direction of the DMA transfer. @@ -43,29 +270,14 @@ pub struct Preparation { /// The direction of the DMA transfer. pub direction: TransferDirection, - /// Block size for PSRAM transfers. - /// - /// If the buffer is in PSRAM, the implementation must ensure the following: - /// - /// - The implementation of the buffer must provide a non-`None` block size. - /// - For [`TransferDirection::In`] transfers, the implementation of the - /// buffer must invalidate the cache that contains the buffer before the - /// DMA starts. - /// - For [`TransferDirection::Out`] transfers, the implementation of the - /// buffer must write back the cache that contains the buffer before the - /// DMA starts. - #[cfg(esp32s3)] - pub external_memory_block_size: Option, - /// Configures the DMA to transfer data in bursts. /// - /// The implementation of the buffer must ensure that burst mode is only - /// enabled when alignment requirements are met. + /// The implementation of the buffer must ensure that buffer size + /// and alignment in each descriptor is compatible with the burst + /// transfer configuration. /// - /// There are no additional alignment requirements for - /// [`TransferDirection::Out`] burst transfers, but - /// [`TransferDirection::In`] transfers require all descriptors to have - /// buffer pointers and sizes that are a multiple of 4 (word aligned). + /// For details on alignment requirements, refer to your chip's + #[doc = crate::trm_markdown_link!()] pub burst_transfer: BurstConfig, /// Configures the "check owner" feature of the DMA channel. @@ -171,18 +383,6 @@ pub enum DmaBufError { InvalidChunkSize, } -/// DMA buffer alignments -#[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -pub enum DmaBufBlkSize { - /// 16 bytes - Size16 = 16, - /// 32 bytes - Size32 = 32, - /// 64 bytes - Size64 = 64, -} - /// DMA transmit buffer /// /// This is a contiguous buffer linked together by DMA descriptors of length @@ -193,14 +393,15 @@ pub enum DmaBufBlkSize { pub struct DmaTxBuf { descriptors: DescriptorSet<'static>, buffer: &'static mut [u8], - block_size: Option, + burst: BurstConfig, } impl DmaTxBuf { /// Creates a new [DmaTxBuf] from some descriptors and a buffer. /// /// There must be enough descriptors for the provided buffer. - /// Each descriptor can handle 4092 bytes worth of buffer. 
+ /// Depending on alignment requirements, each descriptor can handle at most + /// 4095 bytes worth of buffer. /// /// Both the descriptors and buffer must be in DMA-capable memory. /// Only DRAM is supported for descriptors. @@ -208,81 +409,57 @@ impl DmaTxBuf { descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], ) -> Result { - Self::new_with_block_size(descriptors, buffer, None) - } - - /// Compute max chunk size based on block size - pub const fn compute_chunk_size(block_size: Option) -> usize { - max_chunk_size(block_size) - } - - /// Compute the number of descriptors required for a given block size and - /// buffer size - pub const fn compute_descriptor_count( - buffer_size: usize, - block_size: Option, - ) -> usize { - descriptor_count(buffer_size, Self::compute_chunk_size(block_size), false) + Self::new_with_config(descriptors, buffer, BurstConfig::default()) } /// Creates a new [DmaTxBuf] from some descriptors and a buffer. /// /// There must be enough descriptors for the provided buffer. - /// Each descriptor can handle at most 4095 bytes worth of buffer. - /// Optionally, a block size can be provided for PSRAM & Burst transfers. + /// Depending on alignment requirements, each descriptor can handle at most + /// 4095 bytes worth of buffer. /// /// Both the descriptors and buffer must be in DMA-capable memory. /// Only DRAM is supported for descriptors. - pub fn new_with_block_size( + pub fn new_with_config( descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], - block_size: Option, + config: BurstConfig, ) -> Result { - cfg_if::cfg_if! { - if #[cfg(esp32s3)] { - // buffer can be either DRAM or PSRAM (if supported) - if !is_slice_in_dram(buffer) && !is_slice_in_psram(buffer) { - return Err(DmaBufError::UnsupportedMemoryRegion); - } - // if its PSRAM, the block_size/alignment must be specified - if is_slice_in_psram(buffer) && block_size.is_none() { - return Err(DmaBufError::InvalidAlignment); - } - } else { - #[cfg(any(esp32,esp32s2))] - if buffer.len() % 4 != 0 && buffer.as_ptr() as usize % 4 != 0 { - // ESP32 requires word alignment for DMA buffers. - // ESP32-S2 technically supports byte-aligned DMA buffers, but the - // transfer ends up writing out of bounds if the buffer's length - // is 2 or 3 (mod 4). - return Err(DmaBufError::InvalidAlignment); - } - // buffer can only be DRAM - if !is_slice_in_dram(buffer) { - return Err(DmaBufError::UnsupportedMemoryRegion); - } - } - } - - let block_size = if is_slice_in_dram(buffer) { - // no need for block size if the buffer is in DRAM - None - } else { - block_size - }; let mut buf = Self { descriptors: DescriptorSet::new(descriptors)?, buffer, - block_size, + burst: config, }; - buf.descriptors - .link_with_buffer(buf.buffer, max_chunk_size(block_size))?; - buf.set_length(buf.capacity()); + let capacity = buf.capacity(); + buf.configure(config, capacity)?; Ok(buf) } + fn configure(&mut self, burst: BurstConfig, length: usize) -> Result<(), DmaBufError> { + burst.ensure_buffer_compatible(self.buffer, TransferDirection::Out)?; + + self.descriptors.link_with_buffer( + self.buffer, + burst.max_chunk_size_for(self.buffer, TransferDirection::Out), + )?; + self.set_length_fallible(length, burst)?; + + self.burst = burst; + Ok(()) + } + + /// Configures the DMA to use burst transfers to access this buffer. + /// + /// Note that the hardware is allowed to ignore this setting. 
If you attempt + /// to use burst transfers with improperly aligned buffers, starting the + /// transfer will result in [`DmaError::InvalidAlignment`]. + pub fn set_burst_transfer(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { + let len = self.len(); + self.configure(burst, len) + } + /// Consume the buf, returning the descriptors and buffer. pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) { (self.descriptors.into_inner(), self.buffer) @@ -302,17 +479,22 @@ impl DmaTxBuf { .sum::() } + fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> { + assert!(len <= self.buffer.len()); + + self.descriptors.set_tx_length( + len, + burst.max_chunk_size_for(self.buffer, TransferDirection::Out), + ) + } + /// Reset the descriptors to only transmit `len` amount of bytes from this /// buf. /// /// The number of bytes in data must be less than or equal to the buffer /// size. pub fn set_length(&mut self, len: usize) { - assert!(len <= self.buffer.len()); - - unwrap!(self - .descriptors - .set_tx_length(len, max_chunk_size(self.block_size))); + unwrap!(self.set_length_fallible(len, self.burst)) } /// Fills the TX buffer with the bytes provided in `data` and reset the @@ -346,8 +528,8 @@ unsafe impl DmaTxBuffer for DmaTxBuf { desc.reset_for_tx(desc.next.is_null()); } - #[cfg(esp32s3)] - if crate::soc::is_valid_psram_address(self.buffer.as_ptr() as usize) { + #[cfg(psram_dma)] + if is_valid_psram_address(self.buffer.as_ptr() as usize) { unsafe { crate::soc::cache_writeback_addr( self.buffer.as_ptr() as u32, @@ -359,10 +541,7 @@ unsafe impl DmaTxBuffer for DmaTxBuf { Preparation { start: self.descriptors.head(), direction: TransferDirection::Out, - #[cfg(esp32s3)] - external_memory_block_size: self.block_size, - // TODO: support burst transfers. - burst_transfer: BurstConfig::Disabled, + burst_transfer: self.burst, check_owner: None, } } @@ -384,6 +563,7 @@ unsafe impl DmaTxBuffer for DmaTxBuf { pub struct DmaRxBuf { descriptors: DescriptorSet<'static>, buffer: &'static mut [u8], + burst: BurstConfig, } impl DmaRxBuf { @@ -398,22 +578,40 @@ impl DmaRxBuf { descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], ) -> Result { - if !is_slice_in_dram(buffer) { - return Err(DmaBufError::UnsupportedMemoryRegion); - } - let mut buf = Self { descriptors: DescriptorSet::new(descriptors)?, buffer, + burst: BurstConfig::default(), }; - buf.descriptors - .link_with_buffer(buf.buffer, max_chunk_size(None))?; - buf.set_length(buf.capacity()); + buf.configure(buf.burst, buf.capacity())?; Ok(buf) } + fn configure(&mut self, burst: BurstConfig, length: usize) -> Result<(), DmaBufError> { + burst.ensure_buffer_compatible(self.buffer, TransferDirection::In)?; + + self.descriptors.link_with_buffer( + self.buffer, + burst.max_chunk_size_for(self.buffer, TransferDirection::In), + )?; + self.set_length_fallible(length, burst)?; + + self.burst = burst; + Ok(()) + } + + /// Configures the DMA to use burst transfers to access this buffer. + /// + /// Note that the hardware is allowed to ignore this setting. If you attempt + /// to use burst transfers with improperly aligned buffers, starting the + /// transfer will result in [`DmaError::InvalidAlignment`]. + pub fn set_burst_transfer(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { + let len = self.len(); + self.configure(burst, len) + } + /// Consume the buf, returning the descriptors and buffer. 
pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) { (self.descriptors.into_inner(), self.buffer) @@ -434,15 +632,22 @@ impl DmaRxBuf { .sum::() } + fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> { + assert!(len <= self.buffer.len()); + + self.descriptors.set_rx_length( + len, + burst.max_chunk_size_for(self.buffer, TransferDirection::In), + ) + } + /// Reset the descriptors to only receive `len` amount of bytes into this /// buf. /// /// The number of bytes in data must be less than or equal to the buffer /// size. pub fn set_length(&mut self, len: usize) { - assert!(len <= self.buffer.len()); - - unwrap!(self.descriptors.set_rx_length(len, max_chunk_size(None))); + unwrap!(self.set_length_fallible(len, self.burst)); } /// Returns the entire underlying buffer as a slice than can be read. @@ -506,15 +711,7 @@ unsafe impl DmaRxBuffer for DmaRxBuf { Preparation { start: self.descriptors.head(), direction: TransferDirection::In, - - // TODO: support external memory access. - #[cfg(esp32s3)] - external_memory_block_size: None, - - // TODO: DmaRxBuf doesn't currently enforce the alignment requirements required for - // bursting. In the future, it could either enforce the alignment or - // calculate if the alignment requirements happen to be met. - burst_transfer: BurstConfig::Disabled, + burst_transfer: self.burst, check_owner: None, } } @@ -538,6 +735,7 @@ pub struct DmaRxTxBuf { rx_descriptors: DescriptorSet<'static>, tx_descriptors: DescriptorSet<'static>, buffer: &'static mut [u8], + burst: BurstConfig, } impl DmaRxTxBuf { @@ -553,24 +751,48 @@ impl DmaRxTxBuf { tx_descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], ) -> Result { - if !is_slice_in_dram(buffer) { - return Err(DmaBufError::UnsupportedMemoryRegion); - } - let mut buf = Self { rx_descriptors: DescriptorSet::new(rx_descriptors)?, tx_descriptors: DescriptorSet::new(tx_descriptors)?, buffer, + burst: BurstConfig::default(), }; - buf.rx_descriptors - .link_with_buffer(buf.buffer, max_chunk_size(None))?; - buf.tx_descriptors - .link_with_buffer(buf.buffer, max_chunk_size(None))?; - buf.set_length(buf.capacity()); + + let capacity = buf.capacity(); + buf.configure(buf.burst, capacity)?; Ok(buf) } + fn configure(&mut self, burst: BurstConfig, length: usize) -> Result<(), DmaBufError> { + burst.ensure_buffer_compatible(self.buffer, TransferDirection::In)?; + burst.ensure_buffer_compatible(self.buffer, TransferDirection::Out)?; + + self.rx_descriptors.link_with_buffer( + self.buffer, + burst.max_chunk_size_for(self.buffer, TransferDirection::In), + )?; + self.tx_descriptors.link_with_buffer( + self.buffer, + burst.max_chunk_size_for(self.buffer, TransferDirection::Out), + )?; + + self.set_length_fallible(length, burst)?; + self.burst = burst; + + Ok(()) + } + + /// Configures the DMA to use burst transfers to access this buffer. + /// + /// Note that the hardware is allowed to ignore this setting. If you attempt + /// to use burst transfers with improperly aligned buffers, starting the + /// transfer will result in [`DmaError::InvalidAlignment`]. + pub fn set_burst_transfer(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { + let len = self.len(); + self.configure(burst, len) + } + /// Consume the buf, returning the rx descriptors, tx descriptors and /// buffer. 
pub fn split( @@ -611,15 +833,27 @@ impl DmaRxTxBuf { self.buffer } + fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> { + assert!(len <= self.buffer.len()); + + self.rx_descriptors.set_rx_length( + len, + burst.max_chunk_size_for(self.buffer, TransferDirection::In), + )?; + self.tx_descriptors.set_tx_length( + len, + burst.max_chunk_size_for(self.buffer, TransferDirection::Out), + )?; + + Ok(()) + } + /// Reset the descriptors to only transmit/receive `len` amount of bytes /// with this buf. /// /// `len` must be less than or equal to the buffer size. pub fn set_length(&mut self, len: usize) { - assert!(len <= self.buffer.len()); - - unwrap!(self.rx_descriptors.set_rx_length(len, max_chunk_size(None))); - unwrap!(self.tx_descriptors.set_tx_length(len, max_chunk_size(None))); + unwrap!(self.set_length_fallible(len, self.burst)); } } @@ -636,13 +870,7 @@ unsafe impl DmaTxBuffer for DmaRxTxBuf { Preparation { start: self.tx_descriptors.head(), direction: TransferDirection::Out, - - // TODO: support external memory access. - #[cfg(esp32s3)] - external_memory_block_size: None, - - // TODO: This is TX, the DMA channel is free to do a burst transfer. - burst_transfer: BurstConfig::Disabled, + burst_transfer: self.burst, check_owner: None, } } @@ -667,14 +895,7 @@ unsafe impl DmaRxBuffer for DmaRxTxBuf { Preparation { start: self.rx_descriptors.head(), direction: TransferDirection::In, - - // TODO: support external memory access. - #[cfg(esp32s3)] - external_memory_block_size: None, - - // TODO: DmaRxTxBuf doesn't currently enforce the alignment requirements required for - // bursting. - burst_transfer: BurstConfig::Disabled, + burst_transfer: self.burst, check_owner: None, } } @@ -731,6 +952,7 @@ unsafe impl DmaRxBuffer for DmaRxTxBuf { pub struct DmaRxStreamBuf { descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], + burst: BurstConfig, } impl DmaRxStreamBuf { @@ -789,6 +1011,7 @@ impl DmaRxStreamBuf { Ok(Self { descriptors, buffer, + burst: BurstConfig::default(), }) } @@ -808,14 +1031,7 @@ unsafe impl DmaRxBuffer for DmaRxStreamBuf { Preparation { start: self.descriptors.as_mut_ptr(), direction: TransferDirection::In, - - // TODO: support external memory access. - #[cfg(esp32s3)] - external_memory_block_size: None, - - // TODO: DmaRxStreamBuf doesn't currently enforce the alignment requirements required - // for bursting. - burst_transfer: BurstConfig::Disabled, + burst_transfer: self.burst, // Whilst we give ownership of the descriptors the DMA, the correctness of this buffer // implementation doesn't rely on the DMA checking for descriptor ownership. @@ -1022,9 +1238,7 @@ unsafe impl DmaTxBuffer for EmptyBuf { Preparation { start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() }, direction: TransferDirection::Out, - #[cfg(esp32s3)] - external_memory_block_size: None, - burst_transfer: BurstConfig::Disabled, + burst_transfer: BurstConfig::default(), // As we don't give ownership of the descriptor to the DMA, it's important that the DMA // channel does *NOT* check for ownership, otherwise the channel will return an error. 
@@ -1049,9 +1263,7 @@ unsafe impl DmaRxBuffer for EmptyBuf { Preparation { start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() }, direction: TransferDirection::In, - #[cfg(esp32s3)] - external_memory_block_size: None, - burst_transfer: BurstConfig::Disabled, + burst_transfer: BurstConfig::default(), // As we don't give ownership of the descriptor to the DMA, it's important that the DMA // channel does *NOT* check for ownership, otherwise the channel will return an error. @@ -1096,7 +1308,8 @@ impl DmaLoopBuf { return Err(DmaBufError::UnsupportedMemoryRegion); } - if buffer.len() > max_chunk_size(None) { + if buffer.len() > BurstConfig::default().max_chunk_size_for(buffer, TransferDirection::Out) + { return Err(DmaBufError::InsufficientDescriptors); } @@ -1123,10 +1336,7 @@ unsafe impl DmaTxBuffer for DmaLoopBuf { Preparation { start: self.descriptor, direction: TransferDirection::Out, - // TODO: support external memory access. - #[cfg(esp32s3)] - external_memory_block_size: None, - burst_transfer: BurstConfig::Disabled, + burst_transfer: BurstConfig::default(), // The DMA must not check the owner bit, as it is never set. check_owner: Some(false), } diff --git a/esp-hal/src/dma/gdma.rs b/esp-hal/src/dma/gdma.rs index ecee6e4dfba..fd4ccc73004 100644 --- a/esp-hal/src/dma/gdma.rs +++ b/esp-hal/src/dma/gdma.rs @@ -194,6 +194,11 @@ impl RegisterAccess for AnyGdmaTxChannel { .out_conf1() .modify(|_, w| unsafe { w.out_ext_mem_bk_size().bits(size as u8) }); } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + true + } } impl TxRegisterAccess for AnyGdmaTxChannel { @@ -431,6 +436,11 @@ impl RegisterAccess for AnyGdmaRxChannel { .in_conf1() .modify(|_, w| unsafe { w.in_ext_mem_bk_size().bits(size as u8) }); } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + true + } } impl RxRegisterAccess for AnyGdmaRxChannel { diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index 419014720a5..d9e8f71dd71 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -60,11 +60,13 @@ pub use self::gdma::*; pub use self::m2m::*; #[cfg(pdma)] pub use self::pdma::*; +#[cfg(psram_dma)] +use crate::soc::is_valid_psram_address; use crate::{ interrupt::InterruptHandler, peripheral::{Peripheral, PeripheralRef}, peripherals::Interrupt, - soc::is_slice_in_dram, + soc::{is_slice_in_dram, is_valid_memory_address, is_valid_ram_address}, system, Async, Blocking, @@ -363,6 +365,26 @@ impl DmaDescriptor { true => Owner::Dma, } } + + fn iter(&self) -> impl Iterator { + core::iter::successors(Some(self), |d| { + if d.next.is_null() { + None + } else { + Some(unsafe { &*d.next }) + } + }) + } + + fn iter_mut(&mut self) -> impl Iterator { + core::iter::successors(Some(self), |d| { + if d.next.is_null() { + None + } else { + Some(unsafe { &mut *d.next }) + } + }) + } } // The pointers in the descriptor can be Sent. @@ -675,6 +697,14 @@ macro_rules! dma_buffers_impl { ) } }}; + + ($size:expr, is_circular = $circular:tt) => { + $crate::dma_buffers_impl!( + $size, + $crate::dma::BurstConfig::DEFAULT.max_compatible_chunk_size(), + is_circular = $circular + ); + }; } #[doc(hidden)] @@ -722,7 +752,6 @@ macro_rules! dma_descriptor_count { /// ```rust,no_run #[doc = crate::before_snippet!()] /// use esp_hal::dma_tx_buffer; -/// use esp_hal::dma::DmaBufBlkSize; /// /// let tx_buf = dma_tx_buffer!(32000); /// # } @@ -730,11 +759,7 @@ macro_rules! dma_descriptor_count { #[macro_export] macro_rules! 
dma_tx_buffer { ($tx_size:expr) => {{ - let (tx_buffer, tx_descriptors) = $crate::dma_buffers_impl!( - $tx_size, - $crate::dma::DmaTxBuf::compute_chunk_size(None), - is_circular = false - ); + let (tx_buffer, tx_descriptors) = $crate::dma_buffers_impl!($tx_size, is_circular = false); $crate::dma::DmaTxBuf::new(tx_descriptors, tx_buffer) }}; @@ -1016,10 +1041,10 @@ impl DescriptorChain { len: usize, prepare_descriptor: impl Fn(&mut DmaDescriptor, usize), ) -> Result<(), DmaError> { - if !crate::soc::is_valid_ram_address(self.first() as usize) - || !crate::soc::is_valid_ram_address(self.last() as usize) - || !crate::soc::is_valid_memory_address(data as usize) - || !crate::soc::is_valid_memory_address(unsafe { data.add(len) } as usize) + if !is_valid_ram_address(self.first() as usize) + || !is_valid_ram_address(self.last() as usize) + || !is_valid_memory_address(data as usize) + || !is_valid_memory_address(unsafe { data.add(len) } as usize) { return Err(DmaError::UnsupportedMemoryRegion); } @@ -1066,17 +1091,6 @@ pub const fn descriptor_count(buffer_size: usize, chunk_size: usize, is_circular buffer_size.div_ceil(chunk_size) } -/// Compute max chunk size based on block size. -const fn max_chunk_size(block_size: Option) -> usize { - match block_size { - Some(size) => 4096 - size as usize, - #[cfg(esp32)] - None => 4092, // esp32 requires 4 byte alignment - #[cfg(not(esp32))] - None => 4095, - } -} - #[derive(Debug)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] struct DescriptorSet<'a> { @@ -1119,28 +1133,15 @@ impl<'a> DescriptorSet<'a> { /// Returns an iterator over the linked descriptors. fn linked_iter(&self) -> impl Iterator { - let mut was_last = false; - self.descriptors.iter().take_while(move |d| { - if was_last { - false - } else { - was_last = d.next.is_null(); - true - } - }) + self.descriptors.first().into_iter().flat_map(|d| d.iter()) } /// Returns an iterator over the linked descriptors. fn linked_iter_mut(&mut self) -> impl Iterator { - let mut was_last = false; - self.descriptors.iter_mut().take_while(move |d| { - if was_last { - false - } else { - was_last = d.next.is_null(); - true - } - }) + self.descriptors + .first_mut() + .into_iter() + .flat_map(|d| d.iter_mut()) } /// Associate each descriptor with a chunk of the buffer. @@ -1272,6 +1273,7 @@ impl<'a> DescriptorSet<'a> { } /// Block size for transfers to/from PSRAM +#[cfg(psram_dma)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum DmaExtMemBKSize { /// External memory block size of 16 bytes. 
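A worked example of the defaults now used by the `dma_buffers_impl!`/`dma_tx_buffer!` macros above, derived from the `min_compatible_alignment` and `max_compatible_chunk_size` helpers added to `buffers.rs` in this patch. The concrete numbers below are computed from that code and are not part of the diff itself:

```rust
// Sketch only: the values follow from BurstConfig::DEFAULT as defined in this patch.
//
// On chips with the `psram_dma` symbol (ESP32-S2/S3), the default external burst
// size is 16 bytes, so:
//   min_compatible_alignment  = max(4 /* IN, DRAM */, 1 /* OUT */, 16 /* PSRAM */) = 16
//   max_compatible_chunk_size = 4096 - 16 = 4080
//
// On chips without `psram_dma`, only the word alignment of IN transfers matters:
//   min_compatible_alignment  = 4
//   max_compatible_chunk_size = 4096 - 4 = 4092
let chunk = esp_hal::dma::BurstConfig::DEFAULT.max_compatible_chunk_size();
let descriptors_needed = 32_000_usize.div_ceil(chunk); // e.g. for a 32 kB buffer
```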
@@ -1282,12 +1284,13 @@ pub enum DmaExtMemBKSize { Size64 = 2, } -impl From for DmaExtMemBKSize { - fn from(size: DmaBufBlkSize) -> Self { +#[cfg(psram_dma)] +impl From for DmaExtMemBKSize { + fn from(size: ExternalBurstSize) -> Self { match size { - DmaBufBlkSize::Size16 => DmaExtMemBKSize::Size16, - DmaBufBlkSize::Size32 => DmaExtMemBKSize::Size32, - DmaBufBlkSize::Size64 => DmaExtMemBKSize::Size64, + ExternalBurstSize::Size16 => DmaExtMemBKSize::Size16, + ExternalBurstSize::Size32 => DmaExtMemBKSize::Size32, + ExternalBurstSize::Size64 => DmaExtMemBKSize::Size64, } } } @@ -1758,7 +1761,7 @@ pub trait Rx: crate::private::Sealed { fn stop_transfer(&mut self); - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize); #[cfg(gdma)] @@ -1916,6 +1919,8 @@ where ) -> Result<(), DmaError> { debug_assert_eq!(preparation.direction, TransferDirection::In); + #[cfg(psram_dma)] + self.set_ext_mem_block_size(preparation.burst_transfer.external.into()); self.rx_impl.set_burst_mode(preparation.burst_transfer); self.rx_impl.set_descr_burst_mode(true); self.rx_impl.set_check_owner(preparation.check_owner); @@ -1950,17 +1955,17 @@ where peri: DmaPeripheral, chain: &DescriptorChain, ) -> Result<(), DmaError> { - // For ESP32-S3 we check each descriptor buffer that points to PSRAM for + // We check each descriptor buffer that points to PSRAM for // alignment and invalidate the cache for that buffer. // NOTE: for RX the `buffer` and `size` need to be aligned but the `len` does // not. TRM section 3.4.9 // Note that DmaBuffer implementations are required to do this for us. - #[cfg(esp32s3)] + #[cfg(psram_dma)] for des in chain.descriptors.iter() { // we are forcing the DMA alignment to the cache line size // required when we are using dcache let alignment = crate::soc::cache_get_dcache_line_size() as usize; - if crate::soc::is_valid_psram_address(des.buffer as usize) { + if is_valid_psram_address(des.buffer as usize) { // both the size and address of the buffer must be aligned if des.buffer as usize % alignment != 0 && des.size() % alignment != 0 { return Err(DmaError::InvalidAlignment); @@ -1969,17 +1974,13 @@ where } } - self.do_prepare( - Preparation { - start: chain.first().cast_mut(), - #[cfg(esp32s3)] - external_memory_block_size: None, - direction: TransferDirection::In, - burst_transfer: BurstConfig::Disabled, - check_owner: Some(false), - }, - peri, - ) + let preparation = Preparation { + start: chain.first().cast_mut(), + direction: TransferDirection::In, + burst_transfer: BurstConfig::default(), + check_owner: Some(false), + }; + self.do_prepare(preparation, peri) } unsafe fn prepare_transfer( @@ -2009,7 +2010,7 @@ where self.rx_impl.stop() } - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { self.rx_impl.set_ext_mem_block_size(size); } @@ -2082,7 +2083,7 @@ pub trait Tx: crate::private::Sealed { fn stop_transfer(&mut self); - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize); fn is_done(&self) -> bool { @@ -2200,11 +2201,8 @@ where ) -> Result<(), DmaError> { debug_assert_eq!(preparation.direction, TransferDirection::Out); - #[cfg(esp32s3)] - if let Some(block_size) = preparation.external_memory_block_size { - self.set_ext_mem_block_size(block_size.into()); - } - + #[cfg(psram_dma)] + self.set_ext_mem_block_size(preparation.burst_transfer.external.into()); self.tx_impl.set_burst_mode(preparation.burst_transfer); self.tx_impl.set_descr_burst_mode(true); 
self.tx_impl.set_check_owner(preparation.check_owner); @@ -2241,10 +2239,10 @@ where ) -> Result<(), DmaError> { // Based on the ESP32-S3 TRM the alignment check is not needed for TX - // For esp32s3 we check each descriptor buffer that points to PSRAM for + // We check each descriptor buffer that points to PSRAM for // alignment and writeback the cache for that buffer. // Note that DmaBuffer implementations are required to do this for us. - #[cfg(esp32s3)] + #[cfg(psram_dma)] for des in chain.descriptors.iter() { // we are forcing the DMA alignment to the cache line size // required when we are using dcache @@ -2258,17 +2256,13 @@ where } } - self.do_prepare( - Preparation { - start: chain.first().cast_mut(), - #[cfg(esp32s3)] - external_memory_block_size: None, - direction: TransferDirection::Out, - burst_transfer: BurstConfig::Disabled, - check_owner: Some(false), - }, - peri, - )?; + let preparation = Preparation { + start: chain.first().cast_mut(), + direction: TransferDirection::Out, + burst_transfer: BurstConfig::default(), + check_owner: Some(false), + }; + self.do_prepare(preparation, peri)?; // enable descriptor write back in circular mode self.tx_impl @@ -2304,7 +2298,7 @@ where self.tx_impl.stop() } - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { self.tx_impl.set_ext_mem_block_size(size); } @@ -2379,11 +2373,14 @@ pub trait RegisterAccess: crate::private::Sealed { /// descriptor. fn set_check_owner(&self, check_owner: Option); - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize); #[cfg(pdma)] fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool; + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool; } #[doc(hidden)] diff --git a/esp-hal/src/dma/pdma.rs b/esp-hal/src/dma/pdma.rs index 425ad6c3ac4..612f23a6ddc 100644 --- a/esp-hal/src/dma/pdma.rs +++ b/esp-hal/src/dma/pdma.rs @@ -122,6 +122,18 @@ impl RegisterAccess for AnySpiDmaTxChannel { fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool { self.0.is_compatible_with(peripheral) } + + #[cfg(psram_dma)] + fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { + let spi = self.0.register_block(); + spi.dma_conf() + .modify(|_, w| unsafe { w.ext_mem_bk_size().bits(size as u8) }); + } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + matches!(self.0, AnySpiDmaChannel(AnySpiDmaChannelInner::Spi2(_))) + } } impl TxRegisterAccess for AnySpiDmaTxChannel { @@ -280,6 +292,18 @@ impl RegisterAccess for AnySpiDmaRxChannel { fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool { self.0.is_compatible_with(peripheral) } + + #[cfg(psram_dma)] + fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { + let spi = self.0.register_block(); + spi.dma_conf() + .modify(|_, w| unsafe { w.ext_mem_bk_size().bits(size as u8) }); + } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + matches!(self.0, AnySpiDmaChannel(AnySpiDmaChannelInner::Spi2(_))) + } } impl RxRegisterAccess for AnySpiDmaRxChannel { @@ -475,6 +499,18 @@ impl RegisterAccess for AnyI2sDmaTxChannel { fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool { self.0.is_compatible_with(peripheral) } + + #[cfg(psram_dma)] + fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { + let spi = self.0.register_block(); + spi.lc_conf() + .modify(|_, w| unsafe { w.ext_mem_bk_size().bits(size as u8) }); + } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + matches!(self.0, AnyI2sDmaChannel(AnyI2sDmaChannelInner::I2s0(_))) 
+ } } impl TxRegisterAccess for AnyI2sDmaTxChannel { @@ -645,6 +681,18 @@ impl RegisterAccess for AnyI2sDmaRxChannel { fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool { self.0.is_compatible_with(peripheral) } + + #[cfg(psram_dma)] + fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { + let spi = self.0.register_block(); + spi.lc_conf() + .modify(|_, w| unsafe { w.ext_mem_bk_size().bits(size as u8) }); + } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + matches!(self.0, AnyI2sDmaChannel(AnyI2sDmaChannelInner::I2s0(_))) + } } impl RxRegisterAccess for AnyI2sDmaRxChannel { diff --git a/esp-hal/src/soc/esp32s2/mod.rs b/esp-hal/src/soc/esp32s2/mod.rs index c6fdd8db12e..b85203c4d59 100644 --- a/esp-hal/src/soc/esp32s2/mod.rs +++ b/esp-hal/src/soc/esp32s2/mod.rs @@ -133,3 +133,33 @@ pub unsafe extern "C" fn ESP32Reset() -> ! { pub extern "Rust" fn __init_data() -> bool { false } + +/// Write back a specific range of data in the cache. +#[doc(hidden)] +#[link_section = ".rwtext"] +pub unsafe fn cache_writeback_addr(addr: u32, size: u32) { + extern "C" { + fn Cache_WriteBack_Addr(addr: u32, size: u32); + } + Cache_WriteBack_Addr(addr, size); +} + +/// Invalidate a specific range of addresses in the cache. +#[doc(hidden)] +#[link_section = ".rwtext"] +pub unsafe fn cache_invalidate_addr(addr: u32, size: u32) { + extern "C" { + fn Cache_Invalidate_Addr(addr: u32, size: u32); + } + Cache_Invalidate_Addr(addr, size); +} + +/// Get the size of a cache line in the DCache. +#[doc(hidden)] +#[link_section = ".rwtext"] +pub unsafe fn cache_get_dcache_line_size() -> u32 { + extern "C" { + fn Cache_Get_DCache_Line_Size() -> u32; + } + Cache_Get_DCache_Line_Size() +} diff --git a/esp-metadata/devices/esp32s2.toml b/esp-metadata/devices/esp32s2.toml index 32db8a32335..c9c70c97d78 100644 --- a/esp-metadata/devices/esp32s2.toml +++ b/esp-metadata/devices/esp32s2.toml @@ -72,4 +72,7 @@ symbols = [ "uart_support_wakeup_int", "ulp_supported", "riscv_coproc_supported", + + # Other capabilities + "psram_dma", ] diff --git a/esp-metadata/devices/esp32s3.toml b/esp-metadata/devices/esp32s3.toml index 31e1f3cf4b0..8dfc2fe1ae2 100644 --- a/esp-metadata/devices/esp32s3.toml +++ b/esp-metadata/devices/esp32s3.toml @@ -68,6 +68,7 @@ symbols = [ "bt", "wifi", "psram", + "psram_dma", "octal_psram", "ulp_riscv_core", "timg_timer1", @@ -88,4 +89,7 @@ symbols = [ "uart_support_wakeup_int", "ulp_supported", "riscv_coproc_supported", + + # Other capabilities + "psram_dma", ] diff --git a/examples/src/bin/spi_loopback_dma_psram.rs b/examples/src/bin/spi_loopback_dma_psram.rs index de15364a473..45026c163d0 100644 --- a/examples/src/bin/spi_loopback_dma_psram.rs +++ b/examples/src/bin/spi_loopback_dma_psram.rs @@ -25,7 +25,7 @@ use esp_backtrace as _; use esp_hal::{ delay::Delay, - dma::{DmaBufBlkSize, DmaRxBuf, DmaTxBuf}, + dma::{BurstConfig, DmaRxBuf, DmaTxBuf, ExternalBurstSize}, peripheral::Peripheral, prelude::*, spi::{ @@ -51,7 +51,7 @@ macro_rules! dma_alloc_buffer { } const DMA_BUFFER_SIZE: usize = 8192; -const DMA_ALIGNMENT: DmaBufBlkSize = DmaBufBlkSize::Size64; +const DMA_ALIGNMENT: ExternalBurstSize = ExternalBurstSize::Size64; const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; #[entry] @@ -76,8 +76,15 @@ fn main() -> ! 
{ tx_buffer.len(), tx_descriptors.len() ); - let mut dma_tx_buf = - DmaTxBuf::new_with_block_size(tx_descriptors, tx_buffer, Some(DMA_ALIGNMENT)).unwrap(); + let mut dma_tx_buf = DmaTxBuf::new_with_config( + tx_descriptors, + tx_buffer, + BurstConfig { + external: DMA_ALIGNMENT, + ..BurstConfig::default() + }, + ) + .unwrap(); let (rx_buffer, rx_descriptors, _, _) = esp_hal::dma_buffers!(DMA_BUFFER_SIZE, 0); info!( "RX: {:p} len {} ({} descripters)", diff --git a/hil-test/tests/spi_half_duplex_write_psram.rs b/hil-test/tests/spi_half_duplex_write_psram.rs index c21b8daae71..023cbda9b09 100644 --- a/hil-test/tests/spi_half_duplex_write_psram.rs +++ b/hil-test/tests/spi_half_duplex_write_psram.rs @@ -7,7 +7,7 @@ use defmt::error; use esp_alloc as _; use esp_hal::{ - dma::{DmaBufBlkSize, DmaRxBuf, DmaTxBuf}, + dma::{BurstConfig, DmaRxBuf, DmaTxBuf, ExternalBurstSize, InternalBurstTransfer}, dma_buffers, dma_descriptors_chunk_size, gpio::interconnect::InputSignal, @@ -83,13 +83,20 @@ mod tests { #[test] fn test_spi_writes_are_correctly_by_pcnt(ctx: Context) { const DMA_BUFFER_SIZE: usize = 4; - const DMA_ALIGNMENT: DmaBufBlkSize = DmaBufBlkSize::Size32; + const DMA_ALIGNMENT: ExternalBurstSize = ExternalBurstSize::Size32; const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; let (_, descriptors) = dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); let buffer = dma_alloc_buffer!(DMA_BUFFER_SIZE, DMA_ALIGNMENT as usize); - let mut dma_tx_buf = - DmaTxBuf::new_with_block_size(descriptors, buffer, Some(DMA_ALIGNMENT)).unwrap(); + let mut dma_tx_buf = DmaTxBuf::new_with_config( + descriptors, + buffer, + BurstConfig { + internal: InternalBurstTransfer::default(), + external: ExternalBurstSize::Size32, + }, + ) + .unwrap(); let unit = ctx.pcnt_unit; let mut spi = ctx.spi; @@ -134,13 +141,20 @@ mod tests { #[test] fn test_spidmabus_writes_are_correctly_by_pcnt(ctx: Context) { const DMA_BUFFER_SIZE: usize = 4; - const DMA_ALIGNMENT: DmaBufBlkSize = DmaBufBlkSize::Size32; // matches dcache line size + const DMA_ALIGNMENT: ExternalBurstSize = ExternalBurstSize::Size32; // matches dcache line size const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; // 64 byte aligned let (_, descriptors) = dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); let buffer = dma_alloc_buffer!(DMA_BUFFER_SIZE, DMA_ALIGNMENT as usize); - let dma_tx_buf = - DmaTxBuf::new_with_block_size(descriptors, buffer, Some(DMA_ALIGNMENT)).unwrap(); + let dma_tx_buf = DmaTxBuf::new_with_config( + descriptors, + buffer, + BurstConfig { + internal: InternalBurstTransfer::default(), + external: ExternalBurstSize::Size32, + }, + ) + .unwrap(); let (rx, rxd, _, _) = dma_buffers!(1, 0); let dma_rx_buf = DmaRxBuf::new(rxd, rx).unwrap(); From 0891295f427f2e87b9cc950f8de0c7138b23a09d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Fri, 15 Nov 2024 14:06:05 +0100 Subject: [PATCH 02/13] Reject transfers if the DMA in incapable of accessing external memory --- esp-hal/src/dma/buffers.rs | 90 +++++++++++++++++++++++++++++---- esp-hal/src/dma/mod.rs | 67 ++++++++++++++++-------- esp-hal/src/soc/mod.rs | 8 +-- esp-hal/src/soc/psram_common.rs | 6 --- 4 files changed, 130 insertions(+), 41 deletions(-) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index 6e0e3b26946..d8c3375b74d 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -4,9 +4,9 @@ use core::{ }; use super::*; -#[cfg(psram_dma)] -use crate::soc::is_valid_psram_address; use 
crate::soc::{is_slice_in_dram, is_slice_in_psram}; +#[cfg(psram_dma)] +use crate::soc::{is_valid_psram_address, is_valid_ram_address}; cfg_if::cfg_if! { if #[cfg(psram_dma)] { @@ -270,6 +270,10 @@ pub struct Preparation { /// The direction of the DMA transfer. pub direction: TransferDirection, + /// Must be `true` if any of the DMA descriptors contain data in PSRAM. + #[cfg(psram_dma)] + pub accesses_psram: bool, + /// Configures the DMA to transfer data in bursts. /// /// The implementation of the buffer must ensure that buffer size @@ -528,19 +532,26 @@ unsafe impl DmaTxBuffer for DmaTxBuf { desc.reset_for_tx(desc.next.is_null()); } - #[cfg(psram_dma)] - if is_valid_psram_address(self.buffer.as_ptr() as usize) { - unsafe { - crate::soc::cache_writeback_addr( - self.buffer.as_ptr() as u32, - self.buffer.len() as u32, - ) - }; + cfg_if::cfg_if! { + if #[cfg(psram_dma)] { + // Optimization: avoid locking for PSRAM range. + let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize); + if is_data_in_psram { + unsafe { + crate::soc::cache_writeback_addr( + self.buffer.as_ptr() as u32, + self.buffer.len() as u32, + ) + }; + } + } } Preparation { start: self.descriptors.head(), direction: TransferDirection::Out, + #[cfg(psram_dma)] + accesses_psram: is_data_in_psram, burst_transfer: self.burst, check_owner: None, } @@ -708,9 +719,26 @@ unsafe impl DmaRxBuffer for DmaRxBuf { desc.reset_for_rx(); } + cfg_if::cfg_if! { + if #[cfg(psram_dma)] { + // Optimization: avoid locking for PSRAM range. + let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize); + if is_data_in_psram { + unsafe { + crate::soc::cache_invalidate_addr( + self.buffer.as_ptr() as u32, + self.buffer.len() as u32, + ) + }; + } + } + } + Preparation { start: self.descriptors.head(), direction: TransferDirection::In, + #[cfg(psram_dma)] + accesses_psram: is_data_in_psram, burst_transfer: self.burst, check_owner: None, } @@ -867,9 +895,26 @@ unsafe impl DmaTxBuffer for DmaRxTxBuf { desc.reset_for_tx(desc.next.is_null()); } + cfg_if::cfg_if! { + if #[cfg(psram_dma)] { + // Optimization: avoid locking for PSRAM range. + let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize); + if is_data_in_psram { + unsafe { + crate::soc::cache_writeback_addr( + self.buffer.as_ptr() as u32, + self.buffer.len() as u32, + ) + }; + } + } + } + Preparation { start: self.tx_descriptors.head(), direction: TransferDirection::Out, + #[cfg(psram_dma)] + accesses_psram: is_data_in_psram, burst_transfer: self.burst, check_owner: None, } @@ -892,9 +937,26 @@ unsafe impl DmaRxBuffer for DmaRxTxBuf { desc.reset_for_rx(); } + cfg_if::cfg_if! { + if #[cfg(psram_dma)] { + // Optimization: avoid locking for PSRAM range. 
+ let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize); + if is_data_in_psram { + unsafe { + crate::soc::cache_invalidate_addr( + self.buffer.as_ptr() as u32, + self.buffer.len() as u32, + ) + }; + } + } + } + Preparation { start: self.rx_descriptors.head(), direction: TransferDirection::In, + #[cfg(psram_dma)] + accesses_psram: is_data_in_psram, burst_transfer: self.burst, check_owner: None, } @@ -1031,6 +1093,8 @@ unsafe impl DmaRxBuffer for DmaRxStreamBuf { Preparation { start: self.descriptors.as_mut_ptr(), direction: TransferDirection::In, + #[cfg(psram_dma)] + accesses_psram: false, burst_transfer: self.burst, // Whilst we give ownership of the descriptors the DMA, the correctness of this buffer @@ -1238,6 +1302,8 @@ unsafe impl DmaTxBuffer for EmptyBuf { Preparation { start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() }, direction: TransferDirection::Out, + #[cfg(psram_dma)] + accesses_psram: false, burst_transfer: BurstConfig::default(), // As we don't give ownership of the descriptor to the DMA, it's important that the DMA @@ -1263,6 +1329,8 @@ unsafe impl DmaRxBuffer for EmptyBuf { Preparation { start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() }, direction: TransferDirection::In, + #[cfg(psram_dma)] + accesses_psram: false, burst_transfer: BurstConfig::default(), // As we don't give ownership of the descriptor to the DMA, it's important that the DMA @@ -1335,6 +1403,8 @@ unsafe impl DmaTxBuffer for DmaLoopBuf { fn prepare(&mut self) -> Preparation { Preparation { start: self.descriptor, + #[cfg(psram_dma)] + accesses_psram: false, direction: TransferDirection::Out, burst_transfer: BurstConfig::default(), // The DMA must not check the owner bit, as it is never set. diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index d9e8f71dd71..13b7cbee6f0 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -60,8 +60,6 @@ pub use self::gdma::*; pub use self::m2m::*; #[cfg(pdma)] pub use self::pdma::*; -#[cfg(psram_dma)] -use crate::soc::is_valid_psram_address; use crate::{ interrupt::InterruptHandler, peripheral::{Peripheral, PeripheralRef}, @@ -1919,6 +1917,11 @@ where ) -> Result<(), DmaError> { debug_assert_eq!(preparation.direction, TransferDirection::In); + #[cfg(psram_dma)] + if preparation.accesses_psram && !self.rx_impl.can_access_psram() { + return Err(DmaError::UnsupportedMemoryRegion); + } + #[cfg(psram_dma)] self.set_ext_mem_block_size(preparation.burst_transfer.external.into()); self.rx_impl.set_burst_mode(preparation.burst_transfer); @@ -1960,23 +1963,31 @@ where // NOTE: for RX the `buffer` and `size` need to be aligned but the `len` does // not. TRM section 3.4.9 // Note that DmaBuffer implementations are required to do this for us. - #[cfg(psram_dma)] - for des in chain.descriptors.iter() { - // we are forcing the DMA alignment to the cache line size - // required when we are using dcache - let alignment = crate::soc::cache_get_dcache_line_size() as usize; - if is_valid_psram_address(des.buffer as usize) { - // both the size and address of the buffer must be aligned - if des.buffer as usize % alignment != 0 && des.size() % alignment != 0 { - return Err(DmaError::InvalidAlignment); + cfg_if::cfg_if! 
{ + if #[cfg(psram_dma)] { + let mut uses_psram = false; + let psram_range = crate::soc::psram_range(); + for des in chain.descriptors.iter() { + // we are forcing the DMA alignment to the cache line size + // required when we are using dcache + let alignment = crate::soc::cache_get_dcache_line_size() as usize; + if crate::soc::addr_in_range(des.buffer as usize, psram_range.clone()) { + uses_psram = true; + // both the size and address of the buffer must be aligned + if des.buffer as usize % alignment != 0 && des.size() % alignment != 0 { + return Err(DmaError::InvalidAlignment); + } + crate::soc::cache_invalidate_addr(des.buffer as u32, des.size() as u32); + } } - crate::soc::cache_invalidate_addr(des.buffer as u32, des.size() as u32); } } let preparation = Preparation { start: chain.first().cast_mut(), direction: TransferDirection::In, + #[cfg(psram_dma)] + accesses_psram: uses_psram, burst_transfer: BurstConfig::default(), check_owner: Some(false), }; @@ -2201,6 +2212,11 @@ where ) -> Result<(), DmaError> { debug_assert_eq!(preparation.direction, TransferDirection::Out); + #[cfg(psram_dma)] + if preparation.accesses_psram && !self.tx_impl.can_access_psram() { + return Err(DmaError::UnsupportedMemoryRegion); + } + #[cfg(psram_dma)] self.set_ext_mem_block_size(preparation.burst_transfer.external.into()); self.tx_impl.set_burst_mode(preparation.burst_transfer); @@ -2243,22 +2259,31 @@ where // alignment and writeback the cache for that buffer. // Note that DmaBuffer implementations are required to do this for us. #[cfg(psram_dma)] - for des in chain.descriptors.iter() { - // we are forcing the DMA alignment to the cache line size - // required when we are using dcache - let alignment = crate::soc::cache_get_dcache_line_size() as usize; - if crate::soc::is_valid_psram_address(des.buffer as usize) { - // both the size and address of the buffer must be aligned - if des.buffer as usize % alignment != 0 && des.size() % alignment != 0 { - return Err(DmaError::InvalidAlignment); + cfg_if::cfg_if! { + if #[cfg(psram_dma)] { + let mut uses_psram = false; + let psram_range = crate::soc::psram_range(); + for des in chain.descriptors.iter() { + // we are forcing the DMA alignment to the cache line size + // required when we are using dcache + let alignment = crate::soc::cache_get_dcache_line_size() as usize; + if crate::soc::addr_in_range(des.buffer as usize, psram_range.clone()) { + uses_psram = true; + // both the size and address of the buffer must be aligned + if des.buffer as usize % alignment != 0 && des.size() % alignment != 0 { + return Err(DmaError::InvalidAlignment); + } + crate::soc::cache_writeback_addr(des.buffer as u32, des.size() as u32); + } } - crate::soc::cache_writeback_addr(des.buffer as u32, des.size() as u32); } } let preparation = Preparation { start: chain.first().cast_mut(), direction: TransferDirection::Out, + #[cfg(psram_dma)] + accesses_psram: uses_psram, burst_transfer: BurstConfig::default(), check_owner: Some(false), }; diff --git a/esp-hal/src/soc/mod.rs b/esp-hal/src/soc/mod.rs index 7be7c1ae294..e5a41924589 100644 --- a/esp-hal/src/soc/mod.rs +++ b/esp-hal/src/soc/mod.rs @@ -27,7 +27,7 @@ mod psram_common; #[cfg(any(feature = "quad-psram", feature = "octal-psram"))] static mut MAPPED_PSRAM: MappedPsram = MappedPsram { memory_range: 0..0 }; -fn psram_range_internal() -> Range { +pub(crate) fn psram_range() -> Range { cfg_if::cfg_if! 
{ if #[cfg(any(feature = "quad-psram", feature = "octal-psram"))] { unsafe { MAPPED_PSRAM.memory_range.clone() } @@ -112,12 +112,12 @@ pub(crate) fn is_slice_in_dram(slice: &[T]) -> bool { #[allow(unused)] pub(crate) fn is_valid_psram_address(address: usize) -> bool { - addr_in_range(address, psram_range_internal()) + addr_in_range(address, psram_range()) } #[allow(unused)] pub(crate) fn is_slice_in_psram(slice: &[T]) -> bool { - slice_in_range(slice, psram_range_internal()) + slice_in_range(slice, psram_range()) } #[allow(unused)] @@ -135,6 +135,6 @@ fn slice_in_range(slice: &[T], range: Range) -> bool { addr_in_range(start, range.clone()) && end <= range.end } -fn addr_in_range(addr: usize, range: Range) -> bool { +pub(crate) fn addr_in_range(addr: usize, range: Range) -> bool { range.contains(&addr) } diff --git a/esp-hal/src/soc/psram_common.rs b/esp-hal/src/soc/psram_common.rs index eb114067f2b..9d709956c61 100644 --- a/esp-hal/src/soc/psram_common.rs +++ b/esp-hal/src/soc/psram_common.rs @@ -26,12 +26,6 @@ impl PsramSize { } } -/// Returns the address range available in external memory. -#[cfg(any(feature = "quad-psram", feature = "octal-psram"))] -pub(crate) fn psram_range(_psram: &crate::peripherals::PSRAM) -> Range { - unsafe { super::MAPPED_PSRAM.memory_range.clone() } -} - /// Returns the address and size of the available in external memory. #[cfg(any(feature = "quad-psram", feature = "octal-psram"))] pub fn psram_raw_parts(psram: &crate::peripherals::PSRAM) -> (*mut u8, usize) { From 30ab7b58ce55a22ee70bc5125d10530633fce003 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Mon, 25 Nov 2024 20:09:17 +0100 Subject: [PATCH 03/13] Fix psram --- esp-hal/src/soc/psram_common.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/esp-hal/src/soc/psram_common.rs b/esp-hal/src/soc/psram_common.rs index 9d709956c61..b46e67a1e25 100644 --- a/esp-hal/src/soc/psram_common.rs +++ b/esp-hal/src/soc/psram_common.rs @@ -1,5 +1,3 @@ -use core::ops::Range; - /// Size of PSRAM /// /// [PsramSize::AutoDetect] will try to detect the size of PSRAM @@ -28,7 +26,7 @@ impl PsramSize { /// Returns the address and size of the available in external memory. 
#[cfg(any(feature = "quad-psram", feature = "octal-psram"))] -pub fn psram_raw_parts(psram: &crate::peripherals::PSRAM) -> (*mut u8, usize) { - let range = psram_range(psram); +pub fn psram_raw_parts(_psram: &crate::peripherals::PSRAM) -> (*mut u8, usize) { + let range = crate::soc::psram_range(); (range.start as *mut u8, range.end - range.start) } From d3ad829525a673f730e50f901efdcb9c4253c06f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Tue, 26 Nov 2024 10:39:02 +0100 Subject: [PATCH 04/13] Rename, documentation --- esp-hal/CHANGELOG.md | 4 ++++ esp-hal/MIGRATING-0.22.md | 10 ++++++++-- esp-hal/src/dma/buffers.rs | 8 ++++---- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/esp-hal/CHANGELOG.md b/esp-hal/CHANGELOG.md index 2739b191957..2972a3c4b3a 100644 --- a/esp-hal/CHANGELOG.md +++ b/esp-hal/CHANGELOG.md @@ -29,6 +29,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `OutputOpenDrain::unlisten` (#2625) - Added `{Input, Flex}::wait_for` (#2625) - Peripheral singletons now implement `Debug`, `PartialEq`, `defmt::Format` and `Eq` (except AnyPeripherals) (#2682) +- `BurstConfig`, a device-specific configuration for configuring DMA transfers in burst mode (#2543) +- `{DmaRxBuf, DmaTxBuf, DmaRxTxBuf}::set_burst_config` (#2543) ### Changed @@ -75,6 +77,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - The `Dma` structure has been removed. (#2545) - Removed `embedded-hal 0.2.x` impls and deps from `esp-hal` (#2593) - Removed `Camera::set_` functions (#2610) +- Remove `embedded-hal 0.2.x` impls and deps from `esp-hal` (#2593) +- `DmaTxBuf::{compute_chunk_size, compute_descriptor_count, new_with_block_size}` (#2543) ## [0.22.0] - 2024-11-20 diff --git a/esp-hal/MIGRATING-0.22.md b/esp-hal/MIGRATING-0.22.md index c10e2afebf1..b129771528c 100644 --- a/esp-hal/MIGRATING-0.22.md +++ b/esp-hal/MIGRATING-0.22.md @@ -22,10 +22,10 @@ by `esp_hal::init()`. The channels themselves have been renamed to match other p +let channel = peripherals.DMA_CH2; ``` -### Configuration changes +### Channel configuration changes - `configure_for_async` and `configure` have been removed -- PDMA devices (ESP32, ESP32-S2) provide no configurability +- PDMA devices (ESP32, ESP32-S2) provide no channel configurability - GDMA devices provide `set_priority` to change DMA in/out channel priority ```diff @@ -49,6 +49,12 @@ by `esp_hal::init()`. The channels themselves have been renamed to match other p +.with_dma(dma_channel); ``` +### Burst mode configuration + +Burst mode is now a property of buffers, instead of DMA channels. Configuration can be done by +calling `set_burst_config` on buffers that support it. The configuration options and the +corresponding `BurstConfig` type are device specfic. + ### Usability changes affecting applications Individual channels are no longer wrapped in `Channel`, but they implement the `DmaChannel` trait. diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index d8c3375b74d..83085de618b 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -10,7 +10,7 @@ use crate::soc::{is_valid_psram_address, is_valid_ram_address}; cfg_if::cfg_if! { if #[cfg(psram_dma)] { - /// PSRAM access burst size. + /// Burst size used when transferring to and from external memory. 
#[derive(Clone, Copy, PartialEq, Eq, Debug)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum ExternalBurstSize { @@ -459,7 +459,7 @@ impl DmaTxBuf { /// Note that the hardware is allowed to ignore this setting. If you attempt /// to use burst transfers with improperly aligned buffers, starting the /// transfer will result in [`DmaError::InvalidAlignment`]. - pub fn set_burst_transfer(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { + pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { let len = self.len(); self.configure(burst, len) } @@ -618,7 +618,7 @@ impl DmaRxBuf { /// Note that the hardware is allowed to ignore this setting. If you attempt /// to use burst transfers with improperly aligned buffers, starting the /// transfer will result in [`DmaError::InvalidAlignment`]. - pub fn set_burst_transfer(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { + pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { let len = self.len(); self.configure(burst, len) } @@ -816,7 +816,7 @@ impl DmaRxTxBuf { /// Note that the hardware is allowed to ignore this setting. If you attempt /// to use burst transfers with improperly aligned buffers, starting the /// transfer will result in [`DmaError::InvalidAlignment`]. - pub fn set_burst_transfer(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { + pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { let len = self.len(); self.configure(burst, len) } From 3bf226f09a3f5378746e85660c7fd0c5d5ac01e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Wed, 27 Nov 2024 10:13:47 +0100 Subject: [PATCH 05/13] Address a few review comments --- esp-hal/src/dma/buffers.rs | 51 +++++-------- esp-hal/src/dma/m2m.rs | 17 ----- esp-hal/src/dma/mod.rs | 75 +++++++------------ examples/src/bin/spi_loopback_dma_psram.rs | 4 +- hil-test/tests/spi_half_duplex_write_psram.rs | 14 ++-- 5 files changed, 55 insertions(+), 106 deletions(-) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index 83085de618b..caf3dff0db3 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -13,7 +13,7 @@ cfg_if::cfg_if! { /// Burst size used when transferring to and from external memory. #[derive(Clone, Copy, PartialEq, Eq, Debug)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] - pub enum ExternalBurstSize { + pub enum ExternalBurstConfig { /// 16 bytes Size16 = 16, @@ -24,12 +24,12 @@ cfg_if::cfg_if! { Size64 = 64, } - impl ExternalBurstSize { + impl ExternalBurstConfig { /// The default external memory burst length. pub const DEFAULT: Self = Self::Size16; } - impl Default for ExternalBurstSize { + impl Default for ExternalBurstConfig { fn default() -> Self { Self::DEFAULT } @@ -38,7 +38,7 @@ cfg_if::cfg_if! { /// Internal memory access burst mode. #[derive(Clone, Copy, PartialEq, Eq, Debug)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] - pub enum InternalBurstTransfer { + pub enum InternalBurstConfig { /// Burst mode is disabled. Disabled, @@ -46,12 +46,12 @@ cfg_if::cfg_if! { Enabled, } - impl InternalBurstTransfer { + impl InternalBurstConfig { /// The default internal burst mode configuration. pub const DEFAULT: Self = Self::Disabled; } - impl Default for InternalBurstTransfer { + impl Default for InternalBurstConfig { fn default() -> Self { Self::DEFAULT } @@ -64,19 +64,19 @@ cfg_if::cfg_if! { /// Configures the burst size for PSRAM transfers. 
/// /// Burst mode is always enabled for PSRAM transfers. - pub external: ExternalBurstSize, + pub external: ExternalBurstConfig, /// Enables or disables the burst mode for internal memory transfers. /// /// The burst size is not configurable. - pub internal: InternalBurstTransfer, + pub internal: InternalBurstConfig, } impl BurstConfig { /// The default burst mode configuration. pub const DEFAULT: Self = Self { - external: ExternalBurstSize::DEFAULT, - internal: InternalBurstTransfer::DEFAULT, + external: ExternalBurstConfig::DEFAULT, + internal: InternalBurstConfig::DEFAULT, }; } @@ -108,19 +108,19 @@ cfg_if::cfg_if! { } } - type InternalBurstTransfer = BurstConfig; + type InternalBurstConfig = BurstConfig; } } #[cfg(psram_dma)] -impl ExternalBurstSize { +impl ExternalBurstConfig { const fn min_psram_alignment(self, direction: TransferDirection) -> usize { - // S2: Specifically, size and buffer address pointer in receive descriptors + // S2 TRM: Specifically, size and buffer address pointer in receive descriptors // should be 16-byte, 32-byte or 64-byte aligned. For data frame whose // length is not a multiple of 16 bytes, 32 bytes, or 64 bytes, EDMA adds // padding bytes to the end. - // S3: Size and Address for IN transfers must be block aligned. For receive + // S3 TRM: Size and Address for IN transfers must be block aligned. For receive // descriptors, if the data length received are not aligned with block size, // GDMA will pad the data received with 0 until they are aligned to // initiate burst transfer. You can read the length field in receive descriptors @@ -128,17 +128,17 @@ impl ExternalBurstSize { if matches!(direction, TransferDirection::In) { self as usize } else { - // S2: Size, length and buffer address pointer in transmit descriptors are not - // necessarily aligned with block size. + // S2 TRM: Size, length and buffer address pointer in transmit descriptors are + // not necessarily aligned with block size. - // S3: Size, length, and buffer address pointer in transmit descriptors do not - // need to be aligned. + // S3 TRM: Size, length, and buffer address pointer in transmit descriptors do + // not need to be aligned. 1 } } } -impl InternalBurstTransfer { +impl InternalBurstConfig { pub(super) fn is_burst_enabled(self) -> bool { !matches!(self, Self::Disabled) } @@ -455,10 +455,6 @@ impl DmaTxBuf { } /// Configures the DMA to use burst transfers to access this buffer. - /// - /// Note that the hardware is allowed to ignore this setting. If you attempt - /// to use burst transfers with improperly aligned buffers, starting the - /// transfer will result in [`DmaError::InvalidAlignment`]. pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { let len = self.len(); self.configure(burst, len) @@ -534,7 +530,6 @@ unsafe impl DmaTxBuffer for DmaTxBuf { cfg_if::cfg_if! { if #[cfg(psram_dma)] { - // Optimization: avoid locking for PSRAM range. let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize); if is_data_in_psram { unsafe { @@ -614,10 +609,6 @@ impl DmaRxBuf { } /// Configures the DMA to use burst transfers to access this buffer. - /// - /// Note that the hardware is allowed to ignore this setting. If you attempt - /// to use burst transfers with improperly aligned buffers, starting the - /// transfer will result in [`DmaError::InvalidAlignment`]. 
pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { let len = self.len(); self.configure(burst, len) @@ -812,10 +803,6 @@ impl DmaRxTxBuf { } /// Configures the DMA to use burst transfers to access this buffer. - /// - /// Note that the hardware is allowed to ignore this setting. If you attempt - /// to use burst transfers with improperly aligned buffers, starting the - /// transfer will result in [`DmaError::InvalidAlignment`]. pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> { let len = self.len(); self.configure(burst, len) diff --git a/esp-hal/src/dma/m2m.rs b/esp-hal/src/dma/m2m.rs index 495d6378843..55356354ebe 100644 --- a/esp-hal/src/dma/m2m.rs +++ b/esp-hal/src/dma/m2m.rs @@ -1,5 +1,3 @@ -#[cfg(esp32s3)] -use crate::dma::DmaExtMemBKSize; use crate::{ dma::{ dma_private::{DmaSupport, DmaSupportRx}, @@ -152,21 +150,6 @@ where .prepare_transfer_without_start(self.peripheral, &self.rx_chain)?; self.channel.rx.set_mem2mem_mode(true); } - #[cfg(esp32s3)] - { - let align = match unsafe { crate::soc::cache_get_dcache_line_size() } { - 16 => DmaExtMemBKSize::Size16, - 32 => DmaExtMemBKSize::Size32, - 64 => DmaExtMemBKSize::Size64, - _ => panic!("unsupported cache line size"), - }; - if crate::soc::is_valid_psram_address(tx_ptr as usize) { - self.channel.tx.set_ext_mem_block_size(align); - } - if crate::soc::is_valid_psram_address(rx_ptr as usize) { - self.channel.rx.set_ext_mem_block_size(align); - } - } self.channel.tx.start_transfer()?; self.channel.rx.start_transfer()?; Ok(DmaTransferRx::new(self)) diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index 13b7cbee6f0..36350d97548 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -363,26 +363,6 @@ impl DmaDescriptor { true => Owner::Dma, } } - - fn iter(&self) -> impl Iterator { - core::iter::successors(Some(self), |d| { - if d.next.is_null() { - None - } else { - Some(unsafe { &*d.next }) - } - }) - } - - fn iter_mut(&mut self) -> impl Iterator { - core::iter::successors(Some(self), |d| { - if d.next.is_null() { - None - } else { - Some(unsafe { &mut *d.next }) - } - }) - } } // The pointers in the descriptor can be Sent. @@ -1131,15 +1111,28 @@ impl<'a> DescriptorSet<'a> { /// Returns an iterator over the linked descriptors. fn linked_iter(&self) -> impl Iterator { - self.descriptors.first().into_iter().flat_map(|d| d.iter()) + let mut was_last = false; + self.descriptors.iter().take_while(move |d| { + if was_last { + false + } else { + was_last = d.next.is_null(); + true + } + }) } /// Returns an iterator over the linked descriptors. fn linked_iter_mut(&mut self) -> impl Iterator { - self.descriptors - .first_mut() - .into_iter() - .flat_map(|d| d.iter_mut()) + let mut was_last = false; + self.descriptors.iter_mut().take_while(move |d| { + if was_last { + false + } else { + was_last = d.next.is_null(); + true + } + }) } /// Associate each descriptor with a chunk of the buffer. 
@@ -1283,12 +1276,12 @@ pub enum DmaExtMemBKSize { } #[cfg(psram_dma)] -impl From for DmaExtMemBKSize { - fn from(size: ExternalBurstSize) -> Self { +impl From for DmaExtMemBKSize { + fn from(size: ExternalBurstConfig) -> Self { match size { - ExternalBurstSize::Size16 => DmaExtMemBKSize::Size16, - ExternalBurstSize::Size32 => DmaExtMemBKSize::Size32, - ExternalBurstSize::Size64 => DmaExtMemBKSize::Size64, + ExternalBurstConfig::Size16 => DmaExtMemBKSize::Size16, + ExternalBurstConfig::Size32 => DmaExtMemBKSize::Size32, + ExternalBurstConfig::Size64 => DmaExtMemBKSize::Size64, } } } @@ -1759,9 +1752,6 @@ pub trait Rx: crate::private::Sealed { fn stop_transfer(&mut self); - #[cfg(psram_dma)] - fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize); - #[cfg(gdma)] fn set_mem2mem_mode(&mut self, value: bool); @@ -1923,7 +1913,8 @@ where } #[cfg(psram_dma)] - self.set_ext_mem_block_size(preparation.burst_transfer.external.into()); + self.rx_impl + .set_ext_mem_block_size(preparation.burst_transfer.external.into()); self.rx_impl.set_burst_mode(preparation.burst_transfer); self.rx_impl.set_descr_burst_mode(true); self.rx_impl.set_check_owner(preparation.check_owner); @@ -2021,11 +2012,6 @@ where self.rx_impl.stop() } - #[cfg(psram_dma)] - fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { - self.rx_impl.set_ext_mem_block_size(size); - } - #[cfg(gdma)] fn set_mem2mem_mode(&mut self, value: bool) { self.rx_impl.set_mem2mem_mode(value); @@ -2094,9 +2080,6 @@ pub trait Tx: crate::private::Sealed { fn stop_transfer(&mut self); - #[cfg(psram_dma)] - fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize); - fn is_done(&self) -> bool { self.pending_out_interrupts() .contains(DmaTxInterrupt::TotalEof) @@ -2218,7 +2201,8 @@ where } #[cfg(psram_dma)] - self.set_ext_mem_block_size(preparation.burst_transfer.external.into()); + self.tx_impl + .set_ext_mem_block_size(preparation.burst_transfer.external.into()); self.tx_impl.set_burst_mode(preparation.burst_transfer); self.tx_impl.set_descr_burst_mode(true); self.tx_impl.set_check_owner(preparation.check_owner); @@ -2323,11 +2307,6 @@ where self.tx_impl.stop() } - #[cfg(psram_dma)] - fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { - self.tx_impl.set_ext_mem_block_size(size); - } - fn listen_out(&self, interrupts: impl Into>) { self.tx_impl.listen(interrupts); } diff --git a/examples/src/bin/spi_loopback_dma_psram.rs b/examples/src/bin/spi_loopback_dma_psram.rs index 45026c163d0..18881077113 100644 --- a/examples/src/bin/spi_loopback_dma_psram.rs +++ b/examples/src/bin/spi_loopback_dma_psram.rs @@ -25,7 +25,7 @@ use esp_backtrace as _; use esp_hal::{ delay::Delay, - dma::{BurstConfig, DmaRxBuf, DmaTxBuf, ExternalBurstSize}, + dma::{BurstConfig, DmaRxBuf, DmaTxBuf, ExternalBurstConfig}, peripheral::Peripheral, prelude::*, spi::{ @@ -51,7 +51,7 @@ macro_rules! 
dma_alloc_buffer { } const DMA_BUFFER_SIZE: usize = 8192; -const DMA_ALIGNMENT: ExternalBurstSize = ExternalBurstSize::Size64; +const DMA_ALIGNMENT: ExternalBurstConfig = ExternalBurstConfig::Size64; const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; #[entry] diff --git a/hil-test/tests/spi_half_duplex_write_psram.rs b/hil-test/tests/spi_half_duplex_write_psram.rs index 023cbda9b09..6a7a5f1dbf7 100644 --- a/hil-test/tests/spi_half_duplex_write_psram.rs +++ b/hil-test/tests/spi_half_duplex_write_psram.rs @@ -7,7 +7,7 @@ use defmt::error; use esp_alloc as _; use esp_hal::{ - dma::{BurstConfig, DmaRxBuf, DmaTxBuf, ExternalBurstSize, InternalBurstTransfer}, + dma::{BurstConfig, DmaRxBuf, DmaTxBuf, ExternalBurstConfig, InternalBurstConfig}, dma_buffers, dma_descriptors_chunk_size, gpio::interconnect::InputSignal, @@ -83,7 +83,7 @@ mod tests { #[test] fn test_spi_writes_are_correctly_by_pcnt(ctx: Context) { const DMA_BUFFER_SIZE: usize = 4; - const DMA_ALIGNMENT: ExternalBurstSize = ExternalBurstSize::Size32; + const DMA_ALIGNMENT: ExternalBurstConfig = ExternalBurstConfig::Size32; const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; let (_, descriptors) = dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); @@ -92,8 +92,8 @@ mod tests { descriptors, buffer, BurstConfig { - internal: InternalBurstTransfer::default(), - external: ExternalBurstSize::Size32, + internal: InternalBurstConfig::default(), + external: ExternalBurstConfig::Size32, }, ) .unwrap(); @@ -141,7 +141,7 @@ mod tests { #[test] fn test_spidmabus_writes_are_correctly_by_pcnt(ctx: Context) { const DMA_BUFFER_SIZE: usize = 4; - const DMA_ALIGNMENT: ExternalBurstSize = ExternalBurstSize::Size32; // matches dcache line size + const DMA_ALIGNMENT: ExternalBurstConfig = ExternalBurstConfig::Size32; // matches dcache line size const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; // 64 byte aligned let (_, descriptors) = dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); @@ -150,8 +150,8 @@ mod tests { descriptors, buffer, BurstConfig { - internal: InternalBurstTransfer::default(), - external: ExternalBurstSize::Size32, + internal: InternalBurstConfig::default(), + external: ExternalBurstConfig::Size32, }, ) .unwrap(); From b83475aa3f7f21563617292093c3f3bfe902ca04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Mon, 2 Dec 2024 13:42:44 +0100 Subject: [PATCH 06/13] Check buffer length, too --- esp-hal/src/dma/buffers.rs | 103 ++++++++++++++++++++++++------------- esp-hal/src/dma/mod.rs | 10 ++-- 2 files changed, 71 insertions(+), 42 deletions(-) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index caf3dff0db3..9d005bce9db 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -8,6 +8,39 @@ use crate::soc::{is_slice_in_dram, is_slice_in_psram}; #[cfg(psram_dma)] use crate::soc::{is_valid_psram_address, is_valid_ram_address}; +/// Error returned from Dma[Rx|Tx|RxTx]Buf operations. +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum DmaBufError { + /// More descriptors are needed for the buffer size. + InsufficientDescriptors, + + /// Descriptors or buffers are not located in a supported memory region. + UnsupportedMemoryRegion, + + /// Buffer address or size is not properly aligned. + InvalidAlignment(DmaAlignmentError), + + /// Invalid chunk size: must be > 0 and <= 4095. 
+ InvalidChunkSize, +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum DmaAlignmentError { + /// Buffer address is not properly aligned. + Address, + + /// Buffer size is not properly aligned. + Size, +} + +impl From for DmaBufError { + fn from(err: DmaAlignmentError) -> Self { + DmaBufError::InvalidAlignment(err) + } +} + cfg_if::cfg_if! { if #[cfg(psram_dma)] { /// Burst size used when transferring to and from external memory. @@ -64,19 +97,19 @@ cfg_if::cfg_if! { /// Configures the burst size for PSRAM transfers. /// /// Burst mode is always enabled for PSRAM transfers. - pub external: ExternalBurstConfig, + pub external_memory: ExternalBurstConfig, /// Enables or disables the burst mode for internal memory transfers. /// /// The burst size is not configurable. - pub internal: InternalBurstConfig, + pub internal_memory: InternalBurstConfig, } impl BurstConfig { /// The default burst mode configuration. pub const DEFAULT: Self = Self { - external: ExternalBurstConfig::DEFAULT, - internal: InternalBurstConfig::DEFAULT, + external_memory: ExternalBurstConfig::DEFAULT, + internal_memory: InternalBurstConfig::DEFAULT, }; } @@ -175,7 +208,7 @@ const fn max(a: usize, b: usize) -> usize { impl BurstConfig { delegate::delegate! { #[cfg(psram_dma)] - to self.internal { + to self.internal_memory { pub(super) const fn min_dram_alignment(self, direction: TransferDirection) -> usize; pub(super) fn is_burst_enabled(self) -> bool; } @@ -192,7 +225,7 @@ impl BurstConfig { let alignment = max(in_alignment, out_alignment); #[cfg(psram_dma)] - let alignment = max(alignment, self.external as usize); + let alignment = max(alignment, self.external_memory as usize); alignment } @@ -213,7 +246,7 @@ impl BurstConfig { if #[cfg(psram_dma)] { let mut alignment = alignment; if is_valid_psram_address(_buffer.as_ptr() as usize) { - alignment = max(alignment, self.external.min_psram_alignment(direction)); + alignment = max(alignment, self.external_memory.min_psram_alignment(direction)); } } } @@ -227,9 +260,24 @@ impl BurstConfig { 4096 - self.min_alignment(buffer, direction) } - fn is_buffer_aligned(self, buffer: &[u8], direction: TransferDirection) -> bool { + fn ensure_buffer_aligned( + self, + buffer: &[u8], + direction: TransferDirection, + ) -> Result<(), DmaAlignmentError> { let alignment = self.min_alignment(buffer, direction); - buffer.as_ptr() as usize % alignment == 0 + if buffer.as_ptr() as usize % alignment != 0 { + return Err(DmaAlignmentError::Address); + } + + // NB: the TRMs suggest that buffer length don't need to be aligned, but + // for IN transfers, we configure the DMA descriptors' size field, which needs + // to be aligned. + if direction == TransferDirection::In && buffer.len() % alignment != 0 { + return Err(DmaAlignmentError::Size); + } + + Ok(()) } fn ensure_buffer_compatible( @@ -244,9 +292,7 @@ impl BurstConfig { return Err(DmaBufError::UnsupportedMemoryRegion); } - if !self.is_buffer_aligned(buffer, direction) { - return Err(DmaBufError::InvalidAlignment); - } + self.ensure_buffer_aligned(buffer, direction)?; Ok(()) } @@ -373,20 +419,6 @@ pub unsafe trait DmaRxBuffer { /// descriptors/buffers. pub struct BufView(T); -/// Error returned from Dma[Rx|Tx|RxTx]Buf operations. 
-#[derive(Debug, PartialEq, Clone, Copy)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -pub enum DmaBufError { - /// More descriptors are needed for the buffer size - InsufficientDescriptors, - /// Descriptors or buffers are not located in a supported memory region - UnsupportedMemoryRegion, - /// Buffer is not aligned to the required size - InvalidAlignment, - /// Invalid chunk size: must be > 0 and <= 4095 - InvalidChunkSize, -} - /// DMA transmit buffer /// /// This is a contiguous buffer linked together by DMA descriptors of length @@ -442,13 +474,12 @@ impl DmaTxBuf { } fn configure(&mut self, burst: BurstConfig, length: usize) -> Result<(), DmaBufError> { - burst.ensure_buffer_compatible(self.buffer, TransferDirection::Out)?; + self.set_length_fallible(length, burst)?; self.descriptors.link_with_buffer( self.buffer, burst.max_chunk_size_for(self.buffer, TransferDirection::Out), )?; - self.set_length_fallible(length, burst)?; self.burst = burst; Ok(()) @@ -480,7 +511,7 @@ impl DmaTxBuf { } fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> { - assert!(len <= self.buffer.len()); + burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?; self.descriptors.set_tx_length( len, @@ -596,13 +627,12 @@ impl DmaRxBuf { } fn configure(&mut self, burst: BurstConfig, length: usize) -> Result<(), DmaBufError> { - burst.ensure_buffer_compatible(self.buffer, TransferDirection::In)?; + self.set_length_fallible(length, burst)?; self.descriptors.link_with_buffer( self.buffer, burst.max_chunk_size_for(self.buffer, TransferDirection::In), )?; - self.set_length_fallible(length, burst)?; self.burst = burst; Ok(()) @@ -635,11 +665,11 @@ impl DmaRxBuf { } fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> { - assert!(len <= self.buffer.len()); + burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?; self.descriptors.set_rx_length( len, - burst.max_chunk_size_for(self.buffer, TransferDirection::In), + burst.max_chunk_size_for(&self.buffer[..len], TransferDirection::In), ) } @@ -784,8 +814,7 @@ impl DmaRxTxBuf { } fn configure(&mut self, burst: BurstConfig, length: usize) -> Result<(), DmaBufError> { - burst.ensure_buffer_compatible(self.buffer, TransferDirection::In)?; - burst.ensure_buffer_compatible(self.buffer, TransferDirection::Out)?; + self.set_length_fallible(length, burst)?; self.rx_descriptors.link_with_buffer( self.buffer, @@ -796,7 +825,6 @@ impl DmaRxTxBuf { burst.max_chunk_size_for(self.buffer, TransferDirection::Out), )?; - self.set_length_fallible(length, burst)?; self.burst = burst; Ok(()) @@ -849,7 +877,8 @@ impl DmaRxTxBuf { } fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> { - assert!(len <= self.buffer.len()); + burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?; + burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?; self.rx_descriptors.set_rx_length( len, diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index 36350d97548..318d8e97878 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -797,11 +797,11 @@ macro_rules! 
dma_loop_buffer { } /// DMA Errors -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum DmaError { /// The alignment of data is invalid - InvalidAlignment, + InvalidAlignment(DmaAlignmentError), /// More descriptors are needed for the buffer size OutOfDescriptors, /// DescriptorError the DMA rejected the descriptor configuration. This @@ -827,7 +827,7 @@ impl From for DmaError { match error { DmaBufError::InsufficientDescriptors => DmaError::OutOfDescriptors, DmaBufError::UnsupportedMemoryRegion => DmaError::UnsupportedMemoryRegion, - DmaBufError::InvalidAlignment => DmaError::InvalidAlignment, + DmaBufError::InvalidAlignment(err) => DmaError::InvalidAlignment(err), DmaBufError::InvalidChunkSize => DmaError::InvalidChunkSize, } } @@ -1914,7 +1914,7 @@ where #[cfg(psram_dma)] self.rx_impl - .set_ext_mem_block_size(preparation.burst_transfer.external.into()); + .set_ext_mem_block_size(preparation.burst_transfer.external_memory.into()); self.rx_impl.set_burst_mode(preparation.burst_transfer); self.rx_impl.set_descr_burst_mode(true); self.rx_impl.set_check_owner(preparation.check_owner); @@ -2202,7 +2202,7 @@ where #[cfg(psram_dma)] self.tx_impl - .set_ext_mem_block_size(preparation.burst_transfer.external.into()); + .set_ext_mem_block_size(preparation.burst_transfer.external_memory.into()); self.tx_impl.set_burst_mode(preparation.burst_transfer); self.tx_impl.set_descr_burst_mode(true); self.tx_impl.set_check_owner(preparation.check_owner); From 5c033d7d19e986d0c99314a8c165ff3ce781177b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Mon, 2 Dec 2024 13:44:12 +0100 Subject: [PATCH 07/13] Return error instead of panic --- esp-hal/src/dma/buffers.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index 9d005bce9db..a3955922fb3 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -12,6 +12,9 @@ use crate::soc::{is_valid_psram_address, is_valid_ram_address}; #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum DmaBufError { + /// The buffer is smaller than the requested size. + BufferTooSmall, + /// More descriptors are needed for the buffer size. 
InsufficientDescriptors, @@ -511,6 +514,9 @@ impl DmaTxBuf { } fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> { + if len > self.capacity() { + return Err(DmaBufError::BufferTooSmall); + } burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?; self.descriptors.set_tx_length( @@ -665,6 +671,9 @@ impl DmaRxBuf { } fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> { + if len > self.capacity() { + return Err(DmaBufError::BufferTooSmall); + } burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?; self.descriptors.set_rx_length( @@ -877,6 +886,9 @@ impl DmaRxTxBuf { } fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> { + if len > self.capacity() { + return Err(DmaBufError::BufferTooSmall); + } burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?; burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?; From e3c4f840b1ab38df72d90c1b51eb642a8e73abb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Mon, 2 Dec 2024 13:58:02 +0100 Subject: [PATCH 08/13] Allow users to only specify the relevant burst config --- esp-hal/src/dma/buffers.rs | 43 ++++++++++++++++--- examples/src/bin/spi_loopback_dma_psram.rs | 13 ++---- hil-test/tests/spi_half_duplex_write_psram.rs | 22 ++-------- 3 files changed, 44 insertions(+), 34 deletions(-) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index a3955922fb3..dec9a4e4ff3 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -121,6 +121,24 @@ cfg_if::cfg_if! { Self::DEFAULT } } + + impl From for BurstConfig { + fn from(internal_memory: InternalBurstConfig) -> Self { + Self { + external_memory: ExternalBurstConfig::DEFAULT, + internal_memory, + } + } + } + + impl From for BurstConfig { + fn from(external_memory: ExternalBurstConfig) -> Self { + Self { + external_memory, + internal_memory: InternalBurstConfig::DEFAULT, + } + } + } } else { /// Burst transfer configuration. 
#[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -462,12 +480,12 @@ impl DmaTxBuf { pub fn new_with_config( descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], - config: BurstConfig, + config: impl Into, ) -> Result { let mut buf = Self { descriptors: DescriptorSet::new(descriptors)?, buffer, - burst: config, + burst: BurstConfig::default(), }; let capacity = buf.capacity(); @@ -476,7 +494,12 @@ impl DmaTxBuf { Ok(buf) } - fn configure(&mut self, burst: BurstConfig, length: usize) -> Result<(), DmaBufError> { + fn configure( + &mut self, + burst: impl Into, + length: usize, + ) -> Result<(), DmaBufError> { + let burst = burst.into(); self.set_length_fallible(length, burst)?; self.descriptors.link_with_buffer( @@ -632,7 +655,12 @@ impl DmaRxBuf { Ok(buf) } - fn configure(&mut self, burst: BurstConfig, length: usize) -> Result<(), DmaBufError> { + fn configure( + &mut self, + burst: impl Into, + length: usize, + ) -> Result<(), DmaBufError> { + let burst = burst.into(); self.set_length_fallible(length, burst)?; self.descriptors.link_with_buffer( @@ -822,7 +850,12 @@ impl DmaRxTxBuf { Ok(buf) } - fn configure(&mut self, burst: BurstConfig, length: usize) -> Result<(), DmaBufError> { + fn configure( + &mut self, + burst: impl Into, + length: usize, + ) -> Result<(), DmaBufError> { + let burst = burst.into(); self.set_length_fallible(length, burst)?; self.rx_descriptors.link_with_buffer( diff --git a/examples/src/bin/spi_loopback_dma_psram.rs b/examples/src/bin/spi_loopback_dma_psram.rs index 18881077113..faceedb7342 100644 --- a/examples/src/bin/spi_loopback_dma_psram.rs +++ b/examples/src/bin/spi_loopback_dma_psram.rs @@ -25,7 +25,7 @@ use esp_backtrace as _; use esp_hal::{ delay::Delay, - dma::{BurstConfig, DmaRxBuf, DmaTxBuf, ExternalBurstConfig}, + dma::{DmaRxBuf, DmaTxBuf, ExternalBurstConfig}, peripheral::Peripheral, prelude::*, spi::{ @@ -76,15 +76,8 @@ fn main() -> ! 
{ tx_buffer.len(), tx_descriptors.len() ); - let mut dma_tx_buf = DmaTxBuf::new_with_config( - tx_descriptors, - tx_buffer, - BurstConfig { - external: DMA_ALIGNMENT, - ..BurstConfig::default() - }, - ) - .unwrap(); + let mut dma_tx_buf = + DmaTxBuf::new_with_config(tx_descriptors, tx_buffer, DMA_ALIGNMENT).unwrap(); let (rx_buffer, rx_descriptors, _, _) = esp_hal::dma_buffers!(DMA_BUFFER_SIZE, 0); info!( "RX: {:p} len {} ({} descripters)", diff --git a/hil-test/tests/spi_half_duplex_write_psram.rs b/hil-test/tests/spi_half_duplex_write_psram.rs index 6a7a5f1dbf7..17358b0505b 100644 --- a/hil-test/tests/spi_half_duplex_write_psram.rs +++ b/hil-test/tests/spi_half_duplex_write_psram.rs @@ -7,7 +7,7 @@ use defmt::error; use esp_alloc as _; use esp_hal::{ - dma::{BurstConfig, DmaRxBuf, DmaTxBuf, ExternalBurstConfig, InternalBurstConfig}, + dma::{DmaRxBuf, DmaTxBuf, ExternalBurstConfig}, dma_buffers, dma_descriptors_chunk_size, gpio::interconnect::InputSignal, @@ -88,15 +88,7 @@ mod tests { let (_, descriptors) = dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); let buffer = dma_alloc_buffer!(DMA_BUFFER_SIZE, DMA_ALIGNMENT as usize); - let mut dma_tx_buf = DmaTxBuf::new_with_config( - descriptors, - buffer, - BurstConfig { - internal: InternalBurstConfig::default(), - external: ExternalBurstConfig::Size32, - }, - ) - .unwrap(); + let mut dma_tx_buf = DmaTxBuf::new_with_config(descriptors, buffer, DMA_ALIGNMENT).unwrap(); let unit = ctx.pcnt_unit; let mut spi = ctx.spi; @@ -146,15 +138,7 @@ mod tests { let (_, descriptors) = dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); let buffer = dma_alloc_buffer!(DMA_BUFFER_SIZE, DMA_ALIGNMENT as usize); - let dma_tx_buf = DmaTxBuf::new_with_config( - descriptors, - buffer, - BurstConfig { - internal: InternalBurstConfig::default(), - external: ExternalBurstConfig::Size32, - }, - ) - .unwrap(); + let dma_tx_buf = DmaTxBuf::new_with_config(descriptors, buffer, DMA_ALIGNMENT).unwrap(); let (rx, rxd, _, _) = dma_buffers!(1, 0); let dma_rx_buf = DmaRxBuf::new(rxd, rx).unwrap(); From 5eb5f07543d3e2e8b6fc2cd2906abd55539ede7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Mon, 2 Dec 2024 14:47:48 +0100 Subject: [PATCH 09/13] Add missing conversion --- esp-hal/src/dma/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index 318d8e97878..d454fa8f87b 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -829,6 +829,7 @@ impl From for DmaError { DmaBufError::UnsupportedMemoryRegion => DmaError::UnsupportedMemoryRegion, DmaBufError::InvalidAlignment(err) => DmaError::InvalidAlignment(err), DmaBufError::InvalidChunkSize => DmaError::InvalidChunkSize, + DmaBufError::BufferTooSmall => DmaError::BufferTooSmall, } } } From b6e044d197198dc676945d9075a8dd8d3ec5f43d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Mon, 2 Dec 2024 14:52:55 +0100 Subject: [PATCH 10/13] Add missing docs --- esp-hal/src/dma/buffers.rs | 1 + esp-hal/src/dma/mod.rs | 14 ++++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index dec9a4e4ff3..a42eaaf2baf 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -28,6 +28,7 @@ pub enum DmaBufError { InvalidChunkSize, } +/// DMA buffer alignment errors. 
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] pub enum DmaAlignmentError { diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index d454fa8f87b..2862bf53b4a 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -1966,8 +1966,11 @@ where if crate::soc::addr_in_range(des.buffer as usize, psram_range.clone()) { uses_psram = true; // both the size and address of the buffer must be aligned - if des.buffer as usize % alignment != 0 && des.size() % alignment != 0 { - return Err(DmaError::InvalidAlignment); + if des.buffer as usize % alignment != 0 { + return Err(DmaError::InvalidAlignment(DmaAlignmentError::Address)); + } + if des.size() % alignment != 0 { + return Err(DmaError::InvalidAlignment(DmaAlignmentError::Size)); } crate::soc::cache_invalidate_addr(des.buffer as u32, des.size() as u32); } @@ -2255,8 +2258,11 @@ where if crate::soc::addr_in_range(des.buffer as usize, psram_range.clone()) { uses_psram = true; // both the size and address of the buffer must be aligned - if des.buffer as usize % alignment != 0 && des.size() % alignment != 0 { - return Err(DmaError::InvalidAlignment); + if des.buffer as usize % alignment != 0 { + return Err(DmaError::InvalidAlignment(DmaAlignmentError::Address)); + } + if des.size() % alignment != 0 { + return Err(DmaError::InvalidAlignment(DmaAlignmentError::Size)); } crate::soc::cache_writeback_addr(des.buffer as u32, des.size() as u32); } From f595fc272debaf94df14d1b5b17c3b0634780bdd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Mon, 2 Dec 2024 15:35:26 +0100 Subject: [PATCH 11/13] Fix IN alignment requirements --- esp-hal/CHANGELOG.md | 1 - esp-hal/src/dma/buffers.rs | 20 ++++++++-- hil-test/tests/spi_full_duplex.rs | 66 ++++++++++++++++++------------- 3 files changed, 56 insertions(+), 31 deletions(-) diff --git a/esp-hal/CHANGELOG.md b/esp-hal/CHANGELOG.md index 2972a3c4b3a..ff4cf0d20fa 100644 --- a/esp-hal/CHANGELOG.md +++ b/esp-hal/CHANGELOG.md @@ -77,7 +77,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - The `Dma` structure has been removed. (#2545) - Removed `embedded-hal 0.2.x` impls and deps from `esp-hal` (#2593) - Removed `Camera::set_` functions (#2610) -- Remove `embedded-hal 0.2.x` impls and deps from `esp-hal` (#2593) - `DmaTxBuf::{compute_chunk_size, compute_descriptor_count, new_with_block_size}` (#2543) ## [0.22.0] - 2024-11-20 diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index a42eaaf2baf..54412b9451d 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -194,14 +194,28 @@ impl ExternalBurstConfig { } impl InternalBurstConfig { - pub(super) fn is_burst_enabled(self) -> bool { + pub(super) const fn is_burst_enabled(self) -> bool { !matches!(self, Self::Disabled) } + // Size and address alignment as those come in pairs on current hardware. const fn min_dram_alignment(self, direction: TransferDirection) -> usize { - // IN transfers must be word aligned if matches!(direction, TransferDirection::In) { - 4 + // NOTE(danielb): commenting this check is incorrect as per TRM, but works. + // we'll need to restore this once peripherals can read a + // different amount of data than what is configured in the + // buffer. + // if cfg!(esp32) { + // // NOTE: The size must be word-aligned. + // // NOTE: The buffer address must be word-aligned + // 4 + // } + if self.is_burst_enabled() { + // As described in "Accessing Internal Memory" paragraphs in the various TRMs. 
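+                // Burst-enabled receive (IN) transfers into internal RAM need the
+                // buffer address and transfer size to be word (4-byte) aligned.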
+ 4 + } else { + 1 + } } else { // OUT transfers have no alignment requirements, except for ESP32, which is // described below. diff --git a/hil-test/tests/spi_full_duplex.rs b/hil-test/tests/spi_full_duplex.rs index 58ad1e46213..30a3323e1b6 100644 --- a/hil-test/tests/spi_full_duplex.rs +++ b/hil-test/tests/spi_full_duplex.rs @@ -184,7 +184,8 @@ mod tests { #[test] #[cfg(pcnt)] fn test_dma_read_dma_write_pcnt(ctx: Context) { - const DMA_BUFFER_SIZE: usize = 5; + const DMA_BUFFER_SIZE: usize = 8; + const TRANSFER_SIZE: usize = 5; let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); @@ -200,27 +201,28 @@ mod tests { dma_tx_buf.as_mut_slice().fill(0b0110_1010); for i in 1..4 { - dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]); + dma_rx_buf.as_mut_slice()[..TRANSFER_SIZE].copy_from_slice(&[5; TRANSFER_SIZE]); let transfer = spi - .read(dma_rx_buf.len(), dma_rx_buf) + .read(TRANSFER_SIZE, dma_rx_buf) .map_err(|e| e.0) .unwrap(); (spi, dma_rx_buf) = transfer.wait(); - assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]); + assert_eq!(&dma_rx_buf.as_slice()[..TRANSFER_SIZE], &[0; TRANSFER_SIZE]); let transfer = spi - .write(dma_tx_buf.len(), dma_tx_buf) + .write(TRANSFER_SIZE, dma_tx_buf) .map_err(|e| e.0) .unwrap(); (spi, dma_tx_buf) = transfer.wait(); - assert_eq!(unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _); + assert_eq!(unit.value(), (i * 3 * TRANSFER_SIZE) as _); } } #[test] #[cfg(pcnt)] fn test_dma_read_dma_transfer_pcnt(ctx: Context) { - const DMA_BUFFER_SIZE: usize = 5; + const DMA_BUFFER_SIZE: usize = 8; + const TRANSFER_SIZE: usize = 5; let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); @@ -236,20 +238,20 @@ mod tests { dma_tx_buf.as_mut_slice().fill(0b0110_1010); for i in 1..4 { - dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]); + dma_rx_buf.as_mut_slice()[..TRANSFER_SIZE].copy_from_slice(&[5; TRANSFER_SIZE]); let transfer = spi - .read(dma_rx_buf.len(), dma_rx_buf) + .read(TRANSFER_SIZE, dma_rx_buf) .map_err(|e| e.0) .unwrap(); (spi, dma_rx_buf) = transfer.wait(); - assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]); + assert_eq!(&dma_rx_buf.as_slice()[..TRANSFER_SIZE], &[0; TRANSFER_SIZE]); let transfer = spi - .transfer(dma_rx_buf.len(), dma_rx_buf, dma_tx_buf.len(), dma_tx_buf) + .transfer(TRANSFER_SIZE, dma_rx_buf, TRANSFER_SIZE, dma_tx_buf) .map_err(|e| e.0) .unwrap(); (spi, (dma_rx_buf, dma_tx_buf)) = transfer.wait(); - assert_eq!(unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _); + assert_eq!(unit.value(), (i * 3 * TRANSFER_SIZE) as _); } } @@ -285,7 +287,9 @@ mod tests { #[test] fn test_asymmetric_dma_transfer(ctx: Context) { - let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(2, 4); + const WRITE_SIZE: usize = 4; + const READ_SIZE: usize = 2; + let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(4, 4); let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); @@ -293,22 +297,28 @@ mod tests { let spi = ctx.spi.with_dma(ctx.dma_channel); let transfer = spi - .transfer(dma_rx_buf.len(), dma_rx_buf, dma_tx_buf.len(), dma_tx_buf) + .transfer(READ_SIZE, dma_rx_buf, 
WRITE_SIZE, dma_tx_buf) .map_err(|e| e.0) .unwrap(); let (spi, (dma_rx_buf, mut dma_tx_buf)) = transfer.wait(); - assert_eq!(dma_tx_buf.as_slice()[0..2], dma_rx_buf.as_slice()[0..2]); + assert_eq!( + dma_tx_buf.as_slice()[0..READ_SIZE], + dma_rx_buf.as_slice()[0..READ_SIZE] + ); // Try transfer again to make sure DMA isn't in a broken state. dma_tx_buf.fill(&[0xaa, 0xdd, 0xef, 0xbe]); let transfer = spi - .transfer(dma_rx_buf.len(), dma_rx_buf, dma_tx_buf.len(), dma_tx_buf) + .transfer(READ_SIZE, dma_rx_buf, WRITE_SIZE, dma_tx_buf) .map_err(|e| e.0) .unwrap(); let (_, (dma_rx_buf, dma_tx_buf)) = transfer.wait(); - assert_eq!(dma_tx_buf.as_slice()[0..2], dma_rx_buf.as_slice()[0..2]); + assert_eq!( + dma_tx_buf.as_slice()[0..READ_SIZE], + dma_rx_buf.as_slice()[0..READ_SIZE] + ); } #[test] @@ -373,7 +383,8 @@ mod tests { #[test] #[cfg(pcnt)] async fn test_async_dma_read_dma_write_pcnt(ctx: Context) { - const DMA_BUFFER_SIZE: usize = 5; + const DMA_BUFFER_SIZE: usize = 8; + const TRANSFER_SIZE: usize = 5; let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); @@ -388,25 +399,26 @@ mod tests { .channel0 .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); - let mut receive = [0; DMA_BUFFER_SIZE]; + let mut receive = [0; TRANSFER_SIZE]; // Fill the buffer where each byte has 3 pos edges. - let transmit = [0b0110_1010; DMA_BUFFER_SIZE]; + let transmit = [0b0110_1010; TRANSFER_SIZE]; for i in 1..4 { - receive.copy_from_slice(&[5, 5, 5, 5, 5]); + receive.copy_from_slice(&[5; TRANSFER_SIZE]); SpiBusAsync::read(&mut spi, &mut receive).await.unwrap(); - assert_eq!(receive, [0, 0, 0, 0, 0]); + assert_eq!(receive, [0; TRANSFER_SIZE]); SpiBusAsync::write(&mut spi, &transmit).await.unwrap(); - assert_eq!(ctx.pcnt_unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _); + assert_eq!(ctx.pcnt_unit.value(), (i * 3 * TRANSFER_SIZE) as _); } } #[test] #[cfg(pcnt)] async fn test_async_dma_read_dma_transfer_pcnt(ctx: Context) { - const DMA_BUFFER_SIZE: usize = 5; + const DMA_BUFFER_SIZE: usize = 8; + const TRANSFER_SIZE: usize = 5; let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); @@ -421,10 +433,10 @@ mod tests { .channel0 .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); - let mut receive = [0; DMA_BUFFER_SIZE]; + let mut receive = [0; TRANSFER_SIZE]; // Fill the buffer where each byte has 3 pos edges. 
- let transmit = [0b0110_1010; DMA_BUFFER_SIZE]; + let transmit = [0b0110_1010; TRANSFER_SIZE]; for i in 1..4 { receive.copy_from_slice(&[5, 5, 5, 5, 5]); @@ -434,7 +446,7 @@ mod tests { SpiBusAsync::transfer(&mut spi, &mut receive, &transmit) .await .unwrap(); - assert_eq!(ctx.pcnt_unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _); + assert_eq!(ctx.pcnt_unit.value(), (i * 3 * TRANSFER_SIZE) as _); } } From 8b67e1e9c8baf1bc8f553fe2bbcd0ca026475dc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Tue, 3 Dec 2024 16:09:48 +0100 Subject: [PATCH 12/13] Fix test --- esp-hal/src/dma/buffers.rs | 2 ++ esp-hal/src/dma/mod.rs | 6 ++++++ hil-test/tests/spi_full_duplex.rs | 6 ++++++ 3 files changed, 14 insertions(+) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index 54412b9451d..b96dc02af8b 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -345,6 +345,8 @@ pub enum TransferDirection { } /// Holds all the information needed to configure a DMA channel for a transfer. +#[derive(PartialEq, Eq, Debug)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] pub struct Preparation { /// The descriptor the DMA will start from. pub start: *mut DmaDescriptor, diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index 2862bf53b4a..e1421153854 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -1908,6 +1908,9 @@ where ) -> Result<(), DmaError> { debug_assert_eq!(preparation.direction, TransferDirection::In); + debug!("Preparing RX transfer {:?}", preparation); + trace!("First descriptor {:?}", unsafe { &*preparation.start }); + #[cfg(psram_dma)] if preparation.accesses_psram && !self.rx_impl.can_access_psram() { return Err(DmaError::UnsupportedMemoryRegion); @@ -2199,6 +2202,9 @@ where ) -> Result<(), DmaError> { debug_assert_eq!(preparation.direction, TransferDirection::Out); + debug!("Preparing TX transfer {:?}", preparation); + trace!("First descriptor {:?}", unsafe { &*preparation.start }); + #[cfg(psram_dma)] if preparation.accesses_psram && !self.tx_impl.can_access_psram() { return Err(DmaError::UnsupportedMemoryRegion); diff --git a/hil-test/tests/spi_full_duplex.rs b/hil-test/tests/spi_full_duplex.rs index 30a3323e1b6..9c587ef6fa4 100644 --- a/hil-test/tests/spi_full_duplex.rs +++ b/hil-test/tests/spi_full_duplex.rs @@ -197,6 +197,9 @@ mod tests { unit.channel0 .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); + dma_rx_buf.set_length(TRANSFER_SIZE); + dma_tx_buf.set_length(TRANSFER_SIZE); + // Fill the buffer where each byte has 3 pos edges. dma_tx_buf.as_mut_slice().fill(0b0110_1010); @@ -234,6 +237,9 @@ mod tests { unit.channel0 .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); + dma_rx_buf.set_length(TRANSFER_SIZE); + dma_tx_buf.set_length(TRANSFER_SIZE); + // Fill the buffer where each byte has 3 pos edges. 
dma_tx_buf.as_mut_slice().fill(0b0110_1010); From 62f9d33b4ab8da7604af55747c4919d847b95cbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Tue, 3 Dec 2024 19:07:06 +0100 Subject: [PATCH 13/13] Deduplicate chunk size literal --- esp-hal/src/dma/buffers.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index b96dc02af8b..d28a3ecd2cf 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -266,13 +266,20 @@ impl BurstConfig { alignment } + const fn chunk_size_for_alignment(alignment: usize) -> usize { + // DMA descriptors have a 12-bit field for the size/length of the buffer they + // point at. As there is no such thing as 0-byte alignment, this means the + // maximum size is 4095 bytes. + 4096 - alignment + } + /// Calculates a chunk size that is compatible with the current burst /// configuration's alignment requirements. /// /// This is an over-estimation so that Descriptors can be safely used with /// any DMA channel in any direction. pub const fn max_compatible_chunk_size(self) -> usize { - 4096 - self.min_compatible_alignment() + Self::chunk_size_for_alignment(self.min_compatible_alignment()) } fn min_alignment(self, _buffer: &[u8], direction: TransferDirection) -> usize { @@ -293,7 +300,7 @@ impl BurstConfig { // Note: this function ignores address alignment as we assume the buffers are // aligned. fn max_chunk_size_for(self, buffer: &[u8], direction: TransferDirection) -> usize { - 4096 - self.min_alignment(buffer, direction) + Self::chunk_size_for_alignment(self.min_alignment(buffer, direction)) } fn ensure_buffer_aligned(