Reuse buffer preparation code
bugadani committed Nov 21, 2024
1 parent a17b06f · commit 44d8969
Showing 2 changed files with 93 additions and 64 deletions.
esp-hal/src/dma/buffers.rs: 18 changes (13 additions, 5 deletions)
@@ -45,8 +45,15 @@ pub struct Preparation {

     /// Block size for PSRAM transfers.
     ///
-    /// The implementation of the buffer must provide a block size if the data
-    /// is in PSRAM.
+    /// If the buffer is in PSRAM, the implementation must ensure the following:
+    ///
+    /// - The implementation of the buffer must provide a non-`None` block size.
+    /// - For [`TransferDirection::In`] transfers, the implementation of the
+    ///   buffer must invalidate the cache that contains the buffer before the
+    ///   DMA starts.
+    /// - For [`TransferDirection::Out`] transfers, the implementation of the
+    ///   buffer must write back the cache that contains the buffer before the
+    ///   DMA starts.
     #[cfg(esp32s3)]
     pub external_memory_block_size: Option<DmaBufBlkSize>,

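To make the cache-maintenance contract concrete, below is a minimal sketch of how a PSRAM-backed buffer might satisfy it for an `Out` transfer in its `prepare()` implementation. `is_in_psram` and `write_back_cache` are hypothetical helpers standing in for the chip's real address-range and cache primitives, and `DmaBufBlkSize::Size32` is an assumed variant name:

```rust
// Sketch only, not part of this commit: a PSRAM-backed TX buffer building its
// `Preparation`. Helper names and the block-size variant are assumptions.
fn prepare_psram_tx(descriptors: *mut DmaDescriptor, data: &[u8]) -> Preparation {
    if is_in_psram(data.as_ptr()) {
        // Out transfer: the DMA engine reads this memory directly, so any
        // dirty cache lines covering it must be written back first.
        write_back_cache(data.as_ptr(), data.len());
    }

    Preparation {
        start: descriptors,
        // Must be non-`None` because the data lives in PSRAM.
        #[cfg(esp32s3)]
        external_memory_block_size: Some(DmaBufBlkSize::Size32),
        direction: TransferDirection::Out,
        burst_transfer: BurstTransfer::Disabled,
        check_owner: Some(false),
    }
}
```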
@@ -55,9 +62,10 @@ pub struct Preparation {
     /// The implementation of the buffer must ensure that burst mode is only
     /// enabled when alignment requirements are met.
     ///
-    /// There are no additional alignment requirements for TX burst transfers,
-    /// but RX transfers require all descriptors to have buffer pointers and
-    /// sizes that are a multiple of 4 (word aligned).
+    /// There are no additional alignment requirements for
+    /// [`TransferDirection::Out`] burst transfers, but
+    /// [`TransferDirection::In`] transfers require all descriptors to have
+    /// buffer pointers and sizes that are a multiple of 4 (word aligned).
     pub burst_transfer: BurstTransfer,

     /// Configures the "check owner" feature of the DMA channel.
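The `In`-direction rule is mechanical, so an implementation can check it before enabling burst mode. A hedged sketch, assuming `DmaDescriptor` exposes its buffer pointer as `buffer` and its buffer size as `size()`:

```rust
// Sketch: burst mode is only safe for In transfers when every descriptor's
// buffer pointer and buffer size are word (4-byte) aligned; Out transfers
// carry no extra requirement. Accessor names are assumptions.
fn in_burst_allowed(descriptors: &[DmaDescriptor]) -> bool {
    descriptors
        .iter()
        .all(|d| d.buffer as usize % 4 == 0 && d.size() % 4 == 0)
}
```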
esp-hal/src/dma/mod.rs: 139 changes (80 additions, 59 deletions)
@@ -1780,6 +1780,27 @@
     pub fn set_priority(&mut self, priority: DmaPriority) {
        self.rx_impl.set_priority(priority);
     }
+
+    fn do_prepare(
+        &mut self,
+        preparation: Preparation,
+        peri: DmaPeripheral,
+    ) -> Result<(), DmaError> {
+        debug_assert_eq!(preparation.direction, TransferDirection::In);
+
+        self.rx_impl.set_burst_mode(preparation.burst_transfer);
+        self.rx_impl.set_descr_burst_mode(true);
+        self.rx_impl.set_check_owner(preparation.check_owner);
+
+        compiler_fence(core::sync::atomic::Ordering::SeqCst);
+
+        self.rx_impl.clear_all();
+        self.rx_impl.reset();
+        self.rx_impl.set_link_addr(preparation.start as u32);
+        self.rx_impl.set_peripheral(peri as u8);
+
+        Ok(())
+    }
 }

 impl<M, CH> crate::private::Sealed for ChannelRx<'_, M, CH>
@@ -1794,24 +1815,18 @@ where
     M: Mode,
     CH: DmaChannel,
 {
+    // TODO: used by I2S, which should be rewritten to use the Preparation-based
+    // API.
     unsafe fn prepare_transfer_without_start(
         &mut self,
         peri: DmaPeripheral,
         chain: &DescriptorChain,
     ) -> Result<(), DmaError> {
-        // if self.burst_mode
-        //     && chain
-        //         .descriptors
-        //         .iter()
-        //         .any(|d| d.len() % 4 != 0 || d.buffer as u32 % 4 != 0)
-        //{
-        //     return Err(DmaError::InvalidAlignment);
-        //}
-
-        // for esp32s3 we check each descriptor buffer that points to psram for
-        // alignment and invalidate the cache for that buffer
+        // For ESP32-S3 we check each descriptor buffer that points to PSRAM for
+        // alignment and invalidate the cache for that buffer.
         // NOTE: for RX the `buffer` and `size` need to be aligned but the `len` does
         // not. TRM section 3.4.9
+        // Note that DmaBuffer implementations are required to do this for us.
         #[cfg(esp32s3)]
         for des in chain.descriptors.iter() {
             // we are forcing the DMA alignment to the cache line size
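The elided loop body enforces the rule these comments describe. A hedged sketch of the per-descriptor check, with hypothetical `is_in_psram` and `invalidate_cache` helpers and an illustrative cache-line size:

```rust
// Sketch of the RX-side rule: a descriptor buffer in PSRAM must be aligned to
// the cache line size, and its cache lines must be invalidated before the DMA
// writes to the buffer. Helper names and the line size are assumptions.
const CACHE_LINE_SIZE: usize = 32; // illustrative; the real value is chip-specific

fn prepare_rx_descriptor(d: &DmaDescriptor) -> Result<(), DmaError> {
    if is_in_psram(d.buffer) {
        if d.buffer as usize % CACHE_LINE_SIZE != 0 || d.size() % CACHE_LINE_SIZE != 0 {
            return Err(DmaError::InvalidAlignment);
        }
        // In transfer: the DMA writes this memory, so stale cache lines must
        // be dropped now, or they would later mask the freshly written data.
        invalidate_cache(d.buffer, d.size());
    }
    Ok(())
}
```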
@@ -1826,14 +1841,17 @@
             }
         }

-        compiler_fence(core::sync::atomic::Ordering::SeqCst);
-
-        self.rx_impl.clear_all();
-        self.rx_impl.reset();
-        self.rx_impl.set_link_addr(chain.first() as u32);
-        self.rx_impl.set_peripheral(peri as u8);
-
-        Ok(())
+        self.do_prepare(
+            Preparation {
+                start: chain.first().cast_mut(),
+                #[cfg(esp32s3)]
+                external_memory_block_size: None,
+                direction: TransferDirection::In,
+                burst_transfer: BurstTransfer::Disabled,
+                check_owner: Some(false),
+            },
+            peri,
+        )
     }

     unsafe fn prepare_transfer<BUF: DmaRxBuffer>(
@@ -1843,20 +1861,7 @@
     ) -> Result<(), DmaError> {
         let preparation = buffer.prepare();

-        debug_assert_eq!(preparation.direction, TransferDirection::In);
-
-        self.rx_impl.set_burst_mode(preparation.burst_transfer);
-        self.rx_impl.set_descr_burst_mode(true);
-        self.rx_impl.set_check_owner(preparation.check_owner);
-
-        compiler_fence(core::sync::atomic::Ordering::SeqCst);
-
-        self.rx_impl.clear_all();
-        self.rx_impl.reset();
-        self.rx_impl.set_link_addr(preparation.start as u32);
-        self.rx_impl.set_peripheral(peri as u8);
-
-        Ok(())
+        self.do_prepare(preparation, peri)
     }

     fn start_transfer(&mut self) -> Result<(), DmaError> {
@@ -2065,6 +2070,32 @@
     pub fn set_priority(&mut self, priority: DmaPriority) {
         self.tx_impl.set_priority(priority);
     }
+
+    fn do_prepare(
+        &mut self,
+        preparation: Preparation,
+        peri: DmaPeripheral,
+    ) -> Result<(), DmaError> {
+        debug_assert_eq!(preparation.direction, TransferDirection::Out);
+
+        #[cfg(esp32s3)]
+        if let Some(block_size) = preparation.external_memory_block_size {
+            self.set_ext_mem_block_size(block_size.into());
+        }
+
+        self.tx_impl.set_burst_mode(preparation.burst_transfer);
+        self.tx_impl.set_descr_burst_mode(true);
+        self.tx_impl.set_check_owner(preparation.check_owner);
+
+        compiler_fence(core::sync::atomic::Ordering::SeqCst);
+
+        self.tx_impl.clear_all();
+        self.tx_impl.reset();
+        self.tx_impl.set_link_addr(preparation.start as u32);
+        self.tx_impl.set_peripheral(peri as u8);
+
+        Ok(())
+    }
 }

 impl<M, CH> crate::private::Sealed for ChannelTx<'_, M, CH>
@@ -2079,6 +2110,8 @@ where
     M: Mode,
     CH: DmaChannel,
 {
+    // TODO: used by I2S, which should be rewritten to use the Preparation-based
+    // API.
     unsafe fn prepare_transfer_without_start(
         &mut self,
         peri: DmaPeripheral,
@@ -2087,7 +2120,8 @@
         // Based on the ESP32-S3 TRM the alignment check is not needed for TX

         // For esp32s3 we check each descriptor buffer that points to PSRAM for
-        // alignment and writeback the cache for that buffer
+        // alignment and writeback the cache for that buffer.
+        // Note that DmaBuffer implementations are required to do this for us.
         #[cfg(esp32s3)]
         for des in chain.descriptors.iter() {
             // we are forcing the DMA alignment to the cache line size
@@ -2102,12 +2136,17 @@
             }
         }

-        compiler_fence(core::sync::atomic::Ordering::SeqCst);
-
-        self.tx_impl.clear_all();
-        self.tx_impl.reset();
-        self.tx_impl.set_link_addr(chain.first() as u32);
-        self.tx_impl.set_peripheral(peri as u8);
+        self.do_prepare(
+            Preparation {
+                start: chain.first().cast_mut(),
+                #[cfg(esp32s3)]
+                external_memory_block_size: None,
+                direction: TransferDirection::Out,
+                burst_transfer: BurstTransfer::Disabled,
+                check_owner: Some(false),
+            },
+            peri,
+        )?;

         // enable descriptor write back in circular mode
         self.tx_impl
@@ -2123,25 +2162,7 @@
     ) -> Result<(), DmaError> {
         let preparation = buffer.prepare();

-        debug_assert_eq!(preparation.direction, TransferDirection::Out);
-
-        #[cfg(esp32s3)]
-        if let Some(block_size) = preparation.external_memory_block_size {
-            self.set_ext_mem_block_size(block_size.into());
-        }
-
-        self.tx_impl.set_burst_mode(preparation.burst_transfer);
-        self.tx_impl.set_descr_burst_mode(true);
-        self.tx_impl.set_check_owner(preparation.check_owner);
-
-        compiler_fence(core::sync::atomic::Ordering::SeqCst);
-
-        self.tx_impl.clear_all();
-        self.tx_impl.reset();
-        self.tx_impl.set_link_addr(preparation.start as u32);
-        self.tx_impl.set_peripheral(peri as u8);
-
-        Ok(())
+        self.do_prepare(preparation, peri)
     }

     fn start_transfer(&mut self) -> Result<(), DmaError> {
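With both entry points funneled through `do_prepare`, a caller-side TX transfer reduces to the sketch below. This is a hedged illustration only: the exact `prepare_transfer` signature is abbreviated in this diff, and `Blocking` plus the relevant trait methods are assumed to be in scope.

```rust
// Sketch: driving a transfer through the Preparation-based API after this
// commit. `peri` selects the peripheral; `buf` is any `DmaTxBuffer`.
fn send_once<BUF: DmaTxBuffer>(
    tx: &mut ChannelTx<'_, Blocking, impl DmaChannel>,
    peri: DmaPeripheral,
    buf: &mut BUF,
) -> Result<(), DmaError> {
    unsafe {
        // The buffer performs its own cache maintenance and returns a
        // `Preparation`; `do_prepare` then checks the direction, programs
        // burst/owner-check modes and points the channel at the descriptors.
        tx.prepare_transfer(peri, buf)?;
    }
    tx.start_transfer()
}
```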