Reuse buffer preparation code
bugadani committed Nov 14, 2024
1 parent d79a9ab commit c71ed16
Showing 2 changed files with 93 additions and 64 deletions.
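In outline, the commit replaces the register setup that was duplicated between `prepare_transfer_without_start` and `prepare_transfer` on each channel half with a shared `do_prepare` helper. A minimal, self-contained sketch of the shape of the change, using stand-in types rather than the real esp-hal definitions:

```rust
// Toy illustration only: every type here is a stand-in, not esp-hal's.
#[derive(Debug, PartialEq)]
enum TransferDirection {
    In,
    Out,
}

struct Preparation {
    start: *mut u8, // first descriptor in the chain
    direction: TransferDirection,
    check_owner: Option<bool>,
}

struct ChannelRx {
    link_addr: u32,
}

impl ChannelRx {
    // The extracted helper: the one place that programs the channel.
    fn do_prepare(&mut self, preparation: Preparation) {
        debug_assert_eq!(preparation.direction, TransferDirection::In);
        self.link_addr = preparation.start as u32;
    }

    // Legacy descriptor-chain path: builds a `Preparation` by hand.
    fn prepare_transfer_without_start(&mut self, first: *mut u8) {
        self.do_prepare(Preparation {
            start: first,
            direction: TransferDirection::In,
            check_owner: Some(false),
        });
    }

    // Buffer-based path: the buffer supplies the `Preparation` itself.
    fn prepare_transfer(&mut self, preparation: Preparation) {
        self.do_prepare(preparation);
    }
}
```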
18 changes: 13 additions & 5 deletions esp-hal/src/dma/buffers.rs
@@ -42,8 +42,15 @@ pub struct Preparation {

/// Block size for PSRAM transfers.
///
/// The implementation of the buffer must provide a block size if the data
/// is in PSRAM.
/// If the buffer is in PSRAM, the implementation must ensure the following:
///
/// - The implementation of the buffer must provide a non-`None` block size.
/// - For [`TransferDirection::In`] transfers, the implementation of the
/// buffer must invalidate the cache that contains the buffer before the
/// DMA starts.
/// - For [`TransferDirection::Out`] transfers, the implementation of the
/// buffer must write back the cache that contains the buffer before the
/// DMA starts.
#[cfg(esp32s3)]
pub external_memory_block_size: Option<DmaBufBlkSize>,
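
A hedged sketch of the cache maintenance described above, as a PSRAM-backed buffer implementation might perform it before the DMA starts. The `cache_invalidate`/`cache_writeback` helpers are hypothetical stand-ins; the real chip-specific cache routines (and their exact granularity rules) live elsewhere in esp-hal:

```rust
#[derive(Clone, Copy)]
enum TransferDirection {
    In,
    Out,
}

// Hypothetical stand-ins for the chip's cache-maintenance routines.
fn cache_invalidate(_addr: *const u8, _len: usize) { /* chip-specific */ }
fn cache_writeback(_addr: *const u8, _len: usize) { /* chip-specific */ }

/// Cache maintenance a PSRAM-resident buffer must do before DMA starts.
fn maintain_cache(buf: *const u8, len: usize, direction: TransferDirection) {
    match direction {
        // DMA writes into the buffer: drop stale cache lines so later CPU
        // reads see the DMA's data instead of old cached contents.
        TransferDirection::In => cache_invalidate(buf, len),
        // DMA reads from the buffer: flush dirty cache lines so the DMA
        // sees the CPU's latest writes.
        TransferDirection::Out => cache_writeback(buf, len),
    }
}
```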

@@ -52,9 +59,10 @@ pub struct Preparation {
/// The implementation of the buffer must ensure that burst mode is only
/// enabled when alignment requirements are met.
///
/// There are no additional alignment requirements for TX burst transfers,
/// but RX transfers require all descriptors to have buffer pointers and
/// sizes that are a multiple of 4 (word aligned).
/// There are no additional alignment requirements for
/// [`TransferDirection::Out`] burst transfers, but
/// [`TransferDirection::In`] transfers require all descriptors to have
/// buffer pointers and sizes that are a multiple of 4 (word aligned).
pub burst_transfer: BurstTransfer,

/// Configures the "check owner" feature of the DMA channel.
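The word-alignment rule for [`TransferDirection::In`] burst transfers is mechanical to check. A minimal sketch, with an illustrative descriptor type rather than the real `DmaDescriptor`:

```rust
/// Illustrative descriptor: just the two fields the alignment rule touches.
struct Descriptor {
    buffer: *const u8,
    size: usize,
}

/// RX burst requirement: every descriptor's buffer pointer and size must
/// be a multiple of 4 (word aligned). TX has no such extra requirement.
fn rx_burst_alignment_ok(descriptors: &[Descriptor]) -> bool {
    descriptors
        .iter()
        .all(|d| d.buffer as usize % 4 == 0 && d.size % 4 == 0)
}
```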
139 changes: 80 additions & 59 deletions esp-hal/src/dma/mod.rs
@@ -1756,6 +1756,27 @@ where
pub fn set_priority(&mut self, priority: DmaPriority) {
self.rx_impl.set_priority(priority);
}

fn do_prepare(
&mut self,
preparation: Preparation,
peri: DmaPeripheral,
) -> Result<(), DmaError> {
debug_assert_eq!(preparation.direction, TransferDirection::In);

self.rx_impl.set_burst_mode(preparation.burst_transfer);
self.rx_impl.set_descr_burst_mode(true);
self.rx_impl.set_check_owner(preparation.check_owner);

compiler_fence(core::sync::atomic::Ordering::SeqCst);

self.rx_impl.clear_all();
self.rx_impl.reset();
self.rx_impl.set_link_addr(preparation.start as u32);
self.rx_impl.set_peripheral(peri as u8);

Ok(())
}
}

impl<M, CH> crate::private::Sealed for ChannelRx<'_, M, CH>
@@ -1770,24 +1791,18 @@ where
M: Mode,
CH: DmaChannel,
{
// TODO: used by I2S, which should be rewritten to use the Preparation-based
// API.
unsafe fn prepare_transfer_without_start(
&mut self,
peri: DmaPeripheral,
chain: &DescriptorChain,
) -> Result<(), DmaError> {
// if self.burst_mode
// && chain
// .descriptors
// .iter()
// .any(|d| d.len() % 4 != 0 || d.buffer as u32 % 4 != 0)
//{
// return Err(DmaError::InvalidAlignment);
//}

// for esp32s3 we check each descriptor buffer that points to psram for
// alignment and invalidate the cache for that buffer
// For ESP32-S3 we check each descriptor buffer that points to PSRAM for
// alignment and invalidate the cache for that buffer.
// NOTE: for RX the `buffer` and `size` need to be aligned but the `len` does
// not. TRM section 3.4.9
// Note that DmaBuffer implementations are required to do this for us.
#[cfg(esp32s3)]
for des in chain.descriptors.iter() {
// we are forcing the DMA alignment to the cache line size
@@ -1802,14 +1817,17 @@ where
}
}

compiler_fence(core::sync::atomic::Ordering::SeqCst);

self.rx_impl.clear_all();
self.rx_impl.reset();
self.rx_impl.set_link_addr(chain.first() as u32);
self.rx_impl.set_peripheral(peri as u8);

Ok(())
self.do_prepare(
Preparation {
start: chain.first().cast_mut(),
#[cfg(esp32s3)]
external_memory_block_size: None,
direction: TransferDirection::In,
burst_transfer: BurstTransfer::Disabled,
check_owner: Some(false),
},
peri,
)
}

unsafe fn prepare_transfer<BUF: DmaRxBuffer>(
@@ -1819,20 +1837,7 @@ where
) -> Result<(), DmaError> {
let preparation = buffer.prepare();

debug_assert_eq!(preparation.direction, TransferDirection::In);

self.rx_impl.set_burst_mode(preparation.burst_transfer);
self.rx_impl.set_descr_burst_mode(true);
self.rx_impl.set_check_owner(preparation.check_owner);

compiler_fence(core::sync::atomic::Ordering::SeqCst);

self.rx_impl.clear_all();
self.rx_impl.reset();
self.rx_impl.set_link_addr(preparation.start as u32);
self.rx_impl.set_peripheral(peri as u8);

Ok(())
self.do_prepare(preparation, peri)
}

fn start_transfer(&mut self) -> Result<(), DmaError> {
@@ -2041,6 +2046,32 @@ where
pub fn set_priority(&mut self, priority: DmaPriority) {
self.tx_impl.set_priority(priority);
}

fn do_prepare(
&mut self,
preparation: Preparation,
peri: DmaPeripheral,
) -> Result<(), DmaError> {
debug_assert_eq!(preparation.direction, TransferDirection::Out);

#[cfg(esp32s3)]
if let Some(block_size) = preparation.external_memory_block_size {
self.set_ext_mem_block_size(block_size.into());
}

self.tx_impl.set_burst_mode(preparation.burst_transfer);
self.tx_impl.set_descr_burst_mode(true);
self.tx_impl.set_check_owner(preparation.check_owner);

compiler_fence(core::sync::atomic::Ordering::SeqCst);

self.tx_impl.clear_all();
self.tx_impl.reset();
self.tx_impl.set_link_addr(preparation.start as u32);
self.tx_impl.set_peripheral(peri as u8);

Ok(())
}
}
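
A note on the `#[cfg(esp32s3)]` branch above: only a PSRAM-backed buffer reports a block size, so internal-RAM transfers leave the register alone. A hedged sketch of that handoff (stand-in types; the real code converts via `.into()` to the register-level type, and the `DmaBufBlkSize` variants shown are assumptions):

```rust
#[derive(Clone, Copy)]
enum DmaBufBlkSize {
    Size16, // variant names assumed for illustration
    Size32,
    Size64,
}

struct TxImpl;

impl TxImpl {
    fn set_ext_mem_block_size(&mut self, _size: DmaBufBlkSize) {
        /* register write elided */
    }
}

fn apply_external_memory_block_size(
    tx_impl: &mut TxImpl,
    block_size: Option<DmaBufBlkSize>,
) {
    // `None` means the buffer is in internal RAM: nothing to program.
    if let Some(size) = block_size {
        tx_impl.set_ext_mem_block_size(size);
    }
}
```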

impl<M, CH> crate::private::Sealed for ChannelTx<'_, M, CH>
@@ -2055,6 +2086,8 @@ where
M: Mode,
CH: DmaChannel,
{
// TODO: used by I2S, which should be rewritten to use the Preparation-based
// API.
unsafe fn prepare_transfer_without_start(
&mut self,
peri: DmaPeripheral,
@@ -2063,7 +2096,8 @@
// Based on the ESP32-S3 TRM the alignment check is not needed for TX

// For esp32s3 we check each descriptor buffer that points to PSRAM for
// alignment and writeback the cache for that buffer
// alignment and writeback the cache for that buffer.
// Note that DmaBuffer implementations are required to do this for us.
#[cfg(esp32s3)]
for des in chain.descriptors.iter() {
// we are forcing the DMA alignment to the cache line size
@@ -2078,12 +2112,17 @@ where
}
}

compiler_fence(core::sync::atomic::Ordering::SeqCst);

self.tx_impl.clear_all();
self.tx_impl.reset();
self.tx_impl.set_link_addr(chain.first() as u32);
self.tx_impl.set_peripheral(peri as u8);
self.do_prepare(
Preparation {
start: chain.first().cast_mut(),
#[cfg(esp32s3)]
external_memory_block_size: None,
direction: TransferDirection::Out,
burst_transfer: BurstTransfer::Disabled,
check_owner: Some(false),
},
peri,
)?;

// enable descriptor write back in circular mode
self.tx_impl
@@ -2099,25 +2138,7 @@ where
) -> Result<(), DmaError> {
let preparation = buffer.prepare();

debug_assert_eq!(preparation.direction, TransferDirection::Out);

#[cfg(esp32s3)]
if let Some(block_size) = preparation.external_memory_block_size {
self.set_ext_mem_block_size(block_size.into());
}

self.tx_impl.set_burst_mode(preparation.burst_transfer);
self.tx_impl.set_descr_burst_mode(true);
self.tx_impl.set_check_owner(preparation.check_owner);

compiler_fence(core::sync::atomic::Ordering::SeqCst);

self.tx_impl.clear_all();
self.tx_impl.reset();
self.tx_impl.set_link_addr(preparation.start as u32);
self.tx_impl.set_peripheral(peri as u8);

Ok(())
self.do_prepare(preparation, peri)
}

fn start_transfer(&mut self) -> Result<(), DmaError> {