Commit
Fix IN alignment requirements
bugadani committed Dec 3, 2024
1 parent 01c70c2 commit 7d453f6
Showing 2 changed files with 56 additions and 30 deletions.
20 changes: 17 additions & 3 deletions esp-hal/src/dma/buffers.rs
@@ -194,14 +194,28 @@ impl ExternalBurstConfig {
}

impl InternalBurstConfig {
- pub(super) fn is_burst_enabled(self) -> bool {
+ pub(super) const fn is_burst_enabled(self) -> bool {
!matches!(self, Self::Disabled)
}

// Size and address alignment as those come in pairs on current hardware.
const fn min_dram_alignment(self, direction: TransferDirection) -> usize {
// IN transfers must be word aligned
if matches!(direction, TransferDirection::In) {
- 4
+ // NOTE(danielb): commenting this check is incorrect as per TRM, but works.
+ // we'll need to restore this once peripherals can read a
+ // different amount of data than what is configured in the
+ // buffer.
+ // if cfg!(esp32) {
+ //     // NOTE: The size must be word-aligned.
+ //     // NOTE: The buffer address must be word-aligned
+ //     1
+ // }
+ if self.is_burst_enabled() {
+     // As described in "Accessing Internal Memory" paragraphs in the various TRMs.
+     4
+ } else {
+     1
+ }
} else {
// OUT transfers have no alignment requirements, except for ESP32, which is
// described below.
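
For reference, the rule introduced above can be exercised on its own. The following is a minimal, standalone Rust sketch that mirrors the hunk: TransferDirection, InternalBurstConfig, is_burst_enabled and min_dram_alignment are taken from the diff, while the Enabled variant and the main function are simplified stand-ins rather than the actual esp-hal definitions.

// Standalone sketch of the IN-transfer alignment rule above. Only the logic
// from the hunk is reproduced; the real esp-hal types carry more variants.
#[derive(Clone, Copy)]
enum TransferDirection {
    In,
    Out,
}

#[derive(Clone, Copy)]
enum InternalBurstConfig {
    Disabled,
    Enabled,
}

impl InternalBurstConfig {
    const fn is_burst_enabled(self) -> bool {
        !matches!(self, Self::Disabled)
    }

    const fn min_dram_alignment(self, direction: TransferDirection) -> usize {
        if matches!(direction, TransferDirection::In) {
            // IN transfers only need word alignment when bursting.
            if self.is_burst_enabled() {
                4
            } else {
                1
            }
        } else {
            // OUT transfers are unconstrained in this simplified model.
            1
        }
    }
}

fn main() {
    let align = InternalBurstConfig::Enabled.min_dram_alignment(TransferDirection::In);
    assert_eq!(align, 4);
    assert_eq!(InternalBurstConfig::Disabled.min_dram_alignment(TransferDirection::In), 1);
    assert_eq!(InternalBurstConfig::Enabled.min_dram_alignment(TransferDirection::Out), 1);

    // A 5-byte payload has to live in a buffer rounded up to the alignment,
    // which is why the tests below grow their DMA buffers from 5 to 8 bytes.
    let padded = (5usize + align - 1) / align * align;
    assert_eq!(padded, 8);
}
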
66 changes: 39 additions & 27 deletions hil-test/tests/spi_full_duplex.rs
@@ -191,7 +191,8 @@ mod tests {
#[timeout(3)]
#[cfg(pcnt)]
fn test_dma_read_dma_write_pcnt(ctx: Context) {
- const DMA_BUFFER_SIZE: usize = 5;
+ const DMA_BUFFER_SIZE: usize = 8;
+ const TRANSFER_SIZE: usize = 5;
let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
@@ -207,28 +208,29 @@ mod tests {
dma_tx_buf.as_mut_slice().fill(0b0110_1010);

for i in 1..4 {
- dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]);
+ dma_rx_buf.as_mut_slice()[..TRANSFER_SIZE].copy_from_slice(&[5; TRANSFER_SIZE]);
let transfer = spi
- .read(dma_rx_buf.len(), dma_rx_buf)
+ .read(TRANSFER_SIZE, dma_rx_buf)
.map_err(|e| e.0)
.unwrap();
(spi, dma_rx_buf) = transfer.wait();
- assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]);
+ assert_eq!(&dma_rx_buf.as_slice()[..TRANSFER_SIZE], &[0; TRANSFER_SIZE]);

let transfer = spi
- .write(dma_tx_buf.len(), dma_tx_buf)
+ .write(TRANSFER_SIZE, dma_tx_buf)
.map_err(|e| e.0)
.unwrap();
(spi, dma_tx_buf) = transfer.wait();
- assert_eq!(unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _);
+ assert_eq!(unit.value(), (i * 3 * TRANSFER_SIZE) as _);
}
}

#[test]
#[timeout(3)]
#[cfg(pcnt)]
fn test_dma_read_dma_transfer_pcnt(ctx: Context) {
- const DMA_BUFFER_SIZE: usize = 5;
+ const DMA_BUFFER_SIZE: usize = 8;
+ const TRANSFER_SIZE: usize = 5;
let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
@@ -244,20 +246,20 @@ mod tests {
dma_tx_buf.as_mut_slice().fill(0b0110_1010);

for i in 1..4 {
- dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]);
+ dma_rx_buf.as_mut_slice()[..TRANSFER_SIZE].copy_from_slice(&[5; TRANSFER_SIZE]);
let transfer = spi
- .read(dma_rx_buf.len(), dma_rx_buf)
+ .read(TRANSFER_SIZE, dma_rx_buf)
.map_err(|e| e.0)
.unwrap();
(spi, dma_rx_buf) = transfer.wait();
- assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]);
+ assert_eq!(&dma_rx_buf.as_slice()[..TRANSFER_SIZE], &[0; TRANSFER_SIZE]);

let transfer = spi
- .transfer(dma_rx_buf.len(), dma_rx_buf, dma_tx_buf.len(), dma_tx_buf)
+ .transfer(TRANSFER_SIZE, dma_rx_buf, TRANSFER_SIZE, dma_tx_buf)
.map_err(|e| e.0)
.unwrap();
(spi, (dma_rx_buf, dma_tx_buf)) = transfer.wait();
- assert_eq!(unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _);
+ assert_eq!(unit.value(), (i * 3 * TRANSFER_SIZE) as _);
}
}

@@ -294,30 +296,38 @@ mod tests {
#[test]
#[timeout(3)]
fn test_asymmetric_dma_transfer(ctx: Context) {
- let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(2, 4);
+ const WRITE_SIZE: usize = 4;
+ const READ_SIZE: usize = 2;
+ let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(4, 4);
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();

dma_tx_buf.fill(&[0xde, 0xad, 0xbe, 0xef]);

let spi = ctx.spi.with_dma(ctx.dma_channel);
let transfer = spi
- .transfer(dma_rx_buf.len(), dma_rx_buf, dma_tx_buf.len(), dma_tx_buf)
+ .transfer(READ_SIZE, dma_rx_buf, WRITE_SIZE, dma_tx_buf)
.map_err(|e| e.0)
.unwrap();
let (spi, (dma_rx_buf, mut dma_tx_buf)) = transfer.wait();
- assert_eq!(dma_tx_buf.as_slice()[0..2], dma_rx_buf.as_slice()[0..2]);
+ assert_eq!(
+     dma_tx_buf.as_slice()[0..READ_SIZE],
+     dma_rx_buf.as_slice()[0..READ_SIZE]
+ );

// Try transfer again to make sure DMA isn't in a broken state.

dma_tx_buf.fill(&[0xaa, 0xdd, 0xef, 0xbe]);

let transfer = spi
- .transfer(dma_rx_buf.len(), dma_rx_buf, dma_tx_buf.len(), dma_tx_buf)
+ .transfer(READ_SIZE, dma_rx_buf, WRITE_SIZE, dma_tx_buf)
.map_err(|e| e.0)
.unwrap();
let (_, (dma_rx_buf, dma_tx_buf)) = transfer.wait();
- assert_eq!(dma_tx_buf.as_slice()[0..2], dma_rx_buf.as_slice()[0..2]);
+ assert_eq!(
+     dma_tx_buf.as_slice()[0..READ_SIZE],
+     dma_rx_buf.as_slice()[0..READ_SIZE]
+ );
}

#[test]
@@ -386,7 +396,8 @@ mod tests {
#[timeout(3)]
#[cfg(pcnt)]
async fn test_async_dma_read_dma_write_pcnt(ctx: Context) {
- const DMA_BUFFER_SIZE: usize = 5;
+ const DMA_BUFFER_SIZE: usize = 8;
+ const TRANSFER_SIZE: usize = 5;
let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
@@ -401,26 +412,27 @@ mod tests {
.channel0
.set_input_mode(EdgeMode::Hold, EdgeMode::Increment);

- let mut receive = [0; DMA_BUFFER_SIZE];
+ let mut receive = [0; TRANSFER_SIZE];

// Fill the buffer where each byte has 3 pos edges.
- let transmit = [0b0110_1010; DMA_BUFFER_SIZE];
+ let transmit = [0b0110_1010; TRANSFER_SIZE];

for i in 1..4 {
- receive.copy_from_slice(&[5, 5, 5, 5, 5]);
+ receive.copy_from_slice(&[5; TRANSFER_SIZE]);
SpiBusAsync::read(&mut spi, &mut receive).await.unwrap();
- assert_eq!(receive, [0, 0, 0, 0, 0]);
+ assert_eq!(receive, [0; TRANSFER_SIZE]);

SpiBusAsync::write(&mut spi, &transmit).await.unwrap();
- assert_eq!(ctx.pcnt_unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _);
+ assert_eq!(ctx.pcnt_unit.value(), (i * 3 * TRANSFER_SIZE) as _);
}
}

#[test]
#[timeout(3)]
#[cfg(pcnt)]
async fn test_async_dma_read_dma_transfer_pcnt(ctx: Context) {
- const DMA_BUFFER_SIZE: usize = 5;
+ const DMA_BUFFER_SIZE: usize = 8;
+ const TRANSFER_SIZE: usize = 5;
let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
@@ -435,10 +447,10 @@ mod tests {
.channel0
.set_input_mode(EdgeMode::Hold, EdgeMode::Increment);

- let mut receive = [0; DMA_BUFFER_SIZE];
+ let mut receive = [0; TRANSFER_SIZE];

// Fill the buffer where each byte has 3 pos edges.
- let transmit = [0b0110_1010; DMA_BUFFER_SIZE];
+ let transmit = [0b0110_1010; TRANSFER_SIZE];

for i in 1..4 {
receive.copy_from_slice(&[5, 5, 5, 5, 5]);
@@ -448,7 +460,7 @@ mod tests {
SpiBusAsync::transfer(&mut spi, &mut receive, &transmit)
.await
.unwrap();
- assert_eq!(ctx.pcnt_unit.value(), (i * 3 * DMA_BUFFER_SIZE) as _);
+ assert_eq!(ctx.pcnt_unit.value(), (i * 3 * TRANSFER_SIZE) as _);
}
}

Expand Down
