From 41f9925e2c393b1b753585e85e21f74cf5a8d131 Mon Sep 17 00:00:00 2001 From: Dominic Fischer <14130965+Dominaezzz@users.noreply.github.com> Date: Tue, 20 Aug 2024 12:47:55 +0100 Subject: [PATCH] [3/3] DMA Move API: Introduce DMA buffer objects (#1856) * [3/3] DMA Move API: Introduce DMA buffer objects * Remove FlashSafeDma * Add async HIL test * Handle set_length(0) correctly * Fix tx/rx booleans * Unlucky * Preserve previous blocking semantics * Add delay between starting DMA TX and SPI driver * Update CHANGELOG * merge tidy * Add with_buffers builder --------- Co-authored-by: Dominic Fischer --- esp-hal/CHANGELOG.md | 3 + esp-hal/src/dma/mod.rs | 819 ++++++++---- esp-hal/src/lib.rs | 55 - esp-hal/src/soc/mod.rs | 7 + esp-hal/src/spi/master.rs | 1281 ++++++++++--------- examples/src/bin/embassy_spi.rs | 13 +- examples/src/bin/qspi_flash.rs | 50 +- examples/src/bin/spi_loopback_dma.rs | 30 +- hil-test/tests/spi_full_duplex_dma.rs | 397 +++--- hil-test/tests/spi_full_duplex_dma_async.rs | 39 +- hil-test/tests/spi_half_duplex_read.rs | 29 +- hil-test/tests/spi_half_duplex_write.rs | 23 +- 12 files changed, 1487 insertions(+), 1259 deletions(-) diff --git a/esp-hal/CHANGELOG.md b/esp-hal/CHANGELOG.md index 34e0e74bfd0..9c1c4df9517 100644 --- a/esp-hal/CHANGELOG.md +++ b/esp-hal/CHANGELOG.md @@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- Introduce DMA buffer objects (#1856) - Added new `Io::new_no_bind_interrupt` constructor (#1861) - Added touch pad support for esp32 (#1873, #1956) - Allow configuration of period updating method for MCPWM timers (#1898) @@ -20,6 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Peripheral driver constructors don't take `InterruptHandler`s anymore. Use `set_interrupt_handler` to explicitly set the interrupt handler now. (#1819) +- Migrate SPI driver to use DMA buffer objects (#1856) - Use the peripheral ref pattern for `OneShotTimer` and `PeriodicTimer` (#1855) - Improve SYSTIMER API (#1870) - DMA: don't require `Sealed` to implement `ReadBuffer` and `WriteBuffer` (#1921) @@ -44,6 +46,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - This package no longer re-exports the `esp_hal_procmacros::main` macro (#1828) - The `AesFlavour` trait no longer has the `ENCRYPT_MODE`/`DECRYPT_MODE` associated constants (#1849) +- Removed `FlashSafeDma` (#1856) ## [0.19.0] - 2024-07-15 diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index 4adf05d97b7..3af076e8e23 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -28,15 +28,12 @@ //! let mosi = io.pins.gpio4; //! let cs = io.pins.gpio5; //! -//! let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = -//! dma_buffers!(32000); -//! //! let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) //! .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) //! .with_dma(dma_channel.configure( //! false, //! DmaPriority::Priority0, -//! ), tx_descriptors, rx_descriptors); +//! )); //! # } //! ``` //! @@ -50,7 +47,13 @@ //! //! For convenience you can use the [crate::dma_buffers] macro. 
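For readers following the new API end to end, here is a minimal sketch (not part of the patch itself) of how the pieces introduced here fit together for the blocking full-duplex SPI driver. It assumes the same `peripherals`/`clocks`/pin/`dma_channel` setup as the module-level example above, and uses the `dma_buffers!` tuple order shown in the snippet this patch removes from the docs.

```rust
use esp_hal::{
    dma::{DmaPriority, DmaRxBuf, DmaTxBuf},
    dma_buffers,
    prelude::*,
    spi::{master::Spi, SpiMode},
};

// Allocate one buffer plus descriptor set per direction
// (tuple order as in the example removed from the docs above).
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(32000);
let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();

let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
    .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
    .with_dma(dma_channel.configure(false, DmaPriority::Priority0))
    // New in this patch: hand the buffer objects to the driver once...
    .with_buffers(dma_tx_buf, dma_rx_buf);

// ...then use plain slices; `SpiDmaBus` chunks the data through the
// DMA buffers internally.
spi.write(&[0x55; 64]).unwrap();
let mut response = [0u8; 64];
spi.read(&mut response).unwrap();
```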
-use core::{fmt::Debug, marker::PhantomData, ptr::addr_of_mut, sync::atomic::compiler_fence}; +use core::{ + cmp::min, + fmt::Debug, + marker::PhantomData, + ptr::addr_of_mut, + sync::atomic::compiler_fence, +}; trait Word: crate::private::Sealed {} @@ -162,7 +165,7 @@ pub unsafe trait WriteBuffer { /// /// Once this method has been called, it is unsafe to call any `&mut self` /// methods, except for `write_buffer`, on this object as long as the - /// returned value is in use (by DMA). + /// returned value is in use (by DMA). unsafe fn write_buffer(&mut self) -> (*mut u8, usize); } @@ -275,7 +278,7 @@ use enumset::{EnumSet, EnumSetType}; pub use self::gdma::*; #[cfg(pdma)] pub use self::pdma::*; -use crate::{interrupt::InterruptHandler, Mode}; +use crate::{interrupt::InterruptHandler, soc::is_slice_in_dram, Mode}; #[cfg(gdma)] mod gdma; @@ -504,8 +507,7 @@ pub enum DmaError { OutOfDescriptors, /// DescriptorError the DMA rejected the descriptor configuration. This /// could be because the source address of the data is not in RAM. Ensure - /// your source data is in a valid address space, or try using - /// [`crate::FlashSafeDma`] wrapper. + /// your source data is in a valid address space. DescriptorError, /// The available free buffer is less than the amount of data to push Overflow, @@ -1123,6 +1125,12 @@ pub trait RxPrivate: crate::private::Sealed { chain: &DescriptorChain, ) -> Result<(), DmaError>; + unsafe fn prepare_transfer( + &mut self, + peri: DmaPeripheral, + first_desc: *mut DmaDescriptor, + ) -> Result<(), DmaError>; + fn start_transfer(&mut self) -> Result<(), DmaError>; #[cfg(esp32s3)] @@ -1198,14 +1206,14 @@ where unsafe fn prepare_transfer_without_start( &mut self, - descriptors: &DescriptorChain, + first_desc: *mut DmaDescriptor, peri: DmaPeripheral, ) -> Result<(), DmaError> { compiler_fence(core::sync::atomic::Ordering::SeqCst); R::clear_in_interrupts(); R::reset_in(); - R::set_in_descriptors(descriptors.first() as u32); + R::set_in_descriptors(first_desc as u32); R::set_in_peripheral(peri as u8); Ok(()) @@ -1299,7 +1307,22 @@ where } } - self.rx_impl.prepare_transfer_without_start(chain, peri) + self.rx_impl + .prepare_transfer_without_start(chain.first() as _, peri) + } + + unsafe fn prepare_transfer( + &mut self, + peri: DmaPeripheral, + first_desc: *mut DmaDescriptor, + ) -> Result<(), DmaError> { + // TODO: Figure out burst mode for DmaBuf. 
+ if self.burst_mode { + return Err(DmaError::InvalidAlignment); + } + + self.rx_impl + .prepare_transfer_without_start(first_desc, peri) } fn start_transfer(&mut self) -> Result<(), DmaError> { @@ -1427,6 +1450,12 @@ pub trait TxPrivate: crate::private::Sealed { chain: &DescriptorChain, ) -> Result<(), DmaError>; + unsafe fn prepare_transfer( + &mut self, + peri: DmaPeripheral, + desc: *mut DmaDescriptor, + ) -> Result<(), DmaError>; + fn start_transfer(&mut self) -> Result<(), DmaError>; #[cfg(esp32s3)] @@ -1482,14 +1511,14 @@ where unsafe fn prepare_transfer_without_start( &mut self, - descriptors: &DescriptorChain, + first_desc: *mut DmaDescriptor, peri: DmaPeripheral, ) -> Result<(), DmaError> { compiler_fence(core::sync::atomic::Ordering::SeqCst); R::clear_out_interrupts(); R::reset_out(); - R::set_out_descriptors(descriptors.first() as u32); + R::set_out_descriptors(first_desc as u32); R::set_out_peripheral(peri as u8); Ok(()) @@ -1606,7 +1635,16 @@ where crate::soc::cache_writeback_addr(des.buffer as u32, des.size() as u32); } } - self.tx_impl.prepare_transfer_without_start(chain, peri) + self.tx_impl + .prepare_transfer_without_start(chain.first() as _, peri) + } + + unsafe fn prepare_transfer( + &mut self, + peri: DmaPeripheral, + desc: *mut DmaDescriptor, + ) -> Result<(), DmaError> { + self.tx_impl.prepare_transfer_without_start(desc, peri) } fn start_transfer(&mut self) -> Result<(), DmaError> { @@ -1840,6 +1878,519 @@ where } } +/// Error returned from Dma[Tx|Rx|TxRx]Buf operations. +#[derive(Debug)] +pub enum DmaBufError { + /// More descriptors are needed for the buffer size + InsufficientDescriptors, + /// Descriptors or buffers are not located in a supported memory region + UnsupportedMemoryRegion, +} + +/// DMA transmit buffer +/// +/// This is a contiguous buffer linked together by DMA descriptors of length +/// 4092. It can only be used for transmitting data to a peripheral's FIFO. +/// See [DmaRxBuf] for receiving data. +#[derive(Debug)] +pub struct DmaTxBuf { + descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], +} + +impl DmaTxBuf { + /// Creates a new [DmaTxBuf] from some descriptors and a buffer. + /// + /// There must be enough descriptors for the provided buffer. + /// Each descriptor can handle 4092 bytes worth of buffer. + /// + /// Both the descriptors and buffer must be in DMA-capable memory. + /// Only DRAM is supported. + pub fn new( + descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], + ) -> Result { + let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE); + if descriptors.len() < min_descriptors { + return Err(DmaBufError::InsufficientDescriptors); + } + + if !is_slice_in_dram(descriptors) || !is_slice_in_dram(buffer) { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + + // Setup size and buffer pointer as these will not change for the remainder of + // this object's lifetime + let chunk_iter = descriptors.iter_mut().zip(buffer.chunks_mut(CHUNK_SIZE)); + for (desc, chunk) in chunk_iter { + desc.set_size(chunk.len()); + desc.buffer = chunk.as_mut_ptr(); + } + + let mut buf = Self { + descriptors, + buffer, + }; + buf.set_length(buf.capacity()); + + Ok(buf) + } + + /// Consume the buf, returning the descriptors and buffer. 
+ pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) { + (self.descriptors, self.buffer) + } + + /// Returns the size of the underlying buffer + pub fn capacity(&self) -> usize { + self.buffer.len() + } + + /// Return the number of bytes that would be transmitted by this buf. + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + let mut result = 0; + for desc in self.descriptors.iter() { + result += desc.len(); + if desc.next.is_null() { + break; + } + } + result + } + + /// Reset the descriptors to only transmit `len` amount of bytes from this + /// buf. + /// + /// The number of bytes in data must be less than or equal to the buffer + /// size. + pub fn set_length(&mut self, len: usize) { + assert!(len <= self.buffer.len()); + + // Get the minimum number of descriptors needed for this length of data. + let descriptor_count = len.div_ceil(CHUNK_SIZE).max(1); + let required_descriptors = &mut self.descriptors[0..descriptor_count]; + + // Link up the relevant descriptors. + let mut next = core::ptr::null_mut(); + for desc in required_descriptors.iter_mut().rev() { + desc.next = next; + next = desc; + } + + let mut remaining_length = len; + for desc in required_descriptors.iter_mut() { + // As this is a simple dma buffer implementation we won't + // be making use of this feature. + desc.set_suc_eof(false); + + // This isn't strictly needed for this simple implementation, + // but it is useful for debugging. + desc.set_owner(Owner::Dma); + + let chunk_size = min(remaining_length, desc.flags.size() as usize); + desc.set_length(chunk_size); + remaining_length -= chunk_size; + } + debug_assert_eq!(remaining_length, 0); + + required_descriptors.last_mut().unwrap().set_suc_eof(true); + } + + /// Fills the TX buffer with the bytes provided in `data` and reset the + /// descriptors to only cover the filled section. + /// + /// The number of bytes in data must be less than or equal to the buffer + /// size. + pub fn fill(&mut self, data: &[u8]) { + self.set_length(data.len()); + self.as_mut_slice()[..data.len()].copy_from_slice(data); + } + + /// Returns the buf as a mutable slice than can be written. + pub fn as_mut_slice(&mut self) -> &mut [u8] { + &mut self.buffer[..] + } + + /// Returns the buf as a slice than can be read. + pub fn as_slice(&self) -> &[u8] { + self.buffer + } + + pub(crate) fn first(&self) -> *mut DmaDescriptor { + self.descriptors.as_ptr() as _ + } +} + +/// DMA receive buffer +/// +/// This is a contiguous buffer linked together by DMA descriptors of length +/// 4092. It can only be used for receiving data from a peripheral's FIFO. +/// See [DmaTxBuf] for transmitting data. +pub struct DmaRxBuf { + descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], +} + +impl DmaRxBuf { + /// Creates a new [DmaRxBuf] from some descriptors and a buffer. + /// + /// There must be enough descriptors for the provided buffer. + /// Each descriptor can handle 4092 bytes worth of buffer. + /// + /// Both the descriptors and buffer must be in DMA-capable memory. + /// Only DRAM is supported. 
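As a quick illustration of the `DmaTxBuf` lifecycle documented above (construction, `fill`, `set_length`, `split`), here is a hedged sketch; the `dma_buffers!` invocation and the buffer size are placeholders rather than anything prescribed by the patch.

```rust
use esp_hal::{dma::DmaTxBuf, dma_buffers};

// Placeholder allocation; only the TX half is used here.
let (tx_buffer, tx_descriptors, _rx_buffer, _rx_descriptors) = dma_buffers!(8192);

// Fails with a DmaBufError if the descriptors can't cover the buffer
// (one descriptor per 4092 bytes) or if either lives outside DRAM.
let mut tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
assert_eq!(tx_buf.capacity(), 8192);

// `fill` copies the payload in and trims the descriptor chain so only
// these bytes are clocked out on the next transfer.
tx_buf.fill(&[0xAA; 128]);
assert_eq!(tx_buf.len(), 128);

// Alternatively, write through `as_mut_slice()` and set the length manually.
tx_buf.as_mut_slice()[..4].copy_from_slice(&[1, 2, 3, 4]);
tx_buf.set_length(4);

// Recover the raw parts when the buffer is no longer needed.
let (_descriptors, _buffer) = tx_buf.split();
```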
+ pub fn new( + descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], + ) -> Result { + let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE); + if descriptors.len() < min_descriptors { + return Err(DmaBufError::InsufficientDescriptors); + } + + if !is_slice_in_dram(descriptors) || !is_slice_in_dram(buffer) { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + + // Setup size and buffer pointer as these will not change for the remainder of + // this object's lifetime + let chunk_iter = descriptors.iter_mut().zip(buffer.chunks_mut(CHUNK_SIZE)); + for (desc, chunk) in chunk_iter { + desc.set_size(chunk.len()); + desc.buffer = chunk.as_mut_ptr(); + } + + let mut buf = Self { + descriptors, + buffer, + }; + + buf.set_length(buf.capacity()); + + Ok(buf) + } + + /// Consume the buf, returning the descriptors and buffer. + pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) { + (self.descriptors, self.buffer) + } + + /// Returns the size of the underlying buffer + pub fn capacity(&self) -> usize { + self.buffer.len() + } + + /// Returns the maximum number of bytes that this buf has been configured to + /// receive. + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + let mut result = 0; + for desc in self.descriptors.iter() { + result += desc.flags.size() as usize; + if desc.next.is_null() { + break; + } + } + result + } + + /// Reset the descriptors to only receive `len` amount of bytes into this + /// buf. + /// + /// The number of bytes in data must be less than or equal to the buffer + /// size. + pub fn set_length(&mut self, len: usize) { + assert!(len <= self.buffer.len()); + + // Get the minimum number of descriptors needed for this length of data. + let descriptor_count = len.div_ceil(CHUNK_SIZE).max(1); + let required_descriptors = &mut self.descriptors[..descriptor_count]; + + // Link up the relevant descriptors. + let mut next = core::ptr::null_mut(); + for desc in required_descriptors.iter_mut().rev() { + desc.next = next; + next = desc; + } + + // Get required part of the buffer. + let mut remaining_length = len; + for desc in required_descriptors.iter_mut() { + // Clear this to allow hardware to set it when the peripheral returns an EOF + // bit. + desc.set_suc_eof(false); + + // This isn't strictly needed for this simple implementation, + // but it is useful for debugging. + desc.set_owner(Owner::Dma); + + // Clear this to allow hardware to set it when it's + // done receiving data for this descriptor. + desc.set_length(0); + + let chunk_size = min(CHUNK_SIZE, remaining_length); + desc.set_size(chunk_size); + remaining_length -= chunk_size; + } + debug_assert_eq!(remaining_length, 0); + } + + /// Returns the entire underlying buffer as a slice than can be read. + pub fn as_slice(&self) -> &[u8] { + self.buffer + } + + /// Returns the entire underlying buffer as a slice than can be written. + pub fn as_mut_slice(&mut self) -> &mut [u8] { + &mut self.buffer[..] + } + + /// Return the number of bytes that was received by this buf. + pub fn number_of_received_bytes(&self) -> usize { + let mut result = 0; + for desc in self.descriptors.iter() { + result += desc.len(); + if desc.next.is_null() { + break; + } + } + result + } + + /// Reads the received data into the provided `buf`. + /// + /// If `buf.len()` is less than the amount of received data then only the + /// first `buf.len()` bytes of received data is written into `buf`. + /// + /// Returns the number of bytes in written to `buf`. 
+ pub fn read_received_data(&self, buf: &mut [u8]) -> usize { + let mut remaining = &mut buf[..]; + + let mut buffer_offset = 0; + for desc in self.descriptors.iter() { + if remaining.is_empty() { + break; + } + + let amount_to_copy = min(desc.len(), remaining.len()); + + let (to_fill, to_remain) = remaining.split_at_mut(amount_to_copy); + to_fill.copy_from_slice(&self.buffer[buffer_offset..][..amount_to_copy]); + remaining = to_remain; + + if desc.next.is_null() { + break; + } + buffer_offset += desc.flags.size() as usize; + } + + let remaining_bytes = remaining.len(); + buf.len() - remaining_bytes + } + + /// Returns the received data as an iterator of slices. + pub fn received_data(&self) -> impl Iterator { + let mut descriptors = self.descriptors.iter(); + #[allow(clippy::redundant_slicing)] // Clippy can't see why this is needed. + let mut buf = &self.buffer[..]; + + core::iter::from_fn(move || { + let mut chunk_size = 0; + let mut skip_size = 0; + while let Some(desc) = descriptors.next() { + chunk_size += desc.len(); + skip_size += desc.flags.size() as usize; + + // If this is the end of the linked list, we can skip the remaining descriptors. + if desc.next.is_null() { + while descriptors.next().is_some() { + // Drain the iterator so the next call to from_fn return + // None. + } + break; + } + + // This typically happens when the DMA gets an EOF bit from the peripheral. + // It can also happen if the DMA is restarted. + if desc.len() < desc.flags.size() as usize { + break; + } + } + + if chunk_size == 0 { + return None; + } + + let chunk = &buf[..chunk_size]; + buf = &buf[skip_size..]; + Some(chunk) + }) + } + + pub(crate) fn first(&self) -> *mut DmaDescriptor { + self.descriptors.as_ptr() as _ + } +} + +/// DMA transmit and receive buffer. +/// +/// This is a (single) contiguous buffer linked together by two sets of DMA +/// descriptors of length 4092 each. +/// It can be used for simultaneously transmitting to and receiving from a +/// peripheral's FIFO. These are typically full-duplex transfers. +pub struct DmaTxRxBuf { + tx_descriptors: &'static mut [DmaDescriptor], + rx_descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], +} + +impl DmaTxRxBuf { + /// Creates a new [DmaTxRxBuf] from some descriptors and a buffer. + /// + /// There must be enough descriptors for the provided buffer. + /// Each descriptor can handle 4092 bytes worth of buffer. + /// + /// Both the descriptors and buffer must be in DMA-capable memory. + /// Only DRAM is supported. 
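To complement the `DmaTxBuf` sketch above, here is a similarly hedged sketch of the receive side, exercising `set_length`, `read_received_data` and the zero-copy `received_data` iterator. The actual transfer in the middle is only indicated by a comment; it would be driven by one of the peripheral drivers migrated in this patch.

```rust
use esp_hal::{dma::DmaRxBuf, dma_buffers};

// Placeholder allocation; only the RX half is used here.
let (_tx_buffer, _tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(4096);
let mut rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();

// Accept at most 64 bytes on the next transfer.
rx_buf.set_length(64);

// ... hand `rx_buf` to a driver (e.g. `SpiDma::dma_read`), wait for the
// transfer to finish and take the buffer back ...

// Copy out whatever the peripheral actually delivered.
let mut out = [0u8; 64];
let copied = rx_buf.read_received_data(&mut out);
assert!(copied <= rx_buf.number_of_received_bytes());

// Or inspect the received data in place, one descriptor-sized chunk at a time.
for chunk in rx_buf.received_data() {
    let _ = chunk;
}
```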
+ pub fn new( + tx_descriptors: &'static mut [DmaDescriptor], + rx_descriptors: &'static mut [DmaDescriptor], + buffer: &'static mut [u8], + ) -> Result { + let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE); + if tx_descriptors.len() < min_descriptors { + return Err(DmaBufError::InsufficientDescriptors); + } + if rx_descriptors.len() < min_descriptors { + return Err(DmaBufError::InsufficientDescriptors); + } + + if !is_slice_in_dram(tx_descriptors) + || !is_slice_in_dram(rx_descriptors) + || !is_slice_in_dram(buffer) + { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + + // Reset the provided descriptors + tx_descriptors.fill(DmaDescriptor::EMPTY); + rx_descriptors.fill(DmaDescriptor::EMPTY); + + let descriptors = tx_descriptors.iter_mut().zip(rx_descriptors.iter_mut()); + let chunks = buffer.chunks_mut(CHUNK_SIZE); + + for ((tx_desc, rx_desc), chunk) in descriptors.zip(chunks) { + tx_desc.set_size(chunk.len()); + tx_desc.buffer = chunk.as_mut_ptr(); + rx_desc.set_size(chunk.len()); + rx_desc.buffer = chunk.as_mut_ptr(); + } + + let mut buf = Self { + tx_descriptors, + rx_descriptors, + buffer, + }; + buf.set_length(buf.capacity()); + + Ok(buf) + } + + /// Consume the buf, returning the tx descriptors, rx descriptors and + /// buffer. + pub fn split( + self, + ) -> ( + &'static mut [DmaDescriptor], + &'static mut [DmaDescriptor], + &'static mut [u8], + ) { + (self.tx_descriptors, self.rx_descriptors, self.buffer) + } + + /// Return the size of the underlying buffer. + pub fn capacity(&self) -> usize { + self.buffer.len() + } + + /// Returns the entire buf as a slice than can be read. + pub fn as_slice(&self) -> &[u8] { + self.buffer + } + + /// Returns the entire buf as a slice than can be written. + pub fn as_slice_mut(&mut self) -> &mut [u8] { + &mut self.buffer[..] + } + + /// Reset the descriptors to only transmit/receive `len` amount of bytes + /// with this buf. + /// + /// `len` must be less than or equal to the buffer size. + pub fn set_length(&mut self, len: usize) { + assert!(len <= self.buffer.len()); + + // Get the minimum number of descriptors needed for this length of data. + let descriptor_count = len.div_ceil(CHUNK_SIZE).max(1); + + let relevant_tx_descriptors = &mut self.tx_descriptors[..descriptor_count]; + let relevant_rx_descriptors = &mut self.rx_descriptors[..descriptor_count]; + + // Link up the relevant descriptors. + for descriptors in [ + &mut relevant_tx_descriptors[..], + &mut relevant_rx_descriptors[..], + ] { + let mut next = core::ptr::null_mut(); + for desc in descriptors.iter_mut().rev() { + desc.next = next; + next = desc; + } + } + + let mut remaining_length = len; + for desc in relevant_tx_descriptors.iter_mut() { + // As this is a simple dma buffer implementation we won't + // be making use of this feature. + desc.set_suc_eof(false); + + // This isn't strictly needed for this simple implementation, + // but it is useful for debugging. + desc.set_owner(Owner::Dma); + + let chunk_size = min(desc.size(), remaining_length); + desc.set_length(chunk_size); + remaining_length -= chunk_size; + } + debug_assert_eq!(remaining_length, 0); + relevant_tx_descriptors + .last_mut() + .unwrap() + .set_suc_eof(true); + + let mut remaining_length = len; + for desc in relevant_rx_descriptors.iter_mut() { + // Clear this to allow hardware to set it when the peripheral returns an EOF + // bit. + desc.set_suc_eof(false); + + // This isn't strictly needed for this simple implementation, + // but it is useful for debugging. 
+ desc.set_owner(Owner::Dma); + + // Clear this to allow hardware to set it when it is + // done receiving data for this descriptor. + desc.set_length(0); + + let chunk_size = min(CHUNK_SIZE, remaining_length); + desc.set_size(chunk_size); + remaining_length -= chunk_size; + } + debug_assert_eq!(remaining_length, 0); + } +} + pub(crate) mod dma_private { use super::*; @@ -1992,6 +2543,7 @@ impl<'a, I> DmaTransferTxRx<'a, I> where I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, { + #[allow(dead_code)] pub(crate) fn new(instance: &'a mut I) -> Self { Self { instance } } @@ -2022,238 +2574,6 @@ where } } -/// DMA transaction for TX transfers with moved-in/moved-out peripheral and -/// buffer -/// -/// # Safety -/// -/// Never use [core::mem::forget] on an in-progress transfer -#[non_exhaustive] -#[must_use] -pub struct DmaTransferTxOwned -where - I: dma_private::DmaSupportTx, - T: ReadBuffer, -{ - instance: I, - tx_buffer: T, -} - -impl DmaTransferTxOwned -where - I: dma_private::DmaSupportTx, - T: ReadBuffer, -{ - pub(crate) fn new(instance: I, tx_buffer: T) -> Self { - Self { - instance, - tx_buffer, - } - } - - /// Wait for the transfer to finish and return the peripheral and the - /// buffer. - pub fn wait(mut self) -> Result<(I, T), (DmaError, I, T)> { - self.instance.peripheral_wait_dma(true, false); - - let err = self.instance.tx().has_error(); - - // We need to have a `Drop` implementation, because we accept - // managed buffers that can free their memory on drop. Because of that - // we can't move out of the `Transfer`'s fields, so we use `ptr::read` - // and `mem::forget`. - // - // NOTE(unsafe) There is no panic branch between getting the resources - // and forgetting `self`. - - let (instance, tx_buffer) = unsafe { - let instance = core::ptr::read(&self.instance); - let tx_buffer = core::ptr::read(&self.tx_buffer); - core::mem::forget(self); - - (instance, tx_buffer) - }; - - if err { - Err((DmaError::DescriptorError, instance, tx_buffer)) - } else { - Ok((instance, tx_buffer)) - } - } - - /// Check if the transfer is finished. - pub fn is_done(&mut self) -> bool { - self.instance.tx().is_done() - } -} - -impl Drop for DmaTransferTxOwned -where - I: dma_private::DmaSupportTx, - T: ReadBuffer, -{ - fn drop(&mut self) { - self.instance.peripheral_wait_dma(true, false); - } -} - -/// DMA transaction for RX transfers with moved-in/moved-out peripheral and -/// buffer -/// -/// # Safety -/// -/// Never use [core::mem::forget] on an in-progress transfer -#[non_exhaustive] -#[must_use] -pub struct DmaTransferRxOwned -where - I: dma_private::DmaSupportRx, - R: WriteBuffer, -{ - instance: I, - rx_buffer: R, -} - -impl DmaTransferRxOwned -where - I: dma_private::DmaSupportRx, - R: WriteBuffer, -{ - pub(crate) fn new(instance: I, rx_buffer: R) -> Self { - Self { - instance, - rx_buffer, - } - } - - /// Wait for the transfer to finish and return the peripheral and the - /// buffers. - pub fn wait(mut self) -> Result<(I, R), (DmaError, I, R)> { - self.instance.peripheral_wait_dma(false, true); - - let err = self.instance.rx().has_error(); - - // We need to have a `Drop` implementation, because we accept - // managed buffers that can free their memory on drop. Because of that - // we can't move out of the `Transfer`'s fields, so we use `ptr::read` - // and `mem::forget`. - // - // NOTE(unsafe) There is no panic branch between getting the resources - // and forgetting `self`. 
- - let (instance, rx_buffer) = unsafe { - let instance = core::ptr::read(&self.instance); - let rx_buffer = core::ptr::read(&self.rx_buffer); - core::mem::forget(self); - - (instance, rx_buffer) - }; - - if err { - Err((DmaError::DescriptorError, instance, rx_buffer)) - } else { - Ok((instance, rx_buffer)) - } - } - - /// Check if the transfer is finished. - pub fn is_done(&mut self) -> bool { - self.instance.rx().is_done() - } -} - -impl Drop for DmaTransferRxOwned -where - I: dma_private::DmaSupportRx, - R: WriteBuffer, -{ - fn drop(&mut self) { - self.instance.peripheral_wait_dma(false, true); - } -} - -/// DMA transaction for TX+RX transfers with moved-in/moved-out peripheral and -/// buffers -/// -/// # Safety -/// -/// Never use [core::mem::forget] on an in-progress transfer -#[non_exhaustive] -#[must_use] -pub struct DmaTransferTxRxOwned -where - I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, - T: ReadBuffer, - R: WriteBuffer, -{ - instance: I, - tx_buffer: T, - rx_buffer: R, -} - -impl DmaTransferTxRxOwned -where - I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, - T: ReadBuffer, - R: WriteBuffer, -{ - pub(crate) fn new(instance: I, tx_buffer: T, rx_buffer: R) -> Self { - Self { - instance, - tx_buffer, - rx_buffer, - } - } - - /// Wait for the transfer to finish and return the peripheral and the - /// buffers. - #[allow(clippy::type_complexity)] - pub fn wait(mut self) -> Result<(I, T, R), (DmaError, I, T, R)> { - self.instance.peripheral_wait_dma(true, true); - - let err = self.instance.tx().has_error() || self.instance.rx().has_error(); - - // We need to have a `Drop` implementation, because we accept - // managed buffers that can free their memory on drop. Because of that - // we can't move out of the `Transfer`'s fields, so we use `ptr::read` - // and `mem::forget`. - // - // NOTE(unsafe) There is no panic branch between getting the resources - // and forgetting `self`. - - let (instance, tx_buffer, rx_buffer) = unsafe { - let instance = core::ptr::read(&self.instance); - let tx_buffer = core::ptr::read(&self.tx_buffer); - let rx_buffer = core::ptr::read(&self.rx_buffer); - core::mem::forget(self); - - (instance, tx_buffer, rx_buffer) - }; - - if err { - Err((DmaError::DescriptorError, instance, tx_buffer, rx_buffer)) - } else { - Ok((instance, tx_buffer, rx_buffer)) - } - } - - /// Check if the transfer is finished. - pub fn is_done(&mut self) -> bool { - self.instance.tx().is_done() && self.instance.rx().is_done() - } -} - -impl Drop for DmaTransferTxRxOwned -where - I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, - T: ReadBuffer, - R: WriteBuffer, -{ - fn drop(&mut self) { - self.instance.peripheral_wait_dma(true, true); - } -} - /// DMA transaction for TX only circular transfers /// /// # Safety @@ -2400,10 +2720,6 @@ pub(crate) mod asynch { pub fn new(tx: &'a mut TX) -> Self { Self { tx, _a: () } } - - pub fn tx(&mut self) -> &mut TX { - self.tx - } } impl<'a, TX> core::future::Future for DmaTxFuture<'a, TX> @@ -2457,6 +2773,7 @@ pub(crate) mod asynch { Self { rx, _a: () } } + #[allow(dead_code)] // Dead on the C2 pub fn rx(&mut self) -> &mut RX { self.rx } diff --git a/esp-hal/src/lib.rs b/esp-hal/src/lib.rs index ca6574dec21..9710e12bea0 100644 --- a/esp-hal/src/lib.rs +++ b/esp-hal/src/lib.rs @@ -572,61 +572,6 @@ mod critical_section_impl { } } -/// FlashSafeDma -/// -/// The embedded-hal traits make no guarantees about -/// where the buffers are placed. 
The DMA implementation in Espressif chips has -/// a limitation in that it can only access the RAM address space, meaning data -/// to be transmitted from the flash address space must be copied into RAM -/// first. -/// -/// This wrapper struct should be used when a peripheral using the DMA engine -/// needs to transmit data from flash (ROM) via the embedded-hal traits. This is -/// often a `const` variable. -/// -/// Example usage using [`spi::master::dma::SpiDma`] -/// ```rust, ignore -/// const ARRAY_IN_FLASH = [0xAA; 128]; -/// -/// let spi = SpiDma::new(/* */); -/// -/// spi.write(&ARRAY_IN_FLASH[..]).unwrap(); // error when transmission starts -/// -/// let spi = FlashSafeDma::new(spi); -/// -/// spi.write(&ARRAY_IN_FLASH[..]).unwrap(); // success -/// ``` -pub struct FlashSafeDma { - inner: T, - #[allow(unused)] - buffer: [u8; SIZE], -} - -impl FlashSafeDma { - /// Create a new instance wrapping a given buffer - pub fn new(inner: T) -> Self { - Self { - inner, - buffer: [0u8; SIZE], - } - } - - /// Return a mutable reference to the inner buffer - pub fn inner_mut(&mut self) -> &mut T { - &mut self.inner - } - - /// Return an immutable reference to the inner buffer - pub fn inner(&self) -> &T { - &self.inner - } - - /// Free the inner buffer - pub fn free(self) -> T { - self.inner - } -} - /// Default (unhandled) interrupt handler pub const DEFAULT_INTERRUPT_HANDLER: interrupt::InterruptHandler = interrupt::InterruptHandler::new( unsafe { core::mem::transmute::<*const (), extern "C" fn()>(EspDefaultHandler as *const ()) }, diff --git a/esp-hal/src/soc/mod.rs b/esp-hal/src/soc/mod.rs index 8f2b49538e4..47d00c33452 100644 --- a/esp-hal/src/soc/mod.rs +++ b/esp-hal/src/soc/mod.rs @@ -74,6 +74,13 @@ pub(crate) fn is_valid_ram_address(address: u32) -> bool { (self::constants::SOC_DRAM_LOW..=self::constants::SOC_DRAM_HIGH).contains(&address) } +#[allow(unused)] +pub(crate) fn is_slice_in_dram(slice: &[T]) -> bool { + let start = slice.as_ptr() as u32; + let end = start + slice.len() as u32; + self::constants::SOC_DRAM_LOW <= start && end <= self::constants::SOC_DRAM_HIGH +} + #[allow(unused)] pub(crate) fn is_valid_psram_address(address: u32) -> bool { #[cfg(psram)] diff --git a/esp-hal/src/spi/master.rs b/esp-hal/src/spi/master.rs index d028837b2a8..651542bdc32 100644 --- a/esp-hal/src/spi/master.rs +++ b/esp-hal/src/spi/master.rs @@ -79,7 +79,7 @@ use super::{ }; use crate::{ clock::Clocks, - dma::{DescriptorChain, DmaPeripheral, Rx, Tx}, + dma::{DmaDescriptor, DmaPeripheral, Rx, Tx}, gpio::{InputPin, InputSignal, OutputPin, OutputSignal}, interrupt::InterruptHandler, peripheral::{Peripheral, PeripheralRef}, @@ -841,30 +841,28 @@ where } pub mod dma { + use core::{ + cmp::min, + sync::atomic::{fence, Ordering}, + }; + use super::*; + #[cfg(feature = "async")] + use crate::dma::asynch::{DmaRxFuture, DmaTxFuture}; #[cfg(spi3)] use crate::dma::Spi3Peripheral; use crate::{ dma::{ - dma_private::{DmaSupport, DmaSupportRx, DmaSupportTx}, Channel, - ChannelRx, - ChannelTx, - DescriptorChain, DmaChannel, - DmaDescriptor, - DmaTransferRx, - DmaTransferRxOwned, - DmaTransferTx, - DmaTransferTxOwned, - DmaTransferTxRx, - DmaTransferTxRxOwned, - ReadBuffer, + DmaRxBuf, + DmaTxBuf, + RxPrivate, Spi2Peripheral, SpiPeripheral, TxPrivate, - WriteBuffer, }, + Blocking, InterruptConfigurable, Mode, }; @@ -879,8 +877,6 @@ pub mod dma { fn with_dma( self, channel: Channel<'d, C, DmaMode>, - tx_descriptors: &'static mut [DmaDescriptor], - rx_descriptors: &'static mut [DmaDescriptor], ) -> SpiDma<'d, 
crate::peripherals::SPI2, C, M, DmaMode>; } @@ -895,8 +891,6 @@ pub mod dma { fn with_dma( self, channel: Channel<'d, C, DmaMode>, - tx_descriptors: &'static mut [DmaDescriptor], - rx_descriptors: &'static mut [DmaDescriptor], ) -> SpiDma<'d, crate::peripherals::SPI3, C, M, DmaMode>; } @@ -910,16 +904,12 @@ pub mod dma { fn with_dma( self, mut channel: Channel<'d, C, DmaMode>, - tx_descriptors: &'static mut [DmaDescriptor], - rx_descriptors: &'static mut [DmaDescriptor], ) -> SpiDma<'d, crate::peripherals::SPI2, C, M, DmaMode> { channel.tx.init_channel(); // no need to call this for both, TX and RX SpiDma { spi: self.spi, channel, - tx_chain: DescriptorChain::new(tx_descriptors), - rx_chain: DescriptorChain::new(rx_descriptors), _mode: PhantomData, } } @@ -936,16 +926,12 @@ pub mod dma { fn with_dma( self, mut channel: Channel<'d, C, DmaMode>, - tx_descriptors: &'static mut [DmaDescriptor], - rx_descriptors: &'static mut [DmaDescriptor], ) -> SpiDma<'d, crate::peripherals::SPI3, C, M, DmaMode> { channel.tx.init_channel(); // no need to call this for both, TX and RX SpiDma { spi: self.spi, channel, - tx_chain: DescriptorChain::new(tx_descriptors), - rx_chain: DescriptorChain::new(rx_descriptors), _mode: PhantomData, } } @@ -961,8 +947,6 @@ pub mod dma { { pub(crate) spi: PeripheralRef<'d, T>, pub(crate) channel: Channel<'d, C, DmaMode>, - tx_chain: DescriptorChain, - rx_chain: DescriptorChain, _mode: PhantomData, } @@ -980,7 +964,7 @@ pub mod dma { impl<'d, T, C, M, DmaMode> SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, @@ -1020,7 +1004,7 @@ pub mod dma { impl<'d, T, C, M, DmaMode> crate::private::Sealed for SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, @@ -1030,7 +1014,7 @@ pub mod dma { impl<'d, T, C, M, DmaMode> InterruptConfigurable for SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, @@ -1043,7 +1027,7 @@ pub mod dma { impl<'d, T, C, M, DmaMode> SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, @@ -1054,64 +1038,134 @@ pub mod dma { } } - impl<'d, T, C, M, DmaMode> DmaSupport for SpiDma<'d, T, C, M, DmaMode> + impl<'d, T, C> SpiDma<'d, T, C, FullDuplexMode, Blocking> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: DuplexMode, - DmaMode: Mode, { - fn peripheral_wait_dma(&mut self, _is_tx: bool, _is_rx: bool) { - self.spi.flush().ok(); + pub fn with_buffers( + self, + dma_tx_buf: DmaTxBuf, + dma_rx_buf: DmaRxBuf, + ) -> SpiDmaBus<'d, T, C> { + SpiDmaBus::new(self, dma_tx_buf, dma_rx_buf) } + } - fn peripheral_dma_stop(&mut self) { - unreachable!("unsupported") + #[cfg(feature = "async")] + impl<'d, T, C> SpiDma<'d, T, C, FullDuplexMode, crate::Async> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + pub fn with_buffers( + self, + dma_tx_buf: DmaTxBuf, + dma_rx_buf: DmaRxBuf, + ) -> asynch::SpiDmaAsyncBus<'d, T, C> { + asynch::SpiDmaAsyncBus::new(self, dma_tx_buf, dma_rx_buf) } } - impl<'d, T, C, M, DmaMode> DmaSupportTx for SpiDma<'d, T, C, M, DmaMode> + pub struct SpiDmaTransfer<'d, T, C, M, DmaMode, Buf> where - T: InstanceDma, ChannelRx<'d, C>>, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, DmaMode: Mode, { - type TX = ChannelTx<'d, C>; + 
spi_dma: SpiDma<'d, T, C, M, DmaMode>, + dma_buf: Buf, + is_rx: bool, + is_tx: bool, + + rx_future_awaited: bool, + tx_future_awaited: bool, + } - fn tx(&mut self) -> &mut Self::TX { - &mut self.channel.tx + impl<'d, T, C, M, DmaMode, Buf> SpiDmaTransfer<'d, T, C, M, DmaMode, Buf> + where + T: Instance, + C: DmaChannel, + C::P: SpiPeripheral, + M: DuplexMode, + DmaMode: Mode, + { + fn new( + spi_dma: SpiDma<'d, T, C, M, DmaMode>, + dma_buf: Buf, + is_rx: bool, + is_tx: bool, + ) -> Self { + Self { + spi_dma, + dma_buf, + is_rx, + is_tx, + rx_future_awaited: false, + tx_future_awaited: false, + } + } + + pub fn is_done(&self) -> bool { + if self.is_tx && !self.tx_future_awaited && !self.spi_dma.channel.tx.is_done() { + return false; + } + if self.spi_dma.spi.busy() { + return false; + } + if self.is_rx && !self.rx_future_awaited { + // If this is an asymmetric transfer and the RX side is smaller, the RX channel + // will never be "done" as it won't have enough descriptors/buffer to receive + // the EOF bit from the SPI. So instead the RX channel will hit + // a "descriptor empty" which means the DMA is written as much + // of the received data as possible into the buffer and + // discarded the rest. The user doesn't care about this discarded data. + + if !self.spi_dma.channel.rx.is_done() + && !self.spi_dma.channel.rx.has_dscr_empty_error() + { + return false; + } + } + true } - fn chain(&mut self) -> &mut DescriptorChain { - &mut self.tx_chain + pub fn wait(mut self) -> (SpiDma<'d, T, C, M, DmaMode>, Buf) { + self.spi_dma.spi.flush().ok(); + fence(Ordering::Acquire); + (self.spi_dma, self.dma_buf) } } - impl<'d, T, C, M, DmaMode> DmaSupportRx for SpiDma<'d, T, C, M, DmaMode> + #[cfg(feature = "async")] + impl<'d, T, C, M, Buf> SpiDmaTransfer<'d, T, C, M, crate::Async, Buf> where - T: InstanceDma, ChannelRx<'d, C>>, + T: Instance, C: DmaChannel, C::P: SpiPeripheral, M: DuplexMode, - DmaMode: Mode, { - type RX = ChannelRx<'d, C>; + pub async fn wait_for_done(&mut self) { + if self.is_tx && !self.tx_future_awaited { + let _ = DmaTxFuture::new(&mut self.spi_dma.channel.tx).await; + self.tx_future_awaited = true; + } - fn rx(&mut self) -> &mut Self::RX { - &mut self.channel.rx - } + // As a future enhancement, setup Spi Future in here as well. - fn chain(&mut self) -> &mut DescriptorChain { - &mut self.rx_chain + if self.is_rx && !self.rx_future_awaited { + let _ = DmaRxFuture::new(&mut self.spi_dma.channel.rx).await; + self.rx_future_awaited = true; + } } } impl<'d, T, C, M, DmaMode> SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: IsFullDuplex, @@ -1119,207 +1173,131 @@ pub mod dma { { /// Perform a DMA write. /// - /// This will return a [DmaTransferTx]. The maximum amount of data to be - /// sent is 32736 bytes. - #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn dma_write<'t, TXBUF>( - &'t mut self, - words: &'t TXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, - { - self.dma_write_start(words)?; - Ok(DmaTransferTx::new(self)) - } - - /// Perform a DMA write. - /// - /// This will return a [DmaTransferTxOwned] owning the buffer and the + /// This will return a [SpiDmaTransfer] owning the buffer and the /// SPI instance. The maximum amount of data to be sent is 32736 /// bytes. 
+ #[allow(clippy::type_complexity)] #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn dma_write_owned( + pub fn dma_write( mut self, - words: TXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, + buffer: DmaTxBuf, + ) -> Result, (Error, Self, DmaTxBuf)> { - self.dma_write_start(&words)?; - Ok(DmaTransferTxOwned::new(self, words)) - } - - #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - fn dma_write_start<'t, TXBUF>(&'t mut self, words: &'t TXBUF) -> Result<(), super::Error> - where - TXBUF: ReadBuffer, - { - let (ptr, len) = unsafe { words.read_buffer() }; - - if len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); + let bytes_to_write = buffer.len(); + if bytes_to_write > MAX_DMA_SIZE { + return Err((Error::MaxDmaTransferSizeExceeded, self, buffer)); } - unsafe { - self.spi.start_write_bytes_dma( - &mut self.tx_chain, - ptr, - len, - &mut self.channel.tx, - )?; + let result = unsafe { + self.spi + .start_write_bytes_dma(buffer.first(), bytes_to_write, &mut self.channel.tx) + }; + if let Err(e) = result { + return Err((e, self, buffer)); } - Ok(()) - } - /// Perform a DMA read. - /// - /// This will return a [DmaTransferRx]. The maximum amount of data to be - /// received is 32736 bytes. - #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn dma_read<'t, RXBUF>( - &'t mut self, - words: &'t mut RXBUF, - ) -> Result, super::Error> - where - RXBUF: WriteBuffer, - { - self.dma_read_start(words)?; - Ok(DmaTransferRx::new(self)) + Ok(SpiDmaTransfer::new(self, buffer, false, true)) } /// Perform a DMA read. /// - /// This will return a [DmaTransferRxOwned] owning the buffer and + /// This will return a [SpiDmaTransfer] owning the buffer and /// the SPI instance. The maximum amount of data to be /// received is 32736 bytes. + #[allow(clippy::type_complexity)] #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn dma_read_owned( + pub fn dma_read( mut self, - mut words: RXBUF, - ) -> Result, super::Error> - where - RXBUF: WriteBuffer, + buffer: DmaRxBuf, + ) -> Result, (Error, Self, DmaRxBuf)> { - self.dma_read_start(&mut words)?; - Ok(DmaTransferRxOwned::new(self, words)) - } - - #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - fn dma_read_start<'t, RXBUF>(&'t mut self, words: &'t mut RXBUF) -> Result<(), super::Error> - where - RXBUF: WriteBuffer, - { - let (ptr, len) = unsafe { words.write_buffer() }; - - if len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); + let bytes_to_read = buffer.len(); + if bytes_to_read > MAX_DMA_SIZE { + return Err((Error::MaxDmaTransferSizeExceeded, self, buffer)); } - unsafe { - self.spi.start_read_bytes_dma( - &mut self.rx_chain, - ptr, - len, - &mut self.channel.rx, - )?; + let result = unsafe { + self.spi + .start_read_bytes_dma(buffer.first(), bytes_to_read, &mut self.channel.rx) + }; + if let Err(e) = result { + return Err((e, self, buffer)); } - Ok(()) - } - - /// Perform a DMA transfer. - /// - /// This will return a [DmaTransferTxRx]. - /// The maximum amount of data to be sent/received is 32736 bytes. 
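The methods above replace the borrow-based `dma_write`/`dma_write_owned` family with a move-based one: the driver and the buffer are consumed, and the returned `SpiDmaTransfer` hands both back from `wait()`. A hedged sketch of that pattern, assuming `spi_dma` is an `SpiDma` in full-duplex blocking mode and `tx_buf` is a mutable `DmaTxBuf` constructed as shown earlier:

```rust
// Sketch only: `spi_dma` and `tx_buf` are assumed to exist already.
tx_buf.fill(b"hello");

let transfer = match spi_dma.dma_write(tx_buf) {
    Ok(transfer) => transfer,
    // On failure the driver and the buffer come back with the error,
    // so the caller can retry or tear down cleanly.
    Err((err, _spi_dma, _tx_buf)) => panic!("dma_write failed: {:?}", err),
};

// The transfer owns the driver and the buffer; poll it while doing other work.
while !transfer.is_done() {}

// `wait()` flushes the SPI and hands both back for reuse.
let (spi_dma, tx_buf) = transfer.wait();
```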
- pub fn dma_transfer<'t, TXBUF, RXBUF>( - &'t mut self, - words: &'t TXBUF, - read_buffer: &'t mut RXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, - RXBUF: WriteBuffer, - { - self.dma_transfer_start(words, read_buffer)?; - Ok(DmaTransferTxRx::new(self)) + Ok(SpiDmaTransfer::new(self, buffer, true, false)) } /// Perform a DMA transfer /// - /// This will return a [DmaTransferTxRxOwned] owning the buffers and + /// This will return a [SpiDmaTransfer] owning the buffers and /// the SPI instance. The maximum amount of data to be /// sent/received is 32736 bytes. - pub fn dma_transfer_owned( + #[allow(clippy::type_complexity)] + pub fn dma_transfer( mut self, - words: TXBUF, - mut read_buffer: RXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, - RXBUF: WriteBuffer, - { - self.dma_transfer_start(&words, &mut read_buffer)?; - Ok(DmaTransferTxRxOwned::new(self, words, read_buffer)) - } - - fn dma_transfer_start<'t, TXBUF, RXBUF>( - &'t mut self, - words: &'t TXBUF, - read_buffer: &'t mut RXBUF, - ) -> Result<(), super::Error> - where - TXBUF: ReadBuffer, - RXBUF: WriteBuffer, - { - let (write_ptr, write_len) = unsafe { words.read_buffer() }; - let (read_ptr, read_len) = unsafe { read_buffer.write_buffer() }; - - if write_len > MAX_DMA_SIZE || read_len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); - } - - unsafe { + tx_buffer: DmaTxBuf, + rx_buffer: DmaRxBuf, + ) -> Result< + SpiDmaTransfer<'d, T, C, M, DmaMode, (DmaTxBuf, DmaRxBuf)>, + (Error, Self, DmaTxBuf, DmaRxBuf), + > { + let bytes_to_write = tx_buffer.len(); + let bytes_to_read = rx_buffer.len(); + + if bytes_to_write > MAX_DMA_SIZE || bytes_to_read > MAX_DMA_SIZE { + return Err(( + Error::MaxDmaTransferSizeExceeded, + self, + tx_buffer, + rx_buffer, + )); + } + + let result = unsafe { self.spi.start_transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - write_ptr, - write_len, - read_ptr, - read_len, + tx_buffer.first(), + rx_buffer.first(), + bytes_to_write, + bytes_to_read, &mut self.channel.tx, &mut self.channel.rx, - )?; + ) + }; + if let Err(e) = result { + return Err((e, self, tx_buffer, rx_buffer)); } - Ok(()) + Ok(SpiDmaTransfer::new( + self, + (tx_buffer, rx_buffer), + true, + true, + )) } } impl<'d, T, C, M, DmaMode> SpiDma<'d, T, C, M, DmaMode> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, M: IsHalfDuplex, DmaMode: Mode, { + #[allow(clippy::type_complexity)] #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn read<'t, RXBUF>( - &'t mut self, + pub fn read( + mut self, data_mode: SpiDataMode, cmd: Command, address: Address, dummy: u8, - buffer: &'t mut RXBUF, - ) -> Result, super::Error> - where - RXBUF: WriteBuffer, + buffer: DmaRxBuf, + ) -> Result, (Error, Self, DmaRxBuf)> { - let (ptr, len) = unsafe { buffer.write_buffer() }; - - if len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); + let bytes_to_read = buffer.len(); + if bytes_to_read > MAX_DMA_SIZE { + return Err((Error::MaxDmaTransferSizeExceeded, self, buffer)); } self.spi.init_half_duplex( @@ -1328,7 +1306,7 @@ pub mod dma { !address.is_none(), false, dummy != 0, - len == 0, + bytes_to_read == 0, ); self.spi .init_spi_data_mode(cmd.mode(), address.mode(), data_mode); @@ -1372,33 +1350,31 @@ pub mod dma { .modify(|_, w| unsafe { w.usr_dummy_cyclelen().bits(dummy - 1) }); } - unsafe { - self.spi.start_read_bytes_dma( - &mut self.rx_chain, - ptr, - len, - &mut self.channel.rx, - )?; + let result = unsafe { + self.spi + 
.start_read_bytes_dma(buffer.first(), bytes_to_read, &mut self.channel.rx) + }; + if let Err(e) = result { + return Err((e, self, buffer)); } - Ok(DmaTransferRx::new(self)) + + Ok(SpiDmaTransfer::new(self, buffer, bytes_to_read > 0, false)) } + #[allow(clippy::type_complexity)] #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - pub fn write<'t, TXBUF>( - &'t mut self, + pub fn write( + mut self, data_mode: SpiDataMode, cmd: Command, address: Address, dummy: u8, - buffer: &'t TXBUF, - ) -> Result, super::Error> - where - TXBUF: ReadBuffer, + buffer: DmaTxBuf, + ) -> Result, (Error, Self, DmaTxBuf)> { - let (ptr, len) = unsafe { buffer.read_buffer() }; - - if len > MAX_DMA_SIZE { - return Err(super::Error::MaxDmaTransferSizeExceeded); + let bytes_to_write = buffer.len(); + if bytes_to_write > MAX_DMA_SIZE { + return Err((Error::MaxDmaTransferSizeExceeded, self, buffer)); } self.spi.init_half_duplex( @@ -1407,7 +1383,7 @@ pub mod dma { !address.is_none(), false, dummy != 0, - len == 0, + bytes_to_write == 0, ); self.spi .init_spi_data_mode(cmd.mode(), address.mode(), data_mode); @@ -1451,270 +1427,481 @@ pub mod dma { .modify(|_, w| unsafe { w.usr_dummy_cyclelen().bits(dummy - 1) }); } - unsafe { - self.spi.start_write_bytes_dma( - &mut self.tx_chain, - ptr, - len, - &mut self.channel.tx, - )?; + let result = unsafe { + self.spi + .start_write_bytes_dma(buffer.first(), bytes_to_write, &mut self.channel.tx) + }; + if let Err(e) = result { + return Err((e, self, buffer)); } - Ok(DmaTransferTx::new(self)) + + Ok(SpiDmaTransfer::new(self, buffer, false, bytes_to_write > 0)) } } - #[cfg(feature = "embedded-hal-02")] - impl<'d, T, C, M, DmaMode> embedded_hal_02::blocking::spi::Transfer - for SpiDma<'d, T, C, M, DmaMode> + pub struct SpiDmaBus<'d, T, C> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, - DmaMode: Mode, { - type Error = super::Error; - - fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> { - self.spi.transfer_in_place_dma( - &mut self.tx_chain, - &mut self.rx_chain, - words, - &mut self.channel.tx, - &mut self.channel.rx, - ) - } + spi_dma: Option>, + buffers: Option<(DmaTxBuf, DmaRxBuf)>, } - #[cfg(feature = "embedded-hal-02")] - impl<'d, T, C, M, DmaMode> embedded_hal_02::blocking::spi::Write - for SpiDma<'d, T, C, M, DmaMode> + impl<'d, T, C> SpiDmaBus<'d, T, C> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, - DmaMode: Mode, { - type Error = super::Error; + pub fn new( + spi_dma: SpiDma<'d, T, C, FullDuplexMode, crate::Blocking>, + tx_buffer: DmaTxBuf, + rx_buffer: DmaRxBuf, + ) -> Self { + Self { + spi_dma: Some(spi_dma), + buffers: Some((tx_buffer, rx_buffer)), + } + } + + pub fn read(&mut self, words: &mut [u8]) -> Result<(), Error> { + let mut spi_dma = self.spi_dma.take().unwrap(); + let (tx_buf, mut rx_buf) = self.buffers.take().unwrap(); + + for chunk in words.chunks_mut(rx_buf.capacity()) { + rx_buf.set_length(chunk.len()); + + let transfer = match spi_dma.dma_read(rx_buf) { + Ok(transfer) => transfer, + Err((e, spi, rx)) => { + self.spi_dma = Some(spi); + self.buffers = Some((tx_buf, rx)); + return Err(e); + } + }; + (spi_dma, rx_buf) = transfer.wait(); + + let bytes_read = rx_buf.read_received_data(chunk); + debug_assert_eq!(bytes_read, chunk.len()); + } + + self.spi_dma = Some(spi_dma); + self.buffers = Some((tx_buf, rx_buf)); - fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - self.spi - 
.write_bytes_dma(&mut self.tx_chain, words, &mut self.channel.tx)?; - self.spi.flush()?; Ok(()) } - } - #[cfg(feature = "embedded-hal-02")] - impl, const SIZE: usize> - embedded_hal_02::blocking::spi::Transfer for crate::FlashSafeDma - { - type Error = T::Error; + pub fn write(&mut self, words: &[u8]) -> Result<(), Error> { + let mut spi_dma = self.spi_dma.take().unwrap(); + let (mut tx_buf, rx_buf) = self.buffers.take().unwrap(); - fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> { - self.inner.transfer(words) + for chunk in words.chunks(tx_buf.capacity()) { + tx_buf.fill(chunk); + + let transfer = match spi_dma.dma_write(tx_buf) { + Ok(transfer) => transfer, + Err((e, spi, tx)) => { + self.spi_dma = Some(spi); + self.buffers = Some((tx, rx_buf)); + return Err(e); + } + }; + (spi_dma, tx_buf) = transfer.wait(); + } + + self.spi_dma = Some(spi_dma); + self.buffers = Some((tx_buf, rx_buf)); + + Ok(()) } - } - #[cfg(feature = "embedded-hal-02")] - impl, const SIZE: usize> - embedded_hal_02::blocking::spi::Write for crate::FlashSafeDma - { - type Error = T::Error; + pub fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> { + let mut spi_dma = self.spi_dma.take().unwrap(); + let (mut tx_buf, mut rx_buf) = self.buffers.take().unwrap(); - fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&words[0] as *const _ as u32) { - for chunk in words.chunks(SIZE) { - self.buffer[..chunk.len()].copy_from_slice(chunk); - self.inner.write(&self.buffer[..chunk.len()])?; - } + let chunk_size = min(tx_buf.capacity(), rx_buf.capacity()); + + let common_length = min(read.len(), write.len()); + let (read_common, read_remainder) = read.split_at_mut(common_length); + let (write_common, write_remainder) = write.split_at(common_length); + + for (read_chunk, write_chunk) in read_common + .chunks_mut(chunk_size) + .zip(write_common.chunks(chunk_size)) + { + tx_buf.fill(write_chunk); + rx_buf.set_length(read_chunk.len()); + + let transfer = match spi_dma.dma_transfer(tx_buf, rx_buf) { + Ok(transfer) => transfer, + Err((e, spi, tx, rx)) => { + self.spi_dma = Some(spi); + self.buffers = Some((tx, rx)); + return Err(e); + } + }; + (spi_dma, (tx_buf, rx_buf)) = transfer.wait(); + + let bytes_read = rx_buf.read_received_data(read_chunk); + debug_assert_eq!(bytes_read, read_chunk.len()); + } + + self.spi_dma = Some(spi_dma); + self.buffers = Some((tx_buf, rx_buf)); + + if !read_remainder.is_empty() { + self.read(read_remainder) + } else if !write_remainder.is_empty() { + self.write(write_remainder) } else { - self.inner.write(words)?; - }; + Ok(()) + } + } + + pub fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Error> { + let mut spi_dma = self.spi_dma.take().unwrap(); + let (mut tx_buf, mut rx_buf) = self.buffers.take().unwrap(); + + let chunk_size = min(tx_buf.capacity(), rx_buf.capacity()); + + for chunk in words.chunks_mut(chunk_size) { + tx_buf.fill(chunk); + rx_buf.set_length(chunk.len()); + + let transfer = match spi_dma.dma_transfer(tx_buf, rx_buf) { + Ok(transfer) => transfer, + Err((e, spi, tx, rx)) => { + self.spi_dma = Some(spi); + self.buffers = Some((tx, rx)); + return Err(e); + } + }; + (spi_dma, (tx_buf, rx_buf)) = transfer.wait(); + + let bytes_read = rx_buf.read_received_data(chunk); + debug_assert_eq!(bytes_read, chunk.len()); + } + + self.spi_dma = Some(spi_dma); + self.buffers = Some((tx_buf, rx_buf)); Ok(()) } } #[cfg(feature = "embedded-hal-02")] - impl, const SIZE: usize> - 
embedded_hal_02::spi::FullDuplex for crate::FlashSafeDma + impl<'d, T, C> embedded_hal_02::blocking::spi::Transfer for SpiDmaBus<'d, T, C> where - Self: embedded_hal_02::blocking::spi::Transfer, - Self: embedded_hal_02::blocking::spi::Write, + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, { - type Error = T::Error; + type Error = super::Error; - fn read(&mut self) -> nb::Result { - use embedded_hal_02::blocking::spi::Transfer; - let mut buf = [0; 1]; - self.transfer(&mut buf)?; - Ok(buf[0]) + fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> { + self.transfer_in_place(words)?; + Ok(words) } + } - fn send(&mut self, word: u8) -> nb::Result<(), Self::Error> { - use embedded_hal_02::blocking::spi::Write; - self.write(&[word])?; + #[cfg(feature = "embedded-hal-02")] + impl<'d, T, C> embedded_hal_02::blocking::spi::Write for SpiDmaBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + type Error = super::Error; + + fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { + self.write(words)?; Ok(()) } } #[cfg(feature = "async")] - mod asynch { + pub mod asynch { + use core::{cmp::min, mem::take}; + + use embedded_hal::spi::ErrorType; + use super::*; - impl<'d, T, C, M> embedded_hal_async::spi::SpiBus for SpiDma<'d, T, C, M, crate::Async> + #[derive(Default)] + enum State<'d, T, C> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, { - async fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - let mut future = crate::dma::asynch::DmaRxFuture::new(&mut self.channel.rx); - unsafe { - self.spi.start_read_bytes_dma( - &mut self.rx_chain, - words.as_mut_ptr(), - words.len(), - future.rx(), - )?; - } - future.await?; + Idle( + SpiDma<'d, T, C, FullDuplexMode, crate::Async>, + DmaTxBuf, + DmaRxBuf, + ), + Reading( + SpiDmaTransfer<'d, T, C, FullDuplexMode, crate::Async, DmaRxBuf>, + DmaTxBuf, + ), + Writing( + SpiDmaTransfer<'d, T, C, FullDuplexMode, crate::Async, DmaTxBuf>, + DmaRxBuf, + ), + Transferring( + SpiDmaTransfer<'d, T, C, FullDuplexMode, crate::Async, (DmaTxBuf, DmaRxBuf)>, + ), + #[default] + InUse, + } + + pub struct SpiDmaAsyncBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + state: State<'d, T, C>, + } - Ok(()) + impl<'d, T, C> SpiDmaAsyncBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + pub fn new( + spi: SpiDma<'d, T, C, FullDuplexMode, crate::Async>, + dma_tx_buf: DmaTxBuf, + dma_rx_buf: DmaRxBuf, + ) -> Self { + Self { + state: State::Idle(spi, dma_tx_buf, dma_rx_buf), + } } - async fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - for chunk in words.chunks(MAX_DMA_SIZE) { - let mut future = crate::dma::asynch::DmaTxFuture::new(&mut self.channel.tx); - unsafe { - self.spi.start_write_bytes_dma( - &mut self.tx_chain, - chunk.as_ptr(), - chunk.len(), - future.tx(), - )?; + async fn wait_for_idle( + &mut self, + ) -> ( + SpiDma<'d, T, C, FullDuplexMode, crate::Async>, + DmaTxBuf, + DmaRxBuf, + ) { + match &mut self.state { + State::Idle(_, _, _) => (), + State::Reading(transfer, _) => transfer.wait_for_done().await, + State::Writing(transfer, _) => transfer.wait_for_done().await, + State::Transferring(transfer) => transfer.wait_for_done().await, + State::InUse => unreachable!(), + } + match take(&mut self.state) { + State::Idle(spi, tx_buf, rx_buf) => (spi, tx_buf, rx_buf), + State::Reading(transfer, tx_buf) => { + let (spi, rx_buf) = 
transfer.wait(); + (spi, tx_buf, rx_buf) } - future.await?; - - self.spi.flush()?; + State::Writing(transfer, rx_buf) => { + let (spi, tx_buf) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + State::Transferring(transfer) => { + let (spi, (tx_buf, rx_buf)) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + State::InUse => unreachable!(), } - - Ok(()) } + } - async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { - let mut idx = 0; - loop { - let write_idx = isize::min(idx, write.len() as isize); - let write_len = usize::min(write.len() - idx as usize, MAX_DMA_SIZE); - - let read_idx = isize::min(idx, read.len() as isize); - let read_len = usize::min(read.len() - idx as usize, MAX_DMA_SIZE); - - let mut tx_future = crate::dma::asynch::DmaTxFuture::new(&mut self.channel.tx); - let mut rx_future = crate::dma::asynch::DmaRxFuture::new(&mut self.channel.rx); - - unsafe { - self.spi.start_transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - write.as_ptr().offset(write_idx), - write_len, - read.as_mut_ptr().offset(read_idx), - read_len, - tx_future.tx(), - rx_future.rx(), - )?; - } - let (tx_res, rx_res) = embassy_futures::join::join(tx_future, rx_future).await; - tx_res?; - rx_res?; - - self.spi.flush()?; + impl<'d, T, C> ErrorType for SpiDmaAsyncBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + type Error = Error; + } - idx += MAX_DMA_SIZE as isize; - if idx >= write.len() as isize && idx >= read.len() as isize { - break; - } + impl<'d, T, C> embedded_hal_async::spi::SpiBus for SpiDmaAsyncBus<'d, T, C> + where + T: InstanceDma, + C: DmaChannel, + C::P: SpiPeripheral, + { + async fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { + // Get previous transfer. + let (mut spi_dma, mut tx_buf, mut rx_buf) = self.wait_for_idle().await; + + for chunk in words.chunks_mut(rx_buf.capacity()) { + rx_buf.set_length(chunk.len()); + + match spi_dma.dma_read(rx_buf) { + Ok(transfer) => { + self.state = State::Reading(transfer, tx_buf); + } + Err((e, spi, rx)) => { + self.state = State::Idle(spi, tx_buf, rx); + return Err(e); + } + }; + + match &mut self.state { + State::Reading(transfer, _) => transfer.wait_for_done().await, + _ => unreachable!(), + }; + (spi_dma, tx_buf, rx_buf) = match take(&mut self.state) { + State::Reading(transfer, tx_buf) => { + let (spi, rx_buf) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + _ => unreachable!(), + }; + + let bytes_read = rx_buf.read_received_data(chunk); + debug_assert_eq!(bytes_read, chunk.len()); } + self.state = State::Idle(spi_dma, tx_buf, rx_buf); + Ok(()) } - async fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - for chunk in words.chunks_mut(MAX_DMA_SIZE) { - let mut tx_future = crate::dma::asynch::DmaTxFuture::new(&mut self.channel.tx); - let mut rx_future = crate::dma::asynch::DmaRxFuture::new(&mut self.channel.rx); - - unsafe { - self.spi.start_transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - chunk.as_ptr(), - chunk.len(), - chunk.as_mut_ptr(), - chunk.len(), - tx_future.tx(), - rx_future.rx(), - )?; - } - - let (tx_res, rx_res) = embassy_futures::join::join(tx_future, rx_future).await; - tx_res?; - rx_res?; - - self.spi.flush()?; + async fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { + // Get previous transfer. 
+ let (mut spi_dma, mut tx_buf, mut rx_buf) = self.wait_for_idle().await; + + for chunk in words.chunks(tx_buf.capacity()) { + tx_buf.fill(chunk); + + match spi_dma.dma_write(tx_buf) { + Ok(transfer) => { + self.state = State::Writing(transfer, rx_buf); + } + Err((e, spi, tx)) => { + self.state = State::Idle(spi, tx, rx_buf); + return Err(e); + } + }; + + match &mut self.state { + State::Writing(transfer, _) => transfer.wait_for_done().await, + _ => unreachable!(), + }; + + (spi_dma, tx_buf, rx_buf) = match take(&mut self.state) { + State::Writing(transfer, rx_buf) => { + let (spi, tx_buf) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + _ => unreachable!(), + }; } + self.state = State::Idle(spi_dma, tx_buf, rx_buf); + Ok(()) } - async fn flush(&mut self) -> Result<(), Self::Error> { - self.spi.flush() - } - } + async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { + // Get previous transfer. + let (mut spi_dma, mut tx_buf, mut rx_buf) = self.wait_for_idle().await; + + let chunk_size = min(tx_buf.capacity(), rx_buf.capacity()); + + let common_length = min(read.len(), write.len()); + let (read_common, read_remainder) = read.split_at_mut(common_length); + let (write_common, write_remainder) = write.split_at(common_length); + + for (read_chunk, write_chunk) in read_common + .chunks_mut(chunk_size) + .zip(write_common.chunks(chunk_size)) + { + tx_buf.fill(write_chunk); + rx_buf.set_length(read_chunk.len()); + + match spi_dma.dma_transfer(tx_buf, rx_buf) { + Ok(transfer) => { + self.state = State::Transferring(transfer); + } + Err((e, spi, tx, rx)) => { + self.state = State::Idle(spi, tx, rx); + return Err(e); + } + }; + + match &mut self.state { + State::Transferring(transfer) => transfer.wait_for_done().await, + _ => unreachable!(), + }; + + (spi_dma, tx_buf, rx_buf) = match take(&mut self.state) { + State::Transferring(transfer) => { + let (spi, (tx_buf, rx_buf)) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + _ => unreachable!(), + }; + + let bytes_read = rx_buf.read_received_data(read_chunk); + assert_eq!(bytes_read, read_chunk.len()); + } - impl embedded_hal_async::spi::SpiBus - for crate::FlashSafeDma - { - async fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.inner.read(words).await - } + self.state = State::Idle(spi_dma, tx_buf, rx_buf); - async fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&words[0] as *const _ as u32) { - for chunk in words.chunks(SIZE) { - self.buffer[..chunk.len()].copy_from_slice(chunk); - self.inner.write(&self.buffer[..chunk.len()]).await?; - } + if !read_remainder.is_empty() { + self.read(read_remainder).await + } else if !write_remainder.is_empty() { + self.write(write_remainder).await } else { - self.inner.write(words).await?; + Ok(()) } - Ok(()) - } - - async fn flush(&mut self) -> Result<(), Self::Error> { - self.inner.flush().await } async fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.inner.transfer_in_place(words).await + // Get previous transfer. 
+ let (mut spi_dma, mut tx_buf, mut rx_buf) = self.wait_for_idle().await; + + for chunk in words.chunks_mut(tx_buf.capacity()) { + tx_buf.fill(chunk); + rx_buf.set_length(chunk.len()); + + match spi_dma.dma_transfer(tx_buf, rx_buf) { + Ok(transfer) => { + self.state = State::Transferring(transfer); + } + Err((e, spi, tx, rx)) => { + self.state = State::Idle(spi, tx, rx); + return Err(e); + } + }; + + match &mut self.state { + State::Transferring(transfer) => transfer.wait_for_done().await, + _ => unreachable!(), + }; + + (spi_dma, tx_buf, rx_buf) = match take(&mut self.state) { + State::Transferring(transfer) => { + let (spi, (tx_buf, rx_buf)) = transfer.wait(); + (spi, tx_buf, rx_buf) + } + _ => unreachable!(), + }; + + let bytes_read = rx_buf.read_received_data(chunk); + debug_assert_eq!(bytes_read, chunk.len()); + } + + self.state = State::Idle(spi_dma, tx_buf, rx_buf); + + Ok(()) } - async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&write[0] as *const _ as u32) { - for (read, write) in read.chunks_mut(SIZE).zip(write.chunks(SIZE)) { - self.buffer[..write.len()].copy_from_slice(write); - self.inner - .transfer(read, &self.buffer[..write.len()]) - .await?; - } - } else { - self.inner.transfer(read, write).await?; - } + async fn flush(&mut self) -> Result<(), Self::Error> { + // Get previous transfer. + let (spi_dma, tx_buf, rx_buf) = self.wait_for_idle().await; + self.state = State::Idle(spi_dma, tx_buf, rx_buf); Ok(()) } } @@ -1726,116 +1913,41 @@ pub mod dma { use super::*; - impl<'d, T, C, M, DmaMode> ErrorType for SpiDma<'d, T, C, M, DmaMode> + impl<'d, T, C> ErrorType for SpiDmaBus<'d, T, C> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, - DmaMode: Mode, { type Error = Error; } - impl<'d, T, C, M> SpiBus for SpiDma<'d, T, C, M, crate::Blocking> + impl<'d, T, C> SpiBus for SpiDmaBus<'d, T, C> where - T: InstanceDma, ChannelRx<'d, C>>, + T: InstanceDma, C: DmaChannel, C::P: SpiPeripheral, - M: IsFullDuplex, { fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.spi.transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - &[], - words, - &mut self.channel.tx, - &mut self.channel.rx, - )?; - self.flush() + self.read(words) } fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - self.spi - .write_bytes_dma(&mut self.tx_chain, words, &mut self.channel.tx)?; - self.flush() + self.write(words) } fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { - self.spi.transfer_dma( - &mut self.tx_chain, - &mut self.rx_chain, - write, - read, - &mut self.channel.tx, - &mut self.channel.rx, - )?; - self.flush() + self.transfer(read, write) } - /// Transfer data in place. - /// - /// Writes data from `words` out on the bus and stores the reply - /// into `words`. A convenient wrapper around - /// [`write`](SpiBus::write), [`flush`](SpiBus::flush) and - /// [`read`](SpiBus::read). 
fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.spi.transfer_in_place_dma( - &mut self.tx_chain, - &mut self.rx_chain, - words, - &mut self.channel.tx, - &mut self.channel.rx, - )?; - self.flush() + self.transfer_in_place(words) } fn flush(&mut self) -> Result<(), Self::Error> { - self.spi.flush() - } - } - - impl ErrorType for crate::FlashSafeDma { - type Error = T::Error; - } - - impl SpiBus for crate::FlashSafeDma { - fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.inner.read(words) - } - - fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&words[0] as *const _ as u32) { - for chunk in words.chunks(SIZE) { - self.buffer[..chunk.len()].copy_from_slice(chunk); - self.inner.write(&self.buffer[..chunk.len()])?; - } - } else { - self.inner.write(words)?; - } + // All operations currently flush so this is a no-op. Ok(()) } - - fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> { - if !crate::soc::is_valid_ram_address(&write[0] as *const _ as u32) { - for (read, write) in read.chunks_mut(SIZE).zip(write.chunks(SIZE)) { - self.buffer[..write.len()].copy_from_slice(write); - self.inner.transfer(read, &self.buffer[..write.len()])?; - } - } else { - self.inner.transfer(read, write)?; - } - Ok(()) - } - - fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> { - self.inner.transfer_in_place(words) - } - - fn flush(&mut self) -> Result<(), Self::Error> { - self.inner.flush() - } } } } @@ -1940,90 +2052,13 @@ mod ehal1 { } #[doc(hidden)] -pub trait InstanceDma<TX, RX>: Instance -where - TX: Tx, - RX: Rx, -{ - fn transfer_in_place_dma<'w>( - &mut self, - tx_chain: &mut DescriptorChain, - rx_chain: &mut DescriptorChain, - words: &'w mut [u8], - tx: &mut TX, - rx: &mut RX, - ) -> Result<&'w [u8], Error> { - for chunk in words.chunks_mut(MAX_DMA_SIZE) { - unsafe { - self.start_transfer_dma( - tx_chain, - rx_chain, - chunk.as_ptr(), - chunk.len(), - chunk.as_mut_ptr(), - chunk.len(), - tx, - rx, - )?; - } - - while !tx.is_done() && !rx.is_done() {} - self.flush().unwrap(); - } - - Ok(words) - } - - fn transfer_dma<'w>( - &mut self, - tx_chain: &mut DescriptorChain, - rx_chain: &mut DescriptorChain, - write_buffer: &'w [u8], - read_buffer: &'w mut [u8], - tx: &mut TX, - rx: &mut RX, - ) -> Result<&'w [u8], Error> { - let mut idx = 0; - loop { - let write_idx = isize::min(idx, write_buffer.len() as isize); - let write_len = usize::min(write_buffer.len() - idx as usize, MAX_DMA_SIZE); - - let read_idx = isize::min(idx, read_buffer.len() as isize); - let read_len = usize::min(read_buffer.len() - idx as usize, MAX_DMA_SIZE); - - unsafe { - self.start_transfer_dma( - tx_chain, - rx_chain, - write_buffer.as_ptr().offset(write_idx), - write_len, - read_buffer.as_mut_ptr().offset(read_idx), - read_len, - tx, - rx, - )?; - } - - while !tx.is_done() && !rx.is_done() {} - self.flush().unwrap(); - - idx += MAX_DMA_SIZE as isize; - if idx >= write_buffer.len() as isize && idx >= read_buffer.len() as isize { - break; - } - } - - Ok(read_buffer) - } - +pub trait InstanceDma: Instance { #[allow(clippy::too_many_arguments)] - unsafe fn start_transfer_dma( + unsafe fn start_transfer_dma<TX: Tx, RX: Rx>( &mut self, - tx_chain: &mut DescriptorChain, - rx_chain: &mut DescriptorChain, - write_buffer_ptr: *const u8, + tx_desc: *mut DmaDescriptor, + rx_desc: *mut DmaDescriptor, write_buffer_len: usize, - read_buffer_ptr: *mut u8, read_buffer_len: usize, tx: &mut TX, rx: &mut RX, @@ -2042,13 
+2077,11 @@ where self.enable_dma(); self.update(); - reset_dma_before_load_dma_dscr(reg_block); self.clear_dma_interrupts(); - tx_chain.fill_for_tx(false, write_buffer_ptr, write_buffer_len)?; - tx.prepare_transfer_without_start(self.dma_peripheral(), tx_chain) + reset_dma_before_load_dma_dscr(reg_block); + tx.prepare_transfer(self.dma_peripheral(), tx_desc) .and_then(|_| tx.start_transfer())?; - rx_chain.fill_for_rx(false, read_buffer_ptr, read_buffer_len)?; - rx.prepare_transfer_without_start(self.dma_peripheral(), rx_chain) + rx.prepare_transfer(self.dma_peripheral(), rx_desc) .and_then(|_| rx.start_transfer())?; reset_dma_before_usr_cmd(reg_block); @@ -2058,29 +2091,10 @@ where Ok(()) } - fn write_bytes_dma<'w>( - &mut self, - chain: &mut DescriptorChain, - words: &'w [u8], - tx: &mut TX, - ) -> Result<&'w [u8], Error> { - for chunk in words.chunks(MAX_DMA_SIZE) { - unsafe { - self.start_write_bytes_dma(chain, chunk.as_ptr(), chunk.len(), tx)?; - } - - while !tx.is_done() {} - self.flush().unwrap(); // seems "is_done" doesn't work as intended? - } - - Ok(words) - } - #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - unsafe fn start_write_bytes_dma( + unsafe fn start_write_bytes_dma( &mut self, - chain: &mut DescriptorChain, - ptr: *const u8, + first_desc: *mut DmaDescriptor, len: usize, tx: &mut TX, ) -> Result<(), Error> { @@ -2099,29 +2113,32 @@ where reset_dma_before_load_dma_dscr(reg_block); self.clear_dma_interrupts(); - chain.fill_for_tx(false, ptr, len)?; - unsafe { - tx.prepare_transfer_without_start(self.dma_peripheral(), chain) - .and_then(|_| tx.start_transfer())?; - } + tx.prepare_transfer(self.dma_peripheral(), first_desc)?; + tx.start_transfer()?; reset_dma_before_usr_cmd(reg_block); + // Wait for at least one clock cycle for the DMA to fill the SPI async FIFO, + // before starting the SPI + #[cfg(riscv)] + riscv::asm::delay(1); + #[cfg(xtensa)] + xtensa_lx::timer::delay(1); + reg_block.cmd().modify(|_, w| w.usr().set_bit()); Ok(()) } #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] - unsafe fn start_read_bytes_dma( + unsafe fn start_read_bytes_dma( &mut self, - chain: &mut DescriptorChain, - ptr: *mut u8, - len: usize, + desc: *mut DmaDescriptor, + data_length: usize, rx: &mut RX, ) -> Result<(), Error> { let reg_block = self.register_block(); - self.configure_datalen(len as u32 * 8); + self.configure_datalen(data_length as u32 * 8); rx.is_done(); @@ -2134,13 +2151,13 @@ where self.update(); reset_dma_before_load_dma_dscr(reg_block); - self.clear_dma_interrupts(); - chain.fill_for_rx(false, ptr, len)?; - rx.prepare_transfer_without_start(self.dma_peripheral(), chain) - .and_then(|_| rx.start_transfer())?; + self.clear_dma_interrupts(); reset_dma_before_usr_cmd(reg_block); + rx.prepare_transfer(self.dma_peripheral(), desc)?; + rx.start_transfer()?; + reg_block.cmd().modify(|_, w| w.usr().set_bit()); Ok(()) @@ -2253,20 +2270,10 @@ fn reset_dma_before_load_dma_dscr(reg_block: &RegisterBlock) { }); } -impl InstanceDma for crate::peripherals::SPI2 -where - TX: Tx, - RX: Rx, -{ -} +impl InstanceDma for crate::peripherals::SPI2 {} #[cfg(spi3)] -impl InstanceDma for crate::peripherals::SPI3 -where - TX: Tx, - RX: Rx, -{ -} +impl InstanceDma for crate::peripherals::SPI3 {} #[doc(hidden)] pub trait ExtendedInstance: Instance { diff --git a/examples/src/bin/embassy_spi.rs b/examples/src/bin/embassy_spi.rs index e406089626c..9924a6c27a3 100644 --- a/examples/src/bin/embassy_spi.rs +++ b/examples/src/bin/embassy_spi.rs @@ -24,7 +24,7 @@ use esp_backtrace as _; use 
esp_hal::{ clock::ClockControl, dma::*, - dma_descriptors, + dma_buffers, gpio::Io, peripherals::Peripherals, prelude::*, @@ -59,15 +59,14 @@ async fn main(_spawner: Spawner) { #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] let dma_channel = dma.channel0; - let (descriptors, rx_descriptors) = dma_descriptors!(32000); + let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(32000); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure_for_async(false, DmaPriority::Priority0), - descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure_for_async(false, DmaPriority::Priority0)) + .with_buffers(dma_tx_buf, dma_rx_buf); let send_buffer = [0, 1, 2, 3, 4, 5, 6, 7]; loop { diff --git a/examples/src/bin/qspi_flash.rs b/examples/src/bin/qspi_flash.rs index cabe617872a..b0aacf8fbb3 100644 --- a/examples/src/bin/qspi_flash.rs +++ b/examples/src/bin/qspi_flash.rs @@ -31,7 +31,7 @@ use esp_backtrace as _; use esp_hal::{ clock::ClockControl, delay::Delay, - dma::{Dma, DmaPriority}, + dma::{Dma, DmaPriority, DmaRxBuf, DmaTxBuf}, dma_buffers, gpio::Io, peripherals::Peripherals, @@ -77,6 +77,8 @@ fn main() -> ! { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(256, 320); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins( @@ -87,30 +89,23 @@ fn main() -> ! { Some(sio3), Some(cs), ) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); let delay = Delay::new(&clocks); - // DMA buffer require a static life-time - let (zero_buf, _, _, _) = dma_buffers!(0); - let send = tx_buffer; - let mut receive = rx_buffer; - // write enable + dma_tx_buf.set_length(0); let transfer = spi .write( SpiDataMode::Single, Command::Command8(0x06, SpiDataMode::Single), Address::None, 0, - &zero_buf, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_tx_buf) = transfer.wait(); delay.delay_millis(250); // erase sector @@ -120,10 +115,11 @@ fn main() -> ! { Command::Command8(0x20, SpiDataMode::Single), Address::Address24(0x000000, SpiDataMode::Single), 0, - &zero_buf, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_tx_buf) = transfer.wait(); delay.delay_millis(250); // write enable @@ -133,25 +129,28 @@ fn main() -> ! 
{ Command::Command8(0x06, SpiDataMode::Single), Address::None, 0, - &zero_buf, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_tx_buf) = transfer.wait(); delay.delay_millis(250); // write data / program page - send.fill(b'!'); - send[0..][..5].copy_from_slice(&b"Hello"[..]); + dma_tx_buf.set_length(dma_tx_buf.capacity()); + dma_tx_buf.as_mut_slice().fill(b'!'); + dma_tx_buf.as_mut_slice()[0..][..5].copy_from_slice(&b"Hello"[..]); let transfer = spi .write( SpiDataMode::Quad, Command::Command8(0x32, SpiDataMode::Single), Address::Address24(0x000000, SpiDataMode::Single), 0, - &send, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, _) = transfer.wait(); delay.delay_millis(250); loop { @@ -162,17 +161,18 @@ fn main() -> ! { Command::Command8(0xeb, SpiDataMode::Single), Address::Address32(0x000000 << 8, SpiDataMode::Quad), 4, - &mut receive, + dma_rx_buf, ) + .map_err(|e| e.0) .unwrap(); // here we could do something else while DMA transfer is in progress // the buffers and spi is moved into the transfer and we can get it back via // `wait` - transfer.wait().unwrap(); + (spi, dma_rx_buf) = transfer.wait(); - println!("{:x?}", &receive); - for b in &mut receive.iter() { + println!("{:x?}", dma_rx_buf.as_slice()); + for b in &mut dma_rx_buf.as_slice().iter() { if *b >= 32 && *b <= 127 { print!("{}", *b as char); } else { diff --git a/examples/src/bin/spi_loopback_dma.rs b/examples/src/bin/spi_loopback_dma.rs index 991b856a163..52f329c4a43 100644 --- a/examples/src/bin/spi_loopback_dma.rs +++ b/examples/src/bin/spi_loopback_dma.rs @@ -22,7 +22,7 @@ use esp_backtrace as _; use esp_hal::{ clock::ClockControl, delay::Delay, - dma::{Dma, DmaPriority}, + dma::{Dma, DmaPriority, DmaRxBuf, DmaTxBuf}, dma_buffers, gpio::Io, peripherals::Peripherals, @@ -55,32 +55,30 @@ fn main() -> ! { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(32000); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); let delay = Delay::new(&clocks); - // DMA buffer require a static life-time - let mut send = tx_buffer; - let mut receive = rx_buffer; let mut i = 0; - for (i, v) in send.iter_mut().enumerate() { + for (i, v) in dma_tx_buf.as_mut_slice().iter_mut().enumerate() { *v = (i % 255) as u8; } loop { - send[0] = i; - send[send.len() - 1] = i; + dma_tx_buf.as_mut_slice()[0] = i; + *dma_tx_buf.as_mut_slice().last_mut().unwrap() = i; i = i.wrapping_add(1); - let mut transfer = spi.dma_transfer(&mut send, &mut receive).unwrap(); + let transfer = spi + .dma_transfer(dma_tx_buf, dma_rx_buf) + .map_err(|e| e.0) + .unwrap(); // here we could do something else while DMA transfer is in progress let mut n = 0; // Check is_done until the transfer is almost done (32000 bytes at 100kHz is @@ -90,11 +88,11 @@ fn main() -> ! { n += 1; } - transfer.wait().unwrap(); + (spi, (dma_tx_buf, dma_rx_buf)) = transfer.wait(); println!( "{:x?} .. {:x?}", - &receive[..10], - &receive[receive.len() - 10..] 
+ &dma_rx_buf.as_slice()[..10], + &dma_rx_buf.as_slice().last_chunk::<10>().unwrap() ); delay.delay_millis(250); diff --git a/hil-test/tests/spi_full_duplex_dma.rs b/hil-test/tests/spi_full_duplex_dma.rs index b80e178d7fb..cfe96728b34 100644 --- a/hil-test/tests/spi_full_duplex_dma.rs +++ b/hil-test/tests/spi_full_duplex_dma.rs @@ -40,6 +40,10 @@ use hil_test as _; #[embedded_test::tests] mod tests { use defmt::assert_eq; + use esp_hal::{ + dma::{DmaRxBuf, DmaTxBuf}, + spi::master::dma::SpiDmaBus, + }; use super::*; @@ -66,23 +70,21 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - let mut send = tx_buffer; - let mut receive = rx_buffer; + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); - send.copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]); + dma_tx_buf.fill(&[0xde, 0xad, 0xbe, 0xef]); - let transfer = spi.dma_transfer(&mut send, &mut receive).unwrap(); - transfer.wait().unwrap(); - assert_eq!(send, receive); + let transfer = spi + .dma_transfer(dma_tx_buf, dma_rx_buf) + .map_err(|e| e.0) + .unwrap(); + (_, (dma_tx_buf, dma_rx_buf)) = transfer.wait(); + assert_eq!(dma_tx_buf.as_slice(), dma_rx_buf.as_slice()); } #[test] @@ -108,23 +110,21 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(4, 2); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - let mut send = tx_buffer; - let mut receive = rx_buffer; + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); - send.copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]); + dma_tx_buf.fill(&[0xde, 0xad, 0xbe, 0xef]); - let transfer = spi.dma_transfer(&mut send, &mut receive).unwrap(); - transfer.wait().unwrap(); - assert_eq!(send[0..1], receive[0..1]); + let transfer = spi + .dma_transfer(dma_tx_buf, dma_rx_buf) + .map_err(|e| e.0) + .unwrap(); + (_, (dma_tx_buf, dma_rx_buf)) = transfer.wait(); + assert_eq!(dma_tx_buf.as_slice()[0..1], dma_rx_buf.as_slice()[0..1]); } #[test] @@ -150,176 +150,23 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); - - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) - .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - let mut send = tx_buffer; - let mut receive = rx_buffer; - - send.copy_from_slice(&[0x55u8; 4096]); - for byte in 0..send.len() { - send[byte] = byte as u8; - } - - let transfer = spi.dma_transfer(&mut send, &mut 
receive).unwrap(); - transfer.wait().unwrap(); - assert_eq!(send, receive); - } - - #[test] - #[timeout(3)] - fn test_try_using_non_dma_memory_tx_buffer() { - const DMA_BUFFER_SIZE: usize = 4096; - - let peripherals = Peripherals::take(); - let system = SystemControl::new(peripherals.SYSTEM); - let clocks = ClockControl::boot_defaults(system.clock_control).freeze(); - - let io = Io::new(peripherals.GPIO, peripherals.IO_MUX); - let sclk = io.pins.gpio0; - let miso = io.pins.gpio2; - let mosi = io.pins.gpio3; - let cs = io.pins.gpio8; - - let dma = Dma::new(peripherals.DMA); - - #[cfg(any(feature = "esp32", feature = "esp32s2"))] - let dma_channel = dma.spi2channel; - #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] - let dma_channel = dma.channel0; - - let (_, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); - - let tx_buffer = { - // using `static`, not `static mut`, places the array in .rodata - static TX_BUFFER: [u8; DMA_BUFFER_SIZE] = [42u8; DMA_BUFFER_SIZE]; - unsafe { - core::slice::from_raw_parts( - &mut *(core::ptr::addr_of!(TX_BUFFER) as *mut u8), - DMA_BUFFER_SIZE, - ) - } - }; - - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) - .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - let mut receive = rx_buffer; - - assert!(matches!( - spi.dma_transfer(&tx_buffer, &mut receive), - Err(esp_hal::spi::Error::DmaError( - esp_hal::dma::DmaError::UnsupportedMemoryRegion - )) - )); - } - - #[test] - #[timeout(3)] - fn test_try_using_non_dma_memory_rx_buffer() { - const DMA_BUFFER_SIZE: usize = 4096; - - let peripherals = Peripherals::take(); - let system = SystemControl::new(peripherals.SYSTEM); - let clocks = ClockControl::boot_defaults(system.clock_control).freeze(); - - let io = Io::new(peripherals.GPIO, peripherals.IO_MUX); - let sclk = io.pins.gpio0; - let miso = io.pins.gpio2; - let mosi = io.pins.gpio3; - let cs = io.pins.gpio8; - - let dma = Dma::new(peripherals.DMA); - - #[cfg(any(feature = "esp32", feature = "esp32s2"))] - let dma_channel = dma.spi2channel; - #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] - let dma_channel = dma.channel0; - - let (tx_buffer, tx_descriptors, _, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); - - let rx_buffer = { - // using `static`, not `static mut`, places the array in .rodata - static RX_BUFFER: [u8; DMA_BUFFER_SIZE] = [42u8; DMA_BUFFER_SIZE]; - unsafe { - core::slice::from_raw_parts_mut( - &mut *(core::ptr::addr_of!(RX_BUFFER) as *mut u8), - DMA_BUFFER_SIZE, - ) - } - }; - - let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) - .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - let mut receive = rx_buffer; - assert!(matches!( - spi.dma_transfer(&tx_buffer, &mut receive), - Err(esp_hal::spi::Error::DmaError( - esp_hal::dma::DmaError::UnsupportedMemoryRegion - )) - )); - } - - #[test] - #[timeout(3)] - fn test_symmetric_dma_transfer_owned() { - const DMA_BUFFER_SIZE: usize = 4096; - - let peripherals = Peripherals::take(); - let system = SystemControl::new(peripherals.SYSTEM); - let clocks = ClockControl::boot_defaults(system.clock_control).freeze(); - - let io = Io::new(peripherals.GPIO, peripherals.IO_MUX); - let sclk = io.pins.gpio0; - let miso = io.pins.gpio2; - let mosi = io.pins.gpio3; - let cs = 
io.pins.gpio8; - - let dma = Dma::new(peripherals.DMA); - - #[cfg(any(feature = "esp32", feature = "esp32s2"))] - let dma_channel = dma.spi2channel; - #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] - let dma_channel = dma.channel0; - - let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - let send = tx_buffer; - let receive = rx_buffer; - - send.copy_from_slice(&[0x55u8; 4096]); - for byte in 0..send.len() { - send[byte] = byte as u8; + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); + + for (i, d) in dma_tx_buf.as_mut_slice().iter_mut().enumerate() { + *d = i as _; } - let transfer = spi.dma_transfer_owned(send, receive).unwrap(); - let (_, send, receive) = transfer.wait().unwrap(); - assert_eq!(send, receive); + let transfer = spi + .dma_transfer(dma_tx_buf, dma_rx_buf) + .map_err(|e| e.0) + .unwrap(); + (_, (dma_tx_buf, dma_rx_buf)) = transfer.wait(); + assert_eq!(dma_tx_buf.as_slice(), dma_rx_buf.as_slice()); } #[test] @@ -365,14 +212,12 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); let unit = pcnt.unit0; unit.channel0.set_edge_signal(PcntSource::from_pin( @@ -382,21 +227,19 @@ mod tests { unit.channel0 .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); - let mut receive = rx_buffer; - // Fill the buffer where each byte has 3 pos edges. 
- tx_buffer.fill(0b0110_1010); + dma_tx_buf.as_mut_slice().fill(0b0110_1010); assert_eq!(out_pin.is_set_low(), true); for i in 1..4 { - receive.copy_from_slice(&[5, 5, 5, 5, 5]); - let transfer = spi.dma_read(&mut receive).unwrap(); - transfer.wait().unwrap(); - assert_eq!(receive, &[0, 0, 0, 0, 0]); + dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]); + let transfer = spi.dma_read(dma_rx_buf).map_err(|e| e.0).unwrap(); + (spi, dma_rx_buf) = transfer.wait(); + assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]); - let transfer = spi.dma_write(&tx_buffer).unwrap(); - transfer.wait().unwrap(); + let transfer = spi.dma_write(dma_tx_buf).map_err(|e| e.0).unwrap(); + (spi, dma_tx_buf) = transfer.wait(); assert_eq!(unit.get_value(), (i * 3 * DMA_BUFFER_SIZE) as _); } } @@ -444,14 +287,12 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); let unit = pcnt.unit0; unit.channel0.set_edge_signal(PcntSource::from_pin( @@ -461,22 +302,138 @@ mod tests { unit.channel0 .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); - let mut receive = rx_buffer; - // Fill the buffer where each byte has 3 pos edges. - tx_buffer.fill(0b0110_1010); + dma_tx_buf.as_mut_slice().fill(0b0110_1010); assert_eq!(out_pin.is_set_low(), true); for i in 1..4 { - receive.copy_from_slice(&[5, 5, 5, 5, 5]); - let transfer = spi.dma_read(&mut receive).unwrap(); - transfer.wait().unwrap(); - assert_eq!(receive, &[0, 0, 0, 0, 0]); - - let transfer = spi.dma_transfer(&tx_buffer, &mut receive).unwrap(); - transfer.wait().unwrap(); + dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]); + let transfer = spi.dma_read(dma_rx_buf).map_err(|e| e.0).unwrap(); + (spi, dma_rx_buf) = transfer.wait(); + assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]); + + let transfer = spi + .dma_transfer(dma_tx_buf, dma_rx_buf) + .map_err(|e| e.0) + .unwrap(); + (spi, (dma_tx_buf, dma_rx_buf)) = transfer.wait(); assert_eq!(unit.get_value(), (i * 3 * DMA_BUFFER_SIZE) as _); } } + + #[test] + #[timeout(3)] + fn test_dma_bus_symmetric_transfer() { + const DMA_BUFFER_SIZE: usize = 4; + + let peripherals = Peripherals::take(); + let system = SystemControl::new(peripherals.SYSTEM); + let clocks = ClockControl::boot_defaults(system.clock_control).freeze(); + + let io = Io::new(peripherals.GPIO, peripherals.IO_MUX); + let sclk = io.pins.gpio0; + let miso = io.pins.gpio2; + let mosi = io.pins.gpio3; + let cs = io.pins.gpio8; + + let dma = Dma::new(peripherals.DMA); + + #[cfg(any(feature = "esp32", feature = "esp32s2"))] + let dma_channel = dma.spi2channel; + #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] + let dma_channel = dma.channel0; + + let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); + + let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + .with_pins(Some(sclk), Some(mosi), Some(miso), 
Some(cs)) + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)) + .with_buffers(dma_tx_buf, dma_rx_buf); + + let tx_buf = [0xde, 0xad, 0xbe, 0xef]; + let mut rx_buf = [0; 4]; + + spi.transfer(&mut rx_buf, &tx_buf).unwrap(); + + assert_eq!(tx_buf, rx_buf); + } + + #[test] + #[timeout(3)] + fn test_dma_bus_asymmetric_transfer() { + let peripherals = Peripherals::take(); + let system = SystemControl::new(peripherals.SYSTEM); + let clocks = ClockControl::boot_defaults(system.clock_control).freeze(); + + let io = Io::new(peripherals.GPIO, peripherals.IO_MUX); + let sclk = io.pins.gpio0; + let miso = io.pins.gpio2; + let mosi = io.pins.gpio3; + let cs = io.pins.gpio8; + + let dma = Dma::new(peripherals.DMA); + + #[cfg(any(feature = "esp32", feature = "esp32s2"))] + let dma_channel = dma.spi2channel; + #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] + let dma_channel = dma.channel0; + + let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(4); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); + + let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)) + .with_buffers(dma_tx_buf, dma_rx_buf); + + let tx_buf = [0xde, 0xad, 0xbe, 0xef]; + let mut rx_buf = [0; 4]; + + spi.transfer(&mut rx_buf, &tx_buf).unwrap(); + + assert_eq!(&tx_buf[0..1], &rx_buf[0..1]); + } + + #[test] + #[timeout(3)] + fn test_dma_bus_symmetric_transfer_huge_buffer() { + const DMA_BUFFER_SIZE: usize = 4096; + + let peripherals = Peripherals::take(); + let system = SystemControl::new(peripherals.SYSTEM); + let clocks = ClockControl::boot_defaults(system.clock_control).freeze(); + + let io = Io::new(peripherals.GPIO, peripherals.IO_MUX); + let sclk = io.pins.gpio0; + let miso = io.pins.gpio2; + let mosi = io.pins.gpio3; + let cs = io.pins.gpio8; + + let dma = Dma::new(peripherals.DMA); + + #[cfg(any(feature = "esp32", feature = "esp32s2"))] + let dma_channel = dma.spi2channel; + #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] + let dma_channel = dma.channel0; + + let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(40); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); + + let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)) + .with_buffers(dma_tx_buf, dma_rx_buf); + + let tx_buf = core::array::from_fn(|i| i as _); + let mut rx_buf = [0; DMA_BUFFER_SIZE]; + + spi.transfer(&mut rx_buf, &tx_buf).unwrap(); + + assert_eq!(tx_buf, rx_buf); + } } diff --git a/hil-test/tests/spi_full_duplex_dma_async.rs b/hil-test/tests/spi_full_duplex_dma_async.rs index d9083ec3b08..2d72757d346 100644 --- a/hil-test/tests/spi_full_duplex_dma_async.rs +++ b/hil-test/tests/spi_full_duplex_dma_async.rs @@ -43,6 +43,7 @@ use hil_test as _; #[embedded_test::tests(executor = esp_hal_embassy::Executor::new())] mod tests { use defmt::assert_eq; + use esp_hal::dma::{DmaRxBuf, DmaTxBuf}; use super::*; @@ -75,14 +76,13 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + 
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure_for_async(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure_for_async(false, DmaPriority::Priority0)) + .with_buffers(dma_tx_buf, dma_rx_buf); let unit = pcnt.unit0; unit.channel0.set_edge_signal(PcntSource::from_pin( @@ -92,19 +92,19 @@ mod tests { unit.channel0 .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); - let receive = rx_buffer; + let mut receive = [0; DMA_BUFFER_SIZE]; // Fill the buffer where each byte has 3 pos edges. - tx_buffer.fill(0b0110_1010); + let transmit = [0b0110_1010; DMA_BUFFER_SIZE]; assert_eq!(out_pin.is_set_low(), true); for i in 1..4 { receive.copy_from_slice(&[5, 5, 5, 5, 5]); - SpiBus::read(&mut spi, receive).await.unwrap(); - assert_eq!(receive, &[0, 0, 0, 0, 0]); + SpiBus::read(&mut spi, &mut receive).await.unwrap(); + assert_eq!(receive, [0, 0, 0, 0, 0]); - SpiBus::write(&mut spi, tx_buffer).await.unwrap(); + SpiBus::write(&mut spi, &transmit).await.unwrap(); assert_eq!(unit.get_value(), (i * 3 * DMA_BUFFER_SIZE) as _); } } @@ -138,14 +138,13 @@ mod tests { let dma_channel = dma.channel0; let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE); + let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap(); + let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap(); let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) - .with_dma( - dma_channel.configure_for_async(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure_for_async(false, DmaPriority::Priority0)) + .with_buffers(dma_tx_buf, dma_rx_buf); let unit = pcnt.unit0; unit.channel0.set_edge_signal(PcntSource::from_pin( @@ -155,19 +154,19 @@ mod tests { unit.channel0 .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); - let receive = rx_buffer; + let mut receive = [0; DMA_BUFFER_SIZE]; // Fill the buffer where each byte has 3 pos edges. 
- tx_buffer.fill(0b0110_1010); + let transmit = [0b0110_1010; DMA_BUFFER_SIZE]; assert_eq!(out_pin.is_set_low(), true); for i in 1..4 { receive.copy_from_slice(&[5, 5, 5, 5, 5]); - SpiBus::read(&mut spi, receive).await.unwrap(); - assert_eq!(receive, &[0, 0, 0, 0, 0]); + SpiBus::read(&mut spi, &mut receive).await.unwrap(); + assert_eq!(receive, [0, 0, 0, 0, 0]); - SpiBus::transfer(&mut spi, receive, tx_buffer) + SpiBus::transfer(&mut spi, &mut receive, &transmit) .await .unwrap(); assert_eq!(unit.get_value(), (i * 3 * DMA_BUFFER_SIZE) as _); diff --git a/hil-test/tests/spi_half_duplex_read.rs b/hil-test/tests/spi_half_duplex_read.rs index 91809c7bca1..944e554205d 100644 --- a/hil-test/tests/spi_half_duplex_read.rs +++ b/hil-test/tests/spi_half_duplex_read.rs @@ -20,7 +20,7 @@ use hil_test as _; mod tests { use esp_hal::{ clock::ClockControl, - dma::{Dma, DmaPriority}, + dma::{Dma, DmaPriority, DmaRxBuf}, dma_buffers, gpio::{Io, Level, Output}, peripherals::Peripherals, @@ -58,19 +58,13 @@ mod tests { #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] let dma_channel = dma.channel0; - let (_, tx_descriptors, mut rx_buffer, rx_descriptors) = dma_buffers!(0, DMA_BUFFER_SIZE); + let (buffer, descriptors, _, _) = dma_buffers!(DMA_BUFFER_SIZE, 0); + let mut dma_rx_buf = DmaRxBuf::new(descriptors, buffer).unwrap(); let mut spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_sck(sclk) .with_miso(miso) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); - - // Fill with neither 0x00 nor 0xFF. - rx_buffer.fill(5); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); // SPI should read '0's from the MISO pin miso_mirror.set_low(); @@ -81,12 +75,13 @@ mod tests { Command::None, Address::None, 0, - &mut rx_buffer, + dma_rx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_rx_buf) = transfer.wait(); - assert_eq!(rx_buffer, &[0x00; DMA_BUFFER_SIZE]); + assert_eq!(dma_rx_buf.as_slice(), &[0x00; DMA_BUFFER_SIZE]); // SPI should read '1's from the MISO pin miso_mirror.set_high(); @@ -97,11 +92,13 @@ mod tests { Command::None, Address::None, 0, - &mut rx_buffer, + dma_rx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); - assert_eq!(rx_buffer, &[0xFF; DMA_BUFFER_SIZE]); + (_, dma_rx_buf) = transfer.wait(); + + assert_eq!(dma_rx_buf.as_slice(), &[0xFF; DMA_BUFFER_SIZE]); } } diff --git a/hil-test/tests/spi_half_duplex_write.rs b/hil-test/tests/spi_half_duplex_write.rs index e136c778b7e..8726dc8ce58 100644 --- a/hil-test/tests/spi_half_duplex_write.rs +++ b/hil-test/tests/spi_half_duplex_write.rs @@ -20,7 +20,7 @@ use hil_test as _; mod tests { use esp_hal::{ clock::ClockControl, - dma::{Dma, DmaPriority}, + dma::{Dma, DmaPriority, DmaTxBuf}, dma_buffers, gpio::{Io, Pull}, pcnt::{ @@ -62,16 +62,13 @@ mod tests { #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] let dma_channel = dma.channel0; - let (tx_buffer, tx_descriptors, _, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE, 0); + let (buffer, descriptors, _, _) = dma_buffers!(DMA_BUFFER_SIZE, 0); + let mut dma_tx_buf = DmaTxBuf::new(descriptors, buffer).unwrap(); let mut spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) .with_sck(sclk) .with_mosi(mosi) - .with_dma( - dma_channel.configure(false, DmaPriority::Priority0), - tx_descriptors, - rx_descriptors, - ); + .with_dma(dma_channel.configure(false, DmaPriority::Priority0)); let unit = pcnt.unit0; 
unit.channel0.set_edge_signal(PcntSource::from_pin( @@ -82,7 +79,7 @@ mod tests { .set_input_mode(EdgeMode::Hold, EdgeMode::Increment); // Fill the buffer where each byte has 3 pos edges. - tx_buffer.fill(0b0110_1010); + dma_tx_buf.fill(&[0b0110_1010; DMA_BUFFER_SIZE]); let transfer = spi .write( @@ -90,10 +87,11 @@ mod tests { Command::None, Address::None, 0, - &tx_buffer, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + (spi, dma_tx_buf) = transfer.wait(); assert_eq!(unit.get_value(), (3 * DMA_BUFFER_SIZE) as _); @@ -103,10 +101,11 @@ mod tests { Command::None, Address::None, 0, - &tx_buffer, + dma_tx_buf, ) + .map_err(|e| e.0) .unwrap(); - transfer.wait().unwrap(); + transfer.wait(); assert_eq!(unit.get_value(), (6 * DMA_BUFFER_SIZE) as _); }
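
For reference, below is a minimal blocking sketch of the migrated API, assembled from the updated `examples/src/bin/spi_loopback_dma.rs` example and the `test_dma_bus_symmetric_transfer` HIL test in this patch. It is not part of the patch; the pin numbers, DMA channel choice, clock setup, and buffer size are illustrative only and are chip-dependent.

#![no_std]
#![no_main]

use esp_backtrace as _;
use esp_hal::{
    clock::ClockControl,
    dma::{Dma, DmaPriority, DmaRxBuf, DmaTxBuf},
    dma_buffers,
    gpio::Io,
    peripherals::Peripherals,
    prelude::*,
    spi::{master::Spi, SpiMode},
    system::SystemControl,
};

#[entry]
fn main() -> ! {
    let peripherals = Peripherals::take();
    let system = SystemControl::new(peripherals.SYSTEM);
    let clocks = ClockControl::boot_defaults(system.clock_control).freeze();

    let io = Io::new(peripherals.GPIO, peripherals.IO_MUX);
    let sclk = io.pins.gpio0;
    let miso = io.pins.gpio2;
    let mosi = io.pins.gpio3;
    let cs = io.pins.gpio8;

    let dma = Dma::new(peripherals.DMA);
    // Illustrative: ESP32/ESP32-S2 use `dma.spi2channel` instead of `dma.channel0`.
    let dma_channel = dma.channel0;

    // Descriptors and buffers still come from `dma_buffers!`, but they are now
    // wrapped in DmaTxBuf/DmaRxBuf instead of being passed to `with_dma` directly.
    let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(32000);
    let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
    let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();

    let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
        .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
        .with_dma(dma_channel.configure(false, DmaPriority::Priority0))
        .with_buffers(dma_tx_buf, dma_rx_buf);

    // The buffer-backed bus works on plain slices; chunking through the DMA
    // buffers happens inside the driver.
    let tx_buf = [0xde, 0xad, 0xbe, 0xef];
    let mut rx_buf = [0u8; 4];
    spi.transfer(&mut rx_buf, &tx_buf).unwrap();

    loop {}
}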