
Commit fb39bc7

bors[bot], pennae, rmja, and Dirbaio authored
Merge #1404
1404: feat(stm32): Add DMA based, ring-buffer based rx uart, v3 r=Dirbaio a=rmja

This PR replaces #1150. Compared to that PR, this one has the following changes:

* The implementation now aligns with the new stm32 dma module, thanks `@Dirbaio`!
* Calls to `read()` now return on either 1) an idle line, or 2) the ring buffer becoming half full. This is different from the previous PR, which would return a lot of 1-byte reads. Thank you `@chemicstry` for making me realize that this was actually not what I wanted. It is accomplished using the half-transfer-complete and transfer-complete interrupts, both of which seem to be supported on both dma and bdma.

The implementation still has the issue mentioned here: #1150 (comment)

Regarding the todos here: #1150 (comment): I have removed the exposure of ndtr from `dma::RingBuffer` to the uart, so the uart now simply calls `ringbuf::reload_position()` to align its position within the ring buffer with that of the actual running dma controller.

BDMA and GPDMA are not implemented. I do not have any chips with those dma controllers, so maybe someone else should do this so that it can be tested.

The `saturate_serial` test utility inside `tests/utils` has an `--idles` switch which can be used to saturate the uart from a PC, but with random idles. Because embassy-stm32 now can have tests, we should probably run them in CI. I run this locally to test the DmaRingBuffer: `cargo test --no-default-features --features stm32f429ig`.

cc `@chemicstry` `@Dirbaio`

Co-authored-by: pennae <github@quasiparticle.net>
Co-authored-by: Rasmus Melchior Jacobsen <rmja@laesoe.org>
Co-authored-by: Dario Nieuwenhuis <dirbaio@dirbaio.net>
2 parents 05c36e0 + a614fe2 commit fb39bc7
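
The read semantics described in the message can be illustrated with a short sketch. This is a hypothetical consumer loop, not code from this PR: `rx` stands in for the ring-buffered uart rx, and `process` for application code.

```rust
let mut buf = [0u8; 64];
loop {
    // Completes on an idle line, or once the DMA ring buffer has filled to
    // its half-transfer point: a whole chunk at a time, not 1-byte reads.
    let n = rx.read(&mut buf).await?;
    process(&buf[..n]); // placeholder for application handling
}
```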

File tree

13 files changed: +1455 -71 lines changed


embassy-stm32/src/dma/bdma.rs

Lines changed: 185 additions & 4 deletions
@@ -3,18 +3,20 @@
 use core::future::Future;
 use core::pin::Pin;
 use core::sync::atomic::{fence, Ordering};
-use core::task::{Context, Poll};
+use core::task::{Context, Poll, Waker};
 
+use atomic_polyfill::AtomicUsize;
 use embassy_cortex_m::interrupt::Priority;
 use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
 
+use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError};
 use super::word::{Word, WordSize};
 use super::Dir;
 use crate::_generated::BDMA_CHANNEL_COUNT;
 use crate::interrupt::{Interrupt, InterruptExt};
 use crate::pac;
-use crate::pac::bdma::vals;
+use crate::pac::bdma::{regs, vals};
 
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
@@ -48,13 +50,16 @@ impl From<Dir> for vals::Dir {
 
 struct State {
     ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT],
+    complete_count: [AtomicUsize; BDMA_CHANNEL_COUNT],
 }
 
 impl State {
     const fn new() -> Self {
+        const ZERO: AtomicUsize = AtomicUsize::new(0);
         const AW: AtomicWaker = AtomicWaker::new();
         Self {
             ch_wakers: [AW; BDMA_CHANNEL_COUNT],
+            complete_count: [ZERO; BDMA_CHANNEL_COUNT],
         }
     }
 }
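
Why the new per-channel `complete_count`: in circular mode, NDTR by itself cannot tell whether the DMA has lapped the reader, since it wraps every time the buffer refills. Counting transfer-complete interrupts records the number of wraps. A host-side sketch of the arithmetic this enables (the helper is illustrative, not part of the commit):

```rust
/// Total words the DMA has written since the transfer started, given the
/// wrap count (`complete_count`), the buffer capacity in words, and the
/// current NDTR value, which counts down from `cap` to 0 on every lap.
/// If this outruns the reader's position by more than `cap`, unread data
/// has been overwritten: the OverrunError case.
fn dma_write_position(complete_count: usize, cap: usize, ndtr: usize) -> usize {
    complete_count * cap + (cap - ndtr)
}
```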
@@ -105,8 +110,23 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index
     if isr.teif(channel_num) {
         panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num);
     }
+
+    let mut wake = false;
+
+    if isr.htif(channel_num) && cr.read().htie() {
+        // Acknowledge half transfer complete interrupt
+        dma.ifcr().write(|w| w.set_htif(channel_num, true));
+        wake = true;
+    }
+
     if isr.tcif(channel_num) && cr.read().tcie() {
-        cr.write(|_| ()); // Disable channel interrupts with the default value.
+        // Acknowledge transfer complete interrupt
+        dma.ifcr().write(|w| w.set_tcif(channel_num, true));
+        STATE.complete_count[index].fetch_add(1, Ordering::Release);
+        wake = true;
+    }
+
+    if wake {
         STATE.ch_wakers[index].wake();
     }
 }
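
The reworked handler wakes the consumer at both the half-transfer and transfer-complete points instead of disabling the channel on completion (which would stop a circular transfer after one lap), and it coalesces the two events into a single wake. A simplified host-side model of the decision logic, with names of our choosing:

```rust
/// Model of the ISR above: HTIF and TCIF each request a wake when their
/// interrupt enable bit is set; TCIF additionally bumps the lap counter
/// used for overrun detection. The real handler also acknowledges the
/// hardware flags through IFCR, which this model omits.
fn on_irq_model(htif: bool, htie: bool, tcif: bool, tcie: bool, laps: &mut usize) -> bool {
    let mut wake = false;
    if htif && htie {
        wake = true; // first half of the buffer has been filled
    }
    if tcif && tcie {
        *laps += 1; // buffer wrapped around
        wake = true; // second half of the buffer has been filled
    }
    wake // caller wakes the registered waker exactly once
}
```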
@@ -252,6 +272,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
 
         let mut this = Self { channel };
         this.clear_irqs();
+        STATE.complete_count[this.channel.index()].store(0, Ordering::Release);
 
         #[cfg(dmamux)]
         super::dmamux::configure_dmamux(&mut *this.channel, _request);
@@ -299,7 +320,9 @@ impl<'a, C: Channel> Transfer<'a, C> {
 
     pub fn is_running(&mut self) -> bool {
         let ch = self.channel.regs().ch(self.channel.num());
-        unsafe { ch.cr().read() }.en()
+        let en = unsafe { ch.cr().read() }.en();
+        let tcif = STATE.complete_count[self.channel.index()].load(Ordering::Acquire) != 0;
+        en && !tcif
     }
 
     /// Gets the total remaining transfers for the channel
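
Context for this change: the old ISR disabled the channel on transfer-complete, so a cleared EN bit meant the transfer was done. The new ISR only acknowledges the flag, so for one-shot transfers completion is now detected through `complete_count`. A host-side model of the new check (ours, not from the commit):

```rust
use core::sync::atomic::{AtomicUsize, Ordering};

static COMPLETE_COUNT: AtomicUsize = AtomicUsize::new(0);

/// A one-shot transfer is running only while the channel is enabled AND no
/// transfer-complete interrupt has been counted since the transfer started.
fn is_running(en: bool) -> bool {
    let tcif = COMPLETE_COUNT.load(Ordering::Acquire) != 0;
    en && !tcif
}
```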
@@ -342,3 +365,161 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
         }
     }
 }
+
+// ==============================
+
+struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
+
+impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
+    fn ndtr(&self) -> usize {
+        let ch = self.0.regs().ch(self.0.num());
+        unsafe { ch.ndtr().read() }.ndt() as usize
+    }
+
+    fn get_complete_count(&self) -> usize {
+        STATE.complete_count[self.0.index()].load(Ordering::Acquire)
+    }
+
+    fn reset_complete_count(&mut self) -> usize {
+        STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel)
+    }
+}
+
+pub struct RingBuffer<'a, C: Channel, W: Word> {
+    cr: regs::Cr,
+    channel: PeripheralRef<'a, C>,
+    ringbuf: DmaRingBuffer<'a, W>,
+}
+
+impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
+    pub unsafe fn new_read(
+        channel: impl Peripheral<P = C> + 'a,
+        _request: Request,
+        peri_addr: *mut W,
+        buffer: &'a mut [W],
+        _options: TransferOptions,
+    ) -> Self {
+        into_ref!(channel);
+
+        let len = buffer.len();
+        assert!(len > 0 && len <= 0xFFFF);
+
+        let dir = Dir::PeripheralToMemory;
+        let data_size = W::size();
+
+        let channel_number = channel.num();
+        let dma = channel.regs();
+
+        // "Preceding reads and writes cannot be moved past subsequent writes."
+        fence(Ordering::SeqCst);
+
+        #[cfg(bdma_v2)]
+        critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
+
+        let mut w = regs::Cr(0);
+        w.set_psize(data_size.into());
+        w.set_msize(data_size.into());
+        w.set_minc(vals::Inc::ENABLED);
+        w.set_dir(dir.into());
+        w.set_teie(true);
+        w.set_htie(true);
+        w.set_tcie(true);
+        w.set_circ(vals::Circ::ENABLED);
+        w.set_pl(vals::Pl::VERYHIGH);
+        w.set_en(true);
+
+        let buffer_ptr = buffer.as_mut_ptr();
+        let mut this = Self {
+            channel,
+            cr: w,
+            ringbuf: DmaRingBuffer::new(buffer),
+        };
+        this.clear_irqs();
+
+        #[cfg(dmamux)]
+        super::dmamux::configure_dmamux(&mut *this.channel, _request);
+
+        let ch = dma.ch(channel_number);
+        ch.par().write_value(peri_addr as u32);
+        ch.mar().write_value(buffer_ptr as u32);
+        ch.ndtr().write(|w| w.set_ndt(len as u16));
+
+        this
+    }
+
+    pub fn start(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+        unsafe { ch.cr().write_value(self.cr) }
+    }
+
+    pub fn clear(&mut self) {
+        self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow()));
+    }
+
+    /// Read bytes from the ring buffer
+    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
+    pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> {
+        self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.ringbuf.is_empty()
+    }
+
+    pub fn len(&self) -> usize {
+        self.ringbuf.len()
+    }
+
+    pub fn capacity(&self) -> usize {
+        self.ringbuf.dma_buf.len()
+    }
+
+    pub fn set_waker(&mut self, waker: &Waker) {
+        STATE.ch_wakers[self.channel.index()].register(waker);
+    }
+
+    fn clear_irqs(&mut self) {
+        let dma = self.channel.regs();
+        unsafe {
+            dma.ifcr().write(|w| {
+                w.set_htif(self.channel.num(), true);
+                w.set_tcif(self.channel.num(), true);
+                w.set_teif(self.channel.num(), true);
+            })
+        }
+    }
+
+    pub fn request_stop(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+
+        // Disable the channel. Keep the IEs enabled so the irqs still fire.
+        unsafe {
+            ch.cr().write(|w| {
+                w.set_teie(true);
+                w.set_htie(true);
+                w.set_tcie(true);
+            })
+        }
+    }
+
+    pub fn is_running(&mut self) -> bool {
+        let ch = self.channel.regs().ch(self.channel.num());
+        unsafe { ch.cr().read() }.en()
+    }
+
+    /// Synchronize the position of the ring buffer to the actual DMA controller position
+    pub fn reload_position(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+        self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
+    }
+}
+
+impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
+    fn drop(&mut self) {
+        self.request_stop();
+        while self.is_running() {}
+
+        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
+        fence(Ordering::SeqCst);
+    }
+}
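
Putting the new `RingBuffer` API together, a consumer (such as the ring-buffered uart rx added elsewhere in this PR) would drive it roughly as below. This is a sketch under assumptions: construction via `new_read` is omitted, `rx_loop` is our name, and the wait logic is a plausible use of `set_waker`/`reload_position`, not code lifted from the PR.

```rust
use core::future::poll_fn;
use core::task::Poll;

async fn rx_loop<C: Channel>(mut ring: RingBuffer<'_, C, u8>) -> Result<(), OverrunError> {
    ring.start(); // begin the circular peripheral-to-memory transfer

    let mut buf = [0u8; 32];
    loop {
        // Wait until the DMA has produced data; the half-transfer and
        // transfer-complete interrupts wake us via the registered waker.
        poll_fn(|cx| {
            ring.set_waker(cx.waker());
            ring.reload_position(); // resync with the hardware NDTR
            if ring.is_empty() {
                Poll::Pending
            } else {
                Poll::Ready(())
            }
        })
        .await;

        // Drain what is available; fails if the DMA lapped the reader.
        let n = ring.read(&mut buf)?;
        let _chunk = &buf[..n]; // hand off to the application here
    }
}
```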
