 use core::future::Future;
 use core::pin::Pin;
 use core::sync::atomic::{fence, Ordering};
-use core::task::{Context, Poll};
+use core::task::{Context, Poll, Waker};

+use atomic_polyfill::AtomicUsize;
 use embassy_cortex_m::interrupt::Priority;
 use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;

+use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError};
 use super::word::{Word, WordSize};
 use super::Dir;
 use crate::_generated::BDMA_CHANNEL_COUNT;
 use crate::interrupt::{Interrupt, InterruptExt};
 use crate::pac;
-use crate::pac::bdma::vals;
+use crate::pac::bdma::{regs, vals};

 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
@@ -48,13 +50,16 @@ impl From<Dir> for vals::Dir {

 struct State {
     ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT],
+    complete_count: [AtomicUsize; BDMA_CHANNEL_COUNT],
 }

 impl State {
     const fn new() -> Self {
+        const ZERO: AtomicUsize = AtomicUsize::new(0);
         const AW: AtomicWaker = AtomicWaker::new();
         Self {
             ch_wakers: [AW; BDMA_CHANNEL_COUNT],
+            complete_count: [ZERO; BDMA_CHANNEL_COUNT],
         }
     }
 }
@@ -105,8 +110,23 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index
     if isr.teif(channel_num) {
         panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num);
     }
+
+    let mut wake = false;
+
+    if isr.htif(channel_num) && cr.read().htie() {
+        // Acknowledge half transfer complete interrupt
+        dma.ifcr().write(|w| w.set_htif(channel_num, true));
+        wake = true;
+    }
+
     if isr.tcif(channel_num) && cr.read().tcie() {
-        cr.write(|_| ()); // Disable channel interrupts with the default value.
+        // Acknowledge transfer complete interrupt
+        dma.ifcr().write(|w| w.set_tcif(channel_num, true));
+        STATE.complete_count[index].fetch_add(1, Ordering::Release);
+        wake = true;
+    }
+
+    if wake {
         STATE.ch_wakers[index].wake();
     }
 }
@@ -252,6 +272,7 @@ impl<'a, C: Channel> Transfer<'a, C> {

         let mut this = Self { channel };
         this.clear_irqs();
+        STATE.complete_count[this.channel.index()].store(0, Ordering::Release);

         #[cfg(dmamux)]
         super::dmamux::configure_dmamux(&mut *this.channel, _request);
@@ -299,7 +320,9 @@ impl<'a, C: Channel> Transfer<'a, C> {

     pub fn is_running(&mut self) -> bool {
         let ch = self.channel.regs().ch(self.channel.num());
-        unsafe { ch.cr().read() }.en()
+        let en = unsafe { ch.cr().read() }.en();
+        let tcif = STATE.complete_count[self.channel.index()].load(Ordering::Acquire) != 0;
+        en && !tcif
     }

     /// Gets the total remaining transfers for the channel
@@ -342,3 +365,161 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
         }
     }
 }
+
+// ==============================
+
+struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
+
+impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
+    fn ndtr(&self) -> usize {
+        let ch = self.0.regs().ch(self.0.num());
+        unsafe { ch.ndtr().read() }.ndt() as usize
+    }
+
+    fn get_complete_count(&self) -> usize {
+        STATE.complete_count[self.0.index()].load(Ordering::Acquire)
+    }
+
+    fn reset_complete_count(&mut self) -> usize {
+        STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel)
+    }
+}
+
+pub struct RingBuffer<'a, C: Channel, W: Word> {
+    cr: regs::Cr,
+    channel: PeripheralRef<'a, C>,
+    ringbuf: DmaRingBuffer<'a, W>,
+}
+
+impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
+    pub unsafe fn new_read(
+        channel: impl Peripheral<P = C> + 'a,
+        _request: Request,
+        peri_addr: *mut W,
+        buffer: &'a mut [W],
+        _options: TransferOptions,
+    ) -> Self {
+        into_ref!(channel);
+
+        let len = buffer.len();
+        assert!(len > 0 && len <= 0xFFFF);
+
+        let dir = Dir::PeripheralToMemory;
+        let data_size = W::size();
+
+        let channel_number = channel.num();
+        let dma = channel.regs();
+
+        // "Preceding reads and writes cannot be moved past subsequent writes."
+        fence(Ordering::SeqCst);
+
+        #[cfg(bdma_v2)]
+        critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
+
+        let mut w = regs::Cr(0);
+        w.set_psize(data_size.into());
+        w.set_msize(data_size.into());
+        w.set_minc(vals::Inc::ENABLED);
+        w.set_dir(dir.into());
+        w.set_teie(true);
+        w.set_htie(true);
+        w.set_tcie(true);
+        w.set_circ(vals::Circ::ENABLED);
+        w.set_pl(vals::Pl::VERYHIGH);
+        w.set_en(true);
+
+        let buffer_ptr = buffer.as_mut_ptr();
+        let mut this = Self {
+            channel,
+            cr: w,
+            ringbuf: DmaRingBuffer::new(buffer),
+        };
+        this.clear_irqs();
+
+        #[cfg(dmamux)]
+        super::dmamux::configure_dmamux(&mut *this.channel, _request);
+
+        let ch = dma.ch(channel_number);
+        ch.par().write_value(peri_addr as u32);
+        ch.mar().write_value(buffer_ptr as u32);
+        ch.ndtr().write(|w| w.set_ndt(len as u16));
+
+        this
+    }
+
+    pub fn start(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+        unsafe { ch.cr().write_value(self.cr) }
+    }
+
+    pub fn clear(&mut self) {
+        self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow()));
+    }
+
+    /// Read bytes from the ring buffer
+    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
+    pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> {
+        self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.ringbuf.is_empty()
+    }
+
+    pub fn len(&self) -> usize {
+        self.ringbuf.len()
+    }
+
+    pub fn capacity(&self) -> usize {
+        self.ringbuf.dma_buf.len()
+    }
+
+    pub fn set_waker(&mut self, waker: &Waker) {
+        STATE.ch_wakers[self.channel.index()].register(waker);
+    }
+
+    fn clear_irqs(&mut self) {
+        let dma = self.channel.regs();
+        unsafe {
+            dma.ifcr().write(|w| {
+                w.set_htif(self.channel.num(), true);
+                w.set_tcif(self.channel.num(), true);
+                w.set_teif(self.channel.num(), true);
+            })
+        }
+    }
+
+    pub fn request_stop(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+
+        // Disable the channel. Keep the IEs enabled so the irqs still fire.
+        unsafe {
+            ch.cr().write(|w| {
+                w.set_teie(true);
+                w.set_htie(true);
+                w.set_tcie(true);
+            })
+        }
+    }
+
+    pub fn is_running(&mut self) -> bool {
+        let ch = self.channel.regs().ch(self.channel.num());
+        unsafe { ch.cr().read() }.en()
+    }
+
+    /// Synchronize the position of the ring buffer to the actual DMA controller position
+    pub fn reload_position(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+        self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
+    }
+}
+
+impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
+    fn drop(&mut self) {
+        self.request_stop();
+        while self.is_running() {}
+
+        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
+        fence(Ordering::SeqCst);
+    }
+}
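For context, here is a rough usage sketch of the new `RingBuffer` API written against the signatures visible in the diff above. It is not part of the commit itself: the channel handle, DMA request value, and peripheral data-register pointer are hypothetical placeholders supplied by the caller, and the `Default::default()` call assumes `TransferOptions` implements `Default`.

```rust
// Hypothetical sketch (not from this commit): continuously receive u8 words
// from a peripheral into a circular DMA buffer and drain it by polling.
// `request` and `peri_dr` must come from the real peripheral being used.
fn drain_peripheral<'a, C: Channel>(
    channel: impl Peripheral<P = C> + 'a,
    request: Request,
    peri_dr: *mut u8,
    storage: &'a mut [u8],
    mut on_data: impl FnMut(&[u8]),
) -> ! {
    // Safety: `peri_dr` is assumed to point at the peripheral's data register.
    let mut ring = unsafe { RingBuffer::new_read(channel, request, peri_dr, storage, Default::default()) };
    ring.start();

    let mut chunk = [0u8; 64];
    loop {
        // Resynchronize the software view with the hardware NDTR counter,
        // then copy out whatever the DMA controller has written so far.
        ring.reload_position();
        match ring.read(&mut chunk) {
            Ok(n) if n > 0 => on_data(&chunk[..n]),
            Ok(_) => { /* nothing buffered yet; a real driver would register a waker via set_waker() and await */ }
            Err(_) => ring.clear(), // overrun: drop stale data and resynchronize
        }
    }
}
```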