// dma.rs — DMA abstractions for STM32F4.
// (Header lines recovered from a repository web-listing scrape.)
#![allow(dead_code)]
#![allow(unused_code)]
#![allow(missing_docs)]
use core::marker::{PhantomData, Unsize};
use core::ops;
use rcc::AHB1;
use stm32f4x::USART2;
use stm32f4x::{DMA1, dma2};
/// Errors a DMA transfer can report.
#[derive(Debug)]
pub enum Error {
    /// New data was written before previously received data was read
    /// (presumably raised by circular-buffer reads — confirm at call sites).
    Overrun,
    // Hidden variant reserving the right to add further errors without a
    // breaking change (pre-`#[non_exhaustive]` idiom).
    #[doc(hidden)] _Extensible,
}
/// Interrupt events a DMA stream can be asked to listen for
/// (see `dma1::S5::listen`).
pub enum Event {
    /// Half of the transfer has completed.
    HalfTransfer,
    /// The whole transfer has completed.
    TransferComplete,
}
/// Identifies one half of a two-element (double-buffered) DMA buffer.
#[derive(Clone, Copy, PartialEq)]
pub enum Half {
    /// The first half (`buffer[0]`).
    First,
    /// The second half (`buffer[1]`).
    Second,
}
/// A circular (double) buffer driven by a DMA stream.
///
/// The buffer is split into two halves; `readable_half` tracks which half
/// the CPU may currently read. NOTE(review): the actual fill/read protocol
/// is not visible in this file — confirm against the stream implementations.
pub struct CircBuffer<BUFFER, STREAM>
where
    BUFFER: 'static,
{
    // The two halves of the circular buffer.
    buffer: &'static mut [BUFFER; 2],
    // The DMA stream feeding the buffer.
    channel: STREAM,
    // Which half is currently considered readable by the CPU.
    readable_half: Half,
}
impl<BUFFER, STREAM> CircBuffer<BUFFER, STREAM> {
    /// Wraps `buf` and its driving stream `chan` into a circular buffer.
    ///
    /// The readable half starts out as `Half::Second`.
    pub(crate) fn new(buf: &'static mut [BUFFER; 2], chan: STREAM) -> Self {
        let initial_half = Half::Second;
        CircBuffer {
            readable_half: initial_half,
            channel: chan,
            buffer: buf,
        }
    }
}
/// Abstraction over `&'static` and `&'static mut` references, so either
/// can be borrowed immutably through a single interface.
pub trait Static<B> {
    /// Borrows the pointed-to value.
    fn borrow(&self) -> &B;
}
impl<B> Static<B> for &'static B {
    /// A shared static reference is already a borrow; reborrow it.
    fn borrow(&self) -> &B {
        &**self
    }
}
impl<B> Static<B> for &'static mut B {
    /// Downgrade the exclusive static reference to a shared borrow.
    fn borrow(&self) -> &B {
        &**self
    }
}
/// Extension trait that splits a DMA peripheral into independent streams.
pub trait DmaExt {
    /// The collection of stream singletons produced by `split`.
    type Streams;
    /// Consumes the peripheral, enables its clock on the AHB1 bus, and
    /// returns the individual streams.
    fn split(self, ahb: &mut AHB1) -> Self::Streams;
}
/// Low-level "kick off a DMA transfer" hook.
///
/// Parameters: `ndtr` is the number of data items to transfer, `par` the
/// peripheral register address, and `m0` the memory-0 buffer address.
///
/// Unsafe to implement: implementations program DMA registers, and the
/// implementor must guarantee the programmed addresses and length are valid
/// for the duration of the transfer.
pub unsafe trait StartTransfer {
    // FIX: the parameters were anonymous (`&mut self, u16, u32, u32`),
    // which is 2015-edition-only syntax and a hard error in Rust 2018+.
    // Naming them is source-compatible for all implementors.
    fn _start_transfer(&mut self, ndtr: u16, par: u32, m0: u32);
}
/// An on-going DMA transfer.
///
/// `MODE` is the direction type state (`R` or `W`), `BUFFER` the memory
/// being transferred, `STREAM` the DMA stream in use, and `PAYLOAD` —
/// presumably the peripheral-side handle; confirm against callers.
pub struct Transfer<MODE, BUFFER, STREAM, PAYLOAD> {
    // Direction type state only; zero-sized at runtime.
    _mode: PhantomData<MODE>,
    buffer: BUFFER,
    stream: STREAM,
    payload: PAYLOAD,
}
impl<BUFFER, STREAM, PAYLOAD> Transfer<R, BUFFER, STREAM, PAYLOAD> {
    /// Assembles a read-mode (`R`) transfer from its three parts.
    pub(crate) fn r(buffer: BUFFER, stream: STREAM, payload: PAYLOAD) -> Self {
        let _mode = PhantomData;
        Transfer {
            _mode,
            buffer,
            stream,
            payload,
        }
    }
}
impl<BUFFER, STREAM, PAYLOAD> Transfer<W, BUFFER, STREAM, PAYLOAD> {
    /// Assembles a write-mode (`W`) transfer from its three parts.
    pub(crate) fn w(buffer: BUFFER, stream: STREAM, payload: PAYLOAD) -> Self {
        let _mode = PhantomData;
        Transfer {
            _mode,
            buffer,
            stream,
            payload,
        }
    }
}
impl<BUFFER, STREAM, PAYLOAD> ops::Deref for Transfer<R, BUFFER, STREAM, PAYLOAD> {
type Target = BUFFER;
fn deref(&self) -> &BUFFER {
&self.buffer
}
}
/// Read transfer (type state)
pub struct R;
/// Write transfer (type state)
pub struct W;
/// Output mode (type state)
// NOTE(review): `Output` is not referenced anywhere in this file —
// confirm whether it is still needed.
pub struct Output<MODE> {
    // Mode type state only; zero-sized at runtime.
    _mode: PhantomData<MODE>,
}
// USART DMA stream marker traits.
/// A DMA stream that can serve transmit requests of the USART `USART`.
pub unsafe trait UsartTxStream<USART> {
    /// Programs and enables the stream: `ndtr` data items from memory
    /// address `m0` to peripheral address `par`.
    fn start_transfer(&mut self, ndtr: u16, par: u32, m0: u32);
}
/// Marker: a DMA stream usable for receive on the USART `USART` (no methods yet).
pub unsafe trait UsartRxStream<USART> {}
/// DMA1 stream/channel type states and the USART2 stream bindings
/// (STM32F4; stream-to-request mapping per RM0368 section 9.3.3, Table 27).
pub mod dma1 {
    use core::sync::atomic::{self, Ordering};
    use core::marker::{PhantomData, Unsize};
    use cast::u16;
    use stm32f4x::{DMA1, dma2};
    use stm32f4x::USART2;
    use dma::{CircBuffer, DmaExt, Error, Event, Half, Transfer};
    use rcc::AHB1;

    // Stream marker types: one zero-sized type per DMA1 stream used here,
    // parameterized by the channel (request mapping) it is configured for.
    pub struct S0<CHANNEL> {
        _channel: PhantomData<CHANNEL>,
    }
    pub struct S4<CHANNEL> {
        _channel: PhantomData<CHANNEL>,
    }
    pub struct S5<CHANNEL> {
        _channel: PhantomData<CHANNEL>,
    }
    pub struct S6<CHANNEL> {
        _channel: PhantomData<CHANNEL>,
    }
    pub struct S7<CHANNEL> {
        _channel: PhantomData<CHANNEL>,
    }

    // Channels === Alternate function
    // Channel (request) marker types, selected via the stream's CR.CHSEL field.
    pub struct C0;
    pub struct C1;
    pub struct C2;
    pub struct C3;
    pub struct C4;
    pub struct C5;
    pub struct C6;
    pub struct C7;
    pub struct C8;
    pub struct C9;

    // into_channel: re-type a stream to the channel it will be programmed for.
    // NOTE(review): these only change the type state; the CHSEL register field
    // is written separately (see `start_transfer_s6_c4`).
    impl<CHANNEL> S5<CHANNEL> {
        /// Re-types stream 5 as configured for channel 4.
        pub fn into_channel4(self) -> S5<C4> {
            S5 {
                _channel: PhantomData,
            }
        }
    }
    impl<CHANNEL> S6<CHANNEL> {
        /// Re-types stream 6 as configured for channel 4.
        pub fn into_channel4(self) -> S6<C4> {
            S6 {
                _channel: PhantomData,
            }
        }
    }
    impl<CHANNEL> S7<CHANNEL> {
        /// Re-types stream 7 as configured for channel 6.
        pub fn into_channel6(self) -> S7<C6> {
            S7 {
                _channel: PhantomData,
            }
        }
    }

    // Stream 6 / channel 4 serves USART2 TX (RM0368 Table 27).
    unsafe impl super::UsartTxStream<USART2> for S6<C4> {
        fn start_transfer(&mut self, ndtr: u16, par: u32, m0: u32) {
            start_transfer_s6_c4(ndtr, par, m0);
        }
    }
    // RX-capable stream markers for USART2.
    // NOTE(review): S7<C6> as a USART2 RX stream looks inconsistent with
    // RM0368 Table 27 (USART2_RX is stream 5 / channel 4) — confirm.
    unsafe impl super::UsartRxStream<USART2> for S5<C4> {}
    unsafe impl super::UsartRxStream<USART2> for S7<C6> {}

    /// The DMA1 streams handed out by `DmaExt::split`, initially on channel 0.
    pub struct Streams(pub S5<C0>, pub S6<C0>, pub S7<C0>);

    impl<CHANNEL> S5<CHANNEL> {
        /// Enables the interrupt for `event` on stream 5.
        /// NOTE(review): the register writes are still commented out, so this
        /// is currently a no-op — confirm before relying on it.
        pub fn listen(&mut self, event: Event) {
            match event {
                Event::HalfTransfer => (), // self.ccr().modify(|_, w| w.htie().set_bit()),
                Event::TransferComplete => {
                    // self.ccr().modify(|_, w| w.tcie().set_bit())
                }
            }
        }
    }

    use cortex_m::asm;

    /// Programs and enables DMA1 stream 6 (channel 4) for a
    /// memory-to-peripheral transfer of `ndtr` items from address `m0`
    /// to peripheral address `par`.
    fn start_transfer_s6_c4(ndtr: u16, par: u32, m0: u32) {
        // NOTE(unsafe): raw access to the DMA1 register block; assumes the
        // caller owns stream 6 and the addresses passed in are valid.
        let dma = unsafe { &*DMA1::ptr() };
        // nr data transfers
        dma.s6ndtr.write(|w| unsafe { w.ndt().bits(ndtr) });
        // peripheral address
        dma.s6par.write(|w| unsafe { w.bits(par) });
        // memory address 0
        dma.s6m0ar.write(|w| unsafe { w.bits(m0) });
        // TX DMA transfer
        // chsel: Channel 4 (RM0368 9.3.3 Table 27)
        // pl: Medium priority
        // msize: Memory size = 8 bits
        // psize: Peripheral size = 8 bits
        // minc: Memory increment mode enabled
        // pinc: Peripheral increment mode disabled
        // circ: Circular mode disabled
        // dir: Transfer from memory to peripheral
        // tcie: Transfer complete interrupt enabled
        // en: Disabled
        dma.s6cr.write(|w| unsafe {
            w.chsel()
                .bits(4) // channel 4
                .pl()
                .bits(0b01) // medium priority
                .msize()
                .bits(0b00) // memory 8 bits
                .psize()
                .bits(0b00) // peripheral 8 bits
                .minc() // memory increment
                .set_bit()
                .circ()
                .clear_bit() // not circular
                .pinc()
                .clear_bit() // no peripheral increment
                .dir()
                .bits(1) // memory -> peripheral
                .tcie()
                .clear_bit() // no interrupt
                .en()
                .clear_bit() // setup
        });
        // Enable the stream only after the full configuration is in place.
        dma.s6cr.modify(|_, w| w.en().set_bit());
    }

    impl DmaExt for DMA1 {
        type Streams = Streams;
        /// Enables the DMA1 clock on AHB1 and hands out the stream singletons.
        fn split(self, ahb1: &mut AHB1) -> Streams {
            // power and clk to dma1
            ahb1.enr().modify(|_, w| w.dma1en().set_bit());
            // NOTE(review): unlike the macro this was adapted from, the
            // stream control registers are NOT reset here, so any on-going
            // transfer keeps running — confirm this is intended.
            Streams(
                S5 {
                    _channel: PhantomData,
                },
                S6 {
                    _channel: PhantomData,
                },
                S7 {
                    _channel: PhantomData,
                },
            )
        }
    }

    impl<BUFFER, PAYLOAD, MODE> Transfer<MODE, BUFFER, S6<C4>, PAYLOAD> {
        /// Blocks until the stream-6 transfer completes, clears the flag,
        /// disables the stream, and returns the buffer, stream and payload.
        pub fn wait(mut self) -> (BUFFER, S6<C4>, PAYLOAD) {
            // XXX should we check for transfer errors here?
            // The manual says "A DMA transfer error can be generated by reading
            // from or writing to a reserved address space". I think it's impossible
            // to get to that state with our type safe API and *safe* Rust.
            let dma = unsafe { &*DMA1::ptr() };
            // Busy-wait on the stream-6 transfer-complete flag (HISR.TCIF6).
            while dma.hisr.read().tcif6().bit_is_clear() {}
            // Clear the flag, then stop the stream.
            dma.hifcr.write(|w| w.ctcif6().set_bit());
            dma.s6cr.modify(|_, w| w.en().clear_bit());
            // TODO can we weaken this compiler barrier?
            // NOTE(compiler_fence) operations on `buffer` should not be reordered
            // before the previous statement, which marks the DMA transfer as done
            atomic::compiler_fence(Ordering::SeqCst);
            (self.buffer, self.stream, self.payload)
        }
    }
}