Skip to content
Snippets Groups Projects
Commit e76abd91 authored by Per Lindgren's avatar Per Lindgren
Browse files

bare9 polished and tested

parent e249e0e6
Branches
No related tags found
No related merge requests found
//! bare9.rs //! bare9.rs
//! //!
//! Heapless //! Heapless
//! //!
//! What it covers: //! What it covers:
//! - Heapless Ringbuffer //! - Heapless Ringbuffer
//! - Heapless Producer/Consumer lockfree data access //! - Heapless Producer/Consumer lockfree data access
//! - Interrupt driven I/O //! - Interrupt driven I/O
//! //!
#![no_main] #![no_main]
#![no_std] #![no_std]
...@@ -26,24 +26,29 @@ use nb::block; ...@@ -26,24 +26,29 @@ use nb::block;
use rtfm::app; use rtfm::app;
#[app(device = hal::stm32)] #[app(device = hal::stm32, peripherals = true)]
const APP: () = { const APP: () = {
// Late resources struct Resources {
static mut TX: Tx<hal::stm32::USART2> = (); // Late resources
static mut RX: Rx<hal::stm32::USART2> = (); TX: Tx<hal::stm32::USART2>,
static mut PRODUCER: Producer<'static, u8, U3> = (); RX: Rx<hal::stm32::USART2>,
static mut CONSUMER: Consumer<'static, u8, U3> = (); PRODUCER: Producer<'static, u8, U3>,
static mut ITM: ITM = (); CONSUMER: Consumer<'static, u8, U3>,
ITM: ITM,
// An initialized resource
#[init(None)]
RB: Option<Queue<u8, U3>>,
}
// init runs in an interrupt free section // init runs in an interrupt free section
#[init] #[init(resources = [RB])]
fn init() { fn init(cx: init::Context) -> init::LateResources {
let mut core = cx.core;
let device = cx.device;
// A ring buffer for our data // A ring buffer for our data
static mut RB: Option<Queue<u8, U3>> = None; *cx.resources.RB = Some(Queue::new());
*RB = Some(Queue::new());
// Split into producer/consumer pair // Split into producer/consumer pair
let (producer, consumer) = RB.as_mut().unwrap().split(); let (producer, consumer) = cx.resources.RB.as_mut().unwrap().split();
let stim = &mut core.ITM.stim[0]; let stim = &mut core.ITM.stim[0];
iprintln!(stim, "bare9"); iprintln!(stim, "bare9");
...@@ -56,7 +61,7 @@ const APP: () = { ...@@ -56,7 +61,7 @@ const APP: () = {
let gpioa = device.GPIOA.split(); let gpioa = device.GPIOA.split();
let tx = gpioa.pa2.into_alternate_af7(); let tx = gpioa.pa2.into_alternate_af7();
let rx = gpioa.pa3.into_alternate_af7(); let rx = gpioa.pa3.into_alternate_af7();
let mut serial = Serial::usart2( let mut serial = Serial::usart2(
device.USART2, device.USART2,
...@@ -72,26 +77,27 @@ const APP: () = { ...@@ -72,26 +77,27 @@ const APP: () = {
let (tx, rx) = serial.split(); let (tx, rx) = serial.split();
// Late resources // Late resources
// Our split queue init::LateResources {
PRODUCER = producer; // Our split queue
CONSUMER = consumer; PRODUCER: producer,
CONSUMER: consumer,
// Our split serial // Our split serial
TX = tx; TX: tx,
RX = rx; RX: rx,
// For debugging // For debugging
ITM = core.ITM; ITM: core.ITM,
}
} }
// idle may be interrupted by other interrupt/tasks in the system // idle may be interrupted by other interrupt/tasks in the system
// #[idle(resources = [RX, TX, ITM])]
#[idle(resources = [ITM, CONSUMER])] #[idle(resources = [ITM, CONSUMER])]
fn idle() -> ! { fn idle(cx: idle::Context) -> ! {
let stim = &mut resources.ITM.stim[0]; let stim = &mut cx.resources.ITM.stim[0];
loop { loop {
while let Some(byte) = resources.CONSUMER.dequeue() { while let Some(byte) = cx.resources.CONSUMER.dequeue() {
iprintln!(stim, "data {}", byte); iprintln!(stim, "data {}", byte);
} }
...@@ -102,16 +108,18 @@ const APP: () = { ...@@ -102,16 +108,18 @@ const APP: () = {
} }
} }
#[interrupt(resources = [RX, TX, PRODUCER])] // task run on USART2 interrupt (set to fire for each byte received)
fn USART2() { #[task(binds = USART2, resources = [RX, TX, PRODUCER])]
let rx = resources.RX; fn usart2(cx: usart2::Context) {
let tx = resources.TX; let rx = cx.resources.RX;
let tx = cx.resources.TX;
// at this point we know there must be a byte to read
match rx.read() { match rx.read() {
Ok(byte) => { Ok(byte) => {
tx.write(byte).unwrap(); tx.write(byte).unwrap();
match resources.PRODUCER.enqueue(byte) { match cx.resources.PRODUCER.enqueue(byte) {
Ok(_) => {} Ok(_) => {}
Err(_) => asm::bkpt(), Err(_) => asm::bkpt(),
} }
...@@ -121,18 +129,19 @@ const APP: () = { ...@@ -121,18 +129,19 @@ const APP: () = {
} }
}; };
// Optional
// 0. Compile and run the project at 16MHz in release mode // 0. Compile and run the project at 16MHz in release mode
// make sure its running (not paused). // make sure its running (not paused).
// //
// > cargo build --example bare9 --features "hal rtfm" --release // > cargo build --example bare9 --features "rtfm" --release
// (or use the vscode build task) // (or use the vscode build task)
// //
// 1. Start a terminal program, connect with 115200 8N1 // 1. Start a terminal program, connect with 115200 8N1
// //
// You should now be able to send data and recive an echo from the MCU // You should now be able to send data and receive an echo from the MCU
// //
// Try sending: "abcd" as a single sequence (set the option No end in moserial), // Try sending: "abcd" as a single sequence (set the option No end in moserial),
// don't send the quation marks, just abcd. // don't send the quotation marks, just abcd.
// //
// What did you receive, and what was the output of the ITM trace. // What did you receive, and what was the output of the ITM trace.
// //
...@@ -152,9 +161,9 @@ const APP: () = { ...@@ -152,9 +161,9 @@ const APP: () = {
// //
// > cargo build --example bare9 --features "hal rtfm" // > cargo build --example bare9 --features "rtfm"
// (or use the vscode build task) // (or use the vscode build task)
// //
// Try sending: "abcd" as a single sequence (set the option No end in moserial), // Try sending: "abcd" as a single sequence (set the option No end in moserial),
// don't send the quation marks, just abcd. // don't send the quotation marks, just abcd.
// //
// What did you receive, and what was the output of the ITM trace. // What did you receive, and what was the output of the ITM trace.
// //
...@@ -176,18 +185,18 @@ const APP: () = { ...@@ -176,18 +185,18 @@ const APP: () = {
// The concurrency model behind RTFM offers // The concurrency model behind RTFM offers
// 1. Race-free resource access // 1. Race-free resource access
// //
// 2. Deadlock-free exection // 2. Deadlock-free execution
// //
// 3. Shared execution stack (no pre-allocated stack regions) // 3. Shared execution stack (no pre-allocated stack regions)
// //
// 4. Bound priority inversion // 4. Bound priority inversion
// //
// 5. Theoretical underpinning -> // 5. Theoretical underpinning ->
// + proofs of soundness // + (pen and paper) proofs of soundness
// + schedulability analysis // + schedulability analysis
// + response time analysis // + response time analysis
// + stack memory analysis // + stack memory analysis
// + ... leverages on >25 years of reseach in the real-time community // + ... leverages on >25 years of research in the real-time community
// based on the seminal work of Baker in the early 1990s // based on the seminal work of Baker in the early 1990s
// (known as the Stack Resource Policy, SRP) // (known as the Stack Resource Policy, SRP)
// //
...@@ -195,47 +204,45 @@ const APP: () = { ...@@ -195,47 +204,45 @@ const APP: () = {
// 1. compile check and analysis of tasks and resources // 1. compile check and analysis of tasks and resources
// + the API implementation together with the Rust compiler will ensure that // + the API implementation together with the Rust compiler will ensure that
// both RTFM (SRP) soundness and the Rust memory model invariants // both RTFM (SRP) soundness and the Rust memory model invariants
// are upheld (under all circumpstances). // are upheld (under all circumstances).
// //
// 2. arguably the worlds fastest real time scheduler * // 2. arguably the worlds fastest real time scheduler *
// + task invocation 0-cycle OH on top of HW interrupt handling // + task invocation 0-cycle OH on top of HW interrupt handling
// + 2 cycle OH for locking a shared resource (on lock/claim entry) // + 2 cycle OH for locking a shared resource (on lock/claim entry)
// + 1 cycle OH for releasineg a shared resoure (on lock/claim exit) // + 1 cycle OH for releasing a shared resource (on lock/claim exit)
// //
// 3. arguably the worlds most memory efficient scheduler * // 3. arguably the worlds most memory efficient scheduler *
// + 1 byte stack memory OH for each (nested) lock/claim // + 1 byte stack memory OH for each (nested) lock/claim
// (no additional book-keeping during run-time) // (no additional book-keeping during run-time)
// //
// * applies to static task/resource models with single core // * applies to static task/resource models with single core
// pre-emptive, static priority scheduling // pre-emptive, static priority scheduling
// //
// In comparison "real-time" schedulers for threaded models like FreeRTOS // In comparison "real-time" schedulers for threaded models (like FreeRTOS)
// - CPU and memory OH magnitudes larger (100s of cycles/kilobytes of memory) // - CPU and memory OH magnitudes larger
// - ... and what's worse OH is typically unbound (no proofs of worst case) // - ... and what's worse OH is typically unbound (no proofs of worst case)
// And additionally threaded models typically imposes
// - potential race conditions (up to the user to verify) // - potential race conditions (up to the user to verify)
// - potential dead-locks (up to the implementation) // - potential dead-locks (up to the implementation)
// - potential unbound priority inversion (up to the implementation) // - potential unbound priority inversion (up to the implementation)
// //
// Rust RTFM (currently) target ONLY STATIC SYSTEMS, there is no notion // However, Rust RTFM (currently) target ONLY STATIC SYSTEMS,
// of dynamically creating new executions contexts/threads // there is no notion of dynamically creating new executions contexts/threads
// so a direct comparison is not completely fair. // so a direct comparison is not completely fair.
// //
// On the other hand, embedded applications are typically static by nature // On the other hand, embedded applications are typically static by nature
// so a STATIC model is to that end better suitable. // so a STATIC model is to that end better suitable.
// //
// RTFM is reactive by nature, a task execute to end, triggered // RTFM is reactive by nature, a task execute to end, triggered
// by an internal or external event, (where an interrupt is an external event // by an internal or external event, (where an interrupt is an external event
// from the environment, like a HW peripheral such as the USART2). // from the environment, like a HW peripheral such as the USART2).
// //
// Threads on the other hand are concurrent and infinte by nature and // Threads on the other hand are concurrent and infinite by nature and
// actively blocking/yeilding awaiting stimuli. Hence reactivity needs to be CODED. // actively blocking/yielding awaiting stimuli. Hence reactivity needs to be CODED.
// This leads to an anomaly, the underlying HW is reactive (interrupts),
// requiring an interrupt handler, that creates a signal to the scheduler.
//
// The scheduler then needs to keep track of all threads and at some point choose // The scheduler then needs to keep track of all threads and at some point choose
// to dispatch the awaiting thread. So reactivity is bottlenecked to the point // to dispatch the awaiting thread. So reactivity is bottle-necked to the point
// of scheduling by queue management, context switching and other additional // of scheduling by queue management, context switching and other additional
// book keeping. // book keeping.
// //
// In essence, the thread scheduler tries to re-establish the reactivity that // In essence, the thread scheduler tries to re-establish the reactivity that
// were there from the beginning (interrupts), a battle that cannot be won... // were there from the beginning (interrupts), a battle that cannot be won...
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment