From 305dc9f4d364e57da7caf96826d0bddae78fdb2e Mon Sep 17 00:00:00 2001
From: Per Lindgren <per.lindgren@ltu.se>
Date: Thu, 10 Jan 2019 00:20:49 +0100
Subject: [PATCH] bare8

---
 examples/bare8.rs | 287 +++++++++++++++++++++++++++++++++++++++++++++
 examples/bare9.rs | 289 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 576 insertions(+)
 create mode 100644 examples/bare8.rs
 create mode 100644 examples/bare9.rs

diff --git a/examples/bare8.rs b/examples/bare8.rs
new file mode 100644
index 0000000..8d5f156
--- /dev/null
+++ b/examples/bare8.rs
@@ -0,0 +1,287 @@
+// #![deny(unsafe_code)]
+// #![deny(warnings)]
+#![no_main]
+#![no_std]
+
+extern crate panic_halt;
+
+use cortex_m::iprintln;
+use nb::block;
+
+extern crate stm32f4xx_hal as hal;
+use crate::hal::prelude::*;
+use crate::hal::serial::{config::Config, Rx, Serial, Tx};
+use hal::stm32::{ITM, USART2};
+
+// use crate::hal::stm32::Interrupt::EXTI0;
+use rtfm::app;
+// use hal::stm32::Interrupt::EXTI0;
+
+#[app(device = hal::stm32)]
+// #[app(device = stm32f4xx_hal::stm32)]
+
+const APP: () = {
+    // Late resources
+    static mut TX: Tx<USART2> = ();
+    static mut RX: Rx<USART2> = ();
+    static mut ITM: ITM = ();
+
+    // init runs in an interrupt-free section
+    #[init]
+    fn init() {
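+        // ITM stimulus port 0, used for iprintln! tracing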
+        let stim = &mut core.ITM.stim[0];
+        iprintln!(stim, "start");
+
+        let rcc = device.RCC.constrain();
+
+        // 16 MHz (default, all clocks)
+        let clocks = rcc.cfgr.freeze();
+
+        let gpioa = device.GPIOA.split();
+
+        let tx = gpioa.pa2.into_alternate_af7();
+        let rx = gpioa.pa3.into_alternate_af7(); // try commenting this out
+
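+        // configure USART2 at 115200 baud (matching the 115200 8N1 terminal
+        // settings described in the exercise comments below)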
+        let serial = Serial::usart2(
+            device.USART2,
+            (tx, rx),
+            Config::default().baudrate(115_200.bps()),
+            clocks,
+        )
+        .unwrap();
+
+        // Separate out the sender and receiver of the serial port
+        let (tx, rx) = serial.split();
+
+        // Late resources
+        TX = tx;
+        RX = rx;
+        ITM = core.ITM;
+    }
+
+    // idle may be interrupted by other interrupts/tasks in the system
+    #[idle(resources = [RX, TX, ITM])]
+    fn idle() -> ! {
+        let rx = resources.RX;
+        let tx = resources.TX;
+        let stim = &mut resources.ITM.stim[0];
+
+        loop {
+            match block!(rx.read()) {
+                Ok(byte) => {
+                    iprintln!(stim, "Ok {:?}", byte);
+                    let _ = tx.write(byte);
+                }
+                Err(err) => {
+                    iprintln!(stim, "Error {:?}", err);
+                }
+            }
+        }
+    }
+};
+
+// extern crate cortex_m_rtfm as rtfm;
+// extern crate f4;
+// extern crate heapless;
+
+// #[macro_use]
+// extern crate cortex_m_debug;
+
+// use f4::prelude::*;
+// use f4::Serial;
+// use f4::time::Hertz;
+// use heapless::Vec;
+// use rtfm::{app, Resource, Threshold};
+
+// // CONFIGURATION
+// const BAUD_RATE: Hertz = Hertz(115_200);
+
+// // RTFM FRAMEWORK
+// app! {
+//     device: f4::stm32f40x,
+
+//     resources: {
+//         static VECTOR: Vec<u8, [u8; 4]> = Vec::new();
+//     },
+
+//     tasks: {
+//         USART2: {
+//             path: rx,
+//             priority: 2,
+//             resources: [VECTOR, USART2],
+//         },
+//         EXTI1: {
+//             path: trace,
+//             priority: 1,
+//             resources: [VECTOR],
+//         }
+//     },
+// }
+
+// // the `rx` task triggers on the arrival of a USART2 interrupt
+// fn rx(t: &mut Threshold, r: USART2::Resources) {
+//     let serial = Serial(&**r.USART2);
+
+//     // we don't need to block waiting for data to arrive,
+//     // as we were triggered by the data arrival (or an error)
+//     match serial.read() {
+//         Ok(byte) => {
+//             // received byte correct
+//             r.VECTOR.claim_mut(t, |vector, _| {
+//                 // critical section for the shared vector
+//                 let _ = vector.push(byte);
+//                 // here you could put your error handling for vector full
+//             });
+//             let _ = serial.write(byte);
+//         }
+//         Err(err) => {
+//             // some transmission error
+//             ipln!("Error {:?}", err);
+//             r.USART2.dr.read(); // clear the error by reading the data register
+//         }
+//     }
+
+//     // trigger the `trace` task
+//     rtfm::set_pending(f4::stm32f40x::Interrupt::EXTI1);
+// }
+
+// // the `trace` task is triggered by the higher-priority `rx` task
+// // and is a low-priority task for background processing (like tracing)
+// fn trace(t: &mut Threshold, r: EXTI1::Resources) {
+//     let mut b = [0; 4]; // local buffer
+//     let mut l = 0; // length of the received vector
+
+//     r.VECTOR.claim(t, |vector, _| {
+//         // critical section for the shared vector
+//         // here the task `rx` will be blocked from executing
+//         l = vector.len();
+//         b[..l].copy_from_slice(&***vector); // efficient copy of the vector into the local buffer
+//     });
+//     // since we do the actual tracing (relatively slow)
+//     // OUTSIDE the claim (critical section), there will be no
+//     // additional blocking of `rx`
+//     ipln!("Vec {:?}", &b[..l]);
+// }
+
+// // Here we see the typical use of init INITIALIZING the system
+// fn init(p: init::Peripherals, _r: init::Resources) {
+//     ipln!("init");
+//     let serial = Serial(p.USART2);
+
+//     serial.init(BAUD_RATE.invert(), None, p.GPIOA, p.RCC);
+//     // in effect telling the USART2 to trigger the `rx` task/interrupt
+//     serial.listen(f4::serial::Event::Rxne);
+// }
+
+// // We will spend all time sleeping (unless we have work to do)
+// // reactive programming in RTFM ftw!!!
+// fn idle() -> ! {
+//     // Sleep
+//     loop {
+//         rtfm::wfi();
+//     }
+// }
+
+// // 1. compile and run the project at 16 MHz
+// // make sure it's running (not paused)
+// // start a terminal program, e.g., `moserial`
+// // connect to the port
+// //
+// // Device       /dev/ttyACM0
+// // Baud Rate    115200
+// // Data Bits    8
+// // Stop Bits    1
+// // Parity       None
+// // Handshake    None
+// //
+// // (this is also known in short as 115200 8N1)
+// //
+// // you should now be able to send data and receive an echo from the MCU
+// //
+// // try sending: "abcd" as a single sequence (set the option No end in moserial)
+// // (don't send the quotation marks, just abcd)
+// //
+// // what did you receive, and what was the output of the ITM trace?
+// // ** your answer here **
+// //
+// // did you experience any overrun errors?
+// // ** your answer here **
+// //
+// // what is the key problem, and what is its solution? (try to follow the commented code)
+// // ** your answer here **
+// //
+// // commit your answers (bare8_1)
+// //
+// // 2. now catch the case when we are trying to write to a full vector/buffer
+// // and write a suitable error message (a hedged sketch follows below)
+// //
+// // commit your answers (bare8_2)
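+// //
+// // a hedged sketch (not part of the original exercise text) of one way to
+// // report the full-buffer case, assuming heapless' `Vec::push`, which
+// // returns the rejected element on failure:
+// //
+// //     if let Err(byte) = vector.push(byte) {
+// //         ipln!("Error: buffer full, dropped byte {:?}", byte);
+// //     }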
+// //
+// // as a side note....
+// //
+// // The concurrency model behind RTFM offers
+// // 1. Race-free resource access
+// //
+// // 2. Deadlock-free execution
+// //
+// // 3. Shared execution stack (no pre-allocated stack regions)
+// //
+// // 4. Bounded priority inversion
+// //
+// // 5. Theoretical underpinning ->
+// //    + proofs of soundness
+// //    + schedulability analysis
+// //    + response time analysis
+// //    + stack memory analysis
+// //    + ... leverages 25 years of research in the real-time community
+// //      based on the seminal work of Baker in the early 1990s
+// //      (known as the Stack Resource Policy, SRP)
+// //
+// // Our implementation in Rust offers
+// // 1. compile-time checking and analysis of tasks and resources
+// //    + the API implementation together with the Rust compiler will ensure that
+// //      both RTFM (SRP) soundness and the Rust memory model invariants
+// //      are upheld (under all circumstances).
+// //
+// // 2. arguably the world's fastest real-time scheduler *
+// //    + 0-cycle OH for task invocation on top of HW interrupt handling
+// //    + 2-cycle OH for locking a shared resource (on claim entry)
+// //    + 1-cycle OH for releasing a shared resource (on claim exit)
+// //
+// // 3. arguably the world's most memory-efficient scheduler *
+// //    + 1 byte stack memory OH for each (nested) claim
+// //      (no additional book-keeping during run-time)
+// //
+// //    * applies to static task/resource models with single core
+// //      pre-emptive, static priority scheduling
+// //
+// // in comparison, "real-time" schedulers for threaded models like FreeRTOS
+// //    - CPU and memory OH orders of magnitude larger (100s of cycles/kilobytes of memory)
+// //    - ... and what's worse, the OH is typically unbounded (no proofs of worst case)
+// //    - potential race conditions (up to the user to verify)
+// //    - potential deadlocks (up to the implementation)
+// //    - potential unbounded priority inversion (up to the implementation)
+// //
+// // Rust RTFM (currently) targets ONLY STATIC SYSTEMS; there is no notion
+// // of dynamically creating new execution contexts/threads,
+// // so a direct comparison is not completely fair.
+// //
+// // On the other hand, embedded applications are typically static by nature,
+// // so a STATIC model is to that end better suited.
+// //
+// // RTFM is reactive by nature: a task executes to completion, triggered
+// // by an internal or external event (where an interrupt is an external event
+// // from the environment, like a HW peripheral such as the USART2).
+// //
+// // Threads, on the other hand, are concurrent and infinite by nature,
+// // actively blocking/yielding while awaiting stimuli. Hence reactivity needs to be CODED.
+// // This leads to an anomaly: the underlying HW is reactive (interrupts),
+// // requiring an interrupt handler that creates a signal to the scheduler.
+// //
+// // The scheduler then needs to keep track of all threads and at some point choose
+// // to dispatch the awaiting thread. So reactivity is bottlenecked at the point
+// // of scheduling by queue management, context switching and other additional
+// // bookkeeping.
+// //
+// // In essence, the thread scheduler tries to re-establish the reactivity that
+// // was there (interrupts), a battle that cannot be won...
diff --git a/examples/bare9.rs b/examples/bare9.rs
new file mode 100644
index 0000000..bc6abcc
--- /dev/null
+++ b/examples/bare9.rs
@@ -0,0 +1,289 @@
+// #![deny(unsafe_code)]
+// #![deny(warnings)]
+#![no_main]
+#![no_std]
+
+extern crate panic_halt;
+
+use cortex_m::iprintln;
+use nb::block;
+
+extern crate stm32f4xx_hal as hal;
+use crate::hal::prelude::*;
+use crate::hal::serial::{config::Config, Rx, Serial, Tx};
+use hal::stm32::{ITM, USART2};
+
+// use crate::hal::stm32::Interrupt::EXTI0;
+use rtfm::app;
+// use hal::stm32::Interrupt::EXTI0;
+
+#[app(device = hal::stm32)]
+// #[app(device = stm32f4xx_hal::stm32)]
+
+const APP: () = {
+    // Late resources
+    static mut TX: Tx<USART2> = ();
+    static mut RX: Rx<USART2> = ();
+    static mut ITM: ITM = ();
+
+    // init runs in an interrupt-free section
+    #[init]
+    fn init() {
+        let stim = &mut core.ITM.stim[0];
+        iprintln!(stim, "start");
+
+        let rcc = device.RCC.constrain();
+
+        // 16 MHz (default, all clocks)
+        let clocks = rcc.cfgr.freeze();
+
+        let gpioa = device.GPIOA.split();
+
+        let tx = gpioa.pa2.into_alternate_af7();
+        let rx = gpioa.pa3.into_alternate_af7(); // try commenting this out
+
+        let serial = Serial::usart2(
+            device.USART2,
+            (tx, rx),
+            Config::default().baudrate(115_200.bps()),
+            clocks,
+        )
+        .unwrap();
+
+        // Separate out the sender and receiver of the serial port
+        let (tx, rx) = serial.split();
+
+        // Late resources
+        TX = tx;
+        RX = rx;
+        ITM = core.ITM;
+    }
+
+    // idle may be interrupted by other interrupts/tasks in the system
+    #[idle(resources = [RX, TX, ITM])]
+    fn idle() -> ! {
+        let rx = resources.RX;
+        let tx = resources.TX;
+        let stim = &mut resources.ITM.stim[0];
+
+        loop {
+            match block!(rx.read()) {
+                Ok(byte) => {
+                    iprintln!(stim, "Ok {:?}", byte);
+                    let _ = tx.write(byte);
+                }
+                Err(err) => {
+                    iprintln!(stim, "Error {:?}", err);
+                }
+            }
+        }
+    }
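+
+    // hardware task bound to the EXTI0 interrupt (an empty handler for now)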
+    #[interrupt]
+    fn EXTI0() {}
+};
+
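+// Not part of the original example: a hedged sketch of how the echo could be
+// made interrupt driven with this file's RTFM 0.4 syntax, instead of busy
+// waiting in `idle`. It assumes `init` enables the RXNE interrupt before
+// splitting, e.g. `serial.listen(hal::serial::Event::Rxne);`.
+//
+// #[interrupt(resources = [RX, TX])]
+// fn USART2() {
+//     // data has already arrived, so no need to block here
+//     if let Ok(byte) = resources.RX.read() {
+//         let _ = resources.TX.write(byte);
+//     }
+// }
+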
+// extern crate cortex_m_rtfm as rtfm;
+// extern crate f4;
+// extern crate heapless;
+
+// #[macro_use]
+// extern crate cortex_m_debug;
+
+// use f4::prelude::*;
+// use f4::Serial;
+// use f4::time::Hertz;
+// use heapless::Vec;
+// use rtfm::{app, Resource, Threshold};
+
+// // CONFIGURATION
+// const BAUD_RATE: Hertz = Hertz(115_200);
+
+// // RTFM FRAMEWORK
+// app! {
+//     device: f4::stm32f40x,
+
+//     resources: {
+//         static VECTOR: Vec<u8, [u8; 4]> = Vec::new();
+//     },
+
+//     tasks: {
+//         USART2: {
+//             path: rx,
+//             priority: 2,
+//             resources: [VECTOR, USART2],
+//         },
+//         EXTI1: {
+//             path: trace,
+//             priority: 1,
+//             resources: [VECTOR],
+//         }
+//     },
+// }
+
+// // the `rx` task triggers on the arrival of a USART2 interrupt
+// fn rx(t: &mut Threshold, r: USART2::Resources) {
+//     let serial = Serial(&**r.USART2);
+
+//     // we don't need to block waiting for data to arrive,
+//     // as we were triggered by the data arrival (or an error)
+//     match serial.read() {
+//         Ok(byte) => {
+//             // received byte correct
+//             r.VECTOR.claim_mut(t, |vector, _| {
+//                 // critical section for the shared vector
+//                 let _ = vector.push(byte);
+//                 // here you could put your error handling for vector full
+//             });
+//             let _ = serial.write(byte);
+//         }
+//         Err(err) => {
+//             // some transmission error
+//             ipln!("Error {:?}", err);
+//             r.USART2.dr.read(); // clear the error by reading the data register
+//         }
+//     }
+
+//     // trigger the `trace` task
+//     rtfm::set_pending(f4::stm32f40x::Interrupt::EXTI1);
+// }
+
+// // the `trace` task is triggered by the higher-priority `rx` task
+// // and is a low-priority task for background processing (like tracing)
+// fn trace(t: &mut Threshold, r: EXTI1::Resources) {
+//     let mut b = [0; 4]; // local buffer
+//     let mut l = 0; // length of the received vector
+
+//     r.VECTOR.claim(t, |vector, _| {
+//         // critical section for the shared vector
+//         // here the task `rx` will be blocked from executing
+//         l = vector.len();
+//         b[..l].copy_from_slice(&***vector); // efficient copy of the vector into the local buffer
+//     });
+//     // since we do the actual tracing (relatively slow)
+//     // OUTSIDE the claim (critical section), there will be no
+//     // additional blocking of `rx`
+//     ipln!("Vec {:?}", &b[..l]);
+// }
+
+// // Here we see the typical use of init INITIALIZING the system
+// fn init(p: init::Peripherals, _r: init::Resources) {
+//     ipln!("init");
+//     let serial = Serial(p.USART2);
+
+//     serial.init(BAUD_RATE.invert(), None, p.GPIOA, p.RCC);
+//     // in effect telling the USART2 to trigger the `rx` task/interrupt
+//     serial.listen(f4::serial::Event::Rxne);
+// }
+
+// // We will spend all time sleeping (unless we have work to do)
+// // reactive programming in RTFM ftw!!!
+// fn idle() -> ! {
+//     // Sleep
+//     loop {
+//         rtfm::wfi();
+//     }
+// }
+
+// // 1. compile and run the project at 16 MHz
+// // make sure it's running (not paused)
+// // start a terminal program, e.g., `moserial`
+// // connect to the port
+// //
+// // Device       /dev/ttyACM0
+// // Baud Rate    115200
+// // Data Bits    8
+// // Stop Bits    1
+// // Parity       None
+// // Handshake    None
+// //
+// // (this is also known in short as 115200 8N1)
+// //
+// // you should now be able to send data and receive an echo from the MCU
+// //
+// // try sending: "abcd" as a single sequence (set the option No end in moserial)
+// // (don't send the quotation marks, just abcd)
+// //
+// // what did you receive, and what was the output of the ITM trace?
+// // ** your answer here **
+// //
+// // did you experience any overrun errors?
+// // ** your answer here **
+// //
+// // what is the key problem, and what is its solution? (try to follow the commented code)
+// // ** your answer here **
+// //
+// // commit your answers (bare8_1)
+// //
+// // 2. now catch the case when we are trying to write to a full vector/buffer
+// // and write a suitable error message
+// //
+// // commit your answers (bare8_2)
+// //
+// // as a side note....
+// //
+// // The concurrency model behind RTFM offers
+// // 1. Race-free resource access
+// //
+// // 2. Deadlock-free execution
+// //
+// // 3. Shared execution stack (no pre-allocated stack regions)
+// //
+// // 4. Bounded priority inversion
+// //
+// // 5. Theoretical underpinning ->
+// //    + proofs of soundness
+// //    + schedulability analysis
+// //    + response time analysis
+// //    + stack memory analysis
+// //    + ... leverages 25 years of research in the real-time community
+// //      based on the seminal work of Baker in the early 1990s
+// //      (known as the Stack Resource Policy, SRP)
+// //
+// // Our implementation in Rust offers
+// // 1. compile-time checking and analysis of tasks and resources
+// //    + the API implementation together with the Rust compiler will ensure that
+// //      both RTFM (SRP) soundness and the Rust memory model invariants
+// //      are upheld (under all circumstances).
+// //
+// // 2. arguably the world's fastest real-time scheduler *
+// //    + 0-cycle OH for task invocation on top of HW interrupt handling
+// //    + 2-cycle OH for locking a shared resource (on claim entry)
+// //    + 1-cycle OH for releasing a shared resource (on claim exit)
+// //
+// // 3. arguably the world's most memory-efficient scheduler *
+// //    + 1 byte stack memory OH for each (nested) claim
+// //      (no additional book-keeping during run-time)
+// //
+// //    * applies to static task/resource models with single core
+// //      pre-emptive, static priority scheduling
+// //
+// // in comparison, "real-time" schedulers for threaded models like FreeRTOS
+// //    - CPU and memory OH orders of magnitude larger (100s of cycles/kilobytes of memory)
+// //    - ... and what's worse, the OH is typically unbounded (no proofs of worst case)
+// //    - potential race conditions (up to the user to verify)
+// //    - potential deadlocks (up to the implementation)
+// //    - potential unbounded priority inversion (up to the implementation)
+// //
+// // Rust RTFM (currently) targets ONLY STATIC SYSTEMS; there is no notion
+// // of dynamically creating new execution contexts/threads,
+// // so a direct comparison is not completely fair.
+// //
+// // On the other hand, embedded applications are typically static by nature,
+// // so a STATIC model is to that end better suited.
+// //
+// // RTFM is reactive by nature: a task executes to completion, triggered
+// // by an internal or external event (where an interrupt is an external event
+// // from the environment, like a HW peripheral such as the USART2).
+// //
+// // Threads, on the other hand, are concurrent and infinite by nature,
+// // actively blocking/yielding while awaiting stimuli. Hence reactivity needs to be CODED.
+// // This leads to an anomaly: the underlying HW is reactive (interrupts),
+// // requiring an interrupt handler that creates a signal to the scheduler.
+// //
+// // The scheduler then needs to keep track of all threads and at some point choose
+// // to dispatch the awaiting thread. So reactivity is bottlenecked at the point
+// // of scheduling by queue management, context switching and other additional
+// // bookkeeping.
+// //
+// // In essence, the thread scheduler tries to re-establish the reactivity that
+// // was there (interrupts), a battle that cannot be won...
-- 
GitLab