diff --git a/examples/bare0.rs b/examples/bare0.rs
index 17e1a50fca5e01617c06eb87c524a4d0b161ad12..8358e63420c73b20ec2326d2c84c786ee6fc8207 100644
--- a/examples/bare0.rs
+++ b/examples/bare0.rs
@@ -40,6 +40,8 @@ fn main() -> ! {
     }
 }
 
+// 0. Compile/build the example in debug (dev) mode.
+//
 // 1. Run the program in the debugger, let the program run for a while and
 //    then press pause. Look in the (Local - vscode) Variables view. What do you find?
 //
diff --git a/examples/bare1.rs b/examples/bare1.rs
index b11eb3a004884c3ee0a1043611683a3e365c0dc9..04405f0a8573f7b9a9ddd68890b47b502174b970 100644
--- a/examples/bare1.rs
+++ b/examples/bare1.rs
@@ -44,7 +44,7 @@ fn main() -> ! {
 //    to that end look at the install section in the README.md.
 //    If you change toolchain, exit and re-start `vscode`.
 //
-//    1. Build and run the application
+// 1. Build and run the application
 //    Look at the `hello.rs` and `itm.rs` examples to set up the tracing.
 //
 //    When debugging the application it should get stuck in the
diff --git a/examples/bare10.rs b/examples/bare10.rs
index 2f39b89373a2b3a270932f4f66f77770f466757d..78c993e662bea6130f963886905f7f527a68926c 100644
--- a/examples/bare10.rs
+++ b/examples/bare10.rs
@@ -38,7 +38,7 @@ const APP: () = {
     #[init]
     fn init() {
         let stim = &mut core.ITM.stim[0];
-        iprintln!(stim, "start");
+        iprintln!(stim, "bare10");
 
         let rcc = device.RCC.constrain();
 
@@ -79,10 +79,13 @@ const APP: () = {
         }
     }
 
-    #[task(priority = 1, resources = [ITM])]
+    #[task(priority = 1, resources = [ITM], capacity = 4)]
     fn trace_data(byte: u8) {
         let stim = &mut resources.ITM.stim[0];
         iprintln!(stim, "data {}", byte);
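+        // exercise 1: uncomment this busy-wait loop to make the task slow (long-running)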
+        // for _ in 0..10000 {
+        //     asm::nop();
+        // }
     }
 
     #[task(priority = 1, resources = [ITM])]
@@ -96,8 +99,9 @@ const APP: () = {
         let tx = resources.TX;
 
         if block!(tx.write(byte)).is_err() {
-            spawn.trace_error(Error::UsartSendOverflow).unwrap();
+            let _ = spawn.trace_error(Error::UsartSendOverflow);
         }
+
     }
 
     #[interrupt(priority = 3, resources = [RX], spawn = [trace_data, trace_error, echo])]
@@ -106,19 +110,98 @@ const APP: () = {
 
         match rx.read() {
             Ok(byte) => {
-                spawn.echo(byte).unwrap();
+                let _ = spawn.echo(byte);
                 if spawn.trace_data(byte).is_err() {
-                    spawn.trace_error(Error::RingBufferOverflow).unwrap();
+                    let _ = spawn.trace_error(Error::RingBufferOverflow);
                 }
             }
             Err(_err) => {
-                spawn.trace_error(Error::UsartReceiveOverflow).unwrap()
+                let _ = spawn.trace_error(Error::UsartReceiveOverflow);
             }
         }
     }
 
+    // Set of interrupt vectors, free to use for RTFM tasks
+    // 1 per priority level suffices
     extern "C" {
         fn EXTI0();
         fn EXTI1();
     }
 };
+
+// 0. Compile and run the project at 16MHz in release mode,
+//    make sure it's running (not paused).
+//
+//    Connect a terminal program.
+//    Verify that it works like bare9.
+//
+// 1. Uncomment the loop in `trace_data` (it makes the task slow).
+//
+//    Now try to send the sequence `abcd`.
+//
+//    Did you lose any data (was the data correctly echoed)?
+//
+//    ** your answer here **
+//
+//    Was the data correctly traced over the ITM?
+//
+//    ** your answer here **
+//
+//    Why did you lose data?
+//
+//    ** your answer here **
+//
+//    Commit your answers (bare10_1)
+//
+// 2. Read the RTFM manual (book).
+//    Figure out a way to accommodate 4 outstanding messages to the `trace_data` task.
+//
+//    Verify that you can now correctly trace sequences of 4 characters sent.
+//
+//    Commit your answers (bare10_2)
+//
+// 3. Implement a command line interpreter as a new task.
+//    It should:
+//    - have priority 1.
+//    - take a byte as an argument (passed from the USART2 interrupt).
+//    - have a local buffer B of 10 characters.
+//    - have sufficient capacity to receive 10 characters sent in a sequence.
+//    - analyse the input buffer B, checking for the commands
+//      set <int> <RETURN>       // to set the blinking frequency
+//      on <RETURN>              // to enable the led blinking
+//      off <RETURN>             // to disable the led blinking
+//
+//      <int> should be decoded to an integer value T, and <RETURN> should accept
+//      either <CR> or <LF>.
+//
+//    The set value determines the LED blinking frequency in Hertz
+//    (so `set 1 <RETURN>` should blink the LED at 1Hz).
+//
+//    Tips (a commented sketch is given at the end of this file):
+//    Create two tasks: `on`, which turns the led on, and `off`, which turns the led off.
+//    `on` calls `off` with a timer offset (check the RTFM manual).
+//    `off` calls `on` with a timer offset.
+//
+//    The timing offset can be implemented as a shared resource T between the command line
+//    interpreter and the `on`/`off` tasks. From `init` you can give an initial timing
+//    offset T, and send an initial message to `on`, triggering the periodic behavior.
+//
+//    The `on`/`off` tasks can have a high priority (4), while the low priority task
+//    parsing the input uses locking to update T. This way, the led will have very low jitter.
+//
+//    (You can even use an atomic data structure, which allows for lock-free access.)
+//
+//    The on/off behavior is easiest implemented by having another shared variable used as
+//    a condition for the `on` task to set the GPIO. Another solution is to stop the
+//    sequence, i.e., conditionally call the `off` task instead. Then the command
+//    interpreter would trigger a new sequence when an "on" command is detected.
+//    (Should you allow multiple, overlapping sequences? Why not, could be cool ;))
+//    The main advantage of the stopping approach is that the system will be truly idle
+//    when not blinking, and thus more power efficient.
+//
+//    You can reuse the code for setting up and controlling the GPIO/led.
+//
+//    You can come up with various extensions to this application, e.g., setting the
+//    duty cycle (on/off ratio in %), etc.
+//
+//    Commit your solution (bare10_3)
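+//
+//    A minimal commented sketch of the suggested task structure (names, priorities
+//    and the exact `schedule`/`Instant` API are assumptions on my part; RTFM 0.4
+//    with the timer-queue feature is assumed, check the RTFM book for the precise
+//    syntax of your version):
+//
+//    #[task(priority = 4, resources = [T, BLINK, LED], schedule = [off])]
+//    fn on() {
+//        if *resources.BLINK {
+//            // set the GPIO high via resources.LED (reuse the led code from earlier examples)
+//        }
+//        let t = *resources.T; // period in clock cycles
+//        let _ = schedule.off(scheduled + t.cycles());
+//    }
+//
+//    #[task(priority = 4, resources = [T, LED], schedule = [on])]
+//    fn off() {
+//        // set the GPIO low via resources.LED
+//        let t = *resources.T;
+//        let _ = schedule.on(scheduled + t.cycles());
+//    }
+//
+//    #[task(priority = 1, capacity = 10, resources = [T, BLINK])]
+//    fn cli(byte: u8) {
+//        // push `byte` into a local buffer, and on <CR>/<LF> parse
+//        // "set <int>", "on" and "off"; update the shared state from
+//        // this low priority task using locks, e.g.:
+//        //   resources.T.lock(|t| *t = new_offset);
+//        //   resources.BLINK.lock(|b| *b = true);
+//    }
+//
+//    From `init`, give T an initial value and spawn (or schedule) `on` once to start
+//    the periodic behavior. Remember that each software task priority in use needs
+//    a free interrupt vector in the `extern "C"` block.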
\ No newline at end of file
diff --git a/examples/bare2.rs b/examples/bare2.rs
index e6b3c3d3b077acff1ff180fa460932ad8dd9a25b..6d1178b5a256aeb3c0b80f7938a29afb16b08df3 100644
--- a/examples/bare2.rs
+++ b/examples/bare2.rs
@@ -3,9 +3,10 @@
 //! Measuring execution time
 //!
 //! What it covers
-//! - generating documentation
-//! - using core peripherals
-//! - measuring time using the DWT
+//! - Generating documentation
+//! - Using core peripherals
+//! - Measuring time using the DWT
+//! - ITM tracing
 //!
 
 #![no_main]
@@ -31,7 +32,7 @@ fn main() -> ! {
     let stim = &mut p.ITM.stim[0];
     let mut dwt = p.DWT;
 
-    iprintln!(stim, "Measure Me!");
+    iprintln!(stim, "bare2");
 
     dwt.enable_cycle_counter();
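+
+    // A hedged sketch of how a region could be measured (assuming the cortex-m
+    // DWT register API, where CYCCNT can be read via `dwt.cyccnt.read()`):
+    //
+    // let start = dwt.cyccnt.read();
+    // // ... code under measurement ...
+    // let cycles = dwt.cyccnt.read().wrapping_sub(start);
+    // iprintln!(stim, "{} cycles", cycles);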
 
diff --git a/examples/bare3.rs b/examples/bare3.rs
index c5974cb2064e9cb0816bb1834fd7644d0304069c..f40073c4d5fd54c5ae4f931e730d0a8597595f18 100644
--- a/examples/bare3.rs
+++ b/examples/bare3.rs
@@ -3,8 +3,9 @@
 //! String types in Rust
 //!
 //! What it covers:
-//! - types, str, arrays ([u8;uszie]), slices (&[u8])
-//! - iteration, copy
+//! - Types, str, arrays ([u8; usize]), slices (&[u8])
+//! - Iteration, copy
+//! - Semihosting (tracing)
 
 #![no_main]
 #![no_std]
@@ -16,6 +17,7 @@ use cortex_m_semihosting::{hprint, hprintln};
 
 #[entry]
 fn main() -> ! {
+    hprintln!("bare3").unwrap();
     let s = "ABCD";
     let bs = s.as_bytes();
 
@@ -43,9 +45,9 @@ fn main() -> ! {
     loop {}
 }
 
-// 1. Build and run the application (debug build).
+// 0. Build and run the application (debug build).
 //
-//    What is the output in the `openocd` (Adapter Output) console?
+// 1. What is the output in the `openocd` (Adapter Output) console?
 //
 //    ** your answer here **
 //
diff --git a/examples/bare4.rs b/examples/bare4.rs
index 5b7184b37a36cfa1791811866b2a426e96fa8064..d3ac6257e7883370bba1fa63717cec45b92c840b 100644
--- a/examples/bare4.rs
+++ b/examples/bare4.rs
@@ -3,10 +3,10 @@
 //! Access to Peripherals
 //!
 //! What it covers:
-//! - raw pointers
-//! - volatile read/write
-//! - busses and clocking
-//! - gpio
+//! - Raw pointers
+//! - Volatile read/write
+//! - Busses and clocking
+//! - GPIO
 
 #![no_std]
 #![no_main]
@@ -77,21 +77,22 @@ fn main() -> ! {
     }
 }
 
-// 1. Build and run the application (debug build).
-//    Did you enjoy the blinking?
+// 0.  Build and run the application (debug build).
 //
-//    ** your answer here **
-//
-//    Now lookup the data-sheets, and read each section referred,
-//    6.3.11, 8.4.1, 8.4.7
-//
-//    Document each low level access *code* by the appropriate section in the
-//    data sheet.
-//
-//    commit your answers (bare4_1)
-//
-// 2. Comment out line 40 and uncomment line 41 (essentially omitting the `unsafe`)
+// 1.  Did you enjoy the blinking?
 //
+//    ** your answer here **
+//
+//    Now lookup the data-sheets, and read each section referred,
+//    6.3.11, 8.4.1, 8.4.7
+//
+//    Document each low level access *code* by the appropriate section in the
+//    data sheet.
+//
+//    Commit your answers (bare4_1)
+//
+// 2. Comment out line 40 and uncomment line 41 (essentially omitting the `unsafe`)
+//
 //    //unsafe { core::ptr::read_volatile(addr as *const _) }
 //    core::ptr::read_volatile(addr as *const _)
 //
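+//
+//    A small illustration (assumed helper functions, not necessarily the ones in
+//    this file) of wrapping the unsafe volatile accesses behind a safe-looking API,
+//    which is what this exercise explores:
+//
+//    fn read_u32(addr: u32) -> u32 {
+//        unsafe { core::ptr::read_volatile(addr as *const u32) }
+//    }
+//
+//    fn write_u32(addr: u32, val: u32) {
+//        unsafe { core::ptr::write_volatile(addr as *mut u32, val) }
+//    }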
diff --git a/examples/bare5.rs b/examples/bare5.rs
index 514f5a1a62a6789b4d2a482526bc52c677f3806e..d1258aaf366750fccdcc6690a32eb407603d9bf5 100644
--- a/examples/bare5.rs
+++ b/examples/bare5.rs
@@ -194,6 +194,8 @@ fn idle(rcc: &mut RCC, gpioa: &mut GPIOA) {
     }
 }
 
+// 0. Build and run the application.
+//
 // 1. C like API.
 //    In C the .h files are used for defining interfaces, like function signatures (prototypes),
 //    structs and macros (but usually not the functions themselves)
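+//
+//    A minimal illustration (not this file's own code, just the general idea) of a
+//    C-like register interface in Rust: a #[repr(C)] struct overlaying a peripheral's
+//    register block, accessed through volatile reads/writes at a known base address
+//    (the GPIOA address below is illustrative for STM32F4, check the reference manual):
+//
+//    #[repr(C)]
+//    struct GPIO {
+//        moder: u32,  // 0x00 mode register
+//        otyper: u32, // 0x04 output type register
+//        // ... further registers follow in declaration (memory) order
+//    }
+//
+//    let gpioa = 0x4002_0000 as *mut GPIO;
+//    unsafe {
+//        let moder = core::ptr::read_volatile(&(*gpioa).moder);
+//        core::ptr::write_volatile(&mut (*gpioa).moder, moder | 0b01); // PA0 as output
+//    }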
diff --git a/examples/bare6.rs b/examples/bare6.rs
index c71aadf40755f51488593739b4d7943262ca6dda..7ea0b3d45c32e43eb4fc920b8f2de32d19a14b0a 100644
--- a/examples/bare6.rs
+++ b/examples/bare6.rs
@@ -23,7 +23,7 @@ fn main() -> ! {
     let mut c = stm32f413::CorePeripherals::take().unwrap();
 
     let stim = &mut c.ITM.stim[0];
-    iprintln!(stim, "bare6!");
+    iprintln!(stim, "bare6");
 
     c.DWT.enable_cycle_counter();
     unsafe {
@@ -105,8 +105,9 @@ fn clock_out(rcc: &RCC, gpioc: &GPIOC) {
     gpioc.ospeedr.modify(|_, w| w.ospeedr9().very_high_speed())
 }
 
-// 1. Compile and run the example, in 16Mhz
-//    The processor SYSCLK defaults to HCI 16Mhz
+// 0. Compile and run the example at 16MHz.
+//
+// 1. The processor SYSCLK defaults to HSI 16MHz
 //    (this is what you get after a `monitor reset halt`).
 //
 //    Confirm that your ITM dump traces the init, idle and led on/off.
diff --git a/examples/bare7.rs b/examples/bare7.rs
index 3de0f051d739412042dd6728e31d3442adc01595..4ebb86a3bcd442e8cef85e62968f3ab69fd7c362 100644
--- a/examples/bare7.rs
+++ b/examples/bare7.rs
@@ -1,8 +1,12 @@
+//! bare7.rs
+//! 
 //! Serial echo
 //!
 //! What it covers:
 //! - changing the clock using Rust code
 //! - working with the svd2rust API
+//! - working with the HAL (Hardware Abstraction Layer)
+//! - USART polling (blocking wait)
 
 #![deny(unsafe_code)]
 #![deny(warnings)]
@@ -52,7 +56,7 @@ fn main() -> ! {
         match block!(rx.read()) {
             Ok(byte) => {
                 iprintln!(stim, "Ok {:?}", byte);
-                let _ = tx.write(byte);
+                tx.write(byte).unwrap(); 
             }
             Err(err) => {
                 iprintln!(stim, "Error {:?}", err);
@@ -83,7 +87,7 @@ fn main() -> ! {
 //    PCLK2  - The clock driving the APB2 (<= 84 MHz)
 //             Timers on the APB2 bus will be triggered at PCLK2
 //
-// 1. The rcc.cfgr. ... .freeze set the clock according to the configuration given.
+// 1. The rcc.cfgr.x.freeze() sets the clock according to the configuration x given.
 //
 //    rcc.cfgr.freeze(); sets a default configuration.
 //    sysclk = hclk = pclk1 = pclk2 = 16MHz
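+//
+//    A hedged sketch of a non-default configuration (stm32f4xx-hal style CFGR API;
+//    the exact method names depend on the HAL version used here):
+//
+//    let clocks = rcc.cfgr.sysclk(84.mhz()).freeze();
+//    // `clocks` is then passed on, e.g., to the Serial constructor, so that
+//    // baud rate generation matches the configured bus clocks.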
@@ -142,16 +146,27 @@ fn main() -> ! {
 //    commit your answers (bare7_3)
 //
 // 4. Revisit the `README.md` regarding serial communication.
-//    Setup `minicom` or similar to `/dev/ttyACM0`, 115200 8N1.
+//    Start a terminal program, e.g., `moserial`.
+//    Connect to the port with the following settings:
+//
+//    Device       /dev/ttyACM0
+//    Baud Rate    115200
+//    Data Bits    8
+//    Stop Bits    1
+//    Parity       None
+//
+//    This setting is typically abbreviated as 115200 8N1.
 //
 //    Run the example, make sure your ITM is set to 84MHz.
 //
+//    Send a single character (byte); set the option "No end" in moserial.
+//    Verify that sent bytes are echoed back, and that ITM tracing is working.
+//
 //    If not, go back and check your ITM setting, clocks etc.
 //
-//    Now Try sending a string of characters `abcdef`.
+//    Try sending "abcd" as a single sequence (don't send the quotation marks, just abcd).
 //
-//    What was the ITM trace output?
+//    What did you receive, and what was the output of the ITM trace?
 //
 //    ** your answer here **
 //
diff --git a/examples/bare8.rs b/examples/bare8.rs
index f3bbc4bcfb9d3b2472dd1f9c1ed78409f281d785..ecc5ed31632839ccdc184055b921042884efc75e 100644
--- a/examples/bare8.rs
+++ b/examples/bare8.rs
@@ -1,3 +1,5 @@
+//! bare8.rs
+//!
 //! The RTFM framework
 //!
 //! What it covers:
@@ -5,6 +7,7 @@
 //! - singletons (entities with a single instance)
 //! - owned resources
 //! - peripheral access in RTFM
+//! - polling in `idle`
 
 #![no_main]
 #![no_std]
@@ -42,7 +45,7 @@ const APP: () = {
         let gpioa = device.GPIOA.split();
 
         let tx = gpioa.pa2.into_alternate_af7();
-        let rx = gpioa.pa3.into_alternate_af7(); // try comment out
+        let rx = gpioa.pa3.into_alternate_af7(); 
 
         asm::bkpt();
 
@@ -74,8 +77,7 @@ const APP: () = {
             match block!(rx.read()) {
                 Ok(byte) => {
                     iprintln!(stim, "Ok {:?}", byte);
-                    test(byte);
-                    let _ = tx.write(byte);
+                    tx.write(byte).unwrap(); 
                 }
                 Err(err) => {
                     iprintln!(stim, "Error {:?}", err);
@@ -85,19 +87,37 @@ const APP: () = {
     }
 };
 
-#[inline(never)]
-fn test(byte: u8) {
-    unsafe {
-        core::ptr::read_volatile(&byte);
-    }
-}
 
-// 1. Compile and run the example.
-//    Verify that it has the same behavior as bare7.
+// 0. Compile and run the example. Notice that we use the default 16MHz clock.
+//
+// 1. Our CPU now runs slower, did it affect the behavior?
+//
+//    ** your answer here **
+//
+//    Commit your answer (bare8_1)
 //
 // 2. Add a local variable `received` that counts the number of bytes received.
 //    Add a local variable `errors` that counts the number of errors.
 //
 //    Adjust the ITM trace to include the additional information.
 //
-//    commit your development (bare8_2)
+//    Commit your development (bare8_2)
+//
+// 3. How did the added tracing affect the performance
+//    (are you now losing more data)?
+//
+//    ** your answer here **
+// 
+//    Commit your answer (bare8_3)
+//
+// 4. *Optional
+//    Compile and run the program in release mode.
+//    If using vscode, look in the `.vscode` folder at `tasks.json` and `launch.json`,
+//    and add a new "profile" (a bit of copy paste).
+//
+//    How did the optimized build compare to the debug build (performance/lost bytes)?
+//
+//    ** your answer here **
+// 
+//    Commit your answer (bare8_4)
+
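+// A commented sketch for exercise 2 above (assuming the same `stim`, `rx`, `tx`
+// bindings as in `idle`; the counter names are my own):
+//
+//    let mut received: u32 = 0;
+//    let mut errors: u32 = 0;
+//    loop {
+//        match block!(rx.read()) {
+//            Ok(byte) => {
+//                received = received.wrapping_add(1);
+//                iprintln!(stim, "Ok {:?}, received {}, errors {}", byte, received, errors);
+//                tx.write(byte).unwrap();
+//            }
+//            Err(err) => {
+//                errors = errors.wrapping_add(1);
+//                iprintln!(stim, "Error {:?}, received {}, errors {}", err, received, errors);
+//            }
+//        }
+//    }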
diff --git a/examples/bare9.rs b/examples/bare9.rs
index 6000b7396b3d542bd79840aebd47ff5a8443509c..c675955d6e1cd228c1004e863bf911ca296b365a 100644
--- a/examples/bare9.rs
+++ b/examples/bare9.rs
@@ -1,5 +1,13 @@
-// #![deny(unsafe_code)]
-// #![deny(warnings)]
+//! bare9.rs
+//! 
+//! Heapless 
+//! 
+//! What it covers:
+//! - Heapless Ringbuffer
+//! - Heapless Producer/Consumer lock-free data access
+//! - Interrupt driven I/O
+//! 
+
 #![no_main]
 #![no_std]
 
@@ -48,8 +56,7 @@ const APP: () = {
         let gpioa = device.GPIOA.split();
 
         let tx = gpioa.pa2.into_alternate_af7();
-        let rx = gpioa.pa3.into_alternate_af7(); // try comment out
-                                                 // let rx = gpioa.pa3.into_alternate_af6(); // try uncomment
+        let rx = gpioa.pa3.into_alternate_af7(); 
 
         let mut serial = Serial::usart2(
             device.USART2,
@@ -102,7 +109,8 @@ const APP: () = {
 
         match rx.read() {
             Ok(byte) => {
-                block!(tx.write(byte)).unwrap();
+                tx.write(byte).unwrap();
+                
                 match resources.PRODUCER.enqueue(byte) {
                     Ok(_) => {}
                     Err(_) => asm::bkpt(),
@@ -113,208 +121,115 @@ const APP: () = {
     }
 };
 
-// extern crate cortex_m_rtfm as rtfm;
-// extern crate f4;
-// extern crate heapless;
-
-// #[macro_use]
-// extern crate cortex_m_debug;
-
-// use f4::prelude::*;
-// use f4::Serial;
-// use f4::time::Hertz;
-// use heapless::Vec;
-// use rtfm::{app, Resource, Threshold};
-
-// // CONFIGURATION
-// const BAUD_RATE: Hertz = Hertz(115_200);
-
-// // RTFM FRAMEWORK
-// app! {
-//     device: f4::stm32f40x,
-
-//     resources: {
-//         static VECTOR: Vec<u8, [u8; 4]> = Vec::new();
-//     },
-
-//     tasks: {
-//         USART2: {
-//             path: rx,
-//             priority: 2,
-//             resources: [VECTOR, USART2],
-//         },
-//         EXTI1: {
-//             path: trace,
-//             priority: 1,
-//             resources: [VECTOR],
-//         }
-//     },
-// }
-
-// // `rx` task trigger on arrival of a USART2 interrupt
-// fn rx(t: &mut Threshold, r: USART2::Resources) {
-//     let serial = Serial(&**r.USART2);
-
-//     // we don't need to block waiting for data to arrive
-//     // (as we were triggered) by the data arrival (or error)
-//     match serial.read() {
-//         Ok(byte) => {
-//             // received byte correct
-//             r.VECTOR.claim_mut(t, |vector, _| {
-//                 // critical section for the shared vector
-//                 let _ = vector.push(byte);
-//                 // here you could put your error handling for vector full
-//             });
-//             let _ = serial.write(byte);
-//         }
-//         Err(err) => {
-//             // some transmission error
-//             ipln!("Error {:?}", err);
-//             r.USART2.dr.read(); // clear the error by reading the data register
-//         }
-//     }
-
-//     // trigger the `trace` task
-//     rtfm::set_pending(f4::stm32f40x::Interrupt::EXTI1);
-// }
-
-// // `trace` task triggered by the hight priority `rx` task
-// // a low priority task for the background processing (like tracing)
-// fn trace(t: &mut Threshold, r: EXTI1::Resources) {
-//     let mut b = [0; 4]; // local buffer
-//     let mut l = 0; // length of the received vector
-
-//     r.VECTOR.claim(t, |vector, _| {
-//         // critical section for the shared vector
-//         // here the task `rx` will be blocked from executing
-//         l = vector.len();
-//         b[..l].copy_from_slice(&***vector); // efficent copy vector to the local buffer
-//     });
-//     // since we do the actual tracing (relatively slow)
-//     // OUTSIDE the claim (critical section), there will be no
-//     // additional blocking of `rx`
-//     ipln!("Vec {:?}", &b[..l]);
-// }
-
-// // Here we see the typical use of init INITIALIZING the system
-// fn init(p: init::Peripherals, _r: init::Resources) {
-//     ipln!("init");
-//     let serial = Serial(p.USART2);
-
-//     serial.init(BAUD_RATE.invert(), None, p.GPIOA, p.RCC);
-//     // in effect telling the USART2 to trigger the `rx` task/interrupt
-//     serial.listen(f4::serial::Event::Rxne);
-// }
-
-// // We will spend all time sleeping (unless we have work to do)
-// // reactive programming in RTFM ftw!!!
-// fn idle() -> ! {
-//     // Sleep
-//     loop {
-//         rtfm::wfi();
-//     }
-// }
-
-// // 1. compile and run the project at 16MHz
-// // make sure its running (not paused)
-// // start a terminal program, e.g., `moserial`
-// // connect to the port
-// //
-// // Device       /dev/ttyACM0
-// // Baude Rate   115200
-// // Data Bits    8
-// // Stop Bits    1
-// // Parity       None
-// // Handshake    None
-// //
-// // (this is also known in short as 15200 8N1)
-// //
-// // you should now be able to send data and recive an echo from the MCU
-// //
-// // try sending: "abcd" as a single sequence (set the option No end in moserial)
-// // (don't send the quation marks, just abcd)
-// //
-// // what did you receive, and what was the output of the ITM trace
-// // ** your answer here **
-// //
-// // did you experience any over-run errors?
-// // ** your answer here **
-// //
-// // what is the key problem and its solution (try to follow the commented code)
-// // ** your answer here **
-// //
-// // commit your answers (bare8_1)
-// //
-// // 2. now catch the case when we are trying to write to a full vector/buffer
-// // and write a suiteble error message
-// //
-// // commit your answers (bare8_2)
-// //
-// // as a side note....
-// //
-// // The concurrency model behind RTFM offers
-// // 1. Race-free resource access
-// //
-// // 2. Deadlock-free exection
-// //
-// // 3. Shared execution stack (no pre-allocated stack regions)
-// //
-// // 4. Bound priority inversion
-// //
-// // 5. Theoretical underpinning ->
-// //    + proofs of soundness
-// //    + schedulability analysis
-// //    + response time analysis
-// //    + stack memory analysis
-// //    + ... leverages on 25 years of reseach in the real-time community
-// //      based on the seminal work of Baker in the early 1990s
-// //      (known as the Stack Resource Policy, SRP)
-// //
-// // Our implementation in Rust offers
-// // 1. compile check and analysis of tasks and resources
-// //    + the API implementation together with the Rust compiler will ensure that
-// //      both RTFM (SRP) soundness and the Rust memory model invariants
-// //      are upheld (under all circumpstances).
-// //
-// // 2. arguably the worlds fastest real time scheduler *
-// //    + task invocation 0-cycle OH on top of HW interrupt handling
-// //    + 2 cycle OH for locking a shared resource (on claim entry)
-// //    + 1 cycle OH for releasineg a shared resoure (on claim exit)
-// //
-// // 3. arguably the worlds most memory efficient scheduler *
-// //    + 1 byte stack memory OH for each (nested) claim
-// //      (no additional book-keeping during run-time)
-// //
-// //    * applies to static task/resource models with single core
-// //      pre-emptive, static priority scheduling
-// //
-// // in comparison "real-time" schedulers for threaded models like FreeRTOS
-// //    - CPU and memory OH magnitudes larger (100s of cycles/kilobytes of memory)
-// //    - ... and what's worse OH is typically unbound (no proofs of worst case)
-// //    - potential race conditions (up to the user to verify)
-// //    - potential dead-locks (up to the implementation)
-// //    - potential unbound priority inversion (up to the implementation)
-// //
-// // Rust RTFM (currently) target ONLY STATIC SYSTEMS, there is no notion
-// // of dynamically creating new executions contexts/threads
-// // so a direct comparison is not completely fair.
-// //
-// // On the other hand, embedded applications are typically static by nature
-// // so a STATIC model is to that end better suitable.
-// //
-// // RTFM is reactive by nature, a task execute to end, triggered
-// // by an internal or external event, (where an interrupt is an external event
-// // from the environment, like a HW peripheral such as the USART2).
-// //
-// // Threads on the other hand are concurrent and infinte by nature and
-// // actively blocking/yeilding awaiting stimuli. Hence reactivity needs to be CODED.
-// // This leads to an anomaly, the underlying HW is reactive (interrupts),
-// // requiring an interrupt handler, that creates a signal to the scheduler.
-// //
-// // The scheduler then needs to keep track of all threads and at some point choose
-// // to dispatch the awaiting thread. So reactivity is bottlenecked to the point
-// // of scheduling by que management, context switching and other additional
-// // book keeping.
-// //
-// // In essence, the thread scheduler tries to re-establish the reactivity that
-// // were there (interrupts), a battle that cannot be won...
+// 0. Compile and run the project at 16MHz in release mode,
+//    make sure it's running (not paused).
+// 
+// 1. Start a terminal program, connect with 115200 8N1.
+//
+//    You should now be able to send data and receive an echo from the MCU.
+//
+//    Try sending: "abcd" as a single sequence (set the option No end in moserial),
+//    don't send the quotation marks, just abcd.
+//
+//    What did you receive, and what was the output of the ITM trace?
+//
+//    ** your answer here **
+//
+//    Did you experience any over-run errors?
+//
+//    ** your answer here **
+//
+//    Why does it behave differently than bare7/bare8?
+//
+//    ** your answer here **
+//
+//    Commit your answers (bare9_1)
+//
+// 2. Compile and run the project at 16MHz in debug mode.
+//    
+//    Try sending: "abcd" as a single sequence (set the option No end in moserial),
+//    don't send the quotation marks, just abcd.
+//
+//    What did you receive, and what was the output of the ITM trace?
+//
+//    ** your answer here **
+//
+//    Did you experience any over-run errors?
+//
+//    ** your answer here **
+//
+//    Why does it behave differently than in release mode?
+//    Recall how the execution overhead changed with optimization level.
+//
+//    ** your answer here **
+//
+//    Commit your answers (bare9_2)
+//
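+//    The different behavior comes from the interrupt handler merely enqueueing
+//    received bytes into a heapless SPSC ring buffer, while `idle` dequeues and
+//    does the (slow) tracing. A rough sketch of that pattern (heapless 0.4/0.5
+//    style API; names are illustrative, not this file's exact resource setup):
+//
+//    use heapless::{consts::U4, spsc::Queue};
+//
+//    let mut queue: Queue<u8, U4> = Queue::new();
+//    let (mut producer, mut consumer) = queue.split();
+//
+//    producer.enqueue(42).ok();               // USART2 interrupt side
+//    if let Some(byte) = consumer.dequeue() { // idle side
+//        // trace `byte` over ITM here
+//    }
+//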
+//    Discussion:
+//
+//    The concurrency model behind RTFM offers
+//    1. Race-free resource access
+//
+//    2. Deadlock-free execution
+//
+//    3. Shared execution stack (no pre-allocated stack regions)
+//
+//    4. Bound priority inversion
+//
+//    5. Theoretical underpinning ->
+//       + proofs of soundness
+//       + schedulability analysis
+//       + response time analysis
+//       + stack memory analysis
+//       + ... leverages on >25 years of research in the real-time community
+//         based on the seminal work of Baker in the early 1990s
+//         (known as the Stack Resource Policy, SRP)
+//
+//    Our implementation in Rust offers
+//    1. compile check and analysis of tasks and resources
+//       + the API implementation together with the Rust compiler will ensure that
+//          both RTFM (SRP) soundness and the Rust memory model invariants
+//          are upheld (under all circumstances).
+//   
+//    2. arguably the world's fastest real-time scheduler *
+//       + task invocation 0-cycle OH on top of HW interrupt handling
+//       + 2 cycle OH for locking a shared resource (on claim entry)
+//       + 1 cycle OH for releasing a shared resource (on claim exit)
+//   
+//    3. arguably the world's most memory-efficient scheduler *
+//       + 1 byte stack memory OH for each (nested) claim
+//         (no additional book-keeping during run-time)
+//   
+//       * applies to static task/resource models with single core
+//         pre-emptive, static priority scheduling
+//   
+//    In comparison, "real-time" schedulers for threaded models like FreeRTOS
+//       - CPU and memory OH magnitudes larger (100s of cycles/kilobytes of memory)
+//       - ... and what's worse OH is typically unbound (no proofs of worst case)
+//       - potential race conditions (up to the user to verify)
+//       - potential dead-locks (up to the implementation)
+//       - potential unbound priority inversion (up to the implementation)
+//   
+//    Rust RTFM (currently) targets ONLY STATIC SYSTEMS, there is no notion
+//    of dynamically creating new execution contexts/threads,
+//    so a direct comparison is not completely fair.
+//   
+//    On the other hand, embedded applications are typically static by nature
+//    so a STATIC model is to that end better suited.
+//   
+//    RTFM is reactive by nature, a task executes to completion, triggered
+//    by an internal or external event (where an interrupt is an external event
+//    from the environment, like a HW peripheral such as the USART2).
+//   
+//    Threads on the other hand are concurrent and infinite by nature,
+//    actively blocking/yielding awaiting stimuli. Hence reactivity needs to be CODED.
+//    This leads to an anomaly: the underlying HW is reactive (interrupts),
+//    requiring an interrupt handler, that creates a signal to the scheduler.
+//   
+//    The scheduler then needs to keep track of all threads and at some point choose
+//    to dispatch the awaiting thread. So reactivity is bottlenecked to the point
+//    of scheduling by queue management, context switching and other additional
+//    book keeping.
+//   
+//    In essence, the thread scheduler tries to re-establish the reactivity that
+//    was there (interrupts), a battle that cannot be won...
\ No newline at end of file