diff --git a/.vscode/launch.json b/.vscode/launch.json
index 49e3199ced4ab2ef856a120abd4771ec77e1137a..384233c27b2a16db3ffc626dc1c4e445f7644701 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -464,8 +464,30 @@
                 "interface/stlink.cfg",
                 "target/stm32f4x.cfg"
             ],
-            "postLaunchCommands": [
-                "continue"
+            "swoConfig": {
+                "enabled": true,
+                "cpuFrequency": 16000000,
+                "swoFrequency": 2000000, // you may try 1000000 if not working
+                "source": "probe",
+                "decoders": [
+                    {
+                        "type": "console",
+                        "label": "Name",
+                        "port": 0
+                    }
+                ]
+            },
+            "cwd": "${workspaceRoot}"
+        },
+        {
+            "type": "cortex-debug",
+            "request": "launch",
+            "servertype": "openocd",
+            "name": "c bare8 16Mhz",
+            "executable": "./target/thumbv7em-none-eabihf/debug/examples/bare8",
+            "configFiles": [
+                "interface/stlink.cfg",
+                "target/stm32f4x.cfg"
             ],
             "swoConfig": {
                 "enabled": true,
diff --git a/examples/bare7.rs b/examples/bare7.rs
index ecce8786ae5d405c35cfebe94fb4bc1cf6344ba2..101a29ea0ed6b89a27a91b01969f43cced223e28 100644
--- a/examples/bare7.rs
+++ b/examples/bare7.rs
@@ -1,6 +1,6 @@
 //! Serial interface loopback
 #![deny(unsafe_code)]
-#![deny(warnings)]
+//#![deny(warnings)]
 #![feature(proc_macro)]
 #![no_std]
 
@@ -27,7 +27,6 @@ const BAUD_RATE: Hertz = Hertz(115_200);
 app! {
     device: f4::stm32f40x,
 }
-// static BUFFER: Vec<u8, [u8; 8]> = Vec::new();
 
 // Init executes with interrupts disabled
 // Hence its safe to access all peripherals (no race-conditions)
@@ -40,13 +39,13 @@ fn init(p: init::Peripherals) {
 
     serial.init(BAUD_RATE.invert(), None, p.GPIOA, p.RCC);
 
-    let mut buffer: Vec<u8, [u8; 4]> = Vec::new();
+    let mut vector: Vec<u8, [u8; 4]> = Vec::new();
     loop {
         match block!(serial.read()) {
             Ok(byte) => {
-                let _ = buffer.push(byte);
-                ipln!("Ok {:?}", buffer);
-                block!(serial.write(byte)).ok();
+                let _ = vector.push(byte);
+                ipln!("Ok {:?}", vector);
+                let _ = serial.write(byte);
             }
             Err(err) => {
                 ipln!("Error {:?}", err);
@@ -108,7 +107,7 @@ fn idle() -> ! {
 // your job now is to check the API of `heapless`
 // https://docs.rs/heapless/0.2.1/heapless/
 //
-// and catch the case we are trying to write to a full buffer
+// and catch the case where we are trying to write to a full buffer/vector
 // and write a suitable error message (one way is sketched below)
 //
 // commit your answers (bare7_2)
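+//
+// a minimal sketch of one way to catch the full-vector case in the loop above,
+// assuming that `heapless::Vec::push` returns an `Err` when the vector is full
+// (check the heapless 0.2.1 docs for the exact error type); the message text
+// is only an illustration:
+//
+// Ok(byte) => {
+//     if vector.push(byte).is_err() {
+//         ipln!("Error: vector full, dropping byte {}", byte);
+//     } else {
+//         ipln!("Ok {:?}", vector);
+//     }
+//     let _ = serial.write(byte);
+// }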
diff --git a/examples/bare8.rs b/examples/bare8.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e3befba16e6a0ebb27341d111854b6392bdb6013
--- /dev/null
+++ b/examples/bare8.rs
@@ -0,0 +1,211 @@
+//! Serial interface loopback
+#![deny(unsafe_code)]
+#![deny(warnings)]
+#![feature(proc_macro)]
+#![no_std]
+
+extern crate cortex_m_rtfm as rtfm;
+extern crate f4;
+extern crate heapless;
+
+#[macro_use]
+extern crate cortex_m_debug;
+
+use f4::prelude::*;
+use f4::Serial;
+use f4::time::Hertz;
+use heapless::Vec;
+use rtfm::{app, Resource, Threshold};
+
+// CONFIGURATION
+const BAUD_RATE: Hertz = Hertz(115_200);
+
+// RTFM FRAMEWORK
+app! {
+    device: f4::stm32f40x,
+
+    resources: {
+        static VECTOR: Vec<u8, [u8; 4]> = Vec::new();
+    },
+
+    tasks: {
+        USART2: {
+            path: rx,
+            priority: 2,
+            resources: [VECTOR, USART2],
+        },
+        EXTI1: {
+            path: trace,
+            priority: 1,
+            resources: [VECTOR],
+        }
+    },
+}
+
+// the `rx` task triggers on arrival of a USART2 interrupt
+fn rx(t: &mut Threshold, r: USART2::Resources) {
+    let serial = Serial(&**r.USART2);
+
+    // we don't need to block waiting for data to arrive
+    // (as we were triggered by the data arrival, or by an error)
+    match serial.read() {
+        Ok(byte) => {
+            // the byte was received correctly
+            r.VECTOR.claim_mut(t, |vector, _| {
+                // critical section for the shared vector
+                let _ = vector.push(byte);
+                // here you could put your error handling for a full vector
+            });
+            let _ = serial.write(byte);
+        }
+        Err(err) => {
+            // some transmission error
+            ipln!("Error {:?}", err);
+            r.USART2.dr.read(); // clear the error by reading the data register
+        }
+    }
+
+    // trigger the `trace` task
+    rtfm::set_pending(f4::stm32f40x::Interrupt::EXTI1);
+}
+
+// `trace` task triggered by the higher priority `rx` task
+// a low priority task for background processing (like tracing)
+fn trace(t: &mut Threshold, r: EXTI1::Resources) {
+    let mut b = [0; 4]; // local buffer
+    let mut l = 0; // length of the received vector
+
+    r.VECTOR.claim(t, |vector, _| {
+        // critical section for the shared vector
+        // here the task `rx` will be blocked from executing
+        l = vector.len();
+        b[..l].copy_from_slice(&***vector); // efficiently copy the vector into the local buffer
+    });
+    // since we do the actual tracing (relatively slow)
+    // OUTSIDE the claim (critical section), there will be no
+    // additional blocking of `rx`
+    ipln!("Vec {:?}", &b[..l]);
+}
+
+// Here we see the typical use of `init`: INITIALIZING the system
+fn init(p: init::Peripherals, _r: init::Resources) {
+    ipln!("init");
+    let serial = Serial(p.USART2);
+
+    serial.init(BAUD_RATE.invert(), None, p.GPIOA, p.RCC);
+    // in effect telling the USART2 to trigger the `rx` task/interrupt
+    serial.listen(f4::serial::Event::Rxne);
+}
+
+// We will spend all time sleeping (unless we have work to do)
+// reactive programming in RTFM ftw!!!
+fn idle() -> ! {
+    // Sleep
+    loop {
+        rtfm::wfi();
+    }
+}
+
+// 1. compile and run the project at 16MHz
+// make sure it's running (not paused)
+// start a terminal program, e.g., `moserial`
+// connect to the port
+//
+// Device       /dev/ttyACM0
+// Baud Rate    115200
+// Data Bits    8
+// Stop Bits    1
+// Parity       None
+// Handshake    None
+//
+// (this is also known, for short, as 115200 8N1)
+//
+// you should now be able to send data and receive an echo from the MCU
+//
+// try sending: "abcd" as a single sequence (set the option No end in moserial)
+// (don't send the quotation marks, just abcd)
+//
+// what did you receive, and what was the output of the ITM trace
+// ** your answer here **
+//
+// did you experience any over-run errors?
+// ** your answer here **
+//
+// what is the key problem and its solution (try to follow the commented code)
+// ** your answer here **
+//
+// commit your answers (bare8_1)
+//
+// 2. now catch the case when we are trying to write to a full vector/buffer
+// and write a suitable error message (one way is sketched below)
+//
+// commit your answers (bare8_2)
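+//
+// a minimal sketch of how the `push` inside the `claim_mut` critical section in
+// `rx` above could be guarded, again assuming that `Vec::push` returns an `Err`
+// when the vector is full; the error message is only an illustration:
+//
+// r.VECTOR.claim_mut(t, |vector, _| {
+//     if vector.push(byte).is_err() {
+//         ipln!("Error: VECTOR full, dropping byte {}", byte);
+//     }
+// });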
+//
+// as a side note....
+//
+// The concurrency model behind RTFM offers
+// 1. Race-free resource access
+//
+// 2. Deadlock-free execution
+//
+// 3. Shared execution stack (no pre-allocated stack regions)
+//
+// 4. Bounded priority inversion
+//
+// 5. Theoretical underpinning ->
+//    + proofs of soundness
+//    + schedulability analysis
+//    + response time analysis
+//    + stack memory analysis
+//      + ... leverages 25 years of research in the real-time community
+//      based on the seminal work of Baker in the early 1990s
+//      (known as the Stack Resource Policy, SRP)
+//
+// Our implementation in Rust offers
+// 1. compile-time checking and analysis of tasks and resources
+//    + the API implementation together with the Rust compiler will ensure that
+//      both RTFM (SRP) soundness and the Rust memory model invariants
+//      are upheld (under all circumstances).
+//
+// 2. arguably the world's fastest real-time scheduler *
+//    + task invocation 0-cycle OH on top of HW interrupt handling
+//    + 2 cycle OH for locking a shared resource (on claim entry)
+//    + 1 cycle OH for releasing a shared resource (on claim exit)
+//
+// 3. arguably the world's most memory-efficient scheduler *
+//    + 1 byte stack memory OH for each (nested) claim
+//      (no additional book-keeping during run-time)
+//
+//    * applies to static task/resource models with single core
+//      pre-emptive, static priority scheduling
+//
+// in comparison "real-time" schedulers for threaded models like FreeRTOS
+//    - CPU and memory OH magnitudes larger (100s of cycles/kilobytes of memory)
+//    - ... and what's worse OH is typically unbound (no proofs of worst case)
+//    - potential race conditions (up to the user to verify)
+//    - potential deadlocks (up to the implementation)
+//    - potential unbounded priority inversion (up to the implementation)
+//
+// Rust RTFM (currently) targets ONLY STATIC SYSTEMS; there is no notion
+// of dynamically creating new execution contexts/threads,
+// so a direct comparison is not completely fair.
+//
+// On the other hand, embedded applications are typically static by nature
+// so a STATIC model is, to that end, better suited.
+//
+// RTFM is reactive by nature: a task executes to completion, triggered
+// by an internal or external event (where an interrupt is an external event
+// from the environment, e.g., from a HW peripheral such as the USART2).
+//
+// Threads, on the other hand, are concurrent and infinite by nature,
+// actively blocking/yielding while awaiting stimuli. Hence reactivity needs to be CODED.
+// This leads to an anomaly: the underlying HW is reactive (interrupts),
+// requiring an interrupt handler that creates a signal to the scheduler.
+//
+// The scheduler then needs to keep track of all threads and at some point choose
+// to dispatch the awaiting thread. So reactivity is bottlenecked to the point
+// of scheduling by queue management, context switching, and other additional
+// bookkeeping.
+//
+// In essence, the thread scheduler tries to re-establish the reactivity that
+// was there (interrupts), a battle that cannot be won...