diff --git a/examples/timing_resources.rs b/examples/timing_resources.rs
index f35c33ac2bb8e483f144d7ebbaf701ecf85e67c9..acdff4b3c2544a96615e0e6bb8b9bc05b96cd012 100644
--- a/examples/timing_resources.rs
+++ b/examples/timing_resources.rs
@@ -1,11 +1,10 @@
 //! examples/timing_resources.rs
 
 // #![deny(unsafe_code)]
-// #![deny(warnings)]
+#![deny(warnings)]
 #![no_main]
 #![no_std]
 
-use core::ptr::read_volatile;
 use cortex_m::{asm, peripheral::DWT};
 use panic_halt as _;
 use stm32f4::stm32f411;
@@ -13,8 +12,10 @@ use stm32f4::stm32f411;
 #[rtic::app(device = stm32f411)]
 const APP: () = {
     struct Resources {
-        // A resource
         dwt: DWT,
+
+        #[init(0)]
+        shared: u64, // non atomic data
     }
 
     #[init]
@@ -22,27 +23,32 @@ const APP: () = {
         // Initialize (enable) the monotonic timer (CYCCNT)
         cx.core.DCB.enable_trace();
         cx.core.DWT.enable_cycle_counter();
+        rtic::pend(stm32f411::Interrupt::EXTI1);
         init::LateResources { dwt: cx.core.DWT }
     }
 
-    #[idle(resources = [dwt])]
-    fn idle(mut cx: idle::Context) -> ! {
+    #[task(binds = EXTI0, resources = [shared], priority = 2)]
+    fn exti0(cx: exti0::Context) {
+        asm::bkpt();
+        *cx.resources.shared += 1;
+    }
+
+    #[task(binds = EXTI1, resources = [dwt, shared], priority = 1)]
+    fn exti1(mut cx: exti1::Context) {
         unsafe { cx.resources.dwt.cyccnt.write(0) };
         asm::bkpt();
         rtic::pend(stm32f411::Interrupt::EXTI0);
         asm::bkpt();
-        loop {
-            continue;
-        }
-    }
-
-    #[task(binds = EXTI0)]
-    fn exti0(_cx: exti0::Context) {
+        cx.resources.shared.lock(|shared| {
+            // asm::bkpt();
+            *shared += 1;
+            // asm::bkpt();
+        });
         asm::bkpt();
     }
 };
 
-// Now we are going to have a look at the scheduling of RTIC tasks
+// Now we are going to have a look at the resource management of RTIC.
 //
 // First create an objdump file:
 // >  cargo objdump --example timing_resources --release  --features nightly -- --disassemble > timing_resources.objdump
@@ -51,35 +57,36 @@ const APP: () = {
 //
 // You should find something like:
 //
-//  08000232 <EXTI0>:
-//  8000232: 00 be        	bkpt	#0
-//  8000234: 00 20        	movs	r0, #0
-//  8000236: 80 f3 11 88  	msr	basepri, r0
-//  800023a: 70 47        	bx	lr
+// 08000232 <EXTI0>:
+//  8000232: 40 f2 00 01  	movw	r1, #0
+//  8000236: ef f3 11 80  	mrs	r0, basepri
+//  800023a: 00 be        	bkpt	#0
+//  800023c: c2 f2 00 01  	movt	r1, #8192
+//  8000240: d1 e9 00 23  	ldrd	r2, r3, [r1]
+//  8000244: 01 32        	adds	r2, #1
+//  8000246: 43 f1 00 03  	adc	r3, r3, #0
+//  800024a: c1 e9 00 23  	strd	r2, r3, [r1]
+//  800024e: 80 f3 11 88  	msr	basepri, r0
+//  8000252: 70 47        	bx	lr
+//
+// Explain what is happening here in your own words.
 //
-// The application triggers the `exti0` task from `idle`, let's see
-// how that pans out.
+// [Your answer here]
 //
 // > cargo run --example timing_resources --release --features nightly
 // Then continue to the first breakpoint instruction:
 // (gdb) c
-// timing_resources::idle (cx=...) at examples/timing_resources.rs:32
-// 32              asm::bkpt();
+// Program
+//  received signal SIGTRAP, Trace/breakpoint trap.
+// timing_resources::exti1 (cx=...) at examples/timing_resources.rs:39
+// 39	        asm::bkpt();
 //
 // (gdb) x 0xe0001004
-// 0
-//
-// Here we see, that we have successfully set the cycle counter to zero.
-// The `rtic::pend(stm32f411::Interrupt::EXTI0)` "emulates" the
-// arrival/triggering of an external interrupt associated with
-// the `exti0` task.
+// 2
 //
 // (gdb) c
-// timing_resources::APP::EXTI0 () at examples/timing_resources.rs:13
-// 13      #[rtic::app(device = stm32f411)]
-//
-// Since `exti0` has a default prio = 1, it will preempt `idle` (at prio = 0),
-// and the debugger breaks in the `exti0` task.
+//  received signal SIGTRAP, Trace/breakpoint trap.
+// rtic::export::run<closure-0> (priority=2, f=...) at /home/pln/.cargo/registry/src/github.com-1ecc6299db9ec823/cortex-m-rtic-0.5.5/src/export.rs:38
 //
 // (gdb) x 0xe0001004
 //
@@ -92,9 +99,6 @@ const APP: () = {
 // You should see that we hit the breakpoint in `exti0`, and
 // that the code complies to the objdump EXTI disassembly.
 //
-// Confer to the document:
-// https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/beginner-guide-on-interrupt-latency-and-interrupt-latency-of-the-arm-cortex-m-processors
-//
 // What was the software latency observed to enter the task?
 //
 // [Your answer here]
@@ -103,20 +107,159 @@ const APP: () = {
 //
 // [Your answer here]
 //
+// The debugger reports that the breakpoint was hit in `run<closure-0>`.
+// The reason is that RTIC generates the actual interrupt handler, which in
+// turn calls the function `run`, passing the user task to it as a closure.
+//
+// (Functions in Rust can be seen as closures without captured variables.)
+//
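+// As a rough sketch (simplified and illustrative, not the exact code RTIC
+// generates), the emitted handler looks something like:
+//
+// #[no_mangle]
+// unsafe fn EXTI0() {
+//     // `rtic::export::run` runs the task closure at the handler's priority
+//     // (2 here) and restores BASEPRI when the closure returns.
+//     rtic::export::run(2, || exti0(/* generated exti0::Context */));
+// }
+//
+// This is why gdb reports the breakpoint inside `rtic::export::run<closure-0>`
+// rather than in a function named `exti0`.
+//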
 // Now we can continue to measure the round trip time.
 //
 // (gdb) c
-// timing_resources::idle (cx=...) at examples/timing_resources.rs:34
-// 34              asm::bkpt();
+//
+//  received signal SIGTRAP, Trace/breakpoint trap.
+// timing_resources::exti1 (cx=...) at examples/timing_resources.rs:41
+// 41	        asm::bkpt();
 //
 // (gdb) x 0xe0001004
 //
 // [Your answer here]
 //
-// Looking at the EXTI0 (exti0) code, we see two additional
-// instructions used to restore the BASEPRI register.
-// This OH will be removed in next release of RTIC.
-// So we can conclude RTIC to have a 2-cycle OH (in this case).
-// (In the general case, as we will see later restoring BASEPRI
-// is actually necessary so its just this corner case that is
-// sub-optimal.)
+// You should have a total execution time in the range of 30-40 cycles.
+//
+// Explain why, in this case, the resource access in
+// `exti0` was safe without locking the resource.
+//
+// [Your answer here]
+//
+// In `exti1` we also access `shared` but this time through a lock.
+//
+// (gdb) disassemble
+// => 0x08000270 <+28>:	bkpt	0x0000
+//    0x08000272 <+30>:	msr	BASEPRI, r0
+//    0x08000276 <+34>:	movw	r0, #0
+//    0x0800027a <+38>:	movt	r0, #8192	; 0x2000
+//    0x0800027e <+42>:	ldrd	r2, r3, [r0]
+//    0x08000282 <+46>:	adds	r2, #1
+//    0x08000284 <+48>:	adc.w	r3, r3, #0
+//    0x08000288 <+52>:	strd	r2, r3, [r0]
+//    0x0800028c <+56>:	movs	r0, #240	; 0xf0
+//    0x0800028e <+58>:	msr	BASEPRI, r0
+//    0x08000292 <+62>:	bkpt	0x0000
+//    0x08000294 <+64>:	msr	BASEPRI, r1
+//    0x08000298 <+68>:	bx	lr
+//
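+// As a simplified sketch (assuming the `rtic::export::lock` implementation of
+// cortex-m-rtic 0.5; the names and constants below are illustrative), the lock
+// essentially does:
+//
+// // raise BASEPRI to the ceiling (2), masking `exti0` while the closure runs
+// basepri::write(logical2hw(2 /* ceiling */, 4 /* NVIC_PRIO_BITS */));
+// let r = f(&mut *ptr); // run the closure with exclusive access to `shared`
+// // lower BASEPRI back to the current priority (1 for `exti1`), i.e., 0xf0
+// basepri::write(logical2hw(1 /* current */, 4));
+//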
+// We can now execute the code to the next breakpoint to get the
+// execution time of the lock.
+//
+// (gdb) c
+// timing_resources::exti1 (cx=...) at examples/timing_resources.rs:47
+// 47	        asm::bkpt();
+//
+// (gdb) x 0xe0001004
+//
+// [Your answer here]
+//
+// Calculate the total time (in cycles) for this section of code.
+//
+// [Your answer here]
+//
+// You should get a value around 15 cycles.
+//
+// Now look at the "critical section", i.e., for how many cycles
+// is the lock held?
+// To this end you need to insert `asm::bkpt()` on entry and exit
+// inside the closure:
+//
+// cx.resources.shared.lock(|shared| {
+//     asm::bkpt();
+//     *shared += 1;
+//     asm::bkpt();
+// });
+//
+// Change the code, and recompile it.
+//
+// If you debug in vscode, just Shift-F5 to terminate the session, and F5 to start debugging again.
+//
+// If debugging in a terminal you may recompile from within gdb, without exiting the debug session:
+//
+// (gdb) shell cargo build --example timing_resources --release  --features nightly
+//   Compiling app v0.1.0 (/home/pln/courses/e7020e/app)
+//     Finished release [optimized + debuginfo] target(s) in 0.32s
+//
+// and load the newly compiled executable:
+// (gdb) load
+// ...
+// Transfer rate: 1 KB/sec, 406 bytes/write.
+//
+// Now you can continue until you hit the first breakpoint in the lock closure.
+//
+// (gdb) c
+//
+//  received signal SIGTRAP, Trace/breakpoint trap.
+// timing_resources::exti1::{{closure}} (shared=<optimized out>) at examples/timing_resources.rs:43
+// 43	            asm::bkpt();
+//
+// (gdb) x 0xe0001004
+//
+// [Your answer here]
+//
+// (gdb) c
+//
+//  received signal SIGTRAP, Trace/breakpoint trap.
+// timing_resources::exti1::{{closure}} (shared=0x20000000 <timing_resources::APP::shared>) at examples/timing_resources.rs:45
+// 45	            asm::bkpt();
+//
+// (gdb) x 0xe0001004
+//
+// [Your answer here]
+//
+// From a real-time perspective the critical section implies
+// blocking (of higher priority tasks).
+//
+// How many clock cycles does the blocking last?
+//
+// [Your answer here]
+//
+// Finally continue out of the closure.
+//
+// (gdb) c
+//  received signal SIGTRAP, Trace/breakpoint trap.
+// timing_resources::exti1 (cx=...) at examples/timing_resources.rs:47
+//
+// (gdb) x 0xe0001004
+//
+// [Your answer here]
+//
+// This is the total execution time of:
+//
+// - pending the task `exti0` for execution,
+// - preempting `exti1`,
+// - safely accessing and updating a shared (non-atomic) resource inside `exti0`,
+// - returning to `exti1`, and
+// - safely accessing and updating the shared (non-atomic) resource inside `exti1`.
+//
+// Notice that the breakpoints incur some overhead and may inhibit
+// some potential LLVM optimizations, so we obtain a "safe" (pessimistic) estimate.
+//
+// http://www.diva-portal.se/smash/get/diva2:1005680/FULLTEXT01.pdf
+//
+// There you find a comparison to a typical threaded counterpart, FreeRTOS, in Table 1.
+//
+// Give a rough estimate, based on this information, of how long the complete task
+// `exti1` would take to execute if written for FreeRTOS. (Include the context switch
+// to the higher priority task and the mutex lock/unlock in both "threads".)
+//
+// Motivate your answer (not just a number).
+//
+// [Your answer here]
+//
+// Notice that the Rust implementation is significantly faster than the C code version
+// of Real-Time For the Masses back in 2013.
+//
+// Why do you think RTIC + Rust + LLVM can do a better job than hand-written
+// C code + macros + gcc?
+//
+// (Hint: what optimizations can safely be applied?)
+//
+// [Your answer here]
diff --git a/examples/timing_resources2.rs b/examples/timing_resources2.rs
deleted file mode 100644
index 1d6bf27b9c512bc9e3870eecc946bc0bc0f70369..0000000000000000000000000000000000000000
--- a/examples/timing_resources2.rs
+++ /dev/null
@@ -1,253 +0,0 @@
-//! examples/timing_resources.rs
-
-// #![deny(unsafe_code)]
-#![deny(warnings)]
-#![no_main]
-#![no_std]
-
-use cortex_m::peripheral::DWT;
-//use cortex_m::{asm, peripheral::DWT};
-use panic_halt as _;
-use stm32f4::stm32f411;
-
-#[rtic::app(device = stm32f411)]
-const APP: () = {
-    struct Resources {
-        dwt: DWT,
-
-        #[init(0)]
-        shared: u64, // non atomic data
-    }
-
-    #[init]
-    fn init(mut cx: init::Context) -> init::LateResources {
-        // Initialize (enable) the monotonic timer (CYCCNT)
-        cx.core.DCB.enable_trace();
-        cx.core.DWT.enable_cycle_counter();
-        init::LateResources { dwt: cx.core.DWT }
-    }
-
-    #[idle(resources = [shared])]
-    fn idle(_cx: idle::Context) -> ! {
-        // unsafe { cx.resources.dwt.cyccnt.write(0) };
-        // // asm::bkpt();
-        // rtic::pend(stm32f411::Interrupt::EXTI0);
-        // // asm::bkpt();
-        // cx.resources.shared.lock(|shared| {
-        //     // asm::bkpt();
-        //     *shared += 1;
-        //     // asm::bkpt();
-        // });
-        // asm::bkpt();
-        loop {
-            continue;
-        }
-    }
-
-    #[task(binds = EXTI0, resources = [shared], priority = 2)]
-    fn exti0(cx: exti0::Context) {
-        // asm::bkpt();
-        *cx.resources.shared += 1;
-    }
-
-    #[task(binds = EXTI1, resources = [dwt, shared], priority = 1)]
-    fn exti1(mut cx: exti1::Context) {
-        unsafe { cx.resources.dwt.cyccnt.write(0) };
-        // asm::bkpt();
-        rtic::pend(stm32f411::Interrupt::EXTI0);
-        // asm::bkpt();
-        cx.resources.shared.lock(|shared| {
-            // asm::bkpt();
-            *shared += 1;
-            // asm::bkpt();
-        });
-        // asm::bkpt();
-    }
-};
-
-// Now we are going to have a look at the resource management of RTIC.
-//
-// First create an objdump file:
-// >  cargo objdump --example timing_resources --release  --features nightly -- --disassemble > timing_resources.objdump
-//
-// Lookup the EXTI0 symbol (RTIC binds the exti0 task to the interrupt vector).
-//
-// You should find something like:
-//
-// 080002b6 <EXTI0>:
-//  80002b6: 40 f2 00 00  	movw	r0, #0
-//  80002ba: 00 be        	bkpt	#0
-//  80002bc: c2 f2 00 00  	movt	r0, #8192
-//  80002c0: d0 e9 00 12  	ldrd	r1, r2, [r0]
-//  80002c4: 01 31        	adds	r1, #1
-//  80002c6: 42 f1 00 02  	adc	r2, r2, #0
-//  80002ca: c0 e9 00 12  	strd	r1, r2, [r0]
-//  80002ce: 00 20        	movs	r0, #0
-//  80002d0: 80 f3 11 88  	msr	basepri, r0
-//  80002d4: 70 47        	bx	lr
-//
-// Explain what is happening here in your own words.
-//
-// [Your code here]
-//
-// > cargo run --example timing_resources --release --features nightly
-// Then continue to the first breakpoint instruction:
-// (gdb) c
-// timing_resources::idle (cx=...) at examples/timing_resources.rs:32
-// 32              asm::bkpt();
-//
-// (gdb) x 0xe0001004
-// 0
-//
-// (gdb) c
-// timing_resources::exti0 (cx=...) at examples/timing_resources.rs:44
-// 44              asm::bkpt();
-//
-// (gdb) x 0xe0001004
-//
-// [Your answer here]
-//
-// (gdb) disassemble
-//
-// [Your answer here]
-//
-// You should see that we hit the breakpoint in `exti0`, and
-// that the code complies to the objdump EXTI disassembly.
-//
-// What was the software latency observed to enter the task?
-//
-// [Your answer here]
-//
-// Does RTIC infer any overhead?
-//
-// [Your answer here]
-//
-// Now we can continue to measure the round trip time.
-//
-// (gdb) c
-//
-// (gdb) x 0xe0001004
-// timing_resources::idle (cx=...) at examples/timing_resources.rs:34
-// 34              asm::bkpt();
-//
-// [Your answer here]
-//
-// You should have a total execution time in the range of 30 cycles.
-//
-// Explain the reason (for this case) that resource access in
-// `exti0` was safe without locking the resource.
-//
-// [Your answer here]
-//
-// In `idle` we also access `shared` but this time through a lock.
-//
-// (gdb) disassemble
-// => 0x0800026e <+26>:    bkpt    0x0000
-//    0x08000270 <+28>:    ldrb    r2, [r0, #0]
-//    0x08000272 <+30>:    cbz     r2, 0x800028c <timing_resources::idle+56>
-//    0x08000274 <+32>:    movw    r0, #0
-//    0x08000278 <+36>:    movt    r0, #8192       ; 0x2000
-//    0x0800027c <+40>:    ldrd    r1, r2, [r0]
-//    0x08000280 <+44>:    adds    r1, #1
-//    0x08000282 <+46>:    adc.w   r2, r2, #0
-//    0x08000286 <+50>:    strd    r1, r2, [r0]
-//    0x0800028a <+54>:    b.n     0x80002b2 <timing_resources::idle+94>
-//    0x0800028c <+56>:    movs    r2, #1
-//    0x0800028e <+58>:    movw    r12, #0
-//    0x08000292 <+62>:    strb    r2, [r0, #0]
-//    0x08000294 <+64>:    movs    r2, #240        ; 0xf0
-//    0x08000296 <+66>:    msr     BASEPRI, r2
-//    0x0800029a <+70>:    movt    r12, #8192      ; 0x2000
-//    0x0800029e <+74>:    ldrd    r3, r2, [r12]
-//    0x080002a2 <+78>:    adds    r3, #1
-//    0x080002a4 <+80>:    adc.w   r2, r2, #0
-//    0x080002a8 <+84>:    strd    r3, r2, [r12]
-//    0x080002ac <+88>:    msr     BASEPRI, r1
-//    0x080002b0 <+92>:    strb    r1, [r0, #0]
-//    0x080002b2 <+94>:    bkpt    0x0000
-//
-// We can now execute the code to the next breakpoint to get the
-// execution time of the lock.
-//
-// (gdb) c
-// timing_resources::idle (cx=...) at examples/timing_resources.rs:36
-// 36              asm::bkpt();
-//
-// (gdb) x 0xe0001004
-//
-// [Your answer here]
-//
-// Calculate the total time (in cycles), for this section of code.
-//
-// [Your answer here]
-//
-// You should get a value around 25 cycles.
-//
-// Now look at the "critical section", i.e., how many cycles
-// are the lock held?
-// To this end you need to insert `asm::bkpt()` on entry and exit
-// inside the closure.
-//
-// cx.resources.shared.lock(|shared| {
-//     asm::bkpt();
-//     *shared += 1;
-//     asm::bkpt();
-// });
-//
-// Change the code, and compile it from withing gdb
-// (gdb) shell cargo build --example timing_resources --release  --features nightly
-//   Compiling app v0.1.0 (/home/pln/courses/e7020e/app)
-//     Finished release [optimized + debuginfo] target(s) in 0.32s
-//
-// and load the newly compiled executable:
-// (gdb) load
-// ...
-// Transfer rate: 1 KB/sec, 406 bytes/write.
-//
-// Now you can continue until you hit the first breakpoint in the lock closure.
-//
-// (gdb) c
-// rtic::export::lock<u64,(),closure-0> (ptr=<optimized out>, priority=0x2000ffef, ceiling=1, nvic_prio_bits=4, f=...) at /home/pln/.cargo/registry/src/github.com-1ecc6299db9ec823/cortex-m-0.6.4/src/asm.rs:11
-// 11              () => unsafe { llvm_asm!("bkpt" :::: "volatile") },
-//
-// (gdb) x 0xe0001004
-//
-// [Your answer here]
-//
-// (gdb) c
-// rtic::export::lock<u64,(),closure-0> (ptr=<optimized out>, priority=0x2000ffef, ceiling=1, nvic_prio_bits=4, f=...) at /home/pln/.cargo/registry/src/github.com-1ecc6299db9ec823/cortex-m-0.6.4/src/asm.rs:11
-// 11              () => unsafe { llvm_asm!("bkpt" :::: "volatile") },
-//
-// (gdb) x 0xe0001004
-//
-// [Your answer here]
-//
-// From a real-time perspective the critical section infers
-// blocking (of higher priority tasks).
-//
-// How many clock cycles is the blocking?
-//
-// [Your answer here]
-//
-// Finally continue out of the closure.
-//
-// (gdb) c
-// timing_resources::idle (cx=...) at examples/timing_resources.rs:40
-// 40              asm::bkpt();
-//
-// (gdb) x 0xe0001004
-//
-// [Your answer here]
-//
-// This is the total execution time of.
-//
-// - pending a task `exti` for execution
-// - preempt `idle`
-// - inside `exti` safely access and update a shared (non atomic resource).
-// - returning to `idle`
-// - safely access and update a shared (non atomic) resource
-//
-// Notice here, the breakpoints infer some OH and may disable
-// some potential LLVM optimizations.
-//
-//