From 68ddbc22a96ddb3f4ce8be4a8c4e0dcc8c130332 Mon Sep 17 00:00:00 2001
From: Per <Per Lindgren>
Date: Mon, 19 Feb 2018 23:19:40 +0100
Subject: [PATCH] parse: simplify example to plain &str parsing

Strip the serial loopback and RTFM resource/task setup from
examples/parse.rs, leaving a minimal example that parses
whitespace-separated command strings into a Command enum and
traces the results from init.
---
 examples/parse.rs | 263 +++-------------------------------------------
 1 file changed, 17 insertions(+), 246 deletions(-)

diff --git a/examples/parse.rs b/examples/parse.rs
index 4bfccab..3e4ce51 100644
--- a/examples/parse.rs
+++ b/examples/parse.rs
@@ -1,6 +1,6 @@
-//! Serial interface loopback
+//! Example of parsing an &str
 #![deny(unsafe_code)]
-//#![deny(warnings)]
+#![deny(warnings)]
 #![feature(proc_macro)]
 #![no_std]
 
@@ -11,92 +11,14 @@ extern crate heapless;
 #[macro_use]
 extern crate cortex_m_debug;
 
-use f4::prelude::*;
-use f4::{clock, Serial};
-use f4::time::Hertz;
-use heapless::Vec;
-use rtfm::{app, Resource, Threshold};
-
-// CONFIGURATION
-const BAUD_RATE: Hertz = Hertz(115_200);
+use rtfm::app;
 
 // RTFM FRAMEWORK
 app! {
     device: f4::stm32f40x,
 
-    resources: {
-        static VECTOR: Vec<u8, [u8; 4]> = Vec::new();
-    },
-
-    tasks: {
-        USART2: {
-            path: rx,
-            priority: 2,
-            resources: [VECTOR, USART2],
-        },
-        EXTI1: {
-            path: trace,
-            priority: 1,
-            resources: [VECTOR],
-        }
-    },
-}
-
-// `rx` task triggers on arrival of a USART2 interrupt
-fn rx(t: &mut Threshold, r: USART2::Resources) {
-    let serial = Serial(&**r.USART2);
-
-    // we don't need to block waiting for data to arrive,
-    // as we were triggered by the data arrival (or an error)
-    match serial.read() {
-        Ok(byte) => {
-            // received byte correct
-            r.VECTOR.claim_mut(t, |vector, _| {
-                // critical section for the shared vector
-                let _ = vector.push(byte);
-                // here you could put your error handling for vector full
-            });
-            let _ = serial.write(byte);
-        }
-        Err(err) => {
-            // some transmission error
-            ipln!("Error {:?}", err);
-            r.USART2.dr.read(); // clear the error by reading the data register
-        }
-    }
-
-    // trigger the `trace` task
-    rtfm::set_pending(f4::stm32f40x::Interrupt::EXTI1);
-}
-
-// `trace` task, triggered by the higher-priority `rx` task:
-// a low-priority task for background processing (like tracing)
-fn trace(t: &mut Threshold, r: EXTI1::Resources) {
-    let mut b = [0; 4]; // local buffer
-    let mut l = 0; // length of the received vector
-
-    r.VECTOR.claim(t, |vector, _| {
-        // critical section for the shared vector
-        // here the task `rx` will be blocked from executing
-        l = vector.len();
-        b[..l].copy_from_slice(&***vector); // efficient copy of the vector into the local buffer
-    });
-    // since we do the actual tracing (relatively slow)
-    // OUTSIDE the claim (critical section), there will be no
-    // additional blocking of `rx`
-    ipln!("Vec {:?}", &b[..l]);
-}
-
-macro_rules! scan {
-    ( $string:expr, $sep:expr, $( $x:ty ),+ ) => {{
-        let mut iter = $string.split($sep);
-        ($(iter.next().and_then(|word| word.parse::<$x>().ok()),)*)
-    }}
 }
 
-// [macro_use]
-// use core::fmt;
-
 #[derive(Debug)]
 enum Command {
     Start,
@@ -110,185 +32,34 @@ fn parse(s: &str) -> Result<Command, &str> {
     match iter.next() {
         Some("Stop") => Ok(Command::Stop),
         Some("Start") => Ok(Command::Start),
-        Some("Freq") => {
-
-            match iter.next() {
-                Some(fs) => {
-
-                    if let Ok(f) = fs.parse::<u32>() {
-                        Ok(Command::Freq(f))
-                    } else {
-                        Err("Invalid frequency")
-                    }
-
+        Some("Freq") => match iter.next() {
+            Some(fs) => {
+                if let Ok(f) = fs.parse::<u32>() {
+                    Ok(Command::Freq(f))
+                } else {
+                    Err("Invalid frequency")
                 }
-                None => Err("No frequency")
             }
-
-        }
-        Some(_) => {
-            Err("Invalid command")
+            None => Err("No frequency"),
         },
-        None => Err("No input")
-        //Err(format!("Invalid Command : {:?}", s)),
+        Some(_) => Err("Invalid command"),
+        None => Err("No input"),
     }
 }
 
-// Here we see the typical use of init INITIALIZING the system
-fn init(p: init::Peripherals, _r: init::Resources) {
-    //clock::set_84_mhz(p.RCC, p.FLASH);
+fn init(_p: init::Peripherals) {
     ipln!("init");
-    ipln!("{:?}", parse("Start"));
+    ipln!("{:?}", parse("Start")); // works
     ipln!("{:?}", parse("  Start  ")); // works with white spaces
     ipln!("{:?}", parse("  Freq  122  ")); // works with white spaces
-    ipln!("{:?}", parse("  Freq  a122  ")); //
-    ipln!("{:?}", parse("  Freq    ")); //
-    ipln!("{:?}", parse("")); // error
-
-    let serial = Serial(p.USART2);
-
-    serial.init(BAUD_RATE.invert(), None, p.GPIOA, p.RCC);
-    // in effect telling the USART2 to trigger the `rx` task/interrupt
-    serial.listen(f4::serial::Event::Rxne);
-
-    // let s = scan!(" 55 123", |c| c == ' ', u32, u32);
-    // ipln!("{:?}", s);
-
-    // let b = char::is_whitespace;
-    // //let scanned = scan!("  55 123  ", core::char::is_whitespace, u32, u32);
-    // //ipln!("{:?}", scanned);
-
-    // let mut v: Vec<&str, [&str; 4]> = Vec::new();
-
-    // // collect into a vector of &str
-    // for i in s.split(" ") {
-    //     v.push(&i);
-    // }
-
-    // if let Some(command) = v.pop() {
-    //     ipln!("here {:?}", command);
-    //     match command {
-    //         "freq" => {
-    //             if let Some(freq) = v.pop() {
-    //                 if let Ok(f) = freq.parse::<i32>() {
-    //                     ipln!("freq {:?}", f);
-    //                 }
-    //             }
-    //         }
-    //         _ => {}
-    //     }
-    // }
+    ipln!("{:?}", parse("  Freq  a122  ")); // Invalid frequency
+    ipln!("{:?}", parse("  Freq    ")); // No frequency
+    ipln!("{:?}", parse("")); // No input
 }
 
-// We will spend all time sleeping (unless we have work to do)
-// reactive programming in RTFM ftw!!!
 fn idle() -> ! {
     // Sleep
     loop {
         rtfm::wfi();
     }
 }
-
-// 1. compile and run the project at 16 MHz
-// make sure it's running (not paused)
-// start a terminal program, e.g., `moserial`
-// connect to the port
-//
-// Device       /dev/ttyACM0
-// Baud Rate    115200
-// Data Bits    8
-// Stop Bits    1
-// Parity       None
-// Handshake    None
-//
-// (this is commonly abbreviated as 115200 8N1)
-//
-// you should now be able to send data and receive an echo from the MCU
-//
-// try sending: "abcd" as a single sequence (set the option No end in moserial)
-// (don't send the quotation marks, just abcd)
-//
-// what did you receive, and what was the output of the ITM trace
-// ** your answer here **
-//
-// did you experience any overrun errors?
-// ** your answer here **
-//
-// what is the key problem and its solution (try to follow the commented code)
-// ** your answer here **
-//
-// commit your answers (bare8_1)
-//
-// 2. now catch the case when we are trying to write to a full vector/buffer
-// and write a suitable error message
-//
-// commit your answers (bare8_2)
-//
-// as a side note....
-//
-// The concurrency model behind RTFM offers
-// 1. Race-free resource access
-//
-// 2. Deadlock-free execution
-//
-// 3. Shared execution stack (no pre-allocated stack regions)
-//
-// 4. Bounded priority inversion
-//
-// 5. Theoretical underpinning ->
-//    + proofs of soundness
-//    + schedulability analysis
-//    + response time analysis
-//    + stack memory analysis
-//    + ... leverages 25 years of research in the real-time community
-//      based on the seminal work of Baker in the early 1990s
-//      (known as the Stack Resource Policy, SRP)
-//
-// Our implementation in Rust offers
-// 1. compile check and analysis of tasks and resources
-//    + the API implementation together with the Rust compiler will ensure that
-//      both RTFM (SRP) soundness and the Rust memory model invariants
-//      are upheld (under all circumstances).
-//
-// 2. arguably the world's fastest real-time scheduler *
-//    + task invocation 0-cycle OH on top of HW interrupt handling
-//    + 2 cycle OH for locking a shared resource (on claim entry)
-//    + 1 cycle OH for releasing a shared resource (on claim exit)
-//
-// 3. arguably the world's most memory-efficient scheduler *
-//    + 1 byte stack memory OH for each (nested) claim
-//      (no additional book-keeping during run-time)
-//
-//    * applies to static task/resource models with single core
-//      pre-emptive, static priority scheduling
-//
-// in comparison "real-time" schedulers for threaded models like FreeRTOS
-//    - CPU and memory OH orders of magnitude larger (100s of cycles/kilobytes of memory)
-//    - ... and what's worse, OH is typically unbounded (no proofs of worst case)
-//    - potential race conditions (up to the user to verify)
-//    - potential deadlocks (up to the implementation)
-//    - potential unbounded priority inversion (up to the implementation)
-//
-// Rust RTFM (currently) targets ONLY STATIC SYSTEMS; there is no notion
-// of dynamically creating new execution contexts/threads,
-// so a direct comparison is not completely fair.
-//
-// On the other hand, embedded applications are typically static by nature
-// so a STATIC model is, to that end, better suited.
-//
-// RTFM is reactive by nature: a task executes to completion, triggered
-// by an internal or external event (where an interrupt is an external event
-// from the environment, like a HW peripheral such as the USART2).
-//
-// Threads, on the other hand, are concurrent and infinite by nature,
-// actively blocking/yielding while awaiting stimuli. Hence reactivity needs to be CODED.
-// This leads to an anomaly, the underlying HW is reactive (interrupts),
-// requiring an interrupt handler, that creates a signal to the scheduler.
-//
-// The scheduler then needs to keep track of all threads and at some point choose
-// to dispatch the awaiting thread. So reactivity is bottlenecked to the point
-// of scheduling by queue management, context switching and other additional
-// book-keeping.
-//
-// In essence, the thread scheduler tries to re-establish the reactivity that
-// was there (interrupts), a battle that cannot be won...
-- 
GitLab
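
For reference only (not part of the patch): below is a minimal host-side sketch of the parsing approach that the trimmed example keeps. The hunk above does not show how `iter` is constructed inside `parse`, so the use of `split_whitespace()` here is an assumption; the `PartialEq` derive, the `&'static str` error type, and the `main` test harness are likewise illustrative additions so the logic can be checked with a plain `cargo run` rather than on the target.

// Illustrative sketch, not part of the patch: mirrors the Command enum
// and parse() from examples/parse.rs so the logic can be exercised on a host.
#[derive(Debug, PartialEq)]
enum Command {
    Start,
    Stop,
    Freq(u32),
}

// Split the input on whitespace and match the first token; "Freq"
// additionally expects a second token that parses as a u32.
// NOTE: split_whitespace() is an assumption; the patch hunk does not
// show how the iterator is built in the real example.
fn parse(s: &str) -> Result<Command, &'static str> {
    let mut iter = s.split_whitespace();
    match iter.next() {
        Some("Stop") => Ok(Command::Stop),
        Some("Start") => Ok(Command::Start),
        Some("Freq") => match iter.next() {
            Some(fs) => {
                if let Ok(f) = fs.parse::<u32>() {
                    Ok(Command::Freq(f))
                } else {
                    Err("Invalid frequency")
                }
            }
            None => Err("No frequency"),
        },
        Some(_) => Err("Invalid command"),
        None => Err("No input"),
    }
}

fn main() {
    // The same inputs that init() traces over ITM in the example.
    assert_eq!(parse("Start"), Ok(Command::Start));
    assert_eq!(parse("  Start  "), Ok(Command::Start));
    assert_eq!(parse("  Freq  122  "), Ok(Command::Freq(122)));
    assert_eq!(parse("  Freq  a122  "), Err("Invalid frequency"));
    assert_eq!(parse("  Freq    "), Err("No frequency"));
    assert_eq!(parse(""), Err("No input"));
    println!("all parse checks passed");
}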