Compare revisions

Commits on Source (10)
......@@ -5,7 +5,7 @@ authors = ["Per Lindgren <per.lindgren@ltu.se>"]
description = "Example project (app)"
keywords = ["arm", "cortex-m", "rtfm", "e7020e"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/korken89/trustflight_firmware"
repository = "https://gitlab.henriktjader.com/pln/e7020e_2020"
version = "0.1.0"
edition = "2018"
......@@ -17,6 +17,7 @@ cortex-m-semihosting = "0.3.5"
aligned = "0.3.2"
ufmt = "0.1.0"
nb = "0.1.2"
heapless = "0.5.3"
[dependencies.cortex-m]
version = "0.6.2"
......@@ -62,9 +63,24 @@ name = "serial"
required-features = ["stm32f4xx-hal"]
[[example]]
name = "rtfm_itm"
name = "bare7"
required-features = ["stm32f4xx-hal"]
[[example]]
name = "bare8"
required-features = ["rtfm"]
[[example]]
name = "bare9"
required-features = ["rtfm"]
[[example]]
name = "bare10"
required-features = ["rtfm"]
[[example]]
name = "rtfm_itm"
required-features = ["rtfm"]
[[example]]
name = "rtfm_itm_spawn"
required-features = ["rtfm"]
......
//! The RTFM framework
//!
//! What it covers:
//! - Priority based scheduling
//! - Message passing
#![no_main]
#![no_std]
extern crate panic_halt;
use cortex_m::{asm, iprintln};
extern crate stm32f4xx_hal as hal;
use crate::hal::prelude::*;
use crate::hal::serial::{config::Config, Event, Rx, Serial, Tx};
use hal::stm32::ITM;
use nb::block;
use rtfm::app;
// Our error type
#[derive(Debug)]
pub enum Error {
RingBufferOverflow,
UsartSendOverflow,
UsartReceiveOverflow,
}
#[app(device = hal::stm32, peripherals = true)]
const APP: () = {
struct Resources {
// Late resources
TX: Tx<hal::stm32::USART2>,
RX: Rx<hal::stm32::USART2>,
ITM: ITM,
}
// init runs in an interrupt free section
#[init]
fn init(cx: init::Context) -> init::LateResources {
let mut core = cx.core;
let device = cx.device;
let stim = &mut core.ITM.stim[0];
iprintln!(stim, "bare10");
let rcc = device.RCC.constrain();
// 16 MHz (default, all clocks)
let clocks = rcc.cfgr.freeze();
let gpioa = device.GPIOA.split();
let tx = gpioa.pa2.into_alternate_af7();
let rx = gpioa.pa3.into_alternate_af7(); // try comment out
let mut serial = Serial::usart2(
device.USART2,
(tx, rx),
Config::default().baudrate(115_200.bps()),
clocks,
)
.unwrap();
// generate interrupt on Rxne
serial.listen(Event::Rxne);
// Separate out the sender and receiver of the serial port
let (tx, rx) = serial.split();
// Late resources
init::LateResources {
// Our split serial
TX: tx,
RX: rx,
// For debugging
ITM: core.ITM,
}
}
// idle may be interrupted by other interrupts/tasks in the system
#[idle]
fn idle(_cx: idle::Context) -> ! {
loop {
asm::wfi();
}
}
#[task(priority = 1, resources = [ITM])]
fn trace_data(cx: trace_data::Context, byte: u8) {
let stim = &mut cx.resources.ITM.stim[0];
iprintln!(stim, "data {}", byte);
// for _ in 0..10000 {
// asm::nop();
// }
}
#[task(priority = 1, resources = [ITM])]
fn trace_error(cx: trace_error::Context, error: Error) {
let stim = &mut cx.resources.ITM.stim[0];
iprintln!(stim, "{:?}", error);
}
#[task(priority = 2, resources = [TX], spawn = [trace_error])]
fn echo(cx: echo::Context, byte: u8) {
let tx = cx.resources.TX;
if block!(tx.write(byte)).is_err() {
let _ = cx.spawn.trace_error(Error::UsartSendOverflow);
}
}
#[task(binds = USART2, priority = 3, resources = [RX], spawn = [trace_data, trace_error, echo])]
fn usart2(cx: usart2::Context) {
let rx = cx.resources.RX;
match rx.read() {
Ok(byte) => {
let _ = cx.spawn.echo(byte);
if cx.spawn.trace_data(byte).is_err() {
let _ = cx.spawn.trace_error(Error::RingBufferOverflow);
}
}
Err(_err) => {
let _ = cx.spawn.trace_error(Error::UsartReceiveOverflow);
}
}
}
// Set of interrupt vectors, free to use for RTFM tasks
// 1 per priority level suffices
extern "C" {
fn EXTI0();
fn EXTI1();
}
};
// Optional
// 0. Compile and run the project at 16MHz in release mode
// make sure it's running (not paused).
//
// > cargo build --example bare10 --features "rtfm" --release
// (or use the vscode build task)
//
// Connect a terminal program.
// Verify that it works as bare9.
//
// 1. Now, uncomment the loop in `trace_data`.
// The loop is just there to simulate some workload...
//
// Try now to send a sequence `abcd`
//
// Did you lose any data (was the data correctly echoed)?
//
// ** your answer here **
//
// Was the data correctly traced over the ITM?
//
// ** your answer here **
//
// Why did you lose trace information?
//
// ** your answer here **
//
// Commit your answers (bare10_1)
//
// 2. Read the RTFM manual (book).
// Figure out a way to accommodate 4 outstanding messages to the `trace_data` task (see the sketch after this exercise).
//
// Verify that you can now correctly trace sequences of 4 characters sent.
//
// Can you think of how to determine a safe bound on the message buffer size?
// (Safe meaning that messages would never be lost due to the buffer being full.)
//
// What information would you need?
//
// ** your answer here **
//
// Commit your answers (bare10_2)
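//
// One possible approach (a sketch based on the RTFM book's `capacity` attribute;
// other solutions exist): `capacity` sets the size of a task's message queue.
// The default capacity is 1, so a second `spawn.trace_data(..)` issued before the
// first message has been served returns `Err`.
//
// #[task(capacity = 4, priority = 1, resources = [ITM])]
// fn trace_data(cx: trace_data::Context, byte: u8) { ... }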
//
// 3. Implement a command line interpreter as a new task.
// It should:
// - have priority 1.
// - take a byte as an argument (passed from the USART2 interrupt).
// - have a local buffer B of 10 characters
// - have sufficient capacity to receive 10 characters sent in a sequence
// - analyse the input buffer B, checking for the commands:
// set <int> <RETURN> // to set blinking frequency
// on <RETURN> // to enable the led blinking
// off <RETURN> // to disable the led blinking
//
// <int> should be decoded to an integer value T, and <RETURN> should accept either <CR> or <LF>.
//
// The LED should blink according to the set value in Hertz
// (so `set 1 <RETURN>` should blink at 1 Hz).
//
// Tips:
// Create two tasks: `on`, that turns the led on, and `off`, that turns the led off.
// `on` calls `off` with a timer offset (check the RTFM manual).
// `off` calls `on` with a timer offset.
//
// The timing offset can be implemented as a shared resource T between the command line interpreter and
// the `on`/`off` tasks. From `init` you can give an initial timing offset T, and send an
// initial message to `on`, triggering the periodic behavior.
//
// The `on`/`off` tasks can have a high priority (4), with locking done in the low priority task
// parsing the input. This way, the led will have very low jitter.
// (A sketch of such `on`/`off` tasks is given at the end of this exercise.)
//
// (You can even use an atomic data structure, which allows for lock-free access.)
//
//
// The on/off behavior is easiest implemented by having another shared variable used as a condition
// for the `on` task to set the GPIO. Another solution is to stop the sequence, i.e.,
// conditionally call the `off` task instead. The command interpreter would then
// trigger a new sequence when an "on" command is detected. (Should you allow multiple, overlapping
// sequences? Why not, could be cool ;)
// The main advantage of the stopping approach is that the system will be truly idle
// when not blinking, and thus more power efficient.
//
// You can reuse the code for setting up and controlling the GPIO/led.
//
// You can come up with various extensions to this application, setting
// the duty cycle (on/off ratio in %), etc.
//
// Commit your solution (bare10_3)
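//
// A rough sketch of the periodic `on`/`off` tasks mentioned in the tips above
// (assumptions: the RTFM 0.5 `schedule` API with the CYCCNT monotonic timer, i.e.
// `#[app(.., monotonic = rtfm::cyccnt::CYCCNT)]` and the cycle counter enabled in
// `init`; `PERIOD` (half-period in clock cycles) and `LED` are hypothetical
// resources, not present in this file):
//
// use rtfm::cyccnt::U32Ext as _;
//
// #[task(priority = 4, resources = [PERIOD, LED], schedule = [off])]
// fn on(cx: on::Context) {
//     let _ = cx.resources.LED.set_high();                        // led on
//     let half_period = *cx.resources.PERIOD;                     // highest priority user => direct access
//     cx.schedule.off(cx.scheduled + half_period.cycles()).ok();  // schedule `off` one half-period later
// }
//
// #[task(priority = 4, resources = [PERIOD, LED], schedule = [on])]
// fn off(cx: off::Context) {
//     let _ = cx.resources.LED.set_low();                         // led off
//     let half_period = *cx.resources.PERIOD;
//     cx.schedule.on(cx.scheduled + half_period.cycles()).ok();   // schedule `on` one half-period later
// }
//
// The low priority interpreter updates `PERIOD` through `lock`, and `init` starts
// the sequence with an initial `spawn.on()` (or `schedule.on(..)`).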
......@@ -40,6 +40,7 @@ fn main() -> ! {
let tx = gpioa.pa2.into_alternate_af7();
let rx = gpioa.pa3.into_alternate_af7(); // try comment out
// let rx = gpioa.pa3.into_alternate_af6(); // try uncomment
let serial = Serial::usart2(
......
//! bare8.rs
//!
//! The RTFM framework
//!
//! What it covers:
//! - utilizing the RTFM framework for serial communication
//! - singletons (entities with a single instance)
//! - owned resources
//! - peripheral access in RTFM
//! - polling in `idle`
#![no_main]
#![no_std]
extern crate panic_halt;
use cortex_m::iprintln;
use nb::block;
extern crate stm32f4xx_hal as hal;
use crate::hal::prelude::*;
use crate::hal::serial::{config::Config, Rx, Serial, Tx};
use hal::stm32::{ITM, USART2};
use rtfm::app;
#[app(device = hal::stm32, peripherals = true)]
const APP: () = {
struct Resources {
// Late resources
TX: Tx<USART2>,
RX: Rx<USART2>,
ITM: ITM,
}
// init runs in an interrupt free section
#[init]
fn init(cx: init::Context) -> init::LateResources {
let mut core = cx.core;
let device = cx.device;
let stim = &mut core.ITM.stim[0];
iprintln!(stim, "bare8");
let rcc = device.RCC.constrain();
// 16 MHz (default, all clocks)
let clocks = rcc.cfgr.freeze();
let gpioa = device.GPIOA.split();
let tx = gpioa.pa2.into_alternate_af7();
let rx = gpioa.pa3.into_alternate_af7();
let serial = Serial::usart2(
device.USART2,
(tx, rx),
Config::default().baudrate(115_200.bps()),
clocks,
)
.unwrap();
// Separate out the sender and receiver of the serial port
let (tx, rx) = serial.split();
// Late resources
init::LateResources {
TX: tx,
RX: rx,
ITM: core.ITM,
}
}
// idle may be interrupted by other interrupts/tasks in the system
#[idle(resources = [RX, TX, ITM])]
fn idle(cx: idle::Context) -> ! {
let rx = cx.resources.RX;
let tx = cx.resources.TX;
let stim = &mut cx.resources.ITM.stim[0];
loop {
match block!(rx.read()) {
Ok(byte) => {
iprintln!(stim, "Ok {:?}", byte);
tx.write(byte).unwrap();
}
Err(err) => {
iprintln!(stim, "Error {:?}", err);
}
}
}
}
};
// Optional assignment
// 0. Compile and run the example. Notice, we use the default 16MHz clock.
//
// > cargo build --example bare8 --features "rtfm"
// (or use the vscode build task)
//
// 1. What is the behavior in comparison to bare7.4 and bare7.5?
//
// ** your answer here **
//
// Commit your answer (bare8_1)
//
// 2. Add a local variable `received` that counts the number of bytes received.
// Add a local variable `errors` that counts the number of errors.
//
// Adjust the ITM trace to include the additional information.
//
// Commit your development (bare8_2)
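//
// A minimal sketch for this step (plain local variables suffice, since `idle` never
// returns and owns its stack frame):
//
// #[idle(resources = [RX, TX, ITM])]
// fn idle(cx: idle::Context) -> ! {
//     let rx = cx.resources.RX;
//     let tx = cx.resources.TX;
//     let stim = &mut cx.resources.ITM.stim[0];
//     let mut received: u32 = 0;  // bytes successfully read
//     let mut errors: u32 = 0;    // read errors seen so far
//     loop {
//         match block!(rx.read()) {
//             Ok(byte) => {
//                 received += 1;
//                 iprintln!(stim, "Ok {:?} (received {}, errors {})", byte, received, errors);
//                 tx.write(byte).unwrap();
//             }
//             Err(err) => {
//                 errors += 1;
//                 iprintln!(stim, "Error {:?} (received {}, errors {})", err, received, errors);
//             }
//         }
//     }
// }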
//
// 3. How did the added tracing affect the performance
// (are you now losing more data)?
//
// ** your answer here **
//
// Commit your answer (bare8_3)
//
// 4. *Optional
// Compile and run the program in release mode.
// If using vscode, look at the `.vscode` folder `tasks.json` and `launch.json`,
// you may need to add a new "profile" (a bit of copy paste).
//
// How did the optimized build compare to the debug build (performance/lost bytes)?
//
// ** your answer here **
//
// Commit your answer (bare8_4)
//! bare9.rs
//!
//! Heapless
//!
//! What it covers:
//! - Heapless Ringbuffer
//! - Heapless Producer/Consumer lockfree data access
//! - Interrupt driven I/O
//!
#![no_main]
#![no_std]
extern crate panic_halt;
use cortex_m::{asm, iprintln};
extern crate stm32f4xx_hal as hal;
use crate::hal::prelude::*;
use crate::hal::serial::{config::Config, Event, Rx, Serial, Tx};
use hal::stm32::ITM;
use heapless::consts::*;
use heapless::spsc::{Consumer, Producer, Queue};
use nb::block;
use rtfm::app;
#[app(device = hal::stm32, peripherals = true)]
const APP: () = {
struct Resources {
// Late resources
TX: Tx<hal::stm32::USART2>,
RX: Rx<hal::stm32::USART2>,
PRODUCER: Producer<'static, u8, U3>,
CONSUMER: Consumer<'static, u8, U3>,
ITM: ITM,
// An initialized resource
#[init(None)]
RB: Option<Queue<u8, U3>>,
}
// init runs in an interrupt free section
#[init(resources = [RB])]
fn init(cx: init::Context) -> init::LateResources {
let mut core = cx.core;
let device = cx.device;
// A ring buffer for our data
*cx.resources.RB = Some(Queue::new());
// Split into producer/consumer pair
let (producer, consumer) = cx.resources.RB.as_mut().unwrap().split();
let stim = &mut core.ITM.stim[0];
iprintln!(stim, "bare9");
let rcc = device.RCC.constrain();
// 16 MHz (default, all clocks)
let clocks = rcc.cfgr.freeze();
let gpioa = device.GPIOA.split();
let tx = gpioa.pa2.into_alternate_af7();
let rx = gpioa.pa3.into_alternate_af7();
let mut serial = Serial::usart2(
device.USART2,
(tx, rx),
Config::default().baudrate(115_200.bps()),
clocks,
)
.unwrap();
// generate interrupt on Rxne
serial.listen(Event::Rxne);
// Separate out the sender and receiver of the serial port
let (tx, rx) = serial.split();
// Late resources
init::LateResources {
// Our split queue
PRODUCER: producer,
CONSUMER: consumer,
// Our split serial
TX: tx,
RX: rx,
// For debugging
ITM: core.ITM,
}
}
// idle may be interrupted by other interrupts/tasks in the system
#[idle(resources = [ITM, CONSUMER])]
fn idle(cx: idle::Context) -> ! {
let stim = &mut cx.resources.ITM.stim[0];
loop {
while let Some(byte) = cx.resources.CONSUMER.dequeue() {
iprintln!(stim, "data {}", byte);
}
iprintln!(stim, "goto sleep");
asm::wfi();
iprintln!(stim, "woken..");
}
}
// task run on USART2 interrupt (set to fire for each byte received)
#[task(binds = USART2, resources = [RX, TX, PRODUCER])]
fn usart2(cx: usart2::Context) {
let rx = cx.resources.RX;
let tx = cx.resources.TX;
// at this point we know there must be a byte to read
match rx.read() {
Ok(byte) => {
tx.write(byte).unwrap();
match cx.resources.PRODUCER.enqueue(byte) {
Ok(_) => {}
Err(_) => asm::bkpt(),
}
}
Err(_err) => asm::bkpt(),
}
}
};
// Optional
// 0. Compile and run the project at 16MHz in release mode
// make sure it's running (not paused).
//
// > cargo build --example bare9 --features "rtfm" --release
// (or use the vscode build task)
//
// 1. Start a terminal program, connect with 115200 8N1
//
// You should now be able to send data and receive an echo from the MCU
//
// Try sending: "abcd" as a single sequence (set the option No end in moserial),
// don't send the quotation marks, just abcd.
//
// What did you receive, and what was the output of the ITM trace?
//
// ** your answer here **
//
// Did you experience any over-run errors?
//
// ** your answer here **
//
// Why does it behave differently than bare7/bare8?
//
// ** your answer here **
//
// Commit your answers (bare9_1)
//
// 2. Compile and run the project at 16MHz in debug mode.
//
// > cargo build --example bare9 --features "rtfm"
// (or use the vscode build task)
//
// Try sending: "abcd" as a single sequence (set the option No end in moserial),
// don't send the quotation marks, just abcd.
//
// What did you receive, and what was the output of the ITM trace?
//
// ** your answer here **
//
// Did you experience any over-run errors?
//
// ** your answer here **
//
// Why does it behave differently than in release mode?
// Recall how the execution overhead changed with optimization level.
//
// ** your answer here **
//
// Commit your answers (bare9_2)
//
// Discussion:
//
// The concurrency model behind RTFM offers
// 1. Race-free resource access
//
// 2. Deadlock-free execution
//
// 3. Shared execution stack (no pre-allocated stack regions)
//
// 4. Bounded priority inversion
//
// 5. Theoretical underpinning ->
// + (pen and paper) proofs of soundness
// + schedulability analysis
// + response time analysis
// + stack memory analysis
// + ... leverages >25 years of research in the real-time community
// based on the seminal work of Baker in the early 1990s
// (known as the Stack Resource Policy, SRP)
//
// Our implementation in Rust offers
// 1. compile-time checking and analysis of tasks and resources
// + the API implementation together with the Rust compiler will ensure that
// both RTFM (SRP) soundness and the Rust memory model invariants
// are upheld (under all circumstances).
//
// 2. arguably the world's fastest real-time scheduler *
// + task invocation 0-cycle OH on top of HW interrupt handling
// + 2 cycle OH for locking a shared resource (on lock/claim entry)
// + 1 cycle OH for releasing a shared resource (on lock/claim exit)
// (a lock/claim sketch is shown after this list)
//
// 3. arguably the world's most memory-efficient scheduler *
// + 1 byte stack memory OH for each (nested) lock/claim
// (no additional book-keeping during run-time)
//
// * applies to static task/resource models with single core
// pre-emptive, static priority scheduling
//
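// For reference, a lock/claim in application code looks like this (a sketch;
// `SHARED` is a hypothetical resource shared with a higher priority task):
//
// #[task(priority = 1, resources = [SHARED])]
// fn low(mut cx: low::Context) {
//     cx.resources.SHARED.lock(|shared| {
//         // within the closure the resource ceiling is raised, so the
//         // higher priority task sharing `SHARED` cannot preempt us here
//         *shared += 1;
//     });
// }
//
// The 2/1 cycle OH quoted above is paid on entry/exit of that closure.
//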
// In comparison, "real-time" schedulers for threaded models (like FreeRTOS) have
// - CPU and memory OH magnitudes larger
// - ... and, what's worse, OH that is typically unbounded (no proofs of worst case)
// Additionally, threaded models typically impose
// - potential race conditions (up to the user to verify)
// - potential deadlocks (up to the implementation)
// - potential unbounded priority inversion (up to the implementation)
//
// However, Rust RTFM (currently) targets ONLY STATIC SYSTEMS;
// there is no notion of dynamically creating new execution contexts/threads,
// so a direct comparison is not completely fair.
//
// On the other hand, embedded applications are typically static by nature,
// so a STATIC model is, to that end, better suited.
//
// RTFM is reactive by nature: a task executes to completion, triggered
// by an internal or external event (where an interrupt is an external event
// from the environment, like a HW peripheral such as the USART2).
//
// Threads, on the other hand, are concurrent and infinite by nature,
// actively blocking/yielding while awaiting stimuli.
// The scheduler then needs to keep track of all threads and at some point choose
// to dispatch the awaiting thread. So reactivity is bottle-necked at the point
// of scheduling by queue management, context switching and other additional
// book-keeping.
//
// In essence, the thread scheduler tries to re-establish the reactivity that
// was there from the beginning (interrupts), a battle that cannot be won...