diff --git a/examples/timing_exam.rs b/examples/timing_exam.rs
index b4f7c5821ecf456e281e70bb96bd74e19bca8cb1..b7c03573ed610ba8d3e838159468a2ba20ab9245 100644
--- a/examples/timing_exam.rs
+++ b/examples/timing_exam.rs
@@ -7,8 +7,15 @@
 
 use cortex_m::{asm, peripheral::DWT};
 use panic_halt as _;
+use rtic::cyccnt::{Duration, Instant, U32Ext};
 use stm32f4::stm32f411;
-use rtic::cyccnt::{Instant, Duration, U32Ext};
+
+#[no_mangle]
+static mut T1_MAX_RP: u32 = 0;
+#[no_mangle]
+static mut T2_MAX_RP: u32 = 0;
+#[no_mangle]
+static mut T3_MAX_RP: u32 = 0;
 
 #[rtic::app(device = stm32f411, monotonic = rtic::cyccnt::CYCCNT)]
 const APP: () = {
@@ -24,36 +31,55 @@ const APP: () = {
         // Initialize (enable) the monotonic timer (CYCCNT)
         cx.core.DCB.enable_trace();
         cx.core.DWT.enable_cycle_counter();
-        // cx.schedule.t1(cx.start).unwrap();
-        // cx.schedule.t2(cx.start).unwrap();
-        // cx.schedule.t3(cx.start).unwrap();
+        cx.schedule.t1(cx.start + 100_000.cycles()).unwrap();
+        cx.schedule.t2(cx.start + 200_000.cycles()).unwrap();
+        cx.schedule.t3(cx.start + 50_000.cycles()).unwrap();
     }
 
     // Deadline 100, Inter-arrival 100
+    #[inline(never)]
     #[task(schedule = [t1], priority = 1)]
     fn t1(cx: t1::Context) {
-        // 1) your code here to emulate timing behavior of t1
-
-        // 2) your code here to check for overrun
+        asm::bkpt();
         cx.schedule.t1(cx.scheduled + 100_000.cycles()).unwrap();
+        asm::bkpt();
+
+        // emulates timing behavior of t1
+        cortex_m::asm::delay(10_000);
+        asm::bkpt();
+
+        // 2) your code here to update T1_MAX_RP and
+        // break if deadline missed
     }
 
     // Deadline 200, Inter-arrival 200
+    #[inline(never)]
     #[task(schedule = [t2], resources = [R1, R2], priority = 2)]
     fn t2(cx: t2::Context) {
+        asm::bkpt();
+        cx.schedule.t2(cx.scheduled + 200_000.cycles()).unwrap();
+        asm::bkpt();
+
         // 1) your code here to emulate timing behavior of t2
+        asm::bkpt();
 
-        // 2) your code here to check for overrun
-        cx.schedule.t2(cx.scheduled + 200_000.cycles()).unwrap();
+        // 2) your code here to update T2_MAX_RP and
+        // break if deadline missed
     }
 
     // Deadline 50, Inter-arrival 50
+    #[inline(never)]
     #[task(schedule = [t3], resources = [R2], priority = 3)]
     fn t3(cx: t3::Context) {
+        asm::bkpt();
+        cx.schedule.t3(cx.scheduled + 50_000.cycles()).unwrap();
+        asm::bkpt();
+
         // 1) your code here to emulate timing behavior of t3
+        asm::bkpt();
 
-        // 2) your code here to check for overrun
-        cx.schedule.t3(cx.scheduled + 50_000.cycles()).unwrap();
+        // 2) your code here to update T3_MAX_RP and
+        // break if deadline missed
     }
 
     // RTIC requires that unused interrupts are declared in an extern block when
@@ -66,34 +92,206 @@ const APP: () = {
     }
 };
 
-fn delay_duration(from: Instant, until: Duration) {
-    // implement a delay that busy waits for a Duration of time
-    // Use `cargo doc` to generate documentation to lookup `Duration`
-    // and `Instance` and corresponding operations and conversions.
-    //
-    // In particular, the `elapsed` is useful.
-    // Notice you can compare durations.
-}
-
-// 1) For this assignment you should first generate a task set that
-// matches the example task set from `klee_tutorial/srp_analysis/main.rs`.
+// !!!! NOTICE !!!!
+//
+// Use either vscode with the `Cortex Nightly` launch profile,
+// or compile with the feature `--features nightly` in order to
+// get inlined assembly!
 //
-// The task set should have the same relative timing properties as given in `main.rs`.
+// 1) For this assignment you should first generate a task set that
+// matches the example task set from `klee_tutorial/srp_analysis/main.rs`.
 //
 // Assume that each time unit amounts to 1_000 clock cycles, then
 // the execution time of `t1` should be 10_000 clock cycles.
 //
-// To emulate corresponding workload you should implement `delay_duration`
-// and use that to get the relative timings.
-//
-// So, instead of measuring execution time of an existing application, you are to create
-// one with given timing properties.
+// So, instead of measuring the execution time of an existing application,
+// you are to create a task set according to given timing properties.
+//
+// Do this naively, by just calling `asm::delay(x)`, where x
+// amounts to the number of clock cycles to spend (see the `t2` sketch after this diff).
+//
+// Commit your repository once your task set is implemented.
+//
+// 2) Code instrumentation:
+// Now it's time to see if your scheduling analysis is accurate
+// in comparison to a real running system.
+//
+// First explain in your own words how the `Instant` is
+// used to generate periodic task instance arrivals.
+//
+// `cx.schedule.t1(cx.scheduled + 100_000.cycles()).unwrap();`
+//
+// [Your answer here]
+//
+// Explain in your own words the difference between:
+//
+// `cx.schedule.t1(Instant::now() + 100_000.cycles()).unwrap();`
+// and
+// `cx.schedule.t1(cx.scheduled + 100_000.cycles()).unwrap();`
+//
+// [Your answer here]
+//
+// Explain in your own words why we use the latter
+// in order to generate a periodic task.
+//
+// [Your answer here]
+//
+// Hint, look at https://rtic.rs/0.5/book/en/by-example/timer-queue.html
+//
+// Once you understand how `Instant` is used, document your crate:
+// > cargo doc --open
+//
+// Once you have the documentation open, search for `Instant`.
+// Hint, you can search the docs by pressing S.
+//
+// Now figure out how to calculate the actual response time.
+// If the new response time is larger than the stored response time,
+// then update it (`T1_MAX_RP`, `T2_MAX_RP`, `T3_MAX_RP` respectively).
+// If the response time is larger than the deadline, you should
+// hit an `asm::bkpt()` to indicate that an error occurred.
+//
+// You will need `unsafe` code to access the global variables.
+//
+// Explain why this is needed (there is a good reason for it).
+//
+// [Your answer here]
+//
+// Implement this functionality for all tasks (see the sketch after this diff).
+//
+// Commit your repository once you are done with the instrumentation.
+//
+// 3) Code Testing:
+//
+// Once the instrumentation code is in place, it's finally time
+// to test/probe/validate the system.
+//
+// Make sure that all tasks are initially scheduled from `init`.
+//
+// You can put WATCHES in vscode for the symbols
+// WATCH
+// `T1_MAX_RP`
+// `T2_MAX_RP`
+// `T3_MAX_RP`
+// to see them being updated during the test.
+//
+// The first breakpoint hit should be:
+// fn t3(cx: t3::Context) {
+//     asm::bkpt();
+//
+// Check the value of the CYCCNT register.
+// (In vscode look under CORTEX PERIPHERALS > DWT > CYCCNT.)
+//
+// Your values may differ slightly but should be in the same
+// range (if not, check your task implementation(s)).
+//
+// Task Entry Time    Task Nr   Response-time Update
+// 50240              t3        -
+//                              30362
+// 100295             t3
+//                              30426
+//
+// 130595             t1
+//
+// At this point we can ask ourselves a number of
+// interesting questions. Try answering them in your own words.
+//
+// 3A) Why is there an offset of 50240 (instead of 50000)?
+//
+// [Your answer here]
+//
+// 3B) Why is the calculated response time larger than the
+// delays you inserted to simulate workload?
+//
+// [Your answer here]
+//
+// 3C) Why is the second arrival of `t3` further delayed?
+//
+// [Your answer here]
+// Hint, think about what happens at time 100_000: what tasks
+// are set to `arrive` at that point compared to time 50_000?
+//
+// 3D) What is the scheduled time for task `t1`? (130595 is the
+// measured time according to CYCCNT.)
+//
+// [Your answer here]
+//
+// Why is the measured value much higher than the scheduled time?
+//
+// [Your answer here]
+//
+// Now you can continue until you get a first update of `T1_MAX_RP`.
+//
+// What is the first update of `T1_MAX_RP`?
+//
+// [Your answer here]
+//
+// Explain the obtained value in terms of:
+// execution time, blocking and preemptions
+// (that occurred for this task instance).
+//
+// [Your answer here]
+//
+// Now continue until you get a first timing measurement for `T2_MAX_RP`.
+//
+// What is the first update of `T2_MAX_RP`?
+//
+// [Your answer here]
+//
+// Now continue until you get a second timing measurement for `T3_MAX_RP`.
+//
+// What is the second update of `T3_MAX_RP`?
+//
+// [Your answer here]
+//
+// Now you should have ended up in a deadline miss, right?!
+//
+// Why did this happen?
+//
+// [Your answer here]
+//
+// Compare that to the result obtained from your analysis tool.
+//
+// Do they differ, and if so, why?
+//
+// [Your answer here]
+//
+// Commit your repository once you have completed this part.
+//
+// 4) Delay tuning.
+//
+// So there was some discrepancy between the timing properties
+// introduced by `asm::delay` and the real measurements.
+//
+// Adjust the delays to compensate for the overhead, to make them fit
+// the theoretical task set (see the tuning sketch after this diff).
+//
+// In order to do so, test each task individually: schedule only one
+// task from `init` at a time.
+//
+// You may need to insert additional breakpoints to tune the timing.
+//
+// Once you are convinced that each task now adheres to
+// the timing specification, you can re-run part 3.
+//
+// If some task still misses its deadline, go back and adjust
+// the timing until it just passes.
+//
+// Commit your tuned task set.
+//
+// 5) Final remarks and learning outcomes.
+//
+// This exercise is of course a bit contrived; in the normal case
+// you would start out with a real task set and then pass it
+// on to analysis.
+//
+// Essay question:
+//
+// Reflect in your own words on:
 //
-// To verify that you have implemented the tasks correctly you should trigger them
-// one at a time, put breakpoints at each point of interest and check the CYCCNT manually.
+// - RTIC and scheduling overhead
+// - Coupling between the theoretical model and measurements
+// - What an ideal tool for static analysis of RTIC models would look like
 //
-// (Verify the timing properties for each task separately.)
-//
-// Commit your repository once you have done all validation.
+// [Your ideas and reflections here]
 //
-// 2)
\ No newline at end of file
+// Commit your thoughts, we will discuss further when we meet.
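
// ---------------------------------------------------------------------------
// Sketch for part 1 (for reference, outside the patch above): `t2` filled in
// following the same pattern the patch already uses for `t1`. All cycle
// counts other than the 200_000 period are assumed placeholders; take the
// actual execution times and critical sections for `t2` from
// `klee_tutorial/srp_analysis/main.rs` (1 time unit = 1_000 clock cycles).

    // Deadline 200, Inter-arrival 200
    #[inline(never)]
    #[task(schedule = [t2], resources = [R1, R2], priority = 2)]
    fn t2(mut cx: t2::Context) {
        asm::bkpt();
        // re-schedule relative to `cx.scheduled` (as for `t1` in the patch)
        // so that the arrival pattern stays strictly periodic
        cx.schedule.t2(cx.scheduled + 200_000.cycles()).unwrap();
        asm::bkpt();

        // 1) emulate the timing behavior of t2 (placeholder numbers):
        //    part of the work is done while holding R2, which is what
        //    creates blocking for the higher-priority `t3`
        cx.resources.R2.lock(|_r2| {
            cortex_m::asm::delay(20_000);
        });
        cortex_m::asm::delay(10_000);
        asm::bkpt();

        // 2) response-time instrumentation goes here (see the part 2 sketch)
    }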
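
// ---------------------------------------------------------------------------
// Sketch for part 2 (for reference, outside the patch above): one possible
// shape of the "update T1_MAX_RP and break if deadline missed" step, placed
// where the `// 2)` comment sits at the end of `t1`. It assumes the
// `Instant::elapsed` and `Duration::as_cycles` APIs of `rtic::cyccnt`
// (RTIC 0.5); the same pattern applies to `t2`/`t3` with their own globals
// and deadlines.

        // 2) update T1_MAX_RP and break if the deadline was missed
        //
        // `cx.scheduled` is the Instant at which this instance was supposed
        // to start, so the time elapsed since then is the response time of
        // the instance (in clock cycles).
        let response_time = cx.scheduled.elapsed().as_cycles();

        // Accessing a `static mut` is always `unsafe` in Rust, hence the
        // unsafe block around the global update.
        unsafe {
            if response_time > T1_MAX_RP {
                T1_MAX_RP = response_time;
            }
        }

        // Deadline of t1 is 100_000 cycles; stop in the debugger on a miss.
        if response_time > 100_000 {
            asm::bkpt();
        }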
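
// ---------------------------------------------------------------------------
// Sketch for part 4 (for reference, outside the patch above): one way to
// compensate for the measured overhead when tuning the delays. The OVERHEAD
// value is purely a hypothetical placeholder; measure the actual
// per-instance overhead with the breakpoints and CYCCNT before picking it.

        // assumed per-instance overhead in cycles (hypothetical placeholder)
        const OVERHEAD: u32 = 2_000;

        // emulate the timing behavior of t1, minus the measured overhead,
        // so that the observed timing matches the theoretical 10_000 cycles
        cortex_m::asm::delay(10_000 - OVERHEAD);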