From a8b6f47bd08396bd1070bbe87eab268c413ca671 Mon Sep 17 00:00:00 2001 From: Per <Per Lindgren> Date: Thu, 16 Jan 2020 02:07:10 +0100 Subject: [PATCH] EXAM --- EXAM.md | 157 +++++++++++++++++++++++++++++++++++++ runner/src/bin/generate.rs | 76 ++++++++++++++++++ runner/src/common.rs | 72 +++++++++++++++++ 3 files changed, 305 insertions(+) create mode 100644 EXAM.md create mode 100644 runner/src/bin/generate.rs create mode 100644 runner/src/common.rs diff --git a/EXAM.md b/EXAM.md new file mode 100644 index 0000000..50cd424 --- /dev/null +++ b/EXAM.md @@ -0,0 +1,157 @@
+# Home Exam January 2020.
+
+## Grading:
+
+3) Implement the response time analysis, and overall schedulability.
+
+4) Generate a report on the analysis results; this could be a generated html (or xml, using some xml rendering engine) or however you feel the results are best reported and visualized, as discussed in class.
+
+5) Integrate your analysis into the “trustit” framework (KLEE + automated test bed). The complete testbed will be provided later.
+
+## Procedure
+
+Start by reading 1, 2 and 3:
+
+1) [A Stack-Based Resource Allocation Policy for Realtime Processes](https://www.math.unipd.it/~tullio/RTS/2009/Baker-1991.pdf), which refers to
+
+2) [Stack-Based Scheduling of Realtime Processes](https://link.springer.com/content/pdf/10.1007/BF00365393.pdf), a journal publication based on the technical report [3] of the 1991 paper. The underlying model is the same in both papers.
+
+3) [Rate Monotonic Analysis](http://www.di.unito.it/~bini/publications/2003BinButBut.pdf), especially equation 3 is of interest to us. (It should be familiar from the real-time systems course you have taken previously.)
+
+## Presentation
+
+Make a git repo of your solution(s) with documentation (README.md) sufficient to reproduce your results.
+
+Notify me (Telegram or mail) and we will decide on a time for an individual presentation. 30 minutes should be sufficient.
+
+---
+
+## Definitions
+
+A task `t` is defined by:
+
+- `P(t)` the priority of task `t`
+- `D(t)` the deadline of task `t`
+- `A(t)` the inter-arrival of task `t`
+
+A resource `r` is defined by:
+
+- `π(r)` the highest priority of any task accessing `r`
+
+For SRP based analysis we assume each task performs a finite sequence of operations (run-to-end or run-to-completion semantics). During execution, a task can claim resources `Rj` in nested fashion; sequentially re-claiming resources is allowed, but NOT re-claiming an already held resource.
+
+E.g., a possible trace for a task can look like:
+
+ `[t:...[r1:...[r2:...]...]...[r2:...]...]`, where `[r:...]` denotes a critical section of task `t` holding the resource `r`. In this case the task starts, and at some point claims `r1` and inside that critical section claims `r2` (a nested claim); at some point it exits `r2`, exits `r1` and continues executing until it executes a critical section on `r2`, and then finally executes until completion.
+
+## Grade 3
+
+Analysis:
+
+### 1. Total CPU utilization
+
+WCET for tasks and critical sections
+
+In general, determining WCET is a rather tricky problem; in our case we adopt a measurement based technique that spans all possible paths of the task. Tests triggering the execution paths are automatically generated by symbolic execution. To correctly take concurrency into account, resource state is treated symbolically. Thus, for a critical section, the resource is given a fresh (new) symbolic value for each critical section. Inside the critical section we are ensured exclusive access (and thus the value can be constrained). The resource model can be further extended by contracts (as shown by the `assume_assert.rs` example).
+
+We model hardware (peripherals) as shared resources (shared by the environment), with *atomic* read/write/modify. Rationale: we must assume that the state of the hardware resources may be changed at any time, thus only *atomic* access can be allowed.
+
+For now, we just assume we have the WCET information, in terms of `start` and `end` time-stamps (`u32`) for each section `[_: ... ]`. We represent that by the `Task` and `Trace` data structures in `common.rs`.
+
+### Total CPU request (or total load factor)
+
+Each task `t` has a WCET `C(t)` and inter-arrival time `A(t)`. The CPU request (or load) inferred by a task is `L(t)` = `C(t)`/`A(t)`. Ask yourself, what is the consequence of `C(t)` > `A(t)`?
+
+We can compute the total CPU request (or load factor) as `Ltot` = sum(`L(t)`) over all tasks `t` in the task set.
+
+Ask yourself, what is the consequence of `Ltot` > 1?
+
+Implement a function taking `Vec<Task>` and returning the load factor.
+
+### Response time (simple over-approximation)
+
+Under SRP, response time can be computed by equation 7.22 in [Hard Real-Time Computing Systems](
https://doc.lagout.org/science/0_Computer%20Science/2_Algorithms/Hard%20Real-Time%20Computing%20Systems_%20Predictable%20Scheduling%20Algorithms%20and%20Applications%20%283rd%20ed.%29%20%5BButtazzo%202011-09-15%5D.pdf).
+
+In general the response time is computed as:
+
+- `R(t)` = `C(t)` + `B(t)` + `I(t)`, where
+  - `B(t)` is the blocking time for task `t`, and
+  - `I(t)` is the interference (preemptions) to task `t`
+
+For a task set to be schedulable under SRP we have two requirements:
+
+- `Ltot` < 1
+- `R(t)` < `D(t)`, for all tasks. (`R(t)` > `D(t)` implies a deadline miss.)
+
+#### Blocking
+
+SRP brings the outstanding property of single blocking. In words: a task `t` is blocked by the maximal critical section among those in which a task `l` with lower priority (`P(l)` < `P(t)`) holds a resource `l_r` with a ceiling `π(l_r)` equal to or higher than the priority of `t`.
+
+- `B(t)` = max(`C(l_r)`), where `P(l)` < `P(t)`, `π(l_r)` >= `P(t)`
+
+Implement a function that takes a `Task` and returns the corresponding blocking time.
+
+#### Preemptions
+
+- `I(t)` = sum(ceil(`Bp(t)`/`A(h)`) * `C(h)`), for all tasks `h` with `P(h)` >= `P(t)`, where
+- `Bp(t)` is the *busy-period*
+
+We can make the over-approximation `Bp(t)` = `D(t)` (assuming the worst allowed *busy-period*).
+
+Implement a function that takes a `Task` and returns the corresponding preemption time.
+
+Now make a function that computes the response time for a `Task`, by combining `C(t)`, `B(t)` and `I(t)`.
+
+Finally, make a function that iterates over the task set and returns a vector containing:
+`Vec<Task, R(t), C(t), B(t), I(t)>`. Just a simple `println!` of that vector gives the essential information on the analysis.
+
+#### Preemptions revisited
+
+The *busy-period* is in `7.22` computed by a recurrence equation.
+
+Implement the recurrence equation starting from the base case `C(t)`. The recurrence might diverge in case `Bp(t)` > `A(t)`; this is a pathological case where the task becomes non-schedulable, so terminate the recurrence in that case. You might want to indicate that a non-feasible response time has been reached by using the `Result<u32, ()>` type or some other means, e.g., `Option<u32>`.
+
+You can let your `preemption` function take a parameter indicating if the exact solution or the approximation should be used.
+
+## Grade 4
+
+Here you can go wild, and use your creativity to present the task set and the results of the analysis in the most informative manner. We will discuss some possible visualisations during class.
+
+## Grade 5
+
+If you aim for the highest grade, let me know and I will hook you up with the current state of the development. The goal is to derive the task set characterisation by means of the automated test-bed (test case generation + test runner based on the `probe.rs` library). All the primitives are there, and re-implementing (back-porting) previous work based on `RTFM3` is mostly an engineering effort.
+
+---
+
+## Resources
+
+`common.rs` gives the basic data structures, and some helper functions.
+
+`generate.rs` gives an example of how `Tasks` can be manually constructed. This is vastly helpful for your development when getting started.
+
+## Tips
+
+For working with Rust, the standard library documentation is excellent, and easy to search (just press S). In most cases, you will find examples of intended use, and cross referencing is just a click away.
+
+Use the `generate` example to get started. Initially you may simplify it further, reducing the number of tasks and/or resources. Make sure you understand the helper functions given in `common.rs` (your code will likely look quite similar). You might want to add further `common` types and helper functions to streamline your development along the way.
+
+Generate your own task sets to make sure your code works in the general case, not only for the `Tasks` provided. Heads up, I will expose your code to some other more complex task sets.
+
+---
+
+## Robust and Energy Efficient Real-Time Systems
+
+In this part of the course, we have covered:
+
+- Software robustness. We have adopted Rust and Symbolic Execution to achieve guaranteed memory safety and defined behavior (panic-free execution). With this at hand, we have a strong (and theoretically underpinned) foundation for improved robustness and reliability, proven at compile time.
+
+- Real-Time Scheduling and Analysis. SRP provides an execution model and resource management policy with outstanding properties of race- and deadlock-free execution, single blocking and stack sharing. Our Rust RTFM framework provides a correct-by-construction implementation of SRP, exploiting zero-cost (software) abstractions. Using Rust RTFM, resource management and scheduling are done directly by the hardware, which allows for efficiency (zero overhead) and predictability.
+
+  The SRP model is amenable to static analysis, which you have now internalised through an actual implementation of the theoretical foundations.
We have also covered methods for Worst Case Execution Time analysis by cycle-accurate measurements, which in combination with Symbolic Execution for test case generation allows for a high degree of automation.
+
+- Energy Consumption is roughly proportional to the supply voltage (due to static leakage), and exponential to the frequency (due to dynamic/switching activity). In the case of embedded systems, low-power modes allow part of the system to be powered down while retaining sufficient functionality to wake on external (and/or internal) events. In sleep mode, both static and dynamic power dissipation is minimized, typically to the order of uAmp (in comparison to mAmp in run mode).
+
+  Rust RTFM adopts an event-driven approach, allowing the system to automatically sleep in case no further tasks are eligible for scheduling. Moreover, leveraging the zero-cost abstractions in Rust and the guarantees provided by the analysis framework, we do not need to sacrifice correctness/robustness and reliability in order to obtain highly efficient executables.
+
+Robust and Energy Efficient Real-Time Systems for real. This is the Way!
diff --git a/runner/src/bin/generate.rs b/runner/src/bin/generate.rs new file mode 100644 index 0000000..3c862ef --- /dev/null +++ b/runner/src/bin/generate.rs @@ -0,0 +1,76 @@
+// use std::collections::{HashMap, HashSet};
+use runner::common::*;
+
+// Example: manually constructs a small task set (T1..T3) and runs the
+// `pre_analysis` helper on it. Use this as a starting point for the exam
+// assignments; the timing numbers are arbitrary but internally consistent.
+fn main() {
+    // T1: lowest priority, no critical sections (empty `inner` trace),
+    // so its WCET is simply end - start = 10.
+    let t1 = Task {
+        id: "T1".to_string(),
+        prio: 1,
+        deadline: 100,
+        inter_arrival: 100,
+        trace: Trace {
+            id: "T1".to_string(),
+            start: 0,
+            end: 10,
+            inner: vec![],
+        },
+    };
+
+    // T2: claims R1 with a nested claim of R2 inside it, then sequentially
+    // re-claims R1 — the trace shape `[t:..[r1:..[r2:..]..]..[r1:..]..]`
+    // described in EXAM.md.
+    let t2 = Task {
+        id: "T2".to_string(),
+        prio: 2,
+        deadline: 200,
+        inter_arrival: 200,
+        trace: Trace {
+            id: "T2".to_string(),
+            start: 0,
+            end: 30,
+            inner: vec![
+                Trace {
+                    id: "R1".to_string(),
+                    start: 10,
+                    end: 20,
+                    inner: vec![Trace {
+                        id: "R2".to_string(),
+                        start: 12,
+                        end: 16,
+                        inner: vec![],
+                    }],
+                },
+                Trace {
+                    id: "R1".to_string(),
+                    start: 22,
+                    end: 28,
+                    inner: vec![],
+                },
+            ],
+        },
+    };
+
+    // T3: highest priority, a single critical section on R2. Note that R2
+    // is shared with the lower-priority T2, so the ceiling π(R2) becomes 3.
+    let t3 = Task {
+        id: "T3".to_string(),
+        prio: 3,
+        deadline: 50,
+        inter_arrival: 50,
+        trace: Trace {
+            id: "T3".to_string(),
+            start: 0,
+            end: 30,
+            inner: vec![Trace {
+                id: "R2".to_string(),
+                start: 10,
+                end: 20,
+                inner: vec![],
+            }],
+        },
+    };
+
+    // builds a vector of tasks t1, t2, t3
+    let tasks: Tasks = vec![t1, t2, t3];
+
+    println!("tasks {:?}", &tasks);
+    // println!("tot_util {}", tot_util(&tasks));
+
+    // Derive the id→ceiling map and the task→resources map (see common.rs).
+    let (ip, tr) = pre_analysis(&tasks);
+    println!("ip: {:?}", ip);
+    println!("tr: {:?}", tr);
+}
diff --git a/runner/src/common.rs b/runner/src/common.rs new file mode 100644 index 0000000..e7ff2c5 --- /dev/null +++ b/runner/src/common.rs @@ -0,0 +1,72 @@
+use std::collections::{HashMap, HashSet};
+
+// common data structures
+
+// A task as defined in EXAM.md: priority P(t), deadline D(t),
+// inter-arrival A(t), plus its (measured) execution trace.
+#[derive(Debug)]
+pub struct Task {
+    pub id: String,         // task identifier, e.g. "T1"
+    pub prio: u8,           // P(t); higher value = higher priority
+    pub deadline: u32,      // D(t)
+    pub inter_arrival: u32, // A(t)
+    pub trace: Trace,       // outermost section [t: ...] with time-stamps
+}
+
+//#[derive(Debug, Clone)]
+// One section [_: ...] of a trace: `start`/`end` are the WCET time-stamps
+// (u32 ticks), `inner` holds the (possibly nested) critical sections.
+#[derive(Debug)]
+pub struct Trace {
+    pub id: String,         // task id for the outer trace, resource id for a critical section
+    pub start: u32,
+    pub end: u32,
+    pub inner: Vec<Trace>,  // nested critical sections, in claim order
+}
+
+// useful types
+
+// Our task set
+pub type Tasks = Vec<Task>;
+
+// A map from
Task/Resource identifiers to priority
+pub type IdPrio = HashMap<String, u8>;
+
+// A map from Task identifiers to a set of Resource identifiers
+pub type TaskResources = HashMap<String, HashSet<String>>;
+
+// Derives the above maps from a set of tasks.
+//
+// After the pass, `IdPrio` holds, for every task/resource identifier seen
+// in any trace, the highest priority of any task executing it — for a
+// resource `r` this is exactly the SRP ceiling π(r). `TaskResources`
+// records which resources each task claims (directly or nested). Note:
+// tasks with no critical sections get NO entry in `TaskResources`, so
+// look tasks up with `get(...)` rather than indexing.
+pub fn pre_analysis(tasks: &Tasks) -> (IdPrio, TaskResources) {
+    let mut ip = HashMap::new();
+    let mut tr: TaskResources = HashMap::new();
+    for t in tasks {
+        // Walk the whole trace; this also records the task's own id
+        // (the outer trace) with the task's priority.
+        update_prio(t.prio, &t.trace, &mut ip);
+        // Only inner traces are resources; the outer trace is the task itself.
+        for i in &t.trace.inner {
+            update_tr(t.id.clone(), i, &mut tr);
+        }
+    }
+    (ip, tr)
+}
+
+// helper functions
+// Records `prio` for `trace.id` if it exceeds any previously recorded
+// priority (max-accumulation), then recurses into nested critical sections.
+fn update_prio(prio: u8, trace: &Trace, hm: &mut IdPrio) {
+    if let Some(old_prio) = hm.get(&trace.id) {
+        if prio > *old_prio {
+            hm.insert(trace.id.clone(), prio);
+        }
+    } else {
+        hm.insert(trace.id.clone(), prio);
+    }
+    for cs in &trace.inner {
+        update_prio(prio, cs, hm);
+    }
+}
+
+// Adds `trace.id` (a resource) to the set of resources claimed by task `s`,
+// creating the set on first use, then recurses into nested critical sections.
+fn update_tr(s: String, trace: &Trace, trmap: &mut TaskResources) {
+    if let Some(seen) = trmap.get_mut(&s) {
+        seen.insert(trace.id.clone());
+    } else {
+        let mut hs = HashSet::new();
+        hs.insert(trace.id.clone());
+        trmap.insert(s.clone(), hs);
+    }
+    for trace in &trace.inner {
+        update_tr(s.clone(), trace, trmap);
+    }
+}
-- GitLab