mod common;
use common::*;

fn main() {
    let t1 = Task {
        id: "T1".to_string(),
        prio: 1,
        deadline: 100,
        inter_arrival: 100,
        trace: Trace {
            id: "T1".to_string(),
            start: 0,
            end: 10,
            inner: vec![],
        },
    };

    let t2 = Task {
        id: "T2".to_string(),
        prio: 2,
        deadline: 200,
        inter_arrival: 200,
        trace: Trace {
            id: "T2".to_string(),
            start: 0,
            end: 30,
            inner: vec![
                Trace {
                    id: "R1".to_string(),
                    start: 10,
                    end: 20,
                    inner: vec![Trace {
                        id: "R2".to_string(),
                        start: 12,
                        end: 16,
                        inner: vec![],
                    }],
                },
                Trace {
                    id: "R1".to_string(),
                    start: 22,
                    end: 28,
                    inner: vec![],
                },
            ],
        },
    };

    let t3 = Task {
        id: "T3".to_string(),
        prio: 3,
        deadline: 50,
        inter_arrival: 50,
        trace: Trace {
            id: "T3".to_string(),
            start: 0,
            end: 30,
            inner: vec![Trace {
                id: "R2".to_string(),
                start: 10,
                end: 20,
                inner: vec![],
            }],
        },
    };

    // Build a vector of the tasks t1, t2, t3.
    let tasks: Tasks = vec![t1, t2, t3];

    println!("tasks {:?}", &tasks);
    println!("tot_util {}", total_load_factor(&tasks));

    let (ip, tr) = pre_analysis(&tasks);
    println!("ip: {:?}", ip);
    println!("tr: {:?}", tr);

    let analysis = analyse(&tasks, &ip, &tr);
    println!("Analysis {:#?}", analysis);
}

/*
 * Calculates the total load factor (Ltot).
 * Ltot
 *
 * Note: We can compute the total CPU request (or load factor) as Ltot = sum(L(t)),
 * taken over all tasks t in the task set.
 */
fn total_load_factor(tasks: &Tasks) -> f32 {
    let mut ltot: f32 = 0.0;
    for t in tasks {
        ltot += cpu_load(t);
    }
    return ltot;
}

/*
 * Calculates the CPU load of a task t (L(t)).
 * L(t)
 *
 * Note: Each task t has a WCET C(t) and a given (assumed) inter-arrival time A(t).
 * The CPU request (or load) inferred by a task is L(t) = C(t)/A(t).
 * Ask yourself, what is the consequence of C(t) > A(t)?
 * Answer: If C(t) > A(t), then task t requests more CPU time than is available in the
 * worst case, so in the worst case the task will not be able to finish in time.
 */
fn cpu_load(task: &Task) -> f32 {
    return (wcet(&task.trace) as f32) / (task.inter_arrival as f32);
}

/*
 * Worst-case execution time (WCET) of a task t (C(t)).
 * C(t)
 */
fn wcet(trace: &Trace) -> u32 {
    return trace.end.wrapping_sub(trace.start);
}

/*
 * Calculates the response time of a task (R(t)).
 * R(t)
 *
 * Note:
 * R(t) = B(t) + C(t) + I(t), where
 * - B(t) is the blocking time for task t, and
 * - I(t) is the interference (preemptions) to task t.
 */
fn response_time(task: &Task, tasks: &Tasks, ip: &IdPrio, tr: &TaskResources) -> u32 {
    let r: u32 = block_time(task, tasks, ip, tr) + wcet(&task.trace) + interference_time(task, tasks);
    println!("response_time {:?}", r);
    return r;
}

/*
 * Calculates the blocking time for task t (B(t)).
 * B(t)
 *
 * Note: B(t) = max(C(l_r)), taken over the critical sections l_r of all lower-priority
 * tasks l with P(l) < P(t) whose resource ceiling satisfies π(l_r) >= P(t).
 */
fn block_time(task: &Task, tasks: &Tasks, ip: &IdPrio, tr: &TaskResources) -> u32 {
    /*
     * Helper function for finding the trace of a resource using only the resource id and a trace.
     */
    fn find_res_tr<'a>(res_id: &String, trace: &'a Trace) -> Option<&'a Trace> {
        if trace.id == *res_id {
            return Some(trace);
        } else {
            for tr in &trace.inner {
                match find_res_tr(res_id, tr) {
                    Some(val) => return Some(val),
                    None => (),
                }
            }
        }
        return None;
    }

    // Find all lower-priority tasks.
    let mut lower_prio: Vec<&Task> = vec![];
    for t in tasks {
        if t.prio < task.prio {
            lower_prio.push(t);
        }
    }
    //println!("prio {:?}", task.prio);
    //println!("lower_prio {:?}", lower_prio);

    // Among the resources used by the lower-priority tasks, find all resources that can
    // block the task (resources with a ceiling priority higher than or equal to the
    // task's priority).
    let mut block_res: Vec<&Trace> = vec![];
    for lpt in lower_prio {
        match tr.get(&lpt.id) {
            Some(resources) => {
                for r in resources {
                    if *ip.get(r).unwrap() >= task.prio {
                        block_res.push(find_res_tr(r, &lpt.trace).unwrap());
                    }
                }
            }
            None => (),
        };
    }
    //println!("block_res {:?}", block_res);

    // Calculate the maximum WCET among the blocking critical-section traces.
    let mut block_time: u32 = 0;
    for res_trace in block_res {
        let res_wcet = wcet(res_trace);
        if res_wcet > block_time {
            block_time = res_wcet;
        }
    }
    println!("block time {:?}", block_time);
    return block_time;
}

/*
 * Calculates the interference (preemptions) to task t (I(t)).
 * I(t)
 *
 * Note: I(t) = sum(C(h) * ceiling(Bp(t)/A(h))), for all tasks h with P(h) > P(t), where
 * Bp(t) is the busy period of task t.
 */
fn interference_time(task: &Task, tasks: &Tasks) -> u32 {
    let mut interference: u32 = 0;
    for t in tasks {
        if t.prio > task.prio {
            // Number of preemptions by the higher-priority task t during the busy period
            // of the task under analysis: ceiling(Bp(task)/A(t)).
            interference +=
                wcet(&t.trace) * (((busy_period(task) as f32) / (t.inter_arrival as f32)).ceil() as u32);
        }
    }
    println!("interference_time {:?}", interference);
    return interference;
}

/*
 * Calculates the busy period of task t (Bp(t)).
 * Bp(t)
 *
 * Note: We can over-approximate the busy period as Bp(i) = D(i) (assuming the worst
 * allowed busy period), where D(i) is the deadline of task i.
 */
fn busy_period(task: &Task) -> u32 {
    return task.deadline;
}

/*
 * Analyse tasks.
 *
 * Note: Finally, make a function that iterates over the task set and returns a vector
 * containing tuples (Task, R(t), C(t), B(t), I(t)). A simple println! of that vector
 * gives the essential information of the analysis.
 */
fn analyse(tasks: &Tasks, ip: &IdPrio, tr: &TaskResources) -> Vec<(Task, u32, u32, u32, u32)> {
    let mut analysis: Vec<(Task, u32, u32, u32, u32)> = vec![];
    for t in tasks {
        let r_t = response_time(t, tasks, ip, tr);
        let c_t = wcet(&t.trace);
        let b_t = block_time(t, tasks, ip, tr);
        let i_t = interference_time(t, tasks);
        analysis.push((t.clone(), r_t, c_t, b_t, i_t));
    }
    return analysis;
}
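/*
 * Worked example (added for illustration, not part of the original assignment text):
 * for the task set constructed in `main`, the quantities computed above work out as
 * follows. The blocking figures assume that `pre_analysis` assigns each resource a
 * ceiling equal to the highest priority among the tasks using it (π(R1) = 2, π(R2) = 3);
 * the rest follows directly from the formulas in this file.
 *
 *   C(T1) = 10, C(T2) = 30, C(T3) = 30
 *   Ltot  = 10/100 + 30/200 + 30/50 = 0.1 + 0.15 + 0.6 = 0.85
 *
 *   I(T1) = C(T2)*ceil(D(T1)/A(T2)) + C(T3)*ceil(D(T1)/A(T3))
 *         = 30*ceil(100/200) + 30*ceil(100/50) = 30 + 60 = 90
 *   I(T2) = 30*ceil(200/50) = 120
 *   I(T3) = 0                 (no higher-priority tasks)
 *
 *   B(T1) = B(T2) = 0         (no blocking critical sections in lower-priority tasks)
 *   B(T3) = C(R2 in T2) = 16 - 12 = 4
 *
 *   R(T1) = 0 + 10 + 90  = 100 <= D(T1) = 100
 *   R(T2) = 0 + 30 + 120 = 150 <= D(T2) = 200
 *   R(T3) = 4 + 30 + 0   = 34  <= D(T3) = 50
 */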
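/*
 * The analysis above reports R(t) but does not draw a conclusion from it. A natural
 * follow-up is the schedulability check R(t) <= D(t) for every task. The helper below
 * is a minimal sketch of such a check over the tuples returned by `analyse`; the name
 * `all_deadlines_met` is not part of the original assignment and it is not called
 * from `main`.
 */
#[allow(dead_code)]
fn all_deadlines_met(analysis: &[(Task, u32, u32, u32, u32)]) -> bool {
    // Each tuple is (task, R(t), C(t), B(t), I(t)); the task set is considered
    // schedulable here if every response time is within its task's deadline.
    analysis.iter().all(|(task, r, _c, _b, _i)| *r <= task.deadline)
}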
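/*
 * Small sanity checks of the formulas above on a hand-built two-task set without
 * shared resources (a sketch added for illustration; the tasks LO and HI below are
 * hypothetical and not part of the example in `main`).
 */
#[cfg(test)]
mod tests {
    use super::*;
    use crate::common::*;

    // Task with WCET 10 over an inter-arrival of 40: L = 10/40 = 0.25.
    fn lo() -> Task {
        Task {
            id: "LO".to_string(),
            prio: 1,
            deadline: 40,
            inter_arrival: 40,
            trace: Trace {
                id: "LO".to_string(),
                start: 0,
                end: 10,
                inner: vec![],
            },
        }
    }

    // Task with WCET 5 over an inter-arrival of 20: L = 5/20 = 0.25.
    fn hi() -> Task {
        Task {
            id: "HI".to_string(),
            prio: 2,
            deadline: 20,
            inter_arrival: 20,
            trace: Trace {
                id: "HI".to_string(),
                start: 0,
                end: 5,
                inner: vec![],
            },
        }
    }

    #[test]
    fn wcet_is_end_minus_start() {
        assert_eq!(wcet(&lo().trace), 10);
        assert_eq!(wcet(&hi().trace), 5);
    }

    #[test]
    fn total_load_is_sum_of_per_task_loads() {
        let tasks: Tasks = vec![lo(), hi()];
        // Ltot = 10/40 + 5/20 = 0.25 + 0.25 = 0.5
        assert!((total_load_factor(&tasks) - 0.5).abs() < 1e-6);
    }

    #[test]
    fn interference_counts_higher_priority_preemptions() {
        let tasks: Tasks = vec![lo(), hi()];
        // I(LO) = C(HI) * ceil(Bp(LO)/A(HI)) = 5 * ceil(40/20) = 10,
        // with Bp(LO) over-approximated by the deadline D(LO) = 40.
        assert_eq!(interference_time(&lo(), &tasks), 10);
        // HI has no higher-priority tasks, so I(HI) = 0.
        assert_eq!(interference_time(&hi(), &tasks), 0);
    }
}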