Skip to content
Snippets Groups Projects
Commit 024527b4 authored by homunkulus's avatar homunkulus
Browse files

Auto merge of #69 - japaric:revise-api, r=japaric

revise peripheral API

This PR changes the signature of many of the high level methods available on peripherals like
`NVIC::get_priority`. Additionally some instance methods have been turned into static methods. The
following guidelines have been used to apply the changes:

- If the method body contains a single, atomic read operation with *no* side effects (e.g. the read
  operation clears one of the bits of the register): the signature changed to make the method
  static, i.e. `&self` was removed from the signature.

- If the method involves writing to or a RMW operation on a register: the signature changed to take
  the singleton by `&mut self` reference.

- If the method involves only read operations where at least one of them modifies the value
  of a register: the signature changed to take the singleton by `&mut self` reference.

The rationale for this last guideline is that using `&self`, instead of `&mut self`, lets the user
(unintentionally) break abstractions in the presence of generators. Example below:

``` rust
let peripherals = Peripherals::take().unwrap();
let syst = &peripherals.SYST;

// tasks
let mut a = || {
    loop {
        // yielding "busy wait"
        while !syst.has_wrapped() {
            yield;
        }

        // do stuff
    }
};

let mut b = || {
    // ..

    // *NOTE* the problem is in the line below: this `is_counter_enabled` method reads the CSR
    // register and that read operation clears the COUNTFLAG bit of the register (if set), which is
    // the bit the `has_wrapped` method checks for.
    if syst.is_counter_enabled() {
        // ..
    }

    // ..
};
```

One more guideline was considered but the required conditions didn't apply to any of the existing
methods:

- If the method involves only side-effect-free read operations that are not necessarily atomic:
  the signature of the method should remain as `&self`.

The rationale for this guideline is that a static method (no `self` argument) wouldn't be
appropriate because that can result in a torn read if the read operation can be preempted by some
context that modifies the register.

In any case, this last guideline doesn't seem to apply well to the peripherals structs exposed by
this crate because they *deref* to a `RegisterBlock` that allows mutation through a `&self`
reference. When these two properties (the guideline and `Deref<Target=RegisterBlock>`) are mixed
the user can potentially break abstractions using generators (as shown in the `syst` example).

cc @hannobraun
closes #67
parents 34f66210 47623ceb
No related branches found
No related tags found
No related merge requests found
...@@ -22,8 +22,7 @@ pub enum Exception { ...@@ -22,8 +22,7 @@ pub enum Exception {
/// An interrupt /// An interrupt
Interrupt(u8), Interrupt(u8),
// Unreachable variant // Unreachable variant
#[doc(hidden)] #[doc(hidden)] Reserved,
Reserved,
} }
impl Exception { impl Exception {
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
use volatile_register::WO; use volatile_register::WO;
use peripheral::CBP;
/// Register block /// Register block
#[repr(C)] #[repr(C)]
pub struct RegisterBlock { pub struct RegisterBlock {
...@@ -33,10 +35,10 @@ const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS; ...@@ -33,10 +35,10 @@ const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS;
const CBP_SW_SET_POS: u32 = 5; const CBP_SW_SET_POS: u32 = 5;
const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS; const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS;
impl RegisterBlock { impl CBP {
/// I-cache invalidate all to PoU /// I-cache invalidate all to PoU
#[inline] #[inline]
pub fn iciallu(&self) { pub fn iciallu(&mut self) {
unsafe { unsafe {
self.iciallu.write(0); self.iciallu.write(0);
} }
...@@ -44,7 +46,7 @@ impl RegisterBlock { ...@@ -44,7 +46,7 @@ impl RegisterBlock {
/// I-cache invalidate by MVA to PoU /// I-cache invalidate by MVA to PoU
#[inline] #[inline]
pub fn icimvau(&self, mva: u32) { pub fn icimvau(&mut self, mva: u32) {
unsafe { unsafe {
self.icimvau.write(mva); self.icimvau.write(mva);
} }
...@@ -52,7 +54,7 @@ impl RegisterBlock { ...@@ -52,7 +54,7 @@ impl RegisterBlock {
/// D-cache invalidate by MVA to PoC /// D-cache invalidate by MVA to PoC
#[inline] #[inline]
pub fn dcimvac(&self, mva: u32) { pub fn dcimvac(&mut self, mva: u32) {
unsafe { unsafe {
self.dcimvac.write(mva); self.dcimvac.write(mva);
} }
...@@ -62,7 +64,7 @@ impl RegisterBlock { ...@@ -62,7 +64,7 @@ impl RegisterBlock {
/// ///
/// `set` is masked to be between 0 and 3, and `way` between 0 and 511. /// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
#[inline] #[inline]
pub fn dcisw(&self, set: u16, way: u16) { pub fn dcisw(&mut self, set: u16, way: u16) {
// The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way // The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way
// operations have a register data format which depends on the implementation's // operations have a register data format which depends on the implementation's
// associativity and number of sets. Specifically the 'way' and 'set' fields have // associativity and number of sets. Specifically the 'way' and 'set' fields have
...@@ -82,7 +84,7 @@ impl RegisterBlock { ...@@ -82,7 +84,7 @@ impl RegisterBlock {
/// D-cache clean by MVA to PoU /// D-cache clean by MVA to PoU
#[inline] #[inline]
pub fn dccmvau(&self, mva: u32) { pub fn dccmvau(&mut self, mva: u32) {
unsafe { unsafe {
self.dccmvau.write(mva); self.dccmvau.write(mva);
} }
...@@ -90,7 +92,7 @@ impl RegisterBlock { ...@@ -90,7 +92,7 @@ impl RegisterBlock {
/// D-cache clean by MVA to PoC /// D-cache clean by MVA to PoC
#[inline] #[inline]
pub fn dccmvac(&self, mva: u32) { pub fn dccmvac(&mut self, mva: u32) {
unsafe { unsafe {
self.dccmvac.write(mva); self.dccmvac.write(mva);
} }
...@@ -100,7 +102,7 @@ impl RegisterBlock { ...@@ -100,7 +102,7 @@ impl RegisterBlock {
/// ///
/// `set` is masked to be between 0 and 3, and `way` between 0 and 511. /// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
#[inline] #[inline]
pub fn dccsw(&self, set: u16, way: u16) { pub fn dccsw(&mut self, set: u16, way: u16) {
// See comment for dcisw() about the format here // See comment for dcisw() about the format here
unsafe { unsafe {
self.dccsw.write( self.dccsw.write(
...@@ -112,7 +114,7 @@ impl RegisterBlock { ...@@ -112,7 +114,7 @@ impl RegisterBlock {
/// D-cache clean and invalidate by MVA to PoC /// D-cache clean and invalidate by MVA to PoC
#[inline] #[inline]
pub fn dccimvac(&self, mva: u32) { pub fn dccimvac(&mut self, mva: u32) {
unsafe { unsafe {
self.dccimvac.write(mva); self.dccimvac.write(mva);
} }
...@@ -122,7 +124,7 @@ impl RegisterBlock { ...@@ -122,7 +124,7 @@ impl RegisterBlock {
/// ///
/// `set` is masked to be between 0 and 3, and `way` between 0 and 511. /// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
#[inline] #[inline]
pub fn dccisw(&self, set: u16, way: u16) { pub fn dccisw(&mut self, set: u16, way: u16) {
// See comment for dcisw() about the format here // See comment for dcisw() about the format here
unsafe { unsafe {
self.dccisw.write( self.dccisw.write(
...@@ -134,7 +136,7 @@ impl RegisterBlock { ...@@ -134,7 +136,7 @@ impl RegisterBlock {
/// Branch predictor invalidate all /// Branch predictor invalidate all
#[inline] #[inline]
pub fn bpiall(&self) { pub fn bpiall(&mut self) {
unsafe { unsafe {
self.bpiall.write(0); self.bpiall.write(0);
} }
......
...@@ -4,6 +4,9 @@ use volatile_register::RO; ...@@ -4,6 +4,9 @@ use volatile_register::RO;
#[cfg(any(armv7m, test))] #[cfg(any(armv7m, test))]
use volatile_register::RW; use volatile_register::RW;
#[cfg(armv7m)]
use peripheral::CPUID;
/// Register block /// Register block
#[repr(C)] #[repr(C)]
pub struct RegisterBlock { pub struct RegisterBlock {
...@@ -45,14 +48,14 @@ pub enum CsselrCacheType { ...@@ -45,14 +48,14 @@ pub enum CsselrCacheType {
} }
#[cfg(armv7m)] #[cfg(armv7m)]
impl RegisterBlock { impl CPUID {
/// Selects the current CCSIDR /// Selects the current CCSIDR
/// ///
/// * `level`: the required cache level minus 1, e.g. 0 for L1, 1 for L2 /// * `level`: the required cache level minus 1, e.g. 0 for L1, 1 for L2
/// * `ind`: select instruction cache or data/unified cache /// * `ind`: select instruction cache or data/unified cache
/// ///
/// `level` is masked to be between 0 and 7. /// `level` is masked to be between 0 and 7.
pub fn select_cache(&self, level: u8, ind: CsselrCacheType) { pub fn select_cache(&mut self, level: u8, ind: CsselrCacheType) {
const CSSELR_IND_POS: u32 = 0; const CSSELR_IND_POS: u32 = 0;
const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS; const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS;
const CSSELR_LEVEL_POS: u32 = 1; const CSSELR_LEVEL_POS: u32 = 1;
...@@ -67,7 +70,7 @@ impl RegisterBlock { ...@@ -67,7 +70,7 @@ impl RegisterBlock {
} }
/// Returns the number of sets and ways in the selected cache /// Returns the number of sets and ways in the selected cache
pub fn cache_num_sets_ways(&self, level: u8, ind: CsselrCacheType) -> (u16, u16) { pub fn cache_num_sets_ways(&mut self, level: u8, ind: CsselrCacheType) -> (u16, u16) {
const CCSIDR_NUMSETS_POS: u32 = 13; const CCSIDR_NUMSETS_POS: u32 = 13;
const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS; const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS;
const CCSIDR_ASSOCIATIVITY_POS: u32 = 3; const CCSIDR_ASSOCIATIVITY_POS: u32 = 3;
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
use volatile_register::{RO, RW, WO}; use volatile_register::{RO, RW, WO};
use peripheral::DWT;
/// Register block /// Register block
#[repr(C)] #[repr(C)]
pub struct RegisterBlock { pub struct RegisterBlock {
...@@ -30,13 +32,6 @@ pub struct RegisterBlock { ...@@ -30,13 +32,6 @@ pub struct RegisterBlock {
pub lsr: RO<u32>, pub lsr: RO<u32>,
} }
impl RegisterBlock {
/// Enables the cycle counter
pub fn enable_cycle_counter(&self) {
unsafe { self.ctrl.modify(|r| r | 1) }
}
}
/// Comparator /// Comparator
#[repr(C)] #[repr(C)]
pub struct Comparator { pub struct Comparator {
...@@ -48,3 +43,16 @@ pub struct Comparator { ...@@ -48,3 +43,16 @@ pub struct Comparator {
pub function: RW<u32>, pub function: RW<u32>,
reserved: u32, reserved: u32,
} }
impl DWT {
/// Enables the cycle counter
pub fn enable_cycle_counter(&mut self) {
unsafe { self.ctrl.modify(|r| r | 1) }
}
/// Returns the current clock cycle count
pub fn get_cycle_count() -> u32 {
// NOTE(unsafe) atomic read with no side effects
unsafe { (*Self::ptr()).cyccnt.read() }
}
}
//! Core peripherals //! Core peripherals
//! //!
//! # API
//!
//! To use (most of) the peripheral API first you must get an *instance* of the peripheral. All the
//! core peripherals are modeled as singletons (there can only ever be, at most, one instance of
//! them at any given point in time) and the only way to get an instance of them is through the
//! [`Peripherals::take`](struct.Peripherals.html#method.take) method.
//!
//! ``` no_run
//! extern crate cortex_m;
//!
//! use cortex_m::peripheral::Peripherals;
//!
//! fn main() {
//! let mut peripherals = Peripherals::take().unwrap();
//! peripherals.DWT.enable_cycle_counter();
//! }
//! ```
//!
//! This method can only be successfully called *once* -- this is why the method returns an
//! `Option`. Subsequent calls to the method will result in a `None` value being returned.
//!
//! A part of the peripheral API doesn't require access to a peripheral instance. This part of the
//! API is provided as static methods on the peripheral types. One example is the
//! [`DWT::cyccnt`](struct.DWT.html#method.cyccnt) method.
//!
//! ``` no_run
//! extern crate cortex_m;
//!
//! use cortex_m::peripheral::{DWT, Peripherals};
//!
//! fn main() {
//! {
//! let mut peripherals = Peripherals::take().unwrap();
//! peripherals.DWT.enable_cycle_counter();
//! } // all the peripheral singletons are destroyed here
//!
//! // but this method can be called without a DWT instance
//! let cyccnt = DWT::get_cycle_count();
//! }
//! ```
//!
//! The singleton property can be *unsafely* bypassed using the `ptr` static method which is
//! available on all the peripheral types. This method is a useful building block for implementing
//! higher level and safe abstractions.
//!
//! ``` no_run
//! extern crate cortex_m;
//!
//! use cortex_m::peripheral::{DWT, Peripherals};
//!
//! fn main() {
//! {
//! let mut peripherals = Peripherals::take().unwrap();
//! peripherals.DWT.enable_cycle_counter();
//! } // all the peripheral singletons are destroyed here
//!
//! // actually safe because this is an atomic read with no side effects
//! let cyccnt = unsafe { (*DWT::ptr()).cyccnt.read() };
//! }
//! ```
//!
//! # References //! # References
//! //!
//! - ARMv7-M Architecture Reference Manual (Issue E.b) - Chapter B3 //! - ARMv7-M Architecture Reference Manual (Issue E.b) - Chapter B3
...@@ -80,7 +141,7 @@ impl Peripherals { ...@@ -80,7 +141,7 @@ impl Peripherals {
}) })
} }
/// Unchecked version of `Peripherals::steal` /// Unchecked version of `Peripherals::take`
pub unsafe fn steal() -> Self { pub unsafe fn steal() -> Self {
debug_assert!(!CORE_PERIPHERALS); debug_assert!(!CORE_PERIPHERALS);
...@@ -136,6 +197,12 @@ pub struct CBP { ...@@ -136,6 +197,12 @@ pub struct CBP {
#[cfg(armv7m)] #[cfg(armv7m)]
impl CBP { impl CBP {
pub(crate) unsafe fn new() -> Self {
CBP {
_marker: PhantomData,
}
}
/// Returns a pointer to the register block /// Returns a pointer to the register block
pub fn ptr() -> *const self::cbp::RegisterBlock { pub fn ptr() -> *const self::cbp::RegisterBlock {
0xE000_EF50 as *const _ 0xE000_EF50 as *const _
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
use volatile_register::{RO, RW}; use volatile_register::{RO, RW};
use peripheral::NVIC;
use interrupt::Nr; use interrupt::Nr;
/// Register block /// Register block
...@@ -52,9 +53,9 @@ pub struct RegisterBlock { ...@@ -52,9 +53,9 @@ pub struct RegisterBlock {
pub ipr: [RW<u32>; 8], pub ipr: [RW<u32>; 8],
} }
impl RegisterBlock { impl NVIC {
/// Clears `interrupt`'s pending state /// Clears `interrupt`'s pending state
pub fn clear_pending<I>(&self, interrupt: I) pub fn clear_pending<I>(&mut self, interrupt: I)
where where
I: Nr, I: Nr,
{ {
...@@ -64,7 +65,7 @@ impl RegisterBlock { ...@@ -64,7 +65,7 @@ impl RegisterBlock {
} }
/// Disables `interrupt` /// Disables `interrupt`
pub fn disable<I>(&self, interrupt: I) pub fn disable<I>(&mut self, interrupt: I)
where where
I: Nr, I: Nr,
{ {
...@@ -74,7 +75,7 @@ impl RegisterBlock { ...@@ -74,7 +75,7 @@ impl RegisterBlock {
} }
/// Enables `interrupt` /// Enables `interrupt`
pub fn enable<I>(&self, interrupt: I) pub fn enable<I>(&mut self, interrupt: I)
where where
I: Nr, I: Nr,
{ {
...@@ -83,64 +84,69 @@ impl RegisterBlock { ...@@ -83,64 +84,69 @@ impl RegisterBlock {
unsafe { self.iser[usize::from(nr / 32)].write(1 << (nr % 32)) } unsafe { self.iser[usize::from(nr / 32)].write(1 << (nr % 32)) }
} }
/// Gets the "priority" of `interrupt` /// Returns the NVIC priority of `interrupt`
/// ///
/// NOTE NVIC encodes priority in the highest bits of a byte so values like /// *NOTE* NVIC encodes priority in the highest bits of a byte so values like `1` and `2` map
/// `1` and `2` have the same priority. Also for NVIC priorities, a lower /// to the same priority. Also for NVIC priorities, a lower value (e.g. `16`) has higher
/// value (e.g. `16`) has higher priority than a larger value (e.g. `32`). /// priority (urgency) than a larger value (e.g. `32`).
pub fn get_priority<I>(&self, interrupt: I) -> u8 pub fn get_priority<I>(interrupt: I) -> u8
where where
I: Nr, I: Nr,
{ {
#[cfg(not(armv6m))] #[cfg(not(armv6m))]
{ {
let nr = interrupt.nr(); let nr = interrupt.nr();
self.ipr[usize::from(nr)].read() // NOTE(unsafe) atomic read with no side effects
unsafe { (*Self::ptr()).ipr[usize::from(nr)].read() }
} }
#[cfg(armv6m)] #[cfg(armv6m)]
{ {
let ipr_n = self.ipr[Self::ipr_index(&interrupt)].read(); // NOTE(unsafe) atomic read with no side effects
let ipr_n = unsafe { (*Self::ptr()).ipr[Self::ipr_index(&interrupt)].read() };
let prio = (ipr_n >> Self::ipr_shift(&interrupt)) & 0x000000ff; let prio = (ipr_n >> Self::ipr_shift(&interrupt)) & 0x000000ff;
prio as u8 prio as u8
} }
} }
/// Is `interrupt` active or pre-empted and stacked /// Is `interrupt` active or pre-empted and stacked
pub fn is_active<I>(&self, interrupt: I) -> bool pub fn is_active<I>(interrupt: I) -> bool
where where
I: Nr, I: Nr,
{ {
let nr = interrupt.nr(); let nr = interrupt.nr();
let mask = 1 << (nr % 32); let mask = 1 << (nr % 32);
(self.iabr[usize::from(nr / 32)].read() & mask) == mask // NOTE(unsafe) atomic read with no side effects
unsafe { ((*Self::ptr()).iabr[usize::from(nr / 32)].read() & mask) == mask }
} }
/// Checks if `interrupt` is enabled /// Checks if `interrupt` is enabled
pub fn is_enabled<I>(&self, interrupt: I) -> bool pub fn is_enabled<I>(interrupt: I) -> bool
where where
I: Nr, I: Nr,
{ {
let nr = interrupt.nr(); let nr = interrupt.nr();
let mask = 1 << (nr % 32); let mask = 1 << (nr % 32);
(self.iser[usize::from(nr / 32)].read() & mask) == mask // NOTE(unsafe) atomic read with no side effects
unsafe { ((*Self::ptr()).iser[usize::from(nr / 32)].read() & mask) == mask }
} }
/// Checks if `interrupt` is pending /// Checks if `interrupt` is pending
pub fn is_pending<I>(&self, interrupt: I) -> bool pub fn is_pending<I>(interrupt: I) -> bool
where where
I: Nr, I: Nr,
{ {
let nr = interrupt.nr(); let nr = interrupt.nr();
let mask = 1 << (nr % 32); let mask = 1 << (nr % 32);
(self.ispr[usize::from(nr / 32)].read() & mask) == mask // NOTE(unsafe) atomic read with no side effects
unsafe { ((*Self::ptr()).ispr[usize::from(nr / 32)].read() & mask) == mask }
} }
/// Forces `interrupt` into pending state /// Forces `interrupt` into pending state
pub fn set_pending<I>(&self, interrupt: I) pub fn set_pending<I>(&mut self, interrupt: I)
where where
I: Nr, I: Nr,
{ {
...@@ -151,15 +157,12 @@ impl RegisterBlock { ...@@ -151,15 +157,12 @@ impl RegisterBlock {
/// Sets the "priority" of `interrupt` to `prio` /// Sets the "priority" of `interrupt` to `prio`
/// ///
/// NOTE See `get_priority` method for an explanation of how NVIC priorities /// *NOTE* See [`get_priority`](struct.NVIC.html#method.get_priority) method for an explanation
/// work. /// of how NVIC priorities work.
/// ///
/// On ARMv6-M, updating an interrupt priority requires a read-modify-write /// On ARMv6-M, updating an interrupt priority requires a read-modify-write operation. On
/// operation, which is not atomic. This is inherently racy, so please /// ARMv7-M, the operation is performed in a single atomic write operation.
/// ensure proper access to this method. pub unsafe fn set_priority<I>(&mut self, interrupt: I, prio: u8)
///
/// On ARMv7-M, this method is atomic.
pub unsafe fn set_priority<I>(&self, interrupt: I, prio: u8)
where where
I: Nr, I: Nr,
{ {
...@@ -181,12 +184,18 @@ impl RegisterBlock { ...@@ -181,12 +184,18 @@ impl RegisterBlock {
} }
#[cfg(armv6m)] #[cfg(armv6m)]
fn ipr_index<I>(interrupt: &I) -> usize where I: Nr { fn ipr_index<I>(interrupt: &I) -> usize
where
I: Nr,
{
usize::from(interrupt.nr()) / 4 usize::from(interrupt.nr()) / 4
} }
#[cfg(armv6m)] #[cfg(armv6m)]
fn ipr_shift<I>(interrupt: &I) -> usize where I: Nr { fn ipr_shift<I>(interrupt: &I) -> usize
where
I: Nr,
{
(usize::from(interrupt.nr()) % 4) * 8 (usize::from(interrupt.nr()) % 4) * 8
} }
} }
...@@ -2,10 +2,12 @@ ...@@ -2,10 +2,12 @@
use volatile_register::RW; use volatile_register::RW;
#[cfg(any(armv7m, has_fpu))]
use super::{CBP, SCB};
#[cfg(armv7m)] #[cfg(armv7m)]
use super::CBP; use super::CPUID;
#[cfg(armv7m)] #[cfg(armv7m)]
use super::cpuid::{self, CsselrCacheType}; use super::cpuid::CsselrCacheType;
/// Register block /// Register block
#[repr(C)] #[repr(C)]
...@@ -64,10 +66,22 @@ mod fpu_consts { ...@@ -64,10 +66,22 @@ mod fpu_consts {
use self::fpu_consts::*; use self::fpu_consts::*;
#[cfg(has_fpu)] #[cfg(has_fpu)]
impl RegisterBlock { impl SCB {
/// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)`
pub fn disable_fpu(&mut self) {
self.set_fpu_access_mode(FpuAccessMode::Disabled)
}
/// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)`
pub fn enable_fpu(&mut self) {
self.set_fpu_access_mode(FpuAccessMode::Enabled)
}
/// Gets FPU access mode /// Gets FPU access mode
pub fn fpu_access_mode(&self) -> FpuAccessMode { pub fn fpu_access_mode() -> FpuAccessMode {
let cpacr = self.cpacr.read(); // NOTE(unsafe) atomic read operation with no side effects
let cpacr = unsafe { (*Self::ptr()).cpacr.read() };
if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER { if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER {
FpuAccessMode::Enabled FpuAccessMode::Enabled
} else if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE { } else if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE {
...@@ -83,7 +97,7 @@ impl RegisterBlock { ...@@ -83,7 +97,7 @@ impl RegisterBlock {
/// floating-point arguments or have any floating-point local variables. Because the compiler /// floating-point arguments or have any floating-point local variables. Because the compiler
/// might inline such a function into a caller that does have floating-point arguments or /// might inline such a function into a caller that does have floating-point arguments or
/// variables, any such function must be also marked #[inline(never)]. /// variables, any such function must be also marked #[inline(never)].
pub fn set_fpu_access_mode(&self, mode: FpuAccessMode) { pub fn set_fpu_access_mode(&mut self, mode: FpuAccessMode) {
let mut cpacr = self.cpacr.read() & !SCB_CPACR_FPU_MASK; let mut cpacr = self.cpacr.read() & !SCB_CPACR_FPU_MASK;
match mode { match mode {
FpuAccessMode::Disabled => (), FpuAccessMode::Disabled => (),
...@@ -92,16 +106,6 @@ impl RegisterBlock { ...@@ -92,16 +106,6 @@ impl RegisterBlock {
} }
unsafe { self.cpacr.write(cpacr) } unsafe { self.cpacr.write(cpacr) }
} }
/// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)`
pub fn enable_fpu(&self) {
self.set_fpu_access_mode(FpuAccessMode::Enabled)
}
/// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)`
pub fn disable_fpu(&self) {
self.set_fpu_access_mode(FpuAccessMode::Disabled)
}
} }
#[cfg(armv7m)] #[cfg(armv7m)]
...@@ -114,17 +118,17 @@ mod scb_consts { ...@@ -114,17 +118,17 @@ mod scb_consts {
use self::scb_consts::*; use self::scb_consts::*;
#[cfg(armv7m)] #[cfg(armv7m)]
impl RegisterBlock { impl SCB {
/// Enables I-Cache if currently disabled /// Enables I-Cache if currently disabled
#[inline] #[inline]
pub fn enable_icache(&self) { pub fn enable_icache(&mut self) {
// Don't do anything if ICache is already enabled // Don't do anything if ICache is already enabled
if self.icache_enabled() { if Self::icache_enabled() {
return; return;
} }
// All of CBP is write-only so no data races are possible // NOTE(unsafe) All CBP registers are write-only and stateless
let cbp = unsafe { &*CBP::ptr() }; let mut cbp = unsafe { CBP::new() };
// Invalidate I-Cache // Invalidate I-Cache
cbp.iciallu(); cbp.iciallu();
...@@ -138,14 +142,14 @@ impl RegisterBlock { ...@@ -138,14 +142,14 @@ impl RegisterBlock {
/// Disables I-Cache if currently enabled /// Disables I-Cache if currently enabled
#[inline] #[inline]
pub fn disable_icache(&self) { pub fn disable_icache(&mut self) {
// Don't do anything if ICache is already disabled // Don't do anything if ICache is already disabled
if !self.icache_enabled() { if !Self::icache_enabled() {
return; return;
} }
// All of CBP is write-only so no data races are possible // NOTE(unsafe) All CBP registers are write-only and stateless
let cbp = unsafe { &*CBP::ptr() }; let mut cbp = unsafe { CBP::new() };
// Disable I-Cache // Disable I-Cache
unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) }; unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) };
...@@ -159,17 +163,19 @@ impl RegisterBlock { ...@@ -159,17 +163,19 @@ impl RegisterBlock {
/// Returns whether the I-Cache is currently enabled /// Returns whether the I-Cache is currently enabled
#[inline] #[inline]
pub fn icache_enabled(&self) -> bool { pub fn icache_enabled() -> bool {
::asm::dsb(); ::asm::dsb();
::asm::isb(); ::asm::isb();
self.ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK
// NOTE(unsafe) atomic read with no side effects
unsafe { (*Self::ptr()).ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK }
} }
/// Invalidates I-Cache /// Invalidates I-Cache
#[inline] #[inline]
pub fn invalidate_icache(&self) { pub fn invalidate_icache(&mut self) {
// All of CBP is write-only so no data races are possible // NOTE(unsafe) All CBP registers are write-only and stateless
let cbp = unsafe { &*CBP::ptr() }; let mut cbp = unsafe { CBP::new() };
// Invalidate I-Cache // Invalidate I-Cache
cbp.iciallu(); cbp.iciallu();
...@@ -180,9 +186,9 @@ impl RegisterBlock { ...@@ -180,9 +186,9 @@ impl RegisterBlock {
/// Enables D-cache if currently disabled /// Enables D-cache if currently disabled
#[inline] #[inline]
pub fn enable_dcache(&self, cpuid: &cpuid::RegisterBlock) { pub fn enable_dcache(&mut self, cpuid: &mut CPUID) {
// Don't do anything if DCache is already enabled // Don't do anything if DCache is already enabled
if self.dcache_enabled() { if Self::dcache_enabled() {
return; return;
} }
...@@ -198,9 +204,9 @@ impl RegisterBlock { ...@@ -198,9 +204,9 @@ impl RegisterBlock {
/// Disables D-cache if currently enabled /// Disables D-cache if currently enabled
#[inline] #[inline]
pub fn disable_dcache(&self, cpuid: &cpuid::RegisterBlock) { pub fn disable_dcache(&mut self, cpuid: &mut CPUID) {
// Don't do anything if DCache is already disabled // Don't do anything if DCache is already disabled
if !self.dcache_enabled() { if !Self::dcache_enabled() {
return; return;
} }
...@@ -213,10 +219,12 @@ impl RegisterBlock { ...@@ -213,10 +219,12 @@ impl RegisterBlock {
/// Returns whether the D-Cache is currently enabled /// Returns whether the D-Cache is currently enabled
#[inline] #[inline]
pub fn dcache_enabled(&self) -> bool { pub fn dcache_enabled() -> bool {
::asm::dsb(); ::asm::dsb();
::asm::isb(); ::asm::isb();
self.ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK
// NOTE(unsafe) atomic read with no side effects
unsafe { (*Self::ptr()).ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK }
} }
/// Invalidates D-cache /// Invalidates D-cache
...@@ -225,9 +233,9 @@ impl RegisterBlock { ...@@ -225,9 +233,9 @@ impl RegisterBlock {
/// stack, depending on optimisations, breaking returning to the call point. /// stack, depending on optimisations, breaking returning to the call point.
/// It's used immediately before enabling the dcache, but not exported publicly. /// It's used immediately before enabling the dcache, but not exported publicly.
#[inline] #[inline]
fn invalidate_dcache(&self, cpuid: &cpuid::RegisterBlock) { fn invalidate_dcache(&mut self, cpuid: &mut CPUID) {
// All of CBP is write-only so no data races are possible // NOTE(unsafe) All CBP registers are write-only and stateless
let cbp = unsafe { &*CBP::ptr() }; let mut cbp = unsafe { CBP::new() };
// Read number of sets and ways // Read number of sets and ways
let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
...@@ -245,9 +253,9 @@ impl RegisterBlock { ...@@ -245,9 +253,9 @@ impl RegisterBlock {
/// Cleans D-cache /// Cleans D-cache
#[inline] #[inline]
pub fn clean_dcache(&self, cpuid: &cpuid::RegisterBlock) { pub fn clean_dcache(&mut self, cpuid: &mut CPUID) {
// All of CBP is write-only so no data races are possible // NOTE(unsafe) All CBP registers are write-only and stateless
let cbp = unsafe { &*CBP::ptr() }; let mut cbp = unsafe { CBP::new() };
// Read number of sets and ways // Read number of sets and ways
let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
...@@ -264,9 +272,9 @@ impl RegisterBlock { ...@@ -264,9 +272,9 @@ impl RegisterBlock {
/// Cleans and invalidates D-cache /// Cleans and invalidates D-cache
#[inline] #[inline]
pub fn clean_invalidate_dcache(&self, cpuid: &cpuid::RegisterBlock) { pub fn clean_invalidate_dcache(&mut self, cpuid: &mut CPUID) {
// All of CBP is write-only so no data races are possible // NOTE(unsafe) All CBP registers are write-only and stateless
let cbp = unsafe { &*CBP::ptr() }; let mut cbp = unsafe { CBP::new() };
// Read number of sets and ways // Read number of sets and ways
let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
...@@ -289,14 +297,14 @@ impl RegisterBlock { ...@@ -289,14 +297,14 @@ impl RegisterBlock {
/// Invalidates cache starting from the lowest 32-byte aligned address represented by `addr`, /// Invalidates cache starting from the lowest 32-byte aligned address represented by `addr`,
/// in blocks of 32 bytes until at least `size` bytes have been invalidated. /// in blocks of 32 bytes until at least `size` bytes have been invalidated.
#[inline] #[inline]
pub fn invalidate_dcache_by_address(&self, addr: usize, size: usize) { pub fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
// No-op zero sized operations // No-op zero sized operations
if size == 0 { if size == 0 {
return; return;
} }
// All of CBP is write-only so no data races are possible // NOTE(unsafe) All CBP registers are write-only and stateless
let cbp = unsafe { &*CBP::ptr() }; let mut cbp = unsafe { CBP::new() };
::asm::dsb(); ::asm::dsb();
...@@ -323,14 +331,14 @@ impl RegisterBlock { ...@@ -323,14 +331,14 @@ impl RegisterBlock {
/// Cleans cache starting from the lowest 32-byte aligned address represented by `addr`, /// Cleans cache starting from the lowest 32-byte aligned address represented by `addr`,
/// in blocks of 32 bytes until at least `size` bytes have been cleaned. /// in blocks of 32 bytes until at least `size` bytes have been cleaned.
#[inline] #[inline]
pub fn clean_dcache_by_address(&self, addr: usize, size: usize) { pub fn clean_dcache_by_address(&mut self, addr: usize, size: usize) {
// No-op zero sized operations // No-op zero sized operations
if size == 0 { if size == 0 {
return; return;
} }
// All of CBP is write-only so no data races are possible // NOTE(unsafe) All CBP registers are write-only and stateless
let cbp = unsafe { &*CBP::ptr() }; let mut cbp = unsafe { CBP::new() };
::asm::dsb(); ::asm::dsb();
...@@ -358,14 +366,14 @@ impl RegisterBlock { ...@@ -358,14 +366,14 @@ impl RegisterBlock {
/// by `addr`, in blocks of 32 bytes until at least `size` bytes have been cleaned and /// by `addr`, in blocks of 32 bytes until at least `size` bytes have been cleaned and
/// invalidated. /// invalidated.
#[inline] #[inline]
pub fn clean_invalidate_dcache_by_address(&self, addr: usize, size: usize) { pub fn clean_invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
// No-op zero sized operations // No-op zero sized operations
if size == 0 { if size == 0 {
return; return;
} }
// All of CBP is write-only so no data races are possible // NOTE(unsafe) All CBP registers are write-only and stateless
let cbp = unsafe { &*CBP::ptr() }; let mut cbp = unsafe { CBP::new() };
::asm::dsb(); ::asm::dsb();
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
use volatile_register::{RO, RW}; use volatile_register::{RO, RW};
use peripheral::SYST;
/// Register block /// Register block
#[repr(C)] #[repr(C)]
pub struct RegisterBlock { pub struct RegisterBlock {
...@@ -34,39 +36,41 @@ const SYST_CSR_COUNTFLAG: u32 = 1 << 16; ...@@ -34,39 +36,41 @@ const SYST_CSR_COUNTFLAG: u32 = 1 << 16;
const SYST_CALIB_SKEW: u32 = 1 << 30; const SYST_CALIB_SKEW: u32 = 1 << 30;
const SYST_CALIB_NOREF: u32 = 1 << 31; const SYST_CALIB_NOREF: u32 = 1 << 31;
impl RegisterBlock { impl SYST {
/// Checks if counter is enabled /// Clears current value to 0
pub fn is_counter_enabled(&self) -> bool { ///
self.csr.read() & SYST_CSR_ENABLE != 0 /// After calling `clear_current()`, the next call to `has_wrapped()`
} /// will return `false`.
pub fn clear_current(&mut self) {
/// Enables counter unsafe { self.cvr.write(0) }
pub fn enable_counter(&self) {
unsafe { self.csr.modify(|v| v | SYST_CSR_ENABLE) }
} }
/// Disables counter /// Disables counter
pub fn disable_counter(&self) { pub fn disable_counter(&mut self) {
unsafe { self.csr.modify(|v| v & !SYST_CSR_ENABLE) } unsafe { self.csr.modify(|v| v & !SYST_CSR_ENABLE) }
} }
/// Checks if SysTick interrupt is enabled /// Disables SysTick interrupt
pub fn is_interrupt_enabled(&self) -> bool { pub fn disable_interrupt(&mut self) {
self.csr.read() & SYST_CSR_TICKINT != 0 unsafe { self.csr.modify(|v| v & !SYST_CSR_TICKINT) }
} }
/// Enables SysTick interrupt /// Enables counter
pub fn enable_interrupt(&self) { pub fn enable_counter(&mut self) {
unsafe { self.csr.modify(|v| v | SYST_CSR_TICKINT) } unsafe { self.csr.modify(|v| v | SYST_CSR_ENABLE) }
} }
/// Disables SysTick interrupt /// Enables SysTick interrupt
pub fn disable_interrupt(&self) { pub fn enable_interrupt(&mut self) {
unsafe { self.csr.modify(|v| v & !SYST_CSR_TICKINT) } unsafe { self.csr.modify(|v| v | SYST_CSR_TICKINT) }
} }
/// Gets clock source /// Gets clock source
pub fn get_clock_source(&self) -> SystClkSource { ///
/// *NOTE* This takes `&mut self` because the read operation is side effectful and can clear the
/// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`)
pub fn get_clock_source(&mut self) -> SystClkSource {
// NOTE(unsafe) atomic read with no side effects
let clk_source_bit = self.csr.read() & SYST_CSR_CLKSOURCE != 0; let clk_source_bit = self.csr.read() & SYST_CSR_CLKSOURCE != 0;
match clk_source_bit { match clk_source_bit {
false => SystClkSource::External, false => SystClkSource::External,
...@@ -74,51 +78,56 @@ impl RegisterBlock { ...@@ -74,51 +78,56 @@ impl RegisterBlock {
} }
} }
/// Sets clock source /// Gets current value
pub fn set_clock_source(&self, clk_source: SystClkSource) { pub fn get_current() -> u32 {
match clk_source { // NOTE(unsafe) atomic read with no side effects
SystClkSource::External => unsafe { self.csr.modify(|v| v & !SYST_CSR_CLKSOURCE) }, unsafe { (*Self::ptr()).cvr.read() }
SystClkSource::Core => unsafe { self.csr.modify(|v| v | SYST_CSR_CLKSOURCE) },
}
}
/// Checks if the counter wrapped (underflowed) since the last check
pub fn has_wrapped(&self) -> bool {
self.csr.read() & SYST_CSR_COUNTFLAG != 0
} }
/// Gets reload value /// Gets reload value
pub fn get_reload(&self) -> u32 { pub fn get_reload() -> u32 {
self.rvr.read() // NOTE(unsafe) atomic read with no side effects
unsafe { (*Self::ptr()).rvr.read() }
} }
/// Sets reload value /// Returns the reload value with which the counter would wrap once per 10
/// ms
/// ///
/// Valid values are between `1` and `0x00ffffff`. /// Returns `0` if the value is not known (e.g. because the clock can
pub fn set_reload(&self, value: u32) { /// change dynamically).
unsafe { self.rvr.write(value) } pub fn get_ticks_per_10ms() -> u32 {
// NOTE(unsafe) atomic read with no side effects
unsafe { (*Self::ptr()).calib.read() & SYST_COUNTER_MASK }
} }
/// Gets current value /// Checks if an external reference clock is available
pub fn get_current(&self) -> u32 { pub fn has_reference_clock() -> bool {
self.cvr.read() // NOTE(unsafe) atomic read with no side effects
unsafe { (*Self::ptr()).calib.read() & SYST_CALIB_NOREF == 0 }
} }
/// Clears current value to 0 /// Checks if the counter wrapped (underflowed) since the last check
/// ///
/// After calling `clear_current()`, the next call to `has_wrapped()` /// *NOTE* This takes `&mut self` because the read operation is side effectful and will clear
/// will return `false`. /// the bit of the read register.
pub fn clear_current(&self) { pub fn has_wrapped(&mut self) -> bool {
unsafe { self.cvr.write(0) } self.csr.read() & SYST_CSR_COUNTFLAG != 0
} }
/// Returns the reload value with which the counter would wrap once per 10 /// Checks if counter is enabled
/// ms
/// ///
/// Returns `0` if the value is not known (e.g. because the clock can /// *NOTE* This takes `&mut self` because the read operation is side effectful and can clear the
/// change dynamically). /// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`)
pub fn get_ticks_per_10ms(&self) -> u32 { pub fn is_counter_enabled(&mut self) -> bool {
self.calib.read() & SYST_COUNTER_MASK self.csr.read() & SYST_CSR_ENABLE != 0
}
/// Checks if SysTick interrupt is enabled
///
/// *NOTE* This takes `&mut self` because the read operation is side effectful and can clear the
/// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`)
pub fn is_interrupt_enabled(&mut self) -> bool {
self.csr.read() & SYST_CSR_TICKINT != 0
} }
/// Checks if the calibration value is precise /// Checks if the calibration value is precise
...@@ -126,12 +135,26 @@ impl RegisterBlock { ...@@ -126,12 +135,26 @@ impl RegisterBlock {
/// Returns `false` if using the reload value returned by /// Returns `false` if using the reload value returned by
/// `get_ticks_per_10ms()` may result in a period significantly deviating /// `get_ticks_per_10ms()` may result in a period significantly deviating
/// from 10 ms. /// from 10 ms.
pub fn is_precise(&self) -> bool { pub fn is_precise() -> bool {
self.calib.read() & SYST_CALIB_SKEW == 0 // NOTE(unsafe) atomic read with no side effects
unsafe { (*Self::ptr()).calib.read() & SYST_CALIB_SKEW == 0 }
} }
/// Checks if an external reference clock is available /// Sets clock source
pub fn has_reference_clock(&self) -> bool { pub fn set_clock_source(&mut self, clk_source: SystClkSource) {
self.calib.read() & SYST_CALIB_NOREF == 0 match clk_source {
SystClkSource::External => unsafe {
self.csr.modify(|v| v & !SYST_CSR_CLKSOURCE)
},
SystClkSource::Core => unsafe { self.csr.modify(|v| v | SYST_CSR_CLKSOURCE) },
}
} }
/// Sets reload value
///
/// Valid values are between `1` and `0x00ffffff`.
pub fn set_reload(&mut self, value: u32) {
unsafe { self.rvr.write(value) }
}
} }
...@@ -129,7 +129,6 @@ fn scb() { ...@@ -129,7 +129,6 @@ fn scb() {
assert_eq!(address(&scb.bfar), 0xE000_ED38); assert_eq!(address(&scb.bfar), 0xE000_ED38);
assert_eq!(address(&scb.afsr), 0xE000_ED3C); assert_eq!(address(&scb.afsr), 0xE000_ED3C);
assert_eq!(address(&scb.cpacr), 0xE000_ED88); assert_eq!(address(&scb.cpacr), 0xE000_ED88);
} }
#[test] #[test]
...@@ -140,7 +139,6 @@ fn syst() { ...@@ -140,7 +139,6 @@ fn syst() {
assert_eq!(address(&syst.rvr), 0xE000_E014); assert_eq!(address(&syst.rvr), 0xE000_E014);
assert_eq!(address(&syst.cvr), 0xE000_E018); assert_eq!(address(&syst.cvr), 0xE000_E018);
assert_eq!(address(&syst.calib), 0xE000_E01C); assert_eq!(address(&syst.calib), 0xE000_E01C);
} }
#[test] #[test]
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment