diff --git a/src/exception.rs b/src/exception.rs index 7203dfacb91e277eebb24822af35d6c136a1f174..b40cf1b5b231eaa4c47800ba98e982ce4c6d02ea 100644 --- a/src/exception.rs +++ b/src/exception.rs @@ -22,8 +22,7 @@ pub enum Exception { /// An interrupt Interrupt(u8), // Unreachable variant - #[doc(hidden)] - Reserved, + #[doc(hidden)] Reserved, } impl Exception { diff --git a/src/peripheral/cbp.rs b/src/peripheral/cbp.rs index 292ba0430ef12cfc58308d84280da3ae6229c8a7..590cb7be34a57d9310f70a057fe921b9298c607e 100644 --- a/src/peripheral/cbp.rs +++ b/src/peripheral/cbp.rs @@ -2,6 +2,8 @@ use volatile_register::WO; +use peripheral::CBP; + /// Register block #[repr(C)] pub struct RegisterBlock { @@ -33,10 +35,10 @@ const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS; const CBP_SW_SET_POS: u32 = 5; const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS; -impl RegisterBlock { +impl CBP { /// I-cache invalidate all to PoU #[inline] - pub fn iciallu(&self) { + pub fn iciallu(&mut self) { unsafe { self.iciallu.write(0); } @@ -44,7 +46,7 @@ impl RegisterBlock { /// I-cache invalidate by MVA to PoU #[inline] - pub fn icimvau(&self, mva: u32) { + pub fn icimvau(&mut self, mva: u32) { unsafe { self.icimvau.write(mva); } @@ -52,7 +54,7 @@ impl RegisterBlock { /// D-cache invalidate by MVA to PoC #[inline] - pub fn dcimvac(&self, mva: u32) { + pub fn dcimvac(&mut self, mva: u32) { unsafe { self.dcimvac.write(mva); } @@ -62,7 +64,7 @@ impl RegisterBlock { /// /// `set` is masked to be between 0 and 3, and `way` between 0 and 511. #[inline] - pub fn dcisw(&self, set: u16, way: u16) { + pub fn dcisw(&mut self, set: u16, way: u16) { // The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way // operations have a register data format which depends on the implementation's // associativity and number of sets. Specifically the 'way' and 'set' fields have @@ -82,7 +84,7 @@ impl RegisterBlock { /// D-cache clean by MVA to PoU #[inline] - pub fn dccmvau(&self, mva: u32) { + pub fn dccmvau(&mut self, mva: u32) { unsafe { self.dccmvau.write(mva); } @@ -90,7 +92,7 @@ impl RegisterBlock { /// D-cache clean by MVA to PoC #[inline] - pub fn dccmvac(&self, mva: u32) { + pub fn dccmvac(&mut self, mva: u32) { unsafe { self.dccmvac.write(mva); } @@ -100,7 +102,7 @@ impl RegisterBlock { /// /// `set` is masked to be between 0 and 3, and `way` between 0 and 511. #[inline] - pub fn dccsw(&self, set: u16, way: u16) { + pub fn dccsw(&mut self, set: u16, way: u16) { // See comment for dcisw() about the format here unsafe { self.dccsw.write( @@ -112,7 +114,7 @@ impl RegisterBlock { /// D-cache clean and invalidate by MVA to PoC #[inline] - pub fn dccimvac(&self, mva: u32) { + pub fn dccimvac(&mut self, mva: u32) { unsafe { self.dccimvac.write(mva); } @@ -122,7 +124,7 @@ impl RegisterBlock { /// /// `set` is masked to be between 0 and 3, and `way` between 0 and 511. 
#[inline] - pub fn dccisw(&self, set: u16, way: u16) { + pub fn dccisw(&mut self, set: u16, way: u16) { // See comment for dcisw() about the format here unsafe { self.dccisw.write( @@ -134,7 +136,7 @@ impl RegisterBlock { /// Branch predictor invalidate all #[inline] - pub fn bpiall(&self) { + pub fn bpiall(&mut self) { unsafe { self.bpiall.write(0); } diff --git a/src/peripheral/cpuid.rs b/src/peripheral/cpuid.rs index f0b7e6ec8c6fe285eb47c9ab7c7f078807768a2a..624d5c58bff30eab6b95344cf95a1004b185b7d8 100644 --- a/src/peripheral/cpuid.rs +++ b/src/peripheral/cpuid.rs @@ -4,6 +4,9 @@ use volatile_register::RO; #[cfg(any(armv7m, test))] use volatile_register::RW; +#[cfg(armv7m)] +use peripheral::CPUID; + /// Register block #[repr(C)] pub struct RegisterBlock { @@ -45,14 +48,14 @@ pub enum CsselrCacheType { } #[cfg(armv7m)] -impl RegisterBlock { +impl CPUID { /// Selects the current CCSIDR /// /// * `level`: the required cache level minus 1, e.g. 0 for L1, 1 for L2 /// * `ind`: select instruction cache or data/unified cache /// /// `level` is masked to be between 0 and 7. - pub fn select_cache(&self, level: u8, ind: CsselrCacheType) { + pub fn select_cache(&mut self, level: u8, ind: CsselrCacheType) { const CSSELR_IND_POS: u32 = 0; const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS; const CSSELR_LEVEL_POS: u32 = 1; @@ -67,7 +70,7 @@ } /// Returns the number of sets and ways in the selected cache - pub fn cache_num_sets_ways(&self, level: u8, ind: CsselrCacheType) -> (u16, u16) { + pub fn cache_num_sets_ways(&mut self, level: u8, ind: CsselrCacheType) -> (u16, u16) { const CCSIDR_NUMSETS_POS: u32 = 13; const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS; const CCSIDR_ASSOCIATIVITY_POS: u32 = 3; diff --git a/src/peripheral/dwt.rs b/src/peripheral/dwt.rs index b716369c76d324858d79ebbb240751829686ddc4..84f002e33a9d420ca74c2b5be5fda51db803ba4d 100644 --- a/src/peripheral/dwt.rs +++ b/src/peripheral/dwt.rs @@ -2,6 +2,8 @@ use volatile_register::{RO, RW, WO}; +use peripheral::DWT; + /// Register block #[repr(C)] pub struct RegisterBlock { @@ -30,13 +32,6 @@ pub struct RegisterBlock { pub lsr: RO<u32>, } -impl RegisterBlock { - /// Enables the cycle counter - pub fn enable_cycle_counter(&self) { - unsafe { self.ctrl.modify(|r| r | 1) } - } -} - /// Comparator #[repr(C)] pub struct Comparator { @@ -48,3 +43,16 @@ pub struct Comparator { pub function: RW<u32>, reserved: u32, } + +impl DWT { + /// Enables the cycle counter + pub fn enable_cycle_counter(&mut self) { + unsafe { self.ctrl.modify(|r| r | 1) } + } + + /// Returns the current clock cycle count + pub fn get_cycle_count() -> u32 { + // NOTE(unsafe) atomic read with no side effects + unsafe { (*Self::ptr()).cyccnt.read() } + } +} diff --git a/src/peripheral/mod.rs b/src/peripheral/mod.rs index d462bdb2f7312756aecff7a37bdd1ec142a32520..51e2f40b0bc3eeb126a2fbf27e9266ce6c7a3f11 100644 --- a/src/peripheral/mod.rs +++ b/src/peripheral/mod.rs @@ -1,5 +1,54 @@ //! Core peripherals //! +//! # API +//! +//! To use (most of) the peripheral API first you must get an *instance* of the peripheral. All the +//! core peripherals are modeled as singletons (there can only ever be, at most, one instance of +//! them at any given point in time) and the only way to get an instance of them is through the +//! [`Peripherals::take`](struct.Peripherals.html#method.take) method. +//! +//! ``` ignore +//! fn main() { +//! let mut peripherals = Peripherals::take().unwrap(); +//! peripherals.DWT.enable_cycle_counter(); +//! } +//! ``` +//! +//! 
This method can only be successfully called *once* -- this is why the method returns an +//! `Option`. Subsequent calls to the method will result in a `None` value being returned. +//! +//! A part of the peripheral API doesn't require access to a peripheral instance. This part of the +//! API is provided as static methods on the peripheral types. One example is the +//! [`DWT::get_cycle_count`](struct.DWT.html#method.get_cycle_count) method. +//! +//! ``` ignore +//! fn main() { +//! { +//! let mut peripherals = Peripherals::take().unwrap(); +//! peripherals.DWT.enable_cycle_counter(); +//! } // all the peripheral singletons are destroyed here +//! +//! // but this method can be called without a DWT instance +//! let cyccnt = DWT::get_cycle_count(); +//! } +//! ``` +//! +//! The singleton property can be *unsafely* bypassed using the `ptr` static method which is +//! available on all the peripheral types. This method is a useful building block for implementing +//! higher level and safe abstractions. +//! +//! ``` ignore +//! fn main() { +//! { +//! let mut peripherals = Peripherals::take().unwrap(); +//! peripherals.DWT.enable_cycle_counter(); +//! } // all the peripheral singletons are destroyed here +//! +//! // actually safe because this is an atomic read with no side effects +//! let cyccnt = unsafe { (*DWT::ptr()).cyccnt.read() }; +//! } +//! ``` +//! //! # References //! //! - ARMv7-M Architecture Reference Manual (Issue E.b) - Chapter B3 @@ -80,7 +129,7 @@ impl Peripherals { }) } - /// Unchecked version of `Peripherals::steal` + /// Unchecked version of `Peripherals::take` pub unsafe fn steal() -> Self { debug_assert!(!CORE_PERIPHERALS); @@ -136,6 +185,12 @@ pub struct CBP { #[cfg(armv7m)] impl CBP { + pub(crate) unsafe fn new() -> Self { + CBP { + _marker: PhantomData, + } + } + /// Returns a pointer to the register block pub fn ptr() -> *const self::cbp::RegisterBlock { 0xE000_EF50 as *const _ diff --git a/src/peripheral/nvic.rs b/src/peripheral/nvic.rs index 74c6625ce75caf457ec9f089167512904f702df2..8c55a879b3f374cf873050b66c33e88db513a557 100644 --- a/src/peripheral/nvic.rs +++ b/src/peripheral/nvic.rs @@ -2,6 +2,7 @@ use volatile_register::{RO, RW}; +use peripheral::NVIC; use interrupt::Nr; /// Register block @@ -52,9 +53,9 @@ pub struct RegisterBlock { pub ipr: [RW<u32>; 8], } -impl RegisterBlock { +impl NVIC { /// Clears `interrupt`'s pending state - pub fn clear_pending<I>(&self, interrupt: I) + pub fn clear_pending<I>(&mut self, interrupt: I) where I: Nr, { @@ -64,7 +65,7 @@ } /// Disables `interrupt` - pub fn disable<I>(&self, interrupt: I) + pub fn disable<I>(&mut self, interrupt: I) where I: Nr, { @@ -74,7 +75,7 @@ } /// Enables `interrupt` - pub fn enable<I>(&self, interrupt: I) + pub fn enable<I>(&mut self, interrupt: I) where I: Nr, { @@ -83,64 +84,69 @@ unsafe { self.iser[usize::from(nr / 32)].write(1 << (nr % 32)) } } - /// Gets the "priority" of `interrupt` + /// Returns the NVIC priority of `interrupt` /// - /// NOTE NVIC encodes priority in the highest bits of a byte so values like - /// `1` and `2` have the same priority. Also for NVIC priorities, a lower - /// value (e.g. `16`) has higher priority than a larger value (e.g. `32`). - pub fn get_priority<I>(&self, interrupt: I) -> u8 + /// *NOTE* NVIC encodes priority in the highest bits of a byte so values like `1` and `2` map + /// to the same priority. Also for NVIC priorities, a lower value (e.g. `16`) has higher priority + /// (urgency) than a larger value (e.g. `32`). 
+ pub fn get_priority<I>(interrupt: I) -> u8 where I: Nr, { #[cfg(not(armv6m))] { let nr = interrupt.nr(); - self.ipr[usize::from(nr)].read() + // NOTE(unsafe) atomic read with no side effects + unsafe { (*Self::ptr()).ipr[usize::from(nr)].read() } } #[cfg(armv6m)] { - let ipr_n = self.ipr[Self::ipr_index(&interrupt)].read(); - let prio = (ipr_n >> Self::ipr_shift(&interrupt)) & 0x000000ff; + // NOTE(unsafe) atomic read with no side effects + let ipr_n = unsafe { (*Self::ptr()).ipr[Self::ipr_index(&interrupt)].read() }; + let prio = (ipr_n >> Self::ipr_shift(&interrupt)) & 0x000000ff; prio as u8 } } /// Is `interrupt` active or pre-empted and stacked - pub fn is_active<I>(&self, interrupt: I) -> bool + pub fn is_active<I>(interrupt: I) -> bool where I: Nr, { let nr = interrupt.nr(); let mask = 1 << (nr % 32); - (self.iabr[usize::from(nr / 32)].read() & mask) == mask + // NOTE(unsafe) atomic read with no side effects + unsafe { ((*Self::ptr()).iabr[usize::from(nr / 32)].read() & mask) == mask } } /// Checks if `interrupt` is enabled - pub fn is_enabled<I>(&self, interrupt: I) -> bool + pub fn is_enabled<I>(interrupt: I) -> bool where I: Nr, { let nr = interrupt.nr(); let mask = 1 << (nr % 32); - (self.iser[usize::from(nr / 32)].read() & mask) == mask + // NOTE(unsafe) atomic read with no side effects + unsafe { ((*Self::ptr()).iser[usize::from(nr / 32)].read() & mask) == mask } } /// Checks if `interrupt` is pending - pub fn is_pending<I>(&self, interrupt: I) -> bool + pub fn is_pending<I>(interrupt: I) -> bool where I: Nr, { let nr = interrupt.nr(); let mask = 1 << (nr % 32); - (self.ispr[usize::from(nr / 32)].read() & mask) == mask + // NOTE(unsafe) atomic read with no side effects + unsafe { ((*Self::ptr()).ispr[usize::from(nr / 32)].read() & mask) == mask } } /// Forces `interrupt` into pending state - pub fn set_pending<I>(&self, interrupt: I) + pub fn set_pending<I>(&mut self, interrupt: I) where I: Nr, { @@ -151,15 +157,12 @@ impl RegisterBlock { /// Sets the "priority" of `interrupt` to `prio` /// - /// NOTE See `get_priority` method for an explanation of how NVIC priorities - /// work. + /// *NOTE* See [`get_priority`](struct.NVIC.html#method.get_priority) method for an explanation + /// of how NVIC priorities work. /// - /// On ARMv6-M, updating an interrupt priority requires a read-modify-write - /// operation, which is not atomic. This is inherently racy, so please - /// ensure proper access to this method. - /// - /// On ARMv7-M, this method is atomic. + /// On ARMv6-M, updating an interrupt priority requires a read-modify-write operation. On + /// ARMv7-M, the operation is performed in a single atomic write operation. 
+ pub unsafe fn set_priority<I>(&mut self, interrupt: I, prio: u8) where I: Nr, { @@ -181,12 +184,18 @@ impl RegisterBlock { } #[cfg(armv6m)] - fn ipr_index<I>(interrupt: &I) -> usize where I: Nr { + fn ipr_index<I>(interrupt: &I) -> usize + where + I: Nr, + { usize::from(interrupt.nr()) / 4 } #[cfg(armv6m)] - fn ipr_shift<I>(interrupt: &I) -> usize where I: Nr { + fn ipr_shift<I>(interrupt: &I) -> usize + where + I: Nr, + { (usize::from(interrupt.nr()) % 4) * 8 } } diff --git a/src/peripheral/scb.rs b/src/peripheral/scb.rs index 2a986182b280e3180cabe85f81bb774e14473646..9a922c76700ae0be4d38a7e99d9d3e344026802a 100644 --- a/src/peripheral/scb.rs +++ b/src/peripheral/scb.rs @@ -2,10 +2,12 @@ use volatile_register::RW; +#[cfg(any(armv7m, has_fpu))] +use super::{CBP, SCB}; #[cfg(armv7m)] -use super::CBP; +use super::CPUID; #[cfg(armv7m)] -use super::cpuid::{self, CsselrCacheType}; +use super::cpuid::CsselrCacheType; /// Register block #[repr(C)] @@ -64,10 +66,22 @@ mod fpu_consts { use self::fpu_consts::*; #[cfg(has_fpu)] -impl RegisterBlock { +impl SCB { + /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)` + pub fn disable_fpu(&mut self) { + self.set_fpu_access_mode(FpuAccessMode::Disabled) + } + + /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)` + pub fn enable_fpu(&mut self) { + self.set_fpu_access_mode(FpuAccessMode::Enabled) + } + /// Gets FPU access mode - pub fn fpu_access_mode(&self) -> FpuAccessMode { - let cpacr = self.cpacr.read(); + pub fn fpu_access_mode() -> FpuAccessMode { + // NOTE(unsafe) atomic read operation with no side effects + let cpacr = unsafe { (*Self::ptr()).cpacr.read() }; + if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER { FpuAccessMode::Enabled } else if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE { @@ -83,7 +97,7 @@ impl RegisterBlock { /// floating-point arguments or have any floating-point local variables. Because the compiler /// might inline such a function into a caller that does have floating-point arguments or /// variables, any such function must be also marked #[inline(never)]. 
- pub fn set_fpu_access_mode(&self, mode: FpuAccessMode) { + pub fn set_fpu_access_mode(&mut self, mode: FpuAccessMode) { let mut cpacr = self.cpacr.read() & !SCB_CPACR_FPU_MASK; match mode { FpuAccessMode::Disabled => (), @@ -92,16 +106,6 @@ impl RegisterBlock { } unsafe { self.cpacr.write(cpacr) } } - - /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)` - pub fn enable_fpu(&self) { - self.set_fpu_access_mode(FpuAccessMode::Enabled) - } - - /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)` - pub fn disable_fpu(&self) { - self.set_fpu_access_mode(FpuAccessMode::Disabled) - } } #[cfg(armv7m)] @@ -114,17 +118,17 @@ mod scb_consts { use self::scb_consts::*; #[cfg(armv7m)] -impl RegisterBlock { +impl SCB { /// Enables I-Cache if currently disabled #[inline] - pub fn enable_icache(&self) { + pub fn enable_icache(&mut self) { // Don't do anything if ICache is already enabled - if self.icache_enabled() { + if Self::icache_enabled() { return; } - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &*CBP::ptr() }; + // NOTE(unsafe) All CBP registers are write-only and stateless + let mut cbp = unsafe { CBP::new() }; // Invalidate I-Cache cbp.iciallu(); @@ -138,14 +142,14 @@ impl RegisterBlock { /// Disables I-Cache if currently enabled #[inline] - pub fn disable_icache(&self) { + pub fn disable_icache(&mut self) { // Don't do anything if ICache is already disabled - if !self.icache_enabled() { + if !Self::icache_enabled() { return; } - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &*CBP::ptr() }; + // NOTE(unsafe) All CBP registers are write-only and stateless + let mut cbp = unsafe { CBP::new() }; // Disable I-Cache unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) }; @@ -159,17 +163,19 @@ impl RegisterBlock { /// Returns whether the I-Cache is currently enabled #[inline] - pub fn icache_enabled(&self) -> bool { + pub fn icache_enabled() -> bool { ::asm::dsb(); ::asm::isb(); - self.ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK + + // NOTE(unsafe) atomic read with no side effects + unsafe { (*Self::ptr()).ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK } } /// Invalidates I-Cache #[inline] - pub fn invalidate_icache(&self) { - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &*CBP::ptr() }; + pub fn invalidate_icache(&mut self) { + // NOTE(unsafe) All CBP registers are write-only and stateless + let mut cbp = unsafe { CBP::new() }; // Invalidate I-Cache cbp.iciallu(); @@ -180,9 +186,9 @@ impl RegisterBlock { /// Enables D-cache if currently disabled #[inline] - pub fn enable_dcache(&self, cpuid: &cpuid::RegisterBlock) { + pub fn enable_dcache(&mut self, cpuid: &mut CPUID) { // Don't do anything if DCache is already enabled - if self.dcache_enabled() { + if Self::dcache_enabled() { return; } @@ -198,9 +204,9 @@ impl RegisterBlock { /// Disables D-cache if currently enabled #[inline] - pub fn disable_dcache(&self, cpuid: &cpuid::RegisterBlock) { + pub fn disable_dcache(&mut self, cpuid: &mut CPUID) { // Don't do anything if DCache is already disabled - if !self.dcache_enabled() { + if !Self::dcache_enabled() { return; } @@ -213,10 +219,12 @@ impl RegisterBlock { /// Returns whether the D-Cache is currently enabled #[inline] - pub fn dcache_enabled(&self) -> bool { + pub fn dcache_enabled() -> bool { ::asm::dsb(); ::asm::isb(); - self.ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK + + // NOTE(unsafe) atomic read with no side effects + unsafe { (*Self::ptr()).ccr.read() & 
SCB_CCR_DC_MASK == SCB_CCR_DC_MASK } } /// Invalidates D-cache @@ -225,9 +233,9 @@ impl RegisterBlock { /// stack, depending on optimisations, breaking returning to the call point. /// It's used immediately before enabling the dcache, but not exported publicly. #[inline] - fn invalidate_dcache(&self, cpuid: &cpuid::RegisterBlock) { - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &*CBP::ptr() }; + fn invalidate_dcache(&mut self, cpuid: &mut CPUID) { + // NOTE(unsafe) All CBP registers are write-only and stateless + let mut cbp = unsafe { CBP::new() }; // Read number of sets and ways let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); @@ -245,9 +253,9 @@ impl RegisterBlock { /// Cleans D-cache #[inline] - pub fn clean_dcache(&self, cpuid: &cpuid::RegisterBlock) { - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &*CBP::ptr() }; + pub fn clean_dcache(&mut self, cpuid: &mut CPUID) { + // NOTE(unsafe) All CBP registers are write-only and stateless + let mut cbp = unsafe { CBP::new() }; // Read number of sets and ways let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); @@ -264,9 +272,9 @@ impl RegisterBlock { /// Cleans and invalidates D-cache #[inline] - pub fn clean_invalidate_dcache(&self, cpuid: &cpuid::RegisterBlock) { - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &*CBP::ptr() }; + pub fn clean_invalidate_dcache(&mut self, cpuid: &mut CPUID) { + // NOTE(unsafe) All CBP registers are write-only and stateless + let mut cbp = unsafe { CBP::new() }; // Read number of sets and ways let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); @@ -289,14 +297,14 @@ impl RegisterBlock { /// Invalidates cache starting from the lowest 32-byte aligned address represented by `addr`, /// in blocks of 32 bytes until at least `size` bytes have been invalidated. #[inline] - pub fn invalidate_dcache_by_address(&self, addr: usize, size: usize) { + pub fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) { // No-op zero sized operations if size == 0 { return; } - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &*CBP::ptr() }; + // NOTE(unsafe) All CBP registers are write-only and stateless + let mut cbp = unsafe { CBP::new() }; ::asm::dsb(); @@ -323,14 +331,14 @@ impl RegisterBlock { /// Cleans cache starting from the lowest 32-byte aligned address represented by `addr`, /// in blocks of 32 bytes until at least `size` bytes have been cleaned. #[inline] - pub fn clean_dcache_by_address(&self, addr: usize, size: usize) { + pub fn clean_dcache_by_address(&mut self, addr: usize, size: usize) { // No-op zero sized operations if size == 0 { return; } - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &*CBP::ptr() }; + // NOTE(unsafe) All CBP registers are write-only and stateless + let mut cbp = unsafe { CBP::new() }; ::asm::dsb(); @@ -358,14 +366,14 @@ impl RegisterBlock { /// by `addr`, in blocks of 32 bytes until at least `size` bytes have been cleaned and /// invalidated. 
#[inline] - pub fn clean_invalidate_dcache_by_address(&self, addr: usize, size: usize) { + pub fn clean_invalidate_dcache_by_address(&mut self, addr: usize, size: usize) { // No-op zero sized operations if size == 0 { return; } - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &*CBP::ptr() }; + // NOTE(unsafe) All CBP registers are write-only and stateless + let mut cbp = unsafe { CBP::new() }; ::asm::dsb(); diff --git a/src/peripheral/syst.rs b/src/peripheral/syst.rs index 3f96208629cf72ee45665b9153e25212c1925186..e02275d35a32909c7b40a8857a71ea89b0e83faf 100644 --- a/src/peripheral/syst.rs +++ b/src/peripheral/syst.rs @@ -2,6 +2,8 @@ use volatile_register::{RO, RW}; +use peripheral::SYST; + /// Register block #[repr(C)] pub struct RegisterBlock { @@ -34,39 +36,41 @@ const SYST_CSR_COUNTFLAG: u32 = 1 << 16; const SYST_CALIB_SKEW: u32 = 1 << 30; const SYST_CALIB_NOREF: u32 = 1 << 31; -impl RegisterBlock { - /// Checks if counter is enabled - pub fn is_counter_enabled(&self) -> bool { - self.csr.read() & SYST_CSR_ENABLE != 0 - } - - /// Enables counter - pub fn enable_counter(&self) { - unsafe { self.csr.modify(|v| v | SYST_CSR_ENABLE) } +impl SYST { + /// Clears current value to 0 + /// + /// After calling `clear_current()`, the next call to `has_wrapped()` + /// will return `false`. + pub fn clear_current(&mut self) { + unsafe { self.cvr.write(0) } } /// Disables counter - pub fn disable_counter(&self) { + pub fn disable_counter(&mut self) { unsafe { self.csr.modify(|v| v & !SYST_CSR_ENABLE) } } - /// Checks if SysTick interrupt is enabled - pub fn is_interrupt_enabled(&self) -> bool { - self.csr.read() & SYST_CSR_TICKINT != 0 + /// Disables SysTick interrupt + pub fn disable_interrupt(&mut self) { + unsafe { self.csr.modify(|v| v & !SYST_CSR_TICKINT) } } - /// Enables SysTick interrupt - pub fn enable_interrupt(&self) { - unsafe { self.csr.modify(|v| v | SYST_CSR_TICKINT) } + /// Enables counter + pub fn enable_counter(&mut self) { + unsafe { self.csr.modify(|v| v | SYST_CSR_ENABLE) } } - /// Disables SysTick interrupt - pub fn disable_interrupt(&self) { - unsafe { self.csr.modify(|v| v & !SYST_CSR_TICKINT) } + /// Enables SysTick interrupt + pub fn enable_interrupt(&mut self) { + unsafe { self.csr.modify(|v| v | SYST_CSR_TICKINT) } } /// Gets clock source - pub fn get_clock_source(&self) -> SystClkSource { + /// + /// *NOTE* This takes `&mut self` because the read operation is side effectful and can clear the + /// bit that indicates that the timer has wrapped (cf. 
`SYST.has_wrapped`) + pub fn get_clock_source(&mut self) -> SystClkSource { + // NOTE(unsafe) atomic read with no side effects let clk_source_bit = self.csr.read() & SYST_CSR_CLKSOURCE != 0; match clk_source_bit { false => SystClkSource::External, @@ -74,51 +78,56 @@ impl RegisterBlock { } } - /// Sets clock source - pub fn set_clock_source(&self, clk_source: SystClkSource) { - match clk_source { - SystClkSource::External => unsafe { self.csr.modify(|v| v & !SYST_CSR_CLKSOURCE) }, - SystClkSource::Core => unsafe { self.csr.modify(|v| v | SYST_CSR_CLKSOURCE) }, - } - } - - /// Checks if the counter wrapped (underflowed) since the last check - pub fn has_wrapped(&self) -> bool { - self.csr.read() & SYST_CSR_COUNTFLAG != 0 + /// Gets current value + pub fn get_current() -> u32 { + // NOTE(unsafe) atomic read with no side effects + unsafe { (*Self::ptr()).cvr.read() } } /// Gets reload value - pub fn get_reload(&self) -> u32 { - self.rvr.read() + pub fn get_reload() -> u32 { + // NOTE(unsafe) atomic read with no side effects + unsafe { (*Self::ptr()).rvr.read() } } - /// Sets reload value + /// Returns the reload value with which the counter would wrap once per 10 + /// ms /// - /// Valid values are between `1` and `0x00ffffff`. - pub fn set_reload(&self, value: u32) { - unsafe { self.rvr.write(value) } + /// Returns `0` if the value is not known (e.g. because the clock can + /// change dynamically). + pub fn get_ticks_per_10ms() -> u32 { + // NOTE(unsafe) atomic read with no side effects + unsafe { (*Self::ptr()).calib.read() & SYST_COUNTER_MASK } } - /// Gets current value - pub fn get_current(&self) -> u32 { - self.cvr.read() + /// Checks if an external reference clock is available + pub fn has_reference_clock() -> bool { + // NOTE(unsafe) atomic read with no side effects + unsafe { (*Self::ptr()).calib.read() & SYST_CALIB_NOREF == 0 } } - /// Clears current value to 0 + /// Checks if the counter wrapped (underflowed) since the last check /// - /// After calling `clear_current()`, the next call to `has_wrapped()` - /// will return `false`. - pub fn clear_current(&self) { - unsafe { self.cvr.write(0) } + /// *NOTE* This takes `&mut self` because the read operation is side effectful and will clear + /// the bit of the read register. + pub fn has_wrapped(&mut self) -> bool { + self.csr.read() & SYST_CSR_COUNTFLAG != 0 } - /// Returns the reload value with which the counter would wrap once per 10 - /// ms + /// Checks if counter is enabled /// - /// Returns `0` if the value is not known (e.g. because the clock can - /// change dynamically). - pub fn get_ticks_per_10ms(&self) -> u32 { - self.calib.read() & SYST_COUNTER_MASK + /// *NOTE* This takes `&mut self` because the read operation is side effectful and can clear the + /// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`) + pub fn is_counter_enabled(&mut self) -> bool { + self.csr.read() & SYST_CSR_ENABLE != 0 + } + + /// Checks if SysTick interrupt is enabled + /// + /// *NOTE* This takes `&mut self` because the read operation is side effectful and can clear the + /// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`) + pub fn is_interrupt_enabled(&mut self) -> bool { + self.csr.read() & SYST_CSR_TICKINT != 0 } /// Checks if the calibration value is precise @@ -126,12 +135,26 @@ impl RegisterBlock { /// Returns `false` if using the reload value returned by /// `get_ticks_per_10ms()` may result in a period significantly deviating /// from 10 ms. 
- pub fn is_precise(&self) -> bool { - self.calib.read() & SYST_CALIB_SKEW == 0 + pub fn is_precise() -> bool { + // NOTE(unsafe) atomic read with no side effects + unsafe { (*Self::ptr()).calib.read() & SYST_CALIB_SKEW == 0 } } - /// Checks if an external reference clock is available - pub fn has_reference_clock(&self) -> bool { - self.calib.read() & SYST_CALIB_NOREF == 0 + /// Sets clock source + pub fn set_clock_source(&mut self, clk_source: SystClkSource) { + match clk_source { + SystClkSource::External => unsafe { + self.csr.modify(|v| v & !SYST_CSR_CLKSOURCE) + }, + SystClkSource::Core => unsafe { self.csr.modify(|v| v | SYST_CSR_CLKSOURCE) }, + } } + + /// Sets reload value + /// + /// Valid values are between `1` and `0x00ffffff`. + pub fn set_reload(&mut self, value: u32) { + unsafe { self.rvr.write(value) } + } + } diff --git a/src/peripheral/test.rs b/src/peripheral/test.rs index d50ece28554f394b75167464127907badc4da226..39f7de5aabed51534594911b8e2d6c2bea6ce2ac 100644 --- a/src/peripheral/test.rs +++ b/src/peripheral/test.rs @@ -129,7 +129,6 @@ fn scb() { assert_eq!(address(&scb.bfar), 0xE000_ED38); assert_eq!(address(&scb.afsr), 0xE000_ED3C); assert_eq!(address(&scb.cpacr), 0xE000_ED88); - } #[test] @@ -140,7 +139,6 @@ fn syst() { assert_eq!(address(&syst.rvr), 0xE000_E014); assert_eq!(address(&syst.cvr), 0xE000_E018); assert_eq!(address(&syst.calib), 0xE000_E01C); - } #[test]
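Usage sketch (not part of this diff): how downstream code might drive the revised ownership-based API, with instance methods taking `&mut self` and side-effect-free reads exposed as static methods. The core-clock SysTick source, the `8_000_000` reload value and the busy-wait loop are illustrative assumptions only, not something this change prescribes.

``` ignore
extern crate cortex_m;

use cortex_m::peripheral::syst::SystClkSource;
use cortex_m::peripheral::{Peripherals, DWT};

fn main() {
    // `take()` hands out the singleton at most once; bind it as `mut` because
    // the configuration methods now take `&mut self`.
    let mut p = Peripherals::take().unwrap();

    p.DWT.enable_cycle_counter();

    // Assumed 8 MHz core clock: one SysTick wrap per second.
    p.SYST.set_clock_source(SystClkSource::Core);
    p.SYST.set_reload(8_000_000);
    p.SYST.clear_current();
    p.SYST.enable_counter();

    // Busy-wait for one wrap; `has_wrapped` needs `&mut self` because reading
    // the CSR clears the COUNTFLAG bit.
    while !p.SYST.has_wrapped() {}

    // Static method: no instance required, atomic read with no side effects.
    let _cycles = DWT::get_cycle_count();
}
```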