Commit 37c8b5b6 authored by Jorge Aparicio

create our own AtomicUsize

which works on thumbv6m-none-eabi and probably other targets with max-atomic-width = 0
parent 978f0ee2
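The change relies on the fact that a single-producer single-consumer queue needs no compare-and-swap: each index (`head`, `tail`) is written by exactly one side, so plain atomic loads and stores with acquire/release ordering are enough. Below is a minimal sketch of that contract, written with `core::sync::atomic` purely for illustration; the hand-rolled type added in this commit provides the same operations via intrinsics on targets where libcore exposes no atomics.

// Illustration only -- not part of this commit. In an SPSC queue each index has a
// single writer, so acquire/release loads and stores suffice; no CAS is required.
use core::sync::atomic::{AtomicUsize, Ordering};

static TAIL: AtomicUsize = AtomicUsize::new(0);

// Producer side: write the element into its buffer slot, *then* publish the new tail.
fn publish(next_tail: usize) {
    TAIL.store(next_tail, Ordering::Release);
}

// Consumer side: the acquire load makes everything written before the matching
// release store visible to this side.
fn observe() -> usize {
    TAIL.load(Ordering::Acquire)
}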
......@@ -144,11 +144,10 @@
//! is_send::<Vec<NotSend, [NotSend; 4]>>();
//! ```
#![cfg_attr(not(target_has_atomic = "ptr"), feature(asm))]
#![cfg_attr(target_has_atomic = "ptr", feature(const_atomic_usize_new))]
#![deny(missing_docs)]
#![feature(cfg_target_has_atomic)]
#![feature(const_fn)]
#![feature(const_unsafe_cell_new)]
#![feature(core_intrinsics)]
#![feature(shared)]
#![feature(unsize)]
#![no_std]
...
//! Ring buffer
use core::cell::UnsafeCell;
use core::marker::{PhantomData, Unsize};
use core::ptr;
#[cfg(target_has_atomic = "ptr")]
use core::sync::atomic::{AtomicUsize, Ordering};
use core::{intrinsics, ptr};
use untagged_option::UntaggedOption;
......@@ -13,6 +12,36 @@ pub use self::spsc::{Consumer, Producer};
mod spsc;
// AtomicUsize with no CAS operations that works on targets that have "no atomic support" according
// to their specification
struct AtomicUsize {
    v: UnsafeCell<usize>,
}

impl AtomicUsize {
    pub const fn new(v: usize) -> AtomicUsize {
        AtomicUsize {
            v: UnsafeCell::new(v),
        }
    }

    pub fn get_mut(&mut self) -> &mut usize {
        unsafe { &mut *self.v.get() }
    }

    pub fn load_acquire(&self) -> usize {
        unsafe { intrinsics::atomic_load_acq(self.v.get()) }
    }

    pub fn load_relaxed(&self) -> usize {
        unsafe { intrinsics::atomic_load_relaxed(self.v.get()) }
    }

    pub fn store_release(&self, val: usize) {
        unsafe { intrinsics::atomic_store_rel(self.v.get(), val) }
    }
}
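For reference, the methods above mirror the `core::sync::atomic` operations used by the cfg-gated code that this commit removes. A hedged illustration of the correspondence (not part of this commit):

// Illustration only. On targets that do have atomic support, the hand-rolled
// methods behave like these standard operations:
use core::sync::atomic::{AtomicUsize as CoreAtomicUsize, Ordering};

fn correspondence(a: &CoreAtomicUsize) {
    let _ = a.load(Ordering::Relaxed); // ~ load_relaxed()
    let _ = a.load(Ordering::Acquire); // ~ load_acquire()
    a.store(1, Ordering::Release);     // ~ store_release(1)
}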
/// A statically allocated ring buffer backed by an array `A`
pub struct RingBuffer<T, A>
where
......@@ -22,12 +51,10 @@ where
    _marker: PhantomData<[T]>,

    // this is from where we dequeue items
    #[cfg(target_has_atomic = "ptr")] head: AtomicUsize,
    #[cfg(not(target_has_atomic = "ptr"))] head: usize,
    head: AtomicUsize,

    // this is where we enqueue new items
    #[cfg(target_has_atomic = "ptr")] tail: AtomicUsize,
    #[cfg(not(target_has_atomic = "ptr"))] tail: usize,
    tail: AtomicUsize,

    buffer: UntaggedOption<A>,
}
......@@ -42,14 +69,8 @@ where
        RingBuffer {
            _marker: PhantomData,
            buffer: UntaggedOption::none(),
            #[cfg(target_has_atomic = "ptr")]
            head: AtomicUsize::new(0),
            #[cfg(not(target_has_atomic = "ptr"))]
            head: 0,
            #[cfg(target_has_atomic = "ptr")]
            tail: AtomicUsize::new(0),
            #[cfg(not(target_has_atomic = "ptr"))]
            tail: 0,
        }
    }
......@@ -63,15 +84,8 @@ where
    pub fn dequeue(&mut self) -> Option<T> {
        let n = self.capacity() + 1;

        #[cfg(target_has_atomic = "ptr")]
        let head = self.head.get_mut();
        #[cfg(not(target_has_atomic = "ptr"))]
        let head = &mut self.head;
        #[cfg(target_has_atomic = "ptr")]
        let tail = self.tail.get_mut();
        #[cfg(not(target_has_atomic = "ptr"))]
        let tail = &mut self.tail;

        let buffer: &[T] = unsafe { self.buffer.as_ref() };
......@@ -90,15 +104,8 @@ where
    pub fn enqueue(&mut self, item: T) -> Result<(), BufferFullError> {
        let n = self.capacity() + 1;

        #[cfg(target_has_atomic = "ptr")]
        let head = self.head.get_mut();
        #[cfg(not(target_has_atomic = "ptr"))]
        let head = &mut self.head;
        #[cfg(target_has_atomic = "ptr")]
        let tail = self.tail.get_mut();
        #[cfg(not(target_has_atomic = "ptr"))]
        let tail = &mut self.tail;

        let buffer: &mut [T] = unsafe { self.buffer.as_mut() };
......@@ -116,15 +123,8 @@ where
    /// Returns the number of elements in the queue
    pub fn len(&self) -> usize {
        #[cfg(target_has_atomic = "ptr")]
        let head = self.head.load(Ordering::Relaxed);
        #[cfg(not(target_has_atomic = "ptr"))]
        let head = self.head;
        #[cfg(target_has_atomic = "ptr")]
        let tail = self.tail.load(Ordering::Relaxed);
        #[cfg(not(target_has_atomic = "ptr"))]
        let tail = self.tail;
        let head = self.head.load_relaxed();
        let tail = self.tail.load_relaxed();

        if head > tail {
            head - tail
......@@ -221,10 +221,7 @@ where
    fn next(&mut self) -> Option<&'a T> {
        if self.index < self.len {
            #[cfg(not(target_has_atomic = "ptr"))]
            let head = self.rb.head;
            #[cfg(target_has_atomic = "ptr")]
            let head = self.rb.head.load(Ordering::Relaxed);
            let head = self.rb.head.load_relaxed();

            let buffer: &[T] = unsafe { self.rb.buffer.as_ref() };
            let ptr = buffer.as_ptr();
......@@ -246,10 +243,7 @@ where
    fn next(&mut self) -> Option<&'a mut T> {
        if self.index < self.len {
            #[cfg(not(target_has_atomic = "ptr"))]
            let head = self.rb.head;
            #[cfg(target_has_atomic = "ptr")]
            let head = self.rb.head.load(Ordering::Relaxed);
            let head = self.rb.head.load_relaxed();

            let capacity = self.rb.capacity() + 1;
            let buffer: &mut [T] = unsafe { self.rb.buffer.as_mut() };
...
use core::ptr::{self, Shared};
use core::marker::Unsize;
#[cfg(target_has_atomic = "ptr")]
use core::sync::atomic::Ordering;
use BufferFullError;
use ring_buffer::RingBuffer;
// Compiler barrier
#[cfg(not(target_has_atomic = "ptr"))]
macro_rules! barrier {
    () => {
        unsafe { asm!("" ::: "memory") }
    }
}
impl<T, A> RingBuffer<T, A>
where
    A: Unsize<[T]>,
{
    /// Splits a statically allocated ring buffer into producer and consumer end points
    ///
    /// **Warning**: the current single-producer single-consumer implementation only supports
    /// multi-core systems where `cfg(target_has_atomic = "ptr")` holds for all the cores. For
    /// example, a dual-core system where one core is a Cortex-M0 and the other is a Cortex-M3
    /// is not supported, because the Cortex-M0 (`thumbv6m-none-eabi`) doesn't satisfy
    /// `cfg(target_has_atomic = "ptr")`. All single-core systems are supported.
    pub fn split(&'static mut self) -> (Producer<T, A>, Consumer<T, A>) {
        (
            Producer {
......@@ -52,44 +36,17 @@ where
    A: Unsize<[T]>,
{
    /// Returns the item in the front of the queue, or `None` if the queue is empty
    #[cfg(target_has_atomic = "ptr")]
    pub fn dequeue(&mut self) -> Option<T> {
        let rb = unsafe { self.rb.as_ref() };

        let tail = rb.tail.load(Ordering::Relaxed);
        let head = rb.head.load(Ordering::Acquire);

        let n = rb.capacity() + 1;
        let buffer: &[T] = unsafe { rb.buffer.as_ref() };

        let tail = rb.tail.load_relaxed();
        let head = rb.head.load_acquire();

        if head != tail {
            let item = unsafe { ptr::read(buffer.get_unchecked(head)) };
            rb.head.store((head + 1) % n, Ordering::Release);
            Some(item)
        } else {
            None
        }
    }

    /// Returns the item in the front of the queue, or `None` if the queue is empty
    #[cfg(not(target_has_atomic = "ptr"))]
    pub fn dequeue(&mut self) -> Option<T> {
        let rb = unsafe { self.rb.as_mut() };

        let n = rb.capacity() + 1;
        let buffer: &[T] = unsafe { rb.buffer.as_ref() };

        // NOTE(volatile) the value of `tail` can change at any time in the execution context of
        // the consumer, so we inform the compiler of this by using a volatile load
        if rb.head != unsafe { ptr::read_volatile(&rb.tail) } {
            let item = unsafe { ptr::read(buffer.get_unchecked(rb.head)) };

            // NOTE(barrier!) this ensures that the compiler won't move the increment of the
            // `head` pointer *before* the instructions that read the data -- note that this
            // won't be enough on architectures that allow out-of-order execution
            barrier!();

            rb.head = (rb.head + 1) % n;
            rb.head.store_release((head + 1) % n);
            Some(item)
        } else {
            None
......@@ -121,50 +78,20 @@ where
    /// Adds an `item` to the end of the queue
    ///
    /// Returns `BufferFullError` if the queue is full
    #[cfg(target_has_atomic = "ptr")]
    pub fn enqueue(&mut self, item: T) -> Result<(), BufferFullError> {
        let rb = unsafe { self.rb.as_mut() };

        let head = rb.head.load(Ordering::Relaxed);
        let tail = rb.tail.load(Ordering::Acquire);

        let n = rb.capacity() + 1;
        let next_tail = (tail + 1) % n;
        let buffer: &mut [T] = unsafe { rb.buffer.as_mut() };

        let head = rb.head.load_relaxed();
        let tail = rb.tail.load_acquire();
        let next_tail = (tail + 1) % n;

        if next_tail != head {
            // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. We
            // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory
            unsafe { ptr::write(buffer.get_unchecked_mut(tail), item) }
            rb.tail.store(next_tail, Ordering::Release);
            Ok(())
        } else {
            Err(BufferFullError)
        }
    }
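The NOTE(ptr::write) above is the key subtlety: the slot being written still holds uninitialized memory. A hedged illustration of why a plain assignment would be wrong (the helper is hypothetical, not part of this commit):

// Illustration only. A plain `*slot = item` would first drop the old value, but the
// slot holds uninitialized memory, so running `T`'s destructor there would be
// undefined behavior; `ptr::write` moves `item` in without reading or dropping the
// previous contents.
use core::ptr;

unsafe fn fill_uninitialized_slot<T>(slot: *mut T, item: T) {
    ptr::write(slot, item);
}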
    /// Adds an `item` to the end of the queue
    ///
    /// Returns `BufferFullError` if the queue is full
    #[cfg(not(target_has_atomic = "ptr"))]
    pub fn enqueue(&mut self, item: T) -> Result<(), BufferFullError> {
        let rb = unsafe { self.rb.as_mut() };

        let n = rb.capacity() + 1;
        let buffer: &mut [T] = unsafe { rb.buffer.as_mut() };

        let next_tail = (rb.tail + 1) % n;
        // NOTE(volatile) the value of `head` can change at any time in the execution context of
        // the producer, so we inform the compiler of this by using a volatile load
        if next_tail != unsafe { ptr::read_volatile(&rb.head) } {
            // NOTE(ptr::write) see the other `enqueue` implementation above for details
            unsafe { ptr::write(buffer.get_unchecked_mut(rb.tail), item) }

            // NOTE(barrier!) see the NOTE(barrier!) above
            barrier!();

            rb.tail = next_tail;
            rb.tail.store_release(next_tail);
            Ok(())
        } else {
            Err(BufferFullError)
...
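For context, a usage sketch of `split` as documented above (hedged: the import path, the `new` constructor, and the element type and capacity are assumed, and real firmware would hand the `Producer` to an interrupt handler rather than use both ends in `main`):

// Illustration only -- not part of this commit.
use heapless::RingBuffer; // assumed re-export of `ring_buffer::RingBuffer`

static mut RB: RingBuffer<u8, [u8; 4]> = RingBuffer::new();

fn main() {
    // `split` takes `&'static mut self`, so the buffer is placed in a `static mut`
    let (mut producer, mut consumer) = unsafe { RB.split() };

    producer.enqueue(42).ok();
    assert_eq!(consumer.dequeue(), Some(42));
}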