diff --git a/.gitignore b/.gitignore
index 6dc3db1ac0e732ed9e171f6cfb0a88469439cf30..e64a511ae871765faaa2b5859a83d4e31e975b60 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 **/*.rs.bk
+.#*
 Cargo.lock
 target/
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2e740cfad0ae837823fdf62021aea3a5e726e33f
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,51 @@
+language: rust
+
+matrix:
+  include:
+    - env: TARGET=x86_64-unknown-linux-gnu
+      rust: nightly
+
+    - env: TARGET=thumbv6m-none-eabi
+      rust: nightly
+      addons:
+        apt:
+          sources:
+            - debian-sid
+          packages:
+            - binutils-arm-none-eabi
+
+    - env: TARGET=thumbv7m-none-eabi
+      rust: nightly
+      addons:
+        apt:
+          sources:
+            - debian-sid
+          packages:
+            - binutils-arm-none-eabi
+
+before_install: set -e
+
+install:
+  - bash ci/install.sh
+
+script:
+  - bash ci/script.sh
+
+after_script: set +e
+
+cache: cargo
+before_cache:
+  # Travis can't cache files that are not readable by "others"
+  - chmod -R a+r $HOME/.cargo
+
+branches:
+  only:
+    # release tags
+    - /^v\d+\.\d+\.\d+.*$/
+    - auto
+    - master
+    - try
+
+notifications:
+  email:
+    on_success: never
diff --git a/Cargo.toml b/Cargo.toml
index 34b3177cd989a546eb07aad3ed44fe19b717dbb5..13b4ab795a7a857400a51d97e63d8c8e71b604e3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,12 +1,19 @@
 [package]
 authors = ["Jorge Aparicio <jorge@japaric.io>"]
-categories = ["data-structures", "no-std"]
+categories = [
+    "data-structures",
+    "no-std",
+]
 description = "`static` friendly data structures that don't require dynamic memory allocation"
 documentation = "https://docs.rs/heapless"
-keywords = ["static", "no-heap"]
+keywords = [
+    "static",
+    "no-heap",
+]
 license = "MIT OR Apache-2.0"
 name = "heapless"
 repository = "https://github.com/japaric/heapless"
-version = "0.1.0"
+version = "0.2.0"
 
 [dependencies]
+untagged-option = "0.1.1"
diff --git a/ci/install.sh b/ci/install.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4d5d56a45bb9563ed35d06665575322b4c392172
--- /dev/null
+++ b/ci/install.sh
@@ -0,0 +1,19 @@
+set -euxo pipefail
+
+main() {
+    case $TARGET in
+        thumb*m-none-eabi)
+            local vers=0.3.8
+
+            cargo install --list | grep "xargo v$vers" || \
+                ( cd .. && cargo install xargo -f --vers $vers )
+
+            rustup component list | grep 'rust-src.*installed' || \
+                rustup component add rust-src
+            ;;
+        *)
+            ;;
+    esac
+}
+
+main
diff --git a/ci/script.sh b/ci/script.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e9ff7e38b9f350574d31a079eeb684e88a3f6d6c
--- /dev/null
+++ b/ci/script.sh
@@ -0,0 +1,14 @@
+set -euxo pipefail
+
+main() {
+    case $TARGET in
+        thumb*m-none-eabi)
+            xargo check --target $TARGET
+            ;;
+        *)
+            cargo check --target $TARGET
+            ;;
+    esac
+}
+
+main
diff --git a/src/lib.rs b/src/lib.rs
index 723ea4a83b6da6bd5ee80b352abdd3a2a67d2a27..c6228e5357c2c0386b28c573bd0c9c0e79c4a328 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,159 +1,163 @@
 //! `static` friendly data structures that don't require dynamic memory
 //! allocation
+//!
+//! # Examples
+//!
+//! ## `Vec`
+//!
+//! ```
+//! use heapless::Vec;
+//!
+//! let mut xs: Vec<u8, [u8; 4]> = Vec::new();
+//!
+//! assert!(xs.push(0).is_ok());
+//! assert!(xs.push(1).is_ok());
+//! assert!(xs.push(2).is_ok());
+//! assert!(xs.push(3).is_ok());
+//! assert!(xs.push(4).is_err()); // full
+//!
+//! assert_eq!(xs.pop(), Some(3));
+//! ```
+//!
+//! ## `RingBuffer`
+//!
+//! ```
+//! use heapless::RingBuffer;
+//!
+//! let mut rb: RingBuffer<u8, [u8; 4]> = RingBuffer::new();
+//!
+//! assert!(rb.enqueue(0).is_ok());
+//! assert!(rb.enqueue(1).is_ok());
+//! assert!(rb.enqueue(2).is_ok());
+//! assert!(rb.enqueue(3).is_err()); // full
+//!
+//! assert_eq!(rb.dequeue(), Some(0));
+//! ```
+//!
+//! ### Single producer single consumer mode
+//!
+//! For use in *single core* systems like microcontrollers
+//!
+//! ```
+//! use heapless::RingBuffer;
+//!
+//! static mut RB: RingBuffer<Event, [Event; 4]> = RingBuffer::new();
+//!
+//! enum Event { A, B }
+//!
+//! fn main() {
+//!     // NOTE(unsafe) beware of aliasing the `consumer` end point
+//!     let mut consumer = unsafe { RB.split().1 };
+//!
+//!     loop {
+//!         // `dequeue` is a lockless operation
+//!         match consumer.dequeue() {
+//!             Some(Event::A) => { /* .. */ },
+//!             Some(Event::B) => { /* .. */ },
+//!             None => { /* sleep */},
+//!         }
+//! #       break
+//!     }
+//! }
+//!
+//! // this is a different execution context that can preempt `main`
+//! fn interrupt_handler() {
+//!     // NOTE(unsafe) beware of aliasing the `producer` end point
+//!     let mut producer = unsafe { RB.split().0 };
+//! #   let condition = true;
+//!
+//!     // ..
+//!
+//!     if condition {
+//!         producer.enqueue(Event::A).unwrap();
+//!     } else {
+//!         producer.enqueue(Event::B).unwrap();
+//!     }
+//!
+//!     // ..
+//! }
+//! ```
+//!
+//! # `Send`-ness
+//!
+//! Collections of `Send`-able things are `Send`
+//!
+//! ```
+//! use heapless::{RingBuffer, Vec};
+//! use heapless::ring_buffer::{Consumer, Producer};
+//!
+//! struct IsSend;
+//!
+//! unsafe impl Send for IsSend {}
+//!
+//! fn is_send<T>() where T: Send {}
+//!
+//! is_send::<Consumer<IsSend, [IsSend; 4]>>();
+//! is_send::<Producer<IsSend, [IsSend; 4]>>();
+//! is_send::<RingBuffer<IsSend, [IsSend; 4]>>();
+//! is_send::<Vec<IsSend, [IsSend; 4]>>();
+//! ```
+//!
+//! Collections of not `Send`-able things are *not* `Send`
+//!
+//! ``` compile_fail
+//! use std::marker::PhantomData;
+//! use heapless::ring_buffer::Consumer;
+//!
+//! type NotSend = PhantomData<*const ()>;
+//!
+//! fn is_send<T>() where T: Send {}
+//!
+//! is_send::<Consumer<NotSend, [NotSend; 4]>>();
+//! ```
+//!
+//! ``` compile_fail
+//! use std::marker::PhantomData;
+//! use heapless::ring_buffer::Producer;
+//!
+//! type NotSend = PhantomData<*const ()>;
+//!
+//! fn is_send<T>() where T: Send {}
+//!
+//! is_send::<Producer<NotSend, [NotSend; 4]>>();
+//! ```
+//!
+//! ``` compile_fail
+//! use std::marker::PhantomData;
+//! use heapless::RingBuffer;
+//!
+//! type NotSend = PhantomData<*const ()>;
+//!
+//! fn is_send<T>() where T: Send {}
+//!
+//! is_send::<RingBuffer<NotSend, [NotSend; 4]>>();
+//! ```
+//!
+//! ``` compile_fail
+//! use std::marker::PhantomData;
+//! use heapless::Vec;
+//!
+//! type NotSend = PhantomData<*const ()>;
+//!
+//! fn is_send<T>() where T: Send {}
+//!
+//! is_send::<Vec<NotSend, [NotSend; 4]>>();
+//! ```
 
 #![deny(missing_docs)]
-#![deny(warnings)]
 #![feature(const_fn)]
+#![feature(shared)]
+#![feature(unsize)]
 #![no_std]
 
-use core::marker::PhantomData;
-use core::ops::Deref;
-use core::slice;
+extern crate untagged_option;
 
-/// A circular buffer
-pub struct CircularBuffer<T, A>
-where
-    A: AsMut<[T]> + AsRef<[T]>,
-    T: Copy,
-{
-    _marker: PhantomData<[T]>,
-    array: A,
-    index: usize,
-    len: usize,
-}
+pub use vec::Vec;
+pub use ring_buffer::RingBuffer;
 
-impl<T, A> CircularBuffer<T, A>
-where
-    A: AsMut<[T]> + AsRef<[T]>,
-    T: Copy,
-{
-    /// Creates a new empty circular buffer using `array` as backup storage
-    pub const fn new(array: A) -> Self {
-        CircularBuffer {
-            _marker: PhantomData,
-            array: array,
-            index: 0,
-            len: 0,
-        }
-    }
+pub mod ring_buffer;
+mod vec;
 
-    /// Returns the capacity of this buffer
-    pub fn capacity(&self) -> usize {
-        self.array.as_ref().len()
-    }
-
-    /// Pushes `elem`ent into the buffer
-    ///
-    /// This will overwrite an old value if the buffer is full
-    pub fn push(&mut self, elem: T) {
-        let slice = self.array.as_mut();
-        if self.len < slice.len() {
-            self.len += 1;
-        }
-
-        unsafe { *slice.as_mut_ptr().offset(self.index as isize) = elem };
-
-        self.index = (self.index + 1) % slice.len();
-    }
-}
-
-impl<T, A> Deref for CircularBuffer<T, A>
-where
-    A: AsMut<[T]> + AsRef<[T]>,
-    T: Copy,
-{
-    type Target = [T];
-
-    fn deref(&self) -> &[T] {
-        let slice = self.array.as_ref();
-
-        if self.len == slice.len() {
-            slice
-        } else {
-            unsafe { slice::from_raw_parts(slice.as_ptr(), self.len) }
-        }
-    }
-}
-
-/// A continuous, growable array type
-pub struct Vec<T, A>
-where
-    A: AsMut<[T]> + AsRef<[T]>,
-    T: Copy,
-{
-    _marker: PhantomData<[T]>,
-    array: A,
-    len: usize,
-}
-
-impl<T, A> Vec<T, A>
-where
-    A: AsMut<[T]> + AsRef<[T]>,
-    T: Copy,
-{
-    /// Creates a new vector using `array` as the backup storage
-    pub const fn new(array: A) -> Self {
-        Vec {
-            _marker: PhantomData,
-            array: array,
-            len: 0,
-        }
-    }
-
-    /// Returns the capacity of this vector
-    pub fn capacity(&self) -> usize {
-        self.array.as_ref().len()
-    }
-
-    /// Clears the vector, removing all values
-    pub fn clear(&mut self) {
-        self.len = 0;
-    }
-
-    /// Removes the last element from this vector and returns it, or `None` if
-    /// it's empty
-    pub fn pop(&mut self) -> Option<T> {
-        if self.len == 0 {
-            None
-        } else {
-            self.len -= 1;
-            unsafe {
-                Some(
-                    *self.array
-                         .as_mut()
-                         .as_mut_ptr()
-                         .offset(self.len as isize),
-                )
-            }
-        }
-    }
-
-    /// Appends an `elem`ent to the back of the collection
-    ///
-    /// This method returns `Err` if the vector is full
-    pub fn push(&mut self, elem: T) -> Result<(), ()> {
-        let slice = self.array.as_mut();
-
-        if self.len == slice.len() {
-            Err(())
-        } else {
-            unsafe {
-                *slice.as_mut_ptr().offset(self.len as isize) = elem;
-            }
-            self.len += 1;
-            Ok(())
-        }
-    }
-}
-
-impl<T, A> Deref for Vec<T, A>
-where
-    A: AsMut<[T]> + AsRef<[T]>,
-    T: Copy,
-{
-    type Target = [T];
-
-    fn deref(&self) -> &[T] {
-        unsafe { slice::from_raw_parts(self.array.as_ref().as_ptr(), self.len) }
-    }
-}
+/// Error raised when the buffer is full
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct BufferFullError;
diff --git a/src/ring_buffer/mod.rs b/src/ring_buffer/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0b4e0ff6e1b0143a532226448fffc8b3111e7318
--- /dev/null
+++ b/src/ring_buffer/mod.rs
@@ -0,0 +1,326 @@
+//! Ring buffer
+
+use core::marker::{PhantomData, Unsize};
+use core::ptr;
+
+use untagged_option::UntaggedOption;
+
+use BufferFullError;
+
+pub use self::spsc::{Consumer, Producer};
+
+mod spsc;
+
+/// A statically allocated ring buffer backed by an array `A`
+pub struct RingBuffer<T, A>
+where
+    // FIXME(rust-lang/rust#44580) use "const generics" instead of `Unsize`
+    A: Unsize<[T]>,
+{
+    _marker: PhantomData<[T]>,
+    buffer: UntaggedOption<A>,
+    // this is from where we dequeue items
+    head: usize,
+    // this is where we enqueue new items
+    tail: usize,
+}
+
+impl<T, A> RingBuffer<T, A>
+where
+    A: Unsize<[T]>,
+{
+    /// Creates an empty ring buffer with capacity equal to the length of the array `A` *minus
+    /// one*.
+    pub const fn new() -> Self {
+        RingBuffer {
+            _marker: PhantomData,
+            buffer: UntaggedOption::none(),
+            head: 0,
+            tail: 0,
+        }
+    }
+
+    /// Returns the maximum number of elements the ring buffer can hold
+    pub fn capacity(&self) -> usize {
+        let buffer: &[T] = unsafe { self.buffer.as_ref() };
+        buffer.len() - 1
+    }
+
+    /// Returns the item in the front of the queue, or `None` if the queue is empty
+    pub fn dequeue(&mut self) -> Option<T> {
+        let n = self.capacity() + 1;
+        let buffer: &[T] = unsafe { self.buffer.as_ref() };
+
+        if self.head != self.tail {
+            let item = unsafe { ptr::read(buffer.as_ptr().offset(self.head as isize)) };
+            self.head = (self.head + 1) % n;
+            Some(item)
+        } else {
+            None
+        }
+    }
+
+    /// Adds an `item` to the end of the queue
+    ///
+    /// Returns `BufferFullError` if the queue is full
+    pub fn enqueue(&mut self, item: T) -> Result<(), BufferFullError> {
+        let n = self.capacity() + 1;
+        let buffer: &mut [T] = unsafe { self.buffer.as_mut() };
+
+        let next_tail = (self.tail + 1) % n;
+        if next_tail != self.head {
+            // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. We
+            // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory
+            unsafe { ptr::write(buffer.as_mut_ptr().offset(self.tail as isize), item) }
+            self.tail = next_tail;
+            Ok(())
+        } else {
+            Err(BufferFullError)
+        }
+    }
+
+    /// Returns the number of elements in the queue
+    pub fn len(&self) -> usize {
+        if self.head > self.tail {
+            self.capacity() + 1 - self.head + self.tail
+        } else {
+            self.tail - self.head
+        }
+    }
+
+    /// Iterates from the front of the queue to the back
+    pub fn iter(&self) -> Iter<T, A> {
+        Iter {
+            rb: self,
+            index: 0,
+            len: self.len(),
+        }
+    }
+
+    /// Returns an iterator that allows modifying each value.
+    pub fn iter_mut(&mut self) -> IterMut<T, A> {
+        let len = self.len();
+        IterMut {
+            rb: self,
+            index: 0,
+            len,
+        }
+    }
+}
+
+impl<T, A> Drop for RingBuffer<T, A>
+where
+    A: Unsize<[T]>,
+{
+    fn drop(&mut self) {
+        for item in self {
+            unsafe {
+                ptr::drop_in_place(item);
+            }
+        }
+    }
+}
+
+impl<'a, T, A> IntoIterator for &'a RingBuffer<T, A>
+where
+    A: Unsize<[T]>,
+{
+    type Item = &'a T;
+    type IntoIter = Iter<'a, T, A>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<'a, T, A> IntoIterator for &'a mut RingBuffer<T, A>
+where
+    A: Unsize<[T]>,
+{
+    type Item = &'a mut T;
+    type IntoIter = IterMut<'a, T, A>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter_mut()
+    }
+}
+
+/// An iterator over a ring buffer items
+pub struct Iter<'a, T, A>
+where
+    A: Unsize<[T]> + 'a,
+    T: 'a,
+{
+    rb: &'a RingBuffer<T, A>,
+    index: usize,
+    len: usize,
+}
+
+/// A mutable iterator over a ring buffer items
+pub struct IterMut<'a, T, A>
+where
+    A: Unsize<[T]> + 'a,
+    T: 'a,
+{
+    rb: &'a mut RingBuffer<T, A>,
+    index: usize,
+    len: usize,
+}
+
+impl<'a, T, A> Iterator for Iter<'a, T, A>
+where
+    A: Unsize<[T]> + 'a,
+    T: 'a,
+{
+    type Item = &'a T;
+
+    fn next(&mut self) -> Option<&'a T> {
+        if self.index < self.len {
+            let buffer: &[T] = unsafe { self.rb.buffer.as_ref() };
+            let ptr = buffer.as_ptr();
+            let i = (self.rb.head + self.index) % (self.rb.capacity() + 1);
+            self.index += 1;
+            Some(unsafe { &*ptr.offset(i as isize) })
+        } else {
+            None
+        }
+    }
+}
+
+impl<'a, T, A> Iterator for IterMut<'a, T, A>
+where
+    A: Unsize<[T]> + 'a,
+    T: 'a,
+{
+    type Item = &'a mut T;
+
+    fn next(&mut self) -> Option<&'a mut T> {
+        if self.index < self.len {
+            let capacity = self.rb.capacity() + 1;
+            let buffer: &mut [T] = unsafe { self.rb.buffer.as_mut() };
+            let ptr: *mut T = buffer.as_mut_ptr();
+            let i = (self.rb.head + self.index) % capacity;
+            self.index += 1;
+            Some(unsafe { &mut *ptr.offset(i as isize) })
+        } else {
+            None
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use RingBuffer;
+
+    #[test]
+    fn drop() {
+        struct Droppable;
+        impl Droppable {
+            fn new() -> Self {
+                unsafe {
+                    COUNT += 1;
+                }
+                Droppable
+            }
+        }
+        impl Drop for Droppable {
+            fn drop(&mut self) {
+                unsafe {
+                    COUNT -= 1;
+                }
+            }
+        }
+
+        static mut COUNT: i32 = 0;
+
+
+        {
+            let mut v: RingBuffer<Droppable, [Droppable; 4]> = RingBuffer::new();
+            v.enqueue(Droppable::new()).unwrap();
+            v.enqueue(Droppable::new()).unwrap();
+            v.dequeue().unwrap();
+        }
+
+        assert_eq!(unsafe { COUNT }, 0);
+
+        {
+            let mut v: RingBuffer<Droppable, [Droppable; 4]> = RingBuffer::new();
+            v.enqueue(Droppable::new()).unwrap();
+            v.enqueue(Droppable::new()).unwrap();
+        }
+
+        assert_eq!(unsafe { COUNT }, 0);
+    }
+
+    #[test]
+    fn full() {
+        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();
+
+        rb.enqueue(0).unwrap();
+        rb.enqueue(1).unwrap();
+        rb.enqueue(2).unwrap();
+
+        assert!(rb.enqueue(3).is_err());
+    }
+
+    #[test]
+    fn iter() {
+        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();
+
+        rb.enqueue(0).unwrap();
+        rb.enqueue(1).unwrap();
+        rb.enqueue(2).unwrap();
+
+        let mut items = rb.iter();
+
+        assert_eq!(items.next(), Some(&0));
+        assert_eq!(items.next(), Some(&1));
+        assert_eq!(items.next(), Some(&2));
+        assert_eq!(items.next(), None);
+    }
+
+    #[test]
+    fn iter_mut() {
+        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();
+
+        rb.enqueue(0).unwrap();
+        rb.enqueue(1).unwrap();
+        rb.enqueue(2).unwrap();
+
+        let mut items = rb.iter_mut();
+
+        assert_eq!(items.next(), Some(&mut 0));
+        assert_eq!(items.next(), Some(&mut 1));
+        assert_eq!(items.next(), Some(&mut 2));
+        assert_eq!(items.next(), None);
+    }
+
+    #[test]
+    fn sanity() {
+        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();
+
+        assert_eq!(rb.dequeue(), None);
+
+        rb.enqueue(0).unwrap();
+
+        assert_eq!(rb.dequeue(), Some(0));
+
+        assert_eq!(rb.dequeue(), None);
+    }
+
+    #[test]
+    fn wrap_around() {
+        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();
+
+        rb.enqueue(0).unwrap();
+        rb.enqueue(1).unwrap();
+        rb.enqueue(2).unwrap();
+        rb.dequeue().unwrap();
+        rb.dequeue().unwrap();
+        rb.dequeue().unwrap();
+        rb.enqueue(3).unwrap();
+        rb.enqueue(4).unwrap();
+
+        assert_eq!(rb.len(), 2);
+    }
+}
diff --git a/src/ring_buffer/spsc.rs b/src/ring_buffer/spsc.rs
new file mode 100644
index 0000000000000000000000000000000000000000..37c06716b55433386f2c86d1511d15608539fbfb
--- /dev/null
+++ b/src/ring_buffer/spsc.rs
@@ -0,0 +1,126 @@
+use core::ptr::{self, Shared};
+use core::marker::Unsize;
+
+use BufferFullError;
+use ring_buffer::RingBuffer;
+
+impl<T, A> RingBuffer<T, A>
+where
+    A: Unsize<[T]>,
+{
+    /// Splits a statically allocated ring buffer into producer and consumer end points
+    ///
+    /// *Warning* the current implementation only supports single core processors. It's also fine to
+    /// use both end points on the same core of a multi-core processor.
+    pub fn split(&'static mut self) -> (Producer<T, A>, Consumer<T, A>) {
+        (
+            Producer {
+                rb: unsafe { Shared::new_unchecked(self) },
+            },
+            Consumer {
+                rb: unsafe { Shared::new_unchecked(self) },
+            },
+        )
+    }
+}
+
+/// A ring buffer "consumer"; it can dequeue items from the ring buffer
+// NOTE the consumer semantically owns the `head` pointer of the ring buffer
+pub struct Consumer<T, A>
+where
+    A: Unsize<[T]>,
+{
+    // XXX do we need to use `Shared` (for soundness) here?
+    rb: Shared<RingBuffer<T, A>>,
+}
+
+impl<T, A> Consumer<T, A>
+where
+    A: Unsize<[T]>,
+{
+    /// Returns the item in the front of the queue, or `None` if the queue is empty
+    pub fn dequeue(&mut self) -> Option<T> {
+        let rb = unsafe { self.rb.as_mut() };
+        let n = rb.capacity() + 1;
+        let buffer: &[T] = unsafe { rb.buffer.as_ref() };
+
+        // NOTE(volatile) the value of `tail` can change at any time in the execution context of the
+        // consumer so we inform this to the compiler using a volatile load
+        if rb.head != unsafe { ptr::read_volatile(&rb.tail) } {
+            let item = unsafe { ptr::read(buffer.as_ptr().offset(rb.head as isize)) };
+            rb.head = (rb.head + 1) % n;
+            Some(item)
+        } else {
+            None
+        }
+    }
+}
+
+unsafe impl<T, A> Send for Consumer<T, A>
+where
+    A: Unsize<[T]>,
+    T: Send,
+{
+}
+
+/// A ring buffer "producer"; it can enqueue items into the ring buffer
+// NOTE the producer semantically owns the `tail` pointer of the ring buffer
+pub struct Producer<T, A>
+where
+    A: Unsize<[T]>,
+{
+    // XXX do we need to use `Shared` (for soundness) here?
+    rb: Shared<RingBuffer<T, A>>,
+}
+
+impl<T, A> Producer<T, A>
+where
+    A: Unsize<[T]>,
+{
+    /// Adds an `item` to the end of the queue
+    ///
+    /// Returns `BufferFullError` if the queue is full
+    pub fn enqueue(&mut self, item: T) -> Result<(), BufferFullError> {
+        let rb = unsafe { self.rb.as_mut() };
+        let n = rb.capacity() + 1;
+        let buffer: &mut [T] = unsafe { rb.buffer.as_mut() };
+
+        let next_tail = (rb.tail + 1) % n;
+        // NOTE(volatile) the value of `head` can change at any time in the execution context of the
+        // producer so we inform this to the compiler using a volatile load
+        if next_tail != unsafe { ptr::read_volatile(&rb.head) } {
+            // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. We
+            // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory
+            unsafe { ptr::write(buffer.as_mut_ptr().offset(rb.tail as isize), item) }
+            rb.tail = next_tail;
+            Ok(())
+        } else {
+            Err(BufferFullError)
+        }
+    }
+}
+
+unsafe impl<T, A> Send for Producer<T, A>
+where
+    A: Unsize<[T]>,
+    T: Send,
+{
+}
+
+#[cfg(test)]
+mod tests {
+    use RingBuffer;
+
+    #[test]
+    fn sanity() {
+        static mut RB: RingBuffer<i32, [i32; 2]> = RingBuffer::new();
+
+        let (mut p, mut c) = unsafe { RB.split() };
+
+        assert_eq!(c.dequeue(), None);
+
+        p.enqueue(0).unwrap();
+
+        assert_eq!(c.dequeue(), Some(0));
+    }
+}
diff --git a/src/vec.rs b/src/vec.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9140cde1d14caa18682e83de5a84e5a94d468465
--- /dev/null
+++ b/src/vec.rs
@@ -0,0 +1,231 @@
+use core::marker::{PhantomData, Unsize};
+use core::{ops, ptr, slice};
+
+use untagged_option::UntaggedOption;
+
+use BufferFullError;
+
+/// A [`Vec`], *vector*, backed by a fixed size array
+///
+/// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
+pub struct Vec<T, A>
+where
+    // FIXME(rust-lang/rust#44580) use "const generics" instead of `Unsize`
+    A: Unsize<[T]>,
+{
+    _marker: PhantomData<[T]>,
+    buffer: UntaggedOption<A>,
+    len: usize,
+}
+
+impl<T, A> Vec<T, A>
+where
+    A: Unsize<[T]>,
+{
+    /// Constructs a new, empty `Vec<T>` backed by the array `A`
+    pub const fn new() -> Self {
+        Vec {
+            _marker: PhantomData,
+            buffer: UntaggedOption::none(),
+            len: 0,
+        }
+    }
+
+    /// Returns the maximum number of elements the vector can hold
+    pub fn capacity(&self) -> usize {
+        let buffer: &[T] = unsafe { self.buffer.as_ref() };
+        buffer.len()
+    }
+
+    /// Removes the last element from a vector and return it, or `None` if it's empty
+    pub fn pop(&mut self) -> Option<T> {
+        let buffer: &[T] = unsafe { self.buffer.as_ref() };
+
+        if self.len != 0 {
+            self.len -= 1;
+            let item = unsafe { ptr::read(&buffer[self.len]) };
+            Some(item)
+        } else {
+            None
+        }
+    }
+
+    /// Appends an element to the back of the collection
+    ///
+    /// Returns `BufferFullError` if the vector is full
+    pub fn push(&mut self, item: T) -> Result<(), BufferFullError> {
+        let capacity = self.capacity();
+        let buffer: &mut [T] = unsafe { self.buffer.as_mut() };
+
+        if self.len < capacity {
+            // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. We
+            // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory
+            unsafe { ptr::write(&mut buffer[self.len], item) }
+            self.len += 1;
+            Ok(())
+        } else {
+            Err(BufferFullError)
+        }
+    }
+}
+
+impl<T, A> Drop for Vec<T, A>
+where
+    A: Unsize<[T]>,
+{
+    fn drop(&mut self) {
+        unsafe { ptr::drop_in_place(&mut self[..]) }
+    }
+}
+
+impl<'a, T, A> IntoIterator for &'a Vec<T, A>
+where
+    A: Unsize<[T]>,
+{
+    type Item = &'a T;
+    type IntoIter = slice::Iter<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<'a, T, A> IntoIterator for &'a mut Vec<T, A>
+where
+    A: Unsize<[T]>,
+{
+    type Item = &'a mut T;
+    type IntoIter = slice::IterMut<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter_mut()
+    }
+}
+
+impl<T, A> ops::Deref for Vec<T, A>
+where
+    A: Unsize<[T]>,
+{
+    type Target = [T];
+
+    fn deref(&self) -> &[T] {
+        let buffer: &[T] = unsafe { self.buffer.as_ref() };
+        &buffer[..self.len]
+    }
+}
+
+impl<T, A> ops::DerefMut for Vec<T, A>
+where
+    A: Unsize<[T]>,
+{
+    fn deref_mut(&mut self) -> &mut [T] {
+        let len = self.len();
+        let buffer: &mut [T] = unsafe { self.buffer.as_mut() };
+        &mut buffer[..len]
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use Vec;
+
+    #[test]
+    fn drop() {
+        struct Droppable;
+        impl Droppable {
+            fn new() -> Self {
+                unsafe {
+                    COUNT += 1;
+                }
+                Droppable
+            }
+        }
+        impl Drop for Droppable {
+            fn drop(&mut self) {
+                unsafe {
+                    COUNT -= 1;
+                }
+            }
+        }
+
+        static mut COUNT: i32 = 0;
+
+        {
+            let mut v: Vec<Droppable, [Droppable; 2]> = Vec::new();
+            v.push(Droppable::new()).unwrap();
+            v.push(Droppable::new()).unwrap();
+            v.pop().unwrap();
+        }
+
+        assert_eq!(unsafe { COUNT }, 0);
+
+        {
+            let mut v: Vec<Droppable, [Droppable; 2]> = Vec::new();
+            v.push(Droppable::new()).unwrap();
+            v.push(Droppable::new()).unwrap();
+        }
+
+        assert_eq!(unsafe { COUNT }, 0);
+    }
+
+    #[test]
+    fn full() {
+        let mut v: Vec<i32, [i32; 4]> = Vec::new();
+
+        v.push(0).unwrap();
+        v.push(1).unwrap();
+        v.push(2).unwrap();
+        v.push(3).unwrap();
+
+        assert!(v.push(4).is_err());
+    }
+
+    #[test]
+    fn iter() {
+        let mut v: Vec<i32, [i32; 4]> = Vec::new();
+
+        v.push(0).unwrap();
+        v.push(1).unwrap();
+        v.push(2).unwrap();
+        v.push(3).unwrap();
+
+        let mut items = v.iter();
+
+        assert_eq!(items.next(), Some(&0));
+        assert_eq!(items.next(), Some(&1));
+        assert_eq!(items.next(), Some(&2));
+        assert_eq!(items.next(), Some(&3));
+        assert_eq!(items.next(), None);
+    }
+
+    #[test]
+    fn iter_mut() {
+        let mut v: Vec<i32, [i32; 4]> = Vec::new();
+
+        v.push(0).unwrap();
+        v.push(1).unwrap();
+        v.push(2).unwrap();
+        v.push(3).unwrap();
+
+        let mut items = v.iter_mut();
+
+        assert_eq!(items.next(), Some(&mut 0));
+        assert_eq!(items.next(), Some(&mut 1));
+        assert_eq!(items.next(), Some(&mut 2));
+        assert_eq!(items.next(), Some(&mut 3));
+        assert_eq!(items.next(), None);
+    }
+
+    #[test]
+    fn sanity() {
+        let mut v: Vec<i32, [i32; 4]> = Vec::new();
+
+        assert_eq!(v.pop(), None);
+
+        v.push(0).unwrap();
+
+        assert_eq!(v.pop(), Some(0));
+
+        assert_eq!(v.pop(), None);
+    }
+}