Commit 10c5542c authored by homunkulus

Auto merge of #4 - japaric:v2, r=japaric

support arrays of any size, don't require an initialization value, ..

single producer single consumer support for ring buffer
parents f515ed9e da5757a7
# .gitignore
**/*.rs.bk
.#*
Cargo.lock
target/
# .travis.yml
language: rust

matrix:
  include:
    - env: TARGET=x86_64-unknown-linux-gnu
      rust: nightly

    - env: TARGET=thumbv6m-none-eabi
      rust: nightly
      addons:
        apt:
          sources:
            - debian-sid
          packages:
            - binutils-arm-none-eabi

    - env: TARGET=thumbv7m-none-eabi
      rust: nightly
      addons:
        apt:
          sources:
            - debian-sid
          packages:
            - binutils-arm-none-eabi

before_install: set -e

install:
  - bash ci/install.sh

script:
  - bash ci/script.sh

after_script: set +e

cache: cargo
before_cache:
  # Travis can't cache files that are not readable by "others"
  - chmod -R a+r $HOME/.cargo

branches:
  only:
    # release tags
    - /^v\d+\.\d+\.\d+.*$/
    - auto
    - master
    - try

notifications:
  email:
    on_success: never
# Cargo.toml
[package]
authors = ["Jorge Aparicio <jorge@japaric.io>"]
categories = [
    "data-structures",
    "no-std",
]
description = "`static` friendly data structures that don't require dynamic memory allocation"
documentation = "https://docs.rs/heapless"
keywords = [
    "static",
    "no-heap",
]
license = "MIT OR Apache-2.0"
name = "heapless"
repository = "https://github.com/japaric/heapless"
version = "0.2.0"

[dependencies]
untagged-option = "0.1.1"
# ci/install.sh
set -euxo pipefail

main() {
    case $TARGET in
        thumb*m-none-eabi)
            local vers=0.3.8

            cargo install --list | grep "xargo v$vers" || \
                ( cd .. && cargo install xargo -f --vers $vers )

            rustup component list | grep 'rust-src.*installed' || \
                rustup component add rust-src
            ;;
        *)
            ;;
    esac
}

main
# ci/script.sh
set -euxo pipefail

main() {
    case $TARGET in
        thumb*m-none-eabi)
            xargo check --target $TARGET
            ;;
        *)
            cargo check --target $TARGET
            ;;
    esac
}

main
// src/lib.rs
//! `static` friendly data structures that don't require dynamic memory
//! allocation
//!
//! # Examples
//!
//! ## `Vec`
//!
//! ```
//! use heapless::Vec;
//!
//! let mut xs: Vec<u8, [u8; 4]> = Vec::new();
//!
//! assert!(xs.push(0).is_ok());
//! assert!(xs.push(1).is_ok());
//! assert!(xs.push(2).is_ok());
//! assert!(xs.push(3).is_ok());
//! assert!(xs.push(4).is_err()); // full
//!
//! assert_eq!(xs.pop(), Some(3));
//! ```
//!
//! ## `RingBuffer`
//!
//! ```
//! use heapless::RingBuffer;
//!
//! let mut rb: RingBuffer<u8, [u8; 4]> = RingBuffer::new();
//!
//! assert!(rb.enqueue(0).is_ok());
//! assert!(rb.enqueue(1).is_ok());
//! assert!(rb.enqueue(2).is_ok());
//! assert!(rb.enqueue(3).is_err()); // full
//!
//! assert_eq!(rb.dequeue(), Some(0));
//! ```
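//!
//! The `iter` method (defined in the `ring_buffer` module) walks the queue
//! from front to back without removing items; a minimal sketch:
//!
//! ```
//! use heapless::RingBuffer;
//!
//! let mut rb: RingBuffer<u8, [u8; 4]> = RingBuffer::new();
//!
//! rb.enqueue(0).unwrap();
//! rb.enqueue(1).unwrap();
//!
//! // non-destructive: both items are still in the queue afterwards
//! let sum: u8 = rb.iter().sum();
//!
//! assert_eq!(sum, 1);
//! assert_eq!(rb.len(), 2);
//! ```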
//!
//! ### Single producer single consumer mode
//!
//! For use in *single core* systems like microcontrollers
//!
//! ```
//! use heapless::RingBuffer;
//!
//! static mut RB: RingBuffer<Event, [Event; 4]> = RingBuffer::new();
//!
//! enum Event { A, B }
//!
//! fn main() {
//!     // NOTE(unsafe) beware of aliasing the `consumer` end point
//!     let mut consumer = unsafe { RB.split().1 };
//!
//!     loop {
//!         // `dequeue` is a lockless operation
//!         match consumer.dequeue() {
//!             Some(Event::A) => { /* .. */ },
//!             Some(Event::B) => { /* .. */ },
//!             None => { /* sleep */ },
//!         }
//! #       break
//!     }
//! }
//!
//! // this is a different execution context that can preempt `main`
//! fn interrupt_handler() {
//!     // NOTE(unsafe) beware of aliasing the `producer` end point
//!     let mut producer = unsafe { RB.split().0 };
//! #   let condition = true;
//!
//!     // ..
//!
//!     if condition {
//!         producer.enqueue(Event::A).unwrap();
//!     } else {
//!         producer.enqueue(Event::B).unwrap();
//!     }
//!
//!     // ..
//! }
//! ```
//!
//! # `Send`-ness
//!
//! Collections of `Send`-able things are `Send`
//!
//! ```
//! use heapless::{RingBuffer, Vec};
//! use heapless::ring_buffer::{Consumer, Producer};
//!
//! struct IsSend;
//!
//! unsafe impl Send for IsSend {}
//!
//! fn is_send<T>() where T: Send {}
//!
//! is_send::<Consumer<IsSend, [IsSend; 4]>>();
//! is_send::<Producer<IsSend, [IsSend; 4]>>();
//! is_send::<RingBuffer<IsSend, [IsSend; 4]>>();
//! is_send::<Vec<IsSend, [IsSend; 4]>>();
//! ```
//!
//! Collections of not `Send`-able things are *not* `Send`
//!
//! ``` compile_fail
//! use std::marker::PhantomData;
//! use heapless::ring_buffer::Consumer;
//!
//! type NotSend = PhantomData<*const ()>;
//!
//! fn is_send<T>() where T: Send {}
//!
//! is_send::<Consumer<NotSend, [NotSend; 4]>>();
//! ```
//!
//! ``` compile_fail
//! use std::marker::PhantomData;
//! use heapless::ring_buffer::Producer;
//!
//! type NotSend = PhantomData<*const ()>;
//!
//! fn is_send<T>() where T: Send {}
//!
//! is_send::<Producer<NotSend, [NotSend; 4]>>();
//! ```
//!
//! ``` compile_fail
//! use std::marker::PhantomData;
//! use heapless::RingBuffer;
//!
//! type NotSend = PhantomData<*const ()>;
//!
//! fn is_send<T>() where T: Send {}
//!
//! is_send::<RingBuffer<NotSend, [NotSend; 4]>>();
//! ```
//!
//! ``` compile_fail
//! use std::marker::PhantomData;
//! use heapless::Vec;
//!
//! type NotSend = PhantomData<*const ()>;
//!
//! fn is_send<T>() where T: Send {}
//!
//! is_send::<Vec<NotSend, [NotSend; 4]>>();
//! ```
#![deny(missing_docs)]
#![deny(warnings)]
#![feature(const_fn)]
#![feature(shared)]
#![feature(unsize)]
#![no_std]
extern crate untagged_option;

pub use ring_buffer::RingBuffer;
pub use vec::Vec;

pub mod ring_buffer;
mod vec;

/// Error raised when the buffer is full
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct BufferFullError;

// NOTE what follows are the pre-v2 `CircularBuffer` and inline `Vec` that this
// commit removes: they required an initialization value (`new(array)`) and only
// supported the array sizes covered by `AsRef<[T]>` / `AsMut<[T]>` impls.
use core::marker::PhantomData;
use core::ops::Deref;
use core::slice;
/// A circular buffer
pub struct CircularBuffer<T, A>
where
    A: AsMut<[T]> + AsRef<[T]>,
    T: Copy,
{
    _marker: PhantomData<[T]>,
    array: A,
    index: usize,
    len: usize,
}

impl<T, A> CircularBuffer<T, A>
where
    A: AsMut<[T]> + AsRef<[T]>,
    T: Copy,
{
    /// Creates a new empty circular buffer using `array` as backing storage
    pub const fn new(array: A) -> Self {
        CircularBuffer {
            _marker: PhantomData,
            array: array,
            index: 0,
            len: 0,
        }
    }

    /// Returns the capacity of this buffer
    pub fn capacity(&self) -> usize {
        self.array.as_ref().len()
    }

    /// Pushes `elem`ent into the buffer
    ///
    /// This will overwrite an old value if the buffer is full
    pub fn push(&mut self, elem: T) {
        let slice = self.array.as_mut();
        if self.len < slice.len() {
            self.len += 1;
        }
        unsafe { *slice.as_mut_ptr().offset(self.index as isize) = elem };
        self.index = (self.index + 1) % slice.len();
    }
}

impl<T, A> Deref for CircularBuffer<T, A>
where
    A: AsMut<[T]> + AsRef<[T]>,
    T: Copy,
{
    type Target = [T];

    fn deref(&self) -> &[T] {
        let slice = self.array.as_ref();
        if self.len == slice.len() {
            slice
        } else {
            unsafe { slice::from_raw_parts(slice.as_ptr(), self.len) }
        }
    }
}
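
// A hedged sketch (not part of the original commit) of the overwrite behavior
// documented on `push`: once the buffer is full, new elements evict the oldest
// ones and the length saturates at the capacity.
#[cfg(test)]
mod circular_buffer_tests {
    use super::CircularBuffer;

    #[test]
    fn push_overwrites_when_full() {
        let mut cb = CircularBuffer::new([0u8; 3]);

        cb.push(1);
        cb.push(2);
        cb.push(3);
        cb.push(4); // full: silently overwrites the oldest value (1)

        // `Deref<Target = [T]>` exposes the stored elements
        assert_eq!(cb.capacity(), 3);
        assert_eq!(cb.len(), 3);
    }
}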
/// A contiguous, growable array type
pub struct Vec<T, A>
where
    A: AsMut<[T]> + AsRef<[T]>,
    T: Copy,
{
    _marker: PhantomData<[T]>,
    array: A,
    len: usize,
}

impl<T, A> Vec<T, A>
where
    A: AsMut<[T]> + AsRef<[T]>,
    T: Copy,
{
    /// Creates a new vector using `array` as the backing storage
    pub const fn new(array: A) -> Self {
        Vec {
            _marker: PhantomData,
            array: array,
            len: 0,
        }
    }

    /// Returns the capacity of this vector
    pub fn capacity(&self) -> usize {
        self.array.as_ref().len()
    }

    /// Clears the vector, removing all values
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Removes the last element from this vector and returns it, or `None` if
    /// it's empty
    pub fn pop(&mut self) -> Option<T> {
        if self.len == 0 {
            None
        } else {
            self.len -= 1;
            unsafe { Some(*self.array.as_mut().as_mut_ptr().offset(self.len as isize)) }
        }
    }

    /// Appends an `elem`ent to the back of the collection
    ///
    /// This method returns `Err` if the vector is full
    pub fn push(&mut self, elem: T) -> Result<(), ()> {
        let slice = self.array.as_mut();

        if self.len == slice.len() {
            Err(())
        } else {
            unsafe {
                *slice.as_mut_ptr().offset(self.len as isize) = elem;
            }
            self.len += 1;
            Ok(())
        }
    }
}

impl<T, A> Deref for Vec<T, A>
where
    A: AsMut<[T]> + AsRef<[T]>,
    T: Copy,
{
    type Target = [T];

    fn deref(&self) -> &[T] {
        unsafe { slice::from_raw_parts(self.array.as_ref().as_ptr(), self.len) }
    }
}
// src/ring_buffer/mod.rs
//! Ring buffer

use core::marker::{PhantomData, Unsize};
use core::ptr;

use untagged_option::UntaggedOption;

use BufferFullError;

pub use self::spsc::{Consumer, Producer};

mod spsc;

/// A statically allocated ring buffer backed by an array `A`
pub struct RingBuffer<T, A>
where
    // FIXME(rust-lang/rust#44580) use "const generics" instead of `Unsize`
    A: Unsize<[T]>,
{
    _marker: PhantomData<[T]>,
    buffer: UntaggedOption<A>,
    // this is from where we dequeue items
    head: usize,
    // this is where we enqueue new items
    tail: usize,
}

impl<T, A> RingBuffer<T, A>
where
    A: Unsize<[T]>,
{
    /// Creates an empty ring buffer with a capacity equal to the length of the
    /// array `A` *minus one*
    pub const fn new() -> Self {
        RingBuffer {
            _marker: PhantomData,
            buffer: UntaggedOption::none(),
            head: 0,
            tail: 0,
        }
    }

    /// Returns the maximum number of elements the ring buffer can hold
    pub fn capacity(&self) -> usize {
        let buffer: &[T] = unsafe { self.buffer.as_ref() };
        buffer.len() - 1
    }
    /// Returns the item in the front of the queue, or `None` if the queue is empty
    pub fn dequeue(&mut self) -> Option<T> {
        let n = self.capacity() + 1;

        let buffer: &[T] = unsafe { self.buffer.as_ref() };

        if self.head != self.tail {
            let item = unsafe { ptr::read(buffer.as_ptr().offset(self.head as isize)) };
            self.head = (self.head + 1) % n;
            Some(item)
        } else {
            None
        }
    }

    /// Adds an `item` to the end of the queue
    ///
    /// Returns `BufferFullError` if the queue is full
    pub fn enqueue(&mut self, item: T) -> Result<(), BufferFullError> {
        let n = self.capacity() + 1;

        let buffer: &mut [T] = unsafe { self.buffer.as_mut() };

        let next_tail = (self.tail + 1) % n;
        if next_tail != self.head {
            // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. We
            // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory
            unsafe { ptr::write(buffer.as_mut_ptr().offset(self.tail as isize), item) }
            self.tail = next_tail;
            Ok(())
        } else {
            Err(BufferFullError)
        }
    }

    /// Returns the number of elements in the queue
    pub fn len(&self) -> usize {
        let n = self.capacity() + 1;

        // NOTE when `tail` has wrapped around but `head` hasn't, the live
        // elements span the end and the start of the buffer
        if self.head > self.tail {
            n - self.head + self.tail
        } else {
            self.tail - self.head
        }
    }

    /// Iterates from the front of the queue to the back
    pub fn iter(&self) -> Iter<T, A> {
        Iter {
            rb: self,
            index: 0,
            len: self.len(),
        }
    }

    /// Returns an iterator that allows modifying each value
    pub fn iter_mut(&mut self) -> IterMut<T, A> {
        let len = self.len();
        IterMut {
            rb: self,
            index: 0,
            len,
        }
    }
}
impl<T, A> Drop for RingBuffer<T, A>
where
    A: Unsize<[T]>,
{
    fn drop(&mut self) {
        for item in self {
            unsafe {
                ptr::drop_in_place(item);
            }
        }
    }
}

impl<'a, T, A> IntoIterator for &'a RingBuffer<T, A>
where
    A: Unsize<[T]>,
{
    type Item = &'a T;
    type IntoIter = Iter<'a, T, A>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, T, A> IntoIterator for &'a mut RingBuffer<T, A>
where
    A: Unsize<[T]>,
{
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T, A>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

/// An iterator over a ring buffer's items
pub struct Iter<'a, T, A>
where
    A: Unsize<[T]> + 'a,
    T: 'a,
{
    rb: &'a RingBuffer<T, A>,
    index: usize,
    len: usize,
}

/// A mutable iterator over a ring buffer's items
pub struct IterMut<'a, T, A>
where
    A: Unsize<[T]> + 'a,
    T: 'a,
{
    rb: &'a mut RingBuffer<T, A>,
    index: usize,
    len: usize,
}

impl<'a, T, A> Iterator for Iter<'a, T, A>
where
    A: Unsize<[T]> + 'a,
    T: 'a,
{
    type Item = &'a T;

    fn next(&mut self) -> Option<&'a T> {
        if self.index < self.len {
            let buffer: &[T] = unsafe { self.rb.buffer.as_ref() };
            let ptr = buffer.as_ptr();
            let i = (self.rb.head + self.index) % (self.rb.capacity() + 1);
            self.index += 1;
            Some(unsafe { &*ptr.offset(i as isize) })
        } else {
            None
        }
    }
}

impl<'a, T, A> Iterator for IterMut<'a, T, A>
where
    A: Unsize<[T]> + 'a,
    T: 'a,
{
    type Item = &'a mut T;

    fn next(&mut self) -> Option<&'a mut T> {
        if self.index < self.len {
            let capacity = self.rb.capacity() + 1;
            let buffer: &mut [T] = unsafe { self.rb.buffer.as_mut() };
            let ptr: *mut T = buffer.as_mut_ptr();
            let i = (self.rb.head + self.index) % capacity;
            self.index += 1;
            Some(unsafe { &mut *ptr.offset(i as isize) })
        } else {
            None
        }
    }
}
#[cfg(test)]
mod tests {
    use RingBuffer;

    #[test]
    fn drop() {
        struct Droppable;

        impl Droppable {
            fn new() -> Self {
                unsafe {
                    COUNT += 1;
                }
                Droppable
            }
        }

        impl Drop for Droppable {
            fn drop(&mut self) {
                unsafe {
                    COUNT -= 1;
                }
            }
        }

        static mut COUNT: i32 = 0;

        {
            let mut v: RingBuffer<Droppable, [Droppable; 4]> = RingBuffer::new();
            v.enqueue(Droppable::new()).unwrap();
            v.enqueue(Droppable::new()).unwrap();
            v.dequeue().unwrap();
        }

        assert_eq!(unsafe { COUNT }, 0);

        {
            let mut v: RingBuffer<Droppable, [Droppable; 4]> = RingBuffer::new();
            v.enqueue(Droppable::new()).unwrap();
            v.enqueue(Droppable::new()).unwrap();
        }

        assert_eq!(unsafe { COUNT }, 0);
    }

    #[test]
    fn full() {
        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();

        rb.enqueue(0).unwrap();
        rb.enqueue(1).unwrap();
        rb.enqueue(2).unwrap();

        assert!(rb.enqueue(3).is_err());
    }

    #[test]
    fn iter() {
        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();

        rb.enqueue(0).unwrap();
        rb.enqueue(1).unwrap();
        rb.enqueue(2).unwrap();

        let mut items = rb.iter();

        assert_eq!(items.next(), Some(&0));
        assert_eq!(items.next(), Some(&1));
        assert_eq!(items.next(), Some(&2));
        assert_eq!(items.next(), None);
    }

    #[test]
    fn iter_mut() {
        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();

        rb.enqueue(0).unwrap();
        rb.enqueue(1).unwrap();
        rb.enqueue(2).unwrap();

        let mut items = rb.iter_mut();

        assert_eq!(items.next(), Some(&mut 0));
        assert_eq!(items.next(), Some(&mut 1));
        assert_eq!(items.next(), Some(&mut 2));
        assert_eq!(items.next(), None);
    }

    #[test]
    fn sanity() {
        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();

        assert_eq!(rb.dequeue(), None);

        rb.enqueue(0).unwrap();

        assert_eq!(rb.dequeue(), Some(0));
        assert_eq!(rb.dequeue(), None);
    }

    #[test]
    fn wrap_around() {
        let mut rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();

        rb.enqueue(0).unwrap();
        rb.enqueue(1).unwrap();
        rb.enqueue(2).unwrap();
        rb.dequeue().unwrap();
        rb.dequeue().unwrap();
        rb.dequeue().unwrap();
        rb.enqueue(3).unwrap();
        rb.enqueue(4).unwrap();

        assert_eq!(rb.len(), 2);
    }
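
    // A hedged addition (not in the original commit): one slot of the backing
    // array is kept empty to tell a full queue apart from an empty one, so the
    // usable capacity is the array length minus one.
    #[test]
    fn capacity_is_array_len_minus_one() {
        let rb: RingBuffer<i32, [i32; 4]> = RingBuffer::new();

        assert_eq!(rb.capacity(), 3);
    }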
}
// src/ring_buffer/spsc.rs
use core::marker::Unsize;
use core::ptr::{self, Shared};

use ring_buffer::RingBuffer;
use BufferFullError;

impl<T, A> RingBuffer<T, A>
where
    A: Unsize<[T]>,
{
    /// Splits a statically allocated ring buffer into producer and consumer end points
    ///
    /// *Warning* the current implementation only supports single-core processors. It is,
    /// however, fine to use both end points on the same core of a multi-core processor.
    pub fn split(&'static mut self) -> (Producer<T, A>, Consumer<T, A>) {
        (
            Producer {
                rb: unsafe { Shared::new_unchecked(self) },
            },
            Consumer {
                rb: unsafe { Shared::new_unchecked(self) },
            },
        )
    }
}
/// A ring buffer "consumer"; it can dequeue items from the ring buffer
// NOTE the consumer semantically owns the `head` pointer of the ring buffer
pub struct Consumer<T, A>
where
A: Unsize<[T]>,
{
// XXX do we need to use `Shared` (for soundness) here?
rb: Shared<RingBuffer<T, A>>,
}
impl<T, A> Consumer<T, A>
where
A: Unsize<[T]>,
{
/// Returns the item in the front of the queue, or `None` if the queue is empty
pub fn dequeue(&mut self) -> Option<T> {
let rb = unsafe { self.rb.as_mut() };
let n = rb.capacity() + 1;
let buffer: &[T] = unsafe { rb.buffer.as_ref() };
// NOTE(volatile) the value of `tail` can change at any time in the execution context of the
// consumer so we inform this to the compiler using a volatile load
if rb.head != unsafe { ptr::read_volatile(&rb.tail) } {
let item = unsafe { ptr::read(buffer.as_ptr().offset(rb.head as isize)) };
rb.head = (rb.head + 1) % n;
Some(item)
} else {
None
}
}
}
unsafe impl<T, A> Send for Consumer<T, A>
where
A: Unsize<[T]>,
T: Send,
{
}
/// A ring buffer "producer"; it can enqueue items into the ring buffer
// NOTE the producer semantically owns the `tail` pointer of the ring buffer
pub struct Producer<T, A>
where
A: Unsize<[T]>,
{
// XXX do we need to use `Shared` (for soundness) here?
rb: Shared<RingBuffer<T, A>>,
}
impl<T, A> Producer<T, A>
where
A: Unsize<[T]>,
{
/// Adds an `item` to the end of the queue
///
/// Returns `BufferFullError` if the queue is full
pub fn enqueue(&mut self, item: T) -> Result<(), BufferFullError> {
let rb = unsafe { self.rb.as_mut() };
let n = rb.capacity() + 1;
let buffer: &mut [T] = unsafe { rb.buffer.as_mut() };
let next_tail = (rb.tail + 1) % n;
// NOTE(volatile) the value of `head` can change at any time in the execution context of the
// producer so we inform this to the compiler using a volatile load
if next_tail != unsafe { ptr::read_volatile(&rb.head) } {
// NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. We
// use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory
unsafe { ptr::write(buffer.as_mut_ptr().offset(rb.tail as isize), item) }
rb.tail = next_tail;
Ok(())
} else {
Err(BufferFullError)
}
}
}
unsafe impl<T, A> Send for Producer<T, A>
where
A: Unsize<[T]>,
T: Send,
{
}
#[cfg(test)]
mod tests {
    use RingBuffer;

    #[test]
    fn sanity() {
        static mut RB: RingBuffer<i32, [i32; 2]> = RingBuffer::new();

        let (mut p, mut c) = unsafe { RB.split() };

        assert_eq!(c.dequeue(), None);

        p.enqueue(0).unwrap();

        assert_eq!(c.dequeue(), Some(0));
    }
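
    // A hedged addition (not in the original commit): `head` and `tail` wrap
    // modulo the array length, so the split ends stay usable past the first lap.
    #[test]
    fn wrap_around() {
        static mut RB: RingBuffer<i32, [i32; 2]> = RingBuffer::new();

        let (mut p, mut c) = unsafe { RB.split() };

        for i in 0..8 {
            p.enqueue(i).unwrap();
            assert_eq!(c.dequeue(), Some(i));
        }
    }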
}
// src/vec.rs
use core::marker::{PhantomData, Unsize};
use core::{ops, ptr, slice};

use untagged_option::UntaggedOption;

use BufferFullError;
/// A [`Vec`], *vector*, backed by a fixed size array
///
/// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
pub struct Vec<T, A>
where
    // FIXME(rust-lang/rust#44580) use "const generics" instead of `Unsize`
    A: Unsize<[T]>,
{
    _marker: PhantomData<[T]>,
    buffer: UntaggedOption<A>,
    len: usize,
}

impl<T, A> Vec<T, A>
where
    A: Unsize<[T]>,
{
    /// Constructs a new, empty `Vec<T>` backed by the array `A`
    pub const fn new() -> Self {
        Vec {
            _marker: PhantomData,
            buffer: UntaggedOption::none(),
            len: 0,
        }
    }

    /// Returns the maximum number of elements the vector can hold
    pub fn capacity(&self) -> usize {
        let buffer: &[T] = unsafe { self.buffer.as_ref() };
        buffer.len()
    }

    /// Removes the last element from the vector and returns it, or `None` if it's empty
    pub fn pop(&mut self) -> Option<T> {
        let buffer: &[T] = unsafe { self.buffer.as_ref() };

        if self.len != 0 {
            self.len -= 1;
            let item = unsafe { ptr::read(&buffer[self.len]) };
            Some(item)
        } else {
            None
        }
    }

    /// Appends an element to the back of the collection
    ///
    /// Returns `BufferFullError` if the vector is full
    pub fn push(&mut self, item: T) -> Result<(), BufferFullError> {
        let capacity = self.capacity();
        let buffer: &mut [T] = unsafe { self.buffer.as_mut() };

        if self.len < capacity {
            // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. We
            // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory
            unsafe { ptr::write(&mut buffer[self.len], item) }
            self.len += 1;
            Ok(())
        } else {
            Err(BufferFullError)
        }
    }
}

impl<T, A> Drop for Vec<T, A>
where
    A: Unsize<[T]>,
{
    fn drop(&mut self) {
        unsafe { ptr::drop_in_place(&mut self[..]) }
    }
}

impl<'a, T, A> IntoIterator for &'a Vec<T, A>
where
    A: Unsize<[T]>,
{
    type Item = &'a T;
    type IntoIter = slice::Iter<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, T, A> IntoIterator for &'a mut Vec<T, A>
where
    A: Unsize<[T]>,
{
    type Item = &'a mut T;
    type IntoIter = slice::IterMut<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

impl<T, A> ops::Deref for Vec<T, A>
where
    A: Unsize<[T]>,
{
    type Target = [T];

    fn deref(&self) -> &[T] {
        let buffer: &[T] = unsafe { self.buffer.as_ref() };
        &buffer[..self.len]
    }
}

impl<T, A> ops::DerefMut for Vec<T, A>
where
    A: Unsize<[T]>,
{
    fn deref_mut(&mut self) -> &mut [T] {
        let len = self.len();
        let buffer: &mut [T] = unsafe { self.buffer.as_mut() };
        &mut buffer[..len]
    }
}
#[cfg(test)]
mod tests {
    use Vec;

    #[test]
    fn drop() {
        struct Droppable;

        impl Droppable {
            fn new() -> Self {
                unsafe {
                    COUNT += 1;
                }
                Droppable
            }
        }

        impl Drop for Droppable {
            fn drop(&mut self) {
                unsafe {
                    COUNT -= 1;
                }
            }
        }

        static mut COUNT: i32 = 0;

        {
            let mut v: Vec<Droppable, [Droppable; 2]> = Vec::new();
            v.push(Droppable::new()).unwrap();
            v.push(Droppable::new()).unwrap();
            v.pop().unwrap();
        }

        assert_eq!(unsafe { COUNT }, 0);

        {
            let mut v: Vec<Droppable, [Droppable; 2]> = Vec::new();
            v.push(Droppable::new()).unwrap();
            v.push(Droppable::new()).unwrap();
        }

        assert_eq!(unsafe { COUNT }, 0);
    }

    #[test]
    fn full() {
        let mut v: Vec<i32, [i32; 4]> = Vec::new();

        v.push(0).unwrap();
        v.push(1).unwrap();
        v.push(2).unwrap();
        v.push(3).unwrap();

        assert!(v.push(4).is_err());
    }

    #[test]
    fn iter() {
        let mut v: Vec<i32, [i32; 4]> = Vec::new();

        v.push(0).unwrap();
        v.push(1).unwrap();
        v.push(2).unwrap();
        v.push(3).unwrap();

        let mut items = v.iter();

        assert_eq!(items.next(), Some(&0));
        assert_eq!(items.next(), Some(&1));
        assert_eq!(items.next(), Some(&2));
        assert_eq!(items.next(), Some(&3));
        assert_eq!(items.next(), None);
    }

    #[test]
    fn iter_mut() {
        let mut v: Vec<i32, [i32; 4]> = Vec::new();

        v.push(0).unwrap();
        v.push(1).unwrap();
        v.push(2).unwrap();
        v.push(3).unwrap();

        let mut items = v.iter_mut();

        assert_eq!(items.next(), Some(&mut 0));
        assert_eq!(items.next(), Some(&mut 1));
        assert_eq!(items.next(), Some(&mut 2));
        assert_eq!(items.next(), Some(&mut 3));
        assert_eq!(items.next(), None);
    }

    #[test]
    fn sanity() {
        let mut v: Vec<i32, [i32; 4]> = Vec::new();

        assert_eq!(v.pop(), None);

        v.push(0).unwrap();

        assert_eq!(v.pop(), Some(0));
        assert_eq!(v.pop(), None);
    }
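
    // A hedged addition (not in the original commit): through
    // `Deref<Target = [T]>` a `Vec` can be used wherever a slice is expected.
    #[test]
    fn deref_slice() {
        let mut v: Vec<i32, [i32; 4]> = Vec::new();

        v.push(1).unwrap();
        v.push(2).unwrap();

        assert_eq!(&v[..], &[1, 2]);
        assert_eq!(v.len(), 2);
    }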
}