diff --git a/src/lib.rs b/src/lib.rs
index d79b49949b8a5d45e1c698f23e558f687ae165a6..53e58ecdc55087d50cd17e2225076ae378332882 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -144,6 +144,7 @@
 //! is_send::<Vec<NotSend, [NotSend; 4]>>();
 //! ```
 
+#![cfg_attr(not(target_has_atomic = "ptr"), feature(asm))]
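+// NOTE(asm) the `asm!` macro enabled above is used by the compiler barrier in
+// `ring_buffer::spsc` on targets that lack atomic pointer-sized operations (e.g. ARMv6-M)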
 #![cfg_attr(target_has_atomic = "ptr", feature(const_atomic_usize_new))]
 #![deny(missing_docs)]
 #![feature(cfg_target_has_atomic)]
diff --git a/src/ring_buffer/spsc.rs b/src/ring_buffer/spsc.rs
index 05e44357d66520be630f423e9a8566a62f137fae..6ea265ff55acbf6a88c47b7dd9d6719b787e721a 100644
--- a/src/ring_buffer/spsc.rs
+++ b/src/ring_buffer/spsc.rs
@@ -6,6 +6,14 @@ use core::sync::atomic::Ordering;
 use BufferFullError;
 use ring_buffer::RingBuffer;
 
+// Compiler barrier: expands to an empty `asm!` block whose "memory" clobber forbids the
+// compiler from reordering memory accesses across it -- it emits no machine instructions and
+// thus imposes no ordering on the hardware itself
+#[cfg(not(target_has_atomic = "ptr"))]
+macro_rules! barrier {
+    () => {
+        unsafe { asm!("" ::: "memory") }
+    }
+}
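+// NOTE on toolchains where it is available, `core::sync::atomic::compiler_fence(Ordering::SeqCst)`
+// should have the same effect without `feature(asm)` -- whether it can be used on a given
+// non-atomic target is an assumption that would need checking, hence the inline assembly here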
+
 impl<T, A> RingBuffer<T, A>
 where
     A: Unsize<[T]>,
@@ -75,6 +83,12 @@ where
         // consumer so we inform this to the compiler using a volatile load
         if rb.head != unsafe { ptr::read_volatile(&rb.tail) } {
             let item = unsafe { ptr::read(buffer.get_unchecked(rb.head)) };
+
+            // NOTE(barrier!) this ensures that the compiler won't place the instructions to read
+            // the data *after* the instructions that increment the `head` pointer -- note that a
+            // compiler barrier alone won't be enough on architectures with out-of-order
+            // execution, where a hardware memory fence would also be needed
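+            //
+            // e.g. (a hypothetical reordering, for illustration only): if the increment of
+            // `head` were emitted before this `ptr::read`, the producer could observe the freed
+            // slot and `ptr::write` a new item into it while the read is still pending, so the
+            // consumer would get the new -- possibly partially written -- value instead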
+            barrier!();
+
             rb.head = (rb.head + 1) % n;
             Some(item)
         } else {
@@ -146,6 +160,10 @@ where
         if next_tail != unsafe { ptr::read_volatile(&rb.head) } {
             // NOTE(ptr::write) see the other `enqueue` implementation above for details
             unsafe { ptr::write(buffer.get_unchecked_mut(rb.tail), item) }
+
+            // NOTE(barrier!) see the NOTE(barrier!) above
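+            // here it keeps the `ptr::write` of the item from being moved after the store to
+            // `tail` -- otherwise (again a hypothetical reordering) the consumer could observe
+            // the new `tail` and read a slot that hasn't been written yet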
+            barrier!();
+
             rb.tail = next_tail;
             Ok(())
         } else {