Skip to content
981 changes: 631 additions & 350 deletions src/libarena/lib.rs

Large diffs are not rendered by default.

187 changes: 179 additions & 8 deletions src/libarena/tests.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
extern crate test;
use test::Bencher;
use super::TypedArena;
use super::{TypedArena, DroplessArena, SyncDroplessArena};
use std::cell::Cell;
use std::iter;

#[allow(dead_code)]
#[derive(Debug, Eq, PartialEq)]
@@ -12,13 +13,7 @@ struct Point {
}

#[test]
pub fn test_unused() {
let arena: TypedArena<Point> = TypedArena::default();
assert!(arena.chunks.borrow().is_empty());
}

#[test]
fn test_arena_alloc_nested() {
fn test_arena_alloc_nested_typed() {
struct Inner {
value: u8,
}
@@ -60,12 +55,163 @@ fn test_arena_alloc_nested() {
assert_eq!(result.inner.value, 10);
}

// Same shape as `test_arena_alloc_nested_typed`, but for `DroplessArena`.
// The `Wrap` newtype ties allocations to `&self`, so an `Inner` can be
// allocated from inside the closure that builds the `Outer` referencing it.
// This doubles as a lifetime/variance check: it must *compile* with the
// outer value holding a reference into the same arena.
#[test]
fn test_arena_alloc_nested_dropless() {
struct Inner {
value: u8,
}
struct Outer<'a> {
inner: &'a Inner,
}
// Single element type covering both payloads, since one arena is shared.
enum EI<'e> {
I(Inner),
O(Outer<'e>),
}

struct Wrap(DroplessArena);

impl Wrap {
fn alloc_inner<F: Fn() -> Inner>(&self, f: F) -> &Inner {
let r: &EI<'_> = self.0.alloc(EI::I(f()));
if let &EI::I(ref i) = r {
i
} else {
// Unreachable: we just allocated the `I` variant.
panic!("mismatch");
}
}
fn alloc_outer<'a, F: Fn() -> Outer<'a>>(&'a self, f: F) -> &Outer<'a> {
let r: &EI<'_> = self.0.alloc(EI::O(f()));
if let &EI::O(ref o) = r {
o
} else {
// Unreachable: we just allocated the `O` variant.
panic!("mismatch");
}
}
}

let arena = Wrap(DroplessArena::default());

// The inner allocation happens while the outer allocation's closure runs.
let result = arena.alloc_outer(|| Outer {
inner: arena.alloc_inner(|| Inner { value: 10 }),
});

assert_eq!(result.inner.value, 10);
}

// `SyncDroplessArena` variant of the nested-allocation test: identical logic
// to `test_arena_alloc_nested_dropless`, verifying the thread-safe arena
// supports the same borrow-while-allocating pattern.
#[test]
fn test_arena_alloc_nested_sync() {
struct Inner {
value: u8,
}
struct Outer<'a> {
inner: &'a Inner,
}
// Single element type covering both payloads, since one arena is shared.
enum EI<'e> {
I(Inner),
O(Outer<'e>),
}

struct Wrap(SyncDroplessArena);

impl Wrap {
fn alloc_inner<F: Fn() -> Inner>(&self, f: F) -> &Inner {
let r: &EI<'_> = self.0.alloc(EI::I(f()));
if let &EI::I(ref i) = r {
i
} else {
// Unreachable: we just allocated the `I` variant.
panic!("mismatch");
}
}
fn alloc_outer<'a, F: Fn() -> Outer<'a>>(&'a self, f: F) -> &Outer<'a> {
let r: &EI<'_> = self.0.alloc(EI::O(f()));
if let &EI::O(ref o) = r {
o
} else {
// Unreachable: we just allocated the `O` variant.
panic!("mismatch");
}
}
}

let arena = Wrap(SyncDroplessArena::default());

// The inner allocation happens while the outer allocation's closure runs.
let result = arena.alloc_outer(|| Outer {
inner: arena.alloc_inner(|| Inner { value: 10 }),
});

assert_eq!(result.inner.value, 10);
}

// Nested-allocation test via `alloc_from_iter` on a `TypedArena`: each
// allocation goes through a one-element `iter::once_with` and is recovered
// by slice-pattern matching. Exercises the reentrancy of `alloc_from_iter`
// (allocating from inside the iterator of another allocation).
#[test]
fn test_arena_alloc_nested_iter() {
struct Inner {
value: u8,
}
struct Outer<'a> {
inner: &'a Inner,
}
// Single element type covering both payloads, since one arena is shared.
enum EI<'e> {
I(Inner),
O(Outer<'e>),
}

// Unlike the dropless variants, `TypedArena` is parameterized by the
// element type, so the wrapper carries the arena lifetime.
struct Wrap<'a>(TypedArena<EI<'a>>);

impl<'a> Wrap<'a> {
fn alloc_inner<F: Fn() -> Inner>(&self, f: F) -> &Inner {
let r: &[EI<'_>] = self.0.alloc_from_iter(iter::once_with(|| EI::I(f())));
if let &[EI::I(ref i)] = r {
i
} else {
// Unreachable: the iterator produced exactly one `I` variant.
panic!("mismatch");
}
}
fn alloc_outer<F: Fn() -> Outer<'a>>(&self, f: F) -> &Outer<'_> {
let r: &[EI<'_>] = self.0.alloc_from_iter(iter::once_with(|| EI::O(f())));
if let &[EI::O(ref o)] = r {
o
} else {
// Unreachable: the iterator produced exactly one `O` variant.
panic!("mismatch");
}
}
}

let arena = Wrap(TypedArena::default());

// The inner allocation happens while the outer allocation's iterator runs.
let result = arena.alloc_outer(|| Outer {
inner: arena.alloc_inner(|| Inner { value: 10 }),
});

assert_eq!(result.inner.value, 10);
}

/// Bulk-allocates a plain `Copy`-able `Point` many times in each arena
/// flavor, smoke-testing chunk growth for types without drop glue.
#[test]
pub fn test_copy() {
    const ITERATIONS: usize = 100000;

    let typed = TypedArena::default();
    (0..ITERATIONS).for_each(|_| {
        typed.alloc(Point { x: 1, y: 2, z: 3 });
    });

    let dropless = DroplessArena::default();
    (0..ITERATIONS).for_each(|_| {
        dropless.alloc(Point { x: 1, y: 2, z: 3 });
    });

    let sync = SyncDroplessArena::default();
    (0..ITERATIONS).for_each(|_| {
        sync.alloc(Point { x: 1, y: 2, z: 3 });
    });
}

/// Verifies `TypedArena` honors `#[repr(align)]`: every allocation of a
/// 32-byte-aligned type must land on a 32-byte boundary.
#[test]
pub fn test_align() {
    #[repr(align(32))]
    struct AlignedPoint(Point);

    let arena = TypedArena::default();
    for _ in 0..100000 {
        let allocated = arena.alloc(AlignedPoint(Point { x: 1, y: 2, z: 3 }));
        let addr = allocated as *const _ as usize;
        // Address must be an exact multiple of the required alignment.
        assert_eq!(addr % 32, 0);
    }
}

#[bench]
@@ -165,6 +311,31 @@ fn test_typed_arena_drop_on_clear() {
}
}

/// Drop-order probe: each instance carries the `rank` at which it expects to
/// be dropped, checked against (and then advancing) a shared counter.
struct DropOrder<'a> {
    rank: u32,
    count: &'a Cell<u32>,
}

impl Drop for DropOrder<'_> {
    fn drop(&mut self) {
        let dropped_so_far = self.count.get();
        // Panic if we are dropped out of order.
        assert_eq!(dropped_so_far, self.rank);
        self.count.set(dropped_so_far + 1);
    }
}

/// `TypedArena` must run destructors in allocation order when dropped:
/// `DropOrder` panics if its rank disagrees with the shared drop counter.
#[test]
fn test_typed_arena_drop_order() {
    let drops_seen = Cell::new(0);
    {
        let arena: TypedArena<DropOrder<'_>> = TypedArena::default();
        // Each value has drop glue, so this also checks nothing leaks.
        for rank in 0..100 {
            arena.alloc(DropOrder { rank, count: &drops_seen });
        }
    };
    // All 100 destructors ran, each at its expected position.
    assert_eq!(drops_seen.get(), 100);
}

thread_local! {
static DROP_COUNTER: Cell<u32> = Cell::new(0)
}
88 changes: 88 additions & 0 deletions src/librustc_data_structures/sync.rs
Original file line number Diff line number Diff line change
@@ -266,6 +266,45 @@ cfg_if! {
}
}

/// Sequential stand-in for the rayon-backed `SharedWorkerLocal`: with a
/// single "worker" there is exactly one slot, stored inline.
#[derive(Debug, Default)]
pub struct SharedWorkerLocal<T>(T);

impl<T> SharedWorkerLocal<T> {
    /// Creates a new worker local where the `initial` closure computes the
    /// value this worker local should take for each thread in the thread pool.
    #[inline]
    pub fn new<F: FnMut(usize) -> T>(mut f: F) -> SharedWorkerLocal<T> {
        // Only one worker exists in this configuration, so only index 0
        // is ever materialized.
        let value = f(0);
        SharedWorkerLocal(value)
    }

    /// Iterates over the per-worker values (exactly one here).
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = &T> {
        ::std::iter::once(&self.0)
    }

    /// Returns the worker-local value for each thread
    #[inline]
    pub fn into_inner(self) -> Vec<T> {
        let mut values = Vec::with_capacity(1);
        values.push(self.0);
        values
    }
}

// In the sequential configuration there is a single slot, so deref simply
// exposes the one stored value — no thread-index lookup needed (contrast
// with the rayon-backed version, which indexes by `current_thread_index`).
impl<T> Deref for SharedWorkerLocal<T> {
type Target = T;

#[inline(always)]
fn deref(&self) -> &T {
&self.0
}
}

impl<T> DerefMut for SharedWorkerLocal<T> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut T {
&mut self.0
}
}

// Alias for a "multi-threaded reference"; a plain `&mut T` here.
// NOTE(review): appears to sit in the non-parallel branch of the `cfg_if!`
// above — confirm against the full file, which is not visible in this hunk.
pub type MTRef<'a, T> = &'a mut T;

#[derive(Debug, Default)]
@@ -387,6 +426,55 @@ cfg_if! {
}

pub use rayon_core::WorkerLocal;
pub use rayon_core::Registry;
use rayon_core::current_thread_index;
use rayon_core::current_num_threads;

/// Holds one `T` per rayon worker thread; `Deref` picks the current
/// thread's slot via `current_thread_index`.
#[derive(Debug)]
pub struct SharedWorkerLocal<T>(Vec<T>);

impl<T> SharedWorkerLocal<T> {
    /// Creates a new worker local where the `initial` closure computes the
    /// value this worker local should take for each thread in the thread pool.
    #[inline]
    pub fn new<F: FnMut(usize) -> T>(mut f: F) -> SharedWorkerLocal<T> {
        // One slot per pool thread, initialized by index.
        let threads = current_num_threads();
        let mut slots = Vec::with_capacity(threads);
        for index in 0..threads {
            slots.push(f(index));
        }
        SharedWorkerLocal(slots)
    }

    /// Iterates over every thread's value.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = &T> {
        self.0.iter()
    }

    /// Returns the worker-local value for each thread
    #[inline]
    pub fn into_inner(self) -> Vec<T> {
        self.0
    }
}

impl<T: Default> Default for SharedWorkerLocal<T> {
#[inline]
fn default() -> Self {
SharedWorkerLocal::new(|_| Default::default())
}
}

// Dereferencing selects the slot belonging to the current rayon worker.
// The `unwrap` panics if called from a thread outside the pool, where
// `current_thread_index()` returns `None`.
impl<T> Deref for SharedWorkerLocal<T> {
type Target = T;

#[inline(always)]
fn deref(&self) -> &T {
&self.0[current_thread_index().unwrap()]
}
}

impl<T> DerefMut for SharedWorkerLocal<T> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut T {
// Safe to hand out `&mut`: each worker only touches its own slot
// via its unique index. Panics off-pool, same as `deref`.
&mut self.0[current_thread_index().unwrap()]
}
}

pub use rayon::iter::ParallelIterator;
use rayon::iter::IntoParallelIterator;