// Copyright (c) 2024-present, fjall-rs
// This source code is licensed under both the Apache 2.0 and MIT License
// (found in the LICENSE-* files in the repository)

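// A concurrent arena allocator: a grow-only list of fixed-size buffers, each
// handed out via a CAS-based bump allocator. A Mutex is only taken to open a
// new buffer. Individual allocations are never freed; all memory is released
// at once when the `Arenas` is dropped.
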
use std::{
    alloc::Layout,
    mem::{offset_of, size_of},
    sync::{
        atomic::{AtomicPtr, AtomicUsize, Ordering},
        Mutex,
    },
};

// DEFAULT_BUFFER_SIZE needs to be at least big enough for one fully-aligned node
// for the crate to work correctly. Anything larger than that will work. The
// size_of::<AtomicUsize>() subtraction keeps the whole Buffer (offset word plus
// data array) at a round 32 KiB.
//
// TODO: Justify this size.
const DEFAULT_BUFFER_SIZE: usize = (32 << 10) - size_of::<AtomicUsize>();

impl<const BUFFER_SIZE: usize> Default for Arenas<BUFFER_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

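// SAFETY: the raw `*mut Buffer` pointers suppress the auto traits, but sharing
// an `Arenas` across threads is sound: the buffer list is only mutated while
// holding the Mutex, `open_arena` is only written under that same Mutex, and
// buffer space is handed out in non-overlapping ranges via atomic bumps.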
unsafe impl<const N: usize> Send for Arenas<N> {}
unsafe impl<const N: usize> Sync for Arenas<N> {}

pub(crate) struct Arenas<const BUFFER_SIZE: usize = DEFAULT_BUFFER_SIZE> {
    // The current set of buffers backing this arena. The vec only ever grows;
    // the buffers are freed when the Arenas is dropped.
    arenas: Mutex<Vec<*mut Buffer<BUFFER_SIZE>>>,
    // Cache of the currently open buffer. It'll be the last item in the arenas
    // vec. This atomic is only ever written while holding the arenas Mutex.
    open_arena: AtomicPtr<Buffer<BUFFER_SIZE>>,
}

impl<const BUFFER_SIZE: usize> Arenas<BUFFER_SIZE> {
    pub(crate) fn new() -> Self {
        Self {
            arenas: Default::default(),
            open_arena: AtomicPtr::default(),
        }
    }
}

impl<const BUFFER_SIZE: usize> Arenas<BUFFER_SIZE> {
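    /// Returns a pointer to `layout.size()` bytes inside one of the arena's
    /// buffers, aligned to `layout.align()`. The memory is zero-initialized
    /// and stays valid (and never moves) until the `Arenas` is dropped.
    ///
    /// `layout` must fit in a single `BUFFER_SIZE` buffer; otherwise this
    /// loops forever, opening new buffers that can never satisfy it.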
    pub(crate) fn alloc(&self, layout: Layout) -> *mut u8 {
        loop {
            // Fast path: bump-allocate out of the currently open buffer.
            let buffer_tail = self.open_arena.load(Ordering::Acquire);
            if !buffer_tail.is_null() {
                if let Some(ptr) = try_alloc(buffer_tail, layout) {
                    return ptr;
                }
            }
            // Slow path: there is no open buffer yet, or it is full. Take the
            // lock and open a new one.
            let mut buffers = self.arenas.lock().unwrap();
            let buffer = buffers.last().copied().unwrap_or(std::ptr::null_mut());
            if buffer != buffer_tail {
                // Lost the race with somebody else: another thread already
                // opened a new buffer. Retry against that one instead.
                continue;
            }
            let new_buffer: Box<Buffer<BUFFER_SIZE>> = Box::new(Buffer::default());
            let new_buffer = Box::into_raw(new_buffer);
            self.open_arena.store(new_buffer, Ordering::Release);
            buffers.push(new_buffer);
            // Loop around and allocate from the buffer we just opened.
        }
    }
}

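// A single fixed-size region: `offset` counts how many bytes of `data` have
// been handed out so far, and only ever grows.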
struct Buffer<const N: usize> {
    offset: AtomicUsize,
    data: [u8; N],
}

impl<const N: usize> Default for Buffer<N> {
    fn default() -> Self {
        Self {
            offset: Default::default(),
            data: [0; N],
        }
    }
}

impl<const N: usize> Drop for Arenas<N> {
    fn drop(&mut self) {
        let mut buffers = self.arenas.lock().unwrap();
        for buffer in buffers.drain(..) {
            // Reconstitute the Box we leaked in `alloc` and free it.
            drop(unsafe { Box::from_raw(buffer) });
        }
    }
}

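// Tries to bump-allocate `layout` out of `buf`, returning `None` if the buffer
// doesn't have enough suitably-aligned space left.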
fn try_alloc<const N: usize>(buf: *mut Buffer<N>, layout: Layout) -> Option<*mut u8> {
    let mut cur_offset = unsafe { &(*buf).offset }.load(Ordering::Relaxed);
    loop {
        // Start of the data array, the first free byte within it, and that
        // byte rounded up to the requested alignment.
        let buf_start = unsafe { buf.byte_add(offset_of!(Buffer<N>, data)) as *mut u8 };
        let free_start = unsafe { buf_start.byte_add(cur_offset) };
        let start_addr = unsafe { free_start.byte_add(free_start.align_offset(layout.align())) };
        let new_offset = ((start_addr as usize) + layout.size()) - (buf_start as usize);
        if new_offset > N {
            // Not enough aligned space left in this buffer.
            return None;
        }

        // Note that we can get away with using relaxed ordering here because
        // we're not asserting anything about the contents of the buffer. We're
        // just trying to allocate a new node.
        match unsafe { &(*buf).offset }.compare_exchange(
            cur_offset,
            new_offset,
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            Ok(_offset) => return Some(start_addr),
            Err(offset) => cur_offset = offset,
        }
    }
}
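
// A minimal usage sketch, assuming the contract described above: returned
// pointers are aligned, zero-initialized, and valid until the `Arenas` drops.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alloc_returns_aligned_writable_memory() {
        let arenas: Arenas = Arenas::new();
        let layout = Layout::new::<u64>();
        let ptr = arenas.alloc(layout);
        assert_eq!(ptr as usize % layout.align(), 0);
        // SAFETY: `ptr` refers to size_of::<u64>() exclusively-owned,
        // suitably-aligned bytes inside a live buffer.
        unsafe {
            ptr.cast::<u64>().write(42);
            assert_eq!(ptr.cast::<u64>().read(), 42);
        }
    }

    #[test]
    fn alloc_opens_new_buffers_when_full() {
        // Tiny buffers force the slow path that opens fresh buffers.
        let arenas = Arenas::<64>::new();
        for _ in 0..32 {
            let _ = arenas.alloc(Layout::new::<u64>());
        }
        assert!(arenas.arenas.lock().unwrap().len() >= 2);
    }
}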