8 changes: 6 additions & 2 deletions include/rbs/util/rbs_allocator.h
@@ -31,8 +31,12 @@
)
#endif

struct rbs_allocator;
typedef struct rbs_allocator rbs_allocator_t;
typedef struct rbs_allocator {
// The head of a linked list of pages, starting with the most recently allocated page.
struct rbs_allocator_page *page;

size_t default_page_payload_size;
} rbs_allocator_t;

rbs_allocator_t *rbs_allocator_init(void);
void rbs_allocator_free(rbs_allocator_t *);
164 changes: 74 additions & 90 deletions src/util/rbs_allocator.c
@@ -3,6 +3,14 @@
*
* A simple arena allocator that can be freed all at once.
*
* This allocator maintains a linked list of pages, which come in two flavours:
* 1. Small allocation pages, which are the same size as the system page size.
* 2. Large allocation pages, which are the exact size requested, for sizes greater than the small page size.
*
* Small allocations are served from the unused space at the end of the "head" page. If there isn't enough room, a new
* page is allocated, and the small allocation is placed at its start. This approach wastes the unused slack at the
* end of the previous page, but it means allocations are constant-time and never have to scan the linked list for a gap.
*
* This allocator doesn't support freeing individual allocations. Only the whole arena can be freed at once at the end.
*/
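As a rough usage sketch of the lifecycle this comment describes, calling the `_impl` entry points that appear further down in this diff directly (any wrapper macros the header may provide are omitted, and the include path is assumed):

#include "rbs/util/rbs_allocator.h"

int main(void) {
    rbs_allocator_t *arena = rbs_allocator_init();

    // Two small allocations bump-allocate back-to-back out of the current head page.
    void *a = rbs_allocator_malloc_impl(arena, 64, 8);
    void *b = rbs_allocator_malloc_impl(arena, 64, 8);
    (void) a; (void) b;

    // Individual allocations are never freed; the whole arena is released at once.
    rbs_allocator_free(arena);
    return 0;
}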

@@ -23,34 +31,16 @@
#include <fcntl.h>
#endif

struct rbs_allocator {
uintptr_t heap_ptr;
uintptr_t size;
};

static void *portable_mmap_anon(size_t size) {
#ifdef _WIN32
/* Windows doesn't use this function - VirtualAlloc is used instead */
return NULL;
#else
void *ptr;

#if defined(MAP_ANONYMOUS)
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#elif defined(MAP_ANON)
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
/* Fallback to /dev/zero for systems without anonymous mapping */
int fd = open("/dev/zero", O_RDWR);
rbs_assert(fd != -1, "open('/dev/zero') failed");
typedef struct rbs_allocator_page {
// The previously allocated page, or NULL if this is the first page.
struct rbs_allocator_page *next;

ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
close(fd); /* Can close fd after mapping */
#endif
// The size of the payload in bytes.
size_t size;

return ptr;
#endif
}
// The offset of the next available byte.
size_t used;
} rbs_allocator_page_t;
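A minimal sketch of the layout rbs_allocator_page_t implies: the payload sits immediately after the header in the same malloc block, and `used` is the bump offset within it. The two helpers below are hypothetical, added only for illustration:

static inline char *page_payload(rbs_allocator_page_t *page) {
    // First payload byte, directly after the page header.
    return (char *) page + sizeof(rbs_allocator_page_t);
}

static inline size_t page_remaining(rbs_allocator_page_t *page) {
    // Bytes still available for small allocations in this page.
    return page->size - page->used;
}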

static size_t get_system_page_size(void) {
#ifdef _WIN32
@@ -64,73 +54,37 @@ static size_t get_system_page_size(void) {
#endif
}

static void *map_memory(size_t size) {
#ifdef _WIN32
LPVOID result = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
rbs_assert(result != NULL, "VirtualAlloc failed");
#else
void *result = portable_mmap_anon(size);
rbs_assert(result != MAP_FAILED, "mmap failed");
#endif
return result;
}
static rbs_allocator_page_t *rbs_allocator_page_new(size_t payload_size) {
const size_t page_header_size = sizeof(rbs_allocator_page_t);

static void destroy_memory(void *memory, size_t size) {
#ifdef _WIN32
VirtualFree(memory, 0, MEM_RELEASE);
#else
munmap(memory, size);
#endif
}
rbs_allocator_page_t *page = malloc(page_header_size + payload_size);
page->size = payload_size;
page->used = 0;

static void guard_page(void *memory, size_t page_size) {
#ifdef _WIN32
DWORD old_protect_;
BOOL result = VirtualProtect(memory, page_size, PAGE_NOACCESS, &old_protect_);
rbs_assert(result != 0, "VirtualProtect failed");
#else
int result = mprotect(memory, page_size, PROT_NONE);
rbs_assert(result == 0, "mprotect failed");
#endif
return page;
}

static size_t rbs_allocator_default_mem(void) {
size_t kib = 1024;
size_t mib = kib * 1024;
size_t gib = mib * 1024;
return 4 * gib;
}
rbs_allocator_t *rbs_allocator_init(void) {
rbs_allocator_t *allocator = malloc(sizeof(rbs_allocator_t));

static inline bool is_power_of_two(uintptr_t value) {
return value > 0 && (value & (value - 1)) == 0;
}
const size_t system_page_size = get_system_page_size();

// Align `val' to nearest multiple of `alignment'.
static uintptr_t align(uintptr_t size, uintptr_t alignment) {
rbs_assert(is_power_of_two(alignment), "alignment is not a power of two");
return (size + alignment - 1) & ~(alignment - 1);
}
allocator->default_page_payload_size = system_page_size - sizeof(rbs_allocator_page_t);

allocator->page = rbs_allocator_page_new(allocator->default_page_payload_size);
allocator->page->next = NULL;

rbs_allocator_t *rbs_allocator_init(void) {
size_t size = rbs_allocator_default_mem();
size_t page_size = get_system_page_size();
size = align(size, page_size);
void *mem = map_memory(size + page_size);
// Guard page; remove range checks in alloc fast path and hard fail if we
// consume all memory
void *last_page = (char *) mem + size;
guard_page(last_page, page_size);
uintptr_t start = (uintptr_t) mem;
rbs_allocator_t header = (rbs_allocator_t) {
.heap_ptr = start + sizeof header,
.size = size + page_size,
};
memcpy(mem, &header, sizeof header);
return (rbs_allocator_t *) mem;
return allocator;
}

void rbs_allocator_free(rbs_allocator_t *allocator) {
destroy_memory((void *) allocator, allocator->size);
rbs_allocator_page_t *page = allocator->page;
while (page) {
rbs_allocator_page_t *next = page->next;
free(page);
page = next;
}
free(allocator);
}

// Allocates `new_size` bytes from `allocator`, aligned to an `alignment`-byte boundary.
@@ -145,20 +99,50 @@ void *rbs_allocator_realloc_impl(rbs_allocator_t *allocator, void *ptr, size_t o
// Allocates `size` bytes from `allocator`, aligned to an `alignment`-byte boundary.
void *rbs_allocator_malloc_impl(rbs_allocator_t *allocator, size_t size, size_t alignment) {
rbs_assert(size % alignment == 0, "size must be a multiple of the alignment. size: %zu, alignment: %zu", size, alignment);
uintptr_t aligned = align(allocator->heap_ptr, alignment);
allocator->heap_ptr = aligned + size;
return (void *) aligned;

if (allocator->default_page_payload_size < size) { // Big allocation, give it its own page.
rbs_allocator_page_t *new_page = rbs_allocator_page_new(size);

// This simple allocator can only put small allocations into the head page.
// Naively prepending this large page to the list, in front of the previous head page, would waste the
// remaining space in that head page.
// So instead, we'll splice in the large page *after* the head page.
//
//   +-------+    +-----------+     +-----------+
//   | arena |    | head page |     | new_page  |
//   |-------|    |-----------|     |-----------|
//   | *page |--->| size      |  +->| size      |  +-> ... previous tail
//   +-------+    | used      |  |  | used      |  |
//                | *next ----+--+  | *next ----+--+
//                | ...       |     | ...       |
//                +-----------+     +-----------+
//
new_page->next = allocator->page->next;
allocator->page->next = new_page;

uintptr_t pointer = (uintptr_t) new_page + sizeof(rbs_allocator_page_t);
return (void *) pointer;
}

rbs_allocator_page_t *page = allocator->page;
if (page->used + size > page->size) {
// Not enough space. Allocate a new small page and prepend it to the allocator's linked list.
rbs_allocator_page_t *new_page = rbs_allocator_page_new(allocator->default_page_payload_size);
new_page->next = allocator->page;
allocator->page = new_page;
page = new_page;
}

uintptr_t pointer = (uintptr_t) page + sizeof(rbs_allocator_page_t) + page->used;
page->used += size;
return (void *) pointer;
}
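To make the two paths above concrete, a minimal sketch (the 1 MiB figure is only an illustrative size comfortably above any realistic default_page_payload_size):

static void malloc_impl_paths_sketch(void) {
    rbs_allocator_t *arena = rbs_allocator_init();

    void *small_a = rbs_allocator_malloc_impl(arena, 32, 8);      // fast path: served from the head page
    void *large   = rbs_allocator_malloc_impl(arena, 1 << 20, 8); // big allocation: own page, spliced in behind the head page
    void *small_b = rbs_allocator_malloc_impl(arena, 32, 8);      // still served from the same head page as small_a
    (void) small_a; (void) large; (void) small_b;

    rbs_allocator_free(arena);
}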

// Note: This eagerly fills the memory with zeroes, unlike `calloc()`, which can map in pages that the OS zeroes lazily.
// It's assumed that callers of this function will immediately write to the allocated memory anyway.
void *rbs_allocator_calloc_impl(rbs_allocator_t *allocator, size_t count, size_t size, size_t alignment) {
void *p = rbs_allocator_malloc_many_impl(allocator, count, size, alignment);
#if defined(__linux__)
// mmap with MAP_ANONYMOUS gives zero-filled pages.
#else
memset(p, 0, count * size);
#endif
return p;
}
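For instance, a zero-initialized array could be carved out of the arena like so (a fragment continuing the sketches above; `_Alignof` requires C11):

// 16 zero-initialized ints from the arena.
int *counts = rbs_allocator_calloc_impl(arena, 16, sizeof(int), _Alignof(int));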
