core: Page cache (#894)
* core: Verify page size is always a power of two

* core: Remove diag usage in page allocators

* core: Add alloc_page_size util

* core: Skeleton for page cache

* core: Fix incorrect error message

* core: Simple pagecache implementation

* core: Limit size of pagecache

* core: Fix void func returning a val

* core: Unpoison cached pages before freeing

* core: Fix unused var warning
BastianBlokland committed Jun 17, 2024
1 parent acfa0bb commit efa03c3
Showing 7 changed files with 209 additions and 20 deletions.
1 change: 1 addition & 0 deletions libs/core/CMakeLists.txt
@@ -10,6 +10,7 @@ add_library(lib_core STATIC
src/alloc_chunked.c
src/alloc_heap.c
src/alloc_page_pal.c
src/alloc_pagecache.c
src/alloc_persist.c
src/alloc_scratch.c
src/alloc_tracker.c
17 changes: 11 additions & 6 deletions libs/core/src/alloc.c
@@ -9,9 +9,10 @@
#include <sanitizer/asan_interface.h>
#endif

Allocator* g_allocHeap;
Allocator* g_allocPage;
Allocator* g_allocPersist;
Allocator* g_allocHeap;
Allocator* g_allocPage;
Allocator* g_allocPageCache;
Allocator* g_allocPersist;
THREAD_LOCAL Allocator* g_allocScratch;

static void alloc_verify_allocator(const Allocator* allocator) {
@@ -21,9 +22,10 @@ static void alloc_verify_allocator(const Allocator* allocator) {
}

void alloc_init(void) {
g_allocPage = alloc_page_init();
g_allocHeap = alloc_heap_init();
g_allocPersist = alloc_persist_init();
g_allocPage = alloc_page_init();
g_allocPageCache = alloc_pagecache_init();
g_allocHeap = alloc_heap_init();
g_allocPersist = alloc_persist_init();
}

void alloc_leak_detect(void) { alloc_heap_leak_detect(); }
@@ -35,6 +37,9 @@ void alloc_teardown(void) {
alloc_heap_teardown();
g_allocHeap = null;

alloc_pagecache_teardown();
g_allocPageCache = null;

const u32 leakedPages = alloc_page_allocated_pages();
if (leakedPages) {
alloc_crash_with_msg("alloc: {} pages leaked during app runtime", fmt_int(leakedPages));
2 changes: 1 addition & 1 deletion libs/core/src/alloc_heap.c
@@ -37,7 +37,7 @@ static Allocator* alloc_heap_sub_allocator(AllocatorHeap* allocHeap, const usize
return allocHeap->blockBuckets[0];
}
if (UNLIKELY(powIdx > block_bucket_pow_max)) {
return g_allocPage;
return g_allocPageCache;
}
return allocHeap->blockBuckets[powIdx - block_bucket_pow_min];
}
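With this change, heap requests too large for the biggest block bucket are served through the page cache instead of hitting the page allocator (and thus the kernel) directly. A sketch of the routing shape; the bucket bounds and the index helper below are assumptions for illustration, not the repository's code:

#include <stddef.h>

enum { BUCKET_POW_MIN = 4, BUCKET_POW_MAX = 11 }; /* assumed: 16 B .. 2 KiB */

/* Smallest power-of-two exponent whose block fits the request. */
static unsigned pow_index(size_t size) {
  unsigned pow = 0;
  while (((size_t)1u << pow) < size) {
    ++pow;
  }
  return pow;
}

/* Routing (mirrors alloc_heap_sub_allocator):
 *   pow <= BUCKET_POW_MIN -> blockBuckets[0]
 *   pow  > BUCKET_POW_MAX -> g_allocPageCache (previously g_allocPage)
 *   otherwise             -> blockBuckets[pow - BUCKET_POW_MIN]
 */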
6 changes: 6 additions & 0 deletions libs/core/src/alloc_internal.h
@@ -37,17 +37,23 @@ struct sAllocator {
void (*reset)(Allocator*);
};

extern Allocator* g_allocPageCache;

Allocator* alloc_heap_init(void);
void alloc_heap_leak_detect(void);
void alloc_heap_teardown(void);
u64 alloc_heap_active(void);
u64 alloc_heap_counter(void); // Incremented on every heap allocation.

Allocator* alloc_page_init(void);
usize alloc_page_size(void);
u32 alloc_page_allocated_pages(void);
usize alloc_page_allocated_size(void);
u64 alloc_page_counter(void); // Incremented on every page allocation.

Allocator* alloc_pagecache_init(void);
void alloc_pagecache_teardown(void);

Allocator* alloc_persist_init(void);
void alloc_persist_teardown(void);
u64 alloc_persist_counter(void); // Incremented on every persist allocation.
24 changes: 17 additions & 7 deletions libs/core/src/alloc_page_pal_linux.c
@@ -1,5 +1,4 @@
#include "core_bits.h"
#include "core_diag.h"
#include "core_math.h"
#include "core_thread.h"

@@ -30,10 +29,12 @@ static Mem alloc_page_alloc(Allocator* allocator, const usize size, const usize
AllocatorPage* allocPage = (AllocatorPage*)allocator;
(void)align;

diag_assert_msg(
bits_aligned(allocPage->pageSize, align),
"alloc_page_alloc: Alignment '{}' cannot be satisfied (stronger then pageSize alignment)",
fmt_int(align));
#ifndef VOLO_FAST
if (UNLIKELY(!bits_aligned(allocPage->pageSize, align))) {
alloc_crash_with_msg(
"alloc_page_alloc: Alignment '{}' invalid (stronger then pageSize)", fmt_int(align));
}
#endif

const u32 pages = alloc_page_num_pages(allocPage, size);
const usize realSize = pages * allocPage->pageSize;
@@ -48,8 +49,12 @@ static Mem alloc_page_alloc(Allocator* allocator, const usize size, const usize
return mem_create(res, size);
}

static void alloc_page_free(Allocator* allocator, Mem mem) {
diag_assert(mem_valid(mem));
static void alloc_page_free(Allocator* allocator, const Mem mem) {
#ifndef VOLO_FAST
if (UNLIKELY(!mem_valid(mem))) {
alloc_crash_with_msg("alloc_page_free: Invalid allocation");
}
#endif

AllocatorPage* allocPage = (AllocatorPage*)allocator;

@@ -70,6 +75,9 @@ static AllocatorPage g_allocatorIntern;

Allocator* alloc_page_init(void) {
const size_t pageSize = getpagesize();
if (UNLIKELY(!bits_ispow2(pageSize))) {
alloc_crash_with_msg("Non pow2 page-size is not supported");
}

g_allocatorIntern = (AllocatorPage){
.api =
Expand All @@ -84,6 +92,8 @@ Allocator* alloc_page_init(void) {
return (Allocator*)&g_allocatorIntern;
}

usize alloc_page_size(void) { return g_allocatorIntern.pageSize; }

u32 alloc_page_allocated_pages(void) {
return (u32)thread_atomic_load_i64(&g_allocatorIntern.allocatedPages);
}
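Both new checks in this file lean on the page size being a power of two, which makes alignment and rounding cheap bit operations. A standalone sketch of the tricks involved (my helpers, assumed to mirror what bits_ispow2 and bits_aligned do):

#include <stdbool.h>
#include <stddef.h>

/* A non-zero value is a power of two iff exactly one bit is set. */
static bool is_pow2(size_t v) { return v != 0 && (v & (v - 1)) == 0; }

/* For a power-of-two align, divisibility reduces to a mask test; this is
 * why alloc_page_init() now rejects non-pow2 page sizes up front. */
static bool is_aligned(size_t value, size_t align) {
  return (value & (align - 1)) == 0;
}

/* e.g. is_aligned(4096, 64) -> true: a 4 KiB page satisfies any
 * power-of-two alignment up to the page size itself. */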
22 changes: 16 additions & 6 deletions libs/core/src/alloc_page_pal_win32.c
@@ -1,5 +1,4 @@
#include "core_bits.h"
#include "core_diag.h"
#include "core_math.h"
#include "core_thread.h"

@@ -26,10 +25,12 @@ static Mem alloc_page_alloc(Allocator* allocator, const usize size, const usize
AllocatorPage* allocPage = (AllocatorPage*)allocator;
(void)align;

diag_assert_msg(
bits_aligned(allocPage->pageSize, align),
"alloc_page_alloc: Alignment '{}' cannot be satisfied (stronger then pageSize alignment)",
fmt_int(align));
#ifndef VOLO_FAST
if (UNLIKELY(!bits_aligned(allocPage->pageSize, align))) {
alloc_crash_with_msg(
"alloc_page_alloc: Alignment '{}' invalid (stronger then pageSize)", fmt_int(align));
}
#endif

const u32 pages = alloc_page_num_pages(allocPage, size);
const usize realSize = pages * allocPage->pageSize;
@@ -45,7 +46,11 @@ static Mem alloc_page_alloc(Allocator* allocator, const usize size, const usize
}

static void alloc_page_free(Allocator* allocator, Mem mem) {
diag_assert(mem_valid(mem));
#ifndef VOLO_FAST
if (UNLIKELY(!mem_valid(mem))) {
alloc_crash_with_msg("alloc_page_free: Invalid allocation");
}
#endif

AllocatorPage* allocPage = (AllocatorPage*)allocator;

@@ -69,6 +74,9 @@ Allocator* alloc_page_init(void) {
SYSTEM_INFO si;
GetSystemInfo(&si);
const size_t pageSize = si.dwPageSize;
if (UNLIKELY(!bits_ispow2(pageSize))) {
alloc_crash_with_msg("Non pow2 page-size is not supported");
}

g_allocatorIntern = (AllocatorPage){
.api =
Expand All @@ -83,6 +91,8 @@ Allocator* alloc_page_init(void) {
return (Allocator*)&g_allocatorIntern;
}

usize alloc_page_size(void) { return g_allocatorIntern.pageSize; }

u32 alloc_page_allocated_pages(void) {
return (u32)thread_atomic_load_i64(&g_allocatorIntern.allocatedPages);
}
157 changes: 157 additions & 0 deletions libs/core/src/alloc_pagecache.c
@@ -0,0 +1,157 @@
#include "core_array.h"
#include "core_bits.h"
#include "core_math.h"
#include "core_thread.h"

#include "alloc_internal.h"

/**
* Wrapper around the page allocator that caches allocations of only a few pages; this avoids
* a lot of sys-call traffic when relatively small allocations are freed and reallocated.
*/

#define pagecache_pages_max 4
#define pagecache_count_max 1024

typedef struct sPageCacheNode {
struct sPageCacheNode* next;
} PageCacheNode;

typedef struct {
Allocator api;
ThreadSpinLock spinLock;
usize pageSize;
PageCacheNode* freeNodes[pagecache_pages_max];
u32 freeNodesCount[pagecache_pages_max];
} AllocatorPageCache;
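/* The free list is intrusive: a parked block's own first bytes store the
 * PageCacheNode link, so the cache needs no bookkeeping allocations.
 *
 *   freeNodes[0] -> [1-page block] -> [1-page block] -> null
 *   freeNodes[3] -> [4-page block] -> null
 *
 * freeNodesCount tracks the list lengths so pagecache_free can cap each
 * list at pagecache_count_max without walking it. */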

static u32 pagecache_num_pages(AllocatorPageCache* cache, const usize size) {
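/* Round up; e.g. with 4 KiB pages a 5000-byte request needs
 * (5000 + 4095) / 4096 = 2 pages. */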
return (u32)((size + cache->pageSize - 1) / cache->pageSize);
}

static Mem pagecache_alloc(Allocator* allocator, const usize size, const usize align) {
AllocatorPageCache* cache = (AllocatorPageCache*)allocator;

#ifndef VOLO_FAST
if (UNLIKELY(!bits_aligned(cache->pageSize, align))) {
alloc_crash_with_msg(
"pagecache_alloc: Alignment '{}' invalid (stronger then pageSize)", fmt_int(align));
}
#else
(void)align;
#endif

const u32 numPages = pagecache_num_pages(cache, size);
if (numPages > pagecache_pages_max) {
goto NewAllocation;
}

Mem result = mem_empty;
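/* A spin lock suffices here: the critical section is a handful of
 * pointer operations per size class. */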
thread_spinlock_lock(&cache->spinLock);
{
PageCacheNode* cacheNode = cache->freeNodes[numPages - 1];
if (cacheNode) {
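/* Cached blocks are poisoned while parked; unpoison before reuse. */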
alloc_unpoison(mem_create(cacheNode, numPages * cache->pageSize));

cache->freeNodes[numPages - 1] = cacheNode->next;
cache->freeNodesCount[numPages - 1]--;

result = mem_create(cacheNode, size);
}
}
thread_spinlock_unlock(&cache->spinLock);

if (mem_valid(result)) {
return result;
}

NewAllocation:;
const Mem newAlloc = alloc_alloc(g_allocPage, numPages * cache->pageSize, cache->pageSize);
if (mem_valid(newAlloc)) {
return mem_slice(newAlloc, 0, size); // Slice the new allocation down to the requested size.
}
return mem_empty;
}

static void pagecache_free(Allocator* allocator, const Mem mem) {
#ifndef VOLO_FAST
if (UNLIKELY(!mem_valid(mem))) {
alloc_crash_with_msg("pagecache_free: Invalid allocation");
}
#endif
AllocatorPageCache* cache = (AllocatorPageCache*)allocator;
const u32 numPages = pagecache_num_pages(cache, mem.size);
if (numPages > pagecache_pages_max) {
goto FreeAllocation;
}
if (cache->freeNodesCount[numPages - 1] >= pagecache_count_max) {
goto FreeAllocation; // Enough blocks of this size are cached already.
}

alloc_tag_free(mem, AllocMemType_Normal);

thread_spinlock_lock(&cache->spinLock);
{
PageCacheNode* cacheNode = mem.ptr;
*cacheNode = (PageCacheNode){.next = cache->freeNodes[numPages - 1]};

cache->freeNodes[numPages - 1] = cacheNode;
cache->freeNodesCount[numPages - 1]++;
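/* Poison the parked block so any use-after-free trips ASan while it
 * sits in the cache; it is unpoisoned again on reuse or reset. */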

alloc_poison(mem_create(cacheNode, numPages * cache->pageSize));
}
thread_spinlock_unlock(&cache->spinLock);
return;

FreeAllocation:
alloc_free(g_allocPage, mem_create(mem.ptr, numPages * cache->pageSize));
}

static usize pagecache_max_size(Allocator* allocator) {
(void)allocator;
return alloc_max_alloc_size;
}

static void pagecache_reset(Allocator* allocator) {
AllocatorPageCache* cache = (AllocatorPageCache*)allocator;
thread_spinlock_lock(&cache->spinLock);
{
for (u32 i = 0; i != array_elems(cache->freeNodes); ++i) {
for (PageCacheNode* cacheNode = cache->freeNodes[i]; cacheNode;) {
const Mem nodeMem = mem_create(cacheNode, (i + 1) * cache->pageSize);
alloc_unpoison(nodeMem);

cacheNode = cacheNode->next;
alloc_free(g_allocPage, nodeMem);
}
cache->freeNodes[i] = null;
cache->freeNodesCount[i] = 0;
}
}
thread_spinlock_unlock(&cache->spinLock);
}

static AllocatorPageCache g_allocatorIntern;

Allocator* alloc_pagecache_init(void) {
g_allocatorIntern = (AllocatorPageCache){
.api =
{
.alloc = pagecache_alloc,
.free = pagecache_free,
.maxSize = pagecache_max_size,
.reset = pagecache_reset,
},
.pageSize = alloc_page_size(),
};
if (UNLIKELY(!g_allocatorIntern.pageSize)) {
alloc_crash_with_msg("Invalid page-size");
}
return (Allocator*)&g_allocatorIntern;
}

void alloc_pagecache_teardown(void) {
pagecache_reset(&g_allocatorIntern.api);
g_allocatorIntern = (AllocatorPageCache){0};
}
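A minimal usage sketch of the new allocator (hypothetical caller; alloc_alloc, alloc_free, alloc_page_size, and g_allocPageCache all appear in this diff):

void pagecache_example(void) {
  const usize pageSize = alloc_page_size();

  /* First allocation of this size: the cache is empty, so the request
   * falls through to g_allocPage. */
  Mem block = alloc_alloc(g_allocPageCache, 2 * pageSize, pageSize);

  /* Freeing parks the two pages on the 2-page free list instead of
   * returning them to the kernel. */
  alloc_free(g_allocPageCache, block);

  /* A same-size reallocation is served from the cache: no sys-call. */
  Mem again = alloc_alloc(g_allocPageCache, 2 * pageSize, pageSize);
  alloc_free(g_allocPageCache, again);
}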
