Diffstat (limited to 'src/core/page_allocator.c')
-rw-r--r--  src/core/page_allocator.c  91
1 file changed, 91 insertions, 0 deletions
diff --git a/src/core/page_allocator.c b/src/core/page_allocator.c
new file mode 100644
index 0000000..a20aa76
--- /dev/null
+++ b/src/core/page_allocator.c
@@ -0,0 +1,91 @@
+#include "page_allocator.h"
+
+#include <assert.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#define MIN(a, b) \
+ ({ \
+ __auto_type _a = (a); \
+ __auto_type _b = (b); \
+ _a < _b ? _a : _b; \
+ })
+
+static void *allocate(size_t size, size_t alignment, void *user_data) {
+ (void)user_data;
+
+ long _page_size = sysconf(_SC_PAGESIZE);
+ if (_page_size < 0) {
+ return NULL;
+ }
+ assert(_page_size > 512); // Sanity check
+ size_t page_size = _page_size;
+
+ // First of all, the actual allocation size needs to be a multiple of the page size.
+ if (size >= SIZE_MAX - page_size) {
+ return NULL; // Rounding the size up to a page multiple would overflow.
+ }
+ size_t aligned_size = sand_align_size_forward(size, page_size);
+
+ // Furthermore, page alignment may not be enough, i.e. the required
+ // alignment is larger than the page size. So we compute how much extra
+ // memory (overallocation) is required. That gives us the size of the final allocation.
+ size_t max_drop_size = alignment - MIN(alignment, page_size);
+ size_t raw_size = (max_drop_size == 0)
+ ? aligned_size
+ : sand_align_size_forward(aligned_size + max_drop_size, page_size);
+
+ void *raw_ptr = mmap(/* hint = */ NULL,
+ /* length = */ raw_size,
+ /* protection = */ PROT_READ | PROT_WRITE,
+ /* flags = */ MAP_PRIVATE | MAP_ANONYMOUS,
+ /* fd = */ -1,
+ /* offset = */ 0);
+ if (raw_ptr == MAP_FAILED) {
+ return NULL;
+ }
+
+ // At this point we have our properly aligned pointer, which is
+ // guaranteed to point to a region of at least `size`. Now we just have
+ // to clean up after ourselves... The superfluous bytes could be at the
+ // beginning or the end or both.
+ void *aligned_ptr = sand_align_pointer_forward(raw_ptr, alignment);
+
+ assert(aligned_ptr >= raw_ptr);
+ size_t drop_size = (size_t)((char *)aligned_ptr - (char *)raw_ptr);
+ if (drop_size > 0) {
+ munmap(raw_ptr, drop_size);
+ }
+
+ size_t remaining_size = raw_size - drop_size;
+ if (remaining_size > aligned_size) {
+ munmap((char *)aligned_ptr + aligned_size, remaining_size - aligned_size);
+ }
+
+ return aligned_ptr;
+}
+
+static void deallocate(void *old_ptr, size_t old_size, void *user_data) {
+ (void)user_data;
+
+ // The actual allocation will be roughly a multiple of the page size
+ // (disregarding the cleanup at the end of `allocate`). This calculation
+ // matches that of the start of `allocate`.
+ long page_size = sysconf(_SC_PAGESIZE);
+ assert(page_size > 0 && old_size < SIZE_MAX - (size_t)page_size);
+ size_t aligned_size = sand_align_size_forward(old_size, (size_t)page_size);
+
+ // Unmap exactly the pages that allocate() kept: aligned_size bytes starting at old_ptr.
+ munmap(old_ptr, aligned_size);
+}
+static SandAllocator vtable = {
+ .allocate = allocate,
+ .deallocate = deallocate,
+ .reallocate = NULL, // TODO
+ .user_data = NULL,
+};
+
+SandAllocator *sand_get_page_allocator(void) {
+ return &vtable;
+}
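
Note: the patch relies on sand_align_size_forward and sand_align_pointer_forward, which are declared elsewhere in the tree and are not part of this diff. The sketch below only illustrates the round-up behaviour the call sites above depend on; the names and signatures are inferred from those call sites and may not match the project's actual definitions.

#include <stddef.h>
#include <stdint.h>

// Round size up to the next multiple of alignment (alignment must be non-zero).
static inline size_t sand_align_size_forward(size_t size, size_t alignment) {
    size_t remainder = size % alignment;
    return remainder == 0 ? size : size + (alignment - remainder);
}

// Round ptr up to the next alignment-byte boundary.
static inline void *sand_align_pointer_forward(void *ptr, size_t alignment) {
    uintptr_t address = (uintptr_t)ptr;
    uintptr_t remainder = address % alignment;
    return (void *)(remainder == 0 ? address : address + (alignment - remainder));
}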
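As a worked example of the over-allocation path (illustrative numbers only, assuming a 4096-byte page): requesting size = 10000 with alignment = 16384 gives aligned_size = 12288 and max_drop_size = 16384 - 4096 = 12288, so raw_size = 24576. mmap returns a page-aligned raw_ptr, so at most 12288 bytes are dropped at the front; the remaining mapping is therefore never smaller than aligned_size, and any tail beyond aligned_size is unmapped.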
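Note: for reference, a caller goes through the returned vtable rather than calling allocate/deallocate directly. The field names below come from the initializer in this patch; everything else (the 2 MiB request, the 64-byte alignment, the error handling) is an illustrative sketch, not part of the change. In particular, deallocate must be handed the original request size so the matching number of pages is unmapped.

#include "page_allocator.h"

#include <stddef.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    SandAllocator *allocator = sand_get_page_allocator();

    size_t size = 2 * 1024 * 1024; // 2 MiB, rounded up to whole pages internally
    void *buffer = allocator->allocate(size, /* alignment = */ 64, allocator->user_data);
    if (buffer == NULL) {
        fprintf(stderr, "page allocation failed\n");
        return 1;
    }

    memset(buffer, 0, size);

    allocator->deallocate(buffer, size, allocator->user_data);
    return 0;
}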