1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
|
#include "page_allocator.h"
#include <assert.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>
// Evaluate to the smaller of `a` and `b`, evaluating each argument exactly
// once (GNU statement expression + __auto_type).
// BUG FIX: the previous ternary was `_a > _b ? _a : _b`, which computed the
// MAXIMUM. That made `alignment - MIN(alignment, page_size)` underflow
// (wrap around) whenever `alignment < page_size`.
#define MIN(a, b) \
({ \
__auto_type _a = (a); \
__auto_type _b = (b); \
_a < _b ? _a : _b; \
})
// Allocate `size` bytes with at least `alignment` alignment, backed by
// anonymous pages from mmap. Returns NULL on failure. `user_data` is unused.
//
// Strategy: mmap only guarantees page alignment, so for stronger alignments
// we over-allocate by the worst-case distance from a page boundary to the
// next `alignment` boundary, then trim the surplus pages at both ends so
// exactly [aligned_ptr, aligned_ptr + aligned_size) stays mapped. `deallocate`
// relies on this: it re-derives `aligned_size` from the original `size`.
static void *allocate(size_t size, size_t alignment, void *user_data) {
(void)user_data;
long sc_page_size = sysconf(_SC_PAGESIZE);
if (sc_page_size < 0) {
return NULL;
}
assert(sc_page_size > 512); // Sanity check
size_t page_size = (size_t)sc_page_size;
// The actual allocation size needs to be a multiple of the page size;
// reject sizes whose round-up would overflow.
if (size >= SIZE_MAX - page_size) {
return NULL;
}
size_t aligned_size = sand_align_size_forward(size, page_size);
// Worst-case number of leading bytes we may have to drop to reach an
// `alignment`-aligned address inside a page-aligned mapping. Computed with
// an explicit comparison (not MIN) so an unsigned underflow is impossible
// when alignment < page_size.
size_t max_drop_size = (alignment > page_size) ? alignment - page_size : 0;
if (max_drop_size == 0) {
// Page alignment already suffices; no over-allocation needed.
} else if (aligned_size > SIZE_MAX - alignment) {
return NULL; // Over-allocation (plus round-up) would overflow size_t.
}
// BUG FIX: the old code kept raw_size == aligned_size whenever
// `max_drop_size <= aligned_size - page_size`, but after dropping up to
// `max_drop_size` leading bytes such a mapping can end up smaller than
// `size`. We must over-allocate whenever max_drop_size > 0.
size_t raw_size = (max_drop_size == 0)
? aligned_size
: sand_align_size_forward(aligned_size + max_drop_size, page_size);
void *raw_ptr = mmap(/* hint = */ NULL,
/* length = */ raw_size,
/* protection = */ PROT_READ | PROT_WRITE,
/* flags = */ MAP_PRIVATE | MAP_ANONYMOUS,
/* fd = */ -1,
/* offset = */ 0);
if (raw_ptr == MAP_FAILED) {
return NULL;
}
// Trim the superfluous pages. The surplus may sit at the beginning, the
// end, or both. Both unmap lengths are page multiples because raw_ptr is
// page-aligned and alignment (when > page_size) is a page multiple.
void *aligned_ptr = sand_align_pointer_forward(raw_ptr, alignment);
assert(aligned_ptr >= raw_ptr);
size_t drop_size = (size_t)((char *)aligned_ptr - (char *)raw_ptr);
if (drop_size > 0) {
munmap(raw_ptr, drop_size); // Best-effort; failure only leaks pages.
}
size_t remaining_size = raw_size - drop_size;
if (remaining_size > aligned_size) {
munmap((char *)aligned_ptr + aligned_size, remaining_size - aligned_size);
}
return aligned_ptr;
}
// Release a mapping previously returned by `allocate`. `old_size` must be the
// original requested size; we redo the same page round-up as `allocate` so we
// unmap exactly the pages that were kept mapped. `user_data` is unused.
static void deallocate(void *old_ptr, size_t old_size, void *user_data) {
(void)user_data;
// BUG FIX: sysconf failure was previously only asserted, so with NDEBUG a
// return of -1 flowed into the size arithmetic as a huge unsigned value.
long sc_page_size = sysconf(_SC_PAGESIZE);
if (sc_page_size < 0) {
return; // Cannot determine the page size; nothing safe to unmap.
}
size_t page_size = (size_t)sc_page_size;
// Matches the overflow guard at the start of `allocate` (same-type
// comparison avoids the old signed/unsigned mismatch).
assert(old_size < SIZE_MAX - page_size);
size_t aligned_size = sand_align_size_forward(old_size, page_size);
// `allocate` trimmed the mapping down to exactly `aligned_size` bytes at
// `old_ptr`, so this unmaps the correct pages.
munmap(old_ptr, aligned_size);
}
// Shared allocator vtable. The page allocator is stateless, so a single
// static instance suffices and `user_data` stays NULL.
static SandAllocator vtable = {
.allocate = allocate,
.deallocate = deallocate,
.reallocate = NULL, // TODO
.user_data = NULL,
};
// Return the process-wide page allocator. The same static instance is
// handed out on every call; callers must not free it.
SandAllocator *sand_get_page_allocator(void) {
SandAllocator *page_allocator = &vtable;
return page_allocator;
}
|