summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--  README         |  9
-rw-r--r--  buddy.h        |  2
-rw-r--r--  magic_buddy.c  | 90
3 files changed, 80 insertions, 21 deletions
diff --git a/README b/README
index ed6ef3b..debd59b 100644
--- a/README
+++ b/README
@@ -12,6 +12,9 @@ Benefits are:
3. The library is small, simple, and convenient to use. It even supports
unaligned base addresses.
+ 4. Support for realloc and reserving subregions (e.g., device MMIO
+ addresses).
+
Caveats are:
1. The minimum allocation size is nontrivial, approximately 64 bytes
@@ -23,11 +26,7 @@ Caveats are:
corrupted; the probability is configurable by the user. The default has
a 1/2^128 chance of getting corrupted per program operation.
- 4. We do not (yet) support some common allocator features, including
- realloc and reserving a specific subregion (e.g., device MMIO
- addresses).
-
- 5. Some advanced allocator features, e.g., iterating over allocated regions
+ 4. Some advanced allocator features, e.g., iterating over allocated regions
or attempting to dynamically improve cache locality, are not supported.
Traditional optimized buddy allocators (see Knuth Vol 1) require at least 1/2
diff --git a/buddy.h b/buddy.h
index 45e62a7..c1da8eb 100644
--- a/buddy.h
+++ b/buddy.h
@@ -15,4 +15,4 @@ void debug_buddy(void);
// "advanced features"
void *reallocate(void *old, size_t old_size, size_t new_size);
-void preallocate(void *start, size_t size);
+int reserve(void *start, size_t size);
diff --git a/magic_buddy.c b/magic_buddy.c
index 4529e7d..b172677 100644
--- a/magic_buddy.c
+++ b/magic_buddy.c
@@ -9,6 +9,9 @@ struct free_list {
uint8_t magic[MAGIC_COOKIE_BYTES];
struct free_list *next;
struct free_list **pprev;
+
+ // only needed for the 'advanced features' (reserve and reallocate)
+ uint8_t logsize;
};
static struct free_list *(AVAIL[ADDRESS_BITS]);
@@ -51,6 +54,11 @@ static int isfree(struct free_list *block) {
return !memcmp(block->magic, MAGIC, sizeof(MAGIC));
}
+// interpret @block as a block at depth @logsize. is it free?
+static int isfree_at_logsize(struct free_list *block, size_t logsize) {
+ return isfree(block) && block->logsize == logsize;
+}
+
static void makefree(struct free_list *block) {
memcpy(block->magic, MAGIC, sizeof(MAGIC));
}
@@ -66,6 +74,7 @@ static void push(struct free_list *block, size_t logsize) {
if (block->next) block->next->pprev = &(block->next);
AVAIL[logsize] = block;
block->pprev = &(AVAIL[logsize]);
+ block->logsize = logsize;
}
static void *_allocate(size_t logsize) {
@@ -93,7 +102,7 @@ static void _liberate(struct free_list *block, size_t logsize) {
if (logsize == ROOT_LOGSIZE) return;
struct free_list *buddy = buddy_of(block, logsize);
- if (!isfree(buddy)) return;
+ if (!isfree_at_logsize(buddy, logsize)) return;
// coalesce up!
struct free_list *smaller = (buddy < block) ? buddy : block;
@@ -127,23 +136,23 @@ void debug_buddy(void) {
static struct free_list *rhs_child_of(struct free_list *block, size_t logsize) {
size_t virt_block = (size_t)block - (size_t)BASE;
size_t virt_child = virt_block | (1 << (logsize - 1));
- return (void*)((uint8_t*)BASE + virt_buddy);
+ return (void*)((uint8_t*)BASE + virt_child);
}
// NOTE: this method is perhaps more complicated than it needs to be because we
-// take great pains to avoid writing to the region that is being preallocated
+// take great pains to avoid writing to the region that is being reserved
// (e.g., in case it is device MMIO).
-int preallocate(void *start, size_t size) {
+int reserve(void *start, size_t size) {
assert(size);
void *end = (void*)((uint8_t*)start + size);
- // (1) check if BASE itself is a free node
+ // (1) iterate down to find the first free node to the left of @start
struct free_list *parent = (void*)BASE;
size_t parent_logsize = ROOT_LOGSIZE;
- while (!isfree(parent)) {
+ while (!isfree_at_logsize(parent, parent_logsize)) {
// pick the proper child and recurse
struct free_list *rhs_child = rhs_child_of(parent, parent_logsize);
- if (rhs_child <= start) parent = rhs_child;
+ if ((void*)rhs_child <= start) parent = rhs_child;
if (!parent_logsize--) return 0;
}
// parent is free!
@@ -151,11 +160,11 @@ int preallocate(void *start, size_t size) {
// if the size has no chance of fitting, return 0.
if (parent_end < end) return 0;
// otherwise, split this parent
- pop(parent, parent_logsize);
+ pop(parent);
while (1) {
// check whether the child could fit it.
struct free_list *rhs_child = rhs_child_of(parent, parent_logsize);
- struct free_list *next_child = (rhs_child <= start) parent : rhs_child;
+ struct free_list *next_child = ((void*)rhs_child <= start) ? parent : rhs_child;
void *child_end = (void*)((uint8_t*)next_child + (1 << (parent_logsize - 1)));
if (child_end < end) {
// the child *cannot* fit it, so allocate this parent.
@@ -163,7 +172,7 @@ int preallocate(void *start, size_t size) {
} else {
// the child *can* fit it, so split this parent into two children.
struct free_list *lhs_child = parent;
- if (rhs_child <= start) {
+ if ((void*)rhs_child <= start) {
// the lhs child will be free
makefree(lhs_child);
push(lhs_child, parent_logsize - 1);
@@ -180,11 +189,62 @@ int preallocate(void *start, size_t size) {
return 0;
}
+static void *_naive_reallocate(void *old, size_t old_size, size_t new_size) {
+ void *new = allocate(new_size);
+ if (!new) return 0;
+ assert(old_size < new_size);
+ memcpy(new, old, old_size);
+ liberate(old, old_size);
+ return new;
+}
+
void *reallocate(void *old, size_t old_size, size_t new_size) {
- // if new_size < old_size, repeatedly split ourself. TODO: does this mess
- // with fragmentation?
+ size_t old_logsize = size2log(old_size, 1);
+ size_t new_logsize = size2log(new_size, 1);
+
+ if (new_logsize == old_logsize) return old;
+
+ if (new_logsize < old_logsize) {
+ // repeatedly split, keeping lhs.
+ struct free_list *block = old;
+ while (new_logsize < old_logsize) {
+ old_logsize--;
+ struct free_list *right_half = buddy_of(block, old_logsize);
+ makefree(right_half);
+ push(right_half, old_logsize);
+ }
+ return old;
+ }
+
+ // otherwise, we must iterate up the tree and ensure that at each level:
+ // (1) we are the left-buddy, and
+ // (2) our buddy is free.
+ // up until we reach an ancestor of the proper size.
+
+ // First, just verify this claim.
+ size_t pos_logsize = old_logsize;
+ if (new_logsize > ROOT_LOGSIZE) return 0;
+ struct free_list *pos = old;
+ while (pos_logsize != new_logsize) {
+ struct free_list *buddy = buddy_of(pos, pos_logsize);
+ if (pos > buddy) {
+ // oh no, we're the right buddy!
+ return _naive_reallocate(old, old_size, new_size);
+ } else if (!isfree_at_logsize(buddy, pos_logsize)) {
+ // oh no, our buddy at this level isn't free!
+ return _naive_reallocate(old, old_size, new_size);
+ }
+ pos_logsize++;
+ }
- // otherwise, basically identical to preallocate, except what we want to
- // allocate is roughly @old + old_size (with proper rounding to
- // boundaries).
+ // Then, revisit the path and coalesce on the way up.
+ pos_logsize = old_logsize;
+ pos = old;
+ while (pos_logsize != new_logsize) {
+ struct free_list *buddy = buddy_of(pos, pos_logsize);
+ pop(buddy);
+ memset(buddy, 0, sizeof(struct free_list));
+ pos_logsize++;
+ }
+ return old;
}
generated by cgit on debian on lair
contact matthew@masot.net with questions or feedback