Diffstat (limited to 'crates/buddy_allocator')
-rw-r--r--  crates/buddy_allocator/Cargo.toml            |  16
-rw-r--r--  crates/buddy_allocator/src/bitvec.rs         |  59
-rw-r--r--  crates/buddy_allocator/src/free_list.rs      |  72
-rw-r--r--  crates/buddy_allocator/src/lib.rs            | 222
-rw-r--r--  crates/buddy_allocator/src/stripe.rs         | 148
-rw-r--r--  crates/buddy_allocator/src/tree.rs           | 130
-rw-r--r--  crates/buddy_allocator/tests/hosted_test.rs  | 252
7 files changed, 899 insertions, 0 deletions
diff --git a/crates/buddy_allocator/Cargo.toml b/crates/buddy_allocator/Cargo.toml
new file mode 100644
index 0000000..e2a85d3
--- /dev/null
+++ b/crates/buddy_allocator/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "vernos_buddy_allocator"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+allocator-api2 = { version = "0.2.18", default-features = false }
+contracts = { version = "0.6.3", default-features = false }
+static_assertions = { version = "1.1.0", default-features = false }
+vernos_physmem_free_list = { path = "../physmem_free_list" }
+vernos_utils = { path = "../utils" }
+
+[dev-dependencies]
+proptest = { version = "0.9.6", default-features = false, features = ["std"] }
+nix = { version = "0.29.0", default-features = false, features = ["mman"] }
diff --git a/crates/buddy_allocator/src/bitvec.rs b/crates/buddy_allocator/src/bitvec.rs
new file mode 100644
index 0000000..856d579
--- /dev/null
+++ b/crates/buddy_allocator/src/bitvec.rs
@@ -0,0 +1,59 @@
+use contracts::requires;
+use core::{fmt, mem::transmute};
+
+/// A fixed-length vector of bits.
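+///
+/// Bits are stored in order within `usize` words: on a 64-bit target, bit 70 lives in word 1 at
+/// bit position 6 (`70 / 64 == 1`, `70 % 64 == 6`). This is the same split that `get` and `set`
+/// below compute.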
+#[repr(transparent)]
+pub struct BitVec([usize]);
+
+impl BitVec {
+ fn from_mut(words: &mut [usize]) -> &mut BitVec {
+        // SAFETY: `BitVec` is a `#[repr(transparent)]` wrapper around `[usize]`.
+ unsafe { transmute(words) }
+ }
+
+ fn from_ref(words: &[usize]) -> &BitVec {
+        // SAFETY: `BitVec` is a `#[repr(transparent)]` wrapper around `[usize]`.
+ unsafe { transmute(words) }
+ }
+
+ /// Retrieves the value of a bit from the BitVec.
+ #[requires(i < self.len())]
+ pub fn get(&self, i: usize) -> bool {
+ let word_index = i / usize::BITS as usize;
+ let subword_index = i % usize::BITS as usize;
+ let one_hot = 1 << subword_index;
+ (self.0[word_index] & one_hot) != 0
+ }
+
+ /// Returns whether the BitVec is empty.
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ /// Returns the number of bits in the BitVec.
+ pub fn len(&self) -> usize {
+ self.0.len() * usize::BITS as usize
+ }
+
+ /// Sets the value of a bit in the BitVec.
+ #[requires(i < self.len())]
+ pub fn set(&mut self, i: usize, value: bool) {
+ let word_index = i / usize::BITS as usize;
+ let subword_index = i % usize::BITS as usize;
+ let one_hot = 1 << subword_index;
+ if value {
+ self.0[word_index] |= one_hot;
+ } else {
+ self.0[word_index] &= !one_hot;
+ }
+ }
+}
+
+impl fmt::Debug for BitVec {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "BitVec(")?;
+ for word in &self.0 {
+            write!(fmt, "{word:0width$b}", width = usize::BITS as usize)?;
+ }
+ write!(fmt, ")")
+ }
+}
diff --git a/crates/buddy_allocator/src/free_list.rs b/crates/buddy_allocator/src/free_list.rs
new file mode 100644
index 0000000..5a633c3
--- /dev/null
+++ b/crates/buddy_allocator/src/free_list.rs
@@ -0,0 +1,72 @@
+//! A doubly-linked list of power-of-two-sized subregions.
+
+use core::{marker::PhantomPinned, pin::Pin, ptr};
+
+/// The magic number a non-sentinel node should have.
+const FREE_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"FREENODE");
+
+/// The magic number a sentinel node should have.
+const SENTINEL_MAGIC: u64 = u64::from_ne_bytes(*b"SENTINEL");
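+
+// The magic values make it cheap to assert that a pointer really refers to the kind of node we
+// expect (see `FreeList::sentinel`); `FREE_NODE_MAGIC` is presumably intended for the analogous
+// check on non-sentinel nodes once they're linked in.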
+
+/// A doubly-linked list of power-of-two-sized subregions.
+///
+/// This type is valid when zero-initialized.
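+///
+/// Once `self_link` has run, an empty list is one whose sentinel's `next` and `prev` both point
+/// back at the sentinel itself.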
+#[derive(Debug)]
+pub struct FreeList {
+ sentinel: FreeListNode,
+}
+
+impl FreeList {
+ /// Performs self-linking. This must be done before the free list is used in any other way.
+ pub fn self_link(mut self: Pin<&mut FreeList>) {
+ assert_eq!(self.sentinel.next, ptr::null_mut());
+ assert_eq!(self.sentinel.prev, ptr::null_mut());
+
+ let self_ptr = &self.sentinel as *const FreeListNode as *mut FreeListNode;
+ let mut sentinel = self.sentinel_unchecked();
+ *sentinel.as_mut().next_mut() = self_ptr;
+ *sentinel.as_mut().prev_mut() = self_ptr;
+ *sentinel.as_mut().magic_mut() = SENTINEL_MAGIC;
+ }
+
+ /// Projects the sentinel node out of the list.
+ fn sentinel(self: Pin<&mut FreeList>) -> Pin<&mut FreeListNode> {
+ let mut sentinel = self.sentinel_unchecked();
+ assert_eq!(*sentinel.as_mut().magic_mut(), SENTINEL_MAGIC);
+ sentinel
+ }
+
+ /// Projects the sentinel node out of the list, without checking the magic number.
+ fn sentinel_unchecked(self: Pin<&mut FreeList>) -> Pin<&mut FreeListNode> {
+ // SAFETY: sentinel is structurally pinned.
+ unsafe { self.map_unchecked_mut(|list| &mut list.sentinel) }
+ }
+}
+
+#[derive(Debug)]
+struct FreeListNode {
+ next: *mut FreeListNode,
+ prev: *mut FreeListNode,
+ magic: u64,
+ _phantom: PhantomPinned,
+}
+
+impl FreeListNode {
+ /// Projects the `next` pointer out of the node.
+ fn next_mut(self: Pin<&mut FreeListNode>) -> &mut *mut FreeListNode {
+ // SAFETY: next is not structurally pinned.
+ unsafe { &mut self.get_unchecked_mut().next }
+ }
+
+ /// Projects the `prev` pointer out of the node.
+ fn prev_mut(self: Pin<&mut FreeListNode>) -> &mut *mut FreeListNode {
+        // SAFETY: prev is not structurally pinned.
+ unsafe { &mut self.get_unchecked_mut().prev }
+ }
+
+ /// Projects the `magic` number out of the node.
+ fn magic_mut(self: Pin<&mut FreeListNode>) -> &mut u64 {
+        // SAFETY: magic is not structurally pinned.
+ unsafe { &mut self.get_unchecked_mut().magic }
+ }
+}
diff --git a/crates/buddy_allocator/src/lib.rs b/crates/buddy_allocator/src/lib.rs
new file mode 100644
index 0000000..fa86de7
--- /dev/null
+++ b/crates/buddy_allocator/src/lib.rs
@@ -0,0 +1,222 @@
+//! A buddy allocator, used to allocate pages.
+//!
+//! The allocator can be split into three abstractions: stripes, trees, and the allocator.
+//!
+//! TODO: See if there's standard terminology for these.
+//!
+//! ## Stripes
+//!
+//! The buddy allocator works in terms of size classes, which are power-of-two sized, starting at a
+//! single page and going up from there. Each stripe corresponds to a single size class and a
+//! particular region of memory.
+//!
+//! A stripe contains a circular doubly-linked free-list for subregions of that size class, and a
+//! bitset marking whether a particular region has been allocated or not. Being a circular
+//! doubly-linked list makes it cheap to remove an element whose address we know, as well as cheap
+//! to push and pop elements.
+//!
+//! It's efficient to go from the address of a subregion to the index of its corresponding bit, so
+//! when we hand out a subregion from the free-list or put one back, it's cheap to read or write
+//! its bit.
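+//!
+//! A minimal sketch of that address-to-bit mapping (the names here are illustrative, not the
+//! actual code):
+//!
+//! ```ignore
+//! /// Hypothetical: `base` is the stripe's base address and `order` its size class.
+//! fn bit_index(base: usize, addr: usize, order: usize, page_size_bits: usize) -> usize {
+//!     (addr - base) >> (page_size_bits + order)
+//! }
+//! ```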
+//!
+//! ## Trees
+//!
+//! A tree is logically a collection of stripes, one per size class. To pack the structures more
+//! efficiently, they are stored interleaved with each other, and the tree manages this.
+//!
+//! The important logic at the level of trees happens when we allocate a subregion from a size
+//! class whose stripe's free-list is empty, and when we free subregions.
+//!
+//! When a stripe's free-list is empty, the tree instead allocates a subregion of a larger size
+//! from the next stripe. It can then store the unused portion in the current size class.
+//!
+//! The really important bit is the ability to merge subregions when they're freed. When we free a
+//! subregion of a certain size class, we can check whether its neighbor (its buddy) is unallocated
+//! as well. If so, we can remove it from the free-list by its address. We can combine the two
+//! subregions into one of the next larger size class, and then return that subregion to the next
+//! stripe.
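+//!
+//! Finding the buddy is a single XOR on the offset from the tree's base; a minimal sketch
+//! (again with illustrative names):
+//!
+//! ```ignore
+//! /// Hypothetical: the buddy differs from `addr` only in the bit that corresponds to the
+//! /// subregion's size.
+//! fn buddy_addr(base: usize, addr: usize, order: usize, page_size_bits: usize) -> usize {
+//!     base + ((addr - base) ^ (1 << (page_size_bits + order)))
+//! }
+//! ```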
+//!
+//! ## The buddy allocator
+//!
+//! Finally, the overall buddy allocator needs to be able to handle multiple memory regions. To
+//! facilitate this, the trees are stored in an array, which forms the overall allocator.
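+//!
+//! A hosted usage sketch, mirroring this crate's tests (the constants and `base_ptr` are
+//! illustrative):
+//!
+//! ```ignore
+//! let mut free_list: FreeListAllocator<PAGE_SIZE> = FreeListAllocator::new();
+//! unsafe { free_list.add(base_ptr, len_pages) };
+//! let mut buddy = BuddyAllocator::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(free_list)?;
+//! let first_page = buddy.alloc(0)?;
+//! ```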
+#![no_std]
+
+mod bitvec;
+mod free_list;
+mod tree;
+
+// mod stripe;
+
+use crate::{bitvec::BitVec, free_list::FreeList, tree::Tree};
+use allocator_api2::alloc::{AllocError, Layout, LayoutError};
+use contracts::requires;
+use core::{mem, pin::Pin, ptr::NonNull};
+use vernos_physmem_free_list::FreeListAllocator;
+use vernos_utils::pin::pin_project_slice_mut_iter;
+
+/// A buddy allocator.
+#[derive(Debug)]
+pub struct BuddyAllocator<
+ 'allocator,
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+> {
+ /// The free lists.
+ free_lists: Pin<&'allocator mut [FreeList; SIZE_CLASS_COUNT]>,
+
+ /// The metadata associated with each tree.
+ trees: &'allocator mut [Tree<'allocator, PAGE_SIZE, PAGE_SIZE_BITS>],
+
+ /// The shared bitset.
+ bitset: &'allocator mut BitVec,
+}
+
+impl<
+ 'allocator,
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+ > BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+ /// Returns a buddy allocator backed by the page ranges in the free list.
+ pub fn new(
+ mut free_list: FreeListAllocator<'allocator, PAGE_SIZE>,
+ ) -> Result<BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, AllocError>
+ {
+ assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS);
+ assert_ne!(SIZE_CLASS_COUNT, 0);
+
+ // We use one contiguous region to store the free lists and all the information about each
+ // tree (the base, the maximum order, and the bitset). We do one pass through the free list
+ // to find out how much storage we need, then we try to find a convenient spot to prune the
+ // storage we need from a page range in the list.
+ let mut range_count = 0;
+ let mut size_class_counts = [0; SIZE_CLASS_COUNT];
+ for (_, mut len_pages) in free_list.iter() {
+ while len_pages > 0 {
+ let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1);
+ range_count += 1;
+ size_class_counts[size_class] += 1;
+ len_pages -= 1 << size_class;
+ }
+ }
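+        // For example (illustrative numbers), with SIZE_CLASS_COUNT == 4, a 6-page range
+        // decomposes into one size-class-1 range (trailing_zeros(6) == 1, so 2 pages) followed
+        // by one size-class-2 range (trailing_zeros(4) == 2, so 4 pages).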
+
+        // Lay out the metadata. The error here really ought to be impossible, because the
+        // metadata is far smaller than the pages it describes, which already fit in memory.
+ let metadata_layout = MetadataLayout::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(
+ range_count,
+ size_class_counts,
+ )
+ .map_err(|_| AllocError)?;
+
+ // Allocate space for the metadata.
+ let metadata = free_list.allocate_zeroed(metadata_layout.layout)?;
+
+ // Break out each component of the metadata.
+ //
+ // SAFETY: The layout is computed to make all of this sound if the data is valid. All of
+ // these types are valid when zero-initialized.
+ let free_lists: &'allocator mut [FreeList; SIZE_CLASS_COUNT] = unsafe {
+ metadata
+ .byte_add(metadata_layout.free_lists_offset)
+ .cast()
+ .as_mut()
+ };
+ let trees: &'allocator mut [Tree<PAGE_SIZE, PAGE_SIZE_BITS>] = unsafe {
+ NonNull::slice_from_raw_parts(
+ metadata.byte_add(metadata_layout.trees_offset).cast(),
+ metadata_layout.trees_len,
+ )
+ .as_mut()
+ };
+ let bitset: &'allocator mut BitVec = unsafe {
+ mem::transmute::<&mut [usize], &mut BitVec>(
+ NonNull::slice_from_raw_parts(
+ metadata.byte_add(metadata_layout.bitset_offset).cast(),
+ metadata_layout.bitset_len,
+ )
+ .as_mut(),
+ )
+ };
+
+ // Pin and self-link each free list. Although they're valid in a UB sense when zeroed,
+ // they're not yet ready to use until this is done.
+ //
+ // SAFETY: We never provide the ability to move out of a free list.
+ let mut free_lists = unsafe { Pin::new_unchecked(free_lists) };
+ for free_list in pin_project_slice_mut_iter(free_lists.as_mut()) {
+ free_list.self_link();
+ }
+
+ // TODO: Actually initialize the trees with ranges from the allocator...
+
+ // Make the buddy allocator and return.
+ Ok(BuddyAllocator {
+ free_lists,
+ trees,
+ bitset,
+ })
+ }
+
+ /// Tries to allocate a subregion of a particular size class from the allocator.
+ #[requires((0..SIZE_CLASS_COUNT).contains(&size_class))]
+ pub fn alloc(&mut self, size_class: usize) -> Result<NonNull<u8>, AllocError> {
+        // TODO: Walk the trees and their free lists; for now this always fails.
+        Err(AllocError)
+ }
+}
+
+#[derive(Debug)]
+struct MetadataLayout<
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+> {
+ layout: Layout,
+ free_lists_offset: usize,
+ trees_offset: usize,
+ trees_len: usize,
+ bitset_offset: usize,
+ bitset_len: usize,
+}
+
+impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT: usize>
+ MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+ fn new(
+ range_count: usize,
+ mut size_class_counts: [usize; SIZE_CLASS_COUNT],
+ ) -> Result<MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, LayoutError> {
+ let trees_len = range_count;
+ let mut bitset_len_bits = 0;
+ // TODO: Is there a cleaner way to compute this?
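+        //
+        // Each subregion of size class `i` needs one bit, and can be split into two subregions
+        // of class `i - 1`, hence the doubling on the way down. For example (illustrative
+        // numbers), with `size_class_counts == [1, 2]`, class 1 contributes 2 bits and adds 4
+        // potential subregions to class 0, which then contributes 1 + 4 == 5 bits, for 7 bits
+        // in total.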
+ for i in (0..size_class_counts.len()).rev() {
+ let count = size_class_counts[i];
+ if count != 0 {
+ bitset_len_bits += count;
+ if i > 0 {
+ size_class_counts[i - 1] += count * 2;
+ }
+ }
+ }
+ let bitset_len = (bitset_len_bits + usize::BITS as usize - 1) / usize::BITS as usize;
+
+ let free_lists_layout = Layout::new::<[FreeList; SIZE_CLASS_COUNT]>();
+ let trees_layout = Layout::array::<Tree<PAGE_SIZE, PAGE_SIZE_BITS>>(trees_len)?;
+ let bitset_layout = Layout::array::<usize>(bitset_len)?;
+
+ let free_lists_offset = 0;
+ let (layout, trees_offset) = free_lists_layout.extend(trees_layout)?;
+ let (layout, bitset_offset) = layout.extend(bitset_layout)?;
+
+ Ok(MetadataLayout {
+ layout,
+ free_lists_offset,
+ trees_offset,
+ trees_len,
+ bitset_offset,
+ bitset_len,
+ })
+ }
+}
diff --git a/crates/buddy_allocator/src/stripe.rs b/crates/buddy_allocator/src/stripe.rs
new file mode 100644
index 0000000..2ff686b
--- /dev/null
+++ b/crates/buddy_allocator/src/stripe.rs
@@ -0,0 +1,148 @@
+use crate::{bitvec::BitVec, PAGE_SIZE_BITS};
+use core::ptr::NonNull;
+
+/// A single size class for a single region of the allocator. See the comment on the
+/// `crate::allocators::buddy` module for more information.
+pub struct Stripe<'buddy> {
+ /// The base address of the tree this stripe is a part of.
+ pub base: *const (),
+
+ /// The order of the stripe. Order `n` means the subregions are `2ⁿ` pages in size.
+ pub order: usize,
+
+ /// The sentinel node of the free-list for this node. As an invariant of the type, there are no
+ /// live references to any node in this list.
+ pub free_list: NonNull<FreeListNode>,
+
+ /// The bitset used to track whether a given subregion is allocated or not. A `true` bit
+ /// corresponds to an allocated subregion.
+ pub bitset: &'buddy mut BitVec,
+
+ /// The offset from the start of the bitset to the region used by this stripe.
+ pub bitset_offset: usize,
+}
+
+impl<'buddy> Stripe<'buddy> {
+ /// Returns the buddy of the given pointer.
+ ///
+ /// ## Safety
+ ///
+ /// - The pointer must actually be part of this region.
+ unsafe fn buddy_of(&self, ptr: NonNull<FreeListNode>) -> NonNull<FreeListNode> {
+ let index = self.buddy_of_index(self.index_of(ptr));
+ let addr = self.base as usize + (index << (PAGE_SIZE_BITS + self.order));
+ NonNull::new_unchecked(addr as *mut FreeListNode)
+ }
+
+    /// Returns the buddy of the given index.
+    fn buddy_of_index(&self, index: usize) -> usize {
+        // `index_of` already divides out the subregion size, so buddies are adjacent in index
+        // space and differ only in the lowest bit.
+        index ^ 1
+    }
+
+ /// Returns the index the given pointer should have in the BitVec.
+ fn index_of(&self, ptr: NonNull<FreeListNode>) -> usize {
+ (ptr.as_ptr() as usize - self.base as usize) >> (PAGE_SIZE_BITS + self.order)
+ }
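+
+    // A worked example of the helpers above (illustrative values): with `base == 0x8000_0000`,
+    // `PAGE_SIZE_BITS == 12`, and `order == 1`, the subregion at `0x8000_2000` has index
+    // `0x2000 >> 13 == 1`, its buddy has index `0`, and the buddy's address is `0x8000_0000`.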
+
+ /// Pops a subregion from the free-list.
+ pub fn pop(&mut self) -> Option<NonNull<FreeListNode>> {
+ // SAFETY: The `free_list` is guaranteed to be valid by the invariants of the buddy
+ // allocator. Retrieving the next pointer doesn't, on its own, break aliasing rules.
+ let next = unsafe { self.free_list.read().next };
+
+ // If the sentinel and the next pointer refer to the same spot, the free-list was empty, so
+ // we can't pop from it.
+ if self.free_list == next {
+ return None;
+ }
+
+ // Otherwise, remove the node from the free-list.
+ unsafe {
+ FreeListNode::remove(next);
+ }
+
+        // Finally, mark the node as allocated in the bitvec. A node that was on the free-list
+        // must currently be marked unallocated.
+        let index = self.index_of(next);
+        assert!(!self.bitset.get(self.bitset_offset + index));
+        self.bitset.set(self.bitset_offset + index, true);
+
+ Some(next)
+ }
+
+ /// Pushes a subregion back into the free-list.
+ ///
+ /// # Safety
+ ///
+ /// - There must be no live references to `subregion`.
+ /// - `subregion` must not be a member of any list.
+ pub unsafe fn push(&mut self, subregion: NonNull<FreeListNode>) {
+ // Insert the subregion as the first element of the free-list.
+ //
+ // SAFETY: The free-list is guaranteed to be valid by the invariants of the buddy
+ // allocator.
+ unsafe {
+ FreeListNode::insert(self.free_list, subregion);
+ }
+
+ // Mark the node as unallocated in the bitvec.
+ let index = self.index_of(subregion);
+ assert!(self.bitset.get(self.bitset_offset + index));
+ self.bitset.set(self.bitset_offset + index, false);
+ }
+
+ /// Pushes a subregion into the free-list for the first time.
+ ///
+ /// # Safety
+ ///
+ /// - There must be no live references to `subregion`.
+ /// - `subregion` must not be a member of any list.
+ pub unsafe fn push_initial(&mut self, subregion: NonNull<FreeListNode>) {
+ // Insert the subregion as the first element of the free-list.
+ //
+ // SAFETY: The free-list is guaranteed to be valid by the invariants of the buddy
+ // allocator.
+ unsafe {
+ FreeListNode::insert(self.free_list, subregion);
+ }
+
+        // Mark the node as unallocated in the bitvec. Unlike `push`, the bit starts out clear
+        // (the bitset is zero-initialized), so it must not already be set.
+        let index = self.index_of(subregion);
+        assert!(!self.bitset.get(self.bitset_offset + index));
+        self.bitset.set(self.bitset_offset + index, false);
+ }
+}
+
+pub struct FreeListNode {
+ next: NonNull<FreeListNode>,
+ prev: NonNull<FreeListNode>,
+}
+
+impl FreeListNode {
+ /// Inserts `new` after `prev`, initializing it. `prev` may be the sentinel.
+ ///
+ /// # Safety
+ ///
+ /// - There must be no live references to any node in the list that includes `prev`, including
+ /// the sentinel.
+ /// - There must be no live references to `new`.
+ /// - `new` must not be a member of any list.
+ pub unsafe fn insert(prev: NonNull<FreeListNode>, new: NonNull<FreeListNode>) {
+ let next = prev.read().next;
+ (*prev.as_ptr()).next = new;
+ (*next.as_ptr()).prev = new;
+ new.write(FreeListNode { next, prev });
+ }
+
+ /// Removes this node from the free list it is a part of.
+ ///
+ /// # Safety
+ ///
+ /// - The pointer must point to a node that is part of a free list.
+ /// - There must be no live references to any node in the list, including the sentinel.
+ /// - The pointer must not point to the sentinel node.
+ pub unsafe fn remove(ptr: NonNull<FreeListNode>) {
+ let FreeListNode { next, prev } = ptr.read();
+ (*next.as_ptr()).prev = prev;
+ (*prev.as_ptr()).next = next;
+ }
+}
diff --git a/crates/buddy_allocator/src/tree.rs b/crates/buddy_allocator/src/tree.rs
new file mode 100644
index 0000000..a303d40
--- /dev/null
+++ b/crates/buddy_allocator/src/tree.rs
@@ -0,0 +1,130 @@
+/// A single region of the allocator. See the comment on the `crate::allocators::buddy` module for
+/// more information.
+///
+/// This type is valid when zero-initialized.
+#[derive(Debug)]
+pub struct Tree<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> {
+ /// The base address of the tree.
+ pub base: *const (),
+
+ /// The log2 of the number of pages in the region represented by the tree.
+ pub log2_page_count: usize,
+
+ /// The offset in the bitset to the bits responsible for this tree's pages.
+ bitset_offset: usize,
+
+    // /// The bitset used to track whether subregions are allocated or not in this tree.
+ // pub bitset: &'buddy mut BitVec,
+ phantom: core::marker::PhantomData<&'buddy mut [u8; PAGE_SIZE]>,
+}
+
+/*
+use crate::{
+ bitvec::BitVec,
+ stripe::{FreeListNode, Stripe},
+ MAX_ORDER, PAGE_SIZE_BITS,
+};
+use contracts::requires;
+
+/// A single region of the allocator. See the comment on the `crate::allocators::buddy` module for
+/// more information.
+pub struct Tree<'buddy> {
+ /// The base address of the tree.
+ pub base: *const (),
+
+ /// The log2 of the number of pages in the region represented by the tree.
+ pub log2_page_count: usize,
+
+ /// The array of sentinel nodes of the free-list for this node. As an invariant of the type,
+ /// there are no live references to any node in any list in this array, and there are
+ /// MAX_ORDER + 1 nodes.
+ pub free_lists: NonNull<FreeListNode>,
+
+    /// The bitset used to track whether subregions are allocated or not in this tree.
+ pub bitset: &'buddy mut BitVec,
+}
+
+impl<'buddy> Tree<'buddy> {
+ /// Tries to allocate a subregion with the given order, possibly splitting a larger subregion
+    /// in order to do so.
+ #[requires(order <= MAX_ORDER)]
+ pub fn alloc(&mut self, order: usize) -> Option<NonNull<FreeListNode>> {
+ if let Some(ptr) = self.stripe(order).pop() {
+ Some(ptr)
+ } else if order == MAX_ORDER {
+ None
+ } else {
+ // Get a larger region.
+ let ptr = self.alloc(order + 1)?;
+
+ // Get a pointer to the higher half.
+ //
+ // SAFETY: This has to be in-bounds, it's part of the same allocation!
+ let higher_half = unsafe { ptr.byte_add(1 << (PAGE_SIZE_BITS + order)) };
+
+ // Put the higher half in the current buddy's stripe.
+ //
+ // SAFETY: The higher half is from this region, not in the higher stripe, and of the
+ // right size.
+ unsafe {
+ self.stripe(order).push(higher_half);
+ }
+
+ // Return the pointer.
+ Some(ptr)
+ }
+ }
+
+ /// Returns the stripe with the given order.
+ #[requires(order <= MAX_ORDER)]
+ fn stripe<'stripe>(&'stripe mut self, order: usize) -> Stripe<'stripe> {
+ // TODO: There should be some smart bitwise-math version of this...
+ let mut bitset_offset = 0;
+ for i in 0..order {
+ bitset_offset += (1 << self.log2_page_count) >> i;
+ }
+
+ Stripe {
+ base: self.base,
+ order,
+ // SAFETY: order is constrained to be in-bounds.
+ free_list: unsafe { self.free_lists.add(order) },
+ bitset: self.bitset,
+ bitset_offset,
+ }
+ }
+}
+
+/// Evil bitwise version of the reasonable loop to compute the `bitset_offset` of a stripe.
+#[requires(log2_page_count < usize::BITS as usize)]
+#[requires(order < usize::BITS as usize)]
+fn compute_bitset_offset(log2_page_count: usize, order: usize) -> usize {
+ let ones = |i: usize| !(usize::MAX << i);
+
+ if order > log2_page_count + 1 {
+ ones(log2_page_count + 1)
+ } else {
+ ones(order).wrapping_shl((log2_page_count + 1 - order) as u32)
+ }
+}
+
+#[test]
+fn compute_bitset_offset_test() {
+ fn compute_bitset_offset_loop(log2_page_count: usize, order: usize) -> usize {
+ let mut bitset_offset = 0;
+ for i in 0..order {
+ bitset_offset += (1 << log2_page_count) >> i;
+ }
+ bitset_offset
+ }
+
+ for log2_page_count in 0..64 {
+ for order in 0..64 {
+ assert_eq!(
+ compute_bitset_offset(log2_page_count, order),
+ compute_bitset_offset_loop(log2_page_count, order),
+ );
+ }
+ }
+}
+*/
diff --git a/crates/buddy_allocator/tests/hosted_test.rs b/crates/buddy_allocator/tests/hosted_test.rs
new file mode 100644
index 0000000..d13895c
--- /dev/null
+++ b/crates/buddy_allocator/tests/hosted_test.rs
@@ -0,0 +1,252 @@
+use core::{fmt, mem::size_of, num::NonZero, slice};
+use nix::sys::mman::{mmap_anonymous, munmap, MapFlags, ProtFlags};
+use proptest::prelude::*;
+use vernos_buddy_allocator::BuddyAllocator;
+use vernos_physmem_free_list::FreeListAllocator;
+
+proptest! {
+ #![proptest_config(proptest::test_runner::Config {
+ failure_persistence: None,
+ ..Default::default()
+ })]
+
+ #[test]
+ fn test_scenario(scenario in Scenario::any()) {
+ scenario.run(true)
+ }
+}
+
+#[test]
+fn test_simple_scenario() {
+ let scenario = Scenario {
+ range_sizes: vec![4],
+ actions: vec![
+ Action::Debug,
+ Action::Alloc {
+ sentinel_value: 0xaa,
+ size_class: 0,
+ },
+ Action::Debug,
+ ],
+ };
+ scenario.run(false)
+}
+
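+// A page must be big enough to hold a free-list node (two pointers plus a 64-bit magic number),
+// which is presumably why PAGE_SIZE is derived from the pointer width rather than hard-coded.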
+const PAGE_SIZE: usize = (size_of::<*const ()>() * 4).next_power_of_two();
+const PAGE_SIZE_BITS: usize = PAGE_SIZE.trailing_zeros() as usize;
+const SIZE_CLASS_COUNT: usize = 4;
+
+/// A single action the property test should perform.
+#[derive(Debug)]
+enum Action {
+ /// Makes an allocation.
+ Alloc {
+ /// A sentinel value, which is expected to be present when the allocation is freed.
+ sentinel_value: u8,
+
+ /// The size class, which is constrained to be less than `SIZE_CLASS_COUNT`.
+ size_class: usize,
+ },
+
+ /// Deallocates an allocation.
+ Dealloc {
+ /// The index of the allocation in the set of live allocations, which is taken to be a
+ /// circular list (so this is always in-bounds if there are any allocations).
+ index: usize,
+ },
+
+ /// Prints the allocator and all live allocations, for debugging purposes. This is never
+ /// automatically generated.
+ Debug,
+
+ /// Overwrites an allocation, changing its sentinel.
+ Overwrite {
+ /// The index of the allocation in the set of live allocations, which is taken to be a
+ /// circular list (so this is always in-bounds if there are any allocations).
+ index: usize,
+
+ /// The new sentinel value.
+ sentinel_value: u8,
+ },
+}
+
+impl Action {
+ /// Generates a random action.
+ pub fn any() -> impl Strategy<Value = Action> {
+ prop_oneof![
+ (any::<u8>(), 0..SIZE_CLASS_COUNT).prop_map(|(sentinel_value, size_class)| {
+ Action::Alloc {
+ sentinel_value,
+ size_class,
+ }
+ }),
+ any::<usize>().prop_map(|index| { Action::Dealloc { index } }),
+ (any::<usize>(), any::<u8>()).prop_map(|(index, sentinel_value)| {
+ Action::Overwrite {
+ index,
+ sentinel_value,
+ }
+ })
+ ]
+ }
+
+ /// Runs an action.
+ pub fn run(
+ &self,
+ buddy: &mut BuddyAllocator<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>,
+ allocs: &mut Vec<Alloc>,
+ allow_errors: bool,
+ ) {
+ match *self {
+ Action::Alloc {
+ sentinel_value,
+ size_class,
+ } => match buddy.alloc(size_class) {
+ Ok(ptr) => unsafe {
+ let slice = slice::from_raw_parts_mut(ptr.as_ptr(), PAGE_SIZE << size_class);
+ slice.fill(sentinel_value);
+ allocs.push(Alloc {
+ slice,
+ sentinel_value,
+ });
+ },
+ Err(err) => {
+ if !allow_errors {
+ Err(err).unwrap()
+ }
+ }
+ },
+ Action::Dealloc { index } => {
+ if allow_errors && allocs.is_empty() {
+ return;
+ }
+
+ let index = index % allocs.len();
+ let alloc = allocs.remove(index);
+ alloc.check_sentinel();
+ todo!("{alloc:?}")
+ }
+ Action::Debug => {
+ dbg!(buddy);
+ dbg!(allocs);
+ }
+ Action::Overwrite {
+ index,
+ sentinel_value,
+ } => {
+ if allow_errors && allocs.is_empty() {
+ return;
+ }
+
+ let index = index % allocs.len();
+ let alloc = &mut allocs[index];
+ alloc.slice.fill(sentinel_value);
+ alloc.sentinel_value = sentinel_value;
+ }
+ }
+ }
+}
+
+/// The entire series of actions to be performed.
+#[derive(Debug)]
+struct Scenario {
+ /// The sizes of the ranges the buddy allocator should be initialized with.
+ range_sizes: Vec<usize>,
+
+ /// Actions to perform after initializing the buddy allocator.
+ actions: Vec<Action>,
+}
+
+impl Scenario {
+ /// Generates a random scenario.
+ pub fn any() -> impl Strategy<Value = Scenario> {
+ (
+ prop::collection::vec(1usize..1 << (SIZE_CLASS_COUNT + 2), 0..4),
+ prop::collection::vec(Action::any(), 0..64),
+ )
+ .prop_map(|(range_sizes, actions)| Scenario {
+ range_sizes,
+ actions,
+ })
+ }
+
+ /// Runs the scenario.
+ pub fn run(&self, allow_errors: bool) {
+ // Allocate each of the page ranges.
+ let backing_mem = self
+ .range_sizes
+ .iter()
+ .map(|&size| unsafe {
+ mmap_anonymous(
+ None,
+ NonZero::new(size * PAGE_SIZE).unwrap(),
+ ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+ MapFlags::MAP_PRIVATE,
+ )
+ .expect("failed to allocate memory")
+ })
+ .collect::<Vec<_>>();
+
+ // Create the free list allocator and move the pages there.
+ let mut free_list: FreeListAllocator<PAGE_SIZE> = FreeListAllocator::new();
+ for (&size, &ptr) in self.range_sizes.iter().zip(backing_mem.iter()) {
+ unsafe {
+ free_list.add(ptr.cast(), size);
+ }
+ }
+
+ // Create the buddy allocator and move the pages from the free list to there.
+ match BuddyAllocator::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(free_list) {
+ Ok(mut buddy) => {
+ let mut allocs = Vec::new();
+
+ // Run each of the actions.
+ for action in &self.actions {
+ action.run(&mut buddy, &mut allocs, allow_errors);
+
+ // Check each allocation.
+ for alloc in &allocs {
+ alloc.check_sentinel();
+ }
+ }
+ }
+ Err(err) => {
+ if !allow_errors {
+ Err(err).unwrap()
+ }
+ }
+ }
+
+ // Free each of the page ranges.
+ for (&size, ptr) in self.range_sizes.iter().zip(backing_mem) {
+ unsafe {
+ munmap(ptr, size * PAGE_SIZE).expect("failed to free memory");
+ }
+ }
+ }
+}
+
+/// Information about an allocation we've made from the buddy allocator.
+struct Alloc<'alloc> {
+ slice: &'alloc mut [u8],
+ sentinel_value: u8,
+}
+
+impl<'alloc> Alloc<'alloc> {
+ pub fn check_sentinel(&self) {
+ let s = self.sentinel_value;
+ for (i, &b) in self.slice.iter().enumerate() {
+ assert_eq!(b, s, "at index {i}, {b} != {s}",);
+ }
+ }
+}
+
+impl<'alloc> fmt::Debug for Alloc<'alloc> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Alloc")
+ .field("ptr", &self.slice.as_ptr())
+ .field("len", &self.slice.len())
+ .field("sentinel_value", &self.sentinel_value)
+ .finish()
+ }
+}