path: root/crates/alloc_buddy/src
author    Nathan Ringo <nathan@remexre.com>    2024-09-01 12:42:37 -0500
committer Nathan Ringo <nathan@remexre.com>    2024-09-01 12:42:37 -0500
commit    a83d2e1c8bf968d948991869d1f082b610d9032a (patch)
tree      9b8b587af5af144e9b18e697447e8a1c750754d5 /crates/alloc_buddy/src
parent    2da0dad522523047cf02482caa70edcbe3605af0 (diff)
Rename crates.
Diffstat (limited to 'crates/alloc_buddy/src')
-rw-r--r--  crates/alloc_buddy/src/bitvec.rs      77
-rw-r--r--  crates/alloc_buddy/src/free_list.rs  164
-rw-r--r--  crates/alloc_buddy/src/lib.rs        409
-rw-r--r--  crates/alloc_buddy/src/tree.rs       100
4 files changed, 750 insertions, 0 deletions
diff --git a/crates/alloc_buddy/src/bitvec.rs b/crates/alloc_buddy/src/bitvec.rs
new file mode 100644
index 0000000..5cabc5e
--- /dev/null
+++ b/crates/alloc_buddy/src/bitvec.rs
@@ -0,0 +1,77 @@
+use contracts::requires;
+use core::{fmt, mem::transmute};
+
+/// A fixed-length vector of bits used for determining whether a subregion is in the free lists or
+/// not.
+pub struct Bitset([usize]);
+
+impl Bitset {
+ fn from_mut(words: &mut [usize]) -> &mut Bitset {
+ // SAFETY: The types have a newtype relationship.
+ unsafe { transmute(words) }
+ }
+
+ fn from_ref(words: &[usize]) -> &Bitset {
+ // SAFETY: The types have a newtype relationship.
+ unsafe { transmute(words) }
+ }
+
+ /// Retrieves the value of a bit from the Bitset.
+ #[requires(i < self.len())]
+ pub fn get(&self, i: usize) -> SubregionStatus {
+ let word_index = i / usize::BITS as usize;
+ let subword_index = i % usize::BITS as usize;
+ let one_hot = 1 << subword_index;
+ if (self.0[word_index] & one_hot) == 0 {
+ SubregionStatus::NotInFreeList
+ } else {
+ SubregionStatus::InFreeList
+ }
+ }
+
+ /// Returns whether the Bitset is empty.
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ /// Returns the number of bits in the Bitset.
+ pub fn len(&self) -> usize {
+ self.0.len() * usize::BITS as usize
+ }
+
+ /// Sets the value of a bit in the Bitset.
+ #[requires(i < self.len())]
+ pub fn set(&mut self, i: usize, status: SubregionStatus) {
+ let word_index = i / usize::BITS as usize;
+ let subword_index = i % usize::BITS as usize;
+ let one_hot = 1 << subword_index;
+ match status {
+ SubregionStatus::NotInFreeList => {
+ self.0[word_index] &= !one_hot;
+ }
+ SubregionStatus::InFreeList => {
+ self.0[word_index] |= one_hot;
+ }
+ }
+ }
+}
+
+impl fmt::Debug for Bitset {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "Bitset(")?;
+ for word in &self.0 {
+ write!(fmt, "{word:0width$b}", width = usize::BITS as usize)?;
+ }
+ write!(fmt, ")")
+ }
+}
+
+/// The status of a subregion, as tracked by `Bitset`.
+#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum SubregionStatus {
+ /// The region is not in the free list.
+ NotInFreeList = 0,
+
+ /// The region is in the free list.
+ InFreeList = 1,
+}
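
A minimal standalone sketch (not part of this commit) of the word/bit arithmetic that Bitset::get and Bitset::set perform; the function name bit_position is illustrative only:

// Illustrative sketch: the indexing Bitset::get/set use for a bit index `i`.
fn bit_position(i: usize) -> (usize, usize) {
    let word_index = i / usize::BITS as usize; // which usize word holds bit i
    let one_hot = 1usize << (i % usize::BITS as usize); // mask selecting bit i within that word
    (word_index, one_hot)
}
// On a 64-bit target, bit 70 lives in word 1 under mask 1 << 6: bit_position(70) == (1, 64).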
diff --git a/crates/alloc_buddy/src/free_list.rs b/crates/alloc_buddy/src/free_list.rs
new file mode 100644
index 0000000..9b470fd
--- /dev/null
+++ b/crates/alloc_buddy/src/free_list.rs
@@ -0,0 +1,164 @@
+//! A circular doubly-linked list of power-of-two-sized subregions.
+
+use contracts::{ensures, requires};
+use core::{fmt, ptr::NonNull};
+
+/// The magic number a non-sentinel node should have.
+const FREE_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"FREENODE");
+
+/// The magic number written to a node's magic field when the node has been initialized, but not
+/// yet linked into a list.
+const INITED_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"NODEINIT");
+
+/// The magic number a sentinel node should have.
+const SENTINEL_MAGIC: u64 = u64::from_ne_bytes(*b"SENTINEL");
+
+/// The magic number written to a node's magic field when the node is unlinked (i.e. allocated).
+const USED_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"ALLOC'ED");
+
+/// A pointer to the sentinel node of a circular doubly-linked list of power-of-two-sized subregions.
+pub struct FreeList(NonNull<FreeListNode>);
+
+impl FreeList {
+ /// Creates a free-list wrapper for a sentinel node's pointer.
+ ///
+ /// # Safety
+ ///
+ /// - `sentinel` must be a pointer to the sentinel of a valid circular doubly-linked list.
+ #[requires(unsafe { (*sentinel.as_ptr()).next }.is_some())]
+ #[requires(unsafe { (*sentinel.as_ptr()).prev }.is_some())]
+ #[requires(unsafe { (*sentinel.as_ptr()).magic } == SENTINEL_MAGIC)]
+ #[requires(unsafe { (*sentinel.as_ptr()).self_addr } == sentinel.as_ptr() as usize)]
+ pub unsafe fn from_sentinel(sentinel: NonNull<FreeListNode>) -> FreeList {
+ FreeList(sentinel)
+ }
+
+ /// Returns whether the free-list is empty.
+ pub fn is_empty(&self) -> bool {
+ let sentinel = self.0.as_ptr();
+ // SAFETY: This is sound as long as the sentinel was valid, which is a precondition of
+ // `FreeList::from_sentinel()`.
+ unsafe {
+ // UNWRAP: `from_sentinel` requires the sentinel's `next` pointer to be non-null.
+ let next = (*sentinel).next.unwrap();
+ (*sentinel).self_addr == next.as_ptr() as usize
+ }
+ }
+
+ /// Pops a subregion from the free-list, returning ownership to the caller.
+ pub fn pop(&mut self) -> Option<NonNull<u8>> {
+ if self.is_empty() {
+ // If the list is empty, return `None`.
+ None
+ } else {
+ // SAFETY: This is sound as long as the sentinel was valid, which is a precondition of
+ // `FreeList::from_sentinel()`.
+ unsafe {
+ // UNWRAP: In a valid circular doubly-linked list, the pointers are all non-null.
+ let node = (*self.0.as_ptr()).next.unwrap();
+ let prev = (*node.as_ptr()).prev.unwrap();
+ let next = (*node.as_ptr()).next.unwrap();
+
+ (*prev.as_ptr()).next = Some(next);
+ (*next.as_ptr()).prev = Some(prev);
+
+ (*node.as_ptr()).next = None;
+ (*node.as_ptr()).prev = None;
+ (*node.as_ptr()).magic = USED_NODE_MAGIC;
+ Some(node.cast())
+ }
+ }
+ }
+
+ /// Pushes a subregion into the free-list.
+ ///
+ /// # Safety
+ ///
+ /// - `subregion_ptr` must convey ownership over untyped memory of the correct size.
+ pub unsafe fn push(&mut self, subregion_ptr: NonNull<u8>) {
+ let node = FreeListNode::init(subregion_ptr);
+
+ let prev = self.0;
+ // UNWRAP: In a valid circular doubly-linked list, the pointers are all non-null.
+ let next = (*self.0.as_ptr()).next.unwrap();
+
+ (*node.as_ptr()).prev = Some(prev);
+ (*node.as_ptr()).next = Some(next);
+ (*node.as_ptr()).magic = FREE_NODE_MAGIC;
+
+ (*prev.as_ptr()).next = Some(node);
+ (*next.as_ptr()).prev = Some(node);
+ }
+}
+
+/// This impl will panic until the sentinel has been self-linked.
+impl fmt::Debug for FreeList {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ // SAFETY: This is sound as long as the sentinel was valid, which is a precondition of
+ // `FreeList::from_sentinel()`.
+ let sentinel_addr = unsafe { (*self.0.as_ptr()).self_addr };
+
+ let mut fmt = fmt.debug_list();
+ let mut ptr = self.0;
+ loop {
+ // SAFETY: The invariants of FreeList guarantee that the list is valid. In a valid
+ // list, the next pointer is always valid and non-null.
+ ptr = unsafe { (*ptr.as_ptr()).next.unwrap() };
+ if unsafe { (*ptr.as_ptr()).self_addr } == sentinel_addr {
+ break fmt.finish();
+ }
+ fmt.entry(&ptr);
+ }
+ }
+}
+
+/// A node in a free list. This should not be moved without careful thought.
+///
+/// This type is valid when zero-initialized.
+///
+/// # Safety
+///
+/// - This type is valid only if it is zero-initialized or if it contains a valid circular
+/// doubly-linked list of subregions of the correct size, which it owns.
+#[derive(Debug)]
+pub struct FreeListNode {
+ next: Option<NonNull<FreeListNode>>,
+ prev: Option<NonNull<FreeListNode>>,
+ magic: u64,
+ self_addr: usize,
+}
+
+impl FreeListNode {
+ /// Initializes a sentinel node.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must convey exclusive access and point to a large-enough allocation.
+ #[ensures(unsafe { (*ptr.as_ptr()).magic } == SENTINEL_MAGIC)]
+ #[ensures(unsafe { (*ptr.as_ptr()).self_addr } == ptr.as_ptr() as usize)]
+ pub unsafe fn init_sentinel(ptr: NonNull<FreeListNode>) {
+ (*ptr.as_ptr()).next = Some(ptr);
+ (*ptr.as_ptr()).prev = Some(ptr);
+ (*ptr.as_ptr()).magic = SENTINEL_MAGIC;
+ (*ptr.as_ptr()).self_addr = ptr.as_ptr() as usize;
+ }
+
+ /// Initializes a non-sentinel node.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must convey ownership and point to a large-enough allocation.
+ #[ensures(ptr.as_ptr() as usize == ret.as_ptr() as usize)]
+ #[ensures(unsafe { (*ret.as_ptr()).magic } == INITED_NODE_MAGIC)]
+ #[ensures(unsafe { (*ret.as_ptr()).self_addr } == ptr.as_ptr() as usize)]
+ unsafe fn init(ptr: NonNull<u8>) -> NonNull<FreeListNode> {
+ let ptr = ptr.cast();
+ ptr.write(FreeListNode {
+ next: None,
+ prev: None,
+ magic: INITED_NODE_MAGIC,
+ self_addr: ptr.as_ptr() as usize,
+ });
+ ptr
+ }
+}
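
A hypothetical in-crate usage sketch of the free list above; free_list_round_trip, sentinel, and subregion are illustrative names, and both pointers are assumed to be exclusively owned, suitably aligned, and at least size_of::<FreeListNode>() bytes:

use core::ptr::NonNull;

// Hypothetical usage, not part of this commit.
unsafe fn free_list_round_trip(sentinel: NonNull<FreeListNode>, subregion: NonNull<u8>) {
    // A sentinel must be self-linked before it can back a FreeList.
    FreeListNode::init_sentinel(sentinel);
    let mut list = FreeList::from_sentinel(sentinel);
    assert!(list.is_empty());

    // Pushing hands ownership of the subregion to the list; popping hands it back.
    list.push(subregion);
    assert_eq!(list.pop(), Some(subregion));
    assert!(list.is_empty());
}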
diff --git a/crates/alloc_buddy/src/lib.rs b/crates/alloc_buddy/src/lib.rs
new file mode 100644
index 0000000..d0d4818
--- /dev/null
+++ b/crates/alloc_buddy/src/lib.rs
@@ -0,0 +1,409 @@
+//! A buddy allocator, used to allocate pages.
+//!
+//! The allocator can be split into three abstractions: stripes, trees, and the allocator.
+//!
+//! TODO: See if there's standard terminology for these.
+//!
+//! ## Stripes
+//!
+//! The buddy allocator works in terms of size classes, which are power-of-two sized, starting at a
+//! single page and going up from there. Each stripe corresponds to a single size class and a
+//! particular region of memory.
+//!
+//! A stripe contains a circular doubly-linked free-list for subregions of that size class, and a
+//! bitset marking whether each subregion is currently in that free-list. Being a circular
+//! doubly-linked list makes it cheap to remove an element whose address we know, as well as cheap
+//! to push and pop elements.
+//!
+//! It's efficient to go from the address of a subregion to the index of its corresponding bit, so
+//! when we hand out a subregion from the free-list or put one back, it's cheap to read or write
+//! its bit.
+//!
+//! ## Trees
+//!
+//! A tree is logically a collection of stripes, one per size class. To pack the structures more
+//! efficiently, they are stored interleaved with each other, and the tree manages this.
+//!
+//! The important logic at the level of trees happens when we allocate a subregion from a size
+//! class whose stripe's free-list is empty, and when we free subregions.
+//!
+//! When a stripe's free-list is empty, the tree instead allocates a larger subregion from the
+//! next stripe up and splits it, returning the unused portion to the current size class's
+//! free-list.
+//!
+//! The other important piece is the ability to merge subregions when they're freed. When we free
+//! a subregion of a certain size class, we check whether its neighbor (its buddy) is also free.
+//! If so, we remove the buddy from its free-list by address, combine the two subregions into one
+//! of the next larger size class, and return that larger subregion to the next stripe.
+//!
+//! ## The buddy allocator
+//!
+//! Finally, the overall buddy allocator needs to be able to handle multiple memory regions. To
+//! facilitate this, the trees are stored in an array, which forms the overall allocator.
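+//!
+//! As an illustrative sketch (the concrete numbers are assumptions, e.g. a 4096-byte page and
+//! size class 0): a subregion's buddy differs from it in exactly one bit of its byte offset
+//! within the tree, `buddy_offset = offset ^ (PAGE_SIZE << size_class)`. The page at offset
+//! `0x1000` therefore has its buddy at `0x1000 ^ 0x1000 == 0x0000`, and when both are free they
+//! can merge into one size-class-1 subregion at offset `0x0000`.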
+#![no_std]
+
+extern crate std; // TODO: Remove me
+
+mod bitvec;
+mod free_list;
+mod tree;
+
+use crate::{
+ bitvec::{Bitset, SubregionStatus},
+ free_list::{FreeList, FreeListNode},
+ tree::Tree,
+};
+use allocator_api2::alloc::{AllocError, Layout, LayoutError};
+use contracts::{ensures, requires};
+use core::{fmt, mem, ptr::NonNull};
+use vernos_alloc_physmem_free_list::FreeListAllocator;
+
+/// A buddy allocator.
+pub struct BuddyAllocator<
+ 'allocator,
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+> {
+ /// The free list sentinels (`SIZE_CLASS_COUNT` of them).
+ free_list_sentinels: NonNull<FreeListNode>,
+
+ /// The metadata associated with each tree.
+ trees: &'allocator mut [Tree<'allocator, PAGE_SIZE, PAGE_SIZE_BITS>],
+
+ /// The shared bitset.
+ bitset: &'allocator mut Bitset,
+}
+
+impl<
+ 'allocator,
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+ > BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+ /// Returns a buddy allocator backed by the page ranges in the free list.
+ pub fn new(
+ mut free_list_alloc: FreeListAllocator<'allocator, PAGE_SIZE>,
+ ) -> Result<BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, AllocError>
+ {
+ assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS);
+ assert_ne!(SIZE_CLASS_COUNT, 0);
+
+ // We split up all the nodes into size-class-sized chunks here. After this, it's important
+ // for safety that we never split a node again -- we'll see why later.
+ free_list_alloc.split_to_powers_of_two_no_larger_than(1 << (SIZE_CLASS_COUNT - 1));
+
+ // We use one contiguous region to store the free lists and all the information about each
+ // tree (the base, the maximum order, and the bitset). We do one pass through the free list
+ // to find out how much storage we need, then we try to find a convenient spot to steal the
+ // storage we need from a page range in the list.
+ let mut range_count = 0;
+ let mut size_class_counts = [0; SIZE_CLASS_COUNT];
+ for (_, len_pages) in free_list_alloc.iter() {
+ assert!(len_pages.is_power_of_two());
+ let size_class = len_pages.trailing_zeros() as usize;
+ assert!((0..SIZE_CLASS_COUNT).contains(&size_class));
+ range_count += 1;
+ size_class_counts[size_class] += 1;
+ }
+
+ // Lay out the metadata.
+ //
+ // UNWRAP: The error here really ought to be impossible, because the pages in the free list
+ // needed to fit in there in the first place.
+ let metadata_layout = MetadataLayout::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(
+ range_count,
+ size_class_counts,
+ )
+ .unwrap();
+
+ // Allocate space for the metadata.
+ //
+ // SAFETY: We need to make _absolutely_ sure nothing in the buddy allocator uses this
+ // memory before we have a chance to remove it from the allocator. This function guarantees
+ // that the returned memory won't overlap with the first page of a live node, so as long as
+ // we don't split a node or write past the first page of one, we should be OK.
+ let metadata = unsafe {
+ free_list_alloc.allocate_without_always_removing_the_node(metadata_layout.layout)?
+ };
+ // SAFETY: The pointer must be valid to have been returned here.
+ unsafe {
+ metadata.cast::<u8>().write_bytes(0, metadata.len());
+ }
+
+ // Break out each component of the metadata.
+ //
+ // SAFETY: The layout is computed to make all of this sound if the data is valid. All of
+ // these types are valid when zero-initialized.
+ let free_list_sentinels: NonNull<FreeListNode> = unsafe {
+ metadata
+ .byte_add(metadata_layout.free_list_sentinels_offset)
+ .cast()
+ };
+ let trees: &'allocator mut [Tree<PAGE_SIZE, PAGE_SIZE_BITS>] = unsafe {
+ NonNull::slice_from_raw_parts(
+ metadata.byte_add(metadata_layout.trees_offset).cast(),
+ metadata_layout.trees_len,
+ )
+ .as_mut()
+ };
+ // SAFETY: This type is valid when zero-initialized, and is a newtype for `[usize]` so has
+ // the same layout.
+ let bitset: &'allocator mut Bitset = unsafe {
+ mem::transmute::<&mut [usize], &mut Bitset>(
+ NonNull::slice_from_raw_parts(
+ metadata.byte_add(metadata_layout.bitset_offset).cast(),
+ metadata_layout.bitset_len,
+ )
+ .as_mut(),
+ )
+ };
+
+ // Self-link each free list's sentinel. Although a zeroed sentinel is valid (no UB), it is
+ // not ready to use until it has been self-linked.
+ for size_class in 0..SIZE_CLASS_COUNT {
+ // SAFETY: The metadata layout reserves SIZE_CLASS_COUNT sentinel nodes, so this stays
+ // in-bounds.
+ unsafe { FreeListNode::init_sentinel(free_list_sentinels.add(size_class)) };
+ }
+
+ // Initialize the trees, adding subregions into the allocator as we do. We don't actually
+ // assign bitset offsets yet, because we're going to sort the trees by address before we
+ // do. (This isn't algorithmically necessary, but it makes debugging a bit easier.)
+ let mut i = 0;
+ while let Some((ptr, mut len_pages)) = free_list_alloc.pop() {
+ while len_pages > 0 {
+ let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1);
+ trees[i].base_ptr = Some(ptr.cast());
+ trees[i].size_class = size_class;
+ i += 1;
+ len_pages -= 1 << size_class;
+ }
+ }
+ assert!(
+ i == trees.len() || i + 1 == trees.len(),
+ "found {} trees but allocated {} slots",
+ i,
+ trees.len()
+ );
+
+ // Slice the trees slice to fit how many trees actually remained, then sort them by
+ // address.
+ let trees = &mut trees[..i];
+ trees.sort_by_key(|tree| tree.base_addr());
+
+ // Compute the number of bits that belong to each tree and assign them.
+ let mut bitset_offset = 0;
+ for tree in &mut *trees {
+ tree.bitset_offset = bitset_offset;
+ bitset_offset += (1 << (tree.size_class + 1)) - 1;
+ }
+ assert!(bitset_offset <= metadata_layout.bitset_len_bits);
+
+ // Add regions the trees should own to the free lists, excluding the subregions that
+ // metadata is allocated in.
+
+ // Since we know that the metadata was allocated at the end of the page range it was
+ // allocated in, we can just test the end addresses.
+ //
+ // SAFETY: This is the one-past-the-end address.
+ let metadata_addr_hi =
+ unsafe { metadata.cast::<u8>().add(metadata.len()).as_ptr() } as usize;
+ for tree in &mut *trees {
+ // SAFETY: This is the one-past-the-end address.
+ let tree_addr_hi = tree.base_addr() + (PAGE_SIZE << tree.size_class);
+ if metadata_addr_hi == tree_addr_hi {
+ // If the metadata was present inside the tree... TODO.
+ std::eprintln!("TODO");
+ } else {
+ // Otherwise, we can take the whole range of the tree and add it to the free list.
+
+ // SAFETY: There are as many sentinels as size classes, so this will be in-bounds.
+ let sentinel = unsafe { free_list_sentinels.add(tree.size_class) };
+
+ // SAFETY: The free list is kept valid.
+ let mut free_list = unsafe { FreeList::from_sentinel(sentinel) };
+
+ // SAFETY: This region is not owned or borrowed by anything else (the only thing it
+ // could be would be the metadata, but that's not in this tree), is of the correct
+ // size, and is composed of untyped memory.
+ unsafe { free_list.push(tree.base_ptr.unwrap().cast()) };
+
+ // Then, set a bit in the bitset to say that this region is present.
+ tree.bitset_mark_as_present(bitset, tree.size_class, 0);
+ }
+ }
+
+ // Make the buddy allocator's struct and return it.
+ Ok(BuddyAllocator {
+ free_list_sentinels,
+ trees,
+ bitset,
+ })
+ }
+
+ /// Tries to allocate a subregion of a particular size class from the allocator.
+ #[requires(size_class < SIZE_CLASS_COUNT)]
+ pub fn alloc(&mut self, size_class: usize) -> Result<NonNull<u8>, AllocError> {
+ if let Some(ptr) = self.free_list(size_class).pop() {
+ // Fast-path: try allocating from the right size class's free list.
+
+ // Find the corresponding tree and the offset of the subregion in the tree.
+ let (tree_index, offset) = self.tree_index_and_offset(ptr);
+ let tree = &mut self.trees[tree_index];
+
+ // Mark the pointer as no longer being in the tree.
+ tree.bitset_mark_as_absent(&mut self.bitset, size_class, offset);
+
+ // Return the pointer.
+ Ok(ptr)
+ } else {
+ // Slow path: splitting a subregion from a larger size class is not implemented yet, so
+ // fail the allocation.
+ Err(AllocError)
+ }
+ }
+
+ /// Returns a subregion of a particular size class to the allocator.
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must have been returned by a call to `alloc` with the same `size_class`.
+ /// - `ptr` must convey ownership over the entire region.
+ #[requires(size_class < SIZE_CLASS_COUNT)]
+ pub unsafe fn dealloc(&mut self, ptr: NonNull<u8>, size_class: usize) {
+ // Find the corresponding tree and the offset of the subregion in the tree.
+ let (tree_index, offset) = self.tree_index_and_offset(ptr);
+ let tree = &mut self.trees[tree_index];
+
+ // Ensure that the memory is marked as not being in the free list.
+ assert_eq!(
+ tree.bitset_get(&mut self.bitset, size_class, offset),
+ SubregionStatus::NotInFreeList
+ );
+
+ // Start combining size classes.
+ assert!(size_class <= tree.size_class);
+ while size_class < tree.size_class {
+ let buddy_offset = offset ^ (PAGE_SIZE << size_class);
+ todo!("{buddy_offset:#x} {offset:#x}");
+ }
+
+ // Mark the pointer as being in the tree.
+ tree.bitset_mark_as_present(&mut self.bitset, size_class, offset);
+
+ // Return the pointer to the appropriate free list.
+ self.free_list(size_class).push(ptr);
+ }
+
+ /// Returns the free list with the given size class.
+ #[requires(size_class < SIZE_CLASS_COUNT)]
+ fn free_list(&mut self, size_class: usize) -> FreeList {
+ // SAFETY: The bounds-check is a precondition.
+ let sentinel = unsafe { self.free_list_sentinels.add(size_class) };
+
+ // SAFETY: The free list is kept valid.
+ unsafe { FreeList::from_sentinel(sentinel) }
+ }
+
+ /// Returns the index of the tree this pointer was allocated in.
+ #[ensures(self.trees[ret.0].contains(ptr.as_ptr() as usize))]
+ #[ensures(ret.1 < (1 << self.trees[ret.0].size_class))]
+ fn tree_index_and_offset(&self, ptr: NonNull<u8>) -> (usize, usize) {
+ let addr = ptr.as_ptr() as usize;
+ let tree_index = self
+ .trees
+ .binary_search_by_key(&addr, |tree| tree.base_addr());
+ let tree_index = match tree_index {
+ Ok(i) => i,
+ Err(i) => {
+ assert_ne!(i, 0);
+ i - 1
+ }
+ };
+ let offset = addr - self.trees[tree_index].base_addr();
+ (tree_index, offset)
+ }
+}
+
+impl<
+ 'allocator,
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+ > fmt::Debug for BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ struct FreeLists<const SIZE_CLASS_COUNT: usize>(NonNull<FreeListNode>);
+
+ impl<const SIZE_CLASS_COUNT: usize> fmt::Debug for FreeLists<SIZE_CLASS_COUNT> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_list()
+ .entries((0..SIZE_CLASS_COUNT).map(|size_class| {
+ // SAFETY: The free lists are kept valid, and the range of size classes is
+ // necessarily in-bounds.
+ unsafe { FreeList::from_sentinel(self.0.add(size_class)) }
+ }))
+ .finish()
+ }
+ }
+
+ fmt.debug_struct("BuddyAllocator")
+ .field(
+ "free_lists",
+ &FreeLists::<SIZE_CLASS_COUNT>(self.free_list_sentinels),
+ )
+ .field("trees", &self.trees)
+ .field("bitset", &self.bitset)
+ .finish()
+ }
+}
+
+#[derive(Debug)]
+struct MetadataLayout<
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+> {
+ layout: Layout,
+ free_list_sentinels_offset: usize,
+ trees_offset: usize,
+ trees_len: usize,
+ bitset_offset: usize,
+ bitset_len: usize,
+ bitset_len_bits: usize,
+}
+
+impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT: usize>
+ MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+ fn new(
+ range_count: usize,
+ size_class_counts: [usize; SIZE_CLASS_COUNT],
+ ) -> Result<MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, LayoutError> {
+ let trees_len = range_count;
+ let mut bitset_len_bits = 0;
+ for (size_class, &count) in size_class_counts.iter().enumerate() {
+ let bits_for_size_class = (1 << (size_class + 1)) - 1;
+ bitset_len_bits += bits_for_size_class * count;
+ }
+ let bitset_len = (bitset_len_bits + usize::BITS as usize - 1) / usize::BITS as usize;
+
+ let free_list_sentinels_layout = Layout::new::<[FreeListNode; SIZE_CLASS_COUNT]>();
+ let trees_layout = Layout::array::<Tree<PAGE_SIZE, PAGE_SIZE_BITS>>(trees_len)?;
+ let bitset_layout = Layout::array::<usize>(bitset_len)?;
+
+ let free_list_sentinels_offset = 0;
+ let (layout, trees_offset) = free_list_sentinels_layout.extend(trees_layout)?;
+ let (layout, bitset_offset) = layout.extend(bitset_layout)?;
+
+ Ok(MetadataLayout {
+ layout,
+ free_list_sentinels_offset,
+ trees_offset,
+ trees_len,
+ bitset_offset,
+ bitset_len,
+ bitset_len_bits,
+ })
+ }
+}
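
A hypothetical end-to-end sketch of driving the allocator above; the const parameters (4 KiB pages, 12 page-size bits, 19 size classes) and the example function are assumptions for illustration, and note that dealloc's merge path is still a todo!() in this commit:

use allocator_api2::alloc::AllocError;
use vernos_alloc_physmem_free_list::FreeListAllocator;

// Hypothetical usage, not part of this commit.
fn example(physmem: FreeListAllocator<'_, 4096>) -> Result<(), AllocError> {
    let mut buddy = BuddyAllocator::<4096, 12, 19>::new(physmem)?;

    // Allocate a single 4 KiB page (size class 0)...
    let page = buddy.alloc(0)?;

    // ...and return it. SAFETY: `page` came from `alloc` with the same size class and is not
    // used afterwards.
    unsafe { buddy.dealloc(page, 0) };
    Ok(())
}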
diff --git a/crates/alloc_buddy/src/tree.rs b/crates/alloc_buddy/src/tree.rs
new file mode 100644
index 0000000..72ee466
--- /dev/null
+++ b/crates/alloc_buddy/src/tree.rs
@@ -0,0 +1,100 @@
+use crate::bitvec::{Bitset, SubregionStatus};
+use contracts::requires;
+use core::ptr::NonNull;
+
+/// A single region of the allocator. See the crate-level documentation in `lib.rs` for more
+/// information.
+///
+/// This type is valid when zero-initialized.
+#[derive(Debug)]
+pub struct Tree<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> {
+ /// The base address of the tree.
+ pub base_ptr: Option<NonNull<[u8; PAGE_SIZE]>>,
+
+ /// The log2 of the number of pages in the region represented by the tree.
+ pub size_class: usize,
+
+ /// The offset in the bitset to the bits responsible for this tree's pages.
+ pub bitset_offset: usize,
+
+ phantom: core::marker::PhantomData<&'buddy mut [u8; PAGE_SIZE]>,
+}
+
+impl<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize>
+ Tree<'buddy, PAGE_SIZE, PAGE_SIZE_BITS>
+{
+ /// Returns the base address of the tree.
+ #[requires(self.base_ptr.is_some())]
+ pub fn base_addr(&self) -> usize {
+ self.base_ptr.unwrap().as_ptr() as usize
+ }
+
+ /// Reads a bit from the bitset.
+ #[requires(size_class <= self.size_class)]
+ #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))]
+ #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)]
+ pub fn bitset_get(
+ &self,
+ bitset: &mut Bitset,
+ size_class: usize,
+ offset_bytes: usize,
+ ) -> SubregionStatus {
+ bitset.get(self.bitset_index(size_class, offset_bytes))
+ }
+
+ /// Returns the index of a bit in the bitset that corresponds to the given size class and
+ /// offset.
+ #[requires(size_class <= self.size_class)]
+ #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))]
+ #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)]
+ fn bitset_index(&self, size_class: usize, offset_bytes: usize) -> usize {
+ // We store the largest size classes first in the bitset. Count how many we are away from
+ // the largest.
+ let skipped_size_classes = self.size_class - size_class;
+ let bits_skipped_for_size_class = (1 << skipped_size_classes) - 1;
+
+ // Next, find our index in the size class.
+ let bits_skipped_for_index = offset_bytes >> (PAGE_SIZE_BITS + size_class);
+
+ // The sum of those two is simply our index.
+ bits_skipped_for_size_class + bits_skipped_for_index
+ }
+
+ /// Changes a bit in the bitset from `InFreeList` to `NotInFreeList`.
+ #[requires(size_class <= self.size_class)]
+ #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))]
+ #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)]
+ pub fn bitset_mark_as_absent(
+ &self,
+ bitset: &mut Bitset,
+ size_class: usize,
+ offset_bytes: usize,
+ ) {
+ let index = self.bitset_index(size_class, offset_bytes);
+ assert_eq!(bitset.get(index), SubregionStatus::InFreeList);
+ bitset.set(index, SubregionStatus::NotInFreeList)
+ }
+
+ /// Changes a bit in the bitset from `NotInFreeList` to `InFreeList`.
+ #[requires(size_class <= self.size_class)]
+ #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))]
+ #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)]
+ pub fn bitset_mark_as_present(
+ &self,
+ bitset: &mut Bitset,
+ size_class: usize,
+ offset_bytes: usize,
+ ) {
+ let index = self.bitset_index(size_class, offset_bytes);
+ assert_eq!(bitset.get(index), SubregionStatus::NotInFreeList);
+ bitset.set(index, SubregionStatus::InFreeList)
+ }
+
+ /// Returns whether the tree contains an address.
+ #[requires(self.base_ptr.is_some())]
+ pub fn contains(&self, addr: usize) -> bool {
+ let tree_addr_lo = self.base_addr();
+ let tree_addr_hi = tree_addr_lo + (PAGE_SIZE << self.size_class);
+ (tree_addr_lo..tree_addr_hi).contains(&addr)
+ }
+}
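
A worked example (values assumed for illustration) of the bit layout bitset_index produces: a tree with size_class = 2 and PAGE_SIZE_BITS = 12 (4 KiB pages) owns 2^(2+1) - 1 = 7 bits, stored largest size class first:

// bitset_index(size_class, offset_bytes) for a tree with size_class = 2, PAGE_SIZE_BITS = 12:
//
//   bitset_index(2, 0x0000) == 0   // the whole 4-page subregion
//   bitset_index(1, 0x0000) == 1   // first 2-page half
//   bitset_index(1, 0x2000) == 2   // second 2-page half
//   bitset_index(0, 0x0000) == 3   // first page
//   bitset_index(0, 0x3000) == 6   // last (fourth) page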