author    Nathan Ringo <nathan@remexre.com>    2024-08-31 23:30:46 -0500
committer Nathan Ringo <nathan@remexre.com>    2024-08-31 23:32:37 -0500
commit    439b93dd3e22311caee6d69eb4aa1da5b81a0731 (patch)
tree      fdc2693067c82c032c8de04b62acbe99806883cd /crates/buddy_allocator/src/lib.rs
parent    e9a79a0ed79609c1e293c7b221fce577200b2eb7 (diff)
Almost all of buddy initialization.
Diffstat (limited to 'crates/buddy_allocator/src/lib.rs')
-rw-r--r--    crates/buddy_allocator/src/lib.rs    144
1 file changed, 117 insertions, 27 deletions
diff --git a/crates/buddy_allocator/src/lib.rs b/crates/buddy_allocator/src/lib.rs
index fa86de7..441c77f 100644
--- a/crates/buddy_allocator/src/lib.rs
+++ b/crates/buddy_allocator/src/lib.rs
@@ -42,6 +42,8 @@
//! facilitate this, the trees are stored in an array, which forms the overall allocator.
#![no_std]
+extern crate std; // TODO: Remove me
+
mod bitvec;
mod free_list;
mod tree;
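
The module doc above describes the overall design: one buddy tree per contiguous page range, all trees stored in a single array, and one free list per size class. The following is a minimal standalone sketch of that shape; the names (SketchTree, SketchBuddyAllocator, free_list_heads) are hypothetical stand-ins, not this crate's actual types.

#![allow(dead_code)]

use core::ptr::NonNull;

/// Hypothetical stand-in for the crate's per-range tree bookkeeping.
struct SketchTree {
    /// Lowest address covered by this tree.
    base: *mut u8,
    /// The tree covers `1 << size_class` pages.
    size_class: usize,
    /// Index of this tree's first bit in the shared bitset.
    bitset_offset: usize,
}

/// Hypothetical stand-in for the allocator as a whole: per-size-class free
/// lists plus the array of trees, all carved out of one metadata region.
struct SketchBuddyAllocator<'meta, const SIZE_CLASS_COUNT: usize> {
    free_list_heads: [Option<NonNull<u8>>; SIZE_CLASS_COUNT],
    trees: &'meta mut [SketchTree],
}

fn main() {
    println!("sketch of the layout only; the diff below does the real initialization");
}
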
@@ -53,7 +55,7 @@ use allocator_api2::alloc::{AllocError, Layout, LayoutError};
use contracts::requires;
use core::{mem, pin::Pin, ptr::NonNull};
use vernos_physmem_free_list::FreeListAllocator;
-use vernos_utils::pin::pin_project_slice_mut_iter;
+use vernos_utils::pin::{pin_project_slice_mut, pin_project_slice_mut_iter};
/// A buddy allocator.
#[derive(Debug)]
@@ -82,37 +84,53 @@ impl<
{
/// Returns a buddy allocator backed by the page ranges in the free list.
pub fn new(
- mut free_list: FreeListAllocator<'allocator, PAGE_SIZE>,
+ mut free_list_alloc: FreeListAllocator<'allocator, PAGE_SIZE>,
) -> Result<BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, AllocError>
{
assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS);
assert_ne!(SIZE_CLASS_COUNT, 0);
+ // We split up all the nodes into size-class-sized chunks here. After this, it's important
+ // for safety that we never split a node again -- we'll see why later.
+ free_list_alloc.split_to_powers_of_two_no_larger_than(1 << (SIZE_CLASS_COUNT - 1));
+
// We use one contiguous region to store the free lists and all the information about each
// tree (the base, the maximum order, and the bitset). We do one pass through the free list
- // to find out how much storage we need, then we try to find a convenient spot to prune the
+ // to find out how much storage we need, then we try to find a convenient spot to steal the
// storage we need from a page range in the list.
let mut range_count = 0;
let mut size_class_counts = [0; SIZE_CLASS_COUNT];
- for (_, mut len_pages) in free_list.iter() {
- while len_pages > 0 {
- let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1);
- range_count += 1;
- size_class_counts[size_class] += 1;
- len_pages -= 1 << size_class;
- }
+ for (_, len_pages) in free_list_alloc.iter() {
+ assert!(len_pages.is_power_of_two());
+ let size_class = len_pages.trailing_zeros() as usize;
+ assert!((0..SIZE_CLASS_COUNT).contains(&size_class));
+ range_count += 1;
+ size_class_counts[size_class] += 1;
}
- // Lay out the metadata. The error here really ought to be impossible, because the pages in
- // the free list needed to fit in there in the first place.
+ // Lay out the metadata.
+ //
+ // UNWRAP: The error here really ought to be impossible, because the pages in the free list
+ // needed to fit in there in the first place.
let metadata_layout = MetadataLayout::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(
range_count,
size_class_counts,
)
- .map_err(|_| AllocError)?;
+ .unwrap();
// Allocate space for the metadata.
- let metadata = free_list.allocate_zeroed(metadata_layout.layout)?;
+ //
+ // SAFETY: We need to make _absolutely_ sure nothing in the buddy allocator uses this
+ // memory before we have a chance to remove it from the allocator. This function guarantees
+ // that the returned memory won't overlap with the first page of a live node, so as long as
+ // we don't split a node or write past the first page of one, we should be OK.
+ let metadata = unsafe {
+ free_list_alloc.allocate_without_always_removing_the_node(metadata_layout.layout)?
+ };
+ // SAFETY: The pointer must be valid to have been returned here.
+ unsafe {
+ metadata.cast::<u8>().write_bytes(0, metadata.len());
+ }
// Break out each component of the metadata.
//
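
Because every free-list node has already been split to a power-of-two length of at most 1 << (SIZE_CLASS_COUNT - 1) pages, the counting pass above reduces to a tally: trailing_zeros() of the page count is exactly the node's size class, and each node becomes one tree. A standalone sketch of that pass (the function name and its iterator argument are stand-ins for free_list_alloc.iter(), not crate API):

/// Tally how many trees of each size class the free list will produce,
/// assuming every node length is already a power of two that fits in a
/// size class.
fn tally_size_classes<const SIZE_CLASS_COUNT: usize>(
    len_pages_iter: impl Iterator<Item = usize>,
) -> (usize, [usize; SIZE_CLASS_COUNT]) {
    let mut range_count = 0;
    let mut size_class_counts = [0; SIZE_CLASS_COUNT];
    for len_pages in len_pages_iter {
        assert!(len_pages.is_power_of_two());
        let size_class = len_pages.trailing_zeros() as usize;
        assert!(size_class < SIZE_CLASS_COUNT);
        range_count += 1;
        size_class_counts[size_class] += 1;
    }
    (range_count, size_class_counts)
}

fn main() {
    // Nodes of 1, 4, and 4 pages -> three trees: one of size class 0 and
    // two of size class 2.
    let (ranges, counts) = tally_size_classes::<3>([1, 4, 4].into_iter());
    assert_eq!(ranges, 3);
    assert_eq!(counts, [1, 0, 2]);
    println!("{ranges} ranges, per-class counts {counts:?}");
}
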
@@ -144,15 +162,80 @@ impl<
// Pin and self-link each free list. Although they're valid in a UB sense when zeroed,
// they're not yet ready to use until this is done.
//
- // SAFETY: We never provide the ability to move out of a free list.
+ // SAFETY: We never provide the ability to move a free list.
let mut free_lists = unsafe { Pin::new_unchecked(free_lists) };
for free_list in pin_project_slice_mut_iter(free_lists.as_mut()) {
free_list.self_link();
}
- // TODO: Actually initialize the trees with ranges from the allocator...
+ // Initialize the trees, adding subregions into the allocator as we do. We don't actually
+ // assign bitset offsets yet, because we're going to sort the trees by address before we
+ // do. (This isn't algorithmically necessary, but it makes debugging a bit easier.)
+ let mut i = 0;
+ while let Some((ptr, mut len_pages)) = free_list_alloc.pop() {
+ while len_pages > 0 {
+ let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1);
+ trees[i].base_ptr = ptr.cast().as_ptr();
+ trees[i].size_class = size_class;
+ i += 1;
+ len_pages -= 1 << size_class;
+ }
+ }
+ assert!(
+ i == trees.len() || i + 1 == trees.len(),
+ "found {} trees but allocated {} slots",
+ i,
+ trees.len()
+ );
+
+ // Slice the trees slice to fit how many trees actually remained, then sort them by
+ // address.
+ let trees = &mut trees[..i];
+ trees.sort_by_key(|tree| tree.base_ptr as usize);
+
+ // Compute the number of bits that belong to each tree and assign them.
+ let mut bitset_offset = 0;
+ for tree in &mut *trees {
+ tree.bitset_offset = bitset_offset;
+ bitset_offset += (1 << (tree.size_class + 1)) - 1;
+ }
+ assert!(bitset_offset <= metadata_layout.bitset_len_bits);
+
+ // Add regions the trees should own to the free lists, excluding the subregions that
+ // metadata is allocated in.
- // Make the buddy allocator and return.
+ // Since we know that the metadata was allocated at the end of the page range it was
+ // allocated in, we can just test the end addresses.
+ //
+ // SAFETY: This is the one-past-the-end address.
+ let metadata_addr_hi =
+ unsafe { metadata.cast::<u8>().add(metadata.len()).as_ptr() } as usize;
+ for tree in &mut *trees {
+ // SAFETY: This is the one-past-the-end address.
+ let tree_addr_hi = unsafe { tree.base_ptr.add(1 << tree.size_class) } as usize;
+ if metadata_addr_hi == tree_addr_hi {
+ // If the metadata was present inside the tree... TODO.
+ std::eprintln!("TODO");
+ } else {
+ // Otherwise, we can take the whole range of the tree and add it to the free list.
+ //
+ // UNWRAP: The pointer must have been non-null when it was in the free-list
+ // allocator.
+ let ptr = NonNull::new(tree.base_ptr as *mut u8).unwrap();
+ // SAFETY: This region is not owned or borrowed by anything else (the only thing it
+ // could be would be the metadata, but that's not in this tree), is of the correct
+ // size, and is composed of untyped memory.
+ unsafe {
+ pin_project_slice_mut(free_lists.as_mut(), tree.size_class).push(ptr);
+ }
+
+ // Then, set a bit in the bitset to say that this region is present.
+ assert!(!bitset.get(tree.bitset_offset));
+ bitset.set(tree.bitset_offset, true);
+ }
+ }
+
+ // Make the buddy allocator's struct and return it.
Ok(BuddyAllocator {
free_lists,
trees,
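
The `(1 << (tree.size_class + 1)) - 1` term in the offset bookkeeping above is the node count of a complete binary tree of height size_class, which is how many bits of the shared bitset each tree claims. A small sketch of the same arithmetic, using a hypothetical helper rather than the crate's code:

/// Give each tree (identified here only by its size class) a starting bit
/// offset, packing the trees' bits back to back.
fn assign_bitset_offsets(size_classes: &[usize]) -> Vec<usize> {
    let mut offsets = Vec::with_capacity(size_classes.len());
    let mut bitset_offset = 0;
    for &size_class in size_classes {
        offsets.push(bitset_offset);
        // A tree of size class k has 2^(k+1) - 1 nodes, hence that many bits.
        bitset_offset += (1 << (size_class + 1)) - 1;
    }
    offsets
}

fn main() {
    // Trees of size classes 0, 2, 2 need 1, 7, and 7 bits respectively.
    assert_eq!(assign_bitset_offsets(&[0, 2, 2]), vec![0, 1, 8]);
    println!("offsets assigned back to back, as in the diff above");
}
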
@@ -165,6 +248,17 @@ impl<
pub fn alloc(&mut self, size_class: usize) -> Result<NonNull<u8>, AllocError> {
Err(AllocError)
}
+
+ /// Returns a subregion of a particular size class to the allocator.
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must have been returned by a call to `alloc` with the same `size_class`.
+ /// - `ptr` must convey ownership over the entire region.
+ #[requires((0..SIZE_CLASS_COUNT).contains(&size_class))]
+ pub unsafe fn dealloc(&mut self, ptr: NonNull<u8>, size_class: usize) {
+ todo!()
+ }
}
#[derive(Debug)]
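
dealloc is still a todo!() in this commit, but its (ptr, size_class) signature matches the classic buddy scheme, in which a freed block's buddy is found by flipping the one address bit that corresponds to the block's size. The sketch below shows that textbook computation as background only; it is not this crate's eventual implementation, and the helper and parameter names are hypothetical.

/// Offset (within a tree) of the buddy of the block at `offset_in_tree`,
/// where the block spans `1 << (size_class + page_size_bits)` bytes.
fn buddy_offset(offset_in_tree: usize, size_class: usize, page_size_bits: usize) -> usize {
    offset_in_tree ^ (1 << (size_class + page_size_bits))
}

fn main() {
    // With 4 KiB pages (page_size_bits = 12), the size-class-0 block at tree
    // offset 0x1000 and the block at offset 0x0000 are each other's buddies.
    assert_eq!(buddy_offset(0x1000, 0, 12), 0x0000);
    assert_eq!(buddy_offset(0x0000, 0, 12), 0x1000);
    println!("buddies pair up by XOR of the block-size bit");
}
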
@@ -179,6 +273,7 @@ struct MetadataLayout<
trees_len: usize,
bitset_offset: usize,
bitset_len: usize,
+ bitset_len_bits: usize,
}
impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT: usize>
@@ -186,19 +281,13 @@ impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT
{
fn new(
range_count: usize,
- mut size_class_counts: [usize; SIZE_CLASS_COUNT],
+ size_class_counts: [usize; SIZE_CLASS_COUNT],
) -> Result<MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, LayoutError> {
let trees_len = range_count;
let mut bitset_len_bits = 0;
- // TODO: Is there a cleaner way to compute this?
- for i in (0..size_class_counts.len()).rev() {
- let count = size_class_counts[i];
- if count != 0 {
- bitset_len_bits += count;
- if i > 0 {
- size_class_counts[i - 1] += count * 2;
- }
- }
+ for (size_class, &count) in size_class_counts.iter().enumerate() {
+ let bits_for_size_class = (1 << (size_class + 1)) - 1;
+ bitset_len_bits += bits_for_size_class * count;
}
let bitset_len = (bitset_len_bits + usize::BITS as usize - 1) / usize::BITS as usize;
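
The rewritten loop above sizes the bitset by giving each tree of size class k its 2^(k+1) - 1 node bits and rounding the total up to whole usize words. A standalone sketch of the same arithmetic with a worked example (the helper name is hypothetical):

/// Total bitset size implied by per-size-class tree counts, returned as
/// (bits, usize words).
fn bitset_words(size_class_counts: &[usize]) -> (usize, usize) {
    let mut bitset_len_bits = 0;
    for (size_class, &count) in size_class_counts.iter().enumerate() {
        // Each tree of size class k is a complete binary tree with
        // 2^(k+1) - 1 nodes, so it needs that many bits.
        let bits_for_size_class = (1usize << (size_class + 1)) - 1;
        bitset_len_bits += bits_for_size_class * count;
    }
    // Round up to whole words of storage.
    let bitset_len = (bitset_len_bits + usize::BITS as usize - 1) / usize::BITS as usize;
    (bitset_len_bits, bitset_len)
}

fn main() {
    // One tree of size class 0 and two of size class 2:
    // 1*1 + 2*7 = 15 bits, which fits in a single usize word.
    assert_eq!(bitset_words(&[1, 0, 2]), (15, 1));
    println!("15 bits -> 1 word");
}
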
@@ -217,6 +306,7 @@ impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT
trees_len,
bitset_offset,
bitset_len,
+ bitset_len_bits,
})
}
}