author | Nathan Ringo <nathan@remexre.com> | 2024-08-31 20:23:24 -0500
committer | Nathan Ringo <nathan@remexre.com> | 2024-08-31 20:23:24 -0500
commit | e9a79a0ed79609c1e293c7b221fce577200b2eb7 (patch)
tree | e097115f4b4435caa2a26186652cbfffefb2cb28 /crates/buddy_allocator/src/lib.rs
parent | 4617f96a99c0e5dfac1b45b3cff037306e4edc63 (diff)
The start of a new librarified organization.
Diffstat (limited to 'crates/buddy_allocator/src/lib.rs')
-rw-r--r-- | crates/buddy_allocator/src/lib.rs | 222
1 file changed, 222 insertions, 0 deletions
diff --git a/crates/buddy_allocator/src/lib.rs b/crates/buddy_allocator/src/lib.rs
new file mode 100644
index 0000000..fa86de7
--- /dev/null
+++ b/crates/buddy_allocator/src/lib.rs
@@ -0,0 +1,222 @@
+//! A buddy allocator, used to allocate pages.
+//!
+//! The allocator can be split into three abstractions: stripes, trees, and the allocator.
+//!
+//! TODO: See if there's standard terminology for these.
+//!
+//! ## Stripes
+//!
+//! The buddy allocator works in terms of size classes, which are power-of-two sized, starting at a
+//! single page and going up from there. Each stripe corresponds to a single size class and a
+//! particular region of memory.
+//!
+//! A stripe contains a circular doubly-linked free-list for subregions of that size class, and a
+//! bitset marking whether a particular region has been allocated or not. Being a circular
+//! doubly-linked list makes it cheap to remove an element whose address we know, as well as cheap
+//! to push and pop elements.
+//!
+//! It's efficient to go from the address of a subregion to the index of its corresponding bit, so
+//! when we hand out a subregion from the free-list or put one back, it's cheap to read or write
+//! its bit.
+//!
+//! ## Trees
+//!
+//! A tree is logically a collection of stripes, one per size class. To pack the structures more
+//! efficiently, they are stored interleaved with each other, and the tree manages this.
+//!
+//! The important logic at the level of trees happens when we allocate a subregion from a size
+//! class whose stripe's free-list is empty, and when we free subregions.
+//!
+//! When a stripe's free-list is empty, the tree instead allocates a subregion of a larger size
+//! from the next stripe. It can then store the unused portion in the current size class.
+//!
+//! The really important bit is the ability to merge subregions when they're freed. When we free a
+//! subregion of a certain size class, we can check whether its neighbor (its buddy) is unallocated
+//! as well. If so, we can remove it from the free-list by its address. We can combine the two
+//! subregions into one of the next larger size class, and then return that subregion to the next
+//! stripe.
+//!
+//! ## The buddy allocator
+//!
+//! Finally, the overall buddy allocator needs to be able to handle multiple memory regions. To
+//! facilitate this, the trees are stored in an array, which forms the overall allocator.
+#![no_std]
+
+mod bitvec;
+mod free_list;
+mod tree;
+
+// mod stripe;
+
+use crate::{bitvec::BitVec, free_list::FreeList, tree::Tree};
+use allocator_api2::alloc::{AllocError, Layout, LayoutError};
+use contracts::requires;
+use core::{mem, pin::Pin, ptr::NonNull};
+use vernos_physmem_free_list::FreeListAllocator;
+use vernos_utils::pin::pin_project_slice_mut_iter;
+
+/// A buddy allocator.
+#[derive(Debug)]
+pub struct BuddyAllocator<
+    'allocator,
+    const PAGE_SIZE: usize,
+    const PAGE_SIZE_BITS: usize,
+    const SIZE_CLASS_COUNT: usize,
+> {
+    /// The free lists.
+    free_lists: Pin<&'allocator mut [FreeList; SIZE_CLASS_COUNT]>,
+
+    /// The metadata associated with each tree.
+    trees: &'allocator mut [Tree<'allocator, PAGE_SIZE, PAGE_SIZE_BITS>],
+
+    /// The shared bitset.
+    bitset: &'allocator mut BitVec,
+}
+
+impl<
+        'allocator,
+        const PAGE_SIZE: usize,
+        const PAGE_SIZE_BITS: usize,
+        const SIZE_CLASS_COUNT: usize,
+    > BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+    /// Returns a buddy allocator backed by the page ranges in the free list.
+    pub fn new(
+        mut free_list: FreeListAllocator<'allocator, PAGE_SIZE>,
+    ) -> Result<BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, AllocError>
+    {
+        assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS);
+        assert_ne!(SIZE_CLASS_COUNT, 0);
+
+        // We use one contiguous region to store the free lists and all the information about each
+        // tree (the base, the maximum order, and the bitset). We do one pass through the free list
+        // to find out how much storage we need, then we try to find a convenient spot to prune the
+        // storage we need from a page range in the list.
+        let mut range_count = 0;
+        let mut size_class_counts = [0; SIZE_CLASS_COUNT];
+        for (_, mut len_pages) in free_list.iter() {
+            while len_pages > 0 {
+                let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1);
+                range_count += 1;
+                size_class_counts[size_class] += 1;
+                len_pages -= 1 << size_class;
+            }
+        }
+
+        // Lay out the metadata. The error here really ought to be impossible, because the pages in
+        // the free list needed to fit in there in the first place.
+        let metadata_layout = MetadataLayout::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(
+            range_count,
+            size_class_counts,
+        )
+        .map_err(|_| AllocError)?;
+
+        // Allocate space for the metadata.
+        let metadata = free_list.allocate_zeroed(metadata_layout.layout)?;
+
+        // Break out each component of the metadata.
+        //
+        // SAFETY: The layout is computed to make all of this sound if the data is valid. All of
+        // these types are valid when zero-initialized.
+        let free_lists: &'allocator mut [FreeList; SIZE_CLASS_COUNT] = unsafe {
+            metadata
+                .byte_add(metadata_layout.free_lists_offset)
+                .cast()
+                .as_mut()
+        };
+        let trees: &'allocator mut [Tree<PAGE_SIZE, PAGE_SIZE_BITS>] = unsafe {
+            NonNull::slice_from_raw_parts(
+                metadata.byte_add(metadata_layout.trees_offset).cast(),
+                metadata_layout.trees_len,
+            )
+            .as_mut()
+        };
+        let bitset: &'allocator mut BitVec = unsafe {
+            mem::transmute::<&mut [usize], &mut BitVec>(
+                NonNull::slice_from_raw_parts(
+                    metadata.byte_add(metadata_layout.bitset_offset).cast(),
+                    metadata_layout.bitset_len,
+                )
+                .as_mut(),
+            )
+        };
+
+        // Pin and self-link each free list. Although they're valid in a UB sense when zeroed,
+        // they're not yet ready to use until this is done.
+        //
+        // SAFETY: We never provide the ability to move out of a free list.
+        let mut free_lists = unsafe { Pin::new_unchecked(free_lists) };
+        for free_list in pin_project_slice_mut_iter(free_lists.as_mut()) {
+            free_list.self_link();
+        }
+
+        // TODO: Actually initialize the trees with ranges from the allocator...
+
+        // Make the buddy allocator and return.
+        Ok(BuddyAllocator {
+            free_lists,
+            trees,
+            bitset,
+        })
+    }
+
+    /// Tries to allocate a subregion of a particular size class from the allocator.
+    #[requires((0..SIZE_CLASS_COUNT).contains(&size_class))]
+    pub fn alloc(&mut self, size_class: usize) -> Result<NonNull<u8>, AllocError> {
+        Err(AllocError)
+    }
+}
+
+#[derive(Debug)]
+struct MetadataLayout<
+    const PAGE_SIZE: usize,
+    const PAGE_SIZE_BITS: usize,
+    const SIZE_CLASS_COUNT: usize,
+> {
+    layout: Layout,
+    free_lists_offset: usize,
+    trees_offset: usize,
+    trees_len: usize,
+    bitset_offset: usize,
+    bitset_len: usize,
+}
+
+impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT: usize>
+    MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+    fn new(
+        range_count: usize,
+        mut size_class_counts: [usize; SIZE_CLASS_COUNT],
+    ) -> Result<MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, LayoutError> {
+        let trees_len = range_count;
+        let mut bitset_len_bits = 0;
+        // TODO: Is there a cleaner way to compute this?
+        for i in (0..size_class_counts.len()).rev() {
+            let count = size_class_counts[i];
+            if count != 0 {
+                bitset_len_bits += count;
+                if i > 0 {
+                    size_class_counts[i - 1] += count * 2;
+                }
+            }
+        }
+        let bitset_len = (bitset_len_bits + usize::BITS as usize - 1) / usize::BITS as usize;
+
+        let free_lists_layout = Layout::new::<[FreeList; SIZE_CLASS_COUNT]>();
+        let trees_layout = Layout::array::<Tree<PAGE_SIZE, PAGE_SIZE_BITS>>(trees_len)?;
+        let bitset_layout = Layout::array::<usize>(bitset_len)?;
+
+        let free_lists_offset = 0;
+        let (layout, trees_offset) = free_lists_layout.extend(trees_layout)?;
+        let (layout, bitset_offset) = layout.extend(bitset_layout)?;
+
+        Ok(MetadataLayout {
+            layout,
+            free_lists_offset,
+            trees_offset,
+            trees_len,
+            bitset_offset,
+            bitset_len,
+        })
+    }
+}
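
A note on the merging step described in the module docs: in a buddy scheme, a subregion and its buddy differ only in the bit that selects which half of the next-larger subregion they occupy, so the buddy's position can be found with a single XOR. The sketch below is not part of this commit; it is a minimal illustration of that address arithmetic, with `page_size_bits` standing in for the `PAGE_SIZE_BITS` const generic and offsets measured in bytes from the base of a tree's region.

/// Computes the offset of a subregion's buddy within its tree.
///
/// Illustrative sketch only, not code from this commit.
fn buddy_offset(offset: usize, size_class: usize, page_size_bits: usize) -> usize {
    // A subregion in size class `n` covers 2^n pages, i.e. 1 << (page_size_bits + n) bytes.
    let size = 1 << (page_size_bits + size_class);
    debug_assert_eq!(offset % size, 0, "offset must be aligned to its size class");
    // Buddies differ in exactly the bit that selects which half of the parent
    // subregion they occupy, so XOR with the size flips between them.
    offset ^ size
}

#[test]
fn buddies_pair_up() {
    // With 4 KiB pages (page_size_bits = 12), the pages at offsets 0x0000 and 0x1000
    // are each other's buddies at size class 0...
    assert_eq!(buddy_offset(0x1000, 0, 12), 0x0000);
    assert_eq!(buddy_offset(0x0000, 0, 12), 0x1000);
    // ...and at size class 1 (two pages), 0x0000 and 0x2000 pair up.
    assert_eq!(buddy_offset(0x0000, 1, 12), 0x2000);
}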
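
The counting pass in `BuddyAllocator::new` decomposes each free range into power-of-two chunks by looking at the trailing zeros of the remaining length, capped at the largest size class. The standalone sketch below restates that loop (it is not from the commit, and it uses `Vec` plus an arbitrary cap of 4 size classes for illustration even though the crate itself is `no_std`).

/// Splits a run of `len_pages` pages into power-of-two chunks, the same way the
/// counting pass in `BuddyAllocator::new` does. Returns the chosen size classes
/// instead of updating a `size_class_counts` array. Illustrative sketch only.
fn split_into_size_classes(mut len_pages: usize, size_class_count: usize) -> Vec<usize> {
    let mut size_classes = Vec::new();
    while len_pages > 0 {
        // Peel off 2^size_class pages, where size_class is the number of trailing
        // zero bits in the remaining length, capped at the largest size class.
        let size_class = (len_pages.trailing_zeros() as usize).min(size_class_count - 1);
        size_classes.push(size_class);
        len_pages -= 1 << size_class;
    }
    size_classes
}

#[test]
fn ranges_decompose_greedily() {
    // 6 = 0b110 pages decompose into a 2-page chunk (class 1) and a 4-page chunk (class 2).
    assert_eq!(split_into_size_classes(6, 4), vec![1, 2]);
    // With only 3 size classes (max chunk = 4 pages), 8 pages become two 4-page chunks.
    assert_eq!(split_into_size_classes(8, 3), vec![2, 2]);
}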
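
The bitset-sizing loop in `MetadataLayout::new` can be read as: a range that enters a tree at size class i needs one bit for itself, two bits for its class i-1 halves, four bits for class i-2, and so on down to single pages, which is why each count is propagated downward doubled. The sketch below is a standalone restatement of that computation (not from the commit) with a small worked example.

/// Counts the bits the shared bitset needs, given how many ranges enter the
/// allocator at each size class. Mirrors the loop in `MetadataLayout::new`.
/// Illustrative sketch only.
fn bitset_len_bits<const SIZE_CLASS_COUNT: usize>(
    mut size_class_counts: [usize; SIZE_CLASS_COUNT],
) -> usize {
    let mut bits = 0;
    // Walk the size classes from largest to smallest; every subregion at class i
    // is also tracked as two subregions at class i - 1.
    for i in (0..SIZE_CLASS_COUNT).rev() {
        let count = size_class_counts[i];
        if count != 0 {
            bits += count;
            if i > 0 {
                size_class_counts[i - 1] += count * 2;
            }
        }
    }
    bits
}

#[test]
fn one_four_page_range() {
    // A single range entering at size class 2 (four pages) needs 1 + 2 + 4 = 7 bits:
    // one for itself, two for its halves, and four for its individual pages.
    assert_eq!(bitset_len_bits([0, 0, 1]), 7);
    // Rounded up to whole usize words (as in MetadataLayout::new), that is one word.
    assert_eq!((7 + usize::BITS as usize - 1) / usize::BITS as usize, 1);
}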