From a83d2e1c8bf968d948991869d1f082b610d9032a Mon Sep 17 00:00:00 2001 From: Nathan Ringo Date: Sun, 1 Sep 2024 12:42:37 -0500 Subject: Rename crates. --- crates/Cargo.lock | 6 +- crates/Cargo.toml | 2 +- crates/alloc_buddy/Cargo.toml | 15 + crates/alloc_buddy/src/bitvec.rs | 77 ++++++ crates/alloc_buddy/src/free_list.rs | 164 +++++++++++ crates/alloc_buddy/src/lib.rs | 409 ++++++++++++++++++++++++++++ crates/alloc_buddy/src/tree.rs | 100 +++++++ crates/alloc_buddy/tests/hosted_test.rs | 279 +++++++++++++++++++ crates/alloc_physmem_free_list/Cargo.toml | 9 + crates/alloc_physmem_free_list/src/lib.rs | 389 ++++++++++++++++++++++++++ crates/buddy_allocator/Cargo.toml | 15 - crates/buddy_allocator/src/bitvec.rs | 77 ------ crates/buddy_allocator/src/free_list.rs | 164 ----------- crates/buddy_allocator/src/lib.rs | 409 ---------------------------- crates/buddy_allocator/src/tree.rs | 100 ------- crates/buddy_allocator/tests/hosted_test.rs | 279 ------------------- crates/physmem_free_list/Cargo.toml | 9 - crates/physmem_free_list/src/lib.rs | 389 -------------------------- 18 files changed, 1446 insertions(+), 1446 deletions(-) create mode 100644 crates/alloc_buddy/Cargo.toml create mode 100644 crates/alloc_buddy/src/bitvec.rs create mode 100644 crates/alloc_buddy/src/free_list.rs create mode 100644 crates/alloc_buddy/src/lib.rs create mode 100644 crates/alloc_buddy/src/tree.rs create mode 100644 crates/alloc_buddy/tests/hosted_test.rs create mode 100644 crates/alloc_physmem_free_list/Cargo.toml create mode 100644 crates/alloc_physmem_free_list/src/lib.rs delete mode 100644 crates/buddy_allocator/Cargo.toml delete mode 100644 crates/buddy_allocator/src/bitvec.rs delete mode 100644 crates/buddy_allocator/src/free_list.rs delete mode 100644 crates/buddy_allocator/src/lib.rs delete mode 100644 crates/buddy_allocator/src/tree.rs delete mode 100644 crates/buddy_allocator/tests/hosted_test.rs delete mode 100644 crates/physmem_free_list/Cargo.toml delete mode 100644 crates/physmem_free_list/src/lib.rs (limited to 'crates') diff --git a/crates/Cargo.lock b/crates/Cargo.lock index 33f18cf..62c6ae4 100644 --- a/crates/Cargo.lock +++ b/crates/Cargo.lock @@ -298,7 +298,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] -name = "vernos_buddy_allocator" +name = "vernos_alloc_buddy" version = "0.1.0" dependencies = [ "allocator-api2", @@ -306,11 +306,11 @@ dependencies = [ "nix", "proptest", "static_assertions", - "vernos_physmem_free_list", + "vernos_alloc_physmem_free_list", ] [[package]] -name = "vernos_physmem_free_list" +name = "vernos_alloc_physmem_free_list" version = "0.1.0" dependencies = [ "allocator-api2", diff --git a/crates/Cargo.toml b/crates/Cargo.toml index 79760cf..57dd03c 100644 --- a/crates/Cargo.toml +++ b/crates/Cargo.toml @@ -1,3 +1,3 @@ [workspace] -members = ["buddy_allocator", "physmem_free_list"] +members = ["alloc_buddy", "alloc_physmem_free_list"] resolver = "2" diff --git a/crates/alloc_buddy/Cargo.toml b/crates/alloc_buddy/Cargo.toml new file mode 100644 index 0000000..5e5c882 --- /dev/null +++ b/crates/alloc_buddy/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "vernos_alloc_buddy" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +allocator-api2 = { version = "0.2.18", default-features = false } +contracts = { version = "0.6.3", default-features = false } +static_assertions = { version = "1.1.0", default-features = false } 
+vernos_alloc_physmem_free_list = { path = "../alloc_physmem_free_list" } + +[dev-dependencies] +proptest = { version = "0.9.6", default-features = false, features = ["std"] } +nix = { version = "0.29.0", default-features = false, features = ["mman"] } diff --git a/crates/alloc_buddy/src/bitvec.rs b/crates/alloc_buddy/src/bitvec.rs new file mode 100644 index 0000000..5cabc5e --- /dev/null +++ b/crates/alloc_buddy/src/bitvec.rs @@ -0,0 +1,77 @@ +use contracts::requires; +use core::{fmt, mem::transmute}; + +/// A fixed-length vector of bits used for determining whether a subregion is in the free lists or +/// not. +pub struct Bitset([usize]); + +impl Bitset { + fn from_mut(words: &mut [usize]) -> &mut Bitset { + // SAFETY: The types have a newtype relationship. + unsafe { transmute(words) } + } + + fn from_ref(words: &[usize]) -> &Bitset { + // SAFETY: The types have a newtype relationship. + unsafe { transmute(words) } + } + + /// Retrieves the value of a bit from the Bitset. + #[requires(i < self.len())] + pub fn get(&self, i: usize) -> SubregionStatus { + let word_index = i / usize::BITS as usize; + let subword_index = i % usize::BITS as usize; + let one_hot = 1 << subword_index; + if (self.0[word_index] & one_hot) == 0 { + SubregionStatus::NotInFreeList + } else { + SubregionStatus::InFreeList + } + } + + /// Returns whether the Bitset is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns the number of bits in the Bitset. + pub fn len(&self) -> usize { + self.0.len() * usize::BITS as usize + } + + /// Sets the value of a bit in the Bitset. + #[requires(i < self.len())] + pub fn set(&mut self, i: usize, status: SubregionStatus) { + let word_index = i / usize::BITS as usize; + let subword_index = i % usize::BITS as usize; + let one_hot = 1 << subword_index; + match status { + SubregionStatus::NotInFreeList => { + self.0[word_index] &= !one_hot; + } + SubregionStatus::InFreeList => { + self.0[word_index] |= one_hot; + } + } + } +} + +impl fmt::Debug for Bitset { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "Bitset(")?; + for word in &self.0 { + write!(fmt, "{word:064b}")?; + } + write!(fmt, ")") + } +} + +/// The status of a subregion, as tracked by `Bitset`. +#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub enum SubregionStatus { + /// The region is not in the free list. + NotInFreeList = 0, + + /// The region is in the free list. + InFreeList = 1, +} diff --git a/crates/alloc_buddy/src/free_list.rs b/crates/alloc_buddy/src/free_list.rs new file mode 100644 index 0000000..9b470fd --- /dev/null +++ b/crates/alloc_buddy/src/free_list.rs @@ -0,0 +1,164 @@ +//! A circular doubly-linked list of power-of-two-sized subregions. + +use contracts::{ensures, requires}; +use core::{fmt, ptr::NonNull}; + +/// The magic number a non-sentinel node should have. +const FREE_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"FREENODE"); + +/// The magic number set into the spot for a node's magic number when it is initialized, but not +/// linked into a list. +const INITED_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"NODEINIT"); + +/// The magic number a sentinel node should have. +const SENTINEL_MAGIC: u64 = u64::from_ne_bytes(*b"SENTINEL"); + +/// The magic number set into the spot for a node's magic number when it gets unlinked. +const USED_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"ALLOC'ED"); + +/// A pointer to the sentinel node of a circular doubly-linked list of power-of-two-sized subregions. 
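+///
+/// In sketch form (derived from the behavior of `push` and `pop` below): an empty list is just
+/// the sentinel linked to itself, and `push` inserts new nodes directly after the sentinel, so
+/// after pushing subregions `A` and then `B` the links are
+///
+/// ```text
+/// sentinel <-> B <-> A <-> sentinel
+/// ```
+///
+/// and `pop` hands back `B` first, since it takes `sentinel.next`.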
+pub struct FreeList(NonNull<FreeListNode>);
+
+impl FreeList {
+    /// Creates a free-list wrapper for a sentinel node's pointer.
+    ///
+    /// # Safety
+    ///
+    /// - `sentinel` must be a pointer to the sentinel of a valid circular doubly-linked list.
+    #[requires(unsafe { (*sentinel.as_ptr()).next }.is_some())]
+    #[requires(unsafe { (*sentinel.as_ptr()).prev }.is_some())]
+    #[requires(unsafe { (*sentinel.as_ptr()).magic } == SENTINEL_MAGIC)]
+    #[requires(unsafe { (*sentinel.as_ptr()).self_addr } == sentinel.as_ptr() as usize)]
+    pub unsafe fn from_sentinel(sentinel: NonNull<FreeListNode>) -> FreeList {
+        FreeList(sentinel)
+    }
+
+    /// Returns whether the free-list is empty.
+    pub fn is_empty(&self) -> bool {
+        let sentinel = self.0.as_ptr();
+        // SAFETY: This is sound as long as the sentinel was valid, which is a precondition of
+        // `FreeList::from_sentinel()`.
+        unsafe {
+            // UNWRAP: This is a precondition of the method.
+            let next = (*sentinel).next.unwrap();
+            (*sentinel).self_addr == next.as_ptr() as usize
+        }
+    }
+
+    /// Pops a subregion from the free-list, returning ownership to the caller.
+    pub fn pop(&mut self) -> Option<NonNull<u8>> {
+        if self.is_empty() {
+            // If the list is empty, return `None`.
+            None
+        } else {
+            // SAFETY: This is sound as long as the sentinel was valid, which is a precondition of
+            // `FreeList::from_sentinel()`.
+            unsafe {
+                // UNWRAP: In a valid circular doubly-linked list, the pointers are all non-null.
+                let node = (*self.0.as_ptr()).next.unwrap();
+                let prev = (*node.as_ptr()).prev.unwrap();
+                let next = (*node.as_ptr()).next.unwrap();
+
+                (*prev.as_ptr()).next = Some(next);
+                (*next.as_ptr()).prev = Some(prev);
+
+                (*node.as_ptr()).next = None;
+                (*node.as_ptr()).prev = None;
+                (*node.as_ptr()).magic = USED_NODE_MAGIC;
+                Some(node.cast())
+            }
+        }
+    }
+
+    /// Pushes a subregion into the free-list.
+    ///
+    /// # Safety
+    ///
+    /// - `subregion_ptr` must convey ownership over untyped memory of the correct size.
+    pub unsafe fn push(&mut self, subregion_ptr: NonNull<u8>) {
+        let node = FreeListNode::init(subregion_ptr);
+
+        let prev = self.0;
+        // UNWRAP: In a valid circular doubly-linked list, the pointers are all non-null.
+        let next = (*self.0.as_ptr()).next.unwrap();
+
+        (*node.as_ptr()).prev = Some(prev);
+        (*node.as_ptr()).next = Some(next);
+        (*node.as_ptr()).magic = FREE_NODE_MAGIC;
+
+        (*prev.as_ptr()).next = Some(node);
+        (*next.as_ptr()).prev = Some(node);
+    }
+}
+
+/// This impl will panic until the sentinel has been self-linked.
+impl fmt::Debug for FreeList {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        // SAFETY: This is sound as long as the sentinel was valid, which is a precondition of
+        // `FreeList::from_sentinel()`.
+        let sentinel_addr = unsafe { (*self.0.as_ptr()).self_addr };
+
+        let mut fmt = fmt.debug_list();
+        let mut ptr = self.0;
+        loop {
+            // SAFETY: The invariants of FreeList guarantee that the list is valid. In a valid
+            // list, the next pointer is always valid and non-null.
+            ptr = unsafe { (*ptr.as_ptr()).next.unwrap() };
+            if unsafe { (*ptr.as_ptr()).self_addr } == sentinel_addr {
+                break fmt.finish();
+            }
+            fmt.entry(&ptr);
+        }
+    }
+}
+
+/// A node in a free list. This should not be moved without careful thought.
+///
+/// This type is valid when zero-initialized.
+///
+/// # Safety
+///
+/// - This type is valid only if it is zero-initialized or if it contains a valid circular
+///   doubly-linked list of subregions of the correct size, which it owns.
+#[derive(Debug)]
+pub struct FreeListNode {
+    next: Option<NonNull<FreeListNode>>,
+    prev: Option<NonNull<FreeListNode>>,
+    magic: u64,
+    self_addr: usize,
+}
+
+impl FreeListNode {
+    /// Initializes a sentinel node.
+    ///
+    /// # Safety
+    ///
+    /// The pointer must convey exclusive access and point to a large-enough allocation.
+    #[ensures(unsafe { (*ptr.as_ptr()).magic } == SENTINEL_MAGIC)]
+    #[ensures(unsafe { (*ptr.as_ptr()).self_addr } == ptr.as_ptr() as usize)]
+    pub unsafe fn init_sentinel(ptr: NonNull<FreeListNode>) {
+        (*ptr.as_ptr()).next = Some(ptr);
+        (*ptr.as_ptr()).prev = Some(ptr);
+        (*ptr.as_ptr()).magic = SENTINEL_MAGIC;
+        (*ptr.as_ptr()).self_addr = ptr.as_ptr() as usize;
+    }
+
+    /// Initializes a non-sentinel node.
+    ///
+    /// # Safety
+    ///
+    /// The pointer must convey ownership and point to a large-enough allocation.
+    #[ensures(ptr.as_ptr() as usize == ret.as_ptr() as usize)]
+    #[ensures(unsafe { (*ret.as_ptr()).magic } == INITED_NODE_MAGIC)]
+    #[ensures(unsafe { (*ret.as_ptr()).self_addr } == ptr.as_ptr() as usize)]
+    unsafe fn init(ptr: NonNull<u8>) -> NonNull<FreeListNode> {
+        let ptr = ptr.cast();
+        ptr.write(FreeListNode {
+            next: None,
+            prev: None,
+            magic: INITED_NODE_MAGIC,
+            self_addr: ptr.as_ptr() as usize,
+        });
+        ptr
+    }
+}
diff --git a/crates/alloc_buddy/src/lib.rs b/crates/alloc_buddy/src/lib.rs
new file mode 100644
index 0000000..d0d4818
--- /dev/null
+++ b/crates/alloc_buddy/src/lib.rs
@@ -0,0 +1,409 @@
+//! A buddy allocator, used to allocate pages.
+//!
+//! The allocator can be split into three abstractions: stripes, trees, and the allocator.
+//!
+//! TODO: See if there's standard terminology for these.
+//!
+//! ## Stripes
+//!
+//! The buddy allocator works in terms of size classes, which are power-of-two sized, starting at a
+//! single page and going up from there. Each stripe corresponds to a single size class and a
+//! particular region of memory.
+//!
+//! A stripe contains a circular doubly-linked free-list for subregions of that size class, and a
+//! bitset marking whether a particular region has been allocated or not. Being a circular
+//! doubly-linked list makes it cheap to remove an element whose address we know, as well as cheap
+//! to push and pop elements.
+//!
+//! It's efficient to go from the address of a subregion to the index of its corresponding bit, so
+//! when we hand out a subregion from the free-list or put one back, it's cheap to read or write
+//! its bit.
+//!
+//! ## Trees
+//!
+//! A tree is logically a collection of stripes, one per size class. To pack the structures more
+//! efficiently, they are stored interleaved with each other, and the tree manages this.
+//!
+//! The important logic at the level of trees happens when we allocate a subregion from a size
+//! class whose stripe's free-list is empty, and when we free subregions.
+//!
+//! When a stripe's free-list is empty, the tree instead allocates a subregion of a larger size
+//! from the next stripe. It can then store the unused portion in the current size class.
+//!
+//! The really important bit is the ability to merge subregions when they're freed. When we free a
+//! subregion of a certain size class, we can check whether its neighbor (its buddy) is unallocated
+//! as well. If so, we can remove it from the free-list by its address. We can combine the two
+//! subregions into one of the next larger size class, and then return that subregion to the next
+//! stripe.
+//!
+//! ## The buddy allocator
+//!
+//! Finally, the overall buddy allocator needs to be able to handle multiple memory regions. To
+//!
facilitate this, the trees are stored in an array, which forms the overall allocator. +#![no_std] + +extern crate std; // TODO: Remove me + +mod bitvec; +mod free_list; +mod tree; + +use crate::{ + bitvec::{Bitset, SubregionStatus}, + free_list::{FreeList, FreeListNode}, + tree::Tree, +}; +use allocator_api2::alloc::{AllocError, Layout, LayoutError}; +use contracts::{ensures, requires}; +use core::{fmt, mem, ptr::NonNull}; +use vernos_alloc_physmem_free_list::FreeListAllocator; + +/// A buddy allocator. +pub struct BuddyAllocator< + 'allocator, + const PAGE_SIZE: usize, + const PAGE_SIZE_BITS: usize, + const SIZE_CLASS_COUNT: usize, +> { + /// The free list sentinels (`SIZE_CLASS_COUNT` of them). + free_list_sentinels: NonNull, + + /// The metadata associated with each tree. + trees: &'allocator mut [Tree<'allocator, PAGE_SIZE, PAGE_SIZE_BITS>], + + /// The shared bitset. + bitset: &'allocator mut Bitset, +} + +impl< + 'allocator, + const PAGE_SIZE: usize, + const PAGE_SIZE_BITS: usize, + const SIZE_CLASS_COUNT: usize, + > BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT> +{ + /// Returns a buddy allocator backed by the page ranges in the free list. + pub fn new( + mut free_list_alloc: FreeListAllocator<'allocator, PAGE_SIZE>, + ) -> Result, AllocError> + { + assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS); + assert_ne!(SIZE_CLASS_COUNT, 0); + + // We split up all the nodes into size-class-sized chunks here. After this, it's important + // for safety that we never split a node again -- we'll see why later. + free_list_alloc.split_to_powers_of_two_no_larger_than(1 << (SIZE_CLASS_COUNT - 1)); + + // We use one contiguous region to store the free lists and all the information about each + // tree (the base, the maximum order, and the bitset). We do one pass through the free list + // to find out how much storage we need, then we try to find a convenient spot to steal the + // storage we need from a page range in the list. + let mut range_count = 0; + let mut size_class_counts = [0; SIZE_CLASS_COUNT]; + for (_, len_pages) in free_list_alloc.iter() { + assert!(len_pages.is_power_of_two()); + let size_class = len_pages.trailing_zeros() as usize; + assert!((0..SIZE_CLASS_COUNT).contains(&size_class)); + range_count += 1; + size_class_counts[size_class] += 1; + } + + // Lay out the metadata. + // + // UNWRAP: The error here really ought to be impossible, because the pages in the free list + // needed to fit in there in the first place. + let metadata_layout = MetadataLayout::::new( + range_count, + size_class_counts, + ) + .unwrap(); + + // Allocate space for the metadata. + // + // SAFETY: We need to make _absolutely_ sure nothing in the buddy allocator uses this + // memory before we have a chance to remove it from the allocator. This function guarantees + // that the returned memory won't overlap with the first page of a live node, so as long as + // we don't split a node or write past the first page of one, we should be OK. + let metadata = unsafe { + free_list_alloc.allocate_without_always_removing_the_node(metadata_layout.layout)? + }; + // SAFETY: The pointer must be valid to have been returned here. + unsafe { + metadata.cast::().write_bytes(0, metadata.len()); + } + + // Break out each component of the metadata. + // + // SAFETY: The layout is computed to make all of this sound if the data is valid. All of + // these types are valid when zero-initialized. 
+ let free_list_sentinels: NonNull = unsafe { + metadata + .byte_add(metadata_layout.free_list_sentinels_offset) + .cast() + }; + let trees: &'allocator mut [Tree] = unsafe { + NonNull::slice_from_raw_parts( + metadata.byte_add(metadata_layout.trees_offset).cast(), + metadata_layout.trees_len, + ) + .as_mut() + }; + // SAFETY: This type is valid when zero-initialized, and is a newtype for `[usize]` so has + // the same layout. + let bitset: &'allocator mut Bitset = unsafe { + mem::transmute::<&mut [usize], &mut Bitset>( + NonNull::slice_from_raw_parts( + metadata.byte_add(metadata_layout.bitset_offset).cast(), + metadata_layout.bitset_len, + ) + .as_mut(), + ) + }; + + // Self-link each free list. Although they're valid in a UB sense when zeroed, they're not + // yet ready to use until this is done. + for size_class in 0..SIZE_CLASS_COUNT { + // SAFETY: This is the number of nodes we allocate, so we'll stay in-bounds. + unsafe { FreeListNode::init_sentinel(free_list_sentinels.add(size_class)) }; + } + + // Initialize the trees, adding subregions into the allocator as we do. We don't actually + // assign bitset offsets yet, because we're going to sort the trees by address before we + // do. (This isn't algorithmically necessary, but it makes debugging a bit easier.) + let mut i = 0; + while let Some((ptr, mut len_pages)) = free_list_alloc.pop() { + while len_pages > 0 { + let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1); + trees[i].base_ptr = Some(ptr.cast()); + trees[i].size_class = size_class; + i += 1; + len_pages -= 1 << size_class; + } + } + assert!( + i == trees.len() || i + 1 == trees.len(), + "found {} trees but allocated {} slots", + i, + trees.len() + ); + + // Slice the trees slice to fit how many trees actually remained, then sort them by + // address. + let trees = &mut trees[..i]; + trees.sort_by_key(|tree| tree.base_addr()); + + // Compute the number of bits that belong to each tree and assign them. + let mut bitset_offset = 0; + for tree in &mut *trees { + tree.bitset_offset = bitset_offset; + bitset_offset += (1 << (tree.size_class + 1)) - 1; + } + assert!(bitset_offset <= metadata_layout.bitset_len_bits); + + // Add regions the trees should own to the free lists, excluding the subregions that + // metadata is allocated in. + + // Since we know that the metadata was allocated at the end of the page range it was + // allocated in, we can just test the end addresses. + // + // SAFETY: This is the one-past-the-end address. + let metadata_addr_hi = + unsafe { metadata.cast::().add(metadata.len()).as_ptr() } as usize; + for tree in &mut *trees { + // SAFETY: This is the one-past-the-end address. + let tree_addr_hi = tree.base_addr() + (PAGE_SIZE << tree.size_class); + if metadata_addr_hi == tree_addr_hi { + // If the metadata was present inside the tree... TODO. + std::eprintln!("TODO"); + } else { + // Otherwise, we can take the whole range of the tree and add it to the free list. + + // SAFETY: There are as many sentinels as size classes, so this will be in-bounds. + let sentinel = unsafe { free_list_sentinels.add(tree.size_class) }; + + // SAFETY: The free list is kept valid. + let mut free_list = unsafe { FreeList::from_sentinel(sentinel) }; + + // SAFETY: This region is not owned or borrowed by anything else (the only thing it + // could be would be the metadata, but that's not in this tree), is of the correct + // size, and is composed of untyped memory. 
+ unsafe { free_list.push(tree.base_ptr.unwrap().cast()) }; + + // Then, set a bit in the bitset to say that this region is present. + tree.bitset_mark_as_present(bitset, tree.size_class, 0); + } + } + + // Make the buddy allocator's struct and return it. + Ok(BuddyAllocator { + free_list_sentinels, + trees, + bitset, + }) + } + + /// Tries to allocate a subregion of a particular size class from the allocator. + #[requires(size_class < SIZE_CLASS_COUNT)] + pub fn alloc(&mut self, size_class: usize) -> Result, AllocError> { + if let Some(ptr) = self.free_list(size_class).pop() { + // Fast-path: try allocating from the right size class's free list. + + // Find the corresponding tree and the offset of the subregion in the tree. + let (tree_index, offset) = self.tree_index_and_offset(ptr); + let tree = &mut self.trees[tree_index]; + + // Mark the pointer as no longer being in the tree. + tree.bitset_mark_as_absent(&mut self.bitset, size_class, offset); + + // Return the pointer. + Ok(ptr) + } else { + // Try finding a larger chunk of memory in a larger size class. + Err(AllocError) + } + } + + /// Returns a subregion of a particular size class to the allocator. + /// + /// # Safety + /// + /// - `ptr` must have been returned by a call to `alloc` with the same `size_class`. + /// - `ptr` must convey ownership over the entire region. + #[requires(size_class < SIZE_CLASS_COUNT)] + pub unsafe fn dealloc(&mut self, ptr: NonNull, size_class: usize) { + // Find the corresponding tree and the offset of the subregion in the tree. + let (tree_index, offset) = self.tree_index_and_offset(ptr); + let tree = &mut self.trees[tree_index]; + + // Ensure that the memory is marked as not being in the free list. + assert_eq!( + tree.bitset_get(&mut self.bitset, size_class, offset), + SubregionStatus::NotInFreeList + ); + + // Start combining size classes. + assert!(size_class <= tree.size_class); + while size_class < tree.size_class { + let buddy_offset = offset ^ (PAGE_SIZE << size_class); + todo!("{buddy_offset:#x} {offset:#x}"); + } + + // Mark the pointer as being in the tree. + tree.bitset_mark_as_present(&mut self.bitset, size_class, offset); + + // Return the pointer to the appropriate free list. + self.free_list(size_class).push(ptr); + } + + /// Returns the free list with the given size class. + #[requires(size_class < SIZE_CLASS_COUNT)] + fn free_list(&mut self, size_class: usize) -> FreeList { + // SAFETY: The bounds-check is a precondition. + let sentinel = unsafe { self.free_list_sentinels.add(size_class) }; + + // SAFETY: The free list is kept valid. + unsafe { FreeList::from_sentinel(sentinel) } + } + + /// Returns the index of the tree this pointer was allocated in. 
+ #[ensures(self.trees[ret.0].contains(ptr.as_ptr() as usize))] + #[ensures(ret.1 < (1 << self.trees[ret.0].size_class))] + fn tree_index_and_offset(&self, ptr: NonNull) -> (usize, usize) { + let addr = ptr.as_ptr() as usize; + let tree_index = self + .trees + .binary_search_by_key(&addr, |tree| tree.base_addr()); + let tree_index = match tree_index { + Ok(i) => i, + Err(i) => { + assert_ne!(i, 0); + i - 1 + } + }; + let offset = addr - self.trees[tree_index].base_addr(); + (tree_index, offset) + } +} + +impl< + 'allocator, + const PAGE_SIZE: usize, + const PAGE_SIZE_BITS: usize, + const SIZE_CLASS_COUNT: usize, + > fmt::Debug for BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT> +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + struct FreeLists(NonNull); + + impl fmt::Debug for FreeLists { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_list() + .entries((0..SIZE_CLASS_COUNT).map(|size_class| { + // SAFETY: The free lists are kept valid, and the range of size classes is + // necessarily in-bounds. + unsafe { FreeList::from_sentinel(self.0.add(size_class)) } + })) + .finish() + } + } + + fmt.debug_struct("BuddyAllocator") + .field( + "free_lists", + &FreeLists::(self.free_list_sentinels), + ) + .field("trees", &self.trees) + .field("bitset", &self.bitset) + .finish() + } +} + +#[derive(Debug)] +struct MetadataLayout< + const PAGE_SIZE: usize, + const PAGE_SIZE_BITS: usize, + const SIZE_CLASS_COUNT: usize, +> { + layout: Layout, + free_list_sentinels_offset: usize, + trees_offset: usize, + trees_len: usize, + bitset_offset: usize, + bitset_len: usize, + bitset_len_bits: usize, +} + +impl + MetadataLayout +{ + fn new( + range_count: usize, + size_class_counts: [usize; SIZE_CLASS_COUNT], + ) -> Result, LayoutError> { + let trees_len = range_count; + let mut bitset_len_bits = 0; + for (size_class, &count) in size_class_counts.iter().enumerate() { + let bits_for_size_class = (1 << (size_class + 1)) - 1; + bitset_len_bits += bits_for_size_class * count; + } + let bitset_len = (bitset_len_bits + usize::BITS as usize - 1) / usize::BITS as usize; + + let free_list_sentinels_layout = Layout::new::<[FreeListNode; SIZE_CLASS_COUNT]>(); + let trees_layout = Layout::array::>(trees_len)?; + let bitset_layout = Layout::array::(bitset_len)?; + + let free_list_sentinels_offset = 0; + let (layout, trees_offset) = free_list_sentinels_layout.extend(trees_layout)?; + let (layout, bitset_offset) = layout.extend(bitset_layout)?; + + Ok(MetadataLayout { + layout, + free_list_sentinels_offset, + trees_offset, + trees_len, + bitset_offset, + bitset_len, + bitset_len_bits, + }) + } +} diff --git a/crates/alloc_buddy/src/tree.rs b/crates/alloc_buddy/src/tree.rs new file mode 100644 index 0000000..72ee466 --- /dev/null +++ b/crates/alloc_buddy/src/tree.rs @@ -0,0 +1,100 @@ +use crate::bitvec::{Bitset, SubregionStatus}; +use contracts::requires; +use core::ptr::NonNull; + +/// A single region of the allocator. See the comment on the `crate::allocators::buddy` module for +/// more information. +/// +/// This type is valid when zero-initialized. +#[derive(Debug)] +pub struct Tree<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> { + /// The base address of the tree. + pub base_ptr: Option>, + + /// The log2 of the number of pages in the region represented by the tree. + pub size_class: usize, + + /// The offset in the bitset to the bits responsible for this tree's pages. 
+ pub bitset_offset: usize, + + phantom: core::marker::PhantomData<&'buddy mut [u8; PAGE_SIZE]>, +} + +impl<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> + Tree<'buddy, PAGE_SIZE, PAGE_SIZE_BITS> +{ + /// Returns the base address of the tree. + #[requires(self.base_ptr.is_some())] + pub fn base_addr(&self) -> usize { + self.base_ptr.unwrap().as_ptr() as usize + } + + /// Reads a bit from the bitset. + #[requires(size_class <= self.size_class)] + #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))] + #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)] + pub fn bitset_get( + &self, + bitset: &mut Bitset, + size_class: usize, + offset_bytes: usize, + ) -> SubregionStatus { + bitset.get(self.bitset_index(size_class, offset_bytes)) + } + + /// Returns the index of a bit in the bitset that corresponds to the given size class and + /// offset. + #[requires(size_class <= self.size_class)] + #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))] + #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)] + fn bitset_index(&self, size_class: usize, offset_bytes: usize) -> usize { + // We store the largest size classes first in the bitset. Count how many we are away from + // the largest. + let skipped_size_classes = self.size_class - size_class; + let bits_skipped_for_size_class = (1 << skipped_size_classes) - 1; + + // Next, find our index in the size class. + let bits_skipped_for_index = offset_bytes >> (PAGE_SIZE_BITS + size_class); + + // The sum of those two is simply our index. + bits_skipped_for_size_class + bits_skipped_for_index + } + + /// Changes a bit in the bitset from `InFreeList` to `NotInFreeList`. + #[requires(size_class <= self.size_class)] + #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))] + #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)] + pub fn bitset_mark_as_absent( + &self, + bitset: &mut Bitset, + size_class: usize, + offset_bytes: usize, + ) { + let index = self.bitset_index(size_class, offset_bytes); + assert_eq!(bitset.get(index), SubregionStatus::InFreeList); + bitset.set(index, SubregionStatus::NotInFreeList) + } + + /// Changes a bit in the bitset from `NotInFreeList` to `InFreeList`. + #[requires(size_class <= self.size_class)] + #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))] + #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)] + pub fn bitset_mark_as_present( + &self, + bitset: &mut Bitset, + size_class: usize, + offset_bytes: usize, + ) { + let index = self.bitset_index(size_class, offset_bytes); + assert_eq!(bitset.get(index), SubregionStatus::NotInFreeList); + bitset.set(index, SubregionStatus::InFreeList) + } + + /// Returns whether the tree contains an address. + #[requires(self.base_ptr.is_some())] + pub fn contains(&self, addr: usize) -> bool { + let tree_addr_lo = self.base_addr(); + let tree_addr_hi = tree_addr_lo + (PAGE_SIZE << self.size_class); + (tree_addr_lo..tree_addr_hi).contains(&addr) + } +} diff --git a/crates/alloc_buddy/tests/hosted_test.rs b/crates/alloc_buddy/tests/hosted_test.rs new file mode 100644 index 0000000..1afe263 --- /dev/null +++ b/crates/alloc_buddy/tests/hosted_test.rs @@ -0,0 +1,279 @@ +use core::{fmt, num::NonZero, slice}; +use nix::sys::mman::{mmap_anonymous, munmap, MapFlags, ProtFlags}; +use proptest::prelude::*; +use std::ptr::NonNull; +use vernos_alloc_buddy::BuddyAllocator; +use vernos_alloc_physmem_free_list::FreeListAllocator; + +proptest! 
{ + #![proptest_config(proptest::test_runner::Config { + failure_persistence: None, + ..Default::default() + })] + + #[test] + fn test_scenario(scenario in Scenario::any()) { + scenario.run(true) + } +} + +#[test] +fn test_simple_scenario() { + let scenario = Scenario { + range_sizes: vec![7], + actions: vec![ + Action::Debug, + Action::Alloc { + sentinel_value: 0xaa, + size_class: 0, + }, + Action::Debug, + ], + }; + scenario.run(false) +} + +const PAGE_SIZE: usize = 64; +const PAGE_SIZE_BITS: usize = PAGE_SIZE.trailing_zeros() as usize; +const SIZE_CLASS_COUNT: usize = 4; + +/// A single action the property test should perform. +#[derive(Debug)] +enum Action { + /// Makes an allocation. + Alloc { + /// A sentinel value, which is expected to be present when the allocation is freed. + sentinel_value: u8, + + /// The size class, which is constrained to be less than `SIZE_CLASS_COUNT`. + size_class: usize, + }, + + /// Deallocates an allocation. + Dealloc { + /// The index of the allocation in the set of live allocations, which is taken to be a + /// circular list (so this is always in-bounds if there are any allocations). + index: usize, + }, + + /// Prints the allocator and all live allocations, for debugging purposes. This is never + /// automatically generated. + Debug, + + /// Overwrites an allocation, changing its sentinel. + Overwrite { + /// The index of the allocation in the set of live allocations, which is taken to be a + /// circular list (so this is always in-bounds if there are any allocations). + index: usize, + + /// The new sentinel value. + sentinel_value: u8, + }, +} + +impl Action { + /// Generates a random action. + pub fn any() -> impl Strategy { + prop_oneof![ + (any::(), 0..SIZE_CLASS_COUNT).prop_map(|(sentinel_value, size_class)| { + Action::Alloc { + sentinel_value, + size_class, + } + }), + any::().prop_map(|index| { Action::Dealloc { index } }), + (any::(), any::()).prop_map(|(index, sentinel_value)| { + Action::Overwrite { + index, + sentinel_value, + } + }) + ] + } + + /// Runs an action. + pub fn run( + &self, + buddy: &mut BuddyAllocator, + allocs: &mut Vec, + allow_errors: bool, + ) { + match *self { + Action::Alloc { + sentinel_value, + size_class, + } => match buddy.alloc(size_class) { + Ok(ptr) => unsafe { + let slice = slice::from_raw_parts_mut(ptr.as_ptr(), PAGE_SIZE << size_class); + slice.fill(sentinel_value); + allocs.push(Alloc { + slice, + sentinel_value, + }); + }, + Err(err) => { + if !allow_errors { + Err(err).expect("failed to perform alloc action") + } + } + }, + Action::Dealloc { index } => { + if allow_errors && allocs.is_empty() { + return; + } + + let index = index % allocs.len(); + let alloc = allocs.remove(index); + alloc.check_sentinel(); + unsafe { + buddy.dealloc( + NonNull::from(alloc.slice.as_mut()).cast(), + alloc.slice.len().trailing_zeros() as usize - PAGE_SIZE_BITS, + ); + } + } + Action::Debug => { + dbg!(buddy); + dbg!(allocs); + } + Action::Overwrite { + index, + sentinel_value, + } => { + if allow_errors && allocs.is_empty() { + return; + } + + let index = index % allocs.len(); + let alloc = &mut allocs[index]; + alloc.slice.fill(sentinel_value); + alloc.sentinel_value = sentinel_value; + } + } + } +} + +/// The entire series of actions to be performed. +#[derive(Debug)] +struct Scenario { + /// The sizes of the ranges the buddy allocator should be initialized with. + range_sizes: Vec, + + /// Actions to perform after initializing the buddy allocator. + actions: Vec, +} + +impl Scenario { + /// Generates a random scenario. 
+ pub fn any() -> impl Strategy { + ( + prop::collection::vec(1usize..1 << (SIZE_CLASS_COUNT + 2), 0..4), + prop::collection::vec(Action::any(), 0..64), + ) + .prop_map(|(range_sizes, actions)| Scenario { + range_sizes, + actions, + }) + } + + /// Runs the scenario. + pub fn run(&self, allow_errors: bool) { + // Allocate each of the page ranges. + let backing_mem = self + .range_sizes + .iter() + .map(|&size| { + Mmap::new(NonZero::new(size * PAGE_SIZE).unwrap()) + .expect("failed to allocate memory") + }) + .collect::>(); + + // Create the free list allocator and move the pages there. + let mut free_list: FreeListAllocator = FreeListAllocator::new(); + for (&size, mmap) in self.range_sizes.iter().zip(backing_mem.iter()) { + unsafe { + free_list.add(mmap.ptr(), size); + } + } + + // Create the buddy allocator and move the pages from the free list to there. + match BuddyAllocator::::new(free_list) { + Ok(mut buddy) => { + let mut allocs = Vec::new(); + + // Run each of the actions. + for action in &self.actions { + action.run(&mut buddy, &mut allocs, allow_errors); + + // Check each allocation. + for alloc in &allocs { + alloc.check_sentinel(); + } + } + } + Err(err) => { + if !allow_errors { + Err(err).expect("failed to make buddy allocator") + } + } + } + } +} + +/// Information about an allocation we've made from the buddy allocator. +struct Alloc<'alloc> { + slice: &'alloc mut [u8], + sentinel_value: u8, +} + +impl<'alloc> Alloc<'alloc> { + pub fn check_sentinel(&self) { + let s = self.sentinel_value; + for (i, &b) in self.slice.iter().enumerate() { + assert_eq!(b, s, "at index {i}, {b} != {s}",); + } + } +} + +impl<'alloc> fmt::Debug for Alloc<'alloc> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("Alloc") + .field("ptr", &self.slice.as_ptr()) + .field("len", &self.slice.len()) + .field("sentinel_value", &self.sentinel_value) + .finish() + } +} + +/// A mmap-allocated region of memory. +struct Mmap { + ptr: NonNull, + len: NonZero, +} + +impl Mmap { + pub fn new(len: NonZero) -> Result { + let ptr = unsafe { + mmap_anonymous( + None, + len, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_PRIVATE, + )? + .cast() + }; + Ok(Mmap { ptr, len }) + } + + pub fn ptr(&self) -> NonNull { + self.ptr.cast() + } +} + +impl Drop for Mmap { + fn drop(&mut self) { + unsafe { + munmap(self.ptr(), self.len.get()).expect("failed to free memory"); + } + } +} diff --git a/crates/alloc_physmem_free_list/Cargo.toml b/crates/alloc_physmem_free_list/Cargo.toml new file mode 100644 index 0000000..32a210c --- /dev/null +++ b/crates/alloc_physmem_free_list/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "vernos_alloc_physmem_free_list" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +allocator-api2 = { version = "0.2.18", default-features = false } +contracts = { version = "0.6.3", default-features = false } diff --git a/crates/alloc_physmem_free_list/src/lib.rs b/crates/alloc_physmem_free_list/src/lib.rs new file mode 100644 index 0000000..f99b30f --- /dev/null +++ b/crates/alloc_physmem_free_list/src/lib.rs @@ -0,0 +1,389 @@ +//! A free-list allocator for physical memory ranges. This is only used when bootstrapping the +//! buddy allocator, since it provides a way to store and manipulate physical memory ranges without +//! needing a working allocator. 
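+//!
+//! A minimal usage sketch (illustrative only: `base` and `len_pages` stand in for a page range
+//! discovered from the platform's memory map, and the page size and split limit are arbitrary
+//! example values):
+//!
+//! ```ignore
+//! use vernos_alloc_physmem_free_list::FreeListAllocator;
+//!
+//! let mut free_list: FreeListAllocator<4096> = FreeListAllocator::new();
+//! // SAFETY: `base` must own `len_pages` pages of untyped, page-aligned memory that lives long
+//! // enough.
+//! unsafe { free_list.add(base, len_pages) };
+//!
+//! // Split every range into power-of-two-sized chunks (here, at most 8 pages each), then drain
+//! // them.
+//! free_list.split_to_powers_of_two_no_larger_than(8);
+//! while let Some((ptr, len_pages)) = free_list.pop() {
+//!     // Hand each range off, e.g. to the buddy allocator.
+//! }
+//! ```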
+#![no_std] + +use allocator_api2::alloc::{AllocError, Layout}; +use contracts::requires; +use core::{fmt, iter, marker::PhantomData, mem::size_of, ptr::NonNull, slice}; + +/// The free-list allocator. +pub struct FreeListAllocator<'allocator, const PAGE_SIZE: usize> { + head: Option>, +} + +impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE> { + /// Creates a new, empty allocator. + pub fn new() -> FreeListAllocator<'allocator, PAGE_SIZE> { + assert!(size_of::>() <= PAGE_SIZE); + + FreeListAllocator { head: None } + } + + /// Pushes a page range into the free list. + /// + /// # Safety + /// + /// - `ptr` must be a pointer that conveys ownership over the page range. + /// - The page range must be untyped memory. + /// - The page range must be aligned to `PAGE_SIZE`. + /// - The page range must live for `'allocator`. + /// - `len_pages` must be accurate. + pub unsafe fn add(&mut self, ptr: NonNull<[u8; PAGE_SIZE]>, len_pages: usize) { + self.head = Some(FreeListPtr::new(ptr, len_pages, self.head.take())); + } + + /// Allocates memory, *without necessarily removing it from the allocator*. + /// + /// The memory will only be removed from the allocator if there is a node from which no + /// additional pages would be able to be allocated into. If the node is not removed, the + /// returned pointer will not point into the first page, so it will not be altered by metadata + /// updates. + /// + /// Obviously, this is unsound to call while the previous allocation is alive. It is also + /// unsound to split any nodes after calling this, since splitting the node that was allocated + /// into might write the new node's metadata into that allocation. + pub unsafe fn allocate_without_always_removing_the_node( + &mut self, + layout: Layout, + ) -> Result, AllocError> { + assert!(PAGE_SIZE.is_power_of_two()); + let len_pages = (layout.size() + PAGE_SIZE - 1) >> PAGE_SIZE.trailing_zeros(); + + // Find the size of the smallest page range that would fit this. + let Some(selected_node_len) = self + .iter() + .map(|(_, range_len_pages)| range_len_pages) + .filter(|&range_len_pages| len_pages <= range_len_pages) + .min() + else { + return Err(AllocError); + }; + + if selected_node_len == len_pages { + // The less-scary case is when we're removing an entire node; we're just aiming to find + // it and unlink it. + + // If the node we're looking for is the first one, slightly different code is used to + // unlink it. + let (ptr, unlinked_len_pages) = if self + .head + .as_ref() + .map(|node| node.len_pages() == len_pages) + .unwrap() + { + // In that case, just unlink the node and hand it back. + // + // UNWRAP: The list can't have been empty or .min() above would've returned None. + self.pop().unwrap() + } else { + // Otherwise, we need to iterate through the list to find the node before it. + let mut next = self.head.as_mut(); + let mut node_before_node_to_remove = None; + while let Some(ptr) = next { + if let Some(next_header) = &ptr.header().next { + if next_header.len_pages() == selected_node_len { + node_before_node_to_remove = Some(ptr); + break; + } + } + next = ptr.next_mut(); + } + + // UNWRAP: We had to find the node in the first place, so it must be in the list. + node_before_node_to_remove.unwrap().unlink_next().unwrap() + }; + + assert_eq!(len_pages, unlinked_len_pages); + Ok(NonNull::slice_from_raw_parts( + ptr.cast(), + len_pages * PAGE_SIZE, + )) + } else { + // The scary case is when we're trying to give back memory that's part of a different + // node. 
First, we need to iterate through the list to find the node. + let mut next = self.head.as_mut(); + let mut node_to_steal_from = None; + while let Some(ptr) = next { + if ptr.len_pages() == selected_node_len { + node_to_steal_from = Some(ptr); + break; + } + next = ptr.next_mut(); + } + + // UNWRAP: We had to find the node in the first place, so it must be in the list. + let node_to_steal_from = node_to_steal_from.unwrap(); + + // Then, we can simply do the address arithmetic to get a pointer out with the right + // bounds. + // + // SAFETY: The node had sufficient length to add `selected_node_len`, so it must have + // sufficient length to add a lesser quantity. + let ptr = unsafe { node_to_steal_from.ptr().add(selected_node_len - len_pages) }; + + Ok(NonNull::slice_from_raw_parts( + ptr.cast(), + len_pages * PAGE_SIZE, + )) + } + } + + /// Returns an iterator over the page ranges in the free list, as pairs of `ptr` and + /// `len_pages`. The returned pointers should not be accessed. + pub fn iter(&self) -> impl '_ + Iterator, usize)> { + let mut next: Option<&_> = self.head.as_ref(); + iter::from_fn(move || match next { + Some(ptr) => { + let out = (ptr.ptr(), ptr.len_pages()); + next = ptr.next(); + Some(out) + } + None => None, + }) + } + + /// Pops the first page range from the allocator and returns it, along with its length in pages. + pub fn pop(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> { + let node = self.head.take()?; + let (ptr, header) = node.consume(); + self.head = header.next; + Some((ptr, header.len_pages)) + } + + /// Splits all the nodes until each page range is of a power-of-two size no larger than + /// `max_len_pages`. + pub fn split_to_powers_of_two_no_larger_than(&mut self, max_len_pages: usize) { + let mut next = self.head.as_mut(); + while let Some(ptr) = next { + while ptr.len_pages() > max_len_pages { + ptr.split(max_len_pages); + } + + while !ptr.len_pages().is_power_of_two() { + let log2_smaller_size = ptr.len_pages().trailing_zeros(); + let smaller_size = 1 << log2_smaller_size; + ptr.split(smaller_size); + } + + next = ptr.next_mut(); + } + } +} + +impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListAllocator<'allocator, PAGE_SIZE> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut next: Option<&_> = self.head.as_ref(); + fmt.debug_list() + .entries(iter::from_fn(|| match next { + Some(ptr) => { + next = ptr.next(); + Some(ptr) + } + None => None, + })) + .finish() + } +} + +/// A pointer to a page range, which start with a header in the first page. +/// +/// # Safety +/// +/// - `ptr` must be a pointer that conveys ownership over the page range. +/// - The page range must be untyped memory. +/// - The page range must be non-empty. +/// - The first page must have a correct header written to it. +/// - The page range must live for `'allocator`. +struct FreeListPtr<'allocator, const PAGE_SIZE: usize> { + ptr: NonNull>, + _phantom: PhantomData<&'allocator FreeListNodeHeader<'allocator, PAGE_SIZE>>, +} + +impl<'allocator, const PAGE_SIZE: usize> FreeListPtr<'allocator, PAGE_SIZE> { + /// Initialize the given page range as a node in the free list. + /// + /// # Safety + /// + /// - `ptr` must be a pointer that conveys ownership over the page range. + /// - The page range must be untyped memory. + /// - The page range must be aligned to `PAGE_SIZE`. + /// - The page range must live for `'allocator`. + /// - `len_pages` must be accurate. 
+ #[requires(len_pages > 0)] + pub unsafe fn new( + ptr: NonNull<[u8; PAGE_SIZE]>, + len_pages: usize, + next: Option>, + ) -> FreeListPtr<'allocator, PAGE_SIZE> { + assert!(size_of::>() <= PAGE_SIZE); + + let ptr: NonNull> = ptr.cast(); + // SAFETY: The safety invariants plus the assertion guarantee this write is valid. + unsafe { + ptr.write(FreeListNodeHeader { next, len_pages }); + } + FreeListPtr { + ptr, + _phantom: PhantomData, + } + } + + /// Consumes the node, returning the pointer and header. + pub fn consume( + self, + ) -> ( + NonNull<[u8; PAGE_SIZE]>, + FreeListNodeHeader<'allocator, PAGE_SIZE>, + ) { + // SAFETY: The invariants of the type ensure that the header is present and owned by us. + let header = unsafe { self.ptr.read() }; + (self.ptr.cast(), header) + } + + /// Returns a reference to the header of the page range. + fn header(&self) -> &FreeListNodeHeader<'allocator, PAGE_SIZE> { + // SAFETY: The invariants of the type ensure that the header is present and owned by us. + unsafe { self.ptr.as_ref() } + } + + /// Returns an exclusive reference to the header of the page range. + /// + /// # Safety + /// + /// The invariants of the type must be upheld. + unsafe fn header_mut(&mut self) -> &mut FreeListNodeHeader<'allocator, PAGE_SIZE> { + // SAFETY: The invariants of the type ensure that the header is present and owned by us. + unsafe { self.ptr.as_mut() } + } + + /// Returns the length of the node in pages. + pub fn len_pages(&self) -> usize { + self.header().len_pages + } + + /// Returns a shared reference to the next node in the free list. Returns `None` when there + /// are no further nodes. + pub fn next(&self) -> Option<&FreeListPtr<'allocator, PAGE_SIZE>> { + self.header().next.as_ref() + } + + /// Returns an exclusive reference to the next node in the free list. Returns `None` when there + /// are no further nodes. + pub fn next_mut(&mut self) -> Option<&mut FreeListPtr<'allocator, PAGE_SIZE>> { + // SAFETY: We just lend out the next pointer, which can't violate our invariants. + let header = unsafe { self.header_mut() }; + header.next.as_mut() + } + + /// Returns the underlying pointer. Using this may violate invariants. + pub fn ptr(&self) -> NonNull<[u8; PAGE_SIZE]> { + self.ptr.cast() + } + + /// Splits the page range in two, with the second having the given length in pages. + #[requires(split_len < self.len_pages())] + pub fn split(&mut self, split_len: usize) { + // Get the old header. After this point, `self.ptr` is considered uninitialized, since + // we've moved out of it, so we can't panic. + // + // SAFETY: The invariants of the type ensure that the header is present and owned by us. + let old_header = unsafe { self.ptr.read() }; + + // We call the two new nodes `a` and `b`. + let a_len = old_header.len_pages - split_len; + let b_len = split_len; + // SAFETY: The invariants of the type ensure this is still in-bounds, since + // the header must have been valid and `a_len <= old_header.len_pages`. + let b_ptr: NonNull<[u8; PAGE_SIZE]> = unsafe { self.ptr().add(a_len) }; + + // Construct a new node for `b`. + // + // SAFETY: The safety invariants are simply inherited from the memory being valid in the + // first place, except for the `len_pages` invariant. This is upheld because the length + // math is correct. + // + // The assertion panic in `FreeListPtr::new()` must be unreachable, or it would have been + // triggered when constructing this type in the first place. 
+ let b = unsafe { FreeListPtr::new(b_ptr, b_len, old_header.next) }; + + // Reinitialize the header for `a`. After this point, `self.ptr` is considered initialized + // again, so we may panic again. + // + // SAFETY: The safety invariants are simply inherited from the memory being valid in the + // first place, except for the `len_pages` invariant. This is upheld because the length + // math is correct. + unsafe { + self.ptr.write(FreeListNodeHeader { + next: Some(b), + len_pages: a_len, + }); + } + } + + /// Unlinks and returns the _next_ node, returning its pointer and length in pages. + pub fn unlink_next(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> { + // SAFETY: We uphold the invariants. + let header = unsafe { self.header_mut() }; + let next = header.next.take()?; + let (next_ptr, next_header) = next.consume(); + header.next = next_header.next; + Some((next_ptr, next_header.len_pages)) + } +} + +impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListPtr<'allocator, PAGE_SIZE> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "[{:p} × {}]", self.ptr, self.len_pages()) + } +} + +#[derive(Debug)] +struct FreeListNodeHeader<'allocator, const PAGE_SIZE: usize> { + /// The next node in the linked list. + next: Option>, + + /// The length of the allocation in pages. + len_pages: usize, +} + +/// Returns an iterator over power-of-two-sized subslices of this slice, which are returned from +/// smallest to largest. +pub fn power_of_two_subslices_mut(slice: &mut [T]) -> impl Iterator { + // Decompose the slice to its raw parts. Yep, we're gonna be doing unsafe stuff. + let mut ptr = slice.as_mut_ptr(); + let mut len = slice.len(); + + iter::from_fn(move || { + if len == 0 { + None + } else { + // Make the slice to return. + // + // SAFETY: The pointer and length must be valid, since we got them from the valid + // slice. Before returning `out`, we advance them, so that we never give out multiple + // exclusive references to the same subslice. + let out_len = 1 << len.trailing_zeros(); + let out = unsafe { slice::from_raw_parts_mut(ptr, out_len) }; + + // Advance the pointer and length. + // + // SAFETY: out_len < len, and we got len from an existing slice. 
+ ptr = unsafe { ptr.add(out_len) }; + len -= out_len; + + Some(out) + } + }) +} + +#[test] +fn power_of_two_subslices_mut_test() { + extern crate std; + use std::vec::Vec; + + let mut data = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let subslices = power_of_two_subslices_mut(&mut data).collect::>(); + + assert_eq!(subslices, &[&[0] as &[_], &[1, 2, 3, 4, 5, 6, 7, 8]]); +} diff --git a/crates/buddy_allocator/Cargo.toml b/crates/buddy_allocator/Cargo.toml deleted file mode 100644 index 94ab236..0000000 --- a/crates/buddy_allocator/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "vernos_buddy_allocator" -version = "0.1.0" -edition = "2021" -publish = false - -[dependencies] -allocator-api2 = { version = "0.2.18", default-features = false } -contracts = { version = "0.6.3", default-features = false } -static_assertions = { version = "1.1.0", default-features = false } -vernos_physmem_free_list = { path = "../physmem_free_list" } - -[dev-dependencies] -proptest = { version = "0.9.6", default-features = false, features = ["std"] } -nix = { version = "0.29.0", default-features = false, features = ["mman"] } diff --git a/crates/buddy_allocator/src/bitvec.rs b/crates/buddy_allocator/src/bitvec.rs deleted file mode 100644 index 5cabc5e..0000000 --- a/crates/buddy_allocator/src/bitvec.rs +++ /dev/null @@ -1,77 +0,0 @@ -use contracts::requires; -use core::{fmt, mem::transmute}; - -/// A fixed-length vector of bits used for determining whether a subregion is in the free lists or -/// not. -pub struct Bitset([usize]); - -impl Bitset { - fn from_mut(words: &mut [usize]) -> &mut Bitset { - // SAFETY: The types have a newtype relationship. - unsafe { transmute(words) } - } - - fn from_ref(words: &[usize]) -> &Bitset { - // SAFETY: The types have a newtype relationship. - unsafe { transmute(words) } - } - - /// Retrieves the value of a bit from the Bitset. - #[requires(i < self.len())] - pub fn get(&self, i: usize) -> SubregionStatus { - let word_index = i / usize::BITS as usize; - let subword_index = i % usize::BITS as usize; - let one_hot = 1 << subword_index; - if (self.0[word_index] & one_hot) == 0 { - SubregionStatus::NotInFreeList - } else { - SubregionStatus::InFreeList - } - } - - /// Returns whether the Bitset is empty. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns the number of bits in the Bitset. - pub fn len(&self) -> usize { - self.0.len() * usize::BITS as usize - } - - /// Sets the value of a bit in the Bitset. - #[requires(i < self.len())] - pub fn set(&mut self, i: usize, status: SubregionStatus) { - let word_index = i / usize::BITS as usize; - let subword_index = i % usize::BITS as usize; - let one_hot = 1 << subword_index; - match status { - SubregionStatus::NotInFreeList => { - self.0[word_index] &= !one_hot; - } - SubregionStatus::InFreeList => { - self.0[word_index] |= one_hot; - } - } - } -} - -impl fmt::Debug for Bitset { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "Bitset(")?; - for word in &self.0 { - write!(fmt, "{word:064b}")?; - } - write!(fmt, ")") - } -} - -/// The status of a subregion, as tracked by `Bitset`. -#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] -pub enum SubregionStatus { - /// The region is not in the free list. - NotInFreeList = 0, - - /// The region is in the free list. 
- InFreeList = 1, -} diff --git a/crates/buddy_allocator/src/free_list.rs b/crates/buddy_allocator/src/free_list.rs deleted file mode 100644 index 9b470fd..0000000 --- a/crates/buddy_allocator/src/free_list.rs +++ /dev/null @@ -1,164 +0,0 @@ -//! A circular doubly-linked list of power-of-two-sized subregions. - -use contracts::{ensures, requires}; -use core::{fmt, ptr::NonNull}; - -/// The magic number a non-sentinel node should have. -const FREE_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"FREENODE"); - -/// The magic number set into the spot for a node's magic number when it is initialized, but not -/// linked into a list. -const INITED_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"NODEINIT"); - -/// The magic number a sentinel node should have. -const SENTINEL_MAGIC: u64 = u64::from_ne_bytes(*b"SENTINEL"); - -/// The magic number set into the spot for a node's magic number when it gets unlinked. -const USED_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"ALLOC'ED"); - -/// A pointer to the sentinel node of a circular doubly-linked list of power-of-two-sized subregions. -pub struct FreeList(NonNull); - -impl FreeList { - /// Creates a free-list wrapper for a sentinel node's pointer. - /// - /// # Safety - /// - /// - `sentinel` must be a pointer to the sentinel of a valid circular doubly-linked list. - #[requires(unsafe { (*sentinel.as_ptr()).next }.is_some())] - #[requires(unsafe { (*sentinel.as_ptr()).prev }.is_some())] - #[requires(unsafe { (*sentinel.as_ptr()).magic } == SENTINEL_MAGIC)] - #[requires(unsafe { (*sentinel.as_ptr()).self_addr } == sentinel.as_ptr() as usize)] - pub unsafe fn from_sentinel(sentinel: NonNull) -> FreeList { - FreeList(sentinel) - } - - /// Returns whether the free-list is empty. - pub fn is_empty(&self) -> bool { - let sentinel = self.0.as_ptr(); - // SAFETY: This is sound as long as the sentinel was valid, which is a precondition of - // `FreeList::from_sentinel()`. - unsafe { - // UNWRAP: This is a precondition of the method. - let next = (*sentinel).next.unwrap(); - (*sentinel).self_addr == next.as_ptr() as usize - } - } - - /// Pops a subregion from the free-list, returning ownership to the caller. - pub fn pop(&mut self) -> Option> { - if self.is_empty() { - // If the list is empty, return `None`. - None - } else { - // SAFETY: This is sound as long as the sentinel was valid, which is a precondition of - // `FreeList::from_sentinel()`. - unsafe { - // UNWRAP: In a valid circular doubly-linked list, the pointers are all non-null. - let node = (*self.0.as_ptr()).next.unwrap(); - let prev = (*node.as_ptr()).prev.unwrap(); - let next = (*node.as_ptr()).next.unwrap(); - - (*prev.as_ptr()).next = Some(next); - (*next.as_ptr()).prev = Some(prev); - - (*node.as_ptr()).next = None; - (*node.as_ptr()).prev = None; - (*node.as_ptr()).magic = USED_NODE_MAGIC; - Some(node.cast()) - } - } - } - - /// Pushes a subregion into the free-list. - /// - /// # Safety - /// - /// - `subregion_ptr` must convey ownership over untyped memory of the correct size. - pub unsafe fn push(&mut self, subregion_ptr: NonNull) { - let node = FreeListNode::init(subregion_ptr); - - let prev = self.0; - // UNWRAP: In a valid circular doubly-linked list, the pointers are all non-null. - let next = (*self.0.as_ptr()).next.unwrap(); - - (*node.as_ptr()).prev = Some(prev); - (*node.as_ptr()).next = Some(next); - (*node.as_ptr()).magic = FREE_NODE_MAGIC; - - (*prev.as_ptr()).next = Some(node); - (*next.as_ptr()).prev = Some(node); - } -} - -/// This impl will panic until the sentinel has been self-linked. 
-impl fmt::Debug for FreeList { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - // SAFETY: This is sound as long as the sentinel was valid, which is a precondition of - // `FreeList::from_sentinel()`. - let sentinel_addr = unsafe { (*self.0.as_ptr()).self_addr }; - - let mut fmt = fmt.debug_list(); - let mut ptr = self.0; - loop { - // SAFETY: The invariants of FreeList guarantee that the list is valid. In a valid - // list, the next pointer is always valid and non-null. - ptr = unsafe { (*ptr.as_ptr()).next.unwrap() }; - if unsafe { (*ptr.as_ptr()).self_addr } == sentinel_addr { - break fmt.finish(); - } - fmt.entry(&ptr); - } - } -} - -/// A node in a free list. This should not be moved without careful thought. -/// -/// This type is valid when zero-initialized. -/// -/// # Safety -/// -/// - This type is valid only if it is zero-initialized or if it contains a valid circular -/// doubly-linked list of subregions of the correct size, which it owns. -#[derive(Debug)] -pub struct FreeListNode { - next: Option>, - prev: Option>, - magic: u64, - self_addr: usize, -} - -impl FreeListNode { - /// Initializes a sentinel node. - /// - /// # Safety - /// - /// The pointer must convey exclusive access and point to a large-enough allocation. - #[ensures(unsafe { (*ptr.as_ptr()).magic } == SENTINEL_MAGIC)] - #[ensures(unsafe { (*ptr.as_ptr()).self_addr } == ptr.as_ptr() as usize)] - pub unsafe fn init_sentinel(ptr: NonNull) { - (*ptr.as_ptr()).next = Some(ptr); - (*ptr.as_ptr()).prev = Some(ptr); - (*ptr.as_ptr()).magic = SENTINEL_MAGIC; - (*ptr.as_ptr()).self_addr = ptr.as_ptr() as usize; - } - - /// Initializes a non-sentinel node. - /// - /// # Safety - /// - /// The pointer must convey ownership and point to a large-enough allocation. - #[ensures(ptr.as_ptr() as usize == ret.as_ptr() as usize)] - #[ensures(unsafe { (*ret.as_ptr()).magic } == INITED_NODE_MAGIC)] - #[ensures(unsafe { (*ret.as_ptr()).self_addr } == ptr.as_ptr() as usize)] - unsafe fn init(ptr: NonNull) -> NonNull { - let ptr = ptr.cast(); - ptr.write(FreeListNode { - next: None, - prev: None, - magic: INITED_NODE_MAGIC, - self_addr: ptr.as_ptr() as usize, - }); - ptr - } -} diff --git a/crates/buddy_allocator/src/lib.rs b/crates/buddy_allocator/src/lib.rs deleted file mode 100644 index c543ca6..0000000 --- a/crates/buddy_allocator/src/lib.rs +++ /dev/null @@ -1,409 +0,0 @@ -//! A buddy allocator, used to allocate pages. -//! -//! The allocator can be split into three abstractions: stripes, trees, and the allocator. -//! -//! TODO: See if there's standard terminology for these. -//! -//! ## Stripes -//! -//! The buddy allocator works in terms of size classes, which are power-of-two sized, starting at a -//! single page and going up from there. Each stripe corresponds to a single size class and a -//! particular region of memory. -//! -//! A stripe contains a circular doubly-linked free-list for subregions of that size class, and a -//! bitset marking whether a particular region has been allocated or not. Being a circular -//! doubly-linked list makes it cheap to remove an element whose address we know, as well as cheap -//! to push and pop elements. -//! -//! It's efficient to go from the address of a subregion to the index of its corresponding bit, so -//! when we hand out a subregion from the free-list or put one back, it's cheap to read or write -//! its bit. -//! -//! ## Trees -//! -//! A tree is logically a collection of stripes, one per size class. To pack the structures more -//! 
efficiently, they are stored interleaved with each other, and the tree manages this. -//! -//! The important logic at the level of trees happens when we allocate a subregion from a size -//! class whose stripe's free-list is empty, and when we free subregions. -//! -//! When a stripe's free-list is empty, the tree instead allocates a subregion of a larger size -//! from the next stripe. It can then store the unused portion in the current size class. -//! -//! The really important bit is the ability to merge subregions when they're freed. When we free a -//! subregion of a certain size class, we can check whether its neighbor (its buddy) is unallocated -//! as well. If so, we can remove it from the free-list by its address. We can combine the two -//! subregions into one of the next larger size class, and then return that subregion to the next -//! stripe. -//! -//! ## The buddy allocator -//! -//! Finally, the overall buddy allocator needs to be able to handle multiple memory regions. To -//! facilitate this, the trees are stored in an array, which forms the overall allocator. -#![no_std] - -extern crate std; // TODO: Remove me - -mod bitvec; -mod free_list; -mod tree; - -use crate::{ - bitvec::{Bitset, SubregionStatus}, - free_list::{FreeList, FreeListNode}, - tree::Tree, -}; -use allocator_api2::alloc::{AllocError, Layout, LayoutError}; -use contracts::{ensures, requires}; -use core::{fmt, mem, ptr::NonNull}; -use vernos_physmem_free_list::FreeListAllocator; - -/// A buddy allocator. -pub struct BuddyAllocator< - 'allocator, - const PAGE_SIZE: usize, - const PAGE_SIZE_BITS: usize, - const SIZE_CLASS_COUNT: usize, -> { - /// The free list sentinels (`SIZE_CLASS_COUNT` of them). - free_list_sentinels: NonNull, - - /// The metadata associated with each tree. - trees: &'allocator mut [Tree<'allocator, PAGE_SIZE, PAGE_SIZE_BITS>], - - /// The shared bitset. - bitset: &'allocator mut Bitset, -} - -impl< - 'allocator, - const PAGE_SIZE: usize, - const PAGE_SIZE_BITS: usize, - const SIZE_CLASS_COUNT: usize, - > BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT> -{ - /// Returns a buddy allocator backed by the page ranges in the free list. - pub fn new( - mut free_list_alloc: FreeListAllocator<'allocator, PAGE_SIZE>, - ) -> Result, AllocError> - { - assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS); - assert_ne!(SIZE_CLASS_COUNT, 0); - - // We split up all the nodes into size-class-sized chunks here. After this, it's important - // for safety that we never split a node again -- we'll see why later. - free_list_alloc.split_to_powers_of_two_no_larger_than(1 << (SIZE_CLASS_COUNT - 1)); - - // We use one contiguous region to store the free lists and all the information about each - // tree (the base, the maximum order, and the bitset). We do one pass through the free list - // to find out how much storage we need, then we try to find a convenient spot to steal the - // storage we need from a page range in the list. - let mut range_count = 0; - let mut size_class_counts = [0; SIZE_CLASS_COUNT]; - for (_, len_pages) in free_list_alloc.iter() { - assert!(len_pages.is_power_of_two()); - let size_class = len_pages.trailing_zeros() as usize; - assert!((0..SIZE_CLASS_COUNT).contains(&size_class)); - range_count += 1; - size_class_counts[size_class] += 1; - } - - // Lay out the metadata. - // - // UNWRAP: The error here really ought to be impossible, because the pages in the free list - // needed to fit in there in the first place. 
- let metadata_layout = MetadataLayout::::new( - range_count, - size_class_counts, - ) - .unwrap(); - - // Allocate space for the metadata. - // - // SAFETY: We need to make _absolutely_ sure nothing in the buddy allocator uses this - // memory before we have a chance to remove it from the allocator. This function guarantees - // that the returned memory won't overlap with the first page of a live node, so as long as - // we don't split a node or write past the first page of one, we should be OK. - let metadata = unsafe { - free_list_alloc.allocate_without_always_removing_the_node(metadata_layout.layout)? - }; - // SAFETY: The pointer must be valid to have been returned here. - unsafe { - metadata.cast::().write_bytes(0, metadata.len()); - } - - // Break out each component of the metadata. - // - // SAFETY: The layout is computed to make all of this sound if the data is valid. All of - // these types are valid when zero-initialized. - let free_list_sentinels: NonNull = unsafe { - metadata - .byte_add(metadata_layout.free_list_sentinels_offset) - .cast() - }; - let trees: &'allocator mut [Tree] = unsafe { - NonNull::slice_from_raw_parts( - metadata.byte_add(metadata_layout.trees_offset).cast(), - metadata_layout.trees_len, - ) - .as_mut() - }; - // SAFETY: This type is valid when zero-initialized, and is a newtype for `[usize]` so has - // the same layout. - let bitset: &'allocator mut Bitset = unsafe { - mem::transmute::<&mut [usize], &mut Bitset>( - NonNull::slice_from_raw_parts( - metadata.byte_add(metadata_layout.bitset_offset).cast(), - metadata_layout.bitset_len, - ) - .as_mut(), - ) - }; - - // Self-link each free list. Although they're valid in a UB sense when zeroed, they're not - // yet ready to use until this is done. - for size_class in 0..SIZE_CLASS_COUNT { - // SAFETY: This is the number of nodes we allocate, so we'll stay in-bounds. - unsafe { FreeListNode::init_sentinel(free_list_sentinels.add(size_class)) }; - } - - // Initialize the trees, adding subregions into the allocator as we do. We don't actually - // assign bitset offsets yet, because we're going to sort the trees by address before we - // do. (This isn't algorithmically necessary, but it makes debugging a bit easier.) - let mut i = 0; - while let Some((ptr, mut len_pages)) = free_list_alloc.pop() { - while len_pages > 0 { - let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1); - trees[i].base_ptr = Some(ptr.cast()); - trees[i].size_class = size_class; - i += 1; - len_pages -= 1 << size_class; - } - } - assert!( - i == trees.len() || i + 1 == trees.len(), - "found {} trees but allocated {} slots", - i, - trees.len() - ); - - // Slice the trees slice to fit how many trees actually remained, then sort them by - // address. - let trees = &mut trees[..i]; - trees.sort_by_key(|tree| tree.base_addr()); - - // Compute the number of bits that belong to each tree and assign them. - let mut bitset_offset = 0; - for tree in &mut *trees { - tree.bitset_offset = bitset_offset; - bitset_offset += (1 << (tree.size_class + 1)) - 1; - } - assert!(bitset_offset <= metadata_layout.bitset_len_bits); - - // Add regions the trees should own to the free lists, excluding the subregions that - // metadata is allocated in. - - // Since we know that the metadata was allocated at the end of the page range it was - // allocated in, we can just test the end addresses. - // - // SAFETY: This is the one-past-the-end address. 
- let metadata_addr_hi = - unsafe { metadata.cast::().add(metadata.len()).as_ptr() } as usize; - for tree in &mut *trees { - // SAFETY: This is the one-past-the-end address. - let tree_addr_hi = tree.base_addr() + (PAGE_SIZE << tree.size_class); - if metadata_addr_hi == tree_addr_hi { - // If the metadata was present inside the tree... TODO. - std::eprintln!("TODO"); - } else { - // Otherwise, we can take the whole range of the tree and add it to the free list. - - // SAFETY: There are as many sentinels as size classes, so this will be in-bounds. - let sentinel = unsafe { free_list_sentinels.add(tree.size_class) }; - - // SAFETY: The free list is kept valid. - let mut free_list = unsafe { FreeList::from_sentinel(sentinel) }; - - // SAFETY: This region is not owned or borrowed by anything else (the only thing it - // could be would be the metadata, but that's not in this tree), is of the correct - // size, and is composed of untyped memory. - unsafe { free_list.push(tree.base_ptr.unwrap().cast()) }; - - // Then, set a bit in the bitset to say that this region is present. - tree.bitset_mark_as_present(bitset, tree.size_class, 0); - } - } - - // Make the buddy allocator's struct and return it. - Ok(BuddyAllocator { - free_list_sentinels, - trees, - bitset, - }) - } - - /// Tries to allocate a subregion of a particular size class from the allocator. - #[requires(size_class < SIZE_CLASS_COUNT)] - pub fn alloc(&mut self, size_class: usize) -> Result, AllocError> { - if let Some(ptr) = self.free_list(size_class).pop() { - // Fast-path: try allocating from the right size class's free list. - - // Find the corresponding tree and the offset of the subregion in the tree. - let (tree_index, offset) = self.tree_index_and_offset(ptr); - let tree = &mut self.trees[tree_index]; - - // Mark the pointer as no longer being in the tree. - tree.bitset_mark_as_absent(&mut self.bitset, size_class, offset); - - // Return the pointer. - Ok(ptr) - } else { - // Try finding a larger chunk of memory in a larger size class. - Err(AllocError) - } - } - - /// Returns a subregion of a particular size class to the allocator. - /// - /// # Safety - /// - /// - `ptr` must have been returned by a call to `alloc` with the same `size_class`. - /// - `ptr` must convey ownership over the entire region. - #[requires(size_class < SIZE_CLASS_COUNT)] - pub unsafe fn dealloc(&mut self, ptr: NonNull, size_class: usize) { - // Find the corresponding tree and the offset of the subregion in the tree. - let (tree_index, offset) = self.tree_index_and_offset(ptr); - let tree = &mut self.trees[tree_index]; - - // Ensure that the memory is marked as not being in the free list. - assert_eq!( - tree.bitset_get(&mut self.bitset, size_class, offset), - SubregionStatus::NotInFreeList - ); - - // Start combining size classes. - assert!(size_class <= tree.size_class); - while size_class < tree.size_class { - let buddy_offset = offset ^ (PAGE_SIZE << size_class); - todo!("{buddy_offset:#x} {offset:#x}"); - } - - // Mark the pointer as being in the tree. - tree.bitset_mark_as_present(&mut self.bitset, size_class, offset); - - // Return the pointer to the appropriate free list. - self.free_list(size_class).push(ptr); - } - - /// Returns the free list with the given size class. - #[requires(size_class < SIZE_CLASS_COUNT)] - fn free_list(&mut self, size_class: usize) -> FreeList { - // SAFETY: The bounds-check is a precondition. - let sentinel = unsafe { self.free_list_sentinels.add(size_class) }; - - // SAFETY: The free list is kept valid. 
- unsafe { FreeList::from_sentinel(sentinel) } - } - - /// Returns the index of the tree this pointer was allocated in. - #[ensures(self.trees[ret.0].contains(ptr.as_ptr() as usize))] - #[ensures(ret.1 < (1 << self.trees[ret.0].size_class))] - fn tree_index_and_offset(&self, ptr: NonNull) -> (usize, usize) { - let addr = ptr.as_ptr() as usize; - let tree_index = self - .trees - .binary_search_by_key(&addr, |tree| tree.base_addr()); - let tree_index = match tree_index { - Ok(i) => i, - Err(i) => { - assert_ne!(i, 0); - i - 1 - } - }; - let offset = addr - self.trees[tree_index].base_addr(); - (tree_index, offset) - } -} - -impl< - 'allocator, - const PAGE_SIZE: usize, - const PAGE_SIZE_BITS: usize, - const SIZE_CLASS_COUNT: usize, - > fmt::Debug for BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT> -{ - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - struct FreeLists(NonNull); - - impl fmt::Debug for FreeLists { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_list() - .entries((0..SIZE_CLASS_COUNT).map(|size_class| { - // SAFETY: The free lists are kept valid, and the range of size classes is - // necessarily in-bounds. - unsafe { FreeList::from_sentinel(self.0.add(size_class)) } - })) - .finish() - } - } - - fmt.debug_struct("BuddyAllocator") - .field( - "free_lists", - &FreeLists::(self.free_list_sentinels), - ) - .field("trees", &self.trees) - .field("bitset", &self.bitset) - .finish() - } -} - -#[derive(Debug)] -struct MetadataLayout< - const PAGE_SIZE: usize, - const PAGE_SIZE_BITS: usize, - const SIZE_CLASS_COUNT: usize, -> { - layout: Layout, - free_list_sentinels_offset: usize, - trees_offset: usize, - trees_len: usize, - bitset_offset: usize, - bitset_len: usize, - bitset_len_bits: usize, -} - -impl - MetadataLayout -{ - fn new( - range_count: usize, - size_class_counts: [usize; SIZE_CLASS_COUNT], - ) -> Result, LayoutError> { - let trees_len = range_count; - let mut bitset_len_bits = 0; - for (size_class, &count) in size_class_counts.iter().enumerate() { - let bits_for_size_class = (1 << (size_class + 1)) - 1; - bitset_len_bits += bits_for_size_class * count; - } - let bitset_len = (bitset_len_bits + usize::BITS as usize - 1) / usize::BITS as usize; - - let free_list_sentinels_layout = Layout::new::<[FreeListNode; SIZE_CLASS_COUNT]>(); - let trees_layout = Layout::array::>(trees_len)?; - let bitset_layout = Layout::array::(bitset_len)?; - - let free_list_sentinels_offset = 0; - let (layout, trees_offset) = free_list_sentinels_layout.extend(trees_layout)?; - let (layout, bitset_offset) = layout.extend(bitset_layout)?; - - Ok(MetadataLayout { - layout, - free_list_sentinels_offset, - trees_offset, - trees_len, - bitset_offset, - bitset_len, - bitset_len_bits, - }) - } -} diff --git a/crates/buddy_allocator/src/tree.rs b/crates/buddy_allocator/src/tree.rs deleted file mode 100644 index 72ee466..0000000 --- a/crates/buddy_allocator/src/tree.rs +++ /dev/null @@ -1,100 +0,0 @@ -use crate::bitvec::{Bitset, SubregionStatus}; -use contracts::requires; -use core::ptr::NonNull; - -/// A single region of the allocator. See the comment on the `crate::allocators::buddy` module for -/// more information. -/// -/// This type is valid when zero-initialized. -#[derive(Debug)] -pub struct Tree<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> { - /// The base address of the tree. - pub base_ptr: Option>, - - /// The log2 of the number of pages in the region represented by the tree. 
- pub size_class: usize, - - /// The offset in the bitset to the bits responsible for this tree's pages. - pub bitset_offset: usize, - - phantom: core::marker::PhantomData<&'buddy mut [u8; PAGE_SIZE]>, -} - -impl<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> - Tree<'buddy, PAGE_SIZE, PAGE_SIZE_BITS> -{ - /// Returns the base address of the tree. - #[requires(self.base_ptr.is_some())] - pub fn base_addr(&self) -> usize { - self.base_ptr.unwrap().as_ptr() as usize - } - - /// Reads a bit from the bitset. - #[requires(size_class <= self.size_class)] - #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))] - #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)] - pub fn bitset_get( - &self, - bitset: &mut Bitset, - size_class: usize, - offset_bytes: usize, - ) -> SubregionStatus { - bitset.get(self.bitset_index(size_class, offset_bytes)) - } - - /// Returns the index of a bit in the bitset that corresponds to the given size class and - /// offset. - #[requires(size_class <= self.size_class)] - #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))] - #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)] - fn bitset_index(&self, size_class: usize, offset_bytes: usize) -> usize { - // We store the largest size classes first in the bitset. Count how many we are away from - // the largest. - let skipped_size_classes = self.size_class - size_class; - let bits_skipped_for_size_class = (1 << skipped_size_classes) - 1; - - // Next, find our index in the size class. - let bits_skipped_for_index = offset_bytes >> (PAGE_SIZE_BITS + size_class); - - // The sum of those two is simply our index. - bits_skipped_for_size_class + bits_skipped_for_index - } - - /// Changes a bit in the bitset from `InFreeList` to `NotInFreeList`. - #[requires(size_class <= self.size_class)] - #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))] - #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)] - pub fn bitset_mark_as_absent( - &self, - bitset: &mut Bitset, - size_class: usize, - offset_bytes: usize, - ) { - let index = self.bitset_index(size_class, offset_bytes); - assert_eq!(bitset.get(index), SubregionStatus::InFreeList); - bitset.set(index, SubregionStatus::NotInFreeList) - } - - /// Changes a bit in the bitset from `NotInFreeList` to `InFreeList`. - #[requires(size_class <= self.size_class)] - #[requires(offset_bytes <= (PAGE_SIZE << self.size_class))] - #[requires(offset_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS + size_class)] - pub fn bitset_mark_as_present( - &self, - bitset: &mut Bitset, - size_class: usize, - offset_bytes: usize, - ) { - let index = self.bitset_index(size_class, offset_bytes); - assert_eq!(bitset.get(index), SubregionStatus::NotInFreeList); - bitset.set(index, SubregionStatus::InFreeList) - } - - /// Returns whether the tree contains an address. 
- #[requires(self.base_ptr.is_some())] - pub fn contains(&self, addr: usize) -> bool { - let tree_addr_lo = self.base_addr(); - let tree_addr_hi = tree_addr_lo + (PAGE_SIZE << self.size_class); - (tree_addr_lo..tree_addr_hi).contains(&addr) - } -} diff --git a/crates/buddy_allocator/tests/hosted_test.rs b/crates/buddy_allocator/tests/hosted_test.rs deleted file mode 100644 index 0e502b3..0000000 --- a/crates/buddy_allocator/tests/hosted_test.rs +++ /dev/null @@ -1,279 +0,0 @@ -use core::{fmt, num::NonZero, slice}; -use nix::sys::mman::{mmap_anonymous, munmap, MapFlags, ProtFlags}; -use proptest::prelude::*; -use std::ptr::NonNull; -use vernos_buddy_allocator::BuddyAllocator; -use vernos_physmem_free_list::FreeListAllocator; - -proptest! { - #![proptest_config(proptest::test_runner::Config { - failure_persistence: None, - ..Default::default() - })] - - #[test] - fn test_scenario(scenario in Scenario::any()) { - scenario.run(true) - } -} - -#[test] -fn test_simple_scenario() { - let scenario = Scenario { - range_sizes: vec![7], - actions: vec![ - Action::Debug, - Action::Alloc { - sentinel_value: 0xaa, - size_class: 0, - }, - Action::Debug, - ], - }; - scenario.run(false) -} - -const PAGE_SIZE: usize = 64; -const PAGE_SIZE_BITS: usize = PAGE_SIZE.trailing_zeros() as usize; -const SIZE_CLASS_COUNT: usize = 4; - -/// A single action the property test should perform. -#[derive(Debug)] -enum Action { - /// Makes an allocation. - Alloc { - /// A sentinel value, which is expected to be present when the allocation is freed. - sentinel_value: u8, - - /// The size class, which is constrained to be less than `SIZE_CLASS_COUNT`. - size_class: usize, - }, - - /// Deallocates an allocation. - Dealloc { - /// The index of the allocation in the set of live allocations, which is taken to be a - /// circular list (so this is always in-bounds if there are any allocations). - index: usize, - }, - - /// Prints the allocator and all live allocations, for debugging purposes. This is never - /// automatically generated. - Debug, - - /// Overwrites an allocation, changing its sentinel. - Overwrite { - /// The index of the allocation in the set of live allocations, which is taken to be a - /// circular list (so this is always in-bounds if there are any allocations). - index: usize, - - /// The new sentinel value. - sentinel_value: u8, - }, -} - -impl Action { - /// Generates a random action. - pub fn any() -> impl Strategy { - prop_oneof![ - (any::(), 0..SIZE_CLASS_COUNT).prop_map(|(sentinel_value, size_class)| { - Action::Alloc { - sentinel_value, - size_class, - } - }), - any::().prop_map(|index| { Action::Dealloc { index } }), - (any::(), any::()).prop_map(|(index, sentinel_value)| { - Action::Overwrite { - index, - sentinel_value, - } - }) - ] - } - - /// Runs an action. 
- pub fn run( - &self, - buddy: &mut BuddyAllocator, - allocs: &mut Vec, - allow_errors: bool, - ) { - match *self { - Action::Alloc { - sentinel_value, - size_class, - } => match buddy.alloc(size_class) { - Ok(ptr) => unsafe { - let slice = slice::from_raw_parts_mut(ptr.as_ptr(), PAGE_SIZE << size_class); - slice.fill(sentinel_value); - allocs.push(Alloc { - slice, - sentinel_value, - }); - }, - Err(err) => { - if !allow_errors { - Err(err).expect("failed to perform alloc action") - } - } - }, - Action::Dealloc { index } => { - if allow_errors && allocs.is_empty() { - return; - } - - let index = index % allocs.len(); - let alloc = allocs.remove(index); - alloc.check_sentinel(); - unsafe { - buddy.dealloc( - NonNull::from(alloc.slice.as_mut()).cast(), - alloc.slice.len().trailing_zeros() as usize - PAGE_SIZE_BITS, - ); - } - } - Action::Debug => { - dbg!(buddy); - dbg!(allocs); - } - Action::Overwrite { - index, - sentinel_value, - } => { - if allow_errors && allocs.is_empty() { - return; - } - - let index = index % allocs.len(); - let alloc = &mut allocs[index]; - alloc.slice.fill(sentinel_value); - alloc.sentinel_value = sentinel_value; - } - } - } -} - -/// The entire series of actions to be performed. -#[derive(Debug)] -struct Scenario { - /// The sizes of the ranges the buddy allocator should be initialized with. - range_sizes: Vec, - - /// Actions to perform after initializing the buddy allocator. - actions: Vec, -} - -impl Scenario { - /// Generates a random scenario. - pub fn any() -> impl Strategy { - ( - prop::collection::vec(1usize..1 << (SIZE_CLASS_COUNT + 2), 0..4), - prop::collection::vec(Action::any(), 0..64), - ) - .prop_map(|(range_sizes, actions)| Scenario { - range_sizes, - actions, - }) - } - - /// Runs the scenario. - pub fn run(&self, allow_errors: bool) { - // Allocate each of the page ranges. - let backing_mem = self - .range_sizes - .iter() - .map(|&size| { - Mmap::new(NonZero::new(size * PAGE_SIZE).unwrap()) - .expect("failed to allocate memory") - }) - .collect::>(); - - // Create the free list allocator and move the pages there. - let mut free_list: FreeListAllocator = FreeListAllocator::new(); - for (&size, mmap) in self.range_sizes.iter().zip(backing_mem.iter()) { - unsafe { - free_list.add(mmap.ptr(), size); - } - } - - // Create the buddy allocator and move the pages from the free list to there. - match BuddyAllocator::::new(free_list) { - Ok(mut buddy) => { - let mut allocs = Vec::new(); - - // Run each of the actions. - for action in &self.actions { - action.run(&mut buddy, &mut allocs, allow_errors); - - // Check each allocation. - for alloc in &allocs { - alloc.check_sentinel(); - } - } - } - Err(err) => { - if !allow_errors { - Err(err).expect("failed to make buddy allocator") - } - } - } - } -} - -/// Information about an allocation we've made from the buddy allocator. -struct Alloc<'alloc> { - slice: &'alloc mut [u8], - sentinel_value: u8, -} - -impl<'alloc> Alloc<'alloc> { - pub fn check_sentinel(&self) { - let s = self.sentinel_value; - for (i, &b) in self.slice.iter().enumerate() { - assert_eq!(b, s, "at index {i}, {b} != {s}",); - } - } -} - -impl<'alloc> fmt::Debug for Alloc<'alloc> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("Alloc") - .field("ptr", &self.slice.as_ptr()) - .field("len", &self.slice.len()) - .field("sentinel_value", &self.sentinel_value) - .finish() - } -} - -/// A mmap-allocated region of memory. 
-struct Mmap { - ptr: NonNull, - len: NonZero, -} - -impl Mmap { - pub fn new(len: NonZero) -> Result { - let ptr = unsafe { - mmap_anonymous( - None, - len, - ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, - MapFlags::MAP_PRIVATE, - )? - .cast() - }; - Ok(Mmap { ptr, len }) - } - - pub fn ptr(&self) -> NonNull { - self.ptr.cast() - } -} - -impl Drop for Mmap { - fn drop(&mut self) { - unsafe { - munmap(self.ptr(), self.len.get()).expect("failed to free memory"); - } - } -} diff --git a/crates/physmem_free_list/Cargo.toml b/crates/physmem_free_list/Cargo.toml deleted file mode 100644 index c1aadd8..0000000 --- a/crates/physmem_free_list/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "vernos_physmem_free_list" -version = "0.1.0" -edition = "2021" -publish = false - -[dependencies] -allocator-api2 = { version = "0.2.18", default-features = false } -contracts = { version = "0.6.3", default-features = false } diff --git a/crates/physmem_free_list/src/lib.rs b/crates/physmem_free_list/src/lib.rs deleted file mode 100644 index f99b30f..0000000 --- a/crates/physmem_free_list/src/lib.rs +++ /dev/null @@ -1,389 +0,0 @@ -//! A free-list allocator for physical memory ranges. This is only used when bootstrapping the -//! buddy allocator, since it provides a way to store and manipulate physical memory ranges without -//! needing a working allocator. -#![no_std] - -use allocator_api2::alloc::{AllocError, Layout}; -use contracts::requires; -use core::{fmt, iter, marker::PhantomData, mem::size_of, ptr::NonNull, slice}; - -/// The free-list allocator. -pub struct FreeListAllocator<'allocator, const PAGE_SIZE: usize> { - head: Option>, -} - -impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE> { - /// Creates a new, empty allocator. - pub fn new() -> FreeListAllocator<'allocator, PAGE_SIZE> { - assert!(size_of::>() <= PAGE_SIZE); - - FreeListAllocator { head: None } - } - - /// Pushes a page range into the free list. - /// - /// # Safety - /// - /// - `ptr` must be a pointer that conveys ownership over the page range. - /// - The page range must be untyped memory. - /// - The page range must be aligned to `PAGE_SIZE`. - /// - The page range must live for `'allocator`. - /// - `len_pages` must be accurate. - pub unsafe fn add(&mut self, ptr: NonNull<[u8; PAGE_SIZE]>, len_pages: usize) { - self.head = Some(FreeListPtr::new(ptr, len_pages, self.head.take())); - } - - /// Allocates memory, *without necessarily removing it from the allocator*. - /// - /// The memory will only be removed from the allocator if there is a node from which no - /// additional pages would be able to be allocated into. If the node is not removed, the - /// returned pointer will not point into the first page, so it will not be altered by metadata - /// updates. - /// - /// Obviously, this is unsound to call while the previous allocation is alive. It is also - /// unsound to split any nodes after calling this, since splitting the node that was allocated - /// into might write the new node's metadata into that allocation. - pub unsafe fn allocate_without_always_removing_the_node( - &mut self, - layout: Layout, - ) -> Result, AllocError> { - assert!(PAGE_SIZE.is_power_of_two()); - let len_pages = (layout.size() + PAGE_SIZE - 1) >> PAGE_SIZE.trailing_zeros(); - - // Find the size of the smallest page range that would fit this. 
- let Some(selected_node_len) = self - .iter() - .map(|(_, range_len_pages)| range_len_pages) - .filter(|&range_len_pages| len_pages <= range_len_pages) - .min() - else { - return Err(AllocError); - }; - - if selected_node_len == len_pages { - // The less-scary case is when we're removing an entire node; we're just aiming to find - // it and unlink it. - - // If the node we're looking for is the first one, slightly different code is used to - // unlink it. - let (ptr, unlinked_len_pages) = if self - .head - .as_ref() - .map(|node| node.len_pages() == len_pages) - .unwrap() - { - // In that case, just unlink the node and hand it back. - // - // UNWRAP: The list can't have been empty or .min() above would've returned None. - self.pop().unwrap() - } else { - // Otherwise, we need to iterate through the list to find the node before it. - let mut next = self.head.as_mut(); - let mut node_before_node_to_remove = None; - while let Some(ptr) = next { - if let Some(next_header) = &ptr.header().next { - if next_header.len_pages() == selected_node_len { - node_before_node_to_remove = Some(ptr); - break; - } - } - next = ptr.next_mut(); - } - - // UNWRAP: We had to find the node in the first place, so it must be in the list. - node_before_node_to_remove.unwrap().unlink_next().unwrap() - }; - - assert_eq!(len_pages, unlinked_len_pages); - Ok(NonNull::slice_from_raw_parts( - ptr.cast(), - len_pages * PAGE_SIZE, - )) - } else { - // The scary case is when we're trying to give back memory that's part of a different - // node. First, we need to iterate through the list to find the node. - let mut next = self.head.as_mut(); - let mut node_to_steal_from = None; - while let Some(ptr) = next { - if ptr.len_pages() == selected_node_len { - node_to_steal_from = Some(ptr); - break; - } - next = ptr.next_mut(); - } - - // UNWRAP: We had to find the node in the first place, so it must be in the list. - let node_to_steal_from = node_to_steal_from.unwrap(); - - // Then, we can simply do the address arithmetic to get a pointer out with the right - // bounds. - // - // SAFETY: The node had sufficient length to add `selected_node_len`, so it must have - // sufficient length to add a lesser quantity. - let ptr = unsafe { node_to_steal_from.ptr().add(selected_node_len - len_pages) }; - - Ok(NonNull::slice_from_raw_parts( - ptr.cast(), - len_pages * PAGE_SIZE, - )) - } - } - - /// Returns an iterator over the page ranges in the free list, as pairs of `ptr` and - /// `len_pages`. The returned pointers should not be accessed. - pub fn iter(&self) -> impl '_ + Iterator, usize)> { - let mut next: Option<&_> = self.head.as_ref(); - iter::from_fn(move || match next { - Some(ptr) => { - let out = (ptr.ptr(), ptr.len_pages()); - next = ptr.next(); - Some(out) - } - None => None, - }) - } - - /// Pops the first page range from the allocator and returns it, along with its length in pages. - pub fn pop(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> { - let node = self.head.take()?; - let (ptr, header) = node.consume(); - self.head = header.next; - Some((ptr, header.len_pages)) - } - - /// Splits all the nodes until each page range is of a power-of-two size no larger than - /// `max_len_pages`. 
- pub fn split_to_powers_of_two_no_larger_than(&mut self, max_len_pages: usize) { - let mut next = self.head.as_mut(); - while let Some(ptr) = next { - while ptr.len_pages() > max_len_pages { - ptr.split(max_len_pages); - } - - while !ptr.len_pages().is_power_of_two() { - let log2_smaller_size = ptr.len_pages().trailing_zeros(); - let smaller_size = 1 << log2_smaller_size; - ptr.split(smaller_size); - } - - next = ptr.next_mut(); - } - } -} - -impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListAllocator<'allocator, PAGE_SIZE> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut next: Option<&_> = self.head.as_ref(); - fmt.debug_list() - .entries(iter::from_fn(|| match next { - Some(ptr) => { - next = ptr.next(); - Some(ptr) - } - None => None, - })) - .finish() - } -} - -/// A pointer to a page range, which start with a header in the first page. -/// -/// # Safety -/// -/// - `ptr` must be a pointer that conveys ownership over the page range. -/// - The page range must be untyped memory. -/// - The page range must be non-empty. -/// - The first page must have a correct header written to it. -/// - The page range must live for `'allocator`. -struct FreeListPtr<'allocator, const PAGE_SIZE: usize> { - ptr: NonNull>, - _phantom: PhantomData<&'allocator FreeListNodeHeader<'allocator, PAGE_SIZE>>, -} - -impl<'allocator, const PAGE_SIZE: usize> FreeListPtr<'allocator, PAGE_SIZE> { - /// Initialize the given page range as a node in the free list. - /// - /// # Safety - /// - /// - `ptr` must be a pointer that conveys ownership over the page range. - /// - The page range must be untyped memory. - /// - The page range must be aligned to `PAGE_SIZE`. - /// - The page range must live for `'allocator`. - /// - `len_pages` must be accurate. - #[requires(len_pages > 0)] - pub unsafe fn new( - ptr: NonNull<[u8; PAGE_SIZE]>, - len_pages: usize, - next: Option>, - ) -> FreeListPtr<'allocator, PAGE_SIZE> { - assert!(size_of::>() <= PAGE_SIZE); - - let ptr: NonNull> = ptr.cast(); - // SAFETY: The safety invariants plus the assertion guarantee this write is valid. - unsafe { - ptr.write(FreeListNodeHeader { next, len_pages }); - } - FreeListPtr { - ptr, - _phantom: PhantomData, - } - } - - /// Consumes the node, returning the pointer and header. - pub fn consume( - self, - ) -> ( - NonNull<[u8; PAGE_SIZE]>, - FreeListNodeHeader<'allocator, PAGE_SIZE>, - ) { - // SAFETY: The invariants of the type ensure that the header is present and owned by us. - let header = unsafe { self.ptr.read() }; - (self.ptr.cast(), header) - } - - /// Returns a reference to the header of the page range. - fn header(&self) -> &FreeListNodeHeader<'allocator, PAGE_SIZE> { - // SAFETY: The invariants of the type ensure that the header is present and owned by us. - unsafe { self.ptr.as_ref() } - } - - /// Returns an exclusive reference to the header of the page range. - /// - /// # Safety - /// - /// The invariants of the type must be upheld. - unsafe fn header_mut(&mut self) -> &mut FreeListNodeHeader<'allocator, PAGE_SIZE> { - // SAFETY: The invariants of the type ensure that the header is present and owned by us. - unsafe { self.ptr.as_mut() } - } - - /// Returns the length of the node in pages. - pub fn len_pages(&self) -> usize { - self.header().len_pages - } - - /// Returns a shared reference to the next node in the free list. Returns `None` when there - /// are no further nodes. 
- pub fn next(&self) -> Option<&FreeListPtr<'allocator, PAGE_SIZE>> { - self.header().next.as_ref() - } - - /// Returns an exclusive reference to the next node in the free list. Returns `None` when there - /// are no further nodes. - pub fn next_mut(&mut self) -> Option<&mut FreeListPtr<'allocator, PAGE_SIZE>> { - // SAFETY: We just lend out the next pointer, which can't violate our invariants. - let header = unsafe { self.header_mut() }; - header.next.as_mut() - } - - /// Returns the underlying pointer. Using this may violate invariants. - pub fn ptr(&self) -> NonNull<[u8; PAGE_SIZE]> { - self.ptr.cast() - } - - /// Splits the page range in two, with the second having the given length in pages. - #[requires(split_len < self.len_pages())] - pub fn split(&mut self, split_len: usize) { - // Get the old header. After this point, `self.ptr` is considered uninitialized, since - // we've moved out of it, so we can't panic. - // - // SAFETY: The invariants of the type ensure that the header is present and owned by us. - let old_header = unsafe { self.ptr.read() }; - - // We call the two new nodes `a` and `b`. - let a_len = old_header.len_pages - split_len; - let b_len = split_len; - // SAFETY: The invariants of the type ensure this is still in-bounds, since - // the header must have been valid and `a_len <= old_header.len_pages`. - let b_ptr: NonNull<[u8; PAGE_SIZE]> = unsafe { self.ptr().add(a_len) }; - - // Construct a new node for `b`. - // - // SAFETY: The safety invariants are simply inherited from the memory being valid in the - // first place, except for the `len_pages` invariant. This is upheld because the length - // math is correct. - // - // The assertion panic in `FreeListPtr::new()` must be unreachable, or it would have been - // triggered when constructing this type in the first place. - let b = unsafe { FreeListPtr::new(b_ptr, b_len, old_header.next) }; - - // Reinitialize the header for `a`. After this point, `self.ptr` is considered initialized - // again, so we may panic again. - // - // SAFETY: The safety invariants are simply inherited from the memory being valid in the - // first place, except for the `len_pages` invariant. This is upheld because the length - // math is correct. - unsafe { - self.ptr.write(FreeListNodeHeader { - next: Some(b), - len_pages: a_len, - }); - } - } - - /// Unlinks and returns the _next_ node, returning its pointer and length in pages. - pub fn unlink_next(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> { - // SAFETY: We uphold the invariants. - let header = unsafe { self.header_mut() }; - let next = header.next.take()?; - let (next_ptr, next_header) = next.consume(); - header.next = next_header.next; - Some((next_ptr, next_header.len_pages)) - } -} - -impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListPtr<'allocator, PAGE_SIZE> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "[{:p} × {}]", self.ptr, self.len_pages()) - } -} - -#[derive(Debug)] -struct FreeListNodeHeader<'allocator, const PAGE_SIZE: usize> { - /// The next node in the linked list. - next: Option>, - - /// The length of the allocation in pages. - len_pages: usize, -} - -/// Returns an iterator over power-of-two-sized subslices of this slice, which are returned from -/// smallest to largest. -pub fn power_of_two_subslices_mut(slice: &mut [T]) -> impl Iterator { - // Decompose the slice to its raw parts. Yep, we're gonna be doing unsafe stuff. 
- let mut ptr = slice.as_mut_ptr();
- let mut len = slice.len();
-
- iter::from_fn(move || {
- if len == 0 {
- None
- } else {
- // Make the slice to return.
- //
- // SAFETY: The pointer and length must be valid, since we got them from the valid
- // slice. Before returning `out`, we advance them, so that we never give out multiple
- // exclusive references to the same subslice.
- let out_len = 1 << len.trailing_zeros();
- let out = unsafe { slice::from_raw_parts_mut(ptr, out_len) };
-
- // Advance the pointer and length.
- //
- // SAFETY: out_len < len, and we got len from an existing slice.
- ptr = unsafe { ptr.add(out_len) };
- len -= out_len;
-
- Some(out)
- }
- })
-}
-
-#[test]
-fn power_of_two_subslices_mut_test() {
- extern crate std;
- use std::vec::Vec;
-
- let mut data = [0, 1, 2, 3, 4, 5, 6, 7, 8];
- let subslices = power_of_two_subslices_mut(&mut data).collect::<Vec<_>>();
-
- assert_eq!(subslices, &[&[0] as &[_], &[1, 2, 3, 4, 5, 6, 7, 8]]);
-}
-- cgit v1.2.3
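
As a cross-check on the arithmetic the moved code relies on, a few self-contained sketches follow. They assume the constants used by the hosted test (PAGE_SIZE = 64, so PAGE_SIZE_BITS = 6), and every helper name in them is hypothetical rather than part of the crates. The first models the decomposition shared by `power_of_two_subslices_mut` and `split_to_powers_of_two_no_larger_than` (ignoring the `max_len_pages` cap): a length is peeled apart into its binary set bits, smallest first, via `trailing_zeros`.

    fn power_of_two_chunks(mut len: usize) -> Vec<usize> {
        let mut chunks = Vec::new();
        while len != 0 {
            // 1 << trailing_zeros(len) is the lowest set bit of `len`, i.e. the
            // largest power of two dividing it.
            let chunk = 1 << len.trailing_zeros();
            chunks.push(chunk);
            len -= chunk;
        }
        chunks
    }

    fn main() {
        // A 7-page free-list node ends up split into pieces of 1, 2 and 4 pages;
        // the 9-element slice in the deleted test splits into subslices of 1 and 8.
        assert_eq!(power_of_two_chunks(7), vec![1, 2, 4]);
        assert_eq!(power_of_two_chunks(9), vec![1, 8]);
        assert_eq!(power_of_two_chunks(12), vec![4, 8]);
    }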
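
The merge step sketched in the `lib.rs` module comment and begun in `dealloc` hinges on one identity: a subregion and its buddy differ only in the `PAGE_SIZE << size_class` bit of their offset within the tree, so XOR-ing with that bit maps each onto the other. A minimal sketch with the assumed 64-byte page; `buddy_offset_of` is a made-up helper mirroring the `offset ^ (PAGE_SIZE << size_class)` expression from the patch.

    const PAGE_SIZE: usize = 64; // page size assumed from the hosted test

    fn buddy_offset_of(offset: usize, size_class: usize) -> usize {
        // Mirrors the expression computed in `BuddyAllocator::dealloc`.
        offset ^ (PAGE_SIZE << size_class)
    }

    fn main() {
        // Two size-class-0 pages that share a size-class-1 parent are buddies...
        assert_eq!(buddy_offset_of(0, 0), 64);
        assert_eq!(buddy_offset_of(64, 0), 0);
        // ...and the two size-class-1 halves of a size-class-2 region likewise.
        assert_eq!(buddy_offset_of(0, 1), 128);
        assert_eq!(buddy_offset_of(128, 1), 0);
    }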
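
`Tree::bitset_index` packs a tree's bits with the largest size class first, so a tree of size class k owns 2^(k+1) - 1 bits, which is also what `MetadataLayout::new` reserves per range. The same arithmetic can be replayed outside the allocator; `bitset_index` below is a standalone copy of that expression, again assuming 64-byte pages.

    const PAGE_SIZE_BITS: usize = 6; // log2 of the 64-byte page assumed above

    fn bitset_index(tree_size_class: usize, size_class: usize, offset_bytes: usize) -> usize {
        // Skip (1 << skipped) - 1 bits for the larger size classes, then index
        // within the requested size class by the page-aligned byte offset.
        let skipped_size_classes = tree_size_class - size_class;
        let bits_skipped_for_size_class = (1 << skipped_size_classes) - 1;
        let bits_skipped_for_index = offset_bytes >> (PAGE_SIZE_BITS + size_class);
        bits_skipped_for_size_class + bits_skipped_for_index
    }

    fn main() {
        // A tree of size class 2 covers four pages and owns 2^3 - 1 = 7 bits.
        assert_eq!(bitset_index(2, 2, 0), 0); // the whole four-page region
        assert_eq!(bitset_index(2, 1, 0), 1); // first two-page half
        assert_eq!(bitset_index(2, 1, 128), 2); // second two-page half
        assert_eq!(bitset_index(2, 0, 0), 3); // first page
        assert_eq!(bitset_index(2, 0, 192), 6); // fourth (last) page
    }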
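
`Bitset::get` and `Bitset::set` address bit i as word i / usize::BITS, bit i % usize::BITS. A safe stand-in over a plain slice of words, not the unsized newtype from the patch, shows the same word/one-hot arithmetic.

    fn set_bit(words: &mut [usize], i: usize, value: bool) {
        let word_index = i / usize::BITS as usize;
        let subword_index = i % usize::BITS as usize;
        let one_hot = 1 << subword_index;
        if value {
            words[word_index] |= one_hot;
        } else {
            words[word_index] &= !one_hot;
        }
    }

    fn get_bit(words: &[usize], i: usize) -> bool {
        let word_index = i / usize::BITS as usize;
        let subword_index = i % usize::BITS as usize;
        (words[word_index] >> subword_index) & 1 != 0
    }

    fn main() {
        let mut words = vec![0usize; 2]; // room for 2 * usize::BITS subregions
        set_bit(&mut words, 70, true); // on a 64-bit target: word 1, bit 6
        assert!(get_bit(&words, 70));
        assert!(!get_bit(&words, 6));
        set_bit(&mut words, 70, false);
        assert!(!get_bit(&words, 70));
    }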
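
Finally, the `FreeList` invariants (a self-linked sentinel, emptiness tested by whether the sentinel's `next` points back at the sentinel) can be modelled in safe Rust with indices standing in for `NonNull<FreeListNode>`. This only illustrates the linking discipline; it has none of the magic numbers or `self_addr` bookkeeping of the real nodes.

    struct Node { prev: usize, next: usize }

    struct List { nodes: Vec<Node> }

    impl List {
        fn new() -> List {
            // The sentinel (slot 0) starts self-linked, as in `FreeListNode::init_sentinel`.
            List { nodes: vec![Node { prev: 0, next: 0 }] }
        }

        fn is_empty(&self) -> bool {
            // Mirrors `FreeList::is_empty`: empty when the sentinel links to itself.
            self.nodes[0].next == 0
        }

        fn push_front(&mut self) -> usize {
            // Splice a new node in right after the sentinel, as `FreeList::push` does.
            let new = self.nodes.len();
            let next = self.nodes[0].next;
            self.nodes.push(Node { prev: 0, next });
            self.nodes[next].prev = new;
            self.nodes[0].next = new;
            new
        }
    }

    fn main() {
        let mut list = List::new();
        assert!(list.is_empty());
        let a = list.push_front();
        assert!(!list.is_empty());
        assert_eq!(list.nodes[0].next, a);
        assert_eq!(list.nodes[0].prev, a);
        assert_eq!(list.nodes[a].next, 0); // still circular: `a` links back to the sentinel
    }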