Diffstat (limited to 'crates/alloc_physmem_free_list/src')
-rw-r--r-- | crates/alloc_physmem_free_list/src/lib.rs | 389
1 file changed, 389 insertions, 0 deletions
diff --git a/crates/alloc_physmem_free_list/src/lib.rs b/crates/alloc_physmem_free_list/src/lib.rs
new file mode 100644
index 0000000..f99b30f
--- /dev/null
+++ b/crates/alloc_physmem_free_list/src/lib.rs
@@ -0,0 +1,389 @@
+//! A free-list allocator for physical memory ranges. This is only used when bootstrapping the
+//! buddy allocator, since it provides a way to store and manipulate physical memory ranges
+//! without needing a working allocator.
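+//!
+//! A minimal usage sketch (illustrative only, not compiled as a doctest; the 4096-byte page
+//! size and the names `base`, `len_pages`, and `MAX_ORDER` are assumed example values):
+//!
+//! ```ignore
+//! let mut free_list = FreeListAllocator::<4096>::new();
+//! // SAFETY: `base` must own `len_pages` page-aligned pages of untyped memory that
+//! // live long enough (e.g., taken from the bootloader's memory map).
+//! unsafe { free_list.add(base, len_pages) };
+//! free_list.split_to_powers_of_two_no_larger_than(1 << MAX_ORDER);
+//! while let Some((ptr, len_pages)) = free_list.pop() {
+//!     // Hand `ptr` and `len_pages` over to the buddy allocator being bootstrapped.
+//! }
+//! ```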
+#![no_std]
+
+use allocator_api2::alloc::{AllocError, Layout};
+use contracts::requires;
+use core::{fmt, iter, marker::PhantomData, mem::size_of, ptr::NonNull, slice};
+
+/// The free-list allocator.
+pub struct FreeListAllocator<'allocator, const PAGE_SIZE: usize> {
+    head: Option<FreeListPtr<'allocator, PAGE_SIZE>>,
+}
+
+impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE> {
+    /// Creates a new, empty allocator.
+    pub fn new() -> FreeListAllocator<'allocator, PAGE_SIZE> {
+        assert!(size_of::<FreeListNodeHeader<'allocator, PAGE_SIZE>>() <= PAGE_SIZE);
+
+        FreeListAllocator { head: None }
+    }
+
+    /// Pushes a page range into the free list.
+    ///
+    /// # Safety
+    ///
+    /// - `ptr` must be a pointer that conveys ownership over the page range.
+    /// - The page range must be untyped memory.
+    /// - The page range must be aligned to `PAGE_SIZE`.
+    /// - The page range must live for `'allocator`.
+    /// - `len_pages` must be accurate.
+    pub unsafe fn add(&mut self, ptr: NonNull<[u8; PAGE_SIZE]>, len_pages: usize) {
+        self.head = Some(FreeListPtr::new(ptr, len_pages, self.head.take()));
+    }
+
+    /// Allocates memory, *without necessarily removing it from the allocator*.
+    ///
+    /// The memory is only removed from the allocator when the allocation exactly uses up a
+    /// node, in which case that node is unlinked. Otherwise, the pages are taken from the end
+    /// of a larger node that stays in the list; the returned pointer then never points into
+    /// that node's first page, so the allocation will not be clobbered by metadata updates.
+    ///
+    /// # Safety
+    ///
+    /// - This must not be called while an allocation it previously returned is still alive.
+    /// - No node may be split after calling this, since splitting the node that was allocated
+    ///   from might write the new node's metadata into the allocation.
+    pub unsafe fn allocate_without_always_removing_the_node(
+        &mut self,
+        layout: Layout,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        assert!(PAGE_SIZE.is_power_of_two());
+        let len_pages = (layout.size() + PAGE_SIZE - 1) >> PAGE_SIZE.trailing_zeros();
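+        // This rounds the size up to whole pages (ceiling division): e.g., with an assumed
+        // `PAGE_SIZE` of 4096, a 5000-byte layout needs `(5000 + 4095) >> 12`, i.e. 2 pages.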
+
+        // Find the size of the smallest page range that would fit this.
+        let Some(selected_node_len) = self
+            .iter()
+            .map(|(_, range_len_pages)| range_len_pages)
+            .filter(|&range_len_pages| len_pages <= range_len_pages)
+            .min()
+        else {
+            return Err(AllocError);
+        };
+
+        if selected_node_len == len_pages {
+            // The less-scary case is when we're removing an entire node; we're just aiming to
+            // find it and unlink it.
+
+            // If the node we're looking for is the first one, slightly different code is used
+            // to unlink it.
+            let (ptr, unlinked_len_pages) = if self
+                .head
+                .as_ref()
+                .map(|node| node.len_pages() == len_pages)
+                .unwrap()
+            {
+                // In that case, just unlink the node and hand it back.
+                //
+                // UNWRAP: The list can't have been empty, or `.min()` above would've returned
+                // `None`.
+                self.pop().unwrap()
+            } else {
+                // Otherwise, we need to iterate through the list to find the node before it.
+                let mut next = self.head.as_mut();
+                let mut node_before_node_to_remove = None;
+                while let Some(ptr) = next {
+                    if let Some(next_node) = &ptr.header().next {
+                        if next_node.len_pages() == selected_node_len {
+                            node_before_node_to_remove = Some(ptr);
+                            break;
+                        }
+                    }
+                    next = ptr.next_mut();
+                }
+
+                // UNWRAP: We had to find the node in the first place, so it must be in the list.
+                node_before_node_to_remove.unwrap().unlink_next().unwrap()
+            };
+
+            assert_eq!(len_pages, unlinked_len_pages);
+            Ok(NonNull::slice_from_raw_parts(
+                ptr.cast(),
+                len_pages * PAGE_SIZE,
+            ))
+        } else {
+            // The scary case is when we're handing out memory that's still part of a larger
+            // node. First, we need to iterate through the list to find the node.
+            let mut next = self.head.as_mut();
+            let mut node_to_steal_from = None;
+            while let Some(ptr) = next {
+                if ptr.len_pages() == selected_node_len {
+                    node_to_steal_from = Some(ptr);
+                    break;
+                }
+                next = ptr.next_mut();
+            }
+
+            // UNWRAP: We had to find the node in the first place, so it must be in the list.
+            let node_to_steal_from = node_to_steal_from.unwrap();
+
+            // Then, we can simply do the address arithmetic to get a pointer with the right
+            // bounds.
+            //
+            // SAFETY: The node is `selected_node_len` pages long, so offsetting by
+            // `selected_node_len - len_pages` pages stays in-bounds and leaves exactly
+            // `len_pages` pages after the returned pointer.
+            let ptr = unsafe { node_to_steal_from.ptr().add(selected_node_len - len_pages) };
+
+            Ok(NonNull::slice_from_raw_parts(
+                ptr.cast(),
+                len_pages * PAGE_SIZE,
+            ))
+        }
+    }
+
+    /// Returns an iterator over the page ranges in the free list, as pairs of `ptr` and
+    /// `len_pages`. The returned pointers should not be accessed.
+    pub fn iter(&self) -> impl '_ + Iterator<Item = (NonNull<[u8; PAGE_SIZE]>, usize)> {
+        let mut next: Option<&_> = self.head.as_ref();
+        iter::from_fn(move || match next {
+            Some(ptr) => {
+                let out = (ptr.ptr(), ptr.len_pages());
+                next = ptr.next();
+                Some(out)
+            }
+            None => None,
+        })
+    }
+
+    /// Pops the first page range from the allocator and returns it, along with its length in
+    /// pages.
+    pub fn pop(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> {
+        let node = self.head.take()?;
+        let (ptr, header) = node.consume();
+        self.head = header.next;
+        Some((ptr, header.len_pages))
+    }
+
+    /// Splits all the nodes until each page range is of a power-of-two size no larger than
+    /// `max_len_pages`.
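+    ///
+    /// For example (an assumed starting state, traced through the splitting loops below): a
+    /// single 13-page range with `max_len_pages = 8` first has 8 pages split off its end, and
+    /// the remaining 5 pages are then split into 4 and 1, leaving power-of-two ranges of 4, 1,
+    /// and 8 pages in list order.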
+    pub fn split_to_powers_of_two_no_larger_than(&mut self, max_len_pages: usize) {
+        let mut next = self.head.as_mut();
+        while let Some(ptr) = next {
+            while ptr.len_pages() > max_len_pages {
+                ptr.split(max_len_pages);
+            }
+
+            while !ptr.len_pages().is_power_of_two() {
+                let log2_smaller_size = ptr.len_pages().trailing_zeros();
+                let smaller_size = 1 << log2_smaller_size;
+                ptr.split(smaller_size);
+            }
+
+            next = ptr.next_mut();
+        }
+    }
+}
+
+impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListAllocator<'allocator, PAGE_SIZE> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        let mut next: Option<&_> = self.head.as_ref();
+        fmt.debug_list()
+            .entries(iter::from_fn(|| match next {
+                Some(ptr) => {
+                    next = ptr.next();
+                    Some(ptr)
+                }
+                None => None,
+            }))
+            .finish()
+    }
+}
+
+/// A pointer to a page range, which starts with a header in the first page.
+///
+/// # Safety
+///
+/// - `ptr` must be a pointer that conveys ownership over the page range.
+/// - The page range must be untyped memory.
+/// - The page range must be non-empty.
+/// - The first page must have a correct header written to it.
+/// - The page range must live for `'allocator`.
+struct FreeListPtr<'allocator, const PAGE_SIZE: usize> {
+    ptr: NonNull<FreeListNodeHeader<'allocator, PAGE_SIZE>>,
+    _phantom: PhantomData<&'allocator FreeListNodeHeader<'allocator, PAGE_SIZE>>,
+}
+
+impl<'allocator, const PAGE_SIZE: usize> FreeListPtr<'allocator, PAGE_SIZE> {
+    /// Initializes the given page range as a node in the free list.
+    ///
+    /// # Safety
+    ///
+    /// - `ptr` must be a pointer that conveys ownership over the page range.
+    /// - The page range must be untyped memory.
+    /// - The page range must be aligned to `PAGE_SIZE`.
+    /// - The page range must live for `'allocator`.
+    /// - `len_pages` must be accurate.
+    #[requires(len_pages > 0)]
+    pub unsafe fn new(
+        ptr: NonNull<[u8; PAGE_SIZE]>,
+        len_pages: usize,
+        next: Option<FreeListPtr<'allocator, PAGE_SIZE>>,
+    ) -> FreeListPtr<'allocator, PAGE_SIZE> {
+        assert!(size_of::<FreeListNodeHeader<'allocator, PAGE_SIZE>>() <= PAGE_SIZE);
+
+        let ptr: NonNull<FreeListNodeHeader<'allocator, PAGE_SIZE>> = ptr.cast();
+        // SAFETY: The safety invariants plus the assertion guarantee this write is valid.
+        unsafe {
+            ptr.write(FreeListNodeHeader { next, len_pages });
+        }
+        FreeListPtr {
+            ptr,
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Consumes the node, returning the pointer and header.
+    pub fn consume(
+        self,
+    ) -> (
+        NonNull<[u8; PAGE_SIZE]>,
+        FreeListNodeHeader<'allocator, PAGE_SIZE>,
+    ) {
+        // SAFETY: The invariants of the type ensure that the header is present and owned by us.
+        let header = unsafe { self.ptr.read() };
+        (self.ptr.cast(), header)
+    }
+
+    /// Returns a reference to the header of the page range.
+    fn header(&self) -> &FreeListNodeHeader<'allocator, PAGE_SIZE> {
+        // SAFETY: The invariants of the type ensure that the header is present and owned by us.
+        unsafe { self.ptr.as_ref() }
+    }
+
+    /// Returns an exclusive reference to the header of the page range.
+    ///
+    /// # Safety
+    ///
+    /// The invariants of the type must be upheld.
+    unsafe fn header_mut(&mut self) -> &mut FreeListNodeHeader<'allocator, PAGE_SIZE> {
+        // SAFETY: The invariants of the type ensure that the header is present and owned by us.
+        unsafe { self.ptr.as_mut() }
+    }
+
+    /// Returns the length of the node in pages.
+    pub fn len_pages(&self) -> usize {
+        self.header().len_pages
+    }
+
+    /// Returns a shared reference to the next node in the free list. Returns `None` when there
+    /// are no further nodes.
+    pub fn next(&self) -> Option<&FreeListPtr<'allocator, PAGE_SIZE>> {
+        self.header().next.as_ref()
+    }
+
+    /// Returns an exclusive reference to the next node in the free list. Returns `None` when
+    /// there are no further nodes.
+    pub fn next_mut(&mut self) -> Option<&mut FreeListPtr<'allocator, PAGE_SIZE>> {
+        // SAFETY: We just lend out the next pointer, which can't violate our invariants.
+        let header = unsafe { self.header_mut() };
+        header.next.as_mut()
+    }
+
+    /// Returns the underlying pointer. Misusing it may violate the invariants of this type.
+    pub fn ptr(&self) -> NonNull<[u8; PAGE_SIZE]> {
+        self.ptr.cast()
+    }
+
+    /// Splits the page range in two, with the second node having the given length in pages.
+    #[requires(0 < split_len && split_len < self.len_pages())]
+    pub fn split(&mut self, split_len: usize) {
+        // Get the old header. After this point, `self.ptr` is considered uninitialized, since
+        // we've moved out of it, so we can't panic.
+        //
+        // SAFETY: The invariants of the type ensure that the header is present and owned by us.
+        let old_header = unsafe { self.ptr.read() };
+
+        // We call the two new nodes `a` and `b`.
+        let a_len = old_header.len_pages - split_len;
+        let b_len = split_len;
+        // SAFETY: The invariants of the type ensure this is still in-bounds, since the header
+        // must have been valid and `a_len < old_header.len_pages`.
+        let b_ptr: NonNull<[u8; PAGE_SIZE]> = unsafe { self.ptr().add(a_len) };
+
+        // Construct a new node for `b`.
+        //
+        // SAFETY: The safety invariants are simply inherited from the memory being valid in the
+        // first place, except for the `len_pages` invariant. This is upheld because the length
+        // math is correct.
+        //
+        // The assertion panic in `FreeListPtr::new()` must be unreachable, or it would have
+        // been triggered when constructing this type in the first place.
+        let b = unsafe { FreeListPtr::new(b_ptr, b_len, old_header.next) };
+
+        // Reinitialize the header for `a`. After this point, `self.ptr` is considered
+        // initialized again, so we may panic again.
+        //
+        // SAFETY: The safety invariants are simply inherited from the memory being valid in the
+        // first place, except for the `len_pages` invariant. This is upheld because the length
+        // math is correct.
+        unsafe {
+            self.ptr.write(FreeListNodeHeader {
+                next: Some(b),
+                len_pages: a_len,
+            });
+        }
+    }
+
+    /// Unlinks and returns the _next_ node, returning its pointer and length in pages.
+    pub fn unlink_next(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> {
+        // SAFETY: We uphold the invariants.
+        let header = unsafe { self.header_mut() };
+        let next = header.next.take()?;
+        let (next_ptr, next_header) = next.consume();
+        header.next = next_header.next;
+        Some((next_ptr, next_header.len_pages))
+    }
+}
+
+impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListPtr<'allocator, PAGE_SIZE> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "[{:p} × {}]", self.ptr, self.len_pages())
+    }
+}
+
+#[derive(Debug)]
+struct FreeListNodeHeader<'allocator, const PAGE_SIZE: usize> {
+    /// The next node in the linked list.
+    next: Option<FreeListPtr<'allocator, PAGE_SIZE>>,
+
+    /// The length of the allocation in pages.
+    len_pages: usize,
+}
+
+/// Returns an iterator over power-of-two-sized subslices of this slice, which are returned from
+/// smallest to largest.
+pub fn power_of_two_subslices_mut<T>(slice: &mut [T]) -> impl Iterator<Item = &mut [T]> {
+    // Decompose the slice into its raw parts. Yep, we're gonna be doing unsafe stuff.
+    let mut ptr = slice.as_mut_ptr();
+    let mut len = slice.len();
+
+    iter::from_fn(move || {
+        if len == 0 {
+            None
+        } else {
+            // Make the slice to return.
+            //
+            // SAFETY: The pointer and length must be valid, since we got them from the valid
+            // slice. Before returning `out`, we advance them, so that we never give out
+            // multiple exclusive references to the same subslice.
+            let out_len = 1 << len.trailing_zeros();
+            let out = unsafe { slice::from_raw_parts_mut(ptr, out_len) };
+
+            // Advance the pointer and length.
+            //
+            // SAFETY: `out_len <= len`, and we got `len` from an existing slice.
+            ptr = unsafe { ptr.add(out_len) };
+            len -= out_len;
+
+            Some(out)
+        }
+    })
+}
+
+#[test]
+fn power_of_two_subslices_mut_test() {
+    extern crate std;
+    use std::vec::Vec;
+
+    let mut data = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+    let subslices = power_of_two_subslices_mut(&mut data).collect::<Vec<_>>();
+
+    assert_eq!(subslices, &[&[0] as &[_], &[1, 2, 3, 4, 5, 6, 7, 8]]);
+}
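+
+// A second, illustrative check (a sketch mirroring the test above, not exhaustive): a slice
+// whose length is already a power of two should come back as a single subslice.
+#[test]
+fn power_of_two_subslices_mut_power_of_two_len_test() {
+    extern crate std;
+    use std::vec::Vec;
+
+    let mut data = [0, 1, 2, 3];
+    let subslices = power_of_two_subslices_mut(&mut data).collect::<Vec<_>>();
+
+    assert_eq!(subslices, &[&[0, 1, 2, 3] as &[_]]);
+}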