-rw-r--r--  crates/buddy_allocator/src/free_list.rs     |  69
-rw-r--r--  crates/buddy_allocator/src/lib.rs           | 144
-rw-r--r--  crates/buddy_allocator/src/tree.rs          |   6
-rw-r--r--  crates/buddy_allocator/tests/hosted_test.rs |  16
-rw-r--r--  crates/physmem_free_list/src/lib.rs         | 153
-rw-r--r--  crates/utils/src/pin.rs                     |   7
6 files changed, 281 insertions, 114 deletions
diff --git a/crates/buddy_allocator/src/free_list.rs b/crates/buddy_allocator/src/free_list.rs
index 5a633c3..2e3afe2 100644
--- a/crates/buddy_allocator/src/free_list.rs
+++ b/crates/buddy_allocator/src/free_list.rs
@@ -1,6 +1,11 @@
//! A doubly-linked list of power-of-two-sized subregions.
-use core::{marker::PhantomPinned, pin::Pin, ptr};
+use core::{
+ fmt,
+ marker::PhantomPinned,
+ pin::Pin,
+ ptr::{self, NonNull},
+};
/// The magic number a non-sentinel node should have.
const FREE_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"FREENODE");
@@ -11,21 +16,51 @@ const SENTINEL_MAGIC: u64 = u64::from_ne_bytes(*b"SENTINEL");
/// A doubly-linked list of power-of-two-sized subregions.
///
/// This type is valid when zero-initialized.
-#[derive(Debug)]
+///
+/// # Safety
+///
+/// - This type is valid only if it is zero-initialized or if it contains a valid doubly-linked
+/// list of subregions of the correct size, which it owns.
pub struct FreeList {
sentinel: FreeListNode,
}
impl FreeList {
+ /// Pushes a subregion into the free-list.
+ ///
+ /// # Safety
+ ///
+ /// - `subregion_ptr` must convey ownership over untyped memory of the correct size.
+ pub unsafe fn push(mut self: Pin<&mut FreeList>, subregion_ptr: NonNull<u8>) {
+ assert_ne!(self.sentinel.next, ptr::null_mut());
+ assert_ne!(self.sentinel.prev, ptr::null_mut());
+
+ // Write the header the node should have to the node.
+ let sentinel = &self.sentinel as *const FreeListNode as *mut FreeListNode;
+ let next = *self.as_mut().sentinel().next_mut();
+ let node = subregion_ptr.cast();
+ node.write(FreeListNode {
+ next,
+ prev: sentinel,
+ magic: FREE_NODE_MAGIC,
+ _phantom: PhantomPinned,
+ });
+
+ // Link the node into the list.
+ *self.sentinel().next_mut() = node.as_ptr();
+ (*next).prev = node.as_ptr();
+ }
+
/// Performs self-linking. This must be done before the free list is used in any other way.
- pub fn self_link(mut self: Pin<&mut FreeList>) {
+ pub fn self_link(self: Pin<&mut FreeList>) {
assert_eq!(self.sentinel.next, ptr::null_mut());
assert_eq!(self.sentinel.prev, ptr::null_mut());
- let self_ptr = &self.sentinel as *const FreeListNode as *mut FreeListNode;
let mut sentinel = self.sentinel_unchecked();
- *sentinel.as_mut().next_mut() = self_ptr;
- *sentinel.as_mut().prev_mut() = self_ptr;
+ // SAFETY: We do not use this pointer to move out of the sentinel.
+ let sentinel_ptr = unsafe { sentinel.as_mut().get_unchecked_mut() as *mut FreeListNode };
+ *sentinel.as_mut().next_mut() = sentinel_ptr;
+ *sentinel.as_mut().prev_mut() = sentinel_ptr;
*sentinel.as_mut().magic_mut() = SENTINEL_MAGIC;
}
@@ -43,6 +78,28 @@ impl FreeList {
}
}
+/// This impl will panic until the sentinel has been self-linked.
+impl fmt::Debug for FreeList {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ assert_ne!(self.sentinel.next, ptr::null_mut());
+ assert_ne!(self.sentinel.prev, ptr::null_mut());
+
+ let sentinel = &self.sentinel as *const FreeListNode;
+
+ // SAFETY: The invariants of FreeList guarantee that the list is valid. In a valid list,
+ // the next pointer is always valid and non-null.
+ let mut ptr = unsafe { sentinel.as_ref().unwrap().next as *const FreeListNode };
+ let mut fmt = fmt.debug_list();
+ while ptr != sentinel {
+ fmt.entry(&ptr);
+ // SAFETY: The invariants of FreeList guarantee that the list is valid. In a valid
+ // list, the next pointer is always valid and non-null.
+ ptr = unsafe { ptr.as_ref().unwrap().next as *const FreeListNode };
+ }
+ fmt.finish()
+ }
+}
+
#[derive(Debug)]
struct FreeListNode {
next: *mut FreeListNode,
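
For orientation, the intended call sequence for the new free-list API is roughly the following. This is a sketch against the crate-internal FreeList shown above; `list` stands for a pinned, zero-initialized FreeList and each `chunk` for an owned, correctly sized block of untyped memory, both supplied by the caller and hypothetical here:

    use core::{pin::Pin, ptr::NonNull};

    fn seed_free_list(mut list: Pin<&mut FreeList>, chunks: &[NonNull<u8>]) {
        // A zeroed FreeList is valid but unusable until it has been self-linked,
        // and self-linking must happen exactly once.
        list.as_mut().self_link();
        for &chunk in chunks {
            // SAFETY (assumed by this sketch): each `chunk` conveys ownership of
            // untyped memory of the size class this list manages and is not
            // aliased anywhere else.
            unsafe { list.as_mut().push(chunk) };
        }
    }
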
diff --git a/crates/buddy_allocator/src/lib.rs b/crates/buddy_allocator/src/lib.rs
index fa86de7..441c77f 100644
--- a/crates/buddy_allocator/src/lib.rs
+++ b/crates/buddy_allocator/src/lib.rs
@@ -42,6 +42,8 @@
//! facilitate this, the trees are stored in an array, which forms the overall allocator.
#![no_std]
+extern crate std; // TODO: Remove me
+
mod bitvec;
mod free_list;
mod tree;
@@ -53,7 +55,7 @@ use allocator_api2::alloc::{AllocError, Layout, LayoutError};
use contracts::requires;
use core::{mem, pin::Pin, ptr::NonNull};
use vernos_physmem_free_list::FreeListAllocator;
-use vernos_utils::pin::pin_project_slice_mut_iter;
+use vernos_utils::pin::{pin_project_slice_mut, pin_project_slice_mut_iter};
/// A buddy allocator.
#[derive(Debug)]
@@ -82,37 +84,53 @@ impl<
{
/// Returns a buddy allocator backed by the page ranges in the free list.
pub fn new(
- mut free_list: FreeListAllocator<'allocator, PAGE_SIZE>,
+ mut free_list_alloc: FreeListAllocator<'allocator, PAGE_SIZE>,
) -> Result<BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, AllocError>
{
assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS);
assert_ne!(SIZE_CLASS_COUNT, 0);
+ // We split up all the nodes into size-class-sized chunks here. After this, it's important
+ // for safety that we never split a node again -- we'll see why later.
+ free_list_alloc.split_to_powers_of_two_no_larger_than(1 << (SIZE_CLASS_COUNT - 1));
+
// We use one contiguous region to store the free lists and all the information about each
// tree (the base, the maximum order, and the bitset). We do one pass through the free list
- // to find out how much storage we need, then we try to find a convenient spot to prune the
+ // to find out how much storage we need, then we try to find a convenient spot to steal the
// storage we need from a page range in the list.
let mut range_count = 0;
let mut size_class_counts = [0; SIZE_CLASS_COUNT];
- for (_, mut len_pages) in free_list.iter() {
- while len_pages > 0 {
- let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1);
- range_count += 1;
- size_class_counts[size_class] += 1;
- len_pages -= 1 << size_class;
- }
+ for (_, len_pages) in free_list_alloc.iter() {
+ assert!(len_pages.is_power_of_two());
+ let size_class = len_pages.trailing_zeros() as usize;
+ assert!((0..SIZE_CLASS_COUNT).contains(&size_class));
+ range_count += 1;
+ size_class_counts[size_class] += 1;
}
- // Lay out the metadata. The error here really ought to be impossible, because the pages in
- // the free list needed to fit in there in the first place.
+ // Lay out the metadata.
+ //
+ // UNWRAP: The error here really ought to be impossible, because the pages in the free list
+ // needed to fit in there in the first place.
let metadata_layout = MetadataLayout::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(
range_count,
size_class_counts,
)
- .map_err(|_| AllocError)?;
+ .unwrap();
// Allocate space for the metadata.
- let metadata = free_list.allocate_zeroed(metadata_layout.layout)?;
+ //
+ // SAFETY: We need to make _absolutely_ sure nothing in the buddy allocator uses this
+ // memory before we have a chance to remove it from the allocator. This function guarantees
+ // that the returned memory won't overlap with the first page of a live node, so as long as
+ // we don't split a node or write past the first page of one, we should be OK.
+ let metadata = unsafe {
+ free_list_alloc.allocate_without_always_removing_the_node(metadata_layout.layout)?
+ };
+ // SAFETY: The pointer must be valid to have been returned here.
+ unsafe {
+ metadata.cast::<u8>().write_bytes(0, metadata.len());
+ }
// Break out each component of the metadata.
//
@@ -144,15 +162,80 @@ impl<
// Pin and self-link each free list. Although they're valid in a UB sense when zeroed,
// they're not yet ready to use until this is done.
//
- // SAFETY: We never provide the ability to move out of a free list.
+ // SAFETY: We never provide the ability to move a free list.
let mut free_lists = unsafe { Pin::new_unchecked(free_lists) };
for free_list in pin_project_slice_mut_iter(free_lists.as_mut()) {
free_list.self_link();
}
- // TODO: Actually initialize the trees with ranges from the allocator...
+ // Carve the remaining page ranges into trees. We don't actually assign bitset offsets or
+ // push subregions onto the free lists yet, because we're going to sort the trees by address
+ // first. (This isn't algorithmically necessary, but it makes debugging a bit easier.)
+ let mut i = 0;
+ while let Some((ptr, mut len_pages)) = free_list_alloc.pop() {
+ while len_pages > 0 {
+ let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1);
+ trees[i].base_ptr = ptr.cast().as_ptr();
+ trees[i].size_class = size_class;
+ i += 1;
+ len_pages -= 1 << size_class;
+ }
+ }
+ assert!(
+ i == trees.len() || i + 1 == trees.len(),
+ "found {} trees but allocated {} slots",
+ i,
+ trees.len()
+ );
+
+ // Slice the trees slice to fit how many trees actually remained, then sort them by
+ // address.
+ let trees = &mut trees[..i];
+ trees.sort_by_key(|tree| tree.base_ptr as usize);
+
+ // Compute the number of bits that belong to each tree and assign them.
+ let mut bitset_offset = 0;
+ for tree in &mut *trees {
+ tree.bitset_offset = bitset_offset;
+ bitset_offset += (1 << (tree.size_class + 1)) - 1;
+ }
+ assert!(bitset_offset <= metadata_layout.bitset_len_bits);
+
+ // Add regions the trees should own to the free lists, excluding the subregions that
+ // metadata is allocated in.
- // Make the buddy allocator and return.
+ // Since we know that the metadata was allocated at the end of the page range it was
+ // allocated in, we can just test the end addresses.
+ //
+ // SAFETY: This is the one-past-the-end address.
+ let metadata_addr_hi =
+ unsafe { metadata.cast::<u8>().add(metadata.len()).as_ptr() } as usize;
+ for tree in &mut *trees {
+ // SAFETY: This is the one-past-the-end address.
+ let tree_addr_hi = unsafe { tree.base_ptr.add(1 << tree.size_class) } as usize;
+ if metadata_addr_hi == tree_addr_hi {
+ // If the metadata was present inside the tree... TODO.
+ std::eprintln!("TODO");
+ } else {
+ // Otherwise, we can take the whole range of the tree and add it to the free list.
+ //
+ // UNWRAP: The pointer must have been non-null when it was in the free-list
+ // allocator.
+ let ptr = NonNull::new(tree.base_ptr as *mut u8).unwrap();
+ // SAFETY: This region is not owned or borrowed by anything else (the only thing it
+ // could be would be the metadata, but that's not in this tree), is of the correct
+ // size, and is composed of untyped memory.
+ unsafe {
+ pin_project_slice_mut(free_lists.as_mut(), tree.size_class).push(ptr);
+ }
+
+ // Then, set a bit in the bitset to say that this region is present.
+ assert!(!bitset.get(tree.bitset_offset));
+ bitset.set(tree.bitset_offset, true);
+ }
+ }
+
+ // Make the buddy allocator's struct and return it.
Ok(BuddyAllocator {
free_lists,
trees,
@@ -165,6 +248,17 @@ impl<
pub fn alloc(&mut self, size_class: usize) -> Result<NonNull<u8>, AllocError> {
Err(AllocError)
}
+
+ /// Returns a subregion of a particular size class to the allocator.
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must have been returned by a call to `alloc` with the same `size_class`.
+ /// - `ptr` must convey ownership over the entire region.
+ #[requires((0..SIZE_CLASS_COUNT).contains(&size_class))]
+ pub unsafe fn dealloc(&mut self, ptr: NonNull<u8>, size_class: usize) {
+ todo!()
+ }
}
#[derive(Debug)]
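
A caller-side sketch of the alloc/dealloc pairing that the contract above describes (hypothetical usage: at this point in the series `alloc` still returns `Err(AllocError)` and `dealloc` is `todo!()`):

    use allocator_api2::alloc::AllocError;
    use vernos_buddy_allocator::BuddyAllocator;

    fn round_trip<const PS: usize, const PSB: usize, const SCC: usize>(
        buddy: &mut BuddyAllocator<'_, PS, PSB, SCC>,
        size_class: usize,
    ) -> Result<(), AllocError> {
        assert!(size_class < SCC);
        // Allocate a 2^size_class-page subregion...
        let ptr = buddy.alloc(size_class)?;
        // ...and return it with the same size class it was requested at.
        // SAFETY: `ptr` came from `alloc` with this exact `size_class`, and we
        // still own the entire region.
        unsafe { buddy.dealloc(ptr, size_class) };
        Ok(())
    }
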
@@ -179,6 +273,7 @@ struct MetadataLayout<
trees_len: usize,
bitset_offset: usize,
bitset_len: usize,
+ bitset_len_bits: usize,
}
impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT: usize>
@@ -186,19 +281,13 @@ impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT
{
fn new(
range_count: usize,
- mut size_class_counts: [usize; SIZE_CLASS_COUNT],
+ size_class_counts: [usize; SIZE_CLASS_COUNT],
) -> Result<MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, LayoutError> {
let trees_len = range_count;
let mut bitset_len_bits = 0;
- // TODO: Is there a cleaner way to compute this?
- for i in (0..size_class_counts.len()).rev() {
- let count = size_class_counts[i];
- if count != 0 {
- bitset_len_bits += count;
- if i > 0 {
- size_class_counts[i - 1] += count * 2;
- }
- }
+ for (size_class, &count) in size_class_counts.iter().enumerate() {
+ let bits_for_size_class = (1 << (size_class + 1)) - 1;
+ bitset_len_bits += bits_for_size_class * count;
}
let bitset_len = (bitset_len_bits + usize::BITS as usize - 1) / usize::BITS as usize;
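
The rewritten loop is just a closed form: a tree of size class k covers 2^k pages, and the complete binary tree over it has 2^(k+1) - 1 nodes, one bit each. A quick hosted check of that arithmetic (illustrative, not part of the commit):

    fn bitset_len_bits(size_class_counts: &[usize]) -> usize {
        size_class_counts
            .iter()
            .enumerate()
            .map(|(size_class, &count)| ((1usize << (size_class + 1)) - 1) * count)
            .sum()
    }

    fn main() {
        // Two single-page trees (1 bit each) and one size-class-2 tree (4 pages, 7 bits).
        let bits = bitset_len_bits(&[2, 0, 1]);
        assert_eq!(bits, 2 * 1 + 1 * 7);
        // Rounded up to whole words, as in MetadataLayout::new.
        let words = (bits + usize::BITS as usize - 1) / usize::BITS as usize;
        assert_eq!(words, 1);
    }
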
@@ -217,6 +306,7 @@ impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT
trees_len,
bitset_offset,
bitset_len,
+ bitset_len_bits,
})
}
}
diff --git a/crates/buddy_allocator/src/tree.rs b/crates/buddy_allocator/src/tree.rs
index a303d40..1673e26 100644
--- a/crates/buddy_allocator/src/tree.rs
+++ b/crates/buddy_allocator/src/tree.rs
@@ -5,13 +5,13 @@
#[derive(Debug)]
pub struct Tree<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> {
/// The base address of the tree.
- pub base: *const (),
+ pub base_ptr: *const [u8; PAGE_SIZE],
/// The log2 of the number of pages in the region represented by the tree.
- pub log2_page_count: usize,
+ pub size_class: usize,
/// The offset in the bitset to the bits responsible for this tree's pages.
- bitset_offset: usize,
+ pub bitset_offset: usize,
// /// The bitset used to track whether subregions are allocated or not in this tree.
// pub bitset: &'buddy mut BitVec,
diff --git a/crates/buddy_allocator/tests/hosted_test.rs b/crates/buddy_allocator/tests/hosted_test.rs
index d13895c..a841802 100644
--- a/crates/buddy_allocator/tests/hosted_test.rs
+++ b/crates/buddy_allocator/tests/hosted_test.rs
@@ -1,6 +1,7 @@
use core::{fmt, num::NonZero, slice};
use nix::sys::mman::{mmap_anonymous, munmap, MapFlags, ProtFlags};
use proptest::prelude::*;
+use std::ptr::NonNull;
use vernos_buddy_allocator::BuddyAllocator;
use vernos_physmem_free_list::FreeListAllocator;
@@ -19,7 +20,7 @@ proptest! {
#[test]
fn test_simple_scenario() {
let scenario = Scenario {
- range_sizes: vec![4],
+ range_sizes: vec![16],
actions: vec![
Action::Debug,
Action::Alloc {
@@ -32,7 +33,7 @@ fn test_simple_scenario() {
scenario.run(false)
}
-const PAGE_SIZE: usize = (size_of::<*const ()>() * 4).next_power_of_two();
+const PAGE_SIZE: usize = (size_of::<*const ()>() * 3).next_power_of_two();
const PAGE_SIZE_BITS: usize = PAGE_SIZE.trailing_zeros() as usize;
const SIZE_CLASS_COUNT: usize = 4;
@@ -112,7 +113,7 @@ impl Action {
},
Err(err) => {
if !allow_errors {
- Err(err).unwrap()
+ Err(err).expect("failed to perform alloc action")
}
}
},
@@ -124,7 +125,12 @@ impl Action {
let index = index % allocs.len();
let alloc = allocs.remove(index);
alloc.check_sentinel();
- todo!("{alloc:?}")
+ unsafe {
+ buddy.dealloc(
+ NonNull::from(alloc.slice.as_mut()).cast(),
+ alloc.slice.len().trailing_zeros() as usize - PAGE_SIZE_BITS,
+ );
+ }
}
Action::Debug => {
dbg!(buddy);
@@ -212,7 +218,7 @@ impl Scenario {
}
Err(err) => {
if !allow_errors {
- Err(err).unwrap()
+ Err(err).expect("failed to make buddy allocator")
}
}
}
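
The size class passed to `dealloc` above is recovered from the slice length: every allocation spans a power-of-two number of pages, so `len.trailing_zeros() - PAGE_SIZE_BITS` is exactly log2 of the page count. A standalone check of that identity (the concrete PAGE_SIZE here is an assumption for illustration; the test derives its own from the pointer size):

    fn main() {
        const PAGE_SIZE: usize = 32;
        const PAGE_SIZE_BITS: usize = PAGE_SIZE.trailing_zeros() as usize;
        for size_class in 0..4usize {
            let len_bytes = PAGE_SIZE << size_class;
            let recovered = len_bytes.trailing_zeros() as usize - PAGE_SIZE_BITS;
            assert_eq!(recovered, size_class);
        }
    }
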
diff --git a/crates/physmem_free_list/src/lib.rs b/crates/physmem_free_list/src/lib.rs
index cb28362..f99b30f 100644
--- a/crates/physmem_free_list/src/lib.rs
+++ b/crates/physmem_free_list/src/lib.rs
@@ -33,8 +33,20 @@ impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE
self.head = Some(FreeListPtr::new(ptr, len_pages, self.head.take()));
}
- /// Allocates memory. This is like `Allocator::allocate`, but it takes `&mut self`.
- pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ /// Allocates memory, *without necessarily removing it from the allocator*.
+ ///
+ /// The memory is only removed from the allocator when the allocation consumes an entire node
+ /// (i.e. no further pages could be allocated out of that node). If the node is not removed, the
+ /// returned pointer will not point into the node's first page, so the allocation will not be
+ /// altered by metadata updates.
+ ///
+ /// Obviously, this is unsound to call while the previous allocation is alive. It is also
+ /// unsound to split any nodes after calling this, since splitting the node that was allocated
+ /// into might write the new node's metadata into that allocation.
+ pub unsafe fn allocate_without_always_removing_the_node(
+ &mut self,
+ layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
assert!(PAGE_SIZE.is_power_of_two());
let len_pages = (layout.size() + PAGE_SIZE - 1) >> PAGE_SIZE.trailing_zeros();
@@ -48,33 +60,24 @@ impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE
return Err(AllocError);
};
- // If the node we're looking for is the first one, and we're not planning on splitting it,
- // we have a special case.
- //
- // UNWRAP: The list can't have been empty or .min() above would've returned None.
- if self
- .head
- .as_ref()
- .map(|node| node.len_pages() == len_pages)
- .unwrap()
- {
- // In that case, just unlink the node and hand it back.
- //
- // UNWRAP: The list can't have been empty or .min() above would've returned None.
- let node = self.head.take().unwrap();
- let (ptr, header) = node.consume();
- self.head = header.next;
- Ok(NonNull::slice_from_raw_parts(
- ptr.cast(),
- header.len_pages * PAGE_SIZE,
- ))
- } else {
- // Otherwise, we want to get a node that's directly before a node that has exactly the
- // right length.
- let node_before_node_to_remove = if selected_node_len == len_pages {
- // If the node we're looking for has the same length as the allocation, we just
- // need to iterate through the list to find it. The special case where it's the
- // first element was already handled above.
+ if selected_node_len == len_pages {
+ // The less-scary case is when we're removing an entire node; we're just aiming to find
+ // it and unlink it.
+
+ // If the node we're looking for is the first one, slightly different code is used to
+ // unlink it.
+ let (ptr, unlinked_len_pages) = if self
+ .head
+ .as_ref()
+ .map(|node| node.len_pages() == len_pages)
+ .unwrap()
+ {
+ // In that case, just unlink the node and hand it back.
+ //
+ // UNWRAP: The list can't have been empty or .min() above would've returned None.
+ self.pop().unwrap()
+ } else {
+ // Otherwise, we need to iterate through the list to find the node before it.
let mut next = self.head.as_mut();
let mut node_before_node_to_remove = None;
while let Some(ptr) = next {
@@ -87,50 +90,43 @@ impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE
next = ptr.next_mut();
}
- // UNWRAP: We had to find the node in the first place.
- node_before_node_to_remove.unwrap()
- } else {
- // In the normal case, we'll split the node. First, we iterate to the node we
- // selected.
- let mut next = self.head.as_mut();
- let mut node_to_split = None;
- while let Some(ptr) = next {
- if ptr.len_pages() == selected_node_len {
- node_to_split = Some(ptr);
- break;
- }
- next = ptr.next_mut();
- }
-
- // UNWRAP: We had to find the node in the first place.
- let node_to_split = node_to_split.unwrap();
-
- // Split the node. The part we're going to remove gets inserted after `node_to_split`,
- // so we can just return the node after this.
- node_to_split.split(len_pages);
- node_to_split
+ // UNWRAP: We had to find the node in the first place, so it must be in the list.
+ node_before_node_to_remove.unwrap().unlink_next().unwrap()
};
- // Unlink the next node.
- //
- // UNWRAP: The next node must exist because of the above.
- let (ptr, unlinked_len_pages) = node_before_node_to_remove.unlink_next().unwrap();
assert_eq!(len_pages, unlinked_len_pages);
Ok(NonNull::slice_from_raw_parts(
ptr.cast(),
len_pages * PAGE_SIZE,
))
- }
- }
+ } else {
+ // The scary case is when we hand out memory that remains part of a node that stays in the
+ // list. First, we need to iterate through the list to find that node.
+ let mut next = self.head.as_mut();
+ let mut node_to_steal_from = None;
+ while let Some(ptr) = next {
+ if ptr.len_pages() == selected_node_len {
+ node_to_steal_from = Some(ptr);
+ break;
+ }
+ next = ptr.next_mut();
+ }
- /// Allocates zeroed-out memory.
- pub fn allocate_zeroed(&mut self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
- let ptr = self.allocate(layout)?;
- // SAFETY: The pointer has to have a valid size.
- unsafe {
- ptr.cast::<u8>().write_bytes(0, ptr.len());
+ // UNWRAP: We had to find the node in the first place, so it must be in the list.
+ let node_to_steal_from = node_to_steal_from.unwrap();
+
+ // Then, we can simply do the address arithmetic to get a pointer out with the right
+ // bounds.
+ //
+ // SAFETY: The node had sufficient length to add `selected_node_len`, so it must have
+ // sufficient length to add a lesser quantity.
+ let ptr = unsafe { node_to_steal_from.ptr().add(selected_node_len - len_pages) };
+
+ Ok(NonNull::slice_from_raw_parts(
+ ptr.cast(),
+ len_pages * PAGE_SIZE,
+ ))
}
- Ok(ptr)
}
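
The key property of the new "steal" branch is that the allocation is carved from the tail of the selected node, so it can never reach the node's first page, which is where the in-band header lives. The same arithmetic on plain addresses (a sketch with made-up numbers):

    fn stolen_start(node_base: usize, node_len_pages: usize, alloc_len_pages: usize,
                    page_size: usize) -> usize {
        assert!(alloc_len_pages < node_len_pages);
        // Mirrors `node_to_steal_from.ptr().add(selected_node_len - len_pages)`.
        let start = node_base + (node_len_pages - alloc_len_pages) * page_size;
        // The allocation begins at or after the node's second page, leaving the
        // header page untouched.
        assert!(start >= node_base + page_size);
        start
    }

    fn main() {
        // An 8-page node at 0x8000 with 4 KiB pages, asked for 3 pages.
        let start = stolen_start(0x8000, 8, 3, 4096);
        assert_eq!(start, 0x8000 + 5 * 4096);
    }
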
/// Returns an iterator over the page ranges in the free list, as pairs of `ptr` and
@@ -147,13 +143,28 @@ impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE
})
}
- /// Splits all the nodes until each page range is of a power-of-two size.
- pub fn split_to_powers_of_two(&mut self) {
+ /// Pops the first page range from the allocator and returns it, along with its length in pages.
+ pub fn pop(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> {
+ let node = self.head.take()?;
+ let (ptr, header) = node.consume();
+ self.head = header.next;
+ Some((ptr, header.len_pages))
+ }
+
+ /// Splits all the nodes until each page range is of a power-of-two size no larger than
+ /// `max_len_pages`.
+ pub fn split_to_powers_of_two_no_larger_than(&mut self, max_len_pages: usize) {
let mut next = self.head.as_mut();
while let Some(ptr) = next {
- // Compute the size the page range would have if it were
- // let log2_smaller_size = ptr.len_pages().trailing_zeros();
- // let smaller_size = 1 << log2_smaller_size;
+ while ptr.len_pages() > max_len_pages {
+ ptr.split(max_len_pages);
+ }
+
+ while !ptr.len_pages().is_power_of_two() {
+ let log2_smaller_size = ptr.len_pages().trailing_zeros();
+ let smaller_size = 1 << log2_smaller_size;
+ ptr.split(smaller_size);
+ }
next = ptr.next_mut();
}
@@ -265,7 +276,7 @@ impl<'allocator, const PAGE_SIZE: usize> FreeListPtr<'allocator, PAGE_SIZE> {
header.next.as_mut()
}
- /// Returns the underlying pointer, which should not be accessed.
+ /// Returns the underlying pointer. Misusing it can violate the list's invariants.
pub fn ptr(&self) -> NonNull<[u8; PAGE_SIZE]> {
self.ptr.cast()
}
@@ -310,7 +321,7 @@ impl<'allocator, const PAGE_SIZE: usize> FreeListPtr<'allocator, PAGE_SIZE> {
}
}
- /// Unlinks and returns the _next_ node, returning its pointer and length.
+ /// Unlinks and returns the _next_ node, returning its pointer and length in pages.
pub fn unlink_next(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> {
// SAFETY: We uphold the invariants.
let header = unsafe { self.header_mut() };
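
The new splitting routine first peels off max-sized chunks, then peels the lowest set bit until the remaining range is itself a power of two. The chunk sizes it leaves behind can be modeled without the list machinery (a hosted sketch, not part of the commit):

    /// Chunk sizes (in pages) left behind by splitting a range of `len_pages`,
    /// mirroring the size decomposition of split_to_powers_of_two_no_larger_than.
    fn chunk_sizes(mut len_pages: usize, max_len_pages: usize) -> Vec<usize> {
        assert!(len_pages > 0);
        assert!(max_len_pages.is_power_of_two());
        let mut out = Vec::new();
        while len_pages > max_len_pages {
            out.push(max_len_pages);
            len_pages -= max_len_pages;
        }
        while !len_pages.is_power_of_two() {
            let smaller = 1 << len_pages.trailing_zeros();
            out.push(smaller);
            len_pages -= smaller;
        }
        out.push(len_pages);
        out
    }

    fn main() {
        // 13 pages under an 8-page cap split into one 8-page chunk plus 1 + 4
        // from the low bits (compared here as a sorted multiset).
        let mut chunks = chunk_sizes(13, 8);
        chunks.sort();
        assert_eq!(chunks, vec![1, 4, 8]);
        assert!(chunks.iter().all(|&c| c.is_power_of_two() && c <= 8));
    }
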
diff --git a/crates/utils/src/pin.rs b/crates/utils/src/pin.rs
index 202904d..cdf40fd 100644
--- a/crates/utils/src/pin.rs
+++ b/crates/utils/src/pin.rs
@@ -5,8 +5,11 @@ use core::pin::Pin;
use std::ptr::NonNull;
/// Iterates over projections of elements out of a pinned slice.
-pub fn pin_project_slice_mut_iter<T>(slice: Pin<&mut [T]>) -> impl Iterator<Item = Pin<&mut T>> {
- let base_ptr: NonNull<T> = NonNull::from(slice.as_ref().get_ref()).cast();
+pub fn pin_project_slice_mut_iter<T>(
+ mut slice: Pin<&mut [T]>,
+) -> impl Iterator<Item = Pin<&mut T>> {
+ // SAFETY: We never move out of this pointer.
+ let base_ptr: NonNull<T> = NonNull::from(unsafe { slice.as_mut().get_unchecked_mut() }).cast();
(0..slice.len()).map(move |i| {
// SAFETY: This is in-bounds, since the original slice was valid. No other references to
// the data can exist, since we visit each index once and have an exclusive borrow on the