Diffstat (limited to 'crates/buddy_allocator/src')
-rw-r--r--  crates/buddy_allocator/src/bitvec.rs     |  59
-rw-r--r--  crates/buddy_allocator/src/free_list.rs  |  72
-rw-r--r--  crates/buddy_allocator/src/lib.rs        | 222
-rw-r--r--  crates/buddy_allocator/src/stripe.rs     | 148
-rw-r--r--  crates/buddy_allocator/src/tree.rs       | 130
5 files changed, 631 insertions(+), 0 deletions(-)
diff --git a/crates/buddy_allocator/src/bitvec.rs b/crates/buddy_allocator/src/bitvec.rs
new file mode 100644
index 0000000..856d579
--- /dev/null
+++ b/crates/buddy_allocator/src/bitvec.rs
@@ -0,0 +1,59 @@
+use contracts::requires;
+use core::{fmt, mem::transmute};
+
+/// A fixed-length vector of bits.
+pub struct BitVec([usize]);
+
+impl BitVec {
+    fn from_mut(words: &mut [usize]) -> &mut BitVec {
+        // SAFETY: The types have a newtype relationship.
+        unsafe { transmute(words) }
+    }
+
+    fn from_ref(words: &[usize]) -> &BitVec {
+        // SAFETY: The types have a newtype relationship.
+        unsafe { transmute(words) }
+    }
+
+    /// Retrieves the value of a bit from the BitVec.
+    #[requires(i < self.len())]
+    pub fn get(&self, i: usize) -> bool {
+        let word_index = i / usize::BITS as usize;
+        let subword_index = i % usize::BITS as usize;
+        let one_hot = 1 << subword_index;
+        (self.0[word_index] & one_hot) != 0
+    }
+
+    /// Returns whether the BitVec is empty.
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    /// Returns the number of bits in the BitVec.
+    pub fn len(&self) -> usize {
+        self.0.len() * usize::BITS as usize
+    }
+
+    /// Sets the value of a bit in the BitVec.
+    #[requires(i < self.len())]
+    pub fn set(&mut self, i: usize, value: bool) {
+        let word_index = i / usize::BITS as usize;
+        let subword_index = i % usize::BITS as usize;
+        let one_hot = 1 << subword_index;
+        if value {
+            self.0[word_index] |= one_hot;
+        } else {
+            self.0[word_index] &= !one_hot;
+        }
+    }
+}
+
+impl fmt::Debug for BitVec {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "BitVec(")?;
+        for word in &self.0 {
+            // Use the native word width rather than hard-coding 64 bits.
+            write!(fmt, "{word:0width$b}", width = usize::BITS as usize)?;
+        }
+        write!(fmt, ")")
+    }
+}
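+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // A minimal sketch of a round-trip through `set` and `get`, using one index in the first
+    // word and one that lands in the second word regardless of the target's word size.
+    #[test]
+    fn set_and_get_roundtrip() {
+        let mut words = [0usize; 2];
+        let bits = BitVec::from_mut(&mut words);
+        for i in [3, usize::BITS as usize + 5] {
+            assert!(!bits.get(i));
+            bits.set(i, true);
+            assert!(bits.get(i));
+            bits.set(i, false);
+            assert!(!bits.get(i));
+        }
+    }
+}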
diff --git a/crates/buddy_allocator/src/free_list.rs b/crates/buddy_allocator/src/free_list.rs
new file mode 100644
index 0000000..5a633c3
--- /dev/null
+++ b/crates/buddy_allocator/src/free_list.rs
@@ -0,0 +1,72 @@
+//! A doubly-linked list of power-of-two-sized subregions.
+
+use core::{marker::PhantomPinned, pin::Pin, ptr};
+
+/// The magic number a non-sentinel node should have.
+const FREE_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"FREENODE");
+
+/// The magic number a sentinel node should have.
+const SENTINEL_MAGIC: u64 = u64::from_ne_bytes(*b"SENTINEL");
+
+/// A doubly-linked list of power-of-two-sized subregions.
+///
+/// This type is valid when zero-initialized.
+#[derive(Debug)]
+pub struct FreeList {
+    sentinel: FreeListNode,
+}
+
+impl FreeList {
+    /// Performs self-linking. This must be done before the free list is used in any other way.
+    pub fn self_link(mut self: Pin<&mut FreeList>) {
+        assert_eq!(self.sentinel.next, ptr::null_mut());
+        assert_eq!(self.sentinel.prev, ptr::null_mut());
+
+        let self_ptr = &self.sentinel as *const FreeListNode as *mut FreeListNode;
+        let mut sentinel = self.sentinel_unchecked();
+        *sentinel.as_mut().next_mut() = self_ptr;
+        *sentinel.as_mut().prev_mut() = self_ptr;
+        *sentinel.as_mut().magic_mut() = SENTINEL_MAGIC;
+    }
+
+    /// Projects the sentinel node out of the list.
+    fn sentinel(self: Pin<&mut FreeList>) -> Pin<&mut FreeListNode> {
+        let mut sentinel = self.sentinel_unchecked();
+        assert_eq!(*sentinel.as_mut().magic_mut(), SENTINEL_MAGIC);
+        sentinel
+    }
+
+    /// Projects the sentinel node out of the list, without checking the magic number.
+    fn sentinel_unchecked(self: Pin<&mut FreeList>) -> Pin<&mut FreeListNode> {
+        // SAFETY: sentinel is structurally pinned.
+        unsafe { self.map_unchecked_mut(|list| &mut list.sentinel) }
+    }
+}
+
+#[derive(Debug)]
+struct FreeListNode {
+    next: *mut FreeListNode,
+    prev: *mut FreeListNode,
+    magic: u64,
+    _phantom: PhantomPinned,
+}
+
+impl FreeListNode {
+    /// Projects the `next` pointer out of the node.
+    fn next_mut(self: Pin<&mut FreeListNode>) -> &mut *mut FreeListNode {
+        // SAFETY: next is not structurally pinned.
+        unsafe { &mut self.get_unchecked_mut().next }
+    }
+
+    /// Projects the `prev` pointer out of the node.
+    fn prev_mut(self: Pin<&mut FreeListNode>) -> &mut *mut FreeListNode {
+        // SAFETY: prev is not structurally pinned.
+        unsafe { &mut self.get_unchecked_mut().prev }
+    }
+
+    /// Projects the `magic` number out of the node.
+    fn magic_mut(self: Pin<&mut FreeListNode>) -> &mut u64 {
+        // SAFETY: magic is not structurally pinned.
+        unsafe { &mut self.get_unchecked_mut().magic }
+    }
+}
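+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use core::{mem::MaybeUninit, pin::pin};
+
+    // A minimal sketch of the intended initialization sequence: zero-initialize, pin, then
+    // self-link before any other use.
+    #[test]
+    fn self_link_zeroed_list() {
+        // SAFETY: `FreeList` is documented to be valid when zero-initialized.
+        let list = unsafe { MaybeUninit::<FreeList>::zeroed().assume_init() };
+        let mut list = pin!(list);
+        list.as_mut().self_link();
+    }
+}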
diff --git a/crates/buddy_allocator/src/lib.rs b/crates/buddy_allocator/src/lib.rs
new file mode 100644
index 0000000..fa86de7
--- /dev/null
+++ b/crates/buddy_allocator/src/lib.rs
@@ -0,0 +1,222 @@
+//! A buddy allocator, used to allocate pages.
+//!
+//! The allocator can be split into three abstractions: stripes, trees, and the top-level allocator.
+//!
+//! TODO: See if there's standard terminology for these.
+//!
+//! ## Stripes
+//!
+//! The buddy allocator works in terms of size classes, which are power-of-two-sized, starting at a
+//! single page and going up from there. Each stripe corresponds to a single size class and a
+//! particular region of memory.
+//!
+//! A stripe contains a circular doubly-linked free-list for subregions of that size class, and a
+//! bitset marking whether a particular region has been allocated or not. Being a circular
+//! doubly-linked list makes it cheap to remove an element whose address we know, as well as cheap
+//! to push and pop elements.
+//!
+//! It's efficient to go from the address of a subregion to the index of its corresponding bit, so
+//! when we hand out a subregion from the free-list or put one back, it's cheap to read or write
+//! its bit.
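+//!
+//! As a sketch (in terms of a stripe's base address and order), the bit index is computed with
+//! shifts alone:
+//!
+//! ```ignore
+//! // A subregion of order `n` is 2^(PAGE_SIZE_BITS + n) bytes, so its index among the
+//! // stripe's subregions is its offset from the base, shifted down.
+//! fn index_of(base: usize, order: usize, addr: usize) -> usize {
+//!     (addr - base) >> (PAGE_SIZE_BITS + order)
+//! }
+//! ```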
+//!
+//! ## Trees
+//!
+//! A tree is logically a collection of stripes, one per size class. To pack the structures more
+//! efficiently, they are stored interleaved with each other, and the tree manages this.
+//!
+//! The important logic at the level of trees happens when we allocate a subregion from a size
+//! class whose stripe's free-list is empty, and when we free subregions.
+//!
+//! When a stripe's free-list is empty, the tree instead allocates a subregion of a larger size
+//! from the next stripe. It can then store the unused portion in the current size class.
+//!
+//! The really important bit is the ability to merge subregions when they're freed. When we free a
+//! subregion of a certain size class, we can check whether its neighbor (its buddy) is unallocated
+//! as well. If so, we can remove it from the free-list by its address. We can combine the two
+//! subregions into one of the next larger size class, and then return that subregion to the next
+//! stripe.
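+//!
+//! Buddies of order `n` differ in exactly one bit of their offset from the tree's base, so
+//! finding a subregion's buddy is a single XOR (a sketch, mirroring the stripe code):
+//!
+//! ```ignore
+//! fn buddy_of(base: usize, order: usize, addr: usize) -> usize {
+//!     base + ((addr - base) ^ (1 << (PAGE_SIZE_BITS + order)))
+//! }
+//! ```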
+//!
+//! ## The buddy allocator
+//!
+//! Finally, the overall buddy allocator needs to be able to handle multiple memory regions. To
+//! facilitate this, the trees are stored in an array, which forms the overall allocator.
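+//!
+//! A hypothetical use, assuming a `FreeListAllocator` that already describes the machine's free
+//! page ranges (the const parameters are the page size, its log2, and the size class count):
+//!
+//! ```ignore
+//! let mut buddy = BuddyAllocator::<4096, 12, 19>::new(free_list)?;
+//! let page = buddy.alloc(0)?; // a single page
+//! ```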
+#![no_std]
+
+mod bitvec;
+mod free_list;
+mod tree;
+
+// TODO: `stripe` doesn't compile yet (it still refers to `crate::PAGE_SIZE_BITS`), so it's
+// disabled for now.
+// mod stripe;
+
+use crate::{bitvec::BitVec, free_list::FreeList, tree::Tree};
+use allocator_api2::alloc::{AllocError, Layout, LayoutError};
+use contracts::requires;
+use core::{mem, pin::Pin, ptr::NonNull};
+use vernos_physmem_free_list::FreeListAllocator;
+use vernos_utils::pin::pin_project_slice_mut_iter;
+
+/// A buddy allocator.
+#[derive(Debug)]
+pub struct BuddyAllocator<
+    'allocator,
+    const PAGE_SIZE: usize,
+    const PAGE_SIZE_BITS: usize,
+    const SIZE_CLASS_COUNT: usize,
+> {
+    /// The free lists.
+    free_lists: Pin<&'allocator mut [FreeList; SIZE_CLASS_COUNT]>,
+
+    /// The metadata associated with each tree.
+    trees: &'allocator mut [Tree<'allocator, PAGE_SIZE, PAGE_SIZE_BITS>],
+
+    /// The shared bitset.
+    bitset: &'allocator mut BitVec,
+}
+
+impl<
+        'allocator,
+        const PAGE_SIZE: usize,
+        const PAGE_SIZE_BITS: usize,
+        const SIZE_CLASS_COUNT: usize,
+    > BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+    /// Returns a buddy allocator backed by the page ranges in the free list.
+    pub fn new(
+        mut free_list: FreeListAllocator<'allocator, PAGE_SIZE>,
+    ) -> Result<BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, AllocError>
+    {
+        assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS);
+        assert_ne!(SIZE_CLASS_COUNT, 0);
+
+        // We use one contiguous region to store the free lists and all the information about each
+        // tree (the base, the maximum order, and the bitset). We do one pass through the free list
+        // to find out how much storage we need, then we try to find a convenient spot to prune the
+        // storage we need from a page range in the list.
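+        //
+        // For example (hypothetical numbers), a 13-page range with SIZE_CLASS_COUNT = 4 gets
+        // split greedily from the low bits of its length: 13 = 1 + 4 + 8, one subregion each of
+        // size classes 0, 2, and 3.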
+        let mut range_count = 0;
+        let mut size_class_counts = [0; SIZE_CLASS_COUNT];
+        for (_, mut len_pages) in free_list.iter() {
+            while len_pages > 0 {
+                let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1);
+                range_count += 1;
+                size_class_counts[size_class] += 1;
+                len_pages -= 1 << size_class;
+            }
+        }
+
+        // Lay out the metadata. The error here ought to be impossible, because the metadata has
+        // to be smaller than the free pages it describes, which already fit in the address space.
+        let metadata_layout = MetadataLayout::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(
+            range_count,
+            size_class_counts,
+        )
+        .map_err(|_| AllocError)?;
+
+        // Allocate space for the metadata.
+        let metadata = free_list.allocate_zeroed(metadata_layout.layout)?;
+
+        // Break out each component of the metadata.
+        //
+        // SAFETY: The layout is computed to make all of this sound if the data is valid. All of
+        // these types are valid when zero-initialized.
+        let free_lists: &'allocator mut [FreeList; SIZE_CLASS_COUNT] = unsafe {
+            metadata
+                .byte_add(metadata_layout.free_lists_offset)
+                .cast()
+                .as_mut()
+        };
+        let trees: &'allocator mut [Tree<PAGE_SIZE, PAGE_SIZE_BITS>] = unsafe {
+            NonNull::slice_from_raw_parts(
+                metadata.byte_add(metadata_layout.trees_offset).cast(),
+                metadata_layout.trees_len,
+            )
+            .as_mut()
+        };
+        let bitset: &'allocator mut BitVec = unsafe {
+            mem::transmute::<&mut [usize], &mut BitVec>(
+                NonNull::slice_from_raw_parts(
+                    metadata.byte_add(metadata_layout.bitset_offset).cast(),
+                    metadata_layout.bitset_len,
+                )
+                .as_mut(),
+            )
+        };
+
+        // Pin and self-link each free list. Although they're valid in a UB sense when zeroed,
+        // they're not yet ready to use until this is done.
+        //
+        // SAFETY: We never provide the ability to move out of a free list.
+        let mut free_lists = unsafe { Pin::new_unchecked(free_lists) };
+        for free_list in pin_project_slice_mut_iter(free_lists.as_mut()) {
+            free_list.self_link();
+        }
+
+        // TODO: Actually initialize the trees with ranges from the allocator...
+
+        // Make the buddy allocator and return.
+        Ok(BuddyAllocator {
+            free_lists,
+            trees,
+            bitset,
+        })
+    }
+
+    /// Tries to allocate a subregion of a particular size class from the allocator.
+    #[requires((0..SIZE_CLASS_COUNT).contains(&size_class))]
+    pub fn alloc(&mut self, size_class: usize) -> Result<NonNull<u8>, AllocError> {
+        // TODO: Pop from the free list for `size_class`, splitting a larger subregion if it's
+        // empty. Unimplemented until the trees are initialized above.
+        Err(AllocError)
+    }
+}
+
+#[derive(Debug)]
+struct MetadataLayout<
+    const PAGE_SIZE: usize,
+    const PAGE_SIZE_BITS: usize,
+    const SIZE_CLASS_COUNT: usize,
+> {
+    layout: Layout,
+    free_lists_offset: usize,
+    trees_offset: usize,
+    trees_len: usize,
+    bitset_offset: usize,
+    bitset_len: usize,
+}
+
+impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT: usize>
+    MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+    fn new(
+        range_count: usize,
+        mut size_class_counts: [usize; SIZE_CLASS_COUNT],
+    ) -> Result<MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, LayoutError> {
+        let trees_len = range_count;
+        let mut bitset_len_bits = 0;
+        // TODO: Is there a cleaner way to compute this?
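+        //
+        // Each free subregion of class `i` needs one bit for itself and, once split, two bits at
+        // class `i - 1`, four at class `i - 2`, and so on. For example (hypothetical numbers), a
+        // single class-2 subregion contributes 1 + 2 + 4 = 7 bits in total.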
+        for i in (0..size_class_counts.len()).rev() {
+            let count = size_class_counts[i];
+            if count != 0 {
+                bitset_len_bits += count;
+                if i > 0 {
+                    size_class_counts[i - 1] += count * 2;
+                }
+            }
+        }
+        let bitset_len = bitset_len_bits.div_ceil(usize::BITS as usize);
+
+        let free_lists_layout = Layout::new::<[FreeList; SIZE_CLASS_COUNT]>();
+        let trees_layout = Layout::array::<Tree<PAGE_SIZE, PAGE_SIZE_BITS>>(trees_len)?;
+        let bitset_layout = Layout::array::<usize>(bitset_len)?;
+
+        let free_lists_offset = 0;
+        let (layout, trees_offset) = free_lists_layout.extend(trees_layout)?;
+        let (layout, bitset_offset) = layout.extend(bitset_layout)?;
+
+        Ok(MetadataLayout {
+            layout,
+            free_lists_offset,
+            trees_offset,
+            trees_len,
+            bitset_offset,
+            bitset_len,
+        })
+    }
+}
diff --git a/crates/buddy_allocator/src/stripe.rs b/crates/buddy_allocator/src/stripe.rs
new file mode 100644
index 0000000..2ff686b
--- /dev/null
+++ b/crates/buddy_allocator/src/stripe.rs
@@ -0,0 +1,148 @@
+use crate::{bitvec::BitVec, PAGE_SIZE_BITS};
+use core::ptr::NonNull;
+
+/// A single size class for a single region of the allocator. See the crate-level documentation
+/// for more information.
+pub struct Stripe<'buddy> {
+    /// The base address of the tree this stripe is a part of.
+    pub base: *const (),
+
+    /// The order of the stripe. Order `n` means the subregions are `2ⁿ` pages in size.
+    pub order: usize,
+
+    /// The sentinel node of the free-list for this stripe. As an invariant of the type, there
+    /// are no live references to any node in this list.
+    pub free_list: NonNull<FreeListNode>,
+
+    /// The bitset used to track whether a given subregion is allocated or not. A `true` bit
+    /// corresponds to an allocated subregion.
+    pub bitset: &'buddy mut BitVec,
+
+    /// The offset from the start of the bitset to the region used by this stripe.
+    pub bitset_offset: usize,
+}
+
+impl<'buddy> Stripe<'buddy> {
+    /// Returns the buddy of the given pointer.
+    ///
+    /// ## Safety
+    ///
+    /// - The pointer must actually be part of this region.
+    unsafe fn buddy_of(&self, ptr: NonNull<FreeListNode>) -> NonNull<FreeListNode> {
+        let index = self.buddy_of_index(self.index_of(ptr));
+        let addr = self.base as usize + (index << (PAGE_SIZE_BITS + self.order));
+        NonNull::new_unchecked(addr as *mut FreeListNode)
+    }
+
+    /// Returns the buddy of the given index. Buddies are adjacent subregions of the same order,
+    /// so they differ in exactly the lowest bit of the subregion index.
+    fn buddy_of_index(&self, index: usize) -> usize {
+        index ^ 1
+    }
+
+    /// Returns the index the given pointer should have in the BitVec.
+    fn index_of(&self, ptr: NonNull<FreeListNode>) -> usize {
+        (ptr.as_ptr() as usize - self.base as usize) >> (PAGE_SIZE_BITS + self.order)
+    }
+
+    /// Pops a subregion from the free-list.
+    pub fn pop(&mut self) -> Option<NonNull<FreeListNode>> {
+        // SAFETY: The `free_list` is guaranteed to be valid by the invariants of the buddy
+        // allocator. Retrieving the next pointer doesn't, on its own, break aliasing rules.
+        let next = unsafe { self.free_list.read().next };
+
+        // If the sentinel and the next pointer refer to the same spot, the free-list was empty, so
+        // we can't pop from it.
+        if self.free_list == next {
+            return None;
+        }
+
+        // Otherwise, remove the node from the free-list.
+        unsafe {
+            FreeListNode::remove(next);
+        }
+
+        // Finally, mark the node as allocated in the bitvec. It must have been free beforehand.
+        let index = self.index_of(next);
+        assert!(!self.bitset.get(self.bitset_offset + index));
+        self.bitset.set(self.bitset_offset + index, true);
+
+        Some(next)
+    }
+
+    /// Pushes a subregion back into the free-list.
+    ///
+    /// # Safety
+    ///
+    /// - There must be no live references to `subregion`.
+    /// - `subregion` must not be a member of any list.
+    pub unsafe fn push(&mut self, subregion: NonNull<FreeListNode>) {
+        // Insert the subregion as the first element of the free-list.
+        //
+        // SAFETY: The free-list is guaranteed to be valid by the invariants of the buddy
+        // allocator.
+        unsafe {
+            FreeListNode::insert(self.free_list, subregion);
+        }
+
+        // Mark the node as unallocated in the bitvec. It must have been allocated beforehand.
+        let index = self.index_of(subregion);
+        assert!(self.bitset.get(self.bitset_offset + index));
+        self.bitset.set(self.bitset_offset + index, false);
+    }
+
+    /// Pushes a subregion into the free-list for the first time.
+    ///
+    /// # Safety
+    ///
+    /// - There must be no live references to `subregion`.
+    /// - `subregion` must not be a member of any list.
+    pub unsafe fn push_initial(&mut self, subregion: NonNull<FreeListNode>) {
+        // Insert the subregion as the first element of the free-list.
+        //
+        // SAFETY: The free-list is guaranteed to be valid by the invariants of the buddy
+        // allocator.
+        unsafe {
+            FreeListNode::insert(self.free_list, subregion);
+        }
+
+        // Mark the node as unallocated in the bitvec. Since the bitset is zero-initialized, the
+        // bit must not have been set yet.
+        let index = self.index_of(subregion);
+        assert!(!self.bitset.get(self.bitset_offset + index));
+        self.bitset.set(self.bitset_offset + index, false);
+    }
+}
+
+pub struct FreeListNode {
+    next: NonNull<FreeListNode>,
+    prev: NonNull<FreeListNode>,
+}
+
+impl FreeListNode {
+    /// Inserts `new` after `prev`, initializing it. `prev` may be the sentinel.
+    ///
+    /// # Safety
+    ///
+    /// - There must be no live references to any node in the list that includes `prev`, including
+    ///   the sentinel.
+    /// - There must be no live references to `new`.
+    /// - `new` must not be a member of any list.
+    pub unsafe fn insert(prev: NonNull<FreeListNode>, new: NonNull<FreeListNode>) {
+        let next = prev.read().next;
+        (*prev.as_ptr()).next = new;
+        (*next.as_ptr()).prev = new;
+        new.write(FreeListNode { next, prev });
+    }
+
+    /// Removes this node from the free list it is a part of.
+    ///
+    /// # Safety
+    ///
+    /// - The pointer must point to a node that is part of a free list.
+    /// - There must be no live references to any node in the list, including the sentinel.
+    /// - The pointer must not point to the sentinel node.
+    pub unsafe fn remove(ptr: NonNull<FreeListNode>) {
+        let FreeListNode { next, prev } = ptr.read();
+        (*next.as_ptr()).prev = prev;
+        (*prev.as_ptr()).next = next;
+    }
+}
diff --git a/crates/buddy_allocator/src/tree.rs b/crates/buddy_allocator/src/tree.rs
new file mode 100644
index 0000000..a303d40
--- /dev/null
+++ b/crates/buddy_allocator/src/tree.rs
@@ -0,0 +1,130 @@
+/// A single region of the allocator. See the crate-level documentation for more information.
+///
+/// This type is valid when zero-initialized.
+#[derive(Debug)]
+pub struct Tree<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> {
+    /// The base address of the tree.
+    pub base: *const (),
+
+    /// The log2 of the number of pages in the region represented by the tree.
+    pub log2_page_count: usize,
+
+    /// The offset in the bitset to the bits responsible for this tree's pages.
+    bitset_offset: usize,
+
+    // /// The bitset used to track whether subregions are allocated or not in this tree.
+    // pub bitset: &'buddy mut BitVec,
+    phantom: core::marker::PhantomData<&'buddy mut [u8; PAGE_SIZE]>,
+}
+
+/*
+use crate::{
+    bitvec::BitVec,
+    stripe::{FreeListNode, Stripe},
+    MAX_ORDER, PAGE_SIZE_BITS,
+};
+use contracts::requires;
+use core::ptr::NonNull;
+
+/// A single region of the allocator. See the crate-level documentation for more information.
+pub struct Tree<'buddy> {
+    /// The base address of the tree.
+    pub base: *const (),
+
+    /// The log2 of the number of pages in the region represented by the tree.
+    pub log2_page_count: usize,
+
+    /// The array of sentinel nodes of the free-lists for this tree. As an invariant of the type,
+    /// there are no live references to any node in any list in this array, and there are
+    /// MAX_ORDER + 1 nodes.
+    pub free_lists: NonNull<FreeListNode>,
+
+    /// The bitset used to track whether subregions are allocated or not in this tree.
+    pub bitset: &'buddy mut BitVec,
+}
+
+impl<'buddy> Tree<'buddy> {
+    /// Tries to allocate a subregion with the given order, possibly splitting a larger subregion
+    /// in order to do so.
+    #[requires(order <= MAX_ORDER)]
+    pub fn alloc(&mut self, order: usize) -> Option<NonNull<FreeListNode>> {
+        if let Some(ptr) = self.stripe(order).pop() {
+            Some(ptr)
+        } else if order == MAX_ORDER {
+            None
+        } else {
+            // Get a larger region.
+            let ptr = self.alloc(order + 1)?;
+
+            // Get a pointer to the higher half.
+            //
+            // SAFETY: This has to be in-bounds, it's part of the same allocation!
+            let higher_half = unsafe { ptr.byte_add(1 << (PAGE_SIZE_BITS + order)) };
+
+            // Put the higher half in the current buddy's stripe.
+            //
+            // SAFETY: The higher half is from this region, not in the higher stripe, and of the
+            // right size.
+            unsafe {
+                self.stripe(order).push(higher_half);
+            }
+
+            // Return the pointer.
+            Some(ptr)
+        }
+    }
+
+    /// Returns the stripe with the given order.
+    #[requires(order <= MAX_ORDER)]
+    fn stripe<'stripe>(&'stripe mut self, order: usize) -> Stripe<'stripe> {
+        // TODO: There should be some smart bitwise-math version of this...
+        let mut bitset_offset = 0;
+        for i in 0..order {
+            bitset_offset += (1 << self.log2_page_count) >> i;
+        }
+
+        Stripe {
+            base: self.base,
+            order,
+            // SAFETY: order is constrained to be in-bounds.
+            free_list: unsafe { self.free_lists.add(order) },
+            // Explicitly reborrow the bitset so it isn't moved out from behind `self`.
+            bitset: &mut *self.bitset,
+            bitset_offset,
+        }
+    }
+}
+
+/// Evil bitwise version of the reasonable loop to compute the `bitset_offset` of a stripe.
+#[requires(log2_page_count < usize::BITS as usize)]
+#[requires(order < usize::BITS as usize)]
+fn compute_bitset_offset(log2_page_count: usize, order: usize) -> usize {
+    let ones = |i: usize| !(usize::MAX << i);
+
+    if order > log2_page_count + 1 {
+        ones(log2_page_count + 1)
+    } else {
+        ones(order).wrapping_shl((log2_page_count + 1 - order) as u32)
+    }
+}
+
+#[test]
+fn compute_bitset_offset_test() {
+    fn compute_bitset_offset_loop(log2_page_count: usize, order: usize) -> usize {
+        let mut bitset_offset = 0;
+        for i in 0..order {
+            bitset_offset += (1 << log2_page_count) >> i;
+        }
+        bitset_offset
+    }
+
+    for log2_page_count in 0..64 {
+        for order in 0..64 {
+            assert_eq!(
+                compute_bitset_offset(log2_page_count, order),
+                compute_bitset_offset_loop(log2_page_count, order),
+            );
+        }
+    }
+}
+*/