Diffstat (limited to 'kernel/src/allocators/buddy/tree.rs')
-rw-r--r--  kernel/src/allocators/buddy/tree.rs  112
1 file changed, 112 insertions, 0 deletions
diff --git a/kernel/src/allocators/buddy/tree.rs b/kernel/src/allocators/buddy/tree.rs
new file mode 100644
index 0000000..3953f39
--- /dev/null
+++ b/kernel/src/allocators/buddy/tree.rs
@@ -0,0 +1,112 @@
+use crate::allocators::{
+    buddy::{
+        bitvec::BitVec,
+        stripe::{FreeListNode, Stripe},
+        MAX_ORDER,
+    },
+    PAGE_SIZE_BITS,
+};
+use contracts::requires;
+use core::ptr::NonNull;
+
+/// A single region of the allocator. See the comment on the `crate::allocators::buddy` module for
+/// more information.
+pub struct Tree<'buddy> {
+    /// The base address of the tree.
+    pub base: *const (),
+
+    /// The log2 of the number of pages in the region represented by the tree.
+    pub log2_page_count: usize,
+
+    /// The array of sentinel nodes of the free lists for this tree. As an invariant of the type,
+    /// there are no live references to any node in any list in this array, and the array
+    /// contains MAX_ORDER + 1 sentinel nodes.
+    pub free_lists: NonNull<FreeListNode>,
+
+    /// The bitset used to track whether subregions are allocated or not in this tree.
+    pub bitset: &'buddy mut BitVec,
+}
+
+impl<'buddy> Tree<'buddy> {
+    /// Tries to allocate a subregion with the given order, possibly splitting a larger subregion
+    /// in order to do so.
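+    ///
+    /// For example, if the order-0 list is empty but an order-2 subregion is free, `alloc(0)`
+    /// recurses up to order 2, pops that subregion, pushes its upper order-1 half onto the
+    /// order-1 list on the way back down, then pushes the upper order-0 half of what remains
+    /// onto the order-0 list, and finally returns the lowest order-0 subregion.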
+    #[requires(order <= MAX_ORDER)]
+    pub fn alloc(&mut self, order: usize) -> Option<NonNull<FreeListNode>> {
+        if let Some(ptr) = self.stripe(order).pop() {
+            Some(ptr)
+        } else if order == MAX_ORDER {
+            None
+        } else {
+            // Get a larger region.
+            let ptr = self.alloc(order + 1)?;
+
+            // Get a pointer to the higher half.
+            //
+            // SAFETY: This has to be in-bounds, it's part of the same allocation!
+            let higher_half = unsafe { ptr.byte_add(1 << (PAGE_SIZE_BITS + order)) };
+
+            // Put the higher half in the current buddy's stripe.
+            //
+            // SAFETY: The higher half is from this region, not in the higher stripe, and of the
+            // right size.
+            unsafe {
+                self.stripe(order).push(higher_half);
+            }
+
+            // Return the pointer.
+            Some(ptr)
+        }
+    }
+
+    /// Returns the stripe with the given order.
+    #[requires(order <= MAX_ORDER)]
+    fn stripe<'stripe>(&'stripe mut self, order: usize) -> Stripe<'stripe> {
+        // TODO: There should be some smart bitwise-math version of this; see
+        // `compute_bitset_offset` below.
+        let mut bitset_offset = 0;
+        for i in 0..order {
+            bitset_offset += (1 << self.log2_page_count) >> i;
+        }
+
+        Stripe {
+            base: self.base,
+            order,
+            // SAFETY: order is constrained to be in-bounds.
+            free_list: unsafe { self.free_lists.add(order) },
+            bitset: self.bitset,
+            bitset_offset,
+        }
+    }
+}
+
+/// Evil bitwise version of the reasonable loop to compute the `bitset_offset` of a stripe.
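+///
+/// The loop in `stripe` sums `(1 << log2_page_count) >> i` for `i` in `0..order`. For
+/// `order <= log2_page_count + 1` this telescopes to
+/// `2^(log2_page_count + 1) - 2^(log2_page_count + 1 - order)`, which is `order` one-bits
+/// shifted left by `log2_page_count + 1 - order`. For larger orders the remaining terms are
+/// zero, so the sum saturates at `2^(log2_page_count + 1) - 1`.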
+#[requires(log2_page_count < usize::BITS as usize)]
+#[requires(order < usize::BITS as usize)]
+fn compute_bitset_offset(log2_page_count: usize, order: usize) -> usize {
+    let ones = |i: usize| !(usize::MAX << i);
+
+    if order > log2_page_count + 1 {
+        ones(log2_page_count + 1)
+    } else {
+        ones(order).wrapping_shl((log2_page_count + 1 - order) as u32)
+    }
+}
+
+#[test]
+fn compute_bitset_offset_test() {
+    fn compute_bitset_offset_loop(log2_page_count: usize, order: usize) -> usize {
+        let mut bitset_offset = 0;
+        for i in 0..order {
+            bitset_offset += (1 << log2_page_count) >> i;
+        }
+        bitset_offset
+    }
+
+    for log2_page_count in 0..64 {
+        for order in 0..64 {
+            assert_eq!(
+                compute_bitset_offset(log2_page_count, order),
+                compute_bitset_offset_loop(log2_page_count, order),
+            );
+        }
+    }
+}
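
As a concrete picture of the bitset layout these offsets describe, here is a hedged sketch of a companion test (the name `bitset_offset_layout_example` is hypothetical and not part of this commit; it only calls the `compute_bitset_offset` added above): for a tree of 2^3 = 8 pages, the order-0 stripe starts at bit 0, order 1 at bit 8, order 2 at bit 12, order 3 at bit 14, and every larger order saturates at 15.

#[test]
fn bitset_offset_layout_example() {
    // Hypothetical layout check for a tree of 2^3 = 8 pages: each stripe's bits start right
    // after the bits of the stripe below it, and the offset saturates past the top order.
    assert_eq!(compute_bitset_offset(3, 0), 0); // 8 order-0 bits start here
    assert_eq!(compute_bitset_offset(3, 1), 8); // 4 order-1 bits start here
    assert_eq!(compute_bitset_offset(3, 2), 12); // 2 order-2 bits start here
    assert_eq!(compute_bitset_offset(3, 3), 14); // 1 order-3 bit starts here
    assert_eq!(compute_bitset_offset(3, 4), 15); // 8 + 4 + 2 + 1 bits in total
    assert_eq!(compute_bitset_offset(3, 5), 15); // saturates for orders past the top
}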