Diffstat (limited to 'kernel/src/allocators')
-rw-r--r--  kernel/src/allocators/buddy/bitvec.rs |  50
-rw-r--r--  kernel/src/allocators/buddy/mod.rs    |  49
-rw-r--r--  kernel/src/allocators/buddy/stripe.rs | 148
-rw-r--r--  kernel/src/allocators/buddy/tree.rs   | 112
-rw-r--r--  kernel/src/allocators/mod.rs          |   1
5 files changed, 360 insertions, 0 deletions
diff --git a/kernel/src/allocators/buddy/bitvec.rs b/kernel/src/allocators/buddy/bitvec.rs
new file mode 100644
index 0000000..c7f415a
--- /dev/null
+++ b/kernel/src/allocators/buddy/bitvec.rs
@@ -0,0 +1,50 @@
+use core::mem::transmute;
+
+use contracts::requires;
+
+/// A fixed-length vector of bits.
+pub struct BitVec([usize]);
+
+impl BitVec {
+    fn from_mut(words: &mut [usize]) -> &mut BitVec {
+        // SAFETY: The types have a newtype relationship.
+        unsafe { transmute(words) }
+    }
+
+    fn from_ref(words: &[usize]) -> &BitVec {
+        // SAFETY: The types have a newtype relationship.
+        unsafe { transmute(words) }
+    }
+
+    /// Retrieves the value of a bit from the BitVec.
+    #[requires(i < self.len())]
+    pub fn get(&self, i: usize) -> bool {
+        let word_index = i / usize::BITS as usize;
+        let subword_index = i % usize::BITS as usize;
+        let one_hot = 1 << subword_index;
+        (self.0[word_index] & one_hot) != 0
+    }
+
+    /// Returns whether the BitVec is empty.
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    /// Returns the number of bits in the BitVec.
+    pub fn len(&self) -> usize {
+        self.0.len() * usize::BITS as usize
+    }
+
+    /// Sets the value of a bit in the BitVec.
+    #[requires(i < self.len())]
+    pub fn set(&mut self, i: usize, value: bool) {
+        let word_index = i / usize::BITS as usize;
+        let subword_index = i % usize::BITS as usize;
+        let one_hot = 1 << subword_index;
+        if value {
+            self.0[word_index] |= one_hot;
+        } else {
+            self.0[word_index] &= !one_hot;
+        }
+    }
+}
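The word-and-bit indexing used by `BitVec::get` and `BitVec::set` is ordinary bit manipulation over machine words. A minimal standalone sketch of the same arithmetic, using a plain array rather than the kernel's `BitVec` (illustrative only, not part of this patch):

// Standalone sketch: the same word/bit indexing as BitVec::get/set,
// shown on a plain array of words.
fn main() {
    let mut words = [0usize; 4]; // room for 4 * usize::BITS bits
    let i = 70; // an arbitrary bit index

    // Set bit i: pick the word, then the bit within that word.
    let word_index = i / usize::BITS as usize;
    let subword_index = i % usize::BITS as usize;
    words[word_index] |= 1 << subword_index;

    // Read it back.
    assert_eq!((words[word_index] >> subword_index) & 1, 1);

    // Clear it again.
    words[word_index] &= !(1 << subword_index);
    assert_eq!(words, [0, 0, 0, 0]);
}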
diff --git a/kernel/src/allocators/buddy/mod.rs b/kernel/src/allocators/buddy/mod.rs
new file mode 100644
index 0000000..261d076
--- /dev/null
+++ b/kernel/src/allocators/buddy/mod.rs
@@ -0,0 +1,49 @@
+//! A buddy allocator, used to allocate pages.
+//!
+//! The allocator can be split into three abstractions: stripes, trees, and the allocator.
+//!
+//! TODO: See if there's standard terminology for these.
+//!
+//! ## Stripes
+//!
+//! The buddy allocator works in terms of size classes, which are power-of-two sized, starting at a
+//! single page and going up from there. Each stripe corresponds to a single size class and a
+//! particular region of memory.
+//!
+//! A stripe contains a circular doubly-linked free-list for subregions of that size class, and a
+//! bitset marking whether a particular region has been allocated or not. Being a circular
+//! doubly-linked list makes it cheap to remove an element whose address we know, as well as cheap
+//! to push and pop elements.
+//!
+//! It's efficient to go from the address of a subregion to the index of its corresponding bit, so
+//! when we hand out a subregion from the free-list or put one back, it's cheap to read or write
+//! its bit.
+//!
+//! ## Trees
+//!
+//! A tree is logically a collection of stripes, one per size class. To pack the structures more
+//! efficiently, they are stored interleaved with each other, and the tree manages this.
+//!
+//! The important logic at the level of trees happens when we allocate a subregion from a size
+//! class whose stripe's free-list is empty, and when we free subregions.
+//!
+//! When a stripe's free-list is empty, the tree instead allocates a subregion of a larger size
+//! from the next stripe. It can then store the unused portion in the current size class.
+//!
+//! The really important bit is the ability to merge subregions when they're freed. When we free a
+//! subregion of a certain size class, we can check whether its neighbor (its buddy) is unallocated
+//! as well. If so, we can remove it from the free-list by its address. We can combine the two
+//! subregions into one of the next larger size class, and then return that subregion to the next
+//! stripe.
+//!
+//! ## The buddy allocator
+//!
+//! Finally, the overall buddy allocator needs to be able to handle multiple memory regions. To
+//! facilitate this, the trees are stored in an array, which forms the overall allocator.
+
+mod bitvec;
+mod stripe;
+mod tree;
+
+/// The index of the largest size class; i.e., one less than the number of size classes.
+const MAX_ORDER: usize = 18;
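The splitting and merging described in the module comment is pure address arithmetic: within one region, the buddy of an order-n subregion is found by flipping a single bit of its offset from the region base. A standalone sketch, assuming 4 KiB pages (the constant and helper below are illustrative, not the kernel's):

// Standalone sketch: buddy arithmetic, assuming 4 KiB pages.
const PAGE_SIZE_BITS: usize = 12;

// The buddy of the order-`order` block at `addr` is its sibling inside the
// order-`order + 1` block that contains them both.
fn buddy_addr(base: usize, addr: usize, order: usize) -> usize {
    let offset = addr - base;
    base + (offset ^ (1 << (PAGE_SIZE_BITS + order)))
}

fn main() {
    let base = 0x8000_0000;
    // At order 0, the page at +0x1000 pairs with the page at +0x0000...
    assert_eq!(buddy_addr(base, base + 0x1000, 0), base);
    // ...and the merged order-1 block's buddy starts at +0x2000.
    assert_eq!(buddy_addr(base, base, 1), base + 0x2000);
}

When a subregion is freed, checking the stripe's bitset at this one computed address is what decides whether the pair can be merged into the next size class.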
diff --git a/kernel/src/allocators/buddy/stripe.rs b/kernel/src/allocators/buddy/stripe.rs
new file mode 100644
index 0000000..9ec5985
--- /dev/null
+++ b/kernel/src/allocators/buddy/stripe.rs
@@ -0,0 +1,148 @@
+use crate::allocators::{buddy::bitvec::BitVec, PAGE_SIZE_BITS};
+use core::ptr::NonNull;
+
+/// A single size class for a single region of the allocator. See the comment on the
+/// `crate::allocators::buddy` module for more information.
+pub struct Stripe<'buddy> {
+    /// The base address of the tree this stripe is a part of.
+    pub base: *const (),
+
+    /// The order of the stripe. Order `n` means the subregions are `2ⁿ` pages in size.
+    pub order: usize,
+
+    /// The sentinel node of the free-list for this stripe. As an invariant of the type, there are
+    /// no live references to any node in this list.
+    pub free_list: NonNull<FreeListNode>,
+
+    /// The bitset used to track whether a given subregion is allocated or not. A `true` bit
+    /// corresponds to an allocated subregion.
+    pub bitset: &'buddy mut BitVec,
+
+    /// The offset from the start of the bitset to the region used by this stripe.
+    pub bitset_offset: usize,
+}
+
+impl<'buddy> Stripe<'buddy> {
+    /// Returns the buddy of the given pointer.
+    ///
+    /// # Safety
+    ///
+    /// - The pointer must actually be part of this region.
+    unsafe fn buddy_of(&self, ptr: NonNull<FreeListNode>) -> NonNull<FreeListNode> {
+        let index = self.buddy_of_index(self.index_of(ptr));
+        let addr = self.base as usize + (index << (PAGE_SIZE_BITS + self.order));
+        NonNull::new_unchecked(addr as *mut FreeListNode)
+    }
+
+    /// Returns the buddy of the given index.
+    fn buddy_of_index(&self, index: usize) -> usize {
+        index ^ 1
+    }
+
+    /// Returns the index the given pointer should have in the BitVec.
+    fn index_of(&self, ptr: NonNull<FreeListNode>) -> usize {
+        (ptr.as_ptr() as usize - self.base as usize) >> (PAGE_SIZE_BITS + self.order)
+    }
+
+    /// Pops a subregion from the free-list.
+    pub fn pop(&mut self) -> Option<NonNull<FreeListNode>> {
+        // SAFETY: The `free_list` is guaranteed to be valid by the invariants of the buddy
+        // allocator. Retrieving the next pointer doesn't, on its own, break aliasing rules.
+        let next = unsafe { self.free_list.read().next };
+
+        // If the sentinel and the next pointer refer to the same spot, the free-list was empty, so
+        // we can't pop from it.
+        if self.free_list == next {
+            return None;
+        }
+
+        // Otherwise, remove the node from the free-list.
+        unsafe {
+            FreeListNode::remove(next);
+        }
+
+        // Finally, mark the node as allocated in the bitvec.
+        let index = self.index_of(next);
+        assert!(!self.bitset.get(self.bitset_offset + index));
+        self.bitset.set(self.bitset_offset + index, true);
+
+        Some(next)
+    }
+
+    /// Pushes a subregion back into the free-list.
+    ///
+    /// # Safety
+    ///
+    /// - There must be no live references to `subregion`.
+    /// - `subregion` must not be a member of any list.
+    pub unsafe fn push(&mut self, subregion: NonNull<FreeListNode>) {
+        // Insert the subregion as the first element of the free-list.
+        //
+        // SAFETY: The free-list is guaranteed to be valid by the invariants of the buddy
+        // allocator.
+        unsafe {
+            FreeListNode::insert(self.free_list, subregion);
+        }
+
+        // Mark the node as unallocated in the bitvec.
+        let index = self.index_of(subregion);
+        assert!(self.bitset.get(self.bitset_offset + index));
+        self.bitset.set(self.bitset_offset + index, false);
+    }
+
+    /// Pushes a subregion into the free-list for the first time.
+    ///
+    /// # Safety
+    ///
+    /// - There must be no live references to `subregion`.
+    /// - `subregion` must not be a member of any list.
+    pub unsafe fn push_initial(&mut self, subregion: NonNull<FreeListNode>) {
+        // Insert the subregion as the first element of the free-list.
+        //
+        // SAFETY: The free-list is guaranteed to be valid by the invariants of the buddy
+        // allocator.
+        unsafe {
+            FreeListNode::insert(self.free_list, subregion);
+        }
+
+        // Mark the node as unallocated in the bitvec.
+        let index = self.index_of(subregion);
+        assert!(self.bitset.get(self.bitset_offset + index));
+        self.bitset.set(self.bitset_offset + index, false);
+    }
+}
+
+pub struct FreeListNode {
+    next: NonNull<FreeListNode>,
+    prev: NonNull<FreeListNode>,
+}
+
+impl FreeListNode {
+    /// Inserts `new` after `prev`, initializing it. `prev` may be the sentinel.
+    ///
+    /// # Safety
+    ///
+    /// - There must be no live references to any node in the list that includes `prev`, including
+    ///   the sentinel.
+    /// - There must be no live references to `new`.
+    /// - `new` must not be a member of any list.
+    pub unsafe fn insert(prev: NonNull<FreeListNode>, new: NonNull<FreeListNode>) {
+        let next = prev.read().next;
+        (*prev.as_ptr()).next = new;
+        (*next.as_ptr()).prev = new;
+        new.write(FreeListNode { next, prev });
+    }
+
+    /// Removes this node from the free list it is a part of.
+    ///
+    /// # Safety
+    ///
+    /// - The pointer must point to a node that is part of a free list.
+    /// - There must be no live references to any node in the list, including the sentinel.
+    /// - The pointer must not point to the sentinel node.
+    pub unsafe fn remove(ptr: NonNull<FreeListNode>) {
+        let FreeListNode { next, prev } = ptr.read();
+        (*next.as_ptr()).prev = prev;
+        (*prev.as_ptr()).next = next;
+    }
+}
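Because `Stripe` and `FreeListNode` work through raw pointers, the shape of the free-list can be hard to see. Below is a simplified sketch of the same sentinel-based circular doubly-linked list in safe Rust, using indices into an array instead of the kernel's `NonNull` pointers (illustrative only): the sentinel links to itself when the list is empty, pushes go right after the sentinel, and any node can be unlinked in constant time once its position is known.

// Standalone sketch: a sentinel-based circular doubly-linked list over
// array indices. Index 0 plays the role of the sentinel node.
struct Node {
    next: usize,
    prev: usize,
}

// Insert `new` immediately after `prev` (which may be the sentinel).
fn insert_after(nodes: &mut [Node], prev: usize, new: usize) {
    let next = nodes[prev].next;
    nodes[prev].next = new;
    nodes[next].prev = new;
    nodes[new] = Node { next, prev };
}

// Unlink node `i` from whatever list it is in.
fn remove(nodes: &mut [Node], i: usize) {
    let next = nodes[i].next;
    let prev = nodes[i].prev;
    nodes[next].prev = prev;
    nodes[prev].next = next;
}

fn main() {
    // A sentinel (self-linked, i.e. an empty list) plus two loose nodes.
    let mut nodes = [
        Node { next: 0, prev: 0 },
        Node { next: 1, prev: 1 },
        Node { next: 2, prev: 2 },
    ];
    insert_after(&mut nodes, 0, 1);
    insert_after(&mut nodes, 0, 2);

    // Pop takes the node right after the sentinel, like Stripe::pop.
    let popped = nodes[0].next;
    remove(&mut nodes, popped);
    assert_eq!(popped, 2);
    assert_eq!(nodes[0].next, 1);

    // Removing the last node leaves the sentinel self-linked again.
    remove(&mut nodes, 1);
    assert_eq!((nodes[0].next, nodes[0].prev), (0, 0));
}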
diff --git a/kernel/src/allocators/buddy/tree.rs b/kernel/src/allocators/buddy/tree.rs
new file mode 100644
index 0000000..3953f39
--- /dev/null
+++ b/kernel/src/allocators/buddy/tree.rs
@@ -0,0 +1,112 @@
+use crate::allocators::{
+    buddy::{
+        bitvec::BitVec,
+        stripe::{FreeListNode, Stripe},
+        MAX_ORDER,
+    },
+    PAGE_SIZE_BITS,
+};
+use contracts::requires;
+use core::ptr::NonNull;
+
+/// A single region of the allocator. See the comment on the `crate::allocators::buddy` module for
+/// more information.
+pub struct Tree<'buddy> {
+    /// The base address of the tree.
+    pub base: *const (),
+
+    /// The log2 of the number of pages in the region represented by the tree.
+    pub log2_page_count: usize,
+
+    /// The array of sentinel nodes of the free-lists for this tree. As an invariant of the type,
+    /// there are no live references to any node in any list in this array, and there are
+    /// MAX_ORDER + 1 nodes.
+    pub free_lists: NonNull<FreeListNode>,
+
+    /// The bitset used to track whether subregions are allocated or not in this tree.
+    pub bitset: &'buddy mut BitVec,
+}
+
+impl<'buddy> Tree<'buddy> {
+    /// Tries to allocate a subregion with the given order, possibly splitting a larger subregion
+    /// in order to do so.
+    #[requires(order <= MAX_ORDER)]
+    pub fn alloc(&mut self, order: usize) -> Option<NonNull<FreeListNode>> {
+        if let Some(ptr) = self.stripe(order).pop() {
+            Some(ptr)
+        } else if order == MAX_ORDER {
+            None
+        } else {
+            // Get a larger region.
+            let ptr = self.alloc(order + 1)?;
+
+            // Get a pointer to the higher half.
+            //
+            // SAFETY: This has to be in-bounds, it's part of the same allocation!
+            let higher_half = unsafe { ptr.byte_add(1 << (PAGE_SIZE_BITS + order)) };
+
+            // Put the higher half in the current order's stripe.
+            //
+            // SAFETY: The higher half is from this region, not in the higher stripe, and of the
+            // right size.
+            unsafe {
+                self.stripe(order).push(higher_half);
+            }
+
+            // Return the pointer.
+            Some(ptr)
+        }
+    }
+
+    /// Returns the stripe with the given order.
+    #[requires(order <= MAX_ORDER)]
+    fn stripe<'stripe>(&'stripe mut self, order: usize) -> Stripe<'stripe> {
+        // TODO: There should be some smart bitwise-math version of this...
+        let mut bitset_offset = 0;
+        for i in 0..order {
+            bitset_offset += (1 << self.log2_page_count) >> i;
+        }
+
+        Stripe {
+            base: self.base,
+            order,
+            // SAFETY: order is constrained to be in-bounds.
+            free_list: unsafe { self.free_lists.add(order) },
+            bitset: &mut *self.bitset,
+            bitset_offset,
+        }
+    }
+}
+
+/// Evil bitwise version of the reasonable loop to compute the `bitset_offset` of a stripe.
+#[requires(log2_page_count < usize::BITS as usize)]
+#[requires(order < usize::BITS as usize)]
+fn compute_bitset_offset(log2_page_count: usize, order: usize) -> usize {
+    let ones = |i: usize| !(usize::MAX << i);
+
+    if order > log2_page_count + 1 {
+        ones(log2_page_count + 1)
+    } else {
+        ones(order).wrapping_shl((log2_page_count + 1 - order) as u32)
+    }
+}
+
+#[test]
+fn compute_bitset_offset_test() {
+    fn compute_bitset_offset_loop(log2_page_count: usize, order: usize) -> usize {
+        let mut bitset_offset = 0;
+        for i in 0..order {
+            bitset_offset += (1 << log2_page_count) >> i;
+        }
+        bitset_offset
+    }
+
+    for log2_page_count in 0..64 {
+        for order in 0..64 {
+            assert_eq!(
+                compute_bitset_offset(log2_page_count, order),
+                compute_bitset_offset_loop(log2_page_count, order),
+            );
+        }
+    }
+}
diff --git a/kernel/src/allocators/mod.rs b/kernel/src/allocators/mod.rs
index 76d85e6..49f29e2 100644
--- a/kernel/src/allocators/mod.rs
+++ b/kernel/src/allocators/mod.rs
@@ -1,5 +1,6 @@
 use core::{ffi::c_void, ops::Range, ptr::addr_of};
 
+pub mod buddy;
 pub mod physical_memory_free_list;
 
 /// The number of bits in the offset in a page.
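To make the interleaved bitset layout that `Tree::stripe` (in tree.rs above) indexes into concrete: each order's stripe gets half as many bits as the one below it, packed back to back in a single bit vector. A standalone sketch for a toy tree of 2^3 = 8 pages (not part of the patch):

// Standalone sketch: per-order bitset offsets for a tree of 8 pages.
fn bitset_offset(log2_page_count: usize, order: usize) -> usize {
    (0..order).map(|i| (1usize << log2_page_count) >> i).sum()
}

fn main() {
    let log2_page_count = 3;
    // order 0: 8 bits at offset 0, order 1: 4 bits at 8,
    // order 2: 2 bits at 12, order 3: 1 bit at 14.
    assert_eq!(bitset_offset(log2_page_count, 0), 0);
    assert_eq!(bitset_offset(log2_page_count, 1), 8);
    assert_eq!(bitset_offset(log2_page_count, 2), 12);
    assert_eq!(bitset_offset(log2_page_count, 3), 14);

    // The closed form in tree.rs gives the same layout:
    // ones(order) shifted left by (log2_page_count + 1 - order).
    let ones = |i: usize| !(usize::MAX << i);
    assert_eq!(ones(2) << (log2_page_count + 1 - 2), 12);
}

This geometric layout is exactly what compute_bitset_offset_test verifies against the loop version.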