Diffstat (limited to 'crates')
-rw-r--r--  crates/alloc_genmalloc/src/lib.rs  261
-rw-r--r--  crates/kernel/src/alloc.rs           2
2 files changed, 229 insertions, 34 deletions
diff --git a/crates/alloc_genmalloc/src/lib.rs b/crates/alloc_genmalloc/src/lib.rs
index bb024a8..645754c 100644
--- a/crates/alloc_genmalloc/src/lib.rs
+++ b/crates/alloc_genmalloc/src/lib.rs
@@ -49,13 +49,18 @@ struct HeapInner<OS: OSServices> {
/// to and including 512KiB, plus one more for objects over 512KiB (huge objects).
pages: [PageLink; LARGEST_LARGE_OBJECT_SIZE_CLASS + 2],
+ /// A small, medium, or large segment that is kept in reserve rather than freed when none of
+ /// its pages are in use.
+ free_segment_last_chance: Option<NonNull<Segment>>,
+
/// The sentinel node for segments that store small or medium objects that have free pages.
small_medium_segments_free: SegmentLink,
/// The sentinel node for segments that store small or medium objects that have no free pages.
small_medium_segments_full: SegmentLink,
- /// The sentinel node for segments that store large or huge objects.
+ /// The sentinel node for segments that store large or huge objects, i.e. segments that only
+ /// contain a single page.
large_huge_segments: SegmentLink,
_phantom: PhantomData<OS>,
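
The segment lists referenced above are circular doubly-linked lists threaded through the segments themselves; an empty list is just a self-linked sentinel, which is why the initialization code below self-links the pointers. A minimal sketch of that pattern (the Link type and add_next here are illustrative stand-ins, not the crate's actual SegmentLink API):

    use core::ptr::NonNull;

    struct Link {
        prev: NonNull<Link>,
        next: NonNull<Link>,
    }

    impl Link {
        /// Splices `node` in immediately after `list` (typically a sentinel).
        ///
        /// # Safety
        /// Both pointers must point to nodes of well-formed circular lists.
        unsafe fn add_next(list: NonNull<Link>, node: NonNull<Link>) {
            let next = (*list.as_ptr()).next;
            (*node.as_ptr()).prev = list;
            (*node.as_ptr()).next = next;
            (*list.as_ptr()).next = node;
            (*next.as_ptr()).prev = node;
        }
    }
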
@@ -142,6 +147,11 @@ impl<OS: OSServices> Heap<OS> {
})
}
}
+ // SAFETY: For the `maybe_uninit` to have been valid, this must be sound.
+ unsafe {
+ let ptr = addr_of_mut!((*inner_ptr).free_segment_last_chance);
+ ptr.write(None);
+ }
// SAFETY: For the `maybe_uninit` to have been valid, getting these pointers must be sound.
// Self-linking the pointers is also sound as a result.
unsafe {
@@ -157,37 +167,22 @@ impl<OS: OSServices> Heap<OS> {
}
}
- /// Donates a segment to be used for small or medium objects.
+ /// Donates a segment to be used for small, medium, or large objects. This should only be
+ /// called very early in the allocator's lifetime, since it will panic if there's already a
+ /// segment "kept in reserve."
///
/// # Safety
///
/// - `ptr` must convey ownership over the entire region.
/// - `thread_id` must be correct.
- pub unsafe fn donate_small_medium_segment(&self, ptr: NonNull<[u8; NON_HUGE_SEGMENT_SIZE]>) {
- self.0.borrow_mut().add_small_medium_segment(ptr)
+ pub unsafe fn donate_segment(&self, ptr: NonNull<[u8; NON_HUGE_SEGMENT_SIZE]>) {
+ let mut inner = self.0.borrow_mut();
+ assert!(inner.free_segment_last_chance.is_none());
+ inner.free_segment_last_chance = Some(ptr.cast());
}
}
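
A sketch of the intended call pattern for the renamed donate_segment, mirroring the kernel call site at the end of this diff (the heap and bootstrap_segment bindings are illustrative):

    // SAFETY: `ptr` conveys ownership over the whole region, and the thread ID is correct.
    unsafe {
        heap.donate_segment(
            NonNull::new(bootstrap_segment as *mut [u8; NON_HUGE_SEGMENT_SIZE]).unwrap(),
        );
    }
    // Donating a second segment before the reserve is consumed would trip the assert above.
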
impl<OS: OSServices> HeapInner<OS> {
- /// Initializes a segment for small or medium objects, adding it to this heap.
- unsafe fn add_small_medium_segment(&mut self, ptr: NonNull<[u8; NON_HUGE_SEGMENT_SIZE]>) {
- // Initialize the segment to be self-linked.
- let maybe_uninit: &mut MaybeUninit<Segment> = ptr.cast().as_mut();
- maybe_uninit.write(Segment {
- link: SegmentLink {
- prev: ptr.cast(),
- next: ptr.cast(),
- },
- header: SegmentHeader {
- thread_id: OS::current_thread_id(),
- page_shift: SMALL_MEDIUM_PAGE_SIZE_BITS,
- },
- });
-
- // Add the segment to the list of segments with free pages.
- SegmentLink::add_next(NonNull::from(&self.small_medium_segments_free), ptr.cast());
- }
-
/// Allocates small blocks of naturally-aligned memory.
#[requires(size > 0)]
#[requires(size <= LARGEST_SMALL_OBJECT)]
@@ -202,10 +197,7 @@ impl<OS: OSServices> HeapInner<OS> {
let next = unsafe { block.as_ref() }.next.load(Ordering::SeqCst);
page.header.alloc_list = NonNull::new(next);
page.header.used.fetch_add(1, Ordering::SeqCst);
- // SAFETY: `block` is a `NonNull`, so the slice pointer will non-null.
- Ok(unsafe {
- NonNull::new_unchecked(slice_from_raw_parts_mut(block.as_ptr().cast(), size))
- })
+ Ok(NonNull::slice_from_raw_parts(block.cast(), size))
}
None => self.alloc_generic(size, size),
}
@@ -213,18 +205,175 @@ impl<OS: OSServices> HeapInner<OS> {
/// The slow-path of allocation. Allocates a block of any size and alignment, and does some
/// book-keeping tasks along the way.
+ #[requires(align < NON_HUGE_SEGMENT_SIZE)]
fn alloc_generic(&mut self, size: usize, align: usize) -> Result<NonNull<[u8]>, AllocError> {
- /*
+ // TODO: Separate path for huge objects?
+ // TODO: Handle alignment.
+
+ // Find a page with free blocks.
let size_class = size_class(size);
- let sentinel: NonNull<PageLink> = NonNull::from(&self.pages[size_class]);
+ let log2_size = size.next_power_of_two().trailing_zeros();
+ let page = self.find_page_with_free_blocks(log2_size, size_class)?;
+
+ // Pop a block off the list.
+ //
+ // SAFETY: find_page_with_free_blocks always returns a valid page.
+ // UNWRAP: find_page_with_free_blocks ensures there's a block.
+ let block = unsafe { (*page.as_ptr()).header.alloc_list.take().unwrap() };
+ // SAFETY: find_page_with_free_blocks always returns a valid page, and valid pages have
+ // initialized free lists.
+ let next = unsafe { (*block.as_ptr()).next.load(Ordering::Relaxed) };
+ // SAFETY: find_page_with_free_blocks always returns a valid page.
+ unsafe { (*page.as_ptr()).header.alloc_list = NonNull::new(next) };
+
+ // Return the block.
+ Ok(NonNull::slice_from_raw_parts(block.cast(), size))
+ }
+
+ /// Allocates a new segment for small, medium, or large objects.
+ fn alloc_segment(&mut self, large: bool) -> Result<NonNull<Segment>, AllocError> {
+ // If there's a "last chance" segment, grab it.
+ let segment = if let Some(segment) = self.free_segment_last_chance.take() {
+ segment
+ } else {
+ // Allocate a new segment.
+ todo!("{self:p}.alloc_segment({large})")
+ };
+
+ // Initialize the segment to be self-linked.
+ //
+ // SAFETY: As an invariant of the Heap type, any last-chance segment is valid for small,
+ // medium, or large objects; a freshly allocated segment is likewise valid.
+ let maybe_uninit: &mut MaybeUninit<Segment> = unsafe { segment.cast().as_mut() };
+ maybe_uninit.write(Segment {
+ link: SegmentLink {
+ prev: segment.cast(),
+ next: segment.cast(),
+ },
+ header: SegmentHeader {
+ thread_id: OS::current_thread_id(),
+ free_pages_bitmap: if large {
+ 0x0000_0000_0000_0001
+ } else {
+ 0xffff_ffff_ffff_ffff
+ },
+ page_count: if large { 1 } else { 64 },
+ page_shift: if large {
+ NON_HUGE_SEGMENT_SIZE_BITS as u32
+ } else {
+ SMALL_MEDIUM_PAGE_SIZE_BITS
+ },
+ },
+ });
+
+ // Add the segment to the list of segments with free pages.
+ //
+ // SAFETY: The segment pointer is valid: it came either from the last-chance reserve or
+ // from a fresh allocation, and its link was just initialized above.
+ unsafe {
+ SegmentLink::add_next(
+ NonNull::from(&self.small_medium_segments_free),
+ NonNull::from(&(*segment.as_ptr()).link),
+ );
+ }
+
+ Ok(segment)
+ }
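
The page-claiming step in find_page_with_free_blocks below relies on the convention initialized here: a set bit in free_pages_bitmap means the page is free. A standalone sketch of that find-and-claim step (an illustrative helper, not part of the crate):

    /// Claims the lowest free (set) bit and returns its index, or None if nothing is free.
    fn claim_first_free(bitmap: &mut u64) -> Option<usize> {
        if *bitmap == 0 {
            return None;
        }
        let i = bitmap.trailing_zeros() as usize;
        *bitmap &= !(1 << i); // clear the bit: that page is now allocated
        Some(i)
    }

    fn main() {
        let mut map = 0b1100u64;
        assert_eq!(claim_first_free(&mut map), Some(2));
        assert_eq!(map, 0b1000);
    }
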
+
+ /// Finds a page that has free blocks, making blocks available in an existing page where
+ /// possible, or else allocating a new page in an existing or new segment. This does not hand
+ /// out a block itself; the caller pops one from the page's alloc list.
+ fn find_page_with_free_blocks(
+ &mut self,
+ log2_size: u32,
+ size_class: usize,
+ ) -> Result<NonNull<Page>, AllocError> {
+ let sentinel = NonNull::from(&self.pages[size_class]);
let mut page = sentinel;
loop {
page = unsafe { (*page.as_ptr()).next };
+ if page == sentinel {
+ break;
+ }
+
+ // Check the page for free blocks.
+ todo!("{self:p}.find_page_with_free_blocks({log2_size}, {size_class})")
+ }
+
+ // There was no page with free blocks, so allocate one.
+ let mut segment_ptr = self.find_segment_with_free_pages()?;
+ // SAFETY: find_segment_with_free_pages always returns a valid segment.
+ let segment = unsafe { segment_ptr.as_mut() };
+ assert_ne!(segment.header.free_pages_bitmap, 0);
+ let i = segment.header.free_pages_bitmap.trailing_zeros() as usize;
+
+ // Mark the page as allocated.
+ segment.header.free_pages_bitmap &= !(1 << i);
+ // SAFETY: find_segment_with_free_pages always returns a valid segment.
+ let page_ptr = unsafe { Segment::page_ptr(segment_ptr, i) };
- todo!("{page:#?}")
+ // Get the range in which to put the allocated blocks.
+ //
+ // SAFETY: find_segment_with_free_pages always returns a valid segment.
+ let page_blocks_ptr = unsafe { Segment::page_blocks_ptr(segment_ptr, i) };
+ let base_ptr: NonNull<u8> = page_blocks_ptr.cast();
+ let align_offset = base_ptr.align_offset(1 << log2_size);
+ // SAFETY: A valid segment has to be in-bounds.
+ let mut alloc_ptr: NonNull<u8> = unsafe { base_ptr.add(align_offset) };
+ // SAFETY: A valid segment has to be in-bounds.
+ let end_ptr: NonNull<u8> = unsafe { base_ptr.add(page_blocks_ptr.len()) };
+
+ // Build the alloc list.
+ let mut alloc_list: *mut Block = null_mut();
+ while alloc_ptr != end_ptr {
+ let ptr: *mut Block = alloc_ptr.cast().as_ptr();
+ // SAFETY: A valid segment has to be in-bounds.
+ alloc_ptr = unsafe { alloc_ptr.add(1 << log2_size) };
+
+ // Initialize the block.
+ //
+ // SAFETY: A valid segment has to have valid blocks.
+ unsafe { (*ptr).next = AtomicPtr::new(alloc_list) };
+ alloc_list = ptr;
}
- */
- todo!("{:p}.alloc_generic({size}, {align})", self)
+
+ // Initialize the page to be self-linked.
+ //
+ // SAFETY: page_ptr guarantees that the page is valid.
+ let maybe_uninit: &mut MaybeUninit<Page> = unsafe { page_ptr.cast().as_mut() };
+ maybe_uninit.write(Page {
+ link: PageLink {
+ prev: page_ptr.cast(),
+ next: page_ptr.cast(),
+ },
+ header: PageHeader {
+ alloc_list: NonNull::new(alloc_list),
+ local_free_list: None,
+ thread_free_list: AtomicPtr::new(null_mut()),
+ used: AtomicU16::new(0),
+ thread_freed: AtomicU16::new(0),
+ },
+ });
+
+ Ok(page_ptr)
+ }
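
Because the block list above starts from a base aligned to 1 << log2_size and advances in strides of the same amount, every block is naturally aligned to the power-of-two-rounded block size. A small illustration with made-up numbers rather than the crate's real constants:

    fn main() {
        // Illustrative only: 32-byte blocks carved out of a 4 KiB region.
        let log2_size = 5usize;
        let stride = 1usize << log2_size;
        let base = 0x1234usize; // pretend (unaligned) start of the page's block area
        let end = base + 0x1000;

        let aligned_base = (base + stride - 1) & !(stride - 1);
        let mut addr = aligned_base;
        while addr + stride <= end {
            assert_eq!(addr % stride, 0); // every block starts at a multiple of its size
            addr += stride;
        }
    }
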
+
+ /// Finds a segment in which a page for small or medium objects can be allocated.
+ fn find_segment_with_free_pages(&mut self) -> Result<NonNull<Segment>, AllocError> {
+ let sentinel = NonNull::from(&self.small_medium_segments_free);
+ let mut segment = sentinel;
+ loop {
+ segment = unsafe { (*segment.as_ptr()).next };
+ if segment == sentinel {
+ break;
+ }
+
+ // Check the segment for free pages.
+ todo!("{self:p}.find_segment_with_free_pages()")
+ }
+
+ // There was no segment with free pages, so allocate one.
+ self.alloc_segment(false)
}
}
@@ -307,6 +456,44 @@ struct Segment {
header: SegmentHeader,
}
+impl Segment {
+ /// Converts a pointer to a segment into a pointer to the page with the given index. The
+ /// returned pointer is guaranteed to be valid.
+ ///
+ /// # Safety
+ ///
+ /// - The segment pointer must be valid.
+ unsafe fn page_ptr(ptr: NonNull<Segment>, i: usize) -> NonNull<Page> {
+ let page_count = (*ptr.as_ptr()).header.page_count as usize;
+ assert!(i < page_count);
+ ptr.offset(1).cast().add(i)
+ }
+
+ /// Converts a pointer to a segment into a pointer to the block data of the page with the
+ /// given index. The returned pointer is guaranteed to be valid.
+ ///
+ /// # Safety
+ ///
+ /// - The segment pointer must be valid.
+ unsafe fn page_blocks_ptr(ptr: NonNull<Segment>, i: usize) -> NonNull<[u8]> {
+ let page_count = (*ptr.as_ptr()).header.page_count as usize;
+ let page_shift = (*ptr.as_ptr()).header.page_shift;
+ assert!(i < page_count);
+
+ let end_of_pages_ptr: NonNull<Page> = ptr.offset(1).cast().add(page_count);
+ let headers_len = end_of_pages_ptr.as_ptr() as usize - ptr.as_ptr() as usize;
+
+ let offset = i << page_shift;
+ let len = 1 << page_shift;
+ let (offset, len) = if i == 0 {
+ (offset + headers_len, len - headers_len)
+ } else {
+ (offset, len)
+ };
+ NonNull::slice_from_raw_parts(ptr.cast().byte_add(offset), len)
+ }
+}
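
A worked example of the layout page_ptr and page_blocks_ptr assume, using illustrative sizes rather than the crate's real constants: a segment of 64 pages with page_shift = 16 (64 KiB pages), where the Segment header plus the 64 Page headers are taken to occupy 4 KiB at the front, so page 0's block area is 4 KiB shorter than the others:

    fn main() {
        // Illustrative sizes only, not NON_HUGE_SEGMENT_SIZE or the real page size.
        let page_shift = 16u32;
        let page_count = 64usize;
        let headers_len = 4096usize; // assumed size of the Segment plus 64 Page headers

        for i in 0..page_count {
            let offset = i << page_shift;
            let len = 1usize << page_shift;
            let (offset, len) = if i == 0 {
                // Page 0's block area starts after the headers and is that much smaller.
                (offset + headers_len, len - headers_len)
            } else {
                (offset, len)
            };
            // Every page's block area ends exactly at the next page boundary.
            assert_eq!(offset + len, (i + 1) << page_shift);
        }
    }
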
+
/// The intrusive link in the circular doubly-linked list of pages of each size class.
#[derive(Debug)]
struct SegmentLink {
@@ -339,12 +526,19 @@ struct SegmentHeader {
/// The ID of the thread.
thread_id: usize,
+ /// A bitmap of the free pages.
+ free_pages_bitmap: u64,
+
+ /// The number of pages in the segment.
+ page_count: u32,
+
/// The amount to right-shift the offset of an object from the start of the segment by to get
/// its page index.
page_shift: u32,
}
/// A page is a block of memory that contains objects of a single size class.
+#[derive(Debug)]
#[repr(C)]
struct Page {
/// The intrusive link in the circular doubly-linked list of pages of each size class.
@@ -386,6 +580,7 @@ struct PageLink {
}
/// The fields other than the intrusive link in a page's header.
+#[derive(Debug)]
struct PageHeader {
/// The free list that the owning thread pops from.
alloc_list: Option<NonNull<Block>>,
@@ -425,7 +620,7 @@ fn atomic_push(node: &mut Block, list: &AtomicPtr<Block>) {
///
/// # Safety
///
-/// All methods must be implemented according to their doc comments.
+/// - All methods must be implemented according to their doc comments.
pub unsafe trait OSServices {
/// Returns the ID of the current thread. This can be arbitrary, so long as it is consistent
/// within a thread and no two threads with the same thread ID run at the same time.
diff --git a/crates/kernel/src/alloc.rs b/crates/kernel/src/alloc.rs
index 920fe0a..3ae72bc 100644
--- a/crates/kernel/src/alloc.rs
+++ b/crates/kernel/src/alloc.rs
@@ -151,7 +151,7 @@ pub unsafe fn init_kernel_virtual_memory_allocator(himem_top: usize) {
// Donate the bootstrap segment to the heap.
//
// UNWRAP: Himem cannot be null.
- CPULocals::get().heap().donate_small_medium_segment(
+ CPULocals::get().heap().donate_segment(
NonNull::new(bootstrap_segment as *mut [u8; NON_HUGE_SEGMENT_SIZE]).unwrap(),
);