author    Nathan Ringo <nathan@remexre.com>    2024-09-15 03:25:30 -0500
committer Nathan Ringo <nathan@remexre.com>    2024-09-15 03:25:30 -0500
commit    49bf92a7aaf10a4777ea512303e442588f4ce2e5 (patch)
tree      2ad6e4baf4ea0c2e728a5c103139da520e32f378 /crates/kernel/src/alloc.rs
parent    fc918ea68d536fa9f219e7b4decdae1f561c9886 (diff)
Start of serious allocator work.
Diffstat (limited to 'crates/kernel/src/alloc.rs')
-rw-r--r--    crates/kernel/src/alloc.rs    159
1 file changed, 144 insertions(+), 15 deletions(-)
diff --git a/crates/kernel/src/alloc.rs b/crates/kernel/src/alloc.rs
index 203ef5c..920fe0a 100644
--- a/crates/kernel/src/alloc.rs
+++ b/crates/kernel/src/alloc.rs
@@ -1,13 +1,21 @@
//! Global structures for the allocators.
-use crate::paging::{
- BuddyAllocator, MapError, MappingFlags, PageTable, ASID, HIMEM_BOT, LOMEM_TOP,
- MAX_PAGE_SIZE_BITS, PAGE_SIZE, PAGE_SIZES, PAGE_SIZE_BITS,
+use crate::{
+ cpu_locals::CPULocals,
+ paging::{
+ BuddyAllocator, MapError, MappingFlags, PageTable, ASID, HIMEM_BOT, LOMEM_TOP,
+ MAX_PAGE_SIZE_BITS, PAGE_SIZE, PAGE_SIZES, PAGE_SIZE_BITS,
+ },
};
-use allocator_api2::alloc::{AllocError, Global, GlobalAlloc, Layout};
+use allocator_api2::alloc::{AllocError, Allocator, GlobalAlloc, Layout};
use contracts::requires;
-use core::{num::NonZero, ptr::NonNull};
+use core::{
+ mem::MaybeUninit,
+ num::NonZero,
+ ptr::{null_mut, NonNull},
+};
use spin::mutex::FairMutex;
+use vernos_alloc_genmalloc::{OSServices, NON_HUGE_SEGMENT_SIZE, NON_HUGE_SEGMENT_SIZE_BITS};
use vernos_alloc_vma_tree::VMATree;
/// The global instance of the physical page allocator.
@@ -17,12 +25,15 @@ static BUDDY_ALLOCATOR: FairMutex<Option<BuddyAllocator>> = FairMutex::new(None)
static KERNEL_PAGE_TABLE: FairMutex<Option<&'static mut PageTable>> = FairMutex::new(None);
/// The kernel's virtual memory allocator.
-static KERNEL_VM_ALLOC: FairMutex<VMATree<PAGE_SIZE_BITS, Global>> =
- FairMutex::new(VMATree::new_in(Global));
+static KERNEL_VM_ALLOC: FairMutex<VMATree<PAGE_SIZE_BITS, CPULocalHeap>> =
+ FairMutex::new(VMATree::new_in(CPULocalHeap));
/// The global allocator.
#[global_allocator]
-static GLOBAL_ALLOC: GlobalGenMalloc = GlobalGenMalloc;
+static GLOBAL_ALLOC: CPULocalHeap = CPULocalHeap;
+
+/// The type of the kernel's allocator.
+pub type Heap = vernos_alloc_genmalloc::Heap<HeapOSServices>;
/// Initializes the kernel page table and enables paging.
///
@@ -82,13 +93,73 @@ pub unsafe fn init_kernel_page_table(buddy_allocator: BuddyAllocator) {
#[requires(himem_top & (PAGE_SIZE - 1) == 0)]
#[requires(HIMEM_BOT < himem_top)]
pub unsafe fn init_kernel_virtual_memory_allocator(himem_top: usize) {
- // TODO: Bootstrap the allocator.
+ let mut himem_bot = HIMEM_BOT;
+ let mut himem_top = himem_top;
+
+ // To bootstrap the allocator, we make an initial heap. First, we figure out where it should be
+ // laid out in himem, including putting a guard page beneath it.
+ let heap_top = himem_top;
+ himem_top -= size_of::<Heap>();
+ const _: () = assert!(align_of::<Heap>() < PAGE_SIZE);
+ himem_top &= !(PAGE_SIZE - 1);
+ let heap_bot = himem_top;
+ let heap = (himem_top as *mut MaybeUninit<Heap>).as_mut().unwrap();
+ himem_top -= PAGE_SIZE;
+ assert!(himem_bot < himem_top);
+
+ // Map memory to back the heap.
+ for i in (heap_bot >> PAGE_SIZE_BITS)..(heap_top >> PAGE_SIZE_BITS) {
+ let vaddr = i << PAGE_SIZE_BITS;
+ let paddr =
+ alloc_page(PAGE_SIZE).expect("failed to allocate memory to bootstrap hart0's heap");
+ kernel_map(
+ vaddr,
+ paddr.into(),
+ PAGE_SIZE,
+ MappingFlags::R | MappingFlags::W,
+ )
+ .expect("failed to map memory to bootstrap hart0's heap");
+ }
+
+ // Next, we initialize the heap, which lets us initialize the CPU-locals as well.
+ Heap::init(heap);
+ CPULocals::init(0, heap.assume_init_mut());
+
+ // We need to initialize the heap with a segment that will let the virtual memory allocator
+ // allocate nodes. We lay it out at the _bottom_ of himem, since we know that'll be aligned. We
+ // add a guard page as well.
+ assert_eq!(himem_bot % NON_HUGE_SEGMENT_SIZE, 0);
+ let bootstrap_segment = himem_bot;
+ himem_bot += NON_HUGE_SEGMENT_SIZE;
+ assert_eq!(himem_bot & (PAGE_SIZE - 1), 0);
+ himem_bot += PAGE_SIZE;
+
+ // We map the bootstrap segment.
+ for i in 0..(1 << (NON_HUGE_SEGMENT_SIZE_BITS - PAGE_SIZE_BITS)) {
+ let vaddr = bootstrap_segment + (i << PAGE_SIZE_BITS);
+ let paddr = alloc_page(PAGE_SIZE)
+ .expect("failed to allocate memory for hart0's heap's initial segment");
+ kernel_map(
+ vaddr,
+ paddr.into(),
+ PAGE_SIZE,
+ MappingFlags::R | MappingFlags::W,
+ )
+ .expect("failed to map memory for hart0's heap's initial segment");
+ }
+
+ // Donate the bootstrap segment to the heap.
+ //
+ // UNWRAP: Himem cannot be null.
+ CPULocals::get().heap().donate_small_medium_segment(
+ NonNull::new(bootstrap_segment as *mut [u8; NON_HUGE_SEGMENT_SIZE]).unwrap(),
+ );
// The error here _really_ ought to be impossible, because we just bootstrapped the allocator!
// It definitely has free memory.
let mut kernel_vm_alloc = KERNEL_VM_ALLOC.lock();
kernel_vm_alloc
- .add(HIMEM_BOT..himem_top)
+ .add(himem_bot..himem_top)
.expect("failed to set up the kernel's virtual memory allocator");
}
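
The carve-from-the-top arithmetic in this hunk (subtract the size, mask down to a page boundary, then step past a guard page) is easy to get wrong by one page. Below is a minimal hosted sketch of the same arithmetic, assuming a 4 KiB page size; the function and values are hypothetical, not part of the kernel:

// Sketch of the align-down + guard-page layout used above (hypothetical).
const PAGE_SIZE: usize = 4096;

fn carve_from_top(top: usize, size: usize) -> (usize, usize) {
    // Reserve `size` bytes below `top`, then round down to a page boundary
    // so the carved region starts page-aligned.
    let unaligned = top - size;
    let base = unaligned & !(PAGE_SIZE - 1);
    // Leave one (never-mapped) guard page below the carved region.
    let new_top = base - PAGE_SIZE;
    (base, new_top)
}

fn main() {
    let (heap_bot, himem_top) = carve_from_top(0xffff_ffc0_0000_0000, 8192);
    assert_eq!(heap_bot % PAGE_SIZE, 0);
    assert!(himem_top < heap_bot);
    println!("heap at {heap_bot:#x}, himem_top now {himem_top:#x}");
}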
@@ -136,20 +207,78 @@ pub fn kernel_map(
kernel_page_table.map(&mut *buddy_allocator, vaddr, paddr, len, flags)?;
vernos_utils::first_time! {
log::warn!("TODO: sfence.vma");
- log::warn!("TODO: TLB shootdown");
}
Ok(())
}
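
vernos_utils::first_time! is project-specific; as a point of comparison, a run-only-once warning can be sketched with an atomic flag (an assumption about the macro's intent, not its actual implementation):

use core::sync::atomic::{AtomicBool, Ordering};

/// Hypothetical stand-in for `vernos_utils::first_time!`: runs the body only
/// on the first call, tracked by an atomic flag.
fn warn_once() {
    static DONE: AtomicBool = AtomicBool::new(false);
    if !DONE.swap(true, Ordering::Relaxed) {
        log::warn!("TODO: sfence.vma");
    }
}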
/// A global allocator backed by a hart-local `vernos_alloc_genmalloc::Heap`.
-struct GlobalGenMalloc;
+struct CPULocalHeap;
+
+unsafe impl Allocator for CPULocalHeap {
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get().heap().allocate(layout)
+ }
+
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ CPULocals::get().heap().deallocate(ptr, layout)
+ }
-unsafe impl GlobalAlloc for GlobalGenMalloc {
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get().heap().allocate_zeroed(layout)
+ }
+
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get().heap().grow(ptr, old_layout, new_layout)
+ }
+
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get()
+ .heap()
+ .grow_zeroed(ptr, old_layout, new_layout)
+ }
+
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get().heap().shrink(ptr, old_layout, new_layout)
+ }
+}
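
Since CPULocalHeap implements allocator_api2's Allocator, the rest of the kernel can parameterize collections over it, which is exactly how KERNEL_VM_ALLOC is declared in the first hunk. An illustrative use (the function is hypothetical):

use allocator_api2::vec::Vec;

fn collect_in_hart_heap() {
    // Every allocation below is served by the current hart's Heap.
    let mut lengths: Vec<usize, CPULocalHeap> = Vec::new_in(CPULocalHeap);
    lengths.push(PAGE_SIZE);
}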
+
+unsafe impl GlobalAlloc for CPULocalHeap {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- todo!("GlobalGenMalloc.alloc({layout:?})")
+ match self.allocate(layout) {
+ Ok(ptr) => ptr.as_ptr().cast(),
+ Err(AllocError) => null_mut(),
+ }
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
- todo!("GlobalGenMalloc.dealloc({ptr:?}, {layout:?})")
+ match NonNull::new(ptr) {
+ Some(ptr) => self.deallocate(ptr, layout),
+ None => unreachable!("dealloc({ptr:p}, {layout:?})"),
+ }
+ }
+}
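
With the #[global_allocator] registration in the first hunk, ordinary alloc-crate types route through the per-hart heap without ever naming it. An illustrative sketch:

extern crate alloc;

fn implicit_global_alloc() {
    // Box::new reaches CPULocalHeap::alloc through the #[global_allocator]
    // registration; Box's Drop reaches CPULocalHeap::dealloc the same way.
    let boxed = alloc::boxed::Box::new([0u8; 64]);
    drop(boxed);
}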
+
+/// The OS services provided to the allocator.
+#[derive(Debug)]
+pub struct HeapOSServices;
+
+unsafe impl OSServices for HeapOSServices {
+ fn current_thread_id() -> usize {
+ CPULocals::get().cpu_number
}
}
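
Because Heap is generic over its OSServices (see the Heap type alias in the first hunk), an off-target test could plug in a stub. A sketch, assuming current_thread_id is the only required method; the real trait may require more:

struct StubOSServices;

unsafe impl OSServices for StubOSServices {
    fn current_thread_id() -> usize {
        // A single-threaded test harness only ever sees one "CPU".
        0
    }
}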