Diffstat (limited to 'crates/kernel')
-rw-r--r--  crates/kernel/Cargo.toml               |   2
-rw-r--r--  crates/kernel/src/alloc.rs             | 159
-rw-r--r--  crates/kernel/src/arch/riscv64/mod.rs  |  15
-rw-r--r--  crates/kernel/src/cpu_locals.rs        |  72
-rw-r--r--  crates/kernel/src/lib.rs               |   5
5 files changed, 237 insertions, 16 deletions
diff --git a/crates/kernel/Cargo.toml b/crates/kernel/Cargo.toml
index 5fef50b..a447470 100644
--- a/crates/kernel/Cargo.toml
+++ b/crates/kernel/Cargo.toml
@@ -14,7 +14,9 @@ contracts = { version = "0.6.3", default-features = false }
either = { version = "1.13.0", default-features = false }
log = { version = "0.4.20", default-features = false }
spin = { version = "0.9.8", default-features = false, features = ["fair_mutex", "use_ticket_mutex"] }
+static_assertions = { version = "1.1.0", default-features = false }
vernos_alloc_buddy = { path = "../alloc_buddy" }
+vernos_alloc_genmalloc = { path = "../alloc_genmalloc" }
vernos_alloc_physmem_free_list = { path = "../alloc_physmem_free_list" }
vernos_alloc_vma_tree = { path = "../alloc_vma_tree" }
vernos_device_tree = { path = "../device_tree" }
diff --git a/crates/kernel/src/alloc.rs b/crates/kernel/src/alloc.rs
index 203ef5c..920fe0a 100644
--- a/crates/kernel/src/alloc.rs
+++ b/crates/kernel/src/alloc.rs
@@ -1,13 +1,21 @@
//! Global structures for the allocators.
-use crate::paging::{
- BuddyAllocator, MapError, MappingFlags, PageTable, ASID, HIMEM_BOT, LOMEM_TOP,
- MAX_PAGE_SIZE_BITS, PAGE_SIZE, PAGE_SIZES, PAGE_SIZE_BITS,
+use crate::{
+ cpu_locals::CPULocals,
+ paging::{
+ BuddyAllocator, MapError, MappingFlags, PageTable, ASID, HIMEM_BOT, LOMEM_TOP,
+ MAX_PAGE_SIZE_BITS, PAGE_SIZE, PAGE_SIZES, PAGE_SIZE_BITS,
+ },
};
-use allocator_api2::alloc::{AllocError, Global, GlobalAlloc, Layout};
+use allocator_api2::alloc::{AllocError, Allocator, GlobalAlloc, Layout};
use contracts::requires;
-use core::{num::NonZero, ptr::NonNull};
+use core::{
+ mem::MaybeUninit,
+ num::NonZero,
+ ptr::{null_mut, NonNull},
+};
use spin::mutex::FairMutex;
+use vernos_alloc_genmalloc::{OSServices, NON_HUGE_SEGMENT_SIZE, NON_HUGE_SEGMENT_SIZE_BITS};
use vernos_alloc_vma_tree::VMATree;
/// The global instance of the physical page allocator.
@@ -17,12 +25,15 @@ static BUDDY_ALLOCATOR: FairMutex<Option<BuddyAllocator>> = FairMutex::new(None)
static KERNEL_PAGE_TABLE: FairMutex<Option<&'static mut PageTable>> = FairMutex::new(None);
/// The kernel's virtual memory allocator.
-static KERNEL_VM_ALLOC: FairMutex<VMATree<PAGE_SIZE_BITS, Global>> =
- FairMutex::new(VMATree::new_in(Global));
+static KERNEL_VM_ALLOC: FairMutex<VMATree<PAGE_SIZE_BITS, CPULocalHeap>> =
+ FairMutex::new(VMATree::new_in(CPULocalHeap));
/// The global allocator.
#[global_allocator]
-static GLOBAL_ALLOC: GlobalGenMalloc = GlobalGenMalloc;
+static GLOBAL_ALLOC: CPULocalHeap = CPULocalHeap;
+
+/// The type of the kernel's allocator.
+pub type Heap = vernos_alloc_genmalloc::Heap<HeapOSServices>;
/// Initializes the kernel page table and enables paging.
///
@@ -82,13 +93,73 @@ pub unsafe fn init_kernel_page_table(buddy_allocator: BuddyAllocator) {
#[requires(himem_top & (PAGE_SIZE - 1) == 0)]
#[requires(HIMEM_BOT < himem_top)]
pub unsafe fn init_kernel_virtual_memory_allocator(himem_top: usize) {
- // TODO: Bootstrap the allocator.
+ let mut himem_bot = HIMEM_BOT;
+ let mut himem_top = himem_top;
+
+ // To bootstrap the allocator, we make an initial heap. First, we figure out where it should be
+ // laid out in himem, including putting a guard page beneath it.
+ let heap_top = himem_top;
+ himem_top -= size_of::<Heap>();
+ const _: () = assert!(align_of::<Heap>() < PAGE_SIZE);
+ himem_top &= !(PAGE_SIZE - 1);
+ let heap_bot = himem_top;
+ let heap = (himem_top as *mut MaybeUninit<Heap>).as_mut().unwrap();
+ himem_top -= PAGE_SIZE;
+ assert!(himem_bot < himem_top);
+
+ // Map memory to back the heap.
+ for i in (heap_bot >> PAGE_SIZE_BITS)..(heap_top >> PAGE_SIZE_BITS) {
+ let vaddr = i << PAGE_SIZE_BITS;
+ let paddr =
+ alloc_page(PAGE_SIZE).expect("failed to allocate memory to bootstrap hart0's heap");
+ kernel_map(
+ vaddr,
+ paddr.into(),
+ PAGE_SIZE,
+ MappingFlags::R | MappingFlags::W,
+ )
+ .expect("failed to map memory to bootstrap hart0's heap");
+ }
+
+ // Next, we initialize the heap, which lets us initialize the CPU-locals as well.
+ Heap::init(heap);
+ CPULocals::init(0, heap.assume_init_mut());
+
+ // We need to initialize the heap with a segment that will let the virtual memory allocator
+ // allocate nodes. We lay it out at the _bottom_ of himem, since we know that's already segment-aligned. We
+ // add a guard page as well.
+ assert_eq!(himem_bot % NON_HUGE_SEGMENT_SIZE, 0);
+ let bootstrap_segment = himem_bot;
+ himem_bot += NON_HUGE_SEGMENT_SIZE;
+ assert_eq!(himem_bot & (PAGE_SIZE - 1), 0);
+ himem_bot += PAGE_SIZE;
+
+ // We map the bootstrap segment.
+ for i in 0..(1 << (NON_HUGE_SEGMENT_SIZE_BITS - PAGE_SIZE_BITS)) {
+ let vaddr = bootstrap_segment + (i << PAGE_SIZE_BITS);
+ let paddr = alloc_page(PAGE_SIZE)
+ .expect("failed to allocate memory for hart0's heap's initial segment");
+ kernel_map(
+ vaddr,
+ paddr.into(),
+ PAGE_SIZE,
+ MappingFlags::R | MappingFlags::W,
+ )
+ .expect("failed to map memory for hart0's heap's initial segment");
+ }
+
+ // Donate the bootstrap segment to the heap.
+ //
+ // UNWRAP: Himem cannot be null.
+ CPULocals::get().heap().donate_small_medium_segment(
+ NonNull::new(bootstrap_segment as *mut [u8; NON_HUGE_SEGMENT_SIZE]).unwrap(),
+ );
// The error here _really_ ought to be impossible, because we just bootstrapped the allocator!
// It definitely has free memory.
let mut kernel_vm_alloc = KERNEL_VM_ALLOC.lock();
kernel_vm_alloc
- .add(HIMEM_BOT..himem_top)
+ .add(himem_bot..himem_top)
.expect("failed to set up the kernel's virtual memory allocator");
}
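
The bootstrap above carves three regions out of himem before the VMA tree ever sees it: the `Heap` struct at the top with a guard page below it, one `NON_HUGE_SEGMENT_SIZE` segment at the bottom with a guard page above it, and the remainder in between, which is what `KERNEL_VM_ALLOC` manages. A minimal sketch of that arithmetic with stand-in constants; the real `PAGE_SIZE`, `NON_HUGE_SEGMENT_SIZE`, and `Heap` come from the paging and alloc_genmalloc crates:

fn split_himem(mut bot: usize, mut top: usize, heap_size: usize) -> (usize, usize) {
    const PAGE: usize = 4096; // stand-in for PAGE_SIZE
    const SEGMENT: usize = 2 << 20; // stand-in for NON_HUGE_SEGMENT_SIZE
    // Carve the Heap off the top, align down to a page, leave a guard page.
    top = (top - heap_size) & !(PAGE - 1);
    top -= PAGE;
    // Carve one segment plus a guard page off the bottom.
    bot += SEGMENT + PAGE;
    // The remaining range is what gets handed to KERNEL_VM_ALLOC.
    (bot, top)
}
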
@@ -136,20 +207,78 @@ pub fn kernel_map(
kernel_page_table.map(&mut *buddy_allocator, vaddr, paddr, len, flags)?;
vernos_utils::first_time! {
log::warn!("TODO: sfence.vma");
- log::warn!("TODO: TLB shootdown");
}
Ok(())
}
/// A global allocator backed by a hart-local `vernos_alloc_genmalloc::Heap`.
-struct GlobalGenMalloc;
+struct CPULocalHeap;
+
+unsafe impl Allocator for CPULocalHeap {
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get().heap().allocate(layout)
+ }
+
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ CPULocals::get().heap().deallocate(ptr, layout)
+ }
-unsafe impl GlobalAlloc for GlobalGenMalloc {
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get().heap().allocate_zeroed(layout)
+ }
+
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get().heap().grow(ptr, old_layout, new_layout)
+ }
+
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get()
+ .heap()
+ .grow_zeroed(ptr, old_layout, new_layout)
+ }
+
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ CPULocals::get().heap().shrink(ptr, old_layout, new_layout)
+ }
+}
+
+unsafe impl GlobalAlloc for CPULocalHeap {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- todo!("GlobalGenMalloc.alloc({layout:?})")
+ match self.allocate(layout) {
+ Ok(ptr) => ptr.as_ptr().cast(),
+ Err(AllocError) => null_mut(),
+ }
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
- todo!("GlobalGenMalloc.dealloc({ptr:?}, {layout:?})")
+ match NonNull::new(ptr) {
+ Some(ptr) => self.deallocate(ptr, layout),
+ None => unreachable!("dealloc({ptr:p}, {layout:?})"),
+ }
+ }
+}
+
+/// The OS services provided to the allocator.
+#[derive(Debug)]
+pub struct HeapOSServices;
+
+unsafe impl OSServices for HeapOSServices {
+ fn current_thread_id() -> usize {
+ CPULocals::get().cpu_number
}
}
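
With both traits implemented, `CPULocalHeap` does double duty: `#[global_allocator]` routes plain `alloc`/`dealloc` calls through it, and it can be passed explicitly wherever an `allocator_api2` `Allocator` is expected, as `KERNEL_VM_ALLOC` now does. A hypothetical in-crate caller using it explicitly (assumes allocator_api2's `vec` module; the function name is illustrative):

use allocator_api2::vec::Vec;

fn hart_numbers_seen() -> Vec<usize, CPULocalHeap> {
    // Every (re)allocation below lands in the current hart's Heap.
    let mut v = Vec::new_in(CPULocalHeap);
    v.push(CPULocals::get().cpu_number);
    v
}
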
diff --git a/crates/kernel/src/arch/riscv64/mod.rs b/crates/kernel/src/arch/riscv64/mod.rs
index 011e244..48718c2 100644
--- a/crates/kernel/src/arch/riscv64/mod.rs
+++ b/crates/kernel/src/arch/riscv64/mod.rs
@@ -1,9 +1,22 @@
+use crate::cpu_locals::CPULocals;
+use core::{arch::asm, ptr::NonNull};
+
pub mod interrupts;
pub mod paging;
+/// Returns a pointer to the per-CPU locals.
+pub fn get_cpu_locals() -> NonNull<CPULocals> {
+ // SAFETY: The entrypoint sets this up, and safe code cannot invalidate it.
+ unsafe {
+ let tp;
+ asm!("mv {out}, tp", out = out(reg) tp);
+ NonNull::new_unchecked(tp)
+ }
+}
+
/// Halts the hart.
pub fn sleep_forever() -> ! {
loop {
- unsafe { core::arch::asm!("wfi") }
+ unsafe { asm!("wfi") }
}
}
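
`get_cpu_locals` trusts that `tp` already points at a live `CPULocals`. A hypothetical sketch of the invariant the entrypoint (assembly, not part of this diff) must establish, matching the assertion added to `hart0_early_boot` below: the `CPULocals` slot sits immediately below the stack top.

use core::arch::asm;
use crate::cpu_locals::CPULocals;

/// Illustration only: place the CPULocals slot directly below stack_end and
/// point tp at it, so that tp + size_of::<CPULocals>() == stack_end.
unsafe fn point_tp_at_cpu_locals(stack_end: *mut u8) {
    let slot = stack_end.cast::<CPULocals>().sub(1);
    asm!("mv tp, {0}", in(reg) slot);
}
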
diff --git a/crates/kernel/src/cpu_locals.rs b/crates/kernel/src/cpu_locals.rs
new file mode 100644
index 0000000..fd826aa
--- /dev/null
+++ b/crates/kernel/src/cpu_locals.rs
@@ -0,0 +1,72 @@
+use crate::{alloc::Heap, arch};
+use core::{cell::RefCell, marker::PhantomData, ops::DerefMut, ptr::addr_of_mut};
+use static_assertions::{assert_eq_size, assert_not_impl_any};
+
+/// The data that is stored in a per-CPU structure.
+#[derive(Debug)]
+pub struct CPULocals {
+ /// The index of this CPU.
+ pub cpu_number: usize,
+
+ /// The heap used by this CPU's allocator.
+ pub heap: RefCell<&'static mut Heap>,
+
+ /// A canary used to detect whether the `CPULocals` have been initialized.
+ canary: usize,
+
+ // This ensures that the type is not `Send`.
+ _phantom: PhantomData<*mut ()>,
+}
+
+impl CPULocals {
+ /// Creates a new `CPULocals` and installs it as this CPU's `CPULocals`.
+ ///
+ /// # Safety
+ ///
+ /// - The CPULocals must not have already been initialized.
+ pub unsafe fn init(cpu_number: usize, heap: &'static mut Heap) {
+ arch::get_cpu_locals().write(CPULocals {
+ cpu_number,
+ heap: RefCell::new(heap),
+ canary: CANARY,
+ _phantom: PhantomData,
+ });
+ }
+
+ /// Returns the instance of the `CPULocals` for this CPU.
+ pub fn get() -> &'static CPULocals {
+ let ptr = arch::get_cpu_locals();
+ // SAFETY: The entrypoint sets this up for hart0, and we allocate this for other harts.
+ unsafe {
+ let canary_ptr = addr_of_mut!((*ptr.as_ptr()).canary);
+ assert_eq!(
+ *canary_ptr, CANARY,
+ "CPULocals were not initialized (and we probably just did UB)"
+ );
+ ptr.as_ref()
+ }
+ }
+
+ /// Retrieves a reference to the heap.
+ ///
+ /// # Panics
+ ///
+ /// - Panics if the CPU-local heap was already borrowed.
+ /// - The returned guard will panic on deref if the heap was not initialized.
+ pub fn heap(&'static self) -> impl DerefMut<Target = &'static mut Heap> {
+ self.heap.borrow_mut()
+ }
+}
+
+assert_eq_size!(CPULocals, [usize; 4]);
+assert_not_impl_any!(CPULocals: Send);
+
+cfg_if::cfg_if! {
+ if #[cfg(target_pointer_width = "32")] {
+ const CANARY: usize = usize::from_le_bytes(*b"locl");
+ } else if #[cfg(target_pointer_width = "64")] {
+ const CANARY: usize = usize::from_le_bytes(*b"CPULocal");
+ } else {
+ compile_error!("unsupported platform");
+ }
+}
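
The `RefCell` turns re-entrant heap access (say, an allocation attempted while the heap guard is still held) into a deterministic panic rather than undefined behavior. A hypothetical helper showing the intended borrow discipline; the name and shape are illustrative:

use crate::{alloc::Heap, cpu_locals::CPULocals};

fn with_heap<R>(f: impl FnOnce(&mut Heap) -> R) -> R {
    // Hold the RefMut guard for as short a scope as possible; a nested
    // with_heap call from inside `f` would panic in borrow_mut.
    let mut guard = CPULocals::get().heap();
    f(&mut **guard)
}
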
diff --git a/crates/kernel/src/lib.rs b/crates/kernel/src/lib.rs
index 7421649..fc96950 100644
--- a/crates/kernel/src/lib.rs
+++ b/crates/kernel/src/lib.rs
@@ -21,6 +21,7 @@ mod panic;
pub mod alloc;
pub mod arch;
pub mod constants;
+pub mod cpu_locals;
pub mod logger;
pub mod paging;
@@ -98,6 +99,10 @@ pub unsafe extern "C" fn hart0_early_boot(early_boot_addrs: &mut EarlyBootAddrs)
assert!(early_boot_addrs.initial_stack_start.is_aligned());
assert!(early_boot_addrs.stack_end.is_aligned());
assert!(early_boot_addrs.trampoline_start.is_aligned());
+ assert_eq!(
+ arch::get_cpu_locals().as_ptr().wrapping_add(1) as *const (),
+ early_boot_addrs.stack_end.cast()
+ );
// Parse the DeviceTree.
let flattened_device_tree = unsafe { early_boot_addrs.flattened_device_tree() };
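
The new assertion pins down the boot-stack layout: hart0's `CPULocals` occupy the `size_of::<CPULocals>()` bytes ending exactly at `stack_end`. A worked example with purely illustrative numbers on a 64-bit hart:

use crate::cpu_locals::CPULocals;

// CPULocals is four usizes (asserted in cpu_locals.rs), so 32 bytes on RV64.
// If stack_end were 0x8020_0000, tp would have to be 0x801f_ffe0.
fn cpu_locals_end_at_stack(tp: *const CPULocals, stack_end: *const ()) -> bool {
    tp.wrapping_add(1) as *const () == stack_end
}
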