//! Global structures for the allocators.
use crate::{
cpu_locals::CPULocals,
paging::{
BuddyAllocator, MapError, MappingFlags, PageTable, ASID, HIMEM_BOT, LOMEM_TOP,
MAX_PAGE_SIZE_BITS, PAGE_SIZE, PAGE_SIZES, PAGE_SIZE_BITS,
},
};
use allocator_api2::alloc::{AllocError, Allocator, GlobalAlloc, Layout};
use contracts::requires;
use core::{
mem::MaybeUninit,
num::NonZero,
ptr::{null_mut, NonNull},
};
use spin::mutex::FairMutex;
use vernos_alloc_genmalloc::{OSServices, NON_HUGE_SEGMENT_SIZE, NON_HUGE_SEGMENT_SIZE_BITS};
use vernos_alloc_vma_tree::VMATree;
/// The global instance of the physical page allocator.
static BUDDY_ALLOCATOR: FairMutex<Option<BuddyAllocator>> = FairMutex::new(None);
/// The global kernel page table.
static KERNEL_PAGE_TABLE: FairMutex<Option<&'static mut PageTable>> = FairMutex::new(None);
/// The kernel's virtual memory allocator.
static KERNEL_VM_ALLOC: FairMutex<VMATree<PAGE_SIZE_BITS, CPULocalHeap>> =
    FairMutex::new(VMATree::new_in(CPULocalHeap));
/// The global allocator.
#[global_allocator]
static GLOBAL_ALLOC: CPULocalHeap = CPULocalHeap;
/// The type of the kernel's allocator.
pub type Heap = vernos_alloc_genmalloc::Heap<HeapOSServices>;
/// Initializes the kernel page table and enables paging.
///
/// # Safety
///
/// - Paging must not have been enabled previously.
/// - The buddy allocator must be valid.
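///
/// # Example
///
/// A sketch of the intended early-boot call order; the construction of `buddy_allocator` and
/// the value of `himem_top` are platform-specific and omitted here.
///
/// ```ignore
/// unsafe {
///     init_kernel_page_table(buddy_allocator);
///     init_kernel_virtual_memory_allocator(himem_top);
/// }
/// ```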
#[requires(KERNEL_PAGE_TABLE.lock().is_none())]
#[requires(BUDDY_ALLOCATOR.lock().is_none())]
#[ensures(KERNEL_PAGE_TABLE.lock().is_some())]
#[ensures(BUDDY_ALLOCATOR.lock().is_some())]
pub unsafe fn init_kernel_page_table(buddy_allocator: BuddyAllocator) {
    // Making the parameter itself `mut` triggers a warning because of how the contracts macros
    // expand, so we rebind it as mutable here instead.
let mut buddy_allocator = buddy_allocator;
// Allocate a page to use (for now) as the global kernel page table. Later we'll actually
// replace it with the hart0 initial stack's page, since we'll never free the root page of the
// kernel page table, and we can't return that page to the buddy allocator anyway.
    let page_table = buddy_allocator
        .alloc_zeroed::<PageTable>()
        .expect("failed to allocate the kernel page table")
        .as_mut();
// Create identity mappings for the lower half of memory.
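    // Each iteration maps one maximum-size page (for example, with MAX_PAGE_SIZE_BITS = 30, as
    // on RV64 Sv39, each mapping would cover a 1 GiB gigapage).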
for page_num in 0..(LOMEM_TOP >> MAX_PAGE_SIZE_BITS) {
let addr = page_num << MAX_PAGE_SIZE_BITS;
let flags = MappingFlags::R | MappingFlags::W | MappingFlags::X;
page_table
.map(
&mut buddy_allocator,
addr,
addr,
1 << MAX_PAGE_SIZE_BITS,
flags,
)
.expect("failed to set up identity mapping for low memory in the kernel page table");
}
// Set the page table as the current page table.
PageTable::make_current(NonNull::from(&*page_table), ASID::KERNEL);
// Print the page table.
vernos_utils::dbg!(&page_table);
// Save the buddy allocator and kernel page table.
*KERNEL_PAGE_TABLE.lock() = Some(page_table);
*BUDDY_ALLOCATOR.lock() = Some(buddy_allocator);
}
/// Initializes the virtual memory allocator and the regular allocator.
///
/// # Safety
///
/// - `himem_top` must be accurate.
#[requires(KERNEL_PAGE_TABLE.lock().is_some())]
#[requires(BUDDY_ALLOCATOR.lock().is_some())]
#[requires(himem_top & (PAGE_SIZE - 1) == 0)]
#[requires(HIMEM_BOT < himem_top)]
pub unsafe fn init_kernel_virtual_memory_allocator(himem_top: usize) {
let mut himem_bot = HIMEM_BOT;
let mut himem_top = himem_top;
// To bootstrap the allocator, we make an initial heap. First, we figure out where it should be
// laid out in himem, including putting a guard page beneath it.
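    //
    // From the top of himem downward, the layout ends up as:
    //
    //   heap_top   -- the original himem_top
    //   heap_bot   -- heap_top minus size_of::<Heap>(), rounded down to a page boundary
    //   guard page -- the page just below heap_bot, left unmapped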
let heap_top = himem_top;
    himem_top -= size_of::<Heap>();
    const _: () = assert!(align_of::<Heap>() < PAGE_SIZE);
himem_top &= !(PAGE_SIZE - 1);
let heap_bot = himem_top;
    let heap = (himem_top as *mut MaybeUninit<Heap>).as_mut().unwrap();
himem_top -= PAGE_SIZE;
assert!(himem_bot < himem_top);
// Map memory to back the heap.
for i in (heap_bot >> PAGE_SIZE_BITS)..(heap_top >> PAGE_SIZE_BITS) {
let vaddr = i << PAGE_SIZE_BITS;
let paddr =
alloc_page(PAGE_SIZE).expect("failed to allocate memory to bootstrap hart0's heap");
kernel_map(
vaddr,
paddr.into(),
PAGE_SIZE,
MappingFlags::R | MappingFlags::W,
)
.expect("failed to map memory to bootstrap hart0's heap");
}
// Next, we initialize the heap, which lets us initialize the CPU-locals as well.
Heap::init(heap);
CPULocals::init(0, heap.assume_init_mut());
// We need to initialize the heap with a segment that will let the virtual memory allocator
// allocate nodes. We lay it out at the _bottom_ of himem, since we know that'll be aligned. We
// add a guard page as well.
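    //
    // From the bottom of himem upward, the layout ends up as:
    //
    //   bootstrap_segment -- NON_HUGE_SEGMENT_SIZE bytes starting at the original himem_bot
    //   guard page        -- the page just above the segment, left unmapped
    //   himem_bot         -- everything from here up is later handed to the VM allocator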
assert_eq!(himem_bot % NON_HUGE_SEGMENT_SIZE, 0);
let bootstrap_segment = himem_bot;
himem_bot += NON_HUGE_SEGMENT_SIZE;
assert_eq!(himem_bot & (PAGE_SIZE - 1), 0);
himem_bot += PAGE_SIZE;
// We map the bootstrap segment.
for i in 0..(1 << (NON_HUGE_SEGMENT_SIZE_BITS - PAGE_SIZE_BITS)) {
let vaddr = bootstrap_segment + (i << PAGE_SIZE_BITS);
let paddr = alloc_page(PAGE_SIZE)
.expect("failed to allocate memory for hart0's heap's initial segment");
kernel_map(
vaddr,
paddr.into(),
PAGE_SIZE,
MappingFlags::R | MappingFlags::W,
)
.expect("failed to map memory for hart0's heap's initial segment");
}
// Donate the bootstrap segment to the heap.
//
// UNWRAP: Himem cannot be null.
CPULocals::get().heap().donate_small_medium_segment(
NonNull::new(bootstrap_segment as *mut [u8; NON_HUGE_SEGMENT_SIZE]).unwrap(),
);
// The error here _really_ ought to be impossible, because we just bootstrapped the allocator!
// It definitely has free memory.
let mut kernel_vm_alloc = KERNEL_VM_ALLOC.lock();
kernel_vm_alloc
.add(himem_bot..himem_top)
.expect("failed to set up the kernel's virtual memory allocator");
}
/// Tries to allocate a page of physical memory of the given size, returning its physical address.
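///
/// # Example
///
/// ```ignore
/// // Illustrative only: allocate a single base page of physical memory.
/// let paddr = alloc_page(PAGE_SIZE).expect("out of physical memory");
/// ```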
#[requires(PAGE_SIZES.contains(&len))]
pub fn alloc_page(len: usize) -> Result<NonZero<usize>, AllocError> {
let mut buddy_allocator = BUDDY_ALLOCATOR.lock();
let buddy_allocator = buddy_allocator.as_mut().unwrap();
buddy_allocator.alloc_of_size(len).map(|addr| {
// SAFETY: NonNull guarantees the address will be nonzero.
unsafe { NonZero::new_unchecked(addr.as_ptr() as usize) }
})
}
/// Logs the kernel page table.
pub fn kernel_log_page_table() {
let kernel_page_table = KERNEL_PAGE_TABLE.lock();
let kernel_page_table = kernel_page_table.as_ref().unwrap();
let count = kernel_page_table.debug_mappings().count();
log::info!(
"The kernel page table had {count} mapping{}",
match count {
0 => "s.",
1 => ":",
_ => "s:",
}
);
for mapping in kernel_page_table.debug_mappings() {
log::info!("{mapping:?}");
}
}
/// Adds a mapping into the kernel page table.
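///
/// # Example
///
/// ```ignore
/// // Illustrative sketch: map one page of device MMIO as read-write. UART_VADDR and UART_PADDR
/// // are hypothetical, page-aligned addresses.
/// kernel_map(UART_VADDR, UART_PADDR, PAGE_SIZE, MappingFlags::R | MappingFlags::W)
///     .expect("failed to map the UART");
/// ```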
pub fn kernel_map(
vaddr: usize,
paddr: usize,
len: usize,
flags: MappingFlags,
) -> Result<(), MapError> {
let mut kernel_page_table = KERNEL_PAGE_TABLE.lock();
let mut buddy_allocator = BUDDY_ALLOCATOR.lock();
let kernel_page_table = kernel_page_table.as_mut().unwrap();
let buddy_allocator = buddy_allocator.as_mut().unwrap();
kernel_page_table.map(&mut *buddy_allocator, vaddr, paddr, len, flags)?;
vernos_utils::first_time! {
log::warn!("TODO: sfence.vma");
}
Ok(())
}
/// A global allocator backed by a hart-local `vernos_alloc_genmalloc::Heap`.
struct CPULocalHeap;
unsafe impl Allocator for CPULocalHeap {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
CPULocals::get().heap().allocate(layout)
}
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
CPULocals::get().heap().deallocate(ptr, layout)
}
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
CPULocals::get().heap().allocate_zeroed(layout)
}
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
CPULocals::get().heap().grow(ptr, old_layout, new_layout)
}
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
CPULocals::get()
.heap()
.grow_zeroed(ptr, old_layout, new_layout)
}
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
CPULocals::get().heap().shrink(ptr, old_layout, new_layout)
}
}
unsafe impl GlobalAlloc for CPULocalHeap {
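    // `GlobalAlloc` reports failure with a null pointer rather than with a `Result`, so these
    // methods adapt the `Allocator` implementation above to that interface.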
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
match self.allocate(layout) {
Ok(ptr) => ptr.as_ptr().cast(),
Err(AllocError) => null_mut(),
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
match NonNull::new(ptr) {
Some(ptr) => self.deallocate(ptr, layout),
None => unreachable!("dealloc({ptr:p}, {layout:?})"),
}
}
}
/// The OS services provided to the allocator.
#[derive(Debug)]
pub struct HeapOSServices;
unsafe impl OSServices for HeapOSServices {
fn current_thread_id() -> usize {
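        // The heap is hart-local (see `CPULocalHeap`), so the CPU number doubles as the thread ID.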
CPULocals::get().cpu_number
}
}