//! Global structures for the allocators.
use crate::paging::{
BuddyAllocator, MapError, MappingFlags, PageTable, ASID, LOMEM_TOP, MAX_PAGE_SIZE_BITS,
PAGE_SIZES,
};
use allocator_api2::alloc::AllocError;
use contracts::requires;
use core::{num::NonZero, ptr::NonNull};
use spin::mutex::FairMutex;
/// The global instance of the physical page allocator.
static BUDDY_ALLOCATOR: FairMutex> = FairMutex::new(None);
/// The global kernel page table.
static KERNEL_PAGE_TABLE: FairMutex > = FairMutex::new(None);
/// Initializes the allocator and enables paging.
///
/// # Safety
///
/// - Paging must not have been enabled previously.
/// - The buddy allocator must be valid.
#[requires(BUDDY_ALLOCATOR.lock().is_none())]
#[requires(KERNEL_PAGE_TABLE.lock().is_none())]
#[ensures(BUDDY_ALLOCATOR.lock().is_some())]
#[ensures(KERNEL_PAGE_TABLE.lock().is_some())]
pub unsafe fn init_kernel_page_table(buddy_allocator: BuddyAllocator) {
// Just making this mut above gets a warning thanks to the contracts macros...
let mut buddy_allocator = buddy_allocator;
// Allocate a page to use (for now) as the global kernel page table. Later we'll actually
// replace it with the hart0 initial stack's page, since we'll never free the root page of the
// kernel page table, and we can't return that page to the buddy allocator anyway.
let page_table = buddy_allocator
.alloc_zeroed::()
.expect("failed to allocate the kernel page table")
.as_mut();
// Create identity mappings for the lower half of memory.
for page_num in 0..(LOMEM_TOP >> MAX_PAGE_SIZE_BITS) {
let addr = page_num << MAX_PAGE_SIZE_BITS;
let flags = MappingFlags::R | MappingFlags::W | MappingFlags::X;
page_table
.map(
&mut buddy_allocator,
addr,
addr,
1 << MAX_PAGE_SIZE_BITS,
flags,
)
.expect("failed to set up identity mapping for low memory in the kernel page table");
}
// Set the page table as the current page table.
PageTable::make_current(NonNull::from(&*page_table), ASID::KERNEL);
// Print the page table.
vernos_utils::dbg!(&page_table);
// Save the buddy allocator and kernel page table.
*BUDDY_ALLOCATOR.lock() = Some(buddy_allocator);
*KERNEL_PAGE_TABLE.lock() = Some(page_table);
}
/// Initializes the kernel virtual-memory allocator for high memory.
///
/// Not yet implemented; currently panics via `todo!()`.
/// `himem_top` is presumably the exclusive upper bound of the high-memory
/// region to manage — TODO(review): confirm once implemented.
///
/// # Safety
///
/// NOTE(review): marked `unsafe` but the contract is not yet documented;
/// the preconditions below require that `init_kernel_page_table` has
/// already been called.
#[requires(BUDDY_ALLOCATOR.lock().is_some())]
#[requires(KERNEL_PAGE_TABLE.lock().is_some())]
pub unsafe fn init_kernel_virtual_memory_allocator(himem_top: usize) {
    todo!()
}
/// Tries to allocate a page of physical memory of the given size, returning its physical address.
#[requires(PAGE_SIZES.contains(&len))]
pub fn alloc_page(len: usize) -> Result, AllocError> {
let mut buddy_allocator = BUDDY_ALLOCATOR.lock();
let buddy_allocator = buddy_allocator.as_mut().unwrap();
buddy_allocator.alloc_of_size(len).map(|addr| {
// SAFETY: NonNull guarantees the address will be nonzero.
unsafe { NonZero::new_unchecked(addr.as_ptr() as usize) }
})
}
/// Log the kernel page table.
///
/// Emits one summary line with the mapping count, then one line per mapping.
pub fn kernel_log_page_table() {
    let guard = KERNEL_PAGE_TABLE.lock();
    let page_table = guard.as_ref().unwrap();

    // Pick the suffix so the summary reads naturally for zero, one, or many mappings
    // (a trailing colon means mapping lines follow).
    let mapping_count = page_table.debug_mappings().count();
    let suffix = if mapping_count == 0 {
        "s."
    } else if mapping_count == 1 {
        ":"
    } else {
        "s:"
    };
    log::info!("The kernel page table had {mapping_count} mapping{suffix}");

    for mapping in page_table.debug_mappings() {
        log::info!("{mapping:?}");
    }
}
/// Adds a mapping into the kernel page table.
///
/// Maps `len` bytes of physical memory at `paddr` to the virtual range starting at
/// `vaddr`, with the given flags. Both locks are taken for the duration of the call.
pub fn kernel_map(
    vaddr: usize,
    paddr: usize,
    len: usize,
    flags: MappingFlags,
) -> Result<(), MapError> {
    // Lock order: allocator first, then page table (matches init_kernel_page_table's usage).
    let mut allocator_guard = BUDDY_ALLOCATOR.lock();
    let mut table_guard = KERNEL_PAGE_TABLE.lock();
    let allocator = allocator_guard.as_mut().unwrap();
    let page_table = table_guard.as_mut().unwrap();

    page_table.map(&mut *allocator, vaddr, paddr, len, flags)?;
    log::warn!("TODO: sfence.vma");
    log::warn!("TODO: TLB shootdown");
    Ok(())
}