Diffstat (limited to 'crates/kernel/src/alloc.rs')
-rw-r--r--  crates/kernel/src/alloc.rs  120
1 file changed, 84 insertions(+), 36 deletions(-)
diff --git a/crates/kernel/src/alloc.rs b/crates/kernel/src/alloc.rs
index 94b4267..f634c73 100644
--- a/crates/kernel/src/alloc.rs
+++ b/crates/kernel/src/alloc.rs
@@ -1,23 +1,19 @@
//! Global structures for the allocators.
-use crate::arch::{
- paging::{PageTable, PageTableEntry, ASID, PAGE_TABLE_BITS, PAGE_TABLE_LEVELS},
- MAX_PAGE_SIZE_BITS, PAGE_SIZE, PAGE_SIZE_BITS,
+use crate::paging::{
+ BuddyAllocator, MapError, MappingFlags, PageTable, ASID, LOMEM_TOP, MAX_PAGE_SIZE_BITS,
+ PAGE_SIZES,
};
+use allocator_api2::alloc::AllocError;
use contracts::requires;
-use core::ptr::NonNull;
+use core::{num::NonZero, ptr::NonNull};
use spin::mutex::FairMutex;
-use vernos_alloc_buddy::BuddyAllocator;
-use vernos_utils::BelieveMeSend;
/// The global instance of the physical page allocator.
-static BUDDY_ALLOCATOR: FairMutex<
- Option<BuddyAllocator<PAGE_SIZE, PAGE_SIZE_BITS, { 1 + MAX_PAGE_SIZE_BITS - PAGE_SIZE_BITS }>>,
-> = FairMutex::new(None);
+static BUDDY_ALLOCATOR: FairMutex<Option<BuddyAllocator>> = FairMutex::new(None);
/// The global kernel page table.
-static KERNEL_PAGE_TABLE: FairMutex<BelieveMeSend<Option<NonNull<PageTable>>>> =
- FairMutex::new(BelieveMeSend(None));
+static KERNEL_PAGE_TABLE: FairMutex<Option<&'static mut PageTable>> = FairMutex::new(None);
/// Initializes the allocator and enables paging.
///
@@ -26,44 +22,96 @@ static KERNEL_PAGE_TABLE: FairMutex<BelieveMeSend<Option<NonNull<PageTable>>>> =
/// - Paging must not have been enabled previously.
/// - The buddy allocator must be valid.
#[requires(BUDDY_ALLOCATOR.lock().is_none())]
-#[ensures(BUDDY_ALLOCATOR.lock().is_some())]
#[requires(KERNEL_PAGE_TABLE.lock().is_none())]
+#[ensures(BUDDY_ALLOCATOR.lock().is_some())]
#[ensures(KERNEL_PAGE_TABLE.lock().is_some())]
-pub unsafe fn init(
- mut buddy_allocator: BuddyAllocator<
- 'static,
- PAGE_SIZE,
- PAGE_SIZE_BITS,
- { 1 + MAX_PAGE_SIZE_BITS - PAGE_SIZE_BITS },
- >,
-) {
+pub unsafe fn init_kernel_page_table(buddy_allocator: BuddyAllocator) {
+    // Declaring the parameter `mut` in the signature triggers a warning because of the contracts macros, so rebind it as mutable here instead.
+ let mut buddy_allocator = buddy_allocator;
+
// Allocate a page to use (for now) as the global kernel page table. Later we'll actually
// replace it with the hart0 initial stack's page, since we'll never free the root page of the
// kernel page table, and we can't return that page to the buddy allocator anyway.
- let mut page_table = buddy_allocator
+ let page_table = buddy_allocator
.alloc_zeroed::<PageTable>()
- .expect("failed to allocate the kernel page table");
+ .expect("failed to allocate the kernel page table")
+ .as_mut();
// Create identity mappings for the lower half of memory.
- for (i, entry) in page_table
- .as_mut()
- .iter_mut()
- .enumerate()
- .take(1 << (PAGE_TABLE_BITS - 1))
- {
- let addr = (i as u64) << ((PAGE_TABLE_LEVELS - 1) * PAGE_TABLE_BITS + PAGE_SIZE_BITS);
- let mut pte = PageTableEntry::default();
- pte.set_valid(true).set_rwx(true, true, true).set_addr(addr);
- *entry = pte;
+ for page_num in 0..(LOMEM_TOP >> MAX_PAGE_SIZE_BITS) {
+ let addr = page_num << MAX_PAGE_SIZE_BITS;
+ let flags = MappingFlags::R | MappingFlags::W | MappingFlags::X;
+ page_table
+ .map(
+ &mut buddy_allocator,
+ addr,
+ addr,
+ 1 << MAX_PAGE_SIZE_BITS,
+ flags,
+ )
+ .expect("failed to set up identity mapping for low memory in the kernel page table");
}
// Set the page table as the current page table.
- page_table.as_mut().make_current(ASID::KERNEL);
+ PageTable::make_current(NonNull::from(&*page_table), ASID::KERNEL);
+
+ // Print the page table.
+ vernos_utils::dbg!(&page_table);
// Save the buddy allocator and kernel page table.
*BUDDY_ALLOCATOR.lock() = Some(buddy_allocator);
- KERNEL_PAGE_TABLE.lock().0 = Some(page_table);
+ *KERNEL_PAGE_TABLE.lock() = Some(page_table);
+}
+
+#[requires(BUDDY_ALLOCATOR.lock().is_some())]
+#[requires(KERNEL_PAGE_TABLE.lock().is_some())]
+pub unsafe fn init_kernel_virtual_memory_allocator(himem_top: usize) {
+ todo!()
+}
+
+/// Tries to allocate a page of physical memory of the given size, returning its physical address.
+#[requires(PAGE_SIZES.contains(&len))]
+pub fn alloc_page(len: usize) -> Result<NonZero<usize>, AllocError> {
+ let mut buddy_allocator = BUDDY_ALLOCATOR.lock();
+ let buddy_allocator = buddy_allocator.as_mut().unwrap();
+ buddy_allocator.alloc_of_size(len).map(|addr| {
+ // SAFETY: NonNull guarantees the address will be nonzero.
+ unsafe { NonZero::new_unchecked(addr.as_ptr() as usize) }
+ })
+}
+
+/// Log the kernel page table.
+pub fn kernel_log_page_table() {
+ let kernel_page_table = KERNEL_PAGE_TABLE.lock();
+ let kernel_page_table = kernel_page_table.as_ref().unwrap();
+
+ let count = kernel_page_table.debug_mappings().count();
+ log::info!(
+ "The kernel page table had {count} mapping{}",
+ match count {
+ 0 => "s.",
+ 1 => ":",
+ _ => "s:",
+ }
+ );
+ for mapping in kernel_page_table.debug_mappings() {
+ log::info!("{mapping:?}");
+ }
+}
- // Print the page table after this.
- vernos_utils::dbg!(page_table.as_mut());
+/// Adds a mapping into the kernel page table.
+pub fn kernel_map(
+ vaddr: usize,
+ paddr: usize,
+ len: usize,
+ flags: MappingFlags,
+) -> Result<(), MapError> {
+ let mut buddy_allocator = BUDDY_ALLOCATOR.lock();
+ let mut kernel_page_table = KERNEL_PAGE_TABLE.lock();
+ let buddy_allocator = buddy_allocator.as_mut().unwrap();
+ let kernel_page_table = kernel_page_table.as_mut().unwrap();
+ kernel_page_table.map(&mut *buddy_allocator, vaddr, paddr, len, flags)?;
+ log::warn!("TODO: sfence.vma");
+ log::warn!("TODO: TLB shootdown");
+ Ok(())
}
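
For readers following the change, here is a minimal sketch of how the reworked alloc.rs API might be driven from the kernel's boot path. It is illustrative only and not part of this commit: the helper name `boot_memory`, the MMIO addresses, and the assumption that 4096 is one of the entries in `PAGE_SIZES` are made up; only `init_kernel_page_table`, `alloc_page`, `kernel_map`, `kernel_log_page_table`, and the `BuddyAllocator`/`MappingFlags` types come from this diff.

// Hypothetical boot-path caller inside the kernel crate (sketch, not from this commit).
use crate::alloc::{alloc_page, init_kernel_page_table, kernel_log_page_table, kernel_map};
use crate::paging::{BuddyAllocator, MappingFlags};

/// Called once on hart 0 with a buddy allocator built over the usable RAM ranges.
unsafe fn boot_memory(buddy_allocator: BuddyAllocator) {
    // Build the kernel page table, identity-map low memory, and switch to it.
    init_kernel_page_table(buddy_allocator);
    // (init_kernel_virtual_memory_allocator is still a todo!() in this commit, so it is not called here.)

    // Map a device's MMIO range into kernel virtual memory (addresses are made up).
    let uart_paddr = 0x1000_0000;
    let uart_vaddr = 0xffff_ffc0_1000_0000;
    kernel_map(
        uart_vaddr,
        uart_paddr,
        4096,
        MappingFlags::R | MappingFlags::W,
    )
    .expect("failed to map the UART");

    // Grab one physical page (assuming 4096 is in PAGE_SIZES on this target).
    let page_paddr = alloc_page(4096).expect("out of physical memory");
    log::info!("early physical page at {page_paddr:#x}");

    // Dump the kernel page table's mappings for debugging.
    kernel_log_page_table();
}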