//! The static library that forms the core of the kernel.
#![no_std]

use crate::{
    alloc::{
        alloc_page, init_kernel_page_table, init_kernel_virtual_memory_allocator,
        kernel_log_page_table, kernel_map,
    },
    constants::STACK_SIZE,
    paging::{MappingFlags, PAGE_SIZE, PAGE_SIZE_BITS},
};
use core::ptr::NonNull;
use log::debug;
use vernos_alloc_buddy::BuddyAllocator;
use vernos_alloc_physmem_free_list::FreeListAllocator;
use vernos_device_tree::FlattenedDeviceTree;

#[cfg(target_os = "none")]
mod panic;

pub mod alloc;
pub mod arch;
pub mod constants;
pub mod logger;
pub mod paging;

/// Some addresses passed from the entrypoint to hart0.
#[derive(Debug)]
#[repr(C)]
pub struct EarlyBootAddrs {
    device_tree: *const u8,
    kernel_start: *const [u8; PAGE_SIZE],
    kernel_end: *const [u8; PAGE_SIZE],
    kernel_rx_end: *const [u8; PAGE_SIZE],
    kernel_ro_end: *const [u8; PAGE_SIZE],
    kernel_rw_end: *const [u8; PAGE_SIZE],
    initial_stack_start: *const [u8; PAGE_SIZE],
    stack_end: *const [u8; PAGE_SIZE],
    trampoline_start: *const [u8; PAGE_SIZE],
}

impl EarlyBootAddrs {
    /// Looks for a DeviceTree at the address stored in the `EarlyBootAddrs`, and returns it if it
    /// looks valid. Panics if the DeviceTree is invalid.
    ///
    /// # Safety
    ///
    /// - The `EarlyBootAddrs` must be accurate.
    /// - The `device_tree` pointer must be a valid pointer into physical memory. See
    ///   `device_tree::FlattenedDeviceTree::from_ptr` for the precise requirements.
    unsafe fn flattened_device_tree(&self) -> FlattenedDeviceTree {
        FlattenedDeviceTree::from_ptr(
            self.device_tree,
            self.kernel_start as usize..self.kernel_end as usize,
        )
        .expect("invalid DeviceTree")
    }
}

/// The first stage of booting the kernel. This should be executed by hart0 alone. It runs with
/// paging disabled, and:
///
/// - sets up a physical memory allocator
/// - sets up paging
/// - sets up a virtual memory allocator
/// - sets up an allocator
/// - maps the trampoline page
/// - maps the kernel to higher-half memory
/// - allocates and maps a kernel stack for hart0
///
/// It updates several fields in `early_boot_addrs` to point to the appropriate himem addresses:
///
/// - `kernel_start`
/// - `stack_end`
///
/// # Safety
///
/// - The `device_tree` pointer must be a valid pointer into physical memory. See
///   `device_tree::FlattenedDeviceTree::from_ptr` for the precise requirements.
/// - The `kernel_start`, `kernel_rx_end`, `kernel_ro_end`, `kernel_rw_end`, and `kernel_end`
///   addresses must be accurate and page-aligned.
/// - The `initial_stack_start` and `stack_end` addresses must be accurate and page-aligned.
/// - The `trampoline_start` pointer must be accurate and page-aligned.
/// - This must be called in supervisor mode with paging disabled.
/// - Any other harts must not be running concurrently with us.
#[no_mangle]
pub unsafe extern "C" fn hart0_early_boot(early_boot_addrs: &mut EarlyBootAddrs) {
    // Set up the early-boot logger.
    logger::init_early();
    vernos_utils::dbg!(&*early_boot_addrs);

    // Assert that stuff is aligned properly.
    assert!(early_boot_addrs.kernel_start.is_aligned());
    assert!(early_boot_addrs.kernel_end.is_aligned());
    assert!(early_boot_addrs.kernel_rx_end.is_aligned());
    assert!(early_boot_addrs.kernel_ro_end.is_aligned());
    assert!(early_boot_addrs.kernel_rw_end.is_aligned());
    assert!(early_boot_addrs.initial_stack_start.is_aligned());
    assert!(early_boot_addrs.stack_end.is_aligned());
    assert!(early_boot_addrs.trampoline_start.is_aligned());
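    // The rest of this function builds the himem layout top-down from the
    // highest page, with `vaddr_bump` tracking the next free virtual address.
    // A sketch of the resulting layout (not to scale):
    //
    //     +-------------------------------+ <- top of the address space
    //     | trampoline page (r-x)         |
    //     +-------------------------------+
    //     | guard page (unmapped)         |
    //     +-------------------------------+
    //     | kernel image (r-x, r--, and   |
    //     | rw- sections, in that order)  |
    //     +-------------------------------+ <- new kernel_start
    //     | guard page (unmapped)         |
    //     +-------------------------------+ <- new stack_end
    //     | hart0 stack (rw-)             |
    //     +-------------------------------+ <- new stack_end - STACK_SIZE
    //     | guard page (unmapped)         |
    //     +-------------------------------+
    //     | region handed to the kernel   |
    //     | virtual memory allocator      |
    //     +-------------------------------+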
    // Parse the DeviceTree.
    let flattened_device_tree = unsafe { early_boot_addrs.flattened_device_tree() };

    // Find the available physical memory areas and initialize the physical memory free-list.
    let mut physical_memory_free_list = FreeListAllocator::<PAGE_SIZE>::new();
    let mut physical_memory_region_count = 0;
    flattened_device_tree
        .for_each_memory_range::<_, PAGE_SIZE>(|addrs| {
            let len_bytes = addrs.end - addrs.start;
            assert!(addrs.start.trailing_zeros() as usize >= PAGE_SIZE_BITS);
            assert!(len_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS);
            // UNWRAP: for_each_memory_range avoids returning the zero address.
            let addr = NonNull::new(addrs.start as *mut [u8; PAGE_SIZE]).unwrap();
            let len_pages = len_bytes >> PAGE_SIZE_BITS;
            physical_memory_free_list.add(addr, len_pages);
            physical_memory_region_count += 1;
            Ok(())
        })
        .unwrap_or_else(|err| void::unreachable(err));

    // Log the physical memory we found.
    debug!(
        "found {} usable regions of physical memory{}",
        physical_memory_region_count,
        if physical_memory_region_count == 0 {
            ""
        } else {
            ":"
        }
    );
    for (addr, len_pages) in physical_memory_free_list.iter() {
        debug!(
            "{:p}..{:p} ({} bytes)",
            addr.as_ptr(),
            addr.as_ptr().wrapping_add(len_pages),
            len_pages << PAGE_SIZE_BITS,
        )
    }

    // Initialize the buddy allocator.
    let alloc_buddy = BuddyAllocator::new(physical_memory_free_list)
        .expect("failed to configure the buddy allocator");

    // Set up the kernel page table.
    init_kernel_page_table(alloc_buddy);

    // Map the trampoline page.
    let mut vaddr_bump = usize::MAX - PAGE_SIZE + 1;
    kernel_map(
        vaddr_bump,
        early_boot_addrs.trampoline_start as usize,
        PAGE_SIZE,
        MappingFlags::R | MappingFlags::X,
    )
    .expect("failed to map the trampoline page to himem");

    // Skip a page down for a guard page, then map the kernel.
    let total_kernel_pages = early_boot_addrs
        .kernel_rw_end
        .offset_from(early_boot_addrs.kernel_start) as usize;
    vaddr_bump -= PAGE_SIZE * (total_kernel_pages + 1);
    let new_kernel_start = vaddr_bump;
    for i in 0..total_kernel_pages {
        let vaddr = vaddr_bump + (i * PAGE_SIZE);
        let paddr = early_boot_addrs.kernel_start.add(i);
        let flags = if paddr < early_boot_addrs.kernel_rx_end {
            MappingFlags::R | MappingFlags::X
        } else if paddr < early_boot_addrs.kernel_ro_end {
            MappingFlags::R
        } else {
            MappingFlags::R | MappingFlags::W
        };
        kernel_map(vaddr, paddr as usize, PAGE_SIZE, flags)
            .expect("failed to map the kernel to himem");
    }

    // Skip a page down for a guard page, then map the top page of the stack.
    vaddr_bump -= PAGE_SIZE;
    let new_stack_end = vaddr_bump;
    vaddr_bump -= PAGE_SIZE;
    kernel_map(
        vaddr_bump,
        early_boot_addrs.initial_stack_start as usize,
        PAGE_SIZE,
        MappingFlags::R | MappingFlags::W,
    )
    .expect("failed to map the initial stack to himem");

    // Allocate and map more pages for the stack.
    let new_stack_start = new_stack_end - STACK_SIZE;
    vaddr_bump = new_stack_start;
    for i in 0..((STACK_SIZE >> PAGE_SIZE_BITS) - 1) {
        let vaddr = new_stack_start + (i << PAGE_SIZE_BITS);
        let paddr =
            alloc_page(PAGE_SIZE).expect("failed to allocate memory for a hart0 stack page");
        kernel_map(
            vaddr,
            paddr.into(),
            PAGE_SIZE,
            MappingFlags::R | MappingFlags::W,
        )
        .expect("failed to map a hart0 stack page");
    }
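    // The top page of the stack is backed by the same physical page the
    // entrypoint's initial stack occupies (`initial_stack_start`), so data
    // already on the stack (such as the canary checked in `hart0_boot`) stays
    // visible at the himem address; the pages below it are freshly allocated.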
    // Skip another page down for a guard page.
    vaddr_bump -= PAGE_SIZE;

    // Set up the kernel virtual memory allocator (and general allocator).
    init_kernel_virtual_memory_allocator(vaddr_bump);

    // Set the fields in `early_boot_addrs` that we promised to update.
    early_boot_addrs.kernel_start = new_kernel_start as *const [u8; PAGE_SIZE];
    early_boot_addrs.stack_end = new_stack_end as *const [u8; PAGE_SIZE];

    // Log and return.
    kernel_log_page_table();
}

/// The entrypoint to the kernel, to be run after paging and the allocator have been set up, and
/// the stack has been switched to be in himem. This should be executed by hart0 alone. It performs
/// some early boot tasks, then wakes up any other harts.
///
/// The tasks it performs are:
///
/// - converts the DeviceTree into a global key-value mapping
/// - upgrades the logger to one that can dynamically grow
/// - TODO
///
/// # Safety
///
/// - `hart0_early_boot` must have been called.
/// - This must be called in supervisor mode with traps disabled, but with all traps delegated to
///   supervisor mode.
/// - Any other harts must not be running concurrently with us. TODO: Define their state.
#[no_mangle]
pub unsafe extern "C" fn hart0_boot(early_boot_addrs: &mut EarlyBootAddrs) -> ! {
    // Check that the stack canary was present.
    assert_eq!(
        *(early_boot_addrs.initial_stack_start as *const u64),
        0xdead0bad0defaced
    );

    todo!("hart0_boot");
}
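// A sketch of the expected boot flow, for orientation (an assumption on our
// part: the exact sequence lives in the arch-specific entrypoint, outside this
// crate):
//
// 1. The entrypoint fills an `EarlyBootAddrs` with lomem addresses and calls
//    `hart0_early_boot` on the initial stack, with paging disabled.
// 2. `hart0_early_boot` builds the kernel page table and rewrites the
//    `kernel_start` and `stack_end` fields to their himem equivalents.
// 3. The entrypoint enables paging (bouncing through the trampoline page),
//    moves the stack pointer into the new `stack_end`, and calls `hart0_boot`,
//    which never returns.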