author     Nathan Ringo <nathan@remexre.com>    2024-09-02 02:07:07 -0500
committer  Nathan Ringo <nathan@remexre.com>    2024-09-02 02:07:07 -0500
commit     52e33eee454766940d1987199868f0d892ce34a3 (patch)
tree       64515ac26d363dcfe4304b1dd34665502746d82d
parent     1afb23196b9882f85c8fbcfbfbd5f92a58960e14 (diff)
Set up the physical memory allocators.
-rw-r--r--  crates/alloc_buddy/src/lib.rs                 | 28
-rw-r--r--  crates/device_tree/src/lib.rs                 | 91
-rw-r--r--  crates/kernel/src/arch/riscv64/interrupts.rs  |  8
-rw-r--r--  crates/kernel/src/lib.rs                      | 51
4 files changed, 136 insertions, 42 deletions
diff --git a/crates/alloc_buddy/src/lib.rs b/crates/alloc_buddy/src/lib.rs
index c890e79..fc5d245 100644
--- a/crates/alloc_buddy/src/lib.rs
+++ b/crates/alloc_buddy/src/lib.rs
@@ -369,6 +369,19 @@ impl<
         self.free_list(size_class).push(ptr);
     }
 
+    /// Returns a `Debug` that prints the free lists of the allocator.
+    pub fn debug_free_lists(&self) -> impl '_ + fmt::Debug {
+        debug(|fmt| {
+            fmt.debug_list()
+                .entries((0..SIZE_CLASS_COUNT).map(|size_class| {
+                    // SAFETY: The free lists are kept valid, and the range of size classes is
+                    // necessarily in-bounds.
+                    unsafe { FreeList::from_sentinel(self.free_list_sentinels.add(size_class)) }
+                }))
+                .finish()
+        })
+    }
+
     /// Returns the free list with the given size class.
     #[requires(size_class < SIZE_CLASS_COUNT)]
     fn free_list(&mut self, size_class: usize) -> FreeList {
@@ -411,20 +424,7 @@ impl<
 {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         fmt.debug_struct("BuddyAllocator")
-            .field(
-                "free_lists",
-                &debug(|fmt| {
-                    fmt.debug_list()
-                        .entries((0..SIZE_CLASS_COUNT).map(|size_class| {
-                            // SAFETY: The free lists are kept valid, and the range of size classes is
-                            // necessarily in-bounds.
-                            unsafe {
-                                FreeList::from_sentinel(self.free_list_sentinels.add(size_class))
-                            }
-                        }))
-                        .finish()
-                }),
-            )
+            .field("free_lists", &self.debug_free_lists())
             .field(
                 "trees",
                 &debug(|fmt| {
diff --git a/crates/device_tree/src/lib.rs b/crates/device_tree/src/lib.rs
index 531f7ef..fe6108f 100644
--- a/crates/device_tree/src/lib.rs
+++ b/crates/device_tree/src/lib.rs
@@ -115,6 +115,92 @@ impl<'dt> FlattenedDeviceTree<'dt> {
         Ok(&out[..i])
     }
 
+    /// Parses memory reservations out of the flattened DeviceTree, removes reserved ranges (see
+    /// `iter_reserved`), and calls the function with each range of usable memory. This does not
+    /// allocate memory, and may be called before the allocator is configured.
+    pub fn for_each_memory_range<E, const PAGE_SIZE: usize>(
+        &'dt self,
+        mut func: impl FnMut(Range<usize>) -> Result<(), E>,
+    ) -> Result<(), E> {
+        // Iterate through the nodes, looking for memory nodes.
+        self.for_each_node(|node| {
+            if node.is_unit(&["", "memory"]) {
+                // Get the memory ranges.
+                let Some(reg) = node.get_reg_usize() else {
+                    warn!("{}reg was not valid", node.name());
+                    return Ok(());
+                };
+
+                'process_node: for (addr, size) in reg {
+                    let mut addrs = addr..addr + size;
+
+                    // Clamp the range, so that we can't somehow end up with the zero address or
+                    // with higher-half addresses.
+                    addrs.start = addrs.start.max(1);
+                    addrs.end = addrs.end.min(isize::MAX as usize);
+
+                    // Trim the range to be page-aligned.
+                    addrs.start = (addrs.start + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
+                    addrs.end &= !(PAGE_SIZE - 1);
+
+                    // Add each unreserved subrange to the allocator's list.
+                    'process_range: loop {
+                        assert_eq!(addrs.start & (PAGE_SIZE - 1), 0);
+                        assert_eq!(addrs.end & (PAGE_SIZE - 1), 0);
+
+                        // Walk forwards until the first page is unreserved.
+                        'ensure_first_page_unreserved: loop {
+                            // Ensure the range is non-empty.
+                            if addrs.end <= addrs.start {
+                                continue 'process_node;
+                            }
+
+                            // If the start of the range is reserved, walk forwards a page.
+                            let first_page = addrs.start..addrs.start + PAGE_SIZE;
+                            if self
+                                .iter_reserved()
+                                .any(|range| ranges_overlap(&range, &first_page))
+                            {
+                                addrs.start = first_page.end;
+                            } else {
+                                break 'ensure_first_page_unreserved;
+                            }
+                        }
+
+                        // Find the first reservation that overlaps.
+                        if let Some(reservation) = self
+                            .iter_reserved()
+                            .filter(|range| ranges_overlap(range, &addrs))
+                            .min_by_key(|range| range.start)
+                        {
+                            // Get the range that's before the reservation.
+                            let mut unreserved = addrs.start..reservation.start;
+
+                            // Trim the range to be page-aligned. We don't need to trim the start,
+                            // because addrs.start is already aligned (by our loop invariant).
+                            unreserved.end &= !(PAGE_SIZE - 1);
+
+                            // Call the function with the range up to the start of that overlap.
+                            func(unreserved)?;
+
+                            // Adjust the remaining range to be after the overlap.
+                            addrs.start = reservation.end;
+
+                            // Trim the range to be page-aligned. We don't need to trim the end,
+                            // because it's unchanged.
+                            addrs.start = (addrs.start + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
+                        } else {
+                            // If no range overlaps, the entire remainder of this range is valid.
+                            func(addrs)?;
+                            break 'process_range;
+                        }
+                    }
+                }
+            }
+            Ok(())
+        })
+    }
+
     /// Parses nodes out of a flattened DeviceTree and calls the function with each one. This does
     /// not allocate memory, and may be called before the allocator is configured (at the cost of
     /// decreased performance).
@@ -643,3 +729,8 @@ fn for_each_node<'dt, 'iter, E>(
         offset = for_each_node(fdt, func, on_error, offset, new_parents)?;
     }
 }
+
+/// Returns whether the two ranges overlap.
+fn ranges_overlap<T: Copy + Ord>(r1: &Range<T>, r2: &Range<T>) -> bool {
+    r1.start.max(r2.start) < r1.end.min(r2.end)
+}
diff --git a/crates/kernel/src/arch/riscv64/interrupts.rs b/crates/kernel/src/arch/riscv64/interrupts.rs
index 84f2258..b02d750 100644
--- a/crates/kernel/src/arch/riscv64/interrupts.rs
+++ b/crates/kernel/src/arch/riscv64/interrupts.rs
@@ -11,10 +11,10 @@ pub(crate) fn example_timer() {
     let now = Instant::now();
     info!("now = {now:?}");
 
-    let in_a_sec = now + Duration::from_secs(1);
-    info!("in_a_sec = {in_a_sec:?}");
-    info!("setting a timer for 1s...");
-    set_timer(in_a_sec);
+    let later = now + Duration::from_secs(60);
+    info!("later = {later:?}");
+    info!("setting a timer for 1min...");
+    set_timer(later);
 
     enable_interrupts();
 }
diff --git a/crates/kernel/src/lib.rs b/crates/kernel/src/lib.rs
index 8e418e7..6e09d2b 100644
--- a/crates/kernel/src/lib.rs
+++ b/crates/kernel/src/lib.rs
@@ -1,8 +1,10 @@
 //! The static library that forms the core of the kernel.
 #![no_std]
 
-use crate::arch::{sleep_forever, PAGE_SIZE};
+use crate::arch::{sleep_forever, PAGE_SIZE, PAGE_SIZE_BITS};
+use core::ptr::NonNull;
 use log::{debug, info, warn};
+use vernos_alloc_buddy::BuddyAllocator;
 use vernos_alloc_physmem_free_list::FreeListAllocator;
 use vernos_device_tree::FlattenedDeviceTree;
 use vernos_utils::dbg;
@@ -35,22 +37,19 @@ pub unsafe extern "C" fn hart0_boot(device_tree: *const u8) -> ! {
     // Find the available physical memory areas and initialize the physical memory
     // free-list.
     let mut physical_memory_free_list = FreeListAllocator::<PAGE_SIZE>::new();
-    dbg!(physical_memory_free_list);
-
-    /*
+    let mut physical_memory_region_count = 0;
     flattened_device_tree
-        .for_each_node(|node| {
-            if node.is_unit(&["", "memory"]) {
-                // Get the memory ranges.
-                let Some(reg) = node.get_reg_usize() else {
-                    warn!("{}reg was not valid", node.name());
-                    return Ok(());
-                };
+        .for_each_memory_range::<_, PAGE_SIZE>(|addrs| {
+            dbg!(&addrs);
+            let len_bytes = addrs.end - addrs.start;
+            assert!(addrs.start.trailing_zeros() as usize >= PAGE_SIZE_BITS);
+            assert!(len_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS);
+            // UNWRAP: for_each_memory_range avoids returning the zero address.
+            let addr = NonNull::new(addrs.start as *mut [u8; PAGE_SIZE]).unwrap();
+            let len_pages = len_bytes >> PAGE_SIZE_BITS;
 
-                for (addr, size) in reg {
-                    physical_memory_free_list.add_range(addr..addr + size);
-                }
-            }
+            physical_memory_free_list.add(addr, len_pages);
+            physical_memory_region_count += 1;
             Ok(())
         })
         .unwrap_or_else(|err| void::unreachable(err));
@@ -58,23 +57,27 @@ pub unsafe extern "C" fn hart0_boot(device_tree: *const u8) -> ! {
     // Log the physical memory we found.
     debug!(
         "found {} usable regions of physical memory{}",
-        physical_memory_free_list.len(),
-        if physical_memory_free_list.is_empty() {
+        physical_memory_region_count,
+        if physical_memory_region_count == 0 {
             ""
         } else {
             ":"
        }
     );
-    for region in physical_memory_free_list.drain() {
+    for (addr, len_pages) in physical_memory_free_list.iter() {
         debug!(
-            "{:p}..{:p} ({} byte{})",
-            region.start as *const u8,
-            region.end as *const u8,
-            region.len(),
-            if region.len() == 1 { "" } else { "s" }
+            "{:p}..{:p} ({} bytes)",
+            addr.as_ptr(),
+            addr.as_ptr().wrapping_add(len_pages),
+            len_pages << PAGE_SIZE_BITS,
         )
     }
-    */
+
+    // Initialize the buddy allocator.
+    let alloc_buddy =
+        BuddyAllocator::<PAGE_SIZE, PAGE_SIZE_BITS, 19>::new(physical_memory_free_list)
+            .expect("failed to configure the buddy allocator");
+    dbg!(alloc_buddy.debug_free_lists());
 
     // After this point, everything else is for debugging.
     #[cfg(target_arch = "riscv64")]
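
Both the new debug_free_lists and the slimmed-down Debug impl lean on a debug(...) helper that turns a formatting closure into a value implementing fmt::Debug. That helper lives in vernos_utils, outside this diff; the following is a minimal sketch of how such an adapter is typically written (the DebugFn wrapper is illustrative, not the crate's actual implementation):

use core::fmt;

/// Wraps a formatting closure so it can be used anywhere a `fmt::Debug` is
/// expected, e.g. as a field value passed to `Formatter::debug_struct`.
fn debug<F: Fn(&mut fmt::Formatter) -> fmt::Result>(f: F) -> impl fmt::Debug {
    struct DebugFn<F>(F);
    impl<F: Fn(&mut fmt::Formatter) -> fmt::Result> fmt::Debug for DebugFn<F> {
        fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
            (self.0)(fmt)
        }
    }
    DebugFn(f)
}

fn main() {
    // The closure runs only when the value is actually formatted.
    let d = debug(|fmt| write!(fmt, "hello from a closure"));
    println!("{d:?}");
}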
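for_each_memory_range repeatedly rounds range bounds with (x + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1) (round up) and x & !(PAGE_SIZE - 1) (round down); both identities only hold when PAGE_SIZE is a power of two. A standalone sketch with worked values, assuming a 4 KiB page (the page_align_up/page_align_down names are illustrative):

/// Assumed page size; the kernel takes this as a const generic parameter.
const PAGE_SIZE: usize = 4096;

/// Rounds `addr` up to the next multiple of `PAGE_SIZE`.
fn page_align_up(addr: usize) -> usize {
    (addr + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)
}

/// Rounds `addr` down to the previous multiple of `PAGE_SIZE`.
fn page_align_down(addr: usize) -> usize {
    addr & !(PAGE_SIZE - 1)
}

fn main() {
    assert_eq!(page_align_up(0x8000_0001), 0x8000_1000);
    assert_eq!(page_align_up(0x8000_1000), 0x8000_1000); // already aligned
    assert_eq!(page_align_down(0x8000_0fff), 0x8000_0000);
}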
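The heart of for_each_memory_range is subtracting the reserved ranges from each memory node's reg ranges. Setting aside the page-alignment bookkeeping and the no-allocation constraint, the control flow reduces to the model below; subtract_reserved is a hypothetical standalone function, while ranges_overlap is copied from the helper this commit adds:

use core::ops::Range;

/// Returns whether the two ranges overlap (same helper as in the diff).
fn ranges_overlap<T: Copy + Ord>(r1: &Range<T>, r2: &Range<T>) -> bool {
    r1.start.max(r2.start) < r1.end.min(r2.end)
}

/// Calls `func` with each maximal subrange of `addrs` not covered by any
/// range in `reserved`.
fn subtract_reserved(
    mut addrs: Range<usize>,
    reserved: &[Range<usize>],
    mut func: impl FnMut(Range<usize>),
) {
    while addrs.start < addrs.end {
        // Find the lowest-starting reservation that still overlaps `addrs`.
        match reserved
            .iter()
            .filter(|r| ranges_overlap(r, &addrs))
            .min_by_key(|r| r.start)
        {
            Some(r) => {
                // Report the unreserved prefix (if any), then skip past the
                // reservation; mirrors the `if let Some(reservation)` branch.
                if addrs.start < r.start {
                    func(addrs.start..r.start);
                }
                addrs.start = r.end;
            }
            None => {
                // Nothing overlaps: the whole remainder is usable.
                func(addrs);
                return;
            }
        }
    }
}

fn main() {
    // One reservation in the middle of a region splits it into two
    // usable ranges.
    let mut out = Vec::new();
    subtract_reserved(
        0x8000_0000..0x8800_0000,
        &[0x8020_0000..0x8040_0000],
        |r| out.push(r),
    );
    assert_eq!(out, [0x8000_0000..0x8020_0000, 0x8040_0000..0x8800_0000]);
}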
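Finally, the boot code instantiates BuddyAllocator::<PAGE_SIZE, PAGE_SIZE_BITS, 19>, i.e. nineteen size classes. Assuming the conventional buddy layout where size class n holds blocks of PAGE_SIZE << n bytes, and assuming riscv64's 4 KiB base pages (PAGE_SIZE_BITS = 12), the largest class is 1 GiB; a quick check of that arithmetic:

const PAGE_SIZE: usize = 4096; // assumed riscv64 base page size
const PAGE_SIZE_BITS: usize = 12; // log2(PAGE_SIZE)
const SIZE_CLASS_COUNT: usize = 19; // as in the BuddyAllocator instantiation

fn main() {
    assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS);
    for size_class in 0..SIZE_CLASS_COUNT {
        // A block in size class `n` spans 2^n pages.
        let bytes = PAGE_SIZE << size_class;
        println!("size class {size_class:2}: {bytes} bytes");
    }
    // The top size class spans 4 KiB << 18 = 1 GiB.
    assert_eq!(PAGE_SIZE << (SIZE_CLASS_COUNT - 1), 1 << 30);
}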