Diffstat (limited to 'kernel/src')
-rw-r--r--  kernel/src/allocators/mod.rs                       |  23
-rw-r--r--  kernel/src/allocators/physical_memory_free_list.rs | 322
-rw-r--r--  kernel/src/device_tree.rs                          |  28
-rw-r--r--  kernel/src/lib.rs                                  |  24
-rw-r--r--  kernel/src/util.rs                                 |   7
5 files changed, 401 insertions(+), 3 deletions(-)
diff --git a/kernel/src/allocators/mod.rs b/kernel/src/allocators/mod.rs
index e69de29..76d85e6 100644
--- a/kernel/src/allocators/mod.rs
+++ b/kernel/src/allocators/mod.rs
@@ -0,0 +1,23 @@
+use core::{ffi::c_void, ops::Range, ptr::addr_of};
+
+pub mod physical_memory_free_list;
+
+/// The number of bits in the offset in a page.
+pub const PAGE_SIZE_BITS: usize = 12;
+
+/// The size of a page, in bytes.
+pub const PAGE_SIZE: usize = 1 << PAGE_SIZE_BITS;
+
+/// Returns the physical address range the kernel is in.
+pub fn kernel_boundaries() -> Range<usize> {
+ extern "C" {
+ static kernel_start: c_void;
+ static kernel_end: c_void;
+ }
+
+    // SAFETY: We only use these as addresses; we never dereference them.
+ let (kernel_start_addr, kernel_end_addr) =
+ unsafe { (addr_of!(kernel_start), addr_of!(kernel_end)) };
+
+ kernel_start_addr as usize..kernel_end_addr as usize
+}
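
The two page constants above exist for the mask arithmetic the allocator performs everywhere: rounding an address up or down to a page boundary. A minimal standalone sketch of the round-up/round-down operations that `add_range` later does inline (the helper names here are made up and not part of the commit):

    const PAGE_SIZE_BITS: usize = 12;
    const PAGE_SIZE: usize = 1 << PAGE_SIZE_BITS; // 4096

    /// Rounds `addr` up to the next page boundary (a no-op if already aligned).
    fn page_align_up(addr: usize) -> usize {
        (addr + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)
    }

    /// Rounds `addr` down to the previous page boundary.
    fn page_align_down(addr: usize) -> usize {
        addr & !(PAGE_SIZE - 1)
    }

    fn main() {
        assert_eq!(page_align_up(0x1001), 0x2000);
        assert_eq!(page_align_up(0x2000), 0x2000); // already aligned
        assert_eq!(page_align_down(0x1fff), 0x1000);
    }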
diff --git a/kernel/src/allocators/physical_memory_free_list.rs b/kernel/src/allocators/physical_memory_free_list.rs
new file mode 100644
index 0000000..0d2f1b4
--- /dev/null
+++ b/kernel/src/allocators/physical_memory_free_list.rs
@@ -0,0 +1,322 @@
+//! A free-list allocator that runs in physical memory. This should only be used to bootstrap the
+//! buddy allocator for physical memory.
+
+use crate::{
+ allocators::{kernel_boundaries, PAGE_SIZE, PAGE_SIZE_BITS},
+ device_tree::FlattenedDeviceTree,
+ prelude::*,
+ util::ranges_overlap,
+};
+use contracts::requires;
+use core::{
+ fmt, iter,
+ ops::Range,
+ ptr::{self, NonNull},
+};
+
+/// A free-list allocator that runs in physical memory. This should only be used to bootstrap the
+/// buddy allocator for physical memory.
+pub struct FreeList<'dt> {
+ /// The DeviceTree the machine was booted with.
+ device_tree: &'dt FlattenedDeviceTree<'dt>,
+
+ /// The physical address of the head of the free-list allocator.
+ ///
+ /// The free-list is sorted by size, smallest to largest.
+ head: Option<FreeListAddr>,
+
+ /// The number of regions in the free-list.
+ len: usize,
+}
+
+impl<'dt> FreeList<'dt> {
+ /// Creates a new, empty free-list that checks addresses against the given DeviceTree.
+    pub fn new(device_tree: &'dt FlattenedDeviceTree<'dt>) -> FreeList<'dt> {
+ let out = FreeList {
+ device_tree,
+ head: None,
+ len: 0,
+ };
+
+ // Log the memory reservations.
+ let reservation_count = out.iter_reserved().count();
+ debug!(
+ "found {} memory reservations{}",
+ reservation_count,
+ if reservation_count == 0 { "" } else { ":" }
+ );
+ for reservation in out.iter_reserved() {
+ debug!(
+ "{:p}..{:p} ({} byte{})",
+ reservation.start as *const u8,
+ reservation.end as *const u8,
+ reservation.len(),
+ if reservation.len() == 1 { "" } else { "s" }
+ )
+ }
+
+ out
+ }
+
+ /// Adds a physical address range to the free-list allocator.
+ ///
+ /// ## Safety
+ ///
+ /// - This allocator must have been created with the DeviceTree that gave us this memory.
+ /// - The region must be a valid physical memory range.
+ /// - Paging must not be enabled for the entire lifetime of this allocator.
+ pub unsafe fn add_range(&mut self, mut addrs: Range<usize>) {
+ // Clamp the range, so that we can't somehow end up with the zero address or with
+ // higher-half addresses.
+ addrs.start = addrs.start.max(1);
+ addrs.end = addrs.end.min(isize::MAX as usize);
+
+ // Trim the range to be page-aligned.
+ addrs.start = (addrs.start + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
+ addrs.end &= !(PAGE_SIZE - 1);
+
+ // Add each unreserved subrange to the allocator's list.
+ loop {
+ assert_eq!(addrs.start & (PAGE_SIZE - 1), 0);
+ assert_eq!(addrs.end & (PAGE_SIZE - 1), 0);
+
+ // Walk forwards until the first page is unreserved.
+ loop {
+ // Ensure the range is non-empty.
+ if addrs.end <= addrs.start {
+ return;
+ }
+
+ // If the start of the range is reserved, walk forwards a page.
+ let first_page = addrs.start..addrs.start + PAGE_SIZE;
+ if self
+ .iter_reserved()
+ .any(|range| ranges_overlap(&range, &first_page))
+ {
+ addrs.start = first_page.end;
+ } else {
+ break;
+ }
+ }
+
+ // Find the first reservation that overlaps.
+ if let Some(reservation) = self
+ .iter_reserved()
+ .filter(|range| ranges_overlap(range, &addrs))
+ .min_by_key(|range| range.start)
+ {
+ // Get the range that's before the reservation.
+ let mut unreserved = addrs.start..reservation.start;
+
+ // Trim the range to be page-aligned. We don't need to trim the start, because
+ // addrs.start is already aligned (by our loop invariant).
+ unreserved.end &= !(PAGE_SIZE - 1);
+
+ // Add the range up to the start of that overlap.
+ self.add_range_unchecked(unreserved);
+
+ // Adjust the remaining range to be after the overlap.
+ addrs.start = reservation.end;
+
+ // Trim the range to be page-aligned. We don't need to trim the end, because it's
+ // unchanged.
+ addrs.start = (addrs.start + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
+ } else {
+ // If no range overlaps, the entire remaining range is valid.
+ self.add_range_unchecked(addrs);
+ return;
+ }
+ }
+ }
+
+ /// Adds a physical address range to the free-list allocator, without checking for reservations
+ /// inside it.
+ ///
+ /// ## Safety
+ ///
+ /// - This allocator must have been created with the DeviceTree that gave us this memory.
+ /// - The region must be a valid physical memory range.
+ /// - The region must be page-aligned.
+ /// - The region must not overlap with any reservations.
+ /// - Paging must not be enabled for the entire lifetime of this allocator.
+ pub unsafe fn add_range_unchecked(&mut self, addrs: Range<usize>) {
+ // Initialize the list node.
+ let mut new_node = FreeListAddr::new(addrs);
+
+        // Walk forwards through the list until the node we're going to insert ourselves
+        // before has a size greater than or equal to ours.
+ let mut spot_to_store = &mut self.head;
+ while let Some(existing_node) = spot_to_store {
+ if existing_node.pages_after() < new_node.pages_after() {
+ spot_to_store = spot_to_store.as_mut().unwrap().next_mut();
+ } else {
+ break;
+ }
+ }
+
+ *new_node.next_mut() = spot_to_store.take();
+ *spot_to_store = Some(new_node);
+ self.len += 1;
+ }
+
+ /// Returns an iterator that removes ranges from the free-list. Ranges are returned sorted from
+ /// smallest to largest.
+ pub fn drain<'iter: 'dt>(&'iter mut self) -> impl 'dt + Iterator<Item = Range<usize>> {
+ iter::from_fn(move || self.pop())
+ }
+
+ /// Returns whether the free-list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.len == 0
+ }
+
+ /// Returns an iterator over the reserved ranges of addresses.
+ ///
+ /// Addresses may be reserved by:
+ ///
+ /// - Overlapping with the kernel, including the early-boot kernel stack.
+ /// - Overlapping with the DeviceTree.
+ /// - Overlapping with any memory reservations in the DeviceTree.
+ fn iter_reserved(&self) -> impl '_ + Iterator<Item = Range<usize>> {
+ // Get the boundaries of the kernel.
+ let kernel = kernel_boundaries();
+
+ // As a sanity check, make sure this function is inside those boundaries...
+ assert!(kernel.contains(&(FreeList::iter_reserved as usize)));
+
+ // Check if the address is either in the kernel or somewhere in the DeviceTree.
+ iter::once(kernel).chain(self.device_tree.iter_reserved())
+ }
+
+ /// Returns the number of regions in the free-list.
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Pops the smallest range from the free-list, returning it.
+ pub fn pop(&mut self) -> Option<Range<usize>> {
+ match self.head.take() {
+ Some(head) => {
+ let (addrs, next) = head.take();
+ self.head = next;
+ Some(addrs)
+ }
+ None => None,
+ }
+ }
+}
+
+impl<'dt> fmt::Debug for FreeList<'dt> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ struct FreeListDebug<'a>(&'a Option<FreeListAddr>);
+
+ impl<'a> fmt::Debug for FreeListDebug<'a> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let mut addr = self.0;
+ fmt.debug_list()
+ .entries(iter::from_fn(|| match addr {
+ Some(head) => {
+ addr = head.next();
+ Some(head)
+ }
+ None => None,
+ }))
+ .finish()
+ }
+ }
+
+ fmt.debug_struct("FreeList")
+ .field("device_tree", &self.device_tree)
+ .field("addrs", &FreeListDebug(&self.head))
+ .finish()
+ }
+}
+
+/// An address in the free-list.
+///
+/// # Invariants
+///
+/// - The pointer must point to physical memory.
+/// - The FreeListHeader must be initialized.
+/// - The region being pointed to must actually be free.
+/// - The region being pointed to must be unreserved.
+struct FreeListAddr(NonNull<FreeListHeader>);
+
+impl FreeListAddr {
+ /// Initializes the page with the given address range, with no next address.
+ ///
+ /// # Safety
+ ///
+ /// - All of the invariants in the struct-level documentation, except the header being
+ /// initialized (this function will initialize it).
+ #[requires(addrs.start != 0)]
+ #[requires(addrs.start & (PAGE_SIZE - 1) == 0)]
+ #[requires(addrs.end & (PAGE_SIZE - 1) == 0)]
+ #[requires(addrs.start < addrs.end)]
+ unsafe fn new(addrs: Range<usize>) -> FreeListAddr {
+        let addr = addrs.start;
+        let pages_after = ((addrs.end - addrs.start) >> PAGE_SIZE_BITS) - 1;
+        // `ptr::write` avoids dropping the uninitialized memory at `addr`, as `*ptr = ...` would.
+        let header = FreeListHeader { next: None, pages_after };
+        ptr::write(addr as *mut FreeListHeader, header);
+
+ FreeListAddr(NonNull::new_unchecked(addr as *mut FreeListHeader))
+ }
+
+ /// Returns the `next` field of the header.
+ fn next(&self) -> &Option<FreeListAddr> {
+ // SAFETY: The invariants of FreeListAddr::new are sufficient to make this valid.
+ unsafe { &self.0.as_ref().next }
+ }
+
+ /// Returns a mutable reference to the `next` field of the header.
+ fn next_mut(&mut self) -> &mut Option<FreeListAddr> {
+ // SAFETY: The invariants of FreeListAddr::new are sufficient to make this valid.
+ unsafe { &mut self.0.as_mut().next }
+ }
+
+ /// Returns the number of pages *after* the first one in this region.
+ fn pages_after(&self) -> usize {
+ // SAFETY: The invariants of FreeListAddr::new are sufficient to make this valid.
+ unsafe { self.0.as_ref().pages_after }
+ }
+
+ /// Returns the range of addresses in the region and the address of the next region.
+ fn take(self) -> (Range<usize>, Option<FreeListAddr>) {
+ let start = self.0.as_ptr() as usize;
+ let len_pages = 1 + self.pages_after();
+ let len_bytes = len_pages << PAGE_SIZE_BITS;
+ let addrs = start..start + len_bytes;
+
+ // SAFETY: The invariants of FreeListAddr::new are sufficient to make the read valid.
+        // Because `self` is consumed by value, this pointer can never be used again, so
+        // ownership of the header's contents safely passes to the caller.
+ let header = unsafe { ptr::read(self.0.as_ptr()) };
+
+ (addrs, header.next)
+ }
+}
+
+impl fmt::Debug for FreeListAddr {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let addr = self.0.as_ptr() as *const u8;
+ let len_pages = 1 + self.pages_after();
+ let len_bytes = len_pages << PAGE_SIZE_BITS;
+ write!(
+ fmt,
+ "{:p}..{:p} ({} page{})",
+ addr,
+ addr.wrapping_add(len_bytes),
+ len_pages,
+ if len_pages == 1 { "" } else { "s" }
+ )
+ }
+}
+
+struct FreeListHeader {
+ /// The physical address of the next free-list header.
+ next: Option<FreeListAddr>,
+
+    /// The number of pages in this region after the first one.
+ pages_after: usize,
+}
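
To make the list discipline above easier to follow in isolation: the free-list keeps regions sorted by size, smallest first, and `pop` always hands back the smallest region, which is exactly what `drain` relies on. Below is a minimal heap-backed sketch of that discipline (not the kernel code: the real allocator stores each `FreeListHeader` intrusively in the first page of the free region itself, while this stand-in uses `Box` so it can run anywhere):

    use std::ops::Range;

    struct Node {
        addrs: Range<usize>,
        next: Option<Box<Node>>,
    }

    #[derive(Default)]
    struct SortedFreeList {
        head: Option<Box<Node>>,
    }

    impl SortedFreeList {
        /// Inserts a region, keeping the list sorted by length, smallest first.
        fn add(&mut self, addrs: Range<usize>) {
            let mut spot = &mut self.head;
            while let Some(node) = spot {
                if node.addrs.len() < addrs.len() {
                    spot = &mut spot.as_mut().unwrap().next;
                } else {
                    break;
                }
            }
            let next = spot.take();
            *spot = Some(Box::new(Node { addrs, next }));
        }

        /// Removes and returns the smallest region, if any.
        fn pop(&mut self) -> Option<Range<usize>> {
            let head = self.head.take()?;
            self.head = head.next;
            Some(head.addrs)
        }
    }

    fn main() {
        let mut list = SortedFreeList::default();
        list.add(0x8000_0000..0x8010_0000); // 1 MiB region
        list.add(0x9000_0000..0x9000_2000); // 8 KiB region
        assert_eq!(list.pop(), Some(0x9000_0000..0x9000_2000)); // smallest comes out first
        assert_eq!(list.pop(), Some(0x8000_0000..0x8010_0000));
        assert_eq!(list.pop(), None);
    }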
diff --git a/kernel/src/device_tree.rs b/kernel/src/device_tree.rs
index 5fdc3e9..c045cfd 100644
--- a/kernel/src/device_tree.rs
+++ b/kernel/src/device_tree.rs
@@ -6,13 +6,13 @@ use contracts::requires;
use core::{
fmt, iter,
num::ParseIntError,
+ ops::Range,
slice,
str::{self, Utf8Error},
};
use either::Either;
/// A reference to a flattened DeviceTree (DTB) in memory.
-#[derive(Debug)]
pub struct FlattenedDeviceTree<'dt> {
header: &'dt FdtHeader,
struct_block: &'dt [u32],
@@ -127,6 +127,24 @@ impl<'dt> FlattenedDeviceTree<'dt> {
Ok(())
}
+ /// Returns an iterator over the reserved ranges of addresses.
+ ///
+ /// Addresses may be reserved by:
+ ///
+ /// - Overlapping with the DeviceTree itself.
+ /// - Overlapping with any memory reservations in the DeviceTree.
+ pub fn iter_reserved(&self) -> impl '_ + Iterator<Item = Range<usize>> {
+ // Make the address range of the DeviceTree.
+ let dt_start = self.header as *const FdtHeader as usize;
+ let dt_end = dt_start + u32::from_be(self.header.totalsize) as usize;
+
+ iter::once(dt_start..dt_end).chain(self.memrsv_block.iter().map(|memrsv| {
+ let addr = u64::from_be(memrsv.addr) as usize;
+ let size = u64::from_be(memrsv.size) as usize;
+ addr..addr + size
+ }))
+ }
+
/// Returns an iterator over the events in the structure block of the DeviceTree, starting at
/// the current offset from the start of the structure block.
///
@@ -244,6 +262,14 @@ impl<'dt> FlattenedDeviceTree<'dt> {
}
}
+impl<'dt> fmt::Debug for FlattenedDeviceTree<'dt> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let addr = self.header as *const FdtHeader;
+        // Intentionally not using `fmt.debug_tuple`; it would waste space here.
+ write!(fmt, "FlattenedDeviceTree::from_ptr({addr:p})")
+ }
+}
+
/// An error encountered when reading a DeviceTree.
#[derive(Debug)]
pub enum DeviceTreeError {
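
One detail worth calling out in `iter_reserved`: everything in a flattened DeviceTree is stored big-endian, so the reservation block's `(addr, size)` pairs must pass through `u64::from_be` before they form a usable native-endian address range. A self-contained sketch of that conversion (`FdtMemRsv` is a stand-in for the kernel's reservation-entry type, which isn't shown in this diff):

    use std::ops::Range;

    #[repr(C)]
    struct FdtMemRsv {
        addr: u64, // stored big-endian in the DTB
        size: u64, // stored big-endian in the DTB
    }

    /// Converts a big-endian reservation entry into a native-endian address range.
    fn reservation_range(memrsv: &FdtMemRsv) -> Range<usize> {
        let addr = u64::from_be(memrsv.addr) as usize;
        let size = u64::from_be(memrsv.size) as usize;
        addr..addr + size
    }

    fn main() {
        // A reservation at 0x8000_0000 of one 4 KiB page, as it would appear in a DTB.
        let raw = FdtMemRsv {
            addr: 0x8000_0000u64.to_be(),
            size: 0x1000u64.to_be(),
        };
        assert_eq!(reservation_range(&raw), 0x8000_0000..0x8000_1000);
    }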
diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs
index bc5c5c0..5e23109 100644
--- a/kernel/src/lib.rs
+++ b/kernel/src/lib.rs
@@ -14,7 +14,7 @@ pub mod prelude;
#[cfg(not(test))]
mod panic;
-use crate::prelude::*;
+use crate::{allocators::physical_memory_free_list::FreeList, prelude::*};
/// The entrypoint to the kernel. This should be executed by hart0 alone. It performs some early
/// boot tasks, then wakes up any other harts.
@@ -39,6 +39,7 @@ pub unsafe extern "C" fn hart0_boot(device_tree: *const u8) -> ! {
// Find the available physical memory areas and initialize the physical memory
// free-list.
+ let mut physical_memory_free_list = FreeList::new(&flattened_device_tree);
flattened_device_tree
.for_each_node(|node| {
if node.is_unit(&["", "cpus"]) {
@@ -58,6 +59,7 @@ pub unsafe extern "C" fn hart0_boot(device_tree: *const u8) -> ! {
};
for (addr, size) in reg {
+ physical_memory_free_list.add_range(addr..addr + size);
info!("found memory: addr = {addr:#x}, size = {size:#x}");
}
}
@@ -65,6 +67,26 @@ pub unsafe extern "C" fn hart0_boot(device_tree: *const u8) -> ! {
})
.unwrap_or_else(|err| void::unreachable(err));
+ // Log the physical memory we found.
+ debug!(
+ "found {} usable regions of physical memory{}",
+ physical_memory_free_list.len(),
+ if physical_memory_free_list.is_empty() {
+ ""
+ } else {
+ ":"
+ }
+ );
+ for region in physical_memory_free_list.drain() {
+ debug!(
+ "{:p}..{:p} ({} byte{})",
+ region.start as *const u8,
+ region.end as *const u8,
+ region.len(),
+ if region.len() == 1 { "" } else { "s" }
+ )
+ }
+
// After this point, everything else is for debugging.
interrupts::example_timer();
info!("sleeping forever...");
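
Tying the pieces together: each `reg` range from a memory node goes through `add_range`, which does more than append. It splits the range around everything `iter_reserved` reports (the kernel image, the DeviceTree blob, and the DTB's reservation block). A standalone, simplified sketch of that splitting as a pure function (the real code also page-aligns each resulting piece):

    use std::ops::Range;

    fn ranges_overlap(r1: &Range<usize>, r2: &Range<usize>) -> bool {
        r1.start.max(r2.start) < r1.end.min(r2.end)
    }

    /// Splits `addrs` into the subranges not covered by any range in `reserved`.
    fn split_around_reservations(
        mut addrs: Range<usize>,
        reserved: &[Range<usize>],
    ) -> Vec<Range<usize>> {
        let mut out = Vec::new();
        while addrs.start < addrs.end {
            // Find the first reservation that overlaps the remaining range.
            match reserved
                .iter()
                .filter(|r| ranges_overlap(r, &addrs))
                .min_by_key(|r| r.start)
            {
                Some(r) => {
                    if addrs.start < r.start {
                        out.push(addrs.start..r.start); // the piece before the reservation
                    }
                    addrs.start = r.end.max(addrs.start); // skip past the reservation
                }
                None => {
                    out.push(addrs); // nothing else is reserved: keep the rest whole
                    break;
                }
            }
        }
        out
    }

    fn main() {
        let reserved = [0x3000..0x4000];
        assert_eq!(
            split_around_reservations(0x1000..0x6000, &reserved),
            vec![0x1000..0x3000, 0x4000..0x6000],
        );
    }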
diff --git a/kernel/src/util.rs b/kernel/src/util.rs
index 29042c8..dbcb228 100644
--- a/kernel/src/util.rs
+++ b/kernel/src/util.rs
@@ -1,6 +1,6 @@
//! Miscellaneous utilities.
-use core::mem::size_of;
+use core::{mem::size_of, ops::Range};
#[cold]
#[inline(always)]
@@ -51,6 +51,11 @@ macro_rules! dbg {
};
}
+/// Returns whether the two half-open ranges overlap.
+pub fn ranges_overlap<T: Copy + Ord>(r1: &Range<T>, r2: &Range<T>) -> bool {
+ r1.start.max(r2.start) < r1.end.min(r2.end)
+}
+
/// A trait for types that can be converted to from big-endian or little-endian byte slices.
pub trait FromEndianBytes {
/// Converts from a big-endian byte slice.
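
The max/min expression in `ranges_overlap` is the standard interval-intersection test: two half-open ranges overlap exactly when the larger of the two starts lies strictly below the smaller of the two ends. A few edge cases worth noting (a standalone sketch mirroring the function above):

    use std::ops::Range;

    fn ranges_overlap<T: Copy + Ord>(r1: &Range<T>, r2: &Range<T>) -> bool {
        r1.start.max(r2.start) < r1.end.min(r2.end)
    }

    fn main() {
        assert!(ranges_overlap(&(0..10), &(5..15))); // genuine overlap
        assert!(!ranges_overlap(&(0..10), &(10..20))); // merely touching: no overlap
        assert!(!ranges_overlap(&(5..5), &(0..10))); // an empty range overlaps nothing
    }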