summaryrefslogtreecommitdiff
path: root/crates
diff options
context:
space:
mode:
Diffstat (limited to 'crates')
-rw-r--r--crates/Cargo.lock24
-rw-r--r--crates/Cargo.toml2
-rw-r--r--crates/alloc_vma_tree/Cargo.toml12
-rw-r--r--crates/alloc_vma_tree/src/lib.rs317
-rw-r--r--crates/kernel/Cargo.toml3
-rw-r--r--crates/kernel/src/alloc.rs58
-rw-r--r--crates/kernel/src/lib.rs4
-rw-r--r--crates/utils/src/lib.rs14
8 files changed, 417 insertions, 17 deletions
diff --git a/crates/Cargo.lock b/crates/Cargo.lock
index 5b8ca44..f844fc7 100644
--- a/crates/Cargo.lock
+++ b/crates/Cargo.lock
@@ -95,6 +95,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
[[package]]
+name = "ghost-cell"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8449d342b1c67f49169e92e71deb7b9b27f30062301a16dbc27a4cc8d2351b7"
+
+[[package]]
name = "lazy_static"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -308,6 +314,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
[[package]]
+name = "static-rc"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b91d0104a7b28aeda24b30919f83222570111ac0bf1aab23aaffb8f59330e654"
+
+[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -353,6 +365,17 @@ dependencies = [
]
[[package]]
+name = "vernos_alloc_vma_tree"
+version = "0.1.0"
+dependencies = [
+ "allocator-api2",
+ "contracts",
+ "ghost-cell",
+ "static-rc",
+ "vernos_utils",
+]
+
+[[package]]
name = "vernos_device_tree"
version = "0.1.0"
dependencies = [
@@ -380,6 +403,7 @@ dependencies = [
"spin",
"vernos_alloc_buddy",
"vernos_alloc_physmem_free_list",
+ "vernos_alloc_vma_tree",
"vernos_device_tree",
"vernos_driver_riscv_timer",
"vernos_utils",
diff --git a/crates/Cargo.toml b/crates/Cargo.toml
index 10aeec4..51eb2dc 100644
--- a/crates/Cargo.toml
+++ b/crates/Cargo.toml
@@ -1,5 +1,5 @@
[workspace]
-members = ["alloc_buddy", "alloc_genmalloc", "alloc_physmem_free_list", "device_tree", "driver_riscv_timer", "kernel", "utils"]
+members = ["alloc_buddy", "alloc_genmalloc", "alloc_physmem_free_list", "alloc_vma_tree", "device_tree", "driver_riscv_timer", "kernel", "utils"]
resolver = "2"
[profile.release]
diff --git a/crates/alloc_vma_tree/Cargo.toml b/crates/alloc_vma_tree/Cargo.toml
new file mode 100644
index 0000000..20b1189
--- /dev/null
+++ b/crates/alloc_vma_tree/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "vernos_alloc_vma_tree"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+allocator-api2 = { version = "0.2.18", default-features = false }
+contracts = { version = "0.6.3", default-features = false }
+ghost-cell = { version = "0.2.6", default-features = false }
+static-rc = { version = "0.6.1", default-features = false }
+vernos_utils = { path = "../utils" }
diff --git a/crates/alloc_vma_tree/src/lib.rs b/crates/alloc_vma_tree/src/lib.rs
new file mode 100644
index 0000000..38dd312
--- /dev/null
+++ b/crates/alloc_vma_tree/src/lib.rs
@@ -0,0 +1,317 @@
+//! A data structure to manage virtual address ranges.
+#![no_std]
+
+use allocator_api2::alloc::{AllocError, Allocator, Layout};
+use contracts::{ensures, requires};
+use core::{marker::PhantomData, mem::MaybeUninit, ops::Range, ptr::NonNull};
+use vernos_utils::cold;
+
+/// A data structure to manage virtual address ranges.
+///
+/// This is a pair of sorted trees of virtual memory areas. The two trees contain the same nodes,
+/// but one is sorted by size (so we can find a best-fit allocation) and one is sorted by the start
+/// address (so we can find a neighbor to merge free VMAs with).
+#[derive(Default)]
+pub struct VMATree<const PAGE_SIZE_BITS: usize, ALLOCATOR: Allocator> {
+ roots: Option<(
+ VMARef<PAGE_SIZE_BITS, AddrTree>,
+ VMARef<PAGE_SIZE_BITS, SizeTree>,
+ )>,
+ allocator: ALLOCATOR,
+}
+
+impl<const PAGE_SIZE_BITS: usize, ALLOCATOR: Allocator> VMATree<PAGE_SIZE_BITS, ALLOCATOR> {
+ /// Creates a new VMATree that will use the given allocator.
+ pub const fn new_in(allocator: ALLOCATOR) -> VMATree<PAGE_SIZE_BITS, ALLOCATOR> {
+ VMATree {
+ roots: None,
+ allocator,
+ }
+ }
+
+ /// Adds a new VMA to the tree.
+ ///
+ /// The VMA will be marked as free.
+ #[requires(addrs.start & ((1 << PAGE_SIZE_BITS) - 1) == 0)]
+ #[requires(addrs.end & ((1 << PAGE_SIZE_BITS) - 1) == 0)]
+ pub fn add(&mut self, addrs: Range<usize>) -> Result<(), AllocError> {
+ if let Some((addr_root, size_root)) = &mut self.roots {
+ todo!()
+ } else {
+ cold();
+ let node = VMANode::new_in(addrs, &self.allocator)?;
+ self.roots = Some((VMARef(node, PhantomData), VMARef(node, PhantomData)));
+ Ok(())
+ }
+ }
+}
+
+impl<const PAGE_SIZE_BITS: usize, ALLOCATOR: Allocator> Drop
+ for VMATree<PAGE_SIZE_BITS, ALLOCATOR>
+{
+ fn drop(&mut self) {
+ todo!()
+ }
+}
+
+unsafe impl<const PAGE_SIZE_BITS: usize, ALLOCATOR: Allocator> Send
+ for VMATree<PAGE_SIZE_BITS, ALLOCATOR>
+where
+ ALLOCATOR: Send,
+{
+}
+
+/// A non-null pointer to a VMA node in one of the two trees.
+struct VMARef<const PAGE_SIZE_BITS: usize, KIND: TreeKind>(
+ NonNull<VMANode<PAGE_SIZE_BITS>>,
+ PhantomData<KIND>,
+);
+
+impl<const PAGE_SIZE_BITS: usize, KIND: TreeKind> VMARef<PAGE_SIZE_BITS, KIND> {
+ /// Returns the parent, if there was one.
+ ///
+ /// # Safety
+ ///
+ /// - The pointer must be valid.
+ pub unsafe fn parent(self) -> Option<VMARef<PAGE_SIZE_BITS, KIND>> {
+ let ptr = KIND::parent(self.0);
+ if self == ptr {
+ None
+ } else {
+ Some(ptr)
+ }
+ }
+
+ /// Returns the left child, if there was one.
+ ///
+ /// # Safety
+ ///
+ /// - The pointer must be valid.
+ pub unsafe fn left(self) -> Option<VMARef<PAGE_SIZE_BITS, KIND>> {
+ let ptr = KIND::left(self.0);
+ if self == ptr {
+ None
+ } else {
+ Some(ptr)
+ }
+ }
+
+ /// Returns the right child, if there was one.
+ ///
+ /// # Safety
+ ///
+ /// - The pointer must be valid.
+ pub unsafe fn right(self) -> Option<VMARef<PAGE_SIZE_BITS, KIND>> {
+ let ptr = KIND::right(self.0);
+ if self == ptr {
+ None
+ } else {
+ Some(ptr)
+ }
+ }
+}
+
+impl<const PAGE_SIZE_BITS: usize, KIND: TreeKind> Clone for VMARef<PAGE_SIZE_BITS, KIND> {
+ fn clone(&self) -> VMARef<PAGE_SIZE_BITS, KIND> {
+ *self
+ }
+}
+
+impl<const PAGE_SIZE_BITS: usize, KIND: TreeKind> Copy for VMARef<PAGE_SIZE_BITS, KIND> {}
+
+impl<const PAGE_SIZE_BITS: usize, KIND: TreeKind> Eq for VMARef<PAGE_SIZE_BITS, KIND> {}
+
+impl<const PAGE_SIZE_BITS: usize, KIND: TreeKind> PartialEq for VMARef<PAGE_SIZE_BITS, KIND> {
+ fn eq(&self, other: &VMARef<PAGE_SIZE_BITS, KIND>) -> bool {
+ self.0 == other.0
+ }
+}
+
+/// A "physical" VMA node.
+struct VMANode<const PAGE_SIZE_BITS: usize> {
+ addr_and_state: usize,
+ size_and_balance: usize,
+
+ /// A self-pointer if we're the root.
+ addr_parent: VMARef<PAGE_SIZE_BITS, AddrTree>,
+
+ /// A self-pointer if we have no left child.
+ addr_left_child: VMARef<PAGE_SIZE_BITS, AddrTree>,
+
+ /// A self-pointer if we have no right child.
+ addr_right_child: VMARef<PAGE_SIZE_BITS, AddrTree>,
+
+ /// A self-pointer if we're the root.
+ size_parent: VMARef<PAGE_SIZE_BITS, SizeTree>,
+
+ /// A self-pointer if we have no left child.
+ size_left_child: VMARef<PAGE_SIZE_BITS, SizeTree>,
+
+ /// A self-pointer if we have no right child.
+ size_right_child: VMARef<PAGE_SIZE_BITS, SizeTree>,
+}
+
+impl<const PAGE_SIZE_BITS: usize> VMANode<PAGE_SIZE_BITS> {
+ /// Allocates a new node in the given allocator.
+ ///
+ /// The node has the given range of addresses and is in the `Free` state. All of its pointers
+ /// are self-pointers.
+ #[requires(PAGE_SIZE_BITS > 0)]
+ #[requires(addrs.start & ((1 << PAGE_SIZE_BITS) - 1) == 0)]
+ #[requires(addrs.end & ((1 << PAGE_SIZE_BITS) - 1) == 0)]
+ pub fn new_in<ALLOCATOR: Allocator>(
+ addrs: Range<usize>,
+ allocator: &ALLOCATOR,
+ ) -> Result<NonNull<VMANode<PAGE_SIZE_BITS>>, AllocError> {
+ let layout = Layout::new::<MaybeUninit<Self>>();
+ let mut ptr: NonNull<MaybeUninit<Self>> = allocator.allocate(layout)?.cast();
+
+ // SAFETY: This needs to be OK for the allocator to meet the conditions of its trait.
+ VMANode::init_in(unsafe { ptr.as_mut() }, addrs);
+
+ Ok(ptr.cast())
+ }
+
+ /// Initializes a node.
+ ///
+ /// The node has the given range of addresses and is in the `Free` state. All of its pointers
+ /// are self-pointers.
+ #[requires(PAGE_SIZE_BITS > 0)]
+ #[requires(addrs.start & ((1 << PAGE_SIZE_BITS) - 1) == 0)]
+ #[requires(addrs.end & ((1 << PAGE_SIZE_BITS) - 1) == 0)]
+ pub fn init_in(maybe_uninit: &mut MaybeUninit<VMANode<PAGE_SIZE_BITS>>, addrs: Range<usize>) {
+ let ptr = NonNull::from(&*maybe_uninit).cast();
+ maybe_uninit.write(VMANode {
+ addr_and_state: addrs.start,
+ size_and_balance: (addrs.end - addrs.start)
+ | ((Balance::Balanced as usize) << 2)
+ | (Balance::Balanced as usize),
+ addr_parent: VMARef(ptr, PhantomData),
+ addr_left_child: VMARef(ptr, PhantomData),
+ addr_right_child: VMARef(ptr, PhantomData),
+ size_parent: VMARef(ptr, PhantomData),
+ size_left_child: VMARef(ptr, PhantomData),
+ size_right_child: VMARef(ptr, PhantomData),
+ });
+ }
+
+ /// Returns the range of addresses represented by this node.
+ #[ensures(ret.start & ((1 << PAGE_SIZE_BITS) - 1) == 0)]
+ #[ensures(ret.end & ((1 << PAGE_SIZE_BITS) - 1) == 0)]
+ pub fn addrs(&self) -> Range<usize> {
+ let addr = self.addr_and_state & !((1 << PAGE_SIZE_BITS) - 1);
+ let size = self.size_and_balance & !((1 << PAGE_SIZE_BITS) - 1);
+ addr..addr + size
+ }
+
+ /// Returns the state of the addresses represented by this node.
+ #[requires(PAGE_SIZE_BITS >= 1)]
+ pub fn state(&self) -> State {
+ match self.addr_and_state & 1 {
+ 0 => State::Free,
+ _ => State::Used,
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+enum Balance {
+ LeftHeavy = 0,
+ Balanced = 1,
+ RightHeavy = 2,
+}
+
+impl Balance {
+ pub fn from_bits(bits: usize) -> Balance {
+ match bits {
+ 0 => Balance::LeftHeavy,
+ 1 => Balance::Balanced,
+ 2 => Balance::RightHeavy,
+ _ => panic!("invalid Balance: {bits}"),
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+enum State {
+ Free,
+ Used,
+}
+
+trait TreeKind: Sized {
+ unsafe fn balance<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> Balance;
+
+ unsafe fn parent<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> VMARef<PAGE_SIZE_BITS, Self>;
+
+ unsafe fn left<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> VMARef<PAGE_SIZE_BITS, Self>;
+
+ unsafe fn right<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> VMARef<PAGE_SIZE_BITS, Self>;
+}
+
+enum AddrTree {}
+
+impl TreeKind for AddrTree {
+ #[requires(PAGE_SIZE_BITS >= 4)]
+ unsafe fn balance<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> Balance {
+ let size_and_balance = (*node.as_ptr()).size_and_balance;
+ Balance::from_bits(size_and_balance & 0b11)
+ }
+
+ unsafe fn parent<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> VMARef<PAGE_SIZE_BITS, Self> {
+ (*node.as_ptr()).addr_parent
+ }
+
+ unsafe fn left<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> VMARef<PAGE_SIZE_BITS, Self> {
+ (*node.as_ptr()).addr_left_child
+ }
+
+ unsafe fn right<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> VMARef<PAGE_SIZE_BITS, Self> {
+ (*node.as_ptr()).addr_right_child
+ }
+}
+
+enum SizeTree {}
+
+impl TreeKind for SizeTree {
+ #[requires(PAGE_SIZE_BITS >= 4)]
+ unsafe fn balance<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> Balance {
+ let size_and_balance = (*node.as_ptr()).size_and_balance;
+ Balance::from_bits((size_and_balance >> 2) & 0b11)
+ }
+
+ unsafe fn parent<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> VMARef<PAGE_SIZE_BITS, Self> {
+ (*node.as_ptr()).size_parent
+ }
+
+ unsafe fn left<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> VMARef<PAGE_SIZE_BITS, Self> {
+ (*node.as_ptr()).size_left_child
+ }
+
+ unsafe fn right<const PAGE_SIZE_BITS: usize>(
+ node: NonNull<VMANode<PAGE_SIZE_BITS>>,
+ ) -> VMARef<PAGE_SIZE_BITS, Self> {
+ (*node.as_ptr()).size_right_child
+ }
+}
diff --git a/crates/kernel/Cargo.toml b/crates/kernel/Cargo.toml
index 6c28dd8..5fef50b 100644
--- a/crates/kernel/Cargo.toml
+++ b/crates/kernel/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
crate-type = ["staticlib"]
[dependencies]
-allocator-api2 = { version = "0.2.18", default-features = false }
+allocator-api2 = { version = "0.2.18", default-features = false, features = ["alloc"] }
bitflags = { version = "2.6.0", default-features = false }
cfg-if = { version = "1.0.0", default-features = false }
contracts = { version = "0.6.3", default-features = false }
@@ -16,6 +16,7 @@ log = { version = "0.4.20", default-features = false }
spin = { version = "0.9.8", default-features = false, features = ["fair_mutex", "use_ticket_mutex"] }
vernos_alloc_buddy = { path = "../alloc_buddy" }
vernos_alloc_physmem_free_list = { path = "../alloc_physmem_free_list" }
+vernos_alloc_vma_tree = { path = "../alloc_vma_tree" }
vernos_device_tree = { path = "../device_tree" }
vernos_driver_riscv_timer = { path = "../driver_riscv_timer" }
vernos_utils = { path = "../utils" }
diff --git a/crates/kernel/src/alloc.rs b/crates/kernel/src/alloc.rs
index 93d8b15..690b428 100644
--- a/crates/kernel/src/alloc.rs
+++ b/crates/kernel/src/alloc.rs
@@ -1,13 +1,14 @@
//! Global structures for the allocators.
use crate::paging::{
- BuddyAllocator, MapError, MappingFlags, PageTable, ASID, LOMEM_TOP, MAX_PAGE_SIZE_BITS,
- PAGE_SIZE, PAGE_SIZES,
+ BuddyAllocator, MapError, MappingFlags, PageTable, ASID, HIMEM_BOT, LOMEM_TOP,
+ MAX_PAGE_SIZE_BITS, PAGE_SIZE, PAGE_SIZES, PAGE_SIZE_BITS,
};
-use allocator_api2::alloc::AllocError;
+use allocator_api2::alloc::{AllocError, Global, GlobalAlloc, Layout};
use contracts::requires;
use core::{num::NonZero, ptr::NonNull};
use spin::mutex::FairMutex;
+use vernos_alloc_vma_tree::VMATree;
/// The global instance of the physical page allocator.
static BUDDY_ALLOCATOR: FairMutex<Option<BuddyAllocator>> = FairMutex::new(None);
@@ -15,16 +16,24 @@ static BUDDY_ALLOCATOR: FairMutex<Option<BuddyAllocator>> = FairMutex::new(None)
/// The global kernel page table.
static KERNEL_PAGE_TABLE: FairMutex<Option<&'static mut PageTable>> = FairMutex::new(None);
+/// The kernel's virtual memory allocator.
+static KERNEL_VM_ALLOC: FairMutex<VMATree<PAGE_SIZE_BITS, Global>> =
+ FairMutex::new(VMATree::new_in(Global));
+
+/// The global allocator.
+#[global_allocator]
+static GLOBAL_ALLOC: GlobalGenMalloc = GlobalGenMalloc;
+
/// Initializes the kernel page table and enables paging.
///
/// # Safety
///
/// - Paging must not have been enabled previously.
/// - The buddy allocator must be valid.
-#[requires(BUDDY_ALLOCATOR.lock().is_none())]
#[requires(KERNEL_PAGE_TABLE.lock().is_none())]
-#[ensures(BUDDY_ALLOCATOR.lock().is_some())]
+#[requires(BUDDY_ALLOCATOR.lock().is_none())]
#[ensures(KERNEL_PAGE_TABLE.lock().is_some())]
+#[ensures(BUDDY_ALLOCATOR.lock().is_some())]
pub unsafe fn init_kernel_page_table(buddy_allocator: BuddyAllocator) {
// Just making this mut above gets a warning thanks to the contracts macros...
let mut buddy_allocator = buddy_allocator;
@@ -59,8 +68,8 @@ pub unsafe fn init_kernel_page_table(buddy_allocator: BuddyAllocator) {
vernos_utils::dbg!(&page_table);
// Save the buddy allocator and kernel page table.
- *BUDDY_ALLOCATOR.lock() = Some(buddy_allocator);
*KERNEL_PAGE_TABLE.lock() = Some(page_table);
+ *BUDDY_ALLOCATOR.lock() = Some(buddy_allocator);
}
/// Initializes the virtual memory allocator and the regular allocator.
@@ -68,11 +77,19 @@ pub unsafe fn init_kernel_page_table(buddy_allocator: BuddyAllocator) {
/// # Safety
///
/// - `himem_top` must be accurate.
-#[requires(BUDDY_ALLOCATOR.lock().is_some())]
#[requires(KERNEL_PAGE_TABLE.lock().is_some())]
+#[requires(BUDDY_ALLOCATOR.lock().is_some())]
#[requires(himem_top & (PAGE_SIZE - 1) == 0)]
+#[requires(HIMEM_BOT < himem_top)]
pub unsafe fn init_kernel_virtual_memory_allocator(himem_top: usize) {
- todo!()
+ // TODO: Bootstrap the allocator.
+
+ // The error here _really_ ought to be impossible, because we just bootstrapped the allocator!
+ // It definitely has free memory.
+ let mut kernel_vm_alloc = KERNEL_VM_ALLOC.lock();
+ kernel_vm_alloc
+ .add(HIMEM_BOT..himem_top)
+ .expect("failed to set up the kernel's virtual memory allocator");
}
/// Tries to allocate a page of physical memory of the given size, returning its physical address.
@@ -112,12 +129,29 @@ pub fn kernel_map(
len: usize,
flags: MappingFlags,
) -> Result<(), MapError> {
- let mut buddy_allocator = BUDDY_ALLOCATOR.lock();
let mut kernel_page_table = KERNEL_PAGE_TABLE.lock();
- let buddy_allocator = buddy_allocator.as_mut().unwrap();
+ let mut buddy_allocator = BUDDY_ALLOCATOR.lock();
let kernel_page_table = kernel_page_table.as_mut().unwrap();
+ let buddy_allocator = buddy_allocator.as_mut().unwrap();
kernel_page_table.map(&mut *buddy_allocator, vaddr, paddr, len, flags)?;
- log::warn!("TODO: sfence.vma");
- log::warn!("TODO: TLB shootdown");
+ vernos_utils::first_time! {
+ {
+ log::warn!("TODO: sfence.vma");
+ log::warn!("TODO: TLB shootdown");
+ }
+ }
Ok(())
}
+
+/// A global allocator backed by a hart-local `vernos_alloc_genmalloc::Heap`.
+struct GlobalGenMalloc;
+
+unsafe impl GlobalAlloc for GlobalGenMalloc {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ todo!("GlobalGenMalloc.alloc({layout:?})")
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ todo!("GlobalGenMalloc.dealloc({ptr:?}, {layout:?})")
+ }
+}
diff --git a/crates/kernel/src/lib.rs b/crates/kernel/src/lib.rs
index 0ca535a..7421649 100644
--- a/crates/kernel/src/lib.rs
+++ b/crates/kernel/src/lib.rs
@@ -43,7 +43,7 @@ impl EarlyBootAddrs {
/// Looks for a DeviceTree at address in the `EarlyBootAddrs`, and returns it if it looks
/// valid. Panics if the DeviceTree is invalid.
///
- /// ## Safety
+ /// # Safety
///
/// - The `EarlyBootAddrs` must be accurate.
/// - The `device_tree` pointer must be a valid pointer into physical memory. See
@@ -194,7 +194,7 @@ pub unsafe extern "C" fn hart0_early_boot(early_boot_addrs: &mut EarlyBootAddrs)
let new_stack_start = new_stack_end - STACK_SIZE;
vaddr_bump = new_stack_start;
for i in 0..((STACK_SIZE >> PAGE_SIZE_BITS) - 1) {
- let vaddr = new_kernel_start + (i << PAGE_SIZE_BITS);
+ let vaddr = new_stack_start + (i << PAGE_SIZE_BITS);
let paddr =
alloc_page(PAGE_SIZE).expect("failed to allocate memory for a hart0 stack page");
kernel_map(
diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs
index 6e26317..c6681d9 100644
--- a/crates/utils/src/lib.rs
+++ b/crates/utils/src/lib.rs
@@ -23,7 +23,7 @@ pub fn debug(f: impl Fn(&mut fmt::Formatter) -> fmt::Result) -> impl fmt::Debug
/// A hint that this branch is unlikely to be called.
#[cold]
#[inline(always)]
-fn cold() {}
+pub fn cold() {}
/// A hint that `b` is likely to be true. See `core::intrinsics::likely`.
#[inline(always)]
@@ -130,3 +130,15 @@ macro_rules! impl_FromEndianBytes {
}
impl_FromEndianBytes!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
+
+/// Runs the body only the first time this particular expansion site is reached.
+#[macro_export]
+macro_rules! first_time {
+    ($($stmt:stmt);*) => {{
+        use core::sync::atomic::{AtomicBool, Ordering};
+        static DONE: AtomicBool = AtomicBool::new(false);
+        if !DONE.swap(true, Ordering::Relaxed) {
+            $($stmt);*
+        }
+    }};
+}