summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--boards/qemu-virt/qemu-virt.s3
-rw-r--r--crates/Cargo.lock8
-rw-r--r--crates/alloc_buddy/src/lib.rs43
-rw-r--r--crates/alloc_buddy/src/tree.rs5
-rw-r--r--crates/alloc_buddy/tests/hosted_test.rs2
-rw-r--r--crates/kernel/Cargo.toml2
-rw-r--r--crates/kernel/src/alloc.rs72
-rw-r--r--crates/kernel/src/arch/hosted.rs181
-rw-r--r--crates/kernel/src/arch/mod.rs3
-rw-r--r--crates/kernel/src/arch/riscv64/mod.rs9
-rw-r--r--crates/kernel/src/arch/riscv64/paging.rs262
-rw-r--r--crates/kernel/src/lib.rs14
-rw-r--r--crates/utils/src/lib.rs25
-rw-r--r--flake.nix3
14 files changed, 611 insertions, 21 deletions
diff --git a/boards/qemu-virt/qemu-virt.s b/boards/qemu-virt/qemu-virt.s
index bd32676..f0599da 100644
--- a/boards/qemu-virt/qemu-virt.s
+++ b/boards/qemu-virt/qemu-virt.s
@@ -60,6 +60,9 @@ _start:
li t0, -1
csrw pmpaddr0, t0
+ ## Since we adjusted PMP settings, we need to issue an SFENCE.VMA.
+ sfence.vma
+
## Jump to supervisor mode.
mret
.size _start, . - _start
diff --git a/crates/Cargo.lock b/crates/Cargo.lock
index e49301e..41384f7 100644
--- a/crates/Cargo.lock
+++ b/crates/Cargo.lock
@@ -302,6 +302,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
+name = "spin"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
+
+[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -365,7 +371,9 @@ name = "vernos_kernel"
version = "0.1.0"
dependencies = [
"cfg-if",
+ "contracts",
"log",
+ "spin",
"vernos_alloc_buddy",
"vernos_alloc_physmem_free_list",
"vernos_device_tree",
diff --git a/crates/alloc_buddy/src/lib.rs b/crates/alloc_buddy/src/lib.rs
index fc5d245..083d8d5 100644
--- a/crates/alloc_buddy/src/lib.rs
+++ b/crates/alloc_buddy/src/lib.rs
@@ -232,9 +232,29 @@ impl<
Ok(buddy_allocator)
}
+ /// Tries to allocate a subregion of the smallest size class that will fit the given value.
+ #[inline(always)]
+ pub fn alloc<T>(&mut self) -> Result<NonNull<T>, AllocError> {
+ let ptr = self.alloc_of_size_class(
+ (size_of::<T>().next_power_of_two().trailing_zeros() as usize)
+ .saturating_sub(PAGE_SIZE_BITS),
+ )?;
+ Ok(ptr.cast())
+ }
+
+ /// Tries to allocate a subregion of a particular size from the allocator.
+ #[requires(size.is_power_of_two())]
+ #[requires((PAGE_SIZE_BITS..PAGE_SIZE_BITS + SIZE_CLASS_COUNT).contains(
+ &(size.trailing_zeros() as usize)
+ ))]
+ #[inline(always)]
+ pub fn alloc_of_size(&mut self, size: usize) -> Result<NonNull<u8>, AllocError> {
+ self.alloc_of_size_class(size.trailing_zeros() as usize - PAGE_SIZE_BITS)
+ }
+
/// Tries to allocate a subregion of a particular size class from the allocator.
#[requires(size_class < SIZE_CLASS_COUNT)]
- pub fn alloc(&mut self, size_class: usize) -> Result<NonNull<u8>, AllocError> {
+ pub fn alloc_of_size_class(&mut self, size_class: usize) -> Result<NonNull<u8>, AllocError> {
if let Some(ptr) = self.free_list(size_class).pop() {
// Fast-path: try allocating from the right size class's free list.
@@ -309,6 +329,18 @@ impl<
}
}
+ /// Tries to allocate a subregion of the smallest size class that will fit the given value, and
+ /// zeroes it out.
+ #[inline(always)]
+ pub fn alloc_zeroed<T>(&mut self) -> Result<NonNull<T>, AllocError> {
+ let size_class = (size_of::<T>().next_power_of_two().trailing_zeros() as usize)
+ .saturating_sub(PAGE_SIZE_BITS);
+ let ptr = self.alloc_of_size_class(size_class)?;
+ // SAFETY: The allocator gives back this much memory.
+ unsafe { ptr.write_bytes(0, PAGE_SIZE << size_class) };
+ Ok(ptr.cast())
+ }
+
/// Returns a subregion of a particular size class to the allocator.
///
/// # Safety
@@ -483,6 +515,15 @@ impl<
}
}
+unsafe impl<
+ 'allocator,
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+ > Send for BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+}
+
#[derive(Debug)]
struct MetadataLayout<
const PAGE_SIZE: usize,
diff --git a/crates/alloc_buddy/src/tree.rs b/crates/alloc_buddy/src/tree.rs
index e04605b..19415f8 100644
--- a/crates/alloc_buddy/src/tree.rs
+++ b/crates/alloc_buddy/src/tree.rs
@@ -98,3 +98,8 @@ impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> Tree<PAGE_SIZE, PAGE_S
(tree_addr_lo..tree_addr_hi).contains(&addr)
}
}
+
+unsafe impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> Send
+ for Tree<PAGE_SIZE, PAGE_SIZE_BITS>
+{
+}
diff --git a/crates/alloc_buddy/tests/hosted_test.rs b/crates/alloc_buddy/tests/hosted_test.rs
index d515f6e..90245a7 100644
--- a/crates/alloc_buddy/tests/hosted_test.rs
+++ b/crates/alloc_buddy/tests/hosted_test.rs
@@ -103,7 +103,7 @@ impl Action {
Action::Alloc {
sentinel_value,
size_class,
- } => match buddy.alloc(size_class) {
+ } => match buddy.alloc_of_size_class(size_class) {
Ok(ptr) => unsafe {
let slice = slice::from_raw_parts_mut(ptr.as_ptr(), PAGE_SIZE << size_class);
slice.fill(sentinel_value);
diff --git a/crates/kernel/Cargo.toml b/crates/kernel/Cargo.toml
index 3d7b61b..6fbabc1 100644
--- a/crates/kernel/Cargo.toml
+++ b/crates/kernel/Cargo.toml
@@ -8,7 +8,9 @@ crate-type = ["staticlib"]
[dependencies]
cfg-if = { version = "1.0.0", default-features = false }
+contracts = { version = "0.6.3", default-features = false }
log = { version = "0.4.20", default-features = false }
+spin = { version = "0.9.8", default-features = false, features = ["fair_mutex", "use_ticket_mutex"] }
vernos_alloc_buddy = { path = "../alloc_buddy" }
vernos_alloc_physmem_free_list = { path = "../alloc_physmem_free_list" }
vernos_device_tree = { path = "../device_tree" }
diff --git a/crates/kernel/src/alloc.rs b/crates/kernel/src/alloc.rs
new file mode 100644
index 0000000..dbb79ee
--- /dev/null
+++ b/crates/kernel/src/alloc.rs
@@ -0,0 +1,72 @@
+//! Global structures for the allocators.
+
+use crate::arch::{
+ paging::{PageTable, PageTableEntry, ASID, PAGE_TABLE_BITS, PAGE_TABLE_LEVELS},
+ MAX_PAGE_SIZE_BITS, PAGE_SIZE, PAGE_SIZE_BITS,
+};
+use contracts::requires;
+use core::ptr::NonNull;
+use spin::mutex::FairMutex;
+use vernos_alloc_buddy::BuddyAllocator;
+use vernos_utils::BelieveMeSend;
+
+/// The global instance of the physical page allocator.
+static BUDDY_ALLOCATOR: FairMutex<
+ Option<BuddyAllocator<PAGE_SIZE, PAGE_SIZE_BITS, { 1 + MAX_PAGE_SIZE_BITS - PAGE_SIZE_BITS }>>,
+> = FairMutex::new(None);
+
+/// The global kernel page table.
+static KERNEL_PAGE_TABLE: FairMutex<BelieveMeSend<Option<NonNull<PageTable>>>> =
+ FairMutex::new(BelieveMeSend(None));
+
+/// Initializes the allocator and enables paging.
+///
+/// # Safety
+///
+/// - Paging must not have been enabled previously.
+/// - The buddy allocator must be valid.
+#[requires(BUDDY_ALLOCATOR.lock().is_none())]
+#[ensures(BUDDY_ALLOCATOR.lock().is_some())]
+#[requires(KERNEL_PAGE_TABLE.lock().is_none())]
+#[ensures(KERNEL_PAGE_TABLE.lock().is_some())]
+pub unsafe fn init(
+ mut buddy_allocator: BuddyAllocator<
+ 'static,
+ PAGE_SIZE,
+ PAGE_SIZE_BITS,
+ { 1 + MAX_PAGE_SIZE_BITS - PAGE_SIZE_BITS },
+ >,
+) {
+ // Allocate a page to use (for now) as the global kernel page table. Later we'll actually
+ // replace it with the hart0 initial stack's page, since we'll never free the root page of the
+ // kernel page table, and we can't return that page to the buddy allocator anyway.
+ let mut page_table = buddy_allocator
+ .alloc_zeroed::<PageTable>()
+ .expect("failed to allocate the kernel page table");
+
+ // Create identity mappings for the lower half of memory.
+ for (i, entry) in page_table
+ .as_mut()
+ .iter_mut()
+ .enumerate()
+ .take(1 << (PAGE_TABLE_BITS - 1))
+ {
+ let addr = (i as u64) << ((PAGE_TABLE_LEVELS - 1) * PAGE_TABLE_BITS + PAGE_SIZE_BITS);
+ let mut pte = PageTableEntry::default();
+ pte.set_valid(true)
+ .set_rwx(true, true, true)
+ .set_global(true)
+ .set_addr(addr);
+ *entry = pte;
+ }
+
+ // Set the page table as the current page table.
+ page_table.as_mut().make_current(ASID::KERNEL);
+
+ // Save the buddy allocator and kernel page table.
+ *BUDDY_ALLOCATOR.lock() = Some(buddy_allocator);
+ KERNEL_PAGE_TABLE.lock().0 = Some(page_table);
+
+ // Print the page table after this.
+ vernos_utils::dbg!(page_table.as_mut());
+}
diff --git a/crates/kernel/src/arch/hosted.rs b/crates/kernel/src/arch/hosted.rs
index df62bab..ac3d53e 100644
--- a/crates/kernel/src/arch/hosted.rs
+++ b/crates/kernel/src/arch/hosted.rs
@@ -2,18 +2,19 @@
extern crate std;
-use std::{thread::sleep, time::Duration};
-
-/// The size of a page of memory.
-///
-/// Obviously, this value is unrealistic, but for now we just need the hosted arch to compile.
-pub const PAGE_SIZE: usize = 64;
+use crate::PAGE_SIZE;
+use std::{iter, thread::sleep, time::Duration};
/// The number of bits in the size of a page of memory.
///
/// Obviously, this value is unrealistic, but for now we just need the hosted arch to compile.
pub const PAGE_SIZE_BITS: usize = 6;
+/// The number of bits in the size of the largest huge page.
+///
+/// This platform doesn't support huge pages (so we claim), so this matches the page size.
+pub const MAX_PAGE_SIZE_BITS: usize = PAGE_SIZE_BITS;
+
/// No-opped interrupt support.
///
/// TODO: Should this use Unix signals?
@@ -27,3 +28,171 @@ pub fn sleep_forever() -> ! {
sleep(Duration::from_secs(1));
}
}
+
+/// A dummy implementation of paging.
+pub mod paging {
+ use super::*;
+
+ /// The number of bits looked up in each page table entry.
+ pub const PAGE_TABLE_BITS: usize = 6;
+
+ /// The number of levels of page tables.
+ pub const PAGE_TABLE_LEVELS: usize = 2;
+
+ /// An address space ID.
+ #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+ pub struct ASID(u8);
+
+ impl ASID {
+ /// The kernel's ASID.
+ pub const KERNEL: ASID = ASID(0);
+ }
+
+ /// A dummy page table type.
+ #[derive(Debug)]
+ pub struct PageTable;
+
+ impl PageTable {
+ /// Set this as the root page table. Note that this does _not_ perform a TLB shootdown.
+ ///
+ /// # Safety
+ ///
+ /// - This is a stub.
+ pub unsafe fn make_current(&self, asid: ASID) {
+ todo!()
+ }
+
+ /// Iterates over shared references to the entries in this page table.
+ pub fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
+ iter::empty()
+ }
+
+ /// Iterates over exclusive references to the entries in this page table.
+ pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
+ iter::empty()
+ }
+ }
+
+    /// A dummy type for an entry in a page table.
+ #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
+ pub struct PageTableEntry;
+
+ impl PageTableEntry {
+ /// Returns the physical address of the backing page or next level page table.
+ pub fn addr(&self) -> u64 {
+ todo!()
+ }
+
+ /// Returns a pointer to the backing page.
+ pub fn page(&self) -> *mut [u8; PAGE_SIZE] {
+ todo!()
+ }
+
+ /// Returns a pointer to the backing page table.
+ pub fn page_table(&self) -> *mut PageTable {
+ todo!()
+ }
+
+ /// Sets the physical address of the backing page or next level page table.
+ pub fn set_addr(&mut self, addr: u64) -> &mut PageTableEntry {
+ todo!()
+ }
+
+ /// Returns whether the dirty bit is set.
+ pub fn dirty(&self) -> bool {
+ todo!()
+ }
+
+ /// Sets the dirty bit.
+ pub fn set_dirty(&mut self, dirty: bool) -> &mut PageTableEntry {
+ todo!()
+ }
+
+ /// Returns whether the accessed bit is set.
+ pub fn accessed(&self) -> bool {
+ todo!()
+ }
+
+ /// Sets the accessed bit.
+ pub fn set_accessed(&mut self, accessed: bool) -> &mut PageTableEntry {
+ todo!()
+ }
+
+ /// Returns whether the global bit is set.
+ pub fn global(&self) -> bool {
+ todo!()
+ }
+
+ /// Sets the global bit.
+ pub fn set_global(&mut self, global: bool) -> &mut PageTableEntry {
+ todo!()
+ }
+
+ /// Returns whether the user bit is set.
+ pub fn user(&self) -> bool {
+ todo!()
+ }
+
+ /// Sets the user bit.
+ pub fn set_user(&mut self, user: bool) -> &mut PageTableEntry {
+ todo!()
+ }
+
+ /// Returns whether the executable bit is set.
+ pub fn executable(&self) -> bool {
+ todo!()
+ }
+
+ /// Sets the executable bit.
+ pub fn set_executable(&mut self, executable: bool) -> &mut PageTableEntry {
+ todo!()
+ }
+
+ /// Returns whether the writable bit is set.
+ pub fn writable(&self) -> bool {
+ todo!()
+ }
+
+ /// Sets the writable bit.
+ pub fn set_writable(&mut self, writable: bool) -> &mut PageTableEntry {
+ todo!()
+ }
+
+ /// Returns whether the readable bit is set.
+ pub fn readable(&self) -> bool {
+ todo!()
+ }
+
+ /// Sets the readable bit.
+ pub fn set_readable(&mut self, readable: bool) -> &mut PageTableEntry {
+ todo!()
+ }
+
+ /// Returns whether the page table entry is for a leaf PTE.
+ pub fn leaf_pte(&self) -> bool {
+ todo!()
+ }
+
+ /// Sets the readable, writable, and executable bits at once.
+ pub fn set_rwx(
+ &mut self,
+ readable: bool,
+ writable: bool,
+ executable: bool,
+ ) -> &mut PageTableEntry {
+ self.set_readable(readable)
+ .set_writable(writable)
+ .set_executable(executable)
+ }
+
+ /// Returns whether the valid bit is set.
+ pub fn valid(&self) -> bool {
+ todo!()
+ }
+
+ /// Sets the valid bit.
+ pub fn set_valid(&mut self, valid: bool) -> &mut PageTableEntry {
+ todo!()
+ }
+ }
+}
diff --git a/crates/kernel/src/arch/mod.rs b/crates/kernel/src/arch/mod.rs
index bfdfcc7..1afe41c 100644
--- a/crates/kernel/src/arch/mod.rs
+++ b/crates/kernel/src/arch/mod.rs
@@ -9,3 +9,6 @@ cfg_if::cfg_if! {
compile_error!("unsupported platform");
}
}
+
+/// The size of a regular-sized page of memory.
+pub const PAGE_SIZE: usize = 1 << PAGE_SIZE_BITS;
diff --git a/crates/kernel/src/arch/riscv64/mod.rs b/crates/kernel/src/arch/riscv64/mod.rs
index 216a90c..15f44c7 100644
--- a/crates/kernel/src/arch/riscv64/mod.rs
+++ b/crates/kernel/src/arch/riscv64/mod.rs
@@ -1,11 +1,12 @@
pub mod interrupts;
+pub mod paging;
-/// The size of a page of memory.
-pub const PAGE_SIZE: usize = 4096;
-
-/// The number of bits in the size of a page of memory.
+/// The number of bits in the size of a regular-sized page of memory.
pub const PAGE_SIZE_BITS: usize = 12;
+/// The number of bits in the size of the largest huge page.
+pub const MAX_PAGE_SIZE_BITS: usize = 30;
+
/// Halts the hart.
pub fn sleep_forever() -> ! {
loop {
diff --git a/crates/kernel/src/arch/riscv64/paging.rs b/crates/kernel/src/arch/riscv64/paging.rs
new file mode 100644
index 0000000..5ebdb5f
--- /dev/null
+++ b/crates/kernel/src/arch/riscv64/paging.rs
@@ -0,0 +1,262 @@
+use crate::arch::PAGE_SIZE;
+use contracts::requires;
+use core::{arch::asm, fmt, str};
+
+/// The number of bits looked up in each page table entry.
+pub const PAGE_TABLE_BITS: usize = 9;
+
+/// The number of levels of page tables.
+pub const PAGE_TABLE_LEVELS: usize = 3;
+
+/// An address space ID.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+pub struct ASID(u16);
+
+impl ASID {
+ /// The kernel's ASID.
+ pub const KERNEL: ASID = ASID(0);
+}
+
+/// A single page table.
+#[derive(Debug)]
+#[repr(align(4096))]
+pub struct PageTable([PageTableEntry; 512]);
+
+impl PageTable {
+ /// Set this as the root page table. Note that this does _not_ perform a TLB shootdown.
+ ///
+ /// # Safety
+ ///
+ /// - All the safety conditions that would apply for setting `satp` and issuing an
+ /// `sfence.vma`.
+ #[requires((asid.0 & !0xfff) == 0)]
+ #[requires(((self as *const PageTable as usize) & 0xff00_0000_0000_0fff) == 0)]
+ #[inline(never)]
+ pub unsafe fn make_current(&self, asid: ASID) {
+ let mode = 8; // Sv39
+ let addr = self as *const PageTable as usize as u64;
+ let satp = (mode << 60) | ((asid.0 as u64) << 44) | (addr >> 12);
+ asm!("sfence.vma", "csrw satp, {satp}", "sfence.vma", satp = in(reg) satp)
+ }
+
+ /// Iterates over shared references to the entries in this page table.
+ pub fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
+ self.0.iter()
+ }
+
+ /// Iterates over exclusive references to the entries in this page table.
+ pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
+ self.0.iter_mut()
+ }
+}
+
+/// An entry in a page table.
+#[derive(Clone, Copy, Default, Eq, PartialEq)]
+pub struct PageTableEntry(u64);
+
+impl PageTableEntry {
+ /// Returns the physical page number of the backing page or next level page table.
+ #[requires(self.valid())]
+ #[ensures((ret & !0x0000_0fff_ffff_ffff) == 0)]
+ fn ppn(&self) -> u64 {
+ (self.0 >> 10) & 0x0000_0fff_ffff_ffff
+ }
+
+ /// Returns the physical address of the backing page or next level page table.
+ #[requires(self.valid())]
+    #[ensures((ret & !0x00ff_ffff_ffff_f000) == 0)]
+    pub fn addr(&self) -> u64 {
+        self.ppn() << 12
+    }
+ }
+
+ /// Returns a pointer to the backing page.
+ #[requires(self.valid())]
+ #[requires(self.leaf_pte())]
+ #[ensures((ret as usize & !0x003f_ffff_ffff_fc00) == 0)]
+ pub fn page(&self) -> *mut [u8; PAGE_SIZE] {
+ self.addr() as *mut [u8; PAGE_SIZE]
+ }
+
+ /// Returns a pointer to the backing page table.
+ #[requires(self.valid())]
+ #[requires(!self.leaf_pte())]
+ #[ensures((ret as usize & !0x003f_ffff_ffff_fc00) == 0)]
+ pub fn page_table(&self) -> *mut PageTable {
+ self.addr() as *mut PageTable
+ }
+
+ /// Sets the physical address of the backing page or next level page table.
+ #[requires(self.valid())]
+ #[requires((addr & !0x003f_ffff_ffff_fc00) == 0)]
+ pub fn set_addr(&mut self, addr: u64) -> &mut PageTableEntry {
+ let ppn = addr >> 12;
+ self.0 &= !0x003f_ffff_ffff_fc00;
+ self.0 |= ppn << 10;
+ self
+ }
+
+ /// Returns whether the dirty bit is set.
+ #[requires(self.valid())]
+ pub fn dirty(&self) -> bool {
+ (self.0 & (1 << 7)) != 0
+ }
+
+ /// Sets the dirty bit.
+ #[requires(self.valid())]
+ pub fn set_dirty(&mut self, dirty: bool) -> &mut PageTableEntry {
+ self.0 &= !0b10000000;
+ self.0 |= (dirty as u64) << 7;
+ self
+ }
+
+ /// Returns whether the accessed bit is set.
+ #[requires(self.valid())]
+ pub fn accessed(&self) -> bool {
+ (self.0 & (1 << 6)) != 0
+ }
+
+ /// Sets the accessed bit.
+ #[requires(self.valid())]
+ pub fn set_accessed(&mut self, accessed: bool) -> &mut PageTableEntry {
+ self.0 &= !0b01000000;
+ self.0 |= (accessed as u64) << 6;
+ self
+ }
+
+ /// Returns whether the global bit is set.
+ #[requires(self.valid())]
+ pub fn global(&self) -> bool {
+ (self.0 & (1 << 5)) != 0
+ }
+
+ /// Sets the global bit.
+ #[requires(self.valid())]
+ pub fn set_global(&mut self, global: bool) -> &mut PageTableEntry {
+ self.0 &= !0b00100000;
+ self.0 |= (global as u64) << 5;
+ self
+ }
+
+ /// Returns whether the user bit is set.
+ #[requires(self.valid())]
+ pub fn user(&self) -> bool {
+ (self.0 & (1 << 4)) != 0
+ }
+
+ /// Sets the user bit.
+ #[requires(self.valid())]
+ pub fn set_user(&mut self, user: bool) -> &mut PageTableEntry {
+ self.0 &= !0b00010000;
+ self.0 |= (user as u64) << 4;
+ self
+ }
+
+ /// Returns whether the executable bit is set.
+ #[requires(self.valid())]
+ pub fn executable(&self) -> bool {
+ (self.0 & (1 << 3)) != 0
+ }
+
+ /// Sets the executable bit.
+ #[requires(self.valid())]
+ pub fn set_executable(&mut self, executable: bool) -> &mut PageTableEntry {
+ self.0 &= !0b00001000;
+ self.0 |= (executable as u64) << 3;
+ self
+ }
+
+ /// Returns whether the writable bit is set.
+ #[requires(self.valid())]
+ pub fn writable(&self) -> bool {
+ (self.0 & (1 << 2)) != 0
+ }
+
+ /// Sets the writable bit.
+ #[requires(self.valid())]
+ pub fn set_writable(&mut self, writable: bool) -> &mut PageTableEntry {
+ self.0 &= !0b00000100;
+ self.0 |= (writable as u64) << 2;
+ self
+ }
+
+ /// Returns whether the readable bit is set.
+ #[requires(self.valid())]
+ pub fn readable(&self) -> bool {
+ (self.0 & (1 << 1)) != 0
+ }
+
+ /// Sets the readable bit.
+ #[requires(self.valid())]
+ pub fn set_readable(&mut self, readable: bool) -> &mut PageTableEntry {
+ self.0 &= !0b00000010;
+ self.0 |= (readable as u64) << 1;
+ self
+ }
+
+ /// Returns whether the page table entry is for a leaf PTE.
+ #[requires(self.valid())]
+ pub fn leaf_pte(&self) -> bool {
+ (self.0 & 0b1110) != 0
+ }
+
+ /// Sets the readable, writable, and executable bits at once.
+ #[requires(self.valid())]
+ pub fn set_rwx(
+ &mut self,
+ readable: bool,
+ writable: bool,
+ executable: bool,
+ ) -> &mut PageTableEntry {
+ self.set_readable(readable)
+ .set_writable(writable)
+ .set_executable(executable)
+ }
+
+ /// Returns whether the valid bit is set.
+ pub fn valid(&self) -> bool {
+ (self.0 & (1 << 0)) != 0
+ }
+
+ /// Sets the valid bit.
+ pub fn set_valid(&mut self, valid: bool) -> &mut PageTableEntry {
+ self.0 &= !0b00000001;
+ self.0 |= valid as u64;
+ self
+ }
+}
+
+impl fmt::Debug for PageTableEntry {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ if self.valid() {
+ let mut flags = *b"DAGUXWRV";
+ if !self.dirty() {
+ flags[0] = b' ';
+ }
+ if !self.accessed() {
+ flags[1] = b' ';
+ }
+ if !self.global() {
+ flags[2] = b' ';
+ }
+ if !self.user() {
+ flags[3] = b' ';
+ }
+ if !self.executable() {
+ flags[4] = b' ';
+ }
+ if !self.writable() {
+ flags[5] = b' ';
+ }
+ if !self.readable() {
+ flags[6] = b' ';
+ }
+
+ // UNWRAP: The flags must be ASCII.
+ let addr = self.addr() as *const ();
+ let flags = str::from_utf8(&flags).unwrap();
+ write!(fmt, "PageTableEntry({addr:018p}, {flags})")
+ } else {
+ write!(fmt, "PageTableEntry({:#018x}, INVALID)", self.0)
+ }
+ }
+}
diff --git a/crates/kernel/src/lib.rs b/crates/kernel/src/lib.rs
index c0da3bf..517ace9 100644
--- a/crates/kernel/src/lib.rs
+++ b/crates/kernel/src/lib.rs
@@ -3,15 +3,15 @@
use crate::arch::{sleep_forever, PAGE_SIZE, PAGE_SIZE_BITS};
use core::ptr::NonNull;
-use log::{debug, info, warn};
+use log::{debug, info};
use vernos_alloc_buddy::BuddyAllocator;
use vernos_alloc_physmem_free_list::FreeListAllocator;
use vernos_device_tree::FlattenedDeviceTree;
-use vernos_utils::dbg;
#[cfg(target_os = "none")]
mod panic;
+pub mod alloc;
pub mod arch;
pub mod logger;
@@ -48,7 +48,6 @@ pub unsafe extern "C" fn hart0_early_boot(device_tree: *const u8) -> ! {
let mut physical_memory_region_count = 0;
flattened_device_tree
.for_each_memory_range::<_, PAGE_SIZE>(|addrs| {
- dbg!(&addrs);
let len_bytes = addrs.end - addrs.start;
assert!(addrs.start.trailing_zeros() as usize >= PAGE_SIZE_BITS);
assert!(len_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS);
@@ -82,10 +81,11 @@ pub unsafe extern "C" fn hart0_early_boot(device_tree: *const u8) -> ! {
}
// Initialize the buddy allocator.
- let alloc_buddy =
- BuddyAllocator::<PAGE_SIZE, PAGE_SIZE_BITS, 19>::new(physical_memory_free_list)
- .expect("failed to configure the buddy allocator");
- dbg!(alloc_buddy.debug_free_lists());
+ let alloc_buddy = BuddyAllocator::new(physical_memory_free_list)
+ .expect("failed to configure the buddy allocator");
+
+ // Set up the allocators.
+ alloc::init(alloc_buddy);
todo!()
}
diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs
index 3649666..6e26317 100644
--- a/crates/utils/src/lib.rs
+++ b/crates/utils/src/lib.rs
@@ -1,7 +1,11 @@
//! Common utilities.
#![no_std]
-use core::{fmt, mem::size_of};
+use core::{
+ fmt,
+ mem::size_of,
+ ops::{Deref, DerefMut},
+};
/// Creates an ad-hoc `Debug` instance.
pub fn debug(f: impl Fn(&mut fmt::Formatter) -> fmt::Result) -> impl fmt::Debug {
@@ -66,6 +70,25 @@ macro_rules! dbg {
};
}
+/// A wrapper type that promises that its contents are Send.
+pub struct BelieveMeSend<T>(pub T);
+
+impl<T> Deref for BelieveMeSend<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T> DerefMut for BelieveMeSend<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut self.0
+ }
+}
+
+unsafe impl<T> Send for BelieveMeSend<T> {}
+
/// A trait for types that can be converted to from big-endian or little-endian byte slices.
pub trait FromEndianBytes {
/// Converts from a big-endian byte slice.
diff --git a/flake.nix b/flake.nix
index 03b99ac..db3a268 100644
--- a/flake.nix
+++ b/flake.nix
@@ -115,7 +115,8 @@
-ex "layout asm" \
-ex "layout regs" \
-ex "focus cmd" \
- -ex "tbreak hart0_boot" \
+ -ex "(\$sp < (char*)hart0_initial_stack+8)" \
+ -ex "tbreak hart0_early_boot" \
-ex "c"
'';
}