Diffstat (limited to 'crates/kernel/src/arch')
-rw-r--r--  crates/kernel/src/arch/mod.rs               3
-rw-r--r--  crates/kernel/src/arch/riscv64/mod.rs       6
-rw-r--r--  crates/kernel/src/arch/riscv64/paging.rs  228
3 files changed, 158 insertions(+), 79 deletions(-)
diff --git a/crates/kernel/src/arch/mod.rs b/crates/kernel/src/arch/mod.rs
index 1afe41c..bfdfcc7 100644
--- a/crates/kernel/src/arch/mod.rs
+++ b/crates/kernel/src/arch/mod.rs
@@ -9,6 +9,3 @@ cfg_if::cfg_if! {
compile_error!("unsupported platform");
}
}
-
-/// The size of a regular-sized page of memory.
-pub const PAGE_SIZE: usize = 1 << PAGE_SIZE_BITS;
diff --git a/crates/kernel/src/arch/riscv64/mod.rs b/crates/kernel/src/arch/riscv64/mod.rs
index 15f44c7..011e244 100644
--- a/crates/kernel/src/arch/riscv64/mod.rs
+++ b/crates/kernel/src/arch/riscv64/mod.rs
@@ -1,12 +1,6 @@
pub mod interrupts;
pub mod paging;
-/// The number of bits in the size of a regular-sized page of memory.
-pub const PAGE_SIZE_BITS: usize = 12;
-
-/// The number of bits in the size of the largest huge page.
-pub const MAX_PAGE_SIZE_BITS: usize = 30;
-
/// Halts the hart.
pub fn sleep_forever() -> ! {
loop {
diff --git a/crates/kernel/src/arch/riscv64/paging.rs b/crates/kernel/src/arch/riscv64/paging.rs
index 6b81881..5b87b64 100644
--- a/crates/kernel/src/arch/riscv64/paging.rs
+++ b/crates/kernel/src/arch/riscv64/paging.rs
@@ -1,8 +1,22 @@
-use crate::arch::{PAGE_SIZE, PAGE_SIZE_BITS};
+use crate::paging::{BuddyAllocator, MapError, MappingFlags, PAGE_SIZE, PAGE_SIZE_BITS};
use contracts::requires;
-use core::{arch::asm, fmt, iter, ops::RangeInclusive, str};
+use core::{arch::asm, fmt, iter, ops::RangeInclusive, ptr::NonNull, str};
use either::Either;
-use vernos_utils::debug;
+
+/// One past the largest address in "low memory."
+pub const LOMEM_TOP: usize = 0x0000_0040_0000_0000;
+
+/// The smallest address in "high memory."
+pub const HIMEM_BOT: usize = 0xffff_ffc0_0000_0000;
+
+/// The number of possible page sizes.
+pub const PAGE_SIZE_COUNT: usize = 3;
+
+/// The `log2`s of the possible page sizes, from largest to smallest.
+pub const PAGE_SIZES_BITS: [usize; PAGE_SIZE_COUNT] = [30, 21, 12];
+
+/// The possible page sizes, from largest to smallest.
+pub const PAGE_SIZES: [usize; PAGE_SIZE_COUNT] = [1 << 30, 1 << 21, 1 << 12];
/// The number of bits looked up in each page table entry.
pub const PAGE_TABLE_BITS: usize = 9;
@@ -11,7 +25,7 @@ pub const PAGE_TABLE_BITS: usize = 9;
pub const PAGE_TABLE_LEVELS: usize = 3;
/// An address space ID.
-#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ASID(u16);
impl ASID {
@@ -20,22 +34,27 @@ impl ASID {
}
/// A single page table.
+#[derive(Debug)]
#[repr(align(4096))]
-pub struct PageTable([PageTableEntry; 512]);
+pub struct PageTable([PageTableEntry; 1 << PAGE_TABLE_BITS]);
impl PageTable {
- /// Set this as the root page table. Note that this does _not_ perform a TLB shootdown.
+ /// Set the page table whose _physical_ address is `page_table` as the current root page table.
+ ///
+ /// This performs appropriate invalidation or fencing as required by the platform, but does
+ /// _not_ perform a TLB shootdown.
///
/// # Safety
///
/// - All the safety conditions that would apply for setting `satp` and issuing an
/// `sfence.vma`.
+ /// - The page table must not be dropped while it is the current root page table!
#[requires((asid.0 & !0xfff) == 0)]
- #[requires(((self as *const PageTable as usize) & 0xff00_0000_0000_0fff) == 0)]
+ #[requires(((page_table.as_ptr() as usize) & 0xff00_0000_0000_0fff) == 0)]
#[inline(never)]
- pub unsafe fn make_current(&self, asid: ASID) {
+ pub unsafe fn make_current(page_table: NonNull<PageTable>, asid: ASID) {
let mode = 8; // Sv39
- let addr = self as *const PageTable as usize as u64;
+ let addr = page_table.as_ptr() as usize as u64;
let satp = (mode << 60) | ((asid.0 as u64) << 44) | (addr >> 12);
asm!("sfence.vma", "csrw satp, {satp}", "sfence.vma", satp = in(reg) satp)
}
@@ -49,56 +68,114 @@ impl PageTable {
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
self.0.iter_mut()
}
-}
-impl fmt::Debug for PageTable {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- // Get an iterator over the valid leaf page table entries.
- let mut mappings = iter_level_2_mappings(&self.0, 0).peekable();
-
- // Make an iterator that merges adjacent entries that have the same flags.
- let mappings = iter::from_fn(|| {
- let (entry, mut vaddrs) = mappings.next()?;
+ /// Iterates over the valid mappings. Each item is a triple of virtual address range, physical
+ /// address range, and entry.
+ pub fn iter_mappings(
+ &self,
+ ) -> impl '_ + Iterator<Item = (RangeInclusive<usize>, RangeInclusive<usize>, PageTableEntry)>
+ {
+ iter_level_2_mappings(&self.0, 0).map(|(entry, vaddrs)| {
+ let len = vaddrs.end() - vaddrs.start();
let paddrs_start = entry.addr() as usize;
- let mut len = (vaddrs.end() - vaddrs.start()) + 1;
-
- while let Some((next_entry, next_vaddrs)) = mappings.peek() {
- let next_paddrs_start = next_entry.addr() as usize;
-
- if entry.flag_bits() != next_entry.flag_bits()
- || vaddrs.end().wrapping_add(1) != *next_vaddrs.start()
- || paddrs_start.wrapping_add(len) != next_paddrs_start
- {
- break;
- }
- // UNWRAP: .peek() already showed us that there's a next entry.
- let (_, next_vaddrs) = mappings.next().unwrap();
- vaddrs = *vaddrs.start()..=*next_vaddrs.end();
- len = (next_vaddrs.end() - vaddrs.start()) + 1;
+ let paddrs = paddrs_start..=paddrs_start + len;
+ (vaddrs, paddrs, entry)
+ })
+ }
+
+ /// Attempts to add a mapping of a particular size to the page tables. This issues neither an
+ /// `SFENCE.VMA` nor a TLB shootdown.
+ ///
+ /// This may require allocating new intermediate page tables, so it may fail to allocate
+ /// memory.
+ pub fn map(
+ &mut self,
+ buddy_allocator: &mut BuddyAllocator,
+ vaddr: usize,
+ paddr: usize,
+ len: usize,
+ flags: MappingFlags,
+ ) -> Result<(), MapError> {
+ // Validate the flags, and drop the A and D bits, which we don't set when creating a mapping.
+ let flags = MappingFlags::from_bits(flags.bits())
+ .ok_or(MapError::InvalidFlags)?
+ .difference(MappingFlags::A | MappingFlags::D);
+
+ // Check that at least one of the R, W, and X permission bits is set.
+ if !flags.intersects(MappingFlags::R | MappingFlags::W | MappingFlags::X) {
+ return Err(MapError::InvalidFlagPermissions);
+ }
+
+ // Check that the page size is valid, and that the physical and virtual addresses are
+ // aligned for it.
+ let page_size_class = PAGE_SIZES
+ .iter()
+ .position(|&page_size| page_size == len)
+ .ok_or(MapError::InvalidLength)?;
+ let page_size_bits = PAGE_SIZES_BITS[page_size_class];
+ if paddr & (len - 1) != 0 {
+ return Err(MapError::MisalignedPAddr);
+ }
+ if vaddr & (len - 1) != 0 {
+ return Err(MapError::MisalignedVAddr);
+ }
+
+ // Check that the vaddr isn't in the "dead zone" between lomem and himem.
+ if (LOMEM_TOP..HIMEM_BOT).contains(&vaddr) {
+ return Err(MapError::InvalidVAddr);
+ }
+
+ // If the virtual address is in himem, shift it down to paper over the dead zone. The result
+ // is no longer the true virtual address, but it makes the index math uniform.
+ let vaddr = if vaddr >= HIMEM_BOT {
+ vaddr - (HIMEM_BOT - LOMEM_TOP)
+ } else {
+ vaddr
+ };
+
+ // Walk down the page tables until `page_table` refers to the table whose entries map pages
+ // of the requested size class. This may involve allocating new intermediate page tables.
+ //
+ // TODO: Can we deallocate these if an error occurs later?
+ let mut page_table = self;
+ for page_size_bits in PAGE_SIZES_BITS.iter().take(page_size_class) {
+ let entry_slot =
+ &mut page_table.0[(vaddr >> page_size_bits) & ((1 << PAGE_TABLE_BITS) - 1)];
+ if !entry_slot.valid() {
+ // Allocate a new page table.
+ let next_page_table = buddy_allocator.alloc_zeroed::<PageTable>()?;
+ let mut entry = PageTableEntry::default();
+ entry
+ .set_valid(true)
+ .set_addr(next_page_table.as_ptr() as u64);
+ *entry_slot = entry;
+ }
+ if entry_slot.leaf_pte() {
+ return Err(MapError::MappingAlreadyExisted);
}
- let paddrs = paddrs_start..=paddrs_start + (len - 1);
- Some((entry, vaddrs, paddrs))
- });
-
- // Turn the iterator into an iterator over Debugs.
- let debug_mappings = mappings.map(|(entry, vaddrs, paddrs)| {
- debug(move |fmt| {
- let flags = entry.flags_str();
- // UNWRAP: The flags must be ASCII by the postcondition of flags_str().
- let flags = str::from_utf8(&flags).unwrap();
- write!(
- fmt,
- "[V|{:16x}-{:16x}][P|{:16x}-{:16x}][F|{}]",
- *vaddrs.start(),
- *vaddrs.end(),
- *paddrs.start(),
- *paddrs.end(),
- flags
- )
- })
- });
-
- fmt.debug_list().entries(debug_mappings).finish()
+
+ // UNSAFE, UNWRAP: We maintain the invariant that all entries marked valid actually are valid.
+ page_table = unsafe { entry_slot.page_table().as_mut().unwrap() };
+ }
+
+ // Find the entry that we need to set, making sure it's not already occupied.
+ let entry_slot =
+ &mut page_table.0[(vaddr >> page_size_bits) & ((1 << PAGE_TABLE_BITS) - 1)];
+ if entry_slot.valid() {
+ return Err(MapError::MappingAlreadyExisted);
+ }
+
+ // Otherwise, put the entry in.
+ let mut entry = PageTableEntry::default();
+ entry
+ .set_valid(true)
+ .set_readable(flags.contains(MappingFlags::R))
+ .set_writable(flags.contains(MappingFlags::W))
+ .set_executable(flags.contains(MappingFlags::X))
+ .set_user(flags.contains(MappingFlags::U))
+ .set_addr(paddr as u64);
+ *entry_slot = entry;
+ Ok(())
}
}
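For reference, a minimal usage sketch of the new `PageTable::map`; the root table, buddy allocator, and addresses here are assumed for illustration, not taken from this patch:

// Identity-map one 2 MiB huge page as read+execute. `map` checks that `len` is
// one of PAGE_SIZES, that both addresses are aligned to it, and allocates any
// missing intermediate tables from the buddy allocator.
fn map_kernel_text(
    root: &mut PageTable,
    alloc: &mut BuddyAllocator,
) -> Result<(), MapError> {
    root.map(
        alloc,
        0x8020_0000, // vaddr (2 MiB-aligned, below LOMEM_TOP)
        0x8020_0000, // paddr (2 MiB-aligned)
        1 << 21,     // one 2 MiB page
        MappingFlags::R | MappingFlags::X,
    )
}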
@@ -107,6 +184,19 @@ impl fmt::Debug for PageTable {
pub struct PageTableEntry(u64);
impl PageTableEntry {
+ /// The value that `crate::paging::MappingFlags::R` should have.
+ pub const FLAG_R: usize = 0b00000010;
+ /// The value that `crate::paging::MappingFlags::W` should have.
+ pub const FLAG_W: usize = 0b00000100;
+ /// The value that `crate::paging::MappingFlags::X` should have.
+ pub const FLAG_X: usize = 0b00001000;
+ /// The value that `crate::paging::MappingFlags::U` should have.
+ pub const FLAG_U: usize = 0b00010000;
+ /// The value that `crate::paging::MappingFlags::A` should have.
+ pub const FLAG_A: usize = 0b01000000;
+ /// The value that `crate::paging::MappingFlags::D` should have.
+ pub const FLAG_D: usize = 0b10000000;
+
/// Returns the physical page number of the backing page or next level page table.
#[requires(self.valid())]
#[ensures((ret & !0x0000_0fff_ffff_ffff) == 0)]
@@ -114,21 +204,19 @@ impl PageTableEntry {
(self.0 >> 10) & 0x0000_0fff_ffff_ffff
}
- /// Returns the bits of the entry that correspond to flags.
- ///
- /// This isn't `pub` because this isn't portable, though maybe it makes sense to instead export
- /// a predicate for "do these two entries have the _same_ flags bits," since that should be
- /// more portable.
+ /// Returns whether the flag bits of this entry match the other entry's.
#[requires(self.valid())]
- #[ensures((ret & !0xffc0_0000_0000_03ff) == 0)]
- fn flag_bits(&self) -> u64 {
- self.0 & 0xffc0_0000_0000_03ff
+ #[requires(other.valid())]
+ pub fn flag_bits_eq(&self, other: &PageTableEntry) -> bool {
+ let lhs = self.0 & 0xffc0_0000_0000_03ff;
+ let rhs = other.0 & 0xffc0_0000_0000_03ff;
+ lhs == rhs
}
/// Returns bytes that correspond to an ASCII string with the flags.
#[requires(self.valid())]
#[ensures(ret.iter().all(|ch| ch.is_ascii()))]
- fn flags_str(&self) -> [u8; 7] {
+ pub fn flags_str_bytes(&self) -> [u8; 7] {
let mut flags = *b"rwxugad";
let char_disabled = b'-';
if !self.readable() {
@@ -322,8 +410,8 @@ impl fmt::Debug for PageTableEntry {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
if self.valid() {
let addr = self.addr() as *const ();
- let flags = self.flags_str();
- // UNWRAP: The flags must be ASCII by the postcondition of flags_str().
+ let flags = self.flags_str_bytes();
+ // UNWRAP: The flags must be ASCII by the postcondition of flags_str_bytes().
let flags = str::from_utf8(&flags).unwrap();
write!(fmt, "PageTableEntry({addr:018p}, {flags})")
} else {
@@ -335,7 +423,7 @@ impl fmt::Debug for PageTableEntry {
/// See `PageTable::iter_mappings`. This needs to be its own function because of `impl Trait`; we
/// can't allocate here, and we want a fixed-size iterator.
fn iter_level_2_mappings(
- table: &[PageTableEntry; 512],
+ table: &[PageTableEntry; 1 << PAGE_TABLE_BITS],
base_addr: usize,
) -> impl '_ + Iterator<Item = (PageTableEntry, RangeInclusive<usize>)> {
const ENTRY_SIZE: usize = 1 << (12 + 9 + 9);
@@ -363,7 +451,7 @@ fn iter_level_2_mappings(
/// See `PageTable::iter_mappings`. This needs to be its own function because of `impl Trait`; we
/// can't allocate here, and we want a fixed-size iterator.
fn iter_level_1_mappings(
- table: &[PageTableEntry; 512],
+ table: &[PageTableEntry; 1 << PAGE_TABLE_BITS],
base_addr: usize,
) -> impl '_ + Iterator<Item = (PageTableEntry, RangeInclusive<usize>)> {
const ENTRY_SIZE: usize = 1 << (12 + 9);
@@ -388,7 +476,7 @@ fn iter_level_1_mappings(
/// See `PageTable::iter_mappings`. This needs to be its own function because of `impl Trait`; we
/// can't allocate here, and we want a fixed-size iterator.
fn iter_level_0_mappings(
- table: &[PageTableEntry; 512],
+ table: &[PageTableEntry; 1 << PAGE_TABLE_BITS],
base_addr: usize,
) -> impl '_ + Iterator<Item = (PageTableEntry, RangeInclusive<usize>)> {
const ENTRY_SIZE: usize = 1 << 12;
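For reference, a minimal sketch of the per-level index math shared by `map` and the `iter_level_*_mappings` functions, assuming the Sv39 constants above; `vpn_index` is a hypothetical helper name:

// Each level translates PAGE_TABLE_BITS (9) bits of the virtual address,
// starting at that level's page-size bit position (30, 21, or 12).
fn vpn_index(vaddr: usize, level_page_size_bits: usize) -> usize {
    (vaddr >> level_page_size_bits) & ((1 << PAGE_TABLE_BITS) - 1)
}
// e.g. vaddr = 0x8020_1000 yields index 2 at level 2, and 1 at levels 1 and 0.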