Diffstat (limited to 'crates/kernel/src/paging.rs')
-rw-r--r--    crates/kernel/src/paging.rs    193
1 file changed, 193 insertions(+), 0 deletions(-)
diff --git a/crates/kernel/src/paging.rs b/crates/kernel/src/paging.rs
new file mode 100644
index 0000000..76c603d
--- /dev/null
+++ b/crates/kernel/src/paging.rs
@@ -0,0 +1,193 @@
+//! An architecture-independent interface to the page tables.
+
+use crate::arch;
+pub use crate::arch::paging::{HIMEM_BOT, LOMEM_TOP, PAGE_SIZES, PAGE_SIZES_BITS, PAGE_SIZE_COUNT};
+use allocator_api2::alloc::AllocError;
+use core::{
+ fmt, iter,
+ ptr::{addr_of_mut, NonNull},
+ str,
+};
+use vernos_utils::debug;
+
+/// The number of bits in the size of a regular-sized page of memory.
+pub const PAGE_SIZE_BITS: usize = PAGE_SIZES_BITS[PAGE_SIZE_COUNT - 1];
+
+/// The size of a regular-sized page of memory.
+pub const PAGE_SIZE: usize = 1 << PAGE_SIZE_BITS;
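+// For example, on a port with 4 KiB base pages (PAGE_SIZE_BITS = 12, a purely
+// illustrative value; the real one comes from the arch crate), PAGE_SIZE works
+// out to 1 << 12 = 4096 bytes.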
+
+/// The number of bits in the size of the largest huge page.
+pub const MAX_PAGE_SIZE_BITS: usize = PAGE_SIZES_BITS[0];
+
+/// The buddy allocator, specialized for the details of our paging subsystem.
+pub type BuddyAllocator = vernos_alloc_buddy::BuddyAllocator<
+ 'static,
+ PAGE_SIZE,
+ PAGE_SIZE_BITS,
+ { 1 + MAX_PAGE_SIZE_BITS - PAGE_SIZE_BITS },
+>;
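+// As an illustration with hypothetical values (the real ones come from the arch
+// crate): with 4 KiB base pages (PAGE_SIZE_BITS = 12) and 1 GiB as the largest
+// huge page (MAX_PAGE_SIZE_BITS = 30), the allocator is instantiated with
+// 1 + 30 - 12 = 19 size classes, one per power-of-two size from PAGE_SIZE up to
+// the largest huge page.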
+
+/// A wrapper for the root page table, providing an architecture-independent interface to it.
+///
+/// This should be behind a pointer.
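+///
+/// A rough usage sketch (illustrative only, not run as a doc-test; the buddy
+/// allocator and the addresses are assumptions made for the example, and the
+/// virtual-to-physical translation of the root table's address is elided):
+///
+/// ```ignore
+/// let mut root = PageTable::new_in(buddy_allocator)?;
+/// // SAFETY: `root` was freshly allocated above and is not yet shared.
+/// unsafe { root.as_mut() }.map(
+///     buddy_allocator,
+///     0xffff_ffc0_0000_0000, // vaddr (example value)
+///     0x8000_0000,           // paddr (example value)
+///     PAGE_SIZE,
+///     MappingFlags::R | MappingFlags::W,
+/// )?;
+/// // SAFETY: no live references depend on mappings changed by this switch.
+/// unsafe { PageTable::make_current(root, ASID::KERNEL) };
+/// ```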
+pub struct PageTable(arch::paging::PageTable);
+
+impl PageTable {
+ /// Allocates a new page table in pages retrieved from the given buddy allocator.
+ pub fn new_in(buddy_allocator: &mut BuddyAllocator) -> Result<NonNull<PageTable>, AllocError> {
+ buddy_allocator.alloc_zeroed::<PageTable>()
+ }
+
+ /// Sets the page table whose _physical_ address is `page_table` as the current root page table.
+ ///
+ /// This performs appropriate invalidation or fencing as required by the platform, but does
+ /// _not_ perform a TLB shootdown.
+ ///
+ /// # Safety
+ ///
+ /// - There must not be any live references to pages that are no longer mapped or are mapped
+ /// differently in the new page table.
+ pub unsafe fn make_current(page_table: NonNull<PageTable>, asid: ASID) {
+ let page_table = NonNull::new_unchecked(addr_of_mut!((*page_table.as_ptr()).0));
+ arch::paging::PageTable::make_current(page_table, asid.0)
+ }
+
+ /// Attempts to add a mapping of a particular size to the page tables.
+ ///
+ /// This may require allocating new intermediate page tables, so it may fail to allocate
+ /// memory.
+ ///
+ /// TODO: Fences and shootdowns?
+ pub fn map(
+ &mut self,
+ buddy_allocator: &mut BuddyAllocator,
+ vaddr: usize,
+ paddr: usize,
+ len: usize,
+ flags: MappingFlags,
+ ) -> Result<(), MapError> {
+ self.0.map(buddy_allocator, vaddr, paddr, len, flags)
+ }
+
+ /// Returns an iterator over `Debug`s that show the mappings in this page table.
+ pub fn debug_mappings(&self) -> impl '_ + Iterator<Item = impl fmt::Debug> {
+ // Get an iterator over the valid leaf page table entries.
+ let mut mappings = self.0.iter_mappings().peekable();
+
+ // Make an iterator that merges adjacent entries that have the same flags.
+ let merged_mappings = iter::from_fn(move || {
+ let (mut vaddrs, mut paddrs, entry) = mappings.next()?;
+ while let Some((next_vaddrs, next_paddrs, next_entry)) = mappings.peek() {
+ // We use .checked_add() instead of .wrapping_add() because we _don't_ want to have
+ // ranges that wrap around.
+ if !entry.flag_bits_eq(next_entry)
+ || vaddrs.end().checked_add(1) != Some(*next_vaddrs.start())
+ || paddrs.end().checked_add(1) != Some(*next_paddrs.start())
+ {
+ break;
+ }
+ // UNWRAP: .peek() already showed us that there's a next entry.
+ let (next_vaddrs, next_paddrs, _) = mappings.next().unwrap();
+ vaddrs = *vaddrs.start()..=*next_vaddrs.end();
+ paddrs = *paddrs.start()..=*next_paddrs.end();
+ }
+ Some((vaddrs, paddrs, entry))
+ });
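+ // As a concrete illustration of the merging above (made-up addresses): two
+ // entries V 0x1000..=0x1fff -> P 0x8000_1000..=0x8000_1fff and
+ // V 0x2000..=0x2fff -> P 0x8000_2000..=0x8000_2fff with equal flag bits are
+ // contiguous both virtually and physically, so they collapse into the single
+ // range V 0x1000..=0x2fff -> P 0x8000_1000..=0x8000_2fff.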
+
+ // Turn the iterator into an iterator over Debugs.
+ merged_mappings.map(|(vaddrs, paddrs, entry)| {
+ debug(move |fmt| {
+ let flags = entry.flags_str_bytes();
+ // UNWRAP: The flags must be ASCII by the postcondition of flags_str_bytes().
+ let flags = str::from_utf8(&flags).unwrap();
+ write!(
+ fmt,
+ "[V|{:16x}-{:16x}][P|{:16x}-{:16x}][F|{}]",
+ *vaddrs.start(),
+ *vaddrs.end(),
+ *paddrs.start(),
+ *paddrs.end(),
+ flags
+ )
+ })
+ })
+ }
+}
+
+impl fmt::Debug for PageTable {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_list().entries(self.debug_mappings()).finish()
+ }
+}
+
+unsafe impl Send for PageTable {}
+
+bitflags::bitflags! {
+ /// The flags associated with a mapping.
+ #[derive(Clone, Copy, Debug)]
+ pub struct MappingFlags: usize {
+ /// Whether the mapping is readable.
+ const R = arch::paging::PageTableEntry::FLAG_R;
+
+ /// Whether the mapping is writable.
+ const W = arch::paging::PageTableEntry::FLAG_W;
+
+ /// Whether the mapping is executable.
+ const X = arch::paging::PageTableEntry::FLAG_X;
+
+ /// Whether the mapping is accessible to userspace rather than to kernelspace. Note that a
+ /// mapping that is accessible to one is not necessarily accessible to the other.
+ const U = arch::paging::PageTableEntry::FLAG_U;
+
+ /// Whether the mapping has been read from since it was last set. This is ignored by
+ /// `PageTable::map`, but may be returned by `PageTable::get_mapping`.
+ const A = arch::paging::PageTableEntry::FLAG_A;
+
+ /// Whether the mapping has been written to since it was last set. This is ignored by
+ /// `PageTable::map`, but may be returned by `PageTable::get_mapping`.
+ const D = arch::paging::PageTableEntry::FLAG_D;
+ }
+}
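+
+// Typical combinations (illustrative, not mandated by this module): `R | W` for a
+// data mapping, `R | X` for an executable mapping, and either of those with `U`
+// added for a userspace mapping. `A` and `D` are hardware status bits; as noted
+// above, `PageTable::map` ignores them.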
+
+/// An error creating a mapping.
+#[derive(Debug)]
+pub enum MapError {
+ /// A failure to allocate memory in which to store a page table.
+ AllocError,
+
+ /// An unknown flag bit was set.
+ InvalidFlags,
+
+ /// None of `R`, `W`, or `X` were set.
+ InvalidFlagPermissions,
+
+ /// The length of the mapping is not supported for this virtual address range.
+ InvalidLength,
+
+ /// The mapping would cover an invalid virtual address.
+ InvalidVAddr,
+
+ /// The mapping would overlap with an existing mapping or guard page.
+ MappingAlreadyExisted,
+
+ /// The mapping's physical address isn't aligned.
+ MisalignedPAddr,
+
+ /// The mapping's virtual address isn't aligned.
+ MisalignedVAddr,
+}
+
+impl From<AllocError> for MapError {
+ fn from(AllocError: AllocError) -> Self {
+ MapError::AllocError
+ }
+}
+
+/// The type of address-space IDs. If a target does not have ASIDs, this may be a ZST.
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct ASID(arch::paging::ASID);
+
+impl ASID {
+ /// The kernel's ASID.
+ pub const KERNEL: ASID = ASID(arch::paging::ASID::KERNEL);
+}