use crate::paging::{BuddyAllocator, MapError, MappingFlags, PAGE_SIZE, PAGE_SIZE_BITS};
use contracts::requires;
use core::{arch::asm, fmt, iter, ops::RangeInclusive, ptr::NonNull, str};
use either::Either;

/// One past the largest address in "low memory."
pub const LOMEM_TOP: usize = 0x0000_0040_0000_0000;

/// The smallest address in "high memory."
pub const HIMEM_BOT: usize = 0xffff_ffc0_0000_0000;

/// The number of possible page sizes.
pub const PAGE_SIZE_COUNT: usize = 3;

/// The `log2`s of the possible page sizes, from largest to smallest.
pub const PAGE_SIZES_BITS: [usize; PAGE_SIZE_COUNT] = [30, 21, 12];

/// The possible page sizes, from largest to smallest.
pub const PAGE_SIZES: [usize; PAGE_SIZE_COUNT] = [1 << 30, 1 << 21, 1 << 12];

/// The number of virtual address bits used to index each level of page table.
pub const PAGE_TABLE_BITS: usize = 9;

/// The number of levels of page tables.
pub const PAGE_TABLE_LEVELS: usize = 3;
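// Not part of the original module: a minimal sketch of how the constants above decompose an
// Sv39 virtual address. Each level of page table is indexed by `PAGE_TABLE_BITS` bits of the
// virtual address, taken just above that level's page-size shift (`PAGE_SIZES_BITS`); this is
// the same indexing that `PageTable::map` performs below. The helper name is hypothetical.
#[allow(dead_code)]
fn example_vpn_for_size_class(vaddr: usize, page_size_class: usize) -> usize {
    (vaddr >> PAGE_SIZES_BITS[page_size_class]) & ((1 << PAGE_TABLE_BITS) - 1)
}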
/// An address space ID.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ASID(u16);

impl ASID {
    /// The kernel's ASID.
    pub const KERNEL: ASID = ASID(0);
}

/// A single page table.
#[derive(Debug)]
#[repr(align(4096))]
pub struct PageTable([PageTableEntry; 1 << PAGE_TABLE_BITS]);

impl PageTable {
    /// Sets the page table whose _physical_ address is `page_table` as the current root page
    /// table.
    ///
    /// This performs appropriate invalidation or fencing as required by the platform, but does
    /// _not_ perform a TLB shootdown.
    ///
    /// # Safety
    ///
    /// - All the safety conditions that would apply for setting `satp` and issuing an
    ///   `sfence.vma`.
    /// - The page table must not be dropped while it is the current root page table!
    #[requires((asid.0 & !0xfff) == 0)]
    #[requires(((page_table.as_ptr() as usize) & 0xff00_0000_0000_0fff) == 0)]
    #[inline(never)]
    pub unsafe fn make_current(page_table: NonNull<PageTable>, asid: ASID) {
        let mode = 8; // Sv39
        let addr = page_table.as_ptr() as usize as u64;
        let satp = (mode << 60) | ((asid.0 as u64) << 44) | (addr >> 12);
        asm!("sfence.vma", "csrw satp, {satp}", "sfence.vma", satp = in(reg) satp)
    }

    /// Iterates over shared references to the entries in this page table.
    pub fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
        self.0.iter()
    }

    /// Iterates over exclusive references to the entries in this page table.
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
        self.0.iter_mut()
    }

    /// Iterates over the valid mappings. Each item is a triple of virtual address range, physical
    /// address range, and entry.
    pub fn iter_mappings(
        &self,
    ) -> impl '_ + Iterator<Item = (RangeInclusive<usize>, RangeInclusive<usize>, PageTableEntry)>
    {
        iter_level_2_mappings(&self.0, 0).map(|(entry, vaddrs)| {
            let len = vaddrs.end() - vaddrs.start();
            let paddrs_start = entry.addr() as usize;
            let paddrs = paddrs_start..=paddrs_start + len;
            (vaddrs, paddrs, entry)
        })
    }

    /// Attempts to add a mapping of a particular size to the page tables. This does not issue an
    /// `SFENCE.VMA`, nor a TLB shootdown.
    ///
    /// This may require allocating new intermediate page tables, so it may fail to allocate
    /// memory.
    pub fn map(
        &mut self,
        buddy_allocator: &mut BuddyAllocator,
        vaddr: usize,
        paddr: usize,
        len: usize,
        flags: MappingFlags,
    ) -> Result<(), MapError> {
        // Ignore the bits of the flags that we don't want to set.
        let flags = MappingFlags::from_bits(flags.bits())
            .ok_or(MapError::InvalidFlags)?
            .difference(MappingFlags::A | MappingFlags::D);

        // Check that at least one permission bit was set in the flags.
        if !flags.intersects(MappingFlags::R | MappingFlags::W | MappingFlags::X) {
            return Err(MapError::InvalidFlagPermissions);
        }

        // Check that the page size is valid, and that the physical and virtual addresses are
        // aligned for it.
        let page_size_class = PAGE_SIZES
            .iter()
            .position(|&page_size| page_size == len)
            .ok_or(MapError::InvalidLength)?;
        let page_size_bits = PAGE_SIZES_BITS[page_size_class];
        if paddr & (len - 1) != 0 {
            return Err(MapError::MisalignedPAddr);
        }
        if vaddr & (len - 1) != 0 {
            return Err(MapError::MisalignedVAddr);
        }

        // Check that the vaddr isn't in the "dead zone" between lomem and himem.
        if (LOMEM_TOP..HIMEM_BOT).contains(&vaddr) {
            return Err(MapError::InvalidVAddr);
        }

        // If the virtual address is in himem, move it down to hide the dead zone. This is wrong,
        // but makes the math simpler.
        let vaddr = if vaddr >= HIMEM_BOT {
            vaddr - (HIMEM_BOT - LOMEM_TOP)
        } else {
            vaddr
        };

        // Traverse the page tables, so that an entry in the table referenced by `page_table` is
        // the right size class of page. This may involve allocating new page tables.
        //
        // TODO: Can we deallocate these if an error occurs later?
        let mut page_table = self;
        for page_size_bits in PAGE_SIZES_BITS.iter().take(page_size_class) {
            let entry_slot =
                &mut page_table.0[(vaddr >> page_size_bits) & ((1 << PAGE_TABLE_BITS) - 1)];
            if !entry_slot.valid() {
                // Allocate a new page table.
                let next_page_table = buddy_allocator.alloc_zeroed::<PageTable>()?;
                let mut entry = PageTableEntry::default();
                entry
                    .set_valid(true)
                    .set_addr(next_page_table.as_ptr() as u64);
                *entry_slot = entry;
            }
            if entry_slot.leaf_pte() {
                return Err(MapError::MappingAlreadyExisted);
            }

            // UNSAFE, UNWRAP: We maintain the invariant that all entries marked valid actually
            // are valid.
            page_table = unsafe { entry_slot.page_table().as_mut().unwrap() };
        }

        // Find the entry that we need to set, making sure it's not already occupied.
        let entry_slot =
            &mut page_table.0[(vaddr >> page_size_bits) & ((1 << PAGE_TABLE_BITS) - 1)];
        if entry_slot.valid() {
            return Err(MapError::MappingAlreadyExisted);
        }

        // Otherwise, put the entry in.
        let mut entry = PageTableEntry::default();
        entry
            .set_valid(true)
            .set_readable(flags.contains(MappingFlags::R))
            .set_writable(flags.contains(MappingFlags::W))
            .set_executable(flags.contains(MappingFlags::X))
            .set_user(flags.contains(MappingFlags::U))
            .set_addr(paddr as u64);
        *entry_slot = entry;
        Ok(())
    }
}
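// Not part of the original module: a minimal usage sketch for `PageTable::map`, identity-mapping
// a single 2 MiB page as readable and writable. The helper name is hypothetical, and it assumes
// `MappingFlags` is a bitflags-style type (it exposes `bits`, `contains`, and `difference`
// above), so `R | W` composes.
#[allow(dead_code)]
fn example_identity_map_2mib(
    root: &mut PageTable,
    buddy_allocator: &mut BuddyAllocator,
    addr: usize,
) -> Result<(), MapError> {
    // `addr` must be aligned to PAGE_SIZES[1] (2 MiB); `map` rejects misaligned addresses.
    root.map(
        buddy_allocator,
        addr,
        addr,
        PAGE_SIZES[1],
        MappingFlags::R | MappingFlags::W,
    )
}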
/// An entry in a page table.
#[derive(Clone, Copy, Default, Eq, PartialEq)]
pub struct PageTableEntry(u64);

impl PageTableEntry {
    /// The value that `crate::paging::MappingFlags::R` should have.
    pub const FLAG_R: usize = 0b00000010;

    /// The value that `crate::paging::MappingFlags::W` should have.
    pub const FLAG_W: usize = 0b00000100;

    /// The value that `crate::paging::MappingFlags::X` should have.
    pub const FLAG_X: usize = 0b00001000;

    /// The value that `crate::paging::MappingFlags::U` should have.
    pub const FLAG_U: usize = 0b00010000;

    /// The value that `crate::paging::MappingFlags::A` should have.
    pub const FLAG_A: usize = 0b01000000;

    /// The value that `crate::paging::MappingFlags::D` should have.
    pub const FLAG_D: usize = 0b10000000;

    /// Returns the physical page number of the backing page or next level page table.
    #[requires(self.valid())]
    #[ensures((ret & !0x0000_0fff_ffff_ffff) == 0)]
    fn ppn(&self) -> u64 {
        (self.0 >> 10) & 0x0000_0fff_ffff_ffff
    }

    /// Returns whether the flag bits of the entry match the other entry's.
    #[requires(self.valid())]
    #[requires(other.valid())]
    pub fn flag_bits_eq(&self, other: &PageTableEntry) -> bool {
        let lhs = self.0 & 0xffc0_0000_0000_03ff;
        let rhs = other.0 & 0xffc0_0000_0000_03ff;
        lhs == rhs
    }

    /// Returns bytes that correspond to an ASCII string with the flags.
    #[requires(self.valid())]
    #[ensures(ret.iter().all(|ch| ch.is_ascii()))]
    pub fn flags_str_bytes(&self) -> [u8; 7] {
        let mut flags = *b"rwxugad";
        let char_disabled = b'-';
        if !self.readable() {
            flags[0] = char_disabled;
        }
        if !self.writable() {
            flags[1] = char_disabled;
        }
        if !self.executable() {
            flags[2] = char_disabled;
        }
        if !self.user() {
            flags[3] = char_disabled;
        }
        if !self.global() {
            flags[4] = char_disabled;
        }
        if !self.accessed() {
            flags[5] = char_disabled;
        }
        if !self.dirty() {
            flags[6] = char_disabled;
        }
        flags
    }

    /// Returns the physical address of the backing page or next level page table.
    #[requires(self.valid())]
    #[ensures((ret & !0x003f_ffff_ffff_fc00) == 0)]
    pub fn addr(&self) -> u64 {
        self.ppn() << PAGE_SIZE_BITS
    }

    /// Returns a pointer to the backing page.
    #[requires(self.valid())]
    #[requires(self.leaf_pte())]
    #[ensures((ret as usize & !0x003f_ffff_ffff_fc00) == 0)]
    pub fn page(&self) -> *mut [u8; PAGE_SIZE] {
        self.addr() as *mut [u8; PAGE_SIZE]
    }

    /// Returns a pointer to the backing page table.
    #[requires(self.valid())]
    #[requires(!self.leaf_pte())]
    #[ensures((ret as usize & !0x003f_ffff_ffff_fc00) == 0)]
    pub fn page_table(&self) -> *mut PageTable {
        self.addr() as *mut PageTable
    }

    /// Sets the physical address of the backing page or next level page table.
    #[requires(self.valid())]
    #[requires((addr & !0x003f_ffff_ffff_fc00) == 0)]
    pub fn set_addr(&mut self, addr: u64) -> &mut PageTableEntry {
        let ppn = addr >> 12;
        self.0 &= !0x003f_ffff_ffff_fc00;
        self.0 |= ppn << 10;
        self
    }

    /// Returns whether the dirty bit is set.
    #[requires(self.valid())]
    pub fn dirty(&self) -> bool {
        (self.0 & (1 << 7)) != 0
    }

    /// Sets the dirty bit.
    #[requires(self.valid())]
    pub fn set_dirty(&mut self, dirty: bool) -> &mut PageTableEntry {
        self.0 &= !0b10000000;
        self.0 |= (dirty as u64) << 7;
        self
    }

    /// Returns whether the accessed bit is set.
    #[requires(self.valid())]
    pub fn accessed(&self) -> bool {
        (self.0 & (1 << 6)) != 0
    }

    /// Sets the accessed bit.
    #[requires(self.valid())]
    pub fn set_accessed(&mut self, accessed: bool) -> &mut PageTableEntry {
        self.0 &= !0b01000000;
        self.0 |= (accessed as u64) << 6;
        self
    }

    /// Returns whether the global bit is set.
    #[requires(self.valid())]
    pub fn global(&self) -> bool {
        (self.0 & (1 << 5)) != 0
    }

    /// Sets the global bit.
    #[requires(self.valid())]
    pub fn set_global(&mut self, global: bool) -> &mut PageTableEntry {
        self.0 &= !0b00100000;
        self.0 |= (global as u64) << 5;
        self
    }

    /// Returns whether the user bit is set.
    #[requires(self.valid())]
    pub fn user(&self) -> bool {
        (self.0 & (1 << 4)) != 0
    }

    /// Sets the user bit.
    #[requires(self.valid())]
    pub fn set_user(&mut self, user: bool) -> &mut PageTableEntry {
        self.0 &= !0b00010000;
        self.0 |= (user as u64) << 4;
        self
    }

    /// Returns whether the executable bit is set.
    #[requires(self.valid())]
    pub fn executable(&self) -> bool {
        (self.0 & (1 << 3)) != 0
    }

    /// Sets the executable bit.
    #[requires(self.valid())]
    pub fn set_executable(&mut self, executable: bool) -> &mut PageTableEntry {
        self.0 &= !0b00001000;
        self.0 |= (executable as u64) << 3;
        self
    }

    /// Returns whether the writable bit is set.
    #[requires(self.valid())]
    pub fn writable(&self) -> bool {
        (self.0 & (1 << 2)) != 0
    }

    /// Sets the writable bit.
    #[requires(self.valid())]
    pub fn set_writable(&mut self, writable: bool) -> &mut PageTableEntry {
        self.0 &= !0b00000100;
        self.0 |= (writable as u64) << 2;
        self
    }

    /// Returns whether the readable bit is set.
    #[requires(self.valid())]
    pub fn readable(&self) -> bool {
        (self.0 & (1 << 1)) != 0
    }

    /// Sets the readable bit.
    #[requires(self.valid())]
    pub fn set_readable(&mut self, readable: bool) -> &mut PageTableEntry {
        self.0 &= !0b00000010;
        self.0 |= (readable as u64) << 1;
        self
    }

    /// Returns whether the page table entry is for a leaf PTE.
    #[requires(self.valid())]
    pub fn leaf_pte(&self) -> bool {
        (self.0 & 0b1110) != 0
    }

    /// Sets the readable, writable, and executable bits at once.
    #[requires(self.valid())]
    pub fn set_rwx(
        &mut self,
        readable: bool,
        writable: bool,
        executable: bool,
    ) -> &mut PageTableEntry {
        self.set_readable(readable)
            .set_writable(writable)
            .set_executable(executable)
    }

    /// Returns whether the valid bit is set.
    pub fn valid(&self) -> bool {
        (self.0 & (1 << 0)) != 0
    }

    /// Sets the valid bit.
    pub fn set_valid(&mut self, valid: bool) -> &mut PageTableEntry {
        self.0 &= !0b00000001;
        self.0 |= valid as u64;
        self
    }
}
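// Not part of the original module: a minimal sketch of how the builder-style setters above
// compose into a leaf PTE. In the Sv39 encoding reflected by this type, V/R/W/X/U/G/A/D occupy
// bits 0 through 7 and the PPN occupies bits 10 through 53; e.g. an R+W+X leaf backed by
// physical address 0x8020_0000 encodes as 0x2008_000f. The helper name is hypothetical.
#[allow(dead_code)]
fn example_leaf_pte(paddr: u64) -> PageTableEntry {
    let mut entry = PageTableEntry::default();
    // `set_addr` expects a page-aligned physical address below 2^54.
    entry.set_valid(true).set_rwx(true, true, true).set_addr(paddr);
    entry
}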
impl fmt::Debug for PageTableEntry {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        if self.valid() {
            let addr = self.addr() as *const ();
            let flags = self.flags_str_bytes();
            // UNWRAP: The flags must be ASCII by the postcondition of flags_str_bytes().
            let flags = str::from_utf8(&flags).unwrap();
            write!(fmt, "PageTableEntry({addr:018p}, {flags})")
        } else {
            write!(fmt, "PageTableEntry({:#018x}, INVALID)", self.0)
        }
    }
}

/// See `PageTable::iter_mappings`. This needs to be its own function because of `impl Trait`; we
/// can't allocate here, and we want a fixed-size iterator.
fn iter_level_2_mappings(
    table: &[PageTableEntry; 1 << PAGE_TABLE_BITS],
    base_addr: usize,
) -> impl '_ + Iterator<Item = (PageTableEntry, RangeInclusive<usize>)> {
    const ENTRY_SIZE: usize = 1 << (12 + 9 + 9);
    table
        .iter()
        .enumerate()
        .filter(|(_, entry)| entry.valid())
        .flat_map(move |(i, &entry)| {
            let mut start_addr = base_addr + i * ENTRY_SIZE;
            if i >= 256 {
                start_addr += 0xffff_ff80_0000_0000;
            }
            if entry.leaf_pte() {
                Either::Left(iter::once((
                    entry,
                    start_addr..=start_addr + (ENTRY_SIZE - 1),
                )))
            } else {
                let next_table = unsafe { &(*entry.page_table()).0 };
                Either::Right(iter_level_1_mappings(next_table, start_addr))
            }
        })
}

/// See `PageTable::iter_mappings`. This needs to be its own function because of `impl Trait`; we
/// can't allocate here, and we want a fixed-size iterator.
fn iter_level_1_mappings(
    table: &[PageTableEntry; 1 << PAGE_TABLE_BITS],
    base_addr: usize,
) -> impl '_ + Iterator<Item = (PageTableEntry, RangeInclusive<usize>)> {
    const ENTRY_SIZE: usize = 1 << (12 + 9);
    table
        .iter()
        .enumerate()
        .filter(|(_, entry)| entry.valid())
        .flat_map(move |(i, &entry)| {
            let start_addr = base_addr + i * ENTRY_SIZE;
            if entry.leaf_pte() {
                Either::Left(iter::once((
                    entry,
                    start_addr..=start_addr + (ENTRY_SIZE - 1),
                )))
            } else {
                let next_table = unsafe { &(*entry.page_table()).0 };
                Either::Right(iter_level_0_mappings(next_table, start_addr))
            }
        })
}

/// See `PageTable::iter_mappings`. This needs to be its own function because of `impl Trait`; we
/// can't allocate here, and we want a fixed-size iterator.
fn iter_level_0_mappings(
    table: &[PageTableEntry; 1 << PAGE_TABLE_BITS],
    base_addr: usize,
) -> impl '_ + Iterator<Item = (PageTableEntry, RangeInclusive<usize>)> {
    const ENTRY_SIZE: usize = 1 << 12;
    table
        .iter()
        .enumerate()
        .filter(|(_, entry)| entry.valid())
        .map(move |(i, &entry)| {
            let start_addr = base_addr + i * ENTRY_SIZE;
            (entry, start_addr..=start_addr + (ENTRY_SIZE - 1))
        })
}
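// Not part of the original module: a minimal consumption sketch for `PageTable::iter_mappings`,
// walking every valid mapping in a root table and handing each (virtual range, physical range,
// entry) triple to a caller-supplied callback. The callback-based shape is hypothetical; a real
// kernel would more likely log or render the ranges directly.
#[allow(dead_code)]
fn example_walk_mappings(
    root: &PageTable,
    mut visit: impl FnMut(RangeInclusive<usize>, RangeInclusive<usize>, PageTableEntry),
) {
    for (vaddrs, paddrs, entry) in root.iter_mappings() {
        visit(vaddrs, paddrs, entry);
    }
}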