//! An architecture-independent interface to the page tables.
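//!
//! # Example
//!
//! A sketch of the intended flow (the `buddy` allocator and both addresses are hypothetical):
//!
//! ```ignore
//! let mut root = PageTable::new_in(&mut buddy)?;
//! // Map one regular-sized page, readable and writable.
//! let table = unsafe { root.as_mut() };
//! table.map(
//!     &mut buddy,
//!     0xffff_ffc0_0000_0000, // vaddr
//!     0x8000_0000,           // paddr
//!     PAGE_SIZE,
//!     MappingFlags::R | MappingFlags::W,
//! )?;
//! // SAFETY: No live references to remapped pages exist.
//! unsafe { PageTable::make_current(root, ASID::KERNEL) };
//! ```
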
use crate::arch;
pub use crate::arch::paging::{HIMEM_BOT, LOMEM_TOP, PAGE_SIZES, PAGE_SIZES_BITS, PAGE_SIZE_COUNT};
use allocator_api2::alloc::AllocError;
use core::{
    fmt, iter,
    ptr::{addr_of_mut, NonNull},
    str,
};
use vernos_utils::debug;

/// The number of bits in the size of a regular-sized page of memory.
pub const PAGE_SIZE_BITS: usize = PAGE_SIZES_BITS[PAGE_SIZE_COUNT - 1];

/// The size of a regular-sized page of memory.
pub const PAGE_SIZE: usize = 1 << PAGE_SIZE_BITS;

/// The number of bits in the size of the largest huge page.
pub const MAX_PAGE_SIZE_BITS: usize = PAGE_SIZES_BITS[0];

/// The buddy allocator, specialized for the details of our paging subsystem.
pub type BuddyAllocator = vernos_alloc_buddy::BuddyAllocator<
    'static,
    PAGE_SIZE,
    PAGE_SIZE_BITS,
    { 1 + MAX_PAGE_SIZE_BITS - PAGE_SIZE_BITS },
>;

/// A wrapper for the root page table, providing an architecture-independent interface to it.
///
/// This should be behind a pointer.
pub struct PageTable(arch::paging::PageTable);

impl PageTable {
    /// Allocates a new page table in pages retrieved from the given buddy allocator.
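    ///
    /// # Example
    ///
    /// A sketch, assuming a `BuddyAllocator` named `buddy` is available:
    ///
    /// ```ignore
    /// let root: NonNull<PageTable> = PageTable::new_in(&mut buddy)?;
    /// ```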
    pub fn new_in(buddy_allocator: &mut BuddyAllocator) -> Result<NonNull<PageTable>, AllocError> {
        buddy_allocator.alloc_zeroed::<PageTable>()
    }

    /// Sets the page table whose _physical_ address is `page_table` as the current root page
    /// table.
    ///
    /// This performs appropriate invalidation or fencing as required by the platform, but does
    /// _not_ perform a TLB shootdown.
    ///
    /// # Safety
    ///
    /// - There must not be any live references to pages that are no longer mapped or are mapped
    ///   differently in the new page table.
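    ///
    /// # Example
    ///
    /// A sketch (`root` is a hypothetical table from [`PageTable::new_in`]; the caller must
    /// uphold the safety contract above):
    ///
    /// ```ignore
    /// unsafe { PageTable::make_current(root, ASID::KERNEL) };
    /// ```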
    pub unsafe fn make_current(page_table: NonNull<PageTable>, asid: ASID) {
        let page_table = NonNull::new_unchecked(addr_of_mut!((*page_table.as_ptr()).0));
        arch::paging::PageTable::make_current(page_table, asid.0)
    }

    /// Attempts to add a mapping of a particular size to the page tables.
    ///
    /// This may require allocating new intermediate page tables, so it may fail to allocate
    /// memory.
    ///
    /// TODO: Fences and shootdowns?
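    ///
    /// # Example
    ///
    /// A sketch (the addresses and the `buddy` allocator are hypothetical; both addresses must
    /// be aligned and `len` must be a page size supported for the range):
    ///
    /// ```ignore
    /// table.map(
    ///     &mut buddy,
    ///     0xffff_ffc0_0000_0000, // vaddr
    ///     0x8000_0000,           // paddr
    ///     PAGE_SIZE,
    ///     MappingFlags::R | MappingFlags::W,
    /// )?;
    /// ```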
    pub fn map(
        &mut self,
        buddy_allocator: &mut BuddyAllocator,
        vaddr: usize,
        paddr: usize,
        len: usize,
        flags: MappingFlags,
    ) -> Result<(), MapError> {
        self.0.map(buddy_allocator, vaddr, paddr, len, flags)
    }

    /// Returns an iterator over `Debug`s that show the mappings in this page table.
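    ///
    /// # Example
    ///
    /// A sketch (`log::info!` stands in for whatever logging facility the kernel uses):
    ///
    /// ```ignore
    /// for mapping in table.debug_mappings() {
    ///     log::info!("{:?}", mapping);
    /// }
    /// ```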
    pub fn debug_mappings(&self) -> impl '_ + Iterator<Item = impl fmt::Debug> {
        // Get an iterator over the valid leaf page table entries.
        let mut mappings = self.0.iter_mappings().peekable();

        // Make an iterator that merges adjacent entries that have the same flags.
        let merged_mappings = iter::from_fn(move || {
            let (mut vaddrs, mut paddrs, entry) = mappings.next()?;
            while let Some((next_vaddrs, next_paddrs, next_entry)) = mappings.peek() {
                // We use .checked_add() instead of .wrapping_add() because we _don't_ want to
                // have ranges that wrap around.
                if !entry.flag_bits_eq(next_entry)
                    || vaddrs.end().checked_add(1) != Some(*next_vaddrs.start())
                    || paddrs.end().checked_add(1) != Some(*next_paddrs.start())
                {
                    break;
                }
                // UNWRAP: .peek() already showed us that there's a next entry.
                let (next_vaddrs, next_paddrs, _) = mappings.next().unwrap();
                vaddrs = *vaddrs.start()..=*next_vaddrs.end();
                paddrs = *paddrs.start()..=*next_paddrs.end();
            }
            Some((vaddrs, paddrs, entry))
        });

        // Turn the iterator into an iterator over Debugs.
        merged_mappings.map(|(vaddrs, paddrs, entry)| {
            debug(move |fmt| {
                let flags = entry.flags_str_bytes();
                // UNWRAP: The flags must be ASCII by the postcondition of flags_str_bytes().
                let flags = str::from_utf8(&flags).unwrap();
                write!(
                    fmt,
                    "[V|{:16x}-{:16x}][P|{:16x}-{:16x}][F|{}]",
                    *vaddrs.start(),
                    *vaddrs.end(),
                    *paddrs.start(),
                    *paddrs.end(),
                    flags
                )
            })
        })
    }
}

impl fmt::Debug for PageTable {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_list().entries(self.debug_mappings()).finish()
    }
}

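// SAFETY (assumed rationale): a `PageTable` holds only page-table entry data, with no
// thread-affine state, so it is sound to send it to another thread.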
unsafe impl Send for PageTable {}

bitflags::bitflags! {
    /// The flags associated with a mapping.
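    ///
    /// # Example
    ///
    /// A sketch of composing and testing flags:
    ///
    /// ```ignore
    /// let rw = MappingFlags::R | MappingFlags::W;
    /// assert!(rw.contains(MappingFlags::R));
    /// assert!(!rw.contains(MappingFlags::X));
    /// ```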
    #[derive(Clone, Copy, Debug)]
    pub struct MappingFlags: usize {
        /// Whether the mapping is readable.
        const R = arch::paging::PageTableEntry::FLAG_R;

        /// Whether the mapping is writable.
        const W = arch::paging::PageTableEntry::FLAG_W;

        /// Whether the mapping is executable.
        const X = arch::paging::PageTableEntry::FLAG_X;

        /// Whether the mapping is accessible to userspace or to kernelspace. Note that a
        /// mapping that is accessible to one is not necessarily accessible to the other.
        const U = arch::paging::PageTableEntry::FLAG_U;

        /// Whether the mapping has been read from since it was last set. This is ignored by
        /// `PageTable::map`, but may be returned by `PageTable::get_mapping`.
        const A = arch::paging::PageTableEntry::FLAG_A;

        /// Whether the mapping has been written to since it was last set. This is ignored by
        /// `PageTable::map`, but may be returned by `PageTable::get_mapping`.
        const D = arch::paging::PageTableEntry::FLAG_D;
    }
}

/// An error creating a mapping.
#[derive(Debug)]
pub enum MapError {
    /// A failure to allocate memory to store the page table in.
    AllocError,
    /// An unknown flag bit was set.
    InvalidFlags,
    /// None of `R`, `W`, or `X` were set.
    InvalidFlagPermissions,
    /// The length of the mapping is not supported for this virtual address range.
    InvalidLength,
    /// The mapping would cover an invalid virtual address.
    InvalidVAddr,
    /// The mapping would overlap with an existing mapping or guard page.
    MappingAlreadyExisted,
    /// The mapping's physical address isn't aligned.
    MisalignedPAddr,
    /// The mapping's virtual address isn't aligned.
    MisalignedVAddr,
}

impl From<AllocError> for MapError {
    fn from(AllocError: AllocError) -> Self {
        MapError::AllocError
    }
}

/// The type of address-space IDs. If a target does not have ASIDs, this may be a zero-sized
/// type (ZST).
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ASID(arch::paging::ASID);

impl ASID {
    /// The kernel's ASID.
    pub const KERNEL: ASID = ASID(arch::paging::ASID::KERNEL);
}