author     Nathan Ringo <nathan@remexre.com>    2024-09-12 18:14:13 -0500
committer  Nathan Ringo <nathan@remexre.com>    2024-09-12 18:14:13 -0500
commit     ec991590e4e3b92e407060410ff33525dc740988 (patch)
tree       287c2fa46f9814ce804c2dde60aa37e5823762cb
parent     128aa9bb0f0cd1410a30d45775e7627b01ed74a2 (diff)
Adds the start of a general-purpose allocator, and some page table maintenance.
-rw-r--r--  crates/Cargo.lock                         |  16
-rw-r--r--  crates/Cargo.toml                         |   2
-rw-r--r--  crates/alloc_buddy/Cargo.toml             |   1
-rw-r--r--  crates/alloc_genmalloc/Cargo.toml         |   9
-rw-r--r--  crates/alloc_genmalloc/src/lib.rs         | 135
-rw-r--r--  crates/kernel/Cargo.toml                  |   1
-rw-r--r--  crates/kernel/src/arch/riscv64/paging.rs  | 197
7 files changed, 324 insertions, 37 deletions
diff --git a/crates/Cargo.lock b/crates/Cargo.lock
index 41384f7..3f75ce1 100644
--- a/crates/Cargo.lock
+++ b/crates/Cargo.lock
@@ -308,12 +308,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
[[package]]
-name = "static_assertions"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
-
-[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -338,12 +332,19 @@ dependencies = [
"contracts",
"nix",
"proptest",
- "static_assertions",
"vernos_alloc_physmem_free_list",
"vernos_utils",
]
[[package]]
+name = "vernos_alloc_genmalloc"
+version = "0.1.0"
+dependencies = [
+ "allocator-api2",
+ "contracts",
+]
+
+[[package]]
name = "vernos_alloc_physmem_free_list"
version = "0.1.0"
dependencies = [
@@ -372,6 +373,7 @@ version = "0.1.0"
dependencies = [
"cfg-if",
"contracts",
+ "either",
"log",
"spin",
"vernos_alloc_buddy",
diff --git a/crates/Cargo.toml b/crates/Cargo.toml
index dd1f42a..10aeec4 100644
--- a/crates/Cargo.toml
+++ b/crates/Cargo.toml
@@ -1,5 +1,5 @@
[workspace]
-members = ["alloc_buddy", "alloc_physmem_free_list", "device_tree", "driver_riscv_timer", "kernel", "utils"]
+members = ["alloc_buddy", "alloc_genmalloc", "alloc_physmem_free_list", "device_tree", "driver_riscv_timer", "kernel", "utils"]
resolver = "2"
[profile.release]
diff --git a/crates/alloc_buddy/Cargo.toml b/crates/alloc_buddy/Cargo.toml
index 43e1cee..3d58409 100644
--- a/crates/alloc_buddy/Cargo.toml
+++ b/crates/alloc_buddy/Cargo.toml
@@ -7,7 +7,6 @@ publish = false
[dependencies]
allocator-api2 = { version = "0.2.18", default-features = false }
contracts = { version = "0.6.3", default-features = false }
-static_assertions = { version = "1.1.0", default-features = false }
vernos_alloc_physmem_free_list = { path = "../alloc_physmem_free_list" }
vernos_utils = { path = "../utils" }
diff --git a/crates/alloc_genmalloc/Cargo.toml b/crates/alloc_genmalloc/Cargo.toml
new file mode 100644
index 0000000..1099020
--- /dev/null
+++ b/crates/alloc_genmalloc/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "vernos_alloc_genmalloc"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+allocator-api2 = { version = "0.2.18", default-features = false }
+contracts = { version = "0.6.3", default-features = false }
diff --git a/crates/alloc_genmalloc/src/lib.rs b/crates/alloc_genmalloc/src/lib.rs
new file mode 100644
index 0000000..06bc667
--- /dev/null
+++ b/crates/alloc_genmalloc/src/lib.rs
@@ -0,0 +1,135 @@
+//! A general-purpose allocator, based on the design of mimalloc [0, 1].
+//!
+//! [0]: https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action/
+//! [1]: https://github.com/microsoft/mimalloc
+#![no_std]
+
+use allocator_api2::alloc::AllocError;
+use contracts::requires;
+use core::{
+ ptr::NonNull,
+ sync::atomic::{AtomicPtr, AtomicU16, Ordering},
+};
+
+/// The per-CPU structure.
+struct Heap {
+ /// Small objects (<=1024 bytes) have pointers directly to the page they allocate into. These
+ /// are non-owning pointers.
+ pages_direct: [NonNull<Page>; 128],
+
+ /// The sentinel nodes for the circular doubly-linked lists that compose the size classes, up
+ /// to and including 512KiB, plus one more for objects over 512KiB.
+ pages: [PageLink; 18],
+}
+
+impl Heap {
+ #[requires(size > 0)]
+ #[requires(size <= 1024)]
+ fn alloc_small(&mut self, size: usize) -> Result<NonNull<u8>, AllocError> {
+ let mut page = self.pages_direct[(size - 1) >> 3];
+ let page = unsafe { page.as_mut() };
+ match page.header.alloc_list.as_mut() {
+ Some(&mut block) => {
+ let next = unsafe { block.as_ref() }.next.load(Ordering::SeqCst);
+ page.header.alloc_list = NonNull::new(next);
+ page.header.used.fetch_add(1, Ordering::SeqCst);
+ Ok(block.cast())
+ }
+ None => self.alloc_generic(size),
+ }
+ }
+
+ fn alloc_generic(&mut self, size: usize) -> Result<NonNull<u8>, AllocError> {
+ let size_class = size_class(size);
+ let sentinel: NonNull<PageLink> = NonNull::from(&self.pages[size_class]);
+ let mut page = sentinel;
+ loop {
+ page = unsafe { (*page.as_ptr()).next.unwrap() };
+
+ todo!("{page:#?}")
+ }
+ todo!()
+ }
+}
+
+/// Returns the index in the `pages` array of the size class for objects of the given size.
+fn size_class(size: usize) -> usize {
+ // Clamp the size to the range the bitwise trick will work on.
+ let size = size.clamp(8, 1024 * 1024);
+
+ // Figure out the size class.
+ (size.next_power_of_two().trailing_zeros() - 3) as usize
+}
+
+#[test]
+fn expected_size_class_values() {
+ assert_eq!(size_class(1), 0);
+ assert_eq!(size_class(8), 0);
+
+ let mut size = 8;
+ let mut expected = 0;
+ while size <= 512 * 1024 {
+ assert_eq!(size_class(size), expected);
+ size <<= 1;
+ expected += 1;
+ }
+
+ assert_eq!(size_class(512 * 1024), 16);
+ assert_eq!(size_class((512 * 1024) + 1), 17);
+ assert_eq!(size_class(4 * 1024 * 1024), 17);
+}
+
+/// A page is a block of memory that contains objects of a single size class.
+#[repr(C)]
+struct Page {
+ /// The intrusive link in the circular doubly-linked list of pages of each size class.
+ link: PageLink,
+
+ /// The fields other than the intrusive link in a page's header.
+ header: PageHeader,
+}
+
+/// The intrusive link in the circular doubly-linked list of pages of each size class.
+struct PageLink {
+ /// The previous page.
+ prev: Option<NonNull<PageLink>>,
+
+ /// The next page.
+ next: Option<NonNull<PageLink>>,
+}
+
+/// The fields other than the intrusive link in a page's header.
+struct PageHeader {
+ /// The free list that the owning thread pops from.
+ alloc_list: Option<NonNull<Block>>,
+
+ /// The free list that the owning thread pushes onto.
+ local_free_list: Option<NonNull<Block>>,
+
+ /// The free list that other threads push onto.
+ thread_free_list: AtomicPtr<Block>,
+
+ /// The number of objects in the page that have been allocated.
+ used: AtomicU16,
+
+ /// The number of objects in the thread free list.
+ thread_freed: AtomicU16,
+}
+
+/// An object in a free list.
+struct Block {
+ next: AtomicPtr<Block>,
+}
+
+fn atomic_push(node: &mut Block, list: &AtomicPtr<Block>) {
+ loop {
+ let old_list = list.load(Ordering::Acquire);
+ node.next = AtomicPtr::from(old_list);
+ if list
+ .compare_exchange(old_list, node, Ordering::Release, Ordering::Acquire)
+ .is_ok()
+ {
+ break;
+ }
+ }
+}
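The size classes above are power-of-two buckets: `size_class` rounds a size up to the next power of two and subtracts 3 from the exponent, so 8 bytes lands in class 0 and 512KiB in class 16, with class 17 catching everything larger. A minimal standalone sketch (plain Rust, not part of the crate; `direct_index` is a hypothetical helper mirroring the `pages_direct` lookup in `alloc_small`):

/// Maps a size in bytes to an index into `pages` (same body as in the diff):
/// 8 -> 0, 16 -> 1, ..., 512 KiB -> 16, anything larger -> 17.
fn size_class(size: usize) -> usize {
    let size = size.clamp(8, 1024 * 1024);
    (size.next_power_of_two().trailing_zeros() - 3) as usize
}

/// Maps a small size (1..=1024) to an index into `pages_direct`, one slot
/// per 8-byte step: 1..=8 -> 0, 9..=16 -> 1, ..., 1017..=1024 -> 127.
fn direct_index(size: usize) -> usize {
    debug_assert!(0 < size && size <= 1024);
    (size - 1) >> 3
}

fn main() {
    assert_eq!(size_class(24), 2); // 24 rounds up to 32 = 2^5, and 5 - 3 = 2
    assert_eq!(size_class(512 * 1024), 16);
    assert_eq!(size_class(512 * 1024 + 1), 17); // the oversized class
    assert_eq!(direct_index(1024), 127); // last in-bounds slot of the 128-entry table
}

The `pages_direct` table trades a little memory for speed: a small allocation goes straight to a page without walking a size-class list, while the sharded per-page free lists (`alloc_list`, `local_free_list`, `thread_free_list`) keep most operations free of cross-thread contention, per the mimalloc design.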
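`atomic_push` is a Treiber-style lock-free push: read the head, point the new node at it, and publish with a compare-exchange, retrying on contention. A sketch of the owner-side counterpart (`atomic_take_all` is hypothetical, not in the diff; in the mimalloc design the owning thread periodically detaches the whole `thread_free_list` and splices it into its local free list):

use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

struct Block {
    next: AtomicPtr<Block>,
}

/// Same CAS loop as `atomic_push` in the diff, using `get_mut` for the
/// non-atomic write to `next` (we hold `&mut`, so no atomicity is needed).
fn atomic_push(node: &mut Block, list: &AtomicPtr<Block>) {
    loop {
        let old_list = list.load(Ordering::Acquire);
        *node.next.get_mut() = old_list;
        if list
            .compare_exchange(old_list, node, Ordering::Release, Ordering::Acquire)
            .is_ok()
        {
            break;
        }
    }
}

/// Hypothetical owner-side drain: one swap detaches every concurrently
/// pushed block, so the owner can walk the chain without racing pushers.
fn atomic_take_all(list: &AtomicPtr<Block>) -> *mut Block {
    list.swap(ptr::null_mut(), Ordering::Acquire)
}

fn main() {
    let list = AtomicPtr::new(ptr::null_mut());
    let mut a = Block { next: AtomicPtr::new(ptr::null_mut()) };
    atomic_push(&mut a, &list);
    assert_eq!(atomic_take_all(&list), &mut a as *mut Block);
}

Because nodes only ever leave the list via whole-list detachment, the ABA hazard of a lock-free pop never arises here.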
diff --git a/crates/kernel/Cargo.toml b/crates/kernel/Cargo.toml
index 6fbabc1..cbdac2a 100644
--- a/crates/kernel/Cargo.toml
+++ b/crates/kernel/Cargo.toml
@@ -9,6 +9,7 @@ crate-type = ["staticlib"]
[dependencies]
cfg-if = { version = "1.0.0", default-features = false }
contracts = { version = "0.6.3", default-features = false }
+either = { version = "1.13.0", default-features = false }
log = { version = "0.4.20", default-features = false }
spin = { version = "0.9.8", default-features = false, features = ["fair_mutex", "use_ticket_mutex"] }
vernos_alloc_buddy = { path = "../alloc_buddy" }
diff --git a/crates/kernel/src/arch/riscv64/paging.rs b/crates/kernel/src/arch/riscv64/paging.rs
index 5ebdb5f..6b81881 100644
--- a/crates/kernel/src/arch/riscv64/paging.rs
+++ b/crates/kernel/src/arch/riscv64/paging.rs
@@ -1,6 +1,8 @@
-use crate::arch::PAGE_SIZE;
+use crate::arch::{PAGE_SIZE, PAGE_SIZE_BITS};
use contracts::requires;
-use core::{arch::asm, fmt, str};
+use core::{arch::asm, fmt, iter, ops::RangeInclusive, str};
+use either::Either;
+use vernos_utils::debug;
/// The number of bits looked up in each page table entry.
pub const PAGE_TABLE_BITS: usize = 9;
@@ -18,7 +20,6 @@ impl ASID {
}
/// A single page table.
-#[derive(Debug)]
#[repr(align(4096))]
pub struct PageTable([PageTableEntry; 512]);
@@ -50,6 +51,57 @@ impl PageTable {
}
}
+impl fmt::Debug for PageTable {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ // Get an iterator over the valid leaf page table entries.
+ let mut mappings = iter_level_2_mappings(&self.0, 0).peekable();
+
+ // Make an iterator that merges adjacent entries that have the same flags.
+ let mappings = iter::from_fn(|| {
+ let (entry, mut vaddrs) = mappings.next()?;
+ let paddrs_start = entry.addr() as usize;
+ let mut len = (vaddrs.end() - vaddrs.start()) + 1;
+
+ while let Some((next_entry, next_vaddrs)) = mappings.peek() {
+ let next_paddrs_start = next_entry.addr() as usize;
+
+ if entry.flag_bits() != next_entry.flag_bits()
+ || vaddrs.end().wrapping_add(1) != *next_vaddrs.start()
+ || paddrs_start.wrapping_add(len) != next_paddrs_start
+ {
+ break;
+ }
+ // UNWRAP: .peek() already showed us that there's a next entry.
+ let (_, next_vaddrs) = mappings.next().unwrap();
+ vaddrs = *vaddrs.start()..=*next_vaddrs.end();
+ len = (next_vaddrs.end() - vaddrs.start()) + 1;
+ }
+ let paddrs = paddrs_start..=paddrs_start + (len - 1);
+ Some((entry, vaddrs, paddrs))
+ });
+
+ // Turn the iterator into an iterator over Debugs.
+ let debug_mappings = mappings.map(|(entry, vaddrs, paddrs)| {
+ debug(move |fmt| {
+ let flags = entry.flags_str();
+ // UNWRAP: The flags must be ASCII by the postcondition of flags_str().
+ let flags = str::from_utf8(&flags).unwrap();
+ write!(
+ fmt,
+ "[V|{:16x}-{:16x}][P|{:16x}-{:16x}][F|{}]",
+ *vaddrs.start(),
+ *vaddrs.end(),
+ *paddrs.start(),
+ *paddrs.end(),
+ flags
+ )
+ })
+ });
+
+ fmt.debug_list().entries(debug_mappings).finish()
+ }
+}
+
/// An entry in a page table.
#[derive(Clone, Copy, Default, Eq, PartialEq)]
pub struct PageTableEntry(u64);
@@ -62,11 +114,52 @@ impl PageTableEntry {
(self.0 >> 10) & 0x0000_0fff_ffff_ffff
}
+ /// Returns the bits of the entry that correspond to flags.
+ ///
+ /// This isn't `pub` because this isn't portable, though maybe it makes sense to instead export
+ /// a predicate for "do these two entries have the _same_ flags bits," since that should be
+ /// more portable.
+ #[requires(self.valid())]
+ #[ensures((ret & !0xffc0_0000_0000_03ff) == 0)]
+ fn flag_bits(&self) -> u64 {
+ self.0 & 0xffc0_0000_0000_03ff
+ }
+
+ /// Returns bytes that correspond to an ASCII string with the flags.
+ #[requires(self.valid())]
+ #[ensures(ret.iter().all(|ch| ch.is_ascii()))]
+ fn flags_str(&self) -> [u8; 7] {
+ let mut flags = *b"rwxugad";
+ let char_disabled = b'-';
+ if !self.readable() {
+ flags[0] = char_disabled;
+ }
+ if !self.writable() {
+ flags[1] = char_disabled;
+ }
+ if !self.executable() {
+ flags[2] = char_disabled;
+ }
+ if !self.user() {
+ flags[3] = char_disabled;
+ }
+ if !self.global() {
+ flags[4] = char_disabled;
+ }
+ if !self.accessed() {
+ flags[5] = char_disabled;
+ }
+ if !self.dirty() {
+ flags[6] = char_disabled;
+ }
+ flags
+ }
+
/// Returns the physical address of the backing page or next level page table.
#[requires(self.valid())]
#[ensures((ret & !0x00ff_ffff_ffff_f000) == 0)]
pub fn addr(&self) -> u64 {
- self.ppn() << PAGE_TABLE_BITS
+ self.ppn() << PAGE_SIZE_BITS
}
/// Returns a pointer to the backing page.
@@ -228,31 +321,9 @@ impl PageTableEntry {
impl fmt::Debug for PageTableEntry {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
if self.valid() {
- let mut flags = *b"DAGUXWRV";
- if !self.dirty() {
- flags[0] = b' ';
- }
- if !self.accessed() {
- flags[1] = b' ';
- }
- if !self.global() {
- flags[2] = b' ';
- }
- if !self.user() {
- flags[3] = b' ';
- }
- if !self.executable() {
- flags[4] = b' ';
- }
- if !self.writable() {
- flags[5] = b' ';
- }
- if !self.readable() {
- flags[6] = b' ';
- }
-
- // UNWRAP: The flags must be ASCII.
let addr = self.addr() as *const ();
+ let flags = self.flags_str();
+ // UNWRAP: The flags must be ASCII by the postcondition of flags_str().
let flags = str::from_utf8(&flags).unwrap();
write!(fmt, "PageTableEntry({addr:018p}, {flags})")
} else {
@@ -260,3 +331,73 @@ impl fmt::Debug for PageTableEntry {
}
}
}
+
+/// See `PageTable::iter_mappings`. This needs to be its own function because of `impl Trait`; we
+/// can't allocate here, and we want a fixed-size iterator.
+fn iter_level_2_mappings(
+ table: &[PageTableEntry; 512],
+ base_addr: usize,
+) -> impl '_ + Iterator<Item = (PageTableEntry, RangeInclusive<usize>)> {
+ const ENTRY_SIZE: usize = 1 << (12 + 9 + 9);
+ table
+ .iter()
+ .enumerate()
+ .filter(|(_, entry)| entry.valid())
+ .flat_map(move |(i, &entry)| {
+ let mut start_addr = base_addr + i * ENTRY_SIZE;
+ if i >= 256 {
+ start_addr += 0xffff_ff80_0000_0000;
+ }
+ if entry.leaf_pte() {
+ Either::Left(iter::once((
+ entry,
+ start_addr..=start_addr + (ENTRY_SIZE - 1),
+ )))
+ } else {
+ let next_table = unsafe { &(*entry.page_table()).0 };
+ Either::Right(iter_level_1_mappings(next_table, start_addr))
+ }
+ })
+}
+
+/// See `PageTable::iter_mappings`. This needs to be its own function because of `impl Trait`; we
+/// can't allocate here, and we want a fixed-size iterator.
+fn iter_level_1_mappings(
+ table: &[PageTableEntry; 512],
+ base_addr: usize,
+) -> impl '_ + Iterator<Item = (PageTableEntry, RangeInclusive<usize>)> {
+ const ENTRY_SIZE: usize = 1 << (12 + 9);
+ table
+ .iter()
+ .enumerate()
+ .filter(|(_, entry)| entry.valid())
+ .flat_map(move |(i, &entry)| {
+ let start_addr = base_addr + i * ENTRY_SIZE;
+ if entry.leaf_pte() {
+ Either::Left(iter::once((
+ entry,
+ start_addr..=start_addr + (ENTRY_SIZE - 1),
+ )))
+ } else {
+ let next_table = unsafe { &(*entry.page_table()).0 };
+ Either::Right(iter_level_0_mappings(next_table, start_addr))
+ }
+ })
+}
+
+/// See `PageTable::iter_mappings`. This needs to be its own function because of `impl Trait`; we
+/// can't allocate here, and we want a fixed-size iterator.
+fn iter_level_0_mappings(
+ table: &[PageTableEntry; 512],
+ base_addr: usize,
+) -> impl '_ + Iterator<Item = (PageTableEntry, RangeInclusive<usize>)> {
+ const ENTRY_SIZE: usize = 1 << 12;
+ table
+ .iter()
+ .enumerate()
+ .filter(|(_, entry)| entry.valid())
+ .map(move |(i, &entry)| {
+ let start_addr = base_addr + i * ENTRY_SIZE;
+ (entry, start_addr..=start_addr + (ENTRY_SIZE - 1))
+ })
+}
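The `0xffff_ff80_0000_0000` adjustment for indices >= 256 is Sv39 sign extension: a virtual address has 39 meaningful bits, and bits 63:39 must all equal bit 38, so the upper half of the root table lives at the top of the canonical address space. A standalone sketch of the same arithmetic (not kernel code; `sv39_canonical` is an illustrative helper):

/// Sign-extend a 39-bit Sv39 virtual address through bit 38.
fn sv39_canonical(va: u64) -> u64 {
    (((va << 25) as i64) >> 25) as u64
}

fn main() {
    const GIB: u64 = 1 << 30; // each level-2 entry spans 1 GiB in Sv39

    // Index 255 is the last low-half slot; bit 38 is clear, nothing changes.
    assert_eq!(sv39_canonical(255 * GIB), 0x0000_003f_c000_0000);

    // Index 256 sets bit 38, so sign extension fills bits 63:39 -- the same
    // value as adding the diff's 0xffff_ff80_0000_0000 offset.
    assert_eq!(sv39_canonical(256 * GIB), 0xffff_ffc0_0000_0000);
    assert_eq!(256 * GIB + 0xffff_ff80_0000_0000, 0xffff_ffc0_0000_0000);

    // Index 511, the last slot, maps the top gigabyte.
    assert_eq!(sv39_canonical(511 * GIB), 0xffff_ffff_c000_0000);
}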
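`PageTable`'s `Debug` impl coalesces runs of leaf mappings whose flags match and whose virtual and physical ranges are both contiguous, so a gigabyte of identity-mapped pages prints as one line instead of thousands. The same merge, reduced to plain tuples (a standalone sketch using `Vec` for brevity; the kernel version is allocation-free and streams from the peekable iterator):

/// Merge adjacent (flags, vaddr, paddr, len) mappings when the next entry
/// continues both address ranges and carries identical flags -- the rule
/// the Debug impl applies to leaf page table entries.
fn merge(maps: &[(u64, usize, usize, usize)]) -> Vec<(u64, usize, usize, usize)> {
    let mut out: Vec<(u64, usize, usize, usize)> = Vec::new();
    for &(flags, va, pa, len) in maps {
        if let Some(last) = out.last_mut() {
            if last.0 == flags && last.1 + last.3 == va && last.2 + last.3 == pa {
                last.3 += len; // extend the previous run
                continue;
            }
        }
        out.push((flags, va, pa, len));
    }
    out
}

fn main() {
    // Two 4 KiB pages, contiguous virtually and physically, same flags: one run.
    let runs = merge(&[(0b111, 0x1000, 0x8000, 0x1000), (0b111, 0x2000, 0x9000, 0x1000)]);
    assert_eq!(runs, vec![(0b111, 0x1000, 0x8000, 0x2000)]);

    // A physical gap breaks the run even though the virtual side is contiguous.
    let runs = merge(&[(0b111, 0x1000, 0x8000, 0x1000), (0b111, 0x2000, 0xa000, 0x1000)]);
    assert_eq!(runs.len(), 2);
}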