author     Nathan Ringo <nathan@remexre.com>   2024-08-31 20:23:24 -0500
committer  Nathan Ringo <nathan@remexre.com>   2024-08-31 20:23:24 -0500
commit     e9a79a0ed79609c1e293c7b221fce577200b2eb7 (patch)
tree       e097115f4b4435caa2a26186652cbfffefb2cb28
parent     4617f96a99c0e5dfac1b45b3cff037306e4edc63 (diff)
The start of a new librarified organization.
-rw-r--r--  crates/Cargo.lock                            | 348
-rw-r--r--  crates/Cargo.toml                            |   3
-rw-r--r--  crates/buddy_allocator/Cargo.toml            |  16
-rw-r--r--  crates/buddy_allocator/src/bitvec.rs         |  59
-rw-r--r--  crates/buddy_allocator/src/free_list.rs      |  72
-rw-r--r--  crates/buddy_allocator/src/lib.rs            | 222
-rw-r--r--  crates/buddy_allocator/src/stripe.rs         | 148
-rw-r--r--  crates/buddy_allocator/src/tree.rs           | 130
-rw-r--r--  crates/buddy_allocator/tests/hosted_test.rs  | 252
-rw-r--r--  crates/physmem_free_list/Cargo.toml          |   9
-rw-r--r--  crates/physmem_free_list/src/lib.rs          | 378
-rw-r--r--  crates/utils/Cargo.toml                      |   8
-rw-r--r--  crates/utils/src/lib.rs                      |   3
-rw-r--r--  crates/utils/src/pin.rs                      |  25
-rw-r--r--  flake.nix                                    | 164
-rw-r--r--  nix/miri.nix                                 |  22
16 files changed, 1781 insertions, 78 deletions
diff --git a/crates/Cargo.lock b/crates/Cargo.lock
new file mode 100644
index 0000000..843d740
--- /dev/null
+++ b/crates/Cargo.lock
@@ -0,0 +1,348 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "allocator-api2"
+version = "0.2.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
+
+[[package]]
+name = "autocfg"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78"
+dependencies = [
+ "autocfg 1.3.0",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bitflags"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cfg_aliases"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
+
+[[package]]
+name = "cloudabi"
+version = "0.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
+dependencies = [
+ "bitflags 1.3.2",
+]
+
+[[package]]
+name = "contracts"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "fuchsia-cprng"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
+
+[[package]]
+name = "lazy_static"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+
+[[package]]
+name = "libc"
+version = "0.2.158"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+
+[[package]]
+name = "nix"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
+dependencies = [
+ "bitflags 2.6.0",
+ "cfg-if",
+ "cfg_aliases",
+ "libc",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg 1.3.0",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "proptest"
+version = "0.9.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01c477819b845fe023d33583ebf10c9f62518c8d79a0960ba5c36d6ac8a55a5b"
+dependencies = [
+ "bitflags 1.3.2",
+ "byteorder",
+ "lazy_static",
+ "num-traits",
+ "quick-error",
+ "rand",
+ "rand_chacha",
+ "rand_xorshift",
+ "regex-syntax",
+]
+
+[[package]]
+name = "quick-error"
+version = "1.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
+dependencies = [
+ "autocfg 0.1.8",
+ "libc",
+ "rand_chacha",
+ "rand_core 0.4.2",
+ "rand_hc",
+ "rand_isaac",
+ "rand_jitter",
+ "rand_os",
+ "rand_pcg",
+ "rand_xorshift",
+ "winapi",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
+dependencies = [
+ "autocfg 0.1.8",
+ "rand_core 0.3.1",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
+dependencies = [
+ "rand_core 0.4.2",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
+
+[[package]]
+name = "rand_hc"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
+dependencies = [
+ "rand_core 0.3.1",
+]
+
+[[package]]
+name = "rand_isaac"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
+dependencies = [
+ "rand_core 0.3.1",
+]
+
+[[package]]
+name = "rand_jitter"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
+dependencies = [
+ "libc",
+ "rand_core 0.4.2",
+ "winapi",
+]
+
+[[package]]
+name = "rand_os"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
+dependencies = [
+ "cloudabi",
+ "fuchsia-cprng",
+ "libc",
+ "rand_core 0.4.2",
+ "rdrand",
+ "winapi",
+]
+
+[[package]]
+name = "rand_pcg"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
+dependencies = [
+ "autocfg 0.1.8",
+ "rand_core 0.4.2",
+]
+
+[[package]]
+name = "rand_xorshift"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
+dependencies = [
+ "rand_core 0.3.1",
+]
+
+[[package]]
+name = "rdrand"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
+dependencies = [
+ "rand_core 0.3.1",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
+
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+
+[[package]]
+name = "vernos_buddy_allocator"
+version = "0.1.0"
+dependencies = [
+ "allocator-api2",
+ "contracts",
+ "nix",
+ "proptest",
+ "static_assertions",
+ "vernos_physmem_free_list",
+ "vernos_utils",
+]
+
+[[package]]
+name = "vernos_physmem_free_list"
+version = "0.1.0"
+dependencies = [
+ "allocator-api2",
+ "contracts",
+]
+
+[[package]]
+name = "vernos_utils"
+version = "0.1.0"
+dependencies = [
+ "contracts",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/crates/Cargo.toml b/crates/Cargo.toml
new file mode 100644
index 0000000..0911a19
--- /dev/null
+++ b/crates/Cargo.toml
@@ -0,0 +1,3 @@
+[workspace]
+members = ["buddy_allocator", "physmem_free_list", "utils"]
+resolver = "2"
diff --git a/crates/buddy_allocator/Cargo.toml b/crates/buddy_allocator/Cargo.toml
new file mode 100644
index 0000000..e2a85d3
--- /dev/null
+++ b/crates/buddy_allocator/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "vernos_buddy_allocator"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+allocator-api2 = { version = "0.2.18", default-features = false }
+contracts = { version = "0.6.3", default-features = false }
+static_assertions = { version = "1.1.0", default-features = false }
+vernos_physmem_free_list = { path = "../physmem_free_list" }
+vernos_utils = { path = "../utils" }
+
+[dev-dependencies]
+proptest = { version = "0.9.6", default-features = false, features = ["std"] }
+nix = { version = "0.29.0", default-features = false, features = ["mman"] }
diff --git a/crates/buddy_allocator/src/bitvec.rs b/crates/buddy_allocator/src/bitvec.rs
new file mode 100644
index 0000000..856d579
--- /dev/null
+++ b/crates/buddy_allocator/src/bitvec.rs
@@ -0,0 +1,59 @@
+use contracts::requires;
+use core::{fmt, mem::transmute};
+
+/// A fixed-length vector of bits.
+pub struct BitVec([usize]);
+
+impl BitVec {
+ fn from_mut(words: &mut [usize]) -> &mut BitVec {
+ // SAFETY: The types have a newtype relationship.
+ unsafe { transmute(words) }
+ }
+
+ fn from_ref(words: &[usize]) -> &BitVec {
+ // SAFETY: The types have a newtype relationship.
+ unsafe { transmute(words) }
+ }
+
+ /// Retrieves the value of a bit from the BitVec.
+ #[requires(i < self.len())]
+ pub fn get(&self, i: usize) -> bool {
+ let word_index = i / usize::BITS as usize;
+ let subword_index = i % usize::BITS as usize;
+ let one_hot = 1 << subword_index;
+ (self.0[word_index] & one_hot) != 0
+ }
+
+ /// Returns whether the BitVec is empty.
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ /// Returns the number of bits in the BitVec.
+ pub fn len(&self) -> usize {
+ self.0.len() * usize::BITS as usize
+ }
+
+ /// Sets the value of a bit in the BitVec.
+ #[requires(i < self.len())]
+ pub fn set(&mut self, i: usize, value: bool) {
+ let word_index = i / usize::BITS as usize;
+ let subword_index = i % usize::BITS as usize;
+ let one_hot = 1 << subword_index;
+ if value {
+ self.0[word_index] |= one_hot;
+ } else {
+ self.0[word_index] &= !one_hot;
+ }
+ }
+}
+
+impl fmt::Debug for BitVec {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "BitVec(")?;
+ for word in &self.0 {
+ write!(fmt, "{word:064b}")?;
+ }
+ write!(fmt, ")")
+ }
+}
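
Editor's note: the BitVec above packs bits into machine words. The following standalone sketch (hypothetical helper names, not part of the crate's API, which keeps its constructors private) shows the same word/subword index arithmetic on a plain [usize] buffer:

// Standalone illustration of the bit-packing arithmetic BitVec uses: bit i lives in
// word i / usize::BITS, at position i % usize::BITS within that word.
fn set_bit(words: &mut [usize], i: usize, value: bool) {
    let word_index = i / usize::BITS as usize;
    let one_hot = 1usize << (i % usize::BITS as usize);
    if value {
        words[word_index] |= one_hot;
    } else {
        words[word_index] &= !one_hot;
    }
}

fn get_bit(words: &[usize], i: usize) -> bool {
    (words[i / usize::BITS as usize] >> (i % usize::BITS as usize)) & 1 != 0
}

fn main() {
    let mut words = [0usize; 4]; // 4 * usize::BITS bits of storage
    set_bit(&mut words, 70, true);
    assert!(get_bit(&words, 70));
    assert!(!get_bit(&words, 69));
    set_bit(&mut words, 70, false);
    assert_eq!(words, [0, 0, 0, 0]);
}
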
diff --git a/crates/buddy_allocator/src/free_list.rs b/crates/buddy_allocator/src/free_list.rs
new file mode 100644
index 0000000..5a633c3
--- /dev/null
+++ b/crates/buddy_allocator/src/free_list.rs
@@ -0,0 +1,72 @@
+//! A doubly-linked list of power-of-two-sized subregions.
+
+use core::{marker::PhantomPinned, pin::Pin, ptr};
+
+/// The magic number a non-sentinel node should have.
+const FREE_NODE_MAGIC: u64 = u64::from_ne_bytes(*b"FREENODE");
+
+/// The magic number a sentinel node should have.
+const SENTINEL_MAGIC: u64 = u64::from_ne_bytes(*b"SENTINEL");
+
+/// A doubly-linked list of power-of-two-sized subregions.
+///
+/// This type is valid when zero-initialized.
+#[derive(Debug)]
+pub struct FreeList {
+ sentinel: FreeListNode,
+}
+
+impl FreeList {
+ /// Performs self-linking. This must be done before the free list is used in any other way.
+ pub fn self_link(mut self: Pin<&mut FreeList>) {
+ assert_eq!(self.sentinel.next, ptr::null_mut());
+ assert_eq!(self.sentinel.prev, ptr::null_mut());
+
+ let self_ptr = &self.sentinel as *const FreeListNode as *mut FreeListNode;
+ let mut sentinel = self.sentinel_unchecked();
+ *sentinel.as_mut().next_mut() = self_ptr;
+ *sentinel.as_mut().prev_mut() = self_ptr;
+ *sentinel.as_mut().magic_mut() = SENTINEL_MAGIC;
+ }
+
+ /// Projects the sentinel node out of the list.
+ fn sentinel(self: Pin<&mut FreeList>) -> Pin<&mut FreeListNode> {
+ let mut sentinel = self.sentinel_unchecked();
+ assert_eq!(*sentinel.as_mut().magic_mut(), SENTINEL_MAGIC);
+ sentinel
+ }
+
+ /// Projects the sentinel node out of the list, without checking the magic number.
+ fn sentinel_unchecked(self: Pin<&mut FreeList>) -> Pin<&mut FreeListNode> {
+ // SAFETY: sentinel is structurally pinned.
+ unsafe { self.map_unchecked_mut(|list| &mut list.sentinel) }
+ }
+}
+
+#[derive(Debug)]
+struct FreeListNode {
+ next: *mut FreeListNode,
+ prev: *mut FreeListNode,
+ magic: u64,
+ _phantom: PhantomPinned,
+}
+
+impl FreeListNode {
+ /// Projects the `next` pointer out of the node.
+ fn next_mut(self: Pin<&mut FreeListNode>) -> &mut *mut FreeListNode {
+ // SAFETY: next is not structurally pinned.
+ unsafe { &mut self.get_unchecked_mut().next }
+ }
+
+ /// Projects the `prev` pointer out of the node.
+ fn prev_mut(self: Pin<&mut FreeListNode>) -> &mut *mut FreeListNode {
+ // SAFETY: prev is not structurally pinned.
+ unsafe { &mut self.get_unchecked_mut().prev }
+ }
+
+ /// Projects the `magic` number out of the node.
+ fn magic_mut(self: Pin<&mut FreeListNode>) -> &mut u64 {
+ // SAFETY: magic is not structurally pinned.
+ unsafe { &mut self.get_unchecked_mut().magic }
+ }
+}
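
Editor's note: the self_link step above establishes the circular-list invariant that makes an emptiness check a single pointer comparison. A minimal, hypothetical sketch of that invariant, using plain raw pointers rather than the crate's pinned FreeList type:

use core::ptr;

// A bare-bones sentinel node, mirroring the next/prev shape of FreeListNode.
struct Node {
    next: *mut Node,
    prev: *mut Node,
}

fn main() {
    let mut sentinel = Node { next: ptr::null_mut(), prev: ptr::null_mut() };
    let sentinel_ptr: *mut Node = &mut sentinel;

    // Self-link, as FreeList::self_link does: an empty circular list is one whose
    // sentinel points at itself.
    sentinel.next = sentinel_ptr;
    sentinel.prev = sentinel_ptr;

    // Emptiness check for a circular doubly-linked list: sentinel.next == &sentinel.
    assert!(ptr::eq(sentinel.next, sentinel_ptr));
}
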
diff --git a/crates/buddy_allocator/src/lib.rs b/crates/buddy_allocator/src/lib.rs
new file mode 100644
index 0000000..fa86de7
--- /dev/null
+++ b/crates/buddy_allocator/src/lib.rs
@@ -0,0 +1,222 @@
+//! A buddy allocator, used to allocate pages.
+//!
+//! The allocator can be split into three abstractions: stripes, trees, and the allocator.
+//!
+//! TODO: See if there's standard terminology for these.
+//!
+//! ## Stripes
+//!
+//! The buddy allocator works in terms of size classes, which are power-of-two sized, starting at a
+//! single page and going up from there. Each stripe corresponds to a single size class and a
+//! particular region of memory.
+//!
+//! A stripe contains a circular doubly-linked free-list for subregions of that size class, and a
+//! bitset marking whether a particular region has been allocated or not. Being a circular
+//! doubly-linked list makes it cheap to remove an element whose address we know, as well as cheap
+//! to push and pop elements.
+//!
+//! It's efficient to go from the address of a subregion to the index of its corresponding bit, so
+//! when we hand out a subregion from the free-list or put one back, it's cheap to read or write
+//! its bit.
+//!
+//! ## Trees
+//!
+//! A tree is logically a collection of stripes, one per size class. To pack the structures more
+//! efficiently, they are stored interleaved with each other, and the tree manages this.
+//!
+//! The important logic at the level of trees happens when we allocate a subregion from a size
+//! class whose stripe's free-list is empty, and when we free subregions.
+//!
+//! When a stripe's free-list is empty, the tree instead allocates a subregion of a larger size
+//! from the next stripe. It can then store the unused portion in the current size class.
+//!
+//! The really important bit is the ability to merge subregions when they're freed. When we free a
+//! subregion of a certain size class, we can check whether its neighbor (its buddy) is unallocated
+//! as well. If so, we can remove it from the free-list by its address. We can combine the two
+//! subregions into one of the next larger size class, and then return that subregion to the next
+//! stripe.
+//!
+//! ## The buddy allocator
+//!
+//! Finally, the overall buddy allocator needs to be able to handle multiple memory regions. To
+//! facilitate this, the trees are stored in an array, which forms the overall allocator.
+#![no_std]
+
+mod bitvec;
+mod free_list;
+mod tree;
+
+// mod stripe;
+
+use crate::{bitvec::BitVec, free_list::FreeList, tree::Tree};
+use allocator_api2::alloc::{AllocError, Layout, LayoutError};
+use contracts::requires;
+use core::{mem, pin::Pin, ptr::NonNull};
+use vernos_physmem_free_list::FreeListAllocator;
+use vernos_utils::pin::pin_project_slice_mut_iter;
+
+/// A buddy allocator.
+#[derive(Debug)]
+pub struct BuddyAllocator<
+ 'allocator,
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+> {
+ /// The free lists.
+ free_lists: Pin<&'allocator mut [FreeList; SIZE_CLASS_COUNT]>,
+
+ /// The metadata associated with each tree.
+ trees: &'allocator mut [Tree<'allocator, PAGE_SIZE, PAGE_SIZE_BITS>],
+
+ /// The shared bitset.
+ bitset: &'allocator mut BitVec,
+}
+
+impl<
+ 'allocator,
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+ > BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+ /// Returns a buddy allocator backed by the page ranges in the free list.
+ pub fn new(
+ mut free_list: FreeListAllocator<'allocator, PAGE_SIZE>,
+ ) -> Result<BuddyAllocator<'allocator, PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, AllocError>
+ {
+ assert_eq!(PAGE_SIZE, 1 << PAGE_SIZE_BITS);
+ assert_ne!(SIZE_CLASS_COUNT, 0);
+
+ // We use one contiguous region to store the free lists and all the information about each
+ // tree (the base, the maximum order, and the bitset). We do one pass through the free list
+ // to find out how much storage we need, then we try to find a convenient spot to prune the
+ // storage we need from a page range in the list.
+ let mut range_count = 0;
+ let mut size_class_counts = [0; SIZE_CLASS_COUNT];
+ for (_, mut len_pages) in free_list.iter() {
+ while len_pages > 0 {
+ let size_class = (len_pages.trailing_zeros() as usize).min(SIZE_CLASS_COUNT - 1);
+ range_count += 1;
+ size_class_counts[size_class] += 1;
+ len_pages -= 1 << size_class;
+ }
+ }
+
+ // Lay out the metadata. The error here really ought to be impossible, because the pages in
+ // the free list needed to fit in there in the first place.
+ let metadata_layout = MetadataLayout::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(
+ range_count,
+ size_class_counts,
+ )
+ .map_err(|_| AllocError)?;
+
+ // Allocate space for the metadata.
+ let metadata = free_list.allocate_zeroed(metadata_layout.layout)?;
+
+ // Break out each component of the metadata.
+ //
+ // SAFETY: The layout is computed to make all of this sound if the data is valid. All of
+ // these types are valid when zero-initialized.
+ let free_lists: &'allocator mut [FreeList; SIZE_CLASS_COUNT] = unsafe {
+ metadata
+ .byte_add(metadata_layout.free_lists_offset)
+ .cast()
+ .as_mut()
+ };
+ let trees: &'allocator mut [Tree<PAGE_SIZE, PAGE_SIZE_BITS>] = unsafe {
+ NonNull::slice_from_raw_parts(
+ metadata.byte_add(metadata_layout.trees_offset).cast(),
+ metadata_layout.trees_len,
+ )
+ .as_mut()
+ };
+ let bitset: &'allocator mut BitVec = unsafe {
+ mem::transmute::<&mut [usize], &mut BitVec>(
+ NonNull::slice_from_raw_parts(
+ metadata.byte_add(metadata_layout.bitset_offset).cast(),
+ metadata_layout.bitset_len,
+ )
+ .as_mut(),
+ )
+ };
+
+ // Pin and self-link each free list. Although they're valid in a UB sense when zeroed,
+ // they're not yet ready to use until this is done.
+ //
+ // SAFETY: We never provide the ability to move out of a free list.
+ let mut free_lists = unsafe { Pin::new_unchecked(free_lists) };
+ for free_list in pin_project_slice_mut_iter(free_lists.as_mut()) {
+ free_list.self_link();
+ }
+
+ // TODO: Actually initialize the trees with ranges from the allocator...
+
+ // Make the buddy allocator and return.
+ Ok(BuddyAllocator {
+ free_lists,
+ trees,
+ bitset,
+ })
+ }
+
+ /// Tries to allocate a subregion of a particular size class from the allocator.
+ #[requires((0..SIZE_CLASS_COUNT).contains(&size_class))]
+ pub fn alloc(&mut self, size_class: usize) -> Result<NonNull<u8>, AllocError> {
+ Err(AllocError)
+ }
+}
+
+#[derive(Debug)]
+struct MetadataLayout<
+ const PAGE_SIZE: usize,
+ const PAGE_SIZE_BITS: usize,
+ const SIZE_CLASS_COUNT: usize,
+> {
+ layout: Layout,
+ free_lists_offset: usize,
+ trees_offset: usize,
+ trees_len: usize,
+ bitset_offset: usize,
+ bitset_len: usize,
+}
+
+impl<const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize, const SIZE_CLASS_COUNT: usize>
+ MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>
+{
+ fn new(
+ range_count: usize,
+ mut size_class_counts: [usize; SIZE_CLASS_COUNT],
+ ) -> Result<MetadataLayout<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>, LayoutError> {
+ let trees_len = range_count;
+ let mut bitset_len_bits = 0;
+ // TODO: Is there a cleaner way to compute this?
+ for i in (0..size_class_counts.len()).rev() {
+ let count = size_class_counts[i];
+ if count != 0 {
+ bitset_len_bits += count;
+ if i > 0 {
+ size_class_counts[i - 1] += count * 2;
+ }
+ }
+ }
+ let bitset_len = (bitset_len_bits + usize::BITS as usize - 1) / usize::BITS as usize;
+
+ let free_lists_layout = Layout::new::<[FreeList; SIZE_CLASS_COUNT]>();
+ let trees_layout = Layout::array::<Tree<PAGE_SIZE, PAGE_SIZE_BITS>>(trees_len)?;
+ let bitset_layout = Layout::array::<usize>(bitset_len)?;
+
+ let free_lists_offset = 0;
+ let (layout, trees_offset) = free_lists_layout.extend(trees_layout)?;
+ let (layout, bitset_offset) = layout.extend(bitset_layout)?;
+
+ Ok(MetadataLayout {
+ layout,
+ free_lists_offset,
+ trees_offset,
+ trees_len,
+ bitset_offset,
+ bitset_len,
+ })
+ }
+}
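
Editor's note: the module documentation above describes finding a freed subregion's buddy. Since subregions of order n are 2^n pages and naturally aligned, the buddy's address differs from the subregion's in exactly one bit, so it can be computed with an XOR. A hedged, standalone sketch; the function and its parameters are illustrative, not part of this crate:

// Illustrative buddy computation: XOR the offset-from-base with the subregion size.
fn buddy_addr(base: usize, addr: usize, order: usize, page_size_bits: usize) -> usize {
    let offset = addr - base;
    base + (offset ^ (1 << (page_size_bits + order)))
}

fn main() {
    // With 4 KiB pages (page_size_bits = 12) and order 0, the pages at offsets
    // 0x0000 and 0x1000 are each other's buddies.
    assert_eq!(buddy_addr(0x8000_0000, 0x8000_1000, 0, 12), 0x8000_0000);
    assert_eq!(buddy_addr(0x8000_0000, 0x8000_0000, 0, 12), 0x8000_1000);

    // At order 1 (8 KiB subregions), offsets 0x0000 and 0x2000 pair up instead.
    assert_eq!(buddy_addr(0x8000_0000, 0x8000_2000, 1, 12), 0x8000_0000);
}
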
diff --git a/crates/buddy_allocator/src/stripe.rs b/crates/buddy_allocator/src/stripe.rs
new file mode 100644
index 0000000..2ff686b
--- /dev/null
+++ b/crates/buddy_allocator/src/stripe.rs
@@ -0,0 +1,148 @@
+use crate::{bitvec::BitVec, PAGE_SIZE_BITS};
+use core::ptr::NonNull;
+
+/// A single size class for a single region of the allocator. See the comment at the root of
+/// the crate for more information.
+pub struct Stripe<'buddy> {
+ /// The base address of the tree this stripe is a part of.
+ pub base: *const (),
+
+ /// The order of the stripe. Order `n` means the subregions are `2ⁿ` pages in size.
+ pub order: usize,
+
+ /// The sentinel node of the free-list for this stripe. As an invariant of the type, there are no
+ /// live references to any node in this list.
+ pub free_list: NonNull<FreeListNode>,
+
+ /// The bitset used to track whether a given subregion is allocated or not. A `true` bit
+ /// corresponds to an allocated subregion.
+ pub bitset: &'buddy mut BitVec,
+
+ /// The offset from the start of the bitset to the region used by this stripe.
+ pub bitset_offset: usize,
+}
+
+impl<'buddy> Stripe<'buddy> {
+ /// Returns the buddy of the given pointer.
+ ///
+ /// ## Safety
+ ///
+ /// - The pointer must actually be part of this region.
+ unsafe fn buddy_of(&self, ptr: NonNull<FreeListNode>) -> NonNull<FreeListNode> {
+ let index = self.buddy_of_index(self.index_of(ptr));
+ let addr = self.base as usize + (index << (PAGE_SIZE_BITS + self.order));
+ NonNull::new_unchecked(addr as *mut FreeListNode)
+ }
+
+ /// Returns the buddy of the given index.
+ fn buddy_of_index(&self, index: usize) -> usize {
+ index ^ 1
+ }
+
+ /// Returns the index the given pointer should have in the BitVec.
+ fn index_of(&self, ptr: NonNull<FreeListNode>) -> usize {
+ (ptr.as_ptr() as usize - self.base as usize) >> (PAGE_SIZE_BITS + self.order)
+ }
+
+ /// Pops a subregion from the free-list.
+ pub fn pop(&mut self) -> Option<NonNull<FreeListNode>> {
+ // SAFETY: The `free_list` is guaranteed to be valid by the invariants of the buddy
+ // allocator. Retrieving the next pointer doesn't, on its own, break aliasing rules.
+ let next = unsafe { self.free_list.read().next };
+
+ // If the sentinel and the next pointer refer to the same spot, the free-list was empty, so
+ // we can't pop from it.
+ if self.free_list == next {
+ return None;
+ }
+
+ // Otherwise, remove the node from the free-list.
+ unsafe {
+ FreeListNode::remove(next);
+ }
+
+ // Finally, mark the node as allocated in the bitvec.
+ let index = self.index_of(next);
+ assert!(!self.bitset.get(self.bitset_offset + index));
+ self.bitset.set(self.bitset_offset + index, true);
+
+ Some(next)
+ }
+
+ /// Pushes a subregion back into the free-list.
+ ///
+ /// # Safety
+ ///
+ /// - There must be no live references to `subregion`.
+ /// - `subregion` must not be a member of any list.
+ pub unsafe fn push(&mut self, subregion: NonNull<FreeListNode>) {
+ // Insert the subregion as the first element of the free-list.
+ //
+ // SAFETY: The free-list is guaranteed to be valid by the invariants of the buddy
+ // allocator.
+ unsafe {
+ FreeListNode::insert(self.free_list, subregion);
+ }
+
+ // Mark the node as unallocated in the bitvec.
+ let index = self.index_of(subregion);
+ assert!(self.bitset.get(self.bitset_offset + index));
+ self.bitset.set(self.bitset_offset + index, false);
+ }
+
+ /// Pushes a subregion into the free-list for the first time.
+ ///
+ /// # Safety
+ ///
+ /// - There must be no live references to `subregion`.
+ /// - `subregion` must not be a member of any list.
+ pub unsafe fn push_initial(&mut self, subregion: NonNull<FreeListNode>) {
+ // Insert the subregion as the first element of the free-list.
+ //
+ // SAFETY: The free-list is guaranteed to be valid by the invariants of the buddy
+ // allocator.
+ unsafe {
+ FreeListNode::insert(self.free_list, subregion);
+ }
+
+ // Mark the node as unallocated in the bitvec.
+ let index = self.index_of(subregion);
+ assert!(!self.bitset.get(self.bitset_offset + index));
+ self.bitset.set(self.bitset_offset + index, false);
+ }
+}
+
+pub struct FreeListNode {
+ next: NonNull<FreeListNode>,
+ prev: NonNull<FreeListNode>,
+}
+
+impl FreeListNode {
+ /// Inserts `new` after `prev`, initializing it. `prev` may be the sentinel.
+ ///
+ /// # Safety
+ ///
+ /// - There must be no live references to any node in the list that includes `prev`, including
+ /// the sentinel.
+ /// - There must be no live references to `new`.
+ /// - `new` must not be a member of any list.
+ pub unsafe fn insert(prev: NonNull<FreeListNode>, new: NonNull<FreeListNode>) {
+ let next = prev.read().next;
+ (*prev.as_ptr()).next = new;
+ (*next.as_ptr()).prev = new;
+ new.write(FreeListNode { next, prev });
+ }
+
+ /// Removes this node from the free list it is a part of.
+ ///
+ /// # Safety
+ ///
+ /// - The pointer must point to a node that is part of a free list.
+ /// - There must be no live references to any node in the list, including the sentinel.
+ /// - The pointer must not point to the sentinel node.
+ pub unsafe fn remove(ptr: NonNull<FreeListNode>) {
+ let FreeListNode { next, prev } = ptr.read();
+ (*next.as_ptr()).prev = prev;
+ (*prev.as_ptr()).next = next;
+ }
+}
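
Editor's note: for concreteness, index_of above maps a subregion's address to a bit index by dividing its byte offset from the stripe's base by the subregion size, 2^(PAGE_SIZE_BITS + order). A small worked example outside the crate, under assumed values (4 KiB pages, hypothetical base address):

// Worked example of the index_of arithmetic.
fn index_of(base: usize, ptr: usize, order: usize, page_size_bits: usize) -> usize {
    (ptr - base) >> (page_size_bits + order)
}

fn main() {
    // 4 KiB pages, order 1 => 8 KiB subregions. The subregion at byte offset 0x4000
    // from the base is the third one, i.e. bit index 2 within the stripe.
    assert_eq!(index_of(0x8000_0000, 0x8000_4000, 1, 12), 2);
    // Its buddy is index 2 ^ 1 = 3, at byte offset 0x6000.
    assert_eq!(2 ^ 1, 3);
}
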
diff --git a/crates/buddy_allocator/src/tree.rs b/crates/buddy_allocator/src/tree.rs
new file mode 100644
index 0000000..a303d40
--- /dev/null
+++ b/crates/buddy_allocator/src/tree.rs
@@ -0,0 +1,130 @@
+/// A single region of the allocator. See the comment at the root of the crate for more
+/// information.
+///
+/// This type is valid when zero-initialized.
+#[derive(Debug)]
+pub struct Tree<'buddy, const PAGE_SIZE: usize, const PAGE_SIZE_BITS: usize> {
+ /// The base address of the tree.
+ pub base: *const (),
+
+ /// The log2 of the number of pages in the region represented by the tree.
+ pub log2_page_count: usize,
+
+ /// The offset in the bitset to the bits responsible for this tree's pages.
+ bitset_offset: usize,
+
+ // /// The bitset used to track whether subregions are allocated or not in this tree.
+ // pub bitset: &'buddy mut BitVec,
+ phantom: core::marker::PhantomData<&'buddy mut [u8; PAGE_SIZE]>,
+}
+
+/*
+use crate::{
+ bitvec::BitVec,
+ stripe::{FreeListNode, Stripe},
+ MAX_ORDER, PAGE_SIZE_BITS,
+};
+use contracts::requires;
+
+/// A single region of the allocator. See the comment on the `crate::allocators::buddy` module for
+/// more information.
+pub struct Tree<'buddy> {
+ /// The base address of the tree.
+ pub base: *const (),
+
+ /// The log2 of the number of pages in the region represented by the tree.
+ pub log2_page_count: usize,
+
+ /// The array of sentinel nodes of the free-list for this node. As an invariant of the type,
+ /// there are no live references to any node in any list in this array, and there are
+ /// MAX_ORDER + 1 nodes.
+ pub free_lists: NonNull<FreeListNode>,
+
+ /// The bitset used to track whether subregions are allocated or not in this tree.
+ pub bitset: &'buddy mut BitVec,
+}
+
+impl<'buddy> Tree<'buddy> {
+ /// Tries to allocate a subregion with the given order, possibly splitting a larger subregion
+ /// in order to do so.
+ #[requires(order <= MAX_ORDER)]
+ pub fn alloc(&mut self, order: usize) -> Option<NonNull<FreeListNode>> {
+ if let Some(ptr) = self.stripe(order).pop() {
+ Some(ptr)
+ } else if order == MAX_ORDER {
+ None
+ } else {
+ // Get a larger region.
+ let ptr = self.alloc(order + 1)?;
+
+ // Get a pointer to the higher half.
+ //
+ // SAFETY: This has to be in-bounds, it's part of the same allocation!
+ let higher_half = unsafe { ptr.byte_add(1 << (PAGE_SIZE_BITS + order)) };
+
+ // Put the higher half in the current buddy's stripe.
+ //
+ // SAFETY: The higher half is from this region, not in the higher stripe, and of the
+ // right size.
+ unsafe {
+ self.stripe(order).push(higher_half);
+ }
+
+ // Return the pointer.
+ Some(ptr)
+ }
+ }
+
+ /// Returns the stripe with the given order.
+ #[requires(order <= MAX_ORDER)]
+ fn stripe<'stripe>(&'stripe mut self, order: usize) -> Stripe<'stripe> {
+ // TODO: There should be some smart bitwise-math version of this...
+ let mut bitset_offset = 0;
+ for i in 0..order {
+ bitset_offset += (1 << self.log2_page_count) >> i;
+ }
+
+ Stripe {
+ base: self.base,
+ order,
+ // SAFETY: order is constrained to be in-bounds.
+ free_list: unsafe { self.free_lists.add(order) },
+ bitset: self.bitset,
+ bitset_offset,
+ }
+ }
+}
+
+/// Evil bitwise version of the reasonable loop to compute the `bitset_offset` of a stripe.
+#[requires(log2_page_count < usize::BITS as usize)]
+#[requires(order < usize::BITS as usize)]
+fn compute_bitset_offset(log2_page_count: usize, order: usize) -> usize {
+ let ones = |i: usize| !(usize::MAX << i);
+
+ if order > log2_page_count + 1 {
+ ones(log2_page_count + 1)
+ } else {
+ ones(order).wrapping_shl((log2_page_count + 1 - order) as u32)
+ }
+}
+
+#[test]
+fn compute_bitset_offset_test() {
+ fn compute_bitset_offset_loop(log2_page_count: usize, order: usize) -> usize {
+ let mut bitset_offset = 0;
+ for i in 0..order {
+ bitset_offset += (1 << log2_page_count) >> i;
+ }
+ bitset_offset
+ }
+
+ for log2_page_count in 0..64 {
+ for order in 0..64 {
+ assert_eq!(
+ compute_bitset_offset(log2_page_count, order),
+ compute_bitset_offset_loop(log2_page_count, order),
+ );
+ }
+ }
+}
+*/
diff --git a/crates/buddy_allocator/tests/hosted_test.rs b/crates/buddy_allocator/tests/hosted_test.rs
new file mode 100644
index 0000000..d13895c
--- /dev/null
+++ b/crates/buddy_allocator/tests/hosted_test.rs
@@ -0,0 +1,252 @@
+use core::{fmt, num::NonZero, slice};
+use nix::sys::mman::{mmap_anonymous, munmap, MapFlags, ProtFlags};
+use proptest::prelude::*;
+use vernos_buddy_allocator::BuddyAllocator;
+use vernos_physmem_free_list::FreeListAllocator;
+
+proptest! {
+ #![proptest_config(proptest::test_runner::Config {
+ failure_persistence: None,
+ ..Default::default()
+ })]
+
+ #[test]
+ fn test_scenario(scenario in Scenario::any()) {
+ scenario.run(true)
+ }
+}
+
+#[test]
+fn test_simple_scenario() {
+ let scenario = Scenario {
+ range_sizes: vec![4],
+ actions: vec![
+ Action::Debug,
+ Action::Alloc {
+ sentinel_value: 0xaa,
+ size_class: 0,
+ },
+ Action::Debug,
+ ],
+ };
+ scenario.run(false)
+}
+
+const PAGE_SIZE: usize = (size_of::<*const ()>() * 4).next_power_of_two();
+const PAGE_SIZE_BITS: usize = PAGE_SIZE.trailing_zeros() as usize;
+const SIZE_CLASS_COUNT: usize = 4;
+
+/// A single action the property test should perform.
+#[derive(Debug)]
+enum Action {
+ /// Makes an allocation.
+ Alloc {
+ /// A sentinel value, which is expected to be present when the allocation is freed.
+ sentinel_value: u8,
+
+ /// The size class, which is constrained to be less than `SIZE_CLASS_COUNT`.
+ size_class: usize,
+ },
+
+ /// Deallocates an allocation.
+ Dealloc {
+ /// The index of the allocation in the set of live allocations, which is taken to be a
+ /// circular list (so this is always in-bounds if there are any allocations).
+ index: usize,
+ },
+
+ /// Prints the allocator and all live allocations, for debugging purposes. This is never
+ /// automatically generated.
+ Debug,
+
+ /// Overwrites an allocation, changing its sentinel.
+ Overwrite {
+ /// The index of the allocation in the set of live allocations, which is taken to be a
+ /// circular list (so this is always in-bounds if there are any allocations).
+ index: usize,
+
+ /// The new sentinel value.
+ sentinel_value: u8,
+ },
+}
+
+impl Action {
+ /// Generates a random action.
+ pub fn any() -> impl Strategy<Value = Action> {
+ prop_oneof![
+ (any::<u8>(), 0..SIZE_CLASS_COUNT).prop_map(|(sentinel_value, size_class)| {
+ Action::Alloc {
+ sentinel_value,
+ size_class,
+ }
+ }),
+ any::<usize>().prop_map(|index| { Action::Dealloc { index } }),
+ (any::<usize>(), any::<u8>()).prop_map(|(index, sentinel_value)| {
+ Action::Overwrite {
+ index,
+ sentinel_value,
+ }
+ })
+ ]
+ }
+
+ /// Runs an action.
+ pub fn run(
+ &self,
+ buddy: &mut BuddyAllocator<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>,
+ allocs: &mut Vec<Alloc>,
+ allow_errors: bool,
+ ) {
+ match *self {
+ Action::Alloc {
+ sentinel_value,
+ size_class,
+ } => match buddy.alloc(size_class) {
+ Ok(ptr) => unsafe {
+ let slice = slice::from_raw_parts_mut(ptr.as_ptr(), PAGE_SIZE << size_class);
+ slice.fill(sentinel_value);
+ allocs.push(Alloc {
+ slice,
+ sentinel_value,
+ });
+ },
+ Err(err) => {
+ if !allow_errors {
+ Err(err).unwrap()
+ }
+ }
+ },
+ Action::Dealloc { index } => {
+ if allow_errors && allocs.is_empty() {
+ return;
+ }
+
+ let index = index % allocs.len();
+ let alloc = allocs.remove(index);
+ alloc.check_sentinel();
+ todo!("{alloc:?}")
+ }
+ Action::Debug => {
+ dbg!(buddy);
+ dbg!(allocs);
+ }
+ Action::Overwrite {
+ index,
+ sentinel_value,
+ } => {
+ if allow_errors && allocs.is_empty() {
+ return;
+ }
+
+ let index = index % allocs.len();
+ let alloc = &mut allocs[index];
+ alloc.slice.fill(sentinel_value);
+ alloc.sentinel_value = sentinel_value;
+ }
+ }
+ }
+}
+
+/// The entire series of actions to be performed.
+#[derive(Debug)]
+struct Scenario {
+ /// The sizes of the ranges the buddy allocator should be initialized with.
+ range_sizes: Vec<usize>,
+
+ /// Actions to perform after initializing the buddy allocator.
+ actions: Vec<Action>,
+}
+
+impl Scenario {
+ /// Generates a random scenario.
+ pub fn any() -> impl Strategy<Value = Scenario> {
+ (
+ prop::collection::vec(1usize..1 << (SIZE_CLASS_COUNT + 2), 0..4),
+ prop::collection::vec(Action::any(), 0..64),
+ )
+ .prop_map(|(range_sizes, actions)| Scenario {
+ range_sizes,
+ actions,
+ })
+ }
+
+ /// Runs the scenario.
+ pub fn run(&self, allow_errors: bool) {
+ // Allocate each of the page ranges.
+ let backing_mem = self
+ .range_sizes
+ .iter()
+ .map(|&size| unsafe {
+ mmap_anonymous(
+ None,
+ NonZero::new(size * PAGE_SIZE).unwrap(),
+ ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+ MapFlags::MAP_PRIVATE,
+ )
+ .expect("failed to allocate memory")
+ })
+ .collect::<Vec<_>>();
+
+ // Create the free list allocator and move the pages there.
+ let mut free_list: FreeListAllocator<PAGE_SIZE> = FreeListAllocator::new();
+ for (&size, &ptr) in self.range_sizes.iter().zip(backing_mem.iter()) {
+ unsafe {
+ free_list.add(ptr.cast(), size);
+ }
+ }
+
+ // Create the buddy allocator and move the pages from the free list to there.
+ match BuddyAllocator::<PAGE_SIZE, PAGE_SIZE_BITS, SIZE_CLASS_COUNT>::new(free_list) {
+ Ok(mut buddy) => {
+ let mut allocs = Vec::new();
+
+ // Run each of the actions.
+ for action in &self.actions {
+ action.run(&mut buddy, &mut allocs, allow_errors);
+
+ // Check each allocation.
+ for alloc in &allocs {
+ alloc.check_sentinel();
+ }
+ }
+ }
+ Err(err) => {
+ if !allow_errors {
+ Err(err).unwrap()
+ }
+ }
+ }
+
+ // Free each of the page ranges.
+ for (&size, ptr) in self.range_sizes.iter().zip(backing_mem) {
+ unsafe {
+ munmap(ptr, size * PAGE_SIZE).expect("failed to free memory");
+ }
+ }
+ }
+}
+
+/// Information about an allocation we've made from the buddy allocator.
+struct Alloc<'alloc> {
+ slice: &'alloc mut [u8],
+ sentinel_value: u8,
+}
+
+impl<'alloc> Alloc<'alloc> {
+ pub fn check_sentinel(&self) {
+ let s = self.sentinel_value;
+ for (i, &b) in self.slice.iter().enumerate() {
+ assert_eq!(b, s, "at index {i}, {b} != {s}",);
+ }
+ }
+}
+
+impl<'alloc> fmt::Debug for Alloc<'alloc> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Alloc")
+ .field("ptr", &self.slice.as_ptr())
+ .field("len", &self.slice.len())
+ .field("sentinel_value", &self.sentinel_value)
+ .finish()
+ }
+}
diff --git a/crates/physmem_free_list/Cargo.toml b/crates/physmem_free_list/Cargo.toml
new file mode 100644
index 0000000..c1aadd8
--- /dev/null
+++ b/crates/physmem_free_list/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "vernos_physmem_free_list"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+allocator-api2 = { version = "0.2.18", default-features = false }
+contracts = { version = "0.6.3", default-features = false }
diff --git a/crates/physmem_free_list/src/lib.rs b/crates/physmem_free_list/src/lib.rs
new file mode 100644
index 0000000..cb28362
--- /dev/null
+++ b/crates/physmem_free_list/src/lib.rs
@@ -0,0 +1,378 @@
+//! A free-list allocator for physical memory ranges. This is only used when bootstrapping the
+//! buddy allocator, since it provides a way to store and manipulate physical memory ranges without
+//! needing a working allocator.
+#![no_std]
+
+use allocator_api2::alloc::{AllocError, Layout};
+use contracts::requires;
+use core::{fmt, iter, marker::PhantomData, mem::size_of, ptr::NonNull, slice};
+
+/// The free-list allocator.
+pub struct FreeListAllocator<'allocator, const PAGE_SIZE: usize> {
+ head: Option<FreeListPtr<'allocator, PAGE_SIZE>>,
+}
+
+impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE> {
+ /// Creates a new, empty allocator.
+ pub fn new() -> FreeListAllocator<'allocator, PAGE_SIZE> {
+ assert!(size_of::<FreeListNodeHeader<'allocator, PAGE_SIZE>>() <= PAGE_SIZE);
+
+ FreeListAllocator { head: None }
+ }
+
+ /// Pushes a page range into the free list.
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must be a pointer that conveys ownership over the page range.
+ /// - The page range must be untyped memory.
+ /// - The page range must be aligned to `PAGE_SIZE`.
+ /// - The page range must live for `'allocator`.
+ /// - `len_pages` must be accurate.
+ pub unsafe fn add(&mut self, ptr: NonNull<[u8; PAGE_SIZE]>, len_pages: usize) {
+ self.head = Some(FreeListPtr::new(ptr, len_pages, self.head.take()));
+ }
+
+ /// Allocates memory. This is like `Allocator::allocate`, but it takes `&mut self`.
+ pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ assert!(PAGE_SIZE.is_power_of_two());
+ let len_pages = (layout.size() + PAGE_SIZE - 1) >> PAGE_SIZE.trailing_zeros();
+
+ // Find the size of the smallest page range that would fit this.
+ let Some(selected_node_len) = self
+ .iter()
+ .map(|(_, range_len_pages)| range_len_pages)
+ .filter(|&range_len_pages| len_pages <= range_len_pages)
+ .min()
+ else {
+ return Err(AllocError);
+ };
+
+ // If the node we're looking for is the first one, and we're not planning on splitting it,
+ // we have a special case.
+ //
+ // UNWRAP: The list can't have been empty or .min() above would've returned None.
+ if self
+ .head
+ .as_ref()
+ .map(|node| node.len_pages() == len_pages)
+ .unwrap()
+ {
+ // In that case, just unlink the node and hand it back.
+ //
+ // UNWRAP: The list can't have been empty or .min() above would've returned None.
+ let node = self.head.take().unwrap();
+ let (ptr, header) = node.consume();
+ self.head = header.next;
+ Ok(NonNull::slice_from_raw_parts(
+ ptr.cast(),
+ header.len_pages * PAGE_SIZE,
+ ))
+ } else {
+ // Otherwise, we want to get a node that's directly before a node that has exactly the
+ // right length.
+ let node_before_node_to_remove = if selected_node_len == len_pages {
+ // If the node we're looking for has the same length as the allocation, we just
+ // need to iterate through the list to find it. The special case where it's the
+ // first element was already handled above.
+ let mut next = self.head.as_mut();
+ let mut node_before_node_to_remove = None;
+ while let Some(ptr) = next {
+ if let Some(next_header) = &ptr.header().next {
+ if next_header.len_pages() == selected_node_len {
+ node_before_node_to_remove = Some(ptr);
+ break;
+ }
+ }
+ next = ptr.next_mut();
+ }
+
+ // UNWRAP: We had to find the node in the first place.
+ node_before_node_to_remove.unwrap()
+ } else {
+ // In the normal case, we'll split the node. First, we iterate to the node we
+ // selected.
+ let mut next = self.head.as_mut();
+ let mut node_to_split = None;
+ while let Some(ptr) = next {
+ if ptr.len_pages() == selected_node_len {
+ node_to_split = Some(ptr);
+ break;
+ }
+ next = ptr.next_mut();
+ }
+
+ // UNWRAP: We had to find the node in the first place.
+ let node_to_split = node_to_split.unwrap();
+
+ // Split the node. The part we're going to remove gets inserted after `node_to_split`,
+ // so we can just return the node after this.
+ node_to_split.split(len_pages);
+ node_to_split
+ };
+
+ // Unlink the next node.
+ //
+ // UNWRAP: The next node must exist because of the above.
+ let (ptr, unlinked_len_pages) = node_before_node_to_remove.unlink_next().unwrap();
+ assert_eq!(len_pages, unlinked_len_pages);
+ Ok(NonNull::slice_from_raw_parts(
+ ptr.cast(),
+ len_pages * PAGE_SIZE,
+ ))
+ }
+ }
+
+ /// Allocates zeroed-out memory.
+ pub fn allocate_zeroed(&mut self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ let ptr = self.allocate(layout)?;
+ // SAFETY: The pointer has to have a valid size.
+ unsafe {
+ ptr.cast::<u8>().write_bytes(0, ptr.len());
+ }
+ Ok(ptr)
+ }
+
+ /// Returns an iterator over the page ranges in the free list, as pairs of `ptr` and
+ /// `len_pages`. The returned pointers should not be accessed.
+ pub fn iter(&self) -> impl '_ + Iterator<Item = (NonNull<[u8; PAGE_SIZE]>, usize)> {
+ let mut next: Option<&_> = self.head.as_ref();
+ iter::from_fn(move || match next {
+ Some(ptr) => {
+ let out = (ptr.ptr(), ptr.len_pages());
+ next = ptr.next();
+ Some(out)
+ }
+ None => None,
+ })
+ }
+
+ /// Splits all the nodes until each page range is of a power-of-two size.
+ pub fn split_to_powers_of_two(&mut self) {
+ let mut next = self.head.as_mut();
+ while let Some(ptr) = next {
+ // Compute the size the page range would have if it were
+ // let log2_smaller_size = ptr.len_pages().trailing_zeros();
+ // let smaller_size = 1 << log2_smaller_size;
+
+ next = ptr.next_mut();
+ }
+ }
+}
+
+impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListAllocator<'allocator, PAGE_SIZE> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ let mut next: Option<&_> = self.head.as_ref();
+ fmt.debug_list()
+ .entries(iter::from_fn(|| match next {
+ Some(ptr) => {
+ next = ptr.next();
+ Some(ptr)
+ }
+ None => None,
+ }))
+ .finish()
+ }
+}
+
+/// A pointer to a page range, which starts with a header in the first page.
+///
+/// # Safety
+///
+/// - `ptr` must be a pointer that conveys ownership over the page range.
+/// - The page range must be untyped memory.
+/// - The page range must be non-empty.
+/// - The first page must have a correct header written to it.
+/// - The page range must live for `'allocator`.
+struct FreeListPtr<'allocator, const PAGE_SIZE: usize> {
+ ptr: NonNull<FreeListNodeHeader<'allocator, PAGE_SIZE>>,
+ _phantom: PhantomData<&'allocator FreeListNodeHeader<'allocator, PAGE_SIZE>>,
+}
+
+impl<'allocator, const PAGE_SIZE: usize> FreeListPtr<'allocator, PAGE_SIZE> {
+ /// Initialize the given page range as a node in the free list.
+ ///
+ /// # Safety
+ ///
+ /// - `ptr` must be a pointer that conveys ownership over the page range.
+ /// - The page range must be untyped memory.
+ /// - The page range must be aligned to `PAGE_SIZE`.
+ /// - The page range must live for `'allocator`.
+ /// - `len_pages` must be accurate.
+ #[requires(len_pages > 0)]
+ pub unsafe fn new(
+ ptr: NonNull<[u8; PAGE_SIZE]>,
+ len_pages: usize,
+ next: Option<FreeListPtr<'allocator, PAGE_SIZE>>,
+ ) -> FreeListPtr<'allocator, PAGE_SIZE> {
+ assert!(size_of::<FreeListNodeHeader<'allocator, PAGE_SIZE>>() <= PAGE_SIZE);
+
+ let ptr: NonNull<FreeListNodeHeader<'allocator, PAGE_SIZE>> = ptr.cast();
+ // SAFETY: The safety invariants plus the assertion guarantee this write is valid.
+ unsafe {
+ ptr.write(FreeListNodeHeader { next, len_pages });
+ }
+ FreeListPtr {
+ ptr,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Consumes the node, returning the pointer and header.
+ pub fn consume(
+ self,
+ ) -> (
+ NonNull<[u8; PAGE_SIZE]>,
+ FreeListNodeHeader<'allocator, PAGE_SIZE>,
+ ) {
+ // SAFETY: The invariants of the type ensure that the header is present and owned by us.
+ let header = unsafe { self.ptr.read() };
+ (self.ptr.cast(), header)
+ }
+
+ /// Returns a reference to the header of the page range.
+ fn header(&self) -> &FreeListNodeHeader<'allocator, PAGE_SIZE> {
+ // SAFETY: The invariants of the type ensure that the header is present and owned by us.
+ unsafe { self.ptr.as_ref() }
+ }
+
+ /// Returns an exclusive reference to the header of the page range.
+ ///
+ /// # Safety
+ ///
+ /// The invariants of the type must be upheld.
+ unsafe fn header_mut(&mut self) -> &mut FreeListNodeHeader<'allocator, PAGE_SIZE> {
+ // SAFETY: The invariants of the type ensure that the header is present and owned by us.
+ unsafe { self.ptr.as_mut() }
+ }
+
+ /// Returns the length of the node in pages.
+ pub fn len_pages(&self) -> usize {
+ self.header().len_pages
+ }
+
+ /// Returns a shared reference to the next node in the free list. Returns `None` when there
+ /// are no further nodes.
+ pub fn next(&self) -> Option<&FreeListPtr<'allocator, PAGE_SIZE>> {
+ self.header().next.as_ref()
+ }
+
+ /// Returns an exclusive reference to the next node in the free list. Returns `None` when there
+ /// are no further nodes.
+ pub fn next_mut(&mut self) -> Option<&mut FreeListPtr<'allocator, PAGE_SIZE>> {
+ // SAFETY: We just lend out the next pointer, which can't violate our invariants.
+ let header = unsafe { self.header_mut() };
+ header.next.as_mut()
+ }
+
+ /// Returns the underlying pointer, which should not be accessed.
+ pub fn ptr(&self) -> NonNull<[u8; PAGE_SIZE]> {
+ self.ptr.cast()
+ }
+
+ /// Splits the page range in two, with the second having the given length in pages.
+ #[requires(split_len < self.len_pages())]
+ pub fn split(&mut self, split_len: usize) {
+ // Get the old header. After this point, `self.ptr` is considered uninitialized, since
+ // we've moved out of it, so we can't panic.
+ //
+ // SAFETY: The invariants of the type ensure that the header is present and owned by us.
+ let old_header = unsafe { self.ptr.read() };
+
+ // We call the two new nodes `a` and `b`.
+ let a_len = old_header.len_pages - split_len;
+ let b_len = split_len;
+ // SAFETY: The invariants of the type ensure this is still in-bounds, since
+ // the header must have been valid and `a_len <= old_header.len_pages`.
+ let b_ptr: NonNull<[u8; PAGE_SIZE]> = unsafe { self.ptr().add(a_len) };
+
+ // Construct a new node for `b`.
+ //
+ // SAFETY: The safety invariants are simply inherited from the memory being valid in the
+ // first place, except for the `len_pages` invariant. This is upheld because the length
+ // math is correct.
+ //
+ // The assertion panic in `FreeListPtr::new()` must be unreachable, or it would have been
+ // triggered when constructing this type in the first place.
+ let b = unsafe { FreeListPtr::new(b_ptr, b_len, old_header.next) };
+
+ // Reinitialize the header for `a`. After this point, `self.ptr` is considered initialized
+ // again, so we may panic again.
+ //
+ // SAFETY: The safety invariants are simply inherited from the memory being valid in the
+ // first place, except for the `len_pages` invariant. This is upheld because the length
+ // math is correct.
+ unsafe {
+ self.ptr.write(FreeListNodeHeader {
+ next: Some(b),
+ len_pages: a_len,
+ });
+ }
+ }
+
+ /// Unlinks and returns the _next_ node, returning its pointer and length.
+ pub fn unlink_next(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> {
+ // SAFETY: We uphold the invariants.
+ let header = unsafe { self.header_mut() };
+ let next = header.next.take()?;
+ let (next_ptr, next_header) = next.consume();
+ header.next = next_header.next;
+ Some((next_ptr, next_header.len_pages))
+ }
+}
+
+impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListPtr<'allocator, PAGE_SIZE> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "[{:p} × {}]", self.ptr, self.len_pages())
+ }
+}
+
+#[derive(Debug)]
+struct FreeListNodeHeader<'allocator, const PAGE_SIZE: usize> {
+ /// The next node in the linked list.
+ next: Option<FreeListPtr<'allocator, PAGE_SIZE>>,
+
+ /// The length of the allocation in pages.
+ len_pages: usize,
+}
+
+/// Returns an iterator over power-of-two-sized subslices of this slice, which are returned from
+/// smallest to largest.
+pub fn power_of_two_subslices_mut<T>(slice: &mut [T]) -> impl Iterator<Item = &mut [T]> {
+ // Decompose the slice to its raw parts. Yep, we're gonna be doing unsafe stuff.
+ let mut ptr = slice.as_mut_ptr();
+ let mut len = slice.len();
+
+ iter::from_fn(move || {
+ if len == 0 {
+ None
+ } else {
+ // Make the slice to return.
+ //
+ // SAFETY: The pointer and length must be valid, since we got them from the valid
+ // slice. Before returning `out`, we advance them, so that we never give out multiple
+ // exclusive references to the same subslice.
+ let out_len = 1 << len.trailing_zeros();
+ let out = unsafe { slice::from_raw_parts_mut(ptr, out_len) };
+
+ // Advance the pointer and length.
+ //
+ // SAFETY: out_len <= len, and we got len from an existing slice.
+ ptr = unsafe { ptr.add(out_len) };
+ len -= out_len;
+
+ Some(out)
+ }
+ })
+}
+
+#[test]
+fn power_of_two_subslices_mut_test() {
+ extern crate std;
+ use std::vec::Vec;
+
+ let mut data = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ let subslices = power_of_two_subslices_mut(&mut data).collect::<Vec<_>>();
+
+ assert_eq!(subslices, &[&[0] as &[_], &[1, 2, 3, 4, 5, 6, 7, 8]]);
+}
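
Editor's note: the allocate method above performs a best-fit search: it picks the smallest free page range that can hold the request, splitting it when it is larger than needed. A hedged, self-contained sketch of just the selection step, over plain usize lengths rather than the linked-list nodes:

// Best-fit selection over free-range lengths, mirroring the .filter(..).min() above.
fn best_fit(ranges_pages: &[usize], want_pages: usize) -> Option<usize> {
    ranges_pages
        .iter()
        .copied()
        .filter(|&len| want_pages <= len)
        .min()
}

fn main() {
    // With free ranges of 3, 8, and 4 pages, a 4-page request selects the 4-page range
    // exactly (no split), while a 5-page request selects the 8-page range and splits it.
    assert_eq!(best_fit(&[3, 8, 4], 4), Some(4));
    assert_eq!(best_fit(&[3, 8, 4], 5), Some(8));
    // A request larger than every range fails, which corresponds to the AllocError case.
    assert_eq!(best_fit(&[3, 8, 4], 9), None);
}
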
diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml
new file mode 100644
index 0000000..5b55a5c
--- /dev/null
+++ b/crates/utils/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "vernos_utils"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+contracts = { version = "0.6.3", default-features = false }
diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs
new file mode 100644
index 0000000..57143bc
--- /dev/null
+++ b/crates/utils/src/lib.rs
@@ -0,0 +1,3 @@
+//! Common utilities.
+
+pub mod pin;
diff --git a/crates/utils/src/pin.rs b/crates/utils/src/pin.rs
new file mode 100644
index 0000000..202904d
--- /dev/null
+++ b/crates/utils/src/pin.rs
@@ -0,0 +1,25 @@
+//! Utilities having to do with pinning. I sure hope these are sound!
+
+use contracts::requires;
+use core::pin::Pin;
+use core::ptr::NonNull;
+
+/// Iterates over projections of elements out of a pinned slice.
+pub fn pin_project_slice_mut_iter<T>(slice: Pin<&mut [T]>) -> impl Iterator<Item = Pin<&mut T>> {
+ let base_ptr: NonNull<T> = NonNull::from(slice.as_ref().get_ref()).cast();
+ (0..slice.len()).map(move |i| {
+ // SAFETY: This is in-bounds, since the original slice was valid. No other references to
+ // the data can exist, since we visit each index once and have an exclusive borrow on the
+ // slice.
+ let ptr = unsafe { base_ptr.add(i).as_mut() };
+ // SAFETY: This is not certainly sound; see https://github.com/rust-lang/rust/issues/104108
+ unsafe { Pin::new_unchecked(ptr) }
+ })
+}
+
+/// Projects a single element out of a pinned slice.
+#[requires(index < slice.len())]
+pub fn pin_project_slice_mut<T>(slice: Pin<&mut [T]>, index: usize) -> Pin<&mut T> {
+ // SAFETY: This is not certainly sound; see https://github.com/rust-lang/rust/issues/104108
+ unsafe { slice.map_unchecked_mut(|slice| &mut slice[index]) }
+}
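
Editor's note: as a hosted usage sketch (assuming vernos_utils is available as a dependency and a std target), the iterator above hands out one Pin<&mut T> per element of a pinned slice:

use core::pin::Pin;
use vernos_utils::pin::pin_project_slice_mut_iter;

fn main() {
    // Pin a boxed array and view it as a pinned slice.
    let mut data = Box::pin([1u32, 2, 3]);
    let slice: Pin<&mut [u32]> = data.as_mut();

    for (i, elem) in pin_project_slice_mut_iter(slice).enumerate() {
        // u32 is Unpin, so the pinned reference can be unwrapped and mutated freely.
        *Pin::into_inner(elem) += i as u32;
    }

    assert_eq!(*data, [1, 3, 5]);
}
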
diff --git a/flake.nix b/flake.nix
index 8d1be5f..5850e79 100644
--- a/flake.nix
+++ b/flake.nix
@@ -20,104 +20,112 @@
(
system:
let
- pkgsBuild = nixpkgs.legacyPackages.${system};
- pkgsHost = import nixpkgs {
- inherit system;
- crossSystem.config = "riscv64-unknown-none-elf";
- };
+ pkgs = nixpkgs.legacyPackages.${system};
fenix = fenix-flake.packages.${system};
- toolchain = fenix.combine [
+ rust-toolchain = fenix.combine [
fenix.stable.cargo
fenix.stable.rustc
fenix.stable.clippy
- fenix.targets.riscv64gc-unknown-none-elf.stable.rust-std
+ # fenix.targets.riscv64gc-unknown-none-elf.stable.rust-std
];
- rust = pkgsHost.makeRustPlatform {
- cargo = toolchain;
- rustc = toolchain;
- };
-
- packages = rec {
- boards = import ./boards {
- inherit libkernel;
- pkgs = pkgsHost;
- };
- libkernel = pkgsBuild.callPackage ./kernel { inherit rust; };
+ rust = pkgs.makeRustPlatform {
+ cargo = rust-toolchain;
+ rustc = rust-toolchain;
};
- run-vm = pkgsBuild.writeShellApplication {
- name = "run-vm";
-
- runtimeInputs = [ pkgsBuild.qemu ];
- text = ''
- set -x
- qemu-system-riscv64 \
- -machine virt \
- -m 1G \
- -nographic \
- -bios none \
- -kernel ${packages.boards.qemu-virt}/kernel.elf \
- "$@"
- '';
- };
+ packages = { };
in
- {
- apps = {
- default = {
- type = "app";
- program = pkgsBuild.lib.getExe run-vm;
+ /*
+ pkgsHost = import nixpkgs {
+ inherit system;
+ crossSystem.config = "riscv64-unknown-none-elf";
};
- run = {
- type = "app";
- program = pkgsBuild.lib.getExe run-vm;
+ packages = rec {
+ boards = import ./boards {
+ inherit libkernel;
+ pkgs = pkgsHost;
+ };
+ libkernel = pkgsBuild.callPackage ./kernel { inherit rust; };
};
+ run-vm = pkgsBuild.writeShellApplication {
+ name = "run-vm";
+
+ runtimeInputs = [ pkgsBuild.qemu ];
- debug = {
- type = "app";
- program = pkgsBuild.lib.getExe (
- pkgsBuild.writeShellApplication {
- name = "run-vm-debug";
- runtimeInputs = [ run-vm ];
- text = ''
- port=$(( 1000 * ("$(id -u)" - 990) ))
- run-vm -gdb tcp::$port -S "$@"
- '';
- }
- );
+ text = ''
+ set -x
+ qemu-system-riscv64 \
+ -machine virt \
+ -m 1G \
+ -nographic \
+ -bios none \
+ -kernel ${packages.boards.qemu-virt}/kernel.elf \
+ "$@"
+ '';
};
+ */
+ {
+ /*
+ apps = {
+ default = {
+ type = "app";
+ program = pkgsBuild.lib.getExe run-vm;
+ };
- gdb = {
- type = "app";
- program = pkgsBuild.lib.getExe (
- pkgsBuild.writeShellApplication {
- name = "gdb";
- runtimeInputs = [ toolchain ];
- text = ''
- port=$(( 1000 * ("$(id -u)" - 990) ))
- rust-gdb ${packages.boards.qemu-virt}/kernel.elf \
- -ex "target remote 127.0.0.1:$port" \
- -ex "set riscv use-compressed-breakpoints yes" \
- -ex "layout asm" \
- -ex "layout regs" \
- -ex "focus cmd" \
- -ex "tbreak hart0_boot" \
- -ex "c"
- '';
- }
- );
+ run = {
+ type = "app";
+ program = pkgsBuild.lib.getExe run-vm;
+ };
+
+ debug = {
+ type = "app";
+ program = pkgsBuild.lib.getExe (
+ pkgsBuild.writeShellApplication {
+ name = "run-vm-debug";
+ runtimeInputs = [ run-vm ];
+ text = ''
+ port=$(( 1000 * ("$(id -u)" - 990) ))
+ run-vm -gdb tcp::$port -S "$@"
+ '';
+ }
+ );
+ };
+
+ gdb = {
+ type = "app";
+ program = pkgsBuild.lib.getExe (
+ pkgsBuild.writeShellApplication {
+ name = "gdb";
+ runtimeInputs = [ toolchain ];
+ text = ''
+ port=$(( 1000 * ("$(id -u)" - 990) ))
+ rust-gdb ${packages.boards.qemu-virt}/kernel.elf \
+ -ex "target remote 127.0.0.1:$port" \
+ -ex "set riscv use-compressed-breakpoints yes" \
+ -ex "layout asm" \
+ -ex "layout regs" \
+ -ex "focus cmd" \
+ -ex "tbreak hart0_boot" \
+ -ex "c"
+ '';
+ }
+ );
+ };
};
- };
+ */
- devShells.default = pkgsBuild.mkShell {
+ devShells.default = pkgs.mkShell {
inputsFrom = builtins.attrValues (flake-utils.lib.flattenTree packages);
nativeBuildInputs = [
- pkgsBuild.cargo-watch
- pkgsBuild.qemu
- pkgsHost.stdenv.cc.bintools.bintools
+ (pkgs.callPackage ./nix/miri.nix { inherit fenix; })
+ pkgs.cargo-watch
+ pkgs.qemu
+ rust-toolchain
+ # pkgsHost.stdenv.cc.bintools.bintools
];
- CARGO_BUILD_TARGET = "riscv64gc-unknown-none-elf";
+ # CARGO_BUILD_TARGET = "riscv64gc-unknown-none-elf";
};
packages = flake-utils.lib.flattenTree packages;
diff --git a/nix/miri.nix b/nix/miri.nix
new file mode 100644
index 0000000..5a8c39d
--- /dev/null
+++ b/nix/miri.nix
@@ -0,0 +1,22 @@
+{ fenix, pkgs }:
+
+let
+ rust-nightly-toolchain = fenix.combine [
+ fenix.latest.cargo
+ fenix.latest.rustc
+ fenix.latest.clippy
+ fenix.latest.miri
+ ];
+in
+pkgs.writeShellApplication {
+ name = "cargo-miri";
+ runtimeInputs = [ rust-nightly-toolchain ];
+ text = ''
+ set -x
+ # https://github.com/proptest-rs/proptest/issues/253#issuecomment-1850534278
+ : "''${PROPTEST_DISABLE_FAILURE_PERSISTENCE:=true}"
+ : "''${MIRIFLAGS:=-Zmiri-env-forward=PROPTEST_DISABLE_FAILURE_PERSISTENCE}"
+ export PROPTEST_DISABLE_FAILURE_PERSISTENCE MIRIFLAGS
+ exec cargo "$@"
+ '';
+}