author    Nathan Ringo <nathan@remexre.com>    2024-09-12 18:14:13 -0500
committer Nathan Ringo <nathan@remexre.com>    2024-09-12 18:14:13 -0500
commit    ec991590e4e3b92e407060410ff33525dc740988 (patch)
tree      287c2fa46f9814ce804c2dde60aa37e5823762cb /crates/alloc_genmalloc/src
parent    128aa9bb0f0cd1410a30d45775e7627b01ed74a2 (diff)
Adds the start of a regular allocator and some page table maintenance.
Diffstat (limited to 'crates/alloc_genmalloc/src')
-rw-r--r--    crates/alloc_genmalloc/src/lib.rs    135
1 file changed, 135 insertions(+), 0 deletions(-)
diff --git a/crates/alloc_genmalloc/src/lib.rs b/crates/alloc_genmalloc/src/lib.rs
new file mode 100644
index 0000000..06bc667
--- /dev/null
+++ b/crates/alloc_genmalloc/src/lib.rs
@@ -0,0 +1,135 @@
+//! A general-purpose allocator, based on the design of mimalloc [0, 1].
+//!
+//! [0]: https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action/
+//! [1]: https://github.com/microsoft/mimalloc
+#![no_std]
+
+use allocator_api2::alloc::AllocError;
+use contracts::requires;
+use core::{
+    ptr::NonNull,
+    sync::atomic::{AtomicPtr, AtomicU16, Ordering},
+};
+
+/// The per-CPU structure.
+struct Heap {
+    /// Non-owning pointers directly to the page that each small size class
+    /// (<= 1024 bytes, at 8-byte granularity) currently allocates into.
+    pages_direct: [NonNull<Page>; 128],
+
+    /// The sentinel nodes of the circular doubly-linked page lists, one per size
+    /// class from 8 bytes up to and including 512KiB, plus one more for objects
+    /// over 512KiB.
+    pages: [PageLink; 18],
+}
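+
+// A sanity-check sketch (an illustration added here, assuming the `pages_direct`
+// layout above): the 8-byte-granularity index `(size - 1) >> 3` maps every small
+// size 1..=1024 into the 128 direct slots.
+#[test]
+fn pages_direct_index_stays_in_bounds() {
+    for size in 1..=1024usize {
+        assert!((size - 1) >> 3 < 128);
+    }
+    assert_eq!((1usize - 1) >> 3, 0); // smallest size, first slot
+    assert_eq!((1024usize - 1) >> 3, 127); // largest small size, last slot
+}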
+
+impl Heap {
+    #[requires(size > 0)]
+    #[requires(size <= 1024)]
+    fn alloc_small(&mut self, size: usize) -> Result<NonNull<u8>, AllocError> {
+        // Index at 8-byte granularity; `(size - 1) >> 3` maps 1..=1024 to
+        // 0..=127, whereas `(size + 7) >> 3` would index out of bounds at 1024.
+        let mut page = self.pages_direct[(size - 1) >> 3];
+        let page = unsafe { page.as_mut() };
+        match page.header.alloc_list {
+            Some(block) => {
+                // Pop the head of the allocation free list.
+                let next = unsafe { block.as_ref() }.next.load(Ordering::SeqCst);
+                page.header.alloc_list = NonNull::new(next);
+                page.header.used.fetch_add(1, Ordering::SeqCst);
+                Ok(block.cast())
+            }
+            // No free blocks in the direct page; take the slow path.
+            None => self.alloc_generic(size),
+        }
+    }
+
+    fn alloc_generic(&mut self, size: usize) -> Result<NonNull<u8>, AllocError> {
+        let size_class = size_class(size);
+        let sentinel: NonNull<PageLink> = NonNull::from(&self.pages[size_class]);
+        let mut page = sentinel;
+        loop {
+            page = unsafe { (*page.as_ptr()).next.unwrap() };
+            if page == sentinel {
+                // We walked the whole circular list; every page was full.
+                todo!("allocate a new page")
+            }
+            todo!("try allocating from {page:#?}")
+        }
+    }
+}
+
+/// Returns the index in the `pages` array for objects of the given size.
+fn size_class(size: usize) -> usize {
+    // Clamp the size to the range the bitwise trick works on: sizes below 8 bytes
+    // share class 0, and everything above 512KiB falls into the final class.
+    let size = size.clamp(8, 1024 * 1024);
+
+    // The class is the bit position of the size rounded up to a power of two,
+    // re-based so that 8 bytes is class 0.
+    (size.next_power_of_two().trailing_zeros() - 3) as usize
+}
+
+#[test]
+fn expected_size_class_values() {
+    assert_eq!(size_class(1), 0);
+    assert_eq!(size_class(8), 0);
+
+    let mut size = 8;
+    let mut expected = 0;
+    while size <= 512 * 1024 {
+        assert_eq!(size_class(size), expected);
+        size <<= 1;
+        expected += 1;
+    }
+
+    assert_eq!(size_class(512 * 1024), 16);
+    assert_eq!(size_class((512 * 1024) + 1), 17);
+    assert_eq!(size_class(4 * 1024 * 1024), 17);
+}
+
+/// A page is a block of memory that contains objects of a single size class.
+#[repr(C)]
+struct Page {
+    /// The intrusive link in the circular doubly-linked list of pages of each size class.
+    link: PageLink,
+
+    /// The fields other than the intrusive link in a page's header.
+    header: PageHeader,
+}
+
+/// The intrusive link in the circular doubly-linked list of pages of each size class.
+struct PageLink {
+    /// The previous page.
+    prev: Option<NonNull<PageLink>>,
+
+    /// The next page.
+    next: Option<NonNull<PageLink>>,
+}
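+
+impl PageLink {
+    /// A hypothetical helper (illustration only, not something this commit adds):
+    /// an empty size class is represented by a sentinel whose `prev` and `next`
+    /// both point back at the sentinel itself.
+    #[allow(dead_code)]
+    fn link_to_self(&mut self) {
+        let this = Some(NonNull::from(&mut *self));
+        self.prev = this;
+        self.next = this;
+    }
+}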
+
+/// The fields other than the intrusive link in a page's header.
+struct PageHeader {
+    /// The free list that the owning thread pops from.
+    alloc_list: Option<NonNull<Block>>,
+
+    /// The free list that the owning thread pushes onto.
+    local_free_list: Option<NonNull<Block>>,
+
+    /// The free list that other threads push onto.
+    thread_free_list: AtomicPtr<Block>,
+
+    /// The number of objects in the page that have been allocated.
+    used: AtomicU16,
+
+    /// The number of objects in the thread free list.
+    thread_freed: AtomicU16,
+}
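+
+impl PageHeader {
+    /// A sketch of the owning thread's free path, following the mimalloc paper
+    /// (an assumption; this commit does not implement freeing yet): the owner
+    /// pushes freed blocks onto `local_free_list`, which no other thread touches.
+    #[allow(dead_code)]
+    fn free_local(&mut self, mut block: NonNull<Block>) {
+        let old_head = self
+            .local_free_list
+            .map_or(core::ptr::null_mut(), NonNull::as_ptr);
+        // Only the owning thread reads this list, so a plain store is enough.
+        *unsafe { block.as_mut() }.next.get_mut() = old_head;
+        self.local_free_list = Some(block);
+        self.used.fetch_sub(1, Ordering::SeqCst);
+    }
+}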
+
+/// An object in a free list.
+struct Block {
+    /// The next free block, or null at the end of the list.
+    next: AtomicPtr<Block>,
+}
+
+/// Pushes a block onto an atomic singly-linked free list; this is how other
+/// threads add to a page's `thread_free_list`.
+fn atomic_push(node: &mut Block, list: &AtomicPtr<Block>) {
+    loop {
+        // Point the node at the current head, then try to swing the head over to
+        // the node, retrying if another thread pushed in the meantime.
+        let old_list = list.load(Ordering::Acquire);
+        *node.next.get_mut() = old_list;
+        if list
+            .compare_exchange(old_list, node, Ordering::Release, Ordering::Acquire)
+            .is_ok()
+        {
+            break;
+        }
+    }
+}
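+
+// The owner-side counterpart to `atomic_push` could look like the sketch below
+// (an assumption based on the mimalloc paper's free-list sharding; this commit
+// does not include it): the owning thread claims every remotely-freed block in a
+// single swap.
+#[allow(dead_code)]
+fn atomic_take_all(list: &AtomicPtr<Block>) -> Option<NonNull<Block>> {
+    // Swapping in null empties the list; Acquire pairs with the Release in
+    // `atomic_push`, making the pushed blocks' contents visible here.
+    NonNull::new(list.swap(core::ptr::null_mut(), Ordering::Acquire))
+}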