//! A general-purpose allocator, based on the design of mimalloc [0, 1].
//!
//! [0]: https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action/
//! [1]: https://github.com/microsoft/mimalloc
#![no_std]

use allocator_api2::alloc::AllocError;
use contracts::requires;
use core::{
    ptr::NonNull,
    sync::atomic::{AtomicPtr, AtomicU16, Ordering},
};

/// The per-CPU structure.
struct Heap {
    /// Small objects (<=1024 bytes) have pointers directly to the page they allocate into. These
    /// are non-owning pointers.
    pages_direct: [NonNull<Page>; 128],

    /// The sentinel nodes for the circular doubly-linked lists that compose the size classes, up
    /// to and including 512KiB, plus one more for objects over 512KiB.
    pages: [PageLink; 18],
}

impl Heap {
    #[requires(size > 0)]
    #[requires(size <= 1024)]
    fn alloc_small(&mut self, size: usize) -> Result<NonNull<u8>, AllocError> {
        // Sizes 1..=1024 map to indices 0..=127, so every size the contract
        // admits stays in bounds. (The `(size + 7) >> 3` form would index 128
        // for size 1024, one past the end of `pages_direct`.)
        let mut page = self.pages_direct[(size - 1) >> 3];
        let page = unsafe { page.as_mut() };
        match page.header.alloc_list {
            Some(block) => {
                // Pop the head of the allocation free list.
                let next = unsafe { block.as_ref() }.next.load(Ordering::SeqCst);
                page.header.alloc_list = NonNull::new(next);
                page.header.used.fetch_add(1, Ordering::SeqCst);
                Ok(block.cast())
            }
            None => self.alloc_generic(size),
        }
    }

    fn alloc_generic(&mut self, size: usize) -> Result<NonNull<u8>, AllocError> {
        let size_class = size_class(size);
        let sentinel: NonNull<PageLink> = NonNull::from(&self.pages[size_class]);
        let mut page = sentinel;
        loop {
            page = unsafe { (*page.as_ptr()).next.unwrap() };
            // TODO: Walk the list to find a page with free blocks, or allocate
            // a new page for this size class.
            todo!("{page:#?}")
        }
    }
}

/// Returns the index in the `pages` array of objects with the given size.
fn size_class(size: usize) -> usize {
    // Clamp the size to the range the bitwise trick will work on.
    let size = size.clamp(8, 1024 * 1024);

    // Figure out the size class: round up to the next power of two and take
    // its log2, offset so that a size of 8 maps to class 0.
    (size.next_power_of_two().trailing_zeros() - 3) as usize
}

#[test]
fn expected_size_class_values() {
    assert_eq!(size_class(1), 0);
    assert_eq!(size_class(8), 0);

    let mut size = 8;
    let mut expected = 0;
    while size <= 512 * 1024 {
        assert_eq!(size_class(size), expected);
        size <<= 1;
        expected += 1;
    }

    assert_eq!(size_class(512 * 1024), 16);
    assert_eq!(size_class((512 * 1024) + 1), 17);
    assert_eq!(size_class(4 * 1024 * 1024), 17);
}

/// A page is a block of memory that contains objects of a single size class.
#[repr(C)]
struct Page {
    /// The intrusive link in the circular doubly-linked list of pages of each size class.
    link: PageLink,

    /// The fields other than the intrusive link in a page's header.
    header: PageHeader,
}

/// The intrusive link in the circular doubly-linked list of pages of each size class.
struct PageLink {
    /// The previous page.
    prev: Option<NonNull<PageLink>>,

    /// The next page.
    next: Option<NonNull<PageLink>>,
}

/// The fields other than the intrusive link in a page's header.
struct PageHeader {
    /// The free list that the owning thread pops from.
    alloc_list: Option<NonNull<Block>>,

    /// The free list that the owning thread pushes onto.
    local_free_list: Option<NonNull<Block>>,

    /// The free list that other threads push onto.
    thread_free_list: AtomicPtr<Block>,

    /// The number of objects in the page that have been allocated.
    used: AtomicU16,

    /// The number of objects in the thread free list.
    thread_freed: AtomicU16,
}

/// An object in a free list.
struct Block {
    next: AtomicPtr<Block>,
}

/// Pushes a block onto an atomic (Treiber-style) free list.
fn atomic_push(node: &mut Block, list: &AtomicPtr<Block>) {
    loop {
        // Point the node at the current head, then try to swing the head to
        // the node; retry if another thread won the race.
        let old_list = list.load(Ordering::Acquire);
        *node.next.get_mut() = old_list;
        if list
            .compare_exchange(old_list, node, Ordering::Release, Ordering::Acquire)
            .is_ok()
        {
            break;
        }
    }
}
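
// A minimal sketch of a test for the `pages_direct` indexing used in
// `alloc_small`; the `index` closure mirrors the inline expression there and
// is an illustrative assumption, not part of the original design.
#[test]
fn pages_direct_index_covers_small_sizes() {
    let index = |size: usize| (size - 1) >> 3;
    assert_eq!(index(1), 0);
    assert_eq!(index(8), 0);
    assert_eq!(index(9), 1);
    // 1024 is the largest size `alloc_small` accepts, and must map to the
    // last valid index of `[NonNull<Page>; 128]`.
    assert_eq!(index(1024), 127);
}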
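
// A minimal single-threaded sketch of a test for `atomic_push`, checking the
// LIFO behavior of the list; the raw-pointer bookkeeping is illustrative and
// assumes the blocks outlive the list.
#[test]
fn atomic_push_builds_lifo_list() {
    use core::ptr;

    let list = AtomicPtr::new(ptr::null_mut());
    let mut a = Block { next: AtomicPtr::new(ptr::null_mut()) };
    let mut b = Block { next: AtomicPtr::new(ptr::null_mut()) };
    let pa: *mut Block = &mut a;
    let pb: *mut Block = &mut b;

    atomic_push(unsafe { &mut *pa }, &list);
    atomic_push(unsafe { &mut *pb }, &list);

    // The most recently pushed block is the head, and it links to the first.
    let head = list.load(Ordering::Acquire);
    assert_eq!(head, pb);
    assert_eq!(unsafe { (*head).next.load(Ordering::Acquire) }, pa);
    assert!(unsafe { (*pa).next.load(Ordering::Acquire) }.is_null());
}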