//! A general-purpose allocator, based on the design of mimalloc [0, 1].
//!
//! [0]: https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action/
//! [1]: https://github.com/microsoft/mimalloc
#![no_std]

use allocator_api2::alloc::AllocError;
use contracts::requires;
use core::{
    ptr::NonNull,
    sync::atomic::{AtomicPtr, AtomicU16, Ordering},
};

/// The per-CPU structure.
struct Heap {
    /// Direct pointers to the pages that small objects (<= 1024 bytes) are allocated into.
    /// These are non-owning pointers.
    pages_direct: [NonNull<Page>; 128],

    /// The sentinel nodes for the circular doubly-linked lists that compose the size classes,
    /// up to and including 512KiB, plus one more for objects over 512KiB.
    pages: [PageLink; 18],
}

impl Heap {
    #[requires(size > 0)]
    #[requires(size <= 1024)]
    fn alloc_small(&mut self, size: usize) -> Result<NonNull<u8>, AllocError> {
        // `size` is in 1..=1024 (guaranteed by the contracts above), so this
        // index is in 0..=127.
        let mut page = self.pages_direct[(size - 1) >> 3];
        let page = unsafe { page.as_mut() };
        match page.header.alloc_list {
            Some(block) => {
                // Pop the head of the page's local allocation free list.
                let next = unsafe { block.as_ref() }.next.load(Ordering::SeqCst);
                page.header.alloc_list = NonNull::new(next);
                page.header.used.fetch_add(1, Ordering::SeqCst);
                Ok(block.cast())
            }
            None => self.alloc_generic(size),
        }
    }

    fn alloc_generic(&mut self, size: usize) -> Result<NonNull<u8>, AllocError> {
        let size_class = size_class(size);
        let sentinel: NonNull<PageLink> = NonNull::from(&self.pages[size_class]);

        // Walk the circular list of pages in this size class, looking for one
        // that can satisfy the allocation, until we wrap back to the sentinel.
        let mut page = sentinel;
        loop {
            page = unsafe { (*page.as_ptr()).next.unwrap() };
            if page == sentinel {
                break;
            }
            todo!("{page:#?}")
        }

        // Every page in the size class was full; a fresh page is needed here.
        todo!()
    }
}

/// Returns the index in the `pages` array for objects of the given size.
fn size_class(size: usize) -> usize {
    // Clamp the size to the range the bitwise trick works on: 8 bytes up to
    // 1MiB, so that every size over 512KiB lands in the final catch-all class.
    let size = size.clamp(8, 1024 * 1024);
    // log2 of the next power of two, rebased so the 8-byte class is index 0.
    (size.next_power_of_two().trailing_zeros() - 3) as usize
}

#[test]
fn expected_size_class_values() {
    assert_eq!(size_class(1), 0);
    assert_eq!(size_class(8), 0);

    // Each doubling from 8 bytes up to 512KiB gets its own class.
    let mut size = 8;
    let mut expected = 0;
    while size <= 512 * 1024 {
        assert_eq!(size_class(size), expected);
        size <<= 1;
        expected += 1;
    }

    // Everything larger than 512KiB shares the final class.
    assert_eq!(size_class(512 * 1024), 16);
    assert_eq!(size_class((512 * 1024) + 1), 17);
    assert_eq!(size_class(4 * 1024 * 1024), 17);
}
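
// A small sanity check added for illustration (not part of the original file):
// the direct-page index computed in `alloc_small` must stay within the
// 128-entry `pages_direct` table for every small size.
#[test]
fn small_sizes_index_pages_direct_in_bounds() {
    for size in 1..=1024usize {
        let index = (size - 1) >> 3;
        assert!(index < 128);
        // Each 8-byte bucket covers sizes 8k+1 through 8k+8.
        assert_eq!(index, (size + 7) / 8 - 1);
    }
}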

/// A page is a block of memory that contains objects of a single size class.
#[repr(C)]
struct Page {
    /// The intrusive link in the circular doubly-linked list of pages of each size class.
    link: PageLink,

    /// The fields other than the intrusive link in a page's header.
    header: PageHeader,
}

/// The intrusive link in the circular doubly-linked list of pages of each size class.
struct PageLink {
    /// The previous page.
    prev: Option<NonNull<PageLink>>,

    /// The next page.
    next: Option<NonNull<PageLink>>,
}

/// The fields other than the intrusive link in a page's header.
struct PageHeader {
    /// The free list that the owning thread pops from.
    alloc_list: Option<NonNull<Block>>,

    /// The free list that the owning thread pushes onto.
    local_free_list: Option<NonNull<Block>>,

    /// The free list that other threads push onto.
    thread_free_list: AtomicPtr<Block>,

    /// The number of objects in the page that have been allocated.
    used: AtomicU16,

    /// The number of objects in the thread free list.
    thread_freed: AtomicU16,
}

/// An object in a free list.
struct Block {
    next: AtomicPtr<Block>,
}

/// Pushes a block onto an atomic singly-linked free list with a classic
/// compare-and-swap loop: read the current head, point the new node at it,
/// then try to install the node as the new head, retrying on contention.
fn atomic_push(node: &mut Block, list: &AtomicPtr<Block>) {
    loop {
        let old_list = list.load(Ordering::Acquire);
        // We have exclusive access to `node`, so a non-atomic store suffices.
        *node.next.get_mut() = old_list;
        if list
            .compare_exchange(old_list, node as *mut Block, Ordering::Release, Ordering::Acquire)
            .is_ok()
        {
            break;
        }
    }
}
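
// A minimal illustrative test (an addition, not from the original file): two
// pushes onto an empty list should produce a LIFO list whose head is the most
// recently pushed block.
#[test]
fn atomic_push_is_lifo() {
    use core::ptr;

    let list = AtomicPtr::new(ptr::null_mut());
    let mut a = Block { next: AtomicPtr::new(ptr::null_mut()) };
    let mut b = Block { next: AtomicPtr::new(ptr::null_mut()) };
    let a_ptr: *mut Block = &mut a;
    let b_ptr: *mut Block = &mut b;

    unsafe { atomic_push(&mut *a_ptr, &list) };
    unsafe { atomic_push(&mut *b_ptr, &list) };

    // Head is `b`; `b.next` is `a`; `a.next` is null, terminating the list.
    let head = list.load(Ordering::Acquire);
    assert_eq!(head, b_ptr);
    let next = unsafe { (*head).next.load(Ordering::Acquire) };
    assert_eq!(next, a_ptr);
    assert!(unsafe { (*next).next.load(Ordering::Acquire) }.is_null());
}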