summaryrefslogtreecommitdiff
path: root/crates/alloc_physmem_free_list/src/lib.rs
blob: 05406726185ffe4703c541e55ee14e151c916279 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
//! A free-list allocator for physical memory ranges. This is only used when bootstrapping the
//! buddy allocator, since it provides a way to store and manipulate physical memory ranges without
//! needing a working allocator.
#![no_std]

use allocator_api2::alloc::{AllocError, Layout};
use contracts::requires;
use core::{fmt, iter, marker::PhantomData, mem::size_of, ptr::NonNull, slice};

/// The free-list allocator.
///
/// Holds a singly-linked list of free page ranges. Each node's metadata is written into the
/// first page of its own range (see [`FreeListPtr::new`]), so no backing allocator is needed.
pub struct FreeListAllocator<'allocator, const PAGE_SIZE: usize> {
    /// The first node of the free list, or `None` if the list is empty.
    head: Option<FreeListPtr<'allocator, PAGE_SIZE>>,
}

impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE> {
    /// Creates a new, empty allocator.
    pub fn new() -> FreeListAllocator<'allocator, PAGE_SIZE> {
        // Each node's header is stored in the first page of its range, so the header must fit in
        // a single page.
        assert!(size_of::<FreeListNodeHeader<'allocator, PAGE_SIZE>>() <= PAGE_SIZE);

        FreeListAllocator { head: None }
    }

    /// Pushes a page range into the free list.
    ///
    /// # Safety
    ///
    /// - `ptr` must be a pointer that conveys ownership over the page range.
    /// - The page range must be untyped memory.
    /// - The page range must be aligned to `PAGE_SIZE`.
    /// - The page range must live for `'allocator`.
    /// - `len_pages` must be accurate.
    pub unsafe fn add(&mut self, ptr: NonNull<[u8; PAGE_SIZE]>, len_pages: usize) {
        // New ranges are pushed onto the front of the list; no ordering is maintained.
        self.head = Some(FreeListPtr::new(ptr, len_pages, self.head.take()));
    }

    /// Allocates memory, *without necessarily removing it from the allocator*.
    ///
    /// The memory will only be removed from the allocator if there is a node from which no
    /// additional pages would be able to be allocated into. If the node is not removed, the
    /// returned pointer will not point into the first page, so it will not be altered by metadata
    /// updates.
    ///
    /// Obviously, this is unsound to call while the previous allocation is alive. It is also
    /// unsound to split any nodes after calling this, since splitting the node that was allocated
    /// into might write the new node's metadata into that allocation.
    ///
    /// # Safety
    ///
    /// - Only one allocation returned by this method may be live.
    pub unsafe fn allocate_without_always_removing_the_node(
        &mut self,
        layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        assert!(PAGE_SIZE.is_power_of_two());
        // Round the requested size up to a whole number of pages.
        //
        // NOTE(review): `layout.align()` is never consulted, so the result is only guaranteed to
        // be PAGE_SIZE-aligned — confirm callers never request a larger alignment. A zero-size
        // layout also makes `len_pages` zero — confirm callers never pass one.
        let len_pages = (layout.size() + PAGE_SIZE - 1) >> PAGE_SIZE.trailing_zeros();

        // Find the size of the smallest page range that would fit this.
        let Some(selected_node_len) = self
            .iter()
            .map(|(_, range_len_pages)| range_len_pages)
            .filter(|&range_len_pages| len_pages <= range_len_pages)
            .min()
        else {
            return Err(AllocError);
        };

        if selected_node_len == len_pages {
            // The less-scary case is when we're removing an entire node; we're just aiming to find
            // it and unlink it.

            // If the node we're looking for is the first one, slightly different code is used to
            // unlink it.
            //
            // UNWRAP: The list is non-empty, since `.min()` above found a fitting node.
            let (ptr, unlinked_len_pages) = if self
                .head
                .as_ref()
                .map(|node| node.len_pages() == len_pages)
                .unwrap()
            {
                // In that case, just unlink the node and hand it back.
                //
                // UNWRAP: The list can't have been empty or .min() above would've returned None.
                self.pop().unwrap()
            } else {
                // Otherwise, we need to iterate through the list to find the node before it.
                let mut next = self.head.as_mut();
                let mut node_before_node_to_remove = None;
                while let Some(ptr) = next {
                    if let Some(next_header) = &ptr.header().next {
                        if next_header.len_pages() == selected_node_len {
                            node_before_node_to_remove = Some(ptr);
                            break;
                        }
                    }
                    next = ptr.next_mut();
                }

                // UNWRAP: We had to find the node in the first place, so it must be in the list.
                node_before_node_to_remove.unwrap().unlink_next().unwrap()
            };

            assert_eq!(len_pages, unlinked_len_pages);
            Ok(NonNull::slice_from_raw_parts(
                ptr.cast(),
                len_pages * PAGE_SIZE,
            ))
        } else {
            // The scary case is when we're trying to give back memory that's part of a different
            // node. First, we need to iterate through the list to find the node.
            let mut next = self.head.as_mut();
            let mut node_to_steal_from = None;
            while let Some(ptr) = next {
                if ptr.len_pages() == selected_node_len {
                    node_to_steal_from = Some(ptr);
                    break;
                }
                next = ptr.next_mut();
            }

            // UNWRAP: We had to find the node in the first place, so it must be in the list.
            let node_to_steal_from = node_to_steal_from.unwrap();

            // Then, we can simply do the address arithmetic to get a pointer out with the right
            // bounds.
            //
            // SAFETY: The node had sufficient length to add `selected_node_len`, so it must have
            // sufficient length to add a lesser quantity.
            //
            // The allocation is carved from the *tail* of the node (pages
            // `selected_node_len - len_pages ..`), so the header in the node's first page is left
            // untouched; its `len_pages` is deliberately not decremented, per this method's
            // contract above.
            let ptr = unsafe { node_to_steal_from.ptr().add(selected_node_len - len_pages) };

            Ok(NonNull::slice_from_raw_parts(
                ptr.cast(),
                len_pages * PAGE_SIZE,
            ))
        }
    }

    /// Returns an iterator over the page ranges in the free list, as pairs of `ptr` and
    /// `len_pages`. The returned pointers should not be accessed.
    pub fn iter(&self) -> impl '_ + Iterator<Item = (NonNull<[u8; PAGE_SIZE]>, usize)> {
        // Walk the intrusive list from the head, yielding each node's pointer and length.
        let mut next: Option<&_> = self.head.as_ref();
        iter::from_fn(move || match next {
            Some(ptr) => {
                let out = (ptr.ptr(), ptr.len_pages());
                next = ptr.next();
                Some(out)
            }
            None => None,
        })
    }

    /// Pops the first page range from the allocator and returns it, along with its length in pages.
    pub fn pop(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> {
        let node = self.head.take()?;
        // Reading the header out of the node gives us its `next` link (which becomes the new
        // head) and its length; ownership of the range passes to the caller.
        let (ptr, header) = node.consume();
        self.head = header.next;
        Some((ptr, header.len_pages))
    }

    /// Splits all the nodes until each page range is of a power-of-two size no larger than
    /// `max_len_pages`.
    ///
    /// NOTE(review): a `max_len_pages` of zero can never be satisfied and would trip
    /// `FreeListPtr::new`'s `len_pages > 0` precondition via `split` — confirm callers always
    /// pass at least 1.
    pub fn split_to_powers_of_two_no_larger_than(&mut self, max_len_pages: usize) {
        let mut next = self.head.as_mut();
        while let Some(ptr) = next {
            // First cap this node's size. Each split links the carved-off node directly after
            // this one, so the outer loop will visit (and further split) it later.
            while ptr.len_pages() > max_len_pages {
                ptr.split(max_len_pages);
            }

            // Then repeatedly split off a node whose size is the lowest set bit of the current
            // length, until the remaining length is itself a power of two.
            while !ptr.len_pages().is_power_of_two() {
                let log2_smaller_size = ptr.len_pages().trailing_zeros();
                let smaller_size = 1 << log2_smaller_size;
                ptr.split(smaller_size);
            }

            next = ptr.next_mut();
        }
    }
}

impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListAllocator<'allocator, PAGE_SIZE> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let mut next: Option<&_> = self.head.as_ref();
        fmt.debug_list()
            .entries(iter::from_fn(|| match next {
                Some(ptr) => {
                    next = ptr.next();
                    Some(ptr)
                }
                None => None,
            }))
            .finish()
    }
}

impl<'allocator, const PAGE_SIZE: usize> Default for FreeListAllocator<'allocator, PAGE_SIZE> {
    fn default() -> FreeListAllocator<'allocator, PAGE_SIZE> {
        FreeListAllocator::new()
    }
}

/// A pointer to a page range, which start with a header in the first page.
///
/// # Safety
///
/// - `ptr` must be a pointer that conveys ownership over the page range.
/// - The page range must be untyped memory.
/// - The page range must be non-empty.
/// - The first page must have a correct header written to it.
/// - The page range must live for `'allocator`.
struct FreeListPtr<'allocator, const PAGE_SIZE: usize> {
    /// Points at the node's header, which lives at the very start of the range's first page.
    ptr: NonNull<FreeListNodeHeader<'allocator, PAGE_SIZE>>,
    /// Ties the node to the `'allocator` lifetime without storing an actual reference.
    _phantom: PhantomData<&'allocator FreeListNodeHeader<'allocator, PAGE_SIZE>>,
}

impl<'allocator, const PAGE_SIZE: usize> FreeListPtr<'allocator, PAGE_SIZE> {
    /// Initialize the given page range as a node in the free list.
    ///
    /// # Safety
    ///
    /// - `ptr` must be a pointer that conveys ownership over the page range.
    /// - The page range must be untyped memory.
    /// - The page range must be aligned to `PAGE_SIZE`.
    /// - The page range must live for `'allocator`.
    /// - `len_pages` must be accurate.
    #[requires(len_pages > 0)]
    pub unsafe fn new(
        ptr: NonNull<[u8; PAGE_SIZE]>,
        len_pages: usize,
        next: Option<FreeListPtr<'allocator, PAGE_SIZE>>,
    ) -> FreeListPtr<'allocator, PAGE_SIZE> {
        // The header is about to be written into the first page, so it must fit in one page.
        assert!(size_of::<FreeListNodeHeader<'allocator, PAGE_SIZE>>() <= PAGE_SIZE);

        let ptr: NonNull<FreeListNodeHeader<'allocator, PAGE_SIZE>> = ptr.cast();
        // SAFETY: The safety invariants plus the assertion guarantee this write is valid.
        unsafe {
            ptr.write(FreeListNodeHeader { next, len_pages });
        }
        FreeListPtr {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Consumes the node, returning the pointer and header.
    ///
    /// The successor node (if any) is reachable through the returned header's `next` field, and
    /// ownership of the page range passes back to the caller.
    pub fn consume(
        self,
    ) -> (
        NonNull<[u8; PAGE_SIZE]>,
        FreeListNodeHeader<'allocator, PAGE_SIZE>,
    ) {
        // SAFETY: The invariants of the type ensure that the header is present and owned by us.
        let header = unsafe { self.ptr.read() };
        (self.ptr.cast(), header)
    }

    /// Returns a reference to the header of the page range.
    fn header(&self) -> &FreeListNodeHeader<'allocator, PAGE_SIZE> {
        // SAFETY: The invariants of the type ensure that the header is present and owned by us.
        unsafe { self.ptr.as_ref() }
    }

    /// Returns an exclusive reference to the header of the page range.
    ///
    /// # Safety
    ///
    /// The invariants of the type must be upheld.
    unsafe fn header_mut(&mut self) -> &mut FreeListNodeHeader<'allocator, PAGE_SIZE> {
        // SAFETY: The invariants of the type ensure that the header is present and owned by us.
        unsafe { self.ptr.as_mut() }
    }

    /// Returns the length of the node in pages.
    pub fn len_pages(&self) -> usize {
        self.header().len_pages
    }

    /// Returns a shared reference to the next node in the free list. Returns `None` when there
    /// are no further nodes.
    pub fn next(&self) -> Option<&FreeListPtr<'allocator, PAGE_SIZE>> {
        self.header().next.as_ref()
    }

    /// Returns an exclusive reference to the next node in the free list. Returns `None` when there
    /// are no further nodes.
    pub fn next_mut(&mut self) -> Option<&mut FreeListPtr<'allocator, PAGE_SIZE>> {
        // SAFETY: We just lend out the next pointer, which can't violate our invariants.
        let header = unsafe { self.header_mut() };
        header.next.as_mut()
    }

    /// Returns the underlying pointer. Using this may violate invariants.
    pub fn ptr(&self) -> NonNull<[u8; PAGE_SIZE]> {
        self.ptr.cast()
    }

    /// Splits the page range in two, with the second having the given length in pages.
    ///
    /// The second half becomes a new node linked immediately after this one; `self` keeps the
    /// first `len_pages - split_len` pages.
    #[requires(split_len < self.len_pages())]
    pub fn split(&mut self, split_len: usize) {
        // Get the old header. After this point, `self.ptr` is considered uninitialized, since
        // we've moved out of it, so we can't panic.
        //
        // SAFETY: The invariants of the type ensure that the header is present and owned by us.
        let old_header = unsafe { self.ptr.read() };

        // We call the two new nodes `a` and `b`.
        let a_len = old_header.len_pages - split_len;
        let b_len = split_len;
        // SAFETY: The invariants of the type ensure this is still in-bounds, since
        // the header must have been valid and `a_len <= old_header.len_pages`.
        let b_ptr: NonNull<[u8; PAGE_SIZE]> = unsafe { self.ptr().add(a_len) };

        // Construct a new node for `b`.
        //
        // SAFETY: The safety invariants are simply inherited from the memory being valid in the
        // first place, except for the `len_pages` invariant. This is upheld because the length
        // math is correct.
        //
        // The assertion panic in `FreeListPtr::new()` must be unreachable, or it would have been
        // triggered when constructing this type in the first place.
        let b = unsafe { FreeListPtr::new(b_ptr, b_len, old_header.next) };

        // Reinitialize the header for `a`. After this point, `self.ptr` is considered initialized
        // again, so we may panic again.
        //
        // SAFETY: The safety invariants are simply inherited from the memory being valid in the
        // first place, except for the `len_pages` invariant. This is upheld because the length
        // math is correct.
        unsafe {
            self.ptr.write(FreeListNodeHeader {
                next: Some(b),
                len_pages: a_len,
            });
        }
    }

    /// Unlinks and returns the _next_ node, returning its pointer and length in pages.
    pub fn unlink_next(&mut self) -> Option<(NonNull<[u8; PAGE_SIZE]>, usize)> {
        // SAFETY: We uphold the invariants.
        let header = unsafe { self.header_mut() };
        let next = header.next.take()?;
        // Splice the removed node out: our `next` becomes the removed node's `next`.
        let (next_ptr, next_header) = next.consume();
        header.next = next_header.next;
        Some((next_ptr, next_header.len_pages))
    }
}

impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListPtr<'allocator, PAGE_SIZE> {
    /// Renders the node as `[<header address> × <length in pages>]`.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "[{addr:p} × {len}]", addr = self.ptr, len = self.len_pages())
    }
}

/// The metadata written into the first page of each free page range, forming the intrusive
/// linked list.
#[derive(Debug)]
struct FreeListNodeHeader<'allocator, const PAGE_SIZE: usize> {
    /// The next node in the linked list.
    next: Option<FreeListPtr<'allocator, PAGE_SIZE>>,

    /// The length of the allocation in pages.
    len_pages: usize,
}

/// Returns an iterator over power-of-two-sized subslices of this slice, which are returned from
/// smallest to largest.
///
/// Each step peels off the first `1 << len.trailing_zeros()` elements. Clearing the lowest set
/// bit of `len` strictly increases its number of trailing zeros, so the yielded sizes strictly
/// increase. An empty slice yields nothing.
pub fn power_of_two_subslices_mut<T>(slice: &mut [T]) -> impl Iterator<Item = &mut [T]> {
    // The not-yet-yielded tail of the slice. Using `mem::take` below lets us move the exclusive
    // borrow out of the closure on each step, so no raw pointers or `unsafe` are needed (this
    // replaces the previous raw-pointer implementation with identical behavior).
    let mut rest = slice;

    iter::from_fn(move || {
        if rest.is_empty() {
            return None;
        }

        // The largest power of two dividing the remaining length is the next chunk size; it is
        // always <= rest.len(), so the split below cannot panic.
        let out_len = 1 << rest.len().trailing_zeros();

        // Swap `rest` for an empty slice so we can split the real one with its full lifetime,
        // then store the tail back for the next iteration.
        let (out, tail) = core::mem::take(&mut rest).split_at_mut(out_len);
        rest = tail;
        Some(out)
    })
}

#[test]
fn power_of_two_subslices_mut_test() {
    extern crate std;
    use std::vec::Vec;

    // A 9-element slice decomposes into a 1-element and then an 8-element subslice,
    // smallest first.
    let mut data = [0, 1, 2, 3, 4, 5, 6, 7, 8];
    let subslices: Vec<_> = power_of_two_subslices_mut(&mut data).collect();

    assert_eq!(subslices, &[&[0] as &[_], &[1, 2, 3, 4, 5, 6, 7, 8]]);
}