 crates/alloc_buddy/src/lib.rs             |  2 +-
 crates/alloc_physmem_free_list/src/lib.rs | 10 ++++++++++
 2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/crates/alloc_buddy/src/lib.rs b/crates/alloc_buddy/src/lib.rs
index bbed640..fa14848 100644
--- a/crates/alloc_buddy/src/lib.rs
+++ b/crates/alloc_buddy/src/lib.rs
@@ -323,7 +323,7 @@ impl<
         // Ensure that the memory is marked as not being in the free list.
         assert_eq!(
-            tree.bitset_get(&mut self.bitset, size_class, offset),
+            tree.bitset_get(self.bitset, size_class, offset),
             SubregionStatus::NotInFreeList
         );
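The change above drops the `&mut` in front of `self.bitset`. One plausible reading, sketched below with invented types (`Bitset`, `Allocator`, and this `bitset_get` signature are illustrative, not the crate's real definitions): when the field already holds a mutable reference, passing it directly lets the compiler reborrow it, whereas `&mut self.bitset` would add an extra level of indirection (`&mut &mut _`).

struct Bitset([u64; 4]);

// Hypothetical stand-in for the real bitset accessor; it only needs a
// mutable borrow of the bitset and a bit index.
fn bitset_get(bitset: &mut Bitset, bit: usize) -> bool {
    ((bitset.0[bit / 64] >> (bit % 64)) & 1) == 1
}

struct Allocator<'a> {
    // The field already holds a mutable reference to the bitset.
    bitset: &'a mut Bitset,
}

impl<'a> Allocator<'a> {
    fn check(&mut self, bit: usize) -> bool {
        // `self.bitset` is implicitly reborrowed here; `&mut self.bitset`
        // would instead have type `&mut &'a mut Bitset`.
        bitset_get(self.bitset, bit)
    }
}

fn main() {
    let mut bits = Bitset([0; 4]);
    bits.0[0] = 0b10;
    let mut allocator = Allocator { bitset: &mut bits };
    assert!(allocator.check(1));
}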
diff --git a/crates/alloc_physmem_free_list/src/lib.rs b/crates/alloc_physmem_free_list/src/lib.rs
index f99b30f..0540672 100644
--- a/crates/alloc_physmem_free_list/src/lib.rs
+++ b/crates/alloc_physmem_free_list/src/lib.rs
@@ -43,6 +43,10 @@ impl<'allocator, const PAGE_SIZE: usize> FreeListAllocator<'allocator, PAGE_SIZE
     /// Obviously, this is unsound to call while the previous allocation is alive. It is also
     /// unsound to split any nodes after calling this, since splitting the node that was allocated
     /// into might write the new node's metadata into that allocation.
+    ///
+    /// # Safety
+    ///
+    /// - Only one allocation returned by this method may be live.
     pub unsafe fn allocate_without_always_removing_the_node(
         &mut self,
         layout: Layout,
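The new Safety section turns the doc comment's informal warning into an explicit contract. Below is a toy, self-contained demonstration of the hazard it guards against; the page size, header size, and split point are invented for illustration and are not the crate's real metadata layout.

const PAGE_SIZE: usize = 64;
const HEADER_SIZE: usize = 16;

fn main() {
    // One free page; while it sits in the free list, its first HEADER_SIZE
    // bytes serve as the intrusive node header.
    let mut page = [0u8; PAGE_SIZE];

    // "Allocate without removing the node": hand out the tail of the page
    // while the node (and its header at the front) stays in the free list.
    let alloc_start = HEADER_SIZE;
    page[alloc_start..].fill(0xAA); // the caller's live allocation

    // Splitting the node afterwards writes a fresh header for the new node,
    // and that header lands inside the bytes that were just handed out.
    let split_at = alloc_start + 8; // hypothetical split point
    page[split_at..split_at + HEADER_SIZE].fill(0x00);

    // The caller's data has been silently overwritten.
    assert_ne!(page[split_at], 0xAA);
}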
@@ -186,6 +190,12 @@ impl<'allocator, const PAGE_SIZE: usize> fmt::Debug for FreeListAllocator<'alloc
     }
 }
 
+impl<'allocator, const PAGE_SIZE: usize> Default for FreeListAllocator<'allocator, PAGE_SIZE> {
+    fn default() -> FreeListAllocator<'allocator, PAGE_SIZE> {
+        FreeListAllocator::new()
+    }
+}
+
 /// A pointer to a page range, which starts with a header in the first page.
 ///
 /// # Safety
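The added impl simply forwards `Default` to `new()`, the shape suggested by clippy's `new_without_default` lint. A self-contained sketch of the same pattern, with a toy type standing in for the real allocator and 4096 as an assumed page size:

struct FreeList<'a, const PAGE_SIZE: usize> {
    head: Option<&'a mut [u8; PAGE_SIZE]>,
}

impl<'a, const PAGE_SIZE: usize> FreeList<'a, PAGE_SIZE> {
    fn new() -> FreeList<'a, PAGE_SIZE> {
        FreeList { head: None }
    }
}

// Forwarding `Default` to `new()` keeps a single construction path while
// letting callers use `Default::default()`, `core::mem::take`, or
// `#[derive(Default)]` on structs that contain the allocator.
impl<'a, const PAGE_SIZE: usize> Default for FreeList<'a, PAGE_SIZE> {
    fn default() -> FreeList<'a, PAGE_SIZE> {
        FreeList::new()
    }
}

fn main() {
    let list: FreeList<'static, 4096> = FreeList::default();
    assert!(list.head.is_none());
}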