summaryrefslogtreecommitdiff
path: root/crates/device_tree
diff options
context:
space:
mode:
authorNathan Ringo <nathan@remexre.com>2024-09-02 02:07:07 -0500
committerNathan Ringo <nathan@remexre.com>2024-09-02 02:07:07 -0500
commit52e33eee454766940d1987199868f0d892ce34a3 (patch)
tree64515ac26d363dcfe4304b1dd34665502746d82d /crates/device_tree
parent1afb23196b9882f85c8fbcfbfbd5f92a58960e14 (diff)
Set up the physical memory allocators.
Diffstat (limited to 'crates/device_tree')
-rw-r--r--crates/device_tree/src/lib.rs91
1 files changed, 91 insertions, 0 deletions
diff --git a/crates/device_tree/src/lib.rs b/crates/device_tree/src/lib.rs
index 531f7ef..fe6108f 100644
--- a/crates/device_tree/src/lib.rs
+++ b/crates/device_tree/src/lib.rs
@@ -115,6 +115,92 @@ impl<'dt> FlattenedDeviceTree<'dt> {
Ok(&out[..i])
}
+ /// Parses memory reservations out of the flattened DeviceTree, removes reserved ranges (see
+ /// `iter_reserved`), and calls the function with each range of usable memory. This does not
+ /// allocate memory, and may be called before the allocator is configured.
+ pub fn for_each_memory_range<E, const PAGE_SIZE: usize>(
+ &'dt self,
+ mut func: impl FnMut(Range<usize>) -> Result<(), E>,
+ ) -> Result<(), E> {
+ // Iterate through the nodes, looking for memory nodes.
+ self.for_each_node(|node| {
+ if node.is_unit(&["", "memory"]) {
+ // Get the memory ranges.
+ let Some(reg) = node.get_reg_usize() else {
+ warn!("{}reg was not valid", node.name());
+ return Ok(());
+ };
+
+ 'process_node: for (addr, size) in reg {
+ let mut addrs = addr..addr + size;
+
+ // Clamp the range, so that we can't somehow end up with the zero address or
+ // with higher-half addresses.
+ addrs.start = addrs.start.max(1);
+ addrs.end = addrs.end.min(isize::MAX as usize);
+
+ // Trim the range to be page-aligned.
+ addrs.start = (addrs.start + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
+ addrs.end &= !(PAGE_SIZE - 1);
+
+ // Add each unreserved subrange to the allocator's list.
+ 'process_range: loop {
+ assert_eq!(addrs.start & (PAGE_SIZE - 1), 0);
+ assert_eq!(addrs.end & (PAGE_SIZE - 1), 0);
+
+ // Walk forwards until the first page is unreserved.
+ 'ensure_first_page_unreserved: loop {
+ // Ensure the range is non-empty.
+ if addrs.end <= addrs.start {
+ continue 'process_node;
+ }
+
+ // If the start of the range is reserved, walk forwards a page.
+ let first_page = addrs.start..addrs.start + PAGE_SIZE;
+ if self
+ .iter_reserved()
+ .any(|range| ranges_overlap(&range, &first_page))
+ {
+ addrs.start = first_page.end;
+ } else {
+ break 'ensure_first_page_unreserved;
+ }
+ }
+
+ // Find the first reservation that overlaps.
+ if let Some(reservation) = self
+ .iter_reserved()
+ .filter(|range| ranges_overlap(range, &addrs))
+ .min_by_key(|range| range.start)
+ {
+ // Get the range that's before the reservation.
+ let mut unreserved = addrs.start..reservation.start;
+
+ // Trim the range to be page-aligned. We don't need to trim the start,
+ // because addrs.start is already aligned (by our loop invariant).
+ unreserved.end &= !(PAGE_SIZE - 1);
+
+ // Call the function with the range up to the start of that overlap.
+ func(unreserved)?;
+
+ // Adjust the remaining range to be after the overlap.
+ addrs.start = reservation.end;
+
+ // Trim the range to be page-aligned. We don't need to trim the end,
+ // because it's unchanged.
+ addrs.start = (addrs.start + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
+ } else {
+ // If no range overlaps, the entire remainder of this range is valid.
+ func(addrs)?;
+ break 'process_range;
+ }
+ }
+ }
+ }
+ Ok(())
+ })
+ }
+
/// Parses nodes out of a flattened DeviceTree and calls the function with each one. This does
/// not allocate memory, and may be called before the allocator is configured (at the cost of
/// decreased performance).
@@ -643,3 +729,8 @@ fn for_each_node<'dt, 'iter, E>(
offset = for_each_node(fdt, func, on_error, offset, new_parents)?;
}
}
+
/// Returns whether the two ranges overlap.
///
/// Two half-open ranges intersect exactly when the later start lies strictly
/// before the earlier end; an empty range never overlaps anything.
fn ranges_overlap<T: Copy + Ord>(r1: &Range<T>, r2: &Range<T>) -> bool {
    let later_start = if r1.start > r2.start { r1.start } else { r2.start };
    let earlier_end = if r1.end < r2.end { r1.end } else { r2.end };
    later_start < earlier_end
}