Diffstat (limited to 'crates/device_tree/src')
-rw-r--r-- | crates/device_tree/src/lib.rs | 91
1 file changed, 91 insertions, 0 deletions
diff --git a/crates/device_tree/src/lib.rs b/crates/device_tree/src/lib.rs
index 531f7ef..fe6108f 100644
--- a/crates/device_tree/src/lib.rs
+++ b/crates/device_tree/src/lib.rs
@@ -115,6 +115,92 @@ impl<'dt> FlattenedDeviceTree<'dt> {
         Ok(&out[..i])
     }
 
+    /// Parses memory reservations out of the flattened DeviceTree, removes reserved ranges (see
+    /// `iter_reserved`), and calls the function with each range of usable memory. This does not
+    /// allocate memory, and may be called before the allocator is configured.
+    pub fn for_each_memory_range<E, const PAGE_SIZE: usize>(
+        &'dt self,
+        mut func: impl FnMut(Range<usize>) -> Result<(), E>,
+    ) -> Result<(), E> {
+        // Iterate through the nodes, looking for memory nodes.
+        self.for_each_node(|node| {
+            if node.is_unit(&["", "memory"]) {
+                // Get the memory ranges.
+                let Some(reg) = node.get_reg_usize() else {
+                    warn!("{}reg was not valid", node.name());
+                    return Ok(());
+                };
+
+                'process_node: for (addr, size) in reg {
+                    let mut addrs = addr..addr + size;
+
+                    // Clamp the range, so that we can't somehow end up with the zero address or
+                    // with higher-half addresses.
+                    addrs.start = addrs.start.max(1);
+                    addrs.end = addrs.end.min(isize::MAX as usize);
+
+                    // Trim the range to be page-aligned.
+                    addrs.start = (addrs.start + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
+                    addrs.end &= !(PAGE_SIZE - 1);
+
+                    // Add each unreserved subrange to the allocator's list.
+                    'process_range: loop {
+                        assert_eq!(addrs.start & (PAGE_SIZE - 1), 0);
+                        assert_eq!(addrs.end & (PAGE_SIZE - 1), 0);
+
+                        // Walk forwards until the first page is unreserved.
+                        'ensure_first_page_unreserved: loop {
+                            // Ensure the range is non-empty.
+                            if addrs.end <= addrs.start {
+                                continue 'process_node;
+                            }
+
+                            // If the start of the range is reserved, walk forwards a page.
+                            let first_page = addrs.start..addrs.start + PAGE_SIZE;
+                            if self
+                                .iter_reserved()
+                                .any(|range| ranges_overlap(&range, &first_page))
+                            {
+                                addrs.start = first_page.end;
+                            } else {
+                                break 'ensure_first_page_unreserved;
+                            }
+                        }
+
+                        // Find the first reservation that overlaps.
+                        if let Some(reservation) = self
+                            .iter_reserved()
+                            .filter(|range| ranges_overlap(range, &addrs))
+                            .min_by_key(|range| range.start)
+                        {
+                            // Get the range that's before the reservation.
+                            let mut unreserved = addrs.start..reservation.start;
+
+                            // Trim the range to be page-aligned. We don't need to trim the start,
+                            // because addrs.start is already aligned (by our loop invariant).
+                            unreserved.end &= !(PAGE_SIZE - 1);
+
+                            // Call the function with the range up to the start of that overlap.
+                            func(unreserved)?;
+
+                            // Adjust the remaining range to be after the overlap.
+                            addrs.start = reservation.end;
+
+                            // Trim the range to be page-aligned. We don't need to trim the end,
+                            // because it's unchanged.
+                            addrs.start = (addrs.start + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
+                        } else {
+                            // If no range overlaps, the entire remainder of this range is valid.
+                            func(addrs)?;
+                            break 'process_range;
+                        }
+                    }
+                }
+            }
+            Ok(())
+        })
+    }
+
     /// Parses nodes out of a flattened DeviceTree and calls the function with each one. This does
     /// not allocate memory, and may be called before the allocator is configured (at the cost of
     /// decreased performance).
@@ -643,3 +729,8 @@ fn for_each_node<'dt, 'iter, E>(
         offset = for_each_node(fdt, func, on_error, offset, new_parents)?;
     }
 }
+
+/// Returns whether the two ranges overlap.
+fn ranges_overlap<T: Copy + Ord>(r1: &Range<T>, r2: &Range<T>) -> bool {
+    r1.start.max(r2.start) < r1.end.min(r2.end)
+}
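
As a rough, standalone sketch of the arithmetic the new function relies on (the `page_align_up`/`page_align_down` helpers, the example addresses, and the `main` driver below are invented for illustration and are not part of the crate), the following program clips a memory range around a single reservation the same way the patch does: the sub-range before the reservation keeps its start and has its end rounded down to a page boundary, while the sub-range after it has its start rounded up. The overlap test is the same as the `ranges_overlap` helper added at the bottom of the patch: two half-open ranges overlap exactly when the larger of the two starts is below the smaller of the two ends.

    use core::ops::Range;

    /// Same overlap test as the `ranges_overlap` helper added by the patch.
    fn ranges_overlap<T: Copy + Ord>(r1: &Range<T>, r2: &Range<T>) -> bool {
        r1.start.max(r2.start) < r1.end.min(r2.end)
    }

    /// Rounds an address up to the next multiple of `page_size` (assumed to be a power of two).
    fn page_align_up(addr: usize, page_size: usize) -> usize {
        (addr + (page_size - 1)) & !(page_size - 1)
    }

    /// Rounds an address down to the previous multiple of `page_size`.
    fn page_align_down(addr: usize, page_size: usize) -> usize {
        addr & !(page_size - 1)
    }

    fn main() {
        const PAGE_SIZE: usize = 4096;

        // A made-up memory node covering 16 MiB, with a made-up reservation in the middle.
        let memory: Range<usize> = 0x8000_0000..0x8100_0000;
        let reservation: Range<usize> = 0x8040_0100..0x8040_1f00;
        assert!(ranges_overlap(&memory, &reservation));

        // The sub-range before the reservation keeps its (already aligned) start and rounds
        // its end down, mirroring `unreserved.end &= !(PAGE_SIZE - 1)` in the patch.
        let before = memory.start..page_align_down(reservation.start, PAGE_SIZE);

        // The sub-range after the reservation rounds its start up, mirroring
        // `addrs.start = (addrs.start + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)`.
        let after = page_align_up(reservation.end, PAGE_SIZE)..memory.end;

        assert!(!ranges_overlap(&before, &reservation));
        assert!(!ranges_overlap(&after, &reservation));
        println!("usable: {before:#x?} and {after:#x?}");
    }

In the patch itself this clipping is repeated in a loop, since a single memory node may overlap several reservations; the sketch above only shows one iteration of that loop.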