//! Support for the DeviceTree format.
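//!
//! A minimal usage sketch (illustrative only; `dtb_ptr` stands in for a platform-specific
//! DTB address, and error handling is elided):
//!
//! ```ignore
//! // SAFETY: `dtb_ptr` points to a valid flattened DeviceTree whose memory is not
//! // mutated for as long as the returned `fdt` is in use.
//! let fdt = unsafe { FlattenedDeviceTree::from_ptr(dtb_ptr) }?;
//! fdt.for_each_node(|node| {
//!     // Inspect `node.name()`, `node.iter_props()`, `node.get_reg_usize()`, ...
//!     Ok::<(), DeviceTreeError>(())
//! })?;
//! ```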
use crate::{collections::stack_linked_list::StackLinkedList, prelude::*, util::FromEndianBytes};
use bstr::BStr;
use contracts::requires;
use core::{
fmt, iter,
num::ParseIntError,
ops::Range,
slice,
str::{self, Utf8Error},
};
use either::Either;
/// A reference to a flattened DeviceTree (DTB) in memory.
pub struct FlattenedDeviceTree<'dt> {
header: &'dt FdtHeader,
struct_block: &'dt [u32],
strings_block: &'dt BStr,
memrsv_block: &'dt [FdtMemRsv],
}
impl<'dt> FlattenedDeviceTree<'dt> {
/// Looks for a DeviceTree at the given address, and returns it if it looks valid.
///
/// # Safety
///
/// - `ptr` must point to a spec-compliant flattened DeviceTree.
/// - The memory contained by the flattened DeviceTree must be valid for the duration of
/// lifetime `'dt`.
/// - The memory contained by the flattened DeviceTree must not be mutated for the duration of
/// lifetime `'dt`.
#[requires((ptr as *const FdtHeader).is_aligned())]
pub unsafe fn from_ptr(ptr: *const u8) -> Result<FlattenedDeviceTree<'dt>, DeviceTreeError> {
// Check that the header appears to point to a valid flattened DeviceTree.
let header: &'dt FdtHeader = &*(ptr as *const FdtHeader);
let magic = u32::from_be(header.magic);
if magic != 0xd00dfeed {
return Err(DeviceTreeError::BadMagic(magic));
}
let version = u32::from_be(header.version);
let last_comp_version = u32::from_be(header.last_comp_version);
if last_comp_version > 17 {
return Err(DeviceTreeError::IncompatibleVersion(
version,
last_comp_version,
));
}
// Get pointers to each block.
let off_dt_struct = u32::from_be(header.off_dt_struct) as usize;
let size_dt_struct = u32::from_be(header.size_dt_struct) as usize;
let off_dt_strings = u32::from_be(header.off_dt_strings) as usize;
let size_dt_strings = u32::from_be(header.size_dt_strings) as usize;
let off_mem_rsvmap = u32::from_be(header.off_mem_rsvmap) as usize;
// Check that the structure block has an aligned size.
if (size_dt_struct & 0b11) != 0 {
return Err(DeviceTreeError::InvalidDeviceTree);
}
// Extract the structure and strings blocks.
let struct_block: &[u32] =
slice::from_raw_parts(ptr.add(off_dt_struct).cast(), size_dt_struct / 4);
let strings_block = BStr::new(slice::from_raw_parts(
ptr.add(off_dt_strings),
size_dt_strings,
));
// Read memory reservations until the terminating one is found, then construct the block of
// the appropriate length.
let mut memrsv_count = 0;
let memrsv_ptr: *const FdtMemRsv = ptr.add(off_mem_rsvmap).cast();
let memrsv_block = loop {
let memrsv = *memrsv_ptr.add(memrsv_count);
// We can skip the endian conversion, since we're just testing against zero.
if memrsv == (FdtMemRsv { addr: 0, size: 0 }) {
break slice::from_raw_parts(memrsv_ptr, memrsv_count);
}
memrsv_count += 1;
};
// Try parsing all of the events in the structure block, so that any errors from doing so
// get reported here, before anything outside this module can observe them.
let fdt = FlattenedDeviceTree {
header,
struct_block,
strings_block,
memrsv_block,
};
fdt.iter_struct_events_from(0)
.try_for_each(|r| r.map(|_| ()))?;
// Check that the overall structure of the structure block is correct, so we don't need to
// worry about errors later.
for_each_node(&fdt, &mut |_| Ok(()), &|err| err, 0, StackLinkedList::NIL)?;
Ok(fdt)
}
/// Returns the string at the given offset of the strings block.
fn get_string(&self, offset: u32) -> Result<&BStr, DeviceTreeError> {
let out = &self.strings_block[offset as usize..];
let i = out
.iter()
.position(|&b| b == b'\0')
.ok_or(DeviceTreeError::StringMissingNulTerminator(offset))?;
Ok(&out[..i])
}
/// Parses nodes out of a flattened DeviceTree and calls the function with each one. This does
/// not allocate memory, and may be called before the allocator is configured (at the cost of
/// decreased performance).
pub fn for_each_node<E>(
&'dt self,
mut func: impl for<'a> FnMut(FdtNode<'dt, 'a>) -> Result<(), E>,
) -> Result<(), E> {
for_each_node(
self,
&mut func,
&|_| unreachable!("checked in FlattenedDeviceTree::from_ptr"),
0,
StackLinkedList::NIL,
)?;
Ok(())
}
/// Returns an iterator over the reserved ranges of addresses.
///
/// Addresses may be reserved by:
///
/// - Overlapping with the DeviceTree itself.
/// - Overlapping with any memory reservations in the DeviceTree.
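///
/// For example, a physical-memory allocator being brought up might skip every reserved
/// range (a sketch; `mark_frames_reserved` is a hypothetical callback):
///
/// ```ignore
/// for range in fdt.iter_reserved() {
///     mark_frames_reserved(range);
/// }
/// ```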
pub fn iter_reserved(&self) -> impl '_ + Iterator<Item = Range<usize>> {
// Make the address range of the DeviceTree.
let dt_start = self.header as *const FdtHeader as usize;
let dt_end = dt_start + u32::from_be(self.header.totalsize) as usize;
iter::once(dt_start..dt_end).chain(self.memrsv_block.iter().map(|memrsv| {
let addr = u64::from_be(memrsv.addr) as usize;
let size = u64::from_be(memrsv.size) as usize;
addr..addr + size
}))
}
/// Returns an iterator over the events in the structure block of the DeviceTree, starting at
/// the given offset (in `u32` words) from the start of the structure block.
///
/// Panics if the offset is out-of-bounds.
fn iter_struct_events_from<'iter: 'dt>(
&'iter self,
start: u32,
) -> impl 'iter + Iterator<Item = Result<FdtStructEvent<'dt>, DeviceTreeError>> {
let mut i = start;
iter::from_fn(move || match self.next_struct_event_from(i) {
Some(Ok((next, event))) => {
let prev = i;
assert!(prev < next);
i = next;
Some(Ok(event))
}
Some(Err(err)) => Some(Err(err)),
None => None,
})
}
/// Parses a single event from the structure block of the DeviceTree, returning the offset to
/// parse at next and the parsed event.
///
/// Panics if the offset is out-of-bounds.
fn next_struct_event_from<'iter: 'dt>(
&'iter self,
mut offset: u32,
) -> Option<Result<(u32, FdtStructEvent<'dt>), DeviceTreeError>> {
loop {
// Read the token type and advance the offset.
let token_type = u32::from_be(*self.struct_block.get(offset as usize)?);
offset += 1;
// Branch on the token type to recognize an event.
let event = match token_type {
// FDT_BEGIN_NODE
0x00000001 => {
// Save a pointer to the start of the extra data.
let name_ptr = (&self.struct_block[offset as usize]) as *const u32 as *const u8;
// Look for a null terminator. `name_len` is the length of the name, _without_
// the null terminator.
//
// SAFETY: This method can only be called when the FlattenedDeviceTree was
// constructed from a valid flattened DeviceTree.
let mut name_len = 0;
while unsafe { *name_ptr.add(name_len) } != b'\0' {
name_len += 1;
}
// Create the name as a BStr.
//
// SAFETY: Well, we already accessed this memory above when finding the null
// terminator... But yes, this being valid is guaranteed by this being a
// flattened DeviceTree.
let name = BStr::new(unsafe { slice::from_raw_parts(name_ptr, name_len) });
// Parse the name.
let Ok(name) = FdtNodeName::parse(name) else {
return Some(Err(DeviceTreeError::InvalidNodeName));
};
// Advance the offset past the name and its null terminator, rounded up to the next
// four-byte boundary.
offset += (name_len as u32 + 4) >> 2;
// Create the event.
FdtStructEvent::BeginNode(name)
}
// FDT_END_NODE
0x00000002 => FdtStructEvent::EndNode,
// FDT_PROP
0x00000003 => {
// Get the length of the property data and the offset of the name in the
// strings block.
let data_len = u32::from_be(self.struct_block[offset as usize]) as usize;
let name_off = u32::from_be(self.struct_block[offset as usize + 1]);
offset += 2;
// Get the property data as a BStr.
//
// SAFETY: This is valid by the requirements of a flattened DeviceTree.
//
// TODO: We could refactor this out to a to_bytes call plus a bounds-checked
// slicing operation to get a _bit_ of safety back, at least...
let data = BStr::new(unsafe {
slice::from_raw_parts(
&self.struct_block[offset as usize] as *const u32 as *const u8,
data_len,
)
});
// Advance past the property data.
offset += (data_len as u32 + 3) >> 2;
// Get the property name as a BStr.
let name = match self.get_string(name_off) {
Ok(name) => name,
Err(err) => return Some(Err(err)),
};
// Create the event.
FdtStructEvent::Prop(name, data)
}
// FDT_NOP
0x00000004 => continue,
// FDT_END
0x00000009 => return None,
_ => return Some(Err(DeviceTreeError::InvalidTokenType(token_type))),
};
// Return the new offset and the event.
return Some(Ok((offset, event)));
}
}
}
impl<'dt> fmt::Debug for FlattenedDeviceTree<'dt> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let addr = self.header as *const FdtHeader;
// Intentionally not using `fmt.debug_tuple`, it's wasted space here.
write!(fmt, "FlattenedDeviceTree::from_ptr({addr:p})")
}
}
/// An error encountered when reading a DeviceTree.
#[derive(Debug)]
pub enum DeviceTreeError {
/// The header's magic number was not `0xd00dfeed`; contains the value that was read.
BadMagic(u32),
/// `last_comp_version` was newer than the version 17 format this parser understands.
IncompatibleVersion(u32, u32),
/// The structure block's size was not a multiple of four bytes.
InvalidDeviceTree,
/// A node name could not be parsed.
InvalidNodeName,
/// An unknown token type was found in the structure block.
InvalidTokenType(u32),
/// The string at the given offset of the strings block had no null terminator.
StringMissingNulTerminator(u32),
/// The structure block ended before the node being parsed did.
UnexpectedEndOfStructBlock,
/// An event of an unexpected kind was encountered while parsing a node.
UnexpectedEvent,
}
/// The flattened DeviceTree header.
///
/// All fields are big-endian.
#[derive(Debug)]
#[repr(C)]
struct FdtHeader {
magic: u32,
totalsize: u32,
off_dt_struct: u32,
off_dt_strings: u32,
off_mem_rsvmap: u32,
version: u32,
last_comp_version: u32,
boot_cpuid_phys: u32,
size_dt_strings: u32,
size_dt_struct: u32,
}
/// A memory reservation from the appropriate block of the DeviceTree.
///
/// All fields are big-endian.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(C)]
struct FdtMemRsv {
addr: u64,
size: u64,
}
/// An event returned from iterating over the structure block of a flattened DeviceTree.
#[derive(Clone, Copy, Debug)]
enum FdtStructEvent<'dt> {
BeginNode(FdtNodeName<'dt>),
EndNode,
Prop(&'dt BStr, &'dt BStr),
}
/// The name of a node.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct FdtNodeName<'dt> {
/// The full name, including the unit address.
pub full_name: &'dt BStr,
// The length of the unit name (i.e., the part of the name before the unit address).
unit_name_len: usize,
/// The unit address of the node, if it has one.
pub unit_address: Option<u64>,
}
impl<'dt> FdtNodeName<'dt> {
/// Returns the unit name; i.e., the part of the name that does not include the unit address.
pub fn unit_name(&self) -> &'dt BStr {
&self.full_name[..self.unit_name_len]
}
/// Parses an `FdtNodeName` from bytes.
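///
/// For example, a name with a unit address splits at the `@`, and the address is parsed as
/// hexadecimal (the name below is illustrative):
///
/// ```ignore
/// let name = FdtNodeName::parse(BStr::new(b"serial@10013000")).unwrap();
/// assert_eq!(name.unit_name(), BStr::new(b"serial"));
/// assert_eq!(name.unit_address, Some(0x10013000));
/// ```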
pub fn parse(bytes: &'dt BStr) -> Result<FdtNodeName<'dt>, Either<ParseIntError, Utf8Error>> {
if let Some(at_sign_index) = bytes.iter().position(|&b| b == b'@') {
let unit_address =
str::from_utf8(&bytes[at_sign_index + 1..]).map_err(Either::Right)?;
let unit_address = u64::from_str_radix(unit_address, 16).map_err(Either::Left)?;
Ok(FdtNodeName {
full_name: bytes,
unit_name_len: at_sign_index,
unit_address: Some(unit_address),
})
} else {
Ok(FdtNodeName {
full_name: bytes,
unit_name_len: bytes.len(),
unit_address: None,
})
}
}
}
impl<'dt> fmt::Display for FdtNodeName<'dt> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.full_name)
}
}
/// A reference to a node in a flattened DeviceTree.
#[derive(Clone, Copy)]
pub struct FdtNode<'dt, 'iter> {
fdt: &'dt FlattenedDeviceTree<'dt>,
index: u32,
parents: StackLinkedList<'iter, u32>,
}
impl<'dt, 'iter> FdtNode<'dt, 'iter> {
/// A constructor that checks the type's invariants.
#[requires(matches!(
fdt.next_struct_event_from(index),
Some(Ok((_, FdtStructEvent::BeginNode(_)))))
)]
#[requires(parents.into_iter().all(|parent| {
matches!(
fdt.next_struct_event_from(parent),
Some(Ok((_, FdtStructEvent::BeginNode(_))))
)
}))]
fn new(
fdt: &'dt FlattenedDeviceTree<'dt>,
index: u32,
parents: StackLinkedList<'iter, u32>,
) -> FdtNode<'dt, 'iter> {
FdtNode {
fdt,
index,
parents,
}
}
/// Returns the value corresponding to a prop name for this node.
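///
/// For example (illustrative; which properties exist depends on the tree):
///
/// ```ignore
/// if let Some(compatible) = node.get_prop("compatible") {
///     // `compatible` holds the property's raw bytes; for a string-list property like
///     // this one, the entries are separated by null bytes.
/// }
/// ```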
pub fn get_prop<U>(&self, name: U) -> Option<&'dt BStr>
where
BStr: PartialEq<U>,
{
self.iter_props().find(|&(k, _)| k == &name).map(|(_, v)| v)
}
/// Returns the value corresponding to a prop name for this node, decoding it as a big-endian `u32`.
pub fn get_prop_u32<U>(&self, name: U) -> Option<u32>
where
BStr: PartialEq<U>,
U: Copy + fmt::Display,
{
let value = self.get_prop(name)?;
if value.len() != 4 {
warn!(
"{}{} was expected to be 4 bytes, but was {:?}",
self.name(),
name,
value
);
return None;
}
let value = value.first_chunk()?;
Some(u32::from_be_bytes(*value))
}
/// Returns the `reg` property of this node as an iterator over `(addr, size)` pairs.
pub fn get_reg(&self) -> Option<impl Iterator<Item = (&'dt BStr, &'dt BStr)>> {
// Get the parent node.
let Some(parent) = self.parent() else {
warn!("{} did not have a parent", self.name());
return None;
};
// Get the size of addresses and sizes from the parent.
let Some(address_cells) = parent.get_prop_u32("#address-cells") else {
warn!(
"{} did not have a valid #address-cells property",
parent.name()
);
return None;
};
let Some(size_cells) = parent.get_prop_u32("#size-cells") else {
warn!(
"{} did not have a valid #size-cells property",
parent.name()
);
return None;
};
// Convert the numbers to bytes.
let address_size = (address_cells << 2) as usize;
let size_size = (size_cells << 2) as usize;
// Get the `reg` property's value.
let value = self.get_prop("reg")?;
if value.len() % (address_size + size_size) != 0 {
warn!(
"{}reg was expected to be a multiple of ({} + {}) bytes, but was {:?}",
self.name(),
address_size,
size_size,
value,
);
return None;
}
Some(
(0..value.len())
.step_by(address_size + size_size)
.map(move |i| {
(
&value[i..i + address_size],
&value[i + address_size..i + address_size + size_size],
)
}),
)
}
/// Returns the `reg` property of this node as an iterator over `(addr, size)` pairs, requiring
/// that they're correctly-sized for `usize`s.
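///
/// For example, the ranges described by a memory node might be walked like this (a sketch;
/// what gets done with each range is up to the caller):
///
/// ```ignore
/// if let Some(regs) = node.get_reg_usize() {
///     for (addr, size) in regs {
///         // `addr..addr + size` is one range described by the node's `reg` property.
///     }
/// }
/// ```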
pub fn get_reg_usize(&self) -> Option<impl 'dt + Iterator<Item = (usize, usize)>> {
let iter = self.get_reg()?.map(|(addr, size)| {
(
usize::from_big_endian_bytes(addr),
usize::from_big_endian_bytes(size),
)
});
Some(iter)
}
/// Returns whether the unit path (i.e., the path to this node, only taking unit names) matches
/// the argument.
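///
/// For example, the `/chosen` node would match `&["", "chosen"]`, since the root node's
/// unit name is the empty string (a sketch):
///
/// ```ignore
/// if node.is_unit(&["", "chosen"]) {
///     // `node` is the root's `chosen` child.
/// }
/// ```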
pub fn is_unit<U>(&self, name: &[U]) -> bool
where
BStr: PartialEq<U>,
{
self.iter_names_rev()
.map(|name| name.unit_name())
.eq(name.iter().rev())
}
/// Returns an iterator over the properties of the node.
pub fn iter_props(&self) -> impl '_ + Iterator<Item = (&'dt BStr, &'dt BStr)> {
// Skip the BeginNode.
let offset = match self.fdt.next_struct_event_from(self.index) {
Some(Ok((offset, FdtStructEvent::BeginNode(_)))) => offset,
_ => unreachable!("checked in FlattenedDeviceTree::from_ptr"),
};
// Yield Prop nodes as long as we can get them.
self.fdt
.iter_struct_events_from(offset)
.map_while(|r| match r {
Ok(FdtStructEvent::Prop(key, value)) => Some((key, value)),
Ok(FdtStructEvent::BeginNode(_) | FdtStructEvent::EndNode) => None,
Err(_) => unreachable!("checked in FlattenedDeviceTree::from_ptr"),
})
}
/// Returns a value that can be `Display`ed as the fully-qualified name of the node.
pub fn name(&self) -> impl '_ + fmt::Display {
struct NameDisplay<'a, 'dt, 'iter>(&'a FdtNode<'dt, 'iter>);
impl<'a, 'dt, 'iter> fmt::Display for NameDisplay<'a, 'dt, 'iter> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fn fmt_name_rev<'dt>(
fmt: &mut fmt::Formatter,
mut iter: impl Iterator<Item = FdtNodeName<'dt>>,
) -> fmt::Result {
match iter.next() {
Some(name) => {
fmt_name_rev(fmt, iter)?;
write!(fmt, "{name}/")
}
None => Ok(()),
}
}
fmt_name_rev(fmt, self.0.iter_names_rev())
}
}
NameDisplay(self)
}
/// Returns the parent of this node.
pub fn parent(&self) -> Option<FdtNode<'dt, 'iter>> {
let (&hd, &tl) = self.parents.uncons()?;
Some(FdtNode::new(self.fdt, hd, tl))
}
/// Returns an iterator over the names of the node and its parents, in reverse order.
fn iter_names_rev(&self) -> impl '_ + Clone + Iterator<Item = FdtNodeName<'dt>> {
self.parents.cons(self.index).into_iter().map(move |i| {
match self.fdt.next_struct_event_from(i) {
Some(Ok((_, FdtStructEvent::BeginNode(name)))) => name,
_ => unreachable!("checked in FlattenedDeviceTree::from_ptr"),
}
})
}
}
impl<'dt, 'iter> fmt::Debug for FdtNode<'dt, 'iter> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{} ", self.name())?;
fmt.debug_map().entries(self.iter_props()).finish()
}
}
/// Parses nodes out of a flattened DeviceTree and calls the function with each one. This does not
/// allocate memory, and may be called before the allocator is configured (at the cost of decreased
/// performance). Returns the offset just past the node that starts at `index`.
///
/// - `'dt` is the lifetime of the DeviceTree.
/// - `'iter` is the lifetime of this function call, which the `parents` list is borrowed for.
/// - `'a` is the lifetime of each individual call to `func`.
///
/// - `E` is the type of errors. We do a pass in `FlattenedDeviceTree::from_ptr` with this as
/// `DeviceTreeError` (and a `func` that's a no-op) to find any errors we might encounter. In
/// actual user invocations of this function, it's replaced with the actual user error type (and
/// `on_error` panics).
///
/// - `fdt` is the flattened DeviceTree we're parsing.
/// - `func` is the callback to call with nodes.
/// - `on_error` is a function to call to convert a `DeviceTreeError` to an `E`.
/// - `index` is the index of the word in the structure block to start parsing at.
/// - `parents` is an accumulated list of the indices of nodes that are parents of this one.
fn for_each_node<'dt, 'iter, E>(
fdt: &'dt FlattenedDeviceTree<'dt>,
func: &mut impl for<'a> FnMut(FdtNode<'dt, 'a>) -> Result<(), E>,
on_error: &impl Fn(DeviceTreeError) -> E,
index: u32,
parents: StackLinkedList<'iter, u32>,
) -> Result<u32, E> {
// Read the first event, which should be the BeginNode starting the node we're trying to read.
// We need to ensure that `index` refers to a `BeginNode` so that methods on `FdtNode` (e.g.
// `iter_names_rev`) can rely on this. We also take as an inductive invariant that every index
// in `parents` refers to a valid `BeginNode`.
let (mut offset, event) = fdt
.next_struct_event_from(index)
.ok_or_else(|| on_error(DeviceTreeError::UnexpectedEndOfStructBlock))?
.unwrap_or_else(|_| unreachable!("checked in FlattenedDeviceTree::from_ptr"));
if !matches!(event, FdtStructEvent::BeginNode(_)) {
dbg!((event, index, parents));
return Err(on_error(DeviceTreeError::UnexpectedEvent));
}
// Create the node and call the callback with it.
func(FdtNode::new(fdt, index, parents))?;
// Create a new parents list that includes this node.
let new_parents = parents.cons(index);
// Advance past any prop events.
loop {
let (next_offset, event) = fdt
.next_struct_event_from(offset)
.ok_or_else(|| on_error(DeviceTreeError::UnexpectedEndOfStructBlock))?
.unwrap_or_else(|_| unreachable!("checked in FlattenedDeviceTree::from_ptr"));
if !matches!(event, FdtStructEvent::Prop(_, _)) {
break;
}
offset = next_offset;
}
// Until we find an end node, parse child nodes.
loop {
let (next_offset, event) = fdt
.next_struct_event_from(offset)
.ok_or_else(|| on_error(DeviceTreeError::UnexpectedEndOfStructBlock))?
.unwrap_or_else(|_| unreachable!("checked in FlattenedDeviceTree::from_ptr"));
if matches!(event, FdtStructEvent::EndNode) {
break Ok(next_offset);
}
offset = for_each_node(fdt, func, on_error, offset, new_parents)?;
}
}