path: root/crates/kernel/src/lib.rs
//! The static library that forms the core of the kernel.
#![no_std]

use crate::{
    alloc::{
        alloc_page, init_kernel_page_table, init_kernel_virtual_memory_allocator,
        kernel_log_page_table, kernel_map,
    },
    constants::STACK_SIZE,
    paging::{MappingFlags, PAGE_SIZE, PAGE_SIZE_BITS},
};
use core::ptr::NonNull;
use log::debug;
use vernos_alloc_buddy::BuddyAllocator;
use vernos_alloc_physmem_free_list::FreeListAllocator;
use vernos_device_tree::FlattenedDeviceTree;

#[cfg(target_os = "none")]
mod panic;

pub mod alloc;
pub mod arch;
pub mod constants;
pub mod logger;
pub mod paging;

/// Some addresses passed from the entrypoint to hart0.
#[derive(Debug)]
#[repr(C)]
pub struct EarlyBootAddrs {
    /// The physical address of the flattened DeviceTree blob.
    device_tree: *const u8,
    /// The first page of the kernel. Rewritten to its himem address by `hart0_early_boot`.
    kernel_start: *const [u8; PAGE_SIZE],
    /// One past the last page of the kernel.
    kernel_end: *const [u8; PAGE_SIZE],
    /// The end of the kernel's read-execute (text) segment.
    kernel_rx_end: *const [u8; PAGE_SIZE],
    /// The end of the kernel's read-only (rodata) segment.
    kernel_ro_end: *const [u8; PAGE_SIZE],
    /// The end of the kernel's read-write (data and bss) segment.
    kernel_rw_end: *const [u8; PAGE_SIZE],
    /// The lowest page of the stack the entrypoint set up.
    initial_stack_start: *const [u8; PAGE_SIZE],
    /// One past the top of the stack. Rewritten to its himem address by `hart0_early_boot`.
    stack_end: *const [u8; PAGE_SIZE],
    /// The page containing the trampoline code.
    trampoline_start: *const [u8; PAGE_SIZE],
}

impl EarlyBootAddrs {
    /// Looks for a DeviceTree at the address stored in this `EarlyBootAddrs`, and returns it if it looks
    /// valid. Panics if the DeviceTree is invalid.
    ///
    /// # Safety
    ///
    /// - The `EarlyBootAddrs` must be accurate.
    /// - The `device_tree` pointer must be a valid pointer into physical memory. See
    ///   `vernos_device_tree::FlattenedDeviceTree::from_ptr` for the precise requirements.
    unsafe fn flattened_device_tree(&self) -> FlattenedDeviceTree {
        FlattenedDeviceTree::from_ptr(
            self.device_tree,
            self.kernel_start as usize..self.kernel_end as usize,
        )
        .expect("invalid DeviceTree")
    }
}

/// The first stage of booting the kernel. This should be executed by hart0 alone. It runs with
/// paging disabled, and:
///
/// - sets up a physical memory allocator
/// - sets up paging
/// - sets up a virtual memory allocator
/// - sets up a general-purpose allocator
/// - maps the trampoline page
/// - maps the kernel to higher-half memory
/// - allocates and maps a kernel stack for hart0
///
/// It updates the following fields in `early_boot_addrs` to point to the appropriate himem
/// addresses:
///
/// - `kernel_start`
/// - `stack_end`
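///
/// The himem layout it builds, from the top of the address space down, is sketched below
/// (illustrative; the exact sizes depend on `PAGE_SIZE` and `STACK_SIZE`):
///
/// ```text
/// +------------------------+ <- top of the address space
/// |    trampoline page     |
/// +------------------------+
/// | guard page (unmapped)  |
/// +------------------------+
/// |      kernel image      |
/// | (rx, then ro, then rw) |
/// +------------------------+ <- new `kernel_start`
/// | guard page (unmapped)  |
/// +------------------------+ <- new `stack_end`
/// |      hart0 stack       |
/// |  (`STACK_SIZE` bytes)  |
/// +------------------------+
/// | guard page (unmapped)  |
/// +------------------------+ <- top of the virtual memory allocator's range
/// |          ...           |
/// ```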
///
/// # Safety
///
/// - The `device_tree` pointer must be a valid pointer into physical memory. See
///   `vernos_device_tree::FlattenedDeviceTree::from_ptr` for the precise requirements.
/// - The `kernel_start`, `kernel_rx_end`, `kernel_ro_end`, `kernel_rw_end`, and `kernel_end`
///   addresses must be accurate and page-aligned.
/// - The `initial_stack_start` and `stack_end` addresses must be accurate and page-aligned.
/// - The `trampoline_start` pointer must be accurate and page-aligned.
/// - This must be called in supervisor mode with paging disabled.
/// - No other harts may be running concurrently with us.
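///
/// A sketch of the intended call sequence from the (arch-specific) entrypoint, assuming it
/// has already populated an `EarlyBootAddrs` (the `addrs` value below is hypothetical; the
/// real handoff happens in assembly):
///
/// ```ignore
/// hart0_early_boot(&mut addrs);
/// // ...enable paging via the trampoline page, then switch to the himem stack at the
/// // updated `addrs.stack_end`...
/// hart0_boot(&mut addrs);
/// ```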
#[no_mangle]
pub unsafe extern "C" fn hart0_early_boot(early_boot_addrs: &mut EarlyBootAddrs) {
    // Set up the early-boot logger.
    logger::init_early();
    vernos_utils::dbg!(&*early_boot_addrs);

    // Assert that the addresses we were passed are page-aligned. (`pointer::is_aligned`
    // would be vacuous here, since `[u8; PAGE_SIZE]` has an alignment of 1, so we check
    // against PAGE_SIZE directly.)
    assert_eq!(early_boot_addrs.kernel_start as usize % PAGE_SIZE, 0);
    assert_eq!(early_boot_addrs.kernel_end as usize % PAGE_SIZE, 0);
    assert_eq!(early_boot_addrs.kernel_rx_end as usize % PAGE_SIZE, 0);
    assert_eq!(early_boot_addrs.kernel_ro_end as usize % PAGE_SIZE, 0);
    assert_eq!(early_boot_addrs.kernel_rw_end as usize % PAGE_SIZE, 0);
    assert_eq!(early_boot_addrs.initial_stack_start as usize % PAGE_SIZE, 0);
    assert_eq!(early_boot_addrs.stack_end as usize % PAGE_SIZE, 0);
    assert_eq!(early_boot_addrs.trampoline_start as usize % PAGE_SIZE, 0);

    // Parse the DeviceTree.
    let flattened_device_tree = unsafe { early_boot_addrs.flattened_device_tree() };

    // Find the available physical memory areas and initialize the physical memory
    // free-list.
    let mut physical_memory_free_list = FreeListAllocator::<PAGE_SIZE>::new();
    let mut physical_memory_region_count = 0;
    flattened_device_tree
        .for_each_memory_range::<_, PAGE_SIZE>(|addrs| {
            let len_bytes = addrs.end - addrs.start;
            assert!(addrs.start.trailing_zeros() as usize >= PAGE_SIZE_BITS);
            assert!(len_bytes.trailing_zeros() as usize >= PAGE_SIZE_BITS);
            // UNWRAP: for_each_memory_range avoids returning the zero address.
            let addr = NonNull::new(addrs.start as *mut [u8; PAGE_SIZE]).unwrap();
            let len_pages = len_bytes >> PAGE_SIZE_BITS;
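            // e.g. with 4 KiB pages (PAGE_SIZE_BITS = 12), a 128 MiB region at 0x8000_0000
            // passes the checks above and yields 32768 pages (illustrative values).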

            physical_memory_free_list.add(addr, len_pages);
            physical_memory_region_count += 1;
            Ok(())
        })
        .unwrap_or_else(|err| void::unreachable(err));

    // Log the physical memory we found.
    debug!(
        "found {} usable regions of physical memory{}",
        physical_memory_region_count,
        if physical_memory_region_count == 0 {
            ""
        } else {
            ":"
        }
    );
    for (addr, len_pages) in physical_memory_free_list.iter() {
        debug!(
            "{:p}..{:p} ({} bytes)",
            addr.as_ptr(),
            addr.as_ptr().wrapping_add(len_pages),
            len_pages << PAGE_SIZE_BITS,
        )
    }

    // Initialize the buddy allocator.
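    // (`BuddyAllocator::new` consumes the free list, taking over its pages.)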
    let alloc_buddy = BuddyAllocator::new(physical_memory_free_list)
        .expect("failed to configure the buddy allocator");

    // Set up the kernel page table.
    init_kernel_page_table(alloc_buddy);

    // Map the trampoline page.
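    // `usize::MAX - PAGE_SIZE + 1` is the highest page-aligned address (with 4 KiB pages,
    // 0xffff_ffff_ffff_f000 on a 64-bit target).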
    let mut vaddr_bump = usize::MAX - PAGE_SIZE + 1;
    kernel_map(
        vaddr_bump,
        early_boot_addrs.trampoline_start as usize,
        PAGE_SIZE,
        MappingFlags::R | MappingFlags::X,
    )
    .expect("failed to map the trampoline page to himem");

    // Skip a page down for a guard page, then map the kernel.
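    // (`offset_from` between `*const [u8; PAGE_SIZE]` pointers counts whole pages, not
    // bytes, so no division by PAGE_SIZE is needed.)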
    let total_kernel_pages = early_boot_addrs
        .kernel_rw_end
        .offset_from(early_boot_addrs.kernel_start) as usize;
    vaddr_bump -= PAGE_SIZE * (total_kernel_pages + 1);
    let new_kernel_start = vaddr_bump;
    for i in 0..total_kernel_pages {
        let vaddr = vaddr_bump + (i * PAGE_SIZE);
        let paddr = early_boot_addrs.kernel_start.add(i);
        let flags = if paddr < early_boot_addrs.kernel_rx_end {
            MappingFlags::R | MappingFlags::X
        } else if paddr < early_boot_addrs.kernel_ro_end {
            MappingFlags::R
        } else {
            MappingFlags::R | MappingFlags::W
        };

        kernel_map(vaddr, paddr as usize, PAGE_SIZE, flags)
            .expect("failed to map the kernel to himem");
    }

    // Skip a page down for a guard page, then map the top page of the stack.
    vaddr_bump -= PAGE_SIZE;
    let new_stack_end = vaddr_bump;
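    // (`new_stack_end` is one past the stack's highest byte; the guard page just skipped
    // sits between it and the kernel image.)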
    vaddr_bump -= PAGE_SIZE;
    kernel_map(
        vaddr_bump,
        early_boot_addrs.initial_stack_start as usize,
        PAGE_SIZE,
        MappingFlags::R | MappingFlags::W,
    )
    .expect("failed to map the initial stack to himem");

    // Allocate and map the rest of the stack's pages. The topmost page was already mapped
    // above from the initial stack, hence the `- 1` in the loop bound.
    let new_stack_start = new_stack_end - STACK_SIZE;
    vaddr_bump = new_stack_start;
    for i in 0..((STACK_SIZE >> PAGE_SIZE_BITS) - 1) {
        let vaddr = new_stack_start + (i << PAGE_SIZE_BITS);
        let paddr =
            alloc_page(PAGE_SIZE).expect("failed to allocate memory for a hart0 stack page");
        kernel_map(
            vaddr,
            paddr.into(),
            PAGE_SIZE,
            MappingFlags::R | MappingFlags::W,
        )
        .expect("failed to map a hart0 stack page");
    }

    // Skip another page down for a guard page.
    vaddr_bump -= PAGE_SIZE;

    // Set up the kernel virtual memory allocator (and general allocator).
    init_kernel_virtual_memory_allocator(vaddr_bump);

    // Update the fields in `early_boot_addrs` that we promised to.
    early_boot_addrs.kernel_start = new_kernel_start as *const [u8; PAGE_SIZE];
    early_boot_addrs.stack_end = new_stack_end as *const [u8; PAGE_SIZE];

    // Log the final kernel page table and return.
    kernel_log_page_table();
}

/// The entrypoint to the kernel, to be run after paging and the allocator have been set up, and
/// the stack has been switched to be in himem. This should be executed by hart0 alone. It performs
/// some early boot tasks, then wakes up any other harts.
///
/// The tasks it performs are:
///
/// - converts the DeviceTree into a global key-value mapping
/// - upgrades the logger to one that can dynamically grow
/// - TODO
///
/// # Safety
///
/// - `hart0_early_boot` must have been called.
/// - This must be called in supervisor mode with traps disabled, but with all traps delegated to
///   supervisor mode.
/// - No other harts may be running concurrently with us. TODO: Define their state.
#[no_mangle]
pub unsafe extern "C" fn hart0_boot(early_boot_addrs: &mut EarlyBootAddrs) -> ! {
    // Check that the stack canary at the base of the initial stack is still present, i.e.
    // that the initial stack never overflowed during early boot.
    assert_eq!(
        *(early_boot_addrs.initial_stack_start as *const u64),
        0xdead0bad0defaced
    );

    todo!("hart0_boot");
}