//! Global structures for the allocators.

use crate::paging::{
    BuddyAllocator, MapError, MappingFlags, PageTable, ASID, LOMEM_TOP, MAX_PAGE_SIZE_BITS,
    PAGE_SIZE, PAGE_SIZES,
};
use allocator_api2::alloc::AllocError;
use contracts::requires;
use core::{num::NonZero, ptr::NonNull};
use spin::mutex::FairMutex;

/// The global instance of the physical page allocator.
static BUDDY_ALLOCATOR: FairMutex<Option<BuddyAllocator>> = FairMutex::new(None);

/// The global kernel page table.
static KERNEL_PAGE_TABLE: FairMutex<Option<&'static mut PageTable>> = FairMutex::new(None);

/// Initializes the kernel page table and enables paging.
///
/// # Safety
///
/// - Paging must not have been enabled previously.
/// - The buddy allocator must be valid.
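///
/// # Example
///
/// A minimal sketch of early-boot usage; `probe_physical_memory` is a hypothetical helper that
/// builds the [`BuddyAllocator`] from the firmware-provided memory map:
///
/// ```ignore
/// let buddy_allocator = probe_physical_memory();
/// // SAFETY: Paging has not been enabled yet, and the allocator covers only valid RAM.
/// unsafe { init_kernel_page_table(buddy_allocator) };
/// ```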
#[requires(BUDDY_ALLOCATOR.lock().is_none())]
#[requires(KERNEL_PAGE_TABLE.lock().is_none())]
#[ensures(BUDDY_ALLOCATOR.lock().is_some())]
#[ensures(KERNEL_PAGE_TABLE.lock().is_some())]
pub unsafe fn init_kernel_page_table(buddy_allocator: BuddyAllocator) {
    // Rebind as mutable here; marking the parameter `mut` in the signature triggers a spurious
    // warning because of how the contracts macros expand the function.
    let mut buddy_allocator = buddy_allocator;

    // Allocate a page to use (for now) as the global kernel page table. Later we'll actually
    // replace it with the hart0 initial stack's page, since we'll never free the root page of the
    // kernel page table, and we can't return that page to the buddy allocator anyway.
    let page_table = buddy_allocator
        .alloc_zeroed::<PageTable>()
        .expect("failed to allocate the kernel page table")
        .as_mut();

    // Create identity mappings for the lower half of memory.
    for page_num in 0..(LOMEM_TOP >> MAX_PAGE_SIZE_BITS) {
        let addr = page_num << MAX_PAGE_SIZE_BITS;
        let flags = MappingFlags::R | MappingFlags::W | MappingFlags::X;
        page_table
            .map(
                &mut buddy_allocator,
                addr,
                addr,
                1 << MAX_PAGE_SIZE_BITS,
                flags,
            )
            .expect("failed to set up identity mapping for low memory in the kernel page table");
    }

    // Set the page table as the current page table.
    PageTable::make_current(NonNull::from(&*page_table), ASID::KERNEL);

    // Print the page table.
    vernos_utils::dbg!(&page_table);

    // Save the buddy allocator and kernel page table.
    *BUDDY_ALLOCATOR.lock() = Some(buddy_allocator);
    *KERNEL_PAGE_TABLE.lock() = Some(page_table);
}

/// Initializes the virtual memory allocator and the regular allocator.
///
/// # Safety
///
/// - `himem_top` must be accurate.
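///
/// # Example
///
/// A sketch of the intended call site; here `himem_top` is assumed to come from the platform's
/// memory map and to be page-aligned:
///
/// ```ignore
/// // SAFETY: himem_top is the accurate, page-aligned top of high memory.
/// unsafe { init_kernel_virtual_memory_allocator(himem_top) };
/// ```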
#[requires(BUDDY_ALLOCATOR.lock().is_some())]
#[requires(KERNEL_PAGE_TABLE.lock().is_some())]
#[requires(himem_top & (PAGE_SIZE - 1) == 0)]
pub unsafe fn init_kernel_virtual_memory_allocator(himem_top: usize) {
    todo!()
}

/// Tries to allocate a page of physical memory of the given size, returning its physical address.
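///
/// # Example
///
/// A sketch of allocating a single base page, assuming the allocators have already been set up
/// by [`init_kernel_page_table`]:
///
/// ```ignore
/// let paddr = alloc_page(PAGE_SIZE).expect("out of physical memory");
/// log::debug!("allocated a physical page at {:#x}", paddr.get());
/// ```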
#[requires(PAGE_SIZES.contains(&len))]
pub fn alloc_page(len: usize) -> Result<NonZero<usize>, AllocError> {
    let mut buddy_allocator = BUDDY_ALLOCATOR.lock();
    let buddy_allocator = buddy_allocator.as_mut().unwrap();
    buddy_allocator.alloc_of_size(len).map(|addr| {
        // SAFETY: NonNull guarantees the address will be nonzero.
        unsafe { NonZero::new_unchecked(addr.as_ptr() as usize) }
    })
}

/// Logs the mappings in the kernel page table.
pub fn kernel_log_page_table() {
    let kernel_page_table = KERNEL_PAGE_TABLE.lock();
    let kernel_page_table = kernel_page_table.as_ref().unwrap();

    let count = kernel_page_table.debug_mappings().count();
    log::info!(
        "The kernel page table had {count} mapping{}",
        match count {
            0 => "s.",
            1 => ":",
            _ => "s:",
        }
    );
    for mapping in kernel_page_table.debug_mappings() {
        log::info!("{mapping:?}");
    }
}

/// Adds a mapping into the kernel page table.
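///
/// # Example
///
/// A sketch of mapping a freshly allocated physical page; the virtual address below is purely
/// illustrative:
///
/// ```ignore
/// let paddr = alloc_page(PAGE_SIZE).expect("out of physical memory");
/// kernel_map(
///     0xffff_ffc0_0000_0000,
///     paddr.get(),
///     PAGE_SIZE,
///     MappingFlags::R | MappingFlags::W,
/// )
/// .expect("failed to map the page");
/// ```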
pub fn kernel_map(
    vaddr: usize,
    paddr: usize,
    len: usize,
    flags: MappingFlags,
) -> Result<(), MapError> {
    let mut buddy_allocator = BUDDY_ALLOCATOR.lock();
    let mut kernel_page_table = KERNEL_PAGE_TABLE.lock();
    let buddy_allocator = buddy_allocator.as_mut().unwrap();
    let kernel_page_table = kernel_page_table.as_mut().unwrap();
    kernel_page_table.map(&mut *buddy_allocator, vaddr, paddr, len, flags)?;
    log::warn!("TODO: sfence.vma");
    log::warn!("TODO: TLB shootdown");
    Ok(())
}