From 1d61f97abb31224ddc8e50c452fddcb0d1ef9d68 Mon Sep 17 00:00:00 2001
From: MCorange
Date: Wed, 12 Jun 2024 23:37:24 +0300
Subject: [PATCH] Actual heap allocation!!!!!!!!!!!!

---
 Cargo.lock                         |  18 ++
 boot/boot.s                        |   4 +
 kernel/Cargo.toml                  |   3 +
 kernel/src/gdt/mod.rs              |   2 +-
 kernel/src/interrupts/mod.rs       |   2 +-
 kernel/src/lib.rs                  |  38 +++--
 kernel/src/mem/area_frame_alloc.rs |   4 +-
 kernel/src/mem/heap/mod.rs         |  46 +++++
 kernel/src/mem/mod.rs              |  80 ++++++++-
 kernel/src/mem/paging/entry.rs     |  76 +++++++++
 kernel/src/mem/paging/mapper.rs    | 141 ++++++++++++++++
 kernel/src/mem/paging/mod.rs       | 263 +++++++++++++++++++++++++++++
 kernel/src/mem/paging/tables.rs    |  95 +++++++++++
 kernel/src/mem/paging/temporary.rs |  81 +++++++++
 kernel/src/utils.rs                |  18 +-
 linker.ld                          |  51 +++++-
 16 files changed, 889 insertions(+), 33 deletions(-)
 create mode 100644 kernel/src/mem/heap/mod.rs
 create mode 100644 kernel/src/mem/paging/entry.rs
 create mode 100644 kernel/src/mem/paging/mapper.rs
 create mode 100644 kernel/src/mem/paging/mod.rs
 create mode 100644 kernel/src/mem/paging/tables.rs
 create mode 100644 kernel/src/mem/paging/temporary.rs

diff --git a/Cargo.lock b/Cargo.lock
index eb3ac1c..157aaf9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -41,12 +41,15 @@ dependencies = [
 name = "kernel"
 version = "0.1.0"
 dependencies = [
+ "bitflags 2.5.0",
 "lazy_static",
 "log",
 "multiboot2",
+ "once",
 "pc-keyboard",
 "pic8259",
 "spin 0.9.8",
+ "talc",
 "uart_16550",
 "x86",
 "x86_64",
@@ -90,6 +93,12 @@ dependencies = [
 "uefi-raw",
 ]
 
+[[package]]
+name = "once"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60bfe75a40f755f162b794140436c57845cb106fd1467598631c76c6fff08e28"
+
 [[package]]
 name = "pc-keyboard"
 version = "0.7.0"
@@ -190,6 +199,15 @@ dependencies = [
 "unicode-ident",
 ]
 
+[[package]]
+name = "talc"
+version = "4.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04be12ec299aadd63a0bf781d893e4b6139d33cdca6dcd6f6be31f849cedcac8"
+dependencies = [
+ "lock_api",
+]
+
 [[package]]
 name = "uart_16550"
 version = "0.3.0"
diff --git a/boot/boot.s b/boot/boot.s
index 9034282..dc3291b 100644
--- a/boot/boot.s
+++ b/boot/boot.s
@@ -94,6 +94,10 @@ check_long_mode:
     jmp error
 
 set_up_page_tables:
+    mov eax, p4_table
+    or eax, 0b11 ; present + writable
+    mov [p4_table + 511 * 8], eax
+
     ; map first P4 entry to P3 table
     mov eax, p3_table
     or eax, 0b11 ; present + writable
diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml
index 354aebe..6e8c03a 100644
--- a/kernel/Cargo.toml
+++ b/kernel/Cargo.toml
@@ -11,12 +11,15 @@ crate-type=["staticlib"]
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
+bitflags = "2.5.0"
 lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
 log = "0.4.21"
 multiboot2 = "0.20.2"
+once = "0.3.4"
 pc-keyboard = "0.7.0"
 pic8259 = "0.11.0"
 spin = "0.9.8"
+talc = "4.4.1"
 uart_16550 = "0.3.0"
 x86 = "0.52.0"
 x86_64 = "0.15.1"
diff --git a/kernel/src/gdt/mod.rs b/kernel/src/gdt/mod.rs
index c8d1fbe..df6d187 100644
--- a/kernel/src/gdt/mod.rs
+++ b/kernel/src/gdt/mod.rs
@@ -38,7 +38,7 @@ struct Selectors {
 }
 
 pub fn init() {
-    log::debug!("GDT init");
+    log::info!("GDT init");
 
     use x86_64::instructions::tables::load_tss;
     use x86_64::instructions::segmentation::{CS, Segment};
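The new `set_up_page_tables` lines above point the last P4 entry back at the P4 table itself, i.e. they set up recursive mapping. That is what the `P4` constant in `kernel/src/mem/paging/tables.rs` (further down in this patch) relies on: an address whose four 9-bit table indices are all 511 follows the recursive entry at every level and lands on the P4 frame. A host-runnable sketch of the arithmetic (not part of the patch):

```rust
// Why P4[511] -> p4_table makes 0xffffffff_fffff000 address the P4 table:
// every 9-bit index of that address is 511, so each lookup follows the
// recursive entry, and the final 12-bit offset of 0 selects entry 0.
fn index(addr: usize, level: u32) -> usize {
    // 12-bit page offset, then one 9-bit index per level (P1 lowest)
    (addr >> (12 + 9 * (level - 1))) & 0o777
}

fn main() {
    let p4 = 0xffff_ffff_ffff_f000_usize; // tables::P4
    for level in 1..=4 {
        assert_eq!(index(p4, level), 511);
    }
    assert_eq!(p4 % 4096, 0); // page offset 0
}
```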
diff --git a/kernel/src/interrupts/mod.rs b/kernel/src/interrupts/mod.rs
index d78518d..9dcf1e7 100644
--- a/kernel/src/interrupts/mod.rs
+++ b/kernel/src/interrupts/mod.rs
@@ -40,7 +40,7 @@ pub enum InterruptIndex {
 }
 
 pub fn init_idt() {
-    log::debug!("IDT init");
+    log::info!("IDT init");
     IDT.load();
     unsafe { PICS.lock().initialize() };
     x86_64::instructions::interrupts::enable();
diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs
index 1ee2624..d22310b 100644
--- a/kernel/src/lib.rs
+++ b/kernel/src/lib.rs
@@ -2,10 +2,13 @@
 #![no_main]
 #![feature(abi_x86_interrupt)]
 #![feature(associated_type_defaults)]
+#![allow(internal_features)]
+#![feature(ptr_internals)]
+#![feature(allocator_api)]
 
-use events::EVENTMAN;
+use alloc::vec::Vec;
 
-use crate::events::Event;
+extern crate alloc;
 
 #[macro_use]
 mod logger;
@@ -21,23 +24,38 @@ mod mem;
 
 #[no_mangle]
 extern "C" fn kmain(mb2_info_addr: usize) -> ! {
+    // Init
     logger::init(log::LevelFilter::Trace).unwrap();
     gdt::init();
     interrupts::init_idt();
 
-    let mem_info = mem::MemInfo::load(mb2_info_addr);
-    let mut _alloc = mem::area_frame_alloc::AreaFrameAllocator::new(mem_info);
+    utils::enable_nxe_bit();
+    utils::enable_write_protect_bit();
 
-    EVENTMAN.lock().add_listener(events::Event::Ps2KeyPress(None), |k| {
-        let Event::Ps2KeyPress(v) = k else {panic!()};
+    mem::init(mb2_info_addr);
 
-        log::debug!("Keypress event received! ({:?})", v.unwrap());
-        Ok(())
-    });
+    // smoke-test the new heap: Vec allocates through the global allocator
+    let mut a = Vec::new();
+
+    a.push(6);
+    a.push(9);
+    a.push(4);
+    a.push(2);
+    a.push(0);
+
+    log::error!("{:?}", a);
+
+    // EVENTMAN.lock().add_listener(events::Event::Ps2KeyPress(None), |k| {
+    //     let Event::Ps2KeyPress(v) = k else {panic!()};
+    //
+    //     log::debug!("Keypress event received! ({:?})", v.unwrap());
+    //     Ok(())
+    // });
 
     // let (level_4_page_table, _) = Cr3::read();
     // println!("Level 4 page table at: {:?}", level_4_page_table.start_address());
-
+    log::info!("end of work");
     utils::hcf();
 }
diff --git a/kernel/src/mem/area_frame_alloc.rs b/kernel/src/mem/area_frame_alloc.rs
index 6ae52ec..3d15cc0 100644
--- a/kernel/src/mem/area_frame_alloc.rs
+++ b/kernel/src/mem/area_frame_alloc.rs
@@ -68,11 +68,11 @@ impl<'a> AreaFrameAllocator<'a> {
             }
         }
     }
-    pub fn new(mi: MemInfo) -> AreaFrameAllocator<'a> {
+    pub fn new(mi: &MemInfo) -> AreaFrameAllocator<'a> {
         let mut allocator = AreaFrameAllocator {
             next_free_frame: Frame::containing_address(0),
             current_area: None,
-            areas: mi.mem_area_iter,
+            areas: mi.mem_area_iter.clone(),
             kernel_start: Frame::containing_address(mi.kernel_start),
             kernel_end: Frame::containing_address(mi.kernel_end),
             multiboot_start: Frame::containing_address(mi.mb_start),
diff --git a/kernel/src/mem/heap/mod.rs b/kernel/src/mem/heap/mod.rs
new file mode 100644
index 0000000..a1442db
--- /dev/null
+++ b/kernel/src/mem/heap/mod.rs
@@ -0,0 +1,46 @@
+use core::{alloc::{GlobalAlloc, Layout}, sync::atomic::{AtomicUsize, Ordering}};
+
+use x86_64::align_up;
+
+pub const HEAP_START: usize = 0o_000_001_000_000_0000;
+pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
+
+/// A simple allocator that allocates memory linearly and ignores freed memory.
+#[derive(Debug)]
+pub struct BumpAllocator {
+    heap_start: usize,
+    heap_end: usize,
+    next: AtomicUsize,
+}
+
+impl BumpAllocator {
+    pub const fn new(heap_start: usize, heap_end: usize) -> Self {
+        Self { heap_start, heap_end, next: AtomicUsize::new(heap_start) }
+    }
+}
+
+unsafe impl GlobalAlloc for BumpAllocator {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        loop {
+            // load current state of the `next` field
+            let current_next = self.next.load(Ordering::Relaxed);
+            let alloc_start = align_up(current_next as u64, layout.align() as u64);
+            let alloc_end = alloc_start.saturating_add(layout.size() as u64);
+
+            if alloc_end <= self.heap_end as u64 {
+                // update `next` only if it still holds `current_next`; if
+                // another thread won the race, retry the loop with the new value
+                if self.next.compare_exchange(current_next, alloc_end as usize,
+                    Ordering::Relaxed, Ordering::Relaxed).is_ok()
+                {
+                    // next address was successfully updated, allocation succeeded
+                    return alloc_start as *mut u8;
+                }
+            } else {
+                panic!("OUT OF MEMORY");
+            }
+        }
+    }
+
+    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
+        // do nothing, leak memory
+    }
+}
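A note on the CAS loop in `alloc`: `compare_exchange` only advances `next` if no other thread moved it first, so on a lost race the loop retries with the fresh value and concurrent allocations never overlap. (The original `.unwrap()` on the `Result` would have panicked on exactly that retry path.) A host-side sketch (not part of the patch) exercising the address arithmetic; a fake heap range is fine because `alloc` never dereferences the pointers it hands out:

```rust
use core::alloc::{GlobalAlloc, Layout};

fn main() {
    // assumes the BumpAllocator above is in scope; x86_64::align_up is
    // plain arithmetic, so this also runs on a host target
    let bump = BumpAllocator::new(0x1000, 0x2000);
    let layout = Layout::from_size_align(10, 8).unwrap();

    let a = unsafe { bump.alloc(layout) } as usize;
    let b = unsafe { bump.alloc(layout) } as usize;

    assert_eq!(a, 0x1000);
    assert_eq!(b, 0x1010); // 0x1000 + 10, rounded up to 8-byte alignment
}
```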
diff --git a/kernel/src/mem/mod.rs b/kernel/src/mem/mod.rs
index 670bf3e..e2188a7 100644
--- a/kernel/src/mem/mod.rs
+++ b/kernel/src/mem/mod.rs
@@ -1,11 +1,23 @@
 use core::alloc::{GlobalAlloc, Layout};
-use multiboot2::{BootInformationHeader, MemoryArea, MemoryAreaType, MemoryMapTag, TagTrait};
+use multiboot2::{BootInformation, BootInformationHeader, MemoryArea, MemoryAreaType, MemoryMapTag, TagTrait};
+use talc::{ErrOnOom, Span, Talc, Talck};
+use crate::mem::paging::entry::EntryFlags;
+use self::heap::{HEAP_SIZE, HEAP_START};
+pub use self::paging::remap_the_kernel;
 
+pub mod heap;
 pub mod area_frame_alloc;
+pub mod paging;
+
+//#[global_allocator]
+//static GLOBAL_ALLOCATOR: heap::BumpAllocator = heap::BumpAllocator::new(HEAP_START, HEAP_START + HEAP_SIZE);
 
 #[global_allocator]
-static GLOBAL_ALLOCATOR: DummyAlloc = DummyAlloc;
+static GLOBAL_ALLOCATOR: Talck<spin::Mutex<()>, ErrOnOom> = Talc::new(ErrOnOom).lock();
 
 pub struct DummyAlloc;
 unsafe impl GlobalAlloc for DummyAlloc {
     unsafe fn alloc(&self, _: Layout) -> *mut u8 { 0 as *mut u8 }
@@ -24,8 +36,40 @@ impl Frame {
     fn containing_address(address: usize) -> Frame {
         Frame{ number: address / PAGE_SIZE }
     }
+    fn start_address(&self) -> paging::PhysicalAddress {
+        self.number * PAGE_SIZE
+    }
+    fn clone(&self) -> Frame {
+        Frame { number: self.number }
+    }
+    fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
+        FrameIter {
+            start,
+            end,
+        }
+    }
 }
 
+struct FrameIter {
+    start: Frame,
+    end: Frame,
+}
+
+impl Iterator for FrameIter {
+    type Item = Frame;
+
+    fn next(&mut self) -> Option<Frame> {
+        if self.start <= self.end {
+            let frame = self.start.clone();
+            self.start.number += 1;
+            Some(frame)
+        } else {
+            None
+        }
+    }
+}
+
 #[allow(dead_code)]
 pub trait FrameAllocator {
     fn allocate_frame(&mut self) -> Option<Frame>;
@@ -70,8 +114,9 @@ impl MemoryAreaIter {
 }
 
-#[derive(Debug, Clone)]
-pub struct MemInfo{
+#[derive(Debug)]
+pub struct MemInfo<'a> {
+    boot_info: BootInformation<'a>,
     mem_area_iter: MemoryAreaIter,
     pub kernel_start: usize,
     pub kernel_end: usize,
@@ -79,7 +124,7 @@ pub struct MemInfo{
     pub mb_end: usize,
 }
 
-impl MemInfo {
+impl MemInfo<'_> {
     pub fn load(mb_ptr: usize) -> Self {
         let boot_info = unsafe {
             multiboot2::BootInformation::load(mb_ptr as *const BootInformationHeader).unwrap()
@@ -96,8 +141,9 @@ impl MemInfo {
         // log::debug!("{}: start: 0x{:x}, sz: 0x{:x}, flags: 0b{:b}", section.name().unwrap_or("NONE"),
         //     section.start_address(), section.size(), section.flags());
         // }
-        let kernel_start = boot_info.elf_sections().unwrap().clone().map(|s| s.start_address()).min().unwrap() as usize;
-        let kernel_end = boot_info.elf_sections().unwrap().clone().map(|s| s.end_address()).max().unwrap() as usize;
+        let elf_secs = boot_info.elf_sections().unwrap();
+        let kernel_start = elf_secs.clone().filter(|s| s.is_allocated()).map(|s| s.start_address()).min().unwrap() as usize;
+        let kernel_end = elf_secs.clone().filter(|s| s.is_allocated()).map(|s| s.end_address()).max().unwrap() as usize;
 
         let mb_start = boot_info.start_address();
         let mb_end = boot_info.end_address();
@@ -105,6 +151,7 @@ impl MemInfo {
         //log::debug!("Multiboot: start: 0x{:x} sz: 0x{:x}", mi.mb_start, mi.mb_end - mi.mb_start);
 
         Self {
+            // load the boot info a second time because BootInformation is not Clone
+            boot_info: unsafe { multiboot2::BootInformation::load(mb_ptr as *const BootInformationHeader).unwrap() },
             mem_area_iter: MemoryAreaIter::new(mmap_tag),
             kernel_start,
             kernel_end,
@@ -114,3 +161,22 @@ impl MemInfo {
     }
 }
 
+pub fn init(mb_ptr: usize) {
+    once::assert_has_not_been_called!("mem::init must be called only once");
+    let mem_info = MemInfo::load(mb_ptr);
+    let mut frame_alloc = area_frame_alloc::AreaFrameAllocator::new(&mem_info);
+
+    let mut active_table = remap_the_kernel(&mut frame_alloc, &mem_info);
+
+    use self::paging::Page;
+
+    let heap_start_page = Page::containing_address(HEAP_START);
+    let heap_end_page = Page::containing_address(HEAP_START + HEAP_SIZE - 1);
+
+    for page in Page::range_inclusive(heap_start_page, heap_end_page) {
+        active_table.map(page, EntryFlags::WRITABLE, &mut frame_alloc);
+    }
+    unsafe {
+        GLOBAL_ALLOCATOR.lock().claim(Span::from_base_size(HEAP_START as *mut u8, HEAP_SIZE)).unwrap();
+    }
+}
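`HEAP_START` is spelled in octal because a page-table index is 9 bits, i.e. exactly three octal digits: reading from the right, four digits of page offset, then the P1, P2, P3 and P4 indices. `0o_000_001_000_000_0000` is therefore P3 slot 1 under P4 slot 0, which is the 1 GiB mark. A quick check (host-runnable, not part of the patch):

```rust
// Decoding HEAP_START's octal spelling: offset, then P1..P4 indices.
fn main() {
    const HEAP_START: usize = 0o_000_001_000_000_0000;
    assert_eq!(HEAP_START, 1 << 30); // exactly 1 GiB
    assert_eq!((HEAP_START >> 39) & 0o777, 0); // P4 index
    assert_eq!((HEAP_START >> 30) & 0o777, 1); // P3 index
    assert_eq!((HEAP_START >> 21) & 0o777, 0); // P2 index
    assert_eq!((HEAP_START >> 12) & 0o777, 0); // P1 index
}
```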
diff --git a/kernel/src/mem/paging/entry.rs b/kernel/src/mem/paging/entry.rs
new file mode 100644
index 0000000..9033a6e
--- /dev/null
+++ b/kernel/src/mem/paging/entry.rs
@@ -0,0 +1,76 @@
+use multiboot2::ElfSection;
+
+use crate::mem::Frame;
+
+pub struct Entry(u64);
+
+bitflags::bitflags! {
+    #[derive(Debug, Clone, Copy)]
+    pub struct EntryFlags: u64 {
+        const PRESENT =         1 << 0;
+        const WRITABLE =        1 << 1;
+        const USER_ACCESSIBLE = 1 << 2;
+        const WRITE_THROUGH =   1 << 3;
+        const NO_CACHE =        1 << 4;
+        const ACCESSED =        1 << 5;
+        const DIRTY =           1 << 6;
+        const HUGE_PAGE =       1 << 7;
+        const GLOBAL =          1 << 8;
+        const NO_EXECUTE =      1 << 63;
+    }
+}
+
+impl EntryFlags {
+    pub fn from_elf_section_flags(section: &ElfSection) -> EntryFlags {
+        use multiboot2::ElfSectionFlags;
+
+        let mut flags = EntryFlags::empty();
+
+        if section.flags().contains(ElfSectionFlags::ALLOCATED) {
+            // section is loaded to memory
+            flags = flags | EntryFlags::PRESENT;
+        }
+        if section.flags().contains(ElfSectionFlags::WRITABLE) {
+            flags = flags | EntryFlags::WRITABLE;
+        }
+        if !section.flags().contains(ElfSectionFlags::EXECUTABLE) {
+            flags = flags | EntryFlags::NO_EXECUTE;
+        }
+
+        flags
+    }
+}
+
+impl Entry {
+    pub fn is_unused(&self) -> bool {
+        self.0 == 0
+    }
+
+    pub fn set_unused(&mut self) {
+        self.0 = 0;
+    }
+
+    pub fn flags(&self) -> EntryFlags {
+        EntryFlags::from_bits_truncate(self.0)
+    }
+
+    pub fn pointed_frame(&self) -> Option<Frame> {
+        if self.flags().contains(EntryFlags::PRESENT) {
+            Some(Frame::containing_address(
+                self.0 as usize & 0x000fffff_fffff000
+            ))
+        } else {
+            None
+        }
+    }
+
+    pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
+        assert!(frame.start_address() & !0x000fffff_fffff000 == 0);
+        self.0 = (frame.start_address() as u64) | flags.bits();
+    }
+}
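An `Entry` is a single `u64`: bits 12-51 hold the frame's physical address (hence the `0x000fffff_fffff000` mask), and the remaining bits carry flags, up to `NO_EXECUTE` at bit 63. The assert in `set` guarantees the address and the flag bits can never collide, so `pointed_frame` can recover the address by masking. A round-trip sketch (not part of the patch):

```rust
// How an entry's u64 packs address + flags; mask from entry.rs above.
fn main() {
    let frame_start: u64 = 0x5000; // a page-aligned physical address
    let flags: u64 = (1 << 0) | (1 << 1); // PRESENT | WRITABLE
    let entry = frame_start | flags;

    // pointed_frame recovers the address by masking off the flag bits
    assert_eq!(entry & 0x000fffff_fffff000, frame_start);
    // and the assert in Entry::set holds: no flag bits in the address
    assert_eq!(frame_start & !0x000fffff_fffff000, 0);
}
```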
diff --git a/kernel/src/mem/paging/mapper.rs b/kernel/src/mem/paging/mapper.rs
new file mode 100644
index 0000000..4cbee25
--- /dev/null
+++ b/kernel/src/mem/paging/mapper.rs
@@ -0,0 +1,141 @@
+use x86_64::VirtAddr;
+
+use super::{
+    entry::*, tables::{
+        Level4, Table, P4
+    },
+    Frame, FrameAllocator,
+    PAGE_SIZE, PhysicalAddress,
+    VirtualAddress, Page,
+    ENTRY_COUNT, InactivePageTable
+};
+use core::ptr::Unique;
+
+pub struct Mapper {
+    p4: Unique<Table<Level4>>,
+}
+
+impl Mapper {
+    pub unsafe fn new() -> Mapper {
+        Mapper {
+            p4: Unique::new_unchecked(P4),
+        }
+    }
+    pub fn p4(&self) -> &Table<Level4> {
+        unsafe { self.p4.as_ref() }
+    }
+
+    pub fn p4_mut(&mut self) -> &mut Table<Level4> {
+        unsafe { self.p4.as_mut() }
+    }
+
+    /// Translates a virtual address to the corresponding physical address.
+    /// Returns `None` if the address is not mapped.
+    pub fn translate(&self, virtual_address: VirtualAddress)
+        -> Option<PhysicalAddress>
+    {
+        let offset = virtual_address % PAGE_SIZE;
+        self.translate_page(Page::containing_address(virtual_address))
+            .map(|frame| frame.number * PAGE_SIZE + offset)
+    }
+
+    pub fn translate_page(&self, page: Page) -> Option<Frame> {
+        let p3 = self.p4().next_table(page.p4_index());
+
+        let huge_page = || {
+            p3.and_then(|p3| {
+                let p3_entry = &p3[page.p3_index()];
+                // 1GiB page?
+                if let Some(start_frame) = p3_entry.pointed_frame() {
+                    if p3_entry.flags().contains(EntryFlags::HUGE_PAGE) {
+                        // address must be 1GiB aligned
+                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
+                        return Some(Frame {
+                            number: start_frame.number + page.p2_index() *
+                                    ENTRY_COUNT + page.p1_index(),
+                        });
+                    }
+                }
+                if let Some(p2) = p3.next_table(page.p3_index()) {
+                    let p2_entry = &p2[page.p2_index()];
+                    // 2MiB page?
+                    if let Some(start_frame) = p2_entry.pointed_frame() {
+                        if p2_entry.flags().contains(EntryFlags::HUGE_PAGE) {
+                            // address must be 2MiB aligned
+                            assert!(start_frame.number % ENTRY_COUNT == 0);
+                            return Some(Frame {
+                                number: start_frame.number + page.p1_index()
+                            });
+                        }
+                    }
+                }
+                None
+            })
+        };
+
+        p3.and_then(|p3| p3.next_table(page.p3_index()))
+            .and_then(|p2| p2.next_table(page.p2_index()))
+            .and_then(|p1| p1[page.p1_index()].pointed_frame())
+            .or_else(huge_page)
+    }
+
+    /// Maps the page to the frame with the provided flags.
+    /// The `EntryFlags::PRESENT` flag is added by default. Needs a
+    /// `FrameAllocator` as it might need to create new page tables.
+    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
+                     allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let p4 = self.p4_mut();
+        let p3 = p4.next_table_create(page.p4_index(), allocator);
+        let p2 = p3.next_table_create(page.p3_index(), allocator);
+        let p1 = p2.next_table_create(page.p2_index(), allocator);
+
+        assert!(p1[page.p1_index()].is_unused());
+        p1[page.p1_index()].set(frame, flags | EntryFlags::PRESENT);
+    }
+
+    /// Maps the page to some free frame with the provided flags.
+    /// The free frame is allocated from the given `FrameAllocator`.
+    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let frame = allocator.allocate_frame().expect("out of memory");
+        self.map_to(page, frame, flags, allocator)
+    }
+
+    /// Identity-maps the given frame with the provided flags.
+    /// The `FrameAllocator` is used to create new page tables if needed.
+    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
+        where A: FrameAllocator
+    {
+        let page = Page::containing_address(frame.start_address());
+        self.map_to(page, frame, flags, allocator)
+    }
+
+    /// Unmaps the given page and adds all freed frames to the given
+    /// `FrameAllocator`.
+    pub fn unmap<A>(&mut self, page: Page, _allocator: &mut A)
+        where A: FrameAllocator
+    {
+        assert!(self.translate(page.start_address()).is_some());
+
+        let p1 = self.p4_mut()
+            .next_table_mut(page.p4_index())
+            .and_then(|p3| p3.next_table_mut(page.p3_index()))
+            .and_then(|p2| p2.next_table_mut(page.p2_index()))
+            .expect("mapping code does not support huge pages");
+        let _frame = p1[page.p1_index()].pointed_frame().unwrap();
+        p1[page.p1_index()].set_unused();
+
+        use x86_64::instructions::tlb;
+        tlb::flush(VirtAddr::new(page.start_address().try_into().unwrap()));
+        // TODO free p(1,2,3) table if empty
+        // _allocator.deallocate_frame(_frame);
+    }
+}
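The huge-page fallback in `translate_page` reasons in frame numbers: a 1 GiB P3 entry spans 512 x 512 standard 4 KiB frames, so the wanted frame is the start frame plus `p2_index * ENTRY_COUNT + p1_index`, and the alignment asserts make that sum well-defined. With concrete numbers (a sketch, not part of the patch):

```rust
// The huge-page frame arithmetic from translate_page, spelled out.
fn main() {
    const ENTRY_COUNT: usize = 512;

    // a 1 GiB page at physical 0x4000_0000 = frame 0x40000,
    // which is 512*512-aligned, as the assert demands
    let start_frame = 0x4000_0000 / 4096;
    assert_eq!(start_frame % (ENTRY_COUNT * ENTRY_COUNT), 0);

    // the 4 KiB frame holding (p2_index = 3, p1_index = 7) inside it:
    let (p2_index, p1_index) = (3, 7);
    let frame = start_frame + p2_index * ENTRY_COUNT + p1_index;
    assert_eq!(frame * 4096, 0x4000_0000 + 3 * 0x20_0000 + 7 * 0x1000);
}
```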
diff --git a/kernel/src/mem/paging/mod.rs b/kernel/src/mem/paging/mod.rs
new file mode 100644
index 0000000..79f9097
--- /dev/null
+++ b/kernel/src/mem/paging/mod.rs
@@ -0,0 +1,263 @@
+use core::ops::{Deref, DerefMut};
+use self::{entry::*, mapper::Mapper, temporary::TemporaryPage};
+use super::{Frame, FrameAllocator, MemInfo, PAGE_SIZE};
+use x86_64::{registers::control, structures::paging::PhysFrame, PhysAddr};
+
+pub mod tables;
+pub mod entry;
+pub mod temporary;
+pub mod mapper;
+
+const ENTRY_COUNT: usize = 512;
+
+pub type PhysicalAddress = usize;
+pub type VirtualAddress = usize;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Page {
+    number: usize,
+}
+
+pub struct ActivePageTable {
+    mapper: Mapper,
+}
+
+impl Deref for ActivePageTable {
+    type Target = Mapper;
+    fn deref(&self) -> &Mapper {
+        &self.mapper
+    }
+}
+
+impl DerefMut for ActivePageTable {
+    fn deref_mut(&mut self) -> &mut Mapper {
+        &mut self.mapper
+    }
+}
+
+impl ActivePageTable {
+    unsafe fn new() -> ActivePageTable {
+        ActivePageTable {
+            mapper: Mapper::new(),
+        }
+    }
+
+    pub fn with<F>(&mut self, table: &mut InactivePageTable,
+                   temporary_page: &mut temporary::TemporaryPage, f: F)
+        where F: FnOnce(&mut Mapper)
+    {
+        use x86_64::instructions::tlb;
+
+        {
+            let backup = Frame::containing_address(
+                control::Cr3::read().0.start_address().as_u64() as usize);
+
+            // map temporary_page to current p4 table
+            let p4_table = temporary_page.map_table_frame(backup.clone(), self);
+
+            // overwrite recursive mapping
+            self.p4_mut()[511].set(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+            tlb::flush_all();
+
+            // execute f in the new context
+            f(self);
+
+            // restore recursive mapping to original p4 table
+            p4_table[511].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE);
+            tlb::flush_all();
+        }
+
+        temporary_page.unmap(self);
+    }
+
+    pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
+        let old_table = InactivePageTable {
+            p4_frame: Frame::containing_address(
+                control::Cr3::read().0.start_address().as_u64() as usize
+            ),
+        };
+        unsafe {
+            let v = PhysFrame::containing_address(
+                PhysAddr::new(
+                    new_table.p4_frame.start_address() as u64
+                )
+            );
+            // keep the current CR3 flags rather than unconditionally setting
+            // PAGE_LEVEL_CACHE_DISABLE, which would disable caching of the P4
+            let (_, cr3_flags) = control::Cr3::read();
+            control::Cr3::write(v, cr3_flags);
+        }
+        old_table
+    }
+}
+pub struct InactivePageTable {
+    p4_frame: Frame,
+}
+
+impl InactivePageTable {
+    pub fn new(frame: Frame, active_table: &mut ActivePageTable,
+               temporary_page: &mut temporary::TemporaryPage) -> InactivePageTable {
+        {
+            let table = temporary_page.map_table_frame(frame.clone(),
+                active_table);
+            // now we are able to zero the table
+            table.zero();
+            // set up recursive mapping for the table
+            table[511].set(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+        }
+        temporary_page.unmap(active_table);
+
+        InactivePageTable { p4_frame: frame }
+    }
+}
+
+impl Page {
+    pub fn containing_address(address: VirtualAddress) -> Page {
+        assert!(address < 0x0000_8000_0000_0000 ||
+                address >= 0xffff_8000_0000_0000,
+                "invalid address: 0x{:x}", address);
+        Page { number: address / PAGE_SIZE }
+    }
+    fn start_address(&self) -> usize {
+        self.number * PAGE_SIZE
+    }
+    fn p4_index(&self) -> usize {
+        (self.number >> 27) & 0o777
+    }
+    fn p3_index(&self) -> usize {
+        (self.number >> 18) & 0o777
+    }
+    fn p2_index(&self) -> usize {
+        (self.number >> 9) & 0o777
+    }
+    fn p1_index(&self) -> usize {
+        (self.number >> 0) & 0o777
+    }
+    pub fn range_inclusive(start: Page, end: Page) -> PageIter {
+        PageIter {
+            start,
+            end,
+        }
+    }
+}
+
+pub struct PageIter {
+    start: Page,
+    end: Page,
+}
+
+impl Iterator for PageIter {
+    type Item = Page;
+
+    fn next(&mut self) -> Option<Page> {
+        if self.start <= self.end {
+            let page = self.start;
+            self.start.number += 1;
+            Some(page)
+        } else {
+            None
+        }
+    }
+}
+
+pub fn remap_the_kernel<A>(allocator: &mut A, mem_info: &MemInfo) -> ActivePageTable
+    where A: FrameAllocator
+{
+    let mut temporary_page = TemporaryPage::new(Page { number: 0xcafebabe },
+        allocator);
+
+    let mut active_table = unsafe { ActivePageTable::new() };
+    let mut new_table = {
+        let frame = allocator.allocate_frame().expect("no more frames");
+        InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
+    };
+
+    log::info!("Remapping kernel");
+
+    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+        for section in mem_info.boot_info.elf_sections().unwrap() {
+            if !section.is_allocated() {
+                // section is not loaded to memory
+                continue;
+            }
+
+            log::debug!("mapping section '{}' at addr: {:#x}, size: {:#x}", section.name().unwrap_or("NONE"), section.start_address(), section.size());
+            assert!(section.start_address() as usize % PAGE_SIZE == 0,
+                    "sections need to be page aligned");
+
+            let flags = EntryFlags::from_elf_section_flags(&section);
+
+            let start_frame = Frame::containing_address(section.start_address().try_into().unwrap());
+            let end_frame = Frame::containing_address(section.end_address() as usize - 1);
+            for frame in Frame::range_inclusive(start_frame, end_frame) {
+                mapper.identity_map(frame, flags, allocator);
+            }
+        }
+
+        let vga_buffer_frame = Frame::containing_address(0xb8000);
+        mapper.identity_map(vga_buffer_frame, EntryFlags::WRITABLE, allocator);
+
+        let multiboot_start = Frame::containing_address(mem_info.boot_info.start_address());
+        let multiboot_end = Frame::containing_address(mem_info.boot_info.end_address() - 1);
+        for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
+            mapper.identity_map(frame, EntryFlags::PRESENT, allocator);
+        }
+    });
+
+    let old_table = active_table.switch(new_table);
+    log::debug!("Switched to new table");
+
+    let old_p4_page = Page::containing_address(
+        old_table.p4_frame.start_address()
+    );
+    active_table.unmap(old_p4_page, allocator);
+    log::debug!("Put guard page at {:#x}", old_p4_page.start_address());
+
+    active_table
+}
+
+pub fn test_paging<A>(allocator: &mut A)
+    where A: FrameAllocator
+{
+    let mut page_table = unsafe { ActivePageTable::new() };
+
+    let addr = 42 * 512 * 512 * 4096; // 42nd P3 entry
+    log::debug!("Virt addr: 0x{:x}", addr);
+
+    { // ActivePageTable::map_to
+        log::debug!("TEST: ActivePageTable::map_to");
+        let page = Page::containing_address(addr);
+        let frame = allocator.allocate_frame().expect("no more frames");
+        log::debug!("Phys addr = {:?}, map to frame: {:?}",
+            page_table.translate(addr),
+            frame);
+        page_table.map_to(page, frame, EntryFlags::empty(), allocator);
+        log::debug!("Phys addr = {:?}", page_table.translate(addr));
+        log::debug!("next free frame: {:?}", allocator.allocate_frame());
+    }
+
+    { // ActivePageTable::unmap
+        log::debug!("TEST: ActivePageTable::unmap");
+        log::debug!("Val: {:#x}", unsafe {
+            *(Page::containing_address(addr).start_address() as *const u64)
+        });
+        page_table.unmap(Page::containing_address(addr), allocator);
+        log::debug!("Phys addr: {:?} (unmapped addr)", page_table.translate(addr));
+
+        // Panic = good
+        // log::debug!("Val: {:#x}", unsafe {
+        //     *(Page::containing_address(addr).start_address() as *const u64)
+        // });
+    }
+
+    // test it
+}
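`Page`'s index helpers shift the page *number* rather than the address, so the four 9-bit fields sit at bit offsets 27/18/9/0 of the number. For `test_paging`'s address `42 * 512 * 512 * 4096`, only the P3 index is non-zero, which is why the comment calls it the 42nd P3 entry. Checking that (host-runnable, not part of the patch):

```rust
// Index helpers applied to test_paging's address. Page numbers are
// addr / 4096, so the 9-bit fields sit at shifts 27/18/9/0 of the number.
fn main() {
    let addr: usize = 42 * 512 * 512 * 4096;
    let number = addr / 4096;
    assert_eq!((number >> 27) & 0o777, 0);  // p4_index
    assert_eq!((number >> 18) & 0o777, 42); // p3_index: 42nd P3 entry
    assert_eq!((number >> 9) & 0o777, 0);   // p2_index
    assert_eq!(number & 0o777, 0);          // p1_index
}
```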
diff --git a/kernel/src/mem/paging/tables.rs b/kernel/src/mem/paging/tables.rs
new file mode 100644
index 0000000..52603fc
--- /dev/null
+++ b/kernel/src/mem/paging/tables.rs
@@ -0,0 +1,95 @@
+use crate::mem::FrameAllocator;
+
+use super::{Entry, ENTRY_COUNT, EntryFlags};
+use core::{marker::PhantomData, ops::{Index, IndexMut}};
+
+pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
+
+pub struct Table<L: TableLevel> {
+    entries: [Entry; ENTRY_COUNT],
+    level: PhantomData<L>,
+}
+
+impl<L> Table<L> where L: TableLevel {
+    pub fn zero(&mut self) {
+        for entry in self.entries.iter_mut() {
+            entry.set_unused();
+        }
+    }
+}
+
+impl<L> Table<L> where L: HierarchicalLevel {
+    fn next_table_address(&self, index: usize) -> Option<usize> {
+        let entry_flags = self[index].flags();
+        if entry_flags.contains(EntryFlags::PRESENT) &&
+            !entry_flags.contains(EntryFlags::HUGE_PAGE) {
+            let table_address = self as *const _ as usize;
+            Some((table_address << 9) | (index << 12))
+        } else {
+            None
+        }
+    }
+
+    pub fn next_table_create<A>(&mut self, index: usize, allocator: &mut A) -> &mut Table<L::NextLevel>
+        where A: FrameAllocator
+    {
+        if self.next_table(index).is_none() {
+            assert!(!self.entries[index].flags().contains(EntryFlags::HUGE_PAGE),
+                    "mapping code does not support huge pages");
+            let frame = allocator.allocate_frame().expect("no frames available");
+            self.entries[index].set(frame, EntryFlags::PRESENT | EntryFlags::WRITABLE);
+            self.next_table_mut(index).unwrap().zero();
+        }
+        self.next_table_mut(index).unwrap()
+    }
+
+    pub fn next_table<'a>(&'a self, index: usize) -> Option<&'a Table<L::NextLevel>> {
+        self.next_table_address(index)
+            .map(|address| unsafe { &*(address as *const _) })
+    }
+
+    pub fn next_table_mut<'a>(&'a mut self, index: usize) -> Option<&'a mut Table<L::NextLevel>> {
+        self.next_table_address(index)
+            .map(|address| unsafe { &mut *(address as *mut _) })
+    }
+}
+
+impl<L> Index<usize> for Table<L> where L: TableLevel {
+    type Output = Entry;
+
+    fn index(&self, index: usize) -> &Entry {
+        &self.entries[index]
+    }
+}
+
+impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
+    fn index_mut(&mut self, index: usize) -> &mut Entry {
+        &mut self.entries[index]
+    }
+}
+
+pub trait TableLevel {}
+pub trait HierarchicalLevel: TableLevel {
+    type NextLevel: TableLevel;
+}
+
+pub enum Level4 {}
+pub enum Level3 {}
+pub enum Level2 {}
+pub enum Level1 {}
+
+impl TableLevel for Level4 {}
+impl TableLevel for Level3 {}
+impl TableLevel for Level2 {}
+impl TableLevel for Level1 {}
+
+impl HierarchicalLevel for Level4 {
+    type NextLevel = Level3;
+}
+impl HierarchicalLevel for Level3 {
+    type NextLevel = Level2;
+}
+impl HierarchicalLevel for Level2 {
+    type NextLevel = Level1;
+}
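`next_table_address` is the payoff of the recursive mapping: shifting a table's virtual address left by 9 bits pushes each index up one level, the topmost recursive 511 falls off the top, and `index << 12` becomes the new lowest index. Applied to `P4` itself (a host-runnable sketch, not part of the patch):

```rust
// next_table_address's `(table << 9) | (index << 12)` for the P4 table.
fn main() {
    let p4 = 0xffff_ffff_ffff_f000_usize; // tables::P4
    let next = |table: usize, index: usize| (table << 9) | (index << 12);

    // P3 tables are reachable at 0xffffffff_ffe00000 + index * 4096
    assert_eq!(next(p4, 0), 0xffff_ffff_ffe0_0000);
    assert_eq!(next(p4, 42), 0xffff_ffff_ffe0_0000 + 42 * 4096);
}
```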
diff --git a/kernel/src/mem/paging/temporary.rs b/kernel/src/mem/paging/temporary.rs
new file mode 100644
index 0000000..1b62e8a
--- /dev/null
+++ b/kernel/src/mem/paging/temporary.rs
@@ -0,0 +1,81 @@
+use crate::mem::paging::entry::EntryFlags;
+use crate::mem::FrameAllocator;
+use super::tables::{Level1, Table};
+use super::{ActivePageTable, VirtualAddress, Frame};
+use super::Page;
+
+pub struct TemporaryPage {
+    page: Page,
+    allocator: TinyAllocator,
+}
+
+struct TinyAllocator([Option<Frame>; 3]);
+
+impl TemporaryPage {
+    pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage
+        where A: FrameAllocator
+    {
+        TemporaryPage {
+            page,
+            allocator: TinyAllocator::new(allocator),
+        }
+    }
+
+    /// Maps the temporary page to the given frame in the active table.
+    /// Returns the start address of the temporary page.
+    pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable)
+        -> VirtualAddress
+    {
+        assert!(active_table.translate_page(self.page).is_none(),
+                "temporary page is already mapped");
+        active_table.map_to(self.page, frame, EntryFlags::WRITABLE, &mut self.allocator);
+        self.page.start_address()
+    }
+
+    /// Unmaps the temporary page in the active table.
+    pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
+        active_table.unmap(self.page, &mut self.allocator)
+    }
+
+    /// Maps the temporary page to the given page table frame in the active
+    /// table. Returns a reference to the now mapped table.
+    pub fn map_table_frame(&mut self,
+                           frame: Frame,
+                           active_table: &mut ActivePageTable)
+        -> &mut Table<Level1> {
+        unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
+    }
+}
+
+impl FrameAllocator for TinyAllocator {
+    fn allocate_frame(&mut self) -> Option<Frame> {
+        for frame_option in &mut self.0 {
+            if frame_option.is_some() {
+                return frame_option.take();
+            }
+        }
+        None
+    }
+
+    fn deallocate_frame(&mut self, frame: Frame) {
+        for frame_option in &mut self.0 {
+            if frame_option.is_none() {
+                *frame_option = Some(frame);
+                return;
+            }
+        }
+        panic!("Tiny allocator can hold only 3 frames.");
+    }
+}
+
+impl TinyAllocator {
+    fn new<A>(allocator: &mut A) -> TinyAllocator
+        where A: FrameAllocator
+    {
+        let mut f = || allocator.allocate_frame();
+        let frames = [f(), f(), f()];
+        TinyAllocator(frames)
+    }
+}
diff --git a/kernel/src/utils.rs b/kernel/src/utils.rs
index 4df881e..c9d91b1 100644
--- a/kernel/src/utils.rs
+++ b/kernel/src/utils.rs
@@ -1,4 +1,5 @@
-use x86_64::instructions;
+use x86::msr::{rdmsr, wrmsr, IA32_EFER};
+use x86_64::{instructions, registers::control::{Cr0, Cr0Flags}};
 
@@ -7,6 +8,17 @@ pub fn hcf() -> ! {
         instructions::hlt();
         core::hint::spin_loop();
     }
-
-
+}
+
+pub fn enable_nxe_bit() {
+    // EFER.NXE (bit 11) must be set before NO_EXECUTE page entries are legal
+    let nxe_bit = 1 << 11;
+    unsafe {
+        let efer = rdmsr(IA32_EFER);
+        wrmsr(IA32_EFER, efer | nxe_bit);
+    }
+}
+
+pub fn enable_write_protect_bit() {
+    // with CR0.WP set, writes to read-only pages fault even in ring 0
+    unsafe { Cr0::write(Cr0::read() | Cr0Flags::WRITE_PROTECT) };
+}
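These two helpers are prerequisites for the flags `remap_the_kernel` sets: `EntryFlags::NO_EXECUTE` (bit 63) causes a fault on use unless `EFER.NXE` is enabled first, and the read-only kernel sections are only enforced against ring 0 itself once `CR0.WP` is on. For reference, the constants involved (a sketch, not part of the patch; values per the Intel/AMD manuals):

```rust
// Bit positions the helpers above touch: EFER is MSR 0xC0000080 with NXE
// at bit 11, CR0.WP is bit 16, and NO_EXECUTE in entry.rs is PTE bit 63.
fn main() {
    const IA32_EFER: u32 = 0xC000_0080;
    const EFER_NXE: u64 = 1 << 11;
    const CR0_WP: u64 = 1 << 16;
    const PTE_NO_EXECUTE: u64 = 1 << 63;

    assert_eq!(IA32_EFER, 0xC0000080);
    assert_eq!(EFER_NXE, 0x800);
    assert_eq!(CR0_WP, 0x1_0000);
    assert_eq!(PTE_NO_EXECUTE, 0x8000_0000_0000_0000);
}
```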
diff --git a/linker.ld b/linker.ld
index 16c3d3d..c5abdd5 100644
--- a/linker.ld
+++ b/linker.ld
@@ -3,25 +3,58 @@ ENTRY(_start)
 SECTIONS {
     . = 1M;
 
-    .boot :
+    .rodata :
     {
-        /* ensure that the multiboot header is at the beginning */
-        KEEP(*(.multiboot_header))
-
+        /* ensure that the multiboot header is at the beginning */
+        KEEP(*(.multiboot_header))
+        *(.rodata .rodata.*)
+        . = ALIGN(4K);
+    }
+
+    .eh_frame :
+    {
+        *(.eh_frame)
+        . = ALIGN(4K);
     }
 
     .text :
     {
-        *(.text .text.*)
+        *(.text .text.*)
+        . = ALIGN(4K);
     }
 
-    .rodata :
+    .data :
     {
-        *(.rodata .rodata.*)
+        *(.data .data.*)
+        . = ALIGN(4K);
     }
 
-    .data.rel.ro :
+    .bss :
     {
-        *(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
+        *(.bss .bss.*)
+        . = ALIGN(4K);
     }
+
+    .got :
+    {
+        *(.got)
+        . = ALIGN(4K);
+    }
+
+    .got.plt :
+    {
+        *(.got.plt)
+        . = ALIGN(4K);
+    }
+
+    .data.rel.ro : ALIGN(4K) {
+        *(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
+        . = ALIGN(4K);
+    }
+
+    .gcc_except_table : ALIGN(4K) {
+        *(.gcc_except_table)
+        . = ALIGN(4K);
+    }
 }