use core::ops::{Deref, DerefMut};

use self::{entry::*, mapper::Mapper, temporary::TemporaryPage};
use super::{Frame, FrameAllocator, MemInfo, PAGE_SIZE};
use x86_64::{
    registers::control::{self, Cr3Flags},
    structures::paging::PhysFrame,
    PhysAddr,
};

pub mod tables;
pub mod entry;
pub mod temporary;
pub mod mapper;

const ENTRY_COUNT: usize = 512;

pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
    number: usize,
}

pub struct ActivePageTable {
    mapper: Mapper,
}

impl Deref for ActivePageTable {
    type Target = Mapper;

    fn deref(&self) -> &Mapper {
        &self.mapper
    }
}

impl DerefMut for ActivePageTable {
    fn deref_mut(&mut self) -> &mut Mapper {
        &mut self.mapper
    }
}

impl ActivePageTable {
    unsafe fn new() -> ActivePageTable {
        ActivePageTable {
            mapper: Mapper::new(),
        }
    }

    /// Executes `f` with the recursive mapping temporarily pointed at
    /// `table`, so that `f` can modify an inactive page table.
    pub fn with<F>(&mut self,
                   table: &mut InactivePageTable,
                   temporary_page: &mut temporary::TemporaryPage,
                   f: F)
        where F: FnOnce(&mut Mapper)
    {
        use x86_64::instructions::tlb;

        {
            let backup = Frame::containing_address(
                control::Cr3::read().0.start_address().as_u64() as usize);

            // map temporary_page to current p4 table
            let p4_table = temporary_page.map_table_frame(backup.clone(), self);

            // overwrite recursive mapping
            self.p4_mut()[511].set(table.p4_frame.clone(),
                EntryFlags::PRESENT | EntryFlags::WRITABLE);
            tlb::flush_all();

            // execute f in the new context
            f(self);

            // restore recursive mapping to original p4 table
            p4_table[511].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE);
            tlb::flush_all();
        }

        temporary_page.unmap(self);
    }

    /// Loads `new_table` into CR3 and returns the previously active table.
    pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
        let old_table = InactivePageTable {
            p4_frame: Frame::containing_address(
                control::Cr3::read().0.start_address().as_u64() as usize
            ),
        };

        unsafe {
            let v = PhysFrame::containing_address(
                PhysAddr::new(new_table.p4_frame.start_address() as u64)
            );
            control::Cr3::write(v, Cr3Flags::PAGE_LEVEL_CACHE_DISABLE);
        }

        old_table
    }
}

pub struct InactivePageTable {
    p4_frame: Frame,
}

impl InactivePageTable {
    pub fn new(frame: Frame,
               active_table: &mut ActivePageTable,
               temporary_page: &mut temporary::TemporaryPage)
               -> InactivePageTable {
        {
            let table = temporary_page.map_table_frame(frame.clone(), active_table);
            // now we are able to zero the table
            table.zero();
            // set up recursive mapping for the table
            table[511].set(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
        }
        temporary_page.unmap(active_table);

        InactivePageTable { p4_frame: frame }
    }
}

impl Page {
    pub fn containing_address(address: VirtualAddress) -> Page {
        // reject non-canonical addresses (bits 48..64 must copy bit 47)
        assert!(address < 0x0000_8000_0000_0000 || address >= 0xffff_8000_0000_0000,
            "invalid address: 0x{:x}", address);
        Page { number: address / PAGE_SIZE }
    }

    fn start_address(&self) -> usize {
        self.number * PAGE_SIZE
    }

    fn p4_index(&self) -> usize {
        (self.number >> 27) & 0o777
    }

    fn p3_index(&self) -> usize {
        (self.number >> 18) & 0o777
    }

    fn p2_index(&self) -> usize {
        (self.number >> 9) & 0o777
    }

    fn p1_index(&self) -> usize {
        (self.number >> 0) & 0o777
    }

    pub fn range_inclusive(start: Page, end: Page) -> PageIter {
        PageIter { start, end }
    }
}

pub struct PageIter {
    start: Page,
    end: Page,
}

impl Iterator for PageIter {
    type Item = Page;

    fn next(&mut self) -> Option<Page> {
        if self.start <= self.end {
            let page = self.start;
            self.start.number += 1;
            Some(page)
        } else {
            None
        }
    }
}

pub fn remap_the_kernel<A>(allocator: &mut A, mem_info: &MemInfo) -> ActivePageTable
    where A: FrameAllocator
{
    let mut temporary_page = TemporaryPage::new(Page { number: 0xcafebabe }, allocator);

    let mut active_table = unsafe { ActivePageTable::new() };
    let mut new_table = {
        let frame = allocator.allocate_frame().expect("no more frames");
        InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
    };

    log::info!("Remapping kernel");
    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
        for section in mem_info.boot_info.elf_sections().unwrap() {
            if !section.is_allocated() {
                // section is not loaded to memory
                continue;
            }

            log::debug!("mapping section '{}' at addr: {:#x}, size: {:#x}",
                section.name().unwrap_or("NONE"),
                section.start_address(),
                section.size());
            assert!(section.start_address() as usize % PAGE_SIZE == 0,
                "sections need to be page aligned");

            let flags = EntryFlags::from_elf_section_flags(&section);

            let start_frame = Frame::containing_address(
                section.start_address().try_into().unwrap());
            let end_frame = Frame::containing_address(section.end_address() as usize - 1);
            for frame in Frame::range_inclusive(start_frame, end_frame) {
                mapper.identity_map(frame, flags, allocator);
            }
        }

        // identity map the VGA text buffer
        let vga_buffer_frame = Frame::containing_address(0xb8000);
        mapper.identity_map(vga_buffer_frame, EntryFlags::WRITABLE, allocator);

        // identity map the multiboot info structure
        let multiboot_start = Frame::containing_address(mem_info.boot_info.start_address());
        let multiboot_end = Frame::containing_address(mem_info.boot_info.end_address() - 1);
        for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
            mapper.identity_map(frame, EntryFlags::PRESENT, allocator);
        }
    });

    let old_table = active_table.switch(new_table);
    log::debug!("Switched to new table");

    // turn the old p4 table's page into a guard page
    let old_p4_page = Page::containing_address(old_table.p4_frame.start_address());
    active_table.unmap(old_p4_page, allocator);
    log::debug!("Put guard page at {:#x}", old_p4_page.start_address());

    active_table
}

pub fn test_paging<A>(allocator: &mut A)
    where A: FrameAllocator
{
    let mut page_table = unsafe { ActivePageTable::new() };

    let addr = 42 * 512 * 512 * 4096; // 42nd P3 entry
    log::debug!("Virt addr: 0x{:x}", addr);

    {
        // ActivePageTable::map_to
        log::debug!("TEST: ActivePageTable::map_to");
        let page = Page::containing_address(addr);
        let frame = allocator.allocate_frame().expect("no more frames");
        log::debug!("Phys addr = {:?}, map to frame: {:?}",
            page_table.translate(addr), frame);
        page_table.map_to(page, frame, EntryFlags::empty(), allocator);
        log::debug!("Phys addr = {:?}", page_table.translate(addr));
        log::debug!("next free frame: {:?}", allocator.allocate_frame());
    }

    {
        // ActivePageTable::unmap
        log::debug!("TEST: ActivePageTable::unmap");
        log::debug!("Val: {:#x}", unsafe {
            *(Page::containing_address(addr).start_address() as *const u64)
        });
        page_table.unmap(Page::containing_address(addr), allocator);
        log::debug!("Phys addr: {:?} (unmapped addr)", page_table.translate(addr));

        // Panic = good
        // log::debug!("Val: {:#x}", unsafe {
        //     *(Page::containing_address(addr).start_address() as *const u64)
        // });
    }

    // test it
}
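
// A hedged sanity-check sketch for the index math in `Page`: the address used
// by `test_paging` above (42 * 512 * 512 * 4096) should decompose to P4 index
// 0, P3 index 42, P2 index 0, P1 index 0, since each table index is one
// 9-bit slice of the page number. `debug_page_indices` is a hypothetical
// helper added here for illustration; it is not part of the original module.
#[allow(dead_code)]
fn debug_page_indices(addr: VirtualAddress) {
    let page = Page::containing_address(addr);
    // each index is 9 bits of the page number, P4 highest
    log::debug!("p4: {}, p3: {}, p2: {}, p1: {}",
        page.p4_index(), page.p3_index(), page.p2_index(), page.p1_index());
}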