Actual heap allocation!!!!!!!!!!!!
This commit is contained in:
parent
638bfb2d5c
commit
1d61f97abb
18
Cargo.lock
generated
18
Cargo.lock
generated
|
@ -41,12 +41,15 @@ dependencies = [
|
||||||
name = "kernel"
|
name = "kernel"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"bitflags 2.5.0",
|
||||||
"lazy_static",
|
"lazy_static",
|
||||||
"log",
|
"log",
|
||||||
"multiboot2",
|
"multiboot2",
|
||||||
|
"once",
|
||||||
"pc-keyboard",
|
"pc-keyboard",
|
||||||
"pic8259",
|
"pic8259",
|
||||||
"spin 0.9.8",
|
"spin 0.9.8",
|
||||||
|
"talc",
|
||||||
"uart_16550",
|
"uart_16550",
|
||||||
"x86",
|
"x86",
|
||||||
"x86_64",
|
"x86_64",
|
||||||
|
@ -90,6 +93,12 @@ dependencies = [
|
||||||
"uefi-raw",
|
"uefi-raw",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "once"
|
||||||
|
version = "0.3.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "60bfe75a40f755f162b794140436c57845cb106fd1467598631c76c6fff08e28"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "pc-keyboard"
|
name = "pc-keyboard"
|
||||||
version = "0.7.0"
|
version = "0.7.0"
|
||||||
|
@ -190,6 +199,15 @@ dependencies = [
|
||||||
"unicode-ident",
|
"unicode-ident",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "talc"
|
||||||
|
version = "4.4.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "04be12ec299aadd63a0bf781d893e4b6139d33cdca6dcd6f6be31f849cedcac8"
|
||||||
|
dependencies = [
|
||||||
|
"lock_api",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "uart_16550"
|
name = "uart_16550"
|
||||||
version = "0.3.0"
|
version = "0.3.0"
|
||||||
|
|
|
@ -94,6 +94,10 @@ check_long_mode:
|
||||||
jmp error
|
jmp error
|
||||||
|
|
||||||
set_up_page_tables:
|
set_up_page_tables:
|
||||||
|
mov eax, p4_table
|
||||||
|
or eax, 0b11 ; present + writable
|
||||||
|
mov [p4_table + 511 * 8], eax
|
||||||
|
|
||||||
; map first P4 entry to P3 table
|
; map first P4 entry to P3 table
|
||||||
mov eax, p3_table
|
mov eax, p3_table
|
||||||
or eax, 0b11 ; present + writable
|
or eax, 0b11 ; present + writable
|
||||||
|
|
|
@ -11,12 +11,15 @@ crate-type=["staticlib"]
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
bitflags = "2.5.0"
|
||||||
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
|
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
|
||||||
log = "0.4.21"
|
log = "0.4.21"
|
||||||
multiboot2 = "0.20.2"
|
multiboot2 = "0.20.2"
|
||||||
|
once = "0.3.4"
|
||||||
pc-keyboard = "0.7.0"
|
pc-keyboard = "0.7.0"
|
||||||
pic8259 = "0.11.0"
|
pic8259 = "0.11.0"
|
||||||
spin = "0.9.8"
|
spin = "0.9.8"
|
||||||
|
talc = "4.4.1"
|
||||||
uart_16550 = "0.3.0"
|
uart_16550 = "0.3.0"
|
||||||
x86 = "0.52.0"
|
x86 = "0.52.0"
|
||||||
x86_64 = "0.15.1"
|
x86_64 = "0.15.1"
|
||||||
|
|
|
@ -38,7 +38,7 @@ struct Selectors {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn init() {
|
pub fn init() {
|
||||||
log::debug!("GDT init");
|
log::info!("GDT init");
|
||||||
use x86_64::instructions::tables::load_tss;
|
use x86_64::instructions::tables::load_tss;
|
||||||
use x86_64::instructions::segmentation::{CS, Segment};
|
use x86_64::instructions::segmentation::{CS, Segment};
|
||||||
|
|
||||||
|
|
|
@ -40,7 +40,7 @@ pub enum InterruptIndex {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn init_idt() {
|
pub fn init_idt() {
|
||||||
log::debug!("IDT init");
|
log::info!("IDT init");
|
||||||
IDT.load();
|
IDT.load();
|
||||||
unsafe { PICS.lock().initialize() };
|
unsafe { PICS.lock().initialize() };
|
||||||
x86_64::instructions::interrupts::enable();
|
x86_64::instructions::interrupts::enable();
|
||||||
|
|
|
@ -2,10 +2,13 @@
|
||||||
#![no_main]
|
#![no_main]
|
||||||
#![feature(abi_x86_interrupt)]
|
#![feature(abi_x86_interrupt)]
|
||||||
#![feature(associated_type_defaults)]
|
#![feature(associated_type_defaults)]
|
||||||
|
#![allow(internal_features)]
|
||||||
|
#![feature(ptr_internals)]
|
||||||
|
#![feature(allocator_api)]
|
||||||
|
|
||||||
use events::EVENTMAN;
|
use alloc::vec::Vec;
|
||||||
|
|
||||||
use crate::events::Event;
|
extern crate alloc;
|
||||||
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
mod logger;
|
mod logger;
|
||||||
|
@ -21,23 +24,38 @@ mod mem;
|
||||||
|
|
||||||
#[no_mangle]
|
#[no_mangle]
|
||||||
extern "C" fn kmain(mb2_info_addr: usize) -> ! {
|
extern "C" fn kmain(mb2_info_addr: usize) -> ! {
|
||||||
|
// Init
|
||||||
logger::init(log::LevelFilter::Trace).unwrap();
|
logger::init(log::LevelFilter::Trace).unwrap();
|
||||||
gdt::init();
|
gdt::init();
|
||||||
interrupts::init_idt();
|
interrupts::init_idt();
|
||||||
let mem_info = mem::MemInfo::load(mb2_info_addr);
|
utils::enable_nxe_bit();
|
||||||
let mut _alloc = mem::area_frame_alloc::AreaFrameAllocator::new(mem_info);
|
utils::enable_write_protect_bit();
|
||||||
|
|
||||||
EVENTMAN.lock().add_listener(events::Event::Ps2KeyPress(None), |k| {
|
mem::init(mb2_info_addr);
|
||||||
let Event::Ps2KeyPress(v) = k else {panic!()};
|
|
||||||
|
|
||||||
log::debug!("Keypress event received! ({:?})", v.unwrap());
|
let mut a = Vec::new();
|
||||||
Ok(())
|
|
||||||
});
|
|
||||||
|
|
||||||
|
a.push(6);
|
||||||
|
a.push(9);
|
||||||
|
a.push(4);
|
||||||
|
a.push(2);
|
||||||
|
a.push(0);
|
||||||
|
|
||||||
|
log::error!("{:?}", a);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
// EVENTMAN.lock().add_listener(events::Event::Ps2KeyPress(None), |k| {
|
||||||
|
// let Event::Ps2KeyPress(v) = k else {panic!()};
|
||||||
|
//
|
||||||
|
// log::debug!("Keypress event received! ({:?})", v.unwrap());
|
||||||
|
// Ok(())
|
||||||
|
// });
|
||||||
|
|
||||||
// let (level_4_page_table, _) = Cr3::read();
|
// let (level_4_page_table, _) = Cr3::read();
|
||||||
// println!("Level 4 page table at: {:?}", level_4_page_table.start_address());
|
// println!("Level 4 page table at: {:?}", level_4_page_table.start_address());
|
||||||
|
log::info!("end of work");
|
||||||
|
|
||||||
utils::hcf();
|
utils::hcf();
|
||||||
}
|
}
|
||||||
|
|
|
@ -68,11 +68,11 @@ impl<'a> AreaFrameAllocator<'a> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
pub fn new(mi: MemInfo) -> AreaFrameAllocator<'a> {
|
pub fn new(mi: &MemInfo) -> AreaFrameAllocator<'a> {
|
||||||
let mut allocator = AreaFrameAllocator {
|
let mut allocator = AreaFrameAllocator {
|
||||||
next_free_frame: Frame::containing_address(0),
|
next_free_frame: Frame::containing_address(0),
|
||||||
current_area: None,
|
current_area: None,
|
||||||
areas: mi.mem_area_iter,
|
areas: mi.mem_area_iter.clone(),
|
||||||
kernel_start: Frame::containing_address(mi.kernel_start),
|
kernel_start: Frame::containing_address(mi.kernel_start),
|
||||||
kernel_end: Frame::containing_address(mi.kernel_end),
|
kernel_end: Frame::containing_address(mi.kernel_end),
|
||||||
multiboot_start: Frame::containing_address(mi.mb_start),
|
multiboot_start: Frame::containing_address(mi.mb_start),
|
||||||
|
|
46
kernel/src/mem/heap/mod.rs
Normal file
46
kernel/src/mem/heap/mod.rs
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
use core::{alloc::{AllocError, GlobalAlloc, Layout}, sync::atomic::{AtomicUsize, Ordering}};
|
||||||
|
|
||||||
|
use x86_64::align_up;
|
||||||
|
|
||||||
|
pub const HEAP_START: usize = 0o_000_001_000_000_0000;
|
||||||
|
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
|
||||||
|
|
||||||
|
/// A simple allocator that allocates memory linearly and ignores freed memory.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct BumpAllocator {
|
||||||
|
heap_start: usize,
|
||||||
|
heap_end: usize,
|
||||||
|
next: AtomicUsize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BumpAllocator {
|
||||||
|
pub const fn new(heap_start: usize, heap_end: usize) -> Self {
|
||||||
|
Self { heap_start, heap_end, next: AtomicUsize::new(heap_start) }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
unsafe impl GlobalAlloc for BumpAllocator {
|
||||||
|
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
|
||||||
|
loop {
|
||||||
|
// load current state of the `next` field
|
||||||
|
let current_next = self.next.load(Ordering::Relaxed);
|
||||||
|
let alloc_start = align_up(current_next as u64, layout.align() as u64);
|
||||||
|
let alloc_end = alloc_start.saturating_add(layout.size() as u64);
|
||||||
|
|
||||||
|
if alloc_end <= self.heap_end as u64 {
|
||||||
|
// update the `next` pointer if it still has the value `current_next`
|
||||||
|
let next_now = self.next.compare_exchange(current_next, alloc_end as usize, Ordering::Relaxed, Ordering::Relaxed).unwrap();
|
||||||
|
if next_now == current_next {
|
||||||
|
// next address was successfully updated, allocation succeeded
|
||||||
|
return alloc_start as *mut u8;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
panic!("OUT OF MEMORY");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
|
||||||
|
// do nothing, leak memory
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,11 +1,23 @@
|
||||||
use core::alloc::{GlobalAlloc, Layout};
|
use core::alloc::{GlobalAlloc, Layout};
|
||||||
|
|
||||||
use multiboot2::{BootInformationHeader, MemoryArea, MemoryAreaType, MemoryMapTag, TagTrait};
|
use multiboot2::{BootInformation, BootInformationHeader, MemoryArea, MemoryAreaType, MemoryMapTag, TagTrait};
|
||||||
|
use talc::{ErrOnOom, Span, Talc, Talck};
|
||||||
|
use crate::mem::paging::entry::EntryFlags;
|
||||||
|
|
||||||
|
use self::heap::{HEAP_SIZE, HEAP_START};
|
||||||
|
pub use self::paging::remap_the_kernel;
|
||||||
|
|
||||||
|
pub mod heap;
|
||||||
pub mod area_frame_alloc;
|
pub mod area_frame_alloc;
|
||||||
|
pub mod paging;
|
||||||
|
|
||||||
|
//#[global_allocator]
|
||||||
|
//static GLOBAL_ALLOCATOR: heap::BumpAllocator = heap::BumpAllocator::new(HEAP_START, HEAP_START + HEAP_SIZE);
|
||||||
|
|
||||||
#[global_allocator]
|
#[global_allocator]
|
||||||
static GLOBAL_ALLOCATOR: DummyAlloc = DummyAlloc;
|
static GLOBAL_ALLOCATOR: Talck<spin::Mutex<()>, ErrOnOom> = Talc::new(ErrOnOom).lock();
|
||||||
|
|
||||||
|
|
||||||
pub struct DummyAlloc;
|
pub struct DummyAlloc;
|
||||||
unsafe impl GlobalAlloc for DummyAlloc {
|
unsafe impl GlobalAlloc for DummyAlloc {
|
||||||
unsafe fn alloc(&self, _: Layout) -> *mut u8 { 0 as *mut u8 }
|
unsafe fn alloc(&self, _: Layout) -> *mut u8 { 0 as *mut u8 }
|
||||||
|
@ -24,8 +36,40 @@ impl Frame {
|
||||||
fn containing_address(address: usize) -> Frame {
|
fn containing_address(address: usize) -> Frame {
|
||||||
Frame{ number: address / PAGE_SIZE }
|
Frame{ number: address / PAGE_SIZE }
|
||||||
}
|
}
|
||||||
|
fn start_address(&self) -> paging::PhysicalAddress {
|
||||||
|
self.number * PAGE_SIZE
|
||||||
|
}
|
||||||
|
fn clone(&self) -> Frame {
|
||||||
|
Frame { number: self.number }
|
||||||
|
}
|
||||||
|
fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
|
||||||
|
FrameIter {
|
||||||
|
start,
|
||||||
|
end,
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct FrameIter {
|
||||||
|
start: Frame,
|
||||||
|
end: Frame,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Iterator for FrameIter {
|
||||||
|
type Item = Frame;
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Frame> {
|
||||||
|
if self.start <= self.end {
|
||||||
|
let frame = self.start.clone();
|
||||||
|
self.start.number += 1;
|
||||||
|
Some(frame)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
#[allow(dead_code)]
|
#[allow(dead_code)]
|
||||||
pub trait FrameAllocator {
|
pub trait FrameAllocator {
|
||||||
fn allocate_frame(&mut self) -> Option<Frame>;
|
fn allocate_frame(&mut self) -> Option<Frame>;
|
||||||
|
@ -70,8 +114,9 @@ impl MemoryAreaIter {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug)]
|
||||||
pub struct MemInfo{
|
pub struct MemInfo<'a>{
|
||||||
|
boot_info: BootInformation<'a>,
|
||||||
mem_area_iter: MemoryAreaIter,
|
mem_area_iter: MemoryAreaIter,
|
||||||
pub kernel_start: usize,
|
pub kernel_start: usize,
|
||||||
pub kernel_end: usize,
|
pub kernel_end: usize,
|
||||||
|
@ -79,7 +124,7 @@ pub struct MemInfo{
|
||||||
pub mb_end: usize,
|
pub mb_end: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MemInfo {
|
impl MemInfo<'_> {
|
||||||
pub fn load(mb_ptr: usize) -> Self {
|
pub fn load(mb_ptr: usize) -> Self {
|
||||||
let boot_info = unsafe {
|
let boot_info = unsafe {
|
||||||
multiboot2::BootInformation::load(mb_ptr as *const BootInformationHeader).unwrap()
|
multiboot2::BootInformation::load(mb_ptr as *const BootInformationHeader).unwrap()
|
||||||
|
@ -96,8 +141,9 @@ impl MemInfo {
|
||||||
// log::debug!("{}: start: 0x{:x}, sz: 0x{:x}, flags: 0b{:b}", section.name().unwrap_or("NONE"),
|
// log::debug!("{}: start: 0x{:x}, sz: 0x{:x}, flags: 0b{:b}", section.name().unwrap_or("NONE"),
|
||||||
// section.start_address(), section.size(), section.flags());
|
// section.start_address(), section.size(), section.flags());
|
||||||
// }
|
// }
|
||||||
let kernel_start = boot_info.elf_sections().unwrap().clone().map(|s| s.start_address()).min().unwrap() as usize;
|
let elf_secs = boot_info.elf_sections().unwrap();
|
||||||
let kernel_end = boot_info.elf_sections().unwrap().clone().map(|s| s.end_address()).max().unwrap() as usize;
|
let kernel_start = elf_secs.clone().filter(|s| s.is_allocated()).map(|s| s.start_address()).min().unwrap() as usize;
|
||||||
|
let kernel_end = elf_secs.clone().filter(|s| s.is_allocated()).map(|s| s.end_address()).min().unwrap() as usize;
|
||||||
let mb_start = boot_info.start_address();
|
let mb_start = boot_info.start_address();
|
||||||
let mb_end = boot_info.end_address();
|
let mb_end = boot_info.end_address();
|
||||||
|
|
||||||
|
@ -105,6 +151,7 @@ impl MemInfo {
|
||||||
//log::debug!("Multiboot: start: 0x{:x} sz: 0x{:x}", mi.mb_start, mi.mb_end - mi.mb_start);
|
//log::debug!("Multiboot: start: 0x{:x} sz: 0x{:x}", mi.mb_start, mi.mb_end - mi.mb_start);
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
|
boot_info:unsafe { multiboot2::BootInformation::load(mb_ptr as *const BootInformationHeader).unwrap()},
|
||||||
mem_area_iter: MemoryAreaIter::new(mmap_tag),
|
mem_area_iter: MemoryAreaIter::new(mmap_tag),
|
||||||
kernel_start,
|
kernel_start,
|
||||||
kernel_end,
|
kernel_end,
|
||||||
|
@ -114,3 +161,22 @@ impl MemInfo {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn init(mb_ptr: usize) {
|
||||||
|
once::assert_has_not_been_called!("mem::init must be called only once");
|
||||||
|
let mem_info = MemInfo::load(mb_ptr);
|
||||||
|
let mut frame_alloc = area_frame_alloc::AreaFrameAllocator::new(&mem_info);
|
||||||
|
|
||||||
|
let mut active_table = remap_the_kernel(&mut frame_alloc, &mem_info);
|
||||||
|
|
||||||
|
use self::paging::Page;
|
||||||
|
|
||||||
|
let heap_start_page = Page::containing_address(HEAP_START);
|
||||||
|
let heap_end_page = Page::containing_address(HEAP_START + HEAP_SIZE-1);
|
||||||
|
|
||||||
|
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
|
||||||
|
active_table.map(page, EntryFlags::WRITABLE, &mut frame_alloc);
|
||||||
|
}
|
||||||
|
unsafe {
|
||||||
|
GLOBAL_ALLOCATOR.lock().claim(Span::from_base_size(HEAP_START as *mut u8, HEAP_SIZE)).unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
76
kernel/src/mem/paging/entry.rs
Normal file
76
kernel/src/mem/paging/entry.rs
Normal file
|
@ -0,0 +1,76 @@
|
||||||
|
use multiboot2::ElfSection;
|
||||||
|
|
||||||
|
use crate::mem::Frame;
|
||||||
|
|
||||||
|
pub struct Entry(u64);
|
||||||
|
|
||||||
|
bitflags::bitflags! {
|
||||||
|
#[derive(Debug, Clone, Copy)]
|
||||||
|
pub struct EntryFlags: u64 {
|
||||||
|
const PRESENT = 1 << 0;
|
||||||
|
const WRITABLE = 1 << 1;
|
||||||
|
const USER_ACCESSIBLE = 1 << 2;
|
||||||
|
const WRITE_THROUGH = 1 << 3;
|
||||||
|
const NO_CACHE = 1 << 4;
|
||||||
|
const ACCESSED = 1 << 5;
|
||||||
|
const DIRTY = 1 << 6;
|
||||||
|
const HUGE_PAGE = 1 << 7;
|
||||||
|
const GLOBAL = 1 << 8;
|
||||||
|
const NO_EXECUTE = 1 << 63;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl EntryFlags {
|
||||||
|
pub fn from_elf_section_flags(section: &ElfSection) -> EntryFlags {
|
||||||
|
use multiboot2::ElfSectionFlags;
|
||||||
|
|
||||||
|
let mut flags = EntryFlags::empty();
|
||||||
|
|
||||||
|
if section.flags().contains(ElfSectionFlags::ALLOCATED) {
|
||||||
|
// section is loaded to memory
|
||||||
|
flags = flags | EntryFlags::PRESENT;
|
||||||
|
}
|
||||||
|
if section.flags().contains(ElfSectionFlags::WRITABLE) {
|
||||||
|
flags = flags | EntryFlags::WRITABLE;
|
||||||
|
}
|
||||||
|
if !section.flags().contains(ElfSectionFlags::EXECUTABLE) {
|
||||||
|
flags = flags | EntryFlags::NO_EXECUTE;
|
||||||
|
}
|
||||||
|
|
||||||
|
flags
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
impl Entry {
|
||||||
|
pub fn is_unused(&self) -> bool {
|
||||||
|
self.0 == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set_unused(&mut self) {
|
||||||
|
self.0 = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn flags(&self) -> EntryFlags {
|
||||||
|
EntryFlags::from_bits_truncate(self.0)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn pointed_frame(&self) -> Option<Frame> {
|
||||||
|
if self.flags().contains(EntryFlags::PRESENT) {
|
||||||
|
Some(Frame::containing_address(
|
||||||
|
self.0 as usize & 0x000fffff_fffff000
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
|
||||||
|
assert!(frame.start_address() & !0x000fffff_fffff000 == 0);
|
||||||
|
self.0 = (frame.start_address() as u64) | flags.bits();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
141
kernel/src/mem/paging/mapper.rs
Normal file
141
kernel/src/mem/paging/mapper.rs
Normal file
|
@ -0,0 +1,141 @@
|
||||||
|
use x86_64::VirtAddr;
|
||||||
|
|
||||||
|
use super::{
|
||||||
|
entry::*, tables::{
|
||||||
|
Level4, Table, P4
|
||||||
|
},
|
||||||
|
Frame, FrameAllocator,
|
||||||
|
PAGE_SIZE, PhysicalAddress,
|
||||||
|
VirtualAddress, Page,
|
||||||
|
ENTRY_COUNT, InactivePageTable
|
||||||
|
};
|
||||||
|
use core::ptr::Unique;
|
||||||
|
|
||||||
|
pub struct Mapper {
|
||||||
|
p4: Unique<Table<Level4>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Mapper {
|
||||||
|
pub unsafe fn new() -> Mapper {
|
||||||
|
Mapper {
|
||||||
|
p4: Unique::new_unchecked(P4),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn p4(&self) -> &Table<Level4> {
|
||||||
|
unsafe { self.p4.as_ref() }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn p4_mut(&mut self) -> &mut Table<Level4> {
|
||||||
|
unsafe { self.p4.as_mut() }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Translates a virtual to the corresponding physical address.
|
||||||
|
/// Returns `None` if the address is not mapped.
|
||||||
|
pub fn translate(&self, virtual_address: VirtualAddress)
|
||||||
|
-> Option<PhysicalAddress>
|
||||||
|
{
|
||||||
|
let offset = virtual_address % PAGE_SIZE;
|
||||||
|
self.translate_page(Page::containing_address(virtual_address))
|
||||||
|
.map(|frame| frame.number * PAGE_SIZE + offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn translate_page(&self, page: Page) -> Option<Frame> {
|
||||||
|
|
||||||
|
let p3 = self.p4().next_table(page.p4_index());
|
||||||
|
|
||||||
|
let huge_page = || {
|
||||||
|
p3.and_then(|p3| {
|
||||||
|
let p3_entry = &p3[page.p3_index()];
|
||||||
|
// 1GiB page?
|
||||||
|
if let Some(start_frame) = p3_entry.pointed_frame() {
|
||||||
|
if p3_entry.flags().contains(EntryFlags::HUGE_PAGE) {
|
||||||
|
// address must be 1GiB aligned
|
||||||
|
assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
|
||||||
|
return Some(Frame {
|
||||||
|
number: start_frame.number + page.p2_index() *
|
||||||
|
ENTRY_COUNT + page.p1_index(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Some(p2) = p3.next_table(page.p3_index()) {
|
||||||
|
let p2_entry = &p2[page.p2_index()];
|
||||||
|
// 2MiB page?
|
||||||
|
if let Some(start_frame) = p2_entry.pointed_frame() {
|
||||||
|
if p2_entry.flags().contains(EntryFlags::HUGE_PAGE) {
|
||||||
|
// address must be 2MiB aligned
|
||||||
|
assert!(start_frame.number % ENTRY_COUNT == 0);
|
||||||
|
return Some(Frame {
|
||||||
|
number: start_frame.number + page.p1_index()
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
})
|
||||||
|
};
|
||||||
|
|
||||||
|
p3.and_then(|p3| p3.next_table(page.p3_index()))
|
||||||
|
.and_then(|p2| p2.next_table(page.p2_index()))
|
||||||
|
.and_then(|p1| p1[page.p1_index()].pointed_frame())
|
||||||
|
.or_else(huge_page)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Maps the page to the frame with the provided flags.
|
||||||
|
/// The `EntryFlags::PRESENT` flag is added by default. Needs a
|
||||||
|
/// `FrameAllocator` as it might need to create new page tables.
|
||||||
|
pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
|
||||||
|
allocator: &mut A)
|
||||||
|
where A: FrameAllocator
|
||||||
|
{
|
||||||
|
let p4 = self.p4_mut();
|
||||||
|
let p3 = p4.next_table_create(page.p4_index(), allocator);
|
||||||
|
let p2 = p3.next_table_create(page.p3_index(), allocator);
|
||||||
|
let p1 = p2.next_table_create(page.p2_index(), allocator);
|
||||||
|
|
||||||
|
assert!(p1[page.p1_index()].is_unused());
|
||||||
|
p1[page.p1_index()].set(frame, flags | EntryFlags::PRESENT);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Maps the page to some free frame with the provided flags.
|
||||||
|
/// The free frame is allocated from the given `FrameAllocator`.
|
||||||
|
pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
|
||||||
|
where A: FrameAllocator
|
||||||
|
{
|
||||||
|
let frame = allocator.allocate_frame().expect("out of memory");
|
||||||
|
self.map_to(page, frame, flags, allocator)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Identity map the the given frame with the provided flags.
|
||||||
|
/// The `FrameAllocator` is used to create new page tables if needed.
|
||||||
|
pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
|
||||||
|
where A: FrameAllocator
|
||||||
|
{
|
||||||
|
let page = Page::containing_address(frame.start_address());
|
||||||
|
self.map_to(page, frame, flags, allocator)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Unmaps the given page and adds all freed frames to the given
|
||||||
|
/// `FrameAllocator`.
|
||||||
|
pub fn unmap<A>(&mut self, page: Page, _allocator: &mut A)
|
||||||
|
where A: FrameAllocator
|
||||||
|
{
|
||||||
|
assert!(self.translate(page.start_address()).is_some());
|
||||||
|
|
||||||
|
let p1 = self.p4_mut()
|
||||||
|
.next_table_mut(page.p4_index())
|
||||||
|
.and_then(|p3| p3.next_table_mut(page.p3_index()))
|
||||||
|
.and_then(|p2| p2.next_table_mut(page.p2_index()))
|
||||||
|
.expect("mapping code does not support huge pages");
|
||||||
|
let _frame = p1[page.p1_index()].pointed_frame().unwrap();
|
||||||
|
p1[page.p1_index()].set_unused();
|
||||||
|
|
||||||
|
use x86_64::instructions::tlb;
|
||||||
|
tlb::flush(VirtAddr::new(page.start_address().try_into().unwrap()));
|
||||||
|
// TODO free p(1,2,3) table if empty
|
||||||
|
// _allocator.deallocate_frame(_frame);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
263
kernel/src/mem/paging/mod.rs
Normal file
263
kernel/src/mem/paging/mod.rs
Normal file
|
@ -0,0 +1,263 @@
|
||||||
|
use core::ops::{Deref, DerefMut};
|
||||||
|
use self::{entry::*, mapper::Mapper, temporary::TemporaryPage};
|
||||||
|
use super::{Frame, FrameAllocator, MemInfo, PAGE_SIZE};
|
||||||
|
use x86_64::{registers::control::{self, Cr3Flags}, structures::paging::PhysFrame, PhysAddr};
|
||||||
|
|
||||||
|
pub mod tables;
|
||||||
|
pub mod entry;
|
||||||
|
pub mod temporary;
|
||||||
|
pub mod mapper;
|
||||||
|
|
||||||
|
const ENTRY_COUNT: usize = 512;
|
||||||
|
|
||||||
|
pub type PhysicalAddress = usize;
|
||||||
|
pub type VirtualAddress = usize;
|
||||||
|
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||||
|
pub struct Page {
|
||||||
|
number: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
pub struct ActivePageTable {
|
||||||
|
mapper: Mapper
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Deref for ActivePageTable {
|
||||||
|
type Target = Mapper;
|
||||||
|
fn deref(&self) -> &Mapper {
|
||||||
|
&self.mapper
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DerefMut for ActivePageTable {
|
||||||
|
fn deref_mut(&mut self) -> &mut Mapper {
|
||||||
|
&mut self.mapper
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
impl ActivePageTable {
|
||||||
|
unsafe fn new() -> ActivePageTable {
|
||||||
|
ActivePageTable {
|
||||||
|
mapper: Mapper::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with<F>(&mut self, table: &mut InactivePageTable,
|
||||||
|
temporary_page: &mut temporary::TemporaryPage, f: F)
|
||||||
|
where F: FnOnce(&mut Mapper)
|
||||||
|
{
|
||||||
|
use x86_64::instructions::tlb;
|
||||||
|
|
||||||
|
{
|
||||||
|
let backup = Frame::containing_address(
|
||||||
|
control::Cr3::read().0.start_address().as_u64() as usize);
|
||||||
|
|
||||||
|
// map temporary_page to current p4 table
|
||||||
|
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
|
||||||
|
|
||||||
|
// overwrite recursive mapping
|
||||||
|
self.p4_mut()[511].set(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
|
||||||
|
tlb::flush_all();
|
||||||
|
|
||||||
|
// execute f in the new context
|
||||||
|
f(self);
|
||||||
|
|
||||||
|
// restore recursive mapping to original p4 table
|
||||||
|
p4_table[511].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE);
|
||||||
|
tlb::flush_all();
|
||||||
|
}
|
||||||
|
|
||||||
|
temporary_page.unmap(self);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
|
||||||
|
|
||||||
|
let old_table = InactivePageTable {
|
||||||
|
p4_frame: Frame::containing_address(
|
||||||
|
control::Cr3::read().0.start_address().as_u64() as usize
|
||||||
|
),
|
||||||
|
};
|
||||||
|
unsafe {
|
||||||
|
let v = PhysFrame::containing_address(
|
||||||
|
PhysAddr::new(
|
||||||
|
new_table.p4_frame.start_address() as u64
|
||||||
|
)
|
||||||
|
);
|
||||||
|
control::Cr3::write(v, Cr3Flags::PAGE_LEVEL_CACHE_DISABLE);
|
||||||
|
}
|
||||||
|
old_table
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
pub struct InactivePageTable {
|
||||||
|
p4_frame: Frame,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl InactivePageTable {
|
||||||
|
pub fn new(frame: Frame, active_table: &mut ActivePageTable,
|
||||||
|
temporary_page: &mut temporary::TemporaryPage) -> InactivePageTable {
|
||||||
|
{
|
||||||
|
let table = temporary_page.map_table_frame(frame.clone(),
|
||||||
|
active_table);
|
||||||
|
// now we are able to zero the table
|
||||||
|
table.zero();
|
||||||
|
// set up recursive mapping for the table
|
||||||
|
table[511].set(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
|
||||||
|
}
|
||||||
|
temporary_page.unmap(active_table);
|
||||||
|
|
||||||
|
InactivePageTable { p4_frame: frame }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
impl Page {
|
||||||
|
pub fn containing_address(address: VirtualAddress) -> Page {
|
||||||
|
assert!(address < 0x0000_8000_0000_0000 ||
|
||||||
|
address >= 0xffff_8000_0000_0000,
|
||||||
|
"invalid address: 0x{:x}", address);
|
||||||
|
Page { number: address / PAGE_SIZE }
|
||||||
|
}
|
||||||
|
fn start_address(&self) -> usize {
|
||||||
|
self.number * PAGE_SIZE
|
||||||
|
}
|
||||||
|
fn p4_index(&self) -> usize {
|
||||||
|
(self.number >> 27) & 0o777
|
||||||
|
}
|
||||||
|
fn p3_index(&self) -> usize {
|
||||||
|
(self.number >> 18) & 0o777
|
||||||
|
}
|
||||||
|
fn p2_index(&self) -> usize {
|
||||||
|
(self.number >> 9) & 0o777
|
||||||
|
}
|
||||||
|
fn p1_index(&self) -> usize {
|
||||||
|
(self.number >> 0) & 0o777
|
||||||
|
}
|
||||||
|
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
|
||||||
|
PageIter {
|
||||||
|
start,
|
||||||
|
end,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct PageIter {
|
||||||
|
start: Page,
|
||||||
|
end: Page,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Iterator for PageIter {
|
||||||
|
type Item = Page;
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Page> {
|
||||||
|
if self.start <= self.end {
|
||||||
|
let page = self.start;
|
||||||
|
self.start.number += 1;
|
||||||
|
Some(page)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn remap_the_kernel<A>(allocator: &mut A, mem_info: &MemInfo) -> ActivePageTable
|
||||||
|
where A: FrameAllocator
|
||||||
|
{
|
||||||
|
let mut temporary_page = TemporaryPage::new(Page { number: 0xcafebabe },
|
||||||
|
allocator);
|
||||||
|
|
||||||
|
let mut active_table = unsafe { ActivePageTable::new() };
|
||||||
|
let mut new_table = {
|
||||||
|
let frame = allocator.allocate_frame().expect("no more frames");
|
||||||
|
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
|
||||||
|
};
|
||||||
|
|
||||||
|
log::info!("Remaping kernel");
|
||||||
|
|
||||||
|
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
|
||||||
|
|
||||||
|
for section in mem_info.boot_info.elf_sections().unwrap() {
|
||||||
|
|
||||||
|
if !section.is_allocated() {
|
||||||
|
// section is not loaded to memory
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
log::debug!("mapping section '{}' at addr: {:#x}, size: {:#x}", section.name().unwrap_or("NONE"), section.start_address(), section.size());
|
||||||
|
assert!(section.start_address() as usize % PAGE_SIZE == 0,
|
||||||
|
"sections need to be page aligned");
|
||||||
|
|
||||||
|
|
||||||
|
let flags = EntryFlags::from_elf_section_flags(§ion);
|
||||||
|
|
||||||
|
let start_frame = Frame::containing_address(section.start_address().try_into().unwrap());
|
||||||
|
let end_frame = Frame::containing_address(section.end_address() as usize - 1);
|
||||||
|
for frame in Frame::range_inclusive(start_frame, end_frame) {
|
||||||
|
mapper.identity_map(frame, flags, allocator);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let vga_buffer_frame = Frame::containing_address(0xb8000);
|
||||||
|
mapper.identity_map(vga_buffer_frame, EntryFlags::WRITABLE, allocator);
|
||||||
|
let multiboot_start = Frame::containing_address(mem_info.boot_info.start_address());
|
||||||
|
let multiboot_end = Frame::containing_address(mem_info.boot_info.end_address() - 1);
|
||||||
|
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
|
||||||
|
mapper.identity_map(frame, EntryFlags::PRESENT, allocator);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let old_table = active_table.switch(new_table);
|
||||||
|
log::debug!("Switched to new table");
|
||||||
|
|
||||||
|
let old_p4_page = Page::containing_address(
|
||||||
|
old_table.p4_frame.start_address()
|
||||||
|
);
|
||||||
|
active_table.unmap(old_p4_page, allocator);
|
||||||
|
log::debug!("Put guard page at {:#x}", old_p4_page.start_address());
|
||||||
|
|
||||||
|
active_table
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
pub fn test_paging<A>(allocator: &mut A)
|
||||||
|
where A: FrameAllocator
|
||||||
|
{
|
||||||
|
let mut page_table = unsafe { ActivePageTable::new() };
|
||||||
|
|
||||||
|
let addr = 42 * 512 * 512 * 4096; // 42th P3 entry
|
||||||
|
log::debug!("Virt addr: 0x{:x}", addr);
|
||||||
|
{ // ActivePageTable::map_to
|
||||||
|
log::debug!("TEST: ActivePageTable::map_to");
|
||||||
|
let page = Page::containing_address(addr);
|
||||||
|
let frame = allocator.allocate_frame().expect("no more frames");
|
||||||
|
log::debug!("Phys addr = {:?}, map to frame: {:?}",
|
||||||
|
page_table.translate(addr),
|
||||||
|
frame);
|
||||||
|
page_table.map_to(page, frame, EntryFlags::empty(), allocator);
|
||||||
|
log::debug!("Phys addr = {:?}", page_table.translate(addr));
|
||||||
|
log::debug!("next free frame: {:?}", allocator.allocate_frame());
|
||||||
|
}
|
||||||
|
|
||||||
|
{ // ActivePageTable::unmap
|
||||||
|
log::debug!("TEST: ActivePageTable::unmap");
|
||||||
|
log::debug!("Val: {:#x}", unsafe {
|
||||||
|
*(Page::containing_address(addr).start_address() as *const u64)
|
||||||
|
});
|
||||||
|
page_table.unmap(Page::containing_address(addr), allocator);
|
||||||
|
log::debug!("Phys addr: {:?} (unmapped addr)", page_table.translate(addr));
|
||||||
|
|
||||||
|
// Panic = good
|
||||||
|
// log::debug!("Val: {:#x}", unsafe {
|
||||||
|
// *(Page::containing_address(addr).start_address() as *const u64)
|
||||||
|
// });
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// test it
|
||||||
|
}
|
95
kernel/src/mem/paging/tables.rs
Normal file
95
kernel/src/mem/paging/tables.rs
Normal file
|
@ -0,0 +1,95 @@
|
||||||
|
use crate::mem::FrameAllocator;
|
||||||
|
|
||||||
|
use super::{Entry, ENTRY_COUNT, EntryFlags};
|
||||||
|
use core::{marker::PhantomData, ops::{Index, IndexMut}};
|
||||||
|
|
||||||
|
/// Virtual address of the active P4 (top-level) page table, reachable
/// through the recursive mapping: the bootstrap code points P4's last entry
/// (index 511) back at the P4 frame itself, so an address with all four
/// table indexes equal to 511 resolves to the P4 table's own memory.
pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;

/// A page table of `ENTRY_COUNT` entries at hierarchy level `L`.
pub struct Table<L: TableLevel> {
    entries: [Entry; ENTRY_COUNT],
    // Zero-sized marker tying this table to its level in the hierarchy.
    level: PhantomData<L>
}
|
||||||
|
|
||||||
|
|
||||||
|
impl<L: TableLevel> Table<L> {
    /// Marks every entry in the table as unused, yielding an empty table.
    pub fn zero(&mut self) {
        for entry in &mut self.entries {
            entry.set_unused();
        }
    }
}
|
||||||
|
|
||||||
|
|
||||||
|
impl<L> Table<L> where L: HierarchicalLevel {
    /// Computes the virtual address of the child table behind entry `index`,
    /// relying on the recursive P4 mapping: shifting the current table's own
    /// virtual address left by 9 bits and inserting `index` in bits 12..21
    /// walks one level down the hierarchy.
    ///
    /// Returns `None` if the entry is not present or maps a huge page
    /// (a huge-page entry points at data, not at a child table).
    fn next_table_address(&self, index: usize) -> Option<usize> {
        let entry_flags = self[index].flags();
        if entry_flags.contains(EntryFlags::PRESENT) &&
            !entry_flags.contains(EntryFlags::HUGE_PAGE) {
            let table_address = self as *const _ as usize;
            Some((table_address << 9) | (index << 12))
        } else {
            None
        }
    }

    /// Returns the child table at `index`, first allocating and zeroing a
    /// fresh frame for it if it does not exist yet.
    ///
    /// Panics if the entry maps a huge page or `allocator` is out of frames.
    pub fn next_table_create<A>(&mut self, index: usize, allocator: &mut A) -> &mut Table<L::NextLevel>
        where A: FrameAllocator
    {
        if self.next_table(index).is_none() {
            assert!(!self.entries[index].flags().contains(EntryFlags::HUGE_PAGE),
                    "mapping code does not support huge pages");
            let frame = allocator.allocate_frame().expect("no frames available");
            self.entries[index].set(frame, EntryFlags::PRESENT | EntryFlags::WRITABLE);
            // A freshly allocated frame holds arbitrary data; clear it before use.
            self.next_table_mut(index).unwrap().zero();
        }
        self.next_table_mut(index).unwrap()
    }

    /// Borrows the child table at `index`, if one is mapped.
    pub fn next_table<'a>(&'a self, index: usize) -> Option<&'a Table<L::NextLevel>> {
        self.next_table_address(index)
            // SAFETY: with the recursive P4 mapping in place, this address
            // resolves to a live page-table frame for as long as `self` is
            // borrowed.
            .map(|address| unsafe { &*(address as *const _) })
    }

    /// Mutably borrows the child table at `index`, if one is mapped.
    pub fn next_table_mut<'a>(&'a mut self, index: usize) -> Option<&'a mut Table<L::NextLevel>> {
        self.next_table_address(index)
            // SAFETY: as in `next_table`; `&mut self` makes the access exclusive.
            .map(|address| unsafe { &mut *(address as *mut _) })
    }

}
|
||||||
|
|
||||||
|
/// Allows `table[i]` as shorthand for reading the i-th entry.
impl<L> Index<usize> for Table<L> where L: TableLevel {
    type Output = Entry;

    fn index(&self, index: usize) -> &Entry {
        &self.entries[index]
    }
}

/// Allows `table[i] = ...` / `&mut table[i]` for the i-th entry.
impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
    fn index_mut(&mut self, index: usize) -> &mut Entry {
        &mut self.entries[index]
    }
}
|
||||||
|
|
||||||
|
/// Marker trait implemented by the four page-table levels.
pub trait TableLevel {}

/// Levels that have a child level below them (P4–P2, but not P1).
/// Encoding this in the type system makes it a compile error to ask a
/// P1 table for a "next" table.
pub trait HierarchicalLevel: TableLevel {
    type NextLevel: TableLevel;
}

// Uninhabited types: the levels exist only as compile-time markers and
// can never be instantiated.
pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}

impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}

impl HierarchicalLevel for Level4 {
    type NextLevel = Level3;
}
impl HierarchicalLevel for Level3 {
    type NextLevel = Level2;
}
impl HierarchicalLevel for Level2 {
    type NextLevel = Level1;
}
|
81
kernel/src/mem/paging/temporary.rs
Normal file
81
kernel/src/mem/paging/temporary.rs
Normal file
|
@ -0,0 +1,81 @@
|
||||||
|
|
||||||
|
use crate::mem::paging::EntryFlags;
|
||||||
|
use crate::mem::FrameAllocator;
|
||||||
|
use super::tables::{Level1, Table};
|
||||||
|
use super::{ActivePageTable, VirtualAddress, Frame};
|
||||||
|
use super::Page;
|
||||||
|
|
||||||
|
/// A page reserved for short-lived mappings, e.g. to edit an inactive
/// page-table frame through the currently active table.
pub struct TemporaryPage {
    page: Page,
    // Backs the mapping of `page` itself: holds the frames that creating
    // the intermediate tables for the mapping may require.
    allocator: TinyAllocator
}

/// A minimal frame allocator with capacity for exactly three frames —
/// enough for the intermediate tables (P3/P2/P1) a single mapping can need.
struct TinyAllocator([Option<Frame>; 3]);
|
||||||
|
|
||||||
|
|
||||||
|
impl TemporaryPage {
|
||||||
|
pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage
|
||||||
|
where A: FrameAllocator
|
||||||
|
{
|
||||||
|
TemporaryPage {
|
||||||
|
page: page,
|
||||||
|
allocator: TinyAllocator::new(allocator),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/// Maps the temporary page to the given frame in the active table.
|
||||||
|
/// Returns the start address of the temporary page.
|
||||||
|
pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable)
|
||||||
|
-> VirtualAddress
|
||||||
|
{
|
||||||
|
|
||||||
|
assert!(active_table.translate_page(self.page).is_none(),
|
||||||
|
"temporary page is already mapped");
|
||||||
|
active_table.map_to(self.page, frame, EntryFlags::WRITABLE, &mut self.allocator);
|
||||||
|
self.page.start_address()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Unmaps the temporary page in the active table.
|
||||||
|
pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
|
||||||
|
active_table.unmap(self.page, &mut self.allocator)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Maps the temporary page to the given page table frame in the active
|
||||||
|
/// table. Returns a reference to the now mapped table.
|
||||||
|
pub fn map_table_frame(&mut self,
|
||||||
|
frame: Frame,
|
||||||
|
active_table: &mut ActivePageTable)
|
||||||
|
-> &mut Table<Level1> {
|
||||||
|
unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FrameAllocator for TinyAllocator {
|
||||||
|
fn allocate_frame(&mut self) -> Option<Frame> {
|
||||||
|
for frame_option in &mut self.0 {
|
||||||
|
if frame_option.is_some() {
|
||||||
|
return frame_option.take();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
fn deallocate_frame(&mut self, frame: Frame) {
|
||||||
|
for frame_option in &mut self.0 {
|
||||||
|
if frame_option.is_none() {
|
||||||
|
*frame_option = Some(frame);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic!("Tiny allocator can hold only 3 frames.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TinyAllocator {
|
||||||
|
fn new<A>(allocator: &mut A) -> TinyAllocator
|
||||||
|
where A: FrameAllocator
|
||||||
|
{
|
||||||
|
let mut f = || allocator.allocate_frame();
|
||||||
|
let frames = [f(), f(), f()];
|
||||||
|
TinyAllocator(frames)
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,4 +1,5 @@
|
||||||
use x86_64::instructions;
|
use x86::msr::{rdmsr, wrmsr, IA32_EFER};
|
||||||
|
use x86_64::{instructions, registers::control::{Cr0, Cr0Flags}};
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@ -7,6 +8,17 @@ pub fn hcf() -> ! {
|
||||||
instructions::hlt();
|
instructions::hlt();
|
||||||
core::hint::spin_loop();
|
core::hint::spin_loop();
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets the NXE (No-Execute Enable) bit in the IA32_EFER MSR so that the
/// no-execute flag in page-table entries takes effect.
pub fn enable_nxe_bit() {
    let nxe_bit = 1 << 11; // EFER.NXE is bit 11
    // SAFETY: read-modify-write of IA32_EFER that only adds the NXE bit;
    // all other EFER state is preserved. MSR access requires ring 0, which
    // this kernel code runs in.
    unsafe {
        let efer = rdmsr(IA32_EFER);
        wrmsr(IA32_EFER, efer | nxe_bit);
    }
}
|
||||||
|
/// Sets CR0.WP so that read-only page mappings are enforced even for
/// supervisor-mode (ring 0) writes.
pub fn enable_write_protect_bit() {
    // SAFETY: only adds the WRITE_PROTECT flag; all other CR0 bits are preserved.
    unsafe { Cr0::write(Cr0::read() | Cr0Flags::WRITE_PROTECT) };
}
|
||||||
|
|
51
linker.ld
51
linker.ld
|
@ -3,25 +3,58 @@ ENTRY(_start)
|
||||||
SECTIONS {
|
SECTIONS {
|
||||||
. = 1M;
|
. = 1M;
|
||||||
|
|
||||||
.boot :
|
.rodata :
|
||||||
{
|
{
|
||||||
/* ensure that the multiboot header is at the beginning */
|
/* ensure that the multiboot header is at the beginning */
|
||||||
KEEP(*(.multiboot_header))
|
KEEP(*(.multiboot_header))
|
||||||
|
*(.rodata .rodata.*)
|
||||||
|
. = ALIGN(4K);
|
||||||
|
}
|
||||||
|
|
||||||
|
.eh_frame :
|
||||||
|
{
|
||||||
|
|
||||||
|
*(.eh_frame)
|
||||||
|
. = ALIGN(4K);
|
||||||
}
|
}
|
||||||
|
|
||||||
.text :
|
.text :
|
||||||
{
|
{
|
||||||
*(.text .text.*)
|
*(.text .text.*)
|
||||||
|
. = ALIGN(4K);
|
||||||
}
|
}
|
||||||
|
|
||||||
.rodata :
|
.data :
|
||||||
{
|
{
|
||||||
*(.rodata .rodata.*)
|
*(.data .data.*)
|
||||||
|
. = ALIGN(4K);
|
||||||
}
|
}
|
||||||
|
|
||||||
.data.rel.ro :
|
.bss :
|
||||||
{
|
{
|
||||||
*(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
|
*(.bss .bss.*)
|
||||||
|
. = ALIGN(4K);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.got :
|
||||||
|
{
|
||||||
|
*(.got)
|
||||||
|
. = ALIGN(4K);
|
||||||
|
}
|
||||||
|
|
||||||
|
.got.plt :
|
||||||
|
{
|
||||||
|
*(.got.plt)
|
||||||
|
. = ALIGN(4K);
|
||||||
|
}
|
||||||
|
|
||||||
|
.data.rel.ro : ALIGN(4K) {
|
||||||
|
*(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*)
|
||||||
|
. = ALIGN(4K);
|
||||||
|
}
|
||||||
|
|
||||||
|
.gcc_except_table : ALIGN(4K) {
|
||||||
|
*(.gcc_except_table)
|
||||||
|
. = ALIGN(4K);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue
Block a user