// NOTE(review): this file had been collapsed onto a single physical line,
// which turned everything after the first `//` into one giant comment and
// left the constants below defined nowhere. Restored to proper line
// structure; the disabled BumpAllocator is kept commented out, as found.

//use core::{alloc::{AllocError, GlobalAlloc, Layout}, sync::atomic::{AtomicUsize, Ordering}};
//use x86_64::align_up;

/// Virtual start address of the kernel heap.
pub const HEAP_START: usize = 0x40000000; // 0x40000000 - 0x40019000

/// Size of the kernel heap: 100 KiB, so the heap spans
/// 0x4000_0000..0x4001_9000 (0x19000 = 102_400 bytes).
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB

// A simple allocator that allocates memory linearly and ignores freed memory.
//
// NOTE(review): left disabled, as found. The original commented draft had two
// defects that are corrected in the text below so a future revival does not
// inherit them:
//   1. It called `compare_exchange(...).unwrap()`. A failed CAS returns
//      `Err(current)`, so `.unwrap()` would panic on the first contention and
//      the retry loop would never actually retry. Match on the `Result`
//      instead (or use `compare_exchange_weak` in a loop).
//   2. It did `panic!("OUT OF MEMORY")` on exhaustion. `GlobalAlloc::alloc`
//      must signal failure by returning a null pointer, never by panicking.
//
//#[derive(Debug)]
//pub struct BumpAllocator {
//    heap_start: usize,
//    heap_end: usize,
//    next: AtomicUsize,
//}
//
//impl BumpAllocator {
//    pub const fn new(heap_start: usize, heap_end: usize) -> Self {
//        Self { heap_start, heap_end, next: AtomicUsize::new(heap_start) }
//    }
//}
//
//unsafe impl GlobalAlloc for BumpAllocator {
//    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
//        loop {
//            // load current state of the `next` field
//            let current_next = self.next.load(Ordering::Relaxed);
//            let alloc_start = align_up(current_next as u64, layout.align() as u64);
//            let alloc_end = alloc_start.saturating_add(layout.size() as u64);
//
//            if alloc_end <= self.heap_end as u64 {
//                // try to bump `next`; if another thread won the race, retry
//                match self.next.compare_exchange(
//                    current_next,
//                    alloc_end as usize,
//                    Ordering::Relaxed,
//                    Ordering::Relaxed,
//                ) {
//                    // next address was successfully updated, allocation succeeded
//                    Ok(_) => return alloc_start as *mut u8,
//                    Err(_) => continue,
//                }
//            } else {
//                // heap exhausted: report failure per the GlobalAlloc contract
//                return core::ptr::null_mut();
//            }
//        }
//    }
//
//    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
//        // bump allocators never reclaim; freed memory is leaked by design
//    }
//}