Add a linked list allocator for the heap that can now do 256 MiB due to

frame allocator optimizations, make a basic init function in arch that
will initialize everything for a given arch. Add tests in kmain for
alloc, and add a Locked struct used for static mutables and the linked
list.
csd4ni3l
2026-03-28 15:06:16 +01:00
parent 269d900d97
commit 4a3c1c9ced
8 changed files with 285 additions and 67 deletions


@@ -1,4 +1,4 @@
 [unstable]
 json-target-spec = true
 build-std-features = ["compiler-builtins-mem"]
-build-std = ["core", "compiler_builtins"]
+build-std = ["core", "compiler_builtins", "alloc"]


@@ -2,42 +2,16 @@ use core::arch::asm;
 use limine::response::{HhdmResponse, MemoryMapResponse};
 #[cfg(target_arch = "x86_64")]
-use crate::arch::x86_64::{
-    init::init_x86_64,
-    paging::{XunilFrameAllocator, example_mapping, initialize_paging},
-};
+use crate::arch::x86_64::{init::init_x86_64, paging::XunilFrameAllocator};
 #[cfg(target_arch = "x86_64")]
-use x86_64::{
-    VirtAddr, registers::control::Cr3, structures::paging::OffsetPageTable,
-    structures::paging::Page,
-};
+use x86_64::structures::paging::OffsetPageTable;
 #[cfg(target_arch = "x86_64")]
-pub fn memory_management_init(
+pub fn init<'a>(
     hhdm_response: &HhdmResponse,
-    memory_map_response: &MemoryMapResponse,
-) -> OffsetPageTable<'static> {
-    let physical_offset = VirtAddr::new(hhdm_response.offset());
-    let (frame, _) = Cr3::read();
-    let mut mapper = unsafe { initialize_paging(physical_offset) };
-    let l4_virt = physical_offset + frame.start_address().as_u64() + 0xb8000;
-    let mut frame_allocator = XunilFrameAllocator::new(memory_map_response.entries());
-    let page = Page::containing_address(l4_virt);
-    unsafe {
-        example_mapping(page, &mut mapper, &mut frame_allocator);
-    }
-    mapper
-}
-#[cfg(target_arch = "x86_64")]
-pub fn init(
-    hhdm_response: &HhdmResponse,
-    memory_map_response: &MemoryMapResponse,
-) -> OffsetPageTable<'static> {
-    init_x86_64();
-    return memory_management_init(hhdm_response, memory_map_response);
+    memory_map_response: &'a MemoryMapResponse,
+) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
+    return init_x86_64(hhdm_response, memory_map_response);
 }
 pub fn idle() -> ! {


@@ -0,0 +1,167 @@
use crate::arch::x86_64::paging::XunilFrameAllocator;
use crate::util::{LinkedNode, Locked};
use core::{
    alloc::{GlobalAlloc, Layout},
    ptr::null_mut,
};
use x86_64::{
    VirtAddr,
    structures::paging::{
        FrameAllocator, Mapper, OffsetPageTable, Page, PageTableFlags as Flags, Size4KiB,
        mapper::MapToError,
    },
};
fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}
#[global_allocator]
static ALLOCATOR: Locked<LinkedListAllocator> = Locked::new(LinkedListAllocator::new());
pub const HEAP_START: usize = 0x_4444_4444_0000;
pub const HEAP_SIZE: usize = 256 * 1024 * 1024; // 256 MiB
pub struct LinkedListAllocator {
    head: LinkedNode,
}
impl LinkedListAllocator {
    pub const fn new() -> LinkedListAllocator {
        Self {
            head: LinkedNode::new(0),
        }
    }
    fn size_align(layout: Layout) -> (usize, usize) {
        let layout = layout
            .align_to(core::mem::align_of::<LinkedNode>())
            .expect("Align to LinkedNode failed")
            .pad_to_align();
        let size = layout.size().max(core::mem::size_of::<LinkedNode>()); // take the layout's size, or at least the size of a single linked node
        (size, layout.align())
    }
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        unsafe {
            self.add_free_memory_region(heap_start, heap_size);
        }
    }
    unsafe fn add_free_memory_region(&mut self, start: usize, size: usize) {
        assert_eq!(align_up(start, core::mem::align_of::<LinkedNode>()), start); // the region start must be aligned for a LinkedNode
        assert!(size >= core::mem::size_of::<LinkedNode>()); // check that we have enough space for a LinkedNode
        let mut linked_node = LinkedNode::new(size);
        linked_node.next = self.head.next.take();
        let linked_node_ptr = start as *mut LinkedNode; // treat the start of the memory region as a LinkedNode
        unsafe {
            linked_node_ptr.write(linked_node); // write the data, very risky
            self.head.next = Some(&mut *linked_node_ptr);
        }
    }
    fn find_region(
        &mut self,
        size: usize,
        align: usize,
    ) -> Option<(&'static mut LinkedNode, usize)> {
        let mut current = &mut self.head;
        while let Some(ref mut region) = current.next {
            if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
                let next = region.next.take();
                let ret = Some((current.next.take().unwrap(), alloc_start));
                current.next = next;
                return ret;
            } else {
                current = current.next.as_mut().unwrap();
            }
        }
        None
    }
    fn alloc_from_region(region: &LinkedNode, size: usize, align: usize) -> Result<usize, ()> {
        let alloc_start = align_up(region.start_addr(), align);
        let alloc_end = alloc_start.checked_add(size).ok_or(())?; // check for overflow
        if alloc_end > region.end_addr() {
            return Err(());
        }
        let excess_size = region.end_addr() - alloc_end;
        if excess_size > 0 && excess_size < core::mem::size_of::<LinkedNode>() {
            // if the remaining space is not enough for another LinkedNode, skip this region
            return Err(());
        }
        Ok(alloc_start)
    }
}
unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let (size, align) = LinkedListAllocator::size_align(layout);
        let mut allocator = self.lock();
        if let Some((region, alloc_start)) = allocator.find_region(size, align) {
            let alloc_end = alloc_start.checked_add(size).expect("overflow");
            let excess_size = region.end_addr() - alloc_end;
            if excess_size > 0 {
                unsafe {
                    allocator.add_free_memory_region(alloc_end, excess_size);
                }
            }
            alloc_start as *mut u8
        } else {
            null_mut()
        }
    }
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let (size, _) = LinkedListAllocator::size_align(layout);
        unsafe {
            self.lock().add_free_memory_region(ptr as usize, size);
        }
    }
}
pub fn init_heap(
    mapper: &mut OffsetPageTable,
    frame_allocator: &mut XunilFrameAllocator,
) -> Result<(), MapToError<Size4KiB>> {
    let page_range = {
        let page_start = VirtAddr::new(HEAP_START as u64);
        let page_end = page_start + HEAP_SIZE as u64 - 1u64;
        let heap_start_page: Page<Size4KiB> = Page::containing_address(page_start);
        let heap_end_page: Page<Size4KiB> = Page::containing_address(page_end);
        Page::range_inclusive(heap_start_page, heap_end_page)
    };
    for page in page_range {
        let frame = frame_allocator
            .allocate_frame()
            .ok_or(MapToError::<Size4KiB>::FrameAllocationFailed)?;
        let flags = Flags::PRESENT | Flags::WRITABLE;
        unsafe {
            mapper
                .map_to(page, frame, flags, frame_allocator)
                .map_err(|e| e)?
                .flush();
        }
    }
    unsafe {
        ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
    }
    Ok(())
}
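The arithmetic here deserves a note: size_align pads every request up to align_of::<LinkedNode>() and to at least size_of::<LinkedNode>() (16 bytes on x86_64), so any block handed out can later be rewritten in place as a free-list node by dealloc. align_up, in turn, only works when align is a power of two, which Layout guarantees. A minimal host-side sanity test for align_up (a sketch, not part of this commit):

#[cfg(test)]
mod align_tests {
    use super::align_up;

    #[test]
    fn rounds_up_to_power_of_two_boundaries() {
        assert_eq!(align_up(0x1000, 0x1000), 0x1000); // already aligned: unchanged
        assert_eq!(align_up(0x1001, 0x1000), 0x2000); // just past a boundary: next page
        assert_eq!(align_up(7, 8), 8);
        assert_eq!(align_up(0, 8), 0);
    }
}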


@@ -1,8 +1,31 @@
 use crate::arch::x86_64::gdt::load_gdt_x86_64;
 use crate::arch::x86_64::interrupts::{PICS, init_idt_x86_64};
+use limine::response::{HhdmResponse, MemoryMapResponse};
 use x86_64::instructions::interrupts;
-pub fn init_x86_64() {
+#[cfg(target_arch = "x86_64")]
+use crate::arch::x86_64::{
+    heap::init_heap,
+    paging::{XunilFrameAllocator, initialize_paging},
+};
+#[cfg(target_arch = "x86_64")]
+use x86_64::{VirtAddr, structures::paging::OffsetPageTable};
+#[cfg(target_arch = "x86_64")]
+pub fn memory_management_init<'a>(
+    hhdm_response: &HhdmResponse,
+    memory_map_response: &'a MemoryMapResponse,
+) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
+    let physical_offset = VirtAddr::new(hhdm_response.offset());
+    let mapper = unsafe { initialize_paging(physical_offset) };
+    let frame_allocator = XunilFrameAllocator::new(memory_map_response.entries());
+    (mapper, frame_allocator)
+}
+pub fn init_x86_64<'a>(
+    hhdm_response: &HhdmResponse,
+    memory_map_response: &'a MemoryMapResponse,
+) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
     load_gdt_x86_64();
     init_idt_x86_64();
@@ -13,4 +36,13 @@ pub fn init_x86_64() {
     }
     interrupts::enable();
+    let (mut mapper, mut frame_allocator) =
+        memory_management_init(hhdm_response, memory_map_response);
+    init_heap(&mut mapper, &mut frame_allocator)
+        .ok()
+        .expect("Failed to initialize heap");
+    return (mapper, frame_allocator);
 }
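One small design note: the .ok() converts the Result into an Option before the expect, which discards the MapToError value. Calling expect directly on the Result (a possible alternative, not what this commit does) would include the error in the panic message, since MapToError implements Debug:

    init_heap(&mut mapper, &mut frame_allocator).expect("Failed to initialize heap");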


@@ -1,4 +1,5 @@
 pub mod gdt;
+pub mod heap;
 pub mod init;
 pub mod interrupts;
 pub mod paging;


@@ -1,10 +1,7 @@
 use x86_64::{
     PhysAddr, VirtAddr,
     registers::control::Cr3,
-    structures::paging::{
-        FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PageTableFlags as Flags,
-        PhysFrame, Size2MiB, Size4KiB,
-    },
+    structures::paging::{FrameAllocator, OffsetPageTable, PageTable, PhysFrame, Size4KiB},
 };
 use limine::memory_map::{Entry, EntryType};
@@ -29,43 +26,45 @@ pub unsafe fn initialize_paging(physical_memory_offset: VirtAddr) -> OffsetPageT
 pub struct XunilFrameAllocator<'a> {
-    next: usize,
     memory_map: &'a [&'a Entry],
+    region_index: usize,
+    region_offset: usize,
 }
 impl<'a> XunilFrameAllocator<'a> {
     pub fn new(memory_map: &'a [&'a Entry]) -> Self {
+        let region_index = memory_map
+            .iter()
+            .position(|region| region.entry_type == EntryType::USABLE)
+            .unwrap();
         Self {
-            next: 0,
             memory_map,
+            region_index,
+            region_offset: 0,
         }
     }
-    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
-        let regions = self.memory_map.iter();
-        let usable = regions.filter(|region| region.entry_type == EntryType::USABLE);
-        let ranges = usable
-            .map(|usable_region| usable_region.base..usable_region.base + usable_region.length);
-        let frame_addresses = ranges.flat_map(|r| r.step_by(4096));
-        frame_addresses
-            .map(|frame_address| PhysFrame::containing_address(PhysAddr::new(frame_address)))
-    }
 }
 unsafe impl<'a> FrameAllocator<Size4KiB> for XunilFrameAllocator<'a> {
     fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
-        let frame = self.usable_frames().nth(self.next);
-        self.next += 1;
-        frame
+        loop {
+            let region = self
+                .memory_map
+                .iter()
+                .filter(|region| region.entry_type == EntryType::USABLE)
+                .nth(self.region_index)?;
+            let frame_count = region.length / 4096;
+            if self.region_offset < frame_count as usize {
+                let addr = region.base + (self.region_offset as u64 * 4096);
+                self.region_offset += 1;
+                return Some(PhysFrame::containing_address(PhysAddr::new(addr)));
+            }
+            self.region_index += 1;
+            self.region_offset = 0;
+        }
     }
 }
-pub unsafe fn example_mapping(
-    page: Page<Size2MiB>,
-    mapper: &mut OffsetPageTable,
-    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-) {
-    let frame = PhysFrame::<Size2MiB>::containing_address(PhysAddr::new(0x0000_1234_4000_0000));
-    let flags = Flags::PRESENT | Flags::WRITABLE;
-    let map_to_result = unsafe { mapper.map_to(page, frame, flags, frame_allocator) };
-    map_to_result.expect("map_to failed").flush();
-}
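This rewrite is what the commit message means by frame allocator optimizations: the old allocate_frame rebuilt the usable_frames() iterator and re-walked it from the start with .nth(self.next) on every call, so handing out the n-th frame cost O(n) and mapping the 65,536 frames of the 256 MiB heap was quadratic. The new region_index/region_offset cursor resumes where the previous allocation stopped. A simplified host-side model of the new logic, with usable regions pre-filtered into hypothetical (base, length) pairs:

struct Cursor {
    region_index: usize,
    region_offset: u64,
}

// Mirrors the new XunilFrameAllocator::allocate_frame over 4 KiB frames.
fn next_frame(cur: &mut Cursor, regions: &[(u64, u64)]) -> Option<u64> {
    loop {
        let (base, length) = *regions.get(cur.region_index)?; // out of regions: None
        if cur.region_offset < length / 4096 {
            let addr = base + cur.region_offset * 4096;
            cur.region_offset += 1;
            return Some(addr); // next untouched frame in the current region
        }
        cur.region_index += 1; // region exhausted, move to the next one
        cur.region_offset = 0;
    }
}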


@@ -2,6 +2,7 @@
 #![no_main]
 #![feature(abi_x86_interrupt)]
+extern crate alloc;
 use core::fmt::Write;
 use limine::BaseRevision;
@@ -10,6 +11,7 @@ use limine::request::{
 };
 pub mod arch;
 pub mod driver;
+pub mod util;
 use crate::arch::arch::{idle, init};
 use crate::driver::graphics::base::rgb;
@@ -18,6 +20,7 @@ use crate::driver::graphics::primitives::{
     circle_filled, circle_outline, rectangle_filled, rectangle_outline, triangle_outline,
 };
 use crate::driver::serial::{ConsoleWriter, init_serial_console, with_serial_console};
+use alloc::{boxed::Box, vec::Vec};
 /// Sets the base revision to the latest revision supported by the crate.
 /// See specification for further info.
@@ -88,19 +91,18 @@ unsafe extern "C" fn kmain() -> ! {
     if let Some(framebuffer_response) = FRAMEBUFFER_REQUEST.get_response() {
         if let Some(limine_framebuffer) = framebuffer_response.framebuffers().next() {
             init_framebuffer(&limine_framebuffer);
             // boot_animation();
         }
     }
+    init_serial_console(0, 0);
     if let Some(hhdm_response) = HHDM_REQUEST.get_response() {
         if let Some(memory_map_response) = MEMORY_MAP_REQUEST.get_response() {
-            let mapper = init(hhdm_response, memory_map_response);
+            let (mapper, frame_allocator) = init(hhdm_response, memory_map_response);
         } else {
-            init_serial_console(0, 0);
             panic!("Could not get required info from Limine's memory map. ")
         }
     } else {
-        init_serial_console(0, 0);
         panic!("Could not get required info from the Limine's higher-half direct mapping. ")
     }
@@ -115,6 +117,13 @@ unsafe extern "C" fn kmain() -> ! {
         triangle_outline(&mut fb, 100, 400, 200, 400, 150, 600, rgb(0, 0, 0));
     });
+    let x = Box::new(41);
+    let mut test_vec: Vec<u16> = Vec::new();
+    test_vec.push(5);
+    println!("Before: {:?}", test_vec);
+    test_vec.push(9);
+    println!("After: {:?}", test_vec);
     idle();
 }
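If the heap is wired up correctly, Box::new(41) and the Vec pushes all go through the new global allocator, and the serial console should print the Vec's Debug form:

Before: [5]
After: [5, 9]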

kernel/src/util.rs Normal file

@@ -0,0 +1,36 @@
use spin::{Mutex, MutexGuard};
pub struct LinkedNode {
    pub size: usize,
    pub next: Option<&'static mut LinkedNode>,
}
impl LinkedNode {
    pub const fn new(size: usize) -> LinkedNode {
        LinkedNode { size, next: None }
    }
    pub fn start_addr(&self) -> usize {
        self as *const Self as usize
    }
    pub fn end_addr(&self) -> usize {
        self.start_addr() + self.size
    }
}
pub struct Locked<A> {
    inner: Mutex<A>,
}
impl<A> Locked<A> {
    pub const fn new(inner: A) -> Self {
        Locked {
            inner: Mutex::new(inner),
        }
    }
    pub fn lock(&self) -> MutexGuard<A> {
        self.inner.lock()
    }
}
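A closing note on Locked: it wraps spin::Mutex so a static can be shared and mutated without static mut, and it gives the crate a local type to hang the GlobalAlloc impl on in heap.rs. A usage sketch with a hypothetical counter:

// Hypothetical example: a shared, mutable static behind a spin lock.
static COUNTER: Locked<u64> = Locked::new(0);

fn bump() -> u64 {
    let mut value = COUNTER.lock(); // busy-waits until the lock is free
    *value += 1;
    *value
}

Since the lock spins rather than blocks, taking it from an interrupt handler that preempted a lock holder would deadlock, which is worth keeping in mind once the allocator is used from interrupt context.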