Add a scheduler which just keeps track of processes for now, and a

with_process helper to interact with them. User space now has its own address
space and mapper, which means we will be able to allocate memory for it.
I added a bunch of functions as stubs into libxunil which are required
for doomgeneric.
This commit is contained in:
csd4ni3l
2026-04-03 11:28:31 +02:00
parent 720b68190d
commit 1e899e2f97
28 changed files with 535 additions and 57 deletions

View File

@@ -26,12 +26,8 @@ pub fn enter_usermode(user_rip: u64, user_rsp: u64) {
}
#[cfg(target_arch = "x86_64")]
pub fn run_elf(
entry_point: *const u8,
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
) {
run_elf_x86_64(entry_point, frame_allocator, mapper);
pub fn run_elf(entry_point: *const u8, frame_allocator: &mut XunilFrameAllocator) {
run_elf_x86_64(entry_point, frame_allocator);
}
pub fn get_allocator<'a>() -> &'static impl GlobalAlloc {

View File

@@ -6,13 +6,20 @@ use x86_64::{
},
};
use crate::arch::x86_64::{paging::XunilFrameAllocator, usermode::enter_usermode_x86_64};
use crate::{
arch::x86_64::{paging::XunilFrameAllocator, usermode::enter_usermode_x86_64},
task::{process::Process, scheduler::SCHEDULER},
};
pub fn run_elf_x86_64(entry_point: *const u8, frame_allocator: &mut XunilFrameAllocator) {
let process_pid = SCHEDULER
.spawn_process(entry_point as u64, frame_allocator)
.unwrap();
SCHEDULER.with_process(process_pid, |process| {
process.address_space.use_address_space()
});
pub fn run_elf_x86_64(
entry_point: *const u8,
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
) {
let stack_base: u64 = 0x0000_7fff_0000_0000;
let page_count = 3;
let page_size = 0x1000u64;
@@ -26,17 +33,21 @@ pub fn run_elf_x86_64(
let page = Page::<Size4KiB>::containing_address(virt_addr);
unsafe {
mapper
.map_to(
page,
frame,
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
frame_allocator,
)
.unwrap()
.flush();
SCHEDULER.with_process(process_pid, |process| {
process
.address_space
.mapper
.map_to(
page,
frame,
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
frame_allocator,
)
.unwrap()
.flush();
});
}
}

View File

@@ -1,5 +1,5 @@
use crate::arch::x86_64::paging::XunilFrameAllocator;
use crate::util::{LinkedNode, Locked};
use crate::util::Locked;
use core::{
alloc::{GlobalAlloc, Layout},
ptr::null_mut,
@@ -22,6 +22,25 @@ pub static ALLOCATOR: Locked<LinkedListAllocator> = Locked::new(LinkedListAlloca
pub const HEAP_START: usize = 0x_4444_4444_0000;
pub const HEAP_SIZE: usize = 256 * 1024 * 1024; // 256 MiB
/// A node in the allocator's intrusive free list. A node is stored at the
/// start of the free region it describes, so the node's own address doubles
/// as the region's start address.
pub struct LinkedNode {
    /// Size in bytes of the free region this node describes.
    pub size: usize,
    /// Next free region in the list, or `None` at the tail.
    pub next: Option<&'static mut LinkedNode>,
}

impl LinkedNode {
    /// Creates a detached node describing a free region of `size` bytes.
    pub const fn new(size: usize) -> LinkedNode {
        LinkedNode { size, next: None }
    }

    /// Start address of the region: the address of the node itself.
    pub fn start_addr(&self) -> usize {
        let node_ptr: *const LinkedNode = self;
        node_ptr as usize
    }

    /// One-past-the-end address of the region.
    pub fn end_addr(&self) -> usize {
        let base = self.start_addr();
        base + self.size
    }
}
pub struct LinkedListAllocator {
head: LinkedNode,
}

View File

@@ -28,7 +28,8 @@ pub fn memory_management_init<'a>(
) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
let physical_offset = VirtAddr::new(hhdm_response.offset());
let mapper = unsafe { initialize_paging(physical_offset) };
let frame_allocator = XunilFrameAllocator::new(memory_map_response.entries());
let frame_allocator =
XunilFrameAllocator::new(hhdm_response.offset(), memory_map_response.entries());
(mapper, frame_allocator)
}

View File

@@ -24,19 +24,21 @@ pub unsafe fn initialize_paging(physical_memory_offset: VirtAddr) -> OffsetPageT
}
pub struct XunilFrameAllocator<'a> {
pub hhdm_offset: u64,
memory_map: &'a [&'a Entry],
region_index: usize,
region_offset: usize,
}
impl<'a> XunilFrameAllocator<'a> {
pub fn new(memory_map: &'a [&'a Entry]) -> Self {
pub fn new(hhdm_offset: u64, memory_map: &'a [&'a Entry]) -> Self {
let region_index = memory_map
.iter()
.position(|region| region.entry_type == EntryType::USABLE)
.unwrap();
Self {
hhdm_offset,
memory_map,
region_index,
region_offset: 0,

View File

@@ -3,7 +3,11 @@ use core::{
ptr::null_mut,
};
use crate::{arch::arch::get_allocator, driver::graphics::framebuffer::with_framebuffer, println};
use crate::{
arch::arch::{get_allocator, infinite_idle},
driver::graphics::framebuffer::with_framebuffer,
println,
};
const SYS_EXIT: usize = 1;
const SYS_WRITE: usize = 60;
@@ -63,7 +67,7 @@ pub unsafe extern "C" fn syscall_dispatch(
SYS_EXIT => {
println!("Program exit.");
with_framebuffer(|fb| fb.swap());
0
infinite_idle();
}
_ => -38, // syscall not found
}

View File

@@ -13,6 +13,8 @@ use limine::request::{
};
pub mod arch;
pub mod driver;
pub mod mm;
pub mod task;
pub mod userspace_stub;
pub mod util;

View File

@@ -0,0 +1,66 @@
use x86_64::{
PhysAddr, VirtAddr,
registers::control::{Cr3, Cr3Flags},
structures::paging::{FrameAllocator, OffsetPageTable, PageTable, PhysFrame, Size4KiB},
};
use crate::{arch::x86_64::paging::XunilFrameAllocator, driver::syscall::memset};
/// A per-process virtual address space: owns the process's top-level page
/// table (PML4) and a mapper for installing pages into it.
pub struct AddressSpace {
    // Physical frame holding this space's PML4; written to CR3 on switch.
    cr3_frame: PhysFrame<Size4KiB>,
    // Highest address of the user stack (matches the stack base used by the
    // ELF loader, 0x0000_7fff_0000_0000).
    user_stack_top: VirtAddr,
    /// Mapper viewing this space's page tables through the HHDM offset.
    pub mapper: OffsetPageTable<'static>,
    // User heap bounds; both zero until a heap is set up for the process.
    heap_base: VirtAddr,
    heap_end: VirtAddr,
}
impl AddressSpace {
    /// Creates a new address space whose PML4 is a copy of the currently
    /// active one (so the kernel's mappings remain visible after a switch),
    /// backed by a freshly allocated physical frame.
    ///
    /// Returns `None` when no physical frame could be allocated.
    pub fn new(frame_allocator: &mut XunilFrameAllocator) -> Option<AddressSpace> {
        let new_pml4 = frame_allocator.allocate_frame()?;

        // Copy every entry of the active PML4 into the new table. The copy
        // overwrites all 512 entries, so a separate zeroing pass before it
        // would be redundant. Cloning the full current mapping (kernel half
        // included) keeps the kernel reachable after switching to this space.
        let (cur_pml4, _pml4_flags) = Cr3::read();
        unsafe {
            let cur_pml4_ptr =
                physical_to_virt_pointer(cur_pml4.start_address(), frame_allocator.hhdm_offset);
            let new_pml4_ptr =
                physical_to_virt_pointer(new_pml4.start_address(), frame_allocator.hhdm_offset);
            for i in 0..512 {
                let val = core::ptr::read(cur_pml4_ptr.add(i));
                core::ptr::write(new_pml4_ptr.add(i), val);
            }
        }

        // Build a mapper over the new top-level table, reached through the
        // higher-half direct map (HHDM) window.
        let mapper = unsafe {
            let table_virt =
                VirtAddr::new(frame_allocator.hhdm_offset + new_pml4.start_address().as_u64());
            let level_4_table: *mut PageTable = table_virt.as_mut_ptr();
            OffsetPageTable::new(
                &mut *level_4_table,
                VirtAddr::new(frame_allocator.hhdm_offset),
            )
        };

        Some(AddressSpace {
            cr3_frame: new_pml4,
            // Top of the user stack; must agree with the stack base mapped
            // by the ELF loader (0x0000_7fff_0000_0000).
            user_stack_top: VirtAddr::new(0x0000_7fff_0000_0000),
            mapper,
            // No user heap yet; both bounds start at zero.
            heap_base: VirtAddr::new(0x0),
            heap_end: VirtAddr::new(0x0),
        })
    }

    /// Switches the CPU to this address space by loading its PML4 into CR3.
    pub fn use_address_space(&mut self) {
        // SAFETY: `cr3_frame` points at a valid PML4 that contains the
        // kernel mappings copied in `new`, so the currently executing code
        // stays mapped across the switch.
        unsafe { Cr3::write(self.cr3_frame, Cr3Flags::empty()) };
    }
}
/// Converts a physical address to a mutable pointer into the higher-half
/// direct map (HHDM) by adding `hhdm_offset`.
///
/// # Safety
///
/// The caller must ensure that `hhdm_offset` is the base of the active HHDM
/// and that `phys_addr` lies within physical memory covered by that mapping;
/// otherwise the returned pointer is dangling and must not be dereferenced.
unsafe fn physical_to_virt_pointer(phys_addr: PhysAddr, hhdm_offset: u64) -> *mut u64 {
    (hhdm_offset + phys_addr.as_u64()) as *mut u64
}

1
kernel/src/mm/mod.rs Normal file
View File

@@ -0,0 +1 @@
pub mod address_space;

View File

3
kernel/src/task/mod.rs Normal file
View File

@@ -0,0 +1,3 @@
pub mod context;
pub mod process;
pub mod scheduler;

View File

@@ -0,0 +1,32 @@
use crate::{arch::x86_64::paging::XunilFrameAllocator, mm::address_space::AddressSpace};
/// Lifecycle state of a process.
///
/// `pub` because it is exposed through the public `Process::state` field;
/// keeping it private would leak a private type through a public interface
/// (the `private_interfaces` lint).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProcessState {
    /// Runnable and waiting to be scheduled.
    Ready,
    /// Currently executing.
    Running,
    /// Waiting on an event; not runnable.
    Blocked,
    /// Exited but not yet reaped.
    Zombie,
}
/// A user-space process tracked by the scheduler.
pub struct Process {
    /// Unique process id assigned by the scheduler (first PID is 1).
    pub pid: u64,
    /// Current lifecycle state; new processes start as `Ready`.
    pub state: ProcessState,
    // TODO(review): saved CPU context placeholder — not implemented yet.
    // cpu_ctx: &[u8],
    /// The process's own address space (PML4 + mapper).
    pub address_space: AddressSpace,
    /// User-mode entry point address of the loaded ELF.
    pub user_entry: u64,
}
impl Process {
    /// Builds a new process in the `Ready` state with its own address space.
    ///
    /// Returns `None` if the address space could not be created (e.g. no
    /// physical frame was available for its page table).
    pub fn new(
        pid: u64,
        user_entry: u64,
        frame_allocator: &mut XunilFrameAllocator,
    ) -> Option<Process> {
        AddressSpace::new(frame_allocator).map(|address_space| Process {
            pid,
            state: ProcessState::Ready,
            address_space,
            user_entry,
        })
    }
}

View File

@@ -0,0 +1,45 @@
use alloc::collections::btree_map::BTreeMap;
use lazy_static::lazy_static;
use crate::{arch::x86_64::paging::XunilFrameAllocator, task::process::Process, util::Locked};
/// Process table for the kernel: for now it only keeps track of processes;
/// there is no time-slicing or context switching yet.
pub struct Scheduler {
    /// All live processes, keyed by PID.
    pub processes: BTreeMap<u64, Process>,
    // Next PID to hand out; starts at 1.
    next_pid: u64,
}
impl Scheduler {
    /// Creates an empty scheduler; the first spawned process receives PID 1.
    /// `const` so it can initialize the global `static`.
    pub const fn new() -> Scheduler {
        let processes = BTreeMap::new();
        Scheduler {
            processes,
            next_pid: 1,
        }
    }
}
impl Locked<Scheduler> {
    /// Creates a process for the ELF at `entry_point` and registers it.
    ///
    /// Returns the new process's PID, or `None` when the process (or its
    /// address space) could not be created. The PID counter is only
    /// advanced on success, so a failed spawn does not burn a PID.
    pub fn spawn_process(
        &self,
        entry_point: u64,
        frame_allocator: &mut XunilFrameAllocator,
    ) -> Option<u64> {
        let mut guard = self.lock();
        let pid = guard.next_pid;
        // Build the process before committing the PID: on failure `?`
        // returns early (releasing the lock) and next_pid is untouched.
        let process = Process::new(pid, entry_point, frame_allocator)?;
        guard.next_pid += 1;
        guard.processes.insert(pid, process);
        Some(pid)
    }

    /// Runs `f` with mutable access to the process whose PID is `index`,
    /// holding the scheduler lock for the duration of the call.
    ///
    /// Returns `None` if no process with that PID exists. NOTE: `f` must not
    /// call back into `SCHEDULER`, or it will deadlock on the spinlock.
    pub fn with_process<F, R>(&self, index: u64, f: F) -> Option<R>
    where
        F: FnOnce(&mut Process) -> R,
    {
        let mut guard = self.lock();
        let process = guard.processes.get_mut(&index)?;
        Some(f(process))
    }
}
pub static SCHEDULER: Locked<Scheduler> = Locked::new(Scheduler::new());

View File

@@ -91,7 +91,7 @@ pub fn userspace_init(
with_framebuffer(|fb| fb.swap());
run_elf(entry_point, frame_allocator, mapper);
run_elf(entry_point, frame_allocator);
loop {}

View File

@@ -1,25 +1,6 @@
use crate::{driver::timer::TIMER, println};
use spin::{Mutex, MutexGuard};
/// A node in an intrusive free list: the node is stored at the start of the
/// free region it describes, so its own address doubles as the region's
/// start address.
pub struct LinkedNode {
    /// Size in bytes of the region this node describes.
    pub size: usize,
    /// Next node in the list, or `None` at the tail.
    pub next: Option<&'static mut LinkedNode>,
}
impl LinkedNode {
    /// Creates a detached node describing a free region of `size` bytes.
    pub const fn new(size: usize) -> LinkedNode {
        LinkedNode { size, next: None }
    }
    /// Start address of the region (the node's own address).
    pub fn start_addr(&self) -> usize {
        self as *const Self as usize
    }
    /// One-past-the-end address of the region.
    pub fn end_addr(&self) -> usize {
        self.start_addr() + self.size
    }
}
/// A value protected by a spinlock (`spin::Mutex`), suitable for `static`s.
pub struct Locked<A> {
    // The wrapped value; access goes through `lock()`.
    inner: Mutex<A>,
}