Add a usermode address space which can be switched to, make
FRAME_ALLOCATOR global, make XunilFrameAllocator not hold Limine entries
so it can be used without lifetimes, implement the process struct, add
user heap by giving back heap_start from ELF and adding sbrk syscall,
align ELF loading in userspace_stub, implement lots of libc functions in
libxunil, remove x86_64 dependency from libxunil, add malloc and all
required heap functions to libxunil and more syscall numbers, add a util
file to libxunil, add build scripts for libxunil and doomgeneric
This commit is contained in:
csd4ni3l
2026-04-05 20:12:59 +02:00
parent 1e899e2f97
commit ae3915147a
26 changed files with 785 additions and 219 deletions

View File

@@ -1,12 +1,16 @@
use crate::driver::timer::TIMER;
#[cfg(target_arch = "x86_64")]
use core::alloc::GlobalAlloc;
use core::arch::asm;
pub use crate::arch::x86_64::paging::FRAME_ALLOCATOR_X86_64 as FRAME_ALLOCATOR;
use crate::driver::timer::TIMER;
use core::{alloc::GlobalAlloc, arch::asm};
use limine::response::{HhdmResponse, MemoryMapResponse};
#[cfg(target_arch = "x86_64")]
use crate::arch::x86_64::{
elf::run_elf_x86_64, heap::ALLOCATOR, init::init_x86_64, paging::XunilFrameAllocator,
elf::run_elf_x86_64,
heap::ALLOCATOR,
init::init_x86_64,
paging::{FRAME_ALLOCATOR_X86_64, XunilFrameAllocator},
usermode::enter_usermode_x86_64,
};
#[cfg(target_arch = "x86_64")]
@@ -16,7 +20,7 @@ use x86_64::structures::paging::OffsetPageTable;
pub fn init<'a>(
hhdm_response: &HhdmResponse,
memory_map_response: &'a MemoryMapResponse,
) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
) -> (OffsetPageTable<'static>) {
return init_x86_64(hhdm_response, memory_map_response);
}
@@ -26,8 +30,8 @@ pub fn enter_usermode(user_rip: u64, user_rsp: u64) {
}
#[cfg(target_arch = "x86_64")]
pub fn run_elf(entry_point: *const u8, frame_allocator: &mut XunilFrameAllocator) {
run_elf_x86_64(entry_point, frame_allocator);
pub fn run_elf(entry_point: *const u8, heap_base: u64) {
run_elf_x86_64(entry_point, heap_base);
}
pub fn get_allocator<'a>() -> &'static impl GlobalAlloc {

View File

@@ -7,24 +7,30 @@ use x86_64::{
};
use crate::{
arch::x86_64::{paging::XunilFrameAllocator, usermode::enter_usermode_x86_64},
arch::{
arch::FRAME_ALLOCATOR,
x86_64::{paging::XunilFrameAllocator, usermode::enter_usermode_x86_64},
},
task::{process::Process, scheduler::SCHEDULER},
};
pub fn run_elf_x86_64(entry_point: *const u8, frame_allocator: &mut XunilFrameAllocator) {
pub fn run_elf_x86_64(entry_point: *const u8, heap_base: u64) {
let stack_base: u64 = 0x0000_7fff_0000_0000;
let page_count = 3;
let page_size = 0x1000u64;
let stack_top = stack_base + (page_count as u64 * page_size);
let process_pid = SCHEDULER
.spawn_process(entry_point as u64, frame_allocator)
.spawn_process(entry_point as u64, stack_top, heap_base)
.unwrap();
SCHEDULER.with_process(process_pid, |process| {
process.address_space.use_address_space()
});
let stack_base: u64 = 0x0000_7fff_0000_0000;
let page_count = 3;
let page_size = 0x1000u64;
let mut frames: Vec<PhysFrame<Size4KiB>> = Vec::new();
let mut frame_allocator = FRAME_ALLOCATOR.lock();
for i in 0..page_count {
let frame = frame_allocator.allocate_frame().unwrap();
frames.push(frame);
@@ -43,16 +49,14 @@ pub fn run_elf_x86_64(entry_point: *const u8, frame_allocator: &mut XunilFrameAl
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
frame_allocator,
&mut *frame_allocator,
)
.unwrap()
.flush();
});
}
}
drop(frame_allocator);
let stack_top = stack_base + (page_count as u64 * page_size);
let rsp = (stack_top & !0xF) - 8;
enter_usermode_x86_64(entry_point as u64, rsp);
SCHEDULER.run_process(process_pid, entry_point);
}

View File

@@ -1,5 +1,5 @@
use crate::arch::x86_64::paging::XunilFrameAllocator;
use crate::util::Locked;
use crate::arch::x86_64::paging::{FRAME_ALLOCATOR_X86_64, XunilFrameAllocator};
use crate::util::{Locked, serial_print};
use core::{
alloc::{GlobalAlloc, Layout},
ptr::null_mut,
@@ -138,6 +138,9 @@ unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
allocator.add_free_memory_region(alloc_end, excess_size);
}
}
drop(allocator);
alloc_start as *mut u8
} else {
null_mut()
@@ -153,10 +156,7 @@ unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
}
}
pub fn init_heap(
mapper: &mut OffsetPageTable,
frame_allocator: &mut XunilFrameAllocator,
) -> Result<(), MapToError<Size4KiB>> {
pub fn init_heap(mapper: &mut OffsetPageTable) -> Result<(), MapToError<Size4KiB>> {
let page_range = {
let page_start = VirtAddr::new(HEAP_START as u64);
let page_end = page_start + HEAP_SIZE as u64 - 1u64;
@@ -165,6 +165,8 @@ pub fn init_heap(
Page::range_inclusive(heap_start_page, heap_end_page)
};
let mut frame_allocator = FRAME_ALLOCATOR_X86_64.lock();
for page in page_range {
let frame = frame_allocator
.allocate_frame()
@@ -172,12 +174,14 @@ pub fn init_heap(
let flags = Flags::PRESENT | Flags::WRITABLE;
unsafe {
mapper
.map_to(page, frame, flags, frame_allocator)
.map_to(page, frame, flags, &mut *frame_allocator)
.map_err(|e| e)?
.flush();
}
}
drop(frame_allocator);
unsafe {
ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
}

View File

@@ -5,6 +5,7 @@ use crate::{
mouse::setup_mouse,
},
driver::mouse::MOUSE,
util::serial_print,
};
use limine::response::{HhdmResponse, MemoryMapResponse};
use x86_64::instructions::interrupts::without_interrupts;
@@ -16,21 +17,22 @@ const PIT_DIVISOR: u16 = (1_193_182_u32 / TIMER_PRECISION_HZ) as u16;
#[cfg(target_arch = "x86_64")]
use crate::arch::x86_64::{
heap::init_heap,
paging::{XunilFrameAllocator, initialize_paging},
paging::{FRAME_ALLOCATOR_X86_64, XunilFrameAllocator, initialize_paging},
};
#[cfg(target_arch = "x86_64")]
use x86_64::{VirtAddr, structures::paging::OffsetPageTable};
#[cfg(target_arch = "x86_64")]
pub fn memory_management_init<'a>(
pub fn memory_management_init(
hhdm_response: &HhdmResponse,
memory_map_response: &'a MemoryMapResponse,
) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
memory_map_response: &MemoryMapResponse,
) -> OffsetPageTable<'static> {
let physical_offset = VirtAddr::new(hhdm_response.offset());
let mapper = unsafe { initialize_paging(physical_offset) };
let frame_allocator =
XunilFrameAllocator::new(hhdm_response.offset(), memory_map_response.entries());
(mapper, frame_allocator)
let mut frame_allocator = FRAME_ALLOCATOR_X86_64.lock();
frame_allocator.initialize(hhdm_response.offset(), memory_map_response.entries());
drop(frame_allocator);
mapper
}
pub fn set_pit_interval() {
@@ -49,7 +51,7 @@ pub fn set_pit_interval() {
pub fn init_x86_64<'a>(
hhdm_response: &HhdmResponse,
memory_map_response: &'a MemoryMapResponse,
) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
) -> OffsetPageTable<'static> {
load_gdt_x86_64();
init_idt_x86_64();
@@ -66,14 +68,13 @@ pub fn init_x86_64<'a>(
interrupts::enable();
let (mut mapper, mut frame_allocator) =
memory_management_init(hhdm_response, memory_map_response);
let mut mapper = memory_management_init(hhdm_response, memory_map_response);
init_heap(&mut mapper, &mut frame_allocator)
init_heap(&mut mapper)
.ok()
.expect("Failed to initalize heap");
MOUSE.set_status(mouse_status);
return (mapper, frame_allocator);
return mapper;
}

View File

@@ -1,3 +1,5 @@
use alloc::string::ToString;
use spin::mutex::Mutex;
use x86_64::{
PhysAddr, VirtAddr,
registers::control::Cr3,
@@ -6,6 +8,8 @@ use x86_64::{
use limine::memory_map::{Entry, EntryType};
use crate::util::{align_up, serial_print};
unsafe fn active_level_4_table(mem_offset: VirtAddr) -> &'static mut PageTable {
let (level_4_table, _) = Cr3::read();
@@ -23,38 +27,68 @@ pub unsafe fn initialize_paging(physical_memory_offset: VirtAddr) -> OffsetPageT
}
}
pub struct XunilFrameAllocator<'a> {
/// A single run of usable physical memory, copied out of the Limine memory
/// map so the frame allocator can own its data without borrowing the
/// bootloader's `Entry` slice (and thus without lifetimes).
#[derive(Clone, Copy)]
struct UsableRegion {
    // Physical base address of the region (page-aligned by `initialize`).
    base: u64,
    // Length in bytes remaining after base alignment.
    length: u64,
}

// Placeholder used to const-initialize the fixed-size region table before
// `initialize` runs; a zero-length region yields no frames.
const EMPTY_REGION: UsableRegion = UsableRegion { base: 0, length: 0 };
pub struct XunilFrameAllocator {
pub hhdm_offset: u64,
memory_map: &'a [&'a Entry],
usable_regions: [UsableRegion; 1024],
usable_region_count: usize,
region_index: usize,
region_offset: usize,
}
impl<'a> XunilFrameAllocator<'a> {
pub fn new(hhdm_offset: u64, memory_map: &'a [&'a Entry]) -> Self {
let region_index = memory_map
.iter()
.position(|region| region.entry_type == EntryType::USABLE)
.unwrap();
impl XunilFrameAllocator {
pub const fn new() -> Self {
Self {
hhdm_offset,
memory_map,
region_index,
hhdm_offset: 0,
usable_regions: [EMPTY_REGION; 1024],
usable_region_count: 0,
region_index: 0,
region_offset: 0,
}
}
/// Copies the USABLE entries of the Limine memory map into the allocator's
/// fixed-size internal table and resets the allocation cursor.
///
/// Each region's base is rounded up to a 4 KiB boundary and its length
/// shrunk accordingly; regions smaller than one page after alignment are
/// discarded. NOTE(review): at most 1024 regions are kept — any further
/// usable entries are silently dropped (no error, no log). Confirm that
/// real memory maps stay under this limit.
pub fn initialize(&mut self, hhdm_offset: u64, memory_map: &[&Entry]) {
    let mut regions = [EMPTY_REGION; 1024];
    let mut count = 0usize;

    for region in memory_map.iter().copied() {
        // Only USABLE entries may be handed out as frames.
        if region.entry_type != EntryType::USABLE {
            continue;
        }
        if count < regions.len() && region.length >= 4096 {
            // Align the base up to a page boundary and shave the lost
            // prefix off the length; saturating_sub guards against the
            // (theoretical) case where alignment overshoots the region.
            let aligned_base = align_up(region.base, 4096);
            let base_offset = aligned_base - region.base;
            let aligned_length = region.length.saturating_sub(base_offset);
            if aligned_length >= 4096 {
                regions[count] = UsableRegion {
                    base: aligned_base,
                    length: aligned_length,
                };
                count += 1;
            }
        }
    }

    self.hhdm_offset = hhdm_offset;
    self.usable_regions = regions;
    self.usable_region_count = count;
    // Restart allocation from the first region/frame.
    self.region_index = 0;
    self.region_offset = 0;
}
}
unsafe impl<'a> FrameAllocator<Size4KiB> for XunilFrameAllocator<'a> {
unsafe impl FrameAllocator<Size4KiB> for XunilFrameAllocator {
fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
loop {
let region = self
.memory_map
.iter()
.filter(|region| region.entry_type == EntryType::USABLE)
.nth(self.region_index)?;
while self.region_index < self.usable_region_count {
let region = self.usable_regions[self.region_index];
let frame_count = region.length / 4096;
if self.region_offset < frame_count as usize {
@@ -66,5 +100,10 @@ unsafe impl<'a> FrameAllocator<Size4KiB> for XunilFrameAllocator<'a> {
self.region_index += 1;
self.region_offset = 0;
}
None
}
}
pub static FRAME_ALLOCATOR_X86_64: Mutex<XunilFrameAllocator> =
Mutex::new(XunilFrameAllocator::new());

View File

@@ -1,45 +1,41 @@
use core::ptr::null;
use alloc::boxed::Box;
use x86_64::structures::paging::OffsetPageTable;
use crate::{
arch::x86_64::paging::XunilFrameAllocator,
driver::{
elf::{
header::{
ET_DYN, ET_EXEC, ET_REL, Elf64Ehdr, Elf64Rel, Elf64Shdr, SHF_ALLOC, SHT_NOBITS,
SHT_REL,
},
program::load_program,
reloc::elf_do_reloc,
section::elf_sheader,
validation::validate_elf,
use crate::driver::{
elf::{
header::{
ET_DYN, ET_EXEC, ET_REL, Elf64Ehdr, Elf64Rel, Elf64Shdr, SHF_ALLOC, SHT_NOBITS, SHT_REL,
},
syscall::{malloc, memset},
program::load_program,
reloc::elf_do_reloc,
section::elf_sheader,
validation::validate_elf,
},
syscall::{malloc, memset},
};
pub fn load_file(
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
elf_bytes: &[u8],
) -> *const u8 {
pub fn load_file(mapper: &mut OffsetPageTable, elf_bytes: &[u8]) -> (*const u8, u64) {
// elf header size
if elf_bytes.len() < 64 {
return null();
return (null(), 0);
}
let elf_header: &Elf64Ehdr = unsafe { &*(elf_bytes.as_ptr() as *const Elf64Ehdr) };
let elf_header: Elf64Ehdr =
unsafe { core::ptr::read_unaligned(elf_bytes.as_ptr() as *const Elf64Ehdr) };
if !validate_elf(elf_header, elf_bytes.len()) {
return null();
if !validate_elf(&elf_header, elf_bytes.len()) {
return (null(), 0);
}
return match elf_header.e_type {
ET_EXEC => unsafe { load_program(frame_allocator, mapper, elf_header, elf_bytes, false) },
ET_DYN => unsafe { load_program(frame_allocator, mapper, elf_header, elf_bytes, true) }, // TODO
ET_REL => return null(),
_ => return null(),
let elf_header_ptr = elf_bytes.as_ptr() as *const Elf64Ehdr;
return match unsafe { elf_header.e_type } {
ET_EXEC => unsafe { load_program(mapper, elf_header_ptr, elf_bytes, false) },
ET_DYN => unsafe { load_program(mapper, elf_header_ptr, elf_bytes, true) },
ET_REL => return (null(), 0),
_ => return (null(), 0),
};
}
@@ -69,7 +65,7 @@ pub unsafe fn elf_load_stage1(hdr: *const Elf64Ehdr) {
// zero the memory
memset(mem, 0, section.sh_size as usize);
}
section.sh_offset = (mem.addr() + hdr.addr()) as u64;
section.sh_offset = mem.addr() as u64;
}
}

View File

@@ -12,7 +12,7 @@ use x86_64::{
};
use crate::{
arch::x86_64::paging::XunilFrameAllocator,
arch::arch::FRAME_ALLOCATOR,
driver::{
elf::{
header::{
@@ -40,12 +40,11 @@ pub fn get_vaddr(phdr: *const Elf64Phdr, load_bias: u64) -> *mut u8 {
}
pub unsafe fn load_program(
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
hdr: *const Elf64Ehdr,
elf_bytes: &[u8],
pie: bool,
) -> *const u8 {
) -> (*const u8, u64) {
let phdr = unsafe { elf_pheader(hdr) };
let phnum = unsafe { (*hdr).e_phnum };
let mut program_headers: Vec<*const Elf64Phdr> = Vec::new();
@@ -64,9 +63,23 @@ pub unsafe fn load_program(
if !pie {
for program_header in program_headers {
load_segment_to_memory(frame_allocator, mapper, program_header, elf_bytes, 0);
load_segment_to_memory(mapper, program_header, elf_bytes, 0);
}
return unsafe { (*hdr).e_entry as *const u8 };
let mut highest_seg = 0;
for i in 0..phnum {
let program_header = unsafe { phdr.add(i as usize) };
let seg_end = unsafe { (*program_header).p_vaddr + (*program_header).p_memsz };
if seg_end > highest_seg {
highest_seg = seg_end;
}
}
return (
unsafe { (*hdr).e_entry as *const u8 },
align_up(highest_seg as u64, 4096),
);
} else {
let base_address = 0x0000_0100_0000; // TODO: add per-process memory
let min_vaddr = align_down(
@@ -80,18 +93,23 @@ pub unsafe fn load_program(
let load_bias = base_address - min_vaddr;
let mut highest_seg = 0;
for i in 0..phnum {
let program_header = unsafe { phdr.add(i as usize) };
let seg_end =
unsafe { (*program_header).p_vaddr + (*program_header).p_memsz + load_bias };
if seg_end > highest_seg {
highest_seg = seg_end;
}
}
for program_header in program_headers {
load_segment_to_memory(
frame_allocator,
mapper,
program_header,
elf_bytes,
load_bias,
);
load_segment_to_memory(mapper, program_header, elf_bytes, load_bias);
}
if pt_dynamic_header.is_null() {
return null();
return (null(), 0);
}
parse_dyn(
@@ -101,7 +119,10 @@ pub unsafe fn load_program(
load_bias,
);
return unsafe { ((*hdr).e_entry + load_bias) as *const u8 };
return (
unsafe { ((*hdr).e_entry + load_bias) as *const u8 },
align_up(highest_seg as u64, 4096),
);
}
}
@@ -261,7 +282,6 @@ fn parse_dyn(
}
pub fn load_segment_to_memory(
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
phdr: *const Elf64Phdr,
elf_bytes: &[u8],
@@ -299,6 +319,8 @@ pub fn load_segment_to_memory(
let end_page: Page<Size4KiB> = Page::containing_address(VirtAddr::new(seg_end - 1));
let page_range = Page::range_inclusive(start_page, end_page);
let mut frame_allocator = FRAME_ALLOCATOR.lock();
for page in page_range {
let frame = frame_allocator
.allocate_frame()
@@ -306,13 +328,15 @@ pub fn load_segment_to_memory(
.expect("test");
unsafe {
mapper
.map_to(page, frame, flags, frame_allocator)
.map_to(page, frame, flags, &mut *frame_allocator)
.map_err(|e| e)
.expect("test")
.flush();
}
}
drop(frame_allocator);
unsafe {
core::ptr::copy_nonoverlapping(
elf_bytes.as_ptr().add(p_offset as usize),

View File

@@ -3,14 +3,6 @@ use crate::driver::elf::{
section::{elf_get_symval, elf_section},
};
pub trait ArchRelocate {
fn apply_relocation(&self, rela: &Elf64Rela, base: usize, sym_value: usize) -> Result<(), i8>;
fn setup_entry(&self, entry: usize, stack_top: usize, argv: &[&str]) -> !;
}
// TODO: make ET_REL work
pub unsafe fn elf_do_reloc(
hdr: *const Elf64Ehdr,
rel: *const Elf64Rel,

View File

@@ -3,14 +3,40 @@ use core::{
ptr::null_mut,
};
use crate::{
arch::arch::{get_allocator, infinite_idle},
driver::graphics::framebuffer::with_framebuffer,
println,
use x86_64::{
VirtAddr,
structures::paging::{FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB},
};
const SYS_EXIT: usize = 1;
const SYS_WRITE: usize = 60;
use crate::{
arch::arch::{FRAME_ALLOCATOR, get_allocator, infinite_idle},
driver::graphics::framebuffer::with_framebuffer,
println,
task::scheduler::SCHEDULER,
util::{align_up, serial_print},
};
// Linux x86_64 syscall numbers (the subset this kernel implements or plans
// to implement). Values must match the upstream Linux syscall table so that
// userspace built against a Linux-compatible libc dispatches correctly.
const READ: usize = 0;
const WRITE: usize = 1;
const OPEN: usize = 2;
const CLOSE: usize = 3;
const STAT: usize = 4;
const LSEEK: usize = 8;
const MMAP: usize = 9;
// BUG FIX: munmap is syscall 11 on x86_64 Linux. It was previously defined
// as 9, duplicating MMAP, which would have routed munmap calls to the mmap
// handler in `syscall_dispatch`.
const MUNMAP: usize = 11;
const BRK: usize = 12;
const GETPID: usize = 39;
const FORK: usize = 57;
const EXECVE: usize = 59;
const EXIT: usize = 60;
const WAIT4: usize = 61;
const KILL: usize = 62;
const CHDIR: usize = 80;
const MKDIR: usize = 83;
const UNLINK: usize = 87;
const GETDENTS64: usize = 217;
const CLOCK_GETTIME: usize = 228;
const EXIT_GROUP: usize = 231;
pub unsafe fn malloc(size: usize, align: usize) -> *mut u8 {
let align = if align < 1 {
@@ -47,14 +73,84 @@ pub unsafe fn memset(ptr: *mut u8, val: u8, count: usize) {
unsafe { core::ptr::write_bytes(ptr, val, count) };
}
/// Grows or shrinks the current process's heap break by `increment` bytes.
///
/// Returns the *old* break on success (classic `sbrk` semantics) or `-1`
/// when there is no current process, the new break would fall below the
/// heap base or collide with the 3-page user stack, or the process lookup
/// fails. Growth maps fresh user-accessible, non-executable pages for the
/// range [align_up(old), align_up(new)); shrinking only moves the break —
/// pages are never unmapped or returned to the frame allocator.
///
/// NOTE(review): the scheduler lock is taken to read `current_process`,
/// dropped, and then re-taken inside `with_process` — the current process
/// could change in between (TOCTOU). Also, FRAME_ALLOCATOR is locked
/// *before* the scheduler's process lock here; confirm every other path
/// (e.g. ELF segment loading) uses the same ordering, or this can deadlock.
pub unsafe fn sbrk(increment: isize) -> isize {
    serial_print("sbrk called");
    // Resolve the calling process; -1 means "no process is running".
    let mut scheduler = SCHEDULER.lock();
    if scheduler.current_process == -1 {
        return -1;
    }
    let pid = scheduler.current_process as u64;
    // Release the scheduler lock before re-entering it via with_process.
    drop(scheduler);

    let mut frame_allocator = FRAME_ALLOCATOR.lock();
    return SCHEDULER
        .with_process(pid as u64, |mut process| {
            let (heap_end, heap_base, stack_top) =
                (process.heap_end, process.heap_base, process.stack_top);
            let old = heap_end;
            // Compute the new break with overflow/underflow checks; on
            // arithmetic failure fall back to the old break (a no-op).
            let new = if increment >= 0 {
                old.checked_add(increment as u64)
            } else {
                let dec = increment.unsigned_abs() as u64;
                old.checked_sub(dec)
            }
            .unwrap_or(old);

            // The break may never drop below the heap base...
            if new < heap_base {
                return -1;
            }
            // ...nor grow into the user stack (3 pages below stack_top —
            // presumably mirrors the page_count used when mapping the
            // stack; TODO keep these in sync via a shared constant).
            if new > stack_top - 3 * 4096 {
                return -1;
            }

            if new > old {
                // Map only the not-yet-mapped whole pages. Invariant:
                // pages below align_up(old) are already mapped, so start
                // there and stop before align_up(new).
                let map_start = align_up(old, 4096);
                let map_end = align_up(new, 4096);
                for addr in (map_start..map_end).step_by(4096) {
                    // NOTE(review): unwrap panics the kernel on OOM —
                    // consider returning -1 (ENOMEM) instead.
                    let frame = frame_allocator.allocate_frame().unwrap();
                    // TODO: do not use x86_64 only
                    let virt_addr = VirtAddr::new(addr);
                    let page = Page::<Size4KiB>::containing_address(virt_addr);
                    unsafe {
                        process
                            .address_space
                            .mapper
                            .map_to(
                                page,
                                frame,
                                PageTableFlags::PRESENT
                                    | PageTableFlags::WRITABLE
                                    | PageTableFlags::USER_ACCESSIBLE
                                    | PageTableFlags::NO_EXECUTE,
                                &mut *frame_allocator,
                            )
                            .unwrap()
                            .flush();
                    }
                }
            }
            // Release the frame allocator before finishing bookkeeping.
            drop(frame_allocator);

            process.heap_end = new;
            serial_print("sbrk finished");
            // sbrk returns the previous break on success.
            return old as isize;
        })
        .unwrap_or(-1);
}
#[unsafe(no_mangle)]
pub unsafe extern "C" fn syscall_dispatch(
num: usize,
arg0: usize,
arg1: usize,
arg2: usize,
arg0: isize,
arg1: isize,
arg2: isize,
) -> isize {
match num {
SYS_BRK => sbrk(arg0),
SYS_WRITE => {
let buf_ptr = arg1 as *const u8;
let len = arg2 as usize;

View File

@@ -24,6 +24,7 @@ use crate::driver::graphics::framebuffer::{init_framebuffer, with_framebuffer};
use crate::driver::serial::{ConsoleWriter, init_serial_console, with_serial_console};
use crate::driver::timer::TIMER;
use crate::userspace_stub::userspace_init;
use crate::util::serial_print;
/// Sets the base revision to the latest revision supported by the crate.
/// See specification for further info.
/// Be sure to mark all limine requests with #[used], otherwise they may be removed by the compiler.
@@ -98,11 +99,10 @@ unsafe extern "C" fn kmain() -> ! {
assert!(BASE_REVISION.is_supported());
let mut mapper;
let mut frame_allocator;
if let Some(hhdm_response) = HHDM_REQUEST.get_response() {
if let Some(memory_map_response) = MEMORY_MAP_REQUEST.get_response() {
(mapper, frame_allocator) = init(hhdm_response, memory_map_response);
mapper = init(hhdm_response, memory_map_response);
} else {
kernel_crash(); // Could not get required info from Limine's memory map.
}
@@ -124,7 +124,7 @@ unsafe extern "C" fn kmain() -> ! {
println!("Could not get date at boot. Will default to 0.")
}
userspace_init(&mut frame_allocator, &mut mapper)
userspace_init(&mut mapper)
}
#[panic_handler]

View File

@@ -4,17 +4,18 @@ use x86_64::{
structures::paging::{FrameAllocator, OffsetPageTable, PageTable, PhysFrame, Size4KiB},
};
use crate::{arch::x86_64::paging::XunilFrameAllocator, driver::syscall::memset};
use crate::{
arch::{arch::FRAME_ALLOCATOR, x86_64::paging::XunilFrameAllocator},
driver::syscall::memset,
};
pub struct AddressSpace {
cr3_frame: PhysFrame<Size4KiB>,
user_stack_top: VirtAddr,
pub mapper: OffsetPageTable<'static>,
heap_base: VirtAddr,
heap_end: VirtAddr,
}
impl AddressSpace {
pub fn new(frame_allocator: &mut XunilFrameAllocator) -> Option<AddressSpace> {
pub fn new() -> Option<AddressSpace> {
let mut frame_allocator = FRAME_ALLOCATOR.lock();
let new_pml4 = frame_allocator.allocate_frame()?;
unsafe {
@@ -47,12 +48,11 @@ impl AddressSpace {
)
};
drop(frame_allocator);
Some(AddressSpace {
cr3_frame: new_pml4,
user_stack_top: VirtAddr::new(0x0000_7fff_0000_0000),
mapper: mapper,
heap_base: VirtAddr::new(0x0),
heap_end: VirtAddr::new(0x0),
})
}

View File

@@ -11,6 +11,9 @@ pub struct Process {
pub pid: u64,
pub state: ProcessState,
// cpu_ctx: &[u8],
pub stack_top: u64,
pub heap_base: u64,
pub heap_end: u64,
pub address_space: AddressSpace,
pub user_entry: u64,
}
@@ -18,13 +21,18 @@ impl Process {
pub fn new(
pid: u64,
user_entry: u64,
frame_allocator: &mut XunilFrameAllocator,
stack_top: u64,
heap_base: u64,
heap_end: u64,
) -> Option<Process> {
let address_space = AddressSpace::new(frame_allocator)?;
let address_space = AddressSpace::new()?;
Some(Process {
pid,
stack_top,
state: ProcessState::Ready,
heap_base,
heap_end,
address_space,
user_entry,
})

View File

@@ -1,10 +1,15 @@
use alloc::collections::btree_map::BTreeMap;
use lazy_static::lazy_static;
use crate::{arch::x86_64::paging::XunilFrameAllocator, task::process::Process, util::Locked};
use crate::{
arch::{arch::enter_usermode, x86_64::paging::XunilFrameAllocator},
task::process::Process,
util::Locked,
};
pub struct Scheduler {
pub processes: BTreeMap<u64, Process>,
pub current_process: i64,
next_pid: u64,
}
@@ -12,26 +17,31 @@ impl Scheduler {
pub const fn new() -> Scheduler {
Scheduler {
processes: BTreeMap::new(),
current_process: -1,
next_pid: 1,
}
}
}
impl Locked<Scheduler> {
pub fn spawn_process(
&self,
entry_point: u64,
frame_allocator: &mut XunilFrameAllocator,
) -> Option<u64> {
/// Allocates a fresh PID, builds a `Process` for `entry_point`, and
/// registers it with the scheduler. Returns the new PID, or `None` if
/// `Process::new` fails (e.g. its address-space setup cannot allocate).
///
/// `heap_base` is passed as both heap base and heap end, so the process
/// starts with an empty heap that `sbrk` can later grow.
pub fn spawn_process(&self, entry_point: u64, stack_top: u64, heap_base: u64) -> Option<u64> {
    let mut guard = self.lock();
    // PIDs are handed out monotonically, starting at 1.
    let pid = guard.next_pid;
    guard.next_pid += 1;
    let process = Process::new(pid, entry_point, stack_top, heap_base, heap_base)?;
    guard.processes.insert(pid, process);
    Some(pid)
}
/// Marks `pid` as the current process and jumps to usermode at
/// `entry_point` with the process's stack.
///
/// The stack pointer is aligned down to 16 bytes and offset by 8 —
/// presumably to satisfy the SysV x86_64 ABI's entry alignment; confirm
/// against enter_usermode's expectations.
///
/// NOTE(review): indexing `guard.processes[&pid]` panics on an unknown
/// PID, and the scheduler lock is still held when `enter_usermode` is
/// called — if that call never returns, the lock is never released.
pub fn run_process(&self, pid: u64, entry_point: *const u8) {
    let mut guard = self.lock();
    let stack_top = guard.processes[&pid].stack_top;
    guard.current_process = pid as i64;
    enter_usermode(entry_point as u64, (stack_top & !0xF) - 8);
}
pub fn with_process<F, R>(&self, index: u64, f: F) -> Option<R>
where
F: FnOnce(&mut Process) -> R,

View File

@@ -3,7 +3,7 @@ use x86_64::structures::paging::OffsetPageTable;
use crate::{
arch::{
arch::{run_elf, sleep},
arch::{FRAME_ALLOCATOR, run_elf, sleep},
x86_64::paging::XunilFrameAllocator,
},
driver::{
@@ -21,11 +21,16 @@ use crate::{
timer::TIMER,
},
print, println,
util::test_performance,
util::{serial_print, test_performance},
};
static CURSOR_BYTES: &[u8] = include_bytes!("../../assets/cursors/default.bmp");
static TEST_ELF_BYTES: &[u8] = include_bytes!("../../assets/helloworld.elf");
#[repr(C, align(8))]
struct AlignedElf([u8; include_bytes!("../../assets/doomgeneric").len()]);
static TEST_ELF: AlignedElf = AlignedElf(*include_bytes!("../../assets/doomgeneric"));
static TEST_ELF_BYTES: &[u8] = &TEST_ELF.0;
const BMP_HEADER_SIZE: usize = 138;
pub const CURSOR_W: usize = 24;
pub const CURSOR_H: usize = 24;
@@ -78,20 +83,17 @@ fn boot_animation() {
});
}
pub fn userspace_init(
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
) -> ! {
pub fn userspace_init(mapper: &mut OffsetPageTable) -> ! {
// this is just a stub
boot_animation();
let entry_point = load_file(frame_allocator, mapper, TEST_ELF_BYTES);
let (entry_point, heap_base) = load_file(mapper, TEST_ELF_BYTES);
println!("Entry point: {:?}", entry_point);
with_framebuffer(|fb| fb.swap());
run_elf(entry_point, frame_allocator);
run_elf(entry_point, heap_base);
loop {}

View File

@@ -12,6 +12,7 @@ impl<A> Locked<A> {
}
}
#[allow(mismatched_lifetime_syntaxes)]
pub fn lock(&self) -> MutexGuard<A> {
self.inner.lock()
}
@@ -48,3 +49,9 @@ pub const fn align_up(addr: u64, align: u64) -> u64 {
}
}
}
/// Debug helper: writes each byte of `s` to I/O port 0x3F8 (the COM1 data
/// port on PC hardware) with a raw `out` instruction.
///
/// NOTE(review): this is x86-only but not gated behind
/// `#[cfg(target_arch = "x86_64")]`, takes no lock (concurrent callers can
/// interleave bytes), and does not poll the UART's line-status register
/// before writing — bytes may be dropped if the transmitter is busy.
pub fn serial_print(s: &str) {
    for byte in s.bytes() {
        // SAFETY-relevant: port I/O; assumes COM1 exists and ring 0.
        unsafe { core::arch::asm!("out dx, al", in("dx") 0x3F8u16, in("al") byte) }
    }
}