Add a usermode address space which can be switched to, make
FRAME_ALLOCATOR global, make XunilFrameAllocator not hold Limine entries
so it can be used without lifetimes, implement the process struct, add
user heap by giving back heap_start from ELF and adding sbrk syscall,
align ELF loading in userspace_stub, implement lots of libc functions in
libxunil, remove x86_64 dependency from libxunil, add malloc and all
required heap functions to libxunil and more syscall numbers, add a util
file to libxunil, add build scripts for libxunil and doomgeneric
This commit is contained in:
csd4ni3l
2026-04-05 20:12:59 +02:00
parent 1e899e2f97
commit ae3915147a
26 changed files with 785 additions and 219 deletions

1
assets/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
doomgeneric*

6
build_doomgeneric.sh Normal file
View File

@@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Build libxunil, then build doomgeneric against it and stage the resulting
# binary into assets/ so the kernel can embed it.
# Fail fast on any error, unset variable, or broken pipeline.
set -euo pipefail

bash build_libxunil.sh
cd user/apps/doomgeneric/doomgeneric
# -f: a missing ./build (e.g. first build) must not abort the script
rm -rf ./build
make -f Makefile.xunil
cp doomgeneric ../../../../assets/doomgeneric
cd ../../../..

2
build_libxunil.sh Normal file
View File

@@ -0,0 +1,2 @@
#!/usr/bin/env bash
# Build the libxunil static library (release profile) for linking into
# user-space applications such as doomgeneric.
# Fail fast on any error, unset variable, or broken pipeline.
set -euo pipefail

cd user/libxunil
cargo build --release

View File

@@ -1,12 +1,16 @@
use crate::driver::timer::TIMER;
#[cfg(target_arch = "x86_64")]
use core::alloc::GlobalAlloc;
use core::arch::asm;
pub use crate::arch::x86_64::paging::FRAME_ALLOCATOR_X86_64 as FRAME_ALLOCATOR;
use crate::driver::timer::TIMER;
use core::{alloc::GlobalAlloc, arch::asm};
use limine::response::{HhdmResponse, MemoryMapResponse};
#[cfg(target_arch = "x86_64")]
use crate::arch::x86_64::{
elf::run_elf_x86_64, heap::ALLOCATOR, init::init_x86_64, paging::XunilFrameAllocator,
elf::run_elf_x86_64,
heap::ALLOCATOR,
init::init_x86_64,
paging::{FRAME_ALLOCATOR_X86_64, XunilFrameAllocator},
usermode::enter_usermode_x86_64,
};
#[cfg(target_arch = "x86_64")]
@@ -16,7 +20,7 @@ use x86_64::structures::paging::OffsetPageTable;
pub fn init<'a>(
hhdm_response: &HhdmResponse,
memory_map_response: &'a MemoryMapResponse,
) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
) -> (OffsetPageTable<'static>) {
return init_x86_64(hhdm_response, memory_map_response);
}
@@ -26,8 +30,8 @@ pub fn enter_usermode(user_rip: u64, user_rsp: u64) {
}
#[cfg(target_arch = "x86_64")]
pub fn run_elf(entry_point: *const u8, frame_allocator: &mut XunilFrameAllocator) {
run_elf_x86_64(entry_point, frame_allocator);
pub fn run_elf(entry_point: *const u8, heap_base: u64) {
run_elf_x86_64(entry_point, heap_base);
}
pub fn get_allocator<'a>() -> &'static impl GlobalAlloc {

View File

@@ -7,24 +7,30 @@ use x86_64::{
};
use crate::{
arch::x86_64::{paging::XunilFrameAllocator, usermode::enter_usermode_x86_64},
arch::{
arch::FRAME_ALLOCATOR,
x86_64::{paging::XunilFrameAllocator, usermode::enter_usermode_x86_64},
},
task::{process::Process, scheduler::SCHEDULER},
};
pub fn run_elf_x86_64(entry_point: *const u8, frame_allocator: &mut XunilFrameAllocator) {
pub fn run_elf_x86_64(entry_point: *const u8, heap_base: u64) {
let stack_base: u64 = 0x0000_7fff_0000_0000;
let page_count = 3;
let page_size = 0x1000u64;
let stack_top = stack_base + (page_count as u64 * page_size);
let process_pid = SCHEDULER
.spawn_process(entry_point as u64, frame_allocator)
.spawn_process(entry_point as u64, stack_top, heap_base)
.unwrap();
SCHEDULER.with_process(process_pid, |process| {
process.address_space.use_address_space()
});
let stack_base: u64 = 0x0000_7fff_0000_0000;
let page_count = 3;
let page_size = 0x1000u64;
let mut frames: Vec<PhysFrame<Size4KiB>> = Vec::new();
let mut frame_allocator = FRAME_ALLOCATOR.lock();
for i in 0..page_count {
let frame = frame_allocator.allocate_frame().unwrap();
frames.push(frame);
@@ -43,16 +49,14 @@ pub fn run_elf_x86_64(entry_point: *const u8, frame_allocator: &mut XunilFrameAl
PageTableFlags::PRESENT
| PageTableFlags::WRITABLE
| PageTableFlags::USER_ACCESSIBLE,
frame_allocator,
&mut *frame_allocator,
)
.unwrap()
.flush();
});
}
}
drop(frame_allocator);
let stack_top = stack_base + (page_count as u64 * page_size);
let rsp = (stack_top & !0xF) - 8;
enter_usermode_x86_64(entry_point as u64, rsp);
SCHEDULER.run_process(process_pid, entry_point);
}

View File

@@ -1,5 +1,5 @@
use crate::arch::x86_64::paging::XunilFrameAllocator;
use crate::util::Locked;
use crate::arch::x86_64::paging::{FRAME_ALLOCATOR_X86_64, XunilFrameAllocator};
use crate::util::{Locked, serial_print};
use core::{
alloc::{GlobalAlloc, Layout},
ptr::null_mut,
@@ -138,6 +138,9 @@ unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
allocator.add_free_memory_region(alloc_end, excess_size);
}
}
drop(allocator);
alloc_start as *mut u8
} else {
null_mut()
@@ -153,10 +156,7 @@ unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
}
}
pub fn init_heap(
mapper: &mut OffsetPageTable,
frame_allocator: &mut XunilFrameAllocator,
) -> Result<(), MapToError<Size4KiB>> {
pub fn init_heap(mapper: &mut OffsetPageTable) -> Result<(), MapToError<Size4KiB>> {
let page_range = {
let page_start = VirtAddr::new(HEAP_START as u64);
let page_end = page_start + HEAP_SIZE as u64 - 1u64;
@@ -165,6 +165,8 @@ pub fn init_heap(
Page::range_inclusive(heap_start_page, heap_end_page)
};
let mut frame_allocator = FRAME_ALLOCATOR_X86_64.lock();
for page in page_range {
let frame = frame_allocator
.allocate_frame()
@@ -172,12 +174,14 @@ pub fn init_heap(
let flags = Flags::PRESENT | Flags::WRITABLE;
unsafe {
mapper
.map_to(page, frame, flags, frame_allocator)
.map_to(page, frame, flags, &mut *frame_allocator)
.map_err(|e| e)?
.flush();
}
}
drop(frame_allocator);
unsafe {
ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
}

View File

@@ -5,6 +5,7 @@ use crate::{
mouse::setup_mouse,
},
driver::mouse::MOUSE,
util::serial_print,
};
use limine::response::{HhdmResponse, MemoryMapResponse};
use x86_64::instructions::interrupts::without_interrupts;
@@ -16,21 +17,22 @@ const PIT_DIVISOR: u16 = (1_193_182_u32 / TIMER_PRECISION_HZ) as u16;
#[cfg(target_arch = "x86_64")]
use crate::arch::x86_64::{
heap::init_heap,
paging::{XunilFrameAllocator, initialize_paging},
paging::{FRAME_ALLOCATOR_X86_64, XunilFrameAllocator, initialize_paging},
};
#[cfg(target_arch = "x86_64")]
use x86_64::{VirtAddr, structures::paging::OffsetPageTable};
#[cfg(target_arch = "x86_64")]
pub fn memory_management_init<'a>(
pub fn memory_management_init(
hhdm_response: &HhdmResponse,
memory_map_response: &'a MemoryMapResponse,
) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
memory_map_response: &MemoryMapResponse,
) -> OffsetPageTable<'static> {
let physical_offset = VirtAddr::new(hhdm_response.offset());
let mapper = unsafe { initialize_paging(physical_offset) };
let frame_allocator =
XunilFrameAllocator::new(hhdm_response.offset(), memory_map_response.entries());
(mapper, frame_allocator)
let mut frame_allocator = FRAME_ALLOCATOR_X86_64.lock();
frame_allocator.initialize(hhdm_response.offset(), memory_map_response.entries());
drop(frame_allocator);
mapper
}
pub fn set_pit_interval() {
@@ -49,7 +51,7 @@ pub fn set_pit_interval() {
pub fn init_x86_64<'a>(
hhdm_response: &HhdmResponse,
memory_map_response: &'a MemoryMapResponse,
) -> (OffsetPageTable<'static>, XunilFrameAllocator<'a>) {
) -> OffsetPageTable<'static> {
load_gdt_x86_64();
init_idt_x86_64();
@@ -66,14 +68,13 @@ pub fn init_x86_64<'a>(
interrupts::enable();
let (mut mapper, mut frame_allocator) =
memory_management_init(hhdm_response, memory_map_response);
let mut mapper = memory_management_init(hhdm_response, memory_map_response);
init_heap(&mut mapper, &mut frame_allocator)
init_heap(&mut mapper)
.ok()
.expect("Failed to initalize heap");
MOUSE.set_status(mouse_status);
return (mapper, frame_allocator);
return mapper;
}

View File

@@ -1,3 +1,5 @@
use alloc::string::ToString;
use spin::mutex::Mutex;
use x86_64::{
PhysAddr, VirtAddr,
registers::control::Cr3,
@@ -6,6 +8,8 @@ use x86_64::{
use limine::memory_map::{Entry, EntryType};
use crate::util::{align_up, serial_print};
unsafe fn active_level_4_table(mem_offset: VirtAddr) -> &'static mut PageTable {
let (level_4_table, _) = Cr3::read();
@@ -23,38 +27,68 @@ pub unsafe fn initialize_paging(physical_memory_offset: VirtAddr) -> OffsetPageT
}
}
pub struct XunilFrameAllocator<'a> {
/// A run of usable physical RAM, copied out of Limine's memory map at init
/// time so the frame allocator owns its data and carries no lifetimes.
#[derive(Clone, Copy)]
struct UsableRegion {
    base: u64,   // physical base address (page-aligned by `initialize`)
    length: u64, // length in bytes
}

/// Placeholder used to const-initialize the fixed-size region table.
const EMPTY_REGION: UsableRegion = UsableRegion { base: 0, length: 0 };
pub struct XunilFrameAllocator {
pub hhdm_offset: u64,
memory_map: &'a [&'a Entry],
usable_regions: [UsableRegion; 1024],
usable_region_count: usize,
region_index: usize,
region_offset: usize,
}
impl<'a> XunilFrameAllocator<'a> {
pub fn new(hhdm_offset: u64, memory_map: &'a [&'a Entry]) -> Self {
let region_index = memory_map
.iter()
.position(|region| region.entry_type == EntryType::USABLE)
.unwrap();
impl XunilFrameAllocator {
pub const fn new() -> Self {
Self {
hhdm_offset,
memory_map,
region_index,
hhdm_offset: 0,
usable_regions: [EMPTY_REGION; 1024],
usable_region_count: 0,
region_index: 0,
region_offset: 0,
}
}
    /// Populate the allocator from Limine's memory map.
    ///
    /// Copies every USABLE entry into the fixed-size internal table so the
    /// allocator no longer borrows the bootloader response. Each region's
    /// base is aligned up to 4 KiB and the skipped prefix is subtracted from
    /// its length; regions smaller than one page after alignment are dropped.
    /// At most 1024 regions are recorded — any extra USABLE entries are
    /// silently ignored.
    pub fn initialize(&mut self, hhdm_offset: u64, memory_map: &[&Entry]) {
        let mut regions = [EMPTY_REGION; 1024];
        let mut count = 0usize;

        for region in memory_map.iter().copied() {
            if region.entry_type != EntryType::USABLE {
                continue;
            }

            if count < regions.len() && region.length >= 4096 {
                // Align the base to a page boundary and shrink the length by
                // however many bytes the alignment skipped.
                let aligned_base = align_up(region.base, 4096);
                let base_offset = aligned_base - region.base;
                let aligned_length = region.length.saturating_sub(base_offset);

                if aligned_length >= 4096 {
                    regions[count] = UsableRegion {
                        base: aligned_base,
                        length: aligned_length,
                    };
                    count += 1;
                }
            }
        }

        // Reset iteration state so allocation restarts from the first region.
        self.hhdm_offset = hhdm_offset;
        self.usable_regions = regions;
        self.usable_region_count = count;
        self.region_index = 0;
        self.region_offset = 0;
    }
}
unsafe impl<'a> FrameAllocator<Size4KiB> for XunilFrameAllocator<'a> {
unsafe impl FrameAllocator<Size4KiB> for XunilFrameAllocator {
fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
loop {
let region = self
.memory_map
.iter()
.filter(|region| region.entry_type == EntryType::USABLE)
.nth(self.region_index)?;
while self.region_index < self.usable_region_count {
let region = self.usable_regions[self.region_index];
let frame_count = region.length / 4096;
if self.region_offset < frame_count as usize {
@@ -66,5 +100,10 @@ unsafe impl<'a> FrameAllocator<Size4KiB> for XunilFrameAllocator<'a> {
self.region_index += 1;
self.region_offset = 0;
}
None
}
}
pub static FRAME_ALLOCATOR_X86_64: Mutex<XunilFrameAllocator> =
Mutex::new(XunilFrameAllocator::new());

View File

@@ -1,45 +1,41 @@
use core::ptr::null;
use alloc::boxed::Box;
use x86_64::structures::paging::OffsetPageTable;
use crate::{
arch::x86_64::paging::XunilFrameAllocator,
driver::{
elf::{
header::{
ET_DYN, ET_EXEC, ET_REL, Elf64Ehdr, Elf64Rel, Elf64Shdr, SHF_ALLOC, SHT_NOBITS,
SHT_REL,
},
program::load_program,
reloc::elf_do_reloc,
section::elf_sheader,
validation::validate_elf,
use crate::driver::{
elf::{
header::{
ET_DYN, ET_EXEC, ET_REL, Elf64Ehdr, Elf64Rel, Elf64Shdr, SHF_ALLOC, SHT_NOBITS, SHT_REL,
},
syscall::{malloc, memset},
program::load_program,
reloc::elf_do_reloc,
section::elf_sheader,
validation::validate_elf,
},
syscall::{malloc, memset},
};
pub fn load_file(
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
elf_bytes: &[u8],
) -> *const u8 {
pub fn load_file(mapper: &mut OffsetPageTable, elf_bytes: &[u8]) -> (*const u8, u64) {
// elf header size
if elf_bytes.len() < 64 {
return null();
return (null(), 0);
}
let elf_header: &Elf64Ehdr = unsafe { &*(elf_bytes.as_ptr() as *const Elf64Ehdr) };
let elf_header: Elf64Ehdr =
unsafe { core::ptr::read_unaligned(elf_bytes.as_ptr() as *const Elf64Ehdr) };
if !validate_elf(elf_header, elf_bytes.len()) {
return null();
if !validate_elf(&elf_header, elf_bytes.len()) {
return (null(), 0);
}
return match elf_header.e_type {
ET_EXEC => unsafe { load_program(frame_allocator, mapper, elf_header, elf_bytes, false) },
ET_DYN => unsafe { load_program(frame_allocator, mapper, elf_header, elf_bytes, true) }, // TODO
ET_REL => return null(),
_ => return null(),
let elf_header_ptr = elf_bytes.as_ptr() as *const Elf64Ehdr;
return match unsafe { elf_header.e_type } {
ET_EXEC => unsafe { load_program(mapper, elf_header_ptr, elf_bytes, false) },
ET_DYN => unsafe { load_program(mapper, elf_header_ptr, elf_bytes, true) },
ET_REL => return (null(), 0),
_ => return (null(), 0),
};
}
@@ -69,7 +65,7 @@ pub unsafe fn elf_load_stage1(hdr: *const Elf64Ehdr) {
// zero the memory
memset(mem, 0, section.sh_size as usize);
}
section.sh_offset = (mem.addr() + hdr.addr()) as u64;
section.sh_offset = mem.addr() as u64;
}
}

View File

@@ -12,7 +12,7 @@ use x86_64::{
};
use crate::{
arch::x86_64::paging::XunilFrameAllocator,
arch::arch::FRAME_ALLOCATOR,
driver::{
elf::{
header::{
@@ -40,12 +40,11 @@ pub fn get_vaddr(phdr: *const Elf64Phdr, load_bias: u64) -> *mut u8 {
}
pub unsafe fn load_program(
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
hdr: *const Elf64Ehdr,
elf_bytes: &[u8],
pie: bool,
) -> *const u8 {
) -> (*const u8, u64) {
let phdr = unsafe { elf_pheader(hdr) };
let phnum = unsafe { (*hdr).e_phnum };
let mut program_headers: Vec<*const Elf64Phdr> = Vec::new();
@@ -64,9 +63,23 @@ pub unsafe fn load_program(
if !pie {
for program_header in program_headers {
load_segment_to_memory(frame_allocator, mapper, program_header, elf_bytes, 0);
load_segment_to_memory(mapper, program_header, elf_bytes, 0);
}
return unsafe { (*hdr).e_entry as *const u8 };
let mut highest_seg = 0;
for i in 0..phnum {
let program_header = unsafe { phdr.add(i as usize) };
let seg_end = unsafe { (*program_header).p_vaddr + (*program_header).p_memsz };
if seg_end > highest_seg {
highest_seg = seg_end;
}
}
return (
unsafe { (*hdr).e_entry as *const u8 },
align_up(highest_seg as u64, 4096),
);
} else {
let base_address = 0x0000_0100_0000; // TODO: add per-process memory
let min_vaddr = align_down(
@@ -80,18 +93,23 @@ pub unsafe fn load_program(
let load_bias = base_address - min_vaddr;
let mut highest_seg = 0;
for i in 0..phnum {
let program_header = unsafe { phdr.add(i as usize) };
let seg_end =
unsafe { (*program_header).p_vaddr + (*program_header).p_memsz + load_bias };
if seg_end > highest_seg {
highest_seg = seg_end;
}
}
for program_header in program_headers {
load_segment_to_memory(
frame_allocator,
mapper,
program_header,
elf_bytes,
load_bias,
);
load_segment_to_memory(mapper, program_header, elf_bytes, load_bias);
}
if pt_dynamic_header.is_null() {
return null();
return (null(), 0);
}
parse_dyn(
@@ -101,7 +119,10 @@ pub unsafe fn load_program(
load_bias,
);
return unsafe { ((*hdr).e_entry + load_bias) as *const u8 };
return (
unsafe { ((*hdr).e_entry + load_bias) as *const u8 },
align_up(highest_seg as u64, 4096),
);
}
}
@@ -261,7 +282,6 @@ fn parse_dyn(
}
pub fn load_segment_to_memory(
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
phdr: *const Elf64Phdr,
elf_bytes: &[u8],
@@ -299,6 +319,8 @@ pub fn load_segment_to_memory(
let end_page: Page<Size4KiB> = Page::containing_address(VirtAddr::new(seg_end - 1));
let page_range = Page::range_inclusive(start_page, end_page);
let mut frame_allocator = FRAME_ALLOCATOR.lock();
for page in page_range {
let frame = frame_allocator
.allocate_frame()
@@ -306,13 +328,15 @@ pub fn load_segment_to_memory(
.expect("test");
unsafe {
mapper
.map_to(page, frame, flags, frame_allocator)
.map_to(page, frame, flags, &mut *frame_allocator)
.map_err(|e| e)
.expect("test")
.flush();
}
}
drop(frame_allocator);
unsafe {
core::ptr::copy_nonoverlapping(
elf_bytes.as_ptr().add(p_offset as usize),

View File

@@ -3,14 +3,6 @@ use crate::driver::elf::{
section::{elf_get_symval, elf_section},
};
pub trait ArchRelocate {
fn apply_relocation(&self, rela: &Elf64Rela, base: usize, sym_value: usize) -> Result<(), i8>;
fn setup_entry(&self, entry: usize, stack_top: usize, argv: &[&str]) -> !;
}
// TODO: make ET_REL work
pub unsafe fn elf_do_reloc(
hdr: *const Elf64Ehdr,
rel: *const Elf64Rel,

View File

@@ -3,14 +3,40 @@ use core::{
ptr::null_mut,
};
use crate::{
arch::arch::{get_allocator, infinite_idle},
driver::graphics::framebuffer::with_framebuffer,
println,
use x86_64::{
VirtAddr,
structures::paging::{FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB},
};
const SYS_EXIT: usize = 1;
const SYS_WRITE: usize = 60;
use crate::{
arch::arch::{FRAME_ALLOCATOR, get_allocator, infinite_idle},
driver::graphics::framebuffer::with_framebuffer,
println,
task::scheduler::SCHEDULER,
util::{align_up, serial_print},
};
// Linux x86_64 syscall numbers (arch/x86/entry/syscalls/syscall_64.tbl),
// kept ABI-compatible so ported userspace code can use the familiar values.
const READ: usize = 0;
const WRITE: usize = 1;
const OPEN: usize = 2;
const CLOSE: usize = 3;
const STAT: usize = 4;
const LSEEK: usize = 8;
const MMAP: usize = 9;
// Fixed: munmap is syscall 11 on Linux x86_64; the previous value 9
// collided with MMAP, making munmap undispatchable.
const MUNMAP: usize = 11;
const BRK: usize = 12;
const GETPID: usize = 39;
const FORK: usize = 57;
const EXECVE: usize = 59;
const EXIT: usize = 60;
const WAIT4: usize = 61;
const KILL: usize = 62;
const CHDIR: usize = 80;
const MKDIR: usize = 83;
const UNLINK: usize = 87;
const GETDENTS64: usize = 217;
const CLOCK_GETTIME: usize = 228;
const EXIT_GROUP: usize = 231;
pub unsafe fn malloc(size: usize, align: usize) -> *mut u8 {
let align = if align < 1 {
@@ -47,14 +73,84 @@ pub unsafe fn memset(ptr: *mut u8, val: u8, count: usize) {
unsafe { core::ptr::write_bytes(ptr, val, count) };
}
/// sbrk-style heap syscall: move the current process's program break by
/// `increment` bytes and return the *previous* break, or -1 on failure
/// (no current process, break would fall below the heap base, or break
/// would run into the reserved stack area).
///
/// Lock ordering: the scheduler lock is taken only to read the current pid
/// and is dropped before the frame allocator is locked and before
/// `with_process` re-locks the scheduler — do not reorder these.
pub unsafe fn sbrk(increment: isize) -> isize {
    serial_print("sbrk called");

    let mut scheduler = SCHEDULER.lock();
    if scheduler.current_process == -1 {
        // No process is running; nothing to grow.
        return -1;
    }
    let pid = scheduler.current_process as u64;
    // Release the scheduler lock before re-locking it via with_process below.
    drop(scheduler);

    // The frame-allocator guard is moved into the closure and dropped there,
    // before the process is mutated.
    let mut frame_allocator = FRAME_ALLOCATOR.lock();

    return SCHEDULER
        .with_process(pid as u64, |mut process| {
            let (heap_end, heap_base, stack_top) =
                (process.heap_end, process.heap_base, process.stack_top);
            let old = heap_end;
            // Compute the new break; on overflow/underflow keep the old one.
            let new = if increment >= 0 {
                old.checked_add(increment as u64)
            } else {
                let dec = increment.unsigned_abs() as u64;
                old.checked_sub(dec)
            }
            .unwrap_or(old);

            // The break may never shrink below the heap base…
            if new < heap_base {
                return -1;
            }
            // …nor grow into the 3-page user stack below stack_top.
            if new > stack_top - 3 * 4096 {
                return -1;
            }

            if new > old {
                // Growing: back the new range with fresh frames, mapped
                // user-accessible and non-executable.
                // NOTE(review): the range starts at align_up(old, 4096), so if
                // `old` is already page-aligned the first page is (re)mapped —
                // confirm map_to tolerates an already-mapped page, otherwise
                // the unwrap below panics on the second growth.
                let map_start = align_up(old, 4096);
                let map_end = align_up(new, 4096);
                for addr in (map_start..map_end).step_by(4096) {
                    let frame = frame_allocator.allocate_frame().unwrap();
                    // TODO: do not use x86_64 only
                    let virt_addr = VirtAddr::new(addr);
                    let page = Page::<Size4KiB>::containing_address(virt_addr);
                    unsafe {
                        process
                            .address_space
                            .mapper
                            .map_to(
                                page,
                                frame,
                                PageTableFlags::PRESENT
                                    | PageTableFlags::WRITABLE
                                    | PageTableFlags::USER_ACCESSIBLE
                                    | PageTableFlags::NO_EXECUTE,
                                &mut *frame_allocator,
                            )
                            .unwrap()
                            .flush();
                    }
                }
            }
            // Release the frame allocator before touching the process record.
            drop(frame_allocator);
            process.heap_end = new;
            serial_print("sbrk finished");
            // Classic sbrk contract: return the previous break.
            return old as isize;
        })
        .unwrap_or(-1);
}
#[unsafe(no_mangle)]
pub unsafe extern "C" fn syscall_dispatch(
num: usize,
arg0: usize,
arg1: usize,
arg2: usize,
arg0: isize,
arg1: isize,
arg2: isize,
) -> isize {
match num {
SYS_BRK => sbrk(arg0),
SYS_WRITE => {
let buf_ptr = arg1 as *const u8;
let len = arg2 as usize;

View File

@@ -24,6 +24,7 @@ use crate::driver::graphics::framebuffer::{init_framebuffer, with_framebuffer};
use crate::driver::serial::{ConsoleWriter, init_serial_console, with_serial_console};
use crate::driver::timer::TIMER;
use crate::userspace_stub::userspace_init;
use crate::util::serial_print;
/// Sets the base revision to the latest revision supported by the crate.
/// See specification for further info.
/// Be sure to mark all limine requests with #[used], otherwise they may be removed by the compiler.
@@ -98,11 +99,10 @@ unsafe extern "C" fn kmain() -> ! {
assert!(BASE_REVISION.is_supported());
let mut mapper;
let mut frame_allocator;
if let Some(hhdm_response) = HHDM_REQUEST.get_response() {
if let Some(memory_map_response) = MEMORY_MAP_REQUEST.get_response() {
(mapper, frame_allocator) = init(hhdm_response, memory_map_response);
mapper = init(hhdm_response, memory_map_response);
} else {
kernel_crash(); // Could not get required info from Limine's memory map.
}
@@ -124,7 +124,7 @@ unsafe extern "C" fn kmain() -> ! {
println!("Could not get date at boot. Will default to 0.")
}
userspace_init(&mut frame_allocator, &mut mapper)
userspace_init(&mut mapper)
}
#[panic_handler]

View File

@@ -4,17 +4,18 @@ use x86_64::{
structures::paging::{FrameAllocator, OffsetPageTable, PageTable, PhysFrame, Size4KiB},
};
use crate::{arch::x86_64::paging::XunilFrameAllocator, driver::syscall::memset};
use crate::{
arch::{arch::FRAME_ALLOCATOR, x86_64::paging::XunilFrameAllocator},
driver::syscall::memset,
};
pub struct AddressSpace {
cr3_frame: PhysFrame<Size4KiB>,
user_stack_top: VirtAddr,
pub mapper: OffsetPageTable<'static>,
heap_base: VirtAddr,
heap_end: VirtAddr,
}
impl AddressSpace {
pub fn new(frame_allocator: &mut XunilFrameAllocator) -> Option<AddressSpace> {
pub fn new() -> Option<AddressSpace> {
let mut frame_allocator = FRAME_ALLOCATOR.lock();
let new_pml4 = frame_allocator.allocate_frame()?;
unsafe {
@@ -47,12 +48,11 @@ impl AddressSpace {
)
};
drop(frame_allocator);
Some(AddressSpace {
cr3_frame: new_pml4,
user_stack_top: VirtAddr::new(0x0000_7fff_0000_0000),
mapper: mapper,
heap_base: VirtAddr::new(0x0),
heap_end: VirtAddr::new(0x0),
})
}

View File

@@ -11,6 +11,9 @@ pub struct Process {
pub pid: u64,
pub state: ProcessState,
// cpu_ctx: &[u8],
pub stack_top: u64,
pub heap_base: u64,
pub heap_end: u64,
pub address_space: AddressSpace,
pub user_entry: u64,
}
@@ -18,13 +21,18 @@ impl Process {
pub fn new(
pid: u64,
user_entry: u64,
frame_allocator: &mut XunilFrameAllocator,
stack_top: u64,
heap_base: u64,
heap_end: u64,
) -> Option<Process> {
let address_space = AddressSpace::new(frame_allocator)?;
let address_space = AddressSpace::new()?;
Some(Process {
pid,
stack_top,
state: ProcessState::Ready,
heap_base,
heap_end,
address_space,
user_entry,
})

View File

@@ -1,10 +1,15 @@
use alloc::collections::btree_map::BTreeMap;
use lazy_static::lazy_static;
use crate::{arch::x86_64::paging::XunilFrameAllocator, task::process::Process, util::Locked};
use crate::{
arch::{arch::enter_usermode, x86_64::paging::XunilFrameAllocator},
task::process::Process,
util::Locked,
};
pub struct Scheduler {
pub processes: BTreeMap<u64, Process>,
pub current_process: i64,
next_pid: u64,
}
@@ -12,26 +17,31 @@ impl Scheduler {
pub const fn new() -> Scheduler {
Scheduler {
processes: BTreeMap::new(),
current_process: -1,
next_pid: 1,
}
}
}
impl Locked<Scheduler> {
pub fn spawn_process(
&self,
entry_point: u64,
frame_allocator: &mut XunilFrameAllocator,
) -> Option<u64> {
pub fn spawn_process(&self, entry_point: u64, stack_top: u64, heap_base: u64) -> Option<u64> {
let mut guard = self.lock();
let pid = guard.next_pid;
guard.next_pid += 1;
let process = Process::new(pid, entry_point, frame_allocator)?;
let process = Process::new(pid, entry_point, stack_top, heap_base, heap_base)?;
guard.processes.insert(pid, process);
Some(pid)
}
pub fn run_process(&self, pid: u64, entry_point: *const u8) {
let mut guard = self.lock();
let stack_top = guard.processes[&pid].stack_top;
guard.current_process = pid as i64;
enter_usermode(entry_point as u64, (stack_top & !0xF) - 8);
}
pub fn with_process<F, R>(&self, index: u64, f: F) -> Option<R>
where
F: FnOnce(&mut Process) -> R,

View File

@@ -3,7 +3,7 @@ use x86_64::structures::paging::OffsetPageTable;
use crate::{
arch::{
arch::{run_elf, sleep},
arch::{FRAME_ALLOCATOR, run_elf, sleep},
x86_64::paging::XunilFrameAllocator,
},
driver::{
@@ -21,11 +21,16 @@ use crate::{
timer::TIMER,
},
print, println,
util::test_performance,
util::{serial_print, test_performance},
};
static CURSOR_BYTES: &[u8] = include_bytes!("../../assets/cursors/default.bmp");
static TEST_ELF_BYTES: &[u8] = include_bytes!("../../assets/helloworld.elf");
#[repr(C, align(8))]
struct AlignedElf([u8; include_bytes!("../../assets/doomgeneric").len()]);
static TEST_ELF: AlignedElf = AlignedElf(*include_bytes!("../../assets/doomgeneric"));
static TEST_ELF_BYTES: &[u8] = &TEST_ELF.0;
const BMP_HEADER_SIZE: usize = 138;
pub const CURSOR_W: usize = 24;
pub const CURSOR_H: usize = 24;
@@ -78,20 +83,17 @@ fn boot_animation() {
});
}
pub fn userspace_init(
frame_allocator: &mut XunilFrameAllocator,
mapper: &mut OffsetPageTable,
) -> ! {
pub fn userspace_init(mapper: &mut OffsetPageTable) -> ! {
// this is just a stub
boot_animation();
let entry_point = load_file(frame_allocator, mapper, TEST_ELF_BYTES);
let (entry_point, heap_base) = load_file(mapper, TEST_ELF_BYTES);
println!("Entry point: {:?}", entry_point);
with_framebuffer(|fb| fb.swap());
run_elf(entry_point, frame_allocator);
run_elf(entry_point, heap_base);
loop {}

View File

@@ -12,6 +12,7 @@ impl<A> Locked<A> {
}
}
#[allow(mismatched_lifetime_syntaxes)]
pub fn lock(&self) -> MutexGuard<A> {
self.inner.lock()
}
@@ -48,3 +49,9 @@ pub const fn align_up(addr: u64, align: u64) -> u64 {
}
}
}
/// Write `s` byte-by-byte to the COM1 serial data port (0x3F8) for debug
/// logging. x86-only (`out` instruction).
///
/// NOTE(review): does not poll the Line Status Register before writing, so
/// bytes may be dropped if the UART FIFO is full — confirm this is acceptable
/// for debug-only output.
pub fn serial_print(s: &str) {
    for byte in s.bytes() {
        // SAFETY: a port write to 0x3F8 only affects the (assumed present)
        // COM1 UART; it cannot violate Rust memory safety.
        unsafe { core::arch::asm!("out dx, al", in("dx") 0x3F8u16, in("al") byte) }
    }
}

View File

@@ -2,52 +2,33 @@
# It is not intended for manual editing.
version = 4
[[package]]
name = "bit_field"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e4b40c7323adcfc0a41c4b88143ed58346ff65a288fc144329c5c45e05d70c6"
[[package]]
name = "bitflags"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
[[package]]
name = "const_fn"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "413d67b29ef1021b4d60f4aa1e925ca031751e213832b4b1d588fae623c05c60"
[[package]]
name = "libxunil"
version = "0.1.0"
dependencies = [
"x86_64",
"spin",
]
[[package]]
name = "rustversion"
version = "1.0.22"
name = "lock_api"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "volatile"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "442887c63f2c839b346c192d047a7c87e73d0689c9157b00b53dcc27dd5ea793"
[[package]]
name = "x86_64"
version = "0.15.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7841fa0098ceb15c567d93d3fae292c49e10a7662b4936d5f6a9728594555ba"
checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
dependencies = [
"bit_field",
"bitflags",
"const_fn",
"rustversion",
"volatile",
"scopeguard",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "spin"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591"
dependencies = [
"lock_api",
]

View File

@@ -11,8 +11,8 @@ crate-type = ["staticlib"]
panic = "abort"
opt-level = "s"
[target.'cfg(target_arch = "x86_64")'.dependencies]
x86_64 = "0.15.4"
[dependencies]
spin = "0.10.0"
[profile.dev]
panic = "abort"

View File

@@ -1,8 +1,6 @@
use core::ptr::null_mut;
#[unsafe(no_mangle)]
/// Stub `fopen`: there is no VFS yet, so return a fixed fake non-NULL handle
/// so callers that only NULL-check the result can proceed.
///
/// NOTE(review): 0x10 must be treated as an opaque token by the other file
/// stubs and never dereferenced — confirm. (Removed a leftover `null_mut()`
/// line from the previous stub body.)
extern "C" fn fopen(path: *const u8, mode: *const u8) -> *mut u8 {
    0x10 as *mut u8
}
#[unsafe(no_mangle)]

94
user/libxunil/src/heap.rs Normal file
View File

@@ -0,0 +1,94 @@
use spin::mutex::Mutex;
use crate::util::align_up;
pub const HEAP_START: usize = 0x_4444_4444_0000;
pub const HEAP_SIZE: usize = 256 * 1024 * 1024; // 256 MiB
/// Header for one free region in the linked-list allocator.
///
/// The node is stored at the very start of the free region it describes, so
/// its own address doubles as the region's start address.
pub struct LinkedNode {
    pub size: usize,
    pub next: Option<&'static mut LinkedNode>,
}

impl LinkedNode {
    /// Create a detached node describing `size` free bytes.
    pub const fn new(size: usize) -> LinkedNode {
        LinkedNode { size, next: None }
    }

    /// First byte of the region — the address of the node itself.
    pub fn start_addr(&self) -> usize {
        let this: *const LinkedNode = self;
        this as usize
    }

    /// One past the last byte of the region.
    pub fn end_addr(&self) -> usize {
        self.size + self.start_addr()
    }
}
/// First-fit free-list allocator backing libxunil's user-space heap.
pub struct LinkedListAllocator {
    head: LinkedNode, // sentinel; `head.next` is the first real free region
}

impl LinkedListAllocator {
    /// An empty allocator; give it memory with `add_free_memory_region`.
    pub const fn new() -> LinkedListAllocator {
        Self {
            head: LinkedNode::new(0),
        }
    }

    /// Push the free region `[start, start + size)` onto the front of the
    /// list, writing a `LinkedNode` header at `start`.
    ///
    /// # Safety
    /// The region must be unused, writable memory that outlives the
    /// allocator and must not overlap any region already in the list.
    pub unsafe fn add_free_memory_region(&mut self, start: usize, size: usize) {
        assert_eq!(align_up(start, core::mem::align_of::<LinkedNode>()), start); // region must be aligned for the LinkedNode header
        assert!(size >= core::mem::size_of::<LinkedNode>()); // check if we have enough space for a LinkedNode
        let mut linked_node = LinkedNode::new(size);
        linked_node.next = self.head.next.take();
        let linked_node_ptr = start as *mut LinkedNode; // Treat the start memory region as a LinkedNode type
        unsafe {
            linked_node_ptr.write(linked_node); // write the data, very risky
            self.head.next = Some(&mut *linked_node_ptr);
        }
    }

    /// First-fit search: unlink and return the first region that can satisfy
    /// `size`/`align`, together with the computed allocation start address.
    pub fn find_region(
        &mut self,
        size: usize,
        align: usize,
    ) -> Option<(&'static mut LinkedNode, usize)> {
        let mut current = &mut self.head;
        while let Some(ref mut region) = current.next {
            if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
                // Splice the region out of the list before handing it out.
                let next = region.next.take();
                let ret = Some((current.next.take().unwrap(), alloc_start));
                current.next = next;
                return ret;
            } else {
                current = current.next.as_mut().unwrap();
            }
        }
        None
    }

    /// Check whether `region` can hold `size` bytes at alignment `align`;
    /// returns the aligned payload start address on success.
    ///
    /// NOTE(review): the start is aligned after skipping one `usize`
    /// (presumably a size header written by malloc), and `alloc_end` is
    /// measured from `alloc_start - size_of::<usize>()` — confirm this
    /// matches the header layout assumed by the GlobalAlloc impl.
    fn alloc_from_region(region: &LinkedNode, size: usize, align: usize) -> Result<usize, ()> {
        let alloc_start = align_up(region.start_addr() + core::mem::size_of::<usize>(), align);
        let alloc_end = (alloc_start - core::mem::size_of::<usize>())
            .checked_add(size)
            .ok_or(())?; // check for overflows
        if alloc_end > region.end_addr() {
            return Err(());
        }
        let excess_size = region.end_addr() - alloc_end;
        if excess_size > 0 && excess_size < core::mem::size_of::<LinkedNode>() {
            // if the remaining space is not enough for another LinkedNode, skip this region.
            return Err(());
        }
        Ok(alloc_start)
    }
}

/// Global user-space heap allocator state, protected by a spinlock.
pub static ALLOCATOR: Mutex<LinkedListAllocator> = Mutex::new(LinkedListAllocator::new());

View File

@@ -1,25 +1,27 @@
#![no_std]
#![feature(c_variadic)]
use core::ptr::null;
use core::ptr::{null, null_mut};
use crate::syscall::syscall3;
use crate::{
mem::{malloc, memcpy, memset},
syscall::{EXIT, WRITE, syscall3},
};
pub mod file;
pub mod heap;
pub mod mem;
pub mod syscall;
const SYS_EXIT: usize = 1;
const SYS_WRITE: usize = 60;
pub mod util;
#[unsafe(no_mangle)]
extern "C" fn write(fd: i64, buf: *const u8, count: usize) -> isize {
unsafe { syscall3(SYS_WRITE, fd as usize, buf as usize, count) }
unsafe { syscall3(WRITE, fd as usize, buf as usize, count) }
}
#[unsafe(no_mangle)]
extern "C" fn exit(code: i64) -> ! {
unsafe { syscall3(SYS_EXIT, code as usize, 0, 0) };
unsafe { syscall3(EXIT, code as usize, 0, 0) };
loop {}
}
@@ -53,7 +55,7 @@ extern "C" fn abs(n: i64) -> i64 {
#[unsafe(no_mangle)]
unsafe extern "C" fn printf(format: *const u8, args: ...) {
unsafe { syscall3(SYS_WRITE, 1, format as usize, strlen(format)) };
unsafe { syscall3(WRITE, 1, format as usize, strlen(format)) };
}
#[unsafe(no_mangle)]
@@ -61,6 +63,10 @@ extern "C" fn atoi(mut c: *const u8) -> i64 {
let mut value: i64 = 0;
let mut sign: i64 = 1;
unsafe {
while (*c).is_ascii_whitespace() {
c = c.add(1);
}
if (*c) == b'+' || (*c) == b'-' {
if *c == b'-' {
sign = -1;
@@ -77,9 +83,83 @@ extern "C" fn atoi(mut c: *const u8) -> i64 {
value * sign
}
/// 10^exp by repeated multiplication (no_std: `f64::powi` is unavailable).
#[inline]
fn pow10_i32(exp: i32) -> f64 {
    // Multiply by 10 or 0.1 |exp| times; unsigned_abs also handles i32::MIN.
    let factor = if exp >= 0 { 10.0 } else { 0.1 };
    let mut scale: f64 = 1.0;
    for _ in 0..exp.unsigned_abs() {
        scale *= factor;
    }
    scale
}

/// C `atof`: parse a decimal floating-point number — optional leading
/// whitespace and sign, integer part, optional `.fraction`, optional
/// `e`/`E` exponent — from a NUL-terminated byte string.
///
/// Parsing stops at the first unrecognized byte; there is no overflow or
/// underflow handling, matching the loose C `atof` contract.
/// (Fix: removed a stray `0.0` line left over from the old stub body.)
#[unsafe(no_mangle)]
extern "C" fn atof(mut c: *const u8) -> f64 {
    let mut sign: f64 = 1.0;
    unsafe {
        // Skip leading whitespace, then an optional sign.
        while (*c).is_ascii_whitespace() {
            c = c.add(1);
        }
        if (*c) == b'+' || (*c) == b'-' {
            if *c == b'-' {
                sign = -1.0;
            }
            c = c.add(1);
        }

        // Integer part.
        let mut int_part: i64 = 0;
        while (*c).is_ascii_digit() {
            int_part = int_part * 10 + ((*c) - b'0') as i64;
            c = c.add(1);
        }
        let mut result: f64 = int_part as f64;

        // Optional fractional part: accumulate digit * 10^-k.
        if *c == b'.' {
            c = c.add(1);
            let mut factor = 0.1;
            while (*c).is_ascii_digit() {
                result += ((*c) - b'0') as f64 * factor;
                factor *= 0.1;
                c = c.add(1);
            }
        }

        // Optional exponent part.
        if *c == b'e' || *c == b'E' {
            c = c.add(1);
            let mut exp_sign = 1;
            let mut exp_value = 0;
            if (*c) == b'+' || (*c) == b'-' {
                if *c == b'-' {
                    exp_sign = -1;
                }
                c = c.add(1);
            }
            while (*c).is_ascii_digit() {
                exp_value *= 10;
                exp_value += ((*c) - b'0') as i64;
                c = c.add(1);
            }
            result *= pow10_i32((exp_sign * exp_value) as i32);
        }

        sign * result
    }
}
pub fn compare_str(str_1: *const u8, str_2: *const u8, case: bool, n: usize) -> i32 {
@@ -129,22 +209,91 @@ unsafe extern "C" fn strncmp(str_1: *const u8, str_2: *const u8, n: i32) -> i32
}
/// libc `strncpy`: copies at most `n` bytes from `source` into `dest`.
///
/// If `source` is shorter than `n`, the remainder of `dest` is zero-filled;
/// if `source` is `n` bytes or longer, `dest` is NOT NUL-terminated
/// (standard C `strncpy` behavior). Returns `dest`.
///
/// # Safety
/// `dest` must be valid for `n` writable bytes and `source` must be a valid
/// NUL-terminated string (or at least `n` readable bytes).
#[unsafe(no_mangle)]
unsafe extern "C" fn strncpy(dest: *mut u8, source: *const u8, n: usize) -> *mut u8 {
    let mut i = 0usize;
    unsafe {
        while i < n {
            let b = *source.add(i);
            *dest.add(i) = b;
            i += 1;
            if b == 0 {
                // Hit the terminator early: pad the rest of `dest` with NULs.
                while i < n {
                    *dest.add(i) = 0;
                    i += 1;
                }
                break;
            }
        }
    }
    dest
}
#[unsafe(no_mangle)]
unsafe extern "C" fn strdup(s: *const u8) -> *const u8 {
null()
unsafe extern "C" fn strdup(s: *const u8) -> *mut u8 {
let len = strlen(s);
let memory = malloc((len + 1) as u64);
if memory.is_null() {
return null_mut();
}
memcpy(memory, s, len + 1);
memory
}
/// libc `strstr`: returns a pointer to the first occurrence of the
/// NUL-terminated `needle` within `haystack`, or null if absent.
///
/// An empty needle matches at the start of `haystack`; a null `haystack`
/// or `needle` yields null (defensive extension over the C standard).
///
/// # Safety
/// Both pointers, when non-null, must point to valid NUL-terminated strings.
#[unsafe(no_mangle)]
unsafe extern "C" fn strstr(haystack: *const u8, needle: *const u8) -> *const u8 {
    if haystack.is_null() || needle.is_null() {
        return null();
    }
    let mut h = haystack;
    unsafe {
        if *needle == 0 {
            return haystack;
        }
        while *h != 0 {
            if *h == *needle {
                // First byte matches: walk both strings in lockstep.
                let mut h2 = h;
                let mut n2 = needle;
                while *n2 != 0 && *h2 != 0 && *h2 == *n2 {
                    h2 = h2.add(1);
                    n2 = n2.add(1);
                }
                // Consumed the whole needle => match starts at `h`.
                if *n2 == 0 {
                    return h;
                }
            }
            h = h.add(1);
        }
    }
    null()
}
/// libc `strchr`: returns a pointer to the first occurrence of `ch` in the
/// NUL-terminated string `s`, or null if it does not occur.
///
/// Per the C standard the terminating NUL is considered part of the string,
/// so `strchr(s, 0)` returns a pointer to the terminator (fixed here — the
/// previous version returned null for that case). A null `s` yields null.
///
/// # Safety
/// `s`, when non-null, must point to a valid NUL-terminated string.
#[unsafe(no_mangle)]
unsafe extern "C" fn strchr(s: *const u8, ch: u8) -> *const u8 {
    if s.is_null() {
        return null();
    }
    let mut i = 0usize;
    unsafe {
        while *s.add(i) != 0 {
            if *s.add(i) == ch {
                return s.add(i);
            }
            i += 1;
        }
        // Reached the terminator: it counts as part of the string.
        if ch == 0 {
            return s.add(i);
        }
    }
    null()
}
@@ -157,7 +306,7 @@ unsafe extern "C" fn strrchr(s: *const u8, ch: u8) -> *const u8 {
while unsafe { *s.add(n) } != 0 {
n += 1;
}
unsafe { s.add(n + 1) }
return unsafe { s.add(n + 1) };
} else {
while unsafe { *s.add(n) } != 0 {
let cur_ch = unsafe { s.add(n) };

View File

@@ -1,27 +1,117 @@
use core::ptr::null_mut;
use crate::{
heap::ALLOCATOR,
syscall::{BRK, syscall1},
};
#[unsafe(no_mangle)]
extern "C" fn calloc(nitems: u64, size: u64) -> *mut u8 {
null_mut()
pub extern "C" fn sbrk(increment: i64) -> i64 {
unsafe { syscall1(BRK, increment as usize) as i64 }
}
/// Largest allocation request representable; used for overflow checking.
/// (Was the raw literal 18446744073709551615 — i.e. `u64::MAX`.)
const MAX_SIZE: u64 = u64::MAX;

/// libc `calloc`: allocates zero-initialized storage for `count` items of
/// `size` bytes each.
///
/// Returns null if `count * size` would overflow or if the underlying
/// allocation fails. A zero-byte request is bumped to 1 byte so a unique,
/// freeable pointer is still returned.
#[unsafe(no_mangle)]
pub extern "C" fn calloc(count: u64, size: u64) -> *mut u8 {
    // Reject count*size overflow before multiplying.
    if count != 0 && size > MAX_SIZE / count {
        return null_mut();
    }
    let mut total = count * size;
    if total == 0 {
        total = 1;
    }
    let ptr = malloc(total);
    if ptr.is_null() {
        return null_mut();
    }
    memset(ptr, 0, total as usize);
    ptr
}
#[unsafe(no_mangle)]
extern "C" fn free(ptr: *mut u8) {}
pub extern "C" fn free(ptr: *mut u8) {
if ptr.is_null() {
return;
}
unsafe {
let size = *(((ptr as usize) - core::mem::size_of::<usize>()) as *const usize);
#[unsafe(no_mangle)]
extern "C" fn malloc(size: u64) -> *mut u8 {
null_mut()
let mut allocator = ALLOCATOR.lock();
allocator
.add_free_memory_region(ptr as usize - core::mem::size_of::<usize>(), size as usize);
}
}
#[unsafe(no_mangle)]
extern "C" fn memcpy(dest_str: *mut u8, src_str: *const u8, n: u64) {}
pub extern "C" fn malloc(size: u64) -> *mut u8 {
let mut allocator = ALLOCATOR.lock();
#[unsafe(no_mangle)]
extern "C" fn memset(str: *mut u8, c: i64, n: u64) -> *mut u8 {
null_mut()
if let Some(region) = allocator.find_region(size as usize, 16) {
return region.1 as *mut u8;
} else {
let start_addr: i64 = sbrk(size as i64);
if start_addr == -1 {
return null_mut();
}
unsafe { allocator.add_free_memory_region(start_addr as usize, size as usize) };
drop(allocator);
malloc(size)
}
}
/// libc `memcpy`: copies `n` bytes from `src_str` to `dest_str` and returns
/// `dest_str`.
///
/// Uses `core::ptr::copy` (memmove semantics), so overlapping ranges are
/// handled correctly — strictly stronger than C's `memcpy` contract.
#[unsafe(no_mangle)]
pub extern "C" fn memcpy(dest_str: *mut u8, src_str: *const u8, n: usize) -> *mut u8 {
    // SAFETY: caller guarantees both pointers are valid for `n` bytes.
    unsafe { core::ptr::copy(src_str, dest_str, n) };
    dest_str
}
#[unsafe(no_mangle)]
pub extern "C" fn memset(str: *mut u8, c: i64, n: usize) -> *mut u8 {
unsafe {
core::ptr::write_bytes(str, c as u8, n);
}
str
}
/// libc `realloc`: resizes `ptr`'s allocation to `size` bytes.
///
/// C semantics implemented: `realloc(null, size)` acts as `malloc(size)`
/// (this case was previously missing and would have read a bogus header);
/// `realloc(ptr, 0)` frees and returns null; shrinking donates the tail
/// back to the free list; growing allocates fresh, copies, and frees.
///
/// Layout assumption (shared with `free`): a `usize` size header sits
/// directly before the user pointer.
///
/// # Safety
/// `ptr` must be null or a pointer previously returned by this allocator.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn realloc(ptr: *mut u8, size: usize) -> *mut u8 {
    unsafe {
        // realloc(null, n) == malloc(n), per the C standard.
        if ptr.is_null() {
            return malloc(size as u64);
        }
        // realloc(p, 0) frees and returns null.
        if size == 0 {
            free(ptr);
            return null_mut();
        }
        let header = ((ptr as usize) - core::mem::size_of::<usize>()) as *mut usize;
        let old_size = *header;
        if old_size == size {
            ptr
        } else if size < old_size {
            // Shrink in place: stamp the new size, free the tail.
            let mut allocator = ALLOCATOR.lock();
            let difference = old_size - size;
            let start = (ptr as usize) + size;
            *header = size;
            allocator.add_free_memory_region(start, difference);
            ptr
        } else {
            // Grow: allocate fresh, copy old contents, release old block.
            let new_ptr = malloc(size as u64);
            if new_ptr.is_null() {
                return null_mut();
            }
            core::ptr::copy_nonoverlapping(ptr, new_ptr, old_size);
            free(ptr);
            new_ptr
        }
    }
}

View File

@@ -1,3 +1,25 @@
// Syscall numbers, following the Linux x86_64 syscall table so ported
// software can keep its familiar numbering.
// NOTE(review): confirm the kernel-side dispatch table uses the same numbers.
pub const READ: usize = 0;
pub const WRITE: usize = 1;
pub const OPEN: usize = 2;
pub const CLOSE: usize = 3;
pub const STAT: usize = 4;
pub const LSEEK: usize = 8;
pub const MMAP: usize = 9;
// Fixed: munmap is 11 on the Linux x86_64 table; it previously duplicated
// MMAP's number (9), which would have made munmap requests perform an mmap.
pub const MUNMAP: usize = 11;
pub const BRK: usize = 12;
pub const GETPID: usize = 39;
pub const FORK: usize = 57;
pub const EXECVE: usize = 59;
pub const EXIT: usize = 60;
pub const WAIT4: usize = 61;
pub const KILL: usize = 62;
pub const CHDIR: usize = 80;
pub const MKDIR: usize = 83;
pub const UNLINK: usize = 87;
pub const GETDENTS64: usize = 217;
pub const CLOCK_GETTIME: usize = 228;
pub const EXIT_GROUP: usize = 231;
#[inline(always)]
pub unsafe fn syscall0(num: usize) -> isize {
let ret: isize;
@@ -13,6 +35,22 @@ pub unsafe fn syscall0(num: usize) -> isize {
ret
}
/// Issues a one-argument syscall through `int 0x80` (this kernel's
/// software-interrupt syscall gate).
///
/// Register ABI as used here: syscall number in `rax`, first argument in
/// `rdi`; the kernel's return value comes back in `rax`.
///
/// # Safety
/// The caller must pass a syscall number the kernel implements and an
/// argument valid for that syscall; the kernel may read or write memory
/// through `arg0`.
// NOTE(review): only `rax` is declared clobbered — confirm the kernel's
// int 0x80 handler preserves all other registers, or add them as clobbers.
#[inline(always)]
pub unsafe fn syscall1(num: usize, arg0: usize) -> isize {
    let ret: isize;
    unsafe {
        core::arch::asm!(
            "int 0x80",
            in("rax") num,
            in("rdi") arg0,
            lateout("rax") ret,
            // nostack: the asm does not push to or rely on the Rust stack.
            options(nostack)
        );
    }
    ret
}
#[inline(always)]
pub unsafe fn syscall3(num: usize, arg0: usize, arg1: usize, arg2: usize) -> isize {
let ret: isize;

20
user/libxunil/src/util.rs Normal file
View File

@@ -0,0 +1,20 @@
/// Rounds `addr` down to the nearest multiple of `align`.
///
/// # Panics
/// Panics if `align` is not a power of two.
#[inline]
pub const fn align_down(addr: usize, align: usize) -> usize {
    assert!(align.is_power_of_two(), "`align` must be a power of two");
    // Clearing the low bits of a power-of-two alignment rounds down.
    let mask = align - 1;
    addr & !mask
}
/// Rounds `addr` up to the nearest multiple of `align`.
///
/// # Panics
/// Panics if `align` is not a power of two, or if rounding up would
/// overflow `usize`.
#[inline]
pub const fn align_up(addr: usize, align: usize) -> usize {
    assert!(align.is_power_of_two(), "`align` must be a power of two");
    let align_mask = align - 1;
    if addr & align_mask == 0 {
        // Already aligned — nothing to do.
        addr
    } else {
        // Set the low bits, then step to the next multiple; checked so an
        // overflow panics instead of wrapping silently.
        match (addr | align_mask).checked_add(1) {
            Some(aligned) => aligned,
            None => panic!("attempt to add with overflow"),
        }
    }
}