Skip to content

Commit

Permalink
Implements stage 2 kernel heap activation (#960)
Browse files Browse the repository at this point in the history
  • Loading branch information
ultimaweapon authored Sep 1, 2024
1 parent 2528bef commit e2c7151
Show file tree
Hide file tree
Showing 7 changed files with 119 additions and 12 deletions.
5 changes: 5 additions & 0 deletions src/obkrnl/src/context/aarch64.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@
use super::Context;
use crate::proc::Thread;

/// Installs `cx` as the per-CPU context for the current CPU.
///
/// AArch64 implementation is not written yet; this stub keeps the interface in
/// sync with the x86_64 backend.
pub unsafe fn activate(_: *mut Context) {
    todo!();
}

/// Returns a raw pointer to the [`Thread`] running on the current CPU.
///
/// AArch64 implementation is not written yet; this stub keeps the interface in
/// sync with the x86_64 backend.
pub unsafe fn thread() -> *const Thread {
    todo!();
}
9 changes: 9 additions & 0 deletions src/obkrnl/src/context/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,15 @@ impl Context {
}
}

/// Returns the [`Thread`] that is currently running on this CPU.
pub fn thread() -> Arc<Thread> {
    // It does not matter if we are moved to a different CPU after we load Context::thread
    // because the value will be the same one: it represents the current thread.
    let td = unsafe { self::arch::thread() };

    // SAFETY: Arc::from_raw requires that td originated from Arc::into_raw (presumably how the
    // context stores it — confirm against Context construction). Bump the strong count first so
    // the Arc we return does not steal the context's own reference.
    unsafe { Arc::increment_strong_count(td) };
    unsafe { Arc::from_raw(td) }
}

/// # Safety
/// The only place this method is safe to call is in the CPU entry point. Once this method
/// return this instance must outlive the CPU lifetime and it must never be accessed via this
Expand Down
17 changes: 17 additions & 0 deletions src/obkrnl/src/context/x86_64.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
use super::Context;
use crate::proc::Thread;
use core::arch::asm;
use core::mem::offset_of;

/// Set kernel `GS` segment register to `cx`.
///
Expand Down Expand Up @@ -33,3 +35,18 @@ pub unsafe fn activate(cx: *mut Context) {
options(preserves_flags, nostack)
);
}

/// Returns a raw pointer to the [`Thread`] stored in the current CPU's [`Context`], read
/// GS-relative at the offset of the `thread` field.
pub unsafe fn thread() -> *const Thread {
    // SAFETY: "AtomicPtr<Thread>" is guaranteed to have the same bit pattern as "*mut Thread"
    // and "mov" is atomic if the memory has correct alignment.
    let mut td;

    asm!(
        "mov {out}, gs:[{off}]",
        off = in(reg) offset_of!(Context, thread),
        out = out(reg) td,
        options(pure, readonly, preserves_flags, nostack)
    );

    td
}
3 changes: 3 additions & 0 deletions src/obkrnl/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,9 @@ extern "C" fn _start(env: &'static BootEnv) -> ! {
}

fn main() -> ! {
// Activate stage 2 heap.
unsafe { KERNEL_HEAP.activate_stage2() };

loop {
#[cfg(target_arch = "x86_64")]
unsafe {
Expand Down
48 changes: 39 additions & 9 deletions src/obkrnl/src/malloc/mod.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
use self::stage1::Stage1;
use self::stage2::Stage2;
use alloc::boxed::Box;
use core::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use core::sync::atomic::{AtomicPtr, Ordering};
Expand Down Expand Up @@ -27,27 +28,56 @@ impl KernelHeap {
stage2: AtomicPtr::new(null_mut()),
}
}

/// Switches the kernel heap to the stage 2 allocator.
///
/// # Panics
/// If stage 2 is already activated.
pub fn activate_stage2(&self) {
    // Setup stage 2. The allocation itself is still served by stage 1 at this point.
    let stage2 = Box::into_raw(Box::new(Stage2::new()));

    // Activate. Release ordering publishes the fully initialized Stage2 to any CPU that loads
    // this pointer with Acquire; the failure ordering can be Relaxed because we only panic.
    assert!(self
        .stage2
        .compare_exchange(null_mut(), stage2, Ordering::Release, Ordering::Relaxed)
        .is_ok());
}
}

unsafe impl GlobalAlloc for KernelHeap {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let stage2 = self.stage2.load(Ordering::Relaxed);
impl Drop for KernelHeap {
fn drop(&mut self) {
let stage2 = self.stage2.load(Ordering::Acquire);

if stage2.is_null() {
// SAFETY: GlobalAlloc::alloc required layout to be non-zero.
self.stage1.alloc(layout)
} else {
todo!()
if !stage2.is_null() {
drop(unsafe { Box::from_raw(stage2) });
}
}
}

unsafe impl GlobalAlloc for KernelHeap {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// SAFETY: GlobalAlloc::alloc required layout to be non-zero.
self.stage2
.load(Ordering::Acquire)
.as_ref()
.map(|stage2| stage2.alloc(layout))
.unwrap_or_else(|| self.stage1.alloc(layout))
}

unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if self.stage1.is_owner(ptr) {
// SAFETY: GlobalAlloc::dealloc required ptr to be the same one that returned from our
// GlobalAlloc::alloc and layout to be the same one that passed to it.
self.stage1.dealloc(ptr, layout);
} else {
todo!()
// SAFETY: ptr is not owned by stage 1 so with the requirements of GlobalAlloc::dealloc
// the pr will be owned by stage 2 for sure.
self.stage2
.load(Ordering::Acquire)
.as_ref()
.unwrap()
.dealloc(ptr, layout);
}
}
}
34 changes: 33 additions & 1 deletion src/obkrnl/src/malloc/stage2.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,36 @@
use crate::context::Context;
use core::alloc::Layout;

/// Stage 2 kernel heap.
///
/// This stage allocates memory from a virtual memory management system. This struct is a merge of
/// the `malloc_type` and `malloc_type_internal` structures.
pub struct Stage2 {}

impl Stage2 {
    pub fn new() -> Self {
        Self {}
    }

    /// See `malloc` on the PS4 for a reference.
    ///
    /// # Safety
    /// `layout` must be nonzero.
    pub unsafe fn alloc(&self, _: Layout) -> *mut u8 {
        // Our implementation implies M_WAITOK (i.e. the caller is always willing to sleep), which
        // is never allowed in an interrupt context.
        let td = Context::thread();

        if td.active_interrupts() != 0 {
            panic!("heap allocation in an interrupt handler is not supported");
        }

        todo!()
    }

    /// # Safety
    /// `ptr` must be obtained with [`Self::alloc()`] and `layout` must be the same one that was
    /// passed to that method.
    pub unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
        todo!()
    }
}
15 changes: 13 additions & 2 deletions src/obkrnl/src/proc/thread.rs
Original file line number Diff line number Diff line change
@@ -1,12 +1,23 @@
/// Implementation of `thread` structure.
///
/// All threads **must** run to completion once execution has been started; otherwise resources
/// will be leaked if the thread is dropped while its execution is currently in the kernel space.
pub struct Thread {
    active_interrupts: usize, // td_intr_nesting_level
}

impl Thread {
    /// # Safety
    /// This function does not do anything except initialize the struct memory. It is the caller's
    /// responsibility to configure the thread after this so it has proper states and triggers
    /// necessary events.
    pub unsafe fn new_bare() -> Self {
        Self {
            active_interrupts: 0,
        }
    }

    /// Returns the current interrupt nesting level (`td_intr_nesting_level`); non-zero means the
    /// thread is executing inside an interrupt handler.
    pub fn active_interrupts(&self) -> usize {
        self.active_interrupts
    }
}

0 comments on commit e2c7151

Please sign in to comment.