Initializes stage 2 kernel heap #959

Merged: 1 commit, Sep 1, 2024
8 changes: 5 additions & 3 deletions src/core/src/vmm/hw/ram/builder.rs
@@ -213,9 +213,11 @@ impl RamBuilder {
         devices: &DeviceTree,
         dynamic: ProgramHeader,
     ) -> Result<(Ram, RamMap), RamBuilderError> {
-        // Allocate page-map level-4 table. We use 4K 4-Level Paging here. Not sure how the PS4
-        // achieve 16K page because x86-64 does not support it. Maybe it is a special request from
-        // Sony to AMD?
+        // Allocate page-map level-4 table. We use 4K 4-Level Paging here. You may wonder about
+        // this because it seems like the page size on the PS4 is 16K. The truth is the PS4
+        // emulates the 16K page size with 4K pages. You can verify this yourself by looking at
+        // the acpi_install_wakeup_handler() function in the PS4 kernel and comparing it with the
+        // FreeBSD version. No idea why the PS4 chose to emulate 16K pages.
         //
         // See the Page Translation and Protection section of the AMD64 Architecture Programmer's
         // Manual Volume 2 for how paging works in long mode.
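For context on that comment: x86-64 hardware only offers 4K, 2M, and 1G page sizes, so emulating a 16K page reduces to managing four contiguous 4K translations as a unit. Below is a minimal sketch of that idea, not code from this PR; the map_4k callback is a hypothetical stand-in for whatever routine actually writes a 4K page-table entry.

// Hypothetical illustration: a "16K page" on x86-64 is just four
// contiguous 4K translations handled as one unit by software.
const PAGE_4K: usize = 0x1000;
const PAGE_16K: usize = 0x4000;

fn map_16k(map_4k: &mut dyn FnMut(usize, usize), virt: usize, phys: usize) {
    assert_eq!(virt % PAGE_16K, 0, "virtual address must be 16K-aligned");
    assert_eq!(phys % PAGE_16K, 0, "physical address must be 16K-aligned");

    // The MMU only ever sees 4K entries; the 16K granularity exists
    // purely as a software convention layered on top of them.
    for i in 0..(PAGE_16K / PAGE_4K) {
        map_4k(virt + i * PAGE_4K, phys + i * PAGE_4K);
    }
}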
7 changes: 4 additions & 3 deletions src/obkrnl/src/context/mod.rs
@@ -1,5 +1,6 @@
 use crate::proc::Thread;
 use alloc::sync::Arc;
+use core::sync::atomic::{AtomicPtr, Ordering};
 
 #[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
 #[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
@@ -18,14 +19,14 @@ mod arch;
 /// interrupt before reading `pc_cpuid`, which can make the CPU miss some events from the other
 /// hardware.
 pub struct Context {
-    thread: *const Thread, // pc_curthread
+    thread: AtomicPtr<Thread>, // pc_curthread
 }
 
 impl Context {
     /// See `pcpu_init` on the PS4 for a reference.
     pub fn new(td: Arc<Thread>) -> Self {
         Self {
-            thread: Arc::into_raw(td),
+            thread: AtomicPtr::new(Arc::into_raw(td).cast_mut()),
         }
     }
 
@@ -41,6 +42,6 @@ impl Context {
 
 impl Drop for Context {
     fn drop(&mut self) {
-        unsafe { drop(Arc::from_raw(self.thread)) };
+        unsafe { drop(Arc::from_raw(self.thread.load(Ordering::Relaxed))) };
     }
 }
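Switching pc_curthread from *const Thread to AtomicPtr<Thread> means the field can later be updated in place, for example on a context switch, without a data race. A rough standalone sketch of the access pattern follows; the set_current and current method names are illustrative, not from this PR, while Ordering::Relaxed matches what the diff itself uses.

use core::sync::atomic::{AtomicPtr, Ordering};

struct Thread; // stand-in for crate::proc::Thread

struct Context {
    thread: AtomicPtr<Thread>, // pc_curthread
}

impl Context {
    // Illustrative only: replacing the current thread is a single atomic
    // store, so a concurrent reader can never observe a torn pointer.
    fn set_current(&self, td: *mut Thread) {
        self.thread.store(td, Ordering::Relaxed);
    }

    fn current(&self) -> *mut Thread {
        self.thread.load(Ordering::Relaxed)
    }
}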
41 changes: 26 additions & 15 deletions src/obkrnl/src/malloc/mod.rs
@@ -1,42 +1,53 @@
+use self::stage1::Stage1;
+use self::stage2::Stage2;
 use core::alloc::{GlobalAlloc, Layout};
-use core::ptr::{null_mut, NonNull};
-use talc::{ClaimOnOom, Span, Talc};
+use core::ptr::null_mut;
+use core::sync::atomic::{AtomicPtr, Ordering};
 
+mod stage1;
+mod stage2;
+
 /// Implementation of [`GlobalAlloc`] for objects belonging to kernel space.
 ///
 /// This allocator has 2 stages. The first stage allocates memory from a static buffer (AKA an
 /// arena). This stage is primarily used for bootstrapping the kernel. The second stage will be
 /// activated once the required subsystems have been initialized.
 pub struct KernelHeap {
-    stage1: spin::Mutex<Talc<ClaimOnOom>>,
+    stage1: Stage1,
+    stage2: AtomicPtr<Stage2>,
 }
 
 impl KernelHeap {
     /// # Safety
     /// The specified memory must be valid for reads and writes and it must be exclusively
     /// available to [`KernelHeap`].
     pub const unsafe fn new(stage1: *mut u8, len: usize) -> Self {
-        let stage1 = Talc::new(unsafe { ClaimOnOom::new(Span::from_base_size(stage1, len)) });
-
         Self {
-            stage1: spin::Mutex::new(stage1),
+            stage1: Stage1::new(stage1, len),
+            stage2: AtomicPtr::new(null_mut()),
         }
     }
 }
 
 unsafe impl GlobalAlloc for KernelHeap {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        // SAFETY: GlobalAlloc::alloc requires layout to be non-zero.
-        self.stage1
-            .lock()
-            .malloc(layout)
-            .map(|v| v.as_ptr())
-            .unwrap_or(null_mut())
+        let stage2 = self.stage2.load(Ordering::Relaxed);
+
+        if stage2.is_null() {
+            // SAFETY: GlobalAlloc::alloc requires layout to be non-zero.
+            self.stage1.alloc(layout)
+        } else {
+            todo!()
+        }
     }
 
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        // SAFETY: GlobalAlloc::dealloc requires ptr to be the same one returned from our
-        // GlobalAlloc::alloc and layout to be the same one that was passed to it.
-        self.stage1.lock().free(NonNull::new_unchecked(ptr), layout);
+        if self.stage1.is_owner(ptr) {
+            // SAFETY: GlobalAlloc::dealloc requires ptr to be the same one returned from our
+            // GlobalAlloc::alloc and layout to be the same one that was passed to it.
+            self.stage1.dealloc(ptr, layout);
+        } else {
+            todo!()
+        }
     }
 }
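The diff does not show how KernelHeap gets installed, but since new() is a const fn, it is presumably constructed in a static initializer and registered as the global allocator. A plausible wiring sketch follows; the ARENA and HEAP names and the 1 MiB size are illustrative assumptions, not part of this PR.

use core::ptr::addr_of_mut;

// Hypothetical wiring: stage 1 draws from a static arena, so the heap
// can be built in a const context and serve allocations before any
// other subsystem is up.
static mut ARENA: [u8; 1024 * 1024] = [0; 1024 * 1024];

#[global_allocator]
static HEAP: KernelHeap = unsafe { KernelHeap::new(addr_of_mut!(ARENA).cast(), 1024 * 1024) };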
51 changes: 51 additions & 0 deletions src/obkrnl/src/malloc/stage1.rs
@@ -0,0 +1,51 @@
+use core::alloc::Layout;
+use core::ptr::{null_mut, NonNull};
+use talc::{ClaimOnOom, Span, Talc};
+
+/// Stage 1 kernel heap.
+///
+/// This stage allocates memory from a static buffer (AKA an arena).
+pub struct Stage1 {
+    engine: spin::Mutex<Talc<ClaimOnOom>>,
+    buf_ptr: *const u8,
+    buf_end: *const u8,
+}
+
+impl Stage1 {
+    /// # Safety
+    /// The specified memory must be valid for reads and writes and it must be exclusively
+    /// available to [`Stage1`].
+    pub const unsafe fn new(buf: *mut u8, len: usize) -> Self {
+        let engine = Talc::new(ClaimOnOom::new(Span::from_base_size(buf, len)));
+
+        Self {
+            engine: spin::Mutex::new(engine),
+            buf_ptr: buf,
+            buf_end: buf.add(len),
+        }
+    }
+
+    pub fn is_owner(&self, ptr: *const u8) -> bool {
+        ptr >= self.buf_ptr && ptr < self.buf_end
+    }
+
+    /// The returned pointer will always be within the buffer that was specified in
+    /// [`Self::new()`].
+    ///
+    /// # Safety
+    /// `layout` must be nonzero.
+    pub unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        self.engine
+            .lock()
+            .malloc(layout)
+            .map(|v| v.as_ptr())
+            .unwrap_or(null_mut())
+    }
+
+    /// # Safety
+    /// `ptr` must be a pointer obtained from [`Self::alloc()`] and `layout` must be the same one
+    /// that was passed to that method.
+    pub unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        self.engine.lock().free(NonNull::new_unchecked(ptr), layout);
+    }
+}
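The is_owner range check is what lets KernelHeap::dealloc route a free to the right stage: every pointer stage 1 hands out falls inside its arena, so anything outside it must belong to stage 2. A short usage sketch, with the BUF arena and stage1_demo name invented for illustration:

use core::alloc::Layout;
use core::ptr::addr_of_mut;

static mut BUF: [u8; 4096] = [0; 4096];

// Illustrative only: exercising Stage1 directly.
unsafe fn stage1_demo() {
    let heap = Stage1::new(addr_of_mut!(BUF).cast(), 4096);
    let layout = Layout::from_size_align(64, 8).unwrap();
    let ptr = heap.alloc(layout);

    // Anything stage 1 hands out lies inside its arena, which is exactly
    // what KernelHeap::dealloc relies on to pick a stage.
    assert!(!ptr.is_null() && heap.is_owner(ptr));
    heap.dealloc(ptr, layout);
}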
4 changes: 4 additions & 0 deletions src/obkrnl/src/malloc/stage2.rs
@@ -0,0 +1,4 @@
+/// Stage 2 kernel heap.
+///
+/// This stage allocates memory from the virtual memory management system.
+pub struct Stage2 {}
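Stage 2 is only a stub here, and both todo!() paths above depend on a Stage2 eventually being published into the stage2 AtomicPtr. One way that activation could look, purely a guess at a future API rather than anything in this PR:

use alloc::boxed::Box;
use core::sync::atomic::Ordering;

impl KernelHeap {
    // Hypothetical future method: once the VM subsystem is ready, box a
    // Stage2 (the Box itself is served by stage 1) and publish it.
    // Release ordering ensures other CPUs see a fully initialized Stage2
    // when they load the pointer.
    pub fn activate_stage2(&self, stage2: Stage2) {
        let ptr = Box::into_raw(Box::new(stage2));
        self.stage2.store(ptr, Ordering::Release);
    }
}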