diff --git a/src/core/src/vmm/hw/ram/builder.rs b/src/core/src/vmm/hw/ram/builder.rs
index 56c837a10..4f9455a7a 100644
--- a/src/core/src/vmm/hw/ram/builder.rs
+++ b/src/core/src/vmm/hw/ram/builder.rs
@@ -213,9 +213,11 @@ impl RamBuilder {
         devices: &DeviceTree,
         dynamic: ProgramHeader,
     ) -> Result<(Ram, RamMap), RamBuilderError> {
-        // Allocate page-map level-4 table. We use 4K 4-Level Paging here. Not sure how the PS4
-        // achieve 16K page because x86-64 does not support it. Maybe it is a special request from
-        // Sony to AMD?
+        // Allocate page-map level-4 table. We use 4K 4-Level Paging here. You may wonder about this
+        // because it seems like the page size on the PS4 is 16K. The truth is the PS4 emulates the
+        // 16K page size with 4K pages. You can check this yourself by looking at the
+        // acpi_install_wakeup_handler() function in the PS4 kernel and comparing it with the
+        // FreeBSD version. No idea why the PS4 chose to emulate a 16K page size.
         //
         // See Page Translation and Protection section on AMD64 Architecture Programmer's Manual
         // Volume 2 for how paging work in long-mode.
diff --git a/src/obkrnl/src/context/mod.rs b/src/obkrnl/src/context/mod.rs
index f8c8b770d..43d0c62aa 100644
--- a/src/obkrnl/src/context/mod.rs
+++ b/src/obkrnl/src/context/mod.rs
@@ -1,5 +1,6 @@
 use crate::proc::Thread;
 use alloc::sync::Arc;
+use core::sync::atomic::{AtomicPtr, Ordering};
 
 #[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
 #[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
@@ -18,14 +19,14 @@ mod arch;
 /// interupt before reading `pc_cpuid`, which can make the CPU missed some events from the other
 /// hardwares.
 pub struct Context {
-    thread: *const Thread, // pc_curthread
+    thread: AtomicPtr<Thread>, // pc_curthread
 }
 
 impl Context {
     /// See `pcpu_init` on the PS4 for a reference.
     pub fn new(td: Arc<Thread>) -> Self {
         Self {
-            thread: Arc::into_raw(td),
+            thread: AtomicPtr::new(Arc::into_raw(td).cast_mut()),
         }
     }
 
@@ -41,6 +42,6 @@ impl Context {
 
 impl Drop for Context {
     fn drop(&mut self) {
-        unsafe { drop(Arc::from_raw(self.thread)) };
+        unsafe { drop(Arc::from_raw(self.thread.load(Ordering::Relaxed))) };
     }
 }
diff --git a/src/obkrnl/src/malloc/mod.rs b/src/obkrnl/src/malloc/mod.rs
index c9c571d4a..29b584759 100644
--- a/src/obkrnl/src/malloc/mod.rs
+++ b/src/obkrnl/src/malloc/mod.rs
@@ -1,6 +1,11 @@
+use self::stage1::Stage1;
+use self::stage2::Stage2;
 use core::alloc::{GlobalAlloc, Layout};
-use core::ptr::{null_mut, NonNull};
-use talc::{ClaimOnOom, Span, Talc};
+use core::ptr::null_mut;
+use core::sync::atomic::{AtomicPtr, Ordering};
+
+mod stage1;
+mod stage2;
 
 /// Implementation of [`GlobalAlloc`] for objects belong to kernel space.
 ///
@@ -8,7 +13,8 @@ use talc::{ClaimOnOom, Span, Talc};
 /// arena). This stage will be primary used for bootstrapping the kernel. The second stage will be
 /// activated once the required subsystems has been initialized.
 pub struct KernelHeap {
-    stage1: spin::Mutex<Talc<ClaimOnOom>>,
+    stage1: Stage1,
+    stage2: AtomicPtr<Stage2>,
 }
 
 impl KernelHeap {
@@ -16,27 +22,32 @@ impl KernelHeap {
     /// The specified memory must be valid for reads and writes and it must be exclusively available
     /// to [`KernelHeap`].
     pub const unsafe fn new(stage1: *mut u8, len: usize) -> Self {
-        let stage1 = Talc::new(unsafe { ClaimOnOom::new(Span::from_base_size(stage1, len)) });
-
         Self {
-            stage1: spin::Mutex::new(stage1),
+            stage1: Stage1::new(stage1, len),
+            stage2: AtomicPtr::new(null_mut()),
         }
     }
 }
 
 unsafe impl GlobalAlloc for KernelHeap {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        // SAFETY: GlobalAlloc::alloc required layout to be non-zero.
-        self.stage1
-            .lock()
-            .malloc(layout)
-            .map(|v| v.as_ptr())
-            .unwrap_or(null_mut())
+        let stage2 = self.stage2.load(Ordering::Relaxed);
+
+        if stage2.is_null() {
+            // SAFETY: GlobalAlloc::alloc required layout to be non-zero.
+            self.stage1.alloc(layout)
+        } else {
+            todo!()
+        }
     }
 
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        // SAFETY: GlobalAlloc::dealloc required ptr to be the same one that returned from our
-        // GlobalAlloc::alloc and layout to be the same one that passed to it.
-        self.stage1.lock().free(NonNull::new_unchecked(ptr), layout);
+        if self.stage1.is_owner(ptr) {
+            // SAFETY: GlobalAlloc::dealloc required ptr to be the same one that returned from our
+            // GlobalAlloc::alloc and layout to be the same one that passed to it.
+            self.stage1.dealloc(ptr, layout);
+        } else {
+            todo!()
+        }
     }
 }
diff --git a/src/obkrnl/src/malloc/stage1.rs b/src/obkrnl/src/malloc/stage1.rs
new file mode 100644
index 000000000..786301e3e
--- /dev/null
+++ b/src/obkrnl/src/malloc/stage1.rs
@@ -0,0 +1,51 @@
+use core::alloc::Layout;
+use core::ptr::{null_mut, NonNull};
+use talc::{ClaimOnOom, Span, Talc};
+
+/// Stage 1 kernel heap.
+///
+/// This stage allocates memory from a static buffer (AKA arena).
+pub struct Stage1 {
+    engine: spin::Mutex<Talc<ClaimOnOom>>,
+    buf_ptr: *const u8,
+    buf_end: *const u8,
+}
+
+impl Stage1 {
+    /// # Safety
+    /// The specified memory must be valid for reads and writes and it must be exclusively available
+    /// to [`Stage1`].
+    pub const unsafe fn new(buf: *mut u8, len: usize) -> Self {
+        let engine = Talc::new(ClaimOnOom::new(Span::from_base_size(buf, len)));
+
+        Self {
+            engine: spin::Mutex::new(engine),
+            buf_ptr: buf,
+            buf_end: buf.add(len),
+        }
+    }
+
+    pub fn is_owner(&self, ptr: *const u8) -> bool {
+        ptr >= self.buf_ptr && ptr < self.buf_end
+    }
+
+    /// The returned pointer will always be within the buffer that was specified in
+    /// [`Self::new()`].
+    ///
+    /// # Safety
+    /// `layout` must be nonzero.
+    pub unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        self.engine
+            .lock()
+            .malloc(layout)
+            .map(|v| v.as_ptr())
+            .unwrap_or(null_mut())
+    }
+
+    /// # Safety
+    /// `ptr` must be obtained with [`Self::alloc()`] and `layout` must be the same one that was
+    /// passed to that method.
+    pub unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        self.engine.lock().free(NonNull::new_unchecked(ptr), layout);
+    }
+}
diff --git a/src/obkrnl/src/malloc/stage2.rs b/src/obkrnl/src/malloc/stage2.rs
new file mode 100644
index 000000000..899fffd13
--- /dev/null
+++ b/src/obkrnl/src/malloc/stage2.rs
@@ -0,0 +1,4 @@
+/// Stage 2 kernel heap.
+///
+/// This stage allocates memory from the virtual memory management system.
+pub struct Stage2 {}
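A note on where the two-stage heap is heading: `stage2` starts out null, so every allocation is served by the `stage1` boot arena; once the virtual memory system is up, a `Stage2` instance gets published through the `AtomicPtr`, at which point new allocations can be routed to it while `Stage1::is_owner()` keeps frees of arena-owned pointers going back to stage 1. Below is a minimal sketch of how the two `todo!()` branches might eventually be filled in. It assumes hypothetical `Stage2::alloc()`/`Stage2::dealloc()` methods and an `activate_stage2()` setter, none of which exist in this change (Stage2 is still an empty struct here).

    // Sketch only, not part of this diff: Stage2 has no methods yet, so alloc()
    // and dealloc() below are assumed to be backed by the VM system later, and
    // activate_stage2() is a hypothetical way to publish the stage 2 heap.
    impl KernelHeap {
        pub fn activate_stage2(&self, stage2: *mut Stage2) {
            // Once this store is visible, new allocations stop using the arena.
            self.stage2.store(stage2, Ordering::Relaxed);
        }
    }

    unsafe impl GlobalAlloc for KernelHeap {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            let stage2 = self.stage2.load(Ordering::Relaxed);

            if stage2.is_null() {
                // Stage 2 not published yet: serve the request from the boot arena.
                self.stage1.alloc(layout)
            } else {
                // Assumed API: allocate from the VM-backed stage 2 heap.
                (*stage2).alloc(layout)
            }
        }

        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            if self.stage1.is_owner(ptr) {
                // The pointer lies inside the stage 1 buffer, so it goes back to the
                // arena even after stage 2 has been activated.
                self.stage1.dealloc(ptr, layout);
            } else {
                // Assumed API: everything else must have come from stage 2.
                (*self.stage2.load(Ordering::Relaxed)).dealloc(ptr, layout);
            }
        }
    }

The `is_owner()` range check is what makes this dispatch workable: `GlobalAlloc::dealloc` gives no hint about which allocator produced a pointer, so the stage 1 buffer bounds recorded in `buf_ptr`/`buf_end` are the only way to route a free to the right stage.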