diff --git a/src/core/src/vmm/hw/console/mod.rs b/src/core/src/vmm/hw/console/mod.rs
index 92526d71b..f8f459ab6 100644
--- a/src/core/src/vmm/hw/console/mod.rs
+++ b/src/core/src/vmm/hw/console/mod.rs
@@ -14,9 +14,9 @@ pub struct Console {
 }
 
 impl Console {
-    pub fn new(addr: usize, vm_page_size: NonZero, event: VmmEventHandler) -> Self {
+    pub fn new(addr: usize, block_size: NonZero, event: VmmEventHandler) -> Self {
         let len = size_of::()
-            .checked_next_multiple_of(vm_page_size.get())
+            .checked_next_multiple_of(block_size.get())
             .and_then(NonZero::new)
             .unwrap();
 
diff --git a/src/core/src/vmm/hw/mod.rs b/src/core/src/vmm/hw/mod.rs
index 5a23f4680..b4278f44e 100644
--- a/src/core/src/vmm/hw/mod.rs
+++ b/src/core/src/vmm/hw/mod.rs
@@ -13,14 +13,14 @@ mod ram;
 
 pub fn setup_devices(
     start_addr: usize,
-    vm_page_size: NonZero,
+    block_size: NonZero,
     event: VmmEventHandler,
 ) -> DeviceTree {
     let mut map = BTreeMap::>::new();
 
     // Console.
     let addr = start_addr;
-    let console = Arc::new(Console::new(addr, vm_page_size, event));
+    let console = Arc::new(Console::new(addr, block_size, event));
 
     assert!(map.insert(console.addr(), console.clone()).is_none());
 
diff --git a/src/core/src/vmm/hw/ram/builder.rs b/src/core/src/vmm/hw/ram/builder.rs
index 4f9455a7a..cd620e016 100644
--- a/src/core/src/vmm/hw/ram/builder.rs
+++ b/src/core/src/vmm/hw/ram/builder.rs
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
 use super::{Ram, RamError};
 use crate::vmm::hw::DeviceTree;
 use crate::vmm::kernel::ProgramHeader;
@@ -18,8 +19,8 @@ pub struct RamBuilder {
 
 impl RamBuilder {
     /// # Safety
-    /// `vm_page_size` must be greater or equal host page size.
-    pub unsafe fn new(vm_page_size: NonZero) -> Result {
+    /// `block_size` must be greater than or equal to the host page size.
+    pub unsafe fn new(block_size: NonZero) -> Result {
         use std::io::Error;
 
         // Reserve memory range.
@@ -59,7 +60,7 @@ impl RamBuilder {
         };
 
         Ok(Self {
-            ram: Ram { mem, vm_page_size },
+            ram: Ram { mem, block_size },
             next: 0,
             kern: None,
             stack: None,
@@ -68,7 +69,7 @@ impl RamBuilder {
     }
 
     /// # Panics
-    /// - If `len` is not multiplied by VM page size.
+    /// - If `len` is not a multiple of the block size.
     /// - If called a second time.
     pub fn alloc_kernel(&mut self, len: NonZero) -> Result<&mut [u8], RamError> {
         assert!(self.kern.is_none());
@@ -83,7 +84,7 @@ impl RamBuilder {
     }
 
     /// # Panics
-    /// - If `len` is not multiplied by VM page size.
+    /// - If `len` is not a multiple of the block size.
     /// - If called a second time.
     pub fn alloc_stack(&mut self, len: NonZero) -> Result<(), RamError> {
         assert!(self.stack.is_none());
@@ -102,12 +103,12 @@ impl RamBuilder {
     /// If called a second time.
     pub fn alloc_args(&mut self, env: BootEnv) -> Result<(), RamError> {
         assert!(self.args.is_none());
-        assert!(align_of::() <= self.ram.vm_page_size.get());
+        assert!(align_of::() <= self.ram.block_size.get());
 
         // Allocate RAM for all arguments.
         let addr = self.next;
         let len = size_of::()
-            .checked_next_multiple_of(self.ram.vm_page_size.get())
+            .checked_next_multiple_of(self.ram.block_size.get())
             .and_then(NonZero::new)
             .unwrap();
         let args = unsafe { self.ram.alloc(addr, len)?.as_mut_ptr() };
@@ -210,6 +211,7 @@ impl RamBuilder {
 impl RamBuilder {
     pub fn build(
         mut self,
+        page_size: NonZero,
         devices: &DeviceTree,
         dynamic: ProgramHeader,
     ) -> Result<(Ram, RamMap), RamBuilderError> {
@@ -272,7 +274,7 @@ impl RamBuilder {
 
         // Relocate the kernel to virtual address.
         let map = RamMap {
-            page_size: self.ram.vm_page_size,
+            page_size,
             page_table,
             kern_paddr,
             kern_vaddr,
@@ -368,7 +370,7 @@ impl RamBuilder {
         // Get address and length.
         let addr = self.next;
         let len = (512usize * 8)
-            .checked_next_multiple_of(self.ram.vm_page_size.get())
+            .checked_next_multiple_of(self.ram.block_size.get())
             .and_then(NonZero::new)
             .unwrap();
 
@@ -392,11 +394,12 @@ impl RamBuilder {
     pub fn build(
         mut self,
+        page_size: NonZero,
         devices: &DeviceTree,
         dynamic: ProgramHeader,
     ) -> Result<(Ram, RamMap), RamBuilderError> {
         // Setup page tables.
-        let map = match self.ram.vm_page_size.get() {
+        let map = match page_size.get() {
             0x4000 => self.build_16k_page_tables(devices)?,
             _ => todo!(),
         };
 
@@ -410,8 +413,8 @@ impl RamBuilder {
     fn build_16k_page_tables(&mut self, devices: &DeviceTree) -> Result {
         // Allocate page table level 0.
         let page_table = self.next;
-        let len = self.ram.vm_page_size;
-        let l0t: &mut [usize; 2] = match unsafe { self.ram.alloc(page_table, len) } {
+        let len = self.ram.block_size;
+        let l0t: &mut [usize; 32] = match unsafe { self.ram.alloc(page_table, len) } {
             Ok(v) => unsafe { &mut *v.as_mut_ptr().cast() },
             Err(e) => return Err(RamBuilderError::AllocPageTableLevel0Failed(e)),
         };
@@ -477,7 +480,7 @@ impl RamBuilder {
 
     fn setup_16k_page_tables(
         &mut self,
-        l0t: &mut [usize; 2],
+        l0t: &mut [usize; 32],
         vaddr: usize,
         paddr: usize,
         len: usize,
@@ -558,10 +561,12 @@ impl RamBuilder {
     }
 
     fn alloc_16k_page_table(&mut self) -> Result<(*mut [usize; 2048], usize), RamError> {
-        // Get address and length. The page table is effectively the same size as page size
-        // (2048 * 8 = 16384).
+        // Get address and length.
         let addr = self.next;
-        let len = unsafe { NonZero::new_unchecked(0x4000) };
+        let len = (2048usize * 8)
+            .checked_next_multiple_of(self.ram.block_size.get())
+            .and_then(NonZero::new)
+            .unwrap();
 
         // Allocate.
         let tab = unsafe { self.ram.alloc(addr, len).map(|v| v.as_mut_ptr().cast())? };
diff --git a/src/core/src/vmm/hw/ram/mod.rs b/src/core/src/vmm/hw/ram/mod.rs
index 4c285eb6e..31303e77a 100644
--- a/src/core/src/vmm/hw/ram/mod.rs
+++ b/src/core/src/vmm/hw/ram/mod.rs
@@ -16,7 +16,7 @@ mod builder;
 /// RAM always started at address 0.
 pub struct Ram {
     mem: *mut u8,
-    vm_page_size: NonZero,
+    block_size: NonZero,
 }
 
 impl Ram {
@@ -31,14 +31,14 @@ impl Ram {
     }
 
     /// # Panics
-    /// If `addr` or `len` is not multiply by VM page size.
+    /// If `addr` or `len` is not a multiple of the block size.
     ///
     /// # Safety
     /// This method does not check if `addr` is already allocated. It is undefined behavior if
     /// `addr` + `len` is overlapped with the previous allocation.
     pub unsafe fn alloc(&self, addr: usize, len: NonZero) -> Result<&mut [u8], RamError> {
-        assert_eq!(addr % self.vm_page_size, 0);
-        assert_eq!(len.get() % self.vm_page_size, 0);
+        assert_eq!(addr % self.block_size, 0);
+        assert_eq!(len.get() % self.block_size, 0);
 
         if !addr.checked_add(len.get()).is_some_and(|v| v <= Self::SIZE) {
             return Err(RamError::InvalidAddr);
@@ -50,13 +50,13 @@ impl Ram {
     }
 
     /// # Panics
-    /// If `addr` or `len` is not multiply by VM page size.
+    /// If `addr` or `len` is not a multiple of the block size.
     ///
     /// # Safety
     /// Accessing the deallocated memory on the host after this will be undefined behavior.
     pub unsafe fn dealloc(&self, addr: usize, len: NonZero) -> Result<(), RamError> {
-        assert_eq!(addr % self.vm_page_size, 0);
-        assert_eq!(len.get() % self.vm_page_size, 0);
+        assert_eq!(addr % self.block_size, 0);
+        assert_eq!(len.get() % self.block_size, 0);
 
         if !addr.checked_add(len.get()).is_some_and(|v| v <= Self::SIZE) {
             return Err(RamError::InvalidAddr);
diff --git a/src/core/src/vmm/kernel/segment.rs b/src/core/src/vmm/kernel/segment.rs
index e0f3b822b..3b3c536a7 100644
--- a/src/core/src/vmm/kernel/segment.rs
+++ b/src/core/src/vmm/kernel/segment.rs
@@ -1,8 +1,17 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
 use std::fs::File;
 use std::io::Read;
 use std::iter::FusedIterator;
 use thiserror::Error;
 
+pub(crate) const PT_LOAD: u32 = 1;
+pub(crate) const PT_DYNAMIC: u32 = 2;
+pub(crate) const PT_NOTE: u32 = 4;
+pub(crate) const PT_PHDR: u32 = 6;
+pub(crate) const PT_GNU_EH_FRAME: u32 = 0x6474e550;
+pub(crate) const PT_GNU_STACK: u32 = 0x6474e551;
+pub(crate) const PT_GNU_RELRO: u32 = 0x6474e552;
+
 /// Iterator to enumerate ELF program headers.
 pub struct ProgramHeaders<'a> {
     file: &'a mut File,
diff --git a/src/core/src/vmm/mod.rs b/src/core/src/vmm/mod.rs
index 3f4bfe4ae..a96da1dfb 100644
--- a/src/core/src/vmm/mod.rs
+++ b/src/core/src/vmm/mod.rs
@@ -1,11 +1,14 @@
 // SPDX-License-Identifier: MIT OR Apache-2.0
 use self::hv::{Cpu, CpuExit, CpuIo, CpuStates, Hypervisor};
 use self::hw::{setup_devices, Device, DeviceContext, DeviceTree, Ram, RamBuilder, RamMap};
-use self::kernel::Kernel;
+use self::kernel::{
+    Kernel, PT_DYNAMIC, PT_GNU_EH_FRAME, PT_GNU_RELRO, PT_GNU_STACK, PT_LOAD, PT_NOTE, PT_PHDR,
+};
 use self::screen::Screen;
 use crate::error::RustError;
 use obconf::{BootEnv, Vm};
 use obvirt::console::MsgType;
+use std::cmp::max;
 use std::collections::BTreeMap;
 use std::error::Error;
 use std::ffi::{c_char, c_void, CStr};
@@ -88,7 +91,7 @@ pub unsafe extern "C" fn vmm_run(
 
         // Process the header.
         match hdr.p_type {
-            1 => {
+            PT_LOAD => {
                 if hdr.p_filesz > TryInto::::try_into(hdr.p_memsz).unwrap() {
                     *err = RustError::new(format!("invalid p_filesz on on PT_LOAD {index}"));
                     return null_mut();
@@ -96,7 +99,7 @@ pub unsafe extern "C" fn vmm_run(
                 segments.push(hdr);
             }
 
-            2 => {
+            PT_DYNAMIC => {
                 if dynamic.is_some() {
                     *err = RustError::new("multiple PT_DYNAMIC is not supported");
                     return null_mut();
@@ -104,7 +107,7 @@ pub unsafe extern "C" fn vmm_run(
                 dynamic = Some(hdr);
             }
 
-            4 => {
+            PT_NOTE => {
                 if note.is_some() {
                     *err = RustError::new("multiple PT_NOTE is not supported");
                     return null_mut();
@@ -112,7 +115,7 @@ pub unsafe extern "C" fn vmm_run(
                 note = Some(hdr);
             }
 
-            6 | 1685382480 | 1685382481 | 1685382482 => {}
+            PT_PHDR | PT_GNU_EH_FRAME | PT_GNU_STACK | PT_GNU_RELRO => {}
             v => {
                 *err = RustError::new(format!("unknown p_type {v} on program header {index}"));
                 return null_mut();
@@ -257,9 +260,7 @@ pub unsafe extern "C" fn vmm_run(
         }
     };
 
-    // TODO: Support any page size on the host. With page size the same as kernel or lower we don't
-    // need to keep track allocations in the RAM because any requested address from the kernel will
-    // always page-aligned on the host.
+    // Get page size on the host.
     let host_page_size = match get_page_size() {
         Ok(v) => v,
         Err(e) => {
@@ -268,11 +269,6 @@ pub unsafe extern "C" fn vmm_run(
         }
     };
 
-    if host_page_size > vm_page_size {
-        *err = RustError::new("your system using an unsupported page size");
-        return null_mut();
-    }
-
     // Get kernel memory size.
     let mut len = 0;
 
@@ -296,12 +292,13 @@ pub unsafe extern "C" fn vmm_run(
     }
 
     // Round kernel memory size.
+    let block_size = max(vm_page_size, host_page_size);
     let len = match len {
         0 => {
             *err = RustError::new("the kernel has PT_LOAD with zero length");
             return null_mut();
         }
-        v => match v.checked_next_multiple_of(vm_page_size.get()) {
+        v => match v.checked_next_multiple_of(block_size.get()) {
             Some(v) => NonZero::new_unchecked(v),
             None => {
                 *err = RustError::new("total size of PT_LOAD is too large");
@@ -311,7 +308,7 @@ pub unsafe extern "C" fn vmm_run(
     };
 
     // Setup RAM builder.
-    let mut ram = match RamBuilder::new(vm_page_size) {
+    let mut ram = match RamBuilder::new(block_size) {
         Ok(v) => v,
         Err(e) => {
             *err = RustError::wrap(e);
@@ -371,9 +368,10 @@ pub unsafe extern "C" fn vmm_run(
 
     // Allocate arguments.
     let event = VmmEventHandler { fp: event, cx };
-    let devices = Arc::new(setup_devices(Ram::SIZE, vm_page_size, event));
+    let devices = Arc::new(setup_devices(Ram::SIZE, block_size, event));
     let env = BootEnv::Vm(Vm {
         console: devices.console().addr(),
+        host_page_size,
     });
 
     if let Err(e) = ram.alloc_args(env) {
@@ -382,7 +380,7 @@ pub unsafe extern "C" fn vmm_run(
     }
 
     // Build RAM.
-    let (ram, map) = match ram.build(&devices, dynamic) {
+    let (ram, map) = match ram.build(vm_page_size, &devices, dynamic) {
         Ok(v) => v,
         Err(e) => {
             *err = RustError::with_source("couldn't build RAM", e);
diff --git a/src/obconf/src/env/vm.rs b/src/obconf/src/env/vm.rs
index 0f2d1f959..3ac27c03c 100644
--- a/src/obconf/src/env/vm.rs
+++ b/src/obconf/src/env/vm.rs
@@ -1,6 +1,10 @@
+use core::num::NonZero;
+
 /// Provides boot information when booting on a Virtual Machine.
 #[repr(C)]
 pub struct Vm {
     /// Physical address of one page for console memory.
     pub console: usize,
+    /// Page size on the host.
+    pub host_page_size: NonZero,
 }
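
The sizing rule this patch introduces, as a self-contained sketch: host-side allocations are rounded up to a block size that covers both the guest page size and the host page size, while the guest page size is still passed to build() separately for page-table layout. The free functions below are illustrative only, not the crate's RamBuilder API.

// Illustrative sketch of the block-size rounding; names are not the crate's API.
use std::num::NonZero;

// Host-side allocation granularity: whichever of the two page sizes is larger.
fn block_size(vm_page_size: NonZero<usize>, host_page_size: NonZero<usize>) -> NonZero<usize> {
    std::cmp::max(vm_page_size, host_page_size)
}

// Same rounding pattern as the diff: round up, then reject zero or overflow.
fn round_alloc(len: usize, block_size: NonZero<usize>) -> Option<NonZero<usize>> {
    len.checked_next_multiple_of(block_size.get())
        .and_then(NonZero::new)
}

fn main() {
    let vm = NonZero::new(0x4000usize).unwrap(); // 16 KiB guest pages
    let host = NonZero::new(0x1000usize).unwrap(); // 4 KiB host pages
    let bs = block_size(vm, host);

    assert_eq!(bs.get(), 0x4000); // allocations stay aligned for both sides
    assert_eq!(round_alloc(512 * 8, bs).map(NonZero::get), Some(0x4000));
    assert_eq!(round_alloc(0, bs), None); // zero-length requests are still rejected
}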
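The match-arm rename in src/core/src/vmm/mod.rs is a pure refactor: the decimal literals it replaces are exactly the PT_GNU_* values now named in segment.rs. A quick standalone check (constants copied locally for illustration):

// Equivalence check between the old magic numbers and the new named constants.
const PT_PHDR: u32 = 6;
const PT_GNU_EH_FRAME: u32 = 0x6474e550;
const PT_GNU_STACK: u32 = 0x6474e551;
const PT_GNU_RELRO: u32 = 0x6474e552;

fn main() {
    // The old match arm was `6 | 1685382480 | 1685382481 | 1685382482`.
    assert_eq!(PT_PHDR, 6);
    assert_eq!(PT_GNU_EH_FRAME, 1685382480);
    assert_eq!(PT_GNU_STACK, 1685382481);
    assert_eq!(PT_GNU_RELRO, 1685382482);
    println!("named constants match the old literals");
}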
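get_page_size() itself is not part of this diff. For context, a common way to query the host page size on Unix hosts looks like the sketch below; it assumes the `libc` crate and a hypothetical helper name, and the actual implementation used by vmm_run() may differ.

// Hypothetical, Unix-only sketch of a host page size query; not the crate's code.
use std::io::Error;
use std::num::NonZero;

fn get_host_page_size() -> Result<NonZero<usize>, Error> {
    // sysconf(_SC_PAGESIZE) reports the host's base page size in bytes.
    let n = unsafe { libc::sysconf(libc::_SC_PAGESIZE) };

    if n < 0 {
        return Err(Error::last_os_error());
    }

    NonZero::new(n as usize).ok_or_else(|| Error::other("page size reported as zero"))
}

fn main() -> Result<(), Error> {
    println!("host page size: {} bytes", get_host_page_size()?.get());
    Ok(())
}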