Skip to content

Commit

Permalink
Prevents the kernel from crashing the VMM (#1051)
Browse files Browse the repository at this point in the history
  • Loading branch information
ultimaweapon authored Oct 20, 2024
1 parent 9df9f3b commit 2cbd609
Show file tree
Hide file tree
Showing 5 changed files with 146 additions and 49 deletions.
45 changes: 34 additions & 11 deletions gui/src/vmm/hw/console/context.rs
Original file line number Diff line number Diff line change
@@ -1,18 +1,19 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
use super::Console;
use crate::vmm::hv::{Cpu, CpuExit, CpuIo, Hypervisor};
use crate::vmm::hw::{read_bin, read_u8, read_usize, DeviceContext, MmioError};
use crate::vmm::hw::{read_ptr, read_u8, read_usize, DeviceContext, MmioError};
use crate::vmm::VmmEvent;
use obconf::{ConsoleMemory, ConsoleType};
use std::error::Error;
use std::mem::offset_of;
use std::num::NonZero;
use thiserror::Error;

/// Implementation of [`DeviceContext`].
pub struct Context<'a, H> {
dev: &'a Console,
hv: &'a H,
msg_len: usize,
msg_len: Option<NonZero<usize>>,
msg: Vec<u8>,
}

Expand All @@ -21,7 +22,7 @@ impl<'a, H: Hypervisor> Context<'a, H> {
Self {
dev,
hv,
msg_len: 0,
msg_len: None,
msg: Vec::new(),
}
}
Expand All @@ -33,13 +34,26 @@ impl<'a, H: Hypervisor, C: Cpu> DeviceContext<C> for Context<'a, H> {
let off = exit.addr() - self.dev.addr;

if off == offset_of!(ConsoleMemory, msg_len) {
self.msg_len = read_usize(exit).map_err(|e| ExecError::ReadFailed(off, e))?;
self.msg_len = read_usize(exit)
.map_err(|e| ExecError::ReadFailed(off, e))
.and_then(|v| NonZero::new(v).ok_or(ExecError::InvalidLen))
.map(Some)?;
} else if off == offset_of!(ConsoleMemory, msg_addr) {
let data =
read_bin(exit, self.msg_len, self.hv).map_err(|e| ExecError::ReadFailed(off, e))?;
// We don't need to check whether the length is too large here. read_ptr returns only
// allocated memory, which prevents an invalid length automatically.
let len = self.msg_len.take().ok_or(ExecError::InvalidSequence)?;
let data = read_ptr(exit, len, self.hv).map_err(|e| ExecError::ReadFailed(off, e))?;

self.msg.extend_from_slice(data);
self.msg
.extend_from_slice(unsafe { std::slice::from_raw_parts(data.as_ptr(), len.get()) });
} else if off == offset_of!(ConsoleMemory, commit) {
// Check if state valid.
if self.msg_len.is_some() || self.msg.is_empty() {
return Err(Box::new(ExecError::InvalidSequence));
} else if std::str::from_utf8(&self.msg).is_err() {
return Err(Box::new(ExecError::InvalidMsg));
}

// Parse data.
let commit = read_u8(exit).map_err(|e| ExecError::ReadFailed(off, e))?;
let ty: ConsoleType = commit
Expand All @@ -64,15 +78,24 @@ impl<'a, H: Hypervisor, C: Cpu> DeviceContext<C> for Context<'a, H> {
}
}

/// Represents an error when [`Context::mmio()`] fails.
#[derive(Debug, Error)]
enum ExecError {
    // The guest wrote to an offset that does not map to any ConsoleMemory field.
    #[error("unknown field at offset {0:#x}")]
    UnknownField(usize),

    // Reading the guest-supplied data for the field at this offset failed.
    #[error("couldn't read data for offset {0:#x}")]
    ReadFailed(usize, #[source] MmioError),

    // The guest wrote a zero message length.
    #[error("invalid message length")]
    InvalidLen,

    // The committed message is not valid UTF-8.
    #[error("invalid message")]
    InvalidMsg,

    // The commit value does not map to a known ConsoleType.
    #[error("{0:#x} is not a valid commit")]
    InvalidCommit(u8),

    // The guest wrote the fields out of order (e.g. msg_addr without a preceding
    // msg_len, or commit without any message data).
    #[error("invalid operation sequence")]
    InvalidSequence,
}
20 changes: 12 additions & 8 deletions gui/src/vmm/hw/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ pub use self::console::*;
pub use self::vmm::*;

use super::hv::{Cpu, CpuExit, CpuIo, Hypervisor, IoBuf};
use super::ram::LockedAddr;
use super::VmmEventHandler;
use std::collections::BTreeMap;
use std::error::Error;
Expand Down Expand Up @@ -61,11 +62,11 @@ fn read_usize(exit: &mut impl CpuIo) -> Result<usize, MmioError> {
.map_err(|_| MmioError::InvalidData)
}

fn read_bin<'b>(
exit: &'b mut impl CpuIo,
len: usize,
hv: &impl Hypervisor,
) -> Result<&'b [u8], MmioError> {
fn read_ptr<'a>(
exit: &mut impl CpuIo,
len: NonZero<usize>,
hv: &'a impl Hypervisor,
) -> Result<LockedAddr<'a>, MmioError> {
// Get data.
let buf = match exit.buffer() {
IoBuf::Write(v) => v,
Expand All @@ -82,9 +83,9 @@ fn read_bin<'b>(
.map_err(|e| MmioError::TranslateVaddrFailed(vaddr, Box::new(e)))?;

// Get data.
let data = unsafe { hv.ram().host_addr().add(paddr) };

Ok(unsafe { std::slice::from_raw_parts(data, len) })
hv.ram()
.lock(paddr, len)
.ok_or(MmioError::InvalidAddr { vaddr, paddr })
}

/// Contains all virtual devices (except RAM) for the VM.
Expand Down Expand Up @@ -169,4 +170,7 @@ enum MmioError {

#[error("couldn't translate {0:#x} to physical address")]
TranslateVaddrFailed(usize, #[source] Box<dyn Error>),

#[error("address {vaddr:#x} ({paddr:#x}) is not allocated")]
InvalidAddr { vaddr: usize, paddr: usize },
}
4 changes: 2 additions & 2 deletions gui/src/vmm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use self::hw::{setup_devices, Device};
use self::kernel::{
Kernel, PT_DYNAMIC, PT_GNU_EH_FRAME, PT_GNU_RELRO, PT_GNU_STACK, PT_LOAD, PT_NOTE, PT_PHDR,
};
use self::ram::Ram;
use self::ram::{Ram, RamBuilder};
use self::screen::Screen;
use crate::debug::DebugClient;
use crate::error::RustError;
Expand Down Expand Up @@ -352,7 +352,7 @@ pub unsafe extern "C" fn vmm_start(

// Map the kernel.
let feats = hv.cpu_features().clone();
let mut ram = hv.ram_mut().builder();
let mut ram = RamBuilder::new(hv.ram_mut());
let kern = match ram.alloc_kernel(len) {
Ok(v) => v,
Err(e) => {
Expand Down
2 changes: 1 addition & 1 deletion gui/src/vmm/ram/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ pub struct RamBuilder<'a> {
}

impl<'a> RamBuilder<'a> {
pub(super) fn new(ram: &'a mut Ram) -> Self {
pub fn new(ram: &'a mut Ram) -> Self {
Self {
ram,
next: 0,
Expand Down
124 changes: 97 additions & 27 deletions gui/src/vmm/ram/mod.rs
Original file line number Diff line number Diff line change
@@ -1,31 +1,38 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
pub use self::builder::*;

use std::collections::BTreeSet;
use std::io::Error;
use std::num::NonZero;
use std::sync::{Mutex, MutexGuard};
use thiserror::Error;

pub use self::builder::*;

mod builder;

/// Represents main memory of the PS4.
///
/// This struct immediately reserves a range of host memory for its full size but does not
/// commit any part of it until there is an allocation request, so actual host memory usage
/// depends on what is currently allocated.
///
/// RAM always starts at address 0.
pub struct Ram {
    mem: *mut u8,
    len: NonZero<usize>,
    block_size: NonZero<usize>,
    // Start addresses of committed blocks. Guarded by a mutex so the allocation state can
    // be frozen (see `lock`) while a pointer into RAM is in use.
    allocated: Mutex<BTreeSet<usize>>,
}

impl Ram {
/// # Panics
/// If `len` is not a multiple of `block_size`.
///
/// # Safety
/// `block_size` must be greater than or equal to the host page size.
pub unsafe fn new(len: NonZero<usize>, block_size: NonZero<usize>) -> Result<Self, Error> {
use std::io::Error;

assert_eq!(len.get() % block_size, 0);

// Reserve memory range.
#[cfg(unix)]
let mem = {
Expand Down Expand Up @@ -66,6 +73,7 @@ impl Ram {
mem,
len,
block_size,
allocated: Mutex::default(),
})
}

Expand All @@ -77,30 +85,36 @@ impl Ram {
self.len
}

pub fn builder(&mut self) -> RamBuilder {
RamBuilder::new(self)
}

/// # Panics
/// If `addr` or `len` is not a multiple of the block size.
///
/// # Safety
/// This method does not check if `addr` is already allocated. It is undefined behavior if
/// `addr` + `len` is overlapped with the previous allocation.
pub unsafe fn alloc(&self, addr: usize, len: NonZero<usize>) -> Result<&mut [u8], RamError> {
pub fn alloc(&self, addr: usize, len: NonZero<usize>) -> Result<&mut [u8], RamError> {
assert_eq!(addr % self.block_size, 0);
assert_eq!(len.get() % self.block_size, 0);

if !addr
.checked_add(len.get())
.is_some_and(|v| v <= self.len.get())
{
// Check if the requested range valid.
let end = addr.checked_add(len.get()).ok_or(RamError::InvalidAddr)?;

if end > self.len.get() {
return Err(RamError::InvalidAddr);
}

// Check if the requested range already allocated.
let mut allocated = self.allocated.lock().unwrap();

if allocated.range(addr..end).next().is_some() {
return Err(RamError::InvalidAddr);
}

Self::commit(self.mem.add(addr), len.get())
.map(|v| std::slice::from_raw_parts_mut(v, len.get()))
.map_err(RamError::HostFailed)
// Commit.
let start = unsafe { self.mem.add(addr) };
let mem = unsafe { Self::commit(start, len.get()).map_err(RamError::HostFailed)? };

// Add range to allocated list.
for addr in (addr..end).step_by(self.block_size.get()) {
assert!(allocated.insert(addr));
}

Ok(unsafe { std::slice::from_raw_parts_mut(mem, len.get()) })
}

/// # Panics
Expand All @@ -112,14 +126,54 @@ impl Ram {
assert_eq!(addr % self.block_size, 0);
assert_eq!(len.get() % self.block_size, 0);

if !addr
.checked_add(len.get())
.is_some_and(|v| v <= self.len.get())
{
// Check if the requested range valid.
let end = addr.checked_add(len.get()).ok_or(RamError::InvalidAddr)?;

if end > self.len.get() {
return Err(RamError::InvalidAddr);
}

Self::decommit(self.mem.add(addr), len.get()).map_err(RamError::HostFailed)
// Decommit the whole range. No need to check if the range already allocated since it will
// be no-op anyway.
let mut allocated = self.allocated.lock().unwrap();

Self::decommit(self.mem.add(addr), len.get()).map_err(RamError::HostFailed)?;

for addr in (addr..end).step_by(self.block_size.get()) {
allocated.remove(&addr);
}

Ok(())
}

/// Locks a range of RAM so it cannot be deallocated while the returned guard is alive.
///
/// Returns [`None`] if some part of the requested range is not allocated, or if
/// `addr + len` overflows.
pub fn lock(&self, addr: usize, len: NonZero<usize>) -> Option<LockedAddr> {
    // Compute the end of the requested range and align the start down to a block
    // boundary so it can be compared against the allocated block list.
    let end = addr.checked_add(len.get())?;
    let off = addr % self.block_size;
    let mut next = addr - off;
    let allocated = self.allocated.lock().unwrap();
    let range = allocated.range(next..end);

    // Walk the allocated blocks covering the range and verify there are no gaps.
    for addr in range {
        if *addr != next {
            return None;
        }

        // This block has been allocated successfully, which means this addition will
        // never overflow.
        next += self.block_size.get();
    }

    // The blocks seen above must reach at least `end`; otherwise the tail of the
    // requested range is unallocated.
    if next < end {
        return None;
    }

    // Keep the mutex guard inside the returned value so the allocation state cannot
    // change while the caller is using the pointer.
    Some(LockedAddr {
        lock: allocated,
        // SAFETY: the checks above proved `addr` lies inside an allocated block, and
        // allocated blocks never extend past the reserved region.
        ptr: unsafe { self.mem.add(addr) },
    })
}

#[cfg(unix)]
Expand Down Expand Up @@ -209,6 +263,22 @@ impl Drop for Ram {
unsafe impl Send for Ram {}
unsafe impl Sync for Ram {}

/// RAII guard that prevents a range of RAM from being deallocated.
///
/// Holding this value keeps the RAM allocation table locked, so no block can be
/// allocated or freed until it is dropped.
pub struct LockedAddr<'a> {
    // Held only for its lock; never read, hence the dead_code allowance.
    #[allow(dead_code)]
    lock: MutexGuard<'a, BTreeSet<usize>>,
    ptr: *mut u8,
}

impl<'a> LockedAddr<'a> {
    /// Returns a pointer to the start of the locked range.
    ///
    /// # Safety
    /// The memory range is guaranteed to stay valid for the lifetime of this struct,
    /// but the data it points to may be written concurrently by another vCPU, so reads
    /// through this pointer are subject to data races.
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr
    }
}

/// Represents an error when an operation on [`Ram`] fails.
#[derive(Debug, Error)]
pub enum RamError {
Expand Down

0 comments on commit 2cbd609

Please sign in to comment.