Reusable Stack #74

Open · wants to merge 1 commit into master
192 changes: 118 additions & 74 deletions src/lib.rs
@@ -134,143 +134,187 @@ fn set_stack_limit(l: Option<usize>) {
psm_stack_manipulation! {
yes {
struct StackRestoreGuard {
old_stack_limit: Option<usize>,
}

impl StackRestoreGuard {
fn new() -> StackRestoreGuard {
StackRestoreGuard {
old_stack_limit: get_stack_limit(),
}
}
}

impl Drop for StackRestoreGuard {
fn drop(&mut self) {
set_stack_limit(self.old_stack_limit);
}
}

fn _grow(stack_size: usize, callback: &mut dyn FnMut()) {
unsafe {
let stack = Stack::new(stack_size);
stack.call(callback);
}
}

fn page_size() -> usize {
// FIXME: consider caching the page size.
#[cfg(not(target_arch = "wasm32"))]
unsafe {
libc::sysconf(libc::_SC_PAGE_SIZE) as usize
}
#[cfg(target_arch = "wasm32")]
{
65536
}
}

fn calculate_stack_size(stack_size: usize) -> usize {
// Calculate a number of pages we want to allocate for the new stack.
// For maximum portability we want to produce a stack that is aligned to a page and has
// a size that’s a multiple of the page size. Furthermore, we want to allocate two extra pages
// for the stack guard. To achieve that we do our calculations in number of pages and
// convert to bytes last.
let page_size = page_size();
let requested_pages = stack_size
.checked_add(page_size - 1)
.expect("unreasonably large stack requested")
/ page_size;
let stack_pages = std::cmp::max(1, requested_pages) + 2;
let stack_bytes = stack_pages
.checked_mul(page_size)
.expect("unreasonably large stack requesteed");

// Next, there are a couple of approaches to how we allocate the new stack. We take the
// most obvious path and use `mmap`. We also `mprotect` a guard page into our
// allocation.
stack_bytes
}
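As a worked example of the page arithmetic in calculate_stack_size (a standalone sketch, not part of the diff, assuming a 4096-byte page size instead of querying the OS):

fn main() {
    // Assumed page size for illustration; the real code asks the OS via sysconf.
    let page_size: usize = 4096;
    // A caller requests a 100 KiB stack.
    let stack_size: usize = 100 * 1024;
    // Round the request up to whole pages: 102_400 bytes -> 25 pages.
    let requested_pages = stack_size
        .checked_add(page_size - 1)
        .expect("unreasonably large stack requested")
        / page_size;
    // Use at least one page and add two extra pages for the guard region: 27 pages.
    let stack_pages = std::cmp::max(1, requested_pages) + 2;
    // Convert back to bytes last: 27 * 4096 bytes get mapped.
    let stack_bytes = stack_pages
        .checked_mul(page_size)
        .expect("unreasonably large stack requested");
    assert_eq!(stack_bytes, 110_592);
}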

pub struct Stack {
ptr: *mut std::ffi::c_void,
size: usize,
requested_size: usize
}

impl Stack {

#[cfg(target_arch = "wasm32")]
pub unsafe fn new(stack_size: usize) -> Stack {
let stack_bytes = calculate_stack_size(stack_size);
let layout = std::alloc::Layout::from_size_align(stack_bytes, 16).unwrap();
let ptr = std::alloc::alloc(layout);
assert!(!ptr.is_null(), "unable to allocate stack");
Stack {
ptr: ptr as *mut _,
size: stack_bytes,
requested_size: stack_size
}
}

#[cfg(not(target_arch = "wasm32"))]
pub unsafe fn new(stack_size: usize) -> Stack {
let stack_bytes = calculate_stack_size(stack_size);
let new_stack = libc::mmap(
std::ptr::null_mut(),
stack_bytes,
libc::PROT_NONE,
libc::MAP_PRIVATE | libc::MAP_ANON,
-1, // Some implementations assert fd = -1 if MAP_ANON is specified
0,
);
let page_size = page_size();
if new_stack == libc::MAP_FAILED {
let error = std::io::Error::last_os_error();
panic!("allocating stack failed with: {}", error)
}
let stack = Stack {
ptr: new_stack,
size: stack_bytes,
requested_size: stack_size
};
let above_guard_page = new_stack.add(page_size);
#[cfg(not(target_os = "openbsd"))]
let result = libc::mprotect(
above_guard_page,
stack_bytes - page_size,
libc::PROT_READ | libc::PROT_WRITE,
);
#[cfg(target_os = "openbsd")]
let result = if libc::mmap(
above_guard_page,
stack_bytes - page_size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_STACK,
-1,
0,
) == above_guard_page
{
0
} else {
-1
};
if result == -1 {
let error = std::io::Error::last_os_error();
drop(stack);
panic!("setting stack permissions failed with: {}", error)
}
stack
}

pub fn call(&self, callback: &mut dyn FnMut()) {
unsafe {
let guard = StackRestoreGuard::new();
let above_guard_page = self.ptr.add(page_size());
set_stack_limit(Some(above_guard_page as usize));
let panic = psm::on_stack(above_guard_page as *mut _, self.requested_size, move || {
std::panic::catch_unwind(std::panic::AssertUnwindSafe(callback)).err()
});
drop(guard);
if let Some(p) = panic {
std::panic::resume_unwind(p);
}
}
}
}

impl Drop for Stack {
fn drop(&mut self) {
#[cfg(target_arch = "wasm32")]
unsafe {
std::alloc::dealloc(
self.ptr as *mut u8,
std::alloc::Layout::from_size_align_unchecked(self.size, 16),
);
}
#[cfg(not(target_arch = "wasm32"))]
unsafe {
// FIXME: check the error code and decide what to do with it.
// Perhaps a debug_assertion?
libc::munmap(self.ptr, self.size);
}
}
}

fn _grow(stack_size: usize, callback: &mut dyn FnMut()) {
// Calculate a number of pages we want to allocate for the new stack.
// For maximum portability we want to produce a stack that is aligned to a page and has
// a size that’s a multiple of the page size. Furthermore, we want to allocate two extra pages
// for the stack guard. To achieve that we do our calculations in number of pages and
// convert to bytes last.
let page_size = page_size();
let requested_pages = stack_size
.checked_add(page_size - 1)
.expect("unreasonably large stack requested") / page_size;
let stack_pages = std::cmp::max(1, requested_pages) + 2;
let stack_bytes = stack_pages.checked_mul(page_size)
.expect("unreasonably large stack requesteed");

// Next, there are a couple of approaches to how we allocate the new stack. We take the
// most obvious path and use `mmap`. We also `mprotect` a guard page into our
// allocation.
//
// We use a guard pattern to ensure we deallocate the allocated stack when we leave
// this function and also try to uphold various safety invariants required by `psm`
// (such as not unwinding from the callback we pass to it).
//
// Other than that this code has no meaningful gotchas.
unsafe {
let guard = StackRestoreGuard::new(stack_bytes, page_size);
let above_guard_page = guard.new_stack.add(page_size);
set_stack_limit(Some(above_guard_page as usize));
let panic = psm::on_stack(above_guard_page as *mut _, stack_size, move || {
std::panic::catch_unwind(std::panic::AssertUnwindSafe(callback)).err()
});
drop(guard);
if let Some(p) = panic {
std::panic::resume_unwind(p);
}
}
}

fn page_size() -> usize {
// FIXME: consider caching the page size.
#[cfg(not(target_arch = "wasm32"))]
unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize }
#[cfg(target_arch = "wasm32")]
{ 65536 }
}
}

no {
#[cfg(not(windows))]
fn _grow(stack_size: usize, callback: &mut dyn FnMut()) {
drop(stack_size);
callback();
}

pub struct Stack;

impl Stack {
pub unsafe fn new(_stack_bytes: usize, _page_size: usize) -> Stack {
Stack
}

pub fn call(&self, callback: &mut dyn FnMut()) {
callback();
}
}
}
}
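A minimal usage sketch of the reusable Stack introduced by this change (not part of the diff; it assumes the psm_stack_manipulation! yes branch, code living in the same crate as Stack, and arbitrarily chosen stack size and recursion depth): the stack is allocated once and then reused across several call invocations instead of being mapped anew on every grow.

fn depth(n: u32) -> u32 {
    // Deliberately recursive workload; it runs safely on the borrowed stack as
    // long as the requested size covers the recursion depth.
    if n == 0 { 0 } else { 1 + depth(n - 1) }
}

fn main() {
    unsafe {
        // Allocate one 8 MiB stack up front (the non-wasm path also reserves guard pages).
        let stack = Stack::new(8 * 1024 * 1024);
        // Reuse the same allocation for several calls.
        for _ in 0..3 {
            let mut out = 0;
            stack.call(&mut || out = depth(10_000));
            assert_eq!(out, 10_000);
        }
    } // Stack's Drop unmaps (or deallocates) the stack here.
}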
