treat hermit like wasm32 #109

Merged · 3 commits · Mar 18, 2025
47 changes: 47 additions & 0 deletions src/alloc_stack_restore_guard.rs
@@ -0,0 +1,47 @@
use crate::{get_stack_limit, set_stack_limit};

pub struct StackRestoreGuard {
    new_stack: *mut u8,
    stack_bytes: usize,
    old_stack_limit: Option<usize>,
}

const ALIGNMENT: usize = 16;

impl StackRestoreGuard {
    pub fn new(stack_bytes: usize) -> StackRestoreGuard {
        // On these platforms we do not use stack guards. This is very unfortunate,
        // but there is not much we can do about it without OS support.
        // We simply allocate the requested size from the global allocator with a suitable
        // alignment.
        let stack_bytes = stack_bytes
            .checked_add(ALIGNMENT - 1)
            .expect("unreasonably large stack requested")
            / ALIGNMENT
            * ALIGNMENT;
        let layout = std::alloc::Layout::from_size_align(stack_bytes, ALIGNMENT).unwrap();
        let ptr = unsafe { std::alloc::alloc(layout) };
        assert!(!ptr.is_null(), "unable to allocate stack");
        StackRestoreGuard {
            new_stack: ptr,
            stack_bytes,
            old_stack_limit: get_stack_limit(),
        }
    }

    pub fn stack_area(&self) -> (*mut u8, usize) {
        (self.new_stack, self.stack_bytes)
    }
}

impl Drop for StackRestoreGuard {
    fn drop(&mut self) {
        unsafe {
            std::alloc::dealloc(
                self.new_stack,
                std::alloc::Layout::from_size_align_unchecked(self.stack_bytes, ALIGNMENT),
            );
        }
        set_stack_limit(self.old_stack_limit);
    }
}
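
The size round-up in `new` above is the standard add-then-truncate idiom for aligning an integer upward, with `checked_add` turning a would-be overflow into a clean panic. A minimal standalone sketch of the same arithmetic (the `round_up` helper is illustrative, not part of this PR):

const ALIGNMENT: usize = 16;

// Round `bytes` up to the next multiple of ALIGNMENT, panicking on overflow
// with the same message StackRestoreGuard::new uses.
fn round_up(bytes: usize) -> usize {
    bytes
        .checked_add(ALIGNMENT - 1)
        .expect("unreasonably large stack requested")
        / ALIGNMENT
        * ALIGNMENT
}

fn main() {
    assert_eq!(round_up(1), 16);
    assert_eq!(round_up(16), 16);
    assert_eq!(round_up(17), 32);
}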
139 changes: 19 additions & 120 deletions src/lib.rs
@@ -135,123 +135,30 @@ fn set_stack_limit(l: Option<usize>) {

 psm_stack_manipulation! {
     yes {
-        struct StackRestoreGuard {
-            new_stack: *mut std::ffi::c_void,
-            stack_bytes: usize,
-            old_stack_limit: Option<usize>,
-        }
-
-        impl StackRestoreGuard {
-            #[cfg(target_arch = "wasm32")]
-            unsafe fn new(stack_bytes: usize, _page_size: usize) -> StackRestoreGuard {
-                let layout = std::alloc::Layout::from_size_align(stack_bytes, 16).unwrap();
-                let ptr = std::alloc::alloc(layout);
-                assert!(!ptr.is_null(), "unable to allocate stack");
-                StackRestoreGuard {
-                    new_stack: ptr as *mut _,
-                    stack_bytes,
-                    old_stack_limit: get_stack_limit(),
-                }
-            }
-
-            #[cfg(not(target_arch = "wasm32"))]
-            unsafe fn new(stack_bytes: usize, page_size: usize) -> StackRestoreGuard {
-                let new_stack = libc::mmap(
-                    std::ptr::null_mut(),
-                    stack_bytes,
-                    libc::PROT_NONE,
-                    libc::MAP_PRIVATE |
-                    libc::MAP_ANON,
-                    -1, // Some implementations assert fd = -1 if MAP_ANON is specified
-                    0
-                );
-                assert_ne!(
-                    new_stack,
-                    libc::MAP_FAILED,
-                    "mmap failed to allocate stack: {}",
-                    std::io::Error::last_os_error()
-                );
-                let guard = StackRestoreGuard {
-                    new_stack,
-                    stack_bytes,
-                    old_stack_limit: get_stack_limit(),
-                };
-                let above_guard_page = new_stack.add(page_size);
-                #[cfg(not(target_os = "openbsd"))]
-                let result = libc::mprotect(
-                    above_guard_page,
-                    stack_bytes - page_size,
-                    libc::PROT_READ | libc::PROT_WRITE
-                );
-                #[cfg(target_os = "openbsd")]
-                let result = if libc::mmap(
-                    above_guard_page,
-                    stack_bytes - page_size,
-                    libc::PROT_READ | libc::PROT_WRITE,
-                    libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_STACK,
-                    -1,
-                    0) == above_guard_page {
-                    0
-                } else {
-                    -1
-                };
-                assert_ne!(
-                    result,
-                    -1,
-                    "mprotect/mmap failed: {}",
-                    std::io::Error::last_os_error()
-                );
-                guard
-            }
-        }
+        #[cfg(not(any(target_arch = "wasm32",target_os = "hermit")))]
+        #[path = "mmap_stack_restore_guard.rs"]
+        mod stack_restore_guard;
 
-        impl Drop for StackRestoreGuard {
-            fn drop(&mut self) {
-                #[cfg(target_arch = "wasm32")]
-                unsafe {
-                    std::alloc::dealloc(
-                        self.new_stack as *mut u8,
-                        std::alloc::Layout::from_size_align_unchecked(self.stack_bytes, 16),
-                    );
-                }
-                #[cfg(not(target_arch = "wasm32"))]
-                unsafe {
-                    // FIXME: check the error code and decide what to do with it.
-                    // Perhaps a debug_assertion?
-                    libc::munmap(self.new_stack, self.stack_bytes);
-                }
-                set_stack_limit(self.old_stack_limit);
-            }
-        }
+        #[cfg(any(target_arch = "wasm32",target_os = "hermit"))]
+        #[path = "alloc_stack_restore_guard.rs"]
+        mod stack_restore_guard;
 
-        fn _grow(stack_size: usize, callback: &mut dyn FnMut()) {
-            // Calculate a number of pages we want to allocate for the new stack.
-            // For maximum portability we want to produce a stack that is aligned to a page and has
-            // a size that’s a multiple of page size. Furthermore we want to allocate two extras pages
-            // for the stack guard. To achieve that we do our calculations in number of pages and
-            // convert to bytes last.
-            let page_size = page_size();
-            let requested_pages = stack_size
-                .checked_add(page_size - 1)
-                .expect("unreasonably large stack requested") / page_size;
-            let stack_pages = std::cmp::max(1, requested_pages) + 2;
-            let stack_bytes = stack_pages.checked_mul(page_size)
-                .expect("unreasonably large stack requested");
+        use stack_restore_guard::StackRestoreGuard;
 
-            // Next, there are a couple of approaches to how we allocate the new stack. We take the
-            // most obvious path and use `mmap`. We also `mprotect` a guard page into our
-            // allocation.
-            //
-            // We use a guard pattern to ensure we deallocate the allocated stack when we leave
-            // this function and also try to uphold various safety invariants required by `psm`
-            // (such as not unwinding from the callback we pass to it).
-            //
+        fn _grow(requested_stack_size: usize, callback: &mut dyn FnMut()) {
             // Other than that this code has no meaningful gotchas.
             unsafe {
-                let guard = StackRestoreGuard::new(stack_bytes, page_size);
-                let above_guard_page = guard.new_stack.add(page_size);
-                set_stack_limit(Some(above_guard_page as usize));
-                let panic = psm::on_stack(above_guard_page as *mut _, stack_size, move || {
+                // We use a guard pattern to ensure we deallocate the allocated stack when we leave
+                // this function and also try to uphold various safety invariants required by `psm`
+                // (such as not unwinding from the callback we pass to it).
+                // `StackRestoreGuard` allocates a memory area with suitable size and alignment.
+                // It also sets up stack guards if supported on target.
+                let guard = StackRestoreGuard::new(requested_stack_size);
+                let (stack_base, allocated_stack_size) = guard.stack_area();
+                debug_assert!(allocated_stack_size >= requested_stack_size);
+                set_stack_limit(Some(stack_base as usize));
+                // TODO should we not pass `allocated_stack_size` here?
+                let panic = psm::on_stack(stack_base, requested_stack_size, move || {
                     std::panic::catch_unwind(std::panic::AssertUnwindSafe(callback)).err()
                 });
                 drop(guard);
@@ -260,14 +167,6 @@ psm_stack_manipulation! {
                 }
             }
         }
-
-        fn page_size() -> usize {
-            // FIXME: consider caching the page size.
-            #[cfg(not(target_arch = "wasm32"))]
-            unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize }
-            #[cfg(target_arch = "wasm32")]
-            { 65536 }
-        }
     }
 
     no {
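
The heart of this PR is the pair of `#[path]` module declarations added above: both declare the same module name, and the mutually exclusive `cfg` gates pick exactly one backing file per target, so hermit now shares the allocator-backed guard with wasm32 while every other platform keeps the mmap-backed one. A minimal sketch of the same pattern, using hypothetical file names not taken from this repository:

// main.rs: pick a backend implementation file at compile time.
// backend_mmap.rs (hypothetical):  pub fn hello() { println!("mmap backend"); }
// backend_alloc.rs (hypothetical): pub fn hello() { println!("alloc backend"); }
#[cfg(not(any(target_arch = "wasm32", target_os = "hermit")))]
#[path = "backend_mmap.rs"]
mod backend;

#[cfg(any(target_arch = "wasm32", target_os = "hermit"))]
#[path = "backend_alloc.rs"]
mod backend;

fn main() {
    // Callers see a single `backend` module regardless of which file was chosen.
    backend::hello();
}

Because the two gates can never both be true, the rest of the crate is written once against a single interface, which is what the new `use stack_restore_guard::StackRestoreGuard;` line relies on.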
105 changes: 105 additions & 0 deletions src/mmap_stack_restore_guard.rs
@@ -0,0 +1,105 @@
use crate::{get_stack_limit, set_stack_limit};

pub struct StackRestoreGuard {
    mapping: *mut u8,
    size_with_guard: usize,
    page_size: usize,
    old_stack_limit: Option<usize>,
}

impl StackRestoreGuard {
    pub fn new(requested_size: usize) -> StackRestoreGuard {
        // For maximum portability we want to produce a stack that is aligned to a page and has
        // a size that’s a multiple of the page size. It is natural to use mmap to allocate
        // these pages. Furthermore, we want to allocate two extra pages for the stack guard.
        // To achieve that we do our calculations in number of pages and convert to bytes last.
        let page_size = page_size();
        let requested_pages = requested_size
            .checked_add(page_size - 1)
            .expect("unreasonably large stack requested")
            / page_size;
        let page_count_with_guard = std::cmp::max(1, requested_pages) + 2;
        let size_with_guard = page_count_with_guard
            .checked_mul(page_size)
            .expect("unreasonably large stack requested");

        unsafe {
            let new_stack = libc::mmap(
                std::ptr::null_mut(),
                size_with_guard,
                libc::PROT_NONE,
                libc::MAP_PRIVATE | libc::MAP_ANON,
                -1, // Some implementations assert fd = -1 if MAP_ANON is specified
                0,
            );
            assert_ne!(
                new_stack,
                libc::MAP_FAILED,
                "mmap failed to allocate stack: {}",
                std::io::Error::last_os_error()
            );
            let guard = StackRestoreGuard {
                mapping: new_stack as *mut u8,
                page_size,
                size_with_guard,
                old_stack_limit: get_stack_limit(),
            };
            // We leave a guard page without read/write access in our allocation.
            // There is one guard page below the stack and another above it.
            let above_guard_page = new_stack.add(page_size);
            #[cfg(not(target_os = "openbsd"))]
            let result = libc::mprotect(
                above_guard_page,
                size_with_guard - page_size,
                libc::PROT_READ | libc::PROT_WRITE,
            );
            #[cfg(target_os = "openbsd")]
            let result = if libc::mmap(
                above_guard_page,
                size_with_guard - page_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_STACK,
                -1,
                0,
            ) == above_guard_page
            {
                0
            } else {
                -1
            };
            assert_ne!(
                result,
                -1,
                "mprotect/mmap failed: {}",
                std::io::Error::last_os_error()
            );
            guard
        }
    }

    // TODO this should return a *mut [u8], but pointer slices only got proper support with Rust 1.79.
    pub fn stack_area(&self) -> (*mut u8, usize) {
        unsafe {
            (
                self.mapping.add(self.page_size),
                self.size_with_guard - self.page_size,
            )
        }
    }
}

impl Drop for StackRestoreGuard {
    fn drop(&mut self) {
        unsafe {
            // FIXME: check the error code and decide what to do with it.
            // Perhaps a debug_assertion?
            libc::munmap(self.mapping as *mut std::ffi::c_void, self.size_with_guard);
        }
        set_stack_limit(self.old_stack_limit);
    }
}

fn page_size() -> usize {
    // FIXME: consider caching the page size.
    unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize }
}
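
To make the page accounting above concrete: `new` maps `max(1, ceil(requested / page_size)) + 2` pages as PROT_NONE, re-enables read/write on everything above the lowest page, and `stack_area` hands back exactly that writable region. A sketch of the arithmetic, assuming 4096-byte pages (`page_size` is a parameter here instead of a `sysconf` call):

// Returns (total mapping size, usable stack size) as computed by
// StackRestoreGuard::new and stack_area.
fn sizes(requested_size: usize, page_size: usize) -> (usize, usize) {
    let requested_pages = requested_size
        .checked_add(page_size - 1)
        .expect("unreasonably large stack requested")
        / page_size;
    let size_with_guard = (std::cmp::max(1, requested_pages) + 2)
        .checked_mul(page_size)
        .expect("unreasonably large stack requested");
    // stack_area() skips the PROT_NONE page at the bottom of the mapping.
    (size_with_guard, size_with_guard - page_size)
}

fn main() {
    // A one-page request maps 3 pages and exposes 2 of them above the guard page.
    assert_eq!(sizes(4096, 4096), (3 * 4096, 2 * 4096));
    // 10_000 bytes rounds up to 3 pages, plus the 2 extra pages.
    assert_eq!(sizes(10_000, 4096), (5 * 4096, 4 * 4096));
}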