From d5ee2a04d9b11ed93909d6da1a8f9186780237a4 Mon Sep 17 00:00:00 2001
From: Rain
Date: Fri, 6 Dec 2024 15:51:06 -0800
Subject: [PATCH] Move MemoryImageSource::map_at to mmap module (#9687)

* Simplify mmap interface slightly

Return a single `SendSyncPtr<u8>` -- the platform-independent code
converts it to the various raw pointer types as desired. This simplifies
upcoming work that also wants to return a `SendSyncPtr`.

* Move MemoryImageSource::map_at to mmap module

This is part of the work to centralize memory management in the `mmap`
module. This commit introduces a few structures which aid in that
process, and starts moving one of the functions
(`MemoryImageSource::map_at`) into this module.

The structures introduced are:

* `MemoryBase`: `RuntimeLinearMemory::base_ptr` is now
  `RuntimeLinearMemory::base`, which returns a `MemoryBase`. This is
  either a raw pointer or an `MmapOffset` as described below.

* `MmapOffset`: A combination of a reference to an mmap and an offset
  into it. Logically represents a pointer into a mapped section of
  memory.

In future work, we'll move more image-mapping code over to `Mmap`
instances.
---
 .../wasmtime/src/runtime/trampoline/memory.rs |  6 +-
 crates/wasmtime/src/runtime/vm.rs             |  4 +-
 crates/wasmtime/src/runtime/vm/cow.rs         | 77 +++++++--------
 .../instance/allocator/pooling/memory_pool.rs | 20 ++--
 crates/wasmtime/src/runtime/vm/memory.rs      | 95 ++++++++++++++----
 .../wasmtime/src/runtime/vm/memory/malloc.rs  |  6 +-
 crates/wasmtime/src/runtime/vm/memory/mmap.rs | 10 +-
 .../wasmtime/src/runtime/vm/memory/static_.rs | 13 ++-
 crates/wasmtime/src/runtime/vm/mmap.rs        | 99 ++++++++++++++++++-
 .../src/runtime/vm/sys/custom/mmap.rs         | 31 ++++--
 .../wasmtime/src/runtime/vm/sys/custom/vm.rs  | 10 +-
 .../wasmtime/src/runtime/vm/sys/miri/mmap.rs  | 24 +++--
 crates/wasmtime/src/runtime/vm/sys/miri/vm.rs |  4 -
 .../wasmtime/src/runtime/vm/sys/unix/mmap.rs  | 32 ++++--
 crates/wasmtime/src/runtime/vm/sys/unix/vm.rs | 17 +---
 .../src/runtime/vm/sys/windows/mmap.rs        | 45 ++++++---
 .../wasmtime/src/runtime/vm/sys/windows/vm.rs |  4 -
 17 files changed, 333 insertions(+), 164 deletions(-)

diff --git a/crates/wasmtime/src/runtime/trampoline/memory.rs b/crates/wasmtime/src/runtime/trampoline/memory.rs
index 51fc3a9ed5d7..dd2f93f178bb 100644
--- a/crates/wasmtime/src/runtime/trampoline/memory.rs
+++ b/crates/wasmtime/src/runtime/trampoline/memory.rs
@@ -3,7 +3,7 @@ use crate::prelude::*;
 use crate::runtime::vm::mpk::ProtectionKey;
 use crate::runtime::vm::{
     CompiledModuleId, GcHeapAllocationIndex, Imports, InstanceAllocationRequest, InstanceAllocator,
-    InstanceAllocatorImpl, Memory, MemoryAllocationIndex, ModuleRuntimeInfo,
+    InstanceAllocatorImpl, Memory, MemoryAllocationIndex, MemoryBase, ModuleRuntimeInfo,
     OnDemandInstanceAllocator, RuntimeLinearMemory, RuntimeMemoryCreator, SharedMemory, StorePtr,
     Table, TableAllocationIndex,
 };
@@ -89,8 +89,8 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
         self.mem.grow_to(new_size)
     }
 
-    fn base_ptr(&self) -> *mut u8 {
-        self.mem.as_ptr()
+    fn base(&self) -> MemoryBase {
+        MemoryBase::new_raw(self.mem.as_ptr())
     }
 }
 
diff --git a/crates/wasmtime/src/runtime/vm.rs b/crates/wasmtime/src/runtime/vm.rs
index 4bb5f7e79af1..31949a5423b6 100644
--- a/crates/wasmtime/src/runtime/vm.rs
+++ b/crates/wasmtime/src/runtime/vm.rs
@@ -73,7 +73,7 @@ pub use crate::runtime::vm::instance::{
 };
 pub use crate::runtime::vm::interpreter::*;
 pub use crate::runtime::vm::memory::{
-    Memory, RuntimeLinearMemory, RuntimeMemoryCreator, SharedMemory,
+    Memory, MemoryBase, RuntimeLinearMemory, RuntimeMemoryCreator, SharedMemory,
 };
 pub use crate::runtime::vm::mmap_vec::MmapVec;
 pub use crate::runtime::vm::mpk::MpkEnabled;
@@ -107,7 +107,7 @@ mod mmap;
 cfg_if::cfg_if! {
     if #[cfg(feature = "signals-based-traps")] {
         pub use crate::runtime::vm::byte_count::*;
-        pub use crate::runtime::vm::mmap::Mmap;
+        pub use crate::runtime::vm::mmap::{Mmap, MmapOffset};
         pub use self::cow::{MemoryImage, MemoryImageSlot, ModuleMemoryImages};
     } else {
         pub use self::cow_disabled::{MemoryImage, MemoryImageSlot, ModuleMemoryImages};
diff --git a/crates/wasmtime/src/runtime/vm/cow.rs b/crates/wasmtime/src/runtime/vm/cow.rs
index 37ba1864c140..db9ddc3952c0 100644
--- a/crates/wasmtime/src/runtime/vm/cow.rs
+++ b/crates/wasmtime/src/runtime/vm/cow.rs
@@ -8,11 +8,10 @@
 use super::sys::DecommitBehavior;
 use crate::prelude::*;
 use crate::runtime::vm::sys::vm::{self, MemoryImageSource};
-use crate::runtime::vm::{host_page_size, HostAlignedByteCount, MmapVec, SendSyncPtr};
+use crate::runtime::vm::{host_page_size, HostAlignedByteCount, MmapOffset, MmapVec};
 use alloc::sync::Arc;
-use core::ffi::c_void;
 use core::ops::Range;
-use core::ptr::{self, NonNull};
+use core::ptr;
 use wasmtime_environ::{DefinedMemoryIndex, MemoryInitialization, Module, PrimaryMap, Tunables};
 
 /// Backing images for memories in a module.
@@ -131,13 +130,13 @@ impl MemoryImage {
         Ok(None)
     }
 
-    unsafe fn map_at(&self, base: *mut u8) -> Result<()> {
-        self.source.map_at(
-            base.add(self.linear_memory_offset.byte_count()),
-            self.len.byte_count(),
+    unsafe fn map_at(&self, mmap_base: &MmapOffset) -> Result<()> {
+        mmap_base.map_image_at(
+            &self.source,
             self.source_offset,
-        )?;
-        Ok(())
+            self.linear_memory_offset,
+            self.len,
+        )
     }
 
     unsafe fn remap_as_zeros_at(&self, base: *mut u8) -> Result<()> {
@@ -283,10 +282,9 @@ impl ModuleMemoryImages {
 /// with a fresh zero'd mmap, meaning that reuse is effectively not supported.
 #[derive(Debug)]
 pub struct MemoryImageSlot {
-    /// The base address in virtual memory of the actual heap memory.
-    ///
-    /// Bytes at this address are what is seen by the Wasm guest code.
-    base: SendSyncPtr<u8>,
+    /// The mmap and offset within it that contains the linear memory for this
+    /// slot.
+    base: MmapOffset,
 
     /// The maximum static memory size which `self.accessible` can grow to.
     static_size: usize,
@@ -337,12 +335,12 @@ impl MemoryImageSlot {
     /// and all memory from `accessible` from `static_size` should be mapped as
     /// `PROT_NONE` backed by zero-bytes.
     pub(crate) fn create(
-        base_addr: *mut c_void,
+        base: MmapOffset,
         accessible: HostAlignedByteCount,
         static_size: usize,
     ) -> Self {
         MemoryImageSlot {
-            base: NonNull::new(base_addr.cast()).unwrap().into(),
+            base,
             static_size,
             accessible,
             image: None,
@@ -463,7 +461,7 @@ impl MemoryImageSlot {
             );
             if !image.len.is_zero() {
                 unsafe {
-                    image.map_at(self.base.as_ptr())?;
+                    image.map_at(&self.base)?;
                 }
             }
         }
@@ -480,7 +478,7 @@ impl MemoryImageSlot {
     pub(crate) fn remove_image(&mut self) -> Result<()> {
         if let Some(image) = &self.image {
             unsafe {
-                image.remap_as_zeros_at(self.base.as_ptr())?;
+                image.remap_as_zeros_at(self.base.as_mut_ptr())?;
             }
             self.image = None;
         }
@@ -589,7 +587,7 @@ impl MemoryImageSlot {
 
                     // This is memset (1)
                     ptr::write_bytes(
-                        self.base.as_ptr(),
+                        self.base.as_mut_ptr(),
                         0u8,
                         image.linear_memory_offset.byte_count(),
                     );
@@ -603,7 +601,7 @@ impl MemoryImageSlot {
 
                     // This is memset (3)
                     ptr::write_bytes(
-                        self.base.as_ptr().add(image_end.byte_count()),
+                        self.base.as_mut_ptr().add(image_end.byte_count()),
                         0u8,
                         remaining_memset.byte_count(),
                    );
@@ -639,7 +637,7 @@ impl MemoryImageSlot {
                 // Note that the memset may be zero bytes here.
 
                 // This is memset (1)
-                ptr::write_bytes(self.base.as_ptr(), 0u8, keep_resident.byte_count());
+                ptr::write_bytes(self.base.as_mut_ptr(), 0u8, keep_resident.byte_count());
 
                 // This is madvise (2)
                 self.restore_original_mapping(
@@ -657,7 +655,7 @@ impl MemoryImageSlot {
             // the rest.
             None => {
                 let size_to_memset = keep_resident.min(self.accessible);
-                ptr::write_bytes(self.base.as_ptr(), 0u8, size_to_memset.byte_count());
+                ptr::write_bytes(self.base.as_mut_ptr(), 0u8, size_to_memset.byte_count());
                 self.restore_original_mapping(
                     size_to_memset,
                     self.accessible
@@ -685,7 +683,10 @@ impl MemoryImageSlot {
             vm::decommit_behavior(),
             DecommitBehavior::RestoreOriginalMapping
         );
-        decommit(self.base.as_ptr().add(base.byte_count()), len.byte_count());
+        decommit(
+            self.base.as_mut_ptr().add(base.byte_count()),
+            len.byte_count(),
+        );
     }
 
     fn set_protection(&self, range: Range<HostAlignedByteCount>, readwrite: bool) -> Result<()> {
@@ -701,7 +702,7 @@ impl MemoryImageSlot {
         // TODO: use Mmap to change memory permissions instead of these free
         // functions.
         unsafe {
-            let start = self.base.as_ptr().add(range.start.byte_count());
+            let start = self.base.as_mut_ptr().add(range.start.byte_count());
             if readwrite {
                 vm::expose_existing_mapping(start, len.byte_count())?;
             } else {
@@ -731,7 +732,7 @@ impl MemoryImageSlot {
         }
 
         unsafe {
-            vm::erase_existing_mapping(self.base.as_ptr(), self.static_size)?;
+            vm::erase_existing_mapping(self.base.as_mut_ptr(), self.static_size)?;
         }
 
         self.image = None;
@@ -852,11 +853,8 @@ mod test {
         // 4 MiB mmap'd area, not accessible
         let mmap = mmap_4mib_inaccessible();
         // Create a MemoryImageSlot on top of it
-        let mut memfd = MemoryImageSlot::create(
-            mmap.as_mut_ptr() as *mut _,
-            HostAlignedByteCount::ZERO,
-            4 << 20,
-        );
+        let mut memfd =
+            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
         memfd.no_clear_on_drop();
         assert!(!memfd.is_dirty());
         // instantiate with 64 KiB initial size
@@ -903,11 +901,8 @@ mod test {
         // 4 MiB mmap'd area, not accessible
         let mmap = mmap_4mib_inaccessible();
         // Create a MemoryImageSlot on top of it
-        let mut memfd = MemoryImageSlot::create(
-            mmap.as_mut_ptr() as *mut _,
-            HostAlignedByteCount::ZERO,
-            4 << 20,
-        );
+        let mut memfd =
+            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
         memfd.no_clear_on_drop();
         // Create an image with some data.
         let image = Arc::new(create_memfd_with_data(page_size, &[1, 2, 3, 4]).unwrap());
@@ -996,11 +991,8 @@ mod test {
             ..Tunables::default_miri()
         };
         let mmap = mmap_4mib_inaccessible();
-        let mut memfd = MemoryImageSlot::create(
-            mmap.as_mut_ptr() as *mut _,
-            HostAlignedByteCount::ZERO,
-            4 << 20,
-        );
+        let mut memfd =
+            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
         memfd.no_clear_on_drop();
 
         // Test basics with the image
@@ -1066,11 +1058,8 @@ mod test {
         };
 
         let mmap = mmap_4mib_inaccessible();
-        let mut memfd = MemoryImageSlot::create(
-            mmap.as_mut_ptr() as *mut _,
-            HostAlignedByteCount::ZERO,
-            4 << 20,
-        );
+        let mut memfd =
+            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
         memfd.no_clear_on_drop();
         let image = Arc::new(create_memfd_with_data(page_size, &[1, 2, 3, 4]).unwrap());
         let initial = 64 << 10;
diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs
index fb8025d9f85f..26f7ef764ad4 100644
--- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs
+++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs
@@ -57,13 +57,12 @@ use super::{
 use crate::prelude::*;
 use crate::runtime::vm::{
     mmap::AlignedLength, CompiledModuleId, InstanceAllocationRequest, InstanceLimits, Memory,
-    MemoryImageSlot, Mmap, MpkEnabled, PoolingInstanceAllocatorConfig,
+    MemoryBase, MemoryImageSlot, Mmap, MmapOffset, MpkEnabled, PoolingInstanceAllocatorConfig,
 };
 use crate::{
     runtime::vm::mpk::{self, ProtectionKey, ProtectionMask},
     vm::HostAlignedByteCount,
 };
-use std::ffi::c_void;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 use wasmtime_environ::{DefinedMemoryIndex, Module, Tunables};
@@ -357,7 +356,7 @@ impl MemoryPool {
                 <= u64::try_from(self.layout.bytes_to_next_stripe_slot().byte_count()).unwrap()
         );
 
-        let base_ptr = self.get_base(allocation_index);
+        let base = self.get_base(allocation_index);
         let base_capacity = self.layout.max_memory_bytes;
 
         let mut slot = self.take_memory_image_slot(allocation_index);
@@ -385,7 +384,7 @@ impl MemoryPool {
             Memory::new_static(
                 ty,
                 tunables,
-                base_ptr,
+                MemoryBase::Mmap(base),
                 base_capacity.byte_count(),
                 slot,
                 unsafe { &mut *request.store.get().unwrap() },
@@ -471,7 +470,7 @@ impl MemoryPool {
         }
     }
 
-    fn get_base(&self, allocation_index: MemoryAllocationIndex) -> *mut u8 {
+    fn get_base(&self, allocation_index: MemoryAllocationIndex) -> MmapOffset {
         assert!(allocation_index.index() < self.layout.num_slots);
         let offset = self
             .layout
             .slot_bytes
             .checked_mul(allocation_index.index())
             .and_then(|c| c.checked_add(self.layout.pre_slab_guard_bytes))
             .expect("slot_bytes * index + pre_slab_guard_bytes overflows");
-        unsafe {
-            self.mapping
-                .as_ptr()
-                .offset(offset.byte_count() as isize)
-                .cast_mut()
-        }
+        self.mapping.offset(offset).expect("offset is in bounds")
     }
 
     /// Take ownership of the given image slot. Must be returned via
@@ -497,7 +491,7 @@ impl MemoryPool {
         maybe_slot.unwrap_or_else(|| {
             MemoryImageSlot::create(
-                self.get_base(allocation_index) as *mut c_void,
+                self.get_base(allocation_index),
                 HostAlignedByteCount::ZERO,
                 self.layout.max_memory_bytes.byte_count(),
             )
         })
@@ -822,7 +816,7 @@ mod tests {
         for i in 0..5 {
             let index = MemoryAllocationIndex(i);
-            let ptr = pool.get_base(index);
+            let ptr = pool.get_base(index).as_mut_ptr();
             assert_eq!(
                 ptr as usize - base,
                 i as usize * pool.layout.slot_bytes.byte_count()
             );
diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs
index 221eefd2b08f..116830f41ed0 100644
--- a/crates/wasmtime/src/runtime/vm/memory.rs
+++ b/crates/wasmtime/src/runtime/vm/memory.rs
@@ -77,11 +77,11 @@
 use crate::prelude::*;
 use crate::runtime::vm::vmcontext::VMMemoryDefinition;
 #[cfg(feature = "signals-based-traps")]
-use crate::runtime::vm::HostAlignedByteCount;
-use crate::runtime::vm::{MemoryImage, MemoryImageSlot, VMStore, WaitResult};
+use crate::runtime::vm::{HostAlignedByteCount, MmapOffset};
+use crate::runtime::vm::{MemoryImage, MemoryImageSlot, SendSyncPtr, VMStore, WaitResult};
 use alloc::sync::Arc;
-use core::ops::Range;
 use core::time::Duration;
+use core::{ops::Range, ptr::NonNull};
 use wasmtime_environ::{Trap, Tunables};
 
 #[cfg(feature = "signals-based-traps")]
@@ -160,7 +160,10 @@
     fn grow_to(&mut self, size: usize) -> Result<()>;
 
     /// Returns a pointer to the base of this linear memory allocation.
-    fn base_ptr(&self) -> *mut u8;
+    ///
+    /// This is either a raw pointer, or a reference to an mmap along with an
+    /// offset within it.
+    fn base(&self) -> MemoryBase;
 
     /// Internal method for Wasmtime when used in conjunction with CoW images.
     /// This is used to inform the underlying memory that the size of memory has
@@ -175,6 +178,37 @@
     }
 }
 
+/// The base pointer of a memory allocation.
+#[derive(Clone, Debug)]
+pub enum MemoryBase {
+    /// A raw pointer into memory.
+    ///
+    /// This may or may not be host-page-aligned.
+    Raw(SendSyncPtr<u8>),
+
+    /// An mmap along with an offset into it.
+    #[cfg(feature = "signals-based-traps")]
+    Mmap(MmapOffset),
+}
+
+impl MemoryBase {
+    /// Creates a new `MemoryBase` from a raw pointer.
+    ///
+    /// The pointer must be non-null, and it must be logically `Send + Sync`.
+    pub fn new_raw(ptr: *mut u8) -> Self {
+        Self::Raw(NonNull::new(ptr).expect("pointer is non-null").into())
+    }
+
+    /// Returns the actual memory address in memory that is represented by this base.
+    pub fn as_mut_ptr(&self) -> *mut u8 {
+        match self {
+            Self::Raw(ptr) => ptr.as_ptr(),
+            #[cfg(feature = "signals-based-traps")]
+            Self::Mmap(mmap_offset) => mmap_offset.as_mut_ptr(),
+        }
+    }
+}
+
 /// Representation of a runtime wasm linear memory.
 pub enum Memory {
     Local(LocalMemory),
@@ -205,13 +239,13 @@ impl Memory {
     pub fn new_static(
         ty: &wasmtime_environ::Memory,
         tunables: &Tunables,
-        base_ptr: *mut u8,
+        base: MemoryBase,
         base_capacity: usize,
         memory_image: MemoryImageSlot,
         store: &mut dyn VMStore,
     ) -> Result<Self> {
         let (minimum, maximum) = Self::limit_new(ty, Some(store))?;
-        let pooled_memory = StaticMemory::new(base_ptr, base_capacity, minimum, maximum)?;
+        let pooled_memory = StaticMemory::new(base, base_capacity, minimum, maximum)?;
         let allocation = Box::new(pooled_memory);
 
         // Configure some defaults a bit differently for this memory within the
@@ -496,14 +530,39 @@ impl LocalMemory {
                 // `RuntimeLinearMemory::byte_size` is not a multiple of the host page
                 // size. See https://github.com/bytecodealliance/wasmtime/issues/9660.
                 if let Ok(byte_size) = HostAlignedByteCount::new(alloc.byte_size()) {
-                    let mut slot = MemoryImageSlot::create(
-                        alloc.base_ptr().cast(),
-                        byte_size,
-                        alloc.byte_capacity(),
-                    );
-                    // On drop, we will unmap our mmap'd range that this slot was
-                    // mapped on top of, so there is no need for the slot to wipe
-                    // it with an anonymous mapping first.
+                    // memory_image is CoW-based so it is expected to be backed
+                    // by an mmap.
+                    let mmap_base = match alloc.base() {
+                        MemoryBase::Mmap(offset) => offset,
+                        MemoryBase::Raw { .. } => {
+                            unreachable!("memory_image is Some only for mmap-based memories")
+                        }
+                    };
+
+                    let mut slot =
+                        MemoryImageSlot::create(mmap_base, byte_size, alloc.byte_capacity());
+                    // On drop, we will unmap our mmap'd range that this slot
+                    // was mapped on top of, so there is no need for the slot to
+                    // wipe it with an anonymous mapping first.
+                    //
+                    // Note that this code would be incorrect if clear-on-drop
+                    // were enabled. That's because:
+                    //
+                    // * In the struct definition, `memory_image` above is listed
+                    //   after `alloc`.
+                    // * Rust drops fields in the order they're defined, so
+                    //   `memory_image` would be dropped after `alloc`.
+                    // * `alloc` can represent either owned memory (i.e. the mmap is
+                    //   freed on drop) or logically borrowed memory (something else
+                    //   manages the mmap).
+                    // * If `alloc` is borrowed memory, then this isn't an issue.
+                    // * But if `alloc` is owned memory, then it would first drop
+                    //   the mmap, and then `memory_image` would try to remap
+                    //   part of that same memory as part of clear-on-drop.
+                    //
+                    // A lot of this really suggests representing the ownership
+                    // via Rust lifetimes -- that would be a major refactor,
+                    // though.
                     slot.no_clear_on_drop();
                     slot.instantiate(alloc.byte_size(), Some(image), ty, tunables)?;
                     Some(slot)
@@ -577,7 +636,7 @@ impl LocalMemory {
 
         // Save the original base pointer to assert the invariant that growth up
        // to the byte capacity never relocates the base pointer.
-        let base_ptr_before = self.alloc.base_ptr();
+        let base_ptr_before = self.alloc.base().as_mut_ptr();
         let required_to_not_move_memory = new_byte_size <= self.alloc.byte_capacity();
 
         let result = (|| -> Result<()> {
@@ -626,7 +685,7 @@ impl LocalMemory {
                 // On successful growth double-check that the base pointer
                 // didn't move if it shouldn't have.
                 if required_to_not_move_memory {
-                    assert_eq!(base_ptr_before, self.alloc.base_ptr());
+                    assert_eq!(base_ptr_before, self.alloc.base().as_mut_ptr());
                 }
 
                 Ok(Some((old_byte_size, new_byte_size)))
@@ -646,7 +705,7 @@ impl LocalMemory {
 
     pub fn vmmemory(&mut self) -> VMMemoryDefinition {
         VMMemoryDefinition {
-            base: self.alloc.base_ptr(),
+            base: self.alloc.base().as_mut_ptr(),
             current_length: self.alloc.byte_size().into(),
         }
     }
@@ -663,7 +722,7 @@ impl LocalMemory {
     }
 
     pub fn wasm_accessible(&self) -> Range<usize> {
-        let base = self.alloc.base_ptr() as usize;
+        let base = self.alloc.base().as_mut_ptr() as usize;
         // From the base add:
         //
         // * max(capacity, reservation) -- all memory is guaranteed to have at
diff --git a/crates/wasmtime/src/runtime/vm/memory/malloc.rs b/crates/wasmtime/src/runtime/vm/memory/malloc.rs
index b822a376acc3..13c83bd859c3 100644
--- a/crates/wasmtime/src/runtime/vm/memory/malloc.rs
+++ b/crates/wasmtime/src/runtime/vm/memory/malloc.rs
@@ -5,7 +5,7 @@
 //! handle memory allocation failures.
 
 use crate::prelude::*;
-use crate::runtime::vm::memory::RuntimeLinearMemory;
+use crate::runtime::vm::memory::{MemoryBase, RuntimeLinearMemory};
 use crate::runtime::vm::SendSyncPtr;
 use core::mem;
 use core::ptr::NonNull;
@@ -80,8 +80,8 @@ impl RuntimeLinearMemory for MallocMemory {
         Ok(())
     }
 
-    fn base_ptr(&self) -> *mut u8 {
-        self.base_ptr.as_ptr()
+    fn base(&self) -> MemoryBase {
+        MemoryBase::Raw(self.base_ptr)
     }
 }
 
diff --git a/crates/wasmtime/src/runtime/vm/memory/mmap.rs b/crates/wasmtime/src/runtime/vm/memory/mmap.rs
index 0d536d1d2737..e82f159c2f51 100644
--- a/crates/wasmtime/src/runtime/vm/memory/mmap.rs
+++ b/crates/wasmtime/src/runtime/vm/memory/mmap.rs
@@ -7,6 +7,8 @@ use crate::runtime::vm::{mmap::AlignedLength, HostAlignedByteCount, Mmap};
 use alloc::sync::Arc;
 use wasmtime_environ::Tunables;
 
+use super::MemoryBase;
+
 /// A linear memory instance.
 #[derive(Debug)]
 pub struct MmapMemory {
@@ -223,7 +225,11 @@ impl RuntimeLinearMemory for MmapMemory {
         self.len = len;
     }
 
-    fn base_ptr(&self) -> *mut u8 {
-        unsafe { self.mmap.as_mut_ptr().add(self.pre_guard_size.byte_count()) }
+    fn base(&self) -> MemoryBase {
+        MemoryBase::Mmap(
+            self.mmap
+                .offset(self.pre_guard_size)
+                .expect("pre_guard_size is in bounds"),
+        )
     }
 }
diff --git a/crates/wasmtime/src/runtime/vm/memory/static_.rs b/crates/wasmtime/src/runtime/vm/memory/static_.rs
index ba2f432e46d4..ed62d114652e 100644
--- a/crates/wasmtime/src/runtime/vm/memory/static_.rs
+++ b/crates/wasmtime/src/runtime/vm/memory/static_.rs
@@ -3,15 +3,14 @@
 
 use crate::prelude::*;
 use crate::runtime::vm::memory::RuntimeLinearMemory;
-use crate::runtime::vm::SendSyncPtr;
-use core::ptr::NonNull;
+use crate::runtime::vm::MemoryBase;
 
 /// A "static" memory where the lifetime of the backing memory is managed
 /// elsewhere. Currently used with the pooling allocator.
 pub struct StaticMemory {
     /// The base pointer of this static memory, wrapped up in a send/sync
     /// wrapper.
-    base: SendSyncPtr<u8>,
+    base: MemoryBase,
 
     /// The byte capacity of the `base` pointer.
     capacity: usize,
@@ -22,7 +21,7 @@ pub struct StaticMemory {
 
 impl StaticMemory {
     pub fn new(
-        base_ptr: *mut u8,
+        base: MemoryBase,
         base_capacity: usize,
         initial_size: usize,
         maximum_size: Option<usize>,
@@ -43,7 +42,7 @@ impl StaticMemory {
         };
 
         Ok(Self {
-            base: SendSyncPtr::new(NonNull::new(base_ptr).unwrap()),
+            base,
             capacity: base_capacity,
             size: initial_size,
         })
@@ -73,7 +72,7 @@ impl RuntimeLinearMemory for StaticMemory {
         self.size = len;
     }
 
-    fn base_ptr(&self) -> *mut u8 {
-        self.base.as_ptr()
+    fn base(&self) -> MemoryBase {
+        self.base.clone()
     }
 }
diff --git a/crates/wasmtime/src/runtime/vm/mmap.rs b/crates/wasmtime/src/runtime/vm/mmap.rs
index c16ec11e812f..f06bb0a8d436 100644
--- a/crates/wasmtime/src/runtime/vm/mmap.rs
+++ b/crates/wasmtime/src/runtime/vm/mmap.rs
@@ -3,10 +3,11 @@
 
 use super::HostAlignedByteCount;
 use crate::prelude::*;
-use crate::runtime::vm::sys::mmap;
+use crate::runtime::vm::sys::{mmap, vm::MemoryImageSource};
+use alloc::sync::Arc;
 use core::ops::Range;
 #[cfg(feature = "std")]
-use std::{fs::File, sync::Arc};
+use std::fs::File;
 
 /// A marker type for an [`Mmap`] where both the start address and length are a
 /// multiple of the host page size.
@@ -132,6 +133,26 @@ impl Mmap<AlignedLength> {
         unsafe { HostAlignedByteCount::new_unchecked(self.sys.len()) }
     }
 
+    /// Return a struct representing a page-aligned offset into the mmap.
+    ///
+    /// Returns an error if `offset >= self.len_aligned()`.
+    pub fn offset(self: &Arc<Self>, offset: HostAlignedByteCount) -> Result<MmapOffset> {
+        if offset >= self.len_aligned() {
+            bail!(
+                "offset {} is not in bounds for mmap: {}",
+                offset,
+                self.len_aligned()
+            );
+        }
+
+        Ok(MmapOffset::new(self.clone(), offset))
+    }
+
+    /// Return an `MmapOffset` corresponding to zero bytes into the mmap.
+    pub fn zero_offset(self: &Arc<Self>) -> MmapOffset {
+        MmapOffset::new(self.clone(), HostAlignedByteCount::ZERO)
+    }
+
     /// Make the memory starting at `start` and extending for `len` bytes
     /// accessible. `start` and `len` must be native page-size multiples and
     /// describe a range within `self`'s reserved memory.
@@ -231,13 +252,13 @@ impl<T> Mmap<T> {
     /// Return the allocated memory as a pointer to u8.
     #[inline]
     pub fn as_ptr(&self) -> *const u8 {
-        self.sys.as_ptr()
+        self.sys.as_send_sync_ptr().as_ptr() as *const u8
     }
 
     /// Return the allocated memory as a mutable pointer to u8.
     #[inline]
     pub fn as_mut_ptr(&self) -> *mut u8 {
-        self.sys.as_mut_ptr()
+        self.sys.as_send_sync_ptr().as_ptr()
     }
 
     /// Return the length of the allocated memory.
@@ -324,6 +345,76 @@ impl From<Mmap<AlignedLength>> for Mmap<UnalignedLength> {
     }
 }
 
+/// A reference to an [`Mmap`], along with a host-page-aligned index within it.
+///
+/// The main invariant this type asserts is that the index is in bounds within
+/// the `Mmap` (i.e. `self.mmap[self.offset]` is valid). In the future, this
+/// type may also assert other invariants.
+#[derive(Clone, Debug)]
+pub struct MmapOffset {
+    mmap: Arc<Mmap<AlignedLength>>,
+    offset: HostAlignedByteCount,
+}
+
+impl MmapOffset {
+    #[inline]
+    fn new(mmap: Arc<Mmap<AlignedLength>>, offset: HostAlignedByteCount) -> Self {
+        // Note < rather than <=. This currently cannot represent the logical
+        // end of the mmap. We may need to change this if that becomes
+        // necessary.
+        assert!(
+            offset < mmap.len_aligned(),
+            "offset {} is in bounds (< {})",
+            offset,
+            mmap.len_aligned(),
+        );
+        Self { mmap, offset }
+    }
+
+    /// Returns the mmap this offset is within.
+    #[inline]
+    pub fn mmap(&self) -> &Arc<Mmap<AlignedLength>> {
+        &self.mmap
+    }
+
+    /// Returns the host-page-aligned offset within the mmap.
+    #[inline]
+    pub fn offset(&self) -> HostAlignedByteCount {
+        self.offset
+    }
+
+    /// Returns the raw pointer in memory represented by this offset.
+    #[inline]
+    pub fn as_mut_ptr(&self) -> *mut u8 {
+        // SAFETY: constructor checks that offset is within this allocation.
+        unsafe { self.mmap().as_mut_ptr().byte_add(self.offset.byte_count()) }
+    }
+
+    /// Maps an image into the mmap with read/write permissions.
+    ///
+    /// The image is mapped at `self.mmap.as_ptr() + self.offset +
+    /// memory_offset`.
+    ///
+    /// ## Safety
+    ///
+    /// The caller must ensure that no one else has a reference to this memory.
+    pub unsafe fn map_image_at(
+        &self,
+        image_source: &MemoryImageSource,
+        source_offset: u64,
+        memory_offset: HostAlignedByteCount,
+        memory_len: HostAlignedByteCount,
+    ) -> Result<()> {
+        let total_offset = self
+            .offset
+            .checked_add(memory_offset)
+            .expect("self.offset + memory_offset is in bounds");
+        self.mmap
+            .sys
+            .map_image_at(image_source, source_offset, total_offset, memory_len)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/crates/wasmtime/src/runtime/vm/sys/custom/mmap.rs b/crates/wasmtime/src/runtime/vm/sys/custom/mmap.rs
index 650bc070d444..7548f301d80b 100644
--- a/crates/wasmtime/src/runtime/vm/sys/custom/mmap.rs
+++ b/crates/wasmtime/src/runtime/vm/sys/custom/mmap.rs
@@ -1,6 +1,6 @@
 use super::cvt;
 use crate::prelude::*;
-use crate::runtime::vm::sys::capi;
+use crate::runtime::vm::sys::{capi, vm::MemoryImageSource};
 use crate::runtime::vm::{HostAlignedByteCount, SendSyncPtr};
 use core::ops::Range;
 use core::ptr::{self, NonNull};
@@ -69,13 +69,8 @@ impl Mmap {
     }
 
     #[inline]
-    pub fn as_ptr(&self) -> *const u8 {
-        self.memory.as_ptr() as *const u8
-    }
-
-    #[inline]
-    pub fn as_mut_ptr(&self) -> *mut u8 {
-        self.memory.as_ptr().cast()
+    pub fn as_send_sync_ptr(&self) -> SendSyncPtr<u8> {
+        self.memory.cast()
     }
 
     #[inline]
@@ -109,6 +104,26 @@ impl Mmap {
         cvt(capi::wasmtime_mprotect(base, len, capi::PROT_READ))?;
         Ok(())
     }
+
+    pub unsafe fn map_image_at(
+        &self,
+        image_source: &MemoryImageSource,
+        source_offset: u64,
+        memory_offset: HostAlignedByteCount,
+        memory_len: HostAlignedByteCount,
+    ) -> Result<()> {
+        assert_eq!(source_offset, 0);
+        let base = self
+            .memory
+            .as_ptr()
+            .byte_add(memory_offset.byte_count())
+            .cast();
+        cvt(capi::wasmtime_memory_image_map_at(
+            image_source.image_ptr().as_ptr(),
+            base,
+            memory_len.byte_count(),
+        ))
+    }
 }
 
 impl Drop for Mmap {
diff --git a/crates/wasmtime/src/runtime/vm/sys/custom/vm.rs b/crates/wasmtime/src/runtime/vm/sys/custom/vm.rs
index 1a1f1e015c97..414341c73e93 100644
--- a/crates/wasmtime/src/runtime/vm/sys/custom/vm.rs
+++ b/crates/wasmtime/src/runtime/vm/sys/custom/vm.rs
@@ -79,13 +79,9 @@ impl MemoryImageSource {
         }
     }
 
-    pub unsafe fn map_at(&self, base: *mut u8, len: usize, offset: u64) -> Result<()> {
-        assert_eq!(offset, 0);
-        cvt(capi::wasmtime_memory_image_map_at(
-            self.data.as_ptr(),
-            base,
-            len,
-        ))
+    #[inline]
+    pub(super) fn image_ptr(&self) -> SendSyncPtr<capi::wasmtime_memory_image> {
+        self.data
     }
 
     pub unsafe fn remap_as_zeros_at(&self, base: *mut u8, len: usize) -> Result<()> {
diff --git a/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs b/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs
index eae0ed29fc9f..bcce7108b22a 100644
--- a/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs
+++ b/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs
@@ -6,6 +6,7 @@
 //! but it's enough to get various tests running relying on memories and such.
 
 use crate::prelude::*;
+use crate::runtime::vm::sys::vm::MemoryImageSource;
 use crate::runtime::vm::{HostAlignedByteCount, SendSyncPtr};
 use std::alloc::{self, Layout};
 use std::fs::File;
@@ -66,7 +67,7 @@ impl Mmap {
         // initialized for miri-level checking.
         unsafe {
             std::ptr::write_bytes(
-                self.as_mut_ptr().add(start.byte_count()),
+                self.as_send_sync_ptr().as_ptr().add(start.byte_count()),
                 0u8,
                 len.byte_count(),
             );
@@ -74,12 +75,9 @@ impl Mmap {
         Ok(())
     }
 
-    pub fn as_ptr(&self) -> *const u8 {
-        self.memory.as_ptr() as *const u8
-    }
-
-    pub fn as_mut_ptr(&self) -> *mut u8 {
-        self.memory.as_ptr().cast()
+    #[inline]
+    pub fn as_send_sync_ptr(&self) -> SendSyncPtr<u8> {
+        self.memory.cast()
     }
 
     pub fn len(&self) -> usize {
@@ -97,6 +95,16 @@ impl Mmap {
     pub unsafe fn make_readonly(&self, _range: Range<HostAlignedByteCount>) -> Result<()> {
         Ok(())
     }
+
+    pub unsafe fn map_image_at(
+        &self,
+        image_source: &MemoryImageSource,
+        _source_offset: u64,
+        _memory_offset: HostAlignedByteCount,
+        _memory_len: HostAlignedByteCount,
+    ) -> Result<()> {
+        match *image_source {}
+    }
 }
 
 impl Drop for Mmap {
@@ -106,7 +114,7 @@ impl Drop for Mmap {
         }
         unsafe {
             let layout = make_layout(self.len());
-            alloc::dealloc(self.as_mut_ptr(), layout);
+            alloc::dealloc(self.as_send_sync_ptr().as_ptr(), layout);
         }
     }
 }
diff --git a/crates/wasmtime/src/runtime/vm/sys/miri/vm.rs b/crates/wasmtime/src/runtime/vm/sys/miri/vm.rs
index 321e8cf2f1c3..ccdb6529aa4c 100644
--- a/crates/wasmtime/src/runtime/vm/sys/miri/vm.rs
+++ b/crates/wasmtime/src/runtime/vm/sys/miri/vm.rs
@@ -49,10 +49,6 @@ impl MemoryImageSource {
         Ok(None)
     }
 
-    pub unsafe fn map_at(&self, _base: *mut u8, _len: usize, _offset: u64) -> io::Result<()> {
-        match *self {}
-    }
-
     pub unsafe fn remap_as_zeros_at(&self, _base: *mut u8, _len: usize) -> io::Result<()> {
         match *self {}
     }
diff --git a/crates/wasmtime/src/runtime/vm/sys/unix/mmap.rs b/crates/wasmtime/src/runtime/vm/sys/unix/mmap.rs
index 50512070d7b5..68ec224f8ee1 100644
--- a/crates/wasmtime/src/runtime/vm/sys/unix/mmap.rs
+++ b/crates/wasmtime/src/runtime/vm/sys/unix/mmap.rs
@@ -1,4 +1,5 @@
 use crate::prelude::*;
+use crate::runtime::vm::sys::vm::MemoryImageSource;
 use crate::runtime::vm::{HostAlignedByteCount, SendSyncPtr};
 use rustix::mm::{mprotect, MprotectFlags};
 use std::ops::Range;
@@ -123,13 +124,8 @@ impl Mmap {
     }
 
     #[inline]
-    pub fn as_ptr(&self) -> *const u8 {
-        self.memory.as_ptr() as *const u8
-    }
-
-    #[inline]
-    pub fn as_mut_ptr(&self) -> *mut u8 {
-        self.memory.as_ptr().cast()
+    pub fn as_send_sync_ptr(&self) -> SendSyncPtr<u8> {
+        self.memory.cast()
     }
 
     #[inline]
@@ -176,6 +172,28 @@ impl Mmap {
 
         Ok(())
     }
+
+    pub unsafe fn map_image_at(
+        &self,
+        image_source: &MemoryImageSource,
+        source_offset: u64,
+        memory_offset: HostAlignedByteCount,
+        memory_len: HostAlignedByteCount,
+    ) -> Result<()> {
+        unsafe {
+            let map_base = self.memory.as_ptr().byte_add(memory_offset.byte_count());
+            let ptr = rustix::mm::mmap(
+                map_base.cast(),
+                memory_len.byte_count(),
+                rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
+                rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
+                image_source.as_file(),
+                source_offset,
+            )?;
+            assert_eq!(map_base.cast(), ptr);
+        };
+        Ok(())
+    }
 }
 
 impl Drop for Mmap {
diff --git a/crates/wasmtime/src/runtime/vm/sys/unix/vm.rs b/crates/wasmtime/src/runtime/vm/sys/unix/vm.rs
index 6d457ba5f191..2ad2a577bb41 100644
--- a/crates/wasmtime/src/runtime/vm/sys/unix/vm.rs
+++ b/crates/wasmtime/src/runtime/vm/sys/unix/vm.rs
@@ -1,6 +1,6 @@
 use crate::runtime::vm::sys::DecommitBehavior;
 use rustix::fd::AsRawFd;
-use rustix::mm::{mmap, mmap_anonymous, mprotect, MapFlags, MprotectFlags, ProtFlags};
+use rustix::mm::{mmap_anonymous, mprotect, MapFlags, MprotectFlags, ProtFlags};
 use std::fs::File;
 use std::io;
 #[cfg(feature = "std")]
@@ -146,7 +146,7 @@ impl MemoryImageSource {
         Ok(Some(MemoryImageSource::Memfd(memfd)))
     }
 
-    fn as_file(&self) -> &File {
+    pub(super) fn as_file(&self) -> &File {
         match *self {
             #[cfg(feature = "std")]
             MemoryImageSource::Mmap(ref file) => file,
@@ -155,19 +155,6 @@ impl MemoryImageSource {
         }
     }
 
-    pub unsafe fn map_at(&self, base: *mut u8, len: usize, offset: u64) -> io::Result<()> {
-        let ptr = mmap(
-            base.cast(),
-            len,
-            ProtFlags::READ | ProtFlags::WRITE,
-            MapFlags::PRIVATE | MapFlags::FIXED,
-            self.as_file(),
-            offset,
-        )?;
-        assert_eq!(base, ptr.cast());
-        Ok(())
-    }
-
     pub unsafe fn remap_as_zeros_at(&self, base: *mut u8, len: usize) -> io::Result<()> {
         let ptr = mmap_anonymous(
             base.cast(),
diff --git a/crates/wasmtime/src/runtime/vm/sys/windows/mmap.rs b/crates/wasmtime/src/runtime/vm/sys/windows/mmap.rs
index 9f23e0f2066d..defb8052a8da 100644
--- a/crates/wasmtime/src/runtime/vm/sys/windows/mmap.rs
+++ b/crates/wasmtime/src/runtime/vm/sys/windows/mmap.rs
@@ -1,4 +1,5 @@
 use crate::prelude::*;
+use crate::runtime::vm::sys::vm::MemoryImageSource;
 use crate::runtime::vm::{HostAlignedByteCount, SendSyncPtr};
 use std::fs::{File, OpenOptions};
 use std::io;
@@ -133,7 +134,13 @@ impl Mmap {
             // Protect the entire file as PAGE_WRITECOPY to start (i.e.
             // remove the execute bit)
             let mut old = 0;
-            if VirtualProtect(ret.as_mut_ptr().cast(), ret.len(), PAGE_WRITECOPY, &mut old) == 0 {
+            if VirtualProtect(
+                ret.as_send_sync_ptr().as_ptr().cast(),
+                ret.len(),
+                PAGE_WRITECOPY,
+                &mut old,
+            ) == 0
+            {
                 return Err(io::Error::last_os_error())
                     .context("failed change pages to `PAGE_READONLY`");
             }
@@ -149,7 +156,10 @@ impl Mmap {
     ) -> Result<()> {
         if unsafe {
             VirtualAlloc(
-                self.as_ptr().add(start.byte_count()) as _,
+                self.as_send_sync_ptr()
+                    .as_ptr()
+                    .add(start.byte_count())
+                    .cast(),
                 len.byte_count(),
                 MEM_COMMIT,
                 PAGE_READWRITE,
@@ -164,13 +174,8 @@ impl Mmap {
     }
 
     #[inline]
-    pub fn as_ptr(&self) -> *const u8 {
-        self.memory.as_ptr() as *const u8
-    }
-
-    #[inline]
-    pub fn as_mut_ptr(&self) -> *mut u8 {
-        self.memory.as_ptr().cast()
+    pub fn as_send_sync_ptr(&self) -> SendSyncPtr<u8> {
+        self.memory.cast()
     }
 
     #[inline]
@@ -191,8 +196,8 @@ impl Mmap {
             PAGE_EXECUTE_READ
         };
         let mut old = 0;
-        let base = self.as_ptr().add(range.start);
-        let result = VirtualProtect(base as _, range.end - range.start, flags, &mut old);
+        let base = self.as_send_sync_ptr().as_ptr().add(range.start).cast();
+        let result = VirtualProtect(base, range.end - range.start, flags, &mut old);
         if result == 0 {
             bail!(io::Error::last_os_error());
         }
@@ -201,13 +206,23 @@ impl Mmap {
 
     pub unsafe fn make_readonly(&self, range: Range<usize>) -> Result<()> {
         let mut old = 0;
-        let base = self.as_ptr().add(range.start);
-        let result = VirtualProtect(base as _, range.end - range.start, PAGE_READONLY, &mut old);
+        let base = self.as_send_sync_ptr().as_ptr().add(range.start).cast();
+        let result = VirtualProtect(base, range.end - range.start, PAGE_READONLY, &mut old);
         if result == 0 {
             bail!(io::Error::last_os_error());
         }
         Ok(())
     }
+
+    pub unsafe fn map_image_at(
+        &self,
+        image_source: &MemoryImageSource,
+        _source_offset: u64,
+        _memory_offset: HostAlignedByteCount,
+        _memory_len: HostAlignedByteCount,
+    ) -> Result<()> {
+        match *image_source {}
+    }
 }
 
 impl Drop for Mmap {
@@ -219,12 +234,12 @@ impl Drop for Mmap {
         if self.is_file {
             let r = unsafe {
                 UnmapViewOfFile(MEMORY_MAPPED_VIEW_ADDRESS {
-                    Value: self.as_mut_ptr().cast(),
+                    Value: self.memory.as_ptr().cast(),
                 })
             };
             assert_ne!(r, 0);
         } else {
-            let r = unsafe { VirtualFree(self.as_mut_ptr().cast(), 0, MEM_RELEASE) };
+            let r = unsafe { VirtualFree(self.memory.as_ptr().cast(), 0, MEM_RELEASE) };
             assert_ne!(r, 0);
         }
     }
diff --git a/crates/wasmtime/src/runtime/vm/sys/windows/vm.rs b/crates/wasmtime/src/runtime/vm/sys/windows/vm.rs
index c4a3dd9f0634..29c735df3512 100644
--- a/crates/wasmtime/src/runtime/vm/sys/windows/vm.rs
+++ b/crates/wasmtime/src/runtime/vm/sys/windows/vm.rs
@@ -65,10 +65,6 @@ impl MemoryImageSource {
         Ok(None)
     }
 
-    pub unsafe fn map_at(&self, _base: *mut u8, _len: usize, _offset: u64) -> io::Result<()> {
-        match *self {}
-    }
-
     pub unsafe fn remap_as_zeros_at(&self, _base: *mut u8, _len: usize) -> io::Result<()> {
         match *self {}
     }
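
---

Illustrative sketch (commentary, not part of the patch): the standalone program
below models the `MemoryBase`/`MmapOffset` split this change introduces. It
uses a `Vec`-backed stand-in (`FakeMmap`) for the real page-aligned `Mmap`, and
every name in it is hypothetical, chosen only for illustration. What it
demonstrates is the invariant the patch centralizes: the offset into the
mapping is bounds-checked once at construction, so every later pointer
computation starts from a known-valid (mmap, offset) pair rather than from a
bare `*mut u8`.

    use std::sync::Arc;

    // Stand-in for wasmtime's page-aligned `Mmap`. A real implementation wraps
    // an OS-level mapping; a heap buffer is enough to show the pattern.
    #[derive(Debug)]
    struct FakeMmap {
        storage: Vec<u8>,
    }

    impl FakeMmap {
        fn len(&self) -> usize {
            self.storage.len()
        }

        // Analogous to `Mmap::offset`: validate once, hand out a checked handle.
        fn offset(self: &Arc<Self>, offset: usize) -> Result<MmapOffset, String> {
            if offset >= self.len() {
                return Err(format!("offset {offset} out of bounds (len {})", self.len()));
            }
            Ok(MmapOffset {
                mmap: Arc::clone(self),
                offset,
            })
        }
    }

    // A reference to a mapping plus an offset that is in bounds by construction.
    #[derive(Clone, Debug)]
    struct MmapOffset {
        mmap: Arc<FakeMmap>,
        offset: usize,
    }

    impl MmapOffset {
        fn as_ptr(&self) -> *const u8 {
            // SAFETY: `offset` was checked against the mapping's length when
            // this handle was created, so the add stays inside the allocation.
            unsafe { self.mmap.storage.as_ptr().add(self.offset) }
        }
    }

    // The two shapes a linear memory's base can take after this patch.
    enum MemoryBase {
        Raw(*const u8),
        Mmap(MmapOffset),
    }

    impl MemoryBase {
        // Single pointer accessor over both variants, mirroring the patch's
        // `MemoryBase::as_mut_ptr`.
        fn as_ptr(&self) -> *const u8 {
            match self {
                MemoryBase::Raw(ptr) => *ptr,
                MemoryBase::Mmap(offset) => offset.as_ptr(),
            }
        }
    }

    fn main() {
        let mmap = Arc::new(FakeMmap { storage: vec![0u8; 4096] });

        // A checked, mmap-backed base...
        let base = MemoryBase::Mmap(mmap.offset(1024).expect("offset is in bounds"));
        println!("mmap-backed base: {:p}", base.as_ptr());

        // ...and a raw one, as used by e.g. malloc-backed memories.
        let raw = MemoryBase::Raw(mmap.storage.as_ptr());
        println!("raw base:         {:p}", raw.as_ptr());

        // Out-of-bounds offsets are rejected up front rather than becoming a
        // dangling pointer later.
        assert!(mmap.offset(4096).is_err());
    }

A side effect of this shape, visible in the sketch, is that an `MmapOffset`
holds an `Arc` to its mapping, so any derived location keeps the backing mmap
alive -- which is what lets follow-up work move more image-mapping code onto
`Mmap` itself.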