Revert "near-vm: use a recycling pool of shared code memories instead…
Browse files Browse the repository at this point in the history
… of a in-memory cache of loaded artifacts (#9244)" (#10788)

This reverts commit ad67e6b.
nagisa authored and posvyatokum committed Mar 14, 2024
1 parent cba6410 commit 1435100
Showing 19 changed files with 295 additions and 478 deletions.
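The reverted change had replaced the runtime's in-memory cache of loaded contract artifacts with a pre-allocated pool of reusable code memories; this revert restores the cache-based design visible in the near_vm_runner.rs diff below (the `MEM_CACHE` static). As a rough illustration of that restored shape — a process-wide map from contract cache key to compilation outcome, populated on first use — here is a minimal sketch using only the standard library. `Key`, `Artifact`, and `get_or_load` are hypothetical stand-ins; the real code uses a bounded `near_cache::SyncLruCache` (128 entries) rather than this unbounded map:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex, OnceLock};

type Key = [u8; 32]; // stand-in for near_primitives_core::hash::CryptoHash
type Artifact = Arc<Vec<u8>>; // stand-in for the loaded VMArtifact

// Process-wide cache, created on first use — the same shape as the restored
// `static MEM_CACHE: Lazy<SyncLruCache<CryptoHash, Result<VMArtifact, _>>>`,
// minus the LRU eviction that bounds the real cache.
fn mem_cache() -> &'static Mutex<HashMap<Key, Result<Artifact, String>>> {
    static CACHE: OnceLock<Mutex<HashMap<Key, Result<Artifact, String>>>> = OnceLock::new();
    CACHE.get_or_init(|| Mutex::new(HashMap::new()))
}

// Analogue of `MEM_CACHE.get_or_try_put(key, |_key| compile_or_read_from_cache())`:
// on a miss, run the fallible loader once and memoize its result. Compilation
// errors are memoized too, so invalid code is never re-parsed; I/O errors
// (the `io::Result` layer) are propagated and deliberately *not* cached.
fn get_or_load(
    key: Key,
    load: impl FnOnce() -> std::io::Result<Result<Artifact, String>>,
) -> std::io::Result<Result<Artifact, String>> {
    let mut cache = mem_cache().lock().unwrap();
    if let Some(hit) = cache.get(&key) {
        return Ok(hit.clone());
    }
    let outcome = load()?;
    cache.insert(key, outcome.clone());
    Ok(outcome)
}
```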
6 changes: 3 additions & 3 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 0 additions & 1 deletion Cargo.toml
@@ -293,7 +293,6 @@ runtime-tester = { path = "test-utils/runtime-tester" }
rusqlite = { version = "0.29.0", features = ["bundled", "chrono", "functions"] }
rustc-demangle = "0.1"
rust-s3 = { version = "0.32.3", features = ["blocking"] }
rustix = "0.37"
secp256k1 = { version = "0.27.0", features = ["recovery", "rand-std"] }
semver = "1.0.4"
serde = { version = "1.0.136", features = ["alloc", "derive", "rc"] }
3 changes: 0 additions & 3 deletions Justfile
@@ -56,8 +56,6 @@ nextest-integration TYPE *FLAGS:
nextest-integration TYPE *FLAGS:
@echo "Nextest integration tests are currently disabled on macos!"

<<<<<<< HEAD
=======
# check various build configurations compile as anticipated
check-non-default:
# Ensure that near-vm-runner always builds without default features enabled
@@ -82,7 +80,6 @@ check-cargo-deny:
check-themis:
env CARGO_TARGET_DIR="target/themis" cargo run --locked -p themis

>>>>>>> b03bfffa4 (do not have a globally-set warnings=deny, but set it locally in CI only (#10738))
# generate a codecov report for RULE
codecov RULE:
#!/usr/bin/env bash
1 change: 1 addition & 0 deletions runtime/near-vm-runner/Cargo.toml
@@ -40,6 +40,7 @@ wasm-encoder.workspace = true
wasmparser.workspace = true
wasmtime = { workspace = true, optional = true }

near-cache.workspace = true
near-crypto.workspace = true
near-primitives-core.workspace = true
near-parameters.workspace = true
140 changes: 69 additions & 71 deletions runtime/near-vm-runner/src/near_vm_runner.rs
@@ -17,7 +17,7 @@ use near_parameters::vm::VMKind;
use near_parameters::RuntimeFeesConfig;
use near_vm_compiler_singlepass::Singlepass;
use near_vm_engine::universal::{
LimitedMemoryPool, Universal, UniversalEngine, UniversalExecutable, UniversalExecutableRef,
Universal, UniversalEngine, UniversalExecutable, UniversalExecutableRef,
};
use near_vm_types::{FunctionIndex, InstanceConfig, MemoryType, Pages, WASM_PAGE_SIZE};
use near_vm_vm::{
@@ -26,7 +26,7 @@ use near_vm_vm::{
use std::borrow::Cow;
use std::hash::Hash;
use std::mem::size_of;
use std::sync::{Arc, OnceLock};
use std::sync::Arc;

#[derive(Clone)]
pub struct NearVmMemory(Arc<LinearMemory>);
@@ -241,38 +241,11 @@ impl NearVM {
compiler.set_9393_fix(!config.disable_9393_fix);
// We only support universal engine at the moment.
assert_eq!(VM_CONFIG.engine, NearVmEngine::Universal);

static CODE_MEMORY_POOL_CELL: OnceLock<LimitedMemoryPool> = OnceLock::new();
let code_memory_pool = CODE_MEMORY_POOL_CELL
.get_or_init(|| {
// FIXME: should have as many code memories as there are possible parallel
// invocations of the runtime… How do we determine that? Should we make it
// configurable for the node operators, perhaps, so that they can make an informed
// choice based on the amount of memory they have and shards they track? Should we
// actually use some sort of semaphore to enforce a parallelism limit?
//
// NB: 64MiB is a best guess as to what the maximum size a loaded artifact can
// plausibly be. This is not necessarily true – there may be WebAssembly
// instructions that expand by more than 4 times in terms of instruction size after
// a conversion to x86_64; in that case a re-allocation will occur and executing
// that particular function call will be slower. Not to mention there isn't a
// strong guarantee on the upper bound of the memory that the contract runtime may
// require.
LimitedMemoryPool::new(256, 1 * 1024 * 1024).unwrap_or_else(|e| {
panic!("could not pre-allocate resources for the runtime: {e}");
})
})
.clone();

let features =
crate::features::WasmFeatures::from(config.limit_config.contract_prepare_version);
Self {
config,
engine: Universal::new(compiler)
.target(target)
.features(features.into())
.code_memory_pool(code_memory_pool)
.engine(),
engine: Universal::new(compiler).target(target).features(features.into()).engine(),
}
}

@@ -346,58 +319,83 @@ impl NearVM {
code: &ContractCode,
cache: Option<&dyn CompiledContractCache>,
) -> VMResult<Result<VMArtifact, CompilationError>> {
// `cache` stores compiled machine code in the database
// A bit of a tricky logic ahead! We need to deal with two levels of
// caching:
// * `cache` stores compiled machine code in the database
// * `MEM_CACHE` below holds in-memory cache of loaded contracts
//
// Caches also cache _compilation_ errors, so that we don't have to
// re-parse invalid code (invalid code, in a sense, is a normal
// outcome). And `cache`, being a database, can fail with an `io::Error`.
let _span = tracing::debug_span!(target: "vm", "NearVM::compile_and_load").entered();
let key = get_contract_cache_key(code, &self.config);
let cache_record = cache
.map(|cache| cache.get(&key))
.transpose()
.map_err(CacheError::ReadError)?
.flatten();

let stored_artifact: Option<VMArtifact> = match cache_record {
None => None,
Some(CompiledContract::CompileModuleError(err)) => return Ok(Err(err)),
Some(CompiledContract::Code(serialized_module)) => {
let _span = tracing::debug_span!(target: "vm", "NearVM::read_from_cache").entered();
unsafe {
// (UN-)SAFETY: the `serialized_module` must have been produced by a prior call to
// `serialize`.
//
// In practice this is not necessarily true. One could have forgotten to change the
// cache key when upgrading the version of the near_vm library or the database could
// have had its data corrupted while at rest.
//
// There should definitely be some validation in near_vm to ensure we load what we think
// we load.
let executable = UniversalExecutableRef::deserialize(&serialized_module)
.map_err(|_| CacheError::DeserializationError)?;
let artifact = self

let compile_or_read_from_cache = || -> VMResult<Result<VMArtifact, CompilationError>> {
let _span =
tracing::debug_span!(target: "vm", "NearVM::compile_or_read_from_cache").entered();
let cache_record = cache
.map(|cache| cache.get(&key))
.transpose()
.map_err(CacheError::ReadError)?
.flatten();

let stored_artifact: Option<VMArtifact> = match cache_record {
None => None,
Some(CompiledContract::CompileModuleError(err)) => return Ok(Err(err)),
Some(CompiledContract::Code(serialized_module)) => {
let _span =
tracing::debug_span!(target: "vm", "NearVM::read_from_cache").entered();
unsafe {
// (UN-)SAFETY: the `serialized_module` must have been produced by a prior call to
// `serialize`.
//
// In practice this is not necessarily true. One could have forgotten to change the
// cache key when upgrading the version of the near_vm library or the database could
// have had its data corrupted while at rest.
//
// There should definitely be some validation in near_vm to ensure we load what we think
// we load.
let executable = UniversalExecutableRef::deserialize(&serialized_module)
.map_err(|_| CacheError::DeserializationError)?;
let artifact = self
.engine
.load_universal_executable_ref(&executable)
.map(Arc::new)
.map_err(|err| VMRunnerError::LoadingError(err.to_string()))?;
Some(artifact)
}
}
};

Ok(if let Some(it) = stored_artifact {
Ok(it)
} else {
match self.compile_and_cache(code, cache)? {
Ok(executable) => Ok(self
.engine
.load_universal_executable_ref(&executable)
.load_universal_executable(&executable)
.map(Arc::new)
.map_err(|err| VMRunnerError::LoadingError(err.to_string()))?;
Some(artifact)
.map_err(|err| VMRunnerError::LoadingError(err.to_string()))?),
Err(err) => Err(err),
}
}
})
};

Ok(if let Some(it) = stored_artifact {
Ok(it)
} else {
match self.compile_and_cache(code, cache)? {
Ok(executable) => Ok(self
.engine
.load_universal_executable(&executable)
.map(Arc::new)
.map_err(|err| VMRunnerError::LoadingError(err.to_string()))?),
Err(err) => Err(err),
}
})
#[cfg(feature = "no_cache")]
return compile_or_read_from_cache();

#[cfg(not(feature = "no_cache"))]
return {
static MEM_CACHE: once_cell::sync::Lazy<
near_cache::SyncLruCache<
near_primitives_core::hash::CryptoHash,
Result<VMArtifact, CompilationError>,
>,
> = once_cell::sync::Lazy::new(|| {
near_cache::SyncLruCache::new(128)
});
MEM_CACHE.get_or_try_put(key, |_key| compile_or_read_from_cache())
};
}

fn run_method(
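The restored `compile_and_load` thus layers two caches: the persistent `CompiledContractCache` (machine code and compilation errors in the database) underneath the in-memory `MEM_CACHE` of loaded artifacts. The decision flow of the inner `compile_or_read_from_cache` closure can be sketched as follows — `Record`, `Store`, `load`, and `compile` are hypothetical stand-ins for `CompiledContract`, the database cache, engine loading, and `compile_and_cache`, with error types collapsed to strings:

```rust
use std::collections::HashMap;

// Stand-in for `CompiledContract`: the DB caches either the serialized
// module or the compilation error, so both outcomes are replayable.
enum Record {
    CompileModuleError(String),
    Code(Vec<u8>),
}

struct Store(HashMap<u64, Record>); // stand-in for CompiledContractCache

fn load(serialized: &[u8]) -> Vec<u8> {
    // Stand-in for `UniversalExecutableRef::deserialize` + engine loading —
    // the `unsafe` path upstream, which trusts the bytes came from `serialize`.
    serialized.to_vec()
}

fn compile(code: &[u8]) -> Result<Vec<u8>, String> {
    // Stand-in for `compile_and_cache`; invalid input yields a cacheable error.
    if code.is_empty() { Err("empty module".into()) } else { Ok(code.to_vec()) }
}

fn compile_or_read_from_cache(
    store: &mut Store,
    key: u64,
    code: &[u8],
) -> Result<Vec<u8>, String> {
    match store.0.get(&key) {
        // Cached negative outcome: known-invalid code, never re-parsed.
        Some(Record::CompileModuleError(e)) => Err(e.clone()),
        // Cached module: deserialize and load, skipping compilation entirely.
        Some(Record::Code(serialized)) => Ok(load(serialized)),
        // Miss: compile once, persist the outcome (success *or* error), load.
        None => {
            let outcome = compile(code);
            store.0.insert(
                key,
                match &outcome {
                    Ok(bytes) => Record::Code(bytes.clone()),
                    Err(e) => Record::CompileModuleError(e.clone()),
                },
            );
            outcome.map(|bytes| load(&bytes))
        }
    }
}
```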
4 changes: 2 additions & 2 deletions runtime/near-vm/compiler/src/error.rs
@@ -40,8 +40,8 @@ pub enum CompileError {
Resource(String),

/// Cannot downcast the engine to a specific type.
#[error("data offset is out of bounds")]
InvalidOffset,
#[error("cannot downcast the engine to a specific type")]
EngineDowncast,
}

impl From<WasmError> for CompileError {
1 change: 0 additions & 1 deletion runtime/near-vm/engine/Cargo.toml
@@ -31,7 +31,6 @@ target-lexicon.workspace = true
thiserror.workspace = true
cfg-if.workspace = true
tracing.workspace = true
rustix = { workspace = true, features = ["param", "mm"] }

[badges]
maintenance = { status = "actively-developed" }
5 changes: 2 additions & 3 deletions runtime/near-vm/engine/src/universal/artifact.rs
@@ -20,8 +20,7 @@ use std::sync::Arc;
/// A compiled wasm module, containing everything necessary for instantiation.
pub struct UniversalArtifact {
// TODO: figure out how to allocate fewer distinct structures onto heap. Maybe have an arena…?
pub(crate) engine: super::UniversalEngine,
pub(crate) _code_memory: super::CodeMemory,
pub(crate) engine: crate::universal::UniversalEngine,
pub(crate) import_counts: ImportCounts,
pub(crate) start_function: Option<FunctionIndex>,
pub(crate) vmoffsets: VMOffsets,
@@ -48,7 +47,7 @@ impl UniversalArtifact {
}

/// Return the engine instance this artifact is loaded into.
pub fn engine(&self) -> &super::UniversalEngine {
pub fn engine(&self) -> &crate::universal::UniversalEngine {
&self.engine
}
}
22 changes: 4 additions & 18 deletions runtime/near-vm/engine/src/universal/builder.rs
@@ -7,7 +7,6 @@ pub struct Universal {
compiler_config: Option<Box<dyn CompilerConfig>>,
target: Option<Target>,
features: Option<Features>,
pool: Option<super::LimitedMemoryPool>,
}

impl Universal {
@@ -16,17 +15,12 @@
where
T: Into<Box<dyn CompilerConfig>>,
{
Self {
compiler_config: Some(compiler_config.into()),
target: None,
features: None,
pool: None,
}
Self { compiler_config: Some(compiler_config.into()), target: None, features: None }
}

/// Create a new headless Universal
pub fn headless() -> Self {
Self { compiler_config: None, target: None, features: None, pool: None }
Self { compiler_config: None, target: None, features: None }
}

/// Set the target
@@ -41,25 +35,17 @@
self
}

/// Set the pool of reusable code memory
pub fn code_memory_pool(mut self, pool: super::LimitedMemoryPool) -> Self {
self.pool = Some(pool);
self
}

/// Build the `UniversalEngine` for this configuration
pub fn engine(self) -> UniversalEngine {
let target = self.target.unwrap_or_default();
let pool =
self.pool.unwrap_or_else(|| panic!("Universal::code_memory_pool was not set up!"));
if let Some(compiler_config) = self.compiler_config {
let features = self
.features
.unwrap_or_else(|| compiler_config.default_features_for_target(&target));
let compiler = compiler_config.compiler();
UniversalEngine::new(compiler, target, features, pool)
UniversalEngine::new(compiler, target, features)
} else {
UniversalEngine::headless(pool)
UniversalEngine::headless()
}
}
}
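With the pool gone, building an engine no longer requires (or panics over) a `code_memory_pool` step, as `NearVM::new` in the runner diff above shows. A hedged usage sketch, assuming the near-vm crates as dependencies and a default-constructible `Singlepass` config:

```rust
use near_vm_compiler_singlepass::Singlepass;
use near_vm_engine::universal::{Universal, UniversalEngine};

// Engine construction after the revert: target and features fall back to
// builder defaults, and there is no `.code_memory_pool(...)` step any more.
fn build_engine() -> UniversalEngine {
    Universal::new(Singlepass::new()).engine()
}

// A headless engine (deserialize/load only, no compiler) likewise needs no pool.
fn build_headless() -> UniversalEngine {
    Universal::headless().engine()
}
```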
(Diffs for the remaining 10 changed files are not shown.)
