From 224ea25631df041875cb7859c4e764cf8c8e9f48 Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Fri, 4 Feb 2022 22:03:31 +0100 Subject: [PATCH 01/73] time cursor vm prototype --- massa-execution/src/lib.rs | 1 + massa-execution/src/vm_new.rs | 436 ++++++++++++++++++++++++++++++++++ 2 files changed, 437 insertions(+) create mode 100644 massa-execution/src/vm_new.rs diff --git a/massa-execution/src/lib.rs b/massa-execution/src/lib.rs index 1e660cdbf2b..846c0af3920 100644 --- a/massa-execution/src/lib.rs +++ b/massa-execution/src/lib.rs @@ -8,6 +8,7 @@ mod interface_impl; mod sce_ledger; mod types; mod vm; +mod vm_new; mod worker; pub use config::{ExecutionConfigs, ExecutionSettings}; diff --git a/massa-execution/src/vm_new.rs b/massa-execution/src/vm_new.rs new file mode 100644 index 00000000000..0aedc48cace --- /dev/null +++ b/massa-execution/src/vm_new.rs @@ -0,0 +1,436 @@ +use massa_models::{ + timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, + Slot, +}; +use massa_time::MassaTime; +use std::{ + collections::{HashMap, VecDeque}, + sync::{Arc, Condvar, Mutex}, +}; +use tracing::info; + +use massa_models::BlockId; + +pub struct VMConfig { + thread_count: u8, + cursor_delay: MassaTime, + clock_compensation: i64, + genesis_timestamp: MassaTime, + t0: MassaTime, +} + +pub struct VMBootstrapData {} + +#[derive(Default)] +pub struct VMInputData { + stop: bool, + blockclique_changed: bool, + finalized_blocks: HashMap, + blockclique: HashMap, +} + +pub struct VMController { + loop_cv: Condvar, + input_data: Mutex, +} + +pub struct VMManager { + controller: Arc, + thread_handle: std::thread::JoinHandle<()>, +} + +impl VMManager { + pub fn stop(self) { + info!("stopping VM controller..."); + { + let mut input_wlock = self + .controller + .input_data + .lock() + .expect("could not w-lock VM input data"); + input_wlock.stop = true; + input_wlock.blockclique_changed = true; + self.controller.loop_cv.notify_one(); + } + self.thread_handle + .join() + .expect("VM controller thread panicked"); + info!("VM controller stopped"); + } + + pub fn get_controller(&self) -> Arc { + self.controller.clone() + } +} + +pub fn start_vm(config: VMConfig, bootstrap: Option) -> VMManager { + let controller = Arc::new(VMController { + loop_cv: Condvar::new(), + input_data: Mutex::new(VMInputData { + blockclique_changed: true, + ..Default::default() + }), + }); + + let ctl = controller.clone(); + let thread_handle = std::thread::spawn(move || { + VMThread::new(config, ctl, bootstrap).main_loop(); + }); + + VMManager { + controller, + thread_handle, + } +} + +struct ExecutionOutput { + slot: Slot, + block_id: Option, + //TODO ledger_changes + //TODO event_store +} + +struct VMThread { + // VM config + config: VMConfig, + // VM data exchange controller + controller: Arc, + // map of SCE-final blocks not executed yet + sce_finals: HashMap>, + // last SCE final slot in sce_finals list + last_sce_final: Slot, + // map of CSS-final but non-SCE-final blocks + remaining_css_finals: HashMap, + // last blockclique + blockclique: HashMap, + // map of active slots + active_slots: HashMap>, + // highest active slot + last_active_slot: Slot, + // final execution cursor + final_cursor: Slot, + // active execution cursor + active_cursor: Slot, + // execution output history + execution_history: VecDeque, +} + +impl VMThread { + fn new( + config: VMConfig, + controller: Arc, + _bootstrap: Option, + ) -> Self { + // TODO bootstrap + VMThread { + controller, + sce_finals: Default::default(), + last_sce_final: 
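+            // no bootstrap data yet: SCE finality starts at the last genesis slot,
+            // i.e. period 0 in the highest thread index: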
Slot::new(0, config.thread_count.saturating_sub(1)), + remaining_css_finals: Default::default(), + config, + } + } + + /// reads the list of newly finalized blocks and the new blockclique, if there was a change + /// if found, remove from input queue + fn consume_input(&mut self) -> VMInputData { + std::mem::take( + &mut self + .controller + .input_data + .lock() + .expect("VM input data lock failed"), + ) + } + + /// update final slots + fn update_final_slots(&mut self, new_css_finals: HashMap) { + // return if empty + if new_css_finals.is_empty() { + return; + } + + // add them to pending css finals + self.remaining_css_finals.extend(new_css_finals); + + // get maximal css-final slot + let max_css_final_slot = self + .remaining_css_finals + .iter() + .max_by_key(|(s, _b_id)| *s) + .map(|(s, _b_id)| *s) + .expect("expected remaining_css_finals to be non-empty"); + + // detect SCE-final slots + let mut slot = self.last_sce_final; + while slot < max_css_final_slot { + slot = slot + .get_next_slot(self.config.thread_count) + .expect("final slot overflow in VM"); + + // pop slot from remaining CSS finals + if let Some(block_id) = self.remaining_css_finals.remove(&slot) { + // CSS-final block found at slot: add block to to sce_finals + self.sce_finals.insert(slot, Some(block_id)); + self.last_sce_final = slot; + // continue the loop + continue; + } + + // no CSS-final block found: it's a miss + + // check if the miss is final + let mut miss_final = false; + let mut search_slot = slot; + while search_slot < max_css_final_slot { + search_slot = search_slot + .get_next_slot(self.config.thread_count) + .expect("final slot overflow in VM"); + if self.remaining_css_finals.contains_key(&search_slot) { + miss_final = true; + break; + } + } + + if miss_final { + // if the miss is final, set slot to be a final miss + self.sce_finals.insert(slot, None); + self.last_sce_final = slot; + } else { + // otherwise, this slot is not final => break + break; + } + } + } + + /// returns the end active slot (if any yet) + /// this is the slot at which the cursor ends and it depends on the cursor_delay setting + fn get_end_active_slot(&self) -> Option { + let target_time = MassaTime::compensated_now(self.config.clock_compensation) + .expect("could not read current time") + .saturating_sub(self.config.cursor_delay); + get_latest_block_slot_at_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + target_time, + ) + .expect("could not get current slot") + } + + /// update active slot sequence + fn update_active_slots(&mut self, new_blockclique: Option>) { + // update blockclique if changed + if let Some(blockclique) = new_blockclique { + self.blockclique = blockclique; + } + + // get last active slot, if any + let current_active_slot = self.get_end_active_slot(); + + // reset active slots + self.active_slots = HashMap::new(); + self.last_active_slot = self.last_sce_final; + + // if no active slot yet => keep the active_slots empty + let current_active_slot = match current_active_slot { + Some(s) => s, + None => return, + }; + + // recompute non-SCE-final slot sequence + let mut slot = self.last_sce_final; + while slot < current_active_slot { + slot = slot + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + if let Some(block_id) = self.remaining_css_finals.get(&slot) { + // found in remaining_css_finals + self.active_slots.insert(slot, Some(*block_id)); + } else if let Some(block_id) = self.blockclique.get(&slot) { + // found in blockclique + 
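+                // (CSS-final blocks were checked first above, so they take precedence
+                // over blockclique blocks at the same slot)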
self.active_slots.insert(slot, Some(*block_id)); + } else { + // miss + self.active_slots.insert(slot, None); + } + self.last_active_slot = slot; + } + } + + /// executes one final slot, if any + /// returns true if something was executed + fn execute_one_final_slot(&mut self) -> bool { + // check if there are final slots to execute + if self.sce_finals.is_empty() { + return false; + } + + // get the next one + let slot = self + .final_cursor + .get_next_slot(self.config.thread_count) + .expect("final slot overflow in VM"); + let block_id = *self + .sce_finals + .get(&slot) + .expect("the SCE final slot list skipped a slot"); + + // update final cursor + self.final_cursor = slot; + + // check if the final slot is cached at the front of the speculative execution history + if let Some(exec_out) = self.execution_history.pop_front() { + if exec_out.slot == slot && exec_out.block_id == block_id { + // speculative execution front result matches what we wnat to compute + + // TODO apply exec_out to final state + + return true; + } + } + + // speculative cache mismatch + + // clear the speculative execution output cache completely + self.execution_history.clear(); + self.active_cursor = self.final_cursor; + + // TODO execute + // TODO apply exec_out to final state + + return true; + } + + /// truncates active slots at the fitst mismatch + /// between the active execution output history and the planned active_slots + fn truncate_history(&mut self) { + // find mismatch point (included) + let mut truncate_at = None; + for (hist_index, exec_output) in self.execution_history.iter().enumerate() { + if self.active_slots.get(&exec_output.slot) == Some(&exec_output.block_id) { + continue; + } + truncate_at = Some(hist_index); + break; + } + + // truncate speculative execution output history + if let Some(truncate_at) = truncate_at { + self.execution_history.truncate(truncate_at); + self.active_cursor = self + .execution_history + .back() + .map_or(self.final_cursor, |out| out.slot); + } + } + + /// executes one active slot, if any + /// returns true if something was executed + fn execute_one_active_slot(&mut self) -> bool { + // get the next active slot + let slot = self + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + let to_execute = match self.active_slots.get(&slot) { + Some(b) => b, + None => return false, + }; + + // update active cursor + self.active_cursor = slot; + + // TODO execute + + // TODO push_back result into history + + return true; + } + + /// main VM loop + fn main_loop(&mut self) { + loop { + // read input queues + let input_data = self.consume_input(); + + // check for stop signal + if input_data.stop { + break; + } + + // update execution sequences + if input_data.blockclique_changed { + // changes detected in input + + // update final slot sequence + self.update_final_slots(input_data.finalized_blocks); + + // update active slot sequence + self.update_active_slots(Some(input_data.blockclique)); + } + + // execute one final slot, if any + if self.execute_one_final_slot() { + // a final slot was executed: continue + continue; + } + + // now all final slots have been executed + + // if the blockclique was not updated, still fill up active slots with misses until now() + if !input_data.blockclique_changed { + self.update_active_slots(None); + } + + // truncate the speculative execution outputs if necessary + if input_data.blockclique_changed { + self.truncate_history(); + } + + // speculatively execute one active slot, if any + if 
self.execute_one_active_slot() { + // an active slot was executed: continue + continue; + } + + // TODO execute readonly requests + + // compute when the next slot is + let delay_until_next_slot = { + let next_slot = self + .last_active_slot + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + let next_timestmap = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + next_slot, + ) + .expect("could not compute block timestmap in VM"); + let now = MassaTime::compensated_now(self.config.clock_compensation) + .expect("could not get current time in VM"); + next_timestmap.saturating_sub(now) + }; + if delay_until_next_slot == 0.into() { + // next slot is right now + continue; + } + + // check if data changed during the iteration + let ctl = self.controller; + let input_data = ctl.input_data.lock().expect("could not lock VM input data"); + if input_data.stop { + break; + } + if input_data.blockclique_changed { + continue; + } + let _ = ctl + .loop_cv + .wait_timeout(input_data, delay_until_next_slot.to_duration()) + .expect("VM main loop condition variable wait failed"); + } + } +} From 0bf8bf87b3ab10a25fcf569c54b5714d2f4d2388 Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Fri, 4 Feb 2022 22:07:36 +0100 Subject: [PATCH 02/73] move get_time_until_next_active_slot to a function --- massa-execution/src/vm_new.rs | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/massa-execution/src/vm_new.rs b/massa-execution/src/vm_new.rs index 0aedc48cace..7721309be06 100644 --- a/massa-execution/src/vm_new.rs +++ b/massa-execution/src/vm_new.rs @@ -348,6 +348,24 @@ impl VMThread { return true; } + /// gets the time until the next active slot (saturates down to 0) + fn get_time_until_next_active_slot(&self) -> MassaTime { + let next_slot = self + .last_active_slot + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + let next_timestmap = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + next_slot, + ) + .expect("could not compute block timestmap in VM"); + let now = MassaTime::compensated_now(self.config.clock_compensation) + .expect("could not get current time in VM"); + next_timestmap.saturating_sub(now) + } + /// main VM loop fn main_loop(&mut self) { loop { @@ -397,22 +415,7 @@ impl VMThread { // TODO execute readonly requests // compute when the next slot is - let delay_until_next_slot = { - let next_slot = self - .last_active_slot - .get_next_slot(self.config.thread_count) - .expect("active slot overflow in VM"); - let next_timestmap = get_block_slot_timestamp( - self.config.thread_count, - self.config.t0, - self.config.genesis_timestamp, - next_slot, - ) - .expect("could not compute block timestmap in VM"); - let now = MassaTime::compensated_now(self.config.clock_compensation) - .expect("could not get current time in VM"); - next_timestmap.saturating_sub(now) - }; + let delay_until_next_slot = self.get_time_until_next_active_slot(); if delay_until_next_slot == 0.into() { // next slot is right now continue; From f1d3e9b256ec66d12ae80a6dd24dfad0f5df1924 Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Fri, 4 Feb 2022 22:12:00 +0100 Subject: [PATCH 03/73] improve timing system --- massa-execution/src/vm_new.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/massa-execution/src/vm_new.rs b/massa-execution/src/vm_new.rs index 
7721309be06..4bcbe553646 100644 --- a/massa-execution/src/vm_new.rs +++ b/massa-execution/src/vm_new.rs @@ -414,13 +414,6 @@ impl VMThread { // TODO execute readonly requests - // compute when the next slot is - let delay_until_next_slot = self.get_time_until_next_active_slot(); - if delay_until_next_slot == 0.into() { - // next slot is right now - continue; - } - // check if data changed during the iteration let ctl = self.controller; let input_data = ctl.input_data.lock().expect("could not lock VM input data"); @@ -430,6 +423,15 @@ impl VMThread { if input_data.blockclique_changed { continue; } + + // compute when the next slot is + let delay_until_next_slot = self.get_time_until_next_active_slot(); + if delay_until_next_slot == 0.into() { + // next slot is right now + continue; + } + + // wait for change or for next slot let _ = ctl .loop_cv .wait_timeout(input_data, delay_until_next_slot.to_duration()) From 1aafc857b02427271b3db8aa3e2a272afb5c13f4 Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Mon, 7 Feb 2022 07:55:04 +0100 Subject: [PATCH 04/73] minor changes --- massa-execution/src/vm_new.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/massa-execution/src/vm_new.rs b/massa-execution/src/vm_new.rs index 4bcbe553646..d4cbb8f64ee 100644 --- a/massa-execution/src/vm_new.rs +++ b/massa-execution/src/vm_new.rs @@ -1,3 +1,4 @@ +use crate::types::ExecutionContext; use massa_models::{ timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, Slot, @@ -84,12 +85,16 @@ pub fn start_vm(config: VMConfig, bootstrap: Option) -> VMManag } struct ExecutionOutput { + // slot slot: Slot, + // optional block ID at that slot (None if miss) block_id: Option, - //TODO ledger_changes + // ledger_changes caused by the execution step + //TODO event_store } +/// structure gathering all elements needed by the VM thread struct VMThread { // VM config config: VMConfig, @@ -113,13 +118,17 @@ struct VMThread { active_cursor: Slot, // execution output history execution_history: VecDeque, + /// execution context + execution_context: Arc>, + /// final events + final_events: EventStore, } impl VMThread { fn new( config: VMConfig, controller: Arc, - _bootstrap: Option, + bootstrap: Option, ) -> Self { // TODO bootstrap VMThread { @@ -413,6 +422,7 @@ impl VMThread { } // TODO execute readonly requests + // must be done in this loop because of the static shared context // check if data changed during the iteration let ctl = self.controller; From 182f7943e39c8d4047fa1d48a42bcf58d7752a99 Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Mon, 7 Feb 2022 20:11:29 +0100 Subject: [PATCH 05/73] update ledger --- Cargo.lock | 23 ++++ Cargo.toml | 1 + massa-ledger/Cargo.toml | 27 ++++ massa-ledger/src/config.rs | 9 ++ massa-ledger/src/error.rs | 11 ++ massa-ledger/src/ledger.rs | 227 ++++++++++++++++++++++++++++++++++ massa-ledger/src/lib.rs | 17 +++ massa-ledger/src/tests/mod.rs | 1 + 8 files changed, 316 insertions(+) create mode 100644 massa-ledger/Cargo.toml create mode 100644 massa-ledger/src/config.rs create mode 100644 massa-ledger/src/error.rs create mode 100644 massa-ledger/src/ledger.rs create mode 100644 massa-ledger/src/lib.rs create mode 100644 massa-ledger/src/tests/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 7222878c046..7dcad5d3128 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1535,6 +1535,29 @@ dependencies = [ "thiserror", ] +[[package]] +name = "massa_ledger" +version = "0.1.0" +dependencies = [ + "displaydoc", + "futures 0.3.19", + 
"lazy_static", + "massa_hash", + "massa_logging", + "massa_models", + "massa_protocol_exports", + "massa_signature", + "massa_time", + "num", + "pretty_assertions", + "serde 1.0.134", + "serde_json", + "serial_test", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "massa_logging" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 4e3fe3ba056..8da9ff0d3bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,4 +16,5 @@ members = [ "massa-signature", "massa-time", "massa-wallet", + "massa-ledger" ] diff --git a/massa-ledger/Cargo.toml b/massa-ledger/Cargo.toml new file mode 100644 index 00000000000..46deff43f0a --- /dev/null +++ b/massa-ledger/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "massa_ledger" +version = "0.1.0" +authors = ["Massa Labs "] +edition = "2021" + +[dependencies] +displaydoc = "0.2" +futures = "0.3" +lazy_static = "1.4.0" +num = "0.4" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +thiserror = "1.0" +tokio = { version = "1.11", features = ["full"] } +tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } +# custom modules +massa_hash = { path = "../massa-hash" } +massa_logging = { path = "../massa-logging" } +massa_models = { path = "../massa-models" } +massa_protocol_exports = { path = "../massa-protocol-exports" } +massa_signature = { path = "../massa-signature" } +massa_time = { path = "../massa-time" } + +[dev-dependencies] +pretty_assertions = "1.0" +serial_test = "0.5" diff --git a/massa-ledger/src/config.rs b/massa-ledger/src/config.rs new file mode 100644 index 00000000000..aeddcd8b15a --- /dev/null +++ b/massa-ledger/src/config.rs @@ -0,0 +1,9 @@ +// Copyright (c) 2021 MASSA LABS + +use serde::{Deserialize, Serialize}; + +/// Ledger configuration +#[derive(Debug, Deserialize, Serialize, Clone, Copy)] +pub struct LedgerConfig { + pub final_history_length: usize, +} diff --git a/massa-ledger/src/error.rs b/massa-ledger/src/error.rs new file mode 100644 index 00000000000..0fbc5f55d55 --- /dev/null +++ b/massa-ledger/src/error.rs @@ -0,0 +1,11 @@ +// Copyright (c) 2021 MASSA LABS + +use displaydoc::Display; +use thiserror::Error; + +#[non_exhaustive] +#[derive(Display, Error, Debug)] +pub enum LedgerError { + /// there was an inconsistency between containers + ContainerInconsistency(String), +} diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs new file mode 100644 index 00000000000..cc6f9a94af6 --- /dev/null +++ b/massa-ledger/src/ledger.rs @@ -0,0 +1,227 @@ +use massa_hash::hash::Hash; +use massa_models::{prehash::Map, Address, Amount, Slot}; +use std::collections::{hash_map, BTreeMap, HashMap, VecDeque}; + +use crate::LedgerConfig; + +#[derive(Default, Debug, Clone)] +pub struct LedgerEntry { + pub roll_count: u64, + pub sequential_balance: Amount, + pub parallel_balance: Amount, + pub bytecode: Vec, + pub datastore: BTreeMap>, + pub files: BTreeMap>, +} + +impl LedgerEntry { + /// applies a LedgerEntryUpdate + pub fn apply_update(&mut self, update: LedgerEntryUpdate) { + if let SetOrKeep::Set(v) = update.roll_count { + self.roll_count = v; + } + if let SetOrKeep::Set(v) = update.sequential_balance { + self.sequential_balance = v; + } + if let SetOrKeep::Set(v) = update.parallel_balance { + self.parallel_balance = v; + } + if let SetOrKeep::Set(v) = update.bytecode { + self.bytecode = v; + } + for (key, value_update) in update.datastore { + match value_update { + SetOrDelete::Set(v) => { + self.datastore.insert(key, v); + } + SetOrDelete::Delete => { + 
self.datastore.remove(&key); + } + } + } + for (key, value_update) in update.files { + match value_update { + SetOrDelete::Set(v) => { + self.files.insert(key, v); + } + SetOrDelete::Delete => { + self.files.remove(&key); + } + } + } + } +} + +/// represents a set/update/delete change +#[derive(Debug, Clone)] +pub enum SetUpdateOrDelete { + /// sets a new absolute value T + Set(T), + /// applies an update V to an existing value + Update(V), + /// deletes a value + Delete, +} + +/// represents a set/delete change +#[derive(Debug, Clone)] +pub enum SetOrDelete { + /// sets a new absolute value T + Set(T), + /// deletes a value + Delete, +} + +/// represents a set/keep change +#[derive(Debug, Clone)] +pub enum SetOrKeep { + /// sets a new absolute value T + Set(T), + /// keeps the existing value + Keep, +} + +impl Default for SetOrKeep { + fn default() -> Self { + SetOrKeep::Keep + } +} + +#[derive(Default, Debug, Clone)] +pub struct LedgerEntryUpdate { + roll_count: SetOrKeep, + sequential_balance: SetOrKeep, + parallel_balance: SetOrKeep, + bytecode: SetOrKeep>, + datastore: Map>>, + files: HashMap>>, +} + +impl LedgerEntryUpdate { + /// extends the LedgerEntryUpdate with another one + pub fn apply_update(&mut self, update: LedgerEntryUpdate) { + if let v @ SetOrKeep::Set(..) = update.roll_count { + self.roll_count = v; + } + if let v @ SetOrKeep::Set(..) = update.sequential_balance { + self.sequential_balance = v; + } + if let v @ SetOrKeep::Set(..) = update.parallel_balance { + self.parallel_balance = v; + } + if let v @ SetOrKeep::Set(..) = update.bytecode { + self.bytecode = v; + } + self.datastore.extend(update.datastore); + self.files.extend(update.files); + } +} + +/// represents a list of changes to ledger entries +#[derive(Default, Debug, Clone)] +pub struct LedgerChanges(Map>); + +impl LedgerChanges { + /// extends teh current LedgerChanges with another one + pub fn apply_changes(&mut self, changes: LedgerChanges) { + // iterate over all incoming changes + for (addr, change) in changes.0 { + match change { + SetUpdateOrDelete::Set(new_entry) => { + // sets an entry to an absolute value, overriding any previous change + self.0.insert(addr, SetUpdateOrDelete::Set(new_entry)); + } + SetUpdateOrDelete::Update(entry_update) => match self.0.entry(addr) { + // applies incoming updates to an entry + hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { + // the entry was already being changed + SetUpdateOrDelete::Set(cur) => { + // the entry was already being set to an absolute value + // apply the incoming updates to that absolute value + cur.apply_update(entry_update); + } + SetUpdateOrDelete::Update(cur) => { + // the entry was already being updated + // extend the existing update with the incoming ones + cur.apply_update(entry_update); + } + cur @ SetUpdateOrDelete::Delete => { + // the entry was being deleted + // set the entry to a default absolute value to which the incoming updates are applied + let mut res_entry = LedgerEntry::default(); + res_entry.apply_update(entry_update); + *cur = SetUpdateOrDelete::Set(res_entry); + } + }, + hash_map::Entry::Vacant(vac) => { + // the entry was not being changesd: simply add the incoming update + vac.insert(SetUpdateOrDelete::Update(entry_update)); + } + }, + SetUpdateOrDelete::Delete => { + // deletes an entry, overriding any previous change + self.0.insert(addr, SetUpdateOrDelete::Delete); + } + } + } + } +} + +/// represents a final ledger +pub struct FinalLedger { + /// ledger config + config: LedgerConfig, + /// slot at which the 
final ledger is computed + pub slot: Slot, + /// sorted ledger tree + /// TODO put it on the hard drive as it can reach 1TB + pub sorted_ledger: BTreeMap, + /// history of recent final ledger changes + /// front = oldest, back = newest + changes_history: VecDeque<(Slot, LedgerChanges)>, +} + +impl FinalLedger { + /// applies LedgerChanges to the final ledger + pub fn apply_changes(&mut self, slot: Slot, changes: LedgerChanges) { + // if the slot is in the past: ignore + if slot <= self.slot { + return; + } + + // update the slot + self.slot = slot; + + // update and prune changes history + self.changes_history.push_back((slot, changes.clone())); + while self.changes_history.len() > self.config.final_history_length { + self.changes_history.pop_front(); + } + + // for all incoming changes + for (addr, change) in changes.0 { + match change { + SetUpdateOrDelete::Set(new_entry) => { + // inserts/overwrites the entry with an incoming absolute value + self.sorted_ledger.insert(addr, new_entry); + } + SetUpdateOrDelete::Update(entry_update) => { + // applies updates to an entry + // if the entry does not exist, inserts a default one and applies the updates to it + self.sorted_ledger + .entry(addr) + .or_insert_with(|| Default::default()) + .apply_update(entry_update); + } + SetUpdateOrDelete::Delete => { + // deletes an entry, if it exists + self.sorted_ledger.remove(&addr); + } + } + } + } +} + +/* + TODO how to evaluate the storage costs of a ledger change ? +*/ diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs new file mode 100644 index 00000000000..751666acbb3 --- /dev/null +++ b/massa-ledger/src/lib.rs @@ -0,0 +1,17 @@ +// Copyright (c) 2021 MASSA LABS + +#![feature(map_first_last)] +#![feature(async_closure)] + +#[macro_use] +extern crate massa_logging; + +pub use config::LedgerConfig; +pub use error::LedgerError; + +mod config; +mod error; +mod ledger; + +#[cfg(test)] +mod tests; diff --git a/massa-ledger/src/tests/mod.rs b/massa-ledger/src/tests/mod.rs new file mode 100644 index 00000000000..00e9f959513 --- /dev/null +++ b/massa-ledger/src/tests/mod.rs @@ -0,0 +1 @@ +// Copyright (c) 2021 MASSA LABS From be9de1122637444c0e6f21d6d5a19b533781a40e Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Tue, 8 Feb 2022 17:31:59 +0100 Subject: [PATCH 06/73] time cursor updates --- Cargo.lock | 1 + massa-execution/Cargo.toml | 1 + massa-execution/src/lib.rs | 1 + massa-execution/src/speculative_ledger.rs | 38 ++++ massa-execution/src/vm_new.rs | 55 +++-- massa-ledger/src/ledger.rs | 245 ++++++++++++++-------- massa-ledger/src/lib.rs | 4 + 7 files changed, 237 insertions(+), 108 deletions(-) create mode 100644 massa-execution/src/speculative_ledger.rs diff --git a/Cargo.lock b/Cargo.lock index 7dcad5d3128..1039d082155 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1503,6 +1503,7 @@ dependencies = [ "lazy_static", "massa-sc-runtime", "massa_hash", + "massa_ledger", "massa_models", "massa_signature", "massa_time", diff --git a/massa-execution/Cargo.toml b/massa-execution/Cargo.toml index 035d0e14832..9d1e41be1d0 100644 --- a/massa-execution/Cargo.toml +++ b/massa-execution/Cargo.toml @@ -23,6 +23,7 @@ rand = "0.8" rand_xoshiro = "0.6" lazy_static = "1.4.0" massa_time = { path = "../massa-time" } +massa_ledger = { path = "../massa-ledger" } [dev-dependencies] serial_test = "0.5" diff --git a/massa-execution/src/lib.rs b/massa-execution/src/lib.rs index 846c0af3920..9c0df23a91d 100644 --- a/massa-execution/src/lib.rs +++ b/massa-execution/src/lib.rs @@ -6,6 +6,7 @@ mod error; mod 
exports; mod interface_impl; mod sce_ledger; +mod speculative_ledger; mod types; mod vm; mod vm_new; diff --git a/massa-execution/src/speculative_ledger.rs b/massa-execution/src/speculative_ledger.rs new file mode 100644 index 00000000000..966dcbfd5d5 --- /dev/null +++ b/massa-execution/src/speculative_ledger.rs @@ -0,0 +1,38 @@ +use std::sync::{Arc, RwLock}; + +use massa_ledger::{FinalLedger, LedgerChanges, LedgerEntryUpdate, SetOrKeep, SetUpdateOrDelete}; +use massa_models::{Address, Amount}; + +/// represents a speculative ledger state combining +/// data from the final ledger, previous speculative changes, +/// and accumulated changes since the construction of the object +pub struct SpeculativeLedger { + /// accumulation of previous changes + previous_changes: LedgerChanges, + + /// list of added changes + pub added_changes: LedgerChanges, +} + +impl SpeculativeLedger { + /// creates a new SpeculativeLedger + pub fn new(previous_changes: LedgerChanges) -> Self { + SpeculativeLedger { + previous_changes, + added_changes: Default::default(), + } + } + + /// gets the sequential balance of an address + pub fn get_sequential_balance( + &self, + addr: &Address, + final_ledger: &FinalLedger, + ) -> Option { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.get_sequential_balance_or_else(addr, || { + self.previous_changes + .get_sequential_balance_or_else(addr, || final_ledger.get_sequential_balance(addr)) + }) + } +} diff --git a/massa-execution/src/vm_new.rs b/massa-execution/src/vm_new.rs index d4cbb8f64ee..0f765f6b48d 100644 --- a/massa-execution/src/vm_new.rs +++ b/massa-execution/src/vm_new.rs @@ -1,4 +1,6 @@ -use crate::types::ExecutionContext; +use crate::speculative_ledger::SpeculativeLedger; +use massa_ledger::{FinalLedger, LedgerChanges, LedgerEntry, LedgerEntryUpdate}; +use massa_models::BlockId; use massa_models::{ timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, Slot, @@ -6,12 +8,10 @@ use massa_models::{ use massa_time::MassaTime; use std::{ collections::{HashMap, VecDeque}, - sync::{Arc, Condvar, Mutex}, + sync::{Arc, Condvar, Mutex, RwLock}, }; use tracing::info; -use massa_models::BlockId; - pub struct VMConfig { thread_count: u8, cursor_delay: MassaTime, @@ -19,9 +19,7 @@ pub struct VMConfig { genesis_timestamp: MassaTime, t0: MassaTime, } - -pub struct VMBootstrapData {} - +/// structure used to communicate with the VM thread #[derive(Default)] pub struct VMInputData { stop: bool, @@ -30,11 +28,13 @@ pub struct VMInputData { blockclique: HashMap, } +/// VM controller pub struct VMController { loop_cv: Condvar, input_data: Mutex, } +/// VM manager pub struct VMManager { controller: Arc, thread_handle: std::thread::JoinHandle<()>, @@ -90,14 +90,17 @@ struct ExecutionOutput { // optional block ID at that slot (None if miss) block_id: Option, // ledger_changes caused by the execution step - - //TODO event_store + ledger_changes: LedgerChanges, + // events emitted by the execution step + //TODO events: EventStore } /// structure gathering all elements needed by the VM thread struct VMThread { // VM config config: VMConfig, + // Final ledger + final_ledger: Arc>, // VM data exchange controller controller: Arc, // map of SCE-final blocks not executed yet @@ -118,24 +121,46 @@ struct VMThread { active_cursor: Slot, // execution output history execution_history: VecDeque, - /// execution context + // execution context execution_context: Arc>, - /// final events - final_events: EventStore, + // final events + 
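+    // (left commented out for now: event storage is still a TODO in this prototype)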
// final_events: EventStore, +} + +pub(crate) struct ExecutionContext { + speculative_ledger: SpeculativeLedger, } impl VMThread { fn new( config: VMConfig, controller: Arc, - bootstrap: Option, + final_ledger: Arc>, ) -> Self { - // TODO bootstrap + let final_slot = final_ledger + .read() + .expect("could not R-lock final ledger in VM thread creation") + .slot; + let execution_context = Arc::new(Mutex::new(ExecutionContext { + speculative_ledger: SpeculativeLedger { + final_ledger: final_ledger.clone(), + all_changes: Default::default(), + added_changes: Default::default(), + }, + })); VMThread { + final_ledger, + last_active_slot: final_slot, + final_cursor: final_slot, + active_cursor: final_slot, controller, + last_sce_final: final_slot, + execution_context, sce_finals: Default::default(), - last_sce_final: Slot::new(0, config.thread_count.saturating_sub(1)), remaining_css_finals: Default::default(), + blockclique: Default::default(), + active_slots: Default::default(), + execution_history: Default::default(), config, } } diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index cc6f9a94af6..382b46d2324 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -1,9 +1,16 @@ +// Copyright (c) 2021 MASSA LABS + +use crate::LedgerConfig; use massa_hash::hash::Hash; use massa_models::{prehash::Map, Address, Amount, Slot}; use std::collections::{hash_map, BTreeMap, HashMap, VecDeque}; -use crate::LedgerConfig; +/// represents a structure that supports another one being applied to it +pub trait Applicable { + fn apply(&mut self, _: V); +} +/// structure defining a ledger entry #[derive(Default, Debug, Clone)] pub struct LedgerEntry { pub roll_count: u64, @@ -14,21 +21,16 @@ pub struct LedgerEntry { pub files: BTreeMap>, } -impl LedgerEntry { +/// LedgerEntryUpdate can be applied to a LedgerEntry +impl Applicable for LedgerEntry { /// applies a LedgerEntryUpdate - pub fn apply_update(&mut self, update: LedgerEntryUpdate) { - if let SetOrKeep::Set(v) = update.roll_count { - self.roll_count = v; - } - if let SetOrKeep::Set(v) = update.sequential_balance { - self.sequential_balance = v; - } - if let SetOrKeep::Set(v) = update.parallel_balance { - self.parallel_balance = v; - } - if let SetOrKeep::Set(v) = update.bytecode { - self.bytecode = v; - } + fn apply(&mut self, update: LedgerEntryUpdate) { + update.roll_count.apply_to(&mut self.roll_count); + update + .sequential_balance + .apply_to(&mut self.sequential_balance); + update.parallel_balance.apply_to(&mut self.parallel_balance); + update.bytecode.apply_to(&mut self.bytecode); for (key, value_update) in update.datastore { match value_update { SetOrDelete::Set(v) => { @@ -54,7 +56,7 @@ impl LedgerEntry { /// represents a set/update/delete change #[derive(Debug, Clone)] -pub enum SetUpdateOrDelete { +pub enum SetUpdateOrDelete, V: Applicable> { /// sets a new absolute value T Set(T), /// applies an update V to an existing value @@ -63,30 +65,90 @@ pub enum SetUpdateOrDelete { Delete, } +/// supports applying another SetUpdateOrDelete to self +impl, V: Applicable> Applicable> + for SetUpdateOrDelete +{ + fn apply(&mut self, other: SetUpdateOrDelete) { + match other { + // the other SetUpdateOrDelete sets a new absolute value => force it on self + v @ SetUpdateOrDelete::Set(_) => *self = v, + + // the other SetUpdateOrDelete updates the value + SetUpdateOrDelete::Update(u) => match self { + // if self currently sets an absolute value, apply other to that value + SetUpdateOrDelete::Set(cur) => cur.apply(u), + + 
// if self currently updates a value, apply the updates of the other to that update + SetUpdateOrDelete::Update(cur) => cur.apply(u), + + // if self currently deletes a value, + // create a new default value, apply other's updates to it and make self set it as an absolute new value + SetUpdateOrDelete::Delete => { + let mut res = T::default(); + res.apply(u); + *self = SetUpdateOrDelete::Set(res); + } + }, + + // the other SetUpdateOrDelete deletes a value => force self to delete it as well + v @ SetUpdateOrDelete::Delete => *self = v, + } + } +} + /// represents a set/delete change #[derive(Debug, Clone)] -pub enum SetOrDelete { +pub enum SetOrDelete { /// sets a new absolute value T Set(T), /// deletes a value Delete, } +/// allows applying another SetOrDelete to the current one +impl Applicable> for SetOrDelete { + fn apply(&mut self, other: Self) { + *self = other; + } +} + /// represents a set/keep change #[derive(Debug, Clone)] -pub enum SetOrKeep { +pub enum SetOrKeep { /// sets a new absolute value T Set(T), /// keeps the existing value Keep, } -impl Default for SetOrKeep { +/// allows applying another SetOrKeep to the current one +impl Applicable> for SetOrKeep { + fn apply(&mut self, other: SetOrKeep) { + if let v @ SetOrKeep::Set(..) = other { + // update the current value only if the other SetOrKeep sets a new one + *self = v; + } + } +} + +impl SetOrKeep { + /// applies the current SetOrKeep to a target mutable value + pub fn apply_to(self, val: &mut T) { + if let SetOrKeep::Set(v) = self { + // only change the value if self is setting a new one + *val = v; + } + } +} + +impl Default for SetOrKeep { fn default() -> Self { SetOrKeep::Keep } } +/// represents an update to one or more fields of a LedgerEntry #[derive(Default, Debug, Clone)] pub struct LedgerEntryUpdate { roll_count: SetOrKeep, @@ -97,21 +159,13 @@ pub struct LedgerEntryUpdate { files: HashMap>>, } -impl LedgerEntryUpdate { +impl Applicable for LedgerEntryUpdate { /// extends the LedgerEntryUpdate with another one - pub fn apply_update(&mut self, update: LedgerEntryUpdate) { - if let v @ SetOrKeep::Set(..) = update.roll_count { - self.roll_count = v; - } - if let v @ SetOrKeep::Set(..) = update.sequential_balance { - self.sequential_balance = v; - } - if let v @ SetOrKeep::Set(..) = update.parallel_balance { - self.parallel_balance = v; - } - if let v @ SetOrKeep::Set(..) 
= update.bytecode { - self.bytecode = v; - } + fn apply(&mut self, update: LedgerEntryUpdate) { + self.roll_count.apply(update.roll_count); + self.sequential_balance.apply(update.sequential_balance); + self.parallel_balance.apply(update.parallel_balance); + self.bytecode.apply(update.bytecode); self.datastore.extend(update.datastore); self.files.extend(update.files); } @@ -119,54 +173,55 @@ impl LedgerEntryUpdate { /// represents a list of changes to ledger entries #[derive(Default, Debug, Clone)] -pub struct LedgerChanges(Map>); +pub struct LedgerChanges(pub Map>); -impl LedgerChanges { - /// extends teh current LedgerChanges with another one - pub fn apply_changes(&mut self, changes: LedgerChanges) { - // iterate over all incoming changes +impl Applicable for LedgerChanges { + /// extends the current LedgerChanges with another one + fn apply(&mut self, changes: LedgerChanges) { for (addr, change) in changes.0 { - match change { - SetUpdateOrDelete::Set(new_entry) => { - // sets an entry to an absolute value, overriding any previous change - self.0.insert(addr, SetUpdateOrDelete::Set(new_entry)); + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + // apply incoming change if a change on this entry already exists + occ.get_mut().apply(change); } - SetUpdateOrDelete::Update(entry_update) => match self.0.entry(addr) { - // applies incoming updates to an entry - hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { - // the entry was already being changed - SetUpdateOrDelete::Set(cur) => { - // the entry was already being set to an absolute value - // apply the incoming updates to that absolute value - cur.apply_update(entry_update); - } - SetUpdateOrDelete::Update(cur) => { - // the entry was already being updated - // extend the existing update with the incoming ones - cur.apply_update(entry_update); - } - cur @ SetUpdateOrDelete::Delete => { - // the entry was being deleted - // set the entry to a default absolute value to which the incoming updates are applied - let mut res_entry = LedgerEntry::default(); - res_entry.apply_update(entry_update); - *cur = SetUpdateOrDelete::Set(res_entry); - } - }, - hash_map::Entry::Vacant(vac) => { - // the entry was not being changesd: simply add the incoming update - vac.insert(SetUpdateOrDelete::Update(entry_update)); - } - }, - SetUpdateOrDelete::Delete => { - // deletes an entry, overriding any previous change - self.0.insert(addr, SetUpdateOrDelete::Delete); + hash_map::Entry::Vacant(vac) => { + // otherwise insert the incoming change + vac.insert(change); } } } } } +impl LedgerChanges { + /// tries to return the sequential balance or gets it from a function + /// + /// # Returns + /// * Some(v) if a value is present + /// * None if the value is absent + /// * f() if the value is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn get_sequential_balance_or_else Option>( + &self, + addr: &Address, + f: F, + ) -> Option { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(v)) => Some(v.sequential_balance), + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { + sequential_balance, .. 
+ })) => match sequential_balance { + SetOrKeep::Set(v) => Some(*v), + SetOrKeep::Keep => f(), + }, + Some(SetUpdateOrDelete::Delete) => None, + None => f(), + } + } +} + /// represents a final ledger pub struct FinalLedger { /// ledger config @@ -175,29 +230,15 @@ pub struct FinalLedger { pub slot: Slot, /// sorted ledger tree /// TODO put it on the hard drive as it can reach 1TB - pub sorted_ledger: BTreeMap, + sorted_ledger: BTreeMap, /// history of recent final ledger changes /// front = oldest, back = newest changes_history: VecDeque<(Slot, LedgerChanges)>, } -impl FinalLedger { +impl Applicable for FinalLedger { /// applies LedgerChanges to the final ledger - pub fn apply_changes(&mut self, slot: Slot, changes: LedgerChanges) { - // if the slot is in the past: ignore - if slot <= self.slot { - return; - } - - // update the slot - self.slot = slot; - - // update and prune changes history - self.changes_history.push_back((slot, changes.clone())); - while self.changes_history.len() > self.config.final_history_length { - self.changes_history.pop_front(); - } - + fn apply(&mut self, changes: LedgerChanges) { // for all incoming changes for (addr, change) in changes.0 { match change { @@ -211,7 +252,7 @@ impl FinalLedger { self.sorted_ledger .entry(addr) .or_insert_with(|| Default::default()) - .apply_update(entry_update); + .apply(entry_update); } SetUpdateOrDelete::Delete => { // deletes an entry, if it exists @@ -222,6 +263,24 @@ impl FinalLedger { } } -/* - TODO how to evaluate the storage costs of a ledger change ? -*/ +impl FinalLedger { + /// settles a slot and saves the corresponding ledger changes to history + pub fn settle_slot(&mut self, slot: Slot, changes: LedgerChanges) { + // update the slot + self.slot = slot; + + // update and prune changes history + self.changes_history.push_back((slot, changes.clone())); + while self.changes_history.len() > self.config.final_history_length { + self.changes_history.pop_front(); + } + + // apply changes + self.apply(changes); + } + + /// gets the sequential balance of an entry + pub fn get_sequential_balance(&self, addr: &Address) -> Option { + self.sorted_ledger.get(addr).map(|v| v.sequential_balance) + } +} diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs index 751666acbb3..284a7c2f86d 100644 --- a/massa-ledger/src/lib.rs +++ b/massa-ledger/src/lib.rs @@ -8,6 +8,10 @@ extern crate massa_logging; pub use config::LedgerConfig; pub use error::LedgerError; +pub use ledger::{ + FinalLedger, LedgerChanges, LedgerEntry, LedgerEntryUpdate, SetOrDelete, SetOrKeep, + SetUpdateOrDelete, +}; mod config; mod error; From 47c858dd42332e820589e7dfd06623613fc7e2bd Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 16 Feb 2022 01:05:07 +0100 Subject: [PATCH 07/73] improve speculative execution prototype --- massa-execution/src/speculative_ledger.rs | 31 ++- massa-execution/src/vm_new.rs | 279 +++++++++++++++++----- massa-ledger/src/ledger.rs | 188 +++++++++++---- massa-ledger/src/lib.rs | 2 +- 4 files changed, 384 insertions(+), 116 deletions(-) diff --git a/massa-execution/src/speculative_ledger.rs b/massa-execution/src/speculative_ledger.rs index 966dcbfd5d5..8c78e1266ed 100644 --- a/massa-execution/src/speculative_ledger.rs +++ b/massa-execution/src/speculative_ledger.rs @@ -7,32 +7,51 @@ use massa_models::{Address, Amount}; /// data from the final ledger, previous speculative changes, /// and accumulated changes since the construction of the object pub struct SpeculativeLedger { + /// final ledger + final_ledger: Arc>, + /// accumulation of 
previous changes previous_changes: LedgerChanges, /// list of added changes - pub added_changes: LedgerChanges, + added_changes: LedgerChanges, } impl SpeculativeLedger { /// creates a new SpeculativeLedger - pub fn new(previous_changes: LedgerChanges) -> Self { + pub fn new(final_ledger: Arc>, previous_changes: LedgerChanges) -> Self { SpeculativeLedger { + final_ledger, previous_changes, added_changes: Default::default(), } } - /// gets the sequential balance of an address - pub fn get_sequential_balance( + /// takes a snapshot (clone) of the added changes + pub fn get_snapshot(&self) -> LedgerChanges { + self.added_changes.clone() + } + + /// resets to a snapshot of added ledger changes + pub fn reset_to_snapshot(&mut self, snapshot: LedgerChanges) { + self.added_changes = snapshot; + } + + /// consumes Self to get added changes + pub fn into_added_changes(self) -> LedgerChanges { + self.added_changes + } + + /// gets the parallel balance of an address + pub fn get_parallel_balance( &self, addr: &Address, final_ledger: &FinalLedger, ) -> Option { // try to read from added_changes, then previous_changes, then final_ledger - self.added_changes.get_sequential_balance_or_else(addr, || { + self.added_changes.get_parallel_balance_or_else(addr, || { self.previous_changes - .get_sequential_balance_or_else(addr, || final_ledger.get_sequential_balance(addr)) + .get_parallel_balance_or_else(addr, || final_ledger.get_parallel_balance(addr)) }) } } diff --git a/massa-execution/src/vm_new.rs b/massa-execution/src/vm_new.rs index 0f765f6b48d..fa3a552bac1 100644 --- a/massa-execution/src/vm_new.rs +++ b/massa-execution/src/vm_new.rs @@ -1,10 +1,11 @@ use crate::speculative_ledger::SpeculativeLedger; -use massa_ledger::{FinalLedger, LedgerChanges, LedgerEntry, LedgerEntryUpdate}; -use massa_models::BlockId; +use crate::ExecutionError; +use massa_ledger::{Applicable, FinalLedger, LedgerChanges, LedgerEntry, LedgerEntryUpdate}; use massa_models::{ timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, - Slot, + Block, Slot, }; +use massa_models::{Address, Amount, BlockId}; use massa_time::MassaTime; use std::{ collections::{HashMap, VecDeque}, @@ -12,37 +13,72 @@ use std::{ }; use tracing::info; +/// VM module configuration pub struct VMConfig { + /// number of threads thread_count: u8, + /// extra lag to add on the cursor to improve performance cursor_delay: MassaTime, + /// time compensation in milliseconds clock_compensation: i64, + /// genesis timestamp genesis_timestamp: MassaTime, + /// period duration t0: MassaTime, } + +/// structure describing a read-only execution request +pub struct ReadOnlyExecutionRequest { + /// The slot at which the execution will occur. + slot: Slot, + /// Maximum gas to spend in the execution. + max_gas: u64, + /// The simulated price of gas for the read-only execution. + simulated_gas_price: Amount, + /// The code to execute. + bytecode: Vec, + /// Call stack to simulate + call_stack: Vec
, + /// The channel used to send the result of the execution. + result_sender: std::sync::mpsc::Sender>, +} + /// structure used to communicate with the VM thread #[derive(Default)] pub struct VMInputData { + /// set stop to true to stop the thread stop: bool, + /// signal whether the blockclique changed blockclique_changed: bool, - finalized_blocks: HashMap, - blockclique: HashMap, + /// list of newly finalized blocks + finalized_blocks: HashMap, + /// blockclique + blockclique: HashMap, + /// readonly execution requests + readonly_requests: VecDeque, } /// VM controller pub struct VMController { + /// condition variable to wake up the VM loop loop_cv: Condvar, + /// input data to process in the VM loop input_data: Mutex, } /// VM manager pub struct VMManager { + /// shared reference to the VM controller controller: Arc, + /// handle used to join the VM thread thread_handle: std::thread::JoinHandle<()>, } impl VMManager { + /// stops the VM pub fn stop(self) { info!("stopping VM controller..."); + // notify the VM thread to stop { let mut input_wlock = self .controller @@ -50,21 +86,27 @@ impl VMManager { .lock() .expect("could not w-lock VM input data"); input_wlock.stop = true; - input_wlock.blockclique_changed = true; self.controller.loop_cv.notify_one(); } + // join the VM thread self.thread_handle .join() .expect("VM controller thread panicked"); info!("VM controller stopped"); } + /// get a shared reference to the VM controller pub fn get_controller(&self) -> Arc { self.controller.clone() } } -pub fn start_vm(config: VMConfig, bootstrap: Option) -> VMManager { +/// launches the VM and returns a VMManager +/// +/// # parameters +/// * config: VM configuration +/// * bootstrap: +pub fn start_vm(config: VMConfig, final_ledger: Arc>) -> VMManager { let controller = Arc::new(VMController { loop_cv: Condvar::new(), input_data: Mutex::new(VMInputData { @@ -75,7 +117,7 @@ pub fn start_vm(config: VMConfig, bootstrap: Option) -> VMManag let ctl = controller.clone(); let thread_handle = std::thread::spawn(move || { - VMThread::new(config, ctl, bootstrap).main_loop(); + VMThread::new(config, ctl, final_ledger).main_loop(); }); VMManager { @@ -104,15 +146,15 @@ struct VMThread { // VM data exchange controller controller: Arc, // map of SCE-final blocks not executed yet - sce_finals: HashMap>, + sce_finals: HashMap>, // last SCE final slot in sce_finals list last_sce_final: Slot, // map of CSS-final but non-SCE-final blocks - remaining_css_finals: HashMap, + remaining_css_finals: HashMap, // last blockclique - blockclique: HashMap, + blockclique: HashMap, // map of active slots - active_slots: HashMap>, + active_slots: HashMap>, // highest active slot last_active_slot: Slot, // final execution cursor @@ -128,7 +170,9 @@ struct VMThread { } pub(crate) struct ExecutionContext { + //TODO other things (eg. 
call stack) speculative_ledger: SpeculativeLedger, + //TODO event store } impl VMThread { @@ -142,12 +186,12 @@ impl VMThread { .expect("could not R-lock final ledger in VM thread creation") .slot; let execution_context = Arc::new(Mutex::new(ExecutionContext { - speculative_ledger: SpeculativeLedger { - final_ledger: final_ledger.clone(), - all_changes: Default::default(), - added_changes: Default::default(), - }, + speculative_ledger: SpeculativeLedger::new( + final_ledger.clone(), + LedgerChanges::default(), + ), })); + VMThread { final_ledger, last_active_slot: final_slot, @@ -178,21 +222,21 @@ impl VMThread { } /// update final slots - fn update_final_slots(&mut self, new_css_finals: HashMap) { + fn update_final_slots(&mut self, new_css_finals: HashMap) { // return if empty if new_css_finals.is_empty() { return; } - // add them to pending css finals + // add new_css_finals to pending css finals self.remaining_css_finals.extend(new_css_finals); // get maximal css-final slot let max_css_final_slot = self .remaining_css_finals .iter() - .max_by_key(|(s, _b_id)| *s) - .map(|(s, _b_id)| *s) + .max_by_key(|(s, _)| *s) + .map(|(s, _)| *s) .expect("expected remaining_css_finals to be non-empty"); // detect SCE-final slots @@ -203,9 +247,9 @@ impl VMThread { .expect("final slot overflow in VM"); // pop slot from remaining CSS finals - if let Some(block_id) = self.remaining_css_finals.remove(&slot) { + if let Some((block_id, block)) = self.remaining_css_finals.remove(&slot) { // CSS-final block found at slot: add block to to sce_finals - self.sce_finals.insert(slot, Some(block_id)); + self.sce_finals.insert(slot, Some((block_id, block))); self.last_sce_final = slot; // continue the loop continue; @@ -253,37 +297,39 @@ impl VMThread { } /// update active slot sequence - fn update_active_slots(&mut self, new_blockclique: Option>) { + fn update_active_slots(&mut self, new_blockclique: Option>) { // update blockclique if changed if let Some(blockclique) = new_blockclique { self.blockclique = blockclique; } - // get last active slot, if any - let current_active_slot = self.get_end_active_slot(); + // get end active slot, if any + let end_active_slot = self.get_end_active_slot(); // reset active slots self.active_slots = HashMap::new(); self.last_active_slot = self.last_sce_final; // if no active slot yet => keep the active_slots empty - let current_active_slot = match current_active_slot { + let end_active_slot = match end_active_slot { Some(s) => s, None => return, }; // recompute non-SCE-final slot sequence let mut slot = self.last_sce_final; - while slot < current_active_slot { + while slot < end_active_slot { slot = slot .get_next_slot(self.config.thread_count) .expect("active slot overflow in VM"); - if let Some(block_id) = self.remaining_css_finals.get(&slot) { + if let Some((block_id, block)) = self.remaining_css_finals.get(&slot) { // found in remaining_css_finals - self.active_slots.insert(slot, Some(*block_id)); - } else if let Some(block_id) = self.blockclique.get(&slot) { + self.active_slots + .insert(slot, Some((*block_id, block.clone()))); + } else if let Some((block_id, block)) = self.blockclique.get(&slot) { // found in blockclique - self.active_slots.insert(slot, Some(*block_id)); + self.active_slots + .insert(slot, Some((*block_id, block.clone()))); } else { // miss self.active_slots.insert(slot, None); @@ -292,6 +338,98 @@ impl VMThread { } } + /// applies an execution output to the final state + fn apply_final_execution_output(&mut self, exec_out: ExecutionOutput) { + // update 
cursors + self.final_cursor = exec_out.slot; + if self.active_cursor <= self.final_cursor { + self.final_cursor = self.final_cursor; + } + + // apply final ledger changes + { + let mut final_ledger = self + .final_ledger + .write() + .expect("could not lock final ledger for writing"); + final_ledger.settle_slot(exec_out.slot, exec_out.ledger_changes); + } + + // save generated events to final store + // TODO + } + + /// applies an execution output to the active state + fn apply_active_execution_output(&mut self, exec_out: ExecutionOutput) { + // update active cursor + self.active_cursor = exec_out.slot; + + // add execution output to history + self.execution_history.push_back(exec_out); + } + + /// returns the speculative ledger at a given history slot + fn get_speculative_ledger_at_slot(&self, slot: Slot) -> SpeculativeLedger { + // check that the slot is within the reach of history + if slot <= self.final_cursor { + panic!("cannot execute at a slot before finality"); + } + let max_slot = self + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("slot overflow when getting speculative ledger"); + if slot > max_slot { + panic!("cannot execute at a slot beyond active cursor + 1"); + } + + // gather the history of changes + let mut previous_ledger_changes = LedgerChanges::default(); + for previous_output in &self.execution_history { + if previous_output.slot >= slot { + break; + } + previous_ledger_changes.apply(&previous_output.ledger_changes); + } + + // return speculative ledger + SpeculativeLedger::new(self.final_ledger.clone(), previous_ledger_changes) + } + + /// executes a full slot without causing any changes to the state, + /// and yields an execution output + fn execute_slot(&mut self, slot: Slot, opt_block: Option<(BlockId, Block)>) -> ExecutionOutput { + // get the speculative ledger + let ledger = self.get_speculative_ledger_at_slot(slot); + + // TODO init context + + // TODO intial executions + + // TODO async executions + + let mut out_block_id = None; + if let Some((block_id, block)) = opt_block { + out_block_id = Some(block_id); + + //TODO block stuff + } + + ExecutionOutput { + slot, + block_id: out_block_id, + ledger_changes: ledger.into_added_changes(), + } + } + + /// clear execution history + fn clear_history(&mut self) { + // clear history + self.execution_history.clear(); + + // reset active cursor + self.active_cursor = self.final_cursor; + } + /// executes one final slot, if any /// returns true if something was executed fn execute_one_final_slot(&mut self) -> bool { @@ -300,26 +438,25 @@ impl VMThread { return false; } - // get the next one + // get the slot just after the last executed final slot let slot = self .final_cursor .get_next_slot(self.config.thread_count) .expect("final slot overflow in VM"); - let block_id = *self + + // take element from sce finals + let exec_target = self .sce_finals - .get(&slot) + .remove(&slot) .expect("the SCE final slot list skipped a slot"); - // update final cursor - self.final_cursor = slot; - // check if the final slot is cached at the front of the speculative execution history if let Some(exec_out) = self.execution_history.pop_front() { - if exec_out.slot == slot && exec_out.block_id == block_id { - // speculative execution front result matches what we wnat to compute - - // TODO apply exec_out to final state - + if exec_out.slot == slot + && exec_out.block_id == exec_target.as_ref().map(|(b_id, _)| *b_id) + { + // speculative execution front result matches what we want to compute + 
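+                // reuse the cached speculative output directly instead of re-executing the slot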
self.apply_final_execution_output(exec_out); return true; } } @@ -327,22 +464,28 @@ impl VMThread { // speculative cache mismatch // clear the speculative execution output cache completely - self.execution_history.clear(); - self.active_cursor = self.final_cursor; + self.clear_history(); + + // execute slot + let exec_out = self.execute_slot(slot, exec_target); - // TODO execute - // TODO apply exec_out to final state + // apply execution output to final state + self.apply_final_execution_output(exec_out); return true; } - /// truncates active slots at the fitst mismatch + /// truncates active slots at the first mismatch /// between the active execution output history and the planned active_slots fn truncate_history(&mut self) { // find mismatch point (included) let mut truncate_at = None; for (hist_index, exec_output) in self.execution_history.iter().enumerate() { - if self.active_slots.get(&exec_output.slot) == Some(&exec_output.block_id) { + let found_block_id = self + .active_slots + .get(&exec_output.slot) + .map(|opt_b| opt_b.as_ref().map(|(b_id, b)| *b_id)); + if found_block_id == Some(exec_output.block_id) { continue; } truncate_at = Some(hist_index); @@ -367,17 +510,17 @@ impl VMThread { .active_cursor .get_next_slot(self.config.thread_count) .expect("active slot overflow in VM"); - let to_execute = match self.active_slots.get(&slot) { - Some(b) => b, + + let exec_target = match self.active_slots.get(&slot) { + Some(b) => b.clone(), //TODO get rid of that clone None => return false, }; - // update active cursor - self.active_cursor = slot; - - // TODO execute + // execute the slot + let exec_out = self.execute_slot(slot, exec_target); - // TODO push_back result into history + // apply execution output to active state + self.apply_active_execution_output(exec_out); return true; } @@ -400,6 +543,11 @@ impl VMThread { next_timestmap.saturating_sub(now) } + /// executed a readonly request + fn execute_readonly_request(&mut self, req: ReadOnlyExecutionRequest) { + // TODO + } + /// main VM loop fn main_loop(&mut self) { loop { @@ -446,16 +594,22 @@ impl VMThread { continue; } - // TODO execute readonly requests + // execute all queued readonly requests // must be done in this loop because of the static shared context + for req in input_data.readonly_requests { + self.execute_readonly_request(req); + } - // check if data changed during the iteration - let ctl = self.controller; - let input_data = ctl.input_data.lock().expect("could not lock VM input data"); + // check if new data or requests arrived during the iteration + let input_data = self + .controller + .input_data + .lock() + .expect("could not lock VM input data"); if input_data.stop { break; } - if input_data.blockclique_changed { + if input_data.blockclique_changed || !input_data.readonly_requests.is_empty() { continue; } @@ -467,7 +621,8 @@ impl VMThread { } // wait for change or for next slot - let _ = ctl + let _ = self + .controller .loop_cv .wait_timeout(input_data, delay_until_next_slot.to_duration()) .expect("VM main loop condition variable wait failed"); diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index 382b46d2324..8a6fe0539f8 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -3,51 +3,51 @@ use crate::LedgerConfig; use massa_hash::hash::Hash; use massa_models::{prehash::Map, Address, Amount, Slot}; -use std::collections::{hash_map, BTreeMap, HashMap, VecDeque}; +use std::collections::{hash_map, BTreeMap, VecDeque}; /// represents a structure that supports another one 
being applied to it pub trait Applicable { - fn apply(&mut self, _: V); + fn apply(&mut self, _: &V); + fn merge(&mut self, _: V); } /// structure defining a ledger entry #[derive(Default, Debug, Clone)] pub struct LedgerEntry { - pub roll_count: u64, - pub sequential_balance: Amount, pub parallel_balance: Amount, pub bytecode: Vec, pub datastore: BTreeMap>, - pub files: BTreeMap>, } /// LedgerEntryUpdate can be applied to a LedgerEntry impl Applicable for LedgerEntry { /// applies a LedgerEntryUpdate - fn apply(&mut self, update: LedgerEntryUpdate) { - update.roll_count.apply_to(&mut self.roll_count); - update - .sequential_balance - .apply_to(&mut self.sequential_balance); + fn apply(&mut self, update: &LedgerEntryUpdate) { update.parallel_balance.apply_to(&mut self.parallel_balance); update.bytecode.apply_to(&mut self.bytecode); - for (key, value_update) in update.datastore { + for (key, value_update) in &update.datastore { match value_update { SetOrDelete::Set(v) => { - self.datastore.insert(key, v); + self.datastore.insert(*key, v.clone()); } SetOrDelete::Delete => { - self.datastore.remove(&key); + self.datastore.remove(key); } } } - for (key, value_update) in update.files { + } + + /// merges a LedgerEntryUpdate + fn merge(&mut self, update: LedgerEntryUpdate) { + update.parallel_balance.merge_to(&mut self.parallel_balance); + update.bytecode.merge_to(&mut self.bytecode); + for (key, value_update) in update.datastore { match value_update { SetOrDelete::Set(v) => { - self.files.insert(key, v); + self.datastore.insert(key, v); } SetOrDelete::Delete => { - self.files.remove(&key); + self.datastore.remove(&key); } } } @@ -56,7 +56,7 @@ impl Applicable for LedgerEntry { /// represents a set/update/delete change #[derive(Debug, Clone)] -pub enum SetUpdateOrDelete, V: Applicable> { +pub enum SetUpdateOrDelete, V: Applicable + Clone> { /// sets a new absolute value T Set(T), /// applies an update V to an existing value @@ -68,11 +68,14 @@ pub enum SetUpdateOrDelete, V: Applicable> { /// supports applying another SetUpdateOrDelete to self impl, V: Applicable> Applicable> for SetUpdateOrDelete +where + V: Clone, + T: Clone, { - fn apply(&mut self, other: SetUpdateOrDelete) { + fn apply(&mut self, other: &SetUpdateOrDelete) { match other { // the other SetUpdateOrDelete sets a new absolute value => force it on self - v @ SetUpdateOrDelete::Set(_) => *self = v, + v @ SetUpdateOrDelete::Set(_) => *self = v.clone(), // the other SetUpdateOrDelete updates the value SetUpdateOrDelete::Update(u) => match self { @@ -91,6 +94,33 @@ impl, V: Applicable> Applicable force self to delete it as well + v @ SetUpdateOrDelete::Delete => *self = v.clone(), + } + } + + fn merge(&mut self, other: SetUpdateOrDelete) { + match other { + // the other SetUpdateOrDelete sets a new absolute value => force it on self + v @ SetUpdateOrDelete::Set(_) => *self = v, + + // the other SetUpdateOrDelete updates the value + SetUpdateOrDelete::Update(u) => match self { + // if self currently sets an absolute value, merge other to that value + SetUpdateOrDelete::Set(cur) => cur.merge(u), + + // if self currently updates a value, merge the updates of the other to that update + SetUpdateOrDelete::Update(cur) => cur.merge(u), + + // if self currently deletes a value, + // create a new default value, merge other's updates to it and make self set it as an absolute new value + SetUpdateOrDelete::Delete => { + let mut res = T::default(); + res.merge(u); + *self = SetUpdateOrDelete::Set(res); + } + }, + // the other SetUpdateOrDelete 
deletes a value => force self to delete it as well v @ SetUpdateOrDelete::Delete => *self = v, } @@ -108,7 +138,11 @@ pub enum SetOrDelete { /// allows applying another SetOrDelete to the current one impl Applicable> for SetOrDelete { - fn apply(&mut self, other: Self) { + fn apply(&mut self, other: &Self) { + *self = other.clone(); + } + + fn merge(&mut self, other: Self) { *self = other; } } @@ -124,7 +158,14 @@ pub enum SetOrKeep { /// allows applying another SetOrKeep to the current one impl Applicable> for SetOrKeep { - fn apply(&mut self, other: SetOrKeep) { + fn apply(&mut self, other: &SetOrKeep) { + if let v @ SetOrKeep::Set(..) = other { + // update the current value only if the other SetOrKeep sets a new one + *self = v.clone(); + } + } + + fn merge(&mut self, other: SetOrKeep) { if let v @ SetOrKeep::Set(..) = other { // update the current value only if the other SetOrKeep sets a new one *self = v; @@ -134,7 +175,15 @@ impl Applicable> for SetOrKeep { impl SetOrKeep { /// applies the current SetOrKeep to a target mutable value - pub fn apply_to(self, val: &mut T) { + pub fn apply_to(&self, val: &mut T) { + if let SetOrKeep::Set(v) = &self { + // only change the value if self is setting a new one + *val = v.clone(); + } + } + + /// merges the current SetOrKeep into a target mutable value + pub fn merge_to(self, val: &mut T) { if let SetOrKeep::Set(v) = self { // only change the value if self is setting a new one *val = v; @@ -152,22 +201,26 @@ impl Default for SetOrKeep { #[derive(Default, Debug, Clone)] pub struct LedgerEntryUpdate { roll_count: SetOrKeep, - sequential_balance: SetOrKeep, parallel_balance: SetOrKeep, bytecode: SetOrKeep>, datastore: Map>>, - files: HashMap>>, } impl Applicable for LedgerEntryUpdate { /// extends the LedgerEntryUpdate with another one - fn apply(&mut self, update: LedgerEntryUpdate) { - self.roll_count.apply(update.roll_count); - self.sequential_balance.apply(update.sequential_balance); - self.parallel_balance.apply(update.parallel_balance); - self.bytecode.apply(update.bytecode); + fn apply(&mut self, update: &LedgerEntryUpdate) { + self.roll_count.apply(&update.roll_count); + self.parallel_balance.apply(&update.parallel_balance); + self.bytecode.apply(&update.bytecode); + self.datastore.extend(update.datastore.clone()); + } + + /// extends the LedgerEntryUpdate with another one + fn merge(&mut self, update: LedgerEntryUpdate) { + self.roll_count.merge(update.roll_count); + self.parallel_balance.merge(update.parallel_balance); + self.bytecode.merge(update.bytecode); self.datastore.extend(update.datastore); - self.files.extend(update.files); } } @@ -177,13 +230,29 @@ pub struct LedgerChanges(pub Map for LedgerChanges { /// extends the current LedgerChanges with another one - fn apply(&mut self, changes: LedgerChanges) { - for (addr, change) in changes.0 { - match self.0.entry(addr) { + fn apply(&mut self, changes: &LedgerChanges) { + for (addr, change) in &changes.0 { + match self.0.entry(*addr) { hash_map::Entry::Occupied(mut occ) => { // apply incoming change if a change on this entry already exists occ.get_mut().apply(change); } + hash_map::Entry::Vacant(vac) => { + // otherwise insert the incoming change + vac.insert(change.clone()); + } + } + } + } + + /// extends the current LedgerChanges with another one + fn merge(&mut self, changes: LedgerChanges) { + for (addr, change) in changes.0 { + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + // merge incoming change if a change on this entry already exists + 
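// Illustrative sketch (not part of the patch): the `SetOrKeep` semantics used by
// ledger entry updates, shown on a minimal standalone enum. `apply_to` only touches
// the target when the update actually sets a value; `Keep` is a no-op.
#[derive(Clone)]
enum SetOrKeep<T: Clone> {
    Set(T),
    Keep,
}

impl<T: Clone> SetOrKeep<T> {
    fn apply_to(&self, target: &mut T) {
        if let SetOrKeep::Set(v) = self {
            // only change the value if self is setting a new one
            *target = v.clone();
        }
    }
}

fn main() {
    let mut balance: u64 = 100;
    SetOrKeep::Keep.apply_to(&mut balance); // leaves the balance unchanged
    SetOrKeep::Set(42u64).apply_to(&mut balance); // overwrites it
    assert_eq!(balance, 42);
}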
occ.get_mut().merge(change); + } hash_map::Entry::Vacant(vac) => { // otherwise insert the incoming change vac.insert(change); @@ -194,7 +263,7 @@ impl Applicable for LedgerChanges { } impl LedgerChanges { - /// tries to return the sequential balance or gets it from a function + /// tries to return the parallel balance or gets it from a function /// /// # Returns /// * Some(v) if a value is present @@ -203,16 +272,16 @@ impl LedgerChanges { /// /// this is used as an optimization: /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further - pub fn get_sequential_balance_or_else Option>( + pub fn get_parallel_balance_or_else Option>( &self, addr: &Address, f: F, ) -> Option { match self.0.get(addr) { - Some(SetUpdateOrDelete::Set(v)) => Some(v.sequential_balance), + Some(SetUpdateOrDelete::Set(v)) => Some(v.parallel_balance), Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { - sequential_balance, .. - })) => match sequential_balance { + parallel_balance, .. + })) => match parallel_balance { SetOrKeep::Set(v) => Some(*v), SetOrKeep::Keep => f(), }, @@ -238,7 +307,32 @@ pub struct FinalLedger { impl Applicable for FinalLedger { /// applies LedgerChanges to the final ledger - fn apply(&mut self, changes: LedgerChanges) { + fn apply(&mut self, changes: &LedgerChanges) { + // for all incoming changes + for (addr, change) in &changes.0 { + match &change { + SetUpdateOrDelete::Set(new_entry) => { + // inserts/overwrites the entry with an incoming absolute value + self.sorted_ledger.insert(*addr, new_entry.clone()); + } + SetUpdateOrDelete::Update(entry_update) => { + // applies updates to an entry + // if the entry does not exist, inserts a default one and applies the updates to it + self.sorted_ledger + .entry(*addr) + .or_insert_with(|| Default::default()) + .apply(entry_update); + } + SetUpdateOrDelete::Delete => { + // deletes an entry, if it exists + self.sorted_ledger.remove(&addr); + } + } + } + } + + /// merges LedgerChanges to the final ledger + fn merge(&mut self, changes: LedgerChanges) { // for all incoming changes for (addr, change) in changes.0 { match change { @@ -252,7 +346,7 @@ impl Applicable for FinalLedger { self.sorted_ledger .entry(addr) .or_insert_with(|| Default::default()) - .apply(entry_update); + .merge(entry_update); } SetUpdateOrDelete::Delete => { // deletes an entry, if it exists @@ -266,21 +360,21 @@ impl Applicable for FinalLedger { impl FinalLedger { /// settles a slot and saves the corresponding ledger changes to history pub fn settle_slot(&mut self, slot: Slot, changes: LedgerChanges) { + // apply changes + self.apply(&changes); + // update the slot self.slot = slot; // update and prune changes history - self.changes_history.push_back((slot, changes.clone())); + self.changes_history.push_back((slot, changes)); while self.changes_history.len() > self.config.final_history_length { self.changes_history.pop_front(); } - - // apply changes - self.apply(changes); } - /// gets the sequential balance of an entry - pub fn get_sequential_balance(&self, addr: &Address) -> Option { - self.sorted_ledger.get(addr).map(|v| v.sequential_balance) + /// gets the parallel balance of an entry + pub fn get_parallel_balance(&self, addr: &Address) -> Option { + self.sorted_ledger.get(addr).map(|v| v.parallel_balance) } } diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs index 284a7c2f86d..cba4bdfaa61 100644 --- a/massa-ledger/src/lib.rs +++ b/massa-ledger/src/lib.rs @@ -9,7 +9,7 @@ extern crate massa_logging; pub use 
config::LedgerConfig; pub use error::LedgerError; pub use ledger::{ - FinalLedger, LedgerChanges, LedgerEntry, LedgerEntryUpdate, SetOrDelete, SetOrKeep, + Applicable, FinalLedger, LedgerChanges, LedgerEntry, LedgerEntryUpdate, SetOrDelete, SetOrKeep, SetUpdateOrDelete, }; From 52ac8a88a19b92f2a0df05c8637d20b00e7797c4 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 16 Feb 2022 02:48:28 +0100 Subject: [PATCH 08/73] time cursor vm progress --- massa-execution/src/config.rs | 26 +- massa-execution/src/controller.rs | 275 +++-------- massa-execution/src/event_store.rs | 270 +++++++++++ massa-execution/src/lib.rs | 3 +- massa-execution/src/types.rs | 454 ++---------------- .../src/{vm_new.rs => vm_thread.rs} | 182 ++----- 6 files changed, 420 insertions(+), 790 deletions(-) create mode 100644 massa-execution/src/event_store.rs rename massa-execution/src/{vm_new.rs => vm_thread.rs} (78%) diff --git a/massa-execution/src/config.rs b/massa-execution/src/config.rs index 9a7722596b5..e4c366be7de 100644 --- a/massa-execution/src/config.rs +++ b/massa-execution/src/config.rs @@ -1,30 +1,20 @@ use massa_time::MassaTime; use std::path::PathBuf; -use serde::{Deserialize, Serialize}; - -/// Max size for channels used with communication with other components. -pub const CHANNEL_SIZE: usize = 256; - -/// Execution setting parsed with .toml in `massa-node/src/settings.rs` -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct ExecutionSettings { +/// VM module configuration +pub struct VMConfig { /// Initial SCE ledger file pub initial_sce_ledger_path: PathBuf, /// maximum number of SC output events kept in cache pub max_final_events: usize, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct ExecutionConfigs { - /// Execution settings - pub settings: ExecutionSettings, - /// Thread count + /// number of threads pub thread_count: u8, - /// Genesis timestmap + /// extra lag to add on the cursor to improve performance + pub cursor_delay: MassaTime, + /// time compensation in milliseconds + pub clock_compensation: i64, + /// genesis timestamp pub genesis_timestamp: MassaTime, /// period duration pub t0: MassaTime, - /// clock compensation in milliseconds - pub clock_compensation: i64, } diff --git a/massa-execution/src/controller.rs b/massa-execution/src/controller.rs index a1823b21b74..fd744022430 100644 --- a/massa-execution/src/controller.rs +++ b/massa-execution/src/controller.rs @@ -1,214 +1,97 @@ -use crate::config::{ExecutionConfigs, CHANNEL_SIZE}; -use crate::error::ExecutionError; -use crate::worker::{ - ExecutionCommand, ExecutionEvent, ExecutionManagementCommand, ExecutionWorker, -}; -use crate::BootstrapExecutionState; -use massa_models::api::SCELedgerInfo; -use massa_models::output_event::SCOutputEvent; -use massa_models::prehash::Map; -use massa_models::OperationId; -use massa_models::{execution::ExecuteReadOnlyResponse, Address, Amount, Block, BlockId, Slot}; -use std::collections::VecDeque; -use tokio::sync::{mpsc, oneshot}; -use tokio::task::JoinHandle; -use tracing::{error, info}; +use crate::{config::VMConfig, types::ReadOnlyExecutionRequest, vm_thread::VMThread}; +use massa_ledger::FinalLedger; +use massa_models::{Block, BlockId, Slot}; +use std::collections::{HashMap, VecDeque}; +use std::sync::{Arc, Condvar, Mutex, RwLock}; +use tracing::info; -/// A sender of execution commands. -#[derive(Clone)] -pub struct ExecutionCommandSender(pub mpsc::Sender); - -/// A receiver of execution events. 
-pub struct ExecutionEventReceiver(pub mpsc::UnboundedReceiver); - -impl ExecutionEventReceiver { - /// drains remaining events and returns them in a VecDeque - /// note: events are sorted from oldest to newest - pub async fn drain(mut self) -> VecDeque { - let mut remaining_events: VecDeque = VecDeque::new(); - - while let Some(evt) = self.0.recv().await { - remaining_events.push_back(evt); - } - remaining_events - } +/// structure used to communicate with the VM thread +#[derive(Default)] +pub struct VMInputData { + /// set stop to true to stop the thread + pub stop: bool, + /// signal whether the blockclique changed + pub blockclique_changed: bool, + /// list of newly finalized blocks + pub finalized_blocks: HashMap, + /// blockclique + pub blockclique: HashMap, + /// readonly execution requests + pub readonly_requests: VecDeque, } -/// A sender of execution management commands. -pub struct ExecutionManager { - join_handle: JoinHandle>, - manager_tx: mpsc::Sender, +/// VM controller +pub struct VMController { + /// condition variable to wake up the VM loop + pub loop_cv: Condvar, + /// input data to process in the VM loop + pub input_data: Mutex, } -impl ExecutionManager { - pub async fn stop(self) -> Result<(), ExecutionError> { - drop(self.manager_tx); - if let Err(err) = self.join_handle.await { - error!("execution worker crashed: {}", err); - return Err(ExecutionError::JoinError); - }; - - info!("execution worker finished cleanly"); - Ok(()) +impl VMController { + /// reads the list of newly finalized blocks and the new blockclique, if there was a change + /// if found, remove from input queue + pub fn consume_input(&mut self) -> VMInputData { + std::mem::take(&mut self.input_data.lock().expect("VM input data lock failed")) } } -/// Creates a new execution controller. -/// -/// # Arguments -/// * cfg: execution configuration -/// * thread_count: number of threads -/// * genesis_timestamp: genesis timestamp -/// * t0: period duration -/// * clock_compensation: clock compensation in milliseconds -/// * bootstrap_state: optional bootstrap state -/// -/// TODO: add a consensus command sender, -/// to be able to send the `TransferToConsensus` message. -pub async fn start_controller( - cfg: ExecutionConfigs, - bootstrap_state: Option, -) -> Result< - ( - ExecutionCommandSender, - ExecutionEventReceiver, - ExecutionManager, - ), - ExecutionError, -> { - let (command_tx, command_rx) = mpsc::channel::(CHANNEL_SIZE); - let (manager_tx, manager_rx) = mpsc::channel::(1); - - // Unbounded, as execution is limited per metering already. 
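// Illustrative sketch (not part of the patch): the stop/notify pattern the VM manager
// uses, reduced to std primitives. A worker loop sleeps on a Condvar with a timeout
// (standing in for "until the next slot"), and `stop()` flips a flag under the Mutex,
// notifies the Condvar, then joins the thread.
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

#[derive(Default)]
struct Input {
    stop: bool,
}

struct Controller {
    cv: Condvar,
    input: Mutex<Input>,
}

fn main() {
    let ctl = Arc::new(Controller { cv: Condvar::new(), input: Mutex::new(Input::default()) });

    let worker_ctl = ctl.clone();
    let handle = thread::spawn(move || loop {
        let guard = worker_ctl.input.lock().expect("lock failed");
        if guard.stop {
            break;
        }
        // wait for a notification or for the "next slot" timeout to elapse
        let _ = worker_ctl
            .cv
            .wait_timeout(guard, Duration::from_millis(100))
            .expect("condvar wait failed");
    });

    // request shutdown, then join the worker
    {
        let mut guard = ctl.input.lock().expect("lock failed");
        guard.stop = true;
        ctl.cv.notify_one();
    }
    handle.join().expect("worker panicked");
}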
- let (event_tx, event_rx) = mpsc::unbounded_channel::(); - let worker = ExecutionWorker::new(cfg, event_tx, command_rx, manager_rx, bootstrap_state)?; - let join_handle = tokio::spawn(async move { - match worker.run_loop().await { - Err(err) => Err(err), - Ok(v) => Ok(v), - } - }); - Ok(( - ExecutionCommandSender(command_tx), - ExecutionEventReceiver(event_rx), - ExecutionManager { - join_handle, - manager_tx, - }, - )) +/// VM manager +pub struct VMManager { + /// shared reference to the VM controller + controller: Arc, + /// handle used to join the VM thread + thread_handle: std::thread::JoinHandle<()>, } -impl ExecutionCommandSender { - /// notify of a blockclique change - pub async fn update_blockclique( - &self, - finalized_blocks: Map, - blockclique: Map, - ) -> Result<(), ExecutionError> { - self.0 - .send(ExecutionCommand::BlockCliqueChanged { - blockclique, - finalized_blocks, - }) - .await - .map_err(|_err| { - ExecutionError::ChannelError( - "could not send BlockCliqueChanged command to execution".into(), - ) - })?; - Ok(()) +impl VMManager { + /// stops the VM + pub fn stop(self) { + info!("stopping VM controller..."); + // notify the VM thread to stop + { + let mut input_wlock = self + .controller + .input_data + .lock() + .expect("could not w-lock VM input data"); + input_wlock.stop = true; + self.controller.loop_cv.notify_one(); + } + // join the VM thread + self.thread_handle + .join() + .expect("VM controller thread panicked"); + info!("VM controller stopped"); } - pub async fn get_bootstrap_state(&self) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ExecutionCommand::GetBootstrapState(response_tx)) - .await - .map_err(|_| { - ExecutionError::ChannelError("could not send GetBootstrapState command".into()) - })?; - response_rx.await.map_err(|_| { - ExecutionError::ChannelError("could not send GetBootstrapState upstream".into()) - }) + /// get a shared reference to the VM controller + pub fn get_controller(&self) -> Arc { + self.controller.clone() } +} - /// Get events optionnally filtered by: - /// * start slot - /// * end slot - /// * emitter address - /// * original caller address - /// * operation id - pub async fn get_filtered_sc_output_event( - &self, - start: Option, - end: Option, - emitter_address: Option
<Address>, - original_caller_address: Option<Address>
, - original_operation_id: Option, - ) -> Result, ExecutionError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ExecutionCommand::GetSCOutputEvents { - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - response_tx, - }) - .await - .map_err(|_| { - ExecutionError::ChannelError("could not send GetSCOutputEvents command".into()) - })?; - response_rx.await.map_err(|_| { - ExecutionError::ChannelError("could not send GetSCOutputEvents upstream".into()) - }) - } +/// launches the VM and returns a VMManager +/// +/// # parameters +/// * config: VM configuration +/// * bootstrap: +pub fn start_vm(config: VMConfig, final_ledger: Arc>) -> VMManager { + let controller = Arc::new(VMController { + loop_cv: Condvar::new(), + input_data: Mutex::new(VMInputData { + blockclique_changed: true, + ..Default::default() + }), + }); - /// Execute code in read-only mode. - pub async fn execute_read_only_request( - &self, - max_gas: u64, - simulated_gas_price: Amount, - bytecode: Vec, - address: Option
, - ) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ExecutionCommand::ExecuteReadOnlyRequest { - max_gas, - simulated_gas_price, - bytecode, - result_sender: response_tx, - address, - }) - .await - .map_err(|_| { - ExecutionError::ChannelError("could not send ExecuteReadOnlyRequest command".into()) - })?; - response_rx.await.map_err(|_| { - ExecutionError::ChannelError("could not send ExecuteReadOnlyResponse upstream".into()) - }) - } + let ctl = controller.clone(); + let thread_handle = std::thread::spawn(move || { + VMThread::new(config, ctl, final_ledger).main_loop(); + }); - pub async fn get_sce_ledger_for_addresses( - self, - addresses: Vec
, - ) -> Result, ExecutionError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ExecutionCommand::GetSCELedgerForAddresses { - response_tx, - addresses, - }) - .await - .map_err(|_| { - ExecutionError::ChannelError( - "could not send GetSCELedgerForAddresses command".into(), - ) - })?; - response_rx.await.map_err(|_| { - ExecutionError::ChannelError("could not send GetSCELedgerForAddresses upstream".into()) - }) + VMManager { + controller, + thread_handle, } } diff --git a/massa-execution/src/event_store.rs b/massa-execution/src/event_store.rs new file mode 100644 index 00000000000..6d36dfbcb43 --- /dev/null +++ b/massa-execution/src/event_store.rs @@ -0,0 +1,270 @@ +use massa_models::output_event::{SCOutputEvent, SCOutputEventId}; +use massa_models::prehash::{Map, PreHashed, Set}; +/// Define types used while executing block bytecodes +use massa_models::{Address, OperationId, Slot}; +use std::cmp; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use tracing::warn; + +#[inline] +/// Remove a given event_id from a `Set` +/// The Set is stored into a map `ctnr` at a `key` address. If +/// the Set resulted from the operation is empty, remove the entry +/// from the `ctnr` +/// +/// Used in `prune()` +fn remove_from_map( + ctnr: &mut Map>, + key: &T, + evt_id: &SCOutputEventId, +) { + match ctnr.get_mut(key) { + Some(ele) => { + ele.remove(evt_id); + if ele.is_empty() { + ctnr.remove(key); + } + } + _ => { + ctnr.remove(key); + } + } +} + +#[inline] +/// Remove a given event_id from a `Set` +/// The Set is stored into a Hashmap `ctnr` at a `key` address. If +/// the Set resulted from the operation is empty, remove the entry +/// from the `ctnr` +/// +/// Used in `prune()` +fn remove_from_hashmap( + ctnr: &mut HashMap>, + key: &T, + evt_id: &SCOutputEventId, +) { + match ctnr.get_mut(key) { + Some(ele) => { + ele.remove(evt_id); + if ele.is_empty() { + ctnr.remove(key); + } + } + _ => { + ctnr.remove(key); + } + } +} + +/// Keep all events you need with some useful indexes +#[derive(Default, Debug, Clone)] +pub(crate) struct EventStore { + /// maps ids to events + id_to_event: Map, + + /// maps slot to a set of event ids + slot_to_id: HashMap>, + + /// maps initial caller to a set of event ids + caller_to_id: Map>, + + /// maps direct event producer to a set of event ids + smart_contract_to_id: Map>, + + /// maps operation id to a set of event ids + operation_id_to_event_id: Map>, +} + +impl EventStore { + /// add event to the store and all its indexes + pub fn insert(&mut self, id: SCOutputEventId, event: SCOutputEvent) { + if let Entry::Vacant(entry) = self.id_to_event.entry(id) { + self.slot_to_id + .entry(event.context.slot) + .or_insert_with(Set::::default) + .insert(id); + if let Some(&caller) = event.context.call_stack.front() { + self.caller_to_id + .entry(caller) + .or_insert_with(Set::::default) + .insert(id); + } + if let Some(&sc) = event.context.call_stack.back() { + self.smart_contract_to_id + .entry(sc) + .or_insert_with(Set::::default) + .insert(id); + } + if let Some(op) = event.context.origin_operation_id { + self.operation_id_to_event_id + .entry(op) + .or_insert_with(Set::::default) + .insert(id); + } + entry.insert(event); + } else { + // just warn or return error ? + warn!("execution event already exist {:?}", id) + } + } + + /// get just the map if ids to events + pub fn export(&self) -> Map { + self.id_to_event.clone() + } + + /// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse. 
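// Illustrative sketch (not part of the patch): the index-maintenance helper
// (`remove_from_map` / `remove_from_hashmap` in the patch), shown here as a
// hypothetical `remove_from_index` over plain std collections. Removing an id from
// the set stored under `key` also drops the map entry once the set becomes empty,
// so the secondary indexes never keep empty buckets around.
use std::collections::{HashMap, HashSet};

fn remove_from_index(index: &mut HashMap<String, HashSet<u64>>, key: &str, id: u64) {
    if let Some(ids) = index.get_mut(key) {
        ids.remove(&id);
        if ids.is_empty() {
            index.remove(key);
        }
    }
}

fn main() {
    let mut index: HashMap<String, HashSet<u64>> = HashMap::new();
    index.entry("caller_A".into()).or_default().insert(7);
    remove_from_index(&mut index, "caller_A", 7);
    assert!(!index.contains_key("caller_A")); // the empty bucket was dropped
}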
+ pub fn clear(&mut self) { + self.id_to_event.clear(); + self.slot_to_id.clear(); + self.caller_to_id.clear(); + self.smart_contract_to_id.clear(); + self.operation_id_to_event_id.clear(); + } + + /// Prune the exess of events from the event store, + /// While there is a slot found, pop slots and get the `event_ids` + /// inside, remove the event from divers containers. + /// + /// Return directly if the event_size <= max_final_events + pub fn prune(&mut self, max_final_events: usize) { + let mut events_size = self.id_to_event.len(); + if events_size <= max_final_events { + return; + } + let mut slots = self.slot_to_id.keys().copied().collect::>(); + slots.sort_unstable_by_key(|s| cmp::Reverse(*s)); + loop { + let slot = match slots.pop() { + Some(slot) => slot, + _ => return, + }; + let event_ids = match self.slot_to_id.get(&slot) { + Some(event_ids) => event_ids.clone(), + _ => continue, + }; + for event_id in event_ids.iter() { + let event = match self.id_to_event.remove(event_id) { + Some(event) => event, + _ => continue, /* This shouldn't happen */ + }; + remove_from_hashmap(&mut self.slot_to_id, &event.context.slot, event_id); + if let Some(caller) = event.context.call_stack.front() { + remove_from_map(&mut self.caller_to_id, caller, event_id); + } + if let Some(sc) = event.context.call_stack.back() { + remove_from_map(&mut self.smart_contract_to_id, sc, event_id); + } + if let Some(op) = event.context.origin_operation_id { + remove_from_map(&mut self.operation_id_to_event_id, &op, event_id); + } + events_size -= 1; + if events_size <= max_final_events { + return; + } + } + } + } + + /// Extend an event store with another one + pub fn extend(&mut self, other: EventStore) { + self.id_to_event.extend(other.id_to_event); + + other + .slot_to_id + .iter() + .for_each(|(slot, ids)| match self.slot_to_id.get_mut(slot) { + Some(set) => set.extend(ids), + None => { + self.slot_to_id.insert(*slot, ids.clone()); + } + }); + + other.caller_to_id.iter().for_each(|(caller, ids)| { + match self.caller_to_id.get_mut(caller) { + Some(set) => set.extend(ids), + None => { + self.caller_to_id.insert(*caller, ids.clone()); + } + } + }); + + other.smart_contract_to_id.iter().for_each(|(sc, ids)| { + match self.smart_contract_to_id.get_mut(sc) { + Some(set) => set.extend(ids), + None => { + self.smart_contract_to_id.insert(*sc, ids.clone()); + } + } + }); + + other.operation_id_to_event_id.iter().for_each(|(op, ids)| { + match self.operation_id_to_event_id.get_mut(op) { + Some(set) => set.extend(ids), + None => { + self.operation_id_to_event_id.insert(*op, ids.clone()); + } + } + }) + } + + /// get vec of event for given slot range (start included, end excluded) + /// Get events optionnally filtered by: + /// * start slot + /// * end slot + /// * emitter address + /// * original caller address + /// * operation id + pub fn get_filtered_sc_output_event( + &self, + start: Slot, + end: Slot, + emitter_address: Option
<Address>, + original_caller_address: Option<Address>
, + original_operation_id: Option, + ) -> Vec { + let empty = Set::::default(); + self.slot_to_id + .iter() + // filter on slots + .filter_map(|(slot, ids)| { + if slot >= &start && slot < &end { + Some(ids) + } else { + None + } + }) + .flatten() + // filter on original caller + .chain(if let Some(addr) = original_caller_address { + match self.caller_to_id.get(&addr) { + Some(it) => it.iter(), + None => empty.iter(), + } + } else { + empty.iter() + }) + // filter on emitter + .chain(if let Some(addr) = emitter_address { + match self.smart_contract_to_id.get(&addr) { + Some(it) => it.iter(), + None => empty.iter(), + } + } else { + empty.iter() + }) + // filter on operation id + .chain(if let Some(op) = original_operation_id { + match self.operation_id_to_event_id.get(&op) { + Some(it) => it.iter(), + None => empty.iter(), + } + } else { + empty.iter() + }) + .filter_map(|id| self.id_to_event.get(id)) + .cloned() + .collect() + } +} diff --git a/massa-execution/src/lib.rs b/massa-execution/src/lib.rs index 9c0df23a91d..ff02542ad90 100644 --- a/massa-execution/src/lib.rs +++ b/massa-execution/src/lib.rs @@ -3,13 +3,14 @@ mod config; mod controller; mod error; +mod event_store; mod exports; mod interface_impl; mod sce_ledger; mod speculative_ledger; mod types; mod vm; -mod vm_new; +mod vm_thread; mod worker; pub use config::{ExecutionConfigs, ExecutionSettings}; diff --git a/massa-execution/src/types.rs b/massa-execution/src/types.rs index a8a2272ba27..c379b3005be 100644 --- a/massa-execution/src/types.rs +++ b/massa-execution/src/types.rs @@ -1,21 +1,10 @@ -use crate::sce_ledger::{FinalLedger, SCELedger, SCELedgerChanges, SCELedgerStep}; -use crate::BootstrapExecutionState; -use massa_models::api::SCELedgerInfo; -use massa_models::execution::ExecuteReadOnlyResponse; -use massa_models::output_event::{SCOutputEvent, SCOutputEventId}; -use massa_models::prehash::{Map, PreHashed, Set}; -/// Define types used while executing block bytecodes -use massa_models::{Address, Amount, Block, BlockId, OperationId, Slot}; -use rand::SeedableRng; +use crate::sce_ledger::SCELedgerChanges; +use crate::speculative_ledger::SpeculativeLedger; +use crate::{event_store::EventStore, ExecutionError}; +use massa_ledger::LedgerChanges; +use massa_models::{Address, Amount, BlockId, OperationId, Slot}; use rand_xoshiro::Xoshiro256PlusPlus; -use std::cmp; -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use std::hash::Hash; -use std::sync::{Condvar, Mutex}; -use std::{collections::VecDeque, sync::Arc}; -use tokio::sync::oneshot; -use tracing::warn; +use std::collections::VecDeque; /// history of active executed steps pub(crate) type StepHistory = VecDeque; @@ -36,220 +25,8 @@ pub(crate) struct StepHistoryItem { pub events: EventStore, } -/// Keep all events you need with some useful indexes -#[derive(Default, Debug, Clone)] -pub(crate) struct EventStore { - /// maps ids to events - id_to_event: Map, - - /// maps slot to a set of event ids - slot_to_id: HashMap>, - - /// maps initial caller to a set of event ids - caller_to_id: Map>, - - /// maps direct event producer to a set of event ids - smart_contract_to_id: Map>, - - /// maps operation id to a set of event ids - operation_id_to_event_id: Map>, -} - -impl EventStore { - /// add event to the store and all its indexes - pub fn insert(&mut self, id: SCOutputEventId, event: SCOutputEvent) { - if let Entry::Vacant(entry) = self.id_to_event.entry(id) { - self.slot_to_id - .entry(event.context.slot) - .or_insert_with(Set::::default) - .insert(id); - 
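// Illustrative sketch (not part of the patch): the effect of
// `get_filtered_sc_output_event`, shown as a plain linear scan over simplified event
// records (u64 slots, String emitters) rather than the chained index sets the event
// store actually maintains. The slot range is start-inclusive, end-exclusive.
#[derive(Clone)]
struct Event {
    slot: u64,
    emitter: String,
}

fn filter_events(events: &[Event], start: u64, end: u64, emitter: Option<&str>) -> Vec<Event> {
    events
        .iter()
        .filter(|e| e.slot >= start && e.slot < end)
        .filter(|e| emitter.map_or(true, |addr| e.emitter == addr))
        .cloned()
        .collect()
}

fn main() {
    let events = vec![
        Event { slot: 1, emitter: "sc_A".into() },
        Event { slot: 3, emitter: "sc_B".into() },
    ];
    assert_eq!(filter_events(&events, 0, 2, None).len(), 1);
    assert_eq!(filter_events(&events, 0, 10, Some("sc_B")).len(), 1);
}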
if let Some(&caller) = event.context.call_stack.front() { - self.caller_to_id - .entry(caller) - .or_insert_with(Set::::default) - .insert(id); - } - if let Some(&sc) = event.context.call_stack.back() { - self.smart_contract_to_id - .entry(sc) - .or_insert_with(Set::::default) - .insert(id); - } - if let Some(op) = event.context.origin_operation_id { - self.operation_id_to_event_id - .entry(op) - .or_insert_with(Set::::default) - .insert(id); - } - entry.insert(event); - } else { - // just warn or return error ? - warn!("execution event already exist {:?}", id) - } - } - - /// get just the map if ids to events - pub fn export(&self) -> Map { - self.id_to_event.clone() - } - - /// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse. - pub fn clear(&mut self) { - self.id_to_event.clear(); - self.slot_to_id.clear(); - self.caller_to_id.clear(); - self.smart_contract_to_id.clear(); - self.operation_id_to_event_id.clear(); - } - - /// Prune the exess of events from the event store, - /// While there is a slot found, pop slots and get the `event_ids` - /// inside, remove the event from divers containers. - /// - /// Return directly if the event_size <= max_final_events - pub fn prune(&mut self, max_final_events: usize) { - let mut events_size = self.id_to_event.len(); - if events_size <= max_final_events { - return; - } - let mut slots = self.slot_to_id.keys().copied().collect::>(); - slots.sort_unstable_by_key(|s| cmp::Reverse(*s)); - loop { - let slot = match slots.pop() { - Some(slot) => slot, - _ => return, - }; - let event_ids = match self.slot_to_id.get(&slot) { - Some(event_ids) => event_ids.clone(), - _ => continue, - }; - for event_id in event_ids.iter() { - let event = match self.id_to_event.remove(event_id) { - Some(event) => event, - _ => continue, /* This shouldn't happen */ - }; - remove_from_hashmap(&mut self.slot_to_id, &event.context.slot, event_id); - if let Some(caller) = event.context.call_stack.front() { - remove_from_map(&mut self.caller_to_id, caller, event_id); - } - if let Some(sc) = event.context.call_stack.back() { - remove_from_map(&mut self.smart_contract_to_id, sc, event_id); - } - if let Some(op) = event.context.origin_operation_id { - remove_from_map(&mut self.operation_id_to_event_id, &op, event_id); - } - events_size -= 1; - if events_size <= max_final_events { - return; - } - } - } - } - - /// Extend an event store with another one - pub fn extend(&mut self, other: EventStore) { - self.id_to_event.extend(other.id_to_event); - - other - .slot_to_id - .iter() - .for_each(|(slot, ids)| match self.slot_to_id.get_mut(slot) { - Some(set) => set.extend(ids), - None => { - self.slot_to_id.insert(*slot, ids.clone()); - } - }); - - other.caller_to_id.iter().for_each(|(caller, ids)| { - match self.caller_to_id.get_mut(caller) { - Some(set) => set.extend(ids), - None => { - self.caller_to_id.insert(*caller, ids.clone()); - } - } - }); - - other.smart_contract_to_id.iter().for_each(|(sc, ids)| { - match self.smart_contract_to_id.get_mut(sc) { - Some(set) => set.extend(ids), - None => { - self.smart_contract_to_id.insert(*sc, ids.clone()); - } - } - }); - - other.operation_id_to_event_id.iter().for_each(|(op, ids)| { - match self.operation_id_to_event_id.get_mut(op) { - Some(set) => set.extend(ids), - None => { - self.operation_id_to_event_id.insert(*op, ids.clone()); - } - } - }) - } - - /// get vec of event for given slot range (start included, end excluded) - /// Get events optionnally filtered by: - /// * start slot - /// * end slot - /// * 
emitter address - /// * original caller address - /// * operation id - pub fn get_filtered_sc_output_event( - &self, - start: Slot, - end: Slot, - emitter_address: Option
<Address>, - original_caller_address: Option<Address>
, - original_operation_id: Option, - ) -> Vec { - let empty = Set::::default(); - self.slot_to_id - .iter() - // filter on slots - .filter_map(|(slot, ids)| { - if slot >= &start && slot < &end { - Some(ids) - } else { - None - } - }) - .flatten() - // filter on original caller - .chain(if let Some(addr) = original_caller_address { - match self.caller_to_id.get(&addr) { - Some(it) => it.iter(), - None => empty.iter(), - } - } else { - empty.iter() - }) - // filter on emitter - .chain(if let Some(addr) = emitter_address { - match self.smart_contract_to_id.get(&addr) { - Some(it) => it.iter(), - None => empty.iter(), - } - } else { - empty.iter() - }) - // filter on operation id - .chain(if let Some(op) = original_operation_id { - match self.operation_id_to_event_id.get(&op) { - Some(it) => it.iter(), - None => empty.iter(), - } - } else { - empty.iter() - }) - .filter_map(|id| self.id_to_event.get(id)) - .cloned() - .collect() - } -} - #[derive(Clone)] -pub struct StackElement { +pub struct ExecutionStackElement { /// called address pub address: Address, /// coins transferred to the target address during a call, @@ -258,11 +35,9 @@ pub struct StackElement { pub owned_addresses: Vec
, } -#[derive(Clone)] -/// Stateful context, providing a context during the execution of a module pub(crate) struct ExecutionContext { - /// final and active ledger at the current step - pub ledger_step: SCELedgerStep, + // speculative ledger + speculative_ledger: SpeculativeLedger, /// max gas for this execution pub max_gas: u64, @@ -286,7 +61,7 @@ pub(crate) struct ExecutionContext { pub opt_block_creator_addr: Option
, /// address call stack, most recent is at the back - pub stack: Vec, + pub stack: Vec, /// True if it's a read-only context pub read_only: bool, @@ -301,194 +76,29 @@ pub(crate) struct ExecutionContext { pub origin_operation_id: Option, } -/// an active execution step target slot and block -#[derive(Clone)] -pub(crate) struct ExecutionStep { - /// slot at which the execution step will happen +pub struct ExecutionOutput { + // slot pub slot: Slot, - - /// Some(BlockID, block), if a block is present at this slot, otherwise None - pub block: Option<(BlockId, Block)>, -} - -impl ExecutionContext { - pub fn new(ledger: SCELedger, ledger_at_slot: Slot) -> Self { - let final_ledger_slot = FinalLedger { - ledger, - slot: ledger_at_slot, - }; - ExecutionContext { - ledger_step: SCELedgerStep { - final_ledger_slot, - cumulative_history_changes: Default::default(), - caused_changes: Default::default(), - }, - max_gas: Default::default(), - stack: Default::default(), - gas_price: Default::default(), - slot: Slot::new(0, 0), - opt_block_id: Default::default(), - opt_block_creator_addr: Default::default(), - created_addr_index: Default::default(), - read_only: Default::default(), - created_event_index: Default::default(), - events: Default::default(), - unsafe_rng: Xoshiro256PlusPlus::from_seed([0u8; 32]), - origin_operation_id: Default::default(), - } - } -} - -impl From for SCELedgerChanges { - fn from(step: StepHistory) -> Self { - let mut ret = SCELedgerChanges::default(); - step.iter() - .for_each(|StepHistoryItem { ledger_changes, .. }| { - ret.apply_changes(ledger_changes); - }); - ret - } -} - -// Thread vm types: - -/// execution request -pub(crate) enum ExecutionRequest { - /// Runs a final step - RunFinalStep(ExecutionStep), - /// Runs an active step - #[allow(dead_code)] // TODO DISABLED TEMPORARILY #2101 - RunActiveStep(ExecutionStep), - /// Resets the VM to its final state - /// Run code in read-only mode - RunReadOnly { - /// The slot at which the execution will occur. - slot: Slot, - /// Maximum gas spend in execution. - max_gas: u64, - /// The simulated price of gas for the read-only execution. - simulated_gas_price: Amount, - /// The code to execute. - bytecode: Vec, - /// The channel used to send the result of execution. - result_sender: oneshot::Sender, - /// The address, or a default random one if none is provided, - /// which will simulate the sender of the operation. - address: Option
, - }, - /// Reset to latest final state - ResetToFinalState, - /// Get bootstrap state - GetBootstrapState { - response_tx: oneshot::Sender, - }, - /// Shutdown state, set by the worker to signal shutdown to the VM thread. - Shutdown, - /// Get events optionnally filtered by: - /// * start slot - /// * end slot - /// * emitter address - /// * original caller address - /// * operation id - GetSCOutputEvents { - start: Option, - end: Option, - emitter_address: Option
<Address>, - original_caller_address: Option<Address>
, - original_operation_id: Option, - response_tx: oneshot::Sender>, - }, - /// Get ledger entry for address - GetSCELedgerForAddresses { - response_tx: oneshot::Sender>, - addresses: Vec
, - }, -} - -pub(crate) type ExecutionQueue = Arc<(Mutex>, Condvar)>; - -/// Wrapping structure for an ExecutionSC and a sender -pub struct ExecutionData { - /// Sender address - pub sender_address: Address, - /// Smart contract bytecode. - pub bytecode: Vec, - /// The maximum amount of gas that the execution of the contract is allowed to cost. - pub max_gas: u64, - /// Extra coins that are spent by consensus and are available in the execution context of the contract. - pub coins: Amount, - /// The price per unit of gas that the caller is willing to pay for the execution. - pub gas_price: Amount, -} - -impl TryFrom<&massa_models::Operation> for ExecutionData { - type Error = anyhow::Error; - - fn try_from(operation: &massa_models::Operation) -> anyhow::Result { - match &operation.content.op { - massa_models::OperationType::ExecuteSC { - data, - max_gas, - gas_price, - coins, - } => Ok(ExecutionData { - bytecode: data.to_owned(), - sender_address: Address::from_public_key(&operation.content.sender_public_key), - max_gas: *max_gas, - gas_price: *gas_price, - coins: *coins, - }), - _ => anyhow::bail!("Conversion require an `OperationType::ExecuteSC`"), - } - } -} - -#[inline] -/// Remove a given event_id from a `Set` -/// The Set is stored into a map `ctnr` at a `key` address. If -/// the Set resulted from the operation is empty, remove the entry -/// from the `ctnr` -/// -/// Used in `prune()` -fn remove_from_map( - ctnr: &mut Map>, - key: &T, - evt_id: &SCOutputEventId, -) { - match ctnr.get_mut(key) { - Some(ele) => { - ele.remove(evt_id); - if ele.is_empty() { - ctnr.remove(key); - } - } - _ => { - ctnr.remove(key); - } - } + // optional block ID at that slot (None if miss) + pub block_id: Option, + // ledger_changes caused by the execution step + pub ledger_changes: LedgerChanges, + // events emitted by the execution step + pub events: EventStore, } -#[inline] -/// Remove a given event_id from a `Set` -/// The Set is stored into a Hashmap `ctnr` at a `key` address. If -/// the Set resulted from the operation is empty, remove the entry -/// from the `ctnr` -/// -/// Used in `prune()` -fn remove_from_hashmap( - ctnr: &mut HashMap>, - key: &T, - evt_id: &SCOutputEventId, -) { - match ctnr.get_mut(key) { - Some(ele) => { - ele.remove(evt_id); - if ele.is_empty() { - ctnr.remove(key); - } - } - _ => { - ctnr.remove(key); - } - } +/// structure describing a read-only execution request +pub struct ReadOnlyExecutionRequest { + /// The slot at which the execution will occur. + slot: Slot, + /// Maximum gas to spend in the execution. + max_gas: u64, + /// The simulated price of gas for the read-only execution. + simulated_gas_price: Amount, + /// The code to execute. + bytecode: Vec, + /// Call stack to simulate + call_stack: Vec
, + /// The channel used to send the result of the execution. + result_sender: std::sync::mpsc::Sender>, } diff --git a/massa-execution/src/vm_new.rs b/massa-execution/src/vm_thread.rs similarity index 78% rename from massa-execution/src/vm_new.rs rename to massa-execution/src/vm_thread.rs index fa3a552bac1..4d6aa034f57 100644 --- a/massa-execution/src/vm_new.rs +++ b/massa-execution/src/vm_thread.rs @@ -1,144 +1,23 @@ -use crate::speculative_ledger::SpeculativeLedger; -use crate::ExecutionError; -use massa_ledger::{Applicable, FinalLedger, LedgerChanges, LedgerEntry, LedgerEntryUpdate}; +use crate::config::VMConfig; +use crate::controller::VMController; +use crate::types::{ExecutionContext, ExecutionOutput, ReadOnlyExecutionRequest}; +use crate::{event_store::EventStore, speculative_ledger::SpeculativeLedger}; +use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; +use massa_models::BlockId; use massa_models::{ timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, Block, Slot, }; -use massa_models::{Address, Amount, BlockId}; use massa_time::MassaTime; +use rand::SeedableRng; +use rand_xoshiro::Xoshiro256PlusPlus; use std::{ collections::{HashMap, VecDeque}, - sync::{Arc, Condvar, Mutex, RwLock}, + sync::{Arc, Mutex, RwLock}, }; -use tracing::info; - -/// VM module configuration -pub struct VMConfig { - /// number of threads - thread_count: u8, - /// extra lag to add on the cursor to improve performance - cursor_delay: MassaTime, - /// time compensation in milliseconds - clock_compensation: i64, - /// genesis timestamp - genesis_timestamp: MassaTime, - /// period duration - t0: MassaTime, -} - -/// structure describing a read-only execution request -pub struct ReadOnlyExecutionRequest { - /// The slot at which the execution will occur. - slot: Slot, - /// Maximum gas to spend in the execution. - max_gas: u64, - /// The simulated price of gas for the read-only execution. - simulated_gas_price: Amount, - /// The code to execute. - bytecode: Vec, - /// Call stack to simulate - call_stack: Vec
, - /// The channel used to send the result of the execution. - result_sender: std::sync::mpsc::Sender>, -} - -/// structure used to communicate with the VM thread -#[derive(Default)] -pub struct VMInputData { - /// set stop to true to stop the thread - stop: bool, - /// signal whether the blockclique changed - blockclique_changed: bool, - /// list of newly finalized blocks - finalized_blocks: HashMap, - /// blockclique - blockclique: HashMap, - /// readonly execution requests - readonly_requests: VecDeque, -} - -/// VM controller -pub struct VMController { - /// condition variable to wake up the VM loop - loop_cv: Condvar, - /// input data to process in the VM loop - input_data: Mutex, -} - -/// VM manager -pub struct VMManager { - /// shared reference to the VM controller - controller: Arc, - /// handle used to join the VM thread - thread_handle: std::thread::JoinHandle<()>, -} - -impl VMManager { - /// stops the VM - pub fn stop(self) { - info!("stopping VM controller..."); - // notify the VM thread to stop - { - let mut input_wlock = self - .controller - .input_data - .lock() - .expect("could not w-lock VM input data"); - input_wlock.stop = true; - self.controller.loop_cv.notify_one(); - } - // join the VM thread - self.thread_handle - .join() - .expect("VM controller thread panicked"); - info!("VM controller stopped"); - } - - /// get a shared reference to the VM controller - pub fn get_controller(&self) -> Arc { - self.controller.clone() - } -} - -/// launches the VM and returns a VMManager -/// -/// # parameters -/// * config: VM configuration -/// * bootstrap: -pub fn start_vm(config: VMConfig, final_ledger: Arc>) -> VMManager { - let controller = Arc::new(VMController { - loop_cv: Condvar::new(), - input_data: Mutex::new(VMInputData { - blockclique_changed: true, - ..Default::default() - }), - }); - - let ctl = controller.clone(); - let thread_handle = std::thread::spawn(move || { - VMThread::new(config, ctl, final_ledger).main_loop(); - }); - - VMManager { - controller, - thread_handle, - } -} - -struct ExecutionOutput { - // slot - slot: Slot, - // optional block ID at that slot (None if miss) - block_id: Option, - // ledger_changes caused by the execution step - ledger_changes: LedgerChanges, - // events emitted by the execution step - //TODO events: EventStore -} /// structure gathering all elements needed by the VM thread -struct VMThread { +pub struct VMThread { // VM config config: VMConfig, // Final ledger @@ -166,17 +45,11 @@ struct VMThread { // execution context execution_context: Arc>, // final events - // final_events: EventStore, -} - -pub(crate) struct ExecutionContext { - //TODO other things (eg. 
call stack) - speculative_ledger: SpeculativeLedger, - //TODO event store + final_events: EventStore, } impl VMThread { - fn new( + pub fn new( config: VMConfig, controller: Arc, final_ledger: Arc>, @@ -190,6 +63,18 @@ impl VMThread { final_ledger.clone(), LedgerChanges::default(), ), + max_gas: Default::default(), + gas_price: Default::default(), + slot: Slot::new(0, 0), + created_addr_index: Default::default(), + created_event_index: Default::default(), + opt_block_id: Default::default(), + opt_block_creator_addr: Default::default(), + stack: Default::default(), + read_only: Default::default(), + events: Default::default(), + unsafe_rng: Xoshiro256PlusPlus::from_seed([0u8; 32]), + origin_operation_id: Default::default(), })); VMThread { @@ -206,21 +91,10 @@ impl VMThread { active_slots: Default::default(), execution_history: Default::default(), config, + final_events: Default::default(), } } - /// reads the list of newly finalized blocks and the new blockclique, if there was a change - /// if found, remove from input queue - fn consume_input(&mut self) -> VMInputData { - std::mem::take( - &mut self - .controller - .input_data - .lock() - .expect("VM input data lock failed"), - ) - } - /// update final slots fn update_final_slots(&mut self, new_css_finals: HashMap) { // return if empty @@ -546,13 +420,15 @@ impl VMThread { /// executed a readonly request fn execute_readonly_request(&mut self, req: ReadOnlyExecutionRequest) { // TODO + + //TODO send execution result back through req.result_sender } /// main VM loop - fn main_loop(&mut self) { + pub fn main_loop(&mut self) { loop { // read input queues - let input_data = self.consume_input(); + let input_data = self.controller.consume_input(); // check for stop signal if input_data.stop { From 6b8020b1a5dae0b5227f7adc91515fc731419ba5 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 16 Feb 2022 19:21:10 +0100 Subject: [PATCH 09/73] reorganization and safety --- massa-execution/src/config.rs | 1 + massa-execution/src/context.rs | 197 ++++++++++++ massa-execution/src/controller.rs | 77 +++-- massa-execution/src/error.rs | 3 + massa-execution/src/execution.rs | 207 +++++++++++++ massa-execution/src/interface_impl.rs | 221 ++++---------- massa-execution/src/lib.rs | 2 + massa-execution/src/speculative_ledger.rs | 158 +++++++++- massa-execution/src/types.rs | 47 +-- massa-execution/src/vm_thread.rs | 336 ++++++++++----------- massa-ledger/src/error.rs | 4 +- massa-ledger/src/ledger.rs | 346 ++++++++++++++-------- 12 files changed, 1039 insertions(+), 560 deletions(-) create mode 100644 massa-execution/src/context.rs create mode 100644 massa-execution/src/execution.rs diff --git a/massa-execution/src/config.rs b/massa-execution/src/config.rs index e4c366be7de..628c18e660c 100644 --- a/massa-execution/src/config.rs +++ b/massa-execution/src/config.rs @@ -2,6 +2,7 @@ use massa_time::MassaTime; use std::path::PathBuf; /// VM module configuration +#[derive(Debug, Clone)] pub struct VMConfig { /// Initial SCE ledger file pub initial_sce_ledger_path: PathBuf, diff --git a/massa-execution/src/context.rs b/massa-execution/src/context.rs new file mode 100644 index 00000000000..75813da5943 --- /dev/null +++ b/massa-execution/src/context.rs @@ -0,0 +1,197 @@ +use crate::sce_ledger::SCELedgerChanges; +use crate::speculative_ledger::SpeculativeLedger; +use crate::types::ExecutionStackElement; +use crate::{event_store::EventStore, ExecutionError}; +use massa_ledger::{FinalLedger, LedgerChanges}; +use massa_models::{Address, Amount, BlockId, OperationId, Slot}; 
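// Illustrative sketch (not part of the patch): why the execution context seeds its
// `unsafe_rng` from a fixed-size byte seed. Two generators built from the same seed
// yield the same stream, which keeps RNG use reproducible across nodes replaying the
// same slot. Assumes the rand / rand_xoshiro crates this module already depends on.
use rand::{RngCore, SeedableRng};
use rand_xoshiro::Xoshiro256PlusPlus;

fn main() {
    let seed = [0u8; 32];
    let mut a = Xoshiro256PlusPlus::from_seed(seed);
    let mut b = Xoshiro256PlusPlus::from_seed(seed);
    assert_eq!(a.next_u64(), b.next_u64()); // identical streams from identical seeds
}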
+use rand::SeedableRng; +use rand_xoshiro::Xoshiro256PlusPlus; +use std::collections::VecDeque; +use std::sync::{Arc, RwLock}; + +pub(crate) struct ExecutionContext { + // speculative ledger + speculative_ledger: SpeculativeLedger, + + /// max gas for this execution + pub max_gas: u64, + + /// gas price of the execution + pub gas_price: Amount, + + /// slot at which the execution happens + pub slot: Slot, + + /// counter of newly created addresses so far during this execution + pub created_addr_index: u64, + + /// counter of newly created events so far during this execution + pub created_event_index: u64, + + /// block ID, if one is present at this slot + pub opt_block_id: Option, + + /// block creator addr, if there is a block at this slot + pub opt_block_creator_addr: Option
, + + /// address call stack, most recent is at the back + pub stack: Vec, + + /// True if it's a read-only context + pub read_only: bool, + + /// geerated events during this execution, with multiple indexes + pub events: EventStore, + + /// Unsafe RNG state + pub unsafe_rng: Xoshiro256PlusPlus, + + /// origin operation id + pub origin_operation_id: Option, +} + +impl ExecutionContext { + pub(crate) fn new(final_ledger: Arc>) -> Self { + ExecutionContext { + speculative_ledger: SpeculativeLedger::new(final_ledger, Default::default()), + max_gas: Default::default(), + gas_price: Default::default(), + slot: Slot::new(0, 0), + created_addr_index: Default::default(), + created_event_index: Default::default(), + opt_block_id: Default::default(), + opt_block_creator_addr: Default::default(), + stack: Default::default(), + read_only: Default::default(), + events: Default::default(), + unsafe_rng: Xoshiro256PlusPlus::from_seed([0u8; 32]), + origin_operation_id: Default::default(), + } + } + + /// gets the address at the top of the stack + pub fn get_current_address(&self) -> Result { + match self.stack.last() { + Some(addr) => Ok(addr.address), + _ => { + return Err(ExecutionError::RuntimeError( + "failed to read current address: call stack empty".into(), + )) + } + } + } + + /// gets the current list of owned addresses (top of the stack) + /// ordering is conserved for determinism + pub fn get_current_owned_addresses(&self) -> Result, ExecutionError> { + match self.stack.last() { + Some(v) => Ok(v.owned_addresses.clone()), + None => { + return Err(ExecutionError::RuntimeError( + "failed to read current owned addresses list: call stack empty".into(), + )) + } + } + } + + /// gets the current call coins + pub fn get_current_call_coins(&self) -> Result { + match self.stack.last() { + Some(v) => Ok(v.coins), + None => { + return Err(ExecutionError::RuntimeError( + "failed to read current call coins: call stack empty".into(), + )) + } + } + } + + /// gets the call stack (addresses) + pub fn get_call_stack(&self) -> Vec
{ + self.stack.iter().map(|v| v.address).collect() + } + + /// check whether the context grants write access on a given address + pub fn has_write_rights_on(&self, addr: &Address) -> bool { + self.stack + .last() + .map_or(false, |v| v.owned_addresses.contains(&addr)) + } + + /// creates a new smart contract address with initial bytecode, within the current execution context + pub fn create_new_sc_address(&mut self, bytecode: Vec) -> Result { + // TODO: security problem: + // prefix addresses to know if they are SCs or normal, otherwise people can already create new accounts by sending coins to the right hash + // they won't have ownership over it but this can still be a pain + + // generate address + let (slot, created_addr_index) = (self.slot, self.created_addr_index); + let mut data: Vec = slot.to_bytes_key().to_vec(); + data.append(&mut self.created_addr_index.to_be_bytes().to_vec()); + if self.read_only { + data.push(0u8); + } else { + data.push(1u8); + } + let address = Address(massa_hash::hash::Hash::compute_from(&data)); + + // create address in the speculative ledger + self.speculative_ledger + .create_new_sc_address(address, bytecode)?; + + // add to owned addresses + match self.stack.last_mut() { + Some(v) => { + v.owned_addresses.push(address); + } + None => { + return Err(ExecutionError::RuntimeError( + "owned addresses not found in context stack".into(), + )) + } + }; + + // increment the address creation counter at this slot + self.created_addr_index += 1; + + Ok(address) + } + + /// gets the bytecode of an address if it exists + pub fn get_bytecode(&self, address: &Address) -> Option> { + self.speculative_ledger.get_bytecode(address) + } + + /// gets the bytecode of an address if it exists + pub fn get_parallel_balance(&self, address: &Address) -> Option { + self.speculative_ledger.get_parallel_balance(address) + } + + /// Transfers parallel coins from one address to another. + /// No changes are retained in case of failure. + /// Spending is only allowed from existing addresses we have write acess on + /// + /// # parameters + /// * from_addr: optional spending address (use None for pure coin creation) + /// * to_addr: optional crediting address (use None for pure coin destruction) + /// * amount: amount of coins to transfer + pub fn transfer_parallel_coins( + &mut self, + from_addr: Option
<Address>,
+ to_addr: Option<Address>
, + amount: Amount, + ) -> Result<(), ExecutionError> { + // check access rights + if let Some(from_addr) = &from_addr { + if !self.has_write_rights_on(from_addr) { + return Err(ExecutionError::RuntimeError(format!( + "spending from address {} is not allowed in this context", + from_addr + ))); + } + } + // do the transfer + self.speculative_ledger + .transfer_parallel_coins(from_addr, to_addr, amount) + } +} diff --git a/massa-execution/src/controller.rs b/massa-execution/src/controller.rs index fd744022430..0ea13517cae 100644 --- a/massa-execution/src/controller.rs +++ b/massa-execution/src/controller.rs @@ -1,8 +1,12 @@ +use crate::execution::ExecutionState; +use crate::speculative_ledger::SpeculativeLedger; +use crate::types::{ExecutionContext, ExecutionOutput}; +use crate::ExecutionError; use crate::{config::VMConfig, types::ReadOnlyExecutionRequest, vm_thread::VMThread}; use massa_ledger::FinalLedger; use massa_models::{Block, BlockId, Slot}; use std::collections::{HashMap, VecDeque}; -use std::sync::{Arc, Condvar, Mutex, RwLock}; +use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock}; use tracing::info; /// structure used to communicate with the VM thread @@ -16,8 +20,11 @@ pub struct VMInputData { pub finalized_blocks: HashMap, /// blockclique pub blockclique: HashMap, - /// readonly execution requests - pub readonly_requests: VecDeque, + /// readonly execution requests and response mpscs + pub readonly_requests: VecDeque<( + ReadOnlyExecutionRequest, + mpsc::Sender>, + )>, } /// VM controller @@ -26,14 +33,51 @@ pub struct VMController { pub loop_cv: Condvar, /// input data to process in the VM loop pub input_data: Mutex, + /// execution state + pub execution_state: Arc>, } impl VMController { /// reads the list of newly finalized blocks and the new blockclique, if there was a change /// if found, remove from input queue - pub fn consume_input(&mut self) -> VMInputData { + pub(crate) fn consume_input(&mut self) -> VMInputData { std::mem::take(&mut self.input_data.lock().expect("VM input data lock failed")) } + + /// Executes a readonly request + pub fn execute_readonly_request( + &mut self, + req: ReadOnlyExecutionRequest, + max_queue_length: usize, + ) -> Result { + // queue request + let resp_rx = { + let input_data = self + .input_data + .lock() + .expect("could not lock VM input data"); + if input_data.readonly_requests.len() > max_queue_length { + return Err(ExecutionError::RuntimeError( + "too many queued readonly requests".into(), + )); + } + let (resp_tx, resp_rx) = + std::sync::mpsc::channel::>(); + input_data.readonly_requests.push_back((req, resp_tx)); + self.loop_cv.notify_one(); + resp_rx + }; + + // wait for response + match resp_rx.recv() { + Ok(result) => return result, + Err(err) => { + return Err(ExecutionError::RuntimeError( + "the VM input channel is closed".into(), + )) + } + } + } } /// VM manager @@ -70,28 +114,3 @@ impl VMManager { self.controller.clone() } } - -/// launches the VM and returns a VMManager -/// -/// # parameters -/// * config: VM configuration -/// * bootstrap: -pub fn start_vm(config: VMConfig, final_ledger: Arc>) -> VMManager { - let controller = Arc::new(VMController { - loop_cv: Condvar::new(), - input_data: Mutex::new(VMInputData { - blockclique_changed: true, - ..Default::default() - }), - }); - - let ctl = controller.clone(); - let thread_handle = std::thread::spawn(move || { - VMThread::new(config, ctl, final_ledger).main_loop(); - }); - - VMManager { - controller, - thread_handle, - } -} diff --git a/massa-execution/src/error.rs 
b/massa-execution/src/error.rs index 1cb6c769511..5adb6eccf0e 100644 --- a/massa-execution/src/error.rs +++ b/massa-execution/src/error.rs @@ -19,6 +19,9 @@ pub enum ExecutionError { /// File error FileError(String), + + /// Runtime error: {0} + RuntimeError(String), } macro_rules! bootstrap_file_error { diff --git a/massa-execution/src/execution.rs b/massa-execution/src/execution.rs new file mode 100644 index 00000000000..d6286d63974 --- /dev/null +++ b/massa-execution/src/execution.rs @@ -0,0 +1,207 @@ +use crate::config::VMConfig; +use crate::interface_impl::InterfaceImpl; +use crate::types::{ExecutionContext, ExecutionOutput, ReadOnlyExecutionRequest}; +use crate::ExecutionError; +use crate::{event_store::EventStore, speculative_ledger::SpeculativeLedger}; +use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; +use massa_models::BlockId; +use massa_models::{Block, Slot}; +use massa_sc_runtime::Interface; +use rand::SeedableRng; +use rand_xoshiro::Xoshiro256PlusPlus; +use std::{ + collections::{HashMap, VecDeque}, + sync::{Arc, Mutex, RwLock}, +}; + +pub struct ExecutionState { + // VM config + pub config: VMConfig, + // active execution output history + pub active_history: VecDeque, + // active execution cursor + pub active_cursor: Slot, + // final execution cursor + pub final_cursor: Slot, + // final events + pub final_events: EventStore, + // final ledger + pub final_ledger: Arc>, + // execution context + pub execution_context: Arc>, + // execution interface + pub execution_interface: Box, +} + +impl ExecutionState { + /// create a new execution state + pub fn new(config: VMConfig, final_ledger: Arc>) -> ExecutionState { + // get last final slot from final ledger + let last_final_slot = final_ledger + .read() + .expect("could not r-lock final ledger") + .slot; + + // init execution context + let execution_context = Arc::new(Mutex::new(ExecutionContext::new(final_ledger.clone()))); + + // Instantiate the interface used by the assembly simulator. 
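+ // Note: the interface receives a clone of the VM config and of the shared
+ // Arc<Mutex<ExecutionContext>>, so the callbacks that massa_sc_runtime invokes
+ // while bytecode runs operate on the very same context that the execution
+ // loop prepares for each slot; access is synchronized through the context mutex.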
+ let execution_interface = Box::new(InterfaceImpl::new( + config.clone(), + execution_context.clone(), + )); + + // build execution state + ExecutionState { + config, + final_ledger, + execution_context, + execution_interface, + active_history: Default::default(), + final_events: Default::default(), + active_cursor: last_final_slot, + final_cursor: last_final_slot, + } + } + + /// applies an execution output to the final state + pub fn apply_final_execution_output(&mut self, exec_out: ExecutionOutput) { + // apply final ledger changes + self.final_ledger + .write() + .expect("could not lock final ledger for writing") + .settle_slot(exec_out.slot, exec_out.ledger_changes); + self.final_cursor = exec_out.slot; + + // update active cursor + if self.active_cursor < self.final_cursor { + self.active_cursor = self.final_cursor; + } + + // save generated events to final store + self.final_events.extend(exec_out.events); + } + + /// applies an execution output to the active state + pub fn apply_active_execution_output(&mut self, exec_out: ExecutionOutput) { + // update active cursor + self.active_cursor = exec_out.slot; + + // add execution output to history + self.active_history.push_back(exec_out); + } + + /// clear execution history + pub fn clear_history(&mut self) { + // clear history + self.active_history.clear(); + + // reset active cursor + self.active_cursor = self.final_cursor; + } + + /// truncates active slots at the first mismatch + /// between the active execution output history and the planned active_slots + pub fn truncate_history(&mut self, active_slots: &HashMap>) { + // find mismatch point (included) + let mut truncate_at = None; + for (hist_index, exec_output) in self.active_history.iter().enumerate() { + let found_block_id = active_slots + .get(&exec_output.slot) + .map(|opt_b| opt_b.as_ref().map(|(b_id, b)| *b_id)); + if found_block_id == Some(exec_output.block_id) { + continue; + } + truncate_at = Some(hist_index); + break; + } + + // truncate speculative execution output history + if let Some(truncate_at) = truncate_at { + self.active_history.truncate(truncate_at); + self.active_cursor = self + .active_history + .back() + .map_or(self.final_cursor, |out| out.slot); + } + } + + /// returns the speculative ledger at the entrance of a given history slot + /// warning: only use in the main loop because the lock on the final ledger + /// at the base of the returned SpeculativeLedger is not held + pub fn get_accumulated_active_changes_at_slot(&self, slot: Slot) -> LedgerChanges { + // check that the slot is within the reach of history + if slot <= self.final_cursor { + panic!("cannot execute at a slot before finality"); + } + let max_slot = self + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("slot overflow when getting speculative ledger"); + if slot > max_slot { + panic!("cannot execute at a slot beyond active cursor + 1"); + } + + // gather the history of changes + let mut accumulated_changes = LedgerChanges::default(); + for previous_output in &self.active_history { + if previous_output.slot >= slot { + break; + } + accumulated_changes.apply(previous_output.ledger_changes.clone()); + } + + accumulated_changes + } + + /// executes a full slot without causing any changes to the state, + /// and yields an execution output + pub fn execute_slot(&self, slot: Slot, opt_block: Option<(BlockId, Block)>) -> ExecutionOutput { + // get the speculative ledger + let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); + let ledger = 
SpeculativeLedger::new(self.final_ledger.clone(), previous_ledger_changes); + + // TODO init context + + // TODO async executions + + let mut out_block_id = None; + if let Some((block_id, block)) = opt_block { + out_block_id = Some(block_id); + + //TODO execute block elements + } + + ExecutionOutput { + slot, + block_id: out_block_id, + ledger_changes: ledger.into_added_changes(), + } + } + + /// executed a readonly request + pub(crate) fn execute_readonly_request( + &self, + req: ReadOnlyExecutionRequest, + ) -> Result { + // execute at the slot just after the latest executed active slot + let slot = self + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("slot overflow in readonly execution"); + + // get the speculative ledger + let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); + let ledger = SpeculativeLedger::new(self.final_ledger.clone(), previous_ledger_changes); + + // TODO execute ReadOnlyExecutionRequest at slot with context req + + //TODO send execution result back through req.result_sender + Ok(ExecutionOutput { + slot, + block_id: None, + events: TODO, + ledger_changes: TODO, + }) + } +} diff --git a/massa-execution/src/interface_impl.rs b/massa-execution/src/interface_impl.rs index 796ee3f61ed..e71eabc59a2 100644 --- a/massa-execution/src/interface_impl.rs +++ b/massa-execution/src/interface_impl.rs @@ -1,6 +1,6 @@ /// Implementation of the interface used in the execution external library /// -use crate::types::{ExecutionContext, StackElement}; +use crate::{config::VMConfig, context::ExecutionContext, types::ExecutionStackElement}; use anyhow::{bail, Result}; use massa_hash::hash::Hash; use massa_models::{ @@ -26,25 +26,13 @@ macro_rules! context_guard { #[derive(Clone)] pub(crate) struct InterfaceImpl { + config: VMConfig, context: Arc>, - thread_count: u8, - t0: MassaTime, - genesis_timestamp: MassaTime, } impl InterfaceImpl { - pub fn new( - context: Arc>, - thread_count: u8, - t0: MassaTime, - genesis_timestamp: MassaTime, - ) -> InterfaceImpl { - InterfaceImpl { - context, - thread_count, - t0, - genesis_timestamp, - } + pub fn new(config: VMConfig, context: Arc>) -> InterfaceImpl { + InterfaceImpl { config, context } } } @@ -68,38 +56,33 @@ impl Interface for InterfaceImpl { let mut context = context_guard!(self); // get bytecode - let bytecode = match context.ledger_step.get_module(&to_address) { + let bytecode = match context.get_bytecode(&to_address) { Some(bytecode) => bytecode, - None => bail!("Error bytecode not found"), + None => bail!("bytecode not found for address {}", to_address), }; // get caller let from_address = match context.stack.last() { Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), + _ => bail!("failed to read call stack current address"), }; // transfer coins let coins = massa_models::Amount::from_raw(raw_coins); - // debit - context - .ledger_step - .set_balance_delta(from_address, coins, false)?; - // credit - if let Err(err) = context - .ledger_step - .set_balance_delta(to_address, coins, true) + if let Err(err) = + context.transfer_parallel_coins(Some(from_address), Some(to_address), coins) { - // cancel debit - context - .ledger_step - .set_balance_delta(from_address, coins, true) - .expect("credit failed after same-amount debit succeeded"); - bail!("Error crediting destination balance: {}", err); + bail!( + "error transferring {} parallel coins from {} to {}: {}", + coins, + from_address, + to_address, + err + ); } // prepare context - 
context.stack.push(StackElement { + context.stack.push(ExecutionStackElement { address: to_address, coins, owned_addresses: vec![to_address], @@ -110,6 +93,7 @@ impl Interface for InterfaceImpl { fn finish_call(&self) -> Result<()> { let mut context = context_guard!(self); + if context.stack.pop().is_none() { bail!("call stack out of bounds") } @@ -120,19 +104,19 @@ impl Interface for InterfaceImpl { /// Returns zero as a default if address not found. fn get_balance(&self) -> Result { let context = context_guard!(self); - let address = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; - Ok(context.ledger_step.get_balance(&address).to_raw()) + let address = context.get_current_address()?; + Ok(context + .get_parallel_balance(&address) + .unwrap_or(AMOUNT_ZERO) + .to_raw()) } /// Returns zero as a default if address not found. fn get_balance_for(&self, address: &str) -> Result { let address = massa_models::Address::from_str(address)?; Ok(context_guard!(self) - .ledger_step - .get_balance(&address) + .get_parallel_balance(&address) + .unwrap_or(AMOUNT_ZERO) .to_raw()) } @@ -143,28 +127,10 @@ impl Interface for InterfaceImpl { /// /// Insert in the ledger the given bytecode in the generated address fn create_module(&self, module: &[u8]) -> Result { - let mut context = context_guard!(self); - let (slot, created_addr_index) = (context.slot, context.created_addr_index); - let mut data: Vec = slot.to_bytes_key().to_vec(); - data.append(&mut created_addr_index.to_be_bytes().to_vec()); - if context.read_only { - data.push(0u8); - } else { - data.push(1u8); + match context_guard!(self).create_new_sc_address(module.to_vec()) { + Ok(addr) => Ok(addr.to_bs58_check()), + Err(err) => bail!("couldn't create new SC address: {}", err), } - let address = massa_models::Address(massa_hash::hash::Hash::compute_from(&data)); - let res = address.to_bs58_check(); - context - .ledger_step - .set_module(address, Some(module.to_vec())); - match context.stack.last_mut() { - Some(v) => { - v.owned_addresses.push(address); - } - None => bail!("owned addresses not found in stack"), - }; - context.created_addr_index += 1; - Ok(res) } /// Requires the data at the address @@ -172,9 +138,9 @@ impl Interface for InterfaceImpl { let addr = &massa_models::Address::from_bs58_check(address)?; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); let context = context_guard!(self); - match context.ledger_step.get_data_entry(addr, &key) { + match context.get_data_entry(addr, &key) { Some(value) => Ok(value), - _ => bail!("Data entry not found"), + _ => bail!("data entry not found"), } } @@ -186,60 +152,40 @@ impl Interface for InterfaceImpl { let addr = massa_models::Address::from_str(address)?; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); let mut context = context_guard!(self); - let is_allowed = context - .stack - .last() - .map_or(false, |v| v.owned_addresses.contains(&addr)); - if !is_allowed { - bail!("You don't have the write access to this entry") - } - context - .ledger_step - .set_data_entry(addr, key, value.to_vec()); + context.set_data_entry(addr, key, value.to_vec())?; Ok(()) } fn has_data_for(&self, address: &str, key: &str) -> Result { - let context = context_guard!(self); let addr = massa_models::Address::from_str(address)?; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - Ok(context.ledger_step.has_data_entry(&addr, &key)) + let context = context_guard!(self); + Ok(context.has_data_entry(&addr, 
&key)) } fn raw_get_data(&self, key: &str) -> Result> { let context = context_guard!(self); - let addr = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - match context.ledger_step.get_data_entry(&addr, &key) { + let addr = context.get_current_address()?; + match context.get_data_entry(&addr, &key) { Some(bytecode) => Ok(bytecode), - _ => bail!("Data entry not found"), + _ => bail!("data entry not found"), } } fn raw_set_data(&self, key: &str, value: &[u8]) -> Result<()> { let mut context = context_guard!(self); - let addr = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - context - .ledger_step - .set_data_entry(addr, key, value.to_vec()); + let addr = context.get_current_address()?; + context.set_data_entry(addr, key, value.to_vec())?; Ok(()) } fn has_data(&self, key: &str) -> Result { let context = context_guard!(self); - let addr = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - Ok(context.ledger_step.has_data_entry(&addr, &key)) + let addr = context.get_current_address()?; + Ok(context.has_data_entry(&addr, &key)) } /// hash data @@ -268,33 +214,15 @@ impl Interface for InterfaceImpl { Ok(massa_signature::verify_signature(&h, &signature, &public_key).is_ok()) } - /// Transfer coins from the current address to a target address + /// Transfer parallel coins from the current address to a target address /// to_address: target address /// raw_amount: amount to transfer (in raw u64) fn transfer_coins(&self, to_address: &str, raw_amount: u64) -> Result<()> { let to_address = massa_models::Address::from_str(to_address)?; - let mut context = context_guard!(self); - let from_address = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; let amount = massa_models::Amount::from_raw(raw_amount); - // debit - context - .ledger_step - .set_balance_delta(from_address, amount, false)?; - // credit - if let Err(err) = context - .ledger_step - .set_balance_delta(to_address, amount, true) - { - // cancel debit - context - .ledger_step - .set_balance_delta(from_address, amount, true) - .expect("credit failed after same-amount debit succeeded"); - bail!("Error crediting destination balance: {}", err); - } + let mut context = context_guard!(self); + let from_address = context.get_current_address()?; + context.transfer_parallel_coins(Some(from_address), Some(to_address), amount)?; Ok(()) } @@ -310,62 +238,33 @@ impl Interface for InterfaceImpl { ) -> Result<()> { let from_address = massa_models::Address::from_str(from_address)?; let to_address = massa_models::Address::from_str(to_address)?; - let mut context = context_guard!(self); - let is_allowed = context - .stack - .last() - .map_or(false, |v| v.owned_addresses.contains(&from_address)); - if !is_allowed { - bail!("You don't have the spending access to this entry") - } let amount = massa_models::Amount::from_raw(raw_amount); - // debit - context - .ledger_step - .set_balance_delta(from_address, amount, false)?; - // credit - if let Err(err) = context - .ledger_step - .set_balance_delta(to_address, amount, true) - { - // cancel debit - context - .ledger_step - 
.set_balance_delta(from_address, amount, true) - .expect("credit failed after same-amount debit succeeded"); - bail!("Error crediting destination balance: {}", err); - } + let mut context = context_guard!(self); + context.transfer_parallel_coins(Some(from_address), Some(to_address), amount)?; Ok(()) } /// Return the list of owned adresses of a given SC user fn get_owned_addresses(&self) -> Result> { - match context_guard!(self).stack.last() { - Some(v) => Ok(v - .owned_addresses - .iter() - .map(|addr| addr.to_bs58_check()) - .collect()), - None => bail!("owned address stack out of bounds"), - } + Ok(context_guard!(self) + .get_current_owned_addresses()? + .into_iter() + .map(|addr| addr.to_bs58_check()) + .collect()) } + /// Return the call stack (addresses) fn get_call_stack(&self) -> Result> { Ok(context_guard!(self) - .stack - .iter() - .map(|addr| addr.address.to_bs58_check()) + .get_call_stack() + .into_iter() + .map(|addr| addr.to_bs58_check()) .collect()) } /// Get the amount of coins that have been made available for use by the caller of the currently executing code. fn get_call_coins(&self) -> Result { - Ok(context_guard!(self) - .stack - .last() - .map(|e| e.coins) - .unwrap_or(AMOUNT_ZERO) - .to_raw()) + Ok(context_guard!(self).get_current_call_coins()?.to_raw()) } /// generate an execution event and stores it @@ -396,8 +295,12 @@ impl Interface for InterfaceImpl { /// Returns the current time (millisecond unix timestamp) fn get_time(&self) -> Result { let slot = context_guard!(self).slot; - let ts = - get_block_slot_timestamp(self.thread_count, self.t0, self.genesis_timestamp, slot)?; + let ts = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + slot, + )?; Ok(ts.to_millis()) } diff --git a/massa-execution/src/lib.rs b/massa-execution/src/lib.rs index ff02542ad90..33090f0e2b9 100644 --- a/massa-execution/src/lib.rs +++ b/massa-execution/src/lib.rs @@ -1,9 +1,11 @@ #![feature(map_first_last)] mod config; +mod context; mod controller; mod error; mod event_store; +mod execution; mod exports; mod interface_impl; mod sce_ledger; diff --git a/massa-execution/src/speculative_ledger.rs b/massa-execution/src/speculative_ledger.rs index 8c78e1266ed..6f534c6a665 100644 --- a/massa-execution/src/speculative_ledger.rs +++ b/massa-execution/src/speculative_ledger.rs @@ -1,7 +1,13 @@ use std::sync::{Arc, RwLock}; -use massa_ledger::{FinalLedger, LedgerChanges, LedgerEntryUpdate, SetOrKeep, SetUpdateOrDelete}; -use massa_models::{Address, Amount}; +use anyhow::bail; +use massa_hash::hash::Hash; +use massa_ledger::{ + Applicable, FinalLedger, LedgerChanges, LedgerEntryUpdate, SetOrKeep, SetUpdateOrDelete, +}; +use massa_models::{Address, Amount, AMOUNT_ZERO}; + +use crate::ExecutionError; /// represents a speculative ledger state combining /// data from the final ledger, previous speculative changes, @@ -43,15 +49,151 @@ impl SpeculativeLedger { } /// gets the parallel balance of an address - pub fn get_parallel_balance( - &self, - addr: &Address, - final_ledger: &FinalLedger, - ) -> Option { + pub fn get_parallel_balance(&self, addr: &Address) -> Option { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.get_parallel_balance_or_else(addr, || { self.previous_changes - .get_parallel_balance_or_else(addr, || final_ledger.get_parallel_balance(addr)) + .get_parallel_balance_or_else(addr, || { + self.final_ledger + .read() + .expect("couldn't r-lock final ledger") + .get_parallel_balance(addr) + }) 
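+ // The fallback closures passed to the *_or_else accessors are evaluated
+ // lazily: the final ledger is only read (and its lock taken) when neither
+ // added_changes nor previous_changes already determine the value.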
+ }) + } + + /// gets the bytecode of an address + pub fn get_bytecode(&self, addr: &Address) -> Option> { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.get_bytecode_or_else(addr, || { + self.previous_changes.get_bytecode_or_else(addr, || { + self.final_ledger + .read() + .expect("couldn't r-lock final ledger") + .get_bytecode(addr) + }) + }) + } + + /// Transfers parallel coins from one address to another. + /// No changes are retained in case of failure. + /// The spending address, if defined, must exist + /// + /// # parameters + /// * from_addr: optional spending address (use None for pure coin creation) + /// * to_addr: optional crediting address (use None for pure coin destruction) + /// * amount: amount of coins to transfer + pub fn transfer_parallel_coins( + &mut self, + from_addr: Option
<Address>,
+ to_addr: Option<Address>
, + amount: Amount, + ) -> Result<(), ExecutionError> { + // init empty ledger changes + let mut changes = LedgerChanges::default(); + + // spend coins from sender address (if any) + if let Some(from_addr) = from_addr { + let new_balance = self + .get_parallel_balance(&from_addr) + .ok_or(ExecutionError::RuntimeError( + "source address not found".into(), + ))? + .checked_sub(amount) + .ok_or(ExecutionError::RuntimeError( + "unsufficient from_addr balance".into(), + ))?; + changes.set_parallel_balance(from_addr, new_balance); + } + + // credit coins to destination address (if any) + // note that to_addr can be the same as from_addr + if let Some(to_addr) = to_addr { + let new_balance = changes + .get_parallel_balance_or_else(&to_addr, || self.get_parallel_balance(&to_addr)) + .unwrap_or(AMOUNT_ZERO) + .checked_add(amount) + .ok_or(ExecutionError::RuntimeError( + "overflow in to_addr balance".into(), + ))?; + changes.set_parallel_balance(to_addr, new_balance); + } + + // apply changes + self.added_changes.apply(changes); + + Ok(()) + } + + /// checks if an address exists + pub fn entry_exists(&self, addr: &Address) -> bool { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.entry_exists_or_else(addr, || { + self.previous_changes.entry_exists_or_else(addr, || { + self.final_ledger + .read() + .expect("couldn't r-lock final ledger") + .entry_exists(addr) + }) }) } + + /// creates a new smart contract address with initial bytecode + pub fn create_new_sc_address( + &self, + addr: Address, + bytecode: Vec, + ) -> Result<(), ExecutionError> { + // set bytecode (create if non-existant) + Ok(self.added_changes.set_bytecode(addr, bytecode)) + } + + /// sets the bytecode of an address + /// fails if the address doesn't exist + pub fn set_bytecode(&self, addr: Address, bytecode: Vec) -> Result<(), ExecutionError> { + // check for existence + if !self.entry_exists(&addr) { + return Err(ExecutionError::RuntimeError(format!( + "could not set bytecode for address {}: entry does not exist", + addr + ))); + } + + //set bytecode + self.added_changes.set_bytecode(addr, bytecode); + + Ok(()) + } + + /// gets a copy of a data entry for a given address + pub fn get_data_entry(&self, addr: &Address, key: &Hash) -> Option> { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.get_data_entry_or_else(addr, key, || { + self.previous_changes.get_data_entry_or_else(addr, key, || { + self.final_ledger + .read() + .expect("couldn't r-lock final ledger") + .get_data_entry(addr, key) + }) + }) + } + + /// checks if a data entry exists for a given address + pub fn has_data_entry(&self, addr: &Address, key: &Hash) -> bool { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.has_data_entry_or_else(addr, key, || { + self.previous_changes.has_data_entry_or_else(addr, key, || { + self.final_ledger + .read() + .expect("couldn't r-lock final ledger") + .has_data_entry(addr, key) + }) + }) + } + + /// sets an entry for an address + /// fails if the address doesn't exist + pub fn set_data_entry(&self, addr: Address, bytecode: Vec) -> Result<(), ExecutionError> { + self.added_changes.set_bytecode(addr, bytecode) + } } diff --git a/massa-execution/src/types.rs b/massa-execution/src/types.rs index c379b3005be..38d08fafb19 100644 --- a/massa-execution/src/types.rs +++ b/massa-execution/src/types.rs @@ -35,47 +35,6 @@ pub struct ExecutionStackElement { pub owned_addresses: Vec
, } -pub(crate) struct ExecutionContext { - // speculative ledger - speculative_ledger: SpeculativeLedger, - - /// max gas for this execution - pub max_gas: u64, - - /// gas price of the execution - pub gas_price: Amount, - - /// slot at which the execution happens - pub slot: Slot, - - /// counter of newly created addresses so far during this execution - pub created_addr_index: u64, - - /// counter of newly created events so far during this execution - pub created_event_index: u64, - - /// block ID, if one is present at this slot - pub opt_block_id: Option, - - /// block creator addr, if there is a block at this slot - pub opt_block_creator_addr: Option
, - - /// address call stack, most recent is at the back - pub stack: Vec, - - /// True if it's a read-only context - pub read_only: bool, - - /// geerated events during this execution, with multiple indexes - pub events: EventStore, - - /// Unsafe RNG state - pub unsafe_rng: Xoshiro256PlusPlus, - - /// origin operation id - pub origin_operation_id: Option, -} - pub struct ExecutionOutput { // slot pub slot: Slot, @@ -89,8 +48,6 @@ pub struct ExecutionOutput { /// structure describing a read-only execution request pub struct ReadOnlyExecutionRequest { - /// The slot at which the execution will occur. - slot: Slot, /// Maximum gas to spend in the execution. max_gas: u64, /// The simulated price of gas for the read-only execution. @@ -98,7 +55,5 @@ pub struct ReadOnlyExecutionRequest { /// The code to execute. bytecode: Vec, /// Call stack to simulate - call_stack: Vec
, - /// The channel used to send the result of the execution. - result_sender: std::sync::mpsc::Sender>, + call_stack: Vec, } diff --git a/massa-execution/src/vm_thread.rs b/massa-execution/src/vm_thread.rs index 4d6aa034f57..da1dace215d 100644 --- a/massa-execution/src/vm_thread.rs +++ b/massa-execution/src/vm_thread.rs @@ -1,27 +1,26 @@ use crate::config::VMConfig; -use crate::controller::VMController; -use crate::types::{ExecutionContext, ExecutionOutput, ReadOnlyExecutionRequest}; -use crate::{event_store::EventStore, speculative_ledger::SpeculativeLedger}; -use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; +use crate::controller::{VMController, VMInputData, VMManager}; +use crate::execution::ExecutionState; +use crate::types::{ExecutionOutput, ReadOnlyExecutionRequest}; +use crate::ExecutionError; +use massa_ledger::FinalLedger; use massa_models::BlockId; use massa_models::{ timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, Block, Slot, }; use massa_time::MassaTime; -use rand::SeedableRng; -use rand_xoshiro::Xoshiro256PlusPlus; +use std::sync::mpsc; use std::{ - collections::{HashMap, VecDeque}, - sync::{Arc, Mutex, RwLock}, + collections::HashMap, + sync::{Arc, Condvar, Mutex, RwLock}, }; - +use tracing::debug; /// structure gathering all elements needed by the VM thread pub struct VMThread { // VM config config: VMConfig, - // Final ledger - final_ledger: Arc>, + // VM data exchange controller controller: Arc, // map of SCE-final blocks not executed yet @@ -36,62 +35,33 @@ pub struct VMThread { active_slots: HashMap>, // highest active slot last_active_slot: Slot, - // final execution cursor - final_cursor: Slot, - // active execution cursor - active_cursor: Slot, - // execution output history - execution_history: VecDeque, - // execution context - execution_context: Arc>, - // final events - final_events: EventStore, + + // execution state + execution_state: Arc>, } impl VMThread { pub fn new( config: VMConfig, controller: Arc, - final_ledger: Arc>, + execution_state: Arc>, ) -> Self { - let final_slot = final_ledger + let final_cursor = execution_state .read() - .expect("could not R-lock final ledger in VM thread creation") - .slot; - let execution_context = Arc::new(Mutex::new(ExecutionContext { - speculative_ledger: SpeculativeLedger::new( - final_ledger.clone(), - LedgerChanges::default(), - ), - max_gas: Default::default(), - gas_price: Default::default(), - slot: Slot::new(0, 0), - created_addr_index: Default::default(), - created_event_index: Default::default(), - opt_block_id: Default::default(), - opt_block_creator_addr: Default::default(), - stack: Default::default(), - read_only: Default::default(), - events: Default::default(), - unsafe_rng: Xoshiro256PlusPlus::from_seed([0u8; 32]), - origin_operation_id: Default::default(), - })); + .expect("could not r-lock execution context") + .final_cursor; + // return VMThread VMThread { - final_ledger, - last_active_slot: final_slot, - final_cursor: final_slot, - active_cursor: final_slot, + last_active_slot: final_cursor, controller, - last_sce_final: final_slot, - execution_context, + last_sce_final: final_cursor, sce_finals: Default::default(), remaining_css_finals: Default::default(), blockclique: Default::default(), active_slots: Default::default(), - execution_history: Default::default(), config, - final_events: Default::default(), + execution_state, } } @@ -212,98 +182,6 @@ impl VMThread { } } - /// applies an execution output to the final state - fn apply_final_execution_output(&mut 
self, exec_out: ExecutionOutput) { - // update cursors - self.final_cursor = exec_out.slot; - if self.active_cursor <= self.final_cursor { - self.final_cursor = self.final_cursor; - } - - // apply final ledger changes - { - let mut final_ledger = self - .final_ledger - .write() - .expect("could not lock final ledger for writing"); - final_ledger.settle_slot(exec_out.slot, exec_out.ledger_changes); - } - - // save generated events to final store - // TODO - } - - /// applies an execution output to the active state - fn apply_active_execution_output(&mut self, exec_out: ExecutionOutput) { - // update active cursor - self.active_cursor = exec_out.slot; - - // add execution output to history - self.execution_history.push_back(exec_out); - } - - /// returns the speculative ledger at a given history slot - fn get_speculative_ledger_at_slot(&self, slot: Slot) -> SpeculativeLedger { - // check that the slot is within the reach of history - if slot <= self.final_cursor { - panic!("cannot execute at a slot before finality"); - } - let max_slot = self - .active_cursor - .get_next_slot(self.config.thread_count) - .expect("slot overflow when getting speculative ledger"); - if slot > max_slot { - panic!("cannot execute at a slot beyond active cursor + 1"); - } - - // gather the history of changes - let mut previous_ledger_changes = LedgerChanges::default(); - for previous_output in &self.execution_history { - if previous_output.slot >= slot { - break; - } - previous_ledger_changes.apply(&previous_output.ledger_changes); - } - - // return speculative ledger - SpeculativeLedger::new(self.final_ledger.clone(), previous_ledger_changes) - } - - /// executes a full slot without causing any changes to the state, - /// and yields an execution output - fn execute_slot(&mut self, slot: Slot, opt_block: Option<(BlockId, Block)>) -> ExecutionOutput { - // get the speculative ledger - let ledger = self.get_speculative_ledger_at_slot(slot); - - // TODO init context - - // TODO intial executions - - // TODO async executions - - let mut out_block_id = None; - if let Some((block_id, block)) = opt_block { - out_block_id = Some(block_id); - - //TODO block stuff - } - - ExecutionOutput { - slot, - block_id: out_block_id, - ledger_changes: ledger.into_added_changes(), - } - } - - /// clear execution history - fn clear_history(&mut self) { - // clear history - self.execution_history.clear(); - - // reset active cursor - self.active_cursor = self.final_cursor; - } - /// executes one final slot, if any /// returns true if something was executed fn execute_one_final_slot(&mut self) -> bool { @@ -312,25 +190,33 @@ impl VMThread { return false; } + // w-lock execution state + let mut exec_state = self + .execution_state + .write() + .expect("could not lock execution state for writing"); + // get the slot just after the last executed final slot - let slot = self + let slot = exec_state .final_cursor .get_next_slot(self.config.thread_count) .expect("final slot overflow in VM"); - // take element from sce finals + // take the corresponding element from sce finals let exec_target = self .sce_finals .remove(&slot) .expect("the SCE final slot list skipped a slot"); // check if the final slot is cached at the front of the speculative execution history - if let Some(exec_out) = self.execution_history.pop_front() { + if let Some(exec_out) = exec_state.active_history.pop_front() { if exec_out.slot == slot && exec_out.block_id == exec_target.as_ref().map(|(b_id, _)| *b_id) { // speculative execution front result matches what we want to 
compute - self.apply_final_execution_output(exec_out); + + // apply the cached output and return + exec_state.apply_final_execution_output(exec_out); return true; } } @@ -338,63 +224,71 @@ impl VMThread { // speculative cache mismatch // clear the speculative execution output cache completely - self.clear_history(); + exec_state.clear_history(); + + // downgrade execution state lock to read-only + // to allow for outside r-locks while the slot is being executed, which takes CPU time + // note that this downgrade does not happen atomically + // but the main loop is the only one writng in the execution state + // so there won't be writes in-between the release of write() and the acquiring of read() + let exec_state = self + .execution_state + .read() + .expect("could not lock execution state for reading"); // execute slot - let exec_out = self.execute_slot(slot, exec_target); + let exec_out = exec_state.execute_slot(slot, exec_target); + + // upgrade execution state lock to write + // note that this upgrade does not happen atomically + // but the main loop is the only one writng in the execution state + // so there won't be writes in-between the release of read() and the acquiring of write() + let exec_state = self + .execution_state + .write() + .expect("could not lock execution state for writing"); // apply execution output to final state - self.apply_final_execution_output(exec_out); + exec_state.apply_final_execution_output(exec_out); return true; } - /// truncates active slots at the first mismatch - /// between the active execution output history and the planned active_slots - fn truncate_history(&mut self) { - // find mismatch point (included) - let mut truncate_at = None; - for (hist_index, exec_output) in self.execution_history.iter().enumerate() { - let found_block_id = self - .active_slots - .get(&exec_output.slot) - .map(|opt_b| opt_b.as_ref().map(|(b_id, b)| *b_id)); - if found_block_id == Some(exec_output.block_id) { - continue; - } - truncate_at = Some(hist_index); - break; - } - - // truncate speculative execution output history - if let Some(truncate_at) = truncate_at { - self.execution_history.truncate(truncate_at); - self.active_cursor = self - .execution_history - .back() - .map_or(self.final_cursor, |out| out.slot); - } - } - /// executes one active slot, if any /// returns true if something was executed fn execute_one_active_slot(&mut self) -> bool { + // read-lock the execution state + let exec_state = self + .execution_state + .read() + .expect("could not lock execution state for reading"); + // get the next active slot - let slot = self + let slot = exec_state .active_cursor .get_next_slot(self.config.thread_count) .expect("active slot overflow in VM"); + // choose the execution target let exec_target = match self.active_slots.get(&slot) { Some(b) => b.clone(), //TODO get rid of that clone None => return false, }; // execute the slot - let exec_out = self.execute_slot(slot, exec_target); + let exec_out = exec_state.execute_slot(slot, exec_target); + + // upgrade execution state lock to write + // note that this upgrade does not happen atomically + // but the main loop is the only one writng in the execution state + // so there won't be writes in-between the release of read() and the acquiring of write() + let exec_state = self + .execution_state + .write() + .expect("could not lock execution state for writing"); // apply execution output to active state - self.apply_active_execution_output(exec_out); + exec_state.apply_active_execution_output(exec_out); return true; 
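+ // Note: execute_slot only needs `&self` and returns a standalone
+ // ExecutionOutput, which is what makes it safe to run the CPU-heavy
+ // execution under a read lock and to take the write lock only for the
+ // short apply_* steps.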
} @@ -417,11 +311,34 @@ impl VMThread { next_timestmap.saturating_sub(now) } - /// executed a readonly request - fn execute_readonly_request(&mut self, req: ReadOnlyExecutionRequest) { - // TODO + /// truncates history if necessary + pub fn truncate_history(&mut self) { + // acquire write access to execution state + let exec_state = self + .execution_state + .write() + .expect("could not lock execution state for writing"); - //TODO send execution result back through req.result_sender + exec_state.truncate_history(&self.active_slots); + } + + /// execute readonly request + fn execute_readonly_request( + &self, + req: ReadOnlyExecutionRequest, + resp_tx: mpsc::Sender>, + ) { + // acquire read access to execution state and execute the request + let outcome = self + .execution_state + .read() + .expect("could not lock execution state for reading") + .execute_readonly_request(req); + + // send the response + if resp_tx.send(outcome).is_err() { + debug!("could not send execute_readonly_request response: response channel died"); + } } /// main VM loop @@ -472,8 +389,8 @@ impl VMThread { // execute all queued readonly requests // must be done in this loop because of the static shared context - for req in input_data.readonly_requests { - self.execute_readonly_request(req); + for (req, resp_tx) in input_data.readonly_requests { + self.execute_readonly_request(req, resp_tx); } // check if new data or requests arrived during the iteration @@ -503,5 +420,52 @@ impl VMThread { .wait_timeout(input_data, delay_until_next_slot.to_duration()) .expect("VM main loop condition variable wait failed"); } + + // signal cancellation to all remaining readonly requests + let input_data = self + .controller + .input_data + .lock() + .expect("could not lock VM input data"); + for (_req, resp_tx) in input_data.readonly_requests.drain(..) 
{ + resp_tx.send(Err(ExecutionError::RuntimeError( + "readonly execution cancelled because VM is closing".into(), + ))); + } + } +} + +/// launches the VM and returns a VMManager +/// +/// # parameters +/// * config: VM configuration +/// * bootstrap: +pub fn start_vm(config: VMConfig, final_ledger: Arc>) -> VMManager { + // create an execution state + let execution_state = Arc::new(RwLock::new(ExecutionState::new( + config.clone(), + final_ledger.clone(), + ))); + + // create a controller + let controller = Arc::new(VMController { + loop_cv: Condvar::new(), + input_data: Mutex::new(VMInputData { + blockclique_changed: true, + ..Default::default() + }), + execution_state: execution_state.clone(), + }); + + // launch the VM thread + let ctl = controller.clone(); + let thread_handle = std::thread::spawn(move || { + VMThread::new(config, ctl, execution_state).main_loop(); + }); + + // return the VM manager + VMManager { + controller, + thread_handle, } } diff --git a/massa-ledger/src/error.rs b/massa-ledger/src/error.rs index 0fbc5f55d55..c09508a6d6e 100644 --- a/massa-ledger/src/error.rs +++ b/massa-ledger/src/error.rs @@ -6,6 +6,8 @@ use thiserror::Error; #[non_exhaustive] #[derive(Display, Error, Debug)] pub enum LedgerError { - /// there was an inconsistency between containers + /// container iconsistency: {0} ContainerInconsistency(String), + /// missing entry: {0} + MissingEntry(String), } diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index 8a6fe0539f8..f14943c5a1b 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -7,8 +7,7 @@ use std::collections::{hash_map, BTreeMap, VecDeque}; /// represents a structure that supports another one being applied to it pub trait Applicable { - fn apply(&mut self, _: &V); - fn merge(&mut self, _: V); + fn apply(&mut self, _: V); } /// structure defining a ledger entry @@ -22,25 +21,9 @@ pub struct LedgerEntry { /// LedgerEntryUpdate can be applied to a LedgerEntry impl Applicable for LedgerEntry { /// applies a LedgerEntryUpdate - fn apply(&mut self, update: &LedgerEntryUpdate) { + fn apply(&mut self, update: LedgerEntryUpdate) { update.parallel_balance.apply_to(&mut self.parallel_balance); update.bytecode.apply_to(&mut self.bytecode); - for (key, value_update) in &update.datastore { - match value_update { - SetOrDelete::Set(v) => { - self.datastore.insert(*key, v.clone()); - } - SetOrDelete::Delete => { - self.datastore.remove(key); - } - } - } - } - - /// merges a LedgerEntryUpdate - fn merge(&mut self, update: LedgerEntryUpdate) { - update.parallel_balance.merge_to(&mut self.parallel_balance); - update.bytecode.merge_to(&mut self.bytecode); for (key, value_update) in update.datastore { match value_update { SetOrDelete::Set(v) => { @@ -70,12 +53,11 @@ impl, V: Applicable> Applicable where V: Clone, - T: Clone, { - fn apply(&mut self, other: &SetUpdateOrDelete) { + fn apply(&mut self, other: SetUpdateOrDelete) { match other { // the other SetUpdateOrDelete sets a new absolute value => force it on self - v @ SetUpdateOrDelete::Set(_) => *self = v.clone(), + v @ SetUpdateOrDelete::Set(_) => *self = v, // the other SetUpdateOrDelete updates the value SetUpdateOrDelete::Update(u) => match self { @@ -94,33 +76,6 @@ where } }, - // the other SetUpdateOrDelete deletes a value => force self to delete it as well - v @ SetUpdateOrDelete::Delete => *self = v.clone(), - } - } - - fn merge(&mut self, other: SetUpdateOrDelete) { - match other { - // the other SetUpdateOrDelete sets a new absolute value => force it on 
self - v @ SetUpdateOrDelete::Set(_) => *self = v, - - // the other SetUpdateOrDelete updates the value - SetUpdateOrDelete::Update(u) => match self { - // if self currently sets an absolute value, merge other to that value - SetUpdateOrDelete::Set(cur) => cur.merge(u), - - // if self currently updates a value, merge the updates of the other to that update - SetUpdateOrDelete::Update(cur) => cur.merge(u), - - // if self currently deletes a value, - // create a new default value, merge other's updates to it and make self set it as an absolute new value - SetUpdateOrDelete::Delete => { - let mut res = T::default(); - res.merge(u); - *self = SetUpdateOrDelete::Set(res); - } - }, - // the other SetUpdateOrDelete deletes a value => force self to delete it as well v @ SetUpdateOrDelete::Delete => *self = v, } @@ -138,11 +93,7 @@ pub enum SetOrDelete { /// allows applying another SetOrDelete to the current one impl Applicable> for SetOrDelete { - fn apply(&mut self, other: &Self) { - *self = other.clone(); - } - - fn merge(&mut self, other: Self) { + fn apply(&mut self, other: Self) { *self = other; } } @@ -158,14 +109,7 @@ pub enum SetOrKeep { /// allows applying another SetOrKeep to the current one impl Applicable> for SetOrKeep { - fn apply(&mut self, other: &SetOrKeep) { - if let v @ SetOrKeep::Set(..) = other { - // update the current value only if the other SetOrKeep sets a new one - *self = v.clone(); - } - } - - fn merge(&mut self, other: SetOrKeep) { + fn apply(&mut self, other: SetOrKeep) { if let v @ SetOrKeep::Set(..) = other { // update the current value only if the other SetOrKeep sets a new one *self = v; @@ -174,16 +118,8 @@ impl Applicable> for SetOrKeep { } impl SetOrKeep { - /// applies the current SetOrKeep to a target mutable value - pub fn apply_to(&self, val: &mut T) { - if let SetOrKeep::Set(v) = &self { - // only change the value if self is setting a new one - *val = v.clone(); - } - } - - /// merges the current SetOrKeep into a target mutable value - pub fn merge_to(self, val: &mut T) { + /// applies the current SetOrKeep into a target mutable value + pub fn apply_to(self, val: &mut T) { if let SetOrKeep::Set(v) = self { // only change the value if self is setting a new one *val = v; @@ -208,18 +144,10 @@ pub struct LedgerEntryUpdate { impl Applicable for LedgerEntryUpdate { /// extends the LedgerEntryUpdate with another one - fn apply(&mut self, update: &LedgerEntryUpdate) { - self.roll_count.apply(&update.roll_count); - self.parallel_balance.apply(&update.parallel_balance); - self.bytecode.apply(&update.bytecode); - self.datastore.extend(update.datastore.clone()); - } - - /// extends the LedgerEntryUpdate with another one - fn merge(&mut self, update: LedgerEntryUpdate) { - self.roll_count.merge(update.roll_count); - self.parallel_balance.merge(update.parallel_balance); - self.bytecode.merge(update.bytecode); + fn apply(&mut self, update: LedgerEntryUpdate) { + self.roll_count.apply(update.roll_count); + self.parallel_balance.apply(update.parallel_balance); + self.bytecode.apply(update.bytecode); self.datastore.extend(update.datastore); } } @@ -230,28 +158,12 @@ pub struct LedgerChanges(pub Map for LedgerChanges { /// extends the current LedgerChanges with another one - fn apply(&mut self, changes: &LedgerChanges) { - for (addr, change) in &changes.0 { - match self.0.entry(*addr) { - hash_map::Entry::Occupied(mut occ) => { - // apply incoming change if a change on this entry already exists - occ.get_mut().apply(change); - } - hash_map::Entry::Vacant(vac) => { - // 
otherwise insert the incoming change - vac.insert(change.clone()); - } - } - } - } - - /// extends the current LedgerChanges with another one - fn merge(&mut self, changes: LedgerChanges) { + fn apply(&mut self, changes: LedgerChanges) { for (addr, change) in changes.0 { match self.0.entry(addr) { hash_map::Entry::Occupied(mut occ) => { - // merge incoming change if a change on this entry already exists - occ.get_mut().merge(change); + // apply incoming change if a change on this entry already exists + occ.get_mut().apply(change); } hash_map::Entry::Vacant(vac) => { // otherwise insert the incoming change @@ -289,6 +201,179 @@ impl LedgerChanges { None => f(), } } + + /// tries to return the bytecode or gets it from a function + /// + /// # Returns + /// * Some(v) if a value is present + /// * None if the value is absent + /// * f() if the value is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn get_bytecode_or_else Option>>( + &self, + addr: &Address, + f: F, + ) -> Option> { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(v)) => Some(v.bytecode.clone()), + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { bytecode, .. })) => match bytecode { + SetOrKeep::Set(v) => Some(v.clone()), + SetOrKeep::Keep => f(), + }, + Some(SetUpdateOrDelete::Delete) => None, + None => f(), + } + } + + /// tries to return whether an entry exists or gets it from a function + /// + /// # Returns + /// * true if a entry is present + /// * false if the entry is absent + /// * f() if the existence of the value is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn entry_exists_or_else bool>(&self, addr: &Address, f: F) -> bool { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(_)) => true, + Some(SetUpdateOrDelete::Update(_)) => true, + Some(SetUpdateOrDelete::Delete) => false, + None => f(), + } + } + + /// set the parallel balance of an address + pub fn set_parallel_balance(&mut self, addr: Address, balance: Amount) { + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + match occ.get_mut() { + SetUpdateOrDelete::Set(v) => { + // we currently set the absolute value of the entry + // so we need to update the parallel_balance of that value + v.parallel_balance = balance; + } + SetUpdateOrDelete::Update(u) => { + // we currently update the value of the entry + // so we need to set the parallel_balance for that update + u.parallel_balance = SetOrKeep::Set(balance); + } + d @ SetUpdateOrDelete::Delete => { + // we currently delete the entry + // so we need to create a default one with the target balance + *d = SetUpdateOrDelete::Set(LedgerEntry { + parallel_balance: balance, + ..Default::default() + }); + } + } + } + hash_map::Entry::Vacant(vac) => { + // we currently aren't changing anything on that entry + // so we need to create an update with the target balance + vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { + parallel_balance: SetOrKeep::Set(balance), + ..Default::default() + })); + } + } + } + + /// set the parallel balance of an address + pub fn set_bytecode(&mut self, addr: Address, bytecode: Vec) { + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + match occ.get_mut() { + SetUpdateOrDelete::Set(v) => { + // we currently set the absolute value of the entry + // so we need to update the bytecode of that value + v.bytecode = 
bytecode; + } + SetUpdateOrDelete::Update(u) => { + // we currently update the value of the entry + // so we need to set the bytecode for that update + u.bytecode = SetOrKeep::Set(bytecode); + } + d @ SetUpdateOrDelete::Delete => { + // we currently delete the entry + // so we need to create a default one with the target bytecode + *d = SetUpdateOrDelete::Set(LedgerEntry { + bytecode, + ..Default::default() + }); + } + } + } + hash_map::Entry::Vacant(vac) => { + // we currently aren't changing anything on that entry + // so we need to create an update with the target bytecode + vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { + bytecode: SetOrKeep::Set(bytecode), + ..Default::default() + })); + } + } + } + + /// tries to return a data entry + /// + /// # Returns + /// * Some(v) if a value is present + /// * None if the value is absent + /// * f() if the value is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn get_data_entry_or_else Option>>( + &self, + addr: &Address, + key: &Hash, + f: F, + ) -> Option> { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(v)) => v.datastore.get(key).cloned(), + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => { + match datastore.get(key) { + Some(SetOrDelete::Set(v)) => Some(v.clone()), + Some(SetOrDelete::Delete) => None, + None => f(), + } + } + Some(SetUpdateOrDelete::Delete) => None, + None => f(), + } + } + + /// tries to return whether a data entry exists + /// + /// # Returns + /// * true if it does + /// * false if it does not + /// * f() if its existance is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn has_data_entry_or_else bool>( + &self, + addr: &Address, + key: &Hash, + f: F, + ) -> bool { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(v)) => v.datastore.contains_key(key), + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. 
})) => { + match datastore.get(key) { + Some(SetOrDelete::Set(_)) => true, + Some(SetOrDelete::Delete) => false, + None => f(), + } + } + Some(SetUpdateOrDelete::Delete) => false, + None => f(), + } + } } /// represents a final ledger @@ -306,33 +391,8 @@ pub struct FinalLedger { } impl Applicable for FinalLedger { - /// applies LedgerChanges to the final ledger - fn apply(&mut self, changes: &LedgerChanges) { - // for all incoming changes - for (addr, change) in &changes.0 { - match &change { - SetUpdateOrDelete::Set(new_entry) => { - // inserts/overwrites the entry with an incoming absolute value - self.sorted_ledger.insert(*addr, new_entry.clone()); - } - SetUpdateOrDelete::Update(entry_update) => { - // applies updates to an entry - // if the entry does not exist, inserts a default one and applies the updates to it - self.sorted_ledger - .entry(*addr) - .or_insert_with(|| Default::default()) - .apply(entry_update); - } - SetUpdateOrDelete::Delete => { - // deletes an entry, if it exists - self.sorted_ledger.remove(&addr); - } - } - } - } - /// merges LedgerChanges to the final ledger - fn merge(&mut self, changes: LedgerChanges) { + fn apply(&mut self, changes: LedgerChanges) { // for all incoming changes for (addr, change) in changes.0 { match change { @@ -346,7 +406,7 @@ impl Applicable for FinalLedger { self.sorted_ledger .entry(addr) .or_insert_with(|| Default::default()) - .merge(entry_update); + .apply(entry_update); } SetUpdateOrDelete::Delete => { // deletes an entry, if it exists @@ -361,7 +421,7 @@ impl FinalLedger { /// settles a slot and saves the corresponding ledger changes to history pub fn settle_slot(&mut self, slot: Slot, changes: LedgerChanges) { // apply changes - self.apply(&changes); + self.apply(changes.clone()); // update the slot self.slot = slot; @@ -377,4 +437,28 @@ impl FinalLedger { pub fn get_parallel_balance(&self, addr: &Address) -> Option { self.sorted_ledger.get(addr).map(|v| v.parallel_balance) } + + /// gets a copy of the bytecode of an entry + pub fn get_bytecode(&self, addr: &Address) -> Option> { + self.sorted_ledger.get(addr).map(|v| v.bytecode.clone()) + } + + /// checks if an entry exists + pub fn entry_exists(&self, addr: &Address) -> bool { + self.sorted_ledger.contains_key(addr) + } + + /// gets a copy of a data entry + pub fn get_data_entry(&self, addr: &Address, key: &Hash) -> Option> { + self.sorted_ledger + .get(addr) + .and_then(|v| v.datastore.get(key).cloned()) + } + + /// checks whether a data entry exists + pub fn has_data_entry(&self, addr: &Address, key: &Hash) -> bool { + self.sorted_ledger + .get(addr) + .map_or(false, |v| v.datastore.contains_key(key)) + } } From 891d914f7dbbb0c61abc88fb2f9d5b7efa748676 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 16 Feb 2022 20:13:45 +0100 Subject: [PATCH 10/73] fmt and error corrections --- massa-execution/src/context.rs | 30 +++++++++++++++++++ massa-execution/src/controller.rs | 2 +- massa-execution/src/execution.rs | 5 ++-- massa-execution/src/interface_impl.rs | 7 ++--- massa-execution/src/speculative_ledger.rs | 31 ++++++++++++------- massa-ledger/src/ledger.rs | 36 +++++++++++++++++++++++ 6 files changed, 93 insertions(+), 18 deletions(-) diff --git a/massa-execution/src/context.rs b/massa-execution/src/context.rs index 75813da5943..639bdc0fb9e 100644 --- a/massa-execution/src/context.rs +++ b/massa-execution/src/context.rs @@ -2,6 +2,7 @@ use crate::sce_ledger::SCELedgerChanges; use crate::speculative_ledger::SpeculativeLedger; use crate::types::ExecutionStackElement; use 
crate::{event_store::EventStore, ExecutionError}; +use massa_hash::hash::Hash; use massa_ledger::{FinalLedger, LedgerChanges}; use massa_models::{Address, Amount, BlockId, OperationId, Slot}; use rand::SeedableRng; @@ -162,11 +163,40 @@ impl ExecutionContext { self.speculative_ledger.get_bytecode(address) } + /// gets the data from a datastore entry of an address if it exists + pub fn get_data_entry(&self, address: &Address, key: &Hash) -> Option> { + self.speculative_ledger.get_data_entry(address, key) + } + + /// checks if a datastore entry exists + pub fn has_data_entry(&self, address: &Address, key: &Hash) -> bool { + self.speculative_ledger.has_data_entry(address, key) + } + /// gets the bytecode of an address if it exists pub fn get_parallel_balance(&self, address: &Address) -> Option { self.speculative_ledger.get_parallel_balance(address) } + /// checks if a datastore entry exists + pub fn set_data_entry( + &self, + address: &Address, + key: Hash, + data: Vec, + ) -> Result<(), ExecutionError> { + // check access right + if !self.has_write_rights_on(address) { + return Err(ExecutionError::RuntimeError(format!( + "writing in the datastore of address {} is not allowed in this context", + address + ))); + } + + // set data entry + self.speculative_ledger.set_data_entry(address, key, data) + } + /// Transfers parallel coins from one address to another. /// No changes are retained in case of failure. /// Spending is only allowed from existing addresses we have write acess on diff --git a/massa-execution/src/controller.rs b/massa-execution/src/controller.rs index 0ea13517cae..66c5c8dab2d 100644 --- a/massa-execution/src/controller.rs +++ b/massa-execution/src/controller.rs @@ -1,6 +1,6 @@ use crate::execution::ExecutionState; use crate::speculative_ledger::SpeculativeLedger; -use crate::types::{ExecutionContext, ExecutionOutput}; +use crate::types::ExecutionOutput; use crate::ExecutionError; use crate::{config::VMConfig, types::ReadOnlyExecutionRequest, vm_thread::VMThread}; use massa_ledger::FinalLedger; diff --git a/massa-execution/src/execution.rs b/massa-execution/src/execution.rs index d6286d63974..d56497850ea 100644 --- a/massa-execution/src/execution.rs +++ b/massa-execution/src/execution.rs @@ -1,14 +1,13 @@ use crate::config::VMConfig; +use crate::context::ExecutionContext; use crate::interface_impl::InterfaceImpl; -use crate::types::{ExecutionContext, ExecutionOutput, ReadOnlyExecutionRequest}; +use crate::types::{ExecutionOutput, ReadOnlyExecutionRequest}; use crate::ExecutionError; use crate::{event_store::EventStore, speculative_ledger::SpeculativeLedger}; use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; use massa_models::BlockId; use massa_models::{Block, Slot}; use massa_sc_runtime::Interface; -use rand::SeedableRng; -use rand_xoshiro::Xoshiro256PlusPlus; use std::{ collections::{HashMap, VecDeque}, sync::{Arc, Mutex, RwLock}, diff --git a/massa-execution/src/interface_impl.rs b/massa-execution/src/interface_impl.rs index e71eabc59a2..ed3c0ce07de 100644 --- a/massa-execution/src/interface_impl.rs +++ b/massa-execution/src/interface_impl.rs @@ -9,7 +9,6 @@ use massa_models::{ AMOUNT_ZERO, }; use massa_sc_runtime::{Interface, InterfaceClone}; -use massa_time::MassaTime; use rand::Rng; use std::str::FromStr; use std::sync::{Arc, Mutex}; @@ -152,7 +151,7 @@ impl Interface for InterfaceImpl { let addr = massa_models::Address::from_str(address)?; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); let mut context = context_guard!(self); - 
context.set_data_entry(addr, key, value.to_vec())?; + context.set_data_entry(&addr, key, value.to_vec())?; Ok(()) } @@ -168,7 +167,7 @@ impl Interface for InterfaceImpl { let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); let addr = context.get_current_address()?; match context.get_data_entry(&addr, &key) { - Some(bytecode) => Ok(bytecode), + Some(data) => Ok(data), _ => bail!("data entry not found"), } } @@ -177,7 +176,7 @@ impl Interface for InterfaceImpl { let mut context = context_guard!(self); let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); let addr = context.get_current_address()?; - context.set_data_entry(addr, key, value.to_vec())?; + context.set_data_entry(&addr, key, value.to_vec())?; Ok(()) } diff --git a/massa-execution/src/speculative_ledger.rs b/massa-execution/src/speculative_ledger.rs index 6f534c6a665..96170a063cc 100644 --- a/massa-execution/src/speculative_ledger.rs +++ b/massa-execution/src/speculative_ledger.rs @@ -1,13 +1,8 @@ -use std::sync::{Arc, RwLock}; - -use anyhow::bail; +use crate::ExecutionError; use massa_hash::hash::Hash; -use massa_ledger::{ - Applicable, FinalLedger, LedgerChanges, LedgerEntryUpdate, SetOrKeep, SetUpdateOrDelete, -}; +use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; use massa_models::{Address, Amount, AMOUNT_ZERO}; - -use crate::ExecutionError; +use std::sync::{Arc, RwLock}; /// represents a speculative ledger state combining /// data from the final ledger, previous speculative changes, @@ -193,7 +188,23 @@ impl SpeculativeLedger { /// sets an entry for an address /// fails if the address doesn't exist - pub fn set_data_entry(&self, addr: Address, bytecode: Vec) -> Result<(), ExecutionError> { - self.added_changes.set_bytecode(addr, bytecode) + pub fn set_data_entry( + &self, + addr: &Address, + key: Hash, + data: Vec, + ) -> Result<(), ExecutionError> { + // check for address existence + if !self.entry_exists(&addr) { + return Err(ExecutionError::RuntimeError(format!( + "could not set data for address {}: entry does not exist", + addr + ))); + } + + // set data + self.added_changes.set_data_entry(*addr, key, data); + + Ok(()) } } diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index f14943c5a1b..bd0e17eb321 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -374,6 +374,42 @@ impl LedgerChanges { None => f(), } } + + /// set a datastore entry for an address + pub fn set_data_entry(&mut self, addr: Address, key: Hash, data: Vec) { + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + match occ.get_mut() { + SetUpdateOrDelete::Set(v) => { + // we currently set the absolute value of the entry + // so we need to update the data of that value + v.datastore.insert(key, data); + } + SetUpdateOrDelete::Update(u) => { + // we currently update the value of the entry + // so we need to set the data for that update + u.datastore.insert(key, SetOrDelete::Set(data)); + } + d @ SetUpdateOrDelete::Delete => { + // we currently delete the entry + // so we need to create a default one with the target data + *d = SetUpdateOrDelete::Set(LedgerEntry { + datastore: vec![(key, data)].into_iter().collect(), + ..Default::default() + }); + } + } + } + hash_map::Entry::Vacant(vac) => { + // we currently aren't changing anything on that entry + // so we need to create an update with the target data + vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { + datastore: vec![(key, SetOrDelete::Set(data))].into_iter().collect(), + ..Default::default() + })); + } + 
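
// Usage sketch for the two datastore primitives introduced in this file
// (set_data_entry just above, get_data_entry_or_else earlier in the hunk):
// a write recorded in a LedgerChanges can be read back from the changes alone,
// and the fallback closure only runs when the changes are inconclusive.
// Constructors follow the code in this series; LedgerChanges is assumed to
// implement Default (it is built with Default::default() elsewhere in the
// series); the key/value literals are arbitrary.
fn ledger_changes_sketch(addr: massa_models::Address) {
    use massa_hash::hash::Hash;
    use massa_ledger::LedgerChanges;

    let key = Hash::compute_from(b"my_key");
    let mut changes = LedgerChanges::default();

    // record the speculative write
    changes.set_data_entry(addr, key, b"my_value".to_vec());

    // resolved from the changes alone: the fallback closure is not called
    assert_eq!(
        changes.get_data_entry_or_else(&addr, &key, || None),
        Some(b"my_value".to_vec())
    );

    // unknown key for that address: the closure is called to query deeper
    // (e.g. the final ledger)
    let other_key = Hash::compute_from(b"other_key");
    assert_eq!(
        changes.get_data_entry_or_else(&addr, &other_key, || Some(vec![42u8])),
        Some(vec![42u8])
    );
}
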
} + } } /// represents a final ledger From a36a4364048c97a45f1a8d41c5888e9bb9847ed4 Mon Sep 17 00:00:00 2001 From: damip Date: Thu, 17 Feb 2022 12:06:02 +0100 Subject: [PATCH 11/73] improve code organization and execution primitives --- massa-execution/src/context.rs | 125 ++++++++++++++- massa-execution/src/controller.rs | 4 +- massa-execution/src/execution.rs | 183 ++++++++++++++++++---- massa-execution/src/interface_impl.rs | 12 +- massa-execution/src/lib.rs | 1 + massa-execution/src/speculative_ledger.rs | 5 + massa-execution/src/types.rs | 8 +- 7 files changed, 288 insertions(+), 50 deletions(-) diff --git a/massa-execution/src/context.rs b/massa-execution/src/context.rs index 639bdc0fb9e..3e9e5c682ad 100644 --- a/massa-execution/src/context.rs +++ b/massa-execution/src/context.rs @@ -1,15 +1,33 @@ -use crate::sce_ledger::SCELedgerChanges; use crate::speculative_ledger::SpeculativeLedger; -use crate::types::ExecutionStackElement; +use crate::types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest}; use crate::{event_store::EventStore, ExecutionError}; use massa_hash::hash::Hash; use massa_ledger::{FinalLedger, LedgerChanges}; use massa_models::{Address, Amount, BlockId, OperationId, Slot}; use rand::SeedableRng; use rand_xoshiro::Xoshiro256PlusPlus; -use std::collections::VecDeque; use std::sync::{Arc, RwLock}; +pub(crate) struct ExecutionContextSnapshot { + // added speculative ledger changes + pub ledger_changes: LedgerChanges, + + /// counter of newly created addresses so far during this execution + pub created_addr_index: u64, + + /// counter of newly created events so far during this execution + pub created_event_index: u64, + + /// address call stack, most recent is at the back + pub stack: Vec, + + /// generated events during this execution, with multiple indexes + pub events: EventStore, + + /// Unsafe RNG state + pub unsafe_rng: Xoshiro256PlusPlus, +} + pub(crate) struct ExecutionContext { // speculative ledger speculative_ledger: SpeculativeLedger, @@ -41,7 +59,7 @@ pub(crate) struct ExecutionContext { /// True if it's a read-only context pub read_only: bool, - /// geerated events during this execution, with multiple indexes + /// generated events during this execution, with multiple indexes pub events: EventStore, /// Unsafe RNG state @@ -52,9 +70,12 @@ pub(crate) struct ExecutionContext { } impl ExecutionContext { - pub(crate) fn new(final_ledger: Arc>) -> Self { + pub(crate) fn new( + final_ledger: Arc>, + previous_changes: LedgerChanges, + ) -> Self { ExecutionContext { - speculative_ledger: SpeculativeLedger::new(final_ledger, Default::default()), + speculative_ledger: SpeculativeLedger::new(final_ledger, previous_changes), max_gas: Default::default(), gas_price: Default::default(), slot: Slot::new(0, 0), @@ -70,6 +91,91 @@ impl ExecutionContext { } } + /// returns an copied execution state snapshot + pub(crate) fn get_snapshot(&self) -> ExecutionContextSnapshot { + ExecutionContextSnapshot { + ledger_changes: self.speculative_ledger.get_snapshot(), + created_addr_index: self.created_addr_index, + created_event_index: self.created_event_index, + stack: self.stack.clone(), + events: self.events.clone(), + unsafe_rng: self.unsafe_rng.clone(), + } + } + + /// resets context to a snapshot + pub fn reset_to_snapshot(&mut self, snapshot: ExecutionContextSnapshot) { + self.speculative_ledger + .reset_to_snapshot(snapshot.ledger_changes); + self.created_addr_index = snapshot.created_addr_index; + self.created_event_index = snapshot.created_event_index; + 
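
// The get_snapshot/reset_to_snapshot pair being added here supports the rollback
// pattern used later in execute_operation: save the state, run the bytecode, and
// restore on failure so a failed run leaves no speculative change behind. A minimal
// self-contained sketch of that pattern with a hypothetical Counter type (not from
// this codebase):
#[derive(Clone)]
struct Counter {
    value: u64,
}

impl Counter {
    fn get_snapshot(&self) -> Counter {
        self.clone()
    }
    fn reset_to_snapshot(&mut self, snapshot: Counter) {
        *self = snapshot;
    }
}

fn run_step(counter: &mut Counter, fail: bool) -> Result<(), String> {
    // save the state before executing
    let snapshot = counter.get_snapshot();
    // speculative change
    counter.value += 1;
    if fail {
        // failed execution: roll back so no trace of the change remains
        counter.reset_to_snapshot(snapshot);
        return Err("execution failed".into());
    }
    Ok(())
}
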
self.stack = snapshot.stack; + self.events = snapshot.events; + self.unsafe_rng = snapshot.unsafe_rng; + } + + /// create the execution context at the beginning of a readonly execution + pub(crate) fn new_readonly( + slot: Slot, + req: ReadOnlyExecutionRequest, + previous_changes: LedgerChanges, + final_ledger: Arc>, + ) -> Self { + // Seed the RNG + let mut seed: Vec = slot.to_bytes_key().to_vec(); + seed.push(0u8); // read-only + let seed = massa_hash::hash::Hash::compute_from(&seed).to_bytes(); + let unsafe_rng = Xoshiro256PlusPlus::from_seed(seed); + + // return readonly context + ExecutionContext { + max_gas: req.max_gas, + gas_price: req.simulated_gas_price, + slot, + stack: req.call_stack, + read_only: true, + unsafe_rng, + ..ExecutionContext::new(final_ledger, previous_changes) + } + } + + /// create the execution context at the beginning of an active execution slot + pub(crate) fn new_active_slot( + slot: Slot, + opt_block_id: Option, + opt_block_creator_addr: Option
, + previous_changes: LedgerChanges, + final_ledger: Arc>, + ) -> Self { + // seed the RNG + let mut seed: Vec = slot.to_bytes_key().to_vec(); + seed.push(1u8); // not read-only + if let Some(block_id) = &opt_block_id { + seed.extend(block_id.to_bytes()); // append block ID + } + let seed = massa_hash::hash::Hash::compute_from(&seed).to_bytes(); + let unsafe_rng = Xoshiro256PlusPlus::from_seed(seed); + + // return active slot execution context + ExecutionContext { + slot, + opt_block_id, + opt_block_creator_addr, + unsafe_rng, + ..ExecutionContext::new(final_ledger, previous_changes) + } + } + + /// moves out the output of the execution, resetting some fields + pub fn take_execution_output(&mut self) -> ExecutionOutput { + ExecutionOutput { + slot: self.slot, + block_id: std::mem::take(&mut self.opt_block_id), + ledger_changes: self.speculative_ledger.take(), + events: std::mem::take(&mut self.events), + } + } + /// gets the address at the top of the stack pub fn get_current_address(&self) -> Result { match self.stack.last() { @@ -184,9 +290,10 @@ impl ExecutionContext { address: &Address, key: Hash, data: Vec, + check_rights: bool, ) -> Result<(), ExecutionError> { // check access right - if !self.has_write_rights_on(address) { + if check_rights && !self.has_write_rights_on(address) { return Err(ExecutionError::RuntimeError(format!( "writing in the datastore of address {} is not allowed in this context", address @@ -205,15 +312,17 @@ impl ExecutionContext { /// * from_addr: optional spending address (use None for pure coin creation) /// * to_addr: optional crediting address (use None for pure coin destruction) /// * amount: amount of coins to transfer + /// * check_rights: if true, access rights are checked pub fn transfer_parallel_coins( &mut self, from_addr: Option
, to_addr: Option
, amount: Amount, + check_rights: bool, ) -> Result<(), ExecutionError> { // check access rights if let Some(from_addr) = &from_addr { - if !self.has_write_rights_on(from_addr) { + if check_rights && !self.has_write_rights_on(from_addr) { return Err(ExecutionError::RuntimeError(format!( "spending from address {} is not allowed in this context", from_addr diff --git a/massa-execution/src/controller.rs b/massa-execution/src/controller.rs index 66c5c8dab2d..48413609045 100644 --- a/massa-execution/src/controller.rs +++ b/massa-execution/src/controller.rs @@ -1,9 +1,7 @@ use crate::execution::ExecutionState; -use crate::speculative_ledger::SpeculativeLedger; use crate::types::ExecutionOutput; +use crate::types::ReadOnlyExecutionRequest; use crate::ExecutionError; -use crate::{config::VMConfig, types::ReadOnlyExecutionRequest, vm_thread::VMThread}; -use massa_ledger::FinalLedger; use massa_models::{Block, BlockId, Slot}; use std::collections::{HashMap, VecDeque}; use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock}; diff --git a/massa-execution/src/execution.rs b/massa-execution/src/execution.rs index d56497850ea..35ae48cb611 100644 --- a/massa-execution/src/execution.rs +++ b/massa-execution/src/execution.rs @@ -1,17 +1,27 @@ use crate::config::VMConfig; use crate::context::ExecutionContext; +use crate::event_store::EventStore; use crate::interface_impl::InterfaceImpl; -use crate::types::{ExecutionOutput, ReadOnlyExecutionRequest}; +use crate::types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest}; use crate::ExecutionError; -use crate::{event_store::EventStore, speculative_ledger::SpeculativeLedger}; use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; -use massa_models::BlockId; +use massa_models::{Address, BlockId, Operation, OperationType}; use massa_models::{Block, Slot}; use massa_sc_runtime::Interface; use std::{ collections::{HashMap, VecDeque}, sync::{Arc, Mutex, RwLock}, }; +use tracing::debug; + +macro_rules! context_guard { + ($self:ident) => { + $self + .execution_context + .lock() + .expect("failed to acquire lock on execution context") + }; +} pub struct ExecutionState { // VM config @@ -42,7 +52,10 @@ impl ExecutionState { .slot; // init execution context - let execution_context = Arc::new(Mutex::new(ExecutionContext::new(final_ledger.clone()))); + let execution_context = Arc::new(Mutex::new(ExecutionContext::new( + final_ledger.clone(), + Default::default(), + ))); // Instantiate the interface used by the assembly simulator. 
let execution_interface = Box::new(InterfaceImpl::new( @@ -153,54 +166,166 @@ impl ExecutionState { accumulated_changes } + pub fn execute_operation( + &mut self, + operation: &Operation, + block_creator_addr: Address, + ) -> Result<(), ExecutionError> { + // process ExecuteSC operations only + let (bytecode, max_gas, coins, gas_price) = match &operation.content.op { + op @ OperationType::ExecuteSC { + data, + max_gas, + coins, + gas_price, + } => (data, max_gas, coins, gas_price), + _ => return Ok(()), + }; + + // get sender address + let sender_addr = Address::from_public_key(&operation.content.sender_public_key); + + // get operation ID + // TODO have operation_id contained in the Operation object in the future to avoid recomputation + let operation_id = operation + .get_operation_id() + .expect("could not compute operation ID"); + + // prepare the context + let context_snapshot; + { + let context = context_guard!(self); + + // credit the producer of the block B with max_gas * gas_price parallel coins + // note that errors are deterministic and do not cancel op execution + let gas_fees = gas_price.saturating_mul_u64(*max_gas); + if let Err(err) = + context.transfer_parallel_coins(None, Some(block_creator_addr), gas_fees, false) + { + debug!( + "failed to credit block producer {} with {} gas fee coins: {}", + block_creator_addr, gas_fees, err + ); + } + + // credit Op's sender with `coins` parallel coins + // note that errors are deterministic and do not cancel op execution + if let Err(err) = + context.transfer_parallel_coins(None, Some(sender_addr), *coins, false) + { + debug!( + "failed to credit operation sender {} with {} operation coins: {}", + sender_addr, *coins, err + ); + } + + // save a snapshot of the context state to restore it if the op fails to execute + context_snapshot = context.get_snapshot(); + + // prepare context for op execution + context.gas_price = *gas_price; + context.max_gas = *max_gas; + context.stack = vec![ExecutionStackElement { + address: sender_addr, + coins: *coins, + owned_addresses: vec![sender_addr], + }]; + context.origin_operation_id = Some(operation_id); + }; + + // run in the intepreter + let run_result = massa_sc_runtime::run(bytecode, *max_gas, &*self.execution_interface); + if let Err(err) = run_result { + // there was an error during bytecode execution: cancel the effects of the execution + let mut context = context_guard!(self); + context.origin_operation_id = None; + context.reset_to_snapshot(context_snapshot); + return Err(ExecutionError::RuntimeError(format!( + "bytecode execution error: {}", + err + ))); + } + + Ok(()) + } + /// executes a full slot without causing any changes to the state, /// and yields an execution output pub fn execute_slot(&self, slot: Slot, opt_block: Option<(BlockId, Block)>) -> ExecutionOutput { - // get the speculative ledger + // get optional block ID and creator address + let (opt_block_id, opt_block_creator_addr) = opt_block + .as_ref() + .map(|(b_id, b)| (*b_id, Address::from_public_key(&b.header.content.creator))) + .unzip(); + + // accumulate previous active changes from history let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); - let ledger = SpeculativeLedger::new(self.final_ledger.clone(), previous_ledger_changes); - // TODO init context + // prepare execution context for the whole active slot + let execution_context = ExecutionContext::new_active_slot( + slot, + opt_block_id, + opt_block_creator_addr, + previous_ledger_changes, + self.final_ledger.clone(), + ); - // TODO 
async executions + // note that here, some pre-operations (like crediting block producers) can be performed before the lock - let mut out_block_id = None; - if let Some((block_id, block)) = opt_block { - out_block_id = Some(block_id); + // set the execution context for slot execution + *context_guard!(self) = execution_context; - //TODO execute block elements - } + // note that here, async operations should be executed - ExecutionOutput { - slot, - block_id: out_block_id, - ledger_changes: ledger.into_added_changes(), + // check if there is a block at this slot + if let (Some((block_id, block)), Some(block_creator_addr)) = + (opt_block, opt_block_creator_addr) + { + // execute operations + for (op_idx, operation) in block.operations.iter().enumerate() { + if let Err(err) = self.execute_operation(operation, block_creator_addr) { + debug!( + "failed executing operation index {} in block {}: {}", + op_idx, block_id, err + ); + } + } } + + // return the execution output + context_guard!(self).take_execution_output() } - /// executed a readonly request + /// execute a readonly request pub(crate) fn execute_readonly_request( &self, req: ReadOnlyExecutionRequest, ) -> Result { - // execute at the slot just after the latest executed active slot + // set the exec slot just after the latest executed active slot let slot = self .active_cursor .get_next_slot(self.config.thread_count) .expect("slot overflow in readonly execution"); - // get the speculative ledger + // get previous changes let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); - let ledger = SpeculativeLedger::new(self.final_ledger.clone(), previous_ledger_changes); - - // TODO execute ReadOnlyExecutionRequest at slot with context req - //TODO send execution result back through req.result_sender - Ok(ExecutionOutput { + // create readonly execution context + let execution_context = ExecutionContext::new_readonly( slot, - block_id: None, - events: TODO, - ledger_changes: TODO, - }) + req, + previous_ledger_changes, + self.final_ledger.clone(), + ); + + // set the execution context for execution + *context_guard!(self) = execution_context; + + // run the intepreter + massa_sc_runtime::run(&req.bytecode, req.max_gas, &*self.execution_interface) + .map_err(|err| ExecutionError::RuntimeError(err.to_string()))?; + + // return the execution output + Ok(context_guard!(self).take_execution_output()) } } diff --git a/massa-execution/src/interface_impl.rs b/massa-execution/src/interface_impl.rs index ed3c0ce07de..9b19084495c 100644 --- a/massa-execution/src/interface_impl.rs +++ b/massa-execution/src/interface_impl.rs @@ -19,7 +19,7 @@ macro_rules! 
context_guard { $self .context .lock() - .expect("Failed to acquire lock on context.") + .expect("failed to acquire lock on execution context") }; } @@ -69,7 +69,7 @@ impl Interface for InterfaceImpl { // transfer coins let coins = massa_models::Amount::from_raw(raw_coins); if let Err(err) = - context.transfer_parallel_coins(Some(from_address), Some(to_address), coins) + context.transfer_parallel_coins(Some(from_address), Some(to_address), coins, true) { bail!( "error transferring {} parallel coins from {} to {}: {}", @@ -151,7 +151,7 @@ impl Interface for InterfaceImpl { let addr = massa_models::Address::from_str(address)?; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); let mut context = context_guard!(self); - context.set_data_entry(&addr, key, value.to_vec())?; + context.set_data_entry(&addr, key, value.to_vec(), true)?; Ok(()) } @@ -176,7 +176,7 @@ impl Interface for InterfaceImpl { let mut context = context_guard!(self); let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); let addr = context.get_current_address()?; - context.set_data_entry(&addr, key, value.to_vec())?; + context.set_data_entry(&addr, key, value.to_vec(), true)?; Ok(()) } @@ -221,7 +221,7 @@ impl Interface for InterfaceImpl { let amount = massa_models::Amount::from_raw(raw_amount); let mut context = context_guard!(self); let from_address = context.get_current_address()?; - context.transfer_parallel_coins(Some(from_address), Some(to_address), amount)?; + context.transfer_parallel_coins(Some(from_address), Some(to_address), amount, true)?; Ok(()) } @@ -239,7 +239,7 @@ impl Interface for InterfaceImpl { let to_address = massa_models::Address::from_str(to_address)?; let amount = massa_models::Amount::from_raw(raw_amount); let mut context = context_guard!(self); - context.transfer_parallel_coins(Some(from_address), Some(to_address), amount)?; + context.transfer_parallel_coins(Some(from_address), Some(to_address), amount, true)?; Ok(()) } diff --git a/massa-execution/src/lib.rs b/massa-execution/src/lib.rs index 33090f0e2b9..5fafa36c2d6 100644 --- a/massa-execution/src/lib.rs +++ b/massa-execution/src/lib.rs @@ -1,4 +1,5 @@ #![feature(map_first_last)] +#![feature(unzip_option)] mod config; mod context; diff --git a/massa-execution/src/speculative_ledger.rs b/massa-execution/src/speculative_ledger.rs index 96170a063cc..82ddb00c023 100644 --- a/massa-execution/src/speculative_ledger.rs +++ b/massa-execution/src/speculative_ledger.rs @@ -28,6 +28,11 @@ impl SpeculativeLedger { } } + /// takes the added changes (move) and resets added changes + pub fn take(&mut self) -> LedgerChanges { + std::mem::take(&mut self.added_changes) + } + /// takes a snapshot (clone) of the added changes pub fn get_snapshot(&self) -> LedgerChanges { self.added_changes.clone() diff --git a/massa-execution/src/types.rs b/massa-execution/src/types.rs index 38d08fafb19..6f3fafdff35 100644 --- a/massa-execution/src/types.rs +++ b/massa-execution/src/types.rs @@ -49,11 +49,11 @@ pub struct ExecutionOutput { /// structure describing a read-only execution request pub struct ReadOnlyExecutionRequest { /// Maximum gas to spend in the execution. - max_gas: u64, + pub max_gas: u64, /// The simulated price of gas for the read-only execution. - simulated_gas_price: Amount, + pub simulated_gas_price: Amount, /// The code to execute. 
- bytecode: Vec, + pub bytecode: Vec, /// Call stack to simulate - call_stack: Vec, + pub call_stack: Vec, } From fd660402588a35d6f3ee415d790a3baac93ac32d Mon Sep 17 00:00:00 2001 From: damip Date: Thu, 17 Feb 2022 15:55:16 +0100 Subject: [PATCH 12/73] SCE ledger access --- massa-execution/src/controller.rs | 26 +++++++++--- massa-execution/src/execution.rs | 50 ++++++++++++++++++++++- massa-execution/src/lib.rs | 4 -- massa-execution/src/speculative_ledger.rs | 3 ++ massa-execution/src/vm_thread.rs | 34 ++------------- massa-ledger/src/ledger.rs | 15 ++++++- 6 files changed, 89 insertions(+), 43 deletions(-) diff --git a/massa-execution/src/controller.rs b/massa-execution/src/controller.rs index 48413609045..2efc8aacf9f 100644 --- a/massa-execution/src/controller.rs +++ b/massa-execution/src/controller.rs @@ -2,6 +2,8 @@ use crate::execution::ExecutionState; use crate::types::ExecutionOutput; use crate::types::ReadOnlyExecutionRequest; use crate::ExecutionError; +use massa_ledger::LedgerEntry; +use massa_models::Address; use massa_models::{Block, BlockId, Slot}; use std::collections::{HashMap, VecDeque}; use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock}; @@ -9,7 +11,7 @@ use tracing::info; /// structure used to communicate with the VM thread #[derive(Default)] -pub struct VMInputData { +pub(crate) struct VMInputData { /// set stop to true to stop the thread pub stop: bool, /// signal whether the blockclique changed @@ -28,11 +30,11 @@ pub struct VMInputData { /// VM controller pub struct VMController { /// condition variable to wake up the VM loop - pub loop_cv: Condvar, + pub(crate) loop_cv: Condvar, /// input data to process in the VM loop - pub input_data: Mutex, + pub(crate) input_data: Mutex, /// execution state - pub execution_state: Arc>, + pub(crate) execution_state: Arc>, } impl VMController { @@ -42,6 +44,20 @@ impl VMController { std::mem::take(&mut self.input_data.lock().expect("VM input data lock failed")) } + /// gets a copy of a full ledger entry + /// + /// # return value + /// * (final_entry, active_entry) + pub fn get_full_ledger_entry( + &self, + addr: &Address, + ) -> (Option, Option) { + self.execution_state + .read() + .expect("could not lock execution state for reading") + .get_full_ledger_entry(addr) + } + /// Executes a readonly request pub fn execute_readonly_request( &mut self, @@ -96,7 +112,7 @@ impl VMManager { .controller .input_data .lock() - .expect("could not w-lock VM input data"); + .expect("could not lock VM input data"); input_wlock.stop = true; self.controller.loop_cv.notify_one(); } diff --git a/massa-execution/src/execution.rs b/massa-execution/src/execution.rs index 35ae48cb611..50b1a4f794b 100644 --- a/massa-execution/src/execution.rs +++ b/massa-execution/src/execution.rs @@ -4,7 +4,7 @@ use crate::event_store::EventStore; use crate::interface_impl::InterfaceImpl; use crate::types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest}; use crate::ExecutionError; -use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; +use massa_ledger::{Applicable, FinalLedger, LedgerChanges, LedgerEntry, SetUpdateOrDelete}; use massa_models::{Address, BlockId, Operation, OperationType}; use massa_models::{Block, Slot}; use massa_sc_runtime::Interface; @@ -23,6 +23,7 @@ macro_rules! 
context_guard { }; } +/// structure holding consistent speculative and final execution states pub struct ExecutionState { // VM config pub config: VMConfig, @@ -141,6 +142,8 @@ impl ExecutionState { /// returns the speculative ledger at the entrance of a given history slot /// warning: only use in the main loop because the lock on the final ledger /// at the base of the returned SpeculativeLedger is not held + /// TODO: do not do this anymore but allow the speculative ledger to lazily query any subentry + /// by scanning through history from end to beginning pub fn get_accumulated_active_changes_at_slot(&self, slot: Slot) -> LedgerChanges { // check that the slot is within the reach of history if slot <= self.final_cursor { @@ -166,6 +169,7 @@ impl ExecutionState { accumulated_changes } + /// execute an operation in the context of a block pub fn execute_operation( &mut self, operation: &Operation, @@ -233,7 +237,7 @@ impl ExecutionState { context.origin_operation_id = Some(operation_id); }; - // run in the intepreter + // run the intepreter let run_result = massa_sc_runtime::run(bytecode, *max_gas, &*self.execution_interface); if let Err(err) = run_result { // there was an error during bytecode execution: cancel the effects of the execution @@ -328,4 +332,46 @@ impl ExecutionState { // return the execution output Ok(context_guard!(self).take_execution_output()) } + + /// gets a full ledger entry both at final and active states + /// TODO: this can be heavily optimized, see comments + /// + /// # returns + /// (final_entry, active_entry) + pub fn get_full_ledger_entry( + &self, + addr: &Address, + ) -> (Option, Option) { + // get the full entry from the final ledger + let final_entry = self + .final_ledger + .read() + .expect("could not r-lock final ledger") + .get_full_entry(addr); + + // get cumulative active changes and apply them + // TODO there is a lot of overhead here: we only need to compute the changes for one entry and no need to clone it + // also we should proceed backwards through history for performance + let active_change = self + .get_accumulated_active_changes_at_slot(self.active_cursor) + .get(addr) + .cloned(); + let active_entry = match (&final_entry, active_change) { + (final_v, None) => final_v.clone(), + (_, Some(SetUpdateOrDelete::Set(v))) => Some(v.clone()), + (_, Some(SetUpdateOrDelete::Delete)) => None, + (None, Some(SetUpdateOrDelete::Update(u))) => { + let mut v = LedgerEntry::default(); + v.apply(u); + Some(v) + } + (Some(final_v), Some(SetUpdateOrDelete::Update(u))) => { + let mut v = final_v.clone(); + v.apply(u); + Some(v) + } + }; + + (final_entry, active_entry) + } } diff --git a/massa-execution/src/lib.rs b/massa-execution/src/lib.rs index 5fafa36c2d6..64c1afe95d9 100644 --- a/massa-execution/src/lib.rs +++ b/massa-execution/src/lib.rs @@ -16,10 +16,6 @@ mod vm; mod vm_thread; mod worker; -pub use config::{ExecutionConfigs, ExecutionSettings}; -pub use controller::{ - start_controller, ExecutionCommandSender, ExecutionEventReceiver, ExecutionManager, -}; pub use error::ExecutionError; pub use exports::BootstrapExecutionState; pub use sce_ledger::{SCELedger, SCELedgerEntry}; diff --git a/massa-execution/src/speculative_ledger.rs b/massa-execution/src/speculative_ledger.rs index 82ddb00c023..d54c188072a 100644 --- a/massa-execution/src/speculative_ledger.rs +++ b/massa-execution/src/speculative_ledger.rs @@ -12,6 +12,9 @@ pub struct SpeculativeLedger { final_ledger: Arc>, /// accumulation of previous changes + /// TODO maybe have the history directly here, 
+ /// so that we can avoid accumulating all the changes at every slot + /// but only lazily query addresses backwards in history (to avoid useless computations) with caching previous_changes: LedgerChanges, /// list of added changes diff --git a/massa-execution/src/vm_thread.rs b/massa-execution/src/vm_thread.rs index da1dace215d..fe714840f4e 100644 --- a/massa-execution/src/vm_thread.rs +++ b/massa-execution/src/vm_thread.rs @@ -226,28 +226,9 @@ impl VMThread { // clear the speculative execution output cache completely exec_state.clear_history(); - // downgrade execution state lock to read-only - // to allow for outside r-locks while the slot is being executed, which takes CPU time - // note that this downgrade does not happen atomically - // but the main loop is the only one writng in the execution state - // so there won't be writes in-between the release of write() and the acquiring of read() - let exec_state = self - .execution_state - .read() - .expect("could not lock execution state for reading"); - // execute slot let exec_out = exec_state.execute_slot(slot, exec_target); - // upgrade execution state lock to write - // note that this upgrade does not happen atomically - // but the main loop is the only one writng in the execution state - // so there won't be writes in-between the release of read() and the acquiring of write() - let exec_state = self - .execution_state - .write() - .expect("could not lock execution state for writing"); - // apply execution output to final state exec_state.apply_final_execution_output(exec_out); @@ -257,11 +238,11 @@ impl VMThread { /// executes one active slot, if any /// returns true if something was executed fn execute_one_active_slot(&mut self) -> bool { - // read-lock the execution state + // write-lock the execution state let exec_state = self .execution_state - .read() - .expect("could not lock execution state for reading"); + .write() + .expect("could not lock execution state for writing"); // get the next active slot let slot = exec_state @@ -278,15 +259,6 @@ impl VMThread { // execute the slot let exec_out = exec_state.execute_slot(slot, exec_target); - // upgrade execution state lock to write - // note that this upgrade does not happen atomically - // but the main loop is the only one writng in the execution state - // so there won't be writes in-between the release of read() and the acquiring of write() - let exec_state = self - .execution_state - .write() - .expect("could not lock execution state for writing"); - // apply execution output to active state exec_state.apply_active_execution_output(exec_out); diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index bd0e17eb321..4cff45fea94 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -2,7 +2,7 @@ use crate::LedgerConfig; use massa_hash::hash::Hash; -use massa_models::{prehash::Map, Address, Amount, Slot}; +use massa_models::{ledger_models::LedgerChange, prehash::Map, Address, Amount, Slot}; use std::collections::{hash_map, BTreeMap, VecDeque}; /// represents a structure that supports another one being applied to it @@ -175,6 +175,14 @@ impl Applicable for LedgerChanges { } impl LedgerChanges { + /// get an item + pub fn get( + &self, + addr: &Address, + ) -> Option<&SetUpdateOrDelete> { + self.0.get(addr) + } + /// tries to return the parallel balance or gets it from a function /// /// # Returns @@ -454,6 +462,11 @@ impl Applicable for FinalLedger { } impl FinalLedger { + /// gets a full cloned entry + pub fn get_full_entry(&self, addr: 
&Address) -> Option { + self.sorted_ledger.get(addr).cloned() + } + /// settles a slot and saves the corresponding ledger changes to history pub fn settle_slot(&mut self, slot: Slot, changes: LedgerChanges) { // apply changes From 585e50599999a1061886d3c627093f74e52eadd8 Mon Sep 17 00:00:00 2001 From: damip Date: Fri, 18 Feb 2022 08:54:54 +0100 Subject: [PATCH 13/73] add event extraction --- massa-execution/src/config.rs | 2 ++ massa-execution/src/controller.rs | 34 +++++++++++++++++++-- massa-execution/src/execution.rs | 49 ++++++++++++++++++++++++++++++- massa-execution/src/vm_thread.rs | 1 + massa-models/src/slot.rs | 16 ++++++++++ 5 files changed, 99 insertions(+), 3 deletions(-) diff --git a/massa-execution/src/config.rs b/massa-execution/src/config.rs index 628c18e660c..60122eb856e 100644 --- a/massa-execution/src/config.rs +++ b/massa-execution/src/config.rs @@ -4,6 +4,8 @@ use std::path::PathBuf; /// VM module configuration #[derive(Debug, Clone)] pub struct VMConfig { + /// read-only execution request queue length + pub readonly_queue_length: usize, /// Initial SCE ledger file pub initial_sce_ledger_path: PathBuf, /// maximum number of SC output events kept in cache diff --git a/massa-execution/src/controller.rs b/massa-execution/src/controller.rs index 2efc8aacf9f..4646383d369 100644 --- a/massa-execution/src/controller.rs +++ b/massa-execution/src/controller.rs @@ -1,9 +1,12 @@ +use crate::config::VMConfig; use crate::execution::ExecutionState; use crate::types::ExecutionOutput; use crate::types::ReadOnlyExecutionRequest; use crate::ExecutionError; use massa_ledger::LedgerEntry; +use massa_models::output_event::SCOutputEvent; use massa_models::Address; +use massa_models::OperationId; use massa_models::{Block, BlockId, Slot}; use std::collections::{HashMap, VecDeque}; use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock}; @@ -29,6 +32,8 @@ pub(crate) struct VMInputData { /// VM controller pub struct VMController { + /// VM config + pub(crate) config: VMConfig, /// condition variable to wake up the VM loop pub(crate) loop_cv: Condvar, /// input data to process in the VM loop @@ -44,6 +49,32 @@ impl VMController { std::mem::take(&mut self.input_data.lock().expect("VM input data lock failed")) } + /// Get events optionnally filtered by: + /// * start slot + /// * end slot + /// * emitter address + /// * original caller address + /// * operation id + pub fn get_filtered_sc_output_event( + &self, + start: Option, + end: Option, + emitter_address: Option
, + original_caller_address: Option
, + original_operation_id: Option, + ) -> Vec { + self.execution_state + .read() + .expect("could not lock execution state for reading") + .get_filtered_sc_output_event( + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + ) + } + /// gets a copy of a full ledger entry /// /// # return value @@ -62,7 +93,6 @@ impl VMController { pub fn execute_readonly_request( &mut self, req: ReadOnlyExecutionRequest, - max_queue_length: usize, ) -> Result { // queue request let resp_rx = { @@ -70,7 +100,7 @@ impl VMController { .input_data .lock() .expect("could not lock VM input data"); - if input_data.readonly_requests.len() > max_queue_length { + if input_data.readonly_requests.len() >= self.config.readonly_queue_length { return Err(ExecutionError::RuntimeError( "too many queued readonly requests".into(), )); diff --git a/massa-execution/src/execution.rs b/massa-execution/src/execution.rs index 50b1a4f794b..b575912672f 100644 --- a/massa-execution/src/execution.rs +++ b/massa-execution/src/execution.rs @@ -5,7 +5,9 @@ use crate::interface_impl::InterfaceImpl; use crate::types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest}; use crate::ExecutionError; use massa_ledger::{Applicable, FinalLedger, LedgerChanges, LedgerEntry, SetUpdateOrDelete}; -use massa_models::{Address, BlockId, Operation, OperationType}; +use massa_models::output_event::SCOutputEvent; +use massa_models::timeslots::get_current_latest_block_slot; +use massa_models::{Address, BlockId, Operation, OperationId, OperationType}; use massa_models::{Block, Slot}; use massa_sc_runtime::Interface; use std::{ @@ -374,4 +376,49 @@ impl ExecutionState { (final_entry, active_entry) } + + /// Get events optionnally filtered by: + /// * start slot + /// * end slot + /// * emitter address + /// * original caller address + /// * operation id + pub fn get_filtered_sc_output_event( + &self, + start: Option, + end: Option, + emitter_address: Option
, + original_caller_address: Option
, + original_operation_id: Option, + ) -> Vec { + // iter on step history chained with final events + let start = start.unwrap_or_else(|| Slot::min()); + let end = end.unwrap_or_else(|| Slot::max()); + self.final_events + .get_filtered_sc_output_event( + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + ) + .into_iter() + .chain( + // TODO note that active history is made of consecutive slots, + // so this algo does not need to scan all history items as iteation bounds can be derived a priori + self.active_history + .iter() + .filter(|item| item.slot >= start && item.slot < end) + .flat_map(|item| { + item.events.get_filtered_sc_output_event( + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + ) + }), + ) + .collect() + } } diff --git a/massa-execution/src/vm_thread.rs b/massa-execution/src/vm_thread.rs index fe714840f4e..c1f8d75dccd 100644 --- a/massa-execution/src/vm_thread.rs +++ b/massa-execution/src/vm_thread.rs @@ -421,6 +421,7 @@ pub fn start_vm(config: VMConfig, final_ledger: Arc>) -> VMM // create a controller let controller = Arc::new(VMController { + config: config.clone(), loop_cv: Condvar::new(), input_data: Mutex::new(VMInputData { blockclique_changed: true, diff --git a/massa-models/src/slot.rs b/massa-models/src/slot.rs index f9bc68fd68d..d0f9d1e09f7 100644 --- a/massa-models/src/slot.rs +++ b/massa-models/src/slot.rs @@ -42,6 +42,22 @@ impl Slot { Slot { period, thread } } + /// returns the minimal slot + pub const fn min() -> Slot { + Slot { + period: 0, + thread: 0, + } + } + + /// returns the maximal slot + pub const fn max() -> Slot { + Slot { + period: u64::MAX, + thread: u8::MAX, + } + } + pub fn get_first_bit(&self) -> bool { Hash::compute_from(&self.to_bytes_key()).to_bytes()[0] >> 7 == 1 } From 380fec4fb7629499373221392f14d3353107e1db Mon Sep 17 00:00:00 2001 From: damip Date: Fri, 18 Feb 2022 10:10:19 +0100 Subject: [PATCH 14/73] reorganize code, split crates --- Cargo.lock | 43 +- Cargo.toml | 3 +- massa-api/Cargo.toml | 2 +- massa-bootstrap/Cargo.toml | 2 +- massa-consensus-exports/Cargo.toml | 2 +- massa-consensus-worker/Cargo.toml | 2 +- .../Cargo.toml | 3 +- .../src/config.rs | 6 +- .../src/controller.rs | 4 +- .../src/error.rs | 0 massa-execution-exports/src/lib.rs | 10 + massa-execution-exports/src/types.rs | 29 + massa-execution-worker/Cargo.toml | 35 ++ .../read-only.md | 0 .../src/context.rs | 0 .../src/event_store.rs | 0 .../src/execution.rs | 1 - .../src/exports.rs | 0 .../src/interface_impl.rs | 0 .../src/lib.rs | 0 .../src/sce_ledger.rs | 0 .../src/spec.md | 0 .../src/speculative_ledger.rs | 0 .../src/tests/mod.rs | 0 .../src/tests/scenarios_mandatories.rs | 0 .../src/types.rs | 29 +- .../src/vm_thread.rs | 0 massa-execution/src/vm.rs | 508 ---------------- massa-execution/src/worker.rs | 557 ------------------ massa-graph/Cargo.toml | 2 +- massa-ledger/Cargo.toml | 1 - massa-ledger/src/config.rs | 7 +- massa-ledger/src/ledger.rs | 422 +------------ massa-ledger/src/ledger_changes.rs | 292 +++++++++ massa-ledger/src/ledger_entry.rs | 32 + massa-ledger/src/lib.rs | 17 +- massa-ledger/src/types.rs | 105 ++++ massa-node/Cargo.toml | 3 +- 38 files changed, 573 insertions(+), 1544 deletions(-) rename {massa-execution => massa-execution-exports}/Cargo.toml (93%) rename {massa-execution => massa-execution-exports}/src/config.rs (83%) rename {massa-execution => massa-execution-exports}/src/controller.rs (98%) rename {massa-execution => massa-execution-exports}/src/error.rs (100%) 
create mode 100644 massa-execution-exports/src/lib.rs create mode 100644 massa-execution-exports/src/types.rs create mode 100644 massa-execution-worker/Cargo.toml rename {massa-execution => massa-execution-worker}/read-only.md (100%) rename {massa-execution => massa-execution-worker}/src/context.rs (100%) rename {massa-execution => massa-execution-worker}/src/event_store.rs (100%) rename {massa-execution => massa-execution-worker}/src/execution.rs (99%) rename {massa-execution => massa-execution-worker}/src/exports.rs (100%) rename {massa-execution => massa-execution-worker}/src/interface_impl.rs (100%) rename {massa-execution => massa-execution-worker}/src/lib.rs (100%) rename {massa-execution => massa-execution-worker}/src/sce_ledger.rs (100%) rename {massa-execution => massa-execution-worker}/src/spec.md (100%) rename {massa-execution => massa-execution-worker}/src/speculative_ledger.rs (100%) rename {massa-execution => massa-execution-worker}/src/tests/mod.rs (100%) rename {massa-execution => massa-execution-worker}/src/tests/scenarios_mandatories.rs (100%) rename {massa-execution => massa-execution-worker}/src/types.rs (50%) rename {massa-execution => massa-execution-worker}/src/vm_thread.rs (100%) delete mode 100644 massa-execution/src/vm.rs delete mode 100644 massa-execution/src/worker.rs create mode 100644 massa-ledger/src/ledger_changes.rs create mode 100644 massa-ledger/src/ledger_entry.rs create mode 100644 massa-ledger/src/types.rs diff --git a/Cargo.lock b/Cargo.lock index c93456f08b5..880e5e84195 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1397,7 +1397,8 @@ dependencies = [ "massa_bootstrap", "massa_consensus_exports", "massa_consensus_worker", - "massa_execution", + "massa_execution_exports", + "massa_execution_worker", "massa_logging", "massa_models", "massa_network", @@ -1445,7 +1446,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-http-server", "massa_consensus_exports", - "massa_execution", + "massa_execution_exports", "massa_graph", "massa_hash", "massa_models", @@ -1468,9 +1469,9 @@ dependencies = [ "futures 0.3.19", "lazy_static", "massa_consensus_exports", - "massa_execution", "massa_graph", "massa_hash", + "massa_ledger", "massa_logging", "massa_models", "massa_network", @@ -1496,7 +1497,7 @@ dependencies = [ "displaydoc", "futures 0.3.19", "lazy_static", - "massa_execution", + "massa_execution_exports", "massa_graph", "massa_hash", "massa_logging", @@ -1530,7 +1531,7 @@ dependencies = [ "futures 0.3.19", "lazy_static", "massa_consensus_exports", - "massa_execution", + "massa_execution_exports", "massa_graph", "massa_hash", "massa_logging", @@ -1556,7 +1557,7 @@ dependencies = [ ] [[package]] -name = "massa_execution" +name = "massa_execution_exports" version = "0.1.0" dependencies = [ "anyhow", @@ -1568,7 +1569,32 @@ dependencies = [ "massa_models", "massa_signature", "massa_time", - "parking_lot", + "pretty_assertions", + "rand 0.8.4", + "rand_xoshiro", + "serde 1.0.134", + "serde_json", + "serial_test", + "tempfile", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "massa_execution_worker" +version = "0.1.0" +dependencies = [ + "anyhow", + "displaydoc", + "lazy_static", + "massa-sc-runtime", + "massa_execution_exports", + "massa_hash", + "massa_ledger", + "massa_models", + "massa_signature", + "massa_time", "pretty_assertions", "rand 0.8.4", "rand_xoshiro", @@ -1588,7 +1614,7 @@ dependencies = [ "bitvec", "displaydoc", "lazy_static", - "massa_execution", + "massa_execution_exports", "massa_hash", "massa_logging", "massa_models", @@ -1636,7 
+1662,6 @@ dependencies = [ "massa_hash", "massa_logging", "massa_models", - "massa_protocol_exports", "massa_signature", "massa_time", "num", diff --git a/Cargo.toml b/Cargo.toml index ac3c3341a02..820f6b19618 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,8 @@ members = [ "massa-client", "massa-consensus-exports", "massa-consensus-worker", - "massa-execution", + "massa-execution-exports", + "massa-execution-worker", "massa-graph", "massa-hash", "massa-logging", diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index 324721cdc04..5d3fd8d4480 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -16,7 +16,7 @@ tokio = { version = "1.11", features = ["full"] } tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } # custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_execution = { path = "../massa-execution" } +massa_execution_exports = { path = "../massa-execution-exports" } massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } massa_network = { path = "../massa-network" } diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index 29bbf0db320..7764a16af21 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -25,7 +25,7 @@ massa_models = { path = "../massa-models" } massa_network = { path = "../massa-network" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } -massa_execution = { path = "../massa-execution" } +massa_ledger = { path = "../massa-ledger" } massa_graph = { path = "../massa-graph" } massa_proof_of_stake_exports = { path = "../massa-proof-of-stake-exports" } diff --git a/massa-consensus-exports/Cargo.toml b/massa-consensus-exports/Cargo.toml index 50d719592f3..bdffba46082 100644 --- a/massa-consensus-exports/Cargo.toml +++ b/massa-consensus-exports/Cargo.toml @@ -22,7 +22,7 @@ tokio = { version = "1.11", features = ["full"] } tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } tempfile = "3.2" # custom modules -massa_execution = { path = "../massa-execution" } +massa_execution_exports = { path = "../massa-execution-exports" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } diff --git a/massa-consensus-worker/Cargo.toml b/massa-consensus-worker/Cargo.toml index faa892f5e5c..dffdd38f485 100644 --- a/massa-consensus-worker/Cargo.toml +++ b/massa-consensus-worker/Cargo.toml @@ -21,7 +21,7 @@ thiserror = "1.0" tokio = { version = "1.11", features = ["full"] } tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } # custom modules -massa_execution = { path = "../massa-execution" } +massa_execution_exports = { path = "../massa-execution-exports" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } diff --git a/massa-execution/Cargo.toml b/massa-execution-exports/Cargo.toml similarity index 93% rename from massa-execution/Cargo.toml rename to massa-execution-exports/Cargo.toml index ec7cbd89b9a..6b83b596a75 100644 --- a/massa-execution/Cargo.toml +++ b/massa-execution-exports/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "massa_execution" +name = "massa_execution_exports" version = "0.1.0" authors = ["Massa Labs "] edition = "2021" @@ -10,7 +10,6 @@ edition = "2021" anyhow = "1" displaydoc = "0.2" lazy_static = "1.4.0" -parking_lot = { version = "0.11.2" } rand = 
"0.8" rand_xoshiro = "0.6" serde = { version = "1.0", features = ["derive"] } diff --git a/massa-execution/src/config.rs b/massa-execution-exports/src/config.rs similarity index 83% rename from massa-execution/src/config.rs rename to massa-execution-exports/src/config.rs index 60122eb856e..9194e57cf81 100644 --- a/massa-execution/src/config.rs +++ b/massa-execution-exports/src/config.rs @@ -1,13 +1,10 @@ use massa_time::MassaTime; -use std::path::PathBuf; /// VM module configuration #[derive(Debug, Clone)] -pub struct VMConfig { +pub struct ExecutionConfig { /// read-only execution request queue length pub readonly_queue_length: usize, - /// Initial SCE ledger file - pub initial_sce_ledger_path: PathBuf, /// maximum number of SC output events kept in cache pub max_final_events: usize, /// number of threads @@ -21,3 +18,4 @@ pub struct VMConfig { /// period duration pub t0: MassaTime, } + diff --git a/massa-execution/src/controller.rs b/massa-execution-exports/src/controller.rs similarity index 98% rename from massa-execution/src/controller.rs rename to massa-execution-exports/src/controller.rs index 4646383d369..54ee3508564 100644 --- a/massa-execution/src/controller.rs +++ b/massa-execution-exports/src/controller.rs @@ -31,7 +31,7 @@ pub(crate) struct VMInputData { } /// VM controller -pub struct VMController { +pub struct ExecutionController { /// VM config pub(crate) config: VMConfig, /// condition variable to wake up the VM loop @@ -42,7 +42,7 @@ pub struct VMController { pub(crate) execution_state: Arc>, } -impl VMController { +impl ExecutionController { /// reads the list of newly finalized blocks and the new blockclique, if there was a change /// if found, remove from input queue pub(crate) fn consume_input(&mut self) -> VMInputData { diff --git a/massa-execution/src/error.rs b/massa-execution-exports/src/error.rs similarity index 100% rename from massa-execution/src/error.rs rename to massa-execution-exports/src/error.rs diff --git a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs new file mode 100644 index 00000000000..3d454cab687 --- /dev/null +++ b/massa-execution-exports/src/lib.rs @@ -0,0 +1,10 @@ +mod config; +mod controller; +mod types; +mod error; + +pub use config::ExecutionConfig; +pub use types::{ExecutionOutput, ReadOnlyExecutionRequest}; +pub use error::ExecutionError; +pub use controller::ExecutionController; + diff --git a/massa-execution-exports/src/types.rs b/massa-execution-exports/src/types.rs new file mode 100644 index 00000000000..19b0ee4400e --- /dev/null +++ b/massa-execution-exports/src/types.rs @@ -0,0 +1,29 @@ + +use crate::event_store::EventStore; +use crate::sce_ledger::SCELedgerChanges; +use massa_ledger::LedgerChanges; +use massa_models::{Address, Amount, BlockId, Slot}; +use std::collections::VecDeque; + +pub struct ExecutionOutput { + // slot + pub slot: Slot, + // optional block ID at that slot (None if miss) + pub block_id: Option, + // ledger_changes caused by the execution step + pub ledger_changes: LedgerChanges, + // events emitted by the execution step + pub events: EventStore, +} + +/// structure describing a read-only execution request +pub struct ReadOnlyExecutionRequest { + /// Maximum gas to spend in the execution. + pub max_gas: u64, + /// The simulated price of gas for the read-only execution. + pub simulated_gas_price: Amount, + /// The code to execute. 
+ pub bytecode: Vec, + /// Call stack to simulate + pub call_stack: Vec, +} diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml new file mode 100644 index 00000000000..7f7592c1690 --- /dev/null +++ b/massa-execution-worker/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "massa_execution_worker" +version = "0.1.0" +authors = ["Massa Labs "] +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = "1" +displaydoc = "0.2" +lazy_static = "1.4.0" +rand = "0.8" +rand_xoshiro = "0.6" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +thiserror = "1.0" +tokio = { version = "1.11", features = ["full"] } +tracing = { version = "0.1", features = [ + "max_level_debug", + "release_max_level_debug", +] } +# custom modules +massa_execution_exports = { path = "../massa-execution-exports" } +massa_models = { path = "../massa-models" } +massa_hash = { path = "../massa-hash" } +massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", tag = "v0.4.3" } +massa_signature = { path = "../massa-signature" } +massa_time = { path = "../massa-time" } +massa_ledger = { path = "../massa-ledger" } + +[dev-dependencies] +pretty_assertions = "1.0" +serial_test = "0.5" +tempfile = "3.2" diff --git a/massa-execution/read-only.md b/massa-execution-worker/read-only.md similarity index 100% rename from massa-execution/read-only.md rename to massa-execution-worker/read-only.md diff --git a/massa-execution/src/context.rs b/massa-execution-worker/src/context.rs similarity index 100% rename from massa-execution/src/context.rs rename to massa-execution-worker/src/context.rs diff --git a/massa-execution/src/event_store.rs b/massa-execution-worker/src/event_store.rs similarity index 100% rename from massa-execution/src/event_store.rs rename to massa-execution-worker/src/event_store.rs diff --git a/massa-execution/src/execution.rs b/massa-execution-worker/src/execution.rs similarity index 99% rename from massa-execution/src/execution.rs rename to massa-execution-worker/src/execution.rs index b575912672f..527c29b645c 100644 --- a/massa-execution/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -6,7 +6,6 @@ use crate::types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequ use crate::ExecutionError; use massa_ledger::{Applicable, FinalLedger, LedgerChanges, LedgerEntry, SetUpdateOrDelete}; use massa_models::output_event::SCOutputEvent; -use massa_models::timeslots::get_current_latest_block_slot; use massa_models::{Address, BlockId, Operation, OperationId, OperationType}; use massa_models::{Block, Slot}; use massa_sc_runtime::Interface; diff --git a/massa-execution/src/exports.rs b/massa-execution-worker/src/exports.rs similarity index 100% rename from massa-execution/src/exports.rs rename to massa-execution-worker/src/exports.rs diff --git a/massa-execution/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs similarity index 100% rename from massa-execution/src/interface_impl.rs rename to massa-execution-worker/src/interface_impl.rs diff --git a/massa-execution/src/lib.rs b/massa-execution-worker/src/lib.rs similarity index 100% rename from massa-execution/src/lib.rs rename to massa-execution-worker/src/lib.rs diff --git a/massa-execution/src/sce_ledger.rs b/massa-execution-worker/src/sce_ledger.rs similarity index 100% rename from massa-execution/src/sce_ledger.rs rename to massa-execution-worker/src/sce_ledger.rs diff --git 
a/massa-execution/src/spec.md b/massa-execution-worker/src/spec.md similarity index 100% rename from massa-execution/src/spec.md rename to massa-execution-worker/src/spec.md diff --git a/massa-execution/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs similarity index 100% rename from massa-execution/src/speculative_ledger.rs rename to massa-execution-worker/src/speculative_ledger.rs diff --git a/massa-execution/src/tests/mod.rs b/massa-execution-worker/src/tests/mod.rs similarity index 100% rename from massa-execution/src/tests/mod.rs rename to massa-execution-worker/src/tests/mod.rs diff --git a/massa-execution/src/tests/scenarios_mandatories.rs b/massa-execution-worker/src/tests/scenarios_mandatories.rs similarity index 100% rename from massa-execution/src/tests/scenarios_mandatories.rs rename to massa-execution-worker/src/tests/scenarios_mandatories.rs diff --git a/massa-execution/src/types.rs b/massa-execution-worker/src/types.rs similarity index 50% rename from massa-execution/src/types.rs rename to massa-execution-worker/src/types.rs index 6f3fafdff35..c2740a83ae0 100644 --- a/massa-execution/src/types.rs +++ b/massa-execution-worker/src/types.rs @@ -1,9 +1,7 @@ +use crate::event_store::EventStore; use crate::sce_ledger::SCELedgerChanges; -use crate::speculative_ledger::SpeculativeLedger; -use crate::{event_store::EventStore, ExecutionError}; use massa_ledger::LedgerChanges; -use massa_models::{Address, Amount, BlockId, OperationId, Slot}; -use rand_xoshiro::Xoshiro256PlusPlus; +use massa_models::{Address, Amount, BlockId, Slot}; use std::collections::VecDeque; /// history of active executed steps @@ -34,26 +32,3 @@ pub struct ExecutionStackElement { /// list of addresses created so far during excution, pub owned_addresses: Vec
, } - -pub struct ExecutionOutput { - // slot - pub slot: Slot, - // optional block ID at that slot (None if miss) - pub block_id: Option, - // ledger_changes caused by the execution step - pub ledger_changes: LedgerChanges, - // events emitted by the execution step - pub events: EventStore, -} - -/// structure describing a read-only execution request -pub struct ReadOnlyExecutionRequest { - /// Maximum gas to spend in the execution. - pub max_gas: u64, - /// The simulated price of gas for the read-only execution. - pub simulated_gas_price: Amount, - /// The code to execute. - pub bytecode: Vec, - /// Call stack to simulate - pub call_stack: Vec, -} diff --git a/massa-execution/src/vm_thread.rs b/massa-execution-worker/src/vm_thread.rs similarity index 100% rename from massa-execution/src/vm_thread.rs rename to massa-execution-worker/src/vm_thread.rs diff --git a/massa-execution/src/vm.rs b/massa-execution/src/vm.rs deleted file mode 100644 index 274982139e7..00000000000 --- a/massa-execution/src/vm.rs +++ /dev/null @@ -1,508 +0,0 @@ -use crate::error::bootstrap_file_error; -use crate::interface_impl::InterfaceImpl; -use crate::sce_ledger::{FinalLedger, SCELedger, SCELedgerChanges}; -use crate::types::{ - EventStore, ExecutionContext, ExecutionData, ExecutionStep, StackElement, StepHistory, - StepHistoryItem, -}; -use crate::{config::ExecutionConfigs, ExecutionError}; -use massa_models::api::SCELedgerInfo; -use massa_models::output_event::SCOutputEvent; -use massa_models::prehash::Map; -use massa_models::timeslots::{get_latest_block_slot_at_timestamp, slot_count_in_range}; -use massa_models::{ - execution::{ExecuteReadOnlyResponse, ReadOnlyResult}, - Address, Amount, BlockId, Slot, -}; -use massa_models::{OperationId, AMOUNT_ZERO}; -use massa_sc_runtime::Interface; -use massa_signature::{derive_public_key, generate_random_private_key}; -use massa_time::MassaTime; -use rand::SeedableRng; -use rand_xoshiro::Xoshiro256PlusPlus; -use std::mem; -use std::sync::{Arc, Mutex}; -use tokio::sync::oneshot; -use tracing::debug; - -/// Virtual Machine and step history system -pub(crate) struct VM { - /// thread count - thread_count: u8, - - genesis_timestamp: MassaTime, - t0: MassaTime, - - /// history of SCE-active executed steps - step_history: StepHistory, - - /// execution interface used by the runtime - execution_interface: Box, - - /// execution context - execution_context: Arc>, - - /// final events - final_events: EventStore, -} - -impl VM { - pub fn new( - cfg: ExecutionConfigs, - ledger_bootstrap: Option<(SCELedger, Slot)>, - ) -> Result { - let (ledger_bootstrap, ledger_slot) = - if let Some((ledger_bootstrap, ledger_slot)) = ledger_bootstrap { - // bootstrap from snapshot - (ledger_bootstrap, ledger_slot) - } else { - // not bootstrapping: load initial SCE ledger from file - let ledger_slot = Slot::new(0, cfg.thread_count.saturating_sub(1)); // last genesis block - let ledgger_balances = serde_json::from_str::>( - &std::fs::read_to_string(&cfg.settings.initial_sce_ledger_path) - .map_err(bootstrap_file_error!("loading", cfg))?, - ) - .map_err(bootstrap_file_error!("parsing", cfg))?; - let ledger_bootstrap = SCELedger::from_balances_map(ledgger_balances); - (ledger_bootstrap, ledger_slot) - }; - - // Context shared between VM and the interface provided to the assembly simulator. - let execution_context = Arc::new(Mutex::new(ExecutionContext::new( - ledger_bootstrap, - ledger_slot, - ))); - - // Instantiate the interface used by the assembly simulator. 
- let execution_interface = Box::new(InterfaceImpl::new( - Arc::clone(&execution_context), - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - )); - - Ok(VM { - thread_count: cfg.thread_count, - step_history: Default::default(), - execution_interface, - execution_context, - final_events: Default::default(), - genesis_timestamp: cfg.genesis_timestamp, - t0: cfg.t0, - }) - } - - // clone bootstrap state (final ledger and slot) - pub fn get_bootstrap_state(&self) -> FinalLedger { - self.execution_context - .lock() - .unwrap() - .ledger_step - .final_ledger_slot - .clone() - } - - /// Get events optionnally filtered by: - /// * start slot - /// * end slot - /// * emitter address - /// * original caller address - /// * operation id - pub fn get_filtered_sc_output_event( - &self, - start: Option, - end: Option, - emitter_address: Option
<Address>, - original_caller_address: Option<Address>
, - original_operation_id: Option, - ) -> Vec { - // iter on step history chained with final events - let start = start.unwrap_or_else(|| Slot::new(0, 0)); - let end = end.unwrap_or(match MassaTime::now() { - Ok(now) => get_latest_block_slot_at_timestamp( - self.thread_count, - self.t0, - self.genesis_timestamp, - now, - ) - .unwrap_or_else(|_| Some(Slot::new(0, 0))) - .unwrap_or_else(|| Slot::new(0, 0)), - Err(_) => Slot::new(0, 0), - }); - self.step_history - .iter() - .filter(|item| item.slot >= start && item.slot < end) - .flat_map(|item| { - item.events.get_filtered_sc_output_event( - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - ) - }) - .chain(self.final_events.get_filtered_sc_output_event( - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - )) - .collect() - } - - // clone bootstrap state (final ledger and slot) - pub fn get_sce_ledger_entry_for_addresses( - &self, - addresses: Vec
, - ) -> Map { - let ledger = &self - .execution_context - .lock() - .unwrap() - .ledger_step - .final_ledger_slot - .ledger; - addresses - .into_iter() - .map(|ad| { - let entry = ledger.0.get(&ad).cloned().unwrap_or_default(); - ( - ad, - SCELedgerInfo { - balance: entry.balance, - module: entry.opt_module, - datastore: entry.data, - }, - ) - }) - .collect() - } - - /// runs an SCE-final execution step - /// See https://github.com/massalabs/massa/wiki/vm_ledger_interaction - /// - /// # Parameters - /// * step: execution step to run - /// * max_final_events: max number of events kept in cache (todo should be removed when config become static) - pub(crate) fn run_final_step(&mut self, step: ExecutionStep, max_final_events: usize) { - // check if that step was already executed as the earliest active step - let history_item = if let Some(cached) = self.pop_cached_step(&step) { - // if so, pop it - cached - } else { - // otherwise, clear step history an run it again explicitly - self.step_history.clear(); - self.run_step_internal(&step) - }; - - // apply ledger changes to final ledger - let mut context = self.execution_context.lock().unwrap(); - let mut ledger_step = &mut (*context).ledger_step; - ledger_step - .final_ledger_slot - .ledger - .apply_changes(&history_item.ledger_changes); - ledger_step.final_ledger_slot.slot = step.slot; - - self.final_events.extend(mem::take(&mut context.events)); - self.final_events.prune(max_final_events) - } - - /// check if step already at history front, if so, pop it - fn pop_cached_step(&mut self, step: &ExecutionStep) -> Option { - let found = if let Some(StepHistoryItem { - slot, opt_block_id, .. - }) = self.step_history.front() - { - if *slot == step.slot { - match (&opt_block_id, &step.block) { - // matching miss - (None, None) => true, - - // matching block - (Some(b_id_hist), Some((b_id_step, _b_step))) => (b_id_hist == b_id_step), - - // miss/block mismatch - (None, Some(_)) => false, - - // block/miss mismatch - (Some(_), None) => false, - } - } else { - false // slot mismatch - } - } else { - false // no item - }; - - // rerturn the step if found - if found { - self.step_history.pop_front() - } else { - None - } - } - - /// Tooling function that has to be run before each new step execution, even if we are in read-only - /// - /// Clear all caused changes in the context - /// Set cumulative_hisory_changes = step_history.into_changes - /// Reset the execution call stack and the owned addresses - fn clear_and_update_context(&self) { - let mut context = self.execution_context.lock().unwrap(); - context.ledger_step.caused_changes.clear(); - context.ledger_step.cumulative_history_changes = - SCELedgerChanges::from(self.step_history.clone()); - context.created_addr_index = 0; - context.created_event_index = 0; - context.stack.clear(); - context.events.clear(); - context.read_only = false; - context.origin_operation_id = None; - } - - /// Prepares (updates) the shared context before the new operation. - /// Returns a snapshot of the current caused ledger changes. 
- /// See https://github.com/massalabs/massa/wiki/vm_ledger_interaction - /// TODO: do not ignore the results - /// TODO: consider dispatching gas fees with edorsers/endorsees as well - /// Returns (backup of local ledger changes, backup of created_addr_index, - /// backup of events, backup of created_events_index, backup of unsafe rng) - fn prepare_context( - &self, - data: &ExecutionData, - block_creator_addr: Address, - block_id: BlockId, - slot: Slot, - operation: Option, - ) -> (SCELedgerChanges, u64, EventStore, u64, Xoshiro256PlusPlus) { - let mut context = self.execution_context.lock().unwrap(); - // make context.ledger_step credit Op's sender with Op.coins in the SCE ledger - let _result = context - .ledger_step - .set_balance_delta(data.sender_address, data.coins, true); - - // make context.ledger_step credit the producer of the block B with Op.max_gas * Op.gas_price in the SCE ledger - let _result = context.ledger_step.set_balance_delta( - block_creator_addr, - data.gas_price.saturating_mul_u64(data.max_gas), - true, - ); - - // fill context for execution - // created_addr_index is not reset here (it is used at the slot scale) - context.gas_price = data.gas_price; - context.max_gas = data.max_gas; - context.stack = vec![StackElement { - address: data.sender_address, - coins: data.coins, - owned_addresses: vec![data.sender_address], - }]; - context.slot = slot; - context.opt_block_id = Some(block_id); - context.opt_block_creator_addr = Some(block_creator_addr); - context.origin_operation_id = operation; - - ( - context.ledger_step.caused_changes.clone(), - context.created_addr_index, - context.events.clone(), - context.created_event_index, - context.unsafe_rng.clone(), - ) - } - - /// Run code in read-only mode - pub(crate) fn run_read_only( - &self, - slot: Slot, - max_gas: u64, - simulated_gas_price: Amount, - bytecode: Vec, - address: Option
, - result_sender: oneshot::Sender, - ) { - // Reset active ledger changes history - self.clear_and_update_context(); - - { - let mut context = self.execution_context.lock().unwrap(); - - // Set the call stack, using the provided address, or a random one. - let address = address.unwrap_or_else(|| { - let private_key = generate_random_private_key(); - let public_key = derive_public_key(&private_key); - Address::from_public_key(&public_key) - }); - - context.stack = vec![StackElement { - address, - coins: AMOUNT_ZERO, - owned_addresses: vec![address], - }]; - - // Set read-only - context.read_only = true; - - // Set the max gas. - context.max_gas = max_gas; - - // Set the simulated gas price. - context.gas_price = simulated_gas_price; - - // Seed the RNG - let mut seed: Vec = slot.to_bytes_key().to_vec(); - seed.push(0u8); // read-only - let seed = massa_hash::hash::Hash::compute_from(&seed).to_bytes(); - context.unsafe_rng = Xoshiro256PlusPlus::from_seed(seed); - } - - // run in the intepreter - let run_result = massa_sc_runtime::run(&bytecode, max_gas, &*self.execution_interface); - - let mut context = self.execution_context.lock().unwrap(); - // Send result back. - let execution_response = ExecuteReadOnlyResponse { - executed_at: slot, - // TODO: specify result. - result: run_result.map_or_else( - |_| ReadOnlyResult::Error("Failed to run in read-only mode".to_string()), - |_| ReadOnlyResult::Ok, - ), - // integrate with output events. - output_events: mem::take(&mut context.events).export(), - }; - if result_sender.send(execution_response).is_err() { - debug!("Execution: could not send ExecuteReadOnlyResponse."); - } - - // Note: changes are not applied to the ledger. - } - - /// Runs an active step - /// See https://github.com/massalabs/massa/wiki/vm_ledger_interaction - /// - /// 1. Get step history (cache of final ledger changes by slot and block_id history) - /// 2. clear caused changes - /// 3. accumulated step history - /// 4. Execute each block of each operation - /// - /// # Parameters - /// * step: execution step to run - fn run_step_internal(&mut self, step: &ExecutionStep) -> StepHistoryItem { - // reset active ledger changes history - self.clear_and_update_context(); - - { - let mut context = self.execution_context.lock().unwrap(); - - // seed the RNG - let mut seed: Vec = step.slot.to_bytes_key().to_vec(); - seed.push(1u8); // not read-only - if let Some((block_id, _block)) = &step.block { - seed.extend(block_id.to_bytes()); // append block ID - } - let seed = massa_hash::hash::Hash::compute_from(&seed).to_bytes(); - context.unsafe_rng = Xoshiro256PlusPlus::from_seed(seed); - } - - // run implicit and async calls - // TODO - - // run explicit calls within the block (if the slot is not a miss) - // note that total block gas is not checked, because currently Protocol makes the block invalid if it overflows gas - let opt_block_id: Option; - if let Some((block_id, block)) = &step.block { - opt_block_id = Some(*block_id); - - // get block creator addr - let block_creator_addr = Address::from_public_key(&block.header.content.creator); - // run all operations - for (op_idx, operation) in block.operations.iter().enumerate() { - // process ExecuteSC operations only - let execution_data = match ExecutionData::try_from(operation) { - Ok(data) => data, - _ => continue, - }; - - // Prepare context and save the initial ledger changes before execution. - // The returned snapshot takes into account the initial coin credits. - // This snapshot will be popped back if bytecode execution fails. 
- let ( - ledger_changes_backup, - created_addr_index_backup, - events_backup, - event_index_backup, - rng_backup, - ) = self.prepare_context( - &execution_data, - block_creator_addr, - *block_id, - step.slot, - Some(match operation.get_operation_id() { - Ok(id) => id, - Err(_) => continue, - }), - ); - - // run in the intepreter - let run_result = massa_sc_runtime::run( - &execution_data.bytecode, - execution_data.max_gas, - &*self.execution_interface, - ); - if let Err(err) = run_result { - debug!( - "failed running bytecode in operation index {} in block {}: {}", - op_idx, block_id, err - ); - // cancel the effects of execution only, pop back init_changes - let mut context = self.execution_context.lock().unwrap(); - context.ledger_step.caused_changes = ledger_changes_backup; - context.created_addr_index = created_addr_index_backup; - context.events = events_backup; - context.created_event_index = event_index_backup; - context.unsafe_rng = rng_backup; - context.origin_operation_id = None; - } - } - } else { - // There is no block for this step, miss - opt_block_id = None; - } - - // generate history item - let mut context = self.execution_context.lock().unwrap(); - StepHistoryItem { - slot: step.slot, - opt_block_id, - ledger_changes: mem::take(&mut context.ledger_step.caused_changes), - events: mem::take(&mut context.events), - } - } - - /// runs an SCE-active execution step - /// See https://github.com/massalabs/massa/wiki/vm_ledger_interaction - /// - /// # Parameters - /// * step: execution step to run - pub(crate) fn run_active_step(&mut self, step: ExecutionStep) { - // rewind history to optimize execution - if let Some(front_slot) = self.step_history.front().map(|h| h.slot) { - if let Ok(len) = slot_count_in_range(front_slot, step.slot, self.thread_count) { - self.step_history.truncate(len as usize); - } - } - - // run step - let history_item = self.run_step_internal(&step); - - // push step into history - self.step_history.push_back(history_item); - } - - pub fn reset_to_final(&mut self) { - self.step_history.clear(); - } -} diff --git a/massa-execution/src/worker.rs b/massa-execution/src/worker.rs deleted file mode 100644 index 494504a8ecb..00000000000 --- a/massa-execution/src/worker.rs +++ /dev/null @@ -1,557 +0,0 @@ -use crate::error::ExecutionError; -use crate::sce_ledger::FinalLedger; -use crate::types::{ExecutionQueue, ExecutionRequest}; -use crate::vm::VM; -use crate::BootstrapExecutionState; -use crate::{config::ExecutionConfigs, types::ExecutionStep}; -use massa_models::api::SCELedgerInfo; -use massa_models::execution::ExecuteReadOnlyResponse; -use massa_models::output_event::SCOutputEvent; -use massa_models::prehash::Map; -use massa_models::timeslots::{get_block_slot_timestamp, get_current_latest_block_slot}; -use massa_models::{Address, Amount, Block, BlockId, OperationId, Slot}; -use std::collections::BTreeMap; -use std::thread::{self, JoinHandle}; -use tokio::sync::{mpsc, oneshot}; -use tokio::time::sleep_until; -use tracing::debug; - -/// Commands sent to the `execution` component. 
-#[derive(Debug)] -pub enum ExecutionCommand { - /// The clique has changed, - /// contains the blocks of the new blockclique - /// and a list of blocks that became final - BlockCliqueChanged { - blockclique: Map, - finalized_blocks: Map, - }, - - /// Get a snapshot of the current state for bootstrap - GetBootstrapState(tokio::sync::oneshot::Sender), - - /// Get events optionnally filtered by: - /// * start slot - /// * end slot - /// * emitter address - /// * original caller address - /// * operation id - GetSCOutputEvents { - start: Option, - end: Option, - emitter_address: Option
<Address>, - original_caller_address: Option<Address>
, - original_operation_id: Option<OperationId>, - response_tx: oneshot::Sender<Vec<SCOutputEvent>>, - }, - - /// Execute bytecode in read-only mode - ExecuteReadOnlyRequest { - /// Maximum gas spend in execution. - max_gas: u64, - /// The simulated price of gas for the read-only execution. - simulated_gas_price: Amount, - /// The code to execute. - bytecode: Vec<u8>, - /// The channel used to send the result of execution. - result_sender: oneshot::Sender<ExecuteReadOnlyResponse>, - /// The address, or a default random one if none is provided, - /// which will simulate the sender of the operation. - address: Option<Address>
, - }, - GetSCELedgerForAddresses { - response_tx: oneshot::Sender<Map<Address, SCELedgerInfo>>, - addresses: Vec<Address>
, - }, -} - -// Events produced by the execution component. -pub enum ExecutionEvent { - /// A coin transfer - /// from the SCE ledger to the CSS ledger. - TransferToConsensus, -} - -/// Management commands sent to the `execution` component. -pub enum ExecutionManagementCommand {} - -pub struct ExecutionWorker { - /// Configuration - cfg: ExecutionConfigs, - /// Receiver of commands. - controller_command_rx: mpsc::Receiver, - /// Receiver of management commands. - controller_manager_rx: mpsc::Receiver, - /// Sender of events. - _event_sender: mpsc::UnboundedSender, - /// Time cursors - last_final_slot: Slot, - last_active_slot: Slot, - /// pending CSS final blocks - pending_css_final_blocks: BTreeMap, - /// VM thread - vm_thread: JoinHandle<()>, - /// VM execution requests queue - execution_queue: ExecutionQueue, -} - -impl ExecutionWorker { - pub fn new( - cfg: ExecutionConfigs, - event_sender: mpsc::UnboundedSender, - controller_command_rx: mpsc::Receiver, - controller_manager_rx: mpsc::Receiver, - bootstrap_state: Option, - ) -> Result { - let execution_queue = ExecutionQueue::default(); - let execution_queue_clone = execution_queue.clone(); - - // Check bootstrap - let bootstrap_final_slot; - let bootstrap_ledger; - if let Some(bootstrap_state) = bootstrap_state { - // init from bootstrap - bootstrap_final_slot = bootstrap_state.final_slot; - bootstrap_ledger = Some((bootstrap_state.final_ledger, bootstrap_final_slot)); - } else { - // init without bootstrap - bootstrap_final_slot = Slot::new(0, cfg.thread_count.saturating_sub(1)); - bootstrap_ledger = None; - }; - - // Init VM - let mut vm = VM::new(cfg.clone(), bootstrap_ledger)?; - - // Start VM thread - let vm_thread = thread::spawn(move || { - let (lock, condvar) = &*execution_queue_clone; - let mut requests = lock.lock().unwrap(); - // Run until shutdown. 
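// A minimal sketch (an assumption, not shown in this diff) of the `ExecutionQueue`
// alias that the loop below relies on. The destructuring
// `let (lock, condvar) = &*execution_queue_clone;` above and the
// `pop_front`/`push_back`/`notify_one`/`Condvar::wait` calls suggest a shared,
// mutex-guarded FIFO of `ExecutionRequest`s paired with a condition variable:
//
//     use std::collections::VecDeque;
//     use std::sync::{Arc, Condvar, Mutex};
//
//     type ExecutionQueue = Arc<(Mutex<VecDeque<ExecutionRequest>>, Condvar)>;
//
// Producers push a request and call `notify_one`; the VM thread drains the queue
// and blocks on `wait` when it is empty.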
- loop { - match requests.pop_front() { - Some(ExecutionRequest::RunFinalStep(step)) => { - vm.run_final_step(step, cfg.settings.max_final_events); // todo make settings static - } - Some(ExecutionRequest::RunActiveStep(step)) => { - vm.run_active_step(step); - } - Some(ExecutionRequest::RunReadOnly { - slot, - max_gas, - simulated_gas_price, - bytecode, - result_sender, - address, - }) => { - vm.run_read_only( - slot, - max_gas, - simulated_gas_price, - bytecode, - address, - result_sender, - ); - } - Some(ExecutionRequest::ResetToFinalState) => vm.reset_to_final(), - Some(ExecutionRequest::GetBootstrapState { response_tx }) => { - let FinalLedger { ledger, slot } = vm.get_bootstrap_state(); - let bootstrap_state = BootstrapExecutionState { - final_ledger: ledger, - final_slot: slot, - }; - if response_tx.send(bootstrap_state).is_err() { - debug!("execution: could not send get_bootstrap_state answer"); - } - } - Some(ExecutionRequest::GetSCOutputEvents { - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - response_tx, - }) => { - if response_tx - .send(vm.get_filtered_sc_output_event( - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - )) - .is_err() - { - debug!("execution: could not send get_sc_output_event_by_caller_address answer"); - } - } - - Some(ExecutionRequest::Shutdown) => return, - Some(ExecutionRequest::GetSCELedgerForAddresses { - addresses, - response_tx, - }) => { - let res = vm.get_sce_ledger_entry_for_addresses(addresses); - if response_tx.send(res).is_err() { - debug!("execution: could not send GetSCELedgerForAddresses response") - } - } - None => { - requests = condvar.wait(requests).unwrap(); - } - }; - } - }); - - // return execution worker - Ok(ExecutionWorker { - cfg, - controller_command_rx, - controller_manager_rx, - _event_sender: event_sender, - last_final_slot: bootstrap_final_slot, - last_active_slot: bootstrap_final_slot, - pending_css_final_blocks: Default::default(), - vm_thread, - execution_queue, - }) - } - - /// asks the VM to reset to its final state - pub fn reset_to_final(&mut self) { - let (queue_lock, condvar) = &*self.execution_queue; - let queue_guard = &mut queue_lock.lock().unwrap(); - // cancel all non-final requests - // Final execution requests are left to maintain final state consistency - queue_guard.retain(|req| { - matches!( - req, - ExecutionRequest::RunFinalStep(..) - | ExecutionRequest::Shutdown - | ExecutionRequest::GetBootstrapState { .. } - ) - }); - // request reset to final state - queue_guard.push_back(ExecutionRequest::ResetToFinalState); - // notify - condvar.notify_one(); - } - - /// sends an arbitrary VM request - fn push_request(&self, request: ExecutionRequest) { - let (queue_lock, condvar) = &*self.execution_queue; - let queue_guard = &mut queue_lock.lock().unwrap(); - queue_guard.push_back(request); - condvar.notify_one(); - } - - fn get_timer_to_next_slot(&self) -> Result { - Ok(sleep_until( - get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - get_current_latest_block_slot( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.cfg.clock_compensation, - )? - .map_or(Ok(Slot::new(0, 0)), |v| { - v.get_next_slot(self.cfg.thread_count) - })?, - )? 
- .estimate_instant(self.cfg.clock_compensation)?, - )) - } - - pub async fn run_loop(mut self) -> Result<(), ExecutionError> { - // set slot timer - let next_slot_timer = self.get_timer_to_next_slot()?; - tokio::pin!(next_slot_timer); - loop { - tokio::select! { - // Process management commands - _ = self.controller_manager_rx.recv() => break, - // Process commands - Some(cmd) = self.controller_command_rx.recv() => self.process_command(cmd)?, - // Process slot timer event - _ = &mut next_slot_timer => { - self.fill_misses_until_now()?; - next_slot_timer.set(self.get_timer_to_next_slot()?); - } - } - } - // Shutdown VM, cancel all pending execution requests - self.push_request(ExecutionRequest::Shutdown); - if self.vm_thread.join().is_err() { - debug!("Failed joining vm thread") - } - Ok(()) - } - - /// Proces a given command. - /// - /// # Argument - /// * cmd: command to process - fn process_command(&mut self, cmd: ExecutionCommand) -> Result<(), ExecutionError> { - match cmd { - ExecutionCommand::BlockCliqueChanged { - blockclique, - finalized_blocks, - } => { - self.blockclique_changed(blockclique, finalized_blocks)?; - } - - ExecutionCommand::GetBootstrapState(response_tx) => { - self.push_request(ExecutionRequest::GetBootstrapState { response_tx }); - } - - ExecutionCommand::ExecuteReadOnlyRequest { - max_gas, - simulated_gas_price, - bytecode, - result_sender, - address, - } => { - // call the VM to execute in read-only mode at the last active slot. - self.push_request(ExecutionRequest::RunReadOnly { - slot: self.last_active_slot, - max_gas, - simulated_gas_price, - bytecode, - result_sender, - address, - }); - } - ExecutionCommand::GetSCOutputEvents { - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - response_tx, - } => self.push_request(ExecutionRequest::GetSCOutputEvents { - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - response_tx, - }), - ExecutionCommand::GetSCELedgerForAddresses { - response_tx, - addresses, - } => self.push_request(ExecutionRequest::GetSCELedgerForAddresses { - response_tx, - addresses, - }), - } - Ok(()) - } - - /// fills the remaining slots until now() with miss executions - /// see step 4 in spec https://github.com/massalabs/massa/wiki/vm-block-feed - fn fill_misses_until_now(&mut self) -> Result<(), ExecutionError> { - /* TODO DISABLED TEMPORARILY https://github.com/massalabs/massa/issues/2101 - let end_step = get_current_latest_block_slot( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.cfg.clock_compensation, - )?; - if let Some(end_step) = end_step { - // slot S - let mut s = self.last_active_slot.get_next_slot(self.cfg.thread_count)?; - - while s <= end_step { - // call the VM to execute an SCE-active miss at slot S - self.push_request(ExecutionRequest::RunActiveStep(ExecutionStep { - slot: self.last_active_slot, - block: None, - })); - - // set last_active_slot = S - self.last_active_slot = s; - - s = s.get_next_slot(self.cfg.thread_count)?; - } - } - */ - Ok(()) - } - - /// checks whether a miss at slot S would be SCE-final by looking up subsequent CSS-final blocks in the same thread - /// see spec at https://github.com/massalabs/massa/wiki/vm-block-feed - /// - /// # Arguments - /// * s: missed slot - /// * max_css_final_slot: maximum lookup slot (included) - fn is_miss_sce_final(&self, s: Slot, max_css_final_slot: Slot) -> bool { - let mut check_slot = Slot::new(s.period + 1, s.thread); - while check_slot <= max_css_final_slot { - if 
self.pending_css_final_blocks.contains_key(&check_slot) { - break; - } - check_slot.period += 1; - } - check_slot <= max_css_final_slot - } - - /// called when the blockclique changes - /// see spec at https://github.com/massalabs/massa/wiki/vm-block-feed - fn blockclique_changed( - &mut self, - blockclique: Map, - finalized_blocks: Map, - ) -> Result<(), ExecutionError> { - // 1 - reset the SCE state back to its latest final state - - // revert the VM to its latest SCE-final state by clearing its active slot history. - // TODO make something more iterative/conservative in the future to reuse unaffected executions - self.reset_to_final(); - - self.last_active_slot = self.last_final_slot; - - // 2 - process CSS-final blocks - - // extend `pending_css_final_blocks` with `new_css_final_blocks` - let new_css_final_blocks = finalized_blocks.into_iter().filter_map(|(b_id, b)| { - if b.header.content.slot <= self.last_active_slot { - // eliminate blocks that are not from a stricly later slot than the current latest SCE-final one - // (this is an optimization) - return None; - } - Some((b.header.content.slot, (b_id, b))) - }); - self.pending_css_final_blocks.extend(new_css_final_blocks); - - if let Some(max_css_final_slot) = self - .pending_css_final_blocks - .last_key_value() - .map(|(s, _v)| *s) - { - // iterate over every slot S starting from `last_final_slot.get_next_slot()` up to the latest slot in `pending_css_final_blocks` (included) - let mut s = self.last_final_slot.get_next_slot(self.cfg.thread_count)?; - while s <= max_css_final_slot { - match self - .pending_css_final_blocks - .first_key_value() - .map(|(s, _v)| *s) - { - // there is a block B at slot S in `pending_css_final_blocks`: - Some(b_slot) if b_slot == s => { - // remove B from `pending_css_final_blocks` - // cannot panic, checked above - let (_s, (b_id, b)) = self - .pending_css_final_blocks - .pop_first() - .expect("pending_css_final_blocks was unexpectedly empty"); - // call the VM to execute the SCE-final block B at slot S - self.push_request(ExecutionRequest::RunFinalStep(ExecutionStep { - slot: s, - block: Some((b_id, b)), - })); - - self.last_active_slot = s; - self.last_final_slot = s; - } - - // there is no CSS-final block at s, but there are CSS-final blocks later - Some(_b_slot) => { - // check whether there is a CSS-final block later in the same thread - if self.is_miss_sce_final(s, max_css_final_slot) { - // subsequent CSS-final block found in the same thread as s - // call the VM to execute an SCE-final miss at slot S - self.push_request(ExecutionRequest::RunFinalStep(ExecutionStep { - slot: s, - block: None, - })); - - self.last_active_slot = s; - self.last_final_slot = s; - } else { - // no subsequent CSS-final block found in the same thread as s - break; - } - } - - // there are no more CSS-final blocks - None => break, - } - - s = s.get_next_slot(self.cfg.thread_count)?; - } - } - - // 3 - process CSS-active blocks - - // define `sce_active_blocks = blockclique_blocks UNION pending_css_final_blocks` - let new_blockclique_blocks = blockclique.iter().filter_map(|(b_id, b)| { - if b.header.content.slot <= self.last_final_slot { - // eliminate blocks that are not from a stricly later slot than the current latest SCE-final one - // (this is an optimization) - return None; - } - Some((b.header.content.slot, (b_id, b))) - }); - let mut sce_active_blocks: BTreeMap = new_blockclique_blocks - .chain( - self.pending_css_final_blocks - .iter() - .map(|(k, (b_id, b))| (*k, (b_id, b))), - ) - .collect(); - - if let 
Some(max_css_active_slot) = sce_active_blocks.last_key_value().map(|(s, _v)| *s) { - // iterate over every slot S starting from `last_active_slot.get_next_slot()` up to the latest slot in `sce_active_blocks` (included) - let mut s = self.last_final_slot.get_next_slot(self.cfg.thread_count)?; - while s <= max_css_active_slot { - let first_sce_active_slot = sce_active_blocks.first_key_value().map(|(s, _v)| *s); - match first_sce_active_slot { - // there is a block B at slot S in `sce_active_blocks`: - Some(b_slot) if b_slot == s => { - // remove the entry from sce_active_blocks (cannot panic, checked above) - let (_b_slot, (_b_id, _block)) = sce_active_blocks - .pop_first() - .expect("sce_active_blocks should not be empty"); - // call the VM to execute the SCE-active block B at slot S - /* TODO DISABLED TEMPORARILY https://github.com/massalabs/massa/issues/2101 - self.push_request(ExecutionRequest::RunActiveStep(ExecutionStep { - slot: s, - block: Some((*b_id, block.clone())), - })); - self.last_active_slot = s; - */ - } - - // otherwise, if there is no CSS-active block at S - Some(b_slot) => { - // make sure b_slot is after s - if b_slot <= s { - panic!("remaining CSS-active blocks should be later than S"); - } - - // call the VM to execute an SCE-active miss at slot S - /* TODO DISABLED TEMPORARILY https://github.com/massalabs/massa/issues/2101 - self.push_request(ExecutionRequest::RunActiveStep(ExecutionStep { - slot: s, - block: None, - })); - self.last_active_slot = s; - */ - } - - // there are no more CSS-active blocks - None => break, - } - - s = s.get_next_slot(self.cfg.thread_count)?; - } - } - - // 4 - fill the remaining slots with misses - self.fill_misses_until_now()?; - - Ok(()) - } -} diff --git a/massa-graph/Cargo.toml b/massa-graph/Cargo.toml index 69f94840b66..463bb1e6a6c 100644 --- a/massa-graph/Cargo.toml +++ b/massa-graph/Cargo.toml @@ -20,7 +20,7 @@ thiserror = "1.0" tokio = { version = "1.11", features = ["full"] } tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } # custom modules -massa_execution = { path = "../massa-execution" } +massa_execution_exports = { path = "../massa-execution-exports" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } diff --git a/massa-ledger/Cargo.toml b/massa-ledger/Cargo.toml index 46deff43f0a..cf923278308 100644 --- a/massa-ledger/Cargo.toml +++ b/massa-ledger/Cargo.toml @@ -18,7 +18,6 @@ tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_de massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } -massa_protocol_exports = { path = "../massa-protocol-exports" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } diff --git a/massa-ledger/src/config.rs b/massa-ledger/src/config.rs index aeddcd8b15a..2e131e22b1c 100644 --- a/massa-ledger/src/config.rs +++ b/massa-ledger/src/config.rs @@ -1,9 +1,12 @@ // Copyright (c) 2021 MASSA LABS -use serde::{Deserialize, Serialize}; +use std::path::PathBuf; /// Ledger configuration -#[derive(Debug, Deserialize, Serialize, Clone, Copy)] +#[derive(Debug, Clone)] pub struct LedgerConfig { + /// initial SCE ledger file + pub initial_sce_ledger_path: PathBuf, + /// final changes history length pub final_history_length: usize, } diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index 4cff45fea94..8ce03e9e517 100644 --- 
a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -1,424 +1,12 @@ // Copyright (c) 2021 MASSA LABS +use crate::ledger_changes::LedgerChanges; +use crate::ledger_entry::LedgerEntry; +use crate::types::{Applicable, SetUpdateOrDelete}; use crate::LedgerConfig; use massa_hash::hash::Hash; -use massa_models::{ledger_models::LedgerChange, prehash::Map, Address, Amount, Slot}; -use std::collections::{hash_map, BTreeMap, VecDeque}; - -/// represents a structure that supports another one being applied to it -pub trait Applicable { - fn apply(&mut self, _: V); -} - -/// structure defining a ledger entry -#[derive(Default, Debug, Clone)] -pub struct LedgerEntry { - pub parallel_balance: Amount, - pub bytecode: Vec, - pub datastore: BTreeMap>, -} - -/// LedgerEntryUpdate can be applied to a LedgerEntry -impl Applicable for LedgerEntry { - /// applies a LedgerEntryUpdate - fn apply(&mut self, update: LedgerEntryUpdate) { - update.parallel_balance.apply_to(&mut self.parallel_balance); - update.bytecode.apply_to(&mut self.bytecode); - for (key, value_update) in update.datastore { - match value_update { - SetOrDelete::Set(v) => { - self.datastore.insert(key, v); - } - SetOrDelete::Delete => { - self.datastore.remove(&key); - } - } - } - } -} - -/// represents a set/update/delete change -#[derive(Debug, Clone)] -pub enum SetUpdateOrDelete, V: Applicable + Clone> { - /// sets a new absolute value T - Set(T), - /// applies an update V to an existing value - Update(V), - /// deletes a value - Delete, -} - -/// supports applying another SetUpdateOrDelete to self -impl, V: Applicable> Applicable> - for SetUpdateOrDelete -where - V: Clone, -{ - fn apply(&mut self, other: SetUpdateOrDelete) { - match other { - // the other SetUpdateOrDelete sets a new absolute value => force it on self - v @ SetUpdateOrDelete::Set(_) => *self = v, - - // the other SetUpdateOrDelete updates the value - SetUpdateOrDelete::Update(u) => match self { - // if self currently sets an absolute value, apply other to that value - SetUpdateOrDelete::Set(cur) => cur.apply(u), - - // if self currently updates a value, apply the updates of the other to that update - SetUpdateOrDelete::Update(cur) => cur.apply(u), - - // if self currently deletes a value, - // create a new default value, apply other's updates to it and make self set it as an absolute new value - SetUpdateOrDelete::Delete => { - let mut res = T::default(); - res.apply(u); - *self = SetUpdateOrDelete::Set(res); - } - }, - - // the other SetUpdateOrDelete deletes a value => force self to delete it as well - v @ SetUpdateOrDelete::Delete => *self = v, - } - } -} - -/// represents a set/delete change -#[derive(Debug, Clone)] -pub enum SetOrDelete { - /// sets a new absolute value T - Set(T), - /// deletes a value - Delete, -} - -/// allows applying another SetOrDelete to the current one -impl Applicable> for SetOrDelete { - fn apply(&mut self, other: Self) { - *self = other; - } -} - -/// represents a set/keep change -#[derive(Debug, Clone)] -pub enum SetOrKeep { - /// sets a new absolute value T - Set(T), - /// keeps the existing value - Keep, -} - -/// allows applying another SetOrKeep to the current one -impl Applicable> for SetOrKeep { - fn apply(&mut self, other: SetOrKeep) { - if let v @ SetOrKeep::Set(..) 
= other { - // update the current value only if the other SetOrKeep sets a new one - *self = v; - } - } -} - -impl SetOrKeep { - /// applies the current SetOrKeep into a target mutable value - pub fn apply_to(self, val: &mut T) { - if let SetOrKeep::Set(v) = self { - // only change the value if self is setting a new one - *val = v; - } - } -} - -impl Default for SetOrKeep { - fn default() -> Self { - SetOrKeep::Keep - } -} - -/// represents an update to one or more fields of a LedgerEntry -#[derive(Default, Debug, Clone)] -pub struct LedgerEntryUpdate { - roll_count: SetOrKeep, - parallel_balance: SetOrKeep, - bytecode: SetOrKeep>, - datastore: Map>>, -} - -impl Applicable for LedgerEntryUpdate { - /// extends the LedgerEntryUpdate with another one - fn apply(&mut self, update: LedgerEntryUpdate) { - self.roll_count.apply(update.roll_count); - self.parallel_balance.apply(update.parallel_balance); - self.bytecode.apply(update.bytecode); - self.datastore.extend(update.datastore); - } -} - -/// represents a list of changes to ledger entries -#[derive(Default, Debug, Clone)] -pub struct LedgerChanges(pub Map>); - -impl Applicable for LedgerChanges { - /// extends the current LedgerChanges with another one - fn apply(&mut self, changes: LedgerChanges) { - for (addr, change) in changes.0 { - match self.0.entry(addr) { - hash_map::Entry::Occupied(mut occ) => { - // apply incoming change if a change on this entry already exists - occ.get_mut().apply(change); - } - hash_map::Entry::Vacant(vac) => { - // otherwise insert the incoming change - vac.insert(change); - } - } - } - } -} - -impl LedgerChanges { - /// get an item - pub fn get( - &self, - addr: &Address, - ) -> Option<&SetUpdateOrDelete> { - self.0.get(addr) - } - - /// tries to return the parallel balance or gets it from a function - /// - /// # Returns - /// * Some(v) if a value is present - /// * None if the value is absent - /// * f() if the value is unknown - /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further - pub fn get_parallel_balance_or_else Option>( - &self, - addr: &Address, - f: F, - ) -> Option { - match self.0.get(addr) { - Some(SetUpdateOrDelete::Set(v)) => Some(v.parallel_balance), - Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { - parallel_balance, .. - })) => match parallel_balance { - SetOrKeep::Set(v) => Some(*v), - SetOrKeep::Keep => f(), - }, - Some(SetUpdateOrDelete::Delete) => None, - None => f(), - } - } - - /// tries to return the bytecode or gets it from a function - /// - /// # Returns - /// * Some(v) if a value is present - /// * None if the value is absent - /// * f() if the value is unknown - /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further - pub fn get_bytecode_or_else Option>>( - &self, - addr: &Address, - f: F, - ) -> Option> { - match self.0.get(addr) { - Some(SetUpdateOrDelete::Set(v)) => Some(v.bytecode.clone()), - Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { bytecode, .. 
})) => match bytecode { - SetOrKeep::Set(v) => Some(v.clone()), - SetOrKeep::Keep => f(), - }, - Some(SetUpdateOrDelete::Delete) => None, - None => f(), - } - } - - /// tries to return whether an entry exists or gets it from a function - /// - /// # Returns - /// * true if a entry is present - /// * false if the entry is absent - /// * f() if the existence of the value is unknown - /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further - pub fn entry_exists_or_else bool>(&self, addr: &Address, f: F) -> bool { - match self.0.get(addr) { - Some(SetUpdateOrDelete::Set(_)) => true, - Some(SetUpdateOrDelete::Update(_)) => true, - Some(SetUpdateOrDelete::Delete) => false, - None => f(), - } - } - - /// set the parallel balance of an address - pub fn set_parallel_balance(&mut self, addr: Address, balance: Amount) { - match self.0.entry(addr) { - hash_map::Entry::Occupied(mut occ) => { - match occ.get_mut() { - SetUpdateOrDelete::Set(v) => { - // we currently set the absolute value of the entry - // so we need to update the parallel_balance of that value - v.parallel_balance = balance; - } - SetUpdateOrDelete::Update(u) => { - // we currently update the value of the entry - // so we need to set the parallel_balance for that update - u.parallel_balance = SetOrKeep::Set(balance); - } - d @ SetUpdateOrDelete::Delete => { - // we currently delete the entry - // so we need to create a default one with the target balance - *d = SetUpdateOrDelete::Set(LedgerEntry { - parallel_balance: balance, - ..Default::default() - }); - } - } - } - hash_map::Entry::Vacant(vac) => { - // we currently aren't changing anything on that entry - // so we need to create an update with the target balance - vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { - parallel_balance: SetOrKeep::Set(balance), - ..Default::default() - })); - } - } - } - - /// set the parallel balance of an address - pub fn set_bytecode(&mut self, addr: Address, bytecode: Vec) { - match self.0.entry(addr) { - hash_map::Entry::Occupied(mut occ) => { - match occ.get_mut() { - SetUpdateOrDelete::Set(v) => { - // we currently set the absolute value of the entry - // so we need to update the bytecode of that value - v.bytecode = bytecode; - } - SetUpdateOrDelete::Update(u) => { - // we currently update the value of the entry - // so we need to set the bytecode for that update - u.bytecode = SetOrKeep::Set(bytecode); - } - d @ SetUpdateOrDelete::Delete => { - // we currently delete the entry - // so we need to create a default one with the target bytecode - *d = SetUpdateOrDelete::Set(LedgerEntry { - bytecode, - ..Default::default() - }); - } - } - } - hash_map::Entry::Vacant(vac) => { - // we currently aren't changing anything on that entry - // so we need to create an update with the target bytecode - vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { - bytecode: SetOrKeep::Set(bytecode), - ..Default::default() - })); - } - } - } - - /// tries to return a data entry - /// - /// # Returns - /// * Some(v) if a value is present - /// * None if the value is absent - /// * f() if the value is unknown - /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further - pub fn get_data_entry_or_else Option>>( - &self, - addr: &Address, - key: &Hash, - f: F, - ) -> Option> { - match self.0.get(addr) { - Some(SetUpdateOrDelete::Set(v)) => v.datastore.get(key).cloned(), - 
Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => { - match datastore.get(key) { - Some(SetOrDelete::Set(v)) => Some(v.clone()), - Some(SetOrDelete::Delete) => None, - None => f(), - } - } - Some(SetUpdateOrDelete::Delete) => None, - None => f(), - } - } - - /// tries to return whether a data entry exists - /// - /// # Returns - /// * true if it does - /// * false if it does not - /// * f() if its existance is unknown - /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further - pub fn has_data_entry_or_else bool>( - &self, - addr: &Address, - key: &Hash, - f: F, - ) -> bool { - match self.0.get(addr) { - Some(SetUpdateOrDelete::Set(v)) => v.datastore.contains_key(key), - Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => { - match datastore.get(key) { - Some(SetOrDelete::Set(_)) => true, - Some(SetOrDelete::Delete) => false, - None => f(), - } - } - Some(SetUpdateOrDelete::Delete) => false, - None => f(), - } - } - - /// set a datastore entry for an address - pub fn set_data_entry(&mut self, addr: Address, key: Hash, data: Vec) { - match self.0.entry(addr) { - hash_map::Entry::Occupied(mut occ) => { - match occ.get_mut() { - SetUpdateOrDelete::Set(v) => { - // we currently set the absolute value of the entry - // so we need to update the data of that value - v.datastore.insert(key, data); - } - SetUpdateOrDelete::Update(u) => { - // we currently update the value of the entry - // so we need to set the data for that update - u.datastore.insert(key, SetOrDelete::Set(data)); - } - d @ SetUpdateOrDelete::Delete => { - // we currently delete the entry - // so we need to create a default one with the target data - *d = SetUpdateOrDelete::Set(LedgerEntry { - datastore: vec![(key, data)].into_iter().collect(), - ..Default::default() - }); - } - } - } - hash_map::Entry::Vacant(vac) => { - // we currently aren't changing anything on that entry - // so we need to create an update with the target data - vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { - datastore: vec![(key, SetOrDelete::Set(data))].into_iter().collect(), - ..Default::default() - })); - } - } - } -} +use massa_models::{Address, Amount, Slot}; +use std::collections::{BTreeMap, VecDeque}; /// represents a final ledger pub struct FinalLedger { diff --git a/massa-ledger/src/ledger_changes.rs b/massa-ledger/src/ledger_changes.rs new file mode 100644 index 00000000000..ea8afbf0f15 --- /dev/null +++ b/massa-ledger/src/ledger_changes.rs @@ -0,0 +1,292 @@ +use crate::ledger_entry::LedgerEntry; +use crate::types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete}; +use massa_hash::hash::Hash; +use massa_models::{prehash::Map, Address, Amount}; +use std::collections::hash_map; + +/// represents an update to one or more fields of a LedgerEntry +#[derive(Default, Debug, Clone)] +pub struct LedgerEntryUpdate { + pub roll_count: SetOrKeep, + pub parallel_balance: SetOrKeep, + pub bytecode: SetOrKeep>, + pub datastore: Map>>, +} + +impl Applicable for LedgerEntryUpdate { + /// extends the LedgerEntryUpdate with another one + fn apply(&mut self, update: LedgerEntryUpdate) { + self.roll_count.apply(update.roll_count); + self.parallel_balance.apply(update.parallel_balance); + self.bytecode.apply(update.bytecode); + self.datastore.extend(update.datastore); + } +} + +/// represents a list of changes to ledger entries +#[derive(Default, Debug, Clone)] +pub struct LedgerChanges(pub Map>); + +impl Applicable for 
LedgerChanges { + /// extends the current LedgerChanges with another one + fn apply(&mut self, changes: LedgerChanges) { + for (addr, change) in changes.0 { + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + // apply incoming change if a change on this entry already exists + occ.get_mut().apply(change); + } + hash_map::Entry::Vacant(vac) => { + // otherwise insert the incoming change + vac.insert(change); + } + } + } + } +} + +impl LedgerChanges { + /// get an item + pub fn get( + &self, + addr: &Address, + ) -> Option<&SetUpdateOrDelete> { + self.0.get(addr) + } + + /// tries to return the parallel balance or gets it from a function + /// + /// # Returns + /// * Some(v) if a value is present + /// * None if the value is absent + /// * f() if the value is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn get_parallel_balance_or_else Option>( + &self, + addr: &Address, + f: F, + ) -> Option { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(v)) => Some(v.parallel_balance), + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { + parallel_balance, .. + })) => match parallel_balance { + SetOrKeep::Set(v) => Some(*v), + SetOrKeep::Keep => f(), + }, + Some(SetUpdateOrDelete::Delete) => None, + None => f(), + } + } + + /// tries to return the bytecode or gets it from a function + /// + /// # Returns + /// * Some(v) if a value is present + /// * None if the value is absent + /// * f() if the value is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn get_bytecode_or_else Option>>( + &self, + addr: &Address, + f: F, + ) -> Option> { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(v)) => Some(v.bytecode.clone()), + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { bytecode, .. 
})) => match bytecode { + SetOrKeep::Set(v) => Some(v.clone()), + SetOrKeep::Keep => f(), + }, + Some(SetUpdateOrDelete::Delete) => None, + None => f(), + } + } + + /// tries to return whether an entry exists or gets it from a function + /// + /// # Returns + /// * true if a entry is present + /// * false if the entry is absent + /// * f() if the existence of the value is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn entry_exists_or_else bool>(&self, addr: &Address, f: F) -> bool { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(_)) => true, + Some(SetUpdateOrDelete::Update(_)) => true, + Some(SetUpdateOrDelete::Delete) => false, + None => f(), + } + } + + /// set the parallel balance of an address + pub fn set_parallel_balance(&mut self, addr: Address, balance: Amount) { + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + match occ.get_mut() { + SetUpdateOrDelete::Set(v) => { + // we currently set the absolute value of the entry + // so we need to update the parallel_balance of that value + v.parallel_balance = balance; + } + SetUpdateOrDelete::Update(u) => { + // we currently update the value of the entry + // so we need to set the parallel_balance for that update + u.parallel_balance = SetOrKeep::Set(balance); + } + d @ SetUpdateOrDelete::Delete => { + // we currently delete the entry + // so we need to create a default one with the target balance + *d = SetUpdateOrDelete::Set(LedgerEntry { + parallel_balance: balance, + ..Default::default() + }); + } + } + } + hash_map::Entry::Vacant(vac) => { + // we currently aren't changing anything on that entry + // so we need to create an update with the target balance + vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { + parallel_balance: SetOrKeep::Set(balance), + ..Default::default() + })); + } + } + } + + /// set the parallel balance of an address + pub fn set_bytecode(&mut self, addr: Address, bytecode: Vec) { + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + match occ.get_mut() { + SetUpdateOrDelete::Set(v) => { + // we currently set the absolute value of the entry + // so we need to update the bytecode of that value + v.bytecode = bytecode; + } + SetUpdateOrDelete::Update(u) => { + // we currently update the value of the entry + // so we need to set the bytecode for that update + u.bytecode = SetOrKeep::Set(bytecode); + } + d @ SetUpdateOrDelete::Delete => { + // we currently delete the entry + // so we need to create a default one with the target bytecode + *d = SetUpdateOrDelete::Set(LedgerEntry { + bytecode, + ..Default::default() + }); + } + } + } + hash_map::Entry::Vacant(vac) => { + // we currently aren't changing anything on that entry + // so we need to create an update with the target bytecode + vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { + bytecode: SetOrKeep::Set(bytecode), + ..Default::default() + })); + } + } + } + + /// tries to return a data entry + /// + /// # Returns + /// * Some(v) if a value is present + /// * None if the value is absent + /// * f() if the value is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn get_data_entry_or_else Option>>( + &self, + addr: &Address, + key: &Hash, + f: F, + ) -> Option> { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(v)) => v.datastore.get(key).cloned(), + 
Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => { + match datastore.get(key) { + Some(SetOrDelete::Set(v)) => Some(v.clone()), + Some(SetOrDelete::Delete) => None, + None => f(), + } + } + Some(SetUpdateOrDelete::Delete) => None, + None => f(), + } + } + + /// tries to return whether a data entry exists + /// + /// # Returns + /// * true if it does + /// * false if it does not + /// * f() if its existance is unknown + /// + /// this is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + pub fn has_data_entry_or_else bool>( + &self, + addr: &Address, + key: &Hash, + f: F, + ) -> bool { + match self.0.get(addr) { + Some(SetUpdateOrDelete::Set(v)) => v.datastore.contains_key(key), + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => { + match datastore.get(key) { + Some(SetOrDelete::Set(_)) => true, + Some(SetOrDelete::Delete) => false, + None => f(), + } + } + Some(SetUpdateOrDelete::Delete) => false, + None => f(), + } + } + + /// set a datastore entry for an address + pub fn set_data_entry(&mut self, addr: Address, key: Hash, data: Vec) { + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + match occ.get_mut() { + SetUpdateOrDelete::Set(v) => { + // we currently set the absolute value of the entry + // so we need to update the data of that value + v.datastore.insert(key, data); + } + SetUpdateOrDelete::Update(u) => { + // we currently update the value of the entry + // so we need to set the data for that update + u.datastore.insert(key, SetOrDelete::Set(data)); + } + d @ SetUpdateOrDelete::Delete => { + // we currently delete the entry + // so we need to create a default one with the target data + *d = SetUpdateOrDelete::Set(LedgerEntry { + datastore: vec![(key, data)].into_iter().collect(), + ..Default::default() + }); + } + } + } + hash_map::Entry::Vacant(vac) => { + // we currently aren't changing anything on that entry + // so we need to create an update with the target data + vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { + datastore: vec![(key, SetOrDelete::Set(data))].into_iter().collect(), + ..Default::default() + })); + } + } + } +} diff --git a/massa-ledger/src/ledger_entry.rs b/massa-ledger/src/ledger_entry.rs new file mode 100644 index 00000000000..43df0b50031 --- /dev/null +++ b/massa-ledger/src/ledger_entry.rs @@ -0,0 +1,32 @@ +use crate::ledger_changes::LedgerEntryUpdate; +use crate::types::{Applicable, SetOrDelete}; +use massa_hash::hash::Hash; +use massa_models::Amount; +use std::collections::BTreeMap; + +/// structure defining a ledger entry +#[derive(Default, Debug, Clone)] +pub struct LedgerEntry { + pub parallel_balance: Amount, + pub bytecode: Vec, + pub datastore: BTreeMap>, +} + +/// LedgerEntryUpdate can be applied to a LedgerEntry +impl Applicable for LedgerEntry { + /// applies a LedgerEntryUpdate + fn apply(&mut self, update: LedgerEntryUpdate) { + update.parallel_balance.apply_to(&mut self.parallel_balance); + update.bytecode.apply_to(&mut self.bytecode); + for (key, value_update) in update.datastore { + match value_update { + SetOrDelete::Set(v) => { + self.datastore.insert(key, v); + } + SetOrDelete::Delete => { + self.datastore.remove(&key); + } + } + } + } +} diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs index cba4bdfaa61..28086a03772 100644 --- a/massa-ledger/src/lib.rs +++ b/massa-ledger/src/lib.rs @@ -6,16 +6,19 @@ #[macro_use] extern crate massa_logging; -pub use config::LedgerConfig; -pub use 
error::LedgerError; -pub use ledger::{ - Applicable, FinalLedger, LedgerChanges, LedgerEntry, LedgerEntryUpdate, SetOrDelete, SetOrKeep, - SetUpdateOrDelete, -}; - mod config; mod error; mod ledger; +mod ledger_changes; +mod ledger_entry; +mod types; + +pub use config::LedgerConfig; +pub use error::LedgerError; +pub use ledger::FinalLedger; +pub use ledger_changes::LedgerChanges; +pub use ledger_entry::LedgerEntry; +pub use types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete}; #[cfg(test)] mod tests; diff --git a/massa-ledger/src/types.rs b/massa-ledger/src/types.rs new file mode 100644 index 00000000000..ccd8a4c3ee4 --- /dev/null +++ b/massa-ledger/src/types.rs @@ -0,0 +1,105 @@ +use crate::LedgerConfig; +use massa_hash::hash::Hash; +use massa_models::{prehash::Map, Address, Amount, Slot}; +use std::collections::{BTreeMap, VecDeque}; + +/// represents a structure that supports another one being applied to it +pub trait Applicable { + fn apply(&mut self, _: V); +} + +/// represents a set/update/delete change +#[derive(Debug, Clone)] +pub enum SetUpdateOrDelete, V: Applicable + Clone> { + /// sets a new absolute value T + Set(T), + /// applies an update V to an existing value + Update(V), + /// deletes a value + Delete, +} + +/// supports applying another SetUpdateOrDelete to self +impl, V: Applicable> Applicable> + for SetUpdateOrDelete +where + V: Clone, +{ + fn apply(&mut self, other: SetUpdateOrDelete) { + match other { + // the other SetUpdateOrDelete sets a new absolute value => force it on self + v @ SetUpdateOrDelete::Set(_) => *self = v, + + // the other SetUpdateOrDelete updates the value + SetUpdateOrDelete::Update(u) => match self { + // if self currently sets an absolute value, apply other to that value + SetUpdateOrDelete::Set(cur) => cur.apply(u), + + // if self currently updates a value, apply the updates of the other to that update + SetUpdateOrDelete::Update(cur) => cur.apply(u), + + // if self currently deletes a value, + // create a new default value, apply other's updates to it and make self set it as an absolute new value + SetUpdateOrDelete::Delete => { + let mut res = T::default(); + res.apply(u); + *self = SetUpdateOrDelete::Set(res); + } + }, + + // the other SetUpdateOrDelete deletes a value => force self to delete it as well + v @ SetUpdateOrDelete::Delete => *self = v, + } + } +} + +/// represents a set/delete change +#[derive(Debug, Clone)] +pub enum SetOrDelete { + /// sets a new absolute value T + Set(T), + /// deletes a value + Delete, +} + +/// allows applying another SetOrDelete to the current one +impl Applicable> for SetOrDelete { + fn apply(&mut self, other: Self) { + *self = other; + } +} + +/// represents a set/keep change +#[derive(Debug, Clone)] +pub enum SetOrKeep { + /// sets a new absolute value T + Set(T), + /// keeps the existing value + Keep, +} + +/// allows applying another SetOrKeep to the current one +impl Applicable> for SetOrKeep { + fn apply(&mut self, other: SetOrKeep) { + if let v @ SetOrKeep::Set(..) 
= other { + // update the current value only if the other SetOrKeep sets a new one + *self = v; + } + } +} + +impl SetOrKeep { + /// applies the current SetOrKeep into a target mutable value + pub fn apply_to(self, val: &mut T) { + if let SetOrKeep::Set(v) = self { + // only change the value if self is setting a new one + *val = v; + } + } +} + +impl Default for SetOrKeep { + fn default() -> Self { + SetOrKeep::Keep + } +} diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index 50aba8e9b96..34652ceb548 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -27,7 +27,8 @@ massa_api = { path = "../massa-api" } massa_bootstrap = { path = "../massa-bootstrap" } massa_consensus_exports = { path = "../massa-consensus-exports" } massa_consensus_worker = { path = "../massa-consensus-worker" } -massa_execution = { path = "../massa-execution" } +massa_execution_exports = { path = "../massa-execution-exports" } +massa_execution_worker = { path = "../massa-execution-worker" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } massa_network = { path = "../massa-network" } From 2d2fe0e1768ec9a3654fe0d3004788357985cae1 Mon Sep 17 00:00:00 2001 From: damip Date: Fri, 18 Feb 2022 11:13:43 +0100 Subject: [PATCH 15/73] correct errors --- Cargo.lock | 3 - massa-execution-exports/Cargo.toml | 3 - massa-execution-exports/src/config.rs | 1 - .../src/controller_traits.rs | 47 ++ .../src/event_store.rs | 2 +- massa-execution-exports/src/lib.rs | 11 +- massa-execution-exports/src/types.rs | 16 +- massa-execution-worker/src/context.rs | 10 +- .../src/controller.rs | 39 +- massa-execution-worker/src/execution.rs | 20 +- massa-execution-worker/src/interface_impl.rs | 8 +- massa-execution-worker/src/lib.rs | 16 +- massa-execution-worker/src/sce_ledger.rs | 588 ------------------ .../src/speculative_ledger.rs | 8 +- massa-execution-worker/src/types.rs | 34 - massa-execution-worker/src/vm_thread.rs | 33 +- .../src/exports.rs | 0 17 files changed, 130 insertions(+), 709 deletions(-) create mode 100644 massa-execution-exports/src/controller_traits.rs rename {massa-execution-worker => massa-execution-exports}/src/event_store.rs (99%) rename {massa-execution-exports => massa-execution-worker}/src/controller.rs (85%) delete mode 100644 massa-execution-worker/src/sce_ledger.rs delete mode 100644 massa-execution-worker/src/types.rs rename {massa-execution-worker => massa-ledger}/src/exports.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 880e5e84195..8e8a2d47d64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1563,11 +1563,8 @@ dependencies = [ "anyhow", "displaydoc", "lazy_static", - "massa-sc-runtime", - "massa_hash", "massa_ledger", "massa_models", - "massa_signature", "massa_time", "pretty_assertions", "rand 0.8.4", diff --git a/massa-execution-exports/Cargo.toml b/massa-execution-exports/Cargo.toml index 6b83b596a75..1cdddf5ee01 100644 --- a/massa-execution-exports/Cargo.toml +++ b/massa-execution-exports/Cargo.toml @@ -22,9 +22,6 @@ tracing = { version = "0.1", features = [ ] } # custom modules massa_models = { path = "../massa-models" } -massa_hash = { path = "../massa-hash" } -massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", tag = "v0.4.3" } -massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } massa_ledger = { path = "../massa-ledger" } diff --git a/massa-execution-exports/src/config.rs b/massa-execution-exports/src/config.rs index 9194e57cf81..ef03e7778eb 100644 --- 
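// Illustrative sketch (not part of this patch): how the SetOrKeep / SetOrDelete /
// SetUpdateOrDelete lattice introduced in massa-ledger/src/types.rs above composes.
// Assumes LedgerEntry and LedgerEntryUpdate (ledger_entry.rs / ledger_changes.rs)
// with their Default, Clone and Applicable implementations.
fn change_composition_example() {
    let mut change: SetUpdateOrDelete<LedgerEntry, LedgerEntryUpdate> =
        SetUpdateOrDelete::Delete;

    // applying an Update on top of a Delete re-creates the entry from its default
    // value and applies the update to it, as implemented in Applicable above
    change.apply(SetUpdateOrDelete::Update(LedgerEntryUpdate::default()));
    assert!(matches!(change, SetUpdateOrDelete::Set(_)));

    // a later Delete always wins over whatever state was accumulated before
    change.apply(SetUpdateOrDelete::Delete);
    assert!(matches!(change, SetUpdateOrDelete::Delete));
}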
a/massa-execution-exports/src/config.rs +++ b/massa-execution-exports/src/config.rs @@ -18,4 +18,3 @@ pub struct ExecutionConfig { /// period duration pub t0: MassaTime, } - diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs new file mode 100644 index 00000000000..7c2e2419521 --- /dev/null +++ b/massa-execution-exports/src/controller_traits.rs @@ -0,0 +1,47 @@ +use crate::types::ExecutionOutput; +use crate::types::ReadOnlyExecutionRequest; +use crate::ExecutionError; +use massa_ledger::LedgerEntry; +use massa_models::output_event::SCOutputEvent; +use massa_models::Address; +use massa_models::OperationId; +use massa_models::Slot; +use std::sync::Arc; + +pub trait ExecutionController { + /// Get events optionnally filtered by: + /// * start slot + /// * end slot + /// * emitter address + /// * original caller address + /// * operation id + fn get_filtered_sc_output_event( + &self, + start: Option, + end: Option, + emitter_address: Option
<Address>, + original_caller_address: Option<Address>
, + original_operation_id: Option, + ) -> Vec; + + /// gets a copy of a full ledger entry + /// + /// # return value + /// * (final_entry, active_entry) + fn get_full_ledger_entry(&self, addr: &Address) -> (Option, Option); + + /// Executes a readonly request + fn execute_readonly_request( + &mut self, + req: ReadOnlyExecutionRequest, + ) -> Result; +} + +/// execution manager +pub trait ExecutionManager { + /// stops the VM + fn stop(self); + + /// get a shared reference to the VM controller + fn get_controller(&self) -> Arc; +} diff --git a/massa-execution-worker/src/event_store.rs b/massa-execution-exports/src/event_store.rs similarity index 99% rename from massa-execution-worker/src/event_store.rs rename to massa-execution-exports/src/event_store.rs index 6d36dfbcb43..bc9fd0e3749 100644 --- a/massa-execution-worker/src/event_store.rs +++ b/massa-execution-exports/src/event_store.rs @@ -59,7 +59,7 @@ fn remove_from_hashmap( /// Keep all events you need with some useful indexes #[derive(Default, Debug, Clone)] -pub(crate) struct EventStore { +pub struct EventStore { /// maps ids to events id_to_event: Map, diff --git a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs index 3d454cab687..f2f823b5cc0 100644 --- a/massa-execution-exports/src/lib.rs +++ b/massa-execution-exports/src/lib.rs @@ -1,10 +1,11 @@ mod config; -mod controller; -mod types; +mod controller_traits; mod error; +mod event_store; +mod types; pub use config::ExecutionConfig; -pub use types::{ExecutionOutput, ReadOnlyExecutionRequest}; +pub use controller_traits::{ExecutionController, ExecutionManager}; pub use error::ExecutionError; -pub use controller::ExecutionController; - +pub use event_store::EventStore; +pub use types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest}; diff --git a/massa-execution-exports/src/types.rs b/massa-execution-exports/src/types.rs index 19b0ee4400e..186afc51f2c 100644 --- a/massa-execution-exports/src/types.rs +++ b/massa-execution-exports/src/types.rs @@ -1,10 +1,9 @@ - use crate::event_store::EventStore; -use crate::sce_ledger::SCELedgerChanges; use massa_ledger::LedgerChanges; use massa_models::{Address, Amount, BlockId, Slot}; -use std::collections::VecDeque; +/// structure describing the output of an execution +#[derive(Debug, Clone)] pub struct ExecutionOutput { // slot pub slot: Slot, @@ -17,6 +16,7 @@ pub struct ExecutionOutput { } /// structure describing a read-only execution request +#[derive(Debug, Clone)] pub struct ReadOnlyExecutionRequest { /// Maximum gas to spend in the execution. pub max_gas: u64, @@ -27,3 +27,13 @@ pub struct ReadOnlyExecutionRequest { /// Call stack to simulate pub call_stack: Vec, } + +#[derive(Debug, Clone)] +pub struct ExecutionStackElement { + /// called address + pub address: Address, + /// coins transferred to the target address during a call, + pub coins: Amount, + /// list of addresses created so far during excution, + pub owned_addresses: Vec
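// Illustrative sketch (not part of this patch): querying events through the
// ExecutionController trait defined in controller_traits.rs above, used as a trait
// object. The slot bound and the emitter filter are arbitrary example values.
fn emitter_events(
    controller: &dyn ExecutionController,
    emitter: Address,
) -> Vec<SCOutputEvent> {
    controller.get_filtered_sc_output_event(
        Some(Slot::new(0, 0)), // start slot
        None,                  // no end bound
        Some(emitter),         // only events emitted by this address
        None,                  // any original caller
        None,                  // any originating operation
    )
}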
, +} diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 3e9e5c682ad..375cf1300aa 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -1,6 +1,7 @@ use crate::speculative_ledger::SpeculativeLedger; -use crate::types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest}; -use crate::{event_store::EventStore, ExecutionError}; +use massa_execution_exports::{ + EventStore, ExecutionError, ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest, +}; use massa_hash::hash::Hash; use massa_ledger::{FinalLedger, LedgerChanges}; use massa_models::{Address, Amount, BlockId, OperationId, Slot}; @@ -232,8 +233,7 @@ impl ExecutionContext { // they won't have ownership over it but this can still be a pain // generate address - let (slot, created_addr_index) = (self.slot, self.created_addr_index); - let mut data: Vec = slot.to_bytes_key().to_vec(); + let mut data: Vec = self.slot.to_bytes_key().to_vec(); data.append(&mut self.created_addr_index.to_be_bytes().to_vec()); if self.read_only { data.push(0u8); @@ -286,7 +286,7 @@ impl ExecutionContext { /// checks if a datastore entry exists pub fn set_data_entry( - &self, + &mut self, address: &Address, key: Hash, data: Vec, diff --git a/massa-execution-exports/src/controller.rs b/massa-execution-worker/src/controller.rs similarity index 85% rename from massa-execution-exports/src/controller.rs rename to massa-execution-worker/src/controller.rs index 54ee3508564..74140ac57eb 100644 --- a/massa-execution-exports/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -1,8 +1,8 @@ -use crate::config::VMConfig; use crate::execution::ExecutionState; -use crate::types::ExecutionOutput; -use crate::types::ReadOnlyExecutionRequest; -use crate::ExecutionError; +use massa_execution_exports::{ + ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ExecutionOutput, + ReadOnlyExecutionRequest, +}; use massa_ledger::LedgerEntry; use massa_models::output_event::SCOutputEvent; use massa_models::Address; @@ -31,9 +31,9 @@ pub(crate) struct VMInputData { } /// VM controller -pub struct ExecutionController { +pub struct ExecutionControllerImpl { /// VM config - pub(crate) config: VMConfig, + pub(crate) config: ExecutionConfig, /// condition variable to wake up the VM loop pub(crate) loop_cv: Condvar, /// input data to process in the VM loop @@ -42,20 +42,22 @@ pub struct ExecutionController { pub(crate) execution_state: Arc>, } -impl ExecutionController { +impl ExecutionControllerImpl { /// reads the list of newly finalized blocks and the new blockclique, if there was a change /// if found, remove from input queue pub(crate) fn consume_input(&mut self) -> VMInputData { std::mem::take(&mut self.input_data.lock().expect("VM input data lock failed")) } +} +impl ExecutionController for ExecutionControllerImpl { /// Get events optionnally filtered by: /// * start slot /// * end slot /// * emitter address /// * original caller address /// * operation id - pub fn get_filtered_sc_output_event( + fn get_filtered_sc_output_event( &self, start: Option, end: Option, @@ -79,10 +81,7 @@ impl ExecutionController { /// /// # return value /// * (final_entry, active_entry) - pub fn get_full_ledger_entry( - &self, - addr: &Address, - ) -> (Option, Option) { + fn get_full_ledger_entry(&self, addr: &Address) -> (Option, Option) { self.execution_state .read() .expect("could not lock execution state for reading") @@ -90,13 +89,13 @@ impl ExecutionController { 
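// Illustrative sketch (not part of this patch) of the preimage assembled in
// context.rs above when generating a fresh address: slot key bytes, then the
// per-slot creation counter, then a flag byte separating read-only from active
// executions. The counter type and the non-read-only marker value are assumptions;
// the resulting buffer is then hashed into an Address as done in context.rs.
fn created_address_preimage(slot: &Slot, created_addr_index: u64, read_only: bool) -> Vec<u8> {
    let mut data: Vec<u8> = slot.to_bytes_key().to_vec();
    data.extend(created_addr_index.to_be_bytes());
    if read_only {
        data.push(0u8);
    } else {
        data.push(1u8); // assumed marker for non-read-only executions
    }
    data
}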
} /// Executes a readonly request - pub fn execute_readonly_request( + fn execute_readonly_request( &mut self, req: ReadOnlyExecutionRequest, ) -> Result { // queue request let resp_rx = { - let input_data = self + let mut input_data = self .input_data .lock() .expect("could not lock VM input data"); @@ -124,17 +123,17 @@ impl ExecutionController { } } -/// VM manager -pub struct VMManager { +/// Execution manager +pub struct ExecutionManagerImpl { /// shared reference to the VM controller - controller: Arc, + controller: Arc, /// handle used to join the VM thread thread_handle: std::thread::JoinHandle<()>, } -impl VMManager { +impl ExecutionManager for ExecutionManagerImpl { /// stops the VM - pub fn stop(self) { + fn stop(self) { info!("stopping VM controller..."); // notify the VM thread to stop { @@ -154,7 +153,7 @@ impl VMManager { } /// get a shared reference to the VM controller - pub fn get_controller(&self) -> Arc { + fn get_controller(&self) -> Arc { self.controller.clone() } } diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 527c29b645c..f5d1eddf70f 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -1,9 +1,9 @@ -use crate::config::VMConfig; use crate::context::ExecutionContext; -use crate::event_store::EventStore; use crate::interface_impl::InterfaceImpl; -use crate::types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest}; -use crate::ExecutionError; +use massa_execution_exports::{ + EventStore, ExecutionConfig, ExecutionError, ExecutionOutput, ExecutionStackElement, + ReadOnlyExecutionRequest, +}; use massa_ledger::{Applicable, FinalLedger, LedgerChanges, LedgerEntry, SetUpdateOrDelete}; use massa_models::output_event::SCOutputEvent; use massa_models::{Address, BlockId, Operation, OperationId, OperationType}; @@ -27,7 +27,7 @@ macro_rules! 
context_guard { /// structure holding consistent speculative and final execution states pub struct ExecutionState { // VM config - pub config: VMConfig, + pub config: ExecutionConfig, // active execution output history pub active_history: VecDeque, // active execution cursor @@ -46,7 +46,7 @@ pub struct ExecutionState { impl ExecutionState { /// create a new execution state - pub fn new(config: VMConfig, final_ledger: Arc>) -> ExecutionState { + pub fn new(config: ExecutionConfig, final_ledger: Arc>) -> ExecutionState { // get last final slot from final ledger let last_final_slot = final_ledger .read() @@ -172,7 +172,7 @@ impl ExecutionState { /// execute an operation in the context of a block pub fn execute_operation( - &mut self, + &self, operation: &Operation, block_creator_addr: Address, ) -> Result<(), ExecutionError> { @@ -199,7 +199,7 @@ impl ExecutionState { // prepare the context let context_snapshot; { - let context = context_guard!(self); + let mut context = context_guard!(self); // credit the producer of the block B with max_gas * gas_price parallel coins // note that errors are deterministic and do not cancel op execution @@ -316,6 +316,8 @@ impl ExecutionState { let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); // create readonly execution context + let max_gas = req.max_gas; + let bytecode = req.bytecode.clone(); let execution_context = ExecutionContext::new_readonly( slot, req, @@ -327,7 +329,7 @@ impl ExecutionState { *context_guard!(self) = execution_context; // run the intepreter - massa_sc_runtime::run(&req.bytecode, req.max_gas, &*self.execution_interface) + massa_sc_runtime::run(&bytecode, max_gas, &*self.execution_interface) .map_err(|err| ExecutionError::RuntimeError(err.to_string()))?; // return the execution output diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index 9b19084495c..02b47a058c1 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -1,7 +1,9 @@ /// Implementation of the interface used in the execution external library /// -use crate::{config::VMConfig, context::ExecutionContext, types::ExecutionStackElement}; +use crate::context::ExecutionContext; use anyhow::{bail, Result}; +use massa_execution_exports::ExecutionConfig; +use massa_execution_exports::ExecutionStackElement; use massa_hash::hash::Hash; use massa_models::{ output_event::{EventExecutionContext, SCOutputEvent, SCOutputEventId}, @@ -25,12 +27,12 @@ macro_rules! 
context_guard { #[derive(Clone)] pub(crate) struct InterfaceImpl { - config: VMConfig, + config: ExecutionConfig, context: Arc>, } impl InterfaceImpl { - pub fn new(config: VMConfig, context: Arc>) -> InterfaceImpl { + pub fn new(config: ExecutionConfig, context: Arc>) -> InterfaceImpl { InterfaceImpl { config, context } } } diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index 64c1afe95d9..df27399486b 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -1,28 +1,14 @@ #![feature(map_first_last)] #![feature(unzip_option)] -mod config; mod context; mod controller; -mod error; -mod event_store; mod execution; -mod exports; mod interface_impl; -mod sce_ledger; mod speculative_ledger; -mod types; -mod vm; mod vm_thread; -mod worker; -pub use error::ExecutionError; -pub use exports::BootstrapExecutionState; -pub use sce_ledger::{SCELedger, SCELedgerEntry}; -pub use worker::ExecutionCommand; -pub use worker::ExecutionEvent; -pub use worker::ExecutionManagementCommand; -pub use worker::ExecutionWorker; +pub use vm_thread::start_vm; #[cfg(test)] mod tests; diff --git a/massa-execution-worker/src/sce_ledger.rs b/massa-execution-worker/src/sce_ledger.rs deleted file mode 100644 index 08fb34c8090..00000000000 --- a/massa-execution-worker/src/sce_ledger.rs +++ /dev/null @@ -1,588 +0,0 @@ -use crate::ExecutionError; -use massa_hash::hash::Hash; -use massa_hash::HASH_SIZE_BYTES; -use massa_models::prehash::{BuildMap, Map}; -use massa_models::{ - array_from_slice, DeserializeCompact, DeserializeVarInt, ModelsError, SerializeCompact, - SerializeVarInt, Slot, ADDRESS_SIZE_BYTES, -}; -use massa_models::{Address, Amount, AMOUNT_ZERO}; -use serde::{Deserialize, Serialize}; - -/// an entry in the SCE ledger -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct SCELedgerEntry { - // SCE balance - pub balance: Amount, - - // optional executable module - pub opt_module: Option>, - - // datastore - pub data: Map>, -} - -impl SCELedgerEntry { - /// applies an entry update to self - pub fn apply_entry_update(&mut self, update: &SCELedgerEntryUpdate) { - // balance - if let Some(new_balance) = update.update_balance { - self.balance = new_balance; - } - - // module - if let Some(opt_module) = &update.update_opt_module { - self.opt_module = opt_module.clone(); - } - - // data - for (data_key, data_update) in update.update_data.iter() { - match data_update { - Some(new_data) => { - self.data.insert(*data_key, new_data.clone()); - } - None => { - self.data.remove(data_key); - } - } - } - } -} - -impl SerializeCompact for SCELedgerEntry { - fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { - let mut res: Vec = Vec::new(); - - // write balance - res.extend(self.balance.to_bytes_compact()?); - - // write opt module data - if let Some(module_data) = &self.opt_module { - // write that it is present - res.push(1); - - // write length - let length: u32 = module_data.len().try_into().map_err(|_| { - ModelsError::SerializeError( - "SCE ledger entry module data too long for serialization".into(), - ) - })?; - // TODO check against max length - res.extend(length.to_varint_bytes()); - - // write bytecode - res.extend(module_data); - } else { - // write that it is absent - res.push(0); - } - - // write data store - - // write length - let length: u32 = self.data.len().try_into().map_err(|_| { - ModelsError::SerializeError( - "SCE ledger entry data store too long for serialization".into(), - ) - })?; - // TODO limit length - 
res.extend(length.to_varint_bytes()); - - // write entry pairs - for (h, data_entry) in self.data.iter() { - // write hash - res.extend(h.to_bytes()); - - // write length - let length: u32 = data_entry.len().try_into().map_err(|_| { - ModelsError::SerializeError( - "SCE ledger entry data store entry too long for serialization".into(), - ) - })?; - // TODO check against max length - res.extend(length.to_varint_bytes()); - - // write data entry - res.extend(data_entry); - } - - Ok(res) - } -} - -impl DeserializeCompact for SCELedgerEntry { - fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { - let mut cursor = 0usize; - - // read balance - let (balance, delta) = Amount::from_bytes_compact(&buffer[cursor..])?; - cursor += delta; - - // read opt module data - let has_module = match buffer.get(cursor) { - Some(1) => true, - Some(0) => false, - _ => { - return Err(ModelsError::DeserializeError( - "could not deserialize ledger entry opt module data byte".into(), - )) - } - }; - cursor += 1; - let opt_module: Option> = if has_module { - // read length - let (length, delta) = u32::from_varint_bytes(&buffer[cursor..])?; - // TOOD limit length with from_varint_bytes_bounded - cursor += delta; - - // read items - if let Some(slice) = buffer.get(cursor..(cursor + (length as usize))) { - cursor += length as usize; - Some(slice.to_vec()) - } else { - return Err(ModelsError::DeserializeError( - "could not deserialize ledger entry module bytes: buffer too small".into(), - )); - } - } else { - None - }; - - // read data store - - // read length - let (length, delta) = u32::from_varint_bytes(&buffer[cursor..])?; - // TOOD limit length with from_varint_bytes_bounded - cursor += delta; - - // read entry pairs - let mut data: Map> = - Map::with_capacity_and_hasher(length as usize, BuildMap::default()); - for _ in 0..length { - // read hash - let h = Hash::from_bytes(&array_from_slice(&buffer[cursor..])?)?; - cursor += HASH_SIZE_BYTES; - - // read data length - let (d_length, delta) = u32::from_varint_bytes(&buffer[cursor..])?; - // TOOD limit d_length with from_varint_bytes_bounded - cursor += delta; - - // read data - let entry_data = if let Some(slice) = buffer.get(cursor..(cursor + (d_length as usize))) - { - cursor += d_length as usize; - slice.to_vec() - } else { - return Err(ModelsError::DeserializeError( - "could not deserialize ledger entry data store entry bytes: buffer too small" - .into(), - )); - }; - - // insert - data.insert(h, entry_data); - } - - Ok(( - SCELedgerEntry { - balance, - opt_module, - data, - }, - cursor, - )) - } -} - -// optional updates to be applied to a ledger entry -#[derive(Debug, Clone, Default)] -pub struct SCELedgerEntryUpdate { - pub update_balance: Option, - pub update_opt_module: Option>>, - pub update_data: Map>>, // None for row deletion -} - -impl SCELedgerEntryUpdate { - /// apply another SCELedgerEntryUpdate to self - pub fn apply_entry_update(&mut self, other: &SCELedgerEntryUpdate) { - // balance - if let Some(new_balance) = other.update_balance { - self.update_balance = Some(new_balance); - } - - // module - if let Some(new_opt_module) = &other.update_opt_module { - self.update_opt_module = Some(new_opt_module.clone()); - } - - // data - self.update_data.extend(other.update_data.clone()); - } -} - -#[derive(Debug, Clone)] -pub enum SCELedgerChange { - // delete an entry - Delete, - // sets an entry to an absolute value - Set(SCELedgerEntry), - // updates an entry - Update(SCELedgerEntryUpdate), -} - -impl Default for 
SCELedgerChange { - fn default() -> Self { - Self::Delete - } -} - -impl SCELedgerChange { - /// applies another SCELedgerChange to the current one - pub fn apply_change(&mut self, other: &SCELedgerChange) { - let new_val = match (&self, other) { - // other deletes the entry - (_, SCELedgerChange::Delete) => { - // make self delete as well - SCELedgerChange::Delete - } - - // other sets an absolute entry - (_, new_set @ SCELedgerChange::Set(_)) => { - // make self set the same absolute entry - new_set.clone() - } - - // self deletes, other updates - (SCELedgerChange::Delete, SCELedgerChange::Update(other_entry_update)) => { - // prepare a default entry - let mut res_entry = SCELedgerEntry::default(); - // apply other's updates to res_entry - res_entry.apply_entry_update(other_entry_update); - // make self set to res_entry - SCELedgerChange::Set(res_entry) - } - - // self sets, other updates - (SCELedgerChange::Set(cur_entry), SCELedgerChange::Update(other_entry_update)) => { - // apply other's updates to cur_entry - // TODO avoid clone, act directly on mutable cur_entry - let mut res_entry = cur_entry.clone(); - res_entry.apply_entry_update(other_entry_update); - SCELedgerChange::Set(res_entry) - } - - // self updates, other updates - ( - SCELedgerChange::Update(cur_entry_update), - SCELedgerChange::Update(other_entry_update), - ) => { - // try to apply other's updates to self's updates - // TODO avoid clone, act directly on mutable cur_entry_update - let mut res_update = cur_entry_update.clone(); - res_update.apply_entry_update(other_entry_update); - SCELedgerChange::Update(res_update) - } - }; - *self = new_val; - } -} - -/// SCE ledger -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct SCELedger(pub Map); - -impl SerializeCompact for SCELedger { - fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { - let mut res: Vec = Vec::new(); - - // write length - let length: u32 = self.0.len().try_into().map_err(|_| { - ModelsError::SerializeError("SCE ledger too long for serialization".into()) - })?; - // TODO limit length - res.extend(length.to_varint_bytes()); - - // write entry pairs - for (addr, ledger_entry) in self.0.iter() { - // write address - res.extend(addr.to_bytes()); - - // write ledger entry - res.extend(ledger_entry.to_bytes_compact()?); - } - - Ok(res) - } -} - -impl DeserializeCompact for SCELedger { - fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { - let mut cursor = 0usize; - - // read length - let (length, delta) = u32::from_varint_bytes(&buffer[cursor..])?; - // TOOD limit length with from_varint_bytes_bounded - cursor += delta; - - // read entry pairs - let mut res_ledger: Map = - Map::with_capacity_and_hasher(length as usize, BuildMap::default()); - for _ in 0..length { - // read address - let address = Address::from_bytes(&array_from_slice(&buffer[cursor..])?)?; - cursor += ADDRESS_SIZE_BYTES; - - // read ledger entry - let (ledger_entry, delta) = SCELedgerEntry::from_bytes_compact(&buffer[cursor..])?; - cursor += delta; - - // add to output ledger - res_ledger.insert(address, ledger_entry); - } - - Ok((SCELedger(res_ledger), cursor)) - } -} - -/// list of ledger changes (deletions, resets, updates) -#[derive(Debug, Clone, Default)] -pub struct SCELedgerChanges(pub Map); - -impl SCELedgerChanges { - /// extends the current SCELedgerChanges with another - pub fn apply_changes(&mut self, changes: &SCELedgerChanges) { - for (addr, change) in changes.0.iter() { - self.apply_change(*addr, change); - } 
- } - - /// appliees a single change to self - pub fn apply_change(&mut self, addr: Address, change: &SCELedgerChange) { - self.0 - .entry(addr) - .and_modify(|cur_c| cur_c.apply_change(change)) - .or_insert_with(|| change.clone()); - } - - pub fn clear(&mut self) { - self.0.clear(); - } -} - -impl SCELedger { - /// creates an SCELedger from a hashmap of balances - pub fn from_balances_map(balances_map: Map) -> Self { - SCELedger( - balances_map - .into_iter() - .map(|(k, v)| { - ( - k, - SCELedgerEntry { - balance: v, - ..Default::default() - }, - ) - }) - .collect(), - ) - } - - /// applies ledger changes to ledger - pub fn apply_changes(&mut self, changes: &SCELedgerChanges) { - for (addr, change) in changes.0.iter() { - match change { - // delete entry - SCELedgerChange::Delete => { - self.0.remove(addr); - } - - // set entry to absolute value - SCELedgerChange::Set(new_entry) => { - self.0.insert(*addr, new_entry.clone()); - } - - // update entry - SCELedgerChange::Update(update) => { - // insert default if absent - self.0 - .entry(*addr) - .or_insert_with(SCELedgerEntry::default) - .apply_entry_update(update); - } - } - } - } -} - -/// The final ledger. -#[derive(Debug, Clone)] -pub struct FinalLedger { - /// The slot of the ledger. - pub slot: Slot, - /// The ledger. - pub ledger: SCELedger, -} - -/// represents an execution step from the point of view of the SCE ledger -/// applying cumulative_history_changes then caused_changes to final_ledger yields the current ledger during the ledger step -#[derive(Debug, Clone)] -pub struct SCELedgerStep { - // The final ledger and its slot - pub final_ledger_slot: FinalLedger, - - // accumulator of existing ledger changes - pub cumulative_history_changes: SCELedgerChanges, - - // additional changes caused by the step - pub caused_changes: SCELedgerChanges, -} - -impl SCELedgerStep { - /// gets the balance of an SCE ledger entry - pub fn get_balance(&self, addr: &Address) -> Amount { - // check if caused_changes or cumulative_history_changes have an update on this - for changes in [&self.caused_changes, &self.cumulative_history_changes] { - match changes.0.get(addr) { - Some(SCELedgerChange::Delete) => return AMOUNT_ZERO, - Some(SCELedgerChange::Set(new_entry)) => return new_entry.balance, - Some(SCELedgerChange::Update(update)) => { - if let Some(updated_balance) = update.update_balance { - return updated_balance; - } - } - None => {} - } - } - // check if the final ledger has the info - if let Some(entry) = self.final_ledger_slot.ledger.0.get(addr) { - return entry.balance; - } - // otherwise, just return zero - AMOUNT_ZERO - } - - /// sets the balance of an address - pub fn set_balance(&mut self, addr: Address, balance: Amount) { - let update = SCELedgerEntryUpdate { - update_balance: Some(balance), - update_opt_module: Default::default(), - update_data: Default::default(), - }; - self.caused_changes - .apply_change(addr, &SCELedgerChange::Update(update)); - } - - /// tries to increase/decrease the balance of an address - /// does not change anything on failure - pub fn set_balance_delta( - &mut self, - addr: Address, - amount: Amount, - positive: bool, - ) -> Result<(), ExecutionError> { - let mut balance = self.get_balance(&addr); - if positive { - balance = balance - .checked_add(amount) - .ok_or_else(|| ModelsError::CheckedOperationError("balance overflow".into()))?; - } else { - balance = balance - .checked_sub(amount) - .ok_or_else(|| ModelsError::CheckedOperationError("balance underflow".into()))?; - } - self.set_balance(addr, 
balance); - Ok(()) - } - - /// gets the module of an SCE ledger entry - /// returns None if the entry was not found or has no module - pub fn get_module(&self, addr: &Address) -> Option> { - // check if caused_changes or cumulative_history_changes have an update on this - for changes in [&self.caused_changes, &self.cumulative_history_changes] { - match changes.0.get(addr) { - Some(SCELedgerChange::Delete) => return None, - Some(SCELedgerChange::Set(new_entry)) => return new_entry.opt_module.clone(), - Some(SCELedgerChange::Update(update)) => { - if let Some(updates_opt_module) = &update.update_opt_module { - return updates_opt_module.clone(); - } - } - None => {} - } - } - // check if the final ledger has the info - match self.final_ledger_slot.ledger.0.get(addr) { - Some(entry) => entry.opt_module.clone(), - _ => None, - } - } - - /// returns a data entry - /// None if address not found or entry nto found in addr's data - pub fn get_data_entry(&self, addr: &Address, key: &Hash) -> Option> { - // check if caused_changes or cumulative_history_changes have an update on this - for changes in [&self.caused_changes, &self.cumulative_history_changes] { - match changes.0.get(addr) { - Some(SCELedgerChange::Delete) => return None, - Some(SCELedgerChange::Set(new_entry)) => return new_entry.data.get(key).cloned(), - Some(SCELedgerChange::Update(update)) => { - match update.update_data.get(key) { - None => {} // no updates - Some(None) => return None, // data entry deleted, - Some(Some(updated_data)) => return Some(updated_data.clone()), - } - } - None => {} - } - } - - // check if the final ledger has the info - match self.final_ledger_slot.ledger.0.get(addr) { - Some(entry) => entry.data.get(key).cloned(), - _ => None, - } - } - - /// checks if a data entry exists - pub fn has_data_entry(&self, addr: &Address, key: &Hash) -> bool { - // check if caused_changes or cumulative_history_changes have an update on this - for changes in [&self.caused_changes, &self.cumulative_history_changes] { - match changes.0.get(addr) { - Some(SCELedgerChange::Delete) => return false, - Some(SCELedgerChange::Set(_)) => return true, - Some(SCELedgerChange::Update(update)) => { - match update.update_data.get(key) { - None => {} // no updates - Some(None) => return false, // data entry deleted, - Some(Some(_)) => return true, - } - } - None => {} - } - } - - // check if the final ledger has the info - self.final_ledger_slot.ledger.0.contains_key(addr) - } - - /// sets data entry - pub fn set_data_entry(&mut self, addr: Address, key: Hash, value: Vec) { - let update = SCELedgerEntryUpdate { - update_data: [(key, Some(value))].into_iter().collect(), - ..Default::default() - }; - self.caused_changes - .apply_change(addr, &SCELedgerChange::Update(update)); - } - - pub fn set_module(&mut self, addr: Address, opt_module: Option>) { - let update = SCELedgerEntryUpdate { - update_opt_module: Some(opt_module), - ..Default::default() - }; - self.caused_changes - .apply_change(addr, &SCELedgerChange::Update(update)); - } -} diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index d54c188072a..8607bb8a10f 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -1,4 +1,4 @@ -use crate::ExecutionError; +use massa_execution_exports::ExecutionError; use massa_hash::hash::Hash; use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; use massa_models::{Address, Amount, AMOUNT_ZERO}; @@ -143,7 +143,7 @@ impl 
SpeculativeLedger { /// creates a new smart contract address with initial bytecode pub fn create_new_sc_address( - &self, + &mut self, addr: Address, bytecode: Vec, ) -> Result<(), ExecutionError> { @@ -153,7 +153,7 @@ impl SpeculativeLedger { /// sets the bytecode of an address /// fails if the address doesn't exist - pub fn set_bytecode(&self, addr: Address, bytecode: Vec) -> Result<(), ExecutionError> { + pub fn set_bytecode(&mut self, addr: Address, bytecode: Vec) -> Result<(), ExecutionError> { // check for existence if !self.entry_exists(&addr) { return Err(ExecutionError::RuntimeError(format!( @@ -197,7 +197,7 @@ impl SpeculativeLedger { /// sets an entry for an address /// fails if the address doesn't exist pub fn set_data_entry( - &self, + &mut self, addr: &Address, key: Hash, data: Vec, diff --git a/massa-execution-worker/src/types.rs b/massa-execution-worker/src/types.rs deleted file mode 100644 index c2740a83ae0..00000000000 --- a/massa-execution-worker/src/types.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::event_store::EventStore; -use crate::sce_ledger::SCELedgerChanges; -use massa_ledger::LedgerChanges; -use massa_models::{Address, Amount, BlockId, Slot}; -use std::collections::VecDeque; - -/// history of active executed steps -pub(crate) type StepHistory = VecDeque; - -/// A StepHistory item representing the consequences of a given execution step -#[derive(Debug, Clone)] -pub(crate) struct StepHistoryItem { - // step slot - pub slot: Slot, - - // optional block ID (or miss if None) at that slot - pub opt_block_id: Option, - - // list of SCE ledger changes caused by this execution step - pub ledger_changes: SCELedgerChanges, - - /// events produced during this step - pub events: EventStore, -} - -#[derive(Clone)] -pub struct ExecutionStackElement { - /// called address - pub address: Address, - /// coins transferred to the target address during a call, - pub coins: Amount, - /// list of addresses created so far during excution, - pub owned_addresses: Vec
, -} diff --git a/massa-execution-worker/src/vm_thread.rs b/massa-execution-worker/src/vm_thread.rs index c1f8d75dccd..e2e0505a7e2 100644 --- a/massa-execution-worker/src/vm_thread.rs +++ b/massa-execution-worker/src/vm_thread.rs @@ -1,8 +1,8 @@ -use crate::config::VMConfig; -use crate::controller::{VMController, VMInputData, VMManager}; +use crate::controller::{ExecutionControllerImpl, ExecutionManagerImpl, VMInputData}; use crate::execution::ExecutionState; -use crate::types::{ExecutionOutput, ReadOnlyExecutionRequest}; -use crate::ExecutionError; +use massa_execution_exports::{ + ExecutionConfig, ExecutionError, ExecutionOutput, ReadOnlyExecutionRequest, +}; use massa_ledger::FinalLedger; use massa_models::BlockId; use massa_models::{ @@ -19,10 +19,10 @@ use tracing::debug; /// structure gathering all elements needed by the VM thread pub struct VMThread { // VM config - config: VMConfig, + config: ExecutionConfig, // VM data exchange controller - controller: Arc, + controller: Arc, // map of SCE-final blocks not executed yet sce_finals: HashMap>, // last SCE final slot in sce_finals list @@ -42,8 +42,8 @@ pub struct VMThread { impl VMThread { pub fn new( - config: VMConfig, - controller: Arc, + config: ExecutionConfig, + controller: Arc, execution_state: Arc>, ) -> Self { let final_cursor = execution_state @@ -239,7 +239,7 @@ impl VMThread { /// returns true if something was executed fn execute_one_active_slot(&mut self) -> bool { // write-lock the execution state - let exec_state = self + let mut exec_state = self .execution_state .write() .expect("could not lock execution state for writing"); @@ -286,7 +286,7 @@ impl VMThread { /// truncates history if necessary pub fn truncate_history(&mut self) { // acquire write access to execution state - let exec_state = self + let mut exec_state = self .execution_state .write() .expect("could not lock execution state for writing"); @@ -317,7 +317,7 @@ impl VMThread { pub fn main_loop(&mut self) { loop { // read input queues - let input_data = self.controller.consume_input(); + let mut input_data = self.controller.consume_input(); // check for stop signal if input_data.stop { @@ -394,7 +394,7 @@ impl VMThread { } // signal cancellation to all remaining readonly requests - let input_data = self + let mut input_data = self .controller .input_data .lock() @@ -412,7 +412,10 @@ impl VMThread { /// # parameters /// * config: VM configuration /// * bootstrap: -pub fn start_vm(config: VMConfig, final_ledger: Arc>) -> VMManager { +pub fn start_vm( + config: ExecutionConfig, + final_ledger: Arc>, +) -> ExecutionManagerImpl { // create an execution state let execution_state = Arc::new(RwLock::new(ExecutionState::new( config.clone(), @@ -420,7 +423,7 @@ pub fn start_vm(config: VMConfig, final_ledger: Arc>) -> VMM ))); // create a controller - let controller = Arc::new(VMController { + let controller = Arc::new(ExecutionControllerImpl { config: config.clone(), loop_cv: Condvar::new(), input_data: Mutex::new(VMInputData { @@ -437,7 +440,7 @@ pub fn start_vm(config: VMConfig, final_ledger: Arc>) -> VMM }); // return the VM manager - VMManager { + ExecutionManagerImpl { controller, thread_handle, } diff --git a/massa-execution-worker/src/exports.rs b/massa-ledger/src/exports.rs similarity index 100% rename from massa-execution-worker/src/exports.rs rename to massa-ledger/src/exports.rs From 5e422ee5452c6ad8b33168c733d4fbaf5d6c96b3 Mon Sep 17 00:00:00 2001 From: damip Date: Fri, 18 Feb 2022 11:29:55 +0100 Subject: [PATCH 16/73] make things compile --- 
.../src/controller_traits.rs | 3 +- massa-execution-exports/src/error.rs | 16 -------- massa-execution-worker/src/controller.rs | 23 ++++++----- massa-execution-worker/src/execution.rs | 2 +- massa-execution-worker/src/vm_thread.rs | 40 ++++++++++++------- massa-ledger/src/error.rs | 16 ++++++++ 6 files changed, 55 insertions(+), 45 deletions(-) diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 7c2e2419521..3de3fcb47d0 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -6,7 +6,6 @@ use massa_models::output_event::SCOutputEvent; use massa_models::Address; use massa_models::OperationId; use massa_models::Slot; -use std::sync::Arc; pub trait ExecutionController { /// Get events optionnally filtered by: @@ -43,5 +42,5 @@ pub trait ExecutionManager { fn stop(self); /// get a shared reference to the VM controller - fn get_controller(&self) -> Arc; + fn get_controller(&self) -> Box; } diff --git a/massa-execution-exports/src/error.rs b/massa-execution-exports/src/error.rs index 5adb6eccf0e..95c24f13326 100644 --- a/massa-execution-exports/src/error.rs +++ b/massa-execution-exports/src/error.rs @@ -23,19 +23,3 @@ pub enum ExecutionError { /// Runtime error: {0} RuntimeError(String), } - -macro_rules! bootstrap_file_error { - ($st:expr, $cfg:ident) => { - |err| { - ExecutionError::FileError(format!( - "error $st initial SCE ledger file {}: {}", - $cfg.settings - .initial_sce_ledger_path - .to_str() - .unwrap_or("(non-utf8 path)"), - err - )) - } - }; -} -pub(crate) use bootstrap_file_error; diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 74140ac57eb..52ba4a14a60 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -30,14 +30,13 @@ pub(crate) struct VMInputData { )>, } +#[derive(Clone)] /// VM controller pub struct ExecutionControllerImpl { /// VM config pub(crate) config: ExecutionConfig, - /// condition variable to wake up the VM loop - pub(crate) loop_cv: Condvar, - /// input data to process in the VM loop - pub(crate) input_data: Mutex, + /// input data to process in the VM loop with a wakeup condition variable + pub(crate) input_data: Arc<(Condvar, Mutex)>, /// execution state pub(crate) execution_state: Arc>, } @@ -46,7 +45,7 @@ impl ExecutionControllerImpl { /// reads the list of newly finalized blocks and the new blockclique, if there was a change /// if found, remove from input queue pub(crate) fn consume_input(&mut self) -> VMInputData { - std::mem::take(&mut self.input_data.lock().expect("VM input data lock failed")) + std::mem::take(&mut self.input_data.1.lock().expect("VM input data lock failed")) } } @@ -97,6 +96,7 @@ impl ExecutionController for ExecutionControllerImpl { let resp_rx = { let mut input_data = self .input_data + .1 .lock() .expect("could not lock VM input data"); if input_data.readonly_requests.len() >= self.config.readonly_queue_length { @@ -107,7 +107,7 @@ impl ExecutionController for ExecutionControllerImpl { let (resp_tx, resp_rx) = std::sync::mpsc::channel::>(); input_data.readonly_requests.push_back((req, resp_tx)); - self.loop_cv.notify_one(); + self.input_data.0.notify_one(); resp_rx }; @@ -126,9 +126,9 @@ impl ExecutionController for ExecutionControllerImpl { /// Execution manager pub struct ExecutionManagerImpl { /// shared reference to the VM controller - controller: Arc, + pub(crate) controller: ExecutionControllerImpl, 
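// Illustrative sketch (not part of this patch): the request/response pattern used by
// the controller above, reduced to std types. A caller pushes a request together with
// an mpsc sender into the shared queue and notifies the condition variable; the worker
// drains the queue with mem::take and answers each request through its channel. All
// names and the u64 request type are made up for the example.
use std::collections::VecDeque;
use std::sync::{mpsc, Arc, Condvar, Mutex};
use std::time::Duration;

type Request = u64;
type Response = Result<u64, String>;
type SharedInput = Arc<(Condvar, Mutex<VecDeque<(Request, mpsc::Sender<Response>)>>)>;

fn submit(input: &SharedInput, req: Request) -> Response {
    let rx = {
        let mut queue = input.1.lock().expect("input lock poisoned");
        let (tx, rx) = mpsc::channel();
        queue.push_back((req, tx));
        input.0.notify_one(); // wake the worker loop
        rx
    };
    rx.recv()
        .unwrap_or_else(|e| Err(format!("worker channel failed: {}", e)))
}

fn worker_iteration(input: &SharedInput) {
    // wait for a notification or a timeout (the real loop waits until the next slot)
    let guard = input.1.lock().expect("input lock poisoned");
    let (mut guard, _) = input
        .0
        .wait_timeout(guard, Duration::from_millis(100))
        .expect("condvar wait failed");
    // take all pending requests at once so the lock is released quickly
    let pending = std::mem::take(&mut *guard);
    drop(guard);
    for (req, resp_tx) in pending {
        // answer each request; ignore send errors if the caller has gone away
        let _ = resp_tx.send(Ok(req * 2));
    }
}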
/// handle used to join the VM thread - thread_handle: std::thread::JoinHandle<()>, + pub(crate) thread_handle: std::thread::JoinHandle<()>, } impl ExecutionManager for ExecutionManagerImpl { @@ -140,10 +140,11 @@ impl ExecutionManager for ExecutionManagerImpl { let mut input_wlock = self .controller .input_data + .1 .lock() .expect("could not lock VM input data"); input_wlock.stop = true; - self.controller.loop_cv.notify_one(); + self.controller.input_data.0.notify_one(); } // join the VM thread self.thread_handle @@ -153,7 +154,7 @@ impl ExecutionManager for ExecutionManagerImpl { } /// get a shared reference to the VM controller - fn get_controller(&self) -> Arc { - self.controller.clone() + fn get_controller(&self) -> Box { + Box::new(self.controller.clone()) } } diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index f5d1eddf70f..5689e2025cf 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -25,7 +25,7 @@ macro_rules! context_guard { } /// structure holding consistent speculative and final execution states -pub struct ExecutionState { +pub(crate) struct ExecutionState { // VM config pub config: ExecutionConfig, // active execution output history diff --git a/massa-execution-worker/src/vm_thread.rs b/massa-execution-worker/src/vm_thread.rs index e2e0505a7e2..8076e155ffd 100644 --- a/massa-execution-worker/src/vm_thread.rs +++ b/massa-execution-worker/src/vm_thread.rs @@ -17,12 +17,12 @@ use std::{ }; use tracing::debug; /// structure gathering all elements needed by the VM thread -pub struct VMThread { +pub(crate) struct VMThread { // VM config config: ExecutionConfig, // VM data exchange controller - controller: Arc, + controller: ExecutionControllerImpl, // map of SCE-final blocks not executed yet sce_finals: HashMap>, // last SCE final slot in sce_finals list @@ -43,7 +43,7 @@ pub struct VMThread { impl VMThread { pub fn new( config: ExecutionConfig, - controller: Arc, + controller: ExecutionControllerImpl, execution_state: Arc>, ) -> Self { let final_cursor = execution_state @@ -317,7 +317,7 @@ impl VMThread { pub fn main_loop(&mut self) { loop { // read input queues - let mut input_data = self.controller.consume_input(); + let input_data = self.controller.consume_input(); // check for stop signal if input_data.stop { @@ -369,6 +369,7 @@ impl VMThread { let input_data = self .controller .input_data + .1 .lock() .expect("could not lock VM input data"); if input_data.stop { @@ -388,7 +389,8 @@ impl VMThread { // wait for change or for next slot let _ = self .controller - .loop_cv + .input_data + .0 .wait_timeout(input_data, delay_until_next_slot.to_duration()) .expect("VM main loop condition variable wait failed"); } @@ -397,12 +399,18 @@ impl VMThread { let mut input_data = self .controller .input_data + .1 .lock() .expect("could not lock VM input data"); for (_req, resp_tx) in input_data.readonly_requests.drain(..) 
{ - resp_tx.send(Err(ExecutionError::RuntimeError( - "readonly execution cancelled because VM is closing".into(), - ))); + if resp_tx + .send(Err(ExecutionError::RuntimeError( + "readonly execution cancelled because VM is closing".into(), + ))) + .is_err() + { + debug!("failed sending readonly request response: channel down"); + } } } } @@ -423,15 +431,17 @@ pub fn start_vm( ))); // create a controller - let controller = Arc::new(ExecutionControllerImpl { + let controller = ExecutionControllerImpl { config: config.clone(), - loop_cv: Condvar::new(), - input_data: Mutex::new(VMInputData { - blockclique_changed: true, - ..Default::default() - }), + input_data: Arc::new(( + Condvar::new(), + Mutex::new(VMInputData { + blockclique_changed: true, + ..Default::default() + }), + )), execution_state: execution_state.clone(), - }); + }; // launch the VM thread let ctl = controller.clone(); diff --git a/massa-ledger/src/error.rs b/massa-ledger/src/error.rs index c09508a6d6e..aa28f59489e 100644 --- a/massa-ledger/src/error.rs +++ b/massa-ledger/src/error.rs @@ -11,3 +11,19 @@ pub enum LedgerError { /// missing entry: {0} MissingEntry(String), } + +macro_rules! init_file_error { + ($st:expr, $cfg:ident) => { + |err| { + ExecutionError::FileError(format!( + "error $st initial SCE ledger file {}: {}", + $cfg.settings + .initial_sce_ledger_path + .to_str() + .unwrap_or("(non-utf8 path)"), + err + )) + } + }; +} +pub(crate) use init_file_error; From d9fb41e7744f8f35aaadc495ad8a4013a4410974 Mon Sep 17 00:00:00 2001 From: damip Date: Fri, 18 Feb 2022 12:56:27 +0100 Subject: [PATCH 17/73] integration --- massa-execution-worker/src/context.rs | 6 ------ massa-execution-worker/src/controller.rs | 7 ++++--- massa-execution-worker/src/execution.rs | 5 ++--- massa-execution-worker/src/lib.rs | 2 +- massa-execution-worker/src/speculative_ledger.rs | 6 +----- massa-execution-worker/src/vm_thread.rs | 2 +- massa-node/src/main.rs | 3 ++- 7 files changed, 11 insertions(+), 20 deletions(-) diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 375cf1300aa..bc91cff4be1 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -51,9 +51,6 @@ pub(crate) struct ExecutionContext { /// block ID, if one is present at this slot pub opt_block_id: Option, - /// block creator addr, if there is a block at this slot - pub opt_block_creator_addr: Option
, - /// address call stack, most recent is at the back pub stack: Vec, @@ -83,7 +80,6 @@ impl ExecutionContext { created_addr_index: Default::default(), created_event_index: Default::default(), opt_block_id: Default::default(), - opt_block_creator_addr: Default::default(), stack: Default::default(), read_only: Default::default(), events: Default::default(), @@ -144,7 +140,6 @@ impl ExecutionContext { pub(crate) fn new_active_slot( slot: Slot, opt_block_id: Option, - opt_block_creator_addr: Option
, previous_changes: LedgerChanges, final_ledger: Arc>, ) -> Self { @@ -161,7 +156,6 @@ impl ExecutionContext { ExecutionContext { slot, opt_block_id, - opt_block_creator_addr, unsafe_rng, ..ExecutionContext::new(final_ledger, previous_changes) } diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 52ba4a14a60..196cb1045d3 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -115,9 +115,10 @@ impl ExecutionController for ExecutionControllerImpl { match resp_rx.recv() { Ok(result) => return result, Err(err) => { - return Err(ExecutionError::RuntimeError( - "the VM input channel is closed".into(), - )) + return Err(ExecutionError::RuntimeError(format!( + "the VM input channel failed: {}", + err + ))) } } } diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 5689e2025cf..1f597b3f471 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -122,7 +122,7 @@ impl ExecutionState { for (hist_index, exec_output) in self.active_history.iter().enumerate() { let found_block_id = active_slots .get(&exec_output.slot) - .map(|opt_b| opt_b.as_ref().map(|(b_id, b)| *b_id)); + .map(|opt_b| opt_b.as_ref().map(|(b_id, _b)| *b_id)); if found_block_id == Some(exec_output.block_id) { continue; } @@ -178,7 +178,7 @@ impl ExecutionState { ) -> Result<(), ExecutionError> { // process ExecuteSC operations only let (bytecode, max_gas, coins, gas_price) = match &operation.content.op { - op @ OperationType::ExecuteSC { + OperationType::ExecuteSC { data, max_gas, coins, @@ -270,7 +270,6 @@ impl ExecutionState { let execution_context = ExecutionContext::new_active_slot( slot, opt_block_id, - opt_block_creator_addr, previous_ledger_changes, self.final_ledger.clone(), ); diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index df27399486b..3f0b6bda647 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -8,7 +8,7 @@ mod interface_impl; mod speculative_ledger; mod vm_thread; -pub use vm_thread::start_vm; +pub use vm_thread::start_execution_worker; #[cfg(test)] mod tests; diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index 8607bb8a10f..1aa2d05ee7c 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -46,11 +46,6 @@ impl SpeculativeLedger { self.added_changes = snapshot; } - /// consumes Self to get added changes - pub fn into_added_changes(self) -> LedgerChanges { - self.added_changes - } - /// gets the parallel balance of an address pub fn get_parallel_balance(&self, addr: &Address) -> Option { // try to read from added_changes, then previous_changes, then final_ledger @@ -153,6 +148,7 @@ impl SpeculativeLedger { /// sets the bytecode of an address /// fails if the address doesn't exist + #[allow(dead_code)] // TODO remove when it is used pub fn set_bytecode(&mut self, addr: Address, bytecode: Vec) -> Result<(), ExecutionError> { // check for existence if !self.entry_exists(&addr) { diff --git a/massa-execution-worker/src/vm_thread.rs b/massa-execution-worker/src/vm_thread.rs index 8076e155ffd..a3fa1f46a91 100644 --- a/massa-execution-worker/src/vm_thread.rs +++ b/massa-execution-worker/src/vm_thread.rs @@ -420,7 +420,7 @@ impl VMThread { /// # parameters /// * config: VM configuration /// * bootstrap: -pub fn start_vm( +pub fn 
start_execution_worker( config: ExecutionConfig, final_ledger: Arc>, ) -> ExecutionManagerImpl { diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 0f793285345..5d3346cc98f 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -12,7 +12,8 @@ use massa_consensus_exports::{ ConsensusEventReceiver, ConsensusManager, }; use massa_consensus_worker::start_consensus_controller; -use massa_execution::{ExecutionConfigs, ExecutionManager}; +use massa_execution_exports::{ExecutionConfig, ExecutionManager}; +use massa_execution_worker::start_execution_worker; use massa_logging::massa_trace; use massa_models::{init_serialization_context, SerializationContext}; From bc45b2d21bec9355a7307766a2835fc625664a9d Mon Sep 17 00:00:00 2001 From: damip Date: Fri, 18 Feb 2022 13:20:07 +0100 Subject: [PATCH 18/73] merge main --- massa-execution-worker/src/settings.rs | 40 ------------------- .../src/speculative_ledger.rs | 2 +- 2 files changed, 1 insertion(+), 41 deletions(-) delete mode 100644 massa-execution-worker/src/settings.rs diff --git a/massa-execution-worker/src/settings.rs b/massa-execution-worker/src/settings.rs deleted file mode 100644 index 19fd04de306..00000000000 --- a/massa-execution-worker/src/settings.rs +++ /dev/null @@ -1,40 +0,0 @@ -use massa_models::constants::{GENESIS_TIMESTAMP, T0, THREAD_COUNT}; -use massa_time::MassaTime; -use std::path::PathBuf; - -use serde::{Deserialize, Serialize}; - -/// Execution setting parsed with .toml in `massa-node/src/settings.rs` -#[derive(Debug, Deserialize, Serialize, Clone, Default)] -pub struct ExecutionSettings { - /// Initial SCE ledger file - pub initial_sce_ledger_path: PathBuf, - /// maximum number of SC output events kept in cache - pub max_final_events: usize, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct ExecutionConfigs { - /// Execution settings - pub settings: ExecutionSettings, - /// Thread count - pub thread_count: u8, - /// Genesis timestmap - pub genesis_timestamp: MassaTime, - /// period duration - pub t0: MassaTime, - /// clock compensation in milliseconds - pub clock_compensation: i64, -} - -impl Default for ExecutionConfigs { - fn default() -> Self { - Self { - settings: Default::default(), - thread_count: THREAD_COUNT, - genesis_timestamp: *GENESIS_TIMESTAMP, - t0: T0, - clock_compensation: Default::default(), - } - } -} diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index 1aa2d05ee7c..ea45c95d6e4 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -1,7 +1,7 @@ use massa_execution_exports::ExecutionError; use massa_hash::hash::Hash; use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; -use massa_models::{Address, Amount, AMOUNT_ZERO}; +use massa_models::{Address, Amount}; use std::sync::{Arc, RwLock}; /// represents a speculative ledger state combining From 26321e3f4af9f05351d311787627b6353abde335 Mon Sep 17 00:00:00 2001 From: damip Date: Fri, 18 Feb 2022 15:48:41 +0100 Subject: [PATCH 19/73] integrate with consensus --- Cargo.lock | 1 + massa-bootstrap/Cargo.toml | 2 +- massa-bootstrap/src/error.rs | 6 +- massa-bootstrap/src/lib.rs | 16 ++--- massa-bootstrap/src/messages.rs | 24 +++---- .../src/consensus_controller.rs | 11 ++- massa-consensus-exports/src/error.rs | 2 +- massa-consensus-exports/src/settings.rs | 9 +-- .../src/consensus_worker.rs | 43 +++++------ .../src/controller_traits.rs | 14 ++++ massa-execution-worker/src/controller.rs | 26 
+++++++ massa-execution-worker/src/interface_impl.rs | 5 +- .../src/speculative_ledger.rs | 2 +- massa-graph/src/error.rs | 2 +- massa-ledger/src/config.rs | 2 + massa-ledger/src/error.rs | 18 +---- massa-ledger/src/ledger.rs | 72 ++++++++++++++++++- massa-ledger/src/lib.rs | 1 + massa-ledger/src/types.rs | 5 -- massa-node/Cargo.toml | 1 + massa-node/src/main.rs | 32 +++++---- massa-node/src/settings.rs | 14 +++- 22 files changed, 204 insertions(+), 104 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0024aa3a57e..dc7f65242cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1406,6 +1406,7 @@ dependencies = [ "massa_execution_exports", "massa_execution_worker", "massa_graph", + "massa_ledger", "massa_logging", "massa_models", "massa_network", diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index 7764a16af21..f57aa4ad318 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -19,13 +19,13 @@ tokio = { version = "1.11", features = ["full"] } tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } # custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } +massa_ledger = { path = "../massa-ledger" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } massa_network = { path = "../massa-network" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } -massa_ledger = { path = "../massa-ledger" } massa_graph = { path = "../massa-graph" } massa_proof_of_stake_exports = { path = "../massa-proof-of-stake-exports" } diff --git a/massa-bootstrap/src/error.rs b/massa-bootstrap/src/error.rs index 537dfcb55e2..56b0a43249b 100644 --- a/massa-bootstrap/src/error.rs +++ b/massa-bootstrap/src/error.rs @@ -3,8 +3,8 @@ use crate::messages::BootstrapMessage; use displaydoc::Display; use massa_consensus_exports::error::ConsensusError; -use massa_execution::ExecutionError; use massa_hash::MassaHashError; +use massa_ledger::LedgerError; use massa_network::NetworkError; use massa_time::TimeError; use thiserror::Error; @@ -30,8 +30,8 @@ pub enum BootstrapError { ConsensusError(#[from] ConsensusError), /// network error: {0} NetworkError(#[from] NetworkError), - /// execution error: {0} - ExecutionError(#[from] ExecutionError), + /// ledger error: {0} + LedgerError(#[from] LedgerError), /// join error: {0} JoinError(#[from] tokio::task::JoinError), /// missing private key file diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index 5a71dd2adcc..a23f109a5af 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -9,8 +9,8 @@ use error::BootstrapError; pub use establisher::Establisher; use futures::{stream::FuturesUnordered, StreamExt}; use massa_consensus_exports::ConsensusCommandSender; -use massa_execution::{BootstrapExecutionState, ExecutionCommandSender}; use massa_graph::BootstrapableGraph; +use massa_ledger::FinalLedgerBootstrapState; use massa_logging::massa_trace; use massa_models::Version; use massa_network::{BootstrapPeers, NetworkCommandSender}; @@ -49,8 +49,8 @@ pub struct GlobalBootstrapState { /// list of network peers pub peers: Option, - /// state of the execution state - pub execution: Option, + /// state of the final ledger + pub final_ledger: Option, } /// Gets the state from a bootstrap server (internal private function) @@ -181,18 +181,18 @@ async fn get_state_internal( Ok(Ok(msg)) => return Err(BootstrapError::UnexpectedMessage(msg)), }; - // 
Fourth, get execution state + // Fourth, get final ledger // client.next() is not cancel-safe but we drop the whole client object if cancelled => it's OK - let execution = match tokio::time::timeout(cfg.read_timeout.into(), client.next()).await { + let final_ledger = match tokio::time::timeout(cfg.read_timeout.into(), client.next()).await { Err(_) => { return Err(std::io::Error::new( std::io::ErrorKind::TimedOut, - "bootstrap state read timed out", + "final ledger bootstrap state read timed out", ) .into()) } Ok(Err(e)) => return Err(e), - Ok(Ok(BootstrapMessage::ExecutionState { execution_state })) => execution_state, + Ok(Ok(BootstrapMessage::FinalLedgerState { ledger_state })) => ledger_state, Ok(Ok(msg)) => return Err(BootstrapError::UnexpectedMessage(msg)), }; @@ -203,7 +203,7 @@ async fn get_state_internal( graph: Some(graph), compensation_millis, peers: Some(peers), - execution: Some(execution), + final_ledger: Some(final_ledger), }) } diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index e6cc540a713..96f853759e5 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -1,7 +1,7 @@ // Copyright (c) 2021 MASSA LABS -use massa_execution::BootstrapExecutionState; use massa_graph::BootstrapableGraph; +use massa_ledger::FinalLedgerBootstrapState; use massa_models::{ DeserializeCompact, DeserializeVarInt, ModelsError, SerializeCompact, SerializeVarInt, Version, }; @@ -34,9 +34,9 @@ pub enum BootstrapMessage { graph: BootstrapableGraph, }, /// Execution state - ExecutionState { - /// execution state - execution_state: BootstrapExecutionState, + FinalLedgerState { + /// ledger state + ledger_state: FinalLedgerBootstrapState, }, } @@ -46,7 +46,7 @@ enum MessageTypeId { BootstrapTime = 0u32, Peers = 1u32, ConsensusState = 2u32, - ExecutionState = 3u32, + FinalLedgerState = 3u32, } impl SerializeCompact for BootstrapMessage { @@ -70,9 +70,9 @@ impl SerializeCompact for BootstrapMessage { res.extend(&pos.to_bytes_compact()?); res.extend(&graph.to_bytes_compact()?); } - BootstrapMessage::ExecutionState { execution_state } => { - res.extend(u32::from(MessageTypeId::ExecutionState).to_varint_bytes()); - res.extend(&execution_state.to_bytes_compact()?); + BootstrapMessage::FinalLedgerState { ledger_state } => { + res.extend(u32::from(MessageTypeId::FinalLedgerState).to_varint_bytes()); + res.extend(&ledger_state.to_bytes_compact()?); } } Ok(res) @@ -116,12 +116,12 @@ impl DeserializeCompact for BootstrapMessage { BootstrapMessage::ConsensusState { pos, graph } } - MessageTypeId::ExecutionState => { - let (execution_state, delta) = - BootstrapExecutionState::from_bytes_compact(&buffer[cursor..])?; + MessageTypeId::FinalLedgerState => { + let (ledger_state, delta) = + FinalLedgerBootstrapState::from_bytes_compact(&buffer[cursor..])?; cursor += delta; - BootstrapMessage::ExecutionState { execution_state } + BootstrapMessage::FinalLedgerState { ledger_state } } }; Ok((res, cursor)) diff --git a/massa-consensus-exports/src/consensus_controller.rs b/massa-consensus-exports/src/consensus_controller.rs index 9fdca83faf9..e846d85032f 100644 --- a/massa-consensus-exports/src/consensus_controller.rs +++ b/massa-consensus-exports/src/consensus_controller.rs @@ -1,6 +1,4 @@ // Copyright (c) 2021 MASSA LABS -use massa_execution::ExecutionEventReceiver; - use massa_graph::{BlockGraphExport, BootstrapableGraph, ExportBlockStatus, Status}; use massa_models::{ address::AddressState, api::EndorsementInfo, Endorsement, EndorsementId, OperationId, @@ -483,8 
+481,7 @@ impl ConsensusEventReceiver { } pub struct ConsensusManager { - pub join_handle: - JoinHandle>, + pub join_handle: JoinHandle>, pub manager_tx: mpsc::Sender, } @@ -493,12 +490,12 @@ impl ConsensusManager { pub async fn stop( self, consensus_event_receiver: ConsensusEventReceiver, - ) -> Result<(ProtocolEventReceiver, ExecutionEventReceiver), ConsensusError> { + ) -> Result { massa_trace!("consensus.consensus_controller.stop", {}); drop(self.manager_tx); let _remaining_events = consensus_event_receiver.drain().await; - let (protocol_event_receiver, execution_event_receiver) = self.join_handle.await??; + let protocol_event_receiver = self.join_handle.await??; - Ok((protocol_event_receiver, execution_event_receiver)) + Ok(protocol_event_receiver) } } diff --git a/massa-consensus-exports/src/error.rs b/massa-consensus-exports/src/error.rs index 7314400d505..96a3fbffa0f 100644 --- a/massa-consensus-exports/src/error.rs +++ b/massa-consensus-exports/src/error.rs @@ -1,6 +1,6 @@ // Copyright (c) 2021 MASSA LABS use displaydoc::Display; -use massa_execution::ExecutionError; +use massa_execution_exports::ExecutionError; use massa_graph::error::GraphError; use massa_models::ModelsError; use massa_proof_of_stake_exports::error::ProofOfStakeError; diff --git a/massa-consensus-exports/src/settings.rs b/massa-consensus-exports/src/settings.rs index 96f996e7031..a10bcc47559 100644 --- a/massa-consensus-exports/src/settings.rs +++ b/massa-consensus-exports/src/settings.rs @@ -44,7 +44,7 @@ //! In unit test your allowed to use the `testing` feature flag that will //! use the default values from `/node_configuration/default_testing.rs` in the //! `massa-models` crate sources. -use massa_execution::{ExecutionCommandSender, ExecutionEventReceiver}; +use massa_execution_exports::ExecutionController; use massa_graph::{settings::GraphConfig, LedgerConfig}; use massa_models::Amount; use massa_pool::PoolCommandSender; @@ -303,10 +303,8 @@ pub struct ConsensusWorkerChannels { pub protocol_command_sender: ProtocolCommandSender, /// Associated protocol event listener. pub protocol_event_receiver: ProtocolEventReceiver, - /// Associated execution event listener. - pub execution_event_receiver: ExecutionEventReceiver, /// Execution command sender. - pub execution_command_sender: ExecutionCommandSender, + pub execution_controller: Box, /// Associated Pool command sender. pub pool_command_sender: PoolCommandSender, /// Channel receiving consensus commands. @@ -320,8 +318,7 @@ pub struct ConsensusWorkerChannels { /// Public channels associated to the consensus module. 
/// Execution & Protocol Sender/Receiver pub struct ConsensusChannels { - pub execution_command_sender: ExecutionCommandSender, - pub execution_event_receiver: ExecutionEventReceiver, + pub execution_controller: Box, pub protocol_command_sender: ProtocolCommandSender, pub protocol_event_receiver: ProtocolEventReceiver, pub pool_command_sender: PoolCommandSender, diff --git a/massa-consensus-worker/src/consensus_worker.rs b/massa-consensus-worker/src/consensus_worker.rs index 9e2abc97af3..86700dcbf0d 100644 --- a/massa-consensus-worker/src/consensus_worker.rs +++ b/massa-consensus-worker/src/consensus_worker.rs @@ -6,7 +6,6 @@ use massa_consensus_exports::{ settings::ConsensusWorkerChannels, ConsensusConfig, }; -use massa_execution::ExecutionEventReceiver; use massa_graph::{BlockGraph, BlockGraphExport}; use massa_hash::hash::Hash; use massa_models::address::AddressState; @@ -145,24 +144,18 @@ impl ConsensusWorker { // notify execution module of current blockclique and final blocks // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken in sync // because the two modules run concurrently and out of sync - channels - .execution_command_sender - .update_blockclique( - block_db.clone_all_final_blocks(), - /* TODO DISABLED TEMPORARILY https://github.com/massalabs/massa/issues/2101 - block_db - .get_blockclique() - .into_iter() - .filter_map(|block_id| { - block_db - .get_active_block(&block_id) - .map(|a_block| (block_id, a_block.block.clone())) - }) - .collect(), - */ - Map::default(), - ) - .await?; + channels.execution_controller.update_blockclique_status( + block_db.clone_all_final_blocks(), + block_db + .get_blockclique() + .into_iter() + .filter_map(|block_id| { + block_db + .get_active_block(&block_id) + .map(|a_block| (block_id, a_block.block.clone())) + }) + .collect(), + ); Ok(ConsensusWorker { genesis_public_key, @@ -187,7 +180,7 @@ impl ConsensusWorker { /// Consensus work is managed here. /// It's mostly a tokio::select within a loop. 
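// Hedged sketch, not part of the patch above: how a caller holding the new
// `Box<dyn ExecutionController>` (as declared in massa-execution-exports in
// this same commit) is expected to push consensus updates to the VM. The point
// of the refactor shown here is that the call is now synchronous and
// infallible: it only locks the VM input data, stores the new blockclique,
// and notifies the VM thread, so there is no channel, no .await and no Result
// to handle on the consensus side.
use massa_execution_exports::ExecutionController;
use massa_models::prehash::Map;
use massa_models::{Block, BlockId};

fn notify_execution_of_consensus_state(
    controller: &dyn ExecutionController,
    finalized_blocks: Map<BlockId, Block>,
    blockclique: Map<BlockId, Block>,
) {
    // fire-and-forget: the VM thread picks the update up on its next wakeup
    controller.update_blockclique_status(finalized_blocks, blockclique);
}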
- pub async fn run_loop(mut self) -> Result<(ProtocolEventReceiver, ExecutionEventReceiver)> { + pub async fn run_loop(mut self) -> Result { // signal initial state to pool if let Some(previous_slot) = self.previous_slot { self.channels @@ -277,10 +270,7 @@ impl ConsensusWorker { } } // end loop - Ok(( - self.channels.protocol_event_receiver, - self.channels.execution_event_receiver, - )) + Ok(self.channels.protocol_event_receiver) } async fn slot_tick(&mut self, next_slot_timer: &mut std::pin::Pin<&mut Sleep>) -> Result<()> { @@ -1218,9 +1208,8 @@ impl ConsensusWorker { }) .collect(); self.channels - .execution_command_sender - .update_blockclique(finalized_blocks, blockclique) - .await?; + .execution_controller + .update_blockclique_status(finalized_blocks, blockclique); } // Process new final blocks diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 3de3fcb47d0..91a0401b4e3 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -3,11 +3,25 @@ use crate::types::ReadOnlyExecutionRequest; use crate::ExecutionError; use massa_ledger::LedgerEntry; use massa_models::output_event::SCOutputEvent; +use massa_models::prehash::Map; use massa_models::Address; +use massa_models::Block; +use massa_models::BlockId; use massa_models::OperationId; use massa_models::Slot; pub trait ExecutionController { + /// Update blockclique status + /// + /// # arguments + /// * finalized_blocks: newly finalized blocks + /// * blockclique: new blockclique + fn update_blockclique_status( + &self, + finalized_blocks: Map, + blockclique: Map, + ); + /// Get events optionnally filtered by: /// * start slot /// * end slot diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 196cb1045d3..a9a1d84ca7e 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -5,6 +5,7 @@ use massa_execution_exports::{ }; use massa_ledger::LedgerEntry; use massa_models::output_event::SCOutputEvent; +use massa_models::prehash::Map; use massa_models::Address; use massa_models::OperationId; use massa_models::{Block, BlockId, Slot}; @@ -50,6 +51,31 @@ impl ExecutionControllerImpl { } impl ExecutionController for ExecutionControllerImpl { + /// Updates blockclique status + fn update_blockclique_status( + &self, + finalized_blocks: Map, + blockclique: Map, + ) { + let mapped_finalized_blocks: HashMap<_, _> = finalized_blocks + .into_iter() + .map(|(b_id, b)| (b.header.content.slot, (b_id, b))) + .collect(); + let mapped_blockclique = blockclique + .into_iter() + .map(|(b_id, b)| (b.header.content.slot, (b_id, b))) + .collect(); + let mut input_data = self + .input_data + .1 + .lock() + .expect("could not lock VM input data"); + input_data.blockclique = mapped_blockclique; + input_data.finalized_blocks.extend(mapped_finalized_blocks); + input_data.blockclique_changed = true; + self.input_data.0.notify_one(); + } + /// Get events optionnally filtered by: /// * start slot /// * end slot diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index e05ad13505d..bc13ade2e1d 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -5,6 +5,7 @@ use anyhow::{bail, Result}; use massa_execution_exports::ExecutionConfig; use massa_execution_exports::ExecutionStackElement; use massa_hash::hash::Hash; +use massa_models::Amount; use 
massa_models::{ output_event::{EventExecutionContext, SCOutputEvent, SCOutputEventId}, timeslots::get_block_slot_timestamp, @@ -107,7 +108,7 @@ impl Interface for InterfaceImpl { let address = context.get_current_address()?; Ok(context .get_parallel_balance(&address) - .unwrap_or(AMOUNT_ZERO) + .unwrap_or(Amount::default()) .to_raw()) } @@ -116,7 +117,7 @@ impl Interface for InterfaceImpl { let address = massa_models::Address::from_str(address)?; Ok(context_guard!(self) .get_parallel_balance(&address) - .unwrap_or(AMOUNT_ZERO) + .unwrap_or(Amount::default()) .to_raw()) } diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index ea45c95d6e4..2f6625fe4b4 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -109,7 +109,7 @@ impl SpeculativeLedger { if let Some(to_addr) = to_addr { let new_balance = changes .get_parallel_balance_or_else(&to_addr, || self.get_parallel_balance(&to_addr)) - .unwrap_or(AMOUNT_ZERO) + .unwrap_or(Amount::default()) .checked_add(amount) .ok_or(ExecutionError::RuntimeError( "overflow in to_addr balance".into(), diff --git a/massa-graph/src/error.rs b/massa-graph/src/error.rs index 14ecae74454..ecc4f33046e 100644 --- a/massa-graph/src/error.rs +++ b/massa-graph/src/error.rs @@ -2,7 +2,7 @@ use std::array::TryFromSliceError; // Copyright (c) 2021 MASSA LABS use displaydoc::Display; -use massa_execution::ExecutionError; +use massa_execution_exports::ExecutionError; use massa_models::ModelsError; use massa_proof_of_stake_exports::error::ProofOfStakeError; use thiserror::Error; diff --git a/massa-ledger/src/config.rs b/massa-ledger/src/config.rs index 2e131e22b1c..0881de031e9 100644 --- a/massa-ledger/src/config.rs +++ b/massa-ledger/src/config.rs @@ -9,4 +9,6 @@ pub struct LedgerConfig { pub initial_sce_ledger_path: PathBuf, /// final changes history length pub final_history_length: usize, + /// thread count + pub thread_count: u8, } diff --git a/massa-ledger/src/error.rs b/massa-ledger/src/error.rs index aa28f59489e..d7e1d078c0e 100644 --- a/massa-ledger/src/error.rs +++ b/massa-ledger/src/error.rs @@ -10,20 +10,6 @@ pub enum LedgerError { ContainerInconsistency(String), /// missing entry: {0} MissingEntry(String), + /// file error: {0} + FileError(String), } - -macro_rules! init_file_error { - ($st:expr, $cfg:ident) => { - |err| { - ExecutionError::FileError(format!( - "error $st initial SCE ledger file {}: {}", - $cfg.settings - .initial_sce_ledger_path - .to_str() - .unwrap_or("(non-utf8 path)"), - err - )) - } - }; -} -pub(crate) use init_file_error; diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index 8ce03e9e517..fb0d277f2d6 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -3,11 +3,19 @@ use crate::ledger_changes::LedgerChanges; use crate::ledger_entry::LedgerEntry; use crate::types::{Applicable, SetUpdateOrDelete}; -use crate::LedgerConfig; +use crate::{LedgerConfig, LedgerError}; use massa_hash::hash::Hash; use massa_models::{Address, Amount, Slot}; use std::collections::{BTreeMap, VecDeque}; +/// temporary ledger bootstrap structure +pub struct FinalLedgerBootstrapState { + /// ledger slot + slot: Slot, + /// sorted ledger + sorted_ledger: BTreeMap, +} + /// represents a final ledger pub struct FinalLedger { /// ledger config @@ -49,7 +57,69 @@ impl Applicable for FinalLedger { } } +macro_rules! 
init_file_error { + ($st:expr, $cfg:ident) => { + |err| { + LedgerError::FileError(format!( + "error $st initial ledger file {}: {}", + $cfg.initial_sce_ledger_path + .to_str() + .unwrap_or("(non-utf8 path)"), + err + )) + } + }; +} +pub(crate) use init_file_error; + impl FinalLedger { + /// init from file + pub fn new(config: LedgerConfig) -> Result { + // load file + let sorted_ledger = serde_json::from_str::>( + &std::fs::read_to_string(&config.initial_sce_ledger_path) + .map_err(init_file_error!("loading", config))?, + ) + .map_err(init_file_error!("parsing", config))? + .into_iter() + .map(|(address, balance)| { + ( + address, + LedgerEntry { + parallel_balance: balance, + ..Default::default() + }, + ) + }) + .collect(); + + // generate final ledger + Ok(FinalLedger { + slot: Slot::new(0, config.thread_count.saturating_sub(1)), + sorted_ledger, + changes_history: Default::default(), + config, + }) + } + + /// load from bootstrap + pub fn from_bootstrap_state(config: LedgerConfig, state: FinalLedgerBootstrapState) -> Self { + FinalLedger { + slot: state.slot, + sorted_ledger: state.sorted_ledger, + changes_history: Default::default(), + config, + } + } + + /// get bootstrap state + pub fn get_bootstrap_state(&self) -> FinalLedgerBootstrapState { + FinalLedgerBootstrapState { + slot: self.slot, + sorted_ledger: self.sorted_ledger.clone(), + } + } + /// gets a full cloned entry pub fn get_full_entry(&self, addr: &Address) -> Option { self.sorted_ledger.get(addr).cloned() diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs index 28086a03772..7c6e26bd507 100644 --- a/massa-ledger/src/lib.rs +++ b/massa-ledger/src/lib.rs @@ -16,6 +16,7 @@ mod types; pub use config::LedgerConfig; pub use error::LedgerError; pub use ledger::FinalLedger; +pub use ledger::FinalLedgerBootstrapState; pub use ledger_changes::LedgerChanges; pub use ledger_entry::LedgerEntry; pub use types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete}; diff --git a/massa-ledger/src/types.rs b/massa-ledger/src/types.rs index ccd8a4c3ee4..a28f896aa90 100644 --- a/massa-ledger/src/types.rs +++ b/massa-ledger/src/types.rs @@ -1,8 +1,3 @@ -use crate::LedgerConfig; -use massa_hash::hash::Hash; -use massa_models::{prehash::Map, Address, Amount, Slot}; -use std::collections::{BTreeMap, VecDeque}; - /// represents a structure that supports another one being applied to it pub trait Applicable { fn apply(&mut self, _: V); diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index 7a73f6791d2..2a437c0f666 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -31,6 +31,7 @@ massa_consensus_worker = { path = "../massa-consensus-worker" } massa_execution_exports = { path = "../massa-execution-exports" } massa_execution_worker = { path = "../massa-execution-worker" } massa_logging = { path = "../massa-logging" } +massa_ledger = { path = "../massa-ledger" } massa_models = { path = "../massa-models" } massa_network = { path = "../massa-network" } massa_pool = { path = "../massa-pool" } diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 88f3f8729e0..7fc56423a88 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -14,7 +14,7 @@ use massa_consensus_exports::{ use massa_consensus_worker::start_consensus_controller; use massa_execution_exports::{ExecutionConfig, ExecutionManager}; use massa_execution_worker::start_execution_worker; - +use massa_ledger::{FinalLedger, LedgerConfig}; use massa_logging::massa_trace; use massa_models::{ constants::{ @@ -120,17 +120,25 @@ async fn launch() 
-> ( .await .expect("could not start pool controller"); - let execution_config = ExecutionConfigs { - settings: SETTINGS.execution.clone(), + // init ledger + let ledger_config = LedgerConfig { + initial_sce_ledger_path: SETTINGS.ledger.initial_sce_ledger_path, + final_history_length: SETTINGS.ledger.final_history_length, + ..Default::default() + }; + let final_ledger = Arc::new(RwLock::new(match bootstrap_state.final_ledger { + Some(l) => FinalLedger::from_bootstrap_state(ledger_config, l), + None => FinalLedger::new(ledger_config).expect("could not init final ledger"), + })); + + // launch execution module + let execution_config = ExecutionConfig { + max_final_events: SETTINGS.execution.max_final_events, + readonly_queue_length: SETTINGS.execution.readonly_queue_length, clock_compensation: bootstrap_state.compensation_millis, ..Default::default() }; - - // launch execution controller - let (execution_command_sender, execution_event_receiver, execution_manager) = - massa_execution::start_controller(execution_config, bootstrap_state.execution) - .await - .expect("could not start execution controller"); + let execution_manager = start_execution_worker(execution_config, final_ledger.clone()); let consensus_config = ConsensusConfig::from(&SETTINGS.consensus); // launch consensus controller @@ -138,8 +146,7 @@ async fn launch() -> ( start_consensus_controller( consensus_config.clone(), ConsensusChannels { - execution_command_sender: execution_command_sender.clone(), - execution_event_receiver, + execution_controller: execution_manager.get_controller(), protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender: pool_command_sender.clone(), @@ -252,7 +259,6 @@ async fn stop( // Stop execution controller. execution_manager .stop() - .await .expect("Failed to shutdown execution."); // stop pool controller @@ -269,6 +275,8 @@ async fn stop( .stop(network_event_receiver) .await .expect("network shutdown failed"); + + // note that FinalLedger gets destroyed as soon as its Arc count goes to zero } #[tokio::main] diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index 531caceaa9d..6d928bff4dd 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -3,7 +3,6 @@ //! 
Build here the default node settings from the config file toml use massa_bootstrap::settings::BootstrapSettings; use massa_consensus_exports::ConsensusSettings; -use massa_execution::ExecutionSettings; use massa_models::{ api::APISettings, constants::{build_massa_settings, OPERATION_VALIDITY_PERIODS, THREAD_COUNT}, @@ -27,6 +26,18 @@ pub struct LoggingSettings { pub level: usize, } +#[derive(Clone, Debug)] +pub struct ExecutionSettings { + max_final_events: usize, + readonly_queue_length: usize, +} + +#[derive(Clone, Debug)] +pub struct LedgerSettings { + initial_sce_ledger_path: usize, + final_history_length: usize, +} + #[derive(Debug, Deserialize, Clone)] pub struct Settings { pub logging: LoggingSettings, @@ -37,4 +48,5 @@ pub struct Settings { pub bootstrap: BootstrapSettings, pub pool: PoolSettings, pub execution: ExecutionSettings, + pub ledger: LedgerSettings, } From 2abd640fda746bf353cff40a8f8aef0fd8efb0b0 Mon Sep 17 00:00:00 2001 From: damip Date: Fri, 18 Feb 2022 18:18:21 +0100 Subject: [PATCH 20/73] serializatin and bootstrap --- Cargo.lock | 1 + massa-api/src/error.rs | 2 +- massa-api/src/lib.rs | 6 +- massa-api/src/private.rs | 6 +- massa-api/src/public.rs | 6 +- massa-bootstrap/Cargo.toml | 1 + massa-bootstrap/src/lib.rs | 26 ++-- massa-consensus-worker/src/tools.rs | 3 +- .../src/controller_traits.rs | 2 +- massa-ledger/src/bootstrap.rs | 86 ++++++++++++ massa-ledger/src/exports.rs | 46 ------- massa-ledger/src/ledger.rs | 10 +- massa-ledger/src/ledger_entry.rs | 124 +++++++++++++++++- massa-ledger/src/lib.rs | 3 +- 14 files changed, 237 insertions(+), 85 deletions(-) create mode 100644 massa-ledger/src/bootstrap.rs delete mode 100644 massa-ledger/src/exports.rs diff --git a/Cargo.lock b/Cargo.lock index dc7f65242cf..5b133bc815f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1477,6 +1477,7 @@ dependencies = [ "futures 0.3.21", "lazy_static", "massa_consensus_exports", + "massa_execution_exports", "massa_graph", "massa_hash", "massa_ledger", diff --git a/massa-api/src/error.rs b/massa-api/src/error.rs index 6c21d2c381e..da9b46823b2 100644 --- a/massa-api/src/error.rs +++ b/massa-api/src/error.rs @@ -2,7 +2,7 @@ use displaydoc::Display; use massa_consensus_exports::error::ConsensusError; -use massa_execution::ExecutionError; +use massa_execution_exports::ExecutionError; use massa_hash::MassaHashError; use massa_models::ModelsError; use massa_network::NetworkError; diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index 4cf4d65d1a1..fe6737da358 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -8,7 +8,7 @@ use jsonrpc_core::{BoxFuture, IoHandler, Value}; use jsonrpc_derive::rpc; use jsonrpc_http_server::{CloseHandle, ServerBuilder}; use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; -use massa_execution::ExecutionCommandSender; +use massa_execution_exports::ExecutionController; use massa_models::api::{ APISettings, AddressInfo, BlockInfo, BlockSummary, EndorsementInfo, EventFilter, NodeStatus, OperationInfo, ReadOnlyExecution, TimeInterval, @@ -36,7 +36,7 @@ mod public; pub struct Public { pub consensus_command_sender: ConsensusCommandSender, - pub execution_command_sender: ExecutionCommandSender, + pub execution_controller: Box, pub pool_command_sender: PoolCommandSender, pub consensus_config: ConsensusConfig, pub api_settings: &'static APISettings, @@ -50,7 +50,7 @@ pub struct Public { pub struct Private { pub consensus_command_sender: ConsensusCommandSender, pub network_command_sender: NetworkCommandSender, - execution_command_sender: 
ExecutionCommandSender, + execution_controller: Box, pub consensus_config: ConsensusConfig, pub api_settings: &'static APISettings, pub stop_node_channel: mpsc::Sender<()>, diff --git a/massa-api/src/private.rs b/massa-api/src/private.rs index 9b2d71629aa..bb371e0eb51 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -5,7 +5,7 @@ use crate::{Endpoints, Private, RpcServer, StopHandle, API}; use jsonrpc_core::BoxFuture; use jsonrpc_http_server::tokio::sync::mpsc; use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; -use massa_execution::ExecutionCommandSender; +use massa_execution_exports::ExecutionController; use massa_models::api::{ APISettings, AddressInfo, BlockInfo, BlockSummary, EndorsementInfo, EventFilter, NodeStatus, OperationInfo, ReadOnlyExecution, TimeInterval, @@ -24,7 +24,7 @@ impl API { pub fn new( consensus_command_sender: ConsensusCommandSender, network_command_sender: NetworkCommandSender, - execution_command_sender: ExecutionCommandSender, + execution_controller: Box, api_settings: &'static APISettings, consensus_settings: ConsensusConfig, ) -> (Self, mpsc::Receiver<()>) { @@ -33,7 +33,7 @@ impl API { API(Private { consensus_command_sender, network_command_sender, - execution_command_sender, + execution_controller, consensus_config: consensus_settings, api_settings, stop_node_channel, diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 0a87287c094..622fb65f340 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -5,7 +5,7 @@ use crate::{Endpoints, Public, RpcServer, StopHandle, API}; use futures::{stream::FuturesUnordered, StreamExt}; use jsonrpc_core::BoxFuture; use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; -use massa_execution::ExecutionCommandSender; +use massa_execution_exports::ExecutionController; use massa_graph::{DiscardReason, ExportBlockStatus}; use massa_models::{ api::{ @@ -30,7 +30,7 @@ use std::net::{IpAddr, SocketAddr}; impl API { pub fn new( consensus_command_sender: ConsensusCommandSender, - execution_command_sender: ExecutionCommandSender, + execution_controller: Box, api_settings: &'static APISettings, consensus_settings: ConsensusConfig, pool_command_sender: PoolCommandSender, @@ -50,7 +50,7 @@ impl API { network_command_sender, compensation_millis, node_id, - execution_command_sender, + execution_controller, }) } } diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index f57aa4ad318..a20cba60db5 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -19,6 +19,7 @@ tokio = { version = "1.11", features = ["full"] } tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } # custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } +massa_execution_exports = { path = "../massa-execution-exports" } massa_ledger = { path = "../massa-ledger" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index a23f109a5af..52b4b7dccc6 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -10,7 +10,7 @@ pub use establisher::Establisher; use futures::{stream::FuturesUnordered, StreamExt}; use massa_consensus_exports::ConsensusCommandSender; use massa_graph::BootstrapableGraph; -use massa_ledger::FinalLedgerBootstrapState; +use massa_ledger::{FinalLedger, FinalLedgerBootstrapState}; use massa_logging::massa_trace; use massa_models::Version; use 
massa_network::{BootstrapPeers, NetworkCommandSender}; @@ -22,6 +22,7 @@ use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng}; use settings::BootstrapSettings; use std::collections::{hash_map, HashMap}; use std::net::SocketAddr; +use std::sync::{Arc, RwLock}; use std::{convert::TryInto, net::IpAddr}; use tokio::time::Instant; use tokio::{sync::mpsc, task::JoinHandle, time::sleep}; @@ -274,7 +275,7 @@ impl BootstrapManager { pub async fn start_bootstrap_server( consensus_command_sender: ConsensusCommandSender, network_command_sender: NetworkCommandSender, - execution_command_sender: ExecutionCommandSender, + final_ledger: Arc>, bootstrap_settings: &'static BootstrapSettings, establisher: Establisher, private_key: PrivateKey, @@ -288,7 +289,7 @@ pub async fn start_bootstrap_server( BootstrapServer { consensus_command_sender, network_command_sender, - execution_command_sender, + final_ledger, establisher, manager_rx, bind, @@ -313,7 +314,7 @@ pub async fn start_bootstrap_server( struct BootstrapServer { consensus_command_sender: ConsensusCommandSender, network_command_sender: NetworkCommandSender, - execution_command_sender: ExecutionCommandSender, + final_ledger: Arc>, establisher: Establisher, manager_rx: mpsc::Receiver<()>, bind: SocketAddr, @@ -335,7 +336,7 @@ impl BootstrapServer { ExportProofOfStake, BootstrapableGraph, BootstrapPeers, - BootstrapExecutionState, + FinalLedgerBootstrapState, )> = None; let cache_timer = sleep(cache_timeout); let per_ip_min_interval = self.bootstrap_settings.per_ip_min_interval.to_duration(); @@ -409,14 +410,11 @@ impl BootstrapServer { // This is done to ensure that the execution bootstrap state is older than the consensus state. // If the consensus state snapshot is older than the execution state snapshot, // the execution final ledger will be in the future after bootstrap, which causes an inconsistency. 
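// Hedged illustration, not part of the patch: the ordering constraint that the
// comment above describes, written out as a check. The names are hypothetical;
// the intent is that the slot up to which the bootstrapped final ledger
// applies must not be ahead of the consensus/PoS snapshot, otherwise the
// client would start with a ledger "from the future" relative to the final
// blocks it is about to replay.
use massa_models::Slot;

fn snapshots_consistent(ledger_slot: Slot, last_css_final_slot: Slot) -> bool {
    // compare (period, thread) lexicographically: the ledger snapshot must be
    // at or before the consensus snapshot
    (ledger_slot.period, ledger_slot.thread)
        <= (last_css_final_slot.period, last_css_final_slot.thread)
}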
- let get_peers = self.network_command_sender.get_bootstrap_peers(); + let peer_boot = self.network_command_sender.get_bootstrap_peers().await?; let get_pos_graph = self.consensus_command_sender.get_bootstrap_state(); - let execution_state = self.execution_command_sender.get_bootstrap_state(); - let (res_peers, res_execution) = tokio::join!(get_peers, execution_state); - let peer_boot = res_peers?; - let execution_state = res_execution?; + let res_ledger = self.final_ledger.read().expect("could not lock final ledger for reading").get_bootstrap_state(); let (pos_boot, graph_boot) = get_pos_graph.await?; - bootstrap_data = Some((pos_boot, graph_boot, peer_boot, execution_state)); + bootstrap_data = Some((pos_boot, graph_boot, peer_boot, res_ledger)); cache_timer.set(sleep(cache_timeout)); } massa_trace!("bootstrap.lib.run.select.accept.cache_available", {}); @@ -451,7 +449,7 @@ async fn manage_bootstrap( data_pos: ExportProofOfStake, data_graph: BootstrapableGraph, data_peers: BootstrapPeers, - data_execution: BootstrapExecutionState, + ledger_state: FinalLedgerBootstrapState, private_key: PrivateKey, compensation_millis: i64, version: Version, @@ -504,8 +502,8 @@ async fn manage_bootstrap( // Fourth, send execution state send_state_timeout( write_timeout, - server.send(messages::BootstrapMessage::ExecutionState { - execution_state: data_execution, + server.send(messages::BootstrapMessage::FinalLedgerState { + ledger_state: ledger_state, }), "bootstrap execution state send timed out", ) diff --git a/massa-consensus-worker/src/tools.rs b/massa-consensus-worker/src/tools.rs index 86e32e8296e..4b8f6952e00 100644 --- a/massa-consensus-worker/src/tools.rs +++ b/massa-consensus-worker/src/tools.rs @@ -93,8 +93,7 @@ pub async fn start_consensus_controller( ConsensusWorkerChannels { protocol_command_sender: channels.protocol_command_sender, protocol_event_receiver: channels.protocol_event_receiver, - execution_event_receiver: channels.execution_event_receiver, - execution_command_sender: channels.execution_command_sender, + execution_controller: channels.execution_controller, pool_command_sender: channels.pool_command_sender, controller_command_rx: command_rx, controller_event_tx: event_tx, diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 91a0401b4e3..59a772fe00e 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -10,7 +10,7 @@ use massa_models::BlockId; use massa_models::OperationId; use massa_models::Slot; -pub trait ExecutionController { +pub trait ExecutionController: Sync + Send { /// Update blockclique status /// /// # arguments diff --git a/massa-ledger/src/bootstrap.rs b/massa-ledger/src/bootstrap.rs new file mode 100644 index 00000000000..5d10652eca7 --- /dev/null +++ b/massa-ledger/src/bootstrap.rs @@ -0,0 +1,86 @@ +use std::collections::BTreeMap; + +use massa_models::{ + array_from_slice, constants::ADDRESS_SIZE_BYTES, Address, DeserializeCompact, + DeserializeVarInt, ModelsError, SerializeCompact, SerializeVarInt, Slot, +}; +use serde::{Deserialize, Serialize}; + +use crate::LedgerEntry; + +/// temporary ledger bootstrap structure +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FinalLedgerBootstrapState { + /// ledger slot + pub(crate) slot: Slot, + /// sorted ledger + pub(crate) sorted_ledger: BTreeMap, +} + +impl SerializeCompact for FinalLedgerBootstrapState { + fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { + let 
mut res: Vec = Vec::new(); + + // final slot + res.extend(self.slot.to_bytes_compact()?); + + // final ledger size + let ledger_size: u64 = self.sorted_ledger.len().try_into().map_err(|_| { + ModelsError::SerializeError("could not represent ledger size as u64".into()) + })?; + res.extend(ledger_size.to_varint_bytes()); + + // ledger elements + for (addr, entry) in &self.sorted_ledger { + // address + res.extend(addr.to_bytes()); + + // entry + res.extend(entry.to_bytes_compact()?); + } + + Ok(res) + } +} + +impl DeserializeCompact for FinalLedgerBootstrapState { + fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { + let mut cursor = 0usize; + + // final slot + let (slot, delta) = Slot::from_bytes_compact(&buffer[cursor..])?; + cursor += delta; + + // ledger size + let (ledger_size, delta) = u64::from_varint_bytes(&buffer[cursor..])? + .try_into() + .map_err(|_| { + ModelsError::SerializeError("could not convert ledger size to usize".into()) + })?; + // TODO cap the ledger size + cursor += delta; + + // final ledger + let mut sorted_ledger: BTreeMap = BTreeMap::new(); + cursor += delta; + for _ in 0..ledger_size { + // address + let addr = Address::from_bytes(&array_from_slice(&buffer[cursor..])?)?; + cursor += ADDRESS_SIZE_BYTES; + + // entry + let (entry, delta) = LedgerEntry::from_bytes_compact(&buffer[cursor..])?; + cursor += delta; + + sorted_ledger.insert(addr, entry); + } + + Ok(( + FinalLedgerBootstrapState { + slot, + sorted_ledger, + }, + cursor, + )) + } +} diff --git a/massa-ledger/src/exports.rs b/massa-ledger/src/exports.rs deleted file mode 100644 index 83203d0de72..00000000000 --- a/massa-ledger/src/exports.rs +++ /dev/null @@ -1,46 +0,0 @@ -use massa_models::{DeserializeCompact, SerializeCompact, Slot}; -use serde::{Deserialize, Serialize}; - -use crate::sce_ledger::SCELedger; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct BootstrapExecutionState { - pub final_ledger: SCELedger, - pub final_slot: Slot, -} - -impl SerializeCompact for BootstrapExecutionState { - fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { - let mut res: Vec = Vec::new(); - - // final ledger - res.extend(self.final_ledger.to_bytes_compact()?); - - // final slot - res.extend(self.final_slot.to_bytes_compact()?); - - Ok(res) - } -} - -impl DeserializeCompact for BootstrapExecutionState { - fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { - let mut cursor = 0usize; - - // final ledger - let (final_ledger, delta) = SCELedger::from_bytes_compact(&buffer[cursor..])?; - cursor += delta; - - // final slot - let (final_slot, delta) = Slot::from_bytes_compact(&buffer[cursor..])?; - cursor += delta; - - Ok(( - BootstrapExecutionState { - final_ledger, - final_slot, - }, - cursor, - )) - } -} diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index fb0d277f2d6..85584f369d5 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -3,19 +3,11 @@ use crate::ledger_changes::LedgerChanges; use crate::ledger_entry::LedgerEntry; use crate::types::{Applicable, SetUpdateOrDelete}; -use crate::{LedgerConfig, LedgerError}; +use crate::{FinalLedgerBootstrapState, LedgerConfig, LedgerError}; use massa_hash::hash::Hash; use massa_models::{Address, Amount, Slot}; use std::collections::{BTreeMap, VecDeque}; -/// temporary ledger bootstrap structure -pub struct FinalLedgerBootstrapState { - /// ledger slot - slot: Slot, - /// sorted ledger - sorted_ledger: BTreeMap, -} - /// 
represents a final ledger pub struct FinalLedger { /// ledger config diff --git a/massa-ledger/src/ledger_entry.rs b/massa-ledger/src/ledger_entry.rs index 43df0b50031..0d2264a7741 100644 --- a/massa-ledger/src/ledger_entry.rs +++ b/massa-ledger/src/ledger_entry.rs @@ -1,11 +1,14 @@ use crate::ledger_changes::LedgerEntryUpdate; use crate::types::{Applicable, SetOrDelete}; use massa_hash::hash::Hash; -use massa_models::Amount; +use massa_hash::HASH_SIZE_BYTES; +use massa_models::{array_from_slice, Amount, DeserializeVarInt, ModelsError, SerializeVarInt}; +use massa_models::{DeserializeCompact, SerializeCompact}; +use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; /// structure defining a ledger entry -#[derive(Default, Debug, Clone)] +#[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct LedgerEntry { pub parallel_balance: Amount, pub bytecode: Vec, @@ -30,3 +33,120 @@ impl Applicable for LedgerEntry { } } } + +/// serialize as compact binary +impl SerializeCompact for LedgerEntry { + fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { + let mut res: Vec = Vec::new(); + + // parallel balance + res.extend(self.parallel_balance.to_bytes_compact()?); + + // bytecode length + let bytecode_len: u64 = self.bytecode.len().try_into().map_err(|_| { + ModelsError::SerializeError("could not convert bytecode size to u64".into()) + })?; + res.extend(bytecode_len.to_varint_bytes()); + + // bytecode + res.extend(&self.bytecode); + + // datastore length + let datastore_len: u64 = self.datastore.len().try_into().map_err(|_| { + ModelsError::SerializeError("could not convert datastore size to u64".into()) + })?; + res.extend(datastore_len.to_varint_bytes()); + + // datastore + for (key, value) in &self.datastore { + // key + res.extend(key.to_bytes()); + + // value length + let value_len: u64 = value.len().try_into().map_err(|_| { + ModelsError::SerializeError("could not convert datastore value size to u64".into()) + })?; + res.extend(value_len.to_varint_bytes()); + + // value + res.extend(value); + } + + Ok(res) + } +} + +impl DeserializeCompact for LedgerEntry { + fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { + let mut cursor = 0usize; + + // parallel balance + let (parallel_balance, delta) = Amount::from_bytes_compact(&buffer[cursor..])?; + cursor += delta; + + // bytecode length + let (bytecode_len, delta) = u64::from_varint_bytes(&buffer[cursor..])?; + let bytecode_len: usize = bytecode_len.try_into().map_err(|_| { + ModelsError::SerializeError("could not convert bytecode size to usize".into()) + })?; + //TODO cap bytecode length + cursor += delta; + + // bytecode + let bytecode = if let Some(slice) = buffer.get(cursor..(cursor + (bytecode_len as usize))) { + cursor += bytecode_len as usize; + slice.to_vec() + } else { + return Err(ModelsError::DeserializeError( + "could not deserialize ledger entry bytecode: buffer too small".into(), + )); + }; + + // datastore length + let (datastore_len, delta) = u64::from_varint_bytes(&buffer[cursor..])?; + let datastore_len: usize = datastore_len.try_into().map_err(|_| { + ModelsError::SerializeError("could not convert datastore size to usize".into()) + })?; + //TODO cap datastore length + cursor += delta; + + // datastore entries + let mut datastore: BTreeMap> = BTreeMap::new(); + for _ in 0..datastore_len { + // key + let key = Hash::from_bytes(&array_from_slice(&buffer[cursor..])?)?; + cursor += HASH_SIZE_BYTES; + + // value length + let (value_len, delta) = 
u64::from_varint_bytes(&buffer[cursor..])?; + let value_len: usize = value_len.try_into().map_err(|_| { + ModelsError::SerializeError( + "could not convert datastore entry value size to usize".into(), + ) + })?; + //TODO cap value length + cursor += delta; + + // value + let value = if let Some(slice) = buffer.get(cursor..(cursor + (value_len as usize))) { + cursor += value_len as usize; + slice.to_vec() + } else { + return Err(ModelsError::DeserializeError( + "could not deserialize ledger entry datastore value: buffer too small".into(), + )); + }; + + datastore.insert(key, value); + } + + Ok(( + LedgerEntry { + parallel_balance, + bytecode, + datastore, + }, + cursor, + )) + } +} diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs index 7c6e26bd507..5a07fe78cae 100644 --- a/massa-ledger/src/lib.rs +++ b/massa-ledger/src/lib.rs @@ -6,6 +6,7 @@ #[macro_use] extern crate massa_logging; +mod bootstrap; mod config; mod error; mod ledger; @@ -13,10 +14,10 @@ mod ledger_changes; mod ledger_entry; mod types; +pub use bootstrap::FinalLedgerBootstrapState; pub use config::LedgerConfig; pub use error::LedgerError; pub use ledger::FinalLedger; -pub use ledger::FinalLedgerBootstrapState; pub use ledger_changes::LedgerChanges; pub use ledger_entry::LedgerEntry; pub use types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete}; From 98a7c2ed50d54edfcceac7e9614cbcad64db64e2 Mon Sep 17 00:00:00 2001 From: damip Date: Sat, 19 Feb 2022 10:37:04 +0100 Subject: [PATCH 21/73] implement api read only calls --- massa-api/src/private.rs | 15 ++--------- massa-api/src/public.rs | 52 +++++++++++++++++++++++++++++++++++--- massa-node/src/main.rs | 16 +++++++----- massa-node/src/settings.rs | 2 ++ 4 files changed, 62 insertions(+), 23 deletions(-) diff --git a/massa-api/src/private.rs b/massa-api/src/private.rs index bb371e0eb51..6f34ccf3ad8 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -76,20 +76,9 @@ impl Endpoints for API { fn execute_read_only_request( &self, - ReadOnlyExecution { - max_gas, - simulated_gas_price, - bytecode, - address, - }: ReadOnlyExecution, + _: ReadOnlyExecution, ) -> BoxFuture> { - let cmd_sender = self.0.execution_command_sender.clone(); - let closure = async move || { - Ok(cmd_sender - .execute_read_only_request(max_gas, simulated_gas_price, bytecode, address) - .await?) - }; - Box::pin(closure()) + crate::wrong_api::() } fn remove_staking_addresses(&self, keys: Vec
) -> BoxFuture> { diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 622fb65f340..e84d1d6a7d6 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -5,8 +5,11 @@ use crate::{Endpoints, Public, RpcServer, StopHandle, API}; use futures::{stream::FuturesUnordered, StreamExt}; use jsonrpc_core::BoxFuture; use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; -use massa_execution_exports::ExecutionController; +use massa_execution_exports::{ + ExecutionController, ExecutionStackElement, ReadOnlyExecutionRequest, +}; use massa_graph::{DiscardReason, ExportBlockStatus}; +use massa_models::execution::ReadOnlyResult; use massa_models::{ api::{ APISettings, AddressInfo, BlockInfo, BlockInfoContent, BlockSummary, EndorsementInfo, @@ -23,7 +26,7 @@ use massa_models::{ }; use massa_network::{NetworkCommandSender, NetworkSettings}; use massa_pool::PoolCommandSender; -use massa_signature::PrivateKey; +use massa_signature::{derive_public_key, generate_random_private_key, PrivateKey, PublicKey}; use massa_time::MassaTime; use std::net::{IpAddr, SocketAddr}; @@ -77,9 +80,50 @@ impl Endpoints for API { fn execute_read_only_request( &self, - _: ReadOnlyExecution, + ReadOnlyExecution { + max_gas, + simulated_gas_price, + bytecode, + address, + }: ReadOnlyExecution, ) -> BoxFuture> { - crate::wrong_api::() + let address = address.unwrap_or_else(|| { + // if no addr provided, use a random one + Address::from_public_key(&derive_public_key(&generate_random_private_key())) + }); + + // TODO: + // * stop mapping request and result, reuse execution's structures + // * remove async stuff + + // translate request + let req = ReadOnlyExecutionRequest { + max_gas, + simulated_gas_price, + bytecode, + call_stack: vec![ExecutionStackElement { + address, + coins: Default::default(), + owned_addresses: vec![address], + }], + }; + + // run + let result = self.0.execution_controller.execute_readonly_request(req); + + // map result + let result = ExecuteReadOnlyResponse { + executed_at: result.as_ref().map_or_else(|_| Slot::new(0, 0), |v| v.slot), + result: result.as_ref().map_or_else( + |err| ReadOnlyResult::Error(format!("readonly call failed: {}", err)), + |_| ReadOnlyResult::Ok, + ), + output_events: result.map_or_else(|_| Default::default(), |v| v.events.export()), + }; + + // return result + let closure = async move || Ok(result); + Box::pin(closure()) } fn remove_staking_addresses(&self, _: Vec
) -> BoxFuture> { diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 7fc56423a88..b8fea72a8ca 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -18,7 +18,8 @@ use massa_ledger::{FinalLedger, LedgerConfig}; use massa_logging::massa_trace; use massa_models::{ constants::{ - END_TIMESTAMP, GENESIS_TIMESTAMP, MAX_GAS_PER_BLOCK, OPERATION_VALIDITY_PERIODS, VERSION, + END_TIMESTAMP, GENESIS_TIMESTAMP, MAX_GAS_PER_BLOCK, OPERATION_VALIDITY_PERIODS, + THREAD_COUNT, VERSION, }, init_serialization_context, SerializationContext, }; @@ -124,7 +125,7 @@ async fn launch() -> ( let ledger_config = LedgerConfig { initial_sce_ledger_path: SETTINGS.ledger.initial_sce_ledger_path, final_history_length: SETTINGS.ledger.final_history_length, - ..Default::default() + thread_count: THREAD_COUNT, }; let final_ledger = Arc::new(RwLock::new(match bootstrap_state.final_ledger { Some(l) => FinalLedger::from_bootstrap_state(ledger_config, l), @@ -135,8 +136,11 @@ async fn launch() -> ( let execution_config = ExecutionConfig { max_final_events: SETTINGS.execution.max_final_events, readonly_queue_length: SETTINGS.execution.readonly_queue_length, + cursor_delay: SETTINGS.execution.cursor_delay, clock_compensation: bootstrap_state.compensation_millis, - ..Default::default() + thread_count: THREAD_COUNT, + t0: T0, + genesis_timestamp: GENESIS_TIMESTAMP, }; let execution_manager = start_execution_worker(execution_config, final_ledger.clone()); @@ -162,7 +166,7 @@ async fn launch() -> ( let bootstrap_manager = start_bootstrap_server( consensus_command_sender.clone(), network_command_sender.clone(), - execution_command_sender.clone(), + final_ledger.clone(), &SETTINGS.bootstrap, massa_bootstrap::Establisher::new(), private_key, @@ -176,7 +180,7 @@ async fn launch() -> ( let (api_private, api_private_stop_rx) = API::::new( consensus_command_sender.clone(), network_command_sender.clone(), - execution_command_sender.clone(), + execution_controller.clone(), &SETTINGS.api, consensus_config.clone(), ); @@ -185,7 +189,7 @@ async fn launch() -> ( // spawn public API let api_public = API::::new( consensus_command_sender.clone(), - execution_command_sender, + execution_controller, &SETTINGS.api, consensus_config, pool_command_sender.clone(), diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index 6d928bff4dd..cccd2586866 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -10,6 +10,7 @@ use massa_models::{ use massa_network::NetworkSettings; use massa_pool::{PoolConfig, PoolSettings}; use massa_protocol_exports::ProtocolSettings; +use massa_time::MassaTime; use serde::Deserialize; lazy_static::lazy_static! 
{ @@ -30,6 +31,7 @@ pub struct LoggingSettings { pub struct ExecutionSettings { max_final_events: usize, readonly_queue_length: usize, + cursor_delay: MassaTime, } #[derive(Clone, Debug)] From 369aa3e5d27728e4bf1e963d599ebaa632b0f0e3 Mon Sep 17 00:00:00 2001 From: damip Date: Sat, 19 Feb 2022 11:02:05 +0100 Subject: [PATCH 22/73] make API compile --- massa-api/src/lib.rs | 2 +- massa-api/src/public.rs | 62 ++++++++++++------- .../src/controller_traits.rs | 2 +- massa-execution-worker/src/controller.rs | 2 +- 4 files changed, 44 insertions(+), 24 deletions(-) diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index fe6737da358..ae0ae0cf59a 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -50,7 +50,7 @@ pub struct Public { pub struct Private { pub consensus_command_sender: ConsensusCommandSender, pub network_command_sender: NetworkCommandSender, - execution_controller: Box, + pub execution_controller: Box, pub consensus_config: ConsensusConfig, pub api_settings: &'static APISettings, pub stop_node_channel: mpsc::Sender<()>, diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index e84d1d6a7d6..1b5c4ceafec 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -9,6 +9,7 @@ use massa_execution_exports::{ ExecutionController, ExecutionStackElement, ReadOnlyExecutionRequest, }; use massa_graph::{DiscardReason, ExportBlockStatus}; +use massa_models::api::SCELedgerInfo; use massa_models::execution::ReadOnlyResult; use massa_models::{ api::{ @@ -26,7 +27,7 @@ use massa_models::{ }; use massa_network::{NetworkCommandSender, NetworkSettings}; use massa_pool::PoolCommandSender; -use massa_signature::{derive_public_key, generate_random_private_key, PrivateKey, PublicKey}; +use massa_signature::{derive_public_key, generate_random_private_key, PrivateKey}; use massa_time::MassaTime; use std::net::{IpAddr, SocketAddr}; @@ -400,11 +401,32 @@ impl Endpoints for API { let api_cfg = self.0.api_settings; let pool_command_sender = self.0.pool_command_sender.clone(); let compensation_millis = self.0.compensation_millis; - let sce_command_sender = self.0.execution_command_sender.clone(); - let closure = async move || { - if addresses.len() as u64 > api_cfg.max_arguments { - return Err(ApiError::TooManyArguments("too many arguments".into())); + + // todo make better use of SCE ledger info + + // map SCE ledger info and check for address length + let sce_ledger_info = if addresses.len() as u64 > api_cfg.max_arguments { + Err(ApiError::TooManyArguments("too many arguments".into())) + } else { + // get SCE ledger info + let mut sce_ledger_info: Map = + Map::with_capacity_and_hasher(addresses.len(), BuildMap::default()); + for addr in &addresses { + let active_entry = match self.0.execution_controller.get_full_ledger_entry(addr).1 { + None => continue, + Some(v) => SCELedgerInfo { + balance: v.parallel_balance, + module: Some(v.bytecode), + datastore: v.datastore.into_iter().collect(), + }, + }; + sce_ledger_info.insert(*addr, active_entry); } + Ok(sce_ledger_info) + }; + + let closure = async move || { + let sce_ledger_info = sce_ledger_info?; let mut res = Vec::with_capacity(addresses.len()); @@ -427,11 +449,10 @@ impl Endpoints for API { // roll and balance info let states = cmd_sender.get_addresses_info(addresses.iter().copied().collect()); - let sce_info = sce_command_sender.get_sce_ledger_for_addresses(addresses.clone()); // wait for both simultaneously - let (next_draws, states, sce_info) = tokio::join!(next_draws, states, sce_info); - let (next_draws, mut states, 
sce_info) = (next_draws?, states?, sce_info?); + let (next_draws, states) = tokio::join!(next_draws, states); + let (next_draws, mut states) = (next_draws?, states?); // operations block and endorsement info let mut operations: Map> = @@ -513,7 +534,7 @@ impl Endpoints for API { .remove(&address) .ok_or(ApiError::NotFound)?, production_stats: state.production_stats, - sce_ledger_info: sce_info.get(&address).cloned().unwrap_or_default(), + sce_ledger_info: sce_ledger_info.get(&address).cloned().unwrap_or_default(), }) } Ok(res) @@ -558,18 +579,17 @@ impl Endpoints for API { original_operation_id, }: EventFilter, ) -> BoxFuture, ApiError>> { - let execution_command_sender = self.0.execution_command_sender.clone(); - let closure = async move || { - Ok(execution_command_sender - .get_filtered_sc_output_event( - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - ) - .await?) - }; + // get events + let events = self.0.execution_controller.get_filtered_sc_output_event( + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + ); + + // TODO get rid of the async part + let closure = async move || Ok(events); Box::pin(closure()) } } diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 59a772fe00e..11047ab43c8 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -45,7 +45,7 @@ pub trait ExecutionController: Sync + Send { /// Executes a readonly request fn execute_readonly_request( - &mut self, + &self, req: ReadOnlyExecutionRequest, ) -> Result; } diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index a9a1d84ca7e..8d850ea306c 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -115,7 +115,7 @@ impl ExecutionController for ExecutionControllerImpl { /// Executes a readonly request fn execute_readonly_request( - &mut self, + &self, req: ReadOnlyExecutionRequest, ) -> Result { // queue request From 9c6328221a84235cb7c8d658901372076a07a2f6 Mon Sep 17 00:00:00 2001 From: damip Date: Sat, 19 Feb 2022 11:25:48 +0100 Subject: [PATCH 23/73] make the whole program compile --- .../src/controller_traits.rs | 2 +- massa-execution-worker/src/controller.rs | 11 ++++---- massa-execution-worker/src/vm_thread.rs | 10 +++---- massa-ledger/src/lib.rs | 3 --- massa-node/src/main.rs | 27 ++++++++++--------- massa-node/src/settings.rs | 18 +++++++------ 6 files changed, 36 insertions(+), 35 deletions(-) diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 11047ab43c8..52989cc9903 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -53,7 +53,7 @@ pub trait ExecutionController: Sync + Send { /// execution manager pub trait ExecutionManager { /// stops the VM - fn stop(self); + fn stop(&mut self); /// get a shared reference to the VM controller fn get_controller(&self) -> Box; diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 8d850ea306c..4cb55049604 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -155,12 +155,12 @@ pub struct ExecutionManagerImpl { /// shared reference to the VM controller pub(crate) controller: ExecutionControllerImpl, /// handle used to join the VM thread - pub(crate) 
thread_handle: std::thread::JoinHandle<()>, + pub(crate) thread_handle: Option>, } impl ExecutionManager for ExecutionManagerImpl { /// stops the VM - fn stop(self) { + fn stop(&mut self) { info!("stopping VM controller..."); // notify the VM thread to stop { @@ -174,9 +174,10 @@ impl ExecutionManager for ExecutionManagerImpl { self.controller.input_data.0.notify_one(); } // join the VM thread - self.thread_handle - .join() - .expect("VM controller thread panicked"); + if let Some(join_handle) = self.thread_handle.take() { + join_handle.join().expect("VM controller thread panicked"); + } + info!("VM controller stopped"); } diff --git a/massa-execution-worker/src/vm_thread.rs b/massa-execution-worker/src/vm_thread.rs index a3fa1f46a91..8f91d416a8c 100644 --- a/massa-execution-worker/src/vm_thread.rs +++ b/massa-execution-worker/src/vm_thread.rs @@ -1,7 +1,7 @@ use crate::controller::{ExecutionControllerImpl, ExecutionManagerImpl, VMInputData}; use crate::execution::ExecutionState; use massa_execution_exports::{ - ExecutionConfig, ExecutionError, ExecutionOutput, ReadOnlyExecutionRequest, + ExecutionConfig, ExecutionError, ExecutionManager, ExecutionOutput, ReadOnlyExecutionRequest, }; use massa_ledger::FinalLedger; use massa_models::BlockId; @@ -423,7 +423,7 @@ impl VMThread { pub fn start_execution_worker( config: ExecutionConfig, final_ledger: Arc>, -) -> ExecutionManagerImpl { +) -> Box { // create an execution state let execution_state = Arc::new(RwLock::new(ExecutionState::new( config.clone(), @@ -450,8 +450,8 @@ pub fn start_execution_worker( }); // return the VM manager - ExecutionManagerImpl { + Box::new(ExecutionManagerImpl { controller, - thread_handle, - } + thread_handle: Some(thread_handle), + }) } diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs index 5a07fe78cae..17c095ccd77 100644 --- a/massa-ledger/src/lib.rs +++ b/massa-ledger/src/lib.rs @@ -3,9 +3,6 @@ #![feature(map_first_last)] #![feature(async_closure)] -#[macro_use] -extern crate massa_logging; - mod bootstrap; mod config; mod error; diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index b8fea72a8ca..af1bdd7758b 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -18,7 +18,7 @@ use massa_ledger::{FinalLedger, LedgerConfig}; use massa_logging::massa_trace; use massa_models::{ constants::{ - END_TIMESTAMP, GENESIS_TIMESTAMP, MAX_GAS_PER_BLOCK, OPERATION_VALIDITY_PERIODS, + END_TIMESTAMP, GENESIS_TIMESTAMP, MAX_GAS_PER_BLOCK, OPERATION_VALIDITY_PERIODS, T0, THREAD_COUNT, VERSION, }, init_serialization_context, SerializationContext, @@ -28,7 +28,10 @@ use massa_pool::{start_pool_controller, PoolCommandSender, PoolManager}; use massa_protocol_exports::ProtocolManager; use massa_protocol_worker::start_protocol_controller; use massa_time::MassaTime; -use std::process; +use std::{ + process, + sync::{Arc, RwLock}, +}; use tokio::signal; use tokio::sync::mpsc; use tracing::{error, info, warn}; @@ -46,7 +49,7 @@ async fn launch() -> ( NetworkCommandSender, Option, ConsensusManager, - ExecutionManager, + Box, PoolManager, ProtocolManager, NetworkManager, @@ -123,7 +126,7 @@ async fn launch() -> ( // init ledger let ledger_config = LedgerConfig { - initial_sce_ledger_path: SETTINGS.ledger.initial_sce_ledger_path, + initial_sce_ledger_path: SETTINGS.ledger.initial_sce_ledger_path.clone(), final_history_length: SETTINGS.ledger.final_history_length, thread_count: THREAD_COUNT, }; @@ -140,7 +143,7 @@ async fn launch() -> ( clock_compensation: bootstrap_state.compensation_millis, thread_count: 
THREAD_COUNT, t0: T0, - genesis_timestamp: GENESIS_TIMESTAMP, + genesis_timestamp: *GENESIS_TIMESTAMP, }; let execution_manager = start_execution_worker(execution_config, final_ledger.clone()); @@ -180,7 +183,7 @@ async fn launch() -> ( let (api_private, api_private_stop_rx) = API::::new( consensus_command_sender.clone(), network_command_sender.clone(), - execution_controller.clone(), + execution_manager.get_controller(), &SETTINGS.api, consensus_config.clone(), ); @@ -189,7 +192,7 @@ async fn launch() -> ( // spawn public API let api_public = API::::new( consensus_command_sender.clone(), - execution_controller, + execution_manager.get_controller(), &SETTINGS.api, consensus_config, pool_command_sender.clone(), @@ -221,7 +224,7 @@ async fn launch() -> ( struct Managers { bootstrap_manager: Option, consensus_manager: ConsensusManager, - execution_manager: ExecutionManager, + execution_manager: Box, pool_manager: PoolManager, protocol_manager: ProtocolManager, network_manager: NetworkManager, @@ -232,7 +235,7 @@ async fn stop( Managers { bootstrap_manager, consensus_manager, - execution_manager, + mut execution_manager, pool_manager, protocol_manager, network_manager, @@ -255,15 +258,13 @@ async fn stop( api_private_handle.stop(); // stop consensus controller - let (protocol_event_receiver, _execution_event_receiver) = consensus_manager + let protocol_event_receiver = consensus_manager .stop(consensus_event_receiver) .await .expect("consensus shutdown failed"); // Stop execution controller. - execution_manager - .stop() - .expect("Failed to shutdown execution."); + execution_manager.stop(); // stop pool controller let protocol_pool_event_receiver = pool_manager.stop().await.expect("pool shutdown failed"); diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index cccd2586866..5e47c25a0d9 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -1,6 +1,8 @@ // Copyright (c) 2021 MASSA LABS //! Build here the default node settings from the config file toml +use std::path::PathBuf; + use massa_bootstrap::settings::BootstrapSettings; use massa_consensus_exports::ConsensusSettings; use massa_models::{ @@ -22,22 +24,22 @@ lazy_static::lazy_static! 
{ }; } -#[derive(Debug, Deserialize, Clone, Copy)] +#[derive(Debug, Deserialize, Clone)] pub struct LoggingSettings { pub level: usize, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Deserialize)] pub struct ExecutionSettings { - max_final_events: usize, - readonly_queue_length: usize, - cursor_delay: MassaTime, + pub max_final_events: usize, + pub readonly_queue_length: usize, + pub cursor_delay: MassaTime, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Deserialize)] pub struct LedgerSettings { - initial_sce_ledger_path: usize, - final_history_length: usize, + pub initial_sce_ledger_path: PathBuf, + pub final_history_length: usize, } #[derive(Debug, Deserialize, Clone)] From 18294e520070ef407b63279b028689faf7642c90 Mon Sep 17 00:00:00 2001 From: damip Date: Sat, 19 Feb 2022 11:36:02 +0100 Subject: [PATCH 24/73] update config --- massa-api/src/public.rs | 1 + massa-node/base_config/config.toml | 13 +++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 1b5c4ceafec..6dbac451e27 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -94,6 +94,7 @@ impl Endpoints for API { }); // TODO: + // * set a maximum gas value for read-only executions to prevent attacks // * stop mapping request and result, reuse execution's structures // * remove async stuff diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index 08e3562fd31..633f93586a0 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -15,10 +15,19 @@ max_arguments = 128 [execution] - # path to the initial smart contract balance ledger - initial_sce_ledger_path = "base_config/initial_sce_ledger.json" # max number of generated events kept in RAM max_final_events = 100 + # maximum length of the read-only execution requests queue + readonly_queue_length = 10 + # by how many milliseconds shoud the execution lag behind real time + # higher values increase speculative execution lag but improve performance + cursor_delay = 0 + +[ledger] + # path to the initial smart contract balance ledger + initial_sce_ledger_path = "base_config/initial_sce_ledger.json" + # length of the changes history. 
Higher values allow bootstrapping nodes with slower connections + final_history_length = 100 [consensus] # max number of previously discarded blocks kept in RAM From 262336f24bffcb1aecf1f3929627b7532d9c83c8 Mon Sep 17 00:00:00 2001 From: damip Date: Sun, 20 Feb 2022 20:14:02 +0100 Subject: [PATCH 25/73] testing apparatus --- massa-execution-exports/Cargo.toml | 3 +++ massa-execution-exports/src/config.rs | 2 ++ .../src/controller_traits.rs | 2 ++ massa-execution-exports/src/error.rs | 2 ++ massa-execution-exports/src/event_store.rs | 2 ++ massa-execution-exports/src/lib.rs | 5 +++++ .../src/test_exports/config.rs | 21 +++++++++++++++++++ .../src/test_exports/mod.rs | 7 +++++++ massa-execution-exports/src/types.rs | 2 ++ massa-execution-worker/Cargo.toml | 2 ++ massa-execution-worker/src/context.rs | 2 ++ massa-execution-worker/src/controller.rs | 2 ++ massa-execution-worker/src/execution.rs | 2 ++ massa-execution-worker/src/interface_impl.rs | 2 ++ massa-execution-worker/src/lib.rs | 2 ++ .../src/speculative_ledger.rs | 2 ++ massa-execution-worker/src/tests/mod.rs | 2 +- .../src/tests/scenarios_mandatories.rs | 2 +- massa-execution-worker/src/vm_thread.rs | 2 ++ massa-ledger/src/bootstrap.rs | 2 ++ massa-ledger/src/config.rs | 2 +- massa-ledger/src/error.rs | 2 +- massa-ledger/src/ledger.rs | 2 +- massa-ledger/src/ledger_changes.rs | 2 ++ massa-ledger/src/ledger_entry.rs | 2 ++ massa-ledger/src/lib.rs | 2 +- massa-ledger/src/types.rs | 2 ++ 27 files changed, 76 insertions(+), 6 deletions(-) create mode 100644 massa-execution-exports/src/test_exports/config.rs create mode 100644 massa-execution-exports/src/test_exports/mod.rs diff --git a/massa-execution-exports/Cargo.toml b/massa-execution-exports/Cargo.toml index 1cdddf5ee01..f7bf3a4db14 100644 --- a/massa-execution-exports/Cargo.toml +++ b/massa-execution-exports/Cargo.toml @@ -29,3 +29,6 @@ massa_ledger = { path = "../massa-ledger" } pretty_assertions = "1.0" serial_test = "0.5" tempfile = "3.2" + +[features] +testing = [] diff --git a/massa-execution-exports/src/config.rs b/massa-execution-exports/src/config.rs index ef03e7778eb..95443434148 100644 --- a/massa-execution-exports/src/config.rs +++ b/massa-execution-exports/src/config.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use massa_time::MassaTime; /// VM module configuration diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 52989cc9903..577977c3934 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use crate::types::ExecutionOutput; use crate::types::ReadOnlyExecutionRequest; use crate::ExecutionError; diff --git a/massa-execution-exports/src/error.rs b/massa-execution-exports/src/error.rs index 95c24f13326..ca9b82d9f83 100644 --- a/massa-execution-exports/src/error.rs +++ b/massa-execution-exports/src/error.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use displaydoc::Display; use thiserror::Error; diff --git a/massa-execution-exports/src/event_store.rs b/massa-execution-exports/src/event_store.rs index bc9fd0e3749..48c8f9eadff 100644 --- a/massa-execution-exports/src/event_store.rs +++ b/massa-execution-exports/src/event_store.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use massa_models::output_event::{SCOutputEvent, SCOutputEventId}; use massa_models::prehash::{Map, PreHashed, Set}; /// Define types used while executing block bytecodes diff --git 
a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs index f2f823b5cc0..22951ed78e7 100644 --- a/massa-execution-exports/src/lib.rs +++ b/massa-execution-exports/src/lib.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + mod config; mod controller_traits; mod error; @@ -9,3 +11,6 @@ pub use controller_traits::{ExecutionController, ExecutionManager}; pub use error::ExecutionError; pub use event_store::EventStore; pub use types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest}; + +#[cfg(feature = "testing")] +pub mod test_exports; diff --git a/massa-execution-exports/src/test_exports/config.rs b/massa-execution-exports/src/test_exports/config.rs new file mode 100644 index 00000000000..d17011c7027 --- /dev/null +++ b/massa-execution-exports/src/test_exports/config.rs @@ -0,0 +1,21 @@ +// Copyright (c) 2022 MASSA LABS + +/// This file defines testing tools related to the config +use massa_time::MassaTime; + +use crate::ExecutionConfig; + +/// Default value of ExecutionConfig used for tests +impl Default for ExecutionConfig { + fn default() -> Self { + ExecutionConfig { + readonly_queue_length: 10, + max_final_events: 10, + thread_count: 2, + cursor_delay: 0.into(), + clock_compensation: 0, + genesis_timestamp: MassaTime::now().unwrap(), + t0: 1000.into(), + } + } +} diff --git a/massa-execution-exports/src/test_exports/mod.rs b/massa-execution-exports/src/test_exports/mod.rs new file mode 100644 index 00000000000..d9c62e484a2 --- /dev/null +++ b/massa-execution-exports/src/test_exports/mod.rs @@ -0,0 +1,7 @@ +// Copyright (c) 2022 MASSA LABS + +/// This module exposes useful tooling for testing. +/// It is only compiled and exported by the crate if the "testing" feature is enabled. +mod config; + +pub use config::*; diff --git a/massa-execution-exports/src/types.rs b/massa-execution-exports/src/types.rs index 186afc51f2c..28dd7a50616 100644 --- a/massa-execution-exports/src/types.rs +++ b/massa-execution-exports/src/types.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use crate::event_store::EventStore; use massa_ledger::LedgerChanges; use massa_models::{Address, Amount, BlockId, Slot}; diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml index 7f7592c1690..30420235565 100644 --- a/massa-execution-worker/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -33,3 +33,5 @@ massa_ledger = { path = "../massa-ledger" } pretty_assertions = "1.0" serial_test = "0.5" tempfile = "3.2" +# custom modules with testing enabled +massa_execution_exports = { path = "../massa-execution-exports", features = ["testing"] } \ No newline at end of file diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index bc91cff4be1..08bc7d42e36 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use crate::speculative_ledger::SpeculativeLedger; use massa_execution_exports::{ EventStore, ExecutionError, ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest, diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 4cb55049604..25fddf15c65 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use crate::execution::ExecutionState; use massa_execution_exports::{ ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ExecutionOutput, diff --git 
a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 1f597b3f471..be88bda12ef 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use crate::context::ExecutionContext; use crate::interface_impl::InterfaceImpl; use massa_execution_exports::{ diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index bc13ade2e1d..f58eea7b877 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + /// Implementation of the interface used in the execution external library /// use crate::context::ExecutionContext; diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index 3f0b6bda647..816c2b0c61a 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + #![feature(map_first_last)] #![feature(unzip_option)] diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index 2f6625fe4b4..bb9c7dd771f 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use massa_execution_exports::ExecutionError; use massa_hash::hash::Hash; use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; diff --git a/massa-execution-worker/src/tests/mod.rs b/massa-execution-worker/src/tests/mod.rs index d6991d1f1f3..7ba4c32332a 100644 --- a/massa-execution-worker/src/tests/mod.rs +++ b/massa-execution-worker/src/tests/mod.rs @@ -1,3 +1,3 @@ -// Copyright (c) 2021 MASSA LABS +// Copyright (c) 2022 MASSA LABS mod scenarios_mandatories; diff --git a/massa-execution-worker/src/tests/scenarios_mandatories.rs b/massa-execution-worker/src/tests/scenarios_mandatories.rs index 09fa62234a9..616bd8d2966 100644 --- a/massa-execution-worker/src/tests/scenarios_mandatories.rs +++ b/massa-execution-worker/src/tests/scenarios_mandatories.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2021 MASSA LABS +// Copyright (c) 2022 MASSA LABS use crate::{ settings::ExecutionConfigs, start_controller, ExecutionSettings, SCELedger, SCELedgerEntry, diff --git a/massa-execution-worker/src/vm_thread.rs b/massa-execution-worker/src/vm_thread.rs index 8f91d416a8c..ff27ea181de 100644 --- a/massa-execution-worker/src/vm_thread.rs +++ b/massa-execution-worker/src/vm_thread.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use crate::controller::{ExecutionControllerImpl, ExecutionManagerImpl, VMInputData}; use crate::execution::ExecutionState; use massa_execution_exports::{ diff --git a/massa-ledger/src/bootstrap.rs b/massa-ledger/src/bootstrap.rs index 5d10652eca7..4013bb815a7 100644 --- a/massa-ledger/src/bootstrap.rs +++ b/massa-ledger/src/bootstrap.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use std::collections::BTreeMap; use massa_models::{ diff --git a/massa-ledger/src/config.rs b/massa-ledger/src/config.rs index 0881de031e9..498ca245d11 100644 --- a/massa-ledger/src/config.rs +++ b/massa-ledger/src/config.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2021 MASSA LABS +// Copyright (c) 2022 MASSA LABS use std::path::PathBuf; diff --git a/massa-ledger/src/error.rs b/massa-ledger/src/error.rs index d7e1d078c0e..17226bde2d0 100644 --- a/massa-ledger/src/error.rs +++ b/massa-ledger/src/error.rs @@ -1,4 +1,4 @@ 
-// Copyright (c) 2021 MASSA LABS +// Copyright (c) 2022 MASSA LABS use displaydoc::Display; use thiserror::Error; diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index 85584f369d5..8a4ac718d1b 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2021 MASSA LABS +// Copyright (c) 2022 MASSA LABS use crate::ledger_changes::LedgerChanges; use crate::ledger_entry::LedgerEntry; diff --git a/massa-ledger/src/ledger_changes.rs b/massa-ledger/src/ledger_changes.rs index ea8afbf0f15..71e43e6990c 100644 --- a/massa-ledger/src/ledger_changes.rs +++ b/massa-ledger/src/ledger_changes.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use crate::ledger_entry::LedgerEntry; use crate::types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete}; use massa_hash::hash::Hash; diff --git a/massa-ledger/src/ledger_entry.rs b/massa-ledger/src/ledger_entry.rs index 0d2264a7741..9e6ae83976d 100644 --- a/massa-ledger/src/ledger_entry.rs +++ b/massa-ledger/src/ledger_entry.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + use crate::ledger_changes::LedgerEntryUpdate; use crate::types::{Applicable, SetOrDelete}; use massa_hash::hash::Hash; diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs index 17c095ccd77..1516f7f28da 100644 --- a/massa-ledger/src/lib.rs +++ b/massa-ledger/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2021 MASSA LABS +// Copyright (c) 2022 MASSA LABS #![feature(map_first_last)] #![feature(async_closure)] diff --git a/massa-ledger/src/types.rs b/massa-ledger/src/types.rs index a28f896aa90..0af828b3feb 100644 --- a/massa-ledger/src/types.rs +++ b/massa-ledger/src/types.rs @@ -1,3 +1,5 @@ +// Copyright (c) 2022 MASSA LABS + /// represents a structure that supports another one being applied to it pub trait Applicable { fn apply(&mut self, _: V); From 09b6244f202fcf143de7d9904c298c6c8449f105 Mon Sep 17 00:00:00 2001 From: damip Date: Mon, 21 Feb 2022 00:02:58 +0100 Subject: [PATCH 26/73] test progress --- Cargo.lock | 1 - massa-bootstrap/Cargo.toml | 3 +- massa-bootstrap/src/tests/scenarios.rs | 24 +-- massa-bootstrap/src/tests/tools.rs | 160 +++++------------- .../src/controller_traits.rs | 2 +- .../src/test_exports/mock.rs | 135 +++++++++++++++ .../src/test_exports/mod.rs | 2 + massa-execution-worker/src/tests/mod.rs | 2 +- massa-ledger/Cargo.toml | 3 + massa-ledger/src/lib.rs | 3 + massa-ledger/src/test_exports/bootstrap.rs | 57 +++++++ massa-ledger/src/test_exports/config.rs | 15 ++ massa-ledger/src/test_exports/mod.rs | 7 + 13 files changed, 285 insertions(+), 129 deletions(-) create mode 100644 massa-execution-exports/src/test_exports/mock.rs create mode 100644 massa-ledger/src/test_exports/bootstrap.rs create mode 100644 massa-ledger/src/test_exports/config.rs create mode 100644 massa-ledger/src/test_exports/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 5b133bc815f..dc7f65242cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1477,7 +1477,6 @@ dependencies = [ "futures 0.3.21", "lazy_static", "massa_consensus_exports", - "massa_execution_exports", "massa_graph", "massa_hash", "massa_ledger", diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index a20cba60db5..428896058b5 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -19,7 +19,6 @@ tokio = { version = "1.11", features = ["full"] } tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } # custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } 
-massa_execution_exports = { path = "../massa-execution-exports" } massa_ledger = { path = "../massa-ledger" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } @@ -34,3 +33,5 @@ massa_proof_of_stake_exports = { path = "../massa-proof-of-stake-exports" } bitvec = { version = "0.22", features = ["serde"] } pretty_assertions = "1.0" serial_test = "0.5" +# custom modules +massa_ledger = { path = "../massa-ledger", features=["testing"] } \ No newline at end of file diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 061db29d878..2c75448ac20 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -3,28 +3,28 @@ use super::{ mock_establisher, tools::{ - bridge_mock_streams, get_boot_state, get_keys, get_peers, wait_consensus_command, - wait_network_command, + bridge_mock_streams, get_boot_state, get_keys, get_peers, + get_random_ledger_bootstrap_state, wait_consensus_command, wait_network_command, }, }; +use crate::BootstrapSettings; use crate::{ get_state, start_bootstrap_server, tests::tools::{ assert_eq_bootstrap_graph, assert_eq_thread_cycle_states, get_bootstrap_config, }, }; -use crate::{ - tests::tools::{assert_eq_exec, get_execution_state, wait_execution_command}, - BootstrapSettings, -}; use massa_consensus_exports::{commands::ConsensusCommand, ConsensusCommandSender}; -use massa_execution::{ExecutionCommand, ExecutionCommandSender}; +use massa_ledger::FinalLedger; use massa_models::Version; use massa_network::{NetworkCommand, NetworkCommandSender}; use massa_signature::PrivateKey; use massa_time::MassaTime; use serial_test::serial; -use std::str::FromStr; +use std::{ + str::FromStr, + sync::{Arc, RwLock}, +}; use tokio::sync::mpsc; lazy_static::lazy_static! 
{ @@ -42,13 +42,17 @@ async fn test_bootstrap_server() { let (consensus_cmd_tx, mut consensus_cmd_rx) = mpsc::channel::(5); let (network_cmd_tx, mut network_cmd_rx) = mpsc::channel::(5); - let (execution_cmd_tx, mut execution_cmd_rx) = mpsc::channel::(5); + let ledger_bootstrap_state = get_random_ledger_bootstrap_state(); + let final_ledger = Arc::new(RwLock::new(FinalLedger::from_bootstrap_state( + Default::default(), + ledger_bootstrap_state.clone(), + ))); let (bootstrap_establisher, bootstrap_interface) = mock_establisher::new(); let bootstrap_manager = start_bootstrap_server( ConsensusCommandSender(consensus_cmd_tx), NetworkCommandSender(network_cmd_tx), - ExecutionCommandSender(execution_cmd_tx), + final_ledger, bootstrap_settings, bootstrap_establisher, *private_key, diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index c3ee86356bc..4439176d864 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -1,15 +1,14 @@ // Copyright (c) 2021 MASSA LABS -use crate::settings::BootstrapSettings; - use super::mock_establisher::Duplex; +use crate::settings::BootstrapSettings; use bitvec::prelude::*; use massa_consensus_exports::commands::ConsensusCommand; -use massa_execution::{BootstrapExecutionState, ExecutionCommand, SCELedger, SCELedgerEntry}; use massa_graph::{ export_active_block::ExportActiveBlock, ledger::LedgerSubset, BootstrapableGraph, }; use massa_hash::hash::Hash; +use massa_ledger::{test_exports::make_bootstrap_state, FinalLedgerBootstrapState, LedgerEntry}; use massa_models::{ clique::Clique, ledger_models::{LedgerChange, LedgerChanges, LedgerData}, @@ -23,14 +22,56 @@ use massa_signature::{ derive_public_key, generate_random_private_key, sign, PrivateKey, PublicKey, Signature, }; use massa_time::MassaTime; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use rand::Rng; use std::str::FromStr; +use std::{ + collections::BTreeMap, + net::{IpAddr, Ipv4Addr, SocketAddr}, +}; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::{sync::mpsc::Receiver, time::sleep}; pub const BASE_BOOTSTRAP_IP: IpAddr = IpAddr::V4(Ipv4Addr::new(169, 202, 0, 10)); +/// generates a small random number of bytes +fn get_some_random_bytes() -> Vec { + let mut rng = rand::thread_rng(); + (0usize..rng.gen_range(0..10)) + .map(|_| rand::random::()) + .collect() +} + +/// generates a random ledger entry +fn get_random_ledger_entry() -> LedgerEntry { + let mut rng = rand::thread_rng(); + let parallel_balance = Amount::from_raw(rng.gen::()); + let bytecode: Vec = get_some_random_bytes(); + let mut datastore = BTreeMap::new(); + for _ in 0usize..rng.gen_range(0..10) { + let key = Hash::compute_from(&get_some_random_bytes()); + let value = get_some_random_bytes(); + datastore.insert(key, value); + } + LedgerEntry { + parallel_balance, + bytecode, + datastore, + } +} + +/// generates a rendom bootstrap state for a final ledger +pub fn get_random_ledger_bootstrap_state() -> FinalLedgerBootstrapState { + let mut rng = rand::thread_rng(); + + let mut sorted_ledger = BTreeMap::new(); + for _ in 0usize..rng.gen_range(0..10) { + sorted_ledger.insert(get_random_address(), get_random_ledger_entry()); + } + + make_bootstrap_state(Slot::new(rng.gen::(), rng.gen::()), sorted_ledger) +} + pub fn get_dummy_block_id(s: &str) -> BlockId { BlockId(Hash::compute_from(s.as_bytes())) } @@ -116,27 +157,6 @@ where } } -pub async fn wait_execution_command( - execution_command_receiver: &mut Receiver, - timeout: MassaTime, - filter_map: F, -) -> 
Option -where - F: Fn(ExecutionCommand) -> Option, -{ - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! { - cmd = execution_command_receiver.recv() => match cmd { - Some(orig_evt) => if let Some(res_evt) = filter_map(orig_evt) { return Some(res_evt); }, - _ => panic!("execution event channel died") - }, - _ = &mut timer => return None - } - } -} - pub async fn wait_network_command( network_command_receiver: &mut Receiver, timeout: MassaTime, @@ -292,96 +312,6 @@ pub fn assert_eq_bootstrap_graph(v1: &BootstrapableGraph, v2: &BootstrapableGrap } } -/// generates a sample BootstrapExecutionState with a few ledger entries: -/// -/// * final_slot: (period 14, thread 1) -/// * final_ledger: -/// * (random address 1): -/// * balance: 129 -/// * opt_module: None -/// * data: -/// * hash(bytes("key_testA")): bytes("test1_data") -/// * hash(bytes("key_testB")): bytes("test2_data") -/// * hash(bytes("key_testC")): bytes("test3_data") -/// * (random address 2): -/// * balance: 878 -/// * opt_module: Some(bytes("bytecodebytecode")) -/// * data: -/// * hash(bytes("key_testD")): bytes("test4_data") -/// * hash(bytes("key_testE")): bytes("test5_data") -pub fn get_execution_state() -> BootstrapExecutionState { - BootstrapExecutionState { - final_slot: Slot::new(14, 1), - final_ledger: SCELedger( - vec![ - ( - get_random_address(), - SCELedgerEntry { - balance: Amount::from_str("129").unwrap(), - opt_module: None, - data: vec![ - ( - massa_hash::hash::Hash::compute_from("key_testA".as_bytes()), - "test1_data".into(), - ), - ( - massa_hash::hash::Hash::compute_from("key_testB".as_bytes()), - "test2_data".into(), - ), - ( - massa_hash::hash::Hash::compute_from("key_testC".as_bytes()), - "test3_data".into(), - ), - ] - .into_iter() - .collect(), - }, - ), - ( - get_random_address(), - SCELedgerEntry { - balance: Amount::from_str("878").unwrap(), - opt_module: Some("bytecodebytecode".into()), - data: vec![ - ( - massa_hash::hash::Hash::compute_from("key_testD".as_bytes()), - "test4_data".into(), - ), - ( - massa_hash::hash::Hash::compute_from("key_testE".as_bytes()), - "test5_data".into(), - ), - ] - .into_iter() - .collect(), - }, - ), - ] - .into_iter() - .collect(), - ), - } -} - -pub fn assert_eq_exec(v1: &BootstrapExecutionState, v2: &BootstrapExecutionState) { - assert_eq!(v1.final_slot, v2.final_slot, "final slot mismatch"); - assert_eq!( - v1.final_ledger.0.len(), - v2.final_ledger.0.len(), - "ledger len mismatch" - ); - for k in v1.final_ledger.0.keys() { - let itm1 = v1.final_ledger.0.get(k).unwrap(); - let itm2 = v2.final_ledger.0.get(k).expect("ledger key mismatch"); - assert_eq!(itm1.balance, itm2.balance, "ledger balance mismatch"); - assert_eq!( - itm1.opt_module, itm2.opt_module, - "ledger opt_module mismatch" - ); - assert_eq!(itm1.data, itm2.data, "ledger data mismatch"); - } -} - pub fn get_boot_state() -> (ExportProofOfStake, BootstrapableGraph) { let private_key = generate_random_private_key(); let public_key = derive_public_key(&private_key); diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 577977c3934..c04469aa327 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -12,7 +12,7 @@ use massa_models::BlockId; use massa_models::OperationId; use massa_models::Slot; -pub trait ExecutionController: Sync + Send { +pub trait ExecutionController: Send + Sync { /// Update blockclique status /// /// # arguments diff --git 
a/massa-execution-exports/src/test_exports/mock.rs b/massa-execution-exports/src/test_exports/mock.rs new file mode 100644 index 00000000000..586ad7b4f4b --- /dev/null +++ b/massa-execution-exports/src/test_exports/mock.rs @@ -0,0 +1,135 @@ +// Copyright (c) 2022 MASSA LABS +// This file defines utilities to mock the crate for testing purposes + +use crate::{ExecutionController, ExecutionError, ExecutionOutput, ReadOnlyExecutionRequest}; +use massa_ledger::LedgerEntry; +use massa_models::{ + output_event::SCOutputEvent, prehash::Map, Address, Block, BlockId, OperationId, Slot, +}; +use std::sync::{ + mpsc::{self, Receiver}, + Arc, Mutex, +}; + +#[derive(Clone)] +pub enum MockExecutionControllerMessage { + UpdateBlockcliqueStatus { + finalized_blocks: Map, + blockclique: Map, + }, + GetFilteredScOutputEvent { + start: Option, + end: Option, + emitter_address: Option
, + original_caller_address: Option
, + original_operation_id: Option, + response_tx: mpsc::Sender>, + }, + GetFullLedgerEntry { + addr: Address, + response_tx: mpsc::Sender<(Option, Option)>, + }, + ExecuteReadonlyRequest { + req: ReadOnlyExecutionRequest, + response_tx: mpsc::Sender>, + }, +} + +#[derive(Clone)] +pub struct MockExecutionController(Arc>>); + +impl MockExecutionController { + pub fn new() -> ( + Box, + Receiver, + ) { + let (tx, rx) = mpsc::channel(); + ( + Box::new(MockExecutionController(Arc::new(Mutex::new(tx)))), + rx, + ) + } +} + +impl ExecutionController for MockExecutionController { + /// Update blockclique status + /// + /// # arguments + /// * finalized_blocks: newly finalized blocks + /// * blockclique: new blockclique + fn update_blockclique_status( + &self, + finalized_blocks: Map, + blockclique: Map, + ) { + self.0 + .lock() + .unwrap() + .send(MockExecutionControllerMessage::UpdateBlockcliqueStatus { + finalized_blocks, + blockclique, + }) + .unwrap(); + } + + /// Get events optionnally filtered by: + /// * start slot + /// * end slot + /// * emitter address + /// * original caller address + /// * operation id + fn get_filtered_sc_output_event( + &self, + start: Option, + end: Option, + emitter_address: Option
, + original_caller_address: Option
, + original_operation_id: Option, + ) -> Vec { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockExecutionControllerMessage::GetFilteredScOutputEvent { + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() + } + + /// gets a copy of a full ledger entry + /// + /// # return value + /// * (final_entry, active_entry) + fn get_full_ledger_entry(&self, addr: &Address) -> (Option, Option) { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockExecutionControllerMessage::GetFullLedgerEntry { + addr: *addr, + response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() + } + + /// Executes a readonly request + fn execute_readonly_request( + &self, + req: ReadOnlyExecutionRequest, + ) -> Result { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockExecutionControllerMessage::ExecuteReadonlyRequest { req, response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } +} diff --git a/massa-execution-exports/src/test_exports/mod.rs b/massa-execution-exports/src/test_exports/mod.rs index d9c62e484a2..561081b3048 100644 --- a/massa-execution-exports/src/test_exports/mod.rs +++ b/massa-execution-exports/src/test_exports/mod.rs @@ -3,5 +3,7 @@ /// This module exposes useful tooling for testing. /// It is only compiled and exported by the crate if the "testing" feature is enabled. mod config; +mod mock; pub use config::*; +pub use mock::*; diff --git a/massa-execution-worker/src/tests/mod.rs b/massa-execution-worker/src/tests/mod.rs index 7ba4c32332a..b2f126fd7c8 100644 --- a/massa-execution-worker/src/tests/mod.rs +++ b/massa-execution-worker/src/tests/mod.rs @@ -1,3 +1,3 @@ // Copyright (c) 2022 MASSA LABS -mod scenarios_mandatories; +//mod scenarios_mandatories; diff --git a/massa-ledger/Cargo.toml b/massa-ledger/Cargo.toml index cf923278308..4b79f6ac844 100644 --- a/massa-ledger/Cargo.toml +++ b/massa-ledger/Cargo.toml @@ -24,3 +24,6 @@ massa_time = { path = "../massa-time" } [dev-dependencies] pretty_assertions = "1.0" serial_test = "0.5" + +[features] +testing = [] diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs index 1516f7f28da..48b2f66a9ae 100644 --- a/massa-ledger/src/lib.rs +++ b/massa-ledger/src/lib.rs @@ -21,3 +21,6 @@ pub use types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete}; #[cfg(test)] mod tests; + +#[cfg(feature = "testing")] +pub mod test_exports; diff --git a/massa-ledger/src/test_exports/bootstrap.rs b/massa-ledger/src/test_exports/bootstrap.rs new file mode 100644 index 00000000000..2a6aff0c4d7 --- /dev/null +++ b/massa-ledger/src/test_exports/bootstrap.rs @@ -0,0 +1,57 @@ +// Copyright (c) 2022 MASSA LABS + +use std::collections::BTreeMap; + +use massa_models::{Address, Slot}; + +use crate::{FinalLedgerBootstrapState, LedgerEntry}; + +/// This file defines tools to test the ledger bootstrap + +/// creates a ledger bootstrap state from components +pub fn make_bootstrap_state( + slot: Slot, + sorted_ledger: BTreeMap, +) -> FinalLedgerBootstrapState { + FinalLedgerBootstrapState { + slot, + sorted_ledger, + } +} + +/// asserts that two ledger entries are the same +pub fn assert_eq_ledger_entry(v1: &LedgerEntry, v2: &LedgerEntry) { + assert_eq!( + v1.parallel_balance, v2.parallel_balance, + "parallel balance mismatch" + ); + assert_eq!(v1.bytecode, v2.bytecode, "bytecode mismatch"); + assert_eq!( + v1.datastore.len(), + 
v2.datastore.len(), + "datastore len mismatch" + ); + for k in v1.datastore.keys() { + let itm1 = v1.datastore.get(k).unwrap(); + let itm2 = v2.datastore.get(k).expect("datastore key mismatch"); + assert_eq!(itm1, itm2, "datasore entry mismatch"); + } +} + +/// asserts that two FinalLedgerBootstrapState are equal +pub fn assert_eq_ledger_bootstrap_state( + v1: &FinalLedgerBootstrapState, + v2: &FinalLedgerBootstrapState, +) { + assert_eq!(v1.slot, v2.slot, "final slot mismatch"); + assert_eq!( + v1.sorted_ledger.len(), + v2.sorted_ledger.len(), + "ledger len mismatch" + ); + for k in v1.sorted_ledger.keys() { + let itm1 = v1.sorted_ledger.get(k).unwrap(); + let itm2 = v2.sorted_ledger.get(k).expect("ledger key mismatch"); + assert_eq_ledger_entry(itm1, itm2); + } +} diff --git a/massa-ledger/src/test_exports/config.rs b/massa-ledger/src/test_exports/config.rs new file mode 100644 index 00000000000..eada136589b --- /dev/null +++ b/massa-ledger/src/test_exports/config.rs @@ -0,0 +1,15 @@ +// Copyright (c) 2022 MASSA LABS + +/// This file defines testing tools related to the config +use crate::LedgerConfig; + +/// Default value of LedgerConfig used for tests +impl Default for LedgerConfig { + fn default() -> Self { + LedgerConfig { + initial_sce_ledger_path: "".into(), // unused by the mock + final_history_length: 10, + thread_count: 2, + } + } +} diff --git a/massa-ledger/src/test_exports/mod.rs b/massa-ledger/src/test_exports/mod.rs new file mode 100644 index 00000000000..cf3ced58ef9 --- /dev/null +++ b/massa-ledger/src/test_exports/mod.rs @@ -0,0 +1,7 @@ +// exports testing utilities + +mod bootstrap; +mod config; + +pub use bootstrap::*; +pub use config::*; From ad637b34d1b0423a51d2fe224fddd173048e1430 Mon Sep 17 00:00:00 2001 From: damip Date: Mon, 21 Feb 2022 00:39:18 +0100 Subject: [PATCH 27/73] repair bootsrap test --- massa-bootstrap/src/lib.rs | 7 ++-- massa-bootstrap/src/tests/scenarios.rs | 42 +++++++------------ massa-bootstrap/src/tests/tools.rs | 7 +++- massa-ledger/src/bootstrap.rs | 1 - .../src/proof_of_stake.rs | 2 +- 5 files changed, 24 insertions(+), 35 deletions(-) diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index 52b4b7dccc6..4dd6ab70dcd 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -411,9 +411,8 @@ impl BootstrapServer { // If the consensus state snapshot is older than the execution state snapshot, // the execution final ledger will be in the future after bootstrap, which causes an inconsistency. 
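// In other words: the final ledger is snapshotted below *before* the consensus
// bootstrap state is awaited, so the consensus snapshot is always at least as
// recent as the ledger snapshot and the bootstrapped execution ledger can never
// end up ahead of the consensus state.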
let peer_boot = self.network_command_sender.get_bootstrap_peers().await?; - let get_pos_graph = self.consensus_command_sender.get_bootstrap_state(); let res_ledger = self.final_ledger.read().expect("could not lock final ledger for reading").get_bootstrap_state(); - let (pos_boot, graph_boot) = get_pos_graph.await?; + let (pos_boot, graph_boot) = self.consensus_command_sender.get_bootstrap_state().await?; bootstrap_data = Some((pos_boot, graph_boot, peer_boot, res_ledger)); cache_timer.set(sleep(cache_timeout)); } @@ -499,13 +498,13 @@ async fn manage_bootstrap( ) .await?; - // Fourth, send execution state + // Fourth, send ledger state send_state_timeout( write_timeout, server.send(messages::BootstrapMessage::FinalLedgerState { ledger_state: ledger_state, }), - "bootstrap execution state send timed out", + "bootstrap ledger state send timed out", ) .await } diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 2c75448ac20..0c8cdbdf199 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -15,7 +15,7 @@ use crate::{ }, }; use massa_consensus_exports::{commands::ConsensusCommand, ConsensusCommandSender}; -use massa_ledger::FinalLedger; +use massa_ledger::{test_exports::assert_eq_ledger_bootstrap_state, FinalLedger}; use massa_models::Version; use massa_network::{NetworkCommand, NetworkCommandSender}; use massa_signature::PrivateKey; @@ -42,7 +42,7 @@ async fn test_bootstrap_server() { let (consensus_cmd_tx, mut consensus_cmd_rx) = mpsc::channel::(5); let (network_cmd_tx, mut network_cmd_rx) = mpsc::channel::(5); - let ledger_bootstrap_state = get_random_ledger_bootstrap_state(); + let ledger_bootstrap_state = get_random_ledger_bootstrap_state(2); let final_ledger = Arc::new(RwLock::new(FinalLedger::from_bootstrap_state( Default::default(), ledger_bootstrap_state.clone(), @@ -108,7 +108,7 @@ async fn test_bootstrap_server() { bridge_mock_streams(remote_rw, bootstrap_rw).await; }); - // peers and execution are asked simultaneously + // intercept peers being asked let wait_peers = async move || { // wait for bootstrap to ask network for peers, send them let response = match wait_network_command(&mut network_cmd_rx, 1000.into(), |cmd| match cmd @@ -126,25 +126,10 @@ async fn test_bootstrap_server() { sent_peers }; - let wait_execution = async move || { - // wait for bootstrap to ask execution for bootstrap state, send it - let response = - match wait_execution_command(&mut execution_cmd_rx, 1000.into(), |cmd| match cmd { - ExecutionCommand::GetBootstrapState(resp) => Some(resp), - _ => None, - }) - .await - { - Some(resp) => resp, - None => panic!("timeout waiting for get boot execution command"), - }; - let sent_execution_state = get_execution_state(); - response.send(sent_execution_state.clone()).unwrap(); - sent_execution_state - }; + // wait for peers + let sent_peers = wait_peers().await; - // wait for peers and execution at the same time - let (sent_peers, sent_execution_state) = tokio::join!(wait_peers(), wait_execution()); + // here the ledger is queried directly. 
We don't intercept this // wait for bootstrap to ask consensus for bootstrap graph, send it let response = match wait_consensus_command(&mut consensus_cmd_rx, 1000.into(), |cmd| match cmd @@ -170,10 +155,6 @@ async fn test_bootstrap_server() { // wait for bridge bridge.await.expect("bridge join failed"); - // check states - assert_eq_thread_cycle_states(&sent_pos, &bootstrap_res.pos.unwrap()); - assert_eq_bootstrap_graph(&sent_graph, &bootstrap_res.graph.unwrap()); - // check peers assert_eq!( sent_peers.0, @@ -181,8 +162,15 @@ async fn test_bootstrap_server() { "mismatch between sent and received peers" ); - // check execution - assert_eq_exec(&sent_execution_state, &bootstrap_res.execution.unwrap()); + // check ledger + assert_eq_ledger_bootstrap_state( + &ledger_bootstrap_state, + &bootstrap_res.final_ledger.unwrap(), + ); + + // check states + assert_eq_thread_cycle_states(&sent_pos, &bootstrap_res.pos.unwrap()); + assert_eq_bootstrap_graph(&sent_graph, &bootstrap_res.graph.unwrap()); // stop bootstrap server bootstrap_manager diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index 4439176d864..f3a2fa94c0a 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -61,7 +61,7 @@ fn get_random_ledger_entry() -> LedgerEntry { } /// generates a rendom bootstrap state for a final ledger -pub fn get_random_ledger_bootstrap_state() -> FinalLedgerBootstrapState { +pub fn get_random_ledger_bootstrap_state(thread_count: u8) -> FinalLedgerBootstrapState { let mut rng = rand::thread_rng(); let mut sorted_ledger = BTreeMap::new(); @@ -69,7 +69,10 @@ pub fn get_random_ledger_bootstrap_state() -> FinalLedgerBootstrapState { sorted_ledger.insert(get_random_address(), get_random_ledger_entry()); } - make_bootstrap_state(Slot::new(rng.gen::(), rng.gen::()), sorted_ledger) + make_bootstrap_state( + Slot::new(rng.gen::(), rng.gen_range(0..thread_count)), + sorted_ledger, + ) } pub fn get_dummy_block_id(s: &str) -> BlockId { diff --git a/massa-ledger/src/bootstrap.rs b/massa-ledger/src/bootstrap.rs index 4013bb815a7..d72e11ca620 100644 --- a/massa-ledger/src/bootstrap.rs +++ b/massa-ledger/src/bootstrap.rs @@ -64,7 +64,6 @@ impl DeserializeCompact for FinalLedgerBootstrapState { // final ledger let mut sorted_ledger: BTreeMap = BTreeMap::new(); - cursor += delta; for _ in 0..ledger_size { // address let addr = Address::from_bytes(&array_from_slice(&buffer[cursor..])?)?; diff --git a/massa-proof-of-stake-exports/src/proof_of_stake.rs b/massa-proof-of-stake-exports/src/proof_of_stake.rs index 471a7bc1c54..1ffdc757d35 100644 --- a/massa-proof-of-stake-exports/src/proof_of_stake.rs +++ b/massa-proof-of-stake-exports/src/proof_of_stake.rs @@ -12,7 +12,7 @@ use massa_signature::derive_public_key; use num::rational::Ratio; use rand::{distributions::Uniform, Rng, SeedableRng}; use rand_xoshiro::Xoshiro256PlusPlus; -use tracing::log::warn; +use tracing::warn; use crate::{ error::POSResult, error::ProofOfStakeError, export_pos::ExportProofOfStake, From c75ba398465e2fa2bac22d9b8fdd651bf2b3ef1f Mon Sep 17 00:00:00 2001 From: damip Date: Mon, 21 Feb 2022 01:12:00 +0100 Subject: [PATCH 28/73] repair consensus tests --- massa-consensus-worker/Cargo.toml | 1 + .../src/tests/mock_execution_controller.rs | 84 ------------------- massa-consensus-worker/src/tests/mod.rs | 1 - .../src/tests/scenario_roll.rs | 16 ++-- .../src/tests/scenarios_ledger.rs | 8 +- .../tests/scenarios_note_attack_attempt.rs | 14 ++-- massa-consensus-worker/src/tests/tools.rs | 16 
++-- 7 files changed, 22 insertions(+), 118 deletions(-) delete mode 100644 massa-consensus-worker/src/tests/mock_execution_controller.rs diff --git a/massa-consensus-worker/Cargo.toml b/massa-consensus-worker/Cargo.toml index e0970dfb6d7..9d1a00bf98f 100644 --- a/massa-consensus-worker/Cargo.toml +++ b/massa-consensus-worker/Cargo.toml @@ -39,6 +39,7 @@ serial_test = "0.5" stderrlog = "0.5" tempfile = "3.2" massa_models = { path = "../massa-models", features = ["testing"] } +massa_execution_exports = { path = "../massa-execution-exports", features = ["testing"] } massa_consensus_exports = { path = "../massa-consensus-exports", features = ["testing"] } [features] diff --git a/massa-consensus-worker/src/tests/mock_execution_controller.rs b/massa-consensus-worker/src/tests/mock_execution_controller.rs deleted file mode 100644 index b395b876c6a..00000000000 --- a/massa-consensus-worker/src/tests/mock_execution_controller.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2021 MASSA LABS - -use massa_execution::{ - ExecutionCommand, ExecutionCommandSender, ExecutionEvent, ExecutionEventReceiver, -}; -use massa_models::{constants::CHANNEL_SIZE, prehash::Map, Block, BlockId}; -use massa_time::MassaTime; -use tokio::{ - sync::mpsc::{channel, unbounded_channel, Receiver, Sender, UnboundedSender}, - time::sleep, -}; - -#[allow(dead_code)] -pub struct MockExecutionController { - execution_command_sender: Sender, - execution_command_receiver: Receiver, - event_sender: UnboundedSender, -} - -impl MockExecutionController { - pub fn new() -> (Self, ExecutionCommandSender, ExecutionEventReceiver) { - let (event_sender, event_rx) = unbounded_channel::(); - let (execution_command_sender, execution_command_receiver) = - channel::(CHANNEL_SIZE); - ( - MockExecutionController { - execution_command_sender: execution_command_sender.clone(), - execution_command_receiver, - event_sender, - }, - ExecutionCommandSender(execution_command_sender), - ExecutionEventReceiver(event_rx), - ) - } - - #[allow(dead_code)] - pub async fn wait_command(&mut self, timeout: MassaTime, filter_map: F) -> Option - where - F: Fn(ExecutionCommand) -> Option, - { - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! 
{ - cmd_opt = self.execution_command_receiver.recv() => match cmd_opt { - Some(orig_cmd) => if let Some(res_cmd) = filter_map(orig_cmd) { return Some(res_cmd); }, - None => panic!("Unexpected closure of execution command command channel."), - }, - _ = &mut timer => return None - } - } - } - - #[allow(dead_code)] - pub async fn blockclique_changed( - &mut self, - blockclique: Map, - finalized_blocks: Map, - ) { - self.execution_command_sender - .send(ExecutionCommand::BlockCliqueChanged { - blockclique, - finalized_blocks, - }) - .await - .expect("could not send execution event"); - } - - #[allow(dead_code)] - pub async fn ignore_commands_while( - &mut self, - mut future: FutureT, - ) -> FutureT::Output { - loop { - tokio::select!( - res = &mut future => return res, - cmd = self.execution_command_receiver.recv() => match cmd { - Some(_) => {}, - None => return future.await - } - ); - } - } -} diff --git a/massa-consensus-worker/src/tests/mod.rs b/massa-consensus-worker/src/tests/mod.rs index 1578b7e6c8d..c8dca3c26a9 100644 --- a/massa-consensus-worker/src/tests/mod.rs +++ b/massa-consensus-worker/src/tests/mod.rs @@ -1,7 +1,6 @@ // Copyright (c) 2021 MASSA LABS mod block_factory; -mod mock_execution_controller; mod mock_pool_controller; mod mock_protocol_controller; mod scenario_block_creation; diff --git a/massa-consensus-worker/src/tests/scenario_roll.rs b/massa-consensus-worker/src/tests/scenario_roll.rs index f5d71ccb734..8502128803d 100644 --- a/massa-consensus-worker/src/tests/scenario_roll.rs +++ b/massa-consensus-worker/src/tests/scenario_roll.rs @@ -2,6 +2,7 @@ use massa_consensus_exports::tools; use massa_consensus_exports::{settings::ConsensusChannels, ConsensusConfig}; +use massa_execution_exports::test_exports::MockExecutionController; use massa_models::{Address, Amount, BlockId, Slot}; use massa_pool::PoolCommand; use massa_protocol_exports::ProtocolCommand; @@ -27,8 +28,6 @@ use crate::{ use massa_models::ledger_models::LedgerData; use massa_models::prehash::Set; -use super::mock_execution_controller::MockExecutionController; - #[tokio::test] #[serial] async fn test_roll() { @@ -503,8 +502,7 @@ async fn test_roll_block_creation() { let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (mut pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new(); let init_time: MassaTime = 1000.into(); cfg.genesis_timestamp = MassaTime::now().unwrap().saturating_add(init_time); @@ -514,8 +512,7 @@ async fn test_roll_block_creation() { start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, @@ -786,16 +783,15 @@ async fn test_roll_deactivation() { let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (mut pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new(); + cfg.genesis_timestamp = MassaTime::now().unwrap().saturating_add(300.into()); // launch consensus controller let 
(consensus_command_sender, _consensus_event_receiver, _consensus_manager) = start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, diff --git a/massa-consensus-worker/src/tests/scenarios_ledger.rs b/massa-consensus-worker/src/tests/scenarios_ledger.rs index b1ed361502d..f6a6ec5cae6 100644 --- a/massa-consensus-worker/src/tests/scenarios_ledger.rs +++ b/massa-consensus-worker/src/tests/scenarios_ledger.rs @@ -1,11 +1,11 @@ // Copyright (c) 2021 MASSA LABS use super::{ - mock_execution_controller::MockExecutionController, mock_pool_controller::{MockPoolController, PoolCommandSink}, mock_protocol_controller::MockProtocolController, }; use crate::start_consensus_controller; +use massa_execution_exports::test_exports::MockExecutionController; use super::tools::*; use massa_consensus_exports::ConsensusConfig; @@ -498,16 +498,14 @@ async fn test_ledger_update_when_a_batch_of_blocks_becomes_final() { MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); let pool_sink = PoolCommandSink::new(pool_controller).await; - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new(); // launch consensus controller let (consensus_command_sender, consensus_event_receiver, consensus_manager) = start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, diff --git a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs b/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs index 92651e7fa69..5c0bb369cfa 100644 --- a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs +++ b/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs @@ -2,12 +2,12 @@ use super::tools::*; use super::{ - mock_execution_controller::MockExecutionController, mock_pool_controller::{MockPoolController, PoolCommandSink}, mock_protocol_controller::MockProtocolController, }; use crate::start_consensus_controller; use massa_consensus_exports::ConsensusConfig; +use massa_execution_exports::test_exports::MockExecutionController; use massa_consensus_exports::settings::ConsensusChannels; use massa_hash::hash::Hash; @@ -30,16 +30,14 @@ async fn test_invalid_block_notified_as_attack_attempt() { MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); let pool_sink = PoolCommandSink::new(pool_controller).await; - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new(); // launch consensus controller let (consensus_command_sender, consensus_event_receiver, consensus_manager) = start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, @@ -96,8 +94,7 @@ async fn test_invalid_header_notified_as_attack_attempt() { let (mut protocol_controller, protocol_command_sender, 
protocol_event_receiver) = MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new(); let pool_sink = PoolCommandSink::new(pool_controller).await; // launch consensus controller @@ -105,8 +102,7 @@ async fn test_invalid_header_notified_as_attack_attempt() { start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, diff --git a/massa-consensus-worker/src/tests/tools.rs b/massa-consensus-worker/src/tests/tools.rs index bed955b6678..a6b69d19a80 100644 --- a/massa-consensus-worker/src/tests/tools.rs +++ b/massa-consensus-worker/src/tests/tools.rs @@ -2,7 +2,6 @@ #![allow(clippy::ptr_arg)] // this allow &Vec<..> as function argument type use super::{ - mock_execution_controller::MockExecutionController, mock_pool_controller::{MockPoolController, PoolCommandSink}, mock_protocol_controller::MockProtocolController, }; @@ -10,6 +9,7 @@ use crate::start_consensus_controller; use massa_consensus_exports::{ settings::ConsensusChannels, ConsensusCommandSender, ConsensusConfig, ConsensusEventReceiver, }; +use massa_execution_exports::test_exports::MockExecutionController; use massa_graph::{export_active_block::ExportActiveBlock, BlockGraphExport, BootstrapableGraph}; use massa_hash::hash::Hash; use massa_models::{ @@ -638,16 +638,15 @@ pub async fn consensus_pool_test( let (protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded + let (execution_controller, _execution_rx) = MockExecutionController::new(); // launch consensus controller let (consensus_command_sender, consensus_event_receiver, consensus_manager) = start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, @@ -700,8 +699,8 @@ where let (protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded + let (execution_controller, _execution_rx) = MockExecutionController::new(); let pool_sink = PoolCommandSink::new(pool_controller).await; // launch consensus controller @@ -709,8 +708,7 @@ where start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, From dd18dab5fdbf407509aae9412a53d6ec2e44ffcd Mon Sep 17 00:00:00 2001 From: damip Date: Mon, 21 Feb 2022 01:32:30 +0100 Subject: [PATCH 29/73] debug config 
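The diff below removes a [network] block (comment plus peer_list_send_timeout = 100) that appears twice in base_config/config.toml. Defining the same key twice in one TOML table is invalid, so the duplicated block typically makes configuration parsing fail before any setting is applied. A minimal sketch of that failure mode, assuming the toml crate (the node's actual settings loader may differ, so this is an illustration rather than the node's own code):

    // Illustration only: defining the same key twice in one TOML table is a parse error.
    // Assumes the toml crate as a dependency; not part of this patch.
    fn main() {
        let duplicated = r#"
    [network]
    peer_list_send_timeout = 100
    peer_list_send_timeout = 100
    "#;
        let parsed: Result<toml::Value, toml::de::Error> = toml::from_str(duplicated);
        assert!(parsed.is_err(), "duplicate keys are rejected");
    }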
--- massa-node/base_config/config.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index 633f93586a0..755a5a5345b 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -143,10 +143,6 @@ max_send_wait = 500 # we forget we banned a node after ban_timeout milliseconds ban_timeout = 3600000 - # Timeout duration when in handshake we respond with a PeerList - # (on max in connection reached we send a list of peers) - peer_list_send_timeout = 100 - # Timeout duration when in handshake we respond with a PeerList # (on max in connection reached we send a list of peers) peer_list_send_timeout = 100 From 619fc3a02e120fa8d139c915aba6c526bc74d9d6 Mon Sep 17 00:00:00 2001 From: damip Date: Mon, 21 Feb 2022 14:17:33 +0100 Subject: [PATCH 30/73] repair readonly API and client --- massa-api/src/lib.rs | 4 +- massa-api/src/private.rs | 6 +-- massa-api/src/public.rs | 88 +++++++++++++++++++++------------------- massa-client/src/cmds.rs | 4 +- massa-client/src/rpc.rs | 6 +-- 5 files changed, 57 insertions(+), 51 deletions(-) diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index ae0ae0cf59a..fef2d2a87bf 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -116,8 +116,8 @@ pub trait Endpoints { #[rpc(name = "execute_read_only_request")] fn execute_read_only_request( &self, - _: ReadOnlyExecution, - ) -> BoxFuture>; + _: Vec, + ) -> BoxFuture, ApiError>>; /// Remove a vec of addresses used to stake. /// No confirmation to expect. diff --git a/massa-api/src/private.rs b/massa-api/src/private.rs index 6f34ccf3ad8..0fd3945da07 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -76,9 +76,9 @@ impl Endpoints for API { fn execute_read_only_request( &self, - _: ReadOnlyExecution, - ) -> BoxFuture> { - crate::wrong_api::() + _: Vec, + ) -> BoxFuture, ApiError>> { + crate::wrong_api::>() } fn remove_staking_addresses(&self, keys: Vec
) -> BoxFuture> { diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 6dbac451e27..cde8ab6d9db 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -81,50 +81,56 @@ impl Endpoints for API { fn execute_read_only_request( &self, - ReadOnlyExecution { - max_gas, - simulated_gas_price, - bytecode, - address, - }: ReadOnlyExecution, - ) -> BoxFuture> { - let address = address.unwrap_or_else(|| { - // if no addr provided, use a random one - Address::from_public_key(&derive_public_key(&generate_random_private_key())) - }); - - // TODO: - // * set a maximum gas value for read-only executions to prevent attacks - // * stop mapping request and result, reuse execution's structures - // * remove async stuff - - // translate request - let req = ReadOnlyExecutionRequest { - max_gas, - simulated_gas_price, - bytecode, - call_stack: vec![ExecutionStackElement { - address, - coins: Default::default(), - owned_addresses: vec![address], - }], - }; + reqs: Vec + ) -> BoxFuture, ApiError>> { + if reqs.len() > self.0.api_settings.max_arguments as usize { + let closure = async move || Err(ApiError::TooManyArguments("too many arguments".into())); + return Box::pin(closure()); + } + + let mut res: Vec = Vec::with_capacity(reqs.len()); + for ReadOnlyExecution{max_gas, address, simulated_gas_price, bytecode} in reqs { + let address = address.unwrap_or_else(|| { + // if no addr provided, use a random one + Address::from_public_key(&derive_public_key(&generate_random_private_key())) + }); + + // TODO: + // * set a maximum gas value for read-only executions to prevent attacks + // * stop mapping request and result, reuse execution's structures + // * remove async stuff + + // translate request + let req = ReadOnlyExecutionRequest { + max_gas, + simulated_gas_price, + bytecode, + call_stack: vec![ExecutionStackElement { + address, + coins: Default::default(), + owned_addresses: vec![address], + }], + }; + + // run + let result = self.0.execution_controller.execute_readonly_request(req); + + // map result + let result = ExecuteReadOnlyResponse { + executed_at: result.as_ref().map_or_else(|_| Slot::new(0, 0), |v| v.slot), + result: result.as_ref().map_or_else( + |err| ReadOnlyResult::Error(format!("readonly call failed: {}", err)), + |_| ReadOnlyResult::Ok, + ), + output_events: result.map_or_else(|_| Default::default(), |v| v.events.export()), + }; + + res.push(result); + } - // run - let result = self.0.execution_controller.execute_readonly_request(req); - - // map result - let result = ExecuteReadOnlyResponse { - executed_at: result.as_ref().map_or_else(|_| Slot::new(0, 0), |v| v.slot), - result: result.as_ref().map_or_else( - |err| ReadOnlyResult::Error(format!("readonly call failed: {}", err)), - |_| ReadOnlyResult::Ok, - ), - output_events: result.map_or_else(|_| Default::default(), |v| v.events.export()), - }; // return result - let closure = async move || Ok(result); + let closure = async move || Ok(res); Box::pin(closure()) } diff --git a/massa-client/src/cmds.rs b/massa-client/src/cmds.rs index f09bb7757f7..2482d358e6e 100644 --- a/massa-client/src/cmds.rs +++ b/massa-client/src/cmds.rs @@ -174,7 +174,7 @@ pub enum Command { #[strum( ascii_case_insensitive, props(args = "PathToBytecode MaxGas GasPrice Address", todo = "[unstable] "), - message = "execute byte code, address is optionnal. Nothing is really executed on chain" + message = "execute bytecode without changing anything to the blockchain, address is optionnal." 
)] read_only_smart_contract, @@ -761,7 +761,7 @@ impl Command { }; let bytecode = get_file_as_byte_vec(&path).await?; match client - .private + .public .execute_read_only_request(ReadOnlyExecution { max_gas, simulated_gas_price, diff --git a/massa-client/src/rpc.rs b/massa-client/src/rpc.rs index 7aab25530b6..bfccf571f95 100644 --- a/massa-client/src/rpc.rs +++ b/massa-client/src/rpc.rs @@ -117,12 +117,12 @@ impl RpcClient { &self, read_only_execution: ReadOnlyExecution, ) -> RpcResult { - self.call_method( + self.call_method::, Vec>( "execute_read_only_request", "ExecuteReadOnlyResponse", - read_only_execution, + vec![read_only_execution], ) - .await + .await?.pop().ok_or(RpcError::Client("missing return value on execute_read_only_request".into())) } //////////////// From 57706ae1de80b0b89e6f1aad6490429d8538a0d3 Mon Sep 17 00:00:00 2001 From: damip Date: Mon, 21 Feb 2022 15:04:35 +0100 Subject: [PATCH 31/73] make the client readonly call work --- massa-client/src/rpc.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/massa-client/src/rpc.rs b/massa-client/src/rpc.rs index bfccf571f95..1292bfe5550 100644 --- a/massa-client/src/rpc.rs +++ b/massa-client/src/rpc.rs @@ -117,10 +117,10 @@ impl RpcClient { &self, read_only_execution: ReadOnlyExecution, ) -> RpcResult { - self.call_method::, Vec>( + self.call_method::>, Vec>( "execute_read_only_request", - "ExecuteReadOnlyResponse", - vec![read_only_execution], + "Vec", + vec![vec![read_only_execution]], ) .await?.pop().ok_or(RpcError::Client("missing return value on execute_read_only_request".into())) } From 52cd0caf3ffb8214c33c5e57f414b9cc9dc50250 Mon Sep 17 00:00:00 2001 From: damip Date: Mon, 21 Feb 2022 17:15:03 +0100 Subject: [PATCH 32/73] fmt and resolver --- Cargo.toml | 1 + massa-api/src/public.rs | 22 ++++++++++++++-------- massa-client/src/rpc.rs | 6 +++++- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 820f6b19618..ef50baf9c36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "2" members = [ "massa-api", "massa-bootstrap", diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index cde8ab6d9db..0314bb2dc3c 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -81,25 +81,32 @@ impl Endpoints for API { fn execute_read_only_request( &self, - reqs: Vec + reqs: Vec, ) -> BoxFuture, ApiError>> { if reqs.len() > self.0.api_settings.max_arguments as usize { - let closure = async move || Err(ApiError::TooManyArguments("too many arguments".into())); + let closure = + async move || Err(ApiError::TooManyArguments("too many arguments".into())); return Box::pin(closure()); } let mut res: Vec = Vec::with_capacity(reqs.len()); - for ReadOnlyExecution{max_gas, address, simulated_gas_price, bytecode} in reqs { + for ReadOnlyExecution { + max_gas, + address, + simulated_gas_price, + bytecode, + } in reqs + { let address = address.unwrap_or_else(|| { // if no addr provided, use a random one Address::from_public_key(&derive_public_key(&generate_random_private_key())) }); - + // TODO: // * set a maximum gas value for read-only executions to prevent attacks // * stop mapping request and result, reuse execution's structures // * remove async stuff - + // translate request let req = ReadOnlyExecutionRequest { max_gas, @@ -111,10 +118,10 @@ impl Endpoints for API { owned_addresses: vec![address], }], }; - + // run let result = self.0.execution_controller.execute_readonly_request(req); - + // map result let result = 
ExecuteReadOnlyResponse { executed_at: result.as_ref().map_or_else(|_| Slot::new(0, 0), |v| v.slot), @@ -128,7 +135,6 @@ impl Endpoints for API { res.push(result); } - // return result let closure = async move || Ok(res); Box::pin(closure()) diff --git a/massa-client/src/rpc.rs b/massa-client/src/rpc.rs index 1292bfe5550..1fe2d0ca247 100644 --- a/massa-client/src/rpc.rs +++ b/massa-client/src/rpc.rs @@ -122,7 +122,11 @@ impl RpcClient { "Vec", vec![vec![read_only_execution]], ) - .await?.pop().ok_or(RpcError::Client("missing return value on execute_read_only_request".into())) + .await? + .pop() + .ok_or(RpcError::Client( + "missing return value on execute_read_only_request".into(), + )) } //////////////// From 858caac49fa8ecc86e71f8cce028c92190d79354 Mon Sep 17 00:00:00 2001 From: damip Date: Mon, 21 Feb 2022 18:54:34 +0100 Subject: [PATCH 33/73] improve documentation --- Cargo.toml | 2 +- massa-execution-exports/src/config.rs | 4 +- .../src/controller_traits.rs | 26 +++- massa-execution-exports/src/error.rs | 2 + massa-execution-exports/src/event_store.rs | 3 + massa-execution-exports/src/lib.rs | 20 +++ .../src/test_exports/config.rs | 3 +- .../src/test_exports/mock.rs | 35 ++--- .../src/test_exports/mod.rs | 5 +- massa-execution-exports/src/types.rs | 23 ++- massa-execution-worker/src/context.rs | 141 ++++++++++++++---- massa-execution-worker/src/tests/mod.rs | 2 +- 12 files changed, 202 insertions(+), 64 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ef50baf9c36..ffb6a3b9cf4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,4 @@ [workspace] -resolver = "2" members = [ "massa-api", "massa-bootstrap", @@ -23,6 +22,7 @@ members = [ "massa-wallet", "massa-ledger" ] +resolver = "2" # From https://doc.rust-lang.org/cargo/reference/profiles.html#overrides [profile.dev.package."*"] diff --git a/massa-execution-exports/src/config.rs b/massa-execution-exports/src/config.rs index 95443434148..964243f4a0a 100644 --- a/massa-execution-exports/src/config.rs +++ b/massa-execution-exports/src/config.rs @@ -1,8 +1,10 @@ // Copyright (c) 2022 MASSA LABS +//! This module provides the structures used to provide configuration parameters to the Execution system + use massa_time::MassaTime; -/// VM module configuration +/// Executio module configuration #[derive(Debug, Clone)] pub struct ExecutionConfig { /// read-only execution request queue length diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index c04469aa327..a18b3c71c7b 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -1,5 +1,7 @@ // Copyright (c) 2022 MASSA LABS +//! This module exports generic traits representing interfaces for interacting with the Execution worker + use crate::types::ExecutionOutput; use crate::types::ReadOnlyExecutionRequest; use crate::ExecutionError; @@ -12,8 +14,9 @@ use massa_models::BlockId; use massa_models::OperationId; use massa_models::Slot; +/// interface that communicates with the execution worker thread pub trait ExecutionController: Send + Sync { - /// Update blockclique status + /// Updates blockclique status by signalling newly finalized blocks and the latest blockclique. 
/// /// # arguments /// * finalized_blocks: newly finalized blocks @@ -24,7 +27,7 @@ pub trait ExecutionController: Send + Sync { blockclique: Map, ); - /// Get events optionnally filtered by: + /// Get execution events optionnally filtered by: /// * start slot /// * end slot /// * emitter address @@ -39,24 +42,33 @@ pub trait ExecutionController: Send + Sync { original_operation_id: Option, ) -> Vec; - /// gets a copy of a full ledger entry + /// Get a copy of a full ledger entry /// /// # return value /// * (final_entry, active_entry) fn get_full_ledger_entry(&self, addr: &Address) -> (Option, Option); - /// Executes a readonly request + /// Execute read-only bytecode without causing modifications to the consensus state + /// + /// # arguments + /// * req: an instance of ReadOnlyExecutionRequest describing the parameters of the execution + /// + /// # returns + /// An instance of ExecutionOutput containing a summary of the effects of the execution, + /// or an error if the execution failed. fn execute_readonly_request( &self, req: ReadOnlyExecutionRequest, ) -> Result; } -/// execution manager +/// Execution manager used to generate controllers and to stop the execution thread pub trait ExecutionManager { - /// stops the VM + /// Stop the execution thread + /// Note that we do not take self by value to consume it + /// because it is not allowed to move out of Box fn stop(&mut self); - /// get a shared reference to the VM controller + /// Get a new execution controller fn get_controller(&self) -> Box; } diff --git a/massa-execution-exports/src/error.rs b/massa-execution-exports/src/error.rs index ca9b82d9f83..ac667b28b33 100644 --- a/massa-execution-exports/src/error.rs +++ b/massa-execution-exports/src/error.rs @@ -1,5 +1,7 @@ // Copyright (c) 2022 MASSA LABS +//! this file defines all possible execution error categories + use displaydoc::Display; use thiserror::Error; diff --git a/massa-execution-exports/src/event_store.rs b/massa-execution-exports/src/event_store.rs index 48c8f9eadff..bd48ecafa83 100644 --- a/massa-execution-exports/src/event_store.rs +++ b/massa-execution-exports/src/event_store.rs @@ -1,5 +1,8 @@ // Copyright (c) 2022 MASSA LABS +//! This module represents an event store allowing to store, search and retrieve +//! a config-limited number of execution-generated events + use massa_models::output_event::{SCOutputEvent, SCOutputEventId}; use massa_models::prehash::{Map, PreHashed, Set}; /// Define types used while executing block bytecodes diff --git a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs index 22951ed78e7..f4d29de4f0c 100644 --- a/massa-execution-exports/src/lib.rs +++ b/massa-execution-exports/src/lib.rs @@ -1,5 +1,25 @@ // Copyright (c) 2022 MASSA LABS +//! # Overview +//! +//! This crate provides all the facilities to interact with a running execution worker (massa-execution-worker crate) +//! that is in charge of executing operations containing bytecode in a virtual machine, +//! and applying the effects of the execution to a ledger. +//! +//! # Usage +//! +//! When an execution worker is launched to run in a separate worker thread for the whole duration of the process and +//! apply incoming requests, an instance of ExecutionManager is returned (see the documentation of massa-execution-worker). +//! +//! ExecutionManager allows stopping the execution worker thread, +//! but it also allows generating as many instances of ExecutionController as necessary. +//! +//! 
Each ExecutionController allows sending updates on the latest blockclique changes to the execution worker +//! for it to keep track of them and execute the bytecode present in blocks. +//! It also allows various read-only queries such as executing bytecode +//! while ignoring all the changes it would cause to the consensus state, +//! or reading the state at the output of the executed blockclique blocks. + mod config; mod controller_traits; mod error; diff --git a/massa-execution-exports/src/test_exports/config.rs b/massa-execution-exports/src/test_exports/config.rs index d17011c7027..589ad497aee 100644 --- a/massa-execution-exports/src/test_exports/config.rs +++ b/massa-execution-exports/src/test_exports/config.rs @@ -1,6 +1,7 @@ // Copyright (c) 2022 MASSA LABS -/// This file defines testing tools related to the config +//! This file defines testing tools related to the config + use massa_time::MassaTime; use crate::ExecutionConfig; diff --git a/massa-execution-exports/src/test_exports/mock.rs b/massa-execution-exports/src/test_exports/mock.rs index 586ad7b4f4b..e8372fb6a6c 100644 --- a/massa-execution-exports/src/test_exports/mock.rs +++ b/massa-execution-exports/src/test_exports/mock.rs @@ -1,5 +1,6 @@ // Copyright (c) 2022 MASSA LABS -// This file defines utilities to mock the crate for testing purposes + +//! This file defines utilities to mock the crate for testing purposes use crate::{ExecutionController, ExecutionError, ExecutionOutput, ReadOnlyExecutionRequest}; use massa_ledger::LedgerEntry; @@ -11,6 +12,11 @@ use std::sync::{ Arc, Mutex, }; +/// List of possible messages coming from the mock. +/// Each variant corresponds to a unique method in ExecutionController, +/// and is emitted in a thread-safe way by the mock whenever that method is called. +/// Some variants wait for a response on their response_tx field, if present. +/// See the documentation of ExecutionController for details on parameters and return values. #[derive(Clone)] pub enum MockExecutionControllerMessage { UpdateBlockcliqueStatus { @@ -35,10 +41,16 @@ pub enum MockExecutionControllerMessage { }, } +/// A mocked execution controller that will intercept calls on its methods +/// and emit corresponding MockExecutionControllerMessage messages through a MPSC in a thread-safe way. +/// For messages with a response_tx field, the mock will await a response through their response_tx channel +/// in order to simulate returning this value at the end of the call. #[derive(Clone)] pub struct MockExecutionController(Arc>>); impl MockExecutionController { + /// Create a new pair (mock execution controller, mpsc receiver for emitted messages) + /// Note that unbounded mpsc channels are used pub fn new() -> ( Box, Receiver, @@ -51,12 +63,12 @@ impl MockExecutionController { } } +/// Implements all the methods of the ExecutionController trait, +/// but simply make them emit a MockExecutionControllerMessage. +/// If the message contains a response_tx, +/// a response from that channel is read and returned as return value. +/// See the documentation of ExecutionController for details on each function. 
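A hedged usage sketch (illustrative, not part of the patch): in a consensus-side test, the boxed controller returned by `MockExecutionController::new()` is handed to `ConsensusChannels { execution_controller, .. }` while the test keeps the receiver and drains the messages the mock emits. The helper name `expect_blockclique_update`, the timeout value, and the assumption that both mock types are re-exported from `test_exports` under the `testing` feature are editorial assumptions, not guarantees from the patch.

```rust
// Hedged sketch: observing the mock from a consensus test.
// `expect_blockclique_update` and the 500 ms timeout are illustrative choices.
use massa_execution_exports::test_exports::{
    MockExecutionController, MockExecutionControllerMessage,
};
use std::{sync::mpsc::Receiver, time::Duration};

fn expect_blockclique_update(rx: &Receiver<MockExecutionControllerMessage>) {
    match rx.recv_timeout(Duration::from_millis(500)) {
        Ok(MockExecutionControllerMessage::UpdateBlockcliqueStatus {
            finalized_blocks,
            blockclique,
        }) => {
            // the worker under test has notified execution of a blockclique change
            println!(
                "finalized blocks: {}, blockclique size: {}",
                finalized_blocks.len(),
                blockclique.len()
            );
        }
        _ => panic!("expected an UpdateBlockcliqueStatus message from the mock"),
    }
}

// In a test body (sketch):
// let (execution_controller, execution_rx) = MockExecutionController::new();
// ... pass `execution_controller` into ConsensusChannels { execution_controller, .. } ...
// expect_blockclique_update(&execution_rx);
```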
impl ExecutionController for MockExecutionController { - /// Update blockclique status - /// - /// # arguments - /// * finalized_blocks: newly finalized blocks - /// * blockclique: new blockclique fn update_blockclique_status( &self, finalized_blocks: Map, @@ -72,12 +84,6 @@ impl ExecutionController for MockExecutionController { .unwrap(); } - /// Get events optionnally filtered by: - /// * start slot - /// * end slot - /// * emitter address - /// * original caller address - /// * operation id fn get_filtered_sc_output_event( &self, start: Option, @@ -102,10 +108,6 @@ impl ExecutionController for MockExecutionController { response_rx.recv().unwrap() } - /// gets a copy of a full ledger entry - /// - /// # return value - /// * (final_entry, active_entry) fn get_full_ledger_entry(&self, addr: &Address) -> (Option, Option) { let (response_tx, response_rx) = mpsc::channel(); self.0 @@ -119,7 +121,6 @@ impl ExecutionController for MockExecutionController { response_rx.recv().unwrap() } - /// Executes a readonly request fn execute_readonly_request( &self, req: ReadOnlyExecutionRequest, diff --git a/massa-execution-exports/src/test_exports/mod.rs b/massa-execution-exports/src/test_exports/mod.rs index 561081b3048..d8b587eda2b 100644 --- a/massa-execution-exports/src/test_exports/mod.rs +++ b/massa-execution-exports/src/test_exports/mod.rs @@ -1,7 +1,8 @@ // Copyright (c) 2022 MASSA LABS -/// This module exposes useful tooling for testing. -/// It is only compiled and exported by the crate if the "testing" feature is enabled. +//! This module exposes useful tooling for testing. +//! It is only compiled and exported by the crate if the "testing" feature is enabled. + mod config; mod mock; diff --git a/massa-execution-exports/src/types.rs b/massa-execution-exports/src/types.rs index 28dd7a50616..9b9710290a8 100644 --- a/massa-execution-exports/src/types.rs +++ b/massa-execution-exports/src/types.rs @@ -1,10 +1,12 @@ // Copyright (c) 2022 MASSA LABS +//! This file exports useful types used to interact with the execution worker + use crate::event_store::EventStore; use massa_ledger::LedgerChanges; use massa_models::{Address, Amount, BlockId, Slot}; -/// structure describing the output of an execution +/// structure describing the output of a single execution #[derive(Debug, Clone)] pub struct ExecutionOutput { // slot @@ -30,12 +32,25 @@ pub struct ReadOnlyExecutionRequest { pub call_stack: Vec, } +/// Structure describing an element of the execution stack. +/// Every time a function is called from bytecode, +/// a new ExecutionStackElement is pushed at the top of the execution stack +/// to represent the local execution context of the called function, +/// instead of the caller's which should lie just below in the stack. #[derive(Debug, Clone)] pub struct ExecutionStackElement { - /// called address + /// Called address pub address: Address, - /// coins transferred to the target address during a call, + /// Coins transferred to the target address during the call pub coins: Amount, - /// list of addresses created so far during excution, + /// List of addresses owned by the current call, and on which the current call has write access. + /// This list should contain ExecutionStackElement::address in the sense that an address should have write access to itself. + /// This list should also contain all addresses created previously during the call + /// to allow write access on newly created addresses in order to set them up, + /// but only within the scope of the current stack element. 
+ /// That way, only the current scope and neither its caller not the functions it calls gain this write access, + /// which is important for security. + /// Note that we use a Vec instead of a prehashed set to ensure order determinism, + /// the performance hit of linear search remains minimal because owned_addreses will always contain very few elements. pub owned_addresses: Vec
, } diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 08bc7d42e36..b0fa5572ff4 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -1,5 +1,12 @@ // Copyright (c) 2022 MASSA LABS +//! This module represents the context in which the VM executes bytecode. +//! It provides information such as the current call stack. +//! It also maintians a "speculative" ledger state which is a virtual ledger +//! as seen after applying everything that happened so far in the context. +//! More generally, the context acts only on its own state +//! and does not write anything persistent to the conensus state. + use crate::speculative_ledger::SpeculativeLedger; use massa_execution_exports::{ EventStore, ExecutionError, ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest, @@ -11,11 +18,13 @@ use rand::SeedableRng; use rand_xoshiro::Xoshiro256PlusPlus; use std::sync::{Arc, RwLock}; +/// A snapshot taken from an ExecutionContext and that represents its current state. +/// The ExecutionContext state can then be restored later from this snapshot. pub(crate) struct ExecutionContextSnapshot { - // added speculative ledger changes + // speculative ledger changes caused so far in the context pub ledger_changes: LedgerChanges, - /// counter of newly created addresses so far during this execution + /// counter of newly created addresses so far at this slot during this execution pub created_addr_index: u64, /// counter of newly created events so far during this execution @@ -31,8 +40,12 @@ pub(crate) struct ExecutionContextSnapshot { pub unsafe_rng: Xoshiro256PlusPlus, } +/// An execution context that needs to be initialized before executing bytecode, +/// passed to the VM to interact with during bytecode execution (through ABIs), +/// and read after execution to gather results. pub(crate) struct ExecutionContext { - // speculative ledger + /// speculative ledger state, + /// as seen after everything that happened so far in the context speculative_ledger: SpeculativeLedger, /// max gas for this execution @@ -50,7 +63,7 @@ pub(crate) struct ExecutionContext { /// counter of newly created events so far during this execution pub created_event_index: u64, - /// block ID, if one is present at this slot + /// block ID, if one is present at the execution slot pub opt_block_id: Option, /// address call stack, most recent is at the back @@ -62,14 +75,25 @@ pub(crate) struct ExecutionContext { /// generated events during this execution, with multiple indexes pub events: EventStore, - /// Unsafe RNG state + /// Unsafe RNG state (can be predicted and manipulated) pub unsafe_rng: Xoshiro256PlusPlus, - /// origin operation id + /// operation id that originally caused this execution (if any) pub origin_operation_id: Option, } impl ExecutionContext { + /// Create a new empty ExecutionContext + /// This should only be used as a placeholder. + /// Further initialization is required before running bytecode + /// (see new_readonly and new_active_slot methods). + /// + /// # arguments + /// * final_ledger: thread-safe access to the final ledger. 
Note that this will be used only for reading, never for writing + /// * previous_changes: list of ledger changes that happened since the final ledger state and before the current execution + /// + /// # returns + /// A new (empty) ExecutionContext instance pub(crate) fn new( final_ledger: Arc>, previous_changes: LedgerChanges, @@ -90,7 +114,8 @@ impl ExecutionContext { } } - /// returns an copied execution state snapshot + /// Returns a snapshot containing the clone of the current execution state. + /// Note that the snapshot does not include slot-level information such as the slot number or block ID. pub(crate) fn get_snapshot(&self) -> ExecutionContextSnapshot { ExecutionContextSnapshot { ledger_changes: self.speculative_ledger.get_snapshot(), @@ -102,7 +127,8 @@ impl ExecutionContext { } } - /// resets context to a snapshot + /// Resets context to an existing snapshot + /// Note that the snapshot does not include slot-level information such as the slot number or block ID. pub fn reset_to_snapshot(&mut self, snapshot: ExecutionContextSnapshot) { self.speculative_ledger .reset_to_snapshot(snapshot.ledger_changes); @@ -113,17 +139,36 @@ impl ExecutionContext { self.unsafe_rng = snapshot.unsafe_rng; } - /// create the execution context at the beginning of a readonly execution + /// Create a new ExecutionContext for readonly execution + /// This should be used before performing a readonly execution. + /// + /// # arguments + /// * slot: slot at which the execution will happen + /// * req: parameters of the read only execution + /// * previous_changes: list of ledger changes that happened since the final ledger state and before this execution + /// * final_ledger: thread-safe access to the final ledger. Note that this will be used only for reading, never for writing + /// + /// # returns + /// A ExecutionContext instance ready for a read-only execution pub(crate) fn new_readonly( slot: Slot, req: ReadOnlyExecutionRequest, previous_changes: LedgerChanges, final_ledger: Arc>, ) -> Self { - // Seed the RNG + // Deterministically seed the unsafe RNG to allow the bytecode to use it. + // Note that consecutive read-only calls for the same slot will get the same random seed. + + // Add the current slot to the seed to ensure different draws at every slot let mut seed: Vec = slot.to_bytes_key().to_vec(); - seed.push(0u8); // read-only + // Add a marker to the seed indicating that we are in read-only mode + // to prevent random draw collisions with active executions + seed.push(0u8); // 0u8 = read-only let seed = massa_hash::hash::Hash::compute_from(&seed).to_bytes(); + // We use Xoshiro256PlusPlus because it is very fast, + // has a period long enough to ensure no repetitions will ever happen, + // of decent quality (given the unsafe constraints) + // but not cryptographically secure (and that's ok because the internal state is exposed anyways) let unsafe_rng = Xoshiro256PlusPlus::from_seed(seed); // return readonly context @@ -138,16 +183,31 @@ impl ExecutionContext { } } - /// create the execution context at the beginning of an active execution slot + /// Create a new ExecutionContext for executing an active slot. + /// This should be used before performing any executions at that slot. + /// + /// # arguments + /// * slot: slot at which the execution will happen + /// * opt_block_id: optional ID of the block at that slot + /// * previous_changes: list of ledger changes that happened since the final ledger state and before this execution + /// * final_ledger: thread-safe access to the final ledger. 
Note that this will be used only for reading, never for writing + /// + /// # returns + /// A ExecutionContext instance ready for a read-only execution pub(crate) fn new_active_slot( slot: Slot, opt_block_id: Option, previous_changes: LedgerChanges, final_ledger: Arc>, ) -> Self { - // seed the RNG + // Deterministically seed the unsafe RNG to allow the bytecode to use it. + + // Add the current slot to the seed to ensure different draws at every slot let mut seed: Vec = slot.to_bytes_key().to_vec(); - seed.push(1u8); // not read-only + // Add a marker to the seed indicating that we are in active mode + // to prevent random draw collisions with read-only executions + seed.push(1u8); // 1u8 = active + // For more deterministic entropy, seed with the block ID if any if let Some(block_id) = &opt_block_id { seed.extend(block_id.to_bytes()); // append block ID } @@ -163,7 +223,11 @@ impl ExecutionContext { } } - /// moves out the output of the execution, resetting some fields + /// Moves the output of the execution out of the context, + /// resetting some context fields in the process. + /// + /// This is used to get the output of an execution before discarding the context. + /// Note that we are not taking self by value to consume it because the context is shared. pub fn take_execution_output(&mut self) -> ExecutionOutput { ExecutionOutput { slot: self.slot, @@ -173,7 +237,7 @@ impl ExecutionContext { } } - /// gets the address at the top of the stack + /// Gets the address at the top of the call stack, if any pub fn get_current_address(&self) -> Result { match self.stack.last() { Some(addr) => Ok(addr.address), @@ -185,8 +249,8 @@ impl ExecutionContext { } } - /// gets the current list of owned addresses (top of the stack) - /// ordering is conserved for determinism + /// Gets the current list of owned addresses (top of the stack) + /// Ordering is conserved for determinism pub fn get_current_owned_addresses(&self) -> Result, ExecutionError> { match self.stack.last() { Some(v) => Ok(v.owned_addresses.clone()), @@ -198,7 +262,7 @@ impl ExecutionContext { } } - /// gets the current call coins + /// Gets the current call coins pub fn get_current_call_coins(&self) -> Result { match self.stack.last() { Some(v) => Ok(v.coins), @@ -210,39 +274,48 @@ impl ExecutionContext { } } - /// gets the call stack (addresses) + /// Gets the addresses from the call stack (last = top of the stack) pub fn get_call_stack(&self) -> Vec
{ self.stack.iter().map(|v| v.address).collect() } - /// check whether the context grants write access on a given address + /// Checks whether the context currently grants write access to a given address pub fn has_write_rights_on(&self, addr: &Address) -> bool { self.stack .last() .map_or(false, |v| v.owned_addresses.contains(&addr)) } - /// creates a new smart contract address with initial bytecode, within the current execution context + /// Creates a new smart contract address with initial bytecode, and returns this address pub fn create_new_sc_address(&mut self, bytecode: Vec) -> Result { // TODO: security problem: // prefix addresses to know if they are SCs or normal, otherwise people can already create new accounts by sending coins to the right hash // they won't have ownership over it but this can still be a pain - // generate address + // deterministically generate a new unique smart contract address + + // create a seed from the current slot let mut data: Vec = self.slot.to_bytes_key().to_vec(); + // add the index of the created address within this context to the seed data.append(&mut self.created_addr_index.to_be_bytes().to_vec()); + // add a flag on whether we are in read-only mode or not to the seed + // this prevents read-only contexts from shadowing existing addresses if self.read_only { data.push(0u8); } else { data.push(1u8); } + // hash the seed to get a unique address let address = Address(massa_hash::hash::Hash::compute_from(&data)); - // create address in the speculative ledger + // add this address with its bytecode to the speculative ledger self.speculative_ledger .create_new_sc_address(address, bytecode)?; - // add to owned addresses + // add the address to owned addresses + // so that the current call has write access to it + // from now and for its whole duration, + // in order to allow initializing newly created ledger entries. match self.stack.last_mut() { Some(v) => { v.owned_addresses.push(address); @@ -260,27 +333,35 @@ impl ExecutionContext { Ok(address) } - /// gets the bytecode of an address if it exists + /// gets the bytecode of an address if it exists in the speculative ledger, or returns None pub fn get_bytecode(&self, address: &Address) -> Option> { self.speculative_ledger.get_bytecode(address) } - /// gets the data from a datastore entry of an address if it exists + /// gets the data from a datastore entry of an address if it exists in the speculative ledger, or returns None pub fn get_data_entry(&self, address: &Address, key: &Hash) -> Option> { self.speculative_ledger.get_data_entry(address, key) } - /// checks if a datastore entry exists + /// checks if a datastore entry exists in the speculative ledger pub fn has_data_entry(&self, address: &Address, key: &Hash) -> bool { self.speculative_ledger.has_data_entry(address, key) } - /// gets the bytecode of an address if it exists + /// gets the bytecode of an address if it exists in the speculative ledger, or returns None pub fn get_parallel_balance(&self, address: &Address) -> Option { self.speculative_ledger.get_parallel_balance(address) } - /// checks if a datastore entry exists + /// Sets a datastore entry for an address in the speculative ledger. + /// Fail if the address is absent from the ledger. + /// The datastore entry is created if it is absent for that address. 
+ /// + /// # Arguments + /// * address: the address of the ledger entry + /// * key: the datastore key + /// * data: the data to insert + /// * check_rights: if true, the function quits with an error if the current context has no writing rights on the target address pub fn set_data_entry( &mut self, address: &Address, diff --git a/massa-execution-worker/src/tests/mod.rs b/massa-execution-worker/src/tests/mod.rs index b2f126fd7c8..909935ad7a0 100644 --- a/massa-execution-worker/src/tests/mod.rs +++ b/massa-execution-worker/src/tests/mod.rs @@ -1,3 +1,3 @@ // Copyright (c) 2022 MASSA LABS -//mod scenarios_mandatories; +//TODO mod scenarios_mandatories; From b8572af7e4190e8de47b63b5f867e35dcc3144f8 Mon Sep 17 00:00:00 2001 From: damip Date: Tue, 22 Feb 2022 00:18:10 +0100 Subject: [PATCH 34/73] progress on documentation --- massa-execution-worker/src/controller.rs | 61 +++--- massa-execution-worker/src/execution.rs | 190 +++++++++++++------ massa-execution-worker/src/interface_impl.rs | 10 +- 3 files changed, 181 insertions(+), 80 deletions(-) diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 25fddf15c65..1de1866a398 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -1,5 +1,8 @@ // Copyright (c) 2022 MASSA LABS +//! This module implements an execution controller. +//! See massa-execution-exports/controller_traits.rs for functional details. + use crate::execution::ExecutionState; use massa_execution_exports::{ ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ExecutionOutput, @@ -22,11 +25,11 @@ pub(crate) struct VMInputData { pub stop: bool, /// signal whether the blockclique changed pub blockclique_changed: bool, - /// list of newly finalized blocks + /// list of newly finalized blocks, index by slot pub finalized_blocks: HashMap, - /// blockclique + /// blockclique, blocks indexed by slot pub blockclique: HashMap, - /// readonly execution requests and response mpscs + /// queue for readonly execution requests and response mpscs to send back their outputs pub readonly_requests: VecDeque<( ReadOnlyExecutionRequest, mpsc::Sender>, @@ -34,51 +37,58 @@ pub(crate) struct VMInputData { } #[derive(Clone)] -/// VM controller +/// implementation of the execution controller pub struct ExecutionControllerImpl { - /// VM config + /// execution config pub(crate) config: ExecutionConfig, - /// input data to process in the VM loop with a wakeup condition variable + /// input data to process in the VM loop + /// with a wakeup condition variable that needs to be triggered when the data changes pub(crate) input_data: Arc<(Condvar, Mutex)>, - /// execution state + /// current execution state (see execution.rs for details) pub(crate) execution_state: Arc>, } impl ExecutionControllerImpl { - /// reads the list of newly finalized blocks and the new blockclique, if there was a change - /// if found, remove from input queue + /// consumes and returns the input fed to the controller pub(crate) fn consume_input(&mut self) -> VMInputData { std::mem::take(&mut self.input_data.1.lock().expect("VM input data lock failed")) } } impl ExecutionController for ExecutionControllerImpl { - /// Updates blockclique status + /// called to signal changes on the current blockclique, also listing newly finalized blocks + /// + /// # arguments + /// * finalized_blocks: list of newly finalized blocks to be appended to the input finalized blocks + /// * blockclique: new blockclique, replaces the curren one in 
the input fn update_blockclique_status( &self, finalized_blocks: Map, blockclique: Map, ) { + // index newly finalized blocks by slot let mapped_finalized_blocks: HashMap<_, _> = finalized_blocks .into_iter() .map(|(b_id, b)| (b.header.content.slot, (b_id, b))) .collect(); + // index blockclique by slot let mapped_blockclique = blockclique .into_iter() .map(|(b_id, b)| (b.header.content.slot, (b_id, b))) .collect(); + //update input data let mut input_data = self .input_data .1 .lock() .expect("could not lock VM input data"); - input_data.blockclique = mapped_blockclique; - input_data.finalized_blocks.extend(mapped_finalized_blocks); - input_data.blockclique_changed = true; - self.input_data.0.notify_one(); + input_data.blockclique = mapped_blockclique; // replace blockclique + input_data.finalized_blocks.extend(mapped_finalized_blocks); // append finalized blocks + input_data.blockclique_changed = true; // signal a blockclique change + self.input_data.0.notify_one(); // wake up VM loop } - /// Get events optionnally filtered by: + /// Get the generated execution events, optionnally filtered by: /// * start slot /// * end slot /// * emitter address @@ -116,30 +126,35 @@ impl ExecutionController for ExecutionControllerImpl { } /// Executes a readonly request + /// Read-only requests do not modify consesnsus state fn execute_readonly_request( &self, req: ReadOnlyExecutionRequest, ) -> Result { - // queue request + // queue request into input, get response mpsc receiver let resp_rx = { let mut input_data = self .input_data .1 .lock() .expect("could not lock VM input data"); + // limit the read-only queue length if input_data.readonly_requests.len() >= self.config.readonly_queue_length { return Err(ExecutionError::RuntimeError( "too many queued readonly requests".into(), )); } + // prepare the channel to send back the result of the read-only execution let (resp_tx, resp_rx) = std::sync::mpsc::channel::>(); + // append to the queue of input read-only requests input_data.readonly_requests.push_back((req, resp_tx)); + // wake up VM loop self.input_data.0.notify_one(); resp_rx }; - // wait for response + // wait for the result of the execution match resp_rx.recv() { Ok(result) => return result, Err(err) => { @@ -153,18 +168,19 @@ impl ExecutionController for ExecutionControllerImpl { } /// Execution manager +/// Allows creating execution controllers, and stopping the execution worker pub struct ExecutionManagerImpl { - /// shared reference to the VM controller + /// shared reference to the execution controller pub(crate) controller: ExecutionControllerImpl, - /// handle used to join the VM thread + /// handle used to join the worker thread pub(crate) thread_handle: Option>, } impl ExecutionManager for ExecutionManagerImpl { - /// stops the VM + /// stops the worker fn stop(&mut self) { info!("stopping VM controller..."); - // notify the VM thread to stop + // notify the worker thread to stop { let mut input_wlock = self .controller @@ -179,11 +195,10 @@ impl ExecutionManager for ExecutionManagerImpl { if let Some(join_handle) = self.thread_handle.take() { join_handle.join().expect("VM controller thread panicked"); } - info!("VM controller stopped"); } - /// get a shared reference to the VM controller + /// return a new execution controller fn get_controller(&self) -> Box { Box::new(self.controller.clone()) } diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index be88bda12ef..797d4d5fa27 100644 --- a/massa-execution-worker/src/execution.rs +++ 
b/massa-execution-worker/src/execution.rs @@ -1,5 +1,8 @@ // Copyright (c) 2022 MASSA LABS +//! This module deals with executing final and active slots, as well as read-only requests. +//! It also keeps a history of executed slots, thus holding the speculative state of the ledger. + use crate::context::ExecutionContext; use crate::interface_impl::InterfaceImpl; use massa_execution_exports::{ @@ -17,6 +20,7 @@ use std::{ }; use tracing::debug; +/// Used to lock the execution context for exclusive access macro_rules! context_guard { ($self:ident) => { $self @@ -26,115 +30,155 @@ macro_rules! context_guard { }; } -/// structure holding consistent speculative and final execution states +/// Structure holding consistent speculative and final execution states, +/// and allowing access to them. pub(crate) struct ExecutionState { - // VM config + // execution config pub config: ExecutionConfig, - // active execution output history + // History of the outputs of recently executed slots. Slots should be consecutive, newest at the back. + // Whenever an active slot is executed, it is appended at the back of active_history. + // Whenever an executed active slot becomes final, + // its output is popped from the front of active_history and applied to the final state. pub active_history: VecDeque, - // active execution cursor + // a cursor pointing to the highest executed slot pub active_cursor: Slot, - // final execution cursor + // a cursor pointing to the highest executed final slot pub final_cursor: Slot, - // final events + // store containing execution events that became final pub final_events: EventStore, - // final ledger + // final ledger with atomic R/W access pub final_ledger: Arc>, - // execution context + // execution context (see documentation in context.rs) pub execution_context: Arc>, - // execution interface + // execution interface exposing ABI functions to the VM executing bytecode pub execution_interface: Box, } impl ExecutionState { - /// create a new execution state + /// Create a new execution state. This should be called only once at the start of the executon worker. + /// + /// # arguments + /// * config: execution config + /// * final_lefger: atomic access to the final ledger + /// + /// # returns + /// A new ExecutionState pub fn new(config: ExecutionConfig, final_ledger: Arc>) -> ExecutionState { - // get last final slot from final ledger + // Get the slot at the output of which the final ledger is attached. + // This should be among the latest final slots. let last_final_slot = final_ledger .read() .expect("could not r-lock final ledger") .slot; - // init execution context + // Create an empty placeholder execution context, with shared atomic access let execution_context = Arc::new(Mutex::new(ExecutionContext::new( final_ledger.clone(), Default::default(), ))); - // Instantiate the interface used by the assembly simulator. 
+ // Instantiate the interface providing ABI access to the VM, share the execution contex with it let execution_interface = Box::new(InterfaceImpl::new( config.clone(), execution_context.clone(), )); - // build execution state + // build the execution state ExecutionState { config, final_ledger, execution_context, execution_interface, + // empty execution output history: it is not recovered through bootstrap active_history: Default::default(), + // empty final event store: it is not recovered through bootstrap final_events: Default::default(), + // no active slots executed yet: set active_cursor to the last final block active_cursor: last_final_slot, final_cursor: last_final_slot, } } - /// applies an execution output to the final state + /// Applies the output of an execution to the final execution state. + /// The newly applied final output should be from the slot just after the last executed final slot + /// + /// # Arguments + /// * exec_out: execution output to apply pub fn apply_final_execution_output(&mut self, exec_out: ExecutionOutput) { - // apply final ledger changes + // apply ledger changes to the final ledger self.final_ledger .write() .expect("could not lock final ledger for writing") .settle_slot(exec_out.slot, exec_out.ledger_changes); + // update the final ledger's slot self.final_cursor = exec_out.slot; - // update active cursor + // update active cursor: + // if it was at the previous latest final block, set it to point to the new one if self.active_cursor < self.final_cursor { self.active_cursor = self.final_cursor; } - // save generated events to final store + // append generated events to the final event store self.final_events.extend(exec_out.events); } - /// applies an execution output to the active state + /// Applies an execution output to the active (non-final) state + /// The newly active final output should be from the slot just after the last executed active slot + /// + /// # Arguments + /// * exec_out: execution output to apply pub fn apply_active_execution_output(&mut self, exec_out: ExecutionOutput) { - // update active cursor + // update active cursor to reflect the new latest active slot self.active_cursor = exec_out.slot; - // add execution output to history + // add the execution output at the end of the output history self.active_history.push_back(exec_out); } - /// clear execution history + /// Clear the whole execution history, + /// deleting caches on executed non-final slots. pub fn clear_history(&mut self) { // clear history self.active_history.clear(); - // reset active cursor + // reset active cursor to point to the latest final slot self.active_cursor = self.final_cursor; } - /// truncates active slots at the first mismatch - /// between the active execution output history and the planned active_slots + /// This function receives a new sequence of blocks to execute as argument. + /// It then scans the output history to see until which slot this sequence was already executed (and is outputs cached). + /// If a mismatch is found, it means that the sequence of blocks to execute has changed + /// and the existing output cache is truncated to keep output history only until the mismatch slot (excluded). + /// Slots after that point will need to be (re-executed) to account for the new sequence. 
+ /// + /// # Arguments + /// * active_slots: A HashMap mapping each slot to a block or None if the slot is a miss pub fn truncate_history(&mut self, active_slots: &HashMap>) { // find mismatch point (included) let mut truncate_at = None; + // iterate over the output history, in chronological order for (hist_index, exec_output) in self.active_history.iter().enumerate() { + // try to find the corresponding slot in active_slots let found_block_id = active_slots .get(&exec_output.slot) .map(|opt_b| opt_b.as_ref().map(|(b_id, _b)| *b_id)); if found_block_id == Some(exec_output.block_id) { + // the slot number and block ID still match. Continue scanning continue; } + // mismatch found: stop scannig and return the cutoff index truncate_at = Some(hist_index); break; } - // truncate speculative execution output history + // If a mismatch was found if let Some(truncate_at) = truncate_at { + // Truncate the execution output history at the cutoff index (excluded) self.active_history.truncate(truncate_at); + // Now that part of the speculative executions were cancelled, + // update the active cursor to match the latest executed slot. + // The cursor is set to the latest executed final slot if the history is empty. self.active_cursor = self .active_history .back() @@ -142,10 +186,11 @@ impl ExecutionState { } } - /// returns the speculative ledger at the entrance of a given history slot - /// warning: only use in the main loop because the lock on the final ledger - /// at the base of the returned SpeculativeLedger is not held - /// TODO: do not do this anymore but allow the speculative ledger to lazily query any subentry + /// Returns he ledger changes accumulated from the beginning of the output history, + /// up until a provided slot (excluded). + /// Only used in the VM main loop because the lock on the final ledger + /// carried by the returned SpeculativeLedger is not held. + /// TODO optimization: do not do this anymore but allow the speculative ledger to lazily query any subentry /// by scanning through history from end to beginning pub fn get_accumulated_active_changes_at_slot(&self, slot: Slot) -> LedgerChanges { // check that the slot is within the reach of history @@ -160,7 +205,7 @@ impl ExecutionState { panic!("cannot execute at a slot beyond active cursor + 1"); } - // gather the history of changes + // gather the history of changes in the relevant history range let mut accumulated_changes = LedgerChanges::default(); for previous_output in &self.active_history { if previous_output.slot >= slot { @@ -172,13 +217,18 @@ impl ExecutionState { accumulated_changes } - /// execute an operation in the context of a block + /// Execute an operation in the context of a block. + /// Assumes the execution context was initialized at the beginning of the slot. 
+ /// + /// # arguments + /// * operation: operation to execute + /// * block_creator_addr: address of the block creator pub fn execute_operation( &self, operation: &Operation, block_creator_addr: Address, ) -> Result<(), ExecutionError> { - // process ExecuteSC operations only + // process ExecuteSC operations only, ignore other types of operations let (bytecode, max_gas, coins, gas_price) = match &operation.content.op { OperationType::ExecuteSC { data, @@ -189,7 +239,7 @@ impl ExecutionState { _ => return Ok(()), }; - // get sender address + // get the operation's sender address let sender_addr = Address::from_public_key(&operation.content.sender_public_key); // get operation ID @@ -198,13 +248,15 @@ impl ExecutionState { .get_operation_id() .expect("could not compute operation ID"); - // prepare the context + // prepare the current slot context for executing the operation let context_snapshot; { + // get exclusive write access to the contex let mut context = context_guard!(self); - // credit the producer of the block B with max_gas * gas_price parallel coins - // note that errors are deterministic and do not cancel op execution + // Use the context to credit the producer of the block with max_gas * gas_price parallel coins. + // Note that errors are deterministic and do not cancel the operation execution. + // That way, even if the sender sent an invalid operation, the block producer will still get credited. let gas_fees = gas_price.saturating_mul_u64(*max_gas); if let Err(err) = context.transfer_parallel_coins(None, Some(block_creator_addr), gas_fees, false) @@ -215,8 +267,8 @@ impl ExecutionState { ); } - // credit Op's sender with `coins` parallel coins - // note that errors are deterministic and do not cancel op execution + // Credit the operation sender with `coins` parallel coins. + // Note that errors are deterministic and do not cancel op execution. 
if let Err(err) = context.transfer_parallel_coins(None, Some(sender_addr), *coins, false) { @@ -226,24 +278,35 @@ impl ExecutionState { ); } - // save a snapshot of the context state to restore it if the op fails to execute + // save a snapshot of the context state to restore it if the op fails to execute, + // this reverting any changes except the coin transfers above context_snapshot = context.get_snapshot(); - // prepare context for op execution + // set the context gas price to match the one defined in the operation context.gas_price = *gas_price; + + // set the context max gas to match the one defined in the operation context.max_gas = *max_gas; + + // Set the call stack to a single element: + // * the execution will happen in the context of the address of the operation's sender + // * the context will signal that `coins` were creditedto the parallel balance of the sender during that call + // * the context will give the operation's sender write access to its own ledger entry context.stack = vec![ExecutionStackElement { address: sender_addr, coins: *coins, owned_addresses: vec![sender_addr], }]; + + // set the context origin operation ID context.origin_operation_id = Some(operation_id); }; - // run the intepreter + // run the VM on the bytecode contained in the operation let run_result = massa_sc_runtime::run(bytecode, *max_gas, &*self.execution_interface); if let Err(err) = run_result { - // there was an error during bytecode execution: cancel the effects of the execution + // there was an error during bytecode execution: + // cancel the effects of the execution by resetting the context to the previously saved snapshot let mut context = context_guard!(self); context.origin_operation_id = None; context.reset_to_snapshot(context_snapshot); @@ -256,8 +319,15 @@ impl ExecutionState { Ok(()) } - /// executes a full slot without causing any changes to the state, - /// and yields an execution output + /// Executes a full slot (with or without a block inside) without causing any changes to the state, + /// just yielding the execution output. 
+ /// + /// # Arguments + /// * slot: slot to execute + /// * opt_block: block ID if there is a block a that slot, otherwise None + /// + /// # Returns + /// An `ExecutionOutput` structure summarizing the output of the executed slot pub fn execute_slot(&self, slot: Slot, opt_block: Option<(BlockId, Block)>) -> ExecutionOutput { // get optional block ID and creator address let (opt_block_id, opt_block_creator_addr) = opt_block @@ -265,10 +335,10 @@ impl ExecutionState { .map(|(b_id, b)| (*b_id, Address::from_public_key(&b.header.content.creator))) .unzip(); - // accumulate previous active changes from history + // accumulate previous active changes from output history let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); - // prepare execution context for the whole active slot + // create a new execution context for the whole active slot let execution_context = ExecutionContext::new_active_slot( slot, opt_block_id, @@ -278,7 +348,7 @@ impl ExecutionState { // note that here, some pre-operations (like crediting block producers) can be performed before the lock - // set the execution context for slot execution + // apply the created execution context for slot execution *context_guard!(self) = execution_context; // note that here, async operations should be executed @@ -287,7 +357,8 @@ impl ExecutionState { if let (Some((block_id, block)), Some(block_creator_addr)) = (opt_block, opt_block_creator_addr) { - // execute operations + // Try executing the operations of this block in the order in which they appear in the block. + // Errors are logged but do not interrupt the execution of the slot. for (op_idx, operation) in block.operations.iter().enumerate() { if let Err(err) = self.execute_operation(operation, block_creator_addr) { debug!( @@ -302,21 +373,30 @@ impl ExecutionState { context_guard!(self).take_execution_output() } - /// execute a readonly request + /// Execues a read-only execution request. + /// The executed bytecode appears to be able to read and write the consensus state, + /// but all accumulated changes are simply returned as an ExecutionOutput object, + /// and not actually applied to the consensus state. 
+ /// + /// # Arguments + /// * req: a read-only execution request + /// + /// # Returns + /// ExecutionOutput describing the output of the execution, or an error pub(crate) fn execute_readonly_request( &self, req: ReadOnlyExecutionRequest, ) -> Result { - // set the exec slot just after the latest executed active slot + // set the execution slot to be the one after the latest executed active slot let slot = self .active_cursor .get_next_slot(self.config.thread_count) .expect("slot overflow in readonly execution"); - // get previous changes + // accumulate ledger changes that happened in the output history before this slot let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); - // create readonly execution context + // create a readonly execution context let max_gas = req.max_gas; let bytecode = req.bytecode.clone(); let execution_context = ExecutionContext::new_readonly( @@ -337,7 +417,7 @@ impl ExecutionState { Ok(context_guard!(self).take_execution_output()) } - /// gets a full ledger entry both at final and active states + /// Gets a full ledger entry both at the latest final and active executed slots /// TODO: this can be heavily optimized, see comments /// /// # returns @@ -379,7 +459,7 @@ impl ExecutionState { (final_entry, active_entry) } - /// Get events optionnally filtered by: + /// Gets execution events optionnally filtered by: /// * start slot /// * end slot /// * emitter address diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index f58eea7b877..9b379dd712a 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS -/// Implementation of the interface used in the execution external library -/// +//! Implementation of ABI that the VM provides to the executed bytecode. + use crate::context::ExecutionContext; use anyhow::{bail, Result}; use massa_execution_exports::ExecutionConfig; @@ -18,6 +18,7 @@ use std::str::FromStr; use std::sync::{Arc, Mutex}; use tracing::debug; +/// helper for locking the context mutex macro_rules! context_guard { ($self:ident) => { $self @@ -27,9 +28,12 @@ macro_rules! context_guard { }; } +/// an implementation of the Interface trait (see masa-sc-runtime crate) #[derive(Clone)] pub(crate) struct InterfaceImpl { + /// execution config config: ExecutionConfig, + /// exclusive access to the execution context (see context.rs) context: Arc>, } @@ -45,6 +49,8 @@ impl InterfaceClone for InterfaceImpl { } } +/// Implementation of the Interface trait providing an ABI to VM-executed bytecode. +/// See the massa-sc-runtime crate for a functional description of the trait. 
impl Interface for InterfaceImpl { fn print(&self, message: &str) -> Result<()> { debug!("SC print: {}", message); From 11f496884ad53e73518d35fa317dfed68690b439 Mon Sep 17 00:00:00 2001 From: damip Date: Tue, 22 Feb 2022 10:30:33 +0100 Subject: [PATCH 35/73] improve documentation --- massa-execution-worker/read-only.md | 38 ---- massa-execution-worker/src/execution.rs | 2 +- massa-execution-worker/src/interface_impl.rs | 210 ++++++++++++++---- massa-execution-worker/src/lib.rs | 13 ++ massa-execution-worker/src/spec.md | 71 ------ .../src/speculative_ledger.rs | 111 ++++++--- 6 files changed, 272 insertions(+), 173 deletions(-) delete mode 100644 massa-execution-worker/read-only.md delete mode 100644 massa-execution-worker/src/spec.md diff --git a/massa-execution-worker/read-only.md b/massa-execution-worker/read-only.md deleted file mode 100644 index 8c17ccd6614..00000000000 --- a/massa-execution-worker/read-only.md +++ /dev/null @@ -1,38 +0,0 @@ -# Read-only execution - -# Rationale - -When using Massa in a Web3 context for example, one should be able to perform read-only Smart Contract calls. - -See: https://ethereum.stackexchange.com/questions/765/what-is-the-difference-between-a-transaction-and-a-call/770 - -# Massa implementation - -## API - -Add a "sc_readonly_call" API endpoint - -Parameters: -* max_gas: u64 // max gas allowed for the readonly run -* simulated_gas_price: Amount // simulated gas price to expose to the smart contract context -* simulated_caller: Option
// pretend this address is executing the SC, if none provided a random one will be used. -* bytecode: `Vec` // bytecode to execute - -Return value: -* executed_at: Slot // slot at which the execution occurred -* result: - * (optional) error: Error - * (optional) output_events: `Vec` // output events generated during execution - - ## Operation - -* when the sc_readonly_call is called, the bytecode's main() function will be called with the following execution context: - * the execution will be done from the point of view of the latest slot at the current timestamp (see VM slot filler) - * Clear and update the context. - * set the call stack to simulated_caller_address - * set max_gas to its chosen value - * set gas_price to simulated_gas_price - * TODO: block ? maybe just assume a miss - * Note: do not apply changes to the ledger. - -* during the call, everything happens as with a normal ExecuteSC call, but when the call finishes, its effects are rollbacked (like when a SC execution fails) \ No newline at end of file diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 797d4d5fa27..6436be29fab 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -50,7 +50,7 @@ pub(crate) struct ExecutionState { pub final_ledger: Arc>, // execution context (see documentation in context.rs) pub execution_context: Arc>, - // execution interface exposing ABI functions to the VM executing bytecode + // execution interface allowing the VM runtime to access the Massa context pub execution_interface: Box, } diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index 9b379dd712a..5436c1c5a1f 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -1,6 +1,9 @@ // Copyright (c) 2022 MASSA LABS -//! Implementation of ABI that the VM provides to the executed bytecode. +//! Implementation of the interface between massa-execution-worker and massa-sc-runtime. +//! This allows the VM runtime to acceess the Massa execution context, +//! for example to interact with the ledger. +//! See the definition of Interface in the massa-sc-runtime crate for functional details. use crate::context::ExecutionContext; use anyhow::{bail, Result}; @@ -38,45 +41,65 @@ pub(crate) struct InterfaceImpl { } impl InterfaceImpl { + /// creates a new InterfaceImpl + /// + /// # Arguments + /// * config: execution config + /// * context: exclusive access to the current execution context (see context.rs) pub fn new(config: ExecutionConfig, context: Arc>) -> InterfaceImpl { InterfaceImpl { config, context } } } impl InterfaceClone for InterfaceImpl { + /// allows cloning a boxed InterfaceImpl fn clone_box(&self) -> Box { Box::new(self.clone()) } } -/// Implementation of the Interface trait providing an ABI to VM-executed bytecode. -/// See the massa-sc-runtime crate for a functional description of the trait. +/// Implementation of the Interface trait providing functions for massa-sc-runtime to call +/// in order to interact with the execution context during bytecode execution. +/// See the massa-sc-runtime crate for a functional description of the trait and its methods. +/// Note that massa-sc-runtime uses basic types (str for addresses, u64 for amounts...) for genericity. 
impl Interface for InterfaceImpl { + /// prints a message in the node logs at log level 3 (debug) fn print(&self, message: &str) -> Result<()> { debug!("SC print: {}", message); Ok(()) } + /// Initialize the call when bytecode calls a function from another bytecode + /// This function transfers the coins passed as parameter, + /// prepares the current execution context by pushing a new element on the top of the call stack, + /// and returns the target bytecode from the ledger. + /// + /// # Arguments + /// * address: string representation of the target address on which the bytecode will be called + /// * raw_coins: raw representation (without decimal factor) of the amount of parallel coins to transfer from the caller address to the target address at the beginning of the call + /// + /// # Returns + /// The target bytecode or an error fn init_call(&self, address: &str, raw_coins: u64) -> Result> { - // get target + // get target address let to_address = massa_models::Address::from_str(address)?; // write-lock context let mut context = context_guard!(self); - // get bytecode + // get target bytecode let bytecode = match context.get_bytecode(&to_address) { Some(bytecode) => bytecode, None => bail!("bytecode not found for address {}", to_address), }; - // get caller + // get caller address let from_address = match context.stack.last() { Some(addr) => addr.address, _ => bail!("failed to read call stack current address"), }; - // transfer coins + // transfer coins from caller to target address let coins = massa_models::Amount::from_raw(raw_coins); if let Err(err) = context.transfer_parallel_coins(Some(from_address), Some(to_address), coins, true) @@ -90,16 +113,19 @@ impl Interface for InterfaceImpl { ); } - // prepare context + // push a new call stack element on top of the current call stack context.stack.push(ExecutionStackElement { address: to_address, coins, owned_addresses: vec![to_address], }); + // return the target bytecode Ok(bytecode) } + /// Called to finish the call process after a bytecode calls a function from another one. + /// This function just pops away the top element of the call stack. fn finish_call(&self) -> Result<()> { let mut context = context_guard!(self); @@ -110,7 +136,11 @@ impl Interface for InterfaceImpl { Ok(()) } - /// Returns zero as a default if address not found. + /// Gets the parallel balance of the current address address (top of the stack). + /// + /// # Returns + /// The raw representation (no decimal factor) of the parallel balance of the address, + /// or zero if the address is not found in the ledger. fn get_balance(&self) -> Result { let context = context_guard!(self); let address = context.get_current_address()?; @@ -120,7 +150,14 @@ impl Interface for InterfaceImpl { .to_raw()) } - /// Returns zero as a default if address not found. + /// Gets the parallel balance of arbitrary address passed as argument. + /// + /// # Arguments + /// * address: string representation of the address for which to get the balance + /// + /// # Returns + /// The raw representation (no decimal factor) of the parallel balance of the address, + /// or zero if the address is not found in the ledger. fn get_balance_for(&self, address: &str) -> Result { let address = massa_models::Address::from_str(address)?; Ok(context_guard!(self) @@ -129,20 +166,29 @@ impl Interface for InterfaceImpl { .to_raw()) } - /// Requires a new address that contains the sent bytecode. + /// Creates a new ledger entry with the initial bytecode given as argument. 
+ /// A new unique address is generated for that entry and returned. /// - /// Generate a new address with a concatenation of the block_id hash, the - /// operation index in the block and the index of address owned in context. + /// # Arguments + /// * bytecode: the bytecode to set for the newly created address /// - /// Insert in the ledger the given bytecode in the generated address - fn create_module(&self, module: &[u8]) -> Result { - match context_guard!(self).create_new_sc_address(module.to_vec()) { + /// # Returns + /// The string representation of the newly created address + fn create_module(&self, bytecode: &[u8]) -> Result { + match context_guard!(self).create_new_sc_address(bytecode.to_vec()) { Ok(addr) => Ok(addr.to_bs58_check()), Err(err) => bail!("couldn't create new SC address: {}", err), } } - /// Requires the data at the address + /// Gets a datastore value by key for a given address. + /// + /// # Arguments + /// * address: string representation of the address + /// * key: string key of the datastore entry to retrieve + /// + /// # Returns + /// The datastore value matching the provided key, if found, otherwise an error. fn raw_get_data_for(&self, address: &str, key: &str) -> Result> { let addr = &massa_models::Address::from_bs58_check(address)?; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); @@ -153,10 +199,12 @@ impl Interface for InterfaceImpl { } } - /// Requires to replace the data in the current address + /// Sets a datastore entry for a given address /// - /// Note: - /// The execution lib will allways use the current context address for the update + /// # Arguments + /// * address: string representation of the address + /// * key: string key of the datastore entry to set + /// * value: new value to set fn raw_set_data_for(&self, address: &str, key: &str, value: &[u8]) -> Result<()> { let addr = massa_models::Address::from_str(address)?; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); @@ -165,6 +213,14 @@ impl Interface for InterfaceImpl { Ok(()) } + /// Checks if a datastore entry exists for a given address. + /// + /// # Arguments + /// * address: string representation of the address + /// * key: string key of the datastore entry to retrieve + /// + /// # Returns + /// true if the address exists and has the entry matching the provided key in its datastore, otherwise false fn has_data_for(&self, address: &str, key: &str) -> Result { let addr = massa_models::Address::from_str(address)?; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); @@ -172,6 +228,13 @@ impl Interface for InterfaceImpl { Ok(context.has_data_entry(&addr, &key)) } + /// Gets a datastore value by key for a the current address (top of the call stack). + /// + /// # Arguments + /// * key: string key of the datastore entry to retrieve + /// + /// # Returns + /// The datastore value matching the provided key, if found, otherwise an error. fn raw_get_data(&self, key: &str) -> Result> { let context = context_guard!(self); let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); @@ -182,6 +245,12 @@ impl Interface for InterfaceImpl { } } + /// Sets a datastore entry for the current address (top of the call stack). 
+ /// + /// # Arguments + /// * address: string representation of the address + /// * key: string key of the datastore entry to set + /// * value: new value to set fn raw_set_data(&self, key: &str, value: &[u8]) -> Result<()> { let mut context = context_guard!(self); let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); @@ -190,6 +259,13 @@ impl Interface for InterfaceImpl { Ok(()) } + /// Checks if a datastore entry exists for the current address (top of the call stack). + /// + /// # Arguments + /// * key: string key of the datastore entry to retrieve + /// + /// # Returns + /// true if the address exists and has the entry matching the provided key in its datastore, otherwise false fn has_data(&self, key: &str) -> Result { let context = context_guard!(self); let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); @@ -197,19 +273,39 @@ impl Interface for InterfaceImpl { Ok(context.has_data_entry(&addr, &key)) } - /// hash data + /// Hashses arbitrary data + /// + /// # Arguments + /// * data: data bytes to hash + /// + /// # Returns + /// The string representation of the resulting hash fn hash(&self, data: &[u8]) -> Result { Ok(massa_hash::hash::Hash::compute_from(data).to_bs58_check()) } - /// convert a pubkey to an address + /// Converts a pubkey to an address + /// + /// # Arguments + /// * public_key: string representation of the public key + /// + /// # Returns + /// The string representation of the resulting address fn address_from_public_key(&self, public_key: &str) -> Result { let public_key = massa_signature::PublicKey::from_bs58_check(public_key)?; let addr = massa_models::Address::from_public_key(&public_key); Ok(addr.to_bs58_check()) } - /// Verify signature + /// Verifies a signature + /// + /// # Arguments + /// * data: the data bytes that were signed + /// * signature: string representation of the signature + /// * public key: string representation of the public key to check against + /// + /// # Returns + /// true if the signature verification succeeded, false otherwise fn signature_verify(&self, data: &[u8], signature: &str, public_key: &str) -> Result { let signature = match massa_signature::Signature::from_bs58_check(signature) { Ok(sig) => sig, @@ -223,9 +319,11 @@ impl Interface for InterfaceImpl { Ok(massa_signature::verify_signature(&h, &signature, &public_key).is_ok()) } - /// Transfer parallel coins from the current address to a target address - /// to_address: target address - /// raw_amount: amount to transfer (in raw u64) + /// Transfer parallel coins from the current address (top of the call stack) towards a target address. + /// + /// # Arguments + /// * to_address: string representation of the address to which the coins are sent + /// * raw_amount: raw representation (no decimal factor) of the amount of coins to transfer fn transfer_coins(&self, to_address: &str, raw_amount: u64) -> Result<()> { let to_address = massa_models::Address::from_str(to_address)?; let amount = massa_models::Amount::from_raw(raw_amount); @@ -235,10 +333,12 @@ impl Interface for InterfaceImpl { Ok(()) } - /// Transfer coins from the current address to a target address - /// from_address: source address - /// to_address: target address - /// raw_amount: amount to transfer (in raw u64) + /// Transfer parallel coins from a given address towards a target address. 
+ /// + /// # Arguments + /// * from_address: string representation of the address that is sending the coins + /// * to_address: string representation of the address to which the coins are sent + /// * raw_amount: raw representation (no decimal factor) of the amount of coins to transfer fn transfer_coins_for( &self, from_address: &str, @@ -253,7 +353,14 @@ impl Interface for InterfaceImpl { Ok(()) } - /// Return the list of owned adresses of a given SC user + /// Returns the list of owned adresses (top of the call stack). + /// Those addresses are the ones the current execution context has write access to, + /// typically it includes the current address itself, + /// but also the ones that were created previously by the current call to allow initializing them. + /// + /// # Returns + /// A vector with the string representation of each owned address. + /// Note that the ordering of this vector is deterministic and conserved. fn get_owned_addresses(&self) -> Result> { Ok(context_guard!(self) .get_current_owned_addresses()? @@ -262,7 +369,10 @@ impl Interface for InterfaceImpl { .collect()) } - /// Return the call stack (addresses) + /// Returns the addresses in the call stack, from the bottom to the top. + /// + /// # Returns + /// A vector with the string representation of each call stack address. fn get_call_stack(&self) -> Result> { Ok(context_guard!(self) .get_call_stack() @@ -271,21 +381,34 @@ impl Interface for InterfaceImpl { .collect()) } - /// Get the amount of coins that have been made available for use by the caller of the currently executing code. + /// Gets the amount of coins that have been ransferred at the beginning of the call. + /// See the init_call method. + /// + /// # Returns + /// The raw representation (no decimal factor) of the amount of coins fn get_call_coins(&self) -> Result { Ok(context_guard!(self).get_current_call_coins()?.to_raw()) } - /// generate an execution event and stores it + /// Emits an execution event to be stored. 
+ /// + /// # Arguments: + /// data: the string data that is the payload of the event fn generate_event(&self, data: String) -> Result<()> { let mut execution_context = context_guard!(self); - // prepare id computation - // it is the hash of (slot, index_at_slot, readonly) + // Generate a unique event ID + // Initialize a seed from the current slot let mut to_hash: Vec = execution_context.slot.to_bytes_key().to_vec(); + // Append the index of the emitted event during the current slot to_hash.append(&mut execution_context.created_event_index.to_be_bytes().to_vec()); + // Append 0u8 if the context is readonly, 1u8 otherwise + // This is used to allow event ID collisions between readonly and active executions to_hash.push(!execution_context.read_only as u8); + // Hash the seed to generate the ID + let id = SCOutputEventId(Hash::compute_from(&to_hash)); + // Gather contextual information from the execution context let context = EventExecutionContext { slot: execution_context.slot, block: execution_context.opt_block_id, @@ -294,14 +417,21 @@ impl Interface for InterfaceImpl { index_in_slot: execution_context.created_event_index, origin_operation_id: execution_context.origin_operation_id, }; - let id = SCOutputEventId(Hash::compute_from(&to_hash)); + + // Generate the event let event = SCOutputEvent { id, context, data }; + + // Increment the event counter fot this slot execution_context.created_event_index += 1; + + // Add the event to the context store execution_context.events.insert(id, event); + Ok(()) } /// Returns the current time (millisecond unix timestamp) + /// Note that in order to ensure determinism, this is actually the time of the context slot. fn get_time(&self) -> Result { let slot = context_guard!(self).slot; let ts = get_block_slot_timestamp( @@ -313,7 +443,11 @@ impl Interface for InterfaceImpl { Ok(ts.to_millis()) } - /// Returns a random number (unsafe: can be predicted and manipulated) + /// Returns a pseudo-random deterministic i64 number + /// + /// # Warning + /// This random number generator is unsafe: + /// it can be both predicted and manipulated before the execution fn unsafe_random(&self) -> Result { let distr = rand::distributions::Uniform::new_inclusive(i64::MIN, i64::MAX); Ok(context_guard!(self).unsafe_rng.sample(distr)) diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index 816c2b0c61a..477dca337bf 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -1,5 +1,18 @@ // Copyright (c) 2022 MASSA LABS +//! +//! +//! +//! +//! +//! +//! TODO algo description +//! +//! +//! +//! +//! + #![feature(map_first_last)] #![feature(unzip_option)] diff --git a/massa-execution-worker/src/spec.md b/massa-execution-worker/src/spec.md deleted file mode 100644 index b2dc54ebe6e..00000000000 --- a/massa-execution-worker/src/spec.md +++ /dev/null @@ -1,71 +0,0 @@ -We described here what should be done whan a bytecode call another -(spoted by get_module in the interface) - -```rust - -/// ABI allowing a contract to call another. 
-fn _call(shared_env: &SharedExecutionContext, addr: Address, func_name: String, max_gas: u64) { - //TODO add arbitrary input parameters and return value - - //TODO metering / mem limit - - // prepare execution - let old_max_gas; - let old_coins; - let target_module; - let ledger_push; - { - let mut exec_context_guard = shared_env.0.lock().unwrap(); - - // TODO make sure max_gas >= context.remaining_gas - - // get target module - if let Some(module) = (*exec_context_guard).ledger_step._get_module(&addr) { - target_module = module; - } else { - // no module to call - // TODO error - return; - } - - // save old context values - ledger_push = (*exec_context_guard).ledger_step.caused_changes.clone(); - old_max_gas = (*exec_context_guard).max_gas; // save old max gas - old_coins = (*exec_context_guard).coins; - - // update context - (*exec_context_guard).max_gas = max_gas; - (*exec_context_guard).coins = AMOUNT_ZERO; // TODO maybe allow sending coins in the call - (*exec_context_guard).call_stack.push_back(addr); - } - - // run - let mut run_failed = false; - match Instance::new(&target_module, &ImportObject::new()) // TODO bring imports into the execution context (?) - .map(|inst| inst.exports.get_function(&func_name).unwrap().clone()) - .map(|f| f.native::<(), ()>().unwrap()) // TODO figure out the "native" explicit parameters - .map(|f| f.call()) - { - Ok(_rets) => { - // TODO check what to do with the return values. - } - Err(_err) => { - // failed to find target func, or invalid parameters, or execution error - run_failed = true; - } - } - - // unstack execution context - { - let mut exec_context_guard = shared_env.0.lock().unwrap(); - (*exec_context_guard).max_gas = old_max_gas; - (*exec_context_guard).coins = old_coins; - (*exec_context_guard).call_stack.pop_back(); - if run_failed { - // if the run failed, cancel its consequences on the ledger - (*exec_context_guard).ledger_step.caused_changes = ledger_push; - } - } -} - -``` \ No newline at end of file diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index bb9c7dd771f..db20ce23b25 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -1,30 +1,44 @@ // Copyright (c) 2022 MASSA LABS +//! The speculative ledger represents, in a compressed way, +//! the state of the ledger at an arbitrary execution slot. +//! It never actually writes to the consensus state +//! but keeps track of the changes that were applied to it since its creation. + use massa_execution_exports::ExecutionError; use massa_hash::hash::Hash; use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; use massa_models::{Address, Amount}; use std::sync::{Arc, RwLock}; -/// represents a speculative ledger state combining -/// data from the final ledger, previous speculative changes, -/// and accumulated changes since the construction of the object +/// The SpeculativeLedger contains an exclusive reference to the final ledger (read-only), +/// a list of existing changes that happened o the ledger since its finality, +/// as well as an extra list of "added" changes. +/// The SpeculativeLedger makes it possible to transparently manipulate a virtual ledger +/// that takes into account all those ledger changes and allows adding more +/// while keeping track of all the newly added changes, and never writing in the final ledger. pub struct SpeculativeLedger { - /// final ledger + /// Exclusive access to the final ledger. For reading only. 
final_ledger: Arc>, - /// accumulation of previous changes + /// Accumulation of changes that previously happened to the ledger since finality. + /// This value is not modified by changes applied to the SpeculativeLedger. + /// /// TODO maybe have the history directly here, /// so that we can avoid accumulating all the changes at every slot /// but only lazily query addresses backwards in history (to avoid useless computations) with caching previous_changes: LedgerChanges, - /// list of added changes + /// list of ledger changes that were applied to this SpeculativeLedger since its creation added_changes: LedgerChanges, } impl SpeculativeLedger { /// creates a new SpeculativeLedger + /// + /// # Arguments + /// * final_ledger: exclusive access to the final ledger (for reading only) + /// * previous_changes: accumulation of changes that previously happened to the ledger since finality pub fn new(final_ledger: Arc>, previous_changes: LedgerChanges) -> Self { SpeculativeLedger { final_ledger, @@ -33,22 +47,29 @@ impl SpeculativeLedger { } } - /// takes the added changes (move) and resets added changes + /// Returns the changes caused to the SpeculativeLedger since its creation, + /// and resets their local value to nothing. pub fn take(&mut self) -> LedgerChanges { std::mem::take(&mut self.added_changes) } - /// takes a snapshot (clone) of the added changes + /// Takes a snapshot (clone) of the changes caused to the SpeculativeLedger since its creation pub fn get_snapshot(&self) -> LedgerChanges { self.added_changes.clone() } - /// resets to a snapshot of added ledger changes + /// Resets the SpeculativeLedger to a snapshot (see get_snapshot method) pub fn reset_to_snapshot(&mut self, snapshot: LedgerChanges) { self.added_changes = snapshot; } - /// gets the parallel balance of an address + /// Gets the effective parallel balance of an address + /// + /// # Arguments: + /// addr: the address to query + /// + /// # Returns + /// Some(Amount) if the address was found, otherwise None pub fn get_parallel_balance(&self, addr: &Address) -> Option { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.get_parallel_balance_or_else(addr, || { @@ -62,7 +83,13 @@ impl SpeculativeLedger { }) } - /// gets the bytecode of an address + /// Gets the effective bytecode of an address + /// + /// # Arguments: + /// addr: the address to query + /// + /// # Returns + /// Some(Vec) if the address was found, otherwise None pub fn get_bytecode(&self, addr: &Address) -> Option> { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.get_bytecode_or_else(addr, || { @@ -77,7 +104,7 @@ impl SpeculativeLedger { /// Transfers parallel coins from one address to another. /// No changes are retained in case of failure. - /// The spending address, if defined, must exist + /// The spending address, if defined, must exist. 
/// /// # parameters /// * from_addr: optional spending address (use None for pure coin creation) @@ -92,7 +119,7 @@ impl SpeculativeLedger { // init empty ledger changes let mut changes = LedgerChanges::default(); - // spend coins from sender address (if any) + // simulate spending coins from sender address (if any) if let Some(from_addr) = from_addr { let new_balance = self .get_parallel_balance(&from_addr) @@ -106,7 +133,7 @@ impl SpeculativeLedger { changes.set_parallel_balance(from_addr, new_balance); } - // credit coins to destination address (if any) + // simulate crediting coins to destination address (if any) // note that to_addr can be the same as from_addr if let Some(to_addr) = to_addr { let new_balance = changes @@ -119,13 +146,19 @@ impl SpeculativeLedger { changes.set_parallel_balance(to_addr, new_balance); } - // apply changes + // apply the simulated changes to the speculative ledger self.added_changes.apply(changes); Ok(()) } - /// checks if an address exists + /// Checks if an address exists in the speculative ledger + /// + /// # Arguments: + /// addr: the address to query + /// + /// # Returns + /// true if the address was found, otherwise false pub fn entry_exists(&self, addr: &Address) -> bool { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.entry_exists_or_else(addr, || { @@ -138,7 +171,11 @@ impl SpeculativeLedger { }) } - /// creates a new smart contract address with initial bytecode + /// Creates a new smart contract address with initial bytecode. + /// + /// # Arguments + /// * addr: address to create + /// * bytecode: bytecode to set in the new ledger entry pub fn create_new_sc_address( &mut self, addr: Address, @@ -148,11 +185,15 @@ impl SpeculativeLedger { Ok(self.added_changes.set_bytecode(addr, bytecode)) } - /// sets the bytecode of an address - /// fails if the address doesn't exist + /// Sets the bytecode associated to an address in the ledger. + /// Fails if the address doesn't exist. + /// + /// # Arguments + /// * addr: target address + /// * bytecode: bytecode to set for that address #[allow(dead_code)] // TODO remove when it is used pub fn set_bytecode(&mut self, addr: Address, bytecode: Vec) -> Result<(), ExecutionError> { - // check for existence + // check for address existence if !self.entry_exists(&addr) { return Err(ExecutionError::RuntimeError(format!( "could not set bytecode for address {}: entry does not exist", @@ -160,13 +201,20 @@ impl SpeculativeLedger { ))); } - //set bytecode + // set the bytecode of that address self.added_changes.set_bytecode(addr, bytecode); Ok(()) } - /// gets a copy of a data entry for a given address + /// Gets a copy of a datastore value for a given address and datastore key + /// + /// # Arguments + /// * addr: address to query + /// * key: key to query in the address' datastore + /// + /// # Returns + /// Some(Vec) if the value was found, None if the address does not exist or if the key is not in its datastore. 
pub fn get_data_entry(&self, addr: &Address, key: &Hash) -> Option> { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.get_data_entry_or_else(addr, key, || { @@ -179,7 +227,14 @@ impl SpeculativeLedger { }) } - /// checks if a data entry exists for a given address + /// Checks if a data entry exists for a given address + /// + /// # Arguments + /// * addr: address to query + /// * key: datastore key to look for + /// + /// # Returns + /// true if the key exists in the address' datastore, false otherwise pub fn has_data_entry(&self, addr: &Address, key: &Hash) -> bool { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.has_data_entry_or_else(addr, key, || { @@ -192,8 +247,14 @@ impl SpeculativeLedger { }) } - /// sets an entry for an address - /// fails if the address doesn't exist + /// Sets a dataset entry for a given address in the ledger. + /// Fails if the address doesn't exist. + /// If the datastore entry does not exist, it is created. + /// + /// # Arguments + /// * addr: target address + /// * key: datastore key + /// * data: value to associate to the datasotre key pub fn set_data_entry( &mut self, addr: &Address, From b410f79980ba528d2f2146d9862c56ce16b3c1c7 Mon Sep 17 00:00:00 2001 From: damip Date: Tue, 22 Feb 2022 13:03:49 +0100 Subject: [PATCH 36/73] add documentation --- massa-execution-exports/src/config.rs | 2 +- massa-execution-exports/src/lib.rs | 15 +- massa-execution-worker/src/controller.rs | 8 +- massa-execution-worker/src/execution.rs | 5 + massa-execution-worker/src/lib.rs | 54 ++- massa-execution-worker/src/vm_thread.rs | 459 ------------------- massa-execution-worker/src/worker.rs | 548 +++++++++++++++++++++++ 7 files changed, 619 insertions(+), 472 deletions(-) delete mode 100644 massa-execution-worker/src/vm_thread.rs create mode 100644 massa-execution-worker/src/worker.rs diff --git a/massa-execution-exports/src/config.rs b/massa-execution-exports/src/config.rs index 964243f4a0a..0822734a8c9 100644 --- a/massa-execution-exports/src/config.rs +++ b/massa-execution-exports/src/config.rs @@ -13,7 +13,7 @@ pub struct ExecutionConfig { pub max_final_events: usize, /// number of threads pub thread_count: u8, - /// extra lag to add on the cursor to improve performance + /// extra lag to add on the execution cursor to improve performance pub cursor_delay: MassaTime, /// time compensation in milliseconds pub clock_compensation: i64, diff --git a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs index f4d29de4f0c..3794cb73794 100644 --- a/massa-execution-exports/src/lib.rs +++ b/massa-execution-exports/src/lib.rs @@ -3,22 +3,27 @@ //! # Overview //! //! This crate provides all the facilities to interact with a running execution worker (massa-execution-worker crate) -//! that is in charge of executing operations containing bytecode in a virtual machine, +//! that is in charge of executing operations in a virtual machine, //! and applying the effects of the execution to a ledger. //! //! # Usage //! -//! When an execution worker is launched to run in a separate worker thread for the whole duration of the process and -//! apply incoming requests, an instance of ExecutionManager is returned (see the documentation of massa-execution-worker). +//! When an execution worker is launched to run in a separate thread for the whole duration of the process, +//! 
an instance of ExecutionManager is returned (see the documentation of start_execution_worker in massa-execution-worker). //! //! ExecutionManager allows stopping the execution worker thread, //! but it also allows generating as many instances of ExecutionController as necessary. //! //! Each ExecutionController allows sending updates on the latest blockclique changes to the execution worker -//! for it to keep track of them and execute the bytecode present in blocks. +//! for it to keep track of them and execute the operations present in blocks. //! It also allows various read-only queries such as executing bytecode -//! while ignoring all the changes it would cause to the consensus state, +//! while ignoring all the changes it would cause to the consensus state (read-only execution), //! or reading the state at the output of the executed blockclique blocks. +//! +//! # Test exports +//! +//! When the crate feature `testing` is enabled, tooling useful for testing purposes is exported. +//! See test_exports/ for details. mod config; mod controller_traits; diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 1de1866a398..11e1cd78eb4 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -18,7 +18,7 @@ use std::collections::{HashMap, VecDeque}; use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock}; use tracing::info; -/// structure used to communicate with the VM thread +/// structure used to communicate with execution thread #[derive(Default)] pub(crate) struct VMInputData { /// set stop to true to stop the thread @@ -179,7 +179,7 @@ pub struct ExecutionManagerImpl { impl ExecutionManager for ExecutionManagerImpl { /// stops the worker fn stop(&mut self) { - info!("stopping VM controller..."); + info!("stopping Execution controller..."); // notify the worker thread to stop { let mut input_wlock = self @@ -191,11 +191,11 @@ impl ExecutionManager for ExecutionManagerImpl { input_wlock.stop = true; self.controller.input_data.0.notify_one(); } - // join the VM thread + // join the execution thread if let Some(join_handle) = self.thread_handle.take() { join_handle.join().expect("VM controller thread panicked"); } - info!("VM controller stopped"); + info!("Execution controller stopped"); } /// return a new execution controller diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 6436be29fab..7e8f679c5d9 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -2,6 +2,11 @@ //! This module deals with executing final and active slots, as well as read-only requests. //! It also keeps a history of executed slots, thus holding the speculative state of the ledger. +//! +//! Execution usually happens in the following way: +//! * an execution context is set up +//! * the VM is called for execution within this context +//! * the output of the execution is extracted from the context use crate::context::ExecutionContext; use crate::interface_impl::InterfaceImpl; diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index 477dca337bf..d3e1861b233 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -1,17 +1,65 @@ // Copyright (c) 2022 MASSA LABS +//! # General description //! +//! The execution worker launches a persistent thread allowing the execution +//! of operations that can contain executable bytecode and managing interactions with the ledger. +//! 
When the worker is launched, an ExecutionManager is returned,
+//! which allows stopping the worker
+//! and generating an arbitrary number of ExecutionController instances through which users interact with the worker.
//!
//! The worker is fed through the ExecutionController with information about blockclique changes and newly finalized blocks
+//! and will execute the operations in those blocks, as well as pending asynchronous operations on empty slots.
+//! The worker can also query the current state of the ledger, and simulate operations in a read-only context.
//!
//! The execution worker has shared read access to the final ledger,
+//! and must be the only module with runtime write access to the final ledger.
//!
//! # A note on finality
//!
//!
//!
//! The operations contained in a final slot are ready to be executed as final
//! only once all the previous slots are final and their operations are executed as final or ready to be so.
//! This ensures the sequentiality of the final executions of operations,
//! thus ensuring that writes to the final ledger are irreversible.
//!
//! Slots are called "active" if they have not been executed as final, and are not ready to be executed as final.
//! Active slots can therefore be final slots, or slots containing blocks from the blockclique, or empty (miss) slots.
//! Active slots can be executed in a speculative way: their execution might need to be reverted
//! as new blocks finalize or arrive, causing changes to them or to active slots before them.
//!
//! Miss slots are executed as well because they can contain implicit and async operations.
//!
//! # Architecture
//!
//! This crate is meant to be included only at the binary level to launch the worker,
//! not by the lib crates that will interact with it.
//! It depends on the massa-execution-exports crate that contains all the publicly exposed elements
//! and through which users will actually interact with the worker.
//!
//! ## worker.rs
//! This module runs the main loop of the worker thread.
//! It contains the logic to process incoming blockclique change notifications and read-only execution requests.
//! It sequences the blocks according to their slot number into queues,
//! and requests the execution of active and final slots from execution.rs.
//!
//! ## controller.rs
//! Implements ExecutionManager and ExecutionController
//! that serve as interfaces for users to interact with the worker in worker.rs.
//!
//! ## execution.rs
//! Contains the machinery to execute final and non-final slots,
//! and track the state and results of those executions.
//! This module initializes and holds a reference to the interface from interface_impl.rs
//! that allows the crate to provide execution state access
//! to the virtual machine runtime (massa-sc-runtime crate).
//! It also serves as an access point to the current execution state and speculative ledger
//! as defined in speculative_ledger.rs.
//!
//! ## speculative_ledger.rs
//! A speculative (non-final) ledger that supports cancelling already-executed operations
//! in the case of some blockclique changes.
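+//!
+//! ## Usage sketch
+//! The snippet below is only an illustrative sketch: `start_execution_worker` and
+//! `ExecutionManager::stop` are exported by this crate, but the controller-side method names
+//! used here (`get_controller`, `update_blockclique_status`) are placeholders standing in for
+//! the blockclique-update entry point actually exposed by ExecutionController.
+//! ```ignore
+//! // launch the execution worker thread and get its manager
+//! let mut manager = start_execution_worker(execution_config, final_ledger.clone());
+//!
+//! // derive a controller to feed the worker with consensus updates (hypothetical method names)
+//! let controller = manager.get_controller();
+//! controller.update_blockclique_status(newly_finalized_blocks, new_blockclique);
+//!
+//! // stop the worker thread on shutdown
+//! manager.stop();
+//! ```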
#![feature(map_first_last)] #![feature(unzip_option)] @@ -21,9 +69,9 @@ mod controller; mod execution; mod interface_impl; mod speculative_ledger; -mod vm_thread; +mod worker; -pub use vm_thread::start_execution_worker; +pub use worker::start_execution_worker; #[cfg(test)] mod tests; diff --git a/massa-execution-worker/src/vm_thread.rs b/massa-execution-worker/src/vm_thread.rs deleted file mode 100644 index ff27ea181de..00000000000 --- a/massa-execution-worker/src/vm_thread.rs +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use crate::controller::{ExecutionControllerImpl, ExecutionManagerImpl, VMInputData}; -use crate::execution::ExecutionState; -use massa_execution_exports::{ - ExecutionConfig, ExecutionError, ExecutionManager, ExecutionOutput, ReadOnlyExecutionRequest, -}; -use massa_ledger::FinalLedger; -use massa_models::BlockId; -use massa_models::{ - timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, - Block, Slot, -}; -use massa_time::MassaTime; -use std::sync::mpsc; -use std::{ - collections::HashMap, - sync::{Arc, Condvar, Mutex, RwLock}, -}; -use tracing::debug; -/// structure gathering all elements needed by the VM thread -pub(crate) struct VMThread { - // VM config - config: ExecutionConfig, - - // VM data exchange controller - controller: ExecutionControllerImpl, - // map of SCE-final blocks not executed yet - sce_finals: HashMap>, - // last SCE final slot in sce_finals list - last_sce_final: Slot, - // map of CSS-final but non-SCE-final blocks - remaining_css_finals: HashMap, - // last blockclique - blockclique: HashMap, - // map of active slots - active_slots: HashMap>, - // highest active slot - last_active_slot: Slot, - - // execution state - execution_state: Arc>, -} - -impl VMThread { - pub fn new( - config: ExecutionConfig, - controller: ExecutionControllerImpl, - execution_state: Arc>, - ) -> Self { - let final_cursor = execution_state - .read() - .expect("could not r-lock execution context") - .final_cursor; - - // return VMThread - VMThread { - last_active_slot: final_cursor, - controller, - last_sce_final: final_cursor, - sce_finals: Default::default(), - remaining_css_finals: Default::default(), - blockclique: Default::default(), - active_slots: Default::default(), - config, - execution_state, - } - } - - /// update final slots - fn update_final_slots(&mut self, new_css_finals: HashMap) { - // return if empty - if new_css_finals.is_empty() { - return; - } - - // add new_css_finals to pending css finals - self.remaining_css_finals.extend(new_css_finals); - - // get maximal css-final slot - let max_css_final_slot = self - .remaining_css_finals - .iter() - .max_by_key(|(s, _)| *s) - .map(|(s, _)| *s) - .expect("expected remaining_css_finals to be non-empty"); - - // detect SCE-final slots - let mut slot = self.last_sce_final; - while slot < max_css_final_slot { - slot = slot - .get_next_slot(self.config.thread_count) - .expect("final slot overflow in VM"); - - // pop slot from remaining CSS finals - if let Some((block_id, block)) = self.remaining_css_finals.remove(&slot) { - // CSS-final block found at slot: add block to to sce_finals - self.sce_finals.insert(slot, Some((block_id, block))); - self.last_sce_final = slot; - // continue the loop - continue; - } - - // no CSS-final block found: it's a miss - - // check if the miss is final - let mut miss_final = false; - let mut search_slot = slot; - while search_slot < max_css_final_slot { - search_slot = search_slot - .get_next_slot(self.config.thread_count) - .expect("final 
slot overflow in VM"); - if self.remaining_css_finals.contains_key(&search_slot) { - miss_final = true; - break; - } - } - - if miss_final { - // if the miss is final, set slot to be a final miss - self.sce_finals.insert(slot, None); - self.last_sce_final = slot; - } else { - // otherwise, this slot is not final => break - break; - } - } - } - - /// returns the end active slot (if any yet) - /// this is the slot at which the cursor ends and it depends on the cursor_delay setting - fn get_end_active_slot(&self) -> Option { - let target_time = MassaTime::compensated_now(self.config.clock_compensation) - .expect("could not read current time") - .saturating_sub(self.config.cursor_delay); - get_latest_block_slot_at_timestamp( - self.config.thread_count, - self.config.t0, - self.config.genesis_timestamp, - target_time, - ) - .expect("could not get current slot") - } - - /// update active slot sequence - fn update_active_slots(&mut self, new_blockclique: Option>) { - // update blockclique if changed - if let Some(blockclique) = new_blockclique { - self.blockclique = blockclique; - } - - // get end active slot, if any - let end_active_slot = self.get_end_active_slot(); - - // reset active slots - self.active_slots = HashMap::new(); - self.last_active_slot = self.last_sce_final; - - // if no active slot yet => keep the active_slots empty - let end_active_slot = match end_active_slot { - Some(s) => s, - None => return, - }; - - // recompute non-SCE-final slot sequence - let mut slot = self.last_sce_final; - while slot < end_active_slot { - slot = slot - .get_next_slot(self.config.thread_count) - .expect("active slot overflow in VM"); - if let Some((block_id, block)) = self.remaining_css_finals.get(&slot) { - // found in remaining_css_finals - self.active_slots - .insert(slot, Some((*block_id, block.clone()))); - } else if let Some((block_id, block)) = self.blockclique.get(&slot) { - // found in blockclique - self.active_slots - .insert(slot, Some((*block_id, block.clone()))); - } else { - // miss - self.active_slots.insert(slot, None); - } - self.last_active_slot = slot; - } - } - - /// executes one final slot, if any - /// returns true if something was executed - fn execute_one_final_slot(&mut self) -> bool { - // check if there are final slots to execute - if self.sce_finals.is_empty() { - return false; - } - - // w-lock execution state - let mut exec_state = self - .execution_state - .write() - .expect("could not lock execution state for writing"); - - // get the slot just after the last executed final slot - let slot = exec_state - .final_cursor - .get_next_slot(self.config.thread_count) - .expect("final slot overflow in VM"); - - // take the corresponding element from sce finals - let exec_target = self - .sce_finals - .remove(&slot) - .expect("the SCE final slot list skipped a slot"); - - // check if the final slot is cached at the front of the speculative execution history - if let Some(exec_out) = exec_state.active_history.pop_front() { - if exec_out.slot == slot - && exec_out.block_id == exec_target.as_ref().map(|(b_id, _)| *b_id) - { - // speculative execution front result matches what we want to compute - - // apply the cached output and return - exec_state.apply_final_execution_output(exec_out); - return true; - } - } - - // speculative cache mismatch - - // clear the speculative execution output cache completely - exec_state.clear_history(); - - // execute slot - let exec_out = exec_state.execute_slot(slot, exec_target); - - // apply execution output to final state - 
exec_state.apply_final_execution_output(exec_out); - - return true; - } - - /// executes one active slot, if any - /// returns true if something was executed - fn execute_one_active_slot(&mut self) -> bool { - // write-lock the execution state - let mut exec_state = self - .execution_state - .write() - .expect("could not lock execution state for writing"); - - // get the next active slot - let slot = exec_state - .active_cursor - .get_next_slot(self.config.thread_count) - .expect("active slot overflow in VM"); - - // choose the execution target - let exec_target = match self.active_slots.get(&slot) { - Some(b) => b.clone(), //TODO get rid of that clone - None => return false, - }; - - // execute the slot - let exec_out = exec_state.execute_slot(slot, exec_target); - - // apply execution output to active state - exec_state.apply_active_execution_output(exec_out); - - return true; - } - - /// gets the time until the next active slot (saturates down to 0) - fn get_time_until_next_active_slot(&self) -> MassaTime { - let next_slot = self - .last_active_slot - .get_next_slot(self.config.thread_count) - .expect("active slot overflow in VM"); - let next_timestmap = get_block_slot_timestamp( - self.config.thread_count, - self.config.t0, - self.config.genesis_timestamp, - next_slot, - ) - .expect("could not compute block timestmap in VM"); - let now = MassaTime::compensated_now(self.config.clock_compensation) - .expect("could not get current time in VM"); - next_timestmap.saturating_sub(now) - } - - /// truncates history if necessary - pub fn truncate_history(&mut self) { - // acquire write access to execution state - let mut exec_state = self - .execution_state - .write() - .expect("could not lock execution state for writing"); - - exec_state.truncate_history(&self.active_slots); - } - - /// execute readonly request - fn execute_readonly_request( - &self, - req: ReadOnlyExecutionRequest, - resp_tx: mpsc::Sender>, - ) { - // acquire read access to execution state and execute the request - let outcome = self - .execution_state - .read() - .expect("could not lock execution state for reading") - .execute_readonly_request(req); - - // send the response - if resp_tx.send(outcome).is_err() { - debug!("could not send execute_readonly_request response: response channel died"); - } - } - - /// main VM loop - pub fn main_loop(&mut self) { - loop { - // read input queues - let input_data = self.controller.consume_input(); - - // check for stop signal - if input_data.stop { - break; - } - - // update execution sequences - if input_data.blockclique_changed { - // changes detected in input - - // update final slot sequence - self.update_final_slots(input_data.finalized_blocks); - - // update active slot sequence - self.update_active_slots(Some(input_data.blockclique)); - } - - // execute one final slot, if any - if self.execute_one_final_slot() { - // a final slot was executed: continue - continue; - } - - // now all final slots have been executed - - // if the blockclique was not updated, still fill up active slots with misses until now() - if !input_data.blockclique_changed { - self.update_active_slots(None); - } - - // truncate the speculative execution outputs if necessary - if input_data.blockclique_changed { - self.truncate_history(); - } - - // speculatively execute one active slot, if any - if self.execute_one_active_slot() { - // an active slot was executed: continue - continue; - } - - // execute all queued readonly requests - // must be done in this loop because of the static shared context - for (req, 
resp_tx) in input_data.readonly_requests { - self.execute_readonly_request(req, resp_tx); - } - - // check if new data or requests arrived during the iteration - let input_data = self - .controller - .input_data - .1 - .lock() - .expect("could not lock VM input data"); - if input_data.stop { - break; - } - if input_data.blockclique_changed || !input_data.readonly_requests.is_empty() { - continue; - } - - // compute when the next slot is - let delay_until_next_slot = self.get_time_until_next_active_slot(); - if delay_until_next_slot == 0.into() { - // next slot is right now - continue; - } - - // wait for change or for next slot - let _ = self - .controller - .input_data - .0 - .wait_timeout(input_data, delay_until_next_slot.to_duration()) - .expect("VM main loop condition variable wait failed"); - } - - // signal cancellation to all remaining readonly requests - let mut input_data = self - .controller - .input_data - .1 - .lock() - .expect("could not lock VM input data"); - for (_req, resp_tx) in input_data.readonly_requests.drain(..) { - if resp_tx - .send(Err(ExecutionError::RuntimeError( - "readonly execution cancelled because VM is closing".into(), - ))) - .is_err() - { - debug!("failed sending readonly request response: channel down"); - } - } - } -} - -/// launches the VM and returns a VMManager -/// -/// # parameters -/// * config: VM configuration -/// * bootstrap: -pub fn start_execution_worker( - config: ExecutionConfig, - final_ledger: Arc>, -) -> Box { - // create an execution state - let execution_state = Arc::new(RwLock::new(ExecutionState::new( - config.clone(), - final_ledger.clone(), - ))); - - // create a controller - let controller = ExecutionControllerImpl { - config: config.clone(), - input_data: Arc::new(( - Condvar::new(), - Mutex::new(VMInputData { - blockclique_changed: true, - ..Default::default() - }), - )), - execution_state: execution_state.clone(), - }; - - // launch the VM thread - let ctl = controller.clone(); - let thread_handle = std::thread::spawn(move || { - VMThread::new(config, ctl, execution_state).main_loop(); - }); - - // return the VM manager - Box::new(ExecutionManagerImpl { - controller, - thread_handle: Some(thread_handle), - }) -} diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs new file mode 100644 index 00000000000..2a76e2c295a --- /dev/null +++ b/massa-execution-worker/src/worker.rs @@ -0,0 +1,548 @@ +// Copyright (c) 2022 MASSA LABS + +//! This module allows launching the execution worker thread, returning objects to communicate with it. +//! The worker thread processes incoming notifications of blockclique changes, +//! orders active and final blocks in queues sorted by increasing slot number, +//! and requests the execution of active and final slots from execution.rs. 
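+//!
+//! To illustrate the final-slot ordering rule implemented below (here simplified to a single
+//! thread of `u64` slot indices instead of the real `Slot` type), the slots ready for final
+//! execution form the consecutive run starting just after the last executed final slot and
+//! ending at the highest slot holding a final block; misses inside that run are final as well:
+//! ```
+//! use std::collections::BTreeSet;
+//!
+//! /// returns the consecutive slots after `last_final` that are ready for final execution
+//! fn ready_final_slots(last_final: u64, final_block_slots: &BTreeSet<u64>) -> Vec<u64> {
+//!     // the highest slot containing a final block bounds the ready sequence
+//!     let max_final = match final_block_slots.iter().next_back() {
+//!         Some(max) => *max,
+//!         None => return Vec::new(),
+//!     };
+//!     // every slot up to max_final is ready: it either holds a final block,
+//!     // or is a miss that is followed by a final block later on
+//!     ((last_final + 1)..=max_final).collect()
+//! }
+//!
+//! assert_eq!(ready_final_slots(2, &BTreeSet::from([4, 6])), vec![3, 4, 5, 6]);
+//! ```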
+ +use crate::controller::{ExecutionControllerImpl, ExecutionManagerImpl, VMInputData}; +use crate::execution::ExecutionState; +use massa_execution_exports::{ + ExecutionConfig, ExecutionError, ExecutionManager, ExecutionOutput, ReadOnlyExecutionRequest, +}; +use massa_ledger::FinalLedger; +use massa_models::BlockId; +use massa_models::{ + timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, + Block, Slot, +}; +use massa_time::MassaTime; +use std::sync::mpsc; +use std::{ + collections::HashMap, + sync::{Arc, Condvar, Mutex, RwLock}, +}; +use tracing::debug; + +/// Structure gathering all elements needed by the execution thread +pub(crate) struct ExecutionThread { + // Execution config + config: ExecutionConfig, + // A copy of the controller allowing access to incoming requests + controller: ExecutionControllerImpl, + // Map of final slots not executed yet but ready for execution + // See lib.rs for an explanation on final execution ordering. + ready_final_slots: HashMap>, + // Highest final slot that is ready to be executed + last_ready_final_slot: Slot, + // Map of final blocks that are not yet ready to be executed + // See lib.rs for an explanation on final execution ordering. + pending_final_blocks: HashMap, + // Current blockclique, indexed by slot number + blockclique: HashMap, + // Map of all active slots + active_slots: HashMap>, + // Highest active slot + last_active_slot: Slot, + // Execution state (see execution.rs) to which execution requests are sent + execution_state: Arc>, +} + +impl ExecutionThread { + /// Creates the ExecutionThread structure to gather all data and references + /// needed by the execution worker thread. + /// + /// # Arguments + /// * config: execution config + /// * controller: a copy of the ExecutionController to get incoming requests from + /// * execution_state: an exclusive reference to the execution state, which can be bootstrapped or newly created + pub fn new( + config: ExecutionConfig, + controller: ExecutionControllerImpl, + execution_state: Arc>, + ) -> Self { + // get the latest executed final slot, at the output of which the final ledger is attached + let final_cursor = execution_state + .read() + .expect("could not r-lock execution context") + .final_cursor; + + // create and return the ExecutionThread + ExecutionThread { + last_active_slot: final_cursor, + controller, + last_ready_final_slot: final_cursor, + ready_final_slots: Default::default(), + pending_final_blocks: Default::default(), + blockclique: Default::default(), + active_slots: Default::default(), + config, + execution_state, + } + } + + /// Update the sequence of final slots given newly finalized blocks. + /// This method is called from the execution worker's main loop. + /// + /// # Arguments + /// * new_final_blocks: a map of newly finalized blocks + fn update_final_slots(&mut self, new_final_blocks: HashMap) { + // if there are no new final blocks, exit and do nothing + if new_final_blocks.is_empty() { + return; + } + + // add new_final_blocks to the pending final blocks not ready for execution yet + self.pending_final_blocks.extend(new_final_blocks); + + // get maximal final slot + let max_final_slot = self + .pending_final_blocks + .iter() + .max_by_key(|(s, _)| *s) + .map(|(s, _)| *s) + .expect("expected pending_final_blocks to be non-empty"); + + // Given pending_final_blocks, detect he final slots that are ready to be executed. + // Those are the ones or which all the previous slots are also executed or ready to be so. 
+ // Iterate over consecutive slots starting from the one just after the previous last final one. + let mut slot = self.last_ready_final_slot; + while slot < max_final_slot { + slot = slot + .get_next_slot(self.config.thread_count) + .expect("final slot overflow in VM"); + + // try to remove that slot out of pending_final_blocks + if let Some((block_id, block)) = self.pending_final_blocks.remove(&slot) { + // pending final block found at slot: + // add block to the ready_final_slots list of final slots ready for execution + self.ready_final_slots.insert(slot, Some((block_id, block))); + self.last_ready_final_slot = slot; + // continue the loop + continue; + } + + // no final block found at this slot: it's a miss + + // check if the miss is final by searching for final blocks later in the same thread + let mut miss_final = false; + let mut search_slot = slot; + while search_slot < max_final_slot { + search_slot = search_slot + .get_next_slot(self.config.thread_count) + .expect("final slot overflow in VM"); + if self.pending_final_blocks.contains_key(&search_slot) { + // A final block was found later in the same thread. + // The missed slot is therefore final. + miss_final = true; + break; + } + } + + if miss_final { + // If this slot is a final miss + // Add it to the list of final slots ready for execution + self.ready_final_slots.insert(slot, None); + self.last_ready_final_slot = slot; + } else { + // This slot is not final: + // we have reached the end of the list of consecutive final slots + // that are ready to be executed + break; + } + } + } + + /// Returns the latest slot that is at or just before the current timestamp. + /// If a non-zero cursor_delay config is defined, this extra lag is taken into account. + /// Such an extra lag can be useful for weaker nodes to perform less speculative executions + /// because more recent slots change more often and might require multiple re-executions. + /// + /// # Returns + /// The latest slot at or before now() - self.config.cursor_delay) if there is any, + /// or None if it falls behind the genesis timestamp. + fn get_end_active_slot(&self) -> Option { + let target_time = MassaTime::compensated_now(self.config.clock_compensation) + .expect("could not read current time") + .saturating_sub(self.config.cursor_delay); + get_latest_block_slot_at_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + target_time, + ) + .expect("could not get current slot") + } + + /// Updates the sequence of active slots. + /// If a new blockclique is provided, it is taken into account. + /// If no blockclique is provided, this function is still useful to fill + /// ready-to-be-executed active slots with misses until the current time. 
+ /// + /// Arguments: + /// * new_blockclique: optionally provide a new blockclique + fn update_active_slots(&mut self, new_blockclique: Option>) { + // Update the current blockclique if it has changed + if let Some(blockclique) = new_blockclique { + self.blockclique = blockclique; + } + + // Get the latest slot at the current timestamp, if any + let end_active_slot = self.get_end_active_slot(); + + // Empty the list of active slots + self.active_slots = HashMap::new(); + self.last_active_slot = self.last_ready_final_slot; + + // If the current timestamp is before genesis time, keep the active_slots empty and return + let end_active_slot = match end_active_slot { + Some(s) => s, + None => return, + }; + + // Recompute the sequence of active slots + // by iterating over consecutive slots from the one just after last_ready_final_slot until the current timestamp, + // and looking for blocks into pending_final_blocks and the current blockclique + let mut slot = self.last_ready_final_slot; + while slot < end_active_slot { + slot = slot + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + // look for a block at that slot among the ones that are final but not ready for final execution yet + if let Some((block_id, block)) = self.pending_final_blocks.get(&slot) { + // A block at that slot was found in pending_final_blocks. + // Add it to the sequence of active slots. + self.active_slots + .insert(slot, Some((*block_id, block.clone()))); + self.last_active_slot = slot; + } else if let Some((block_id, block)) = self.blockclique.get(&slot) { + // A block at that slot was found in the current blockclique. + // Add it to the sequence of active slots. + self.active_slots + .insert(slot, Some((*block_id, block.clone()))); + self.last_active_slot = slot; + } else { + // No block was found at that slot: it's a miss + // Add the miss to the sequence of active slots + self.active_slots.insert(slot, None); + self.last_active_slot = slot; + } + } + } + + /// executes one final slot, if any + /// returns true if something was executed + fn execute_one_final_slot(&mut self) -> bool { + // check if there are final slots to execute + if self.ready_final_slots.is_empty() { + return false; + } + + // w-lock execution state + let mut exec_state = self + .execution_state + .write() + .expect("could not lock execution state for writing"); + + // get the slot just after the last executed final slot + let slot = exec_state + .final_cursor + .get_next_slot(self.config.thread_count) + .expect("final slot overflow in VM"); + + // take the corresponding element from sce finals + let exec_target = self + .ready_final_slots + .remove(&slot) + .expect("the SCE final slot list skipped a slot"); + + // check if the final slot is cached at the front of the speculative execution history + if let Some(exec_out) = exec_state.active_history.pop_front() { + if exec_out.slot == slot + && exec_out.block_id == exec_target.as_ref().map(|(b_id, _)| *b_id) + { + // speculative execution front result matches what we want to compute + + // apply the cached output and return + exec_state.apply_final_execution_output(exec_out); + return true; + } + } + + // speculative cache mismatch + + // clear the speculative execution output cache completely + exec_state.clear_history(); + + // execute slot + let exec_out = exec_state.execute_slot(slot, exec_target); + + // apply execution output to final state + exec_state.apply_final_execution_output(exec_out); + + return true; + } + + /// executes one active slot, if any + 
/// returns true if something was executed + fn execute_one_active_slot(&mut self) -> bool { + // write-lock the execution state + let mut exec_state = self + .execution_state + .write() + .expect("could not lock execution state for writing"); + + // get the next active slot + let slot = exec_state + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + + // choose the execution target + let exec_target = match self.active_slots.get(&slot) { + Some(b) => b.clone(), //TODO get rid of that clone + None => return false, + }; + + // execute the slot + let exec_out = exec_state.execute_slot(slot, exec_target); + + // apply execution output to active state + exec_state.apply_active_execution_output(exec_out); + + return true; + } + + /// Gets the time from now() to the slot just after next last_active_slot. + /// Saturates down to 0 on negative durations. + /// Note that config.cursor_delay is taken into account. + fn get_time_until_next_active_slot(&self) -> MassaTime { + // get the timestamp of the slot after the current last active one + let next_slot = self + .last_active_slot + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + let next_timestmap = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + next_slot, + ) + .expect("could not compute block timestmap in VM"); + + // get the current timestamp minus the cursor delay + let end_time = MassaTime::compensated_now(self.config.clock_compensation) + .expect("could not get current time in VM") + .saturating_sub(self.config.cursor_delay); + + // compute the time difference, saturating down to zero + next_timestmap.saturating_sub(end_time) + } + + /// Tells the execution state about the new sequence of active slots. + /// If some slots already executed in a speculative way changed, + /// or if one of their have predecessor slots changed, + /// the execution state will truncate the execution output history + /// to remove all out-of-date execution outputs. + /// Speculative execution will then resume from the point of truncation. + pub fn truncate_execution_history(&mut self) { + // acquire write access to execution state + let mut exec_state = self + .execution_state + .write() + .expect("could not lock execution state for writing"); + + // tells the execution state to truncate its execution output history + // given the new list of active slots + exec_state.truncate_history(&self.active_slots); + } + + /// Executes a read-only request, and asynchronously returns the result once finished. 
+ /// + /// # Arguments + /// * req: read-only execution request parameters + /// * resp_tx: MPSC sender through which the execution output is sent when the execution is over + fn execute_readonly_request( + &self, + req: ReadOnlyExecutionRequest, + resp_tx: mpsc::Sender>, + ) { + // acquire read access to execution state and execute the read-only request + let outcome = self + .execution_state + .read() + .expect("could not lock execution state for reading") + .execute_readonly_request(req); + + // send the execution output through resp_tx + if resp_tx.send(outcome).is_err() { + debug!("could not send execute_readonly_request response: response channel died"); + } + } + + /// Main loop of the executin worker + pub fn main_loop(&mut self) { + loop { + // read input requests + let input_data = self.controller.consume_input(); + + // check for stop signal + if input_data.stop { + break; + } + + // if the blockclique has changed + if input_data.blockclique_changed { + // update the sequence of final slots given the newly finalized blocks + self.update_final_slots(input_data.finalized_blocks); + + // update the sequence of active slots given the new blockclique + self.update_active_slots(Some(input_data.blockclique)); + } + + // execute one slot as final, if there is one ready for final execution + if self.execute_one_final_slot() { + // A slot was executed as final: restart the loop + // This loop continue is useful for monitoring: + // it allows tracking the state of all execution queues + continue; + } + + // now all the slots that were ready for final execution have been executed as final + + // if the blockclique was not updated, the update_active_slots hasn't been called previously. + // But we still fill up active slots with misses until now() so we call it with None as argument. + if !input_data.blockclique_changed { + self.update_active_slots(None); + } + + // If the blockclique has changed, the list of active slots might have seen + // new insertions/deletions of blocks at different slot depths. + // It is therefore important to signal this to the execution state, + // so that it can remove out-of-date speculative execution results from its history. + if input_data.blockclique_changed { + self.truncate_execution_history(); + } + + // Execute one active slot in a speculative way, if there is one ready for that + if self.execute_one_active_slot() { + // An active slot was executed: restart the loop + // This loop continue is useful for monitoring: + // it allows tracking the state of all execution queues, + // as well as prioritizing executions in the following order: + // 1 - final executions + // 2 - speculative executions + // 3 - read-only executions + continue; + } + + // Execute all queued readonly requests (note that the queue is of finite length) + // This must be done in this loop because even though read-only executions do not alter consensus state, + // they still act temporarily on the static shared execution context. 
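Each queued read-only request carries its own response sender, so the result can be routed back to the one caller blocked on it. Stripped of the crate types, the round trip looks roughly like this (toy request and result types, std mpsc only):

use std::sync::mpsc;

fn main() {
    // Queue of (request, response sender) pairs, as the controller side would fill it.
    let mut readonly_requests: Vec<(String, mpsc::Sender<Result<String, String>>)> = Vec::new();

    // Caller side: enqueue a request and keep the receiving end of its private channel.
    let (resp_tx, resp_rx) = mpsc::channel();
    readonly_requests.push(("toy read-only request".to_string(), resp_tx));

    // Worker side: drain the queue and answer each request on its own channel.
    for (req, tx) in readonly_requests.drain(..) {
        let outcome: Result<String, String> = Ok(format!("executed: {}", req));
        if tx.send(outcome).is_err() {
            eprintln!("caller gave up waiting for the read-only result");
        }
    }

    // Caller side: block until the result for this specific request arrives.
    println!("{:?}", resp_rx.recv().expect("worker dropped the response channel"));
}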
+ for (req, resp_tx) in input_data.readonly_requests { + self.execute_readonly_request(req, resp_tx); + } + + // Peek into the input data to see if new input arrived during this iteration of the loop + let input_data = self + .controller + .input_data + .1 + .lock() + .expect("could not lock execution input data"); + if input_data.stop { + // there is a request to stop: quit the loop + break; + } + if input_data.blockclique_changed || !input_data.readonly_requests.is_empty() { + // there are blockclique updates or read-only requests: restart the loop + continue; + } + + // Here, we know that there is currently nothing to do for this worker + + // Compute when the next slot will be + // This is useful to wait for the next speculative miss to append to active slots. + let time_until_next_slot = self.get_time_until_next_active_slot(); + if time_until_next_slot == 0.into() { + // next slot is right now: simply restart the loop + continue; + } + + // Wait to be notified of new input, for at most time_until_next_slot + // Note: spurious wake-ups are not a problem: + // the next loop iteration will just do nohing and come back to wait here. + let _ = self + .controller + .input_data + .0 + .wait_timeout(input_data, time_until_next_slot.to_duration()) + .expect("Execution worker main loop condition variable wait failed"); + } + + // the execution worker is stopping: + // signal cancellation to all remaining read-only execution requests waiting for an MPSC response + let mut input_data = self + .controller + .input_data + .1 + .lock() + .expect("could not lock VM input data"); + for (_req, resp_tx) in input_data.readonly_requests.drain(..) { + if resp_tx + .send(Err(ExecutionError::RuntimeError( + "readonly execution cancelled because VM is closing".into(), + ))) + .is_err() + { + debug!("failed sending readonly request response: channel down"); + } + } + } +} + +/// Launches an execution worker thread and returns an ExecutionManager to interact with it +/// +/// # parameters +/// * config: execution config +/// * final_ledger: a reference to the final ledger for shared reading and exclusive writing +/// +/// # Returns +/// An instance of ExecutionManager allowing to stop the worker or generate ExecutionController instances, +/// which are used to send requests and notifications to the worker. 
+pub fn start_execution_worker( + config: ExecutionConfig, + final_ledger: Arc>, +) -> Box { + // create an execution state + let execution_state = Arc::new(RwLock::new(ExecutionState::new( + config.clone(), + final_ledger.clone(), + ))); + + // create a controller + let controller = ExecutionControllerImpl { + config: config.clone(), + input_data: Arc::new(( + Condvar::new(), + Mutex::new(VMInputData { + // ntify of a blockclique change to run one initialization loop itration + blockclique_changed: true, + ..Default::default() + }), + )), + execution_state: execution_state.clone(), + }; + + // launch the execution thread + let ctl = controller.clone(); + let thread_handle = std::thread::spawn(move || { + ExecutionThread::new(config, ctl, execution_state).main_loop(); + }); + + // return the execution manager + Box::new(ExecutionManagerImpl { + controller, + thread_handle: Some(thread_handle), + }) +} From d2f5a30f208a830beb74387ae3299c17daacd272 Mon Sep 17 00:00:00 2001 From: damip Date: Tue, 22 Feb 2022 14:48:14 +0100 Subject: [PATCH 37/73] first full documentation --- massa-execution-exports/src/lib.rs | 21 +- .../src/test_exports/mod.rs | 10 + massa-ledger/src/bootstrap.rs | 11 +- massa-ledger/src/config.rs | 2 + massa-ledger/src/error.rs | 2 + massa-ledger/src/ledger.rs | 103 +++++-- massa-ledger/src/ledger_changes.rs | 268 ++++++++++++++---- massa-ledger/src/ledger_entry.rs | 27 +- massa-ledger/src/lib.rs | 41 +++ massa-ledger/src/types.rs | 29 +- 10 files changed, 414 insertions(+), 100 deletions(-) diff --git a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs index 3794cb73794..c2394241d3a 100644 --- a/massa-execution-exports/src/lib.rs +++ b/massa-execution-exports/src/lib.rs @@ -20,10 +20,27 @@ //! while ignoring all the changes it would cause to the consensus state (read-only execution), //! or reading the state at the output of the executed blockclique blocks. //! -//! # Test exports +//! # Architecture +//! +//! ## config.rs +//! Contains configuration parameters for the execution system. +//! +//! ## controller_traits.rs +//! Defines the ExecutionManager and ExecutionController traits for interacting with the execution worker. +//! +//! ## erorrs.rs +//! Defines error types for the crate. +//! +//! ## event_store.rs +//! Defines an indexed, finite-size storage system for execution events. +//! +//! ## types.rs +//! Defines useful shared structures. +//! +//! ## Test exports //! //! When the crate feature `testing` is enabled, tooling useful for testing purposes is exported. -//! See test_exports/ for details. +//! See test_exports/mod.rs for details. mod config; mod controller_traits; diff --git a/massa-execution-exports/src/test_exports/mod.rs b/massa-execution-exports/src/test_exports/mod.rs index d8b587eda2b..1dd974d43cb 100644 --- a/massa-execution-exports/src/test_exports/mod.rs +++ b/massa-execution-exports/src/test_exports/mod.rs @@ -2,6 +2,16 @@ //! This module exposes useful tooling for testing. //! It is only compiled and exported by the crate if the "testing" feature is enabled. +//! +//! +//! # Architecture +//! +//! ## config.rs +//! Provides a default execution configuration for testing. +//! +//! ## mock.rs +//! Provides a mock of ExecutionController to simulate interactions +//! with an execution worker within tests. 
mod config; mod mock; diff --git a/massa-ledger/src/bootstrap.rs b/massa-ledger/src/bootstrap.rs index d72e11ca620..3ec8070c74b 100644 --- a/massa-ledger/src/bootstrap.rs +++ b/massa-ledger/src/bootstrap.rs @@ -1,16 +1,17 @@ // Copyright (c) 2022 MASSA LABS -use std::collections::BTreeMap; +//! Provides serializable strucutres for bootstrapping the FinalLedger +use crate::LedgerEntry; use massa_models::{ array_from_slice, constants::ADDRESS_SIZE_BYTES, Address, DeserializeCompact, DeserializeVarInt, ModelsError, SerializeCompact, SerializeVarInt, Slot, }; use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; -use crate::LedgerEntry; - -/// temporary ledger bootstrap structure +/// Represents a snapshot of the ledger state, +/// which is enough to fully bootstrap a FinalLedger #[derive(Debug, Clone, Serialize, Deserialize)] pub struct FinalLedgerBootstrapState { /// ledger slot @@ -19,6 +20,7 @@ pub struct FinalLedgerBootstrapState { pub(crate) sorted_ledger: BTreeMap, } +/// Allows serializing the FinalLedgerBootstrapState to a compact binary representation impl SerializeCompact for FinalLedgerBootstrapState { fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { let mut res: Vec = Vec::new(); @@ -45,6 +47,7 @@ impl SerializeCompact for FinalLedgerBootstrapState { } } +/// Allows deserializing a FinalLedgerBootstrapState from its compact binary representation impl DeserializeCompact for FinalLedgerBootstrapState { fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { let mut cursor = 0usize; diff --git a/massa-ledger/src/config.rs b/massa-ledger/src/config.rs index 498ca245d11..9f654e1d296 100644 --- a/massa-ledger/src/config.rs +++ b/massa-ledger/src/config.rs @@ -1,5 +1,7 @@ // Copyright (c) 2022 MASSA LABS +//! This file defines a config strucutre containing all settings for the ledger system + use std::path::PathBuf; /// Ledger configuration diff --git a/massa-ledger/src/error.rs b/massa-ledger/src/error.rs index 17226bde2d0..5aaee3ec16e 100644 --- a/massa-ledger/src/error.rs +++ b/massa-ledger/src/error.rs @@ -1,5 +1,7 @@ // Copyright (c) 2022 MASSA LABS +//! This file defines all error types for the ledger system + use displaydoc::Display; use thiserror::Error; diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index 8a4ac718d1b..e63b7909214 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -1,5 +1,7 @@ // Copyright (c) 2022 MASSA LABS +//! This file defines the final ledger associating addresses to their balances, bytecode and data. + use crate::ledger_changes::LedgerChanges; use crate::ledger_entry::LedgerEntry; use crate::types::{Applicable, SetUpdateOrDelete}; @@ -8,40 +10,52 @@ use massa_hash::hash::Hash; use massa_models::{Address, Amount, Slot}; use std::collections::{BTreeMap, VecDeque}; -/// represents a final ledger +/// Represents a final ledger associating addresses to their balances, bytecode and data. +/// The final ledger is also attached to a final slot, can be boostrapped and allows others to bootstrap. +/// The ledger size can be very high: it can exceed 1TB. +/// To allow for storage on disk, the ledger uses trees and has `O(log(N))` access, insertion and deletion complexity. +/// +/// Note: currently the ledger is stored in RAM. TODO put it on the hard drive with cache. 
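As a rough illustration of why a sorted tree map fits this role (O(log(N)) point access, plus ordered range scans that a future streaming bootstrap could resume from), here is a toy version using plain integers in place of the crate's Address and LedgerEntry types:

use std::collections::BTreeMap;
use std::ops::Bound::{Excluded, Unbounded};

fn main() {
    // Toy ledger: integer stand-ins for Address -> parallel balance.
    let mut ledger: BTreeMap<u64, u64> = BTreeMap::new();
    for addr in 0..10u64 {
        ledger.insert(addr, addr * 100);
    }

    // O(log(N)) point lookup.
    assert_eq!(ledger.get(&3), Some(&300));

    // Ordered, cursor-based range scans are what a streaming bootstrap would
    // build on: send a chunk, remember the last key, resume strictly after it.
    let mut cursor: Option<u64> = None;
    loop {
        let lower = match cursor {
            Some(last) => Excluded(last),
            None => Unbounded,
        };
        let chunk: Vec<(u64, u64)> = ledger
            .range((lower, Unbounded))
            .take(4)
            .map(|(k, v)| (*k, *v))
            .collect();
        if chunk.is_empty() {
            break;
        }
        cursor = chunk.last().map(|(k, _)| *k);
        println!("bootstrap chunk: {:?}", chunk);
    }
}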
pub struct FinalLedger { /// ledger config config: LedgerConfig, - /// slot at which the final ledger is computed + /// slot at the output of which the final ledger is attached pub slot: Slot, - /// sorted ledger tree - /// TODO put it on the hard drive as it can reach 1TB + /// ledger tree, sorted by address sorted_ledger: BTreeMap, - /// history of recent final ledger changes + /// history of recent final ledger changes, useful for streaming bootstrap /// front = oldest, back = newest changes_history: VecDeque<(Slot, LedgerChanges)>, } +/// Allows applying LedgerChanges to the final ledger +/// +/// Warning: this does not push the changes in changes_history. +/// Always use FinalLedger::settle_slot to apply bootstrapable changes. impl Applicable for FinalLedger { - /// merges LedgerChanges to the final ledger fn apply(&mut self, changes: LedgerChanges) { // for all incoming changes for (addr, change) in changes.0 { match change { + // the incoming change sets a ledger entry to a new one SetUpdateOrDelete::Set(new_entry) => { - // inserts/overwrites the entry with an incoming absolute value + // inserts/overwrites the entry with the incoming one self.sorted_ledger.insert(addr, new_entry); } + + // the incoming change updates an existing ledger entry SetUpdateOrDelete::Update(entry_update) => { - // applies updates to an entry + // applies the updates to the entry // if the entry does not exist, inserts a default one and applies the updates to it self.sorted_ledger .entry(addr) .or_insert_with(|| Default::default()) .apply(entry_update); } + + // the incoming change deletes a ledger entry SetUpdateOrDelete::Delete => { - // deletes an entry, if it exists + // delete the entry, if it exists self.sorted_ledger.remove(&addr); } } @@ -49,6 +63,7 @@ impl Applicable for FinalLedger { } } +/// Macro used to shorten file error returns macro_rules! init_file_error { ($st:expr, $cfg:ident) => { |err| { @@ -65,9 +80,9 @@ macro_rules! init_file_error { pub(crate) use init_file_error; impl FinalLedger { - /// init from file + /// Initializes a new FinalLedger by reading its initial state from file. pub fn new(config: LedgerConfig) -> Result { - // load file + // load the ledger tree from file let sorted_ledger = serde_json::from_str::>( &std::fs::read_to_string(&config.initial_sce_ledger_path) .map_err(init_file_error!("loading", config))?, @@ -85,16 +100,25 @@ impl FinalLedger { }) .collect(); - // generate final ledger + // the initial ledger is attached to the output of the last genesis block + let slot = Slot::new(0, config.thread_count.saturating_sub(1)); + + // generate the final ledger Ok(FinalLedger { - slot: Slot::new(0, config.thread_count.saturating_sub(1)), + slot, sorted_ledger, changes_history: Default::default(), config, }) } - /// load from bootstrap + /// Intiialize a FinalLedger from a bootstrap state + /// + /// TODO: This loads the whole ledger in RAM. Switch to streaming in the future + /// + /// # Arguments + /// * config: ledger config + /// * state: bootstrap state pub fn from_bootstrap_state(config: LedgerConfig, state: FinalLedgerBootstrapState) -> Self { FinalLedger { slot: state.slot, @@ -104,7 +128,9 @@ impl FinalLedger { } } - /// get bootstrap state + /// Gets a snapshot of the ledger to bootstrap other nodes + /// + /// TODO: This loads the whole ledger in RAM. 
Switch to streaming in the future pub fn get_bootstrap_state(&self) -> FinalLedgerBootstrapState { FinalLedgerBootstrapState { slot: self.slot, @@ -112,17 +138,25 @@ impl FinalLedger { } } - /// gets a full cloned entry + /// Gets a copy of a full ledger entry. + /// + /// # Returns + /// A clone of the whole LedgerEntry, or None if not found. + /// + /// TODO: in the future, never manipulate full ledger entries because their datastore can be huge pub fn get_full_entry(&self, addr: &Address) -> Option { self.sorted_ledger.get(addr).cloned() } - /// settles a slot and saves the corresponding ledger changes to history + /// Applies changes to the ledger, pushes them to the bootstrap history, + /// and sets the ledger's attachment final slot. + /// After this is called, the final ledger is attached to the output of `slot` + /// and ready to bootstrap nodes with this new state. pub fn settle_slot(&mut self, slot: Slot, changes: LedgerChanges) { // apply changes self.apply(changes.clone()); - // update the slot + // update the attachment final slot self.slot = slot; // update and prune changes history @@ -132,29 +166,52 @@ impl FinalLedger { } } - /// gets the parallel balance of an entry + /// Gets the parallel balance of a ledger entry + /// + /// # Returns + /// The parallel balance, or None if the ledger entry was not found pub fn get_parallel_balance(&self, addr: &Address) -> Option { self.sorted_ledger.get(addr).map(|v| v.parallel_balance) } - /// gets a copy of the bytecode of an entry + /// Gets a copy of the bytecode of a ledger entry + /// + /// # Returns + /// A copy of the found bytecode, or None if the ledger entry was not found pub fn get_bytecode(&self, addr: &Address) -> Option> { self.sorted_ledger.get(addr).map(|v| v.bytecode.clone()) } - /// checks if an entry exists + /// Checks if a ledger entry exists + /// + /// # Returns + /// true if it exists, false otherwise. pub fn entry_exists(&self, addr: &Address) -> bool { self.sorted_ledger.contains_key(addr) } - /// gets a copy of a data entry + /// Gets a copy of the value of a datastore entry for a given address. + /// + /// # Arguments + /// * addr: target address + /// * key: datastore key + /// + /// # Returns + /// A copy of the datastore value, or None if the ledger entry or datastore entry was not found pub fn get_data_entry(&self, addr: &Address, key: &Hash) -> Option> { self.sorted_ledger .get(addr) .and_then(|v| v.datastore.get(key).cloned()) } - /// checks whether a data entry exists + /// Checks for the existence of a datastore entry for a given address. + /// + /// # Arguments + /// * addr: target address + /// * key: datastore key + /// + /// # Returns + /// true if the datastore entry was found, or false if the ledger entry or datastore entry was not found pub fn has_data_entry(&self, addr: &Address, key: &Hash) -> bool { self.sorted_ledger .get(addr) diff --git a/massa-ledger/src/ledger_changes.rs b/massa-ledger/src/ledger_changes.rs index 71e43e6990c..ee34a22438c 100644 --- a/massa-ledger/src/ledger_changes.rs +++ b/massa-ledger/src/ledger_changes.rs @@ -1,5 +1,7 @@ // Copyright (c) 2022 MASSA LABS +//! 
This file provides structures representing changes to ledger entries + use crate::ledger_entry::LedgerEntry; use crate::types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete}; use massa_hash::hash::Hash; @@ -9,9 +11,13 @@ use std::collections::hash_map; /// represents an update to one or more fields of a LedgerEntry #[derive(Default, Debug, Clone)] pub struct LedgerEntryUpdate { + /// change the number of rolls pub roll_count: SetOrKeep, + /// change the parallel balance pub parallel_balance: SetOrKeep, + /// change the executable bytecode pub bytecode: SetOrKeep>, + // change datastore entries pub datastore: Map>>, } @@ -25,7 +31,7 @@ impl Applicable for LedgerEntryUpdate { } } -/// represents a list of changes to ledger entries +/// represents a list of changes to multiple ledger entries #[derive(Default, Debug, Clone)] pub struct LedgerChanges(pub Map>); @@ -48,7 +54,7 @@ impl Applicable for LedgerChanges { } impl LedgerChanges { - /// get an item + /// get an item from the LedgerChanges pub fn get( &self, addr: &Address, @@ -56,94 +62,162 @@ impl LedgerChanges { self.0.get(addr) } - /// tries to return the parallel balance or gets it from a function + /// Tries to return the parallel balance of an entry + /// or gets it from a function if the entry's status is unknown. + /// + /// This function is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, + /// no need to dig further (for example in the FinalLedger). + /// + /// # Arguments + /// * addr: address for which to get the value + /// * f: fallback function with no arguments and returning Option /// /// # Returns - /// * Some(v) if a value is present + /// * Some(v) if a value is present, where v is a copy of the value /// * None if the value is absent /// * f() if the value is unknown - /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further pub fn get_parallel_balance_or_else Option>( &self, addr: &Address, f: F, ) -> Option { + // Get the changes for the provided address match self.0.get(addr) { + // This entry is being replaced by a new one: get the balance from the new entry Some(SetUpdateOrDelete::Set(v)) => Some(v.parallel_balance), + + // This entry is being updated Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { parallel_balance, .. })) => match parallel_balance { + // The update sets a new balance: return it SetOrKeep::Set(v) => Some(*v), + // The update keeps the old balance. + // We therefore have no info on the absolute value of the balance. + // We call the fallback function and return its output. SetOrKeep::Keep => f(), }, + + // This entry is being deleted: return None. Some(SetUpdateOrDelete::Delete) => None, + + // This entry is not being changed. + // We therefore have no info on the absolute value of the balance. + // We call the fallback function and return its output. None => f(), } } - /// tries to return the bytecode or gets it from a function + /// Tries to return the executable bytecode of an entry + /// or gets it from a function if the entry's status is unknown. + /// + /// This function is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, + /// no need to dig further (for example in the FinalLedger). 
+ /// + /// # Arguments + /// * addr: address for which to get the value + /// * f: fallback function with no arguments and returning Option> /// /// # Returns - /// * Some(v) if a value is present + /// * Some(v) if a value is present, where v is a copy of the value /// * None if the value is absent /// * f() if the value is unknown - /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further pub fn get_bytecode_or_else Option>>( &self, addr: &Address, f: F, ) -> Option> { + // Get the changes to the provided address match self.0.get(addr) { + // This entry is being replaced by a new one: get the bytecode from the new entry Some(SetUpdateOrDelete::Set(v)) => Some(v.bytecode.clone()), + + // This entry is being updated Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { bytecode, .. })) => match bytecode { + // The update sets a new bytecode: return it SetOrKeep::Set(v) => Some(v.clone()), + + // The update keeps the old bytecode. + // We therefore have no info on the absolute value of the bytecode. + // We call the fallback function and return its output. SetOrKeep::Keep => f(), }, + + // This entry is being deleted: return None. Some(SetUpdateOrDelete::Delete) => None, + + // This entry is not being changed. + // We therefore have no info on the absolute contents of the bytecode. + // We call the fallback function and return its output. None => f(), } } - /// tries to return whether an entry exists or gets it from a function + /// Tries to return whether an entry exists + /// or gets the information from a function if the entry's status is unknown. /// - /// # Returns - /// * true if a entry is present - /// * false if the entry is absent - /// * f() if the existence of the value is unknown + /// This function is used as an optimization: + /// if the result can be deduced unambiguously from the LedgerChanges, + /// no need to dig further (for example in the FinalLedger). /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + /// # Arguments + /// * addr: address to search for + /// * f: fallback function with no arguments and returning bool + /// + /// # Returns + /// * true if the entry exists + /// * false if the value is absent + /// * f() if the value's existence is unknown pub fn entry_exists_or_else bool>(&self, addr: &Address, f: F) -> bool { + // Get the changes for the provided address match self.0.get(addr) { + // The entry is being replaced by a new one: it exists Some(SetUpdateOrDelete::Set(_)) => true, + + // The entry is being updated: + // assume it exists because it will be created on update if it doesn't Some(SetUpdateOrDelete::Update(_)) => true, + + // The entry is being deleted: it doesn't exist anymore Some(SetUpdateOrDelete::Delete) => false, + + // This entry is not being changed. + // We therefore have no info on its existence. + // We call the fallback function and return its output. None => f(), } } - /// set the parallel balance of an address + /// Set the parallel balance of an address. + /// If the address doesn't exist, its ledger entry is created. 
+ /// + /// # Arguments + /// * addr: target address + /// * balance: parallel balance to set for the provided address pub fn set_parallel_balance(&mut self, addr: Address, balance: Amount) { + // Get the changes for the entry associated to the provided address match self.0.entry(addr) { + // That entry is being changed hash_map::Entry::Occupied(mut occ) => { match occ.get_mut() { + // The entry is being replaced by a new one SetUpdateOrDelete::Set(v) => { - // we currently set the absolute value of the entry - // so we need to update the parallel_balance of that value + // update the parallel_balance of the replacement entry v.parallel_balance = balance; } + + // The entry is being updated SetUpdateOrDelete::Update(u) => { - // we currently update the value of the entry - // so we need to set the parallel_balance for that update + // Make sure the update sets the paralell balance of the entry to its new value u.parallel_balance = SetOrKeep::Set(balance); } + + // The entry is being deleted d @ SetUpdateOrDelete::Delete => { - // we currently delete the entry - // so we need to create a default one with the target balance + // Replace that deletion with a replacement by a new default entry + // for which the parallel balance was properly set *d = SetUpdateOrDelete::Set(LedgerEntry { parallel_balance: balance, ..Default::default() @@ -151,9 +225,10 @@ impl LedgerChanges { } } } + + // This entry is not being changed hash_map::Entry::Vacant(vac) => { - // we currently aren't changing anything on that entry - // so we need to create an update with the target balance + // Induce an Update to the entry that sets the balance to its new value vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { parallel_balance: SetOrKeep::Set(balance), ..Default::default() @@ -162,24 +237,34 @@ impl LedgerChanges { } } - /// set the parallel balance of an address + /// Set the executable bytecode of an address. + /// If the address doesn't exist, its ledger entry is created. 
+ /// + /// # Parameters + /// * addr: target address + /// * bytecode: executable bytecode to assign to that address pub fn set_bytecode(&mut self, addr: Address, bytecode: Vec) { + // Get the current changes being applied to the entry associated to that address match self.0.entry(addr) { + // There are changes currently being applied to the entry hash_map::Entry::Occupied(mut occ) => { match occ.get_mut() { + // The entry is being replaced by a new one SetUpdateOrDelete::Set(v) => { - // we currently set the absolute value of the entry - // so we need to update the bytecode of that value + // update the bytecode of the replacement entry v.bytecode = bytecode; } + + // The entry is being updated SetUpdateOrDelete::Update(u) => { - // we currently update the value of the entry - // so we need to set the bytecode for that update + // Ensure that the update includes setting the bytecode to its new value u.bytecode = SetOrKeep::Set(bytecode); } + + // The entry is being deleted d @ SetUpdateOrDelete::Delete => { - // we currently delete the entry - // so we need to create a default one with the target bytecode + // Replace that deletion with a replacement by a new default entry + // for which the bytecode was properly set *d = SetUpdateOrDelete::Set(LedgerEntry { bytecode, ..Default::default() @@ -187,9 +272,10 @@ impl LedgerChanges { } } } + + // This entry is not being changed hash_map::Entry::Vacant(vac) => { - // we currently aren't changing anything on that entry - // so we need to create an update with the target bytecode + // Induce an Update to the entry that sets the bytecode to its new value vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { bytecode: SetOrKeep::Set(bytecode), ..Default::default() @@ -198,82 +284,147 @@ impl LedgerChanges { } } - /// tries to return a data entry + /// Tries to return a datastore entry for a given address, + /// or gets it from a function if the value's status is unknown. + /// + /// This function is used as an optimization: + /// if the result can be deduced unambiguously from the LedgerChanges, + /// no need to dig further (for example in the FinalLedger). + /// + /// # Arguments + /// * addr: target address + /// * key: datastore key + /// * f: fallback function with no arguments and returning Option> /// /// # Returns - /// * Some(v) if a value is present + /// * Some(v) if the value was found, where v is a copy of the value /// * None if the value is absent /// * f() if the value is unknown - /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further pub fn get_data_entry_or_else Option>>( &self, addr: &Address, key: &Hash, f: F, ) -> Option> { + // Get the current changes being applied to the ledger entry associated to that address match self.0.get(addr) { + // This ledger entry is being replaced by a new one: + // get the datastore entry from the new ledger entry Some(SetUpdateOrDelete::Set(v)) => v.datastore.get(key).cloned(), + + // This ledger entry is being updated Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => { + // Get the update being applied to that datastore entry match datastore.get(key) { + // A new datastore value is being set: return a clone of it Some(SetOrDelete::Set(v)) => Some(v.clone()), + + // This datastore entry is being deleted: return None Some(SetOrDelete::Delete) => None, + + // There are no changes to this particular datastore entry. 
+ // We therefore have no info on the absolute contents of the datastore entry. + // We call the fallback function and return its output. None => f(), } } + + // This ledger entry is being deleted: return None Some(SetUpdateOrDelete::Delete) => None, + + // This ledger entry is not being changed. + // We therefore have no info on the absolute contents of its datastore entry. + // We call the fallback function and return its output. None => f(), } } - /// tries to return whether a data entry exists + /// Tries to return wherther a datastore entry exists for a given address, + /// or gets it from a function if the datastore entry's status is unknown. /// - /// # Returns - /// * true if it does - /// * false if it does not - /// * f() if its existance is unknown + /// This function is used as an optimization: + /// if the result can be deduced unambiguously from the LedgerChanges, + /// no need to dig further (for example in the FinalLedger). + /// + /// # Arguments + /// * addr: target address + /// * key: datastore key + /// * f: fallback function with no arguments and returning bool /// - /// this is used as an optimization: - /// if the value can be deduced unambiguously from the LedgerChanges, no need to dig further + /// # Returns + /// * true if the ledger entry exists and the key is present in its datastore + /// * false if the ledger entry is absent, or if the key is not in its datastore + /// * f() if the existence of the ledger entry or datastore entry is unknown pub fn has_data_entry_or_else bool>( &self, addr: &Address, key: &Hash, f: F, ) -> bool { + // Get the current changes being applied to the ledger entry associated to that address match self.0.get(addr) { + // This ledger entry is being replaced by a new one: + // check if the replacement ledger entry has the key in its datastore Some(SetUpdateOrDelete::Set(v)) => v.datastore.contains_key(key), + + // This ledger entry is being updated Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => { + // Get the update being applied to that datastore entry match datastore.get(key) { + // A new datastore value is being set: the datastore entry exists Some(SetOrDelete::Set(_)) => true, + + // The datastore entry is being deletes: it doesn't exist anymore Some(SetOrDelete::Delete) => false, + + // There are no changes to this particular datastore entry. + // We therefore have no info on its existence. + // We call the fallback function and return its output. None => f(), } } + + // This ledger entry is being deleted: it has no datastore anymore Some(SetUpdateOrDelete::Delete) => false, + + // This ledger entry is not being changed. + // We therefore have no info on its datastore. + // We call the fallback function and return its output. None => f(), } } - /// set a datastore entry for an address + /// Set a datastore entry for a given address. + /// If the address doesn't exist, its ledger entry is created. + /// If the datasotre entry exists, its value is replaced, otherwise it is created. 
+ /// + /// # Arguments + /// * addr: target address + /// * key: datastore key + /// * data: datastore value to set pub fn set_data_entry(&mut self, addr: Address, key: Hash, data: Vec) { + // Get the changes being applied to the ledgr entry associated to that address match self.0.entry(addr) { + // There are changes currently being applied to the ledger entry hash_map::Entry::Occupied(mut occ) => { match occ.get_mut() { + // The ledger entry is being replaced by a new one SetUpdateOrDelete::Set(v) => { - // we currently set the absolute value of the entry - // so we need to update the data of that value + // Insert the value in the datastore of the replacement entry + // Any existing value is overwritten v.datastore.insert(key, data); } + + // The ledger entry is being updated SetUpdateOrDelete::Update(u) => { - // we currently update the value of the entry - // so we need to set the data for that update + // Ensure that the update includes setting the datastore entry u.datastore.insert(key, SetOrDelete::Set(data)); } + + // The ledger entry is being deleted d @ SetUpdateOrDelete::Delete => { - // we currently delete the entry - // so we need to create a default one with the target data + // Replace that ledger entry deletion with a replacement by a new default ledger entry + // for which the datastore contains the (key, value) to insert. *d = SetUpdateOrDelete::Set(LedgerEntry { datastore: vec![(key, data)].into_iter().collect(), ..Default::default() @@ -281,9 +432,10 @@ impl LedgerChanges { } } } + + // This ledger entry is not being changed hash_map::Entry::Vacant(vac) => { - // we currently aren't changing anything on that entry - // so we need to create an update with the target data + // Induce an Update to the ledger entry that sets the datastore entry to the desired value vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore: vec![(key, SetOrDelete::Set(data))].into_iter().collect(), ..Default::default() diff --git a/massa-ledger/src/ledger_entry.rs b/massa-ledger/src/ledger_entry.rs index 9e6ae83976d..ac2a5299a69 100644 --- a/massa-ledger/src/ledger_entry.rs +++ b/massa-ledger/src/ledger_entry.rs @@ -1,5 +1,7 @@ // Copyright (c) 2022 MASSA LABS +//! This file defines the structure representing an entry in the FinalLedger + use crate::ledger_changes::LedgerEntryUpdate; use crate::types::{Applicable, SetOrDelete}; use massa_hash::hash::Hash; @@ -9,26 +11,42 @@ use massa_models::{DeserializeCompact, SerializeCompact}; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; -/// structure defining a ledger entry +/// Structure defining an entry associated to an address in the FinalLedger #[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct LedgerEntry { + /// The parallel balance of that entry. + /// See lib.rs for an explanation on parallel vs sequential balances. 
pub parallel_balance: Amount, + + /// Executable bytecode pub bytecode: Vec, + + /// A key-value store associating a hash to arbitrary bytes pub datastore: BTreeMap>, } -/// LedgerEntryUpdate can be applied to a LedgerEntry +/// A LedgerEntryUpdate can be applied to a LedgerEntry impl Applicable for LedgerEntry { - /// applies a LedgerEntryUpdate fn apply(&mut self, update: LedgerEntryUpdate) { + // apply updates to the parallel balance update.parallel_balance.apply_to(&mut self.parallel_balance); + + // apply updates to the executable bytecode update.bytecode.apply_to(&mut self.bytecode); + + // iterate over all datastore updates for (key, value_update) in update.datastore { match value_update { + // this update sets a new value to a datastore entry SetOrDelete::Set(v) => { + // insert the new value in the datastore, + // replacing any existing value self.datastore.insert(key, v); } + + // this update deletes a datastore entry SetOrDelete::Delete => { + // remove that entry from the datastore if it exists self.datastore.remove(&key); } } @@ -36,7 +54,7 @@ impl Applicable for LedgerEntry { } } -/// serialize as compact binary +/// Allow serializing the LedgerEntry into a compact binary representation impl SerializeCompact for LedgerEntry { fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { let mut res: Vec = Vec::new(); @@ -78,6 +96,7 @@ impl SerializeCompact for LedgerEntry { } } +/// Allow deserializing a LedgerEntry from its compact binary representation impl DeserializeCompact for LedgerEntry { fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { let mut cursor = 0usize; diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs index 48b2f66a9ae..abcd825d7d1 100644 --- a/massa-ledger/src/lib.rs +++ b/massa-ledger/src/lib.rs @@ -1,5 +1,46 @@ // Copyright (c) 2022 MASSA LABS +//! # General description +//! +//! This crate implements a ledger matching addresses to balances, executable bytecode and data. +//! It also provides tools to manipulate ledger entries. +//! +//! FinalLedger representing a ledger at a given slot that was executed as final +//! (see the massa-execution-worker crate for details on execution). +//! Only the execution worker writes into the final ledger. +//! +//! # A note on parallel vs sequential balance +//! +//! The distinctions between the parallel and the sequential balance of a ledger entry are the following: +//! * the parallel balance can be credited or spent in any slot +//! * the sequential balance can be credited in any slot but only spent in slots form the address' thread +//! * block produers are credited fees from the sequential balance, +//! and they can ensure that this balance will be available for their block simply +//! by looking for sequential balance spendings within the block's thread. +//! +//! # Architecture +//! +//! ## ledger.rs +//! Defines the FinalLedger that matches an address to a LedgerEntry (see ledger_entry.rs), +//! and can be manipulated using LedgerChanges (see ledger_changes.rs). +//! The FinalLedger is bootstrapped using tooling available in bootstrap.rs +//! +//! ## ledger_entry.rs +//! Represents an entry in the ledger for a given address. +//! It contains balances, executable bytecode and an arbitrary datastore. +//! +//! ## ledger_changes.rs +//! Represents a list of changes to ledger entries that +//! can be modified, combined or applied to the final ledger. +//! +//! ## bootstrap.rs +//! Provides serializable strucutres and tools for bootstrapping the final ledger. 
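To make the parallel vs sequential rule described earlier in this module documentation concrete, here is a toy check (not crate code) of when a sequential spend is allowed; the parallel balance has no such restriction:

// A sequential spend is only allowed when the slot's thread matches the
// spending address' thread, while a parallel spend is allowed in any slot.
fn sequential_spend_allowed(address_thread: u8, slot_thread: u8) -> bool {
    address_thread == slot_thread
}

fn main() {
    // An address assigned to thread 3 can spend sequentially in thread-3 slots only,
    // which is what lets a block producer in thread 3 verify fee availability
    // by looking at sequential spendings in its own thread alone.
    assert!(sequential_spend_allowed(3, 3));
    assert!(!sequential_spend_allowed(3, 5));
}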
+//! +//! ## Test exports +//! +//! When the crate feature `testing` is enabled, tooling useful for testing purposes is exported. +//! See test_exports/mod.rs for details. + #![feature(map_first_last)] #![feature(async_closure)] diff --git a/massa-ledger/src/types.rs b/massa-ledger/src/types.rs index 0af828b3feb..cd386577fab 100644 --- a/massa-ledger/src/types.rs +++ b/massa-ledger/src/types.rs @@ -1,22 +1,30 @@ // Copyright (c) 2022 MASSA LABS -/// represents a structure that supports another one being applied to it +//! Provides various tools to manipulate ledger entries and changes happening on them. + +/// Trait marking a structure that supports another one (V) being applied to it pub trait Applicable { fn apply(&mut self, _: V); } -/// represents a set/update/delete change +/// Enum representing set/update/delete change on a value T #[derive(Debug, Clone)] pub enum SetUpdateOrDelete, V: Applicable + Clone> { - /// sets a new absolute value T + /// Sets the value T a new absolute value T Set(T), - /// applies an update V to an existing value + + /// Applies an update V to an existing value T. + /// If the value T doesn't exist: + /// a `new_t = T::default()` is created, + /// the update V is applied to it, + /// and the enum is changed to `SetUpdateOrDelete::Set(new_t)` Update(V), - /// deletes a value + + /// Deletes the value T Delete, } -/// supports applying another SetUpdateOrDelete to self +/// support applying another SetUpdateOrDelete to self impl, V: Applicable> Applicable> for SetUpdateOrDelete where @@ -50,12 +58,13 @@ where } } -/// represents a set/delete change +/// Enum representing a set/delete change on a value T #[derive(Debug, Clone)] pub enum SetOrDelete { /// sets a new absolute value T Set(T), - /// deletes a value + + /// deletes the value Delete, } @@ -71,6 +80,7 @@ impl Applicable> for SetOrDelete { pub enum SetOrKeep { /// sets a new absolute value T Set(T), + /// keeps the existing value Keep, } @@ -86,7 +96,7 @@ impl Applicable> for SetOrKeep { } impl SetOrKeep { - /// applies the current SetOrKeep into a target mutable value + /// applies the current SetOrKeep to a target mutable value pub fn apply_to(self, val: &mut T) { if let SetOrKeep::Set(v) = self { // only change the value if self is setting a new one @@ -95,6 +105,7 @@ impl SetOrKeep { } } +/// By default, SetOrKeep keeps the existing value impl Default for SetOrKeep { fn default() -> Self { SetOrKeep::Keep From e36fc7e7b04d008f4ef950bc9a94cc21acd95330 Mon Sep 17 00:00:00 2001 From: damip Date: Tue, 22 Feb 2022 15:28:03 +0100 Subject: [PATCH 38/73] clippy lints --- massa-bootstrap/src/lib.rs | 4 +-- massa-client/src/rpc.rs | 4 +-- massa-execution-worker/src/context.rs | 26 +++++++------------ massa-execution-worker/src/controller.rs | 2 +- massa-execution-worker/src/execution.rs | 6 ++--- massa-execution-worker/src/interface_impl.rs | 5 ++-- .../src/speculative_ledger.rs | 23 ++++++++-------- massa-execution-worker/src/worker.rs | 8 +++--- massa-ledger/src/bootstrap.rs | 6 +---- massa-ledger/src/ledger.rs | 2 +- 10 files changed, 35 insertions(+), 51 deletions(-) diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index 4dd6ab70dcd..be0ae6e795a 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -501,9 +501,7 @@ async fn manage_bootstrap( // Fourth, send ledger state send_state_timeout( write_timeout, - server.send(messages::BootstrapMessage::FinalLedgerState { - ledger_state: ledger_state, - }), + server.send(messages::BootstrapMessage::FinalLedgerState { 
ledger_state }), "bootstrap ledger state send timed out", ) .await diff --git a/massa-client/src/rpc.rs b/massa-client/src/rpc.rs index 1fe2d0ca247..c38b234916b 100644 --- a/massa-client/src/rpc.rs +++ b/massa-client/src/rpc.rs @@ -124,9 +124,7 @@ impl RpcClient { ) .await? .pop() - .ok_or(RpcError::Client( - "missing return value on execute_read_only_request".into(), - )) + .ok_or_else(|| RpcError::Client("missing return value on execute_read_only_request".into())) } //////////////// diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index b0fa5572ff4..961231c6816 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -241,11 +241,9 @@ impl ExecutionContext { pub fn get_current_address(&self) -> Result { match self.stack.last() { Some(addr) => Ok(addr.address), - _ => { - return Err(ExecutionError::RuntimeError( - "failed to read current address: call stack empty".into(), - )) - } + _ => Err(ExecutionError::RuntimeError( + "failed to read current address: call stack empty".into(), + )), } } @@ -254,11 +252,9 @@ impl ExecutionContext { pub fn get_current_owned_addresses(&self) -> Result, ExecutionError> { match self.stack.last() { Some(v) => Ok(v.owned_addresses.clone()), - None => { - return Err(ExecutionError::RuntimeError( - "failed to read current owned addresses list: call stack empty".into(), - )) - } + None => Err(ExecutionError::RuntimeError( + "failed to read current owned addresses list: call stack empty".into(), + )), } } @@ -266,11 +262,9 @@ impl ExecutionContext { pub fn get_current_call_coins(&self) -> Result { match self.stack.last() { Some(v) => Ok(v.coins), - None => { - return Err(ExecutionError::RuntimeError( - "failed to read current call coins: call stack empty".into(), - )) - } + None => Err(ExecutionError::RuntimeError( + "failed to read current call coins: call stack empty".into(), + )), } } @@ -283,7 +277,7 @@ impl ExecutionContext { pub fn has_write_rights_on(&self, addr: &Address) -> bool { self.stack .last() - .map_or(false, |v| v.owned_addresses.contains(&addr)) + .map_or(false, |v| v.owned_addresses.contains(addr)) } /// Creates a new smart contract address with initial bytecode, and returns this address diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 11e1cd78eb4..aaab0d8937f 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -156,7 +156,7 @@ impl ExecutionController for ExecutionControllerImpl { // wait for the result of the execution match resp_rx.recv() { - Ok(result) => return result, + Ok(result) => result, Err(err) => { return Err(ExecutionError::RuntimeError(format!( "the VM input channel failed: {}", diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 7e8f679c5d9..d137e05cc61 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -447,7 +447,7 @@ impl ExecutionState { .cloned(); let active_entry = match (&final_entry, active_change) { (final_v, None) => final_v.clone(), - (_, Some(SetUpdateOrDelete::Set(v))) => Some(v.clone()), + (_, Some(SetUpdateOrDelete::Set(v))) => Some(v), (_, Some(SetUpdateOrDelete::Delete)) => None, (None, Some(SetUpdateOrDelete::Update(u))) => { let mut v = LedgerEntry::default(); @@ -479,8 +479,8 @@ impl ExecutionState { original_operation_id: Option, ) -> Vec { // iter on step history chained with final events - let start = 
start.unwrap_or_else(|| Slot::min()); - let end = end.unwrap_or_else(|| Slot::max()); + let start = start.unwrap_or_else(Slot::min); + let end = end.unwrap_or_else(Slot::max); self.final_events .get_filtered_sc_output_event( start, diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index 5436c1c5a1f..6727b8026f7 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -10,7 +10,6 @@ use anyhow::{bail, Result}; use massa_execution_exports::ExecutionConfig; use massa_execution_exports::ExecutionStackElement; use massa_hash::hash::Hash; -use massa_models::Amount; use massa_models::{ output_event::{EventExecutionContext, SCOutputEvent, SCOutputEventId}, timeslots::get_block_slot_timestamp, @@ -146,7 +145,7 @@ impl Interface for InterfaceImpl { let address = context.get_current_address()?; Ok(context .get_parallel_balance(&address) - .unwrap_or(Amount::default()) + .unwrap_or_default() .to_raw()) } @@ -162,7 +161,7 @@ impl Interface for InterfaceImpl { let address = massa_models::Address::from_str(address)?; Ok(context_guard!(self) .get_parallel_balance(&address) - .unwrap_or(Amount::default()) + .unwrap_or_default() .to_raw()) } diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index db20ce23b25..1694cddb0c8 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -123,13 +123,11 @@ impl SpeculativeLedger { if let Some(from_addr) = from_addr { let new_balance = self .get_parallel_balance(&from_addr) - .ok_or(ExecutionError::RuntimeError( - "source address not found".into(), - ))? + .ok_or_else(|| ExecutionError::RuntimeError("source address not found".into()))? .checked_sub(amount) - .ok_or(ExecutionError::RuntimeError( - "unsufficient from_addr balance".into(), - ))?; + .ok_or_else(|| { + ExecutionError::RuntimeError("unsufficient from_addr balance".into()) + })?; changes.set_parallel_balance(from_addr, new_balance); } @@ -138,11 +136,11 @@ impl SpeculativeLedger { if let Some(to_addr) = to_addr { let new_balance = changes .get_parallel_balance_or_else(&to_addr, || self.get_parallel_balance(&to_addr)) - .unwrap_or(Amount::default()) + .unwrap_or_default() .checked_add(amount) - .ok_or(ExecutionError::RuntimeError( - "overflow in to_addr balance".into(), - ))?; + .ok_or_else(|| { + ExecutionError::RuntimeError("overflow in to_addr balance".into()) + })?; changes.set_parallel_balance(to_addr, new_balance); } @@ -182,7 +180,8 @@ impl SpeculativeLedger { bytecode: Vec, ) -> Result<(), ExecutionError> { // set bytecode (create if non-existant) - Ok(self.added_changes.set_bytecode(addr, bytecode)) + self.added_changes.set_bytecode(addr, bytecode); + Ok(()) } /// Sets the bytecode associated to an address in the ledger. 
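The transfer hunk just above relies on checked arithmetic so that an over-spend or an overflowing credit becomes an explicit error instead of silently wrapping. A self-contained toy version of that pattern, using plain u64 balances and string errors in place of Amount and ExecutionError, and ignoring edge cases such as from == to:

use std::collections::HashMap;

// Debit with checked_sub, credit with checked_add, and surface any failure as an error.
fn transfer(
    balances: &mut HashMap<&'static str, u64>,
    from: &'static str,
    to: &'static str,
    amount: u64,
) -> Result<(), String> {
    let from_balance = *balances
        .get(from)
        .ok_or_else(|| "source address not found".to_string())?;
    let new_from = from_balance
        .checked_sub(amount)
        .ok_or_else(|| "insufficient source balance".to_string())?;
    let new_to = balances
        .get(to)
        .copied()
        .unwrap_or_default()
        .checked_add(amount)
        .ok_or_else(|| "overflow in destination balance".to_string())?;
    balances.insert(from, new_from);
    balances.insert(to, new_to);
    Ok(())
}

fn main() {
    let mut balances = HashMap::new();
    balances.insert("alice", 100u64);
    transfer(&mut balances, "alice", "bob", 30).unwrap();
    assert_eq!(balances["alice"], 70);
    assert_eq!(balances["bob"], 30);
    // Debiting more than the available balance is rejected instead of wrapping.
    assert!(transfer(&mut balances, "alice", "bob", 1_000).is_err());
}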
@@ -262,7 +261,7 @@ impl SpeculativeLedger { data: Vec, ) -> Result<(), ExecutionError> { // check for address existence - if !self.entry_exists(&addr) { + if !self.entry_exists(addr) { return Err(ExecutionError::RuntimeError(format!( "could not set data for address {}: entry does not exist", addr diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 2a76e2c295a..86bc3987c66 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -280,7 +280,7 @@ impl ExecutionThread { // apply execution output to final state exec_state.apply_final_execution_output(exec_out); - return true; + true } /// executes one active slot, if any @@ -310,7 +310,7 @@ impl ExecutionThread { // apply execution output to active state exec_state.apply_active_execution_output(exec_out); - return true; + true } /// Gets the time from now() to the slot just after next last_active_slot. @@ -472,7 +472,7 @@ impl ExecutionThread { // Wait to be notified of new input, for at most time_until_next_slot // Note: spurious wake-ups are not a problem: // the next loop iteration will just do nohing and come back to wait here. - let _ = self + let (_lock, _timeout_result) = self .controller .input_data .0 @@ -517,7 +517,7 @@ pub fn start_execution_worker( // create an execution state let execution_state = Arc::new(RwLock::new(ExecutionState::new( config.clone(), - final_ledger.clone(), + final_ledger, ))); // create a controller diff --git a/massa-ledger/src/bootstrap.rs b/massa-ledger/src/bootstrap.rs index 3ec8070c74b..e5e92a8ce41 100644 --- a/massa-ledger/src/bootstrap.rs +++ b/massa-ledger/src/bootstrap.rs @@ -57,11 +57,7 @@ impl DeserializeCompact for FinalLedgerBootstrapState { cursor += delta; // ledger size - let (ledger_size, delta) = u64::from_varint_bytes(&buffer[cursor..])? - .try_into() - .map_err(|_| { - ModelsError::SerializeError("could not convert ledger size to usize".into()) - })?; + let (ledger_size, delta) = u64::from_varint_bytes(&buffer[cursor..])?; // TODO cap the ledger size cursor += delta; diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs index e63b7909214..43394c4d6f7 100644 --- a/massa-ledger/src/ledger.rs +++ b/massa-ledger/src/ledger.rs @@ -49,7 +49,7 @@ impl Applicable for FinalLedger { // if the entry does not exist, inserts a default one and applies the updates to it self.sorted_ledger .entry(addr) - .or_insert_with(|| Default::default()) + .or_insert_with(Default::default) .apply(entry_update); } From 934feee2bace041c31ade67088aade3c46c98e55 Mon Sep 17 00:00:00 2001 From: damip Date: Tue, 22 Feb 2022 20:22:34 +0100 Subject: [PATCH 39/73] remove indentations confusing cargo --- massa-ledger/src/ledger_changes.rs | 54 +++++++++++++++--------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/massa-ledger/src/ledger_changes.rs b/massa-ledger/src/ledger_changes.rs index ee34a22438c..48cd6e32319 100644 --- a/massa-ledger/src/ledger_changes.rs +++ b/massa-ledger/src/ledger_changes.rs @@ -70,13 +70,13 @@ impl LedgerChanges { /// no need to dig further (for example in the FinalLedger). 
/// /// # Arguments - /// * addr: address for which to get the value - /// * f: fallback function with no arguments and returning Option + /// * addr: address for which to get the value + /// * f: fallback function with no arguments and returning Option /// /// # Returns - /// * Some(v) if a value is present, where v is a copy of the value - /// * None if the value is absent - /// * f() if the value is unknown + /// * Some(v) if a value is present, where v is a copy of the value + /// * None if the value is absent + /// * f() if the value is unknown pub fn get_parallel_balance_or_else Option>( &self, addr: &Address, @@ -117,13 +117,13 @@ impl LedgerChanges { /// no need to dig further (for example in the FinalLedger). /// /// # Arguments - /// * addr: address for which to get the value - /// * f: fallback function with no arguments and returning Option> + /// * addr: address for which to get the value + /// * f: fallback function with no arguments and returning Option> /// /// # Returns - /// * Some(v) if a value is present, where v is a copy of the value - /// * None if the value is absent - /// * f() if the value is unknown + /// * Some(v) if a value is present, where v is a copy of the value + /// * None if the value is absent + /// * f() if the value is unknown pub fn get_bytecode_or_else Option>>( &self, addr: &Address, @@ -163,13 +163,13 @@ impl LedgerChanges { /// no need to dig further (for example in the FinalLedger). /// /// # Arguments - /// * addr: address to search for - /// * f: fallback function with no arguments and returning bool + /// * addr: address to search for + /// * f: fallback function with no arguments and returning bool /// /// # Returns - /// * true if the entry exists - /// * false if the value is absent - /// * f() if the value's existence is unknown + /// * true if the entry exists + /// * false if the value is absent + /// * f() if the value's existence is unknown pub fn entry_exists_or_else bool>(&self, addr: &Address, f: F) -> bool { // Get the changes for the provided address match self.0.get(addr) { @@ -292,14 +292,14 @@ impl LedgerChanges { /// no need to dig further (for example in the FinalLedger). /// /// # Arguments - /// * addr: target address - /// * key: datastore key - /// * f: fallback function with no arguments and returning Option> + /// * addr: target address + /// * key: datastore key + /// * f: fallback function with no arguments and returning Option> /// /// # Returns - /// * Some(v) if the value was found, where v is a copy of the value - /// * None if the value is absent - /// * f() if the value is unknown + /// * Some(v) if the value was found, where v is a copy of the value + /// * None if the value is absent + /// * f() if the value is unknown pub fn get_data_entry_or_else Option>>( &self, addr: &Address, @@ -347,14 +347,14 @@ impl LedgerChanges { /// no need to dig further (for example in the FinalLedger). 
/// /// # Arguments - /// * addr: target address - /// * key: datastore key - /// * f: fallback function with no arguments and returning bool + /// * addr: target address + /// * key: datastore key + /// * f: fallback function with no arguments and returning bool /// /// # Returns - /// * true if the ledger entry exists and the key is present in its datastore - /// * false if the ledger entry is absent, or if the key is not in its datastore - /// * f() if the existence of the ledger entry or datastore entry is unknown + /// * true if the ledger entry exists and the key is present in its datastore + /// * false if the ledger entry is absent, or if the key is not in its datastore + /// * f() if the existence of the ledger entry or datastore entry is unknown pub fn has_data_entry_or_else bool>( &self, addr: &Address, From 1dbbe66b95ada35a939e4f5232f7dd6524a3bb2f Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 10:44:21 +0100 Subject: [PATCH 40/73] minimiz lock time --- massa-execution-worker/src/interface_impl.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index 6727b8026f7..013ce670676 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -235,8 +235,8 @@ impl Interface for InterfaceImpl { /// # Returns /// The datastore value matching the provided key, if found, otherwise an error. fn raw_get_data(&self, key: &str) -> Result> { - let context = context_guard!(self); let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); + let context = context_guard!(self); let addr = context.get_current_address()?; match context.get_data_entry(&addr, &key) { Some(data) => Ok(data), @@ -251,8 +251,8 @@ impl Interface for InterfaceImpl { /// * key: string key of the datastore entry to set /// * value: new value to set fn raw_set_data(&self, key: &str, value: &[u8]) -> Result<()> { - let mut context = context_guard!(self); let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); + let mut context = context_guard!(self); let addr = context.get_current_address()?; context.set_data_entry(&addr, key, value.to_vec(), true)?; Ok(()) @@ -266,8 +266,8 @@ impl Interface for InterfaceImpl { /// # Returns /// true if the address exists and has the entry matching the provided key in its datastore, otherwise false fn has_data(&self, key: &str) -> Result { - let context = context_guard!(self); let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); + let context = context_guard!(self); let addr = context.get_current_address()?; Ok(context.has_data_entry(&addr, &key)) } From 99c87d131942c61c5f857f7708250e3aabaaf76f Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 11:36:30 +0100 Subject: [PATCH 41/73] rename exclusive to shared --- massa-execution-worker/src/execution.rs | 4 ++-- massa-execution-worker/src/interface_impl.rs | 4 ++-- massa-execution-worker/src/speculative_ledger.rs | 6 +++--- massa-execution-worker/src/worker.rs | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index d137e05cc61..b5fb12ccd5f 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -25,7 +25,7 @@ use std::{ }; use tracing::debug; -/// Used to lock the execution context for exclusive access +/// Used to acquire a lock on the execution context macro_rules! 
context_guard { ($self:ident) => { $self @@ -256,7 +256,7 @@ impl ExecutionState { // prepare the current slot context for executing the operation let context_snapshot; { - // get exclusive write access to the contex + // acquire write access to the context let mut context = context_guard!(self); // Use the context to credit the producer of the block with max_gas * gas_price parallel coins. diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index 013ce670676..d94c1bce05b 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -35,7 +35,7 @@ macro_rules! context_guard { pub(crate) struct InterfaceImpl { /// execution config config: ExecutionConfig, - /// exclusive access to the execution context (see context.rs) + /// thread-safe sared access to the execution context (see context.rs) context: Arc>, } @@ -44,7 +44,7 @@ impl InterfaceImpl { /// /// # Arguments /// * config: execution config - /// * context: exclusive access to the current execution context (see context.rs) + /// * context: thread-safe shared access to the current execution context (see context.rs) pub fn new(config: ExecutionConfig, context: Arc>) -> InterfaceImpl { InterfaceImpl { config, context } } diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index 1694cddb0c8..4bc398c870e 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -11,14 +11,14 @@ use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; use massa_models::{Address, Amount}; use std::sync::{Arc, RwLock}; -/// The SpeculativeLedger contains an exclusive reference to the final ledger (read-only), +/// The SpeculativeLedger contains an thread-safe shared reference to the final ledger (read-only), /// a list of existing changes that happened o the ledger since its finality, /// as well as an extra list of "added" changes. /// The SpeculativeLedger makes it possible to transparently manipulate a virtual ledger /// that takes into account all those ledger changes and allows adding more /// while keeping track of all the newly added changes, and never writing in the final ledger. pub struct SpeculativeLedger { - /// Exclusive access to the final ledger. For reading only. + /// Thread-safe shared access to the final ledger. For reading only. final_ledger: Arc>, /// Accumulation of changes that previously happened to the ledger since finality. 
@@ -37,7 +37,7 @@ impl SpeculativeLedger { /// creates a new SpeculativeLedger /// /// # Arguments - /// * final_ledger: exclusive access to the final ledger (for reading only) + /// * final_ledger: thread-safe shared access to the final ledger (for reading only) /// * previous_changes: accumulation of changes that previously happened to the ledger since finality pub fn new(final_ledger: Arc>, previous_changes: LedgerChanges) -> Self { SpeculativeLedger { diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 86bc3987c66..7f27a12f331 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -55,7 +55,7 @@ impl ExecutionThread { /// # Arguments /// * config: execution config /// * controller: a copy of the ExecutionController to get incoming requests from - /// * execution_state: an exclusive reference to the execution state, which can be bootstrapped or newly created + /// * execution_state: an thread-safe shared access to the execution state, which can be bootstrapped or newly created pub fn new( config: ExecutionConfig, controller: ExecutionControllerImpl, @@ -505,7 +505,7 @@ impl ExecutionThread { /// /// # parameters /// * config: execution config -/// * final_ledger: a reference to the final ledger for shared reading and exclusive writing +/// * final_ledger: a thread-safe shared access to the final ledger for reading and writing /// /// # Returns /// An instance of ExecutionManager allowing to stop the worker or generate ExecutionController instances, From aed41c23d3fca06ca65555f61b1e36e50c07b977 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 13:16:12 +0100 Subject: [PATCH 42/73] added a comment on unsized_fn_params --- massa-execution-exports/src/controller_traits.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index a18b3c71c7b..7c384c2b53c 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -67,6 +67,7 @@ pub trait ExecutionManager { /// Stop the execution thread /// Note that we do not take self by value to consume it /// because it is not allowed to move out of Box + /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. 
fn stop(&mut self); /// Get a new execution controller From 51e96d05f38ca8040de506989cfff58bc2e745dd Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 13:30:53 +0100 Subject: [PATCH 43/73] switch to parking_lot --- Cargo.lock | 10 ++-- massa-bootstrap/Cargo.toml | 1 + massa-bootstrap/src/lib.rs | 5 +- massa-bootstrap/src/tests/scenarios.rs | 6 +-- massa-execution-exports/Cargo.toml | 5 -- massa-execution-worker/Cargo.toml | 3 +- massa-execution-worker/src/context.rs | 3 +- massa-execution-worker/src/controller.rs | 46 +++++------------ massa-execution-worker/src/execution.rs | 20 ++------ massa-execution-worker/src/interface_impl.rs | 8 ++- .../src/speculative_ledger.rs | 34 ++++--------- massa-execution-worker/src/worker.rs | 51 ++++--------------- massa-node/Cargo.toml | 1 + massa-node/src/main.rs | 6 +-- 14 files changed, 57 insertions(+), 142 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc7f65242cf..30379b3f517 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1414,6 +1414,7 @@ dependencies = [ "massa_protocol_exports", "massa_protocol_worker", "massa_time", + "parking_lot 0.12.0", "pretty_assertions", "serde 1.0.136", "serde_json", @@ -1487,6 +1488,7 @@ dependencies = [ "massa_signature", "massa_time", "num_enum", + "parking_lot 0.12.0", "pretty_assertions", "rand 0.8.5", "serde 1.0.136", @@ -1570,19 +1572,14 @@ version = "0.1.0" dependencies = [ "anyhow", "displaydoc", - "lazy_static", "massa_ledger", "massa_models", "massa_time", "pretty_assertions", - "rand 0.8.5", - "rand_xoshiro", "serde 1.0.136", "serde_json", "serial_test", - "tempfile", "thiserror", - "tokio", "tracing", ] @@ -1592,7 +1589,6 @@ version = "0.1.0" dependencies = [ "anyhow", "displaydoc", - "lazy_static", "massa-sc-runtime", "massa_execution_exports", "massa_hash", @@ -1600,6 +1596,7 @@ dependencies = [ "massa_models", "massa_signature", "massa_time", + "parking_lot 0.12.0", "pretty_assertions", "rand 0.8.5", "rand_xoshiro", @@ -1608,7 +1605,6 @@ dependencies = [ "serial_test", "tempfile", "thiserror", - "tokio", "tracing", ] diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index 428896058b5..03488bfd47a 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -15,6 +15,7 @@ rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" +parking_lot = "0.12" tokio = { version = "1.11", features = ["full"] } tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } # custom modules diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index be0ae6e795a..675e85faf4c 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -18,11 +18,12 @@ use massa_proof_of_stake_exports::ExportProofOfStake; use massa_signature::{PrivateKey, PublicKey}; use massa_time::MassaTime; use messages::BootstrapMessage; +use parking_lot::RwLock; use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng}; use settings::BootstrapSettings; use std::collections::{hash_map, HashMap}; use std::net::SocketAddr; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::{convert::TryInto, net::IpAddr}; use tokio::time::Instant; use tokio::{sync::mpsc, task::JoinHandle, time::sleep}; @@ -411,7 +412,7 @@ impl BootstrapServer { // If the consensus state snapshot is older than the execution state snapshot, // the execution final ledger will be in the future after bootstrap, which causes an inconsistency. 
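// Because the final ledger snapshot below is read before the consensus bootstrap
// state is requested, the consensus snapshot should be at least as recent as the
// execution ledger snapshot, avoiding the inconsistency described above.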
let peer_boot = self.network_command_sender.get_bootstrap_peers().await?; - let res_ledger = self.final_ledger.read().expect("could not lock final ledger for reading").get_bootstrap_state(); + let res_ledger = self.final_ledger.read().get_bootstrap_state(); let (pos_boot, graph_boot) = self.consensus_command_sender.get_bootstrap_state().await?; bootstrap_data = Some((pos_boot, graph_boot, peer_boot, res_ledger)); cache_timer.set(sleep(cache_timeout)); diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 0c8cdbdf199..4b7674c666c 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -20,11 +20,9 @@ use massa_models::Version; use massa_network::{NetworkCommand, NetworkCommandSender}; use massa_signature::PrivateKey; use massa_time::MassaTime; +use parking_lot::RwLock; use serial_test::serial; -use std::{ - str::FromStr, - sync::{Arc, RwLock}, -}; +use std::{str::FromStr, sync::Arc}; use tokio::sync::mpsc; lazy_static::lazy_static! { diff --git a/massa-execution-exports/Cargo.toml b/massa-execution-exports/Cargo.toml index f7bf3a4db14..2795458716b 100644 --- a/massa-execution-exports/Cargo.toml +++ b/massa-execution-exports/Cargo.toml @@ -9,13 +9,9 @@ edition = "2021" [dependencies] anyhow = "1" displaydoc = "0.2" -lazy_static = "1.4.0" -rand = "0.8" -rand_xoshiro = "0.6" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.11", features = ["full"] } tracing = { version = "0.1", features = [ "max_level_debug", "release_max_level_debug", @@ -28,7 +24,6 @@ massa_ledger = { path = "../massa-ledger" } [dev-dependencies] pretty_assertions = "1.0" serial_test = "0.5" -tempfile = "3.2" [features] testing = [] diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml index 30420235565..de479db09c8 100644 --- a/massa-execution-worker/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -9,13 +9,12 @@ edition = "2021" [dependencies] anyhow = "1" displaydoc = "0.2" -lazy_static = "1.4.0" rand = "0.8" rand_xoshiro = "0.6" +parking_lot = "0.12" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.11", features = ["full"] } tracing = { version = "0.1", features = [ "max_level_debug", "release_max_level_debug", diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 961231c6816..4e6e88d7817 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -14,9 +14,10 @@ use massa_execution_exports::{ use massa_hash::hash::Hash; use massa_ledger::{FinalLedger, LedgerChanges}; use massa_models::{Address, Amount, BlockId, OperationId, Slot}; +use parking_lot::RwLock; use rand::SeedableRng; use rand_xoshiro::Xoshiro256PlusPlus; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; /// A snapshot taken from an ExecutionContext and that represents its current state. /// The ExecutionContext state can then be restored later from this snapshot. 
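The recurring change in this commit is that `parking_lot` locks are not poisoned by panics, so `lock()`, `read()` and `write()` return their guards directly instead of a `Result`, which is why the `.expect("could not lock ...")` calls are removed throughout. A minimal sketch of the guard-macro pattern under that assumption, where `guard!` is an illustrative stand-in for the crate's `context_guard!` macro and `parking_lot` 0.12 is assumed as a dependency:

// sketch only, not the actual massa code
use parking_lot::Mutex;
use std::sync::Arc;

// acquire the lock; parking_lot's lock() yields the guard directly, no Result
macro_rules! guard {
    ($ctx:expr) => {
        $ctx.lock()
    };
}

fn main() {
    let context = Arc::new(Mutex::new(0u64));
    {
        // no .expect(...): the mutex cannot be poisoned
        let mut ctx = guard!(context);
        *ctx += 1;
    }
    assert_eq!(*context.lock(), 1);
}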
diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index aaab0d8937f..d6b03eaa01c 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -14,8 +14,9 @@ use massa_models::prehash::Map; use massa_models::Address; use massa_models::OperationId; use massa_models::{Block, BlockId, Slot}; +use parking_lot::{Condvar, Mutex, RwLock}; use std::collections::{HashMap, VecDeque}; -use std::sync::{mpsc, Arc, Condvar, Mutex, RwLock}; +use std::sync::{mpsc, Arc}; use tracing::info; /// structure used to communicate with execution thread @@ -51,7 +52,7 @@ pub struct ExecutionControllerImpl { impl ExecutionControllerImpl { /// consumes and returns the input fed to the controller pub(crate) fn consume_input(&mut self) -> VMInputData { - std::mem::take(&mut self.input_data.1.lock().expect("VM input data lock failed")) + std::mem::take(&mut self.input_data.1.lock()) } } @@ -77,11 +78,7 @@ impl ExecutionController for ExecutionControllerImpl { .map(|(b_id, b)| (b.header.content.slot, (b_id, b))) .collect(); //update input data - let mut input_data = self - .input_data - .1 - .lock() - .expect("could not lock VM input data"); + let mut input_data = self.input_data.1.lock(); input_data.blockclique = mapped_blockclique; // replace blockclique input_data.finalized_blocks.extend(mapped_finalized_blocks); // append finalized blocks input_data.blockclique_changed = true; // signal a blockclique change @@ -102,16 +99,13 @@ impl ExecutionController for ExecutionControllerImpl { original_caller_address: Option
, original_operation_id: Option, ) -> Vec { - self.execution_state - .read() - .expect("could not lock execution state for reading") - .get_filtered_sc_output_event( - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - ) + self.execution_state.read().get_filtered_sc_output_event( + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + ) } /// gets a copy of a full ledger entry @@ -119,10 +113,7 @@ impl ExecutionController for ExecutionControllerImpl { /// # return value /// * (final_entry, active_entry) fn get_full_ledger_entry(&self, addr: &Address) -> (Option, Option) { - self.execution_state - .read() - .expect("could not lock execution state for reading") - .get_full_ledger_entry(addr) + self.execution_state.read().get_full_ledger_entry(addr) } /// Executes a readonly request @@ -133,11 +124,7 @@ impl ExecutionController for ExecutionControllerImpl { ) -> Result { // queue request into input, get response mpsc receiver let resp_rx = { - let mut input_data = self - .input_data - .1 - .lock() - .expect("could not lock VM input data"); + let mut input_data = self.input_data.1.lock(); // limit the read-only queue length if input_data.readonly_requests.len() >= self.config.readonly_queue_length { return Err(ExecutionError::RuntimeError( @@ -182,12 +169,7 @@ impl ExecutionManager for ExecutionManagerImpl { info!("stopping Execution controller..."); // notify the worker thread to stop { - let mut input_wlock = self - .controller - .input_data - .1 - .lock() - .expect("could not lock VM input data"); + let mut input_wlock = self.controller.input_data.1.lock(); input_wlock.stop = true; self.controller.input_data.0.notify_one(); } diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index b5fb12ccd5f..0a55e6ca0ec 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -19,19 +19,17 @@ use massa_models::output_event::SCOutputEvent; use massa_models::{Address, BlockId, Operation, OperationId, OperationType}; use massa_models::{Block, Slot}; use massa_sc_runtime::Interface; +use parking_lot::{Mutex, RwLock}; use std::{ collections::{HashMap, VecDeque}, - sync::{Arc, Mutex, RwLock}, + sync::Arc, }; use tracing::debug; /// Used to acquire a lock on the execution context macro_rules! context_guard { ($self:ident) => { - $self - .execution_context - .lock() - .expect("failed to acquire lock on execution context") + $self.execution_context.lock() }; } @@ -71,10 +69,7 @@ impl ExecutionState { pub fn new(config: ExecutionConfig, final_ledger: Arc>) -> ExecutionState { // Get the slot at the output of which the final ledger is attached. // This should be among the latest final slots. 
- let last_final_slot = final_ledger - .read() - .expect("could not r-lock final ledger") - .slot; + let last_final_slot = final_ledger.read().slot; // Create an empty placeholder execution context, with shared atomic access let execution_context = Arc::new(Mutex::new(ExecutionContext::new( @@ -113,7 +108,6 @@ impl ExecutionState { // apply ledger changes to the final ledger self.final_ledger .write() - .expect("could not lock final ledger for writing") .settle_slot(exec_out.slot, exec_out.ledger_changes); // update the final ledger's slot self.final_cursor = exec_out.slot; @@ -432,11 +426,7 @@ impl ExecutionState { addr: &Address, ) -> (Option, Option) { // get the full entry from the final ledger - let final_entry = self - .final_ledger - .read() - .expect("could not r-lock final ledger") - .get_full_entry(addr); + let final_entry = self.final_ledger.read().get_full_entry(addr); // get cumulative active changes and apply them // TODO there is a lot of overhead here: we only need to compute the changes for one entry and no need to clone it diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index d94c1bce05b..9473b4a2e3b 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -15,18 +15,16 @@ use massa_models::{ timeslots::get_block_slot_timestamp, }; use massa_sc_runtime::{Interface, InterfaceClone}; +use parking_lot::Mutex; use rand::Rng; use std::str::FromStr; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use tracing::debug; /// helper for locking the context mutex macro_rules! context_guard { ($self:ident) => { - $self - .context - .lock() - .expect("failed to acquire lock on execution context") + $self.context.lock() }; } diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index 4bc398c870e..6a0041b36ba 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -9,7 +9,8 @@ use massa_execution_exports::ExecutionError; use massa_hash::hash::Hash; use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; use massa_models::{Address, Amount}; -use std::sync::{Arc, RwLock}; +use parking_lot::RwLock; +use std::sync::Arc; /// The SpeculativeLedger contains an thread-safe shared reference to the final ledger (read-only), /// a list of existing changes that happened o the ledger since its finality, @@ -75,10 +76,7 @@ impl SpeculativeLedger { self.added_changes.get_parallel_balance_or_else(addr, || { self.previous_changes .get_parallel_balance_or_else(addr, || { - self.final_ledger - .read() - .expect("couldn't r-lock final ledger") - .get_parallel_balance(addr) + self.final_ledger.read().get_parallel_balance(addr) }) }) } @@ -93,12 +91,8 @@ impl SpeculativeLedger { pub fn get_bytecode(&self, addr: &Address) -> Option> { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.get_bytecode_or_else(addr, || { - self.previous_changes.get_bytecode_or_else(addr, || { - self.final_ledger - .read() - .expect("couldn't r-lock final ledger") - .get_bytecode(addr) - }) + self.previous_changes + .get_bytecode_or_else(addr, || self.final_ledger.read().get_bytecode(addr)) }) } @@ -160,12 +154,8 @@ impl SpeculativeLedger { pub fn entry_exists(&self, addr: &Address) -> bool { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.entry_exists_or_else(addr, || { - 
self.previous_changes.entry_exists_or_else(addr, || { - self.final_ledger - .read() - .expect("couldn't r-lock final ledger") - .entry_exists(addr) - }) + self.previous_changes + .entry_exists_or_else(addr, || self.final_ledger.read().entry_exists(addr)) }) } @@ -218,10 +208,7 @@ impl SpeculativeLedger { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.get_data_entry_or_else(addr, key, || { self.previous_changes.get_data_entry_or_else(addr, key, || { - self.final_ledger - .read() - .expect("couldn't r-lock final ledger") - .get_data_entry(addr, key) + self.final_ledger.read().get_data_entry(addr, key) }) }) } @@ -238,10 +225,7 @@ impl SpeculativeLedger { // try to read from added_changes, then previous_changes, then final_ledger self.added_changes.has_data_entry_or_else(addr, key, || { self.previous_changes.has_data_entry_or_else(addr, key, || { - self.final_ledger - .read() - .expect("couldn't r-lock final ledger") - .has_data_entry(addr, key) + self.final_ledger.read().has_data_entry(addr, key) }) }) } diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 7f27a12f331..09350b938ce 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -17,11 +17,9 @@ use massa_models::{ Block, Slot, }; use massa_time::MassaTime; +use parking_lot::{Condvar, Mutex, RwLock}; use std::sync::mpsc; -use std::{ - collections::HashMap, - sync::{Arc, Condvar, Mutex, RwLock}, -}; +use std::{collections::HashMap, sync::Arc}; use tracing::debug; /// Structure gathering all elements needed by the execution thread @@ -62,10 +60,7 @@ impl ExecutionThread { execution_state: Arc>, ) -> Self { // get the latest executed final slot, at the output of which the final ledger is attached - let final_cursor = execution_state - .read() - .expect("could not r-lock execution context") - .final_cursor; + let final_cursor = execution_state.read().final_cursor; // create and return the ExecutionThread ExecutionThread { @@ -239,10 +234,7 @@ impl ExecutionThread { } // w-lock execution state - let mut exec_state = self - .execution_state - .write() - .expect("could not lock execution state for writing"); + let mut exec_state = self.execution_state.write(); // get the slot just after the last executed final slot let slot = exec_state @@ -287,10 +279,7 @@ impl ExecutionThread { /// returns true if something was executed fn execute_one_active_slot(&mut self) -> bool { // write-lock the execution state - let mut exec_state = self - .execution_state - .write() - .expect("could not lock execution state for writing"); + let mut exec_state = self.execution_state.write(); // get the next active slot let slot = exec_state @@ -347,10 +336,7 @@ impl ExecutionThread { /// Speculative execution will then resume from the point of truncation. 
pub fn truncate_execution_history(&mut self) { // acquire write access to execution state - let mut exec_state = self - .execution_state - .write() - .expect("could not lock execution state for writing"); + let mut exec_state = self.execution_state.write(); // tells the execution state to truncate its execution output history // given the new list of active slots @@ -368,11 +354,7 @@ impl ExecutionThread { resp_tx: mpsc::Sender>, ) { // acquire read access to execution state and execute the read-only request - let outcome = self - .execution_state - .read() - .expect("could not lock execution state for reading") - .execute_readonly_request(req); + let outcome = self.execution_state.read().execute_readonly_request(req); // send the execution output through resp_tx if resp_tx.send(outcome).is_err() { @@ -444,12 +426,7 @@ impl ExecutionThread { } // Peek into the input data to see if new input arrived during this iteration of the loop - let input_data = self - .controller - .input_data - .1 - .lock() - .expect("could not lock execution input data"); + let mut input_data = self.controller.input_data.1.lock(); if input_data.stop { // there is a request to stop: quit the loop break; @@ -472,22 +449,16 @@ impl ExecutionThread { // Wait to be notified of new input, for at most time_until_next_slot // Note: spurious wake-ups are not a problem: // the next loop iteration will just do nohing and come back to wait here. - let (_lock, _timeout_result) = self + let _res = self .controller .input_data .0 - .wait_timeout(input_data, time_until_next_slot.to_duration()) - .expect("Execution worker main loop condition variable wait failed"); + .wait_for(&mut input_data, time_until_next_slot.to_duration()); } // the execution worker is stopping: // signal cancellation to all remaining read-only execution requests waiting for an MPSC response - let mut input_data = self - .controller - .input_data - .1 - .lock() - .expect("could not lock VM input data"); + let mut input_data = self.controller.input_data.1.lock(); for (_req, resp_tx) in input_data.readonly_requests.drain(..) 
{ if resp_tx .send(Err(ExecutionError::RuntimeError( diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index 2a437c0f666..8ed7259bbab 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -12,6 +12,7 @@ config = "0.11" directories = "4.0" futures = "0.3" lazy_static = "1.4.0" +parking_lot = "0.12" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" tokio = { version = "1.11", features = ["full"] } diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index af1bdd7758b..0cd9150dc1a 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -28,10 +28,8 @@ use massa_pool::{start_pool_controller, PoolCommandSender, PoolManager}; use massa_protocol_exports::ProtocolManager; use massa_protocol_worker::start_protocol_controller; use massa_time::MassaTime; -use std::{ - process, - sync::{Arc, RwLock}, -}; +use parking_lot::RwLock; +use std::{process, sync::Arc}; use tokio::signal; use tokio::sync::mpsc; use tracing::{error, info, warn}; From a13076440a34850345ce6aaa2d44503943b5ef2a Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 13:58:09 +0100 Subject: [PATCH 44/73] make controller clonable --- .../src/controller_traits.rs | 17 ++++++-- massa-execution-exports/src/lib.rs | 8 ++-- .../src/test_exports/mock.rs | 4 ++ massa-execution-worker/src/controller.rs | 25 +++++++----- massa-execution-worker/src/worker.rs | 40 +++++++++++-------- massa-node/src/main.rs | 9 +++-- 6 files changed, 64 insertions(+), 39 deletions(-) diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 7c384c2b53c..234ca3b5613 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -60,16 +60,25 @@ pub trait ExecutionController: Send + Sync { &self, req: ReadOnlyExecutionRequest, ) -> Result; + + /// Returns a boxed clone of self. + /// Useful to alow cloning Box. + fn clone_box(&self) -> Box; +} + +/// Allow cloning Box +/// Uses ExecutionController::clone_box internally +impl Clone for Box { + fn clone(&self) -> Box { + self.clone_box() + } } -/// Execution manager used to generate controllers and to stop the execution thread +/// Execution manager used to stop the execution thread pub trait ExecutionManager { /// Stop the execution thread /// Note that we do not take self by value to consume it /// because it is not allowed to move out of Box /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. fn stop(&mut self); - - /// Get a new execution controller - fn get_controller(&self) -> Box; } diff --git a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs index c2394241d3a..4906d59835d 100644 --- a/massa-execution-exports/src/lib.rs +++ b/massa-execution-exports/src/lib.rs @@ -9,12 +9,12 @@ //! # Usage //! //! When an execution worker is launched to run in a separate thread for the whole duration of the process, -//! an instance of ExecutionManager is returned (see the documentation of start_execution_worker in massa-execution-worker). +//! an instance of ExecutionManager is returned (see the documentation of start_execution_worker in massa-execution-worker), +//! as well as an instance of ExecutionController. //! -//! ExecutionManager allows stopping the execution worker thread, -//! but it also allows generating as many instances of ExecutionController as necessary. +//! The non-clonable ExecutionManager allows stopping the execution worker thread. //! -//! 
Each ExecutionController allows sending updates on the latest blockclique changes to the execution worker +//! The clonable ExecutionController allows sending updates on the latest blockclique changes to the execution worker //! for it to keep track of them and execute the operations present in blocks. //! It also allows various read-only queries such as executing bytecode //! while ignoring all the changes it would cause to the consensus state (read-only execution), diff --git a/massa-execution-exports/src/test_exports/mock.rs b/massa-execution-exports/src/test_exports/mock.rs index e8372fb6a6c..4d6706496a8 100644 --- a/massa-execution-exports/src/test_exports/mock.rs +++ b/massa-execution-exports/src/test_exports/mock.rs @@ -133,4 +133,8 @@ impl ExecutionController for MockExecutionController { .unwrap(); response_rx.recv().unwrap() } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } } diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index d6b03eaa01c..2a14cc6ed53 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -152,13 +152,21 @@ impl ExecutionController for ExecutionControllerImpl { } } } + + /// Returns a boxed clone of self. + /// Allows cloning Box, + /// see massa-execution-exports/controller_traits.rs + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } } /// Execution manager -/// Allows creating execution controllers, and stopping the execution worker +/// Allows stopping the execution worker pub struct ExecutionManagerImpl { - /// shared reference to the execution controller - pub(crate) controller: ExecutionControllerImpl, + /// input data to process in the VM loop + /// with a wakeup condition variable that needs to be triggered when the data changes + pub(crate) input_data: Arc<(Condvar, Mutex)>, /// handle used to join the worker thread pub(crate) thread_handle: Option>, } @@ -169,19 +177,14 @@ impl ExecutionManager for ExecutionManagerImpl { info!("stopping Execution controller..."); // notify the worker thread to stop { - let mut input_wlock = self.controller.input_data.1.lock(); + let mut input_wlock = self.input_data.1.lock(); input_wlock.stop = true; - self.controller.input_data.0.notify_one(); + self.input_data.0.notify_one(); } // join the execution thread if let Some(join_handle) = self.thread_handle.take() { join_handle.join().expect("VM controller thread panicked"); } - info!("Execution controller stopped"); - } - - /// return a new execution controller - fn get_controller(&self) -> Box { - Box::new(self.controller.clone()) + info!("execution controller stopped"); } } diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 09350b938ce..87beeb251cd 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -8,7 +8,8 @@ use crate::controller::{ExecutionControllerImpl, ExecutionManagerImpl, VMInputData}; use crate::execution::ExecutionState; use massa_execution_exports::{ - ExecutionConfig, ExecutionError, ExecutionManager, ExecutionOutput, ReadOnlyExecutionRequest, + ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ExecutionOutput, + ReadOnlyExecutionRequest, }; use massa_ledger::FinalLedger; use massa_models::BlockId; @@ -479,29 +480,33 @@ impl ExecutionThread { /// * final_ledger: a thread-safe shared access to the final ledger for reading and writing /// /// # Returns -/// An instance of ExecutionManager allowing to stop the worker or generate 
ExecutionController instances, -/// which are used to send requests and notifications to the worker. +/// A pair (execution_manager, execution_controller) where: +/// * execution_manager allows to stop the worker +/// * execution_controller allows sending requests and notifications to the worker pub fn start_execution_worker( config: ExecutionConfig, final_ledger: Arc>, -) -> Box { +) -> (Box, Box) { // create an execution state let execution_state = Arc::new(RwLock::new(ExecutionState::new( config.clone(), final_ledger, ))); + // define the input data interface + let input_data = Arc::new(( + Condvar::new(), + Mutex::new(VMInputData { + // notify of a blockclique change to run one initialization loop itration + blockclique_changed: true, + ..Default::default() + }), + )); + // create a controller let controller = ExecutionControllerImpl { config: config.clone(), - input_data: Arc::new(( - Condvar::new(), - Mutex::new(VMInputData { - // ntify of a blockclique change to run one initialization loop itration - blockclique_changed: true, - ..Default::default() - }), - )), + input_data: input_data.clone(), execution_state: execution_state.clone(), }; @@ -511,9 +516,12 @@ pub fn start_execution_worker( ExecutionThread::new(config, ctl, execution_state).main_loop(); }); - // return the execution manager - Box::new(ExecutionManagerImpl { - controller, + // create a manager + let manager = ExecutionManagerImpl { + input_data, thread_handle: Some(thread_handle), - }) + }; + + // return the execution manager and controller pair + (Box::new(manager), Box::new(controller)) } diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 0cd9150dc1a..ee38bc06816 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -143,7 +143,8 @@ async fn launch() -> ( t0: T0, genesis_timestamp: *GENESIS_TIMESTAMP, }; - let execution_manager = start_execution_worker(execution_config, final_ledger.clone()); + let (execution_manager, execution_controller) = + start_execution_worker(execution_config, final_ledger.clone()); let consensus_config = ConsensusConfig::from(&SETTINGS.consensus); // launch consensus controller @@ -151,7 +152,7 @@ async fn launch() -> ( start_consensus_controller( consensus_config.clone(), ConsensusChannels { - execution_controller: execution_manager.get_controller(), + execution_controller: execution_controller.clone(), protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender: pool_command_sender.clone(), @@ -181,7 +182,7 @@ async fn launch() -> ( let (api_private, api_private_stop_rx) = API::::new( consensus_command_sender.clone(), network_command_sender.clone(), - execution_manager.get_controller(), + execution_controller.clone(), &SETTINGS.api, consensus_config.clone(), ); @@ -190,7 +191,7 @@ async fn launch() -> ( // spawn public API let api_public = API::::new( consensus_command_sender.clone(), - execution_manager.get_controller(), + execution_controller.clone(), &SETTINGS.api, consensus_config, pool_command_sender.clone(), From 4f5bb8fff8835e91743df1c80cb4e14c880fc7c1 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 14:05:59 +0100 Subject: [PATCH 45/73] update doc --- massa-execution-worker/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index d3e1861b233..9448afbf3ba 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -4,9 +4,9 @@ //! //! 
The execution worker launches a persistent thread allowing the execution //! of operations that can contain executable bytecode and managing interactions with the ledger. -//! When the worker is launched, an ExecutionManager is returned, -//! allowing to stop the worker, -//! and to generate an arbitrary number of ExecutionController instances through which users interact with the worker. +//! When the worker is launched, a ExecutionManager and a ExecutionController are returned. +//! ExecutionManager allows stopping the worker, +//! and ExecutionController is the clonable structure through which users interact with the worker. //! //! The worker is fed through the ExecutionController with information about blockclique changes and newly finalized blocks //! and will execute the operations in those blocks, as well as pending asynchronous operations on empty slots. From a8dba46302da7e80fe0e31ffdf736f0893a196ca Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 17:04:07 +0100 Subject: [PATCH 46/73] clippy in tests --- massa-consensus-worker/src/tests/scenario_roll.rs | 4 ++-- massa-consensus-worker/src/tests/scenarios_ledger.rs | 2 +- .../src/tests/scenarios_note_attack_attempt.rs | 4 ++-- massa-consensus-worker/src/tests/tools.rs | 4 ++-- massa-execution-exports/src/test_exports/mock.rs | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/massa-consensus-worker/src/tests/scenario_roll.rs b/massa-consensus-worker/src/tests/scenario_roll.rs index 8502128803d..6f01dd7e27e 100644 --- a/massa-consensus-worker/src/tests/scenario_roll.rs +++ b/massa-consensus-worker/src/tests/scenario_roll.rs @@ -502,7 +502,7 @@ async fn test_roll_block_creation() { let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (mut pool_controller, pool_command_sender) = MockPoolController::new(); - let (execution_controller, _execution_rx) = MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); let init_time: MassaTime = 1000.into(); cfg.genesis_timestamp = MassaTime::now().unwrap().saturating_add(init_time); @@ -783,7 +783,7 @@ async fn test_roll_deactivation() { let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (mut pool_controller, pool_command_sender) = MockPoolController::new(); - let (execution_controller, _execution_rx) = MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); cfg.genesis_timestamp = MassaTime::now().unwrap().saturating_add(300.into()); // launch consensus controller diff --git a/massa-consensus-worker/src/tests/scenarios_ledger.rs b/massa-consensus-worker/src/tests/scenarios_ledger.rs index f6a6ec5cae6..2f2f91f1a73 100644 --- a/massa-consensus-worker/src/tests/scenarios_ledger.rs +++ b/massa-consensus-worker/src/tests/scenarios_ledger.rs @@ -498,7 +498,7 @@ async fn test_ledger_update_when_a_batch_of_blocks_becomes_final() { MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); let pool_sink = PoolCommandSink::new(pool_controller).await; - let (execution_controller, _execution_rx) = MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); // launch consensus controller let (consensus_command_sender, consensus_event_receiver, consensus_manager) = diff --git 
a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs b/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs index 5c0bb369cfa..a7364836e63 100644 --- a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs +++ b/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs @@ -30,7 +30,7 @@ async fn test_invalid_block_notified_as_attack_attempt() { MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); let pool_sink = PoolCommandSink::new(pool_controller).await; - let (execution_controller, _execution_rx) = MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); // launch consensus controller let (consensus_command_sender, consensus_event_receiver, consensus_manager) = @@ -94,7 +94,7 @@ async fn test_invalid_header_notified_as_attack_attempt() { let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); - let (execution_controller, _execution_rx) = MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); let pool_sink = PoolCommandSink::new(pool_controller).await; // launch consensus controller diff --git a/massa-consensus-worker/src/tests/tools.rs b/massa-consensus-worker/src/tests/tools.rs index a6b69d19a80..3e61c701576 100644 --- a/massa-consensus-worker/src/tests/tools.rs +++ b/massa-consensus-worker/src/tests/tools.rs @@ -639,7 +639,7 @@ pub async fn consensus_pool_test( MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded - let (execution_controller, _execution_rx) = MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); // launch consensus controller let (consensus_command_sender, consensus_event_receiver, consensus_manager) = @@ -700,7 +700,7 @@ where MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded - let (execution_controller, _execution_rx) = MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); let pool_sink = PoolCommandSink::new(pool_controller).await; // launch consensus controller diff --git a/massa-execution-exports/src/test_exports/mock.rs b/massa-execution-exports/src/test_exports/mock.rs index 4d6706496a8..543484970c1 100644 --- a/massa-execution-exports/src/test_exports/mock.rs +++ b/massa-execution-exports/src/test_exports/mock.rs @@ -51,7 +51,7 @@ pub struct MockExecutionController(Arc ( + pub fn new_with_receiver() -> ( Box, Receiver, ) { From b6c49b6f58d53adcf072abcc512f37810553d4bc Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Wed, 23 Feb 2022 17:09:26 +0100 Subject: [PATCH 47/73] Update massa-execution-exports/src/types.rs Co-authored-by: Aurelia <56112063+AureliaDolo@users.noreply.github.com> --- massa-execution-exports/src/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-execution-exports/src/types.rs b/massa-execution-exports/src/types.rs index 9b9710290a8..6220bb0d398 100644 --- a/massa-execution-exports/src/types.rs +++ 
b/massa-execution-exports/src/types.rs @@ -28,7 +28,7 @@ pub struct ReadOnlyExecutionRequest { pub simulated_gas_price: Amount, /// The code to execute. pub bytecode: Vec, - /// Call stack to simulate + /// Call stack to simulate, older caller first pub call_stack: Vec, } From c80433ffbc639499ea02b8ead776e8118c568618 Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Wed, 23 Feb 2022 17:09:36 +0100 Subject: [PATCH 48/73] Update massa-execution-worker/src/interface_impl.rs Co-authored-by: Aurelia <56112063+AureliaDolo@users.noreply.github.com> --- massa-execution-worker/src/interface_impl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index 9473b4a2e3b..ee7a63f81d8 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -33,7 +33,7 @@ macro_rules! context_guard { pub(crate) struct InterfaceImpl { /// execution config config: ExecutionConfig, - /// thread-safe sared access to the execution context (see context.rs) + /// thread-safe shared access to the execution context (see context.rs) context: Arc>, } From 4b6155e8a2bd595a13a331fa16759960c0342d95 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 17:15:40 +0100 Subject: [PATCH 49/73] correct fmt --- massa-execution-worker/src/context.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 4e6e88d7817..6fa80159d4d 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -208,7 +208,8 @@ impl ExecutionContext { // Add a marker to the seed indicating that we are in active mode // to prevent random draw collisions with read-only executions seed.push(1u8); // 1u8 = active - // For more deterministic entropy, seed with the block ID if any + + // For more deterministic entropy, seed with the block ID if any if let Some(block_id) = &opt_block_id { seed.extend(block_id.to_bytes()); // append block ID } From 4e0da2301af6280e977064a8c35d655bf364461c Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Wed, 23 Feb 2022 17:25:28 +0100 Subject: [PATCH 50/73] Update massa-execution-worker/src/controller.rs Co-authored-by: Aurelia <56112063+AureliaDolo@users.noreply.github.com> --- massa-execution-worker/src/controller.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 2a14cc6ed53..1ae96b2cac9 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -26,7 +26,7 @@ pub(crate) struct VMInputData { pub stop: bool, /// signal whether the blockclique changed pub blockclique_changed: bool, - /// list of newly finalized blocks, index by slot + /// list of newly finalized blocks, indexed by slot pub finalized_blocks: HashMap, /// blockclique, blocks indexed by slot pub blockclique: HashMap, From d979c06bf7dac427d09a00eeff8eb50c86ebf399 Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Wed, 23 Feb 2022 17:25:59 +0100 Subject: [PATCH 51/73] Update massa-execution-worker/src/controller.rs Co-authored-by: Aurelia <56112063+AureliaDolo@users.noreply.github.com> --- massa-execution-worker/src/controller.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 1ae96b2cac9..da2aae7ad8c 
100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -61,7 +61,7 @@ impl ExecutionController for ExecutionControllerImpl { /// /// # arguments /// * finalized_blocks: list of newly finalized blocks to be appended to the input finalized blocks - /// * blockclique: new blockclique, replaces the curren one in the input + /// * blockclique: new blockclique, replaces the current one in the input fn update_blockclique_status( &self, finalized_blocks: Map, From 55446b2d07ada6aa7c8d9da73a325bdc531b3001 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 17:28:07 +0100 Subject: [PATCH 52/73] elude contet constructor name --- massa-execution-worker/src/context.rs | 6 +++--- massa-execution-worker/src/execution.rs | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 6fa80159d4d..95f1b619b33 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -87,7 +87,7 @@ impl ExecutionContext { /// Create a new empty ExecutionContext /// This should only be used as a placeholder. /// Further initialization is required before running bytecode - /// (see new_readonly and new_active_slot methods). + /// (see readonly and active_slot methods). /// /// # arguments /// * final_ledger: thread-safe access to the final ledger. Note that this will be used only for reading, never for writing @@ -151,7 +151,7 @@ impl ExecutionContext { /// /// # returns /// A ExecutionContext instance ready for a read-only execution - pub(crate) fn new_readonly( + pub(crate) fn readonly( slot: Slot, req: ReadOnlyExecutionRequest, previous_changes: LedgerChanges, @@ -195,7 +195,7 @@ impl ExecutionContext { /// /// # returns /// A ExecutionContext instance ready for a read-only execution - pub(crate) fn new_active_slot( + pub(crate) fn active_slot( slot: Slot, opt_block_id: Option, previous_changes: LedgerChanges, diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 0a55e6ca0ec..45d32d053d2 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -338,7 +338,7 @@ impl ExecutionState { let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); // create a new execution context for the whole active slot - let execution_context = ExecutionContext::new_active_slot( + let execution_context = ExecutionContext::active_slot( slot, opt_block_id, previous_ledger_changes, @@ -398,7 +398,7 @@ impl ExecutionState { // create a readonly execution context let max_gas = req.max_gas; let bytecode = req.bytecode.clone(); - let execution_context = ExecutionContext::new_readonly( + let execution_context = ExecutionContext::readonly( slot, req, previous_ledger_changes, From d1cb98d36a809d66c55a70be1b1ac877ba99767d Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Wed, 23 Feb 2022 17:31:32 +0100 Subject: [PATCH 53/73] Update massa-execution-worker/src/controller.rs Co-authored-by: Aurelia <56112063+AureliaDolo@users.noreply.github.com> --- massa-execution-worker/src/controller.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index da2aae7ad8c..a4c12e653df 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -77,7 +77,7 @@ impl ExecutionController for ExecutionControllerImpl { .into_iter() 
.map(|(b_id, b)| (b.header.content.slot, (b_id, b))) .collect(); - //update input data + // update input data let mut input_data = self.input_data.1.lock(); input_data.blockclique = mapped_blockclique; // replace blockclique input_data.finalized_blocks.extend(mapped_finalized_blocks); // append finalized blocks From 776656b0ade640c73609111db8a7fe79d2bd8e44 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 17:41:15 +0100 Subject: [PATCH 54/73] rename get_final_and_active_ledger_entry --- massa-api/src/public.rs | 7 ++++++- massa-execution-exports/src/controller_traits.rs | 7 +++++-- massa-execution-exports/src/test_exports/mock.rs | 5 ++++- massa-execution-worker/src/controller.rs | 9 +++++++-- massa-execution-worker/src/execution.rs | 2 +- 5 files changed, 23 insertions(+), 7 deletions(-) diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 0314bb2dc3c..5829ce2d31f 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -425,7 +425,12 @@ impl Endpoints for API { let mut sce_ledger_info: Map = Map::with_capacity_and_hasher(addresses.len(), BuildMap::default()); for addr in &addresses { - let active_entry = match self.0.execution_controller.get_full_ledger_entry(addr).1 { + let active_entry = match self + .0 + .execution_controller + .get_final_and_active_ledger_entry(addr) + .1 + { None => continue, Some(v) => SCELedgerInfo { balance: v.parallel_balance, diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs index 234ca3b5613..a355fd2f3f2 100644 --- a/massa-execution-exports/src/controller_traits.rs +++ b/massa-execution-exports/src/controller_traits.rs @@ -42,11 +42,14 @@ pub trait ExecutionController: Send + Sync { original_operation_id: Option, ) -> Vec; - /// Get a copy of a full ledger entry + /// Get a copy of a full ledger entry with its final and active values /// /// # return value /// * (final_entry, active_entry) - fn get_full_ledger_entry(&self, addr: &Address) -> (Option, Option); + fn get_final_and_active_ledger_entry( + &self, + addr: &Address, + ) -> (Option, Option); /// Execute read-only bytecode without causing modifications to the consensus state /// diff --git a/massa-execution-exports/src/test_exports/mock.rs b/massa-execution-exports/src/test_exports/mock.rs index 543484970c1..d96fbd249d7 100644 --- a/massa-execution-exports/src/test_exports/mock.rs +++ b/massa-execution-exports/src/test_exports/mock.rs @@ -108,7 +108,10 @@ impl ExecutionController for MockExecutionController { response_rx.recv().unwrap() } - fn get_full_ledger_entry(&self, addr: &Address) -> (Option, Option) { + fn get_final_and_active_ledger_entry( + &self, + addr: &Address, + ) -> (Option, Option) { let (response_tx, response_rx) = mpsc::channel(); self.0 .lock() diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index da2aae7ad8c..611d8a7d12c 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -112,8 +112,13 @@ impl ExecutionController for ExecutionControllerImpl { /// /// # return value /// * (final_entry, active_entry) - fn get_full_ledger_entry(&self, addr: &Address) -> (Option, Option) { - self.execution_state.read().get_full_ledger_entry(addr) + fn get_final_and_active_ledger_entry( + &self, + addr: &Address, + ) -> (Option, Option) { + self.execution_state + .read() + .get_final_and_active_ledger_entry(addr) } /// Executes a readonly request diff --git a/massa-execution-worker/src/execution.rs 
b/massa-execution-worker/src/execution.rs index 45d32d053d2..c5a7ffcef6b 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -421,7 +421,7 @@ impl ExecutionState { /// /// # returns /// (final_entry, active_entry) - pub fn get_full_ledger_entry( + pub fn get_final_and_active_ledger_entry( &self, addr: &Address, ) -> (Option, Option) { From 424c8c952178a65397c889df084086e04670f0a4 Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Wed, 23 Feb 2022 17:44:44 +0100 Subject: [PATCH 55/73] Update massa-execution-worker/src/execution.rs Co-authored-by: Aurelia <56112063+AureliaDolo@users.noreply.github.com> --- massa-execution-worker/src/execution.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index c5a7ffcef6b..7e48e080535 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -482,7 +482,7 @@ impl ExecutionState { .into_iter() .chain( // TODO note that active history is made of consecutive slots, - // so this algo does not need to scan all history items as iteation bounds can be derived a priori + // so this algo does not need to scan all history items as iteration bounds can be derived a priori self.active_history .iter() .filter(|item| item.slot >= start && item.slot < end) From e30e47b077537c1f0375c53bf96252879ef7eef0 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 17:51:42 +0100 Subject: [PATCH 56/73] issue refs --- massa-execution-worker/src/execution.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index c5a7ffcef6b..01eac2ca359 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -243,6 +243,8 @@ impl ExecutionState { // get operation ID // TODO have operation_id contained in the Operation object in the future to avoid recomputation + // https://github.com/massalabs/massa/issues/1121 + // https://github.com/massalabs/massa/issues/2264 let operation_id = operation .get_operation_id() .expect("could not compute operation ID"); From 74bdf5322cf14e335b75e9e1e1bd4f6d8561fd70 Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Wed, 23 Feb 2022 17:52:12 +0100 Subject: [PATCH 57/73] Update massa-execution-worker/src/interface_impl.rs Co-authored-by: Aurelia <56112063+AureliaDolo@users.noreply.github.com> --- massa-execution-worker/src/interface_impl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index ee7a63f81d8..5e86de4aa75 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -28,7 +28,7 @@ macro_rules! 
context_guard { }; } -/// an implementation of the Interface trait (see masa-sc-runtime crate) +/// an implementation of the Interface trait (see massa-sc-runtime crate) #[derive(Clone)] pub(crate) struct InterfaceImpl { /// execution config From 4299a31d4917713f0c6f1825bdae2dccf623667b Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 17:58:46 +0100 Subject: [PATCH 58/73] force access check --- massa-execution-worker/src/context.rs | 8 ++------ massa-execution-worker/src/execution.rs | 6 ++---- massa-execution-worker/src/interface_impl.rs | 10 +++++----- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 95f1b619b33..5621e5f6f26 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -357,16 +357,14 @@ impl ExecutionContext { /// * address: the address of the ledger entry /// * key: the datastore key /// * data: the data to insert - /// * check_rights: if true, the function quits with an error if the current context has no writing rights on the target address pub fn set_data_entry( &mut self, address: &Address, key: Hash, data: Vec, - check_rights: bool, ) -> Result<(), ExecutionError> { // check access right - if check_rights && !self.has_write_rights_on(address) { + if !self.has_write_rights_on(address) { return Err(ExecutionError::RuntimeError(format!( "writing in the datastore of address {} is not allowed in this context", address @@ -385,17 +383,15 @@ impl ExecutionContext { /// * from_addr: optional spending address (use None for pure coin creation) /// * to_addr: optional crediting address (use None for pure coin destruction) /// * amount: amount of coins to transfer - /// * check_rights: if true, access rights are checked pub fn transfer_parallel_coins( &mut self, from_addr: Option
<Address>, to_addr: Option<Address>
, amount: Amount, - check_rights: bool, ) -> Result<(), ExecutionError> { // check access rights if let Some(from_addr) = &from_addr { - if check_rights && !self.has_write_rights_on(from_addr) { + if !self.has_write_rights_on(from_addr) { return Err(ExecutionError::RuntimeError(format!( "spending from address {} is not allowed in this context", from_addr diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 827416b44c1..a26412b76ae 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -260,7 +260,7 @@ impl ExecutionState { // That way, even if the sender sent an invalid operation, the block producer will still get credited. let gas_fees = gas_price.saturating_mul_u64(*max_gas); if let Err(err) = - context.transfer_parallel_coins(None, Some(block_creator_addr), gas_fees, false) + context.transfer_parallel_coins(None, Some(block_creator_addr), gas_fees) { debug!( "failed to credit block producer {} with {} gas fee coins: {}", @@ -270,9 +270,7 @@ impl ExecutionState { // Credit the operation sender with `coins` parallel coins. // Note that errors are deterministic and do not cancel op execution. - if let Err(err) = - context.transfer_parallel_coins(None, Some(sender_addr), *coins, false) - { + if let Err(err) = context.transfer_parallel_coins(None, Some(sender_addr), *coins) { debug!( "failed to credit operation sender {} with {} operation coins: {}", sender_addr, *coins, err diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index 5e86de4aa75..c49a3db2b68 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -99,7 +99,7 @@ impl Interface for InterfaceImpl { // transfer coins from caller to target address let coins = massa_models::Amount::from_raw(raw_coins); if let Err(err) = - context.transfer_parallel_coins(Some(from_address), Some(to_address), coins, true) + context.transfer_parallel_coins(Some(from_address), Some(to_address), coins) { bail!( "error transferring {} parallel coins from {} to {}: {}", @@ -206,7 +206,7 @@ impl Interface for InterfaceImpl { let addr = massa_models::Address::from_str(address)?; let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); let mut context = context_guard!(self); - context.set_data_entry(&addr, key, value.to_vec(), true)?; + context.set_data_entry(&addr, key, value.to_vec())?; Ok(()) } @@ -252,7 +252,7 @@ impl Interface for InterfaceImpl { let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); let mut context = context_guard!(self); let addr = context.get_current_address()?; - context.set_data_entry(&addr, key, value.to_vec(), true)?; + context.set_data_entry(&addr, key, value.to_vec())?; Ok(()) } @@ -326,7 +326,7 @@ impl Interface for InterfaceImpl { let amount = massa_models::Amount::from_raw(raw_amount); let mut context = context_guard!(self); let from_address = context.get_current_address()?; - context.transfer_parallel_coins(Some(from_address), Some(to_address), amount, true)?; + context.transfer_parallel_coins(Some(from_address), Some(to_address), amount)?; Ok(()) } @@ -346,7 +346,7 @@ impl Interface for InterfaceImpl { let to_address = massa_models::Address::from_str(to_address)?; let amount = massa_models::Amount::from_raw(raw_amount); let mut context = context_guard!(self); - context.transfer_parallel_coins(Some(from_address), Some(to_address), amount, true)?; + context.transfer_parallel_coins(Some(from_address), 
Some(to_address), amount)?; Ok(()) } From 3a686872d36d98997332220286fe317bd00a4b94 Mon Sep 17 00:00:00 2001 From: damip Date: Wed, 23 Feb 2022 18:39:28 +0100 Subject: [PATCH 59/73] link issue --- massa-execution-worker/src/context.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 5621e5f6f26..7411352f272 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -284,9 +284,13 @@ impl ExecutionContext { /// Creates a new smart contract address with initial bytecode, and returns this address pub fn create_new_sc_address(&mut self, bytecode: Vec) -> Result { - // TODO: security problem: - // prefix addresses to know if they are SCs or normal, otherwise people can already create new accounts by sending coins to the right hash - // they won't have ownership over it but this can still be a pain + // TODO: collision problem: + // prefix addresses to know if they are SCs or normal, + // otherwise people can already create new accounts by sending coins to the right hash + // they won't have ownership over it but this can still be unexpected + // to have initial extra coins when an address is created + // It may also induce that for read-only calls. + // https://github.com/massalabs/massa/issues/2331 // deterministically generate a new unique smart contract address From 664efb63fcca8303a53289d0386bc279154e9b6f Mon Sep 17 00:00:00 2001 From: Damir Vodenicarevic Date: Thu, 24 Feb 2022 12:47:03 +0100 Subject: [PATCH 60/73] Update massa-execution-worker/src/execution.rs Co-authored-by: Aurelia <56112063+AureliaDolo@users.noreply.github.com> --- massa-execution-worker/src/execution.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index a26412b76ae..8f4e80b0ce3 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -372,7 +372,7 @@ impl ExecutionState { context_guard!(self).take_execution_output() } - /// Execues a read-only execution request. + /// Executes a read-only execution request. /// The executed bytecode appears to be able to read and write the consensus state, /// but all accumulated changes are simply returned as an ExecutionOutput object, /// and not actually applied to the consensus state. 
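The patches above and below rework how read-only execution requests reach the worker: each request is queued together with an std::sync::mpsc sender, the worker answers it once the execution is done, and requests in excess of the queue capacity are cancelled by sending back an error instead of being silently dropped. A rough, self-contained sketch of that pattern follows; the types are stand-ins chosen for illustration, not the actual massa structures.

use std::collections::VecDeque;
use std::sync::mpsc::{channel, Sender};

// Stand-ins for ReadOnlyExecutionRequest / ExecutionOutput / ExecutionError.
struct Request(String);
type Output = String;
type ExecError = String;

// Bounded queue of (request, response sender) pairs.
struct RequestQueue {
    max_items: usize,
    queue: VecDeque<(Request, Sender<Result<Output, ExecError>>)>,
}

impl RequestQueue {
    fn new(max_items: usize) -> Self {
        RequestQueue { max_items, queue: VecDeque::with_capacity(max_items) }
    }

    // Push a request; if the queue is full, cancel it through its own channel.
    fn push(&mut self, req: Request, resp_tx: Sender<Result<Output, ExecError>>) {
        if self.queue.len() >= self.max_items {
            let _ = resp_tx.send(Err("maximal request queue capacity reached".into()));
            return;
        }
        self.queue.push_back((req, resp_tx));
    }

    // Pop the oldest pending request, if any.
    fn pop(&mut self) -> Option<(Request, Sender<Result<Output, ExecError>>)> {
        self.queue.pop_front()
    }
}

fn main() {
    let mut queue = RequestQueue::new(1);

    // Caller side: create a response channel and enqueue the request.
    let (resp_tx, resp_rx) = channel();
    queue.push(Request("read-only bytecode".into()), resp_tx);

    // Worker side: pop the request, "execute" it, send the output back.
    // A send error only means the caller stopped waiting for the answer.
    if let Some((req, tx)) = queue.pop() {
        let _ = tx.send(Ok(format!("executed: {}", req.0)));
    }

    // Caller side: block until the result arrives.
    println!("{:?}", resp_rx.recv().unwrap());
}

In the actual worker loop described later in this series, servicing this queue comes after final and speculative slot executions, and pending requests are cancelled with an error when the worker shuts down.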
From 0d7a72085e66f7dace6e3235e887be157fb4e5b8 Mon Sep 17 00:00:00 2001 From: damip Date: Thu, 24 Feb 2022 17:01:25 +0100 Subject: [PATCH 61/73] remove rolls --- massa-ledger/src/ledger_changes.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/massa-ledger/src/ledger_changes.rs b/massa-ledger/src/ledger_changes.rs index 48cd6e32319..15ee4a60746 100644 --- a/massa-ledger/src/ledger_changes.rs +++ b/massa-ledger/src/ledger_changes.rs @@ -11,8 +11,6 @@ use std::collections::hash_map; /// represents an update to one or more fields of a LedgerEntry #[derive(Default, Debug, Clone)] pub struct LedgerEntryUpdate { - /// change the number of rolls - pub roll_count: SetOrKeep, /// change the parallel balance pub parallel_balance: SetOrKeep, /// change the executable bytecode @@ -24,7 +22,6 @@ pub struct LedgerEntryUpdate { impl Applicable for LedgerEntryUpdate { /// extends the LedgerEntryUpdate with another one fn apply(&mut self, update: LedgerEntryUpdate) { - self.roll_count.apply(update.roll_count); self.parallel_balance.apply(update.parallel_balance); self.bytecode.apply(update.bytecode); self.datastore.extend(update.datastore); From b1cae68a725889d784239807823f726adfd2c1b2 Mon Sep 17 00:00:00 2001 From: damip Date: Thu, 24 Feb 2022 17:41:38 +0100 Subject: [PATCH 62/73] copyright update --- massa-consensus-exports/src/consensus_controller.rs | 2 +- massa-ledger/src/tests/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/massa-consensus-exports/src/consensus_controller.rs b/massa-consensus-exports/src/consensus_controller.rs index e846d85032f..2e155ed6ce1 100644 --- a/massa-consensus-exports/src/consensus_controller.rs +++ b/massa-consensus-exports/src/consensus_controller.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2021 MASSA LABS +// Copyright (c) 2022 MASSA LABS use massa_graph::{BlockGraphExport, BootstrapableGraph, ExportBlockStatus, Status}; use massa_models::{ address::AddressState, api::EndorsementInfo, Endorsement, EndorsementId, OperationId, diff --git a/massa-ledger/src/tests/mod.rs b/massa-ledger/src/tests/mod.rs index 00e9f959513..ca6fbbd29ca 100644 --- a/massa-ledger/src/tests/mod.rs +++ b/massa-ledger/src/tests/mod.rs @@ -1 +1 @@ -// Copyright (c) 2021 MASSA LABS +// Copyright (c) 2022 MASSA LABS From b66b360b009216f072e50f1870605b75a30028df Mon Sep 17 00:00:00 2001 From: damip Date: Thu, 24 Feb 2022 18:09:57 +0100 Subject: [PATCH 63/73] restart loop on readonly execution --- massa-execution-worker/src/worker.rs | 97 +++++++++++++++++++++------- 1 file changed, 72 insertions(+), 25 deletions(-) diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 87beeb251cd..3c3c4c41ba6 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -19,6 +19,7 @@ use massa_models::{ }; use massa_time::MassaTime; use parking_lot::{Condvar, Mutex, RwLock}; +use std::collections::VecDeque; use std::sync::mpsc; use std::{collections::HashMap, sync::Arc}; use tracing::debug; @@ -45,6 +46,11 @@ pub(crate) struct ExecutionThread { last_active_slot: Slot, // Execution state (see execution.rs) to which execution requests are sent execution_state: Arc>, + /// queue for readonly execution requests and response mpscs to send back their outputs + readonly_requests: VecDeque<( + ReadOnlyExecutionRequest, + mpsc::Sender>, + )>, } impl ExecutionThread { @@ -74,6 +80,7 @@ impl ExecutionThread { active_slots: Default::default(), config, execution_state, + readonly_requests: Default::default(), } } @@ -344,27 +351,64 
@@ impl ExecutionThread { exec_state.truncate_history(&self.active_slots); } - /// Executes a read-only request, and asynchronously returns the result once finished. - /// - /// # Arguments - /// * req: read-only execution request parameters - /// * resp_tx: MPSC sender through which the execution output is sent when the execution is over - fn execute_readonly_request( - &self, - req: ReadOnlyExecutionRequest, - resp_tx: mpsc::Sender>, + /// Append incoming read-only requests to the relevant queue, + /// Cancel those that are in excess if there are too many. + fn update_readonly_requests( + &mut self, + new_requests: VecDeque<( + ReadOnlyExecutionRequest, + mpsc::Sender>, + )>, ) { - // acquire read access to execution state and execute the read-only request - let outcome = self.execution_state.read().execute_readonly_request(req); + // append incoming readonly requests to our readonly request queue + self.readonly_requests.extend(new_requests); + + // if there are too many requests, cancel those in excess + if self.readonly_requests.len() > self.config.readonly_queue_length { + for (_req, resp_tx) in self + .readonly_requests + .drain(self.config.readonly_queue_length..) + { + // send a message to the requests in excess to signal the cancelling + if resp_tx + .send(Err(ExecutionError::RuntimeError( + "too many queued readonly requests".into(), + ))) + .is_err() + { + debug!("failed sending readonly request response: channel down"); + } + } + } + } - // send the execution output through resp_tx - if resp_tx.send(outcome).is_err() { - debug!("could not send execute_readonly_request response: response channel died"); + /// Executes a read-only request from the queue, if any. + /// The result of the execution is sent asynchronously through the response channel provided with the request. + /// + /// # Returns + /// true if a request was executed, false otherwise + fn execute_one_readonly_request(&mut self) -> bool { + if let Some((req, resp_tx)) = self.readonly_requests.pop_front() { + // acquire read access to the execution state and execute the read-only request + let outcome = self.execution_state.read().execute_readonly_request(req); + + // send the execution output through resp_tx + if resp_tx.send(outcome).is_err() { + debug!("could not send execute_readonly_request response: response channel died"); + } + + return true; } + false } /// Main loop of the executin worker pub fn main_loop(&mut self) { + // This loop restarts everytime an execution happens for easier tracking. 
+ // It also prioritizes executions in the following order: + // 1 - final executions + // 2 - speculative executions + // 3 - read-only executions loop { // read input requests let input_data = self.controller.consume_input(); @@ -383,6 +427,9 @@ impl ExecutionThread { self.update_active_slots(Some(input_data.blockclique)); } + // update the sequence of read-only requests + self.update_readonly_requests(input_data.readonly_requests); + // execute one slot as final, if there is one ready for final execution if self.execute_one_final_slot() { // A slot was executed as final: restart the loop @@ -410,20 +457,15 @@ impl ExecutionThread { // Execute one active slot in a speculative way, if there is one ready for that if self.execute_one_active_slot() { // An active slot was executed: restart the loop - // This loop continue is useful for monitoring: - // it allows tracking the state of all execution queues, - // as well as prioritizing executions in the following order: - // 1 - final executions - // 2 - speculative executions - // 3 - read-only executions continue; } - // Execute all queued readonly requests (note that the queue is of finite length) + // Execute a read-only request (note that the queue is of finite length), if there is one ready. // This must be done in this loop because even though read-only executions do not alter consensus state, // they still act temporarily on the static shared execution context. - for (req, resp_tx) in input_data.readonly_requests { - self.execute_readonly_request(req, resp_tx); + if self.execute_one_readonly_request() { + // a read-only request was executed: restart the loop + continue; } // Peek into the input data to see if new input arrived during this iteration of the loop @@ -457,10 +499,15 @@ impl ExecutionThread { .wait_for(&mut input_data, time_until_next_slot.to_duration()); } - // the execution worker is stopping: + // The execution worker is stopping: // signal cancellation to all remaining read-only execution requests waiting for an MPSC response + // (both in input_data and in the internal queue) let mut input_data = self.controller.input_data.1.lock(); - for (_req, resp_tx) in input_data.readonly_requests.drain(..) { + let request_iterator = self + .readonly_requests + .drain(..) 
+ .chain(input_data.readonly_requests.drain(..)); + for (_req, resp_tx) in request_iterator { if resp_tx .send(Err(ExecutionError::RuntimeError( "readonly execution cancelled because VM is closing".into(), From b852ae38bd5cd20e21f248ef40af64b604e6589f Mon Sep 17 00:00:00 2001 From: damip Date: Thu, 24 Feb 2022 18:15:41 +0100 Subject: [PATCH 64/73] improve execution loop --- massa-execution-worker/src/worker.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 3c3c4c41ba6..bc2b9fdea68 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -418,11 +418,11 @@ impl ExecutionThread { break; } + // update the sequence of final slots given the newly finalized blocks + self.update_final_slots(input_data.finalized_blocks); + // if the blockclique has changed if input_data.blockclique_changed { - // update the sequence of final slots given the newly finalized blocks - self.update_final_slots(input_data.finalized_blocks); - // update the sequence of active slots given the new blockclique self.update_active_slots(Some(input_data.blockclique)); } From dcc0900df509204961ab6a2d4fb8952bedd154a6 Mon Sep 17 00:00:00 2001 From: damip Date: Thu, 24 Feb 2022 18:33:01 +0100 Subject: [PATCH 65/73] simplify structure --- massa-execution-worker/src/controller.rs | 22 +++------- massa-execution-worker/src/worker.rs | 54 +++++++++--------------- 2 files changed, 27 insertions(+), 49 deletions(-) diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 2dd69d917b8..1248f6bbfab 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -21,15 +21,13 @@ use tracing::info; /// structure used to communicate with execution thread #[derive(Default)] -pub(crate) struct VMInputData { +pub(crate) struct ExecutionInputData { /// set stop to true to stop the thread pub stop: bool, - /// signal whether the blockclique changed - pub blockclique_changed: bool, /// list of newly finalized blocks, indexed by slot pub finalized_blocks: HashMap, - /// blockclique, blocks indexed by slot - pub blockclique: HashMap, + /// new blockclique (if there is a new one), blocks indexed by slot + pub new_blockclique: Option>, /// queue for readonly execution requests and response mpscs to send back their outputs pub readonly_requests: VecDeque<( ReadOnlyExecutionRequest, @@ -44,18 +42,11 @@ pub struct ExecutionControllerImpl { pub(crate) config: ExecutionConfig, /// input data to process in the VM loop /// with a wakeup condition variable that needs to be triggered when the data changes - pub(crate) input_data: Arc<(Condvar, Mutex)>, + pub(crate) input_data: Arc<(Condvar, Mutex)>, /// current execution state (see execution.rs for details) pub(crate) execution_state: Arc>, } -impl ExecutionControllerImpl { - /// consumes and returns the input fed to the controller - pub(crate) fn consume_input(&mut self) -> VMInputData { - std::mem::take(&mut self.input_data.1.lock()) - } -} - impl ExecutionController for ExecutionControllerImpl { /// called to signal changes on the current blockclique, also listing newly finalized blocks /// @@ -79,9 +70,8 @@ impl ExecutionController for ExecutionControllerImpl { .collect(); // update input data let mut input_data = self.input_data.1.lock(); - input_data.blockclique = mapped_blockclique; // replace blockclique + input_data.new_blockclique = Some(mapped_blockclique); // replace 
blockclique input_data.finalized_blocks.extend(mapped_finalized_blocks); // append finalized blocks - input_data.blockclique_changed = true; // signal a blockclique change self.input_data.0.notify_one(); // wake up VM loop } @@ -171,7 +161,7 @@ impl ExecutionController for ExecutionControllerImpl { pub struct ExecutionManagerImpl { /// input data to process in the VM loop /// with a wakeup condition variable that needs to be triggered when the data changes - pub(crate) input_data: Arc<(Condvar, Mutex)>, + pub(crate) input_data: Arc<(Condvar, Mutex)>, /// handle used to join the worker thread pub(crate) thread_handle: Option>, } diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index bc2b9fdea68..24fccc72de4 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -5,7 +5,7 @@ //! orders active and final blocks in queues sorted by increasing slot number, //! and requests the execution of active and final slots from execution.rs. -use crate::controller::{ExecutionControllerImpl, ExecutionManagerImpl, VMInputData}; +use crate::controller::{ExecutionControllerImpl, ExecutionInputData, ExecutionManagerImpl}; use crate::execution::ExecutionState; use massa_execution_exports::{ ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ExecutionOutput, @@ -28,8 +28,8 @@ use tracing::debug; pub(crate) struct ExecutionThread { // Execution config config: ExecutionConfig, - // A copy of the controller allowing access to incoming requests - controller: ExecutionControllerImpl, + // A copy of the input data allowing access to incoming requests + input_data: Arc<(Condvar, Mutex)>, // Map of final slots not executed yet but ready for execution // See lib.rs for an explanation on final execution ordering. 
ready_final_slots: HashMap>, @@ -59,11 +59,11 @@ impl ExecutionThread { /// /// # Arguments /// * config: execution config - /// * controller: a copy of the ExecutionController to get incoming requests from + /// * input_data: a copy of the input data interface to get incoming requests from /// * execution_state: an thread-safe shared access to the execution state, which can be bootstrapped or newly created pub fn new( config: ExecutionConfig, - controller: ExecutionControllerImpl, + input_data: Arc<(Condvar, Mutex)>, execution_state: Arc>, ) -> Self { // get the latest executed final slot, at the output of which the final ledger is attached @@ -72,7 +72,7 @@ impl ExecutionThread { // create and return the ExecutionThread ExecutionThread { last_active_slot: final_cursor, - controller, + input_data, last_ready_final_slot: final_cursor, ready_final_slots: Default::default(), pending_final_blocks: Default::default(), @@ -411,7 +411,7 @@ impl ExecutionThread { // 3 - read-only executions loop { // read input requests - let input_data = self.controller.consume_input(); + let input_data: ExecutionInputData = std::mem::take(&mut self.input_data.1.lock()); // check for stop signal if input_data.stop { @@ -421,11 +421,9 @@ impl ExecutionThread { // update the sequence of final slots given the newly finalized blocks self.update_final_slots(input_data.finalized_blocks); - // if the blockclique has changed - if input_data.blockclique_changed { - // update the sequence of active slots given the new blockclique - self.update_active_slots(Some(input_data.blockclique)); - } + // update the sequence of active slots + let blockclique_changed = input_data.new_blockclique.is_some(); + self.update_active_slots(input_data.new_blockclique); // update the sequence of read-only requests self.update_readonly_requests(input_data.readonly_requests); @@ -440,17 +438,11 @@ impl ExecutionThread { // now all the slots that were ready for final execution have been executed as final - // if the blockclique was not updated, the update_active_slots hasn't been called previously. - // But we still fill up active slots with misses until now() so we call it with None as argument. - if !input_data.blockclique_changed { - self.update_active_slots(None); - } - // If the blockclique has changed, the list of active slots might have seen // new insertions/deletions of blocks at different slot depths. // It is therefore important to signal this to the execution state, // so that it can remove out-of-date speculative execution results from its history. - if input_data.blockclique_changed { + if blockclique_changed { self.truncate_execution_history(); } @@ -460,6 +452,8 @@ impl ExecutionThread { continue; } + // now all the slots that were ready for final and active execution have been executed + // Execute a read-only request (note that the queue is of finite length), if there is one ready. // This must be done in this loop because even though read-only executions do not alter consensus state, // they still act temporarily on the static shared execution context. 
@@ -468,13 +462,15 @@ impl ExecutionThread { continue; } + // now there are no more executions to run + // Peek into the input data to see if new input arrived during this iteration of the loop - let mut input_data = self.controller.input_data.1.lock(); + let mut input_data = self.input_data.1.lock(); if input_data.stop { // there is a request to stop: quit the loop break; } - if input_data.blockclique_changed || !input_data.readonly_requests.is_empty() { + if input_data.new_blockclique.is_some() || !input_data.readonly_requests.is_empty() { // there are blockclique updates or read-only requests: restart the loop continue; } @@ -493,7 +489,6 @@ impl ExecutionThread { // Note: spurious wake-ups are not a problem: // the next loop iteration will just do nohing and come back to wait here. let _res = self - .controller .input_data .0 .wait_for(&mut input_data, time_until_next_slot.to_duration()); @@ -502,7 +497,7 @@ impl ExecutionThread { // The execution worker is stopping: // signal cancellation to all remaining read-only execution requests waiting for an MPSC response // (both in input_data and in the internal queue) - let mut input_data = self.controller.input_data.1.lock(); + let mut input_data = self.input_data.1.lock(); let request_iterator = self .readonly_requests .drain(..) @@ -541,14 +536,7 @@ pub fn start_execution_worker( ))); // define the input data interface - let input_data = Arc::new(( - Condvar::new(), - Mutex::new(VMInputData { - // notify of a blockclique change to run one initialization loop itration - blockclique_changed: true, - ..Default::default() - }), - )); + let input_data = Arc::new((Condvar::new(), Mutex::new(ExecutionInputData::default()))); // create a controller let controller = ExecutionControllerImpl { @@ -558,9 +546,9 @@ pub fn start_execution_worker( }; // launch the execution thread - let ctl = controller.clone(); + let input_data_clone = input_data.clone(); let thread_handle = std::thread::spawn(move || { - ExecutionThread::new(config, ctl, execution_state).main_loop(); + ExecutionThread::new(config, input_data_clone, execution_state).main_loop(); }); // create a manager From efea7d9e8ea518eb8bad782532069331a5c68ac9 Mon Sep 17 00:00:00 2001 From: damip Date: Fri, 25 Feb 2022 01:08:30 +0100 Subject: [PATCH 66/73] improve execution loop --- massa-execution-exports/src/error.rs | 14 +- massa-execution-worker/src/controller.rs | 62 ++++-- massa-execution-worker/src/lib.rs | 6 + massa-execution-worker/src/request_queue.rs | 157 ++++++++++++++ massa-execution-worker/src/worker.rs | 218 ++++++++++---------- 5 files changed, 318 insertions(+), 139 deletions(-) create mode 100644 massa-execution-worker/src/request_queue.rs diff --git a/massa-execution-exports/src/error.rs b/massa-execution-exports/src/error.rs index ac667b28b33..750b8b81109 100644 --- a/massa-execution-exports/src/error.rs +++ b/massa-execution-exports/src/error.rs @@ -7,23 +7,11 @@ use thiserror::Error; /// Errors of the execution component. 
#[non_exhaustive] -#[derive(Display, Error, Debug)] +#[derive(Clone, Display, Error, Debug)] pub enum ExecutionError { /// Channel error ChannelError(String), - /// Join error - JoinError, - - /// crypto error: {0} - ModelsError(#[from] massa_models::ModelsError), - - /// time error: {0} - TimeError(#[from] massa_time::TimeError), - - /// File error - FileError(String), - /// Runtime error: {0} RuntimeError(String), } diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs index 1248f6bbfab..0cbf10d0946 100644 --- a/massa-execution-worker/src/controller.rs +++ b/massa-execution-worker/src/controller.rs @@ -4,6 +4,7 @@ //! See massa-execution-exports/controller_traits.rs for functional details. use crate::execution::ExecutionState; +use crate::request_queue::{RequestQueue, RequestWithResponseSender}; use massa_execution_exports::{ ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ExecutionOutput, ReadOnlyExecutionRequest, @@ -15,12 +16,11 @@ use massa_models::Address; use massa_models::OperationId; use massa_models::{Block, BlockId, Slot}; use parking_lot::{Condvar, Mutex, RwLock}; -use std::collections::{HashMap, VecDeque}; -use std::sync::{mpsc, Arc}; +use std::collections::HashMap; +use std::sync::Arc; use tracing::info; /// structure used to communicate with execution thread -#[derive(Default)] pub(crate) struct ExecutionInputData { /// set stop to true to stop the thread pub stop: bool, @@ -29,17 +29,35 @@ pub(crate) struct ExecutionInputData { /// new blockclique (if there is a new one), blocks indexed by slot pub new_blockclique: Option>, /// queue for readonly execution requests and response mpscs to send back their outputs - pub readonly_requests: VecDeque<( - ReadOnlyExecutionRequest, - mpsc::Sender>, - )>, + pub readonly_requests: RequestQueue, +} + +impl ExecutionInputData { + /// Creates a new empty ExecutionInputData + pub fn new(config: ExecutionConfig) -> Self { + ExecutionInputData { + stop: Default::default(), + finalized_blocks: Default::default(), + new_blockclique: Default::default(), + readonly_requests: RequestQueue::new(config.max_final_events), + } + } + + /// Takes the current input data into a clone that is returned, + /// and resets self. 
+ pub fn take(&mut self) -> Self { + ExecutionInputData { + stop: std::mem::take(&mut self.stop), + finalized_blocks: std::mem::take(&mut self.finalized_blocks), + new_blockclique: std::mem::take(&mut self.new_blockclique), + readonly_requests: self.readonly_requests.take(), + } + } } #[derive(Clone)] /// implementation of the execution controller pub struct ExecutionControllerImpl { - /// execution config - pub(crate) config: ExecutionConfig, /// input data to process in the VM loop /// with a wakeup condition variable that needs to be triggered when the data changes pub(crate) input_data: Arc<(Condvar, Mutex)>, @@ -117,31 +135,37 @@ impl ExecutionController for ExecutionControllerImpl { &self, req: ReadOnlyExecutionRequest, ) -> Result { - // queue request into input, get response mpsc receiver let resp_rx = { let mut input_data = self.input_data.1.lock(); - // limit the read-only queue length - if input_data.readonly_requests.len() >= self.config.readonly_queue_length { - return Err(ExecutionError::RuntimeError( + + // if the read-onlyi queue is already full, return an error + if input_data.readonly_requests.is_full() { + return Err(ExecutionError::ChannelError( "too many queued readonly requests".into(), )); } + // prepare the channel to send back the result of the read-only execution let (resp_tx, resp_rx) = std::sync::mpsc::channel::>(); - // append to the queue of input read-only requests - input_data.readonly_requests.push_back((req, resp_tx)); - // wake up VM loop + + // append the request to the queue of input read-only requests + input_data + .readonly_requests + .push(RequestWithResponseSender::new(req, resp_tx)); + + // wake up the execution main loop self.input_data.0.notify_one(); + resp_rx }; - // wait for the result of the execution + // Wait for the result of the execution match resp_rx.recv() { Ok(result) => result, Err(err) => { - return Err(ExecutionError::RuntimeError(format!( - "the VM input channel failed: {}", + return Err(ExecutionError::ChannelError(format!( + "readonly execution response channel readout failed: {}", err ))) } diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index 9448afbf3ba..52de649218e 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -60,6 +60,11 @@ //! ## speculative_ledger.rs //! A speculative (non-final) ledger that supports cancelling already-executed operations //! in the case of some blockclique changes. +//! +//! ## request_queue.rs +//! This module contains the implementation of a generic finite-size execution request queue. +//! It handles requests that come with an MPSC to send back the result of their execution once it's done. +//! #![feature(map_first_last)] #![feature(unzip_option)] @@ -68,6 +73,7 @@ mod context; mod controller; mod execution; mod interface_impl; +mod request_queue; mod speculative_ledger; mod worker; diff --git a/massa-execution-worker/src/request_queue.rs b/massa-execution-worker/src/request_queue.rs new file mode 100644 index 00000000000..0da52ec15d5 --- /dev/null +++ b/massa-execution-worker/src/request_queue.rs @@ -0,0 +1,157 @@ +// Copyright (c) 2022 MASSA LABS + +//! This file defines a generic finite-size execution request queue with an MPSC-based result sender. 
+ +use massa_execution_exports::ExecutionError; +use std::collections::VecDeque; +use std::sync::mpsc::Sender; + +/// Represents an execution request T coupled with an MPSC sender for a result of type R +pub(crate) struct RequestWithResponseSender { + /// The underlying execution request + request: T, + /// An std::mpsc::Sender to later send the execution output R (or an error) + response_tx: Sender>, +} + +impl RequestWithResponseSender { + /// Create a new request with response sender + /// + /// # Arguments + /// * request: the underlying request of type T + /// * response_tx an std::mpsc::Sender to later send the execution output R (or an error) + pub fn new(request: T, response_tx: Sender>) -> Self { + RequestWithResponseSender { + request, + response_tx, + } + } + + /// Cancel the request by consuming the object and sending an error through the response channel. + /// + /// # Arguments + /// * err: the error to send through the response channel + pub fn cancel(self, err: ExecutionError) { + // Send a message to the request's sender to signal the cancellation. + // Ignore errors because they just mean that the emitter of the request + // has dropped the receiver and does not need the response anymore. + let _ = self.response_tx.send(Err(err)); + } + + /// Destructure self into a (request, response sender) pair + pub fn into_request_sender_pair(self) -> (T, Sender>) { + (self.request, self.response_tx) + } +} + +/// Structure representing an execution request queue with maximal length. +/// Each request is a RequestWithResponseSender that comes with an MPSC sender +/// to return the exection result when the execution is over (or an error). +pub(crate) struct RequestQueue { + /// Max number of item in the queue. + /// When the queue is full, extra new items are cancelled and dropped. + max_items: usize, + + /// The actual underlying queue + queue: VecDeque>, +} + +impl RequestQueue { + /// Create a new request queue + /// + /// # Arguments + /// * max_items: the maximal number of items in the queue. When full, extra new elements are cancelled and dropped. + pub fn new(max_items: usize) -> Self { + RequestQueue { + max_items, + queue: VecDeque::with_capacity(max_items), + } + } + + /// Extends Self with the contents of another RequestQueue. + /// The contents of the incoming queue are appended last. + /// Excess items with respect to self.max_items are cancelled and dropped. + pub fn extend(&mut self, mut other: RequestQueue) { + // compute the number of available item slots + let free_slots = self.max_items.saturating_sub(self.queue.len()); + + // if there are no available slots remaining, do nothing + if free_slots == 0 { + return; + } + + // if there are not enough available slots to fit the entire incoming queue + if free_slots < other.queue.len() { + // truncate the incoming queue to the size that fits, cancelling excess items + other.queue.drain(free_slots..).for_each(|req| { + req.cancel(ExecutionError::ChannelError( + "maximal request queue capacity reached".into(), + )) + }); + } + + // append the kept part of the incoming queue + self.queue.extend(other.queue); + } + + /// Cancel all queued items. + /// + /// # Arguments + /// * err: the error to send through the response channel of cancelled items + pub fn cancel(&mut self, err: ExecutionError) { + for req in self.queue.drain(..) 
{ + req.cancel(err.clone()); + } + } + + /// Pop out the oldest element of the queue + /// + /// # Returns + /// The oldest element of the queue, or None if the queue is empty + pub fn pop(&mut self) -> Option> { + self.queue.pop_front() + } + + /// Push a new element at the end of the queue. + /// May fail if maximum capacity is reached, + /// in which case the request is cancelled and dropped. + /// + /// # Returns + /// The oldest element of the queue, or None if the queue is empty + pub fn push(&mut self, req: RequestWithResponseSender) { + // If the queue is already full, cancel the incoming request and return. + if self.queue.len() >= self.max_items { + req.cancel(ExecutionError::ChannelError( + "maximal request queue capacity reached".into(), + )); + return; + } + + // Append the incoming request to the end of the queue. + self.queue.push_back(req); + } + + /// Take all the elements into a new queue and reset the current queue + pub fn take(&mut self) -> Self { + RequestQueue { + max_items: self.max_items, + queue: std::mem::take(&mut self.queue), + } + } + + /// Checks whether the queue is full + /// + /// # Returns + /// true if the queue is full, false otherwise + pub fn is_full(&self) -> bool { + self.queue.len() >= self.max_items + } + + /// Checks whether the queue is empty + /// + /// # Returns + /// true if the queue is empty, false otherwise + pub fn is_empty(&self) -> bool { + self.queue.is_empty() + } +} diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 24fccc72de4..110ddd529fb 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -7,6 +7,7 @@ use crate::controller::{ExecutionControllerImpl, ExecutionInputData, ExecutionManagerImpl}; use crate::execution::ExecutionState; +use crate::request_queue::RequestQueue; use massa_execution_exports::{ ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ExecutionOutput, ReadOnlyExecutionRequest, @@ -19,10 +20,7 @@ use massa_models::{ }; use massa_time::MassaTime; use parking_lot::{Condvar, Mutex, RwLock}; -use std::collections::VecDeque; -use std::sync::mpsc; use std::{collections::HashMap, sync::Arc}; -use tracing::debug; /// Structure gathering all elements needed by the execution thread pub(crate) struct ExecutionThread { @@ -47,10 +45,7 @@ pub(crate) struct ExecutionThread { // Execution state (see execution.rs) to which execution requests are sent execution_state: Arc>, /// queue for readonly execution requests and response mpscs to send back their outputs - readonly_requests: VecDeque<( - ReadOnlyExecutionRequest, - mpsc::Sender>, - )>, + readonly_requests: RequestQueue, } impl ExecutionThread { @@ -78,9 +73,9 @@ impl ExecutionThread { pending_final_blocks: Default::default(), blockclique: Default::default(), active_slots: Default::default(), + readonly_requests: RequestQueue::new(config.readonly_queue_length), config, execution_state, - readonly_requests: Default::default(), } } @@ -283,6 +278,21 @@ impl ExecutionThread { true } + /// Check if there are any active slots ready for execution + /// This is used to check if the main loop should run an iteration + fn are_there_active_slots_ready_for_execution(&self) -> bool { + let execution_state = self.execution_state.read(); + + // get the next active slot + let slot = execution_state + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + + // check if it is in the active slot queue + self.active_slots.contains_key(&slot) + } 
+ /// executes one active slot, if any /// returns true if something was executed fn execute_one_active_slot(&mut self) -> bool { @@ -355,31 +365,11 @@ impl ExecutionThread { /// Cancel those that are in excess if there are too many. fn update_readonly_requests( &mut self, - new_requests: VecDeque<( - ReadOnlyExecutionRequest, - mpsc::Sender>, - )>, + new_requests: RequestQueue, ) { - // append incoming readonly requests to our readonly request queue + // Append incoming readonly requests to our readonly request queue + // Excess requests are cancelld self.readonly_requests.extend(new_requests); - - // if there are too many requests, cancel those in excess - if self.readonly_requests.len() > self.config.readonly_queue_length { - for (_req, resp_tx) in self - .readonly_requests - .drain(self.config.readonly_queue_length..) - { - // send a message to the requests in excess to signal the cancelling - if resp_tx - .send(Err(ExecutionError::RuntimeError( - "too many queued readonly requests".into(), - ))) - .is_err() - { - debug!("failed sending readonly request response: channel down"); - } - } - } } /// Executes a read-only request from the queue, if any. @@ -388,20 +378,93 @@ impl ExecutionThread { /// # Returns /// true if a request was executed, false otherwise fn execute_one_readonly_request(&mut self) -> bool { - if let Some((req, resp_tx)) = self.readonly_requests.pop_front() { - // acquire read access to the execution state and execute the read-only request + if let Some(req_resp) = self.readonly_requests.pop() { + let (req, resp_tx) = req_resp.into_request_sender_pair(); + + // Acquire read access to the execution state and execute the read-only request let outcome = self.execution_state.read().execute_readonly_request(req); - // send the execution output through resp_tx - if resp_tx.send(outcome).is_err() { - debug!("could not send execute_readonly_request response: response channel died"); - } + // Send the execution output through resp_tx. + // Ignore errors because they just mean that the request emitter dropped the received + // because it doesn't need the response anymore. + let _ = resp_tx.send(outcome); return true; } false } + /// Waits for an event to trigger a new iteration in the excution main loop. + /// + /// # Returns + /// Some(ExecutionInputData) representing the input requests, + /// or None if the main loop needs to stop. + fn wait_loop_event(&mut self) -> Option { + let mut cancel_input = loop { + let mut input_data_lock = self.input_data.1.lock(); + + // take current input data, resetting it + let input_data: ExecutionInputData = input_data_lock.take(); + + // check for stop signal + if input_data.stop { + break input_data; + } + + // Check for readonly requests, new blockclique or final slot changes + // The most frequent triggers are checked first. + if !input_data.readonly_requests.is_empty() + || input_data.new_blockclique.is_some() + || !input_data.finalized_blocks.is_empty() + { + return Some(input_data); + } + + // Check for slots to execute. + // The most frequent triggers are checked first, + // except for the active slot check which is last because it is more expensive. + if !self.readonly_requests.is_empty() + || !self.ready_final_slots.is_empty() + || self.are_there_active_slots_ready_for_execution() + { + return Some(input_data); + } + + // No input data, and no slots to execute. + + // Compute when the next slot will be + // This is useful to wait for the next speculative miss to append to active slots. 
+ let time_until_next_slot = self.get_time_until_next_active_slot(); + if time_until_next_slot == 0.into() { + // next slot is right now: the loop needs to iterate + return Some(input_data); + } + + // Wait to be notified of new input, for at most time_until_next_slot + // The return value is ignored because we don't care what woke up the condition variable. + let _res = self + .input_data + .0 + .wait_for(&mut input_data_lock, time_until_next_slot.to_duration()); + }; + + // The loop needs to quit + + // Cancel pending readonly requests + let cancel_err = ExecutionError::RuntimeError( + "readonly execution cancelled because VM is closing".into(), + ); + cancel_input.readonly_requests.cancel(cancel_err.clone()); + self.input_data + .1 + .lock() + .take() + .readonly_requests + .cancel(cancel_err); + + None + } + /// Main loop of the executin worker pub fn main_loop(&mut self) { // This loop restarts everytime an execution happens for easier tracking. @@ -409,22 +472,19 @@ impl ExecutionThread { // 1 - final executions // 2 - speculative executions // 3 - read-only executions - loop { - // read input requests - let input_data: ExecutionInputData = std::mem::take(&mut self.input_data.1.lock()); - - // check for stop signal - if input_data.stop { - break; - } - + while let Some(input_data) = self.wait_loop_event() { // update the sequence of final slots given the newly finalized blocks self.update_final_slots(input_data.finalized_blocks); // update the sequence of active slots - let blockclique_changed = input_data.new_blockclique.is_some(); self.update_active_slots(input_data.new_blockclique); + // The list of active slots might have seen + // new insertions/deletions of blocks at different slot depths. + // It is therefore important to signal this to the execution state, + // so that it can remove out-of-date speculative execution results from its history. + self.truncate_execution_history(); + // update the sequence of read-only requests self.update_readonly_requests(input_data.readonly_requests); @@ -438,14 +498,6 @@ impl ExecutionThread { // now all the slots that were ready for final execution have been executed as final - // If the blockclique has changed, the list of active slots might have seen - // new insertions/deletions of blocks at different slot depths. - // It is therefore important to signal this to the execution state, - // so that it can remove out-of-date speculative execution results from its history. - if blockclique_changed { - self.truncate_execution_history(); - } - // Execute one active slot in a speculative way, if there is one ready for that if self.execute_one_active_slot() { // An active slot was executed: restart the loop @@ -461,56 +513,6 @@ impl ExecutionThread { // a read-only request was executed: restart the loop continue; } - - // now there are no more executions to run - - // Peek into the input data to see if new input arrived during this iteration of the loop - let mut input_data = self.input_data.1.lock(); - if input_data.stop { - // there is a request to stop: quit the loop - break; - } - if input_data.new_blockclique.is_some() || !input_data.readonly_requests.is_empty() { - // there are blockclique updates or read-only requests: restart the loop - continue; - } - - // Here, we know that there is currently nothing to do for this worker - - // Compute when the next slot will be - // This is useful to wait for the next speculative miss to append to active slots. 
-            let time_until_next_slot = self.get_time_until_next_active_slot();
-            if time_until_next_slot == 0.into() {
-                // next slot is right now: simply restart the loop
-                continue;
-            }
-
-            // Wait to be notified of new input, for at most time_until_next_slot
-            // Note: spurious wake-ups are not a problem:
-            // the next loop iteration will just do nohing and come back to wait here.
-            let _res = self
-                .input_data
-                .0
-                .wait_for(&mut input_data, time_until_next_slot.to_duration());
-        }
-
-        // The execution worker is stopping:
-        // signal cancellation to all remaining read-only execution requests waiting for an MPSC response
-        // (both in input_data and in the internal queue)
-        let mut input_data = self.input_data.1.lock();
-        let request_iterator = self
-            .readonly_requests
-            .drain(..)
-            .chain(input_data.readonly_requests.drain(..));
-        for (_req, resp_tx) in request_iterator {
-            if resp_tx
-                .send(Err(ExecutionError::RuntimeError(
-                    "readonly execution cancelled because VM is closing".into(),
-                )))
-                .is_err()
-            {
-                debug!("failed sending readonly request response: channel down");
-            }
         }
     }
 }
@@ -536,11 +538,13 @@ pub fn start_execution_worker(
     )));

     // define the input data interface
-    let input_data = Arc::new((Condvar::new(), Mutex::new(ExecutionInputData::default())));
+    let input_data = Arc::new((
+        Condvar::new(),
+        Mutex::new(ExecutionInputData::new(config.clone())),
+    ));

     // create a controller
     let controller = ExecutionControllerImpl {
-        config: config.clone(),
         input_data: input_data.clone(),
         execution_state: execution_state.clone(),
     };

From 21eb23be4e63512f5e95b9572d9089219802a23f Mon Sep 17 00:00:00 2001
From: damip
Date: Fri, 25 Feb 2022 01:38:13 +0100
Subject: [PATCH 67/73] error formulation

---
 massa-execution-worker/src/worker.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs
index 110ddd529fb..7d4eec59058 100644
--- a/massa-execution-worker/src/worker.rs
+++ b/massa-execution-worker/src/worker.rs
@@ -452,7 +452,7 @@ impl ExecutionThread {

         // Cancel pending readonly requests
         let cancel_err = ExecutionError::RuntimeError(
-            "readonly execution cancelled because VM is closing".into(),
+            "readonly execution cancelled because the execution worker is closing".into(),
         );
         cancel_input.readonly_requests.cancel(cancel_err.clone());
         self.input_data

From dcd6e789d82ccdd4debe14b715e85bb9ccdfc5ed Mon Sep 17 00:00:00 2001
From: damip
Date: Fri, 25 Feb 2022 08:19:22 +0100
Subject: [PATCH 68/73] typo

---
 massa-models/src/api.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/massa-models/src/api.rs b/massa-models/src/api.rs
index 725dc866be5..29fb2ba0d85 100644
--- a/massa-models/src/api.rs
+++ b/massa-models/src/api.rs
@@ -172,8 +172,8 @@ impl std::fmt::Display for AddressInfo {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         writeln!(f, "Address: {}", self.address)?;
         writeln!(f, "Thread: {}", self.thread)?;
-        writeln!(f, "Parallel balance:\n{}", self.ledger_info)?;
-        writeln!(f, "Sequential balance:\n{}", self.sce_ledger_info)?;
+        writeln!(f, "Sequential balance:\n{}", self.ledger_info)?;
+        writeln!(f, "Parallel balance:\n{}", self.sce_ledger_info)?;
         writeln!(f, "Rolls:\n{}", self.rolls)?;
         writeln!(
             f,

From 711a3457f1b05e8269e30e0d859a36ccc44cc638 Mon Sep 17 00:00:00 2001
From: damip
Date: Fri, 25 Feb 2022 14:09:01 +0100
Subject: [PATCH 69/73] link TODOs to issues

---
 massa-execution-worker/src/execution.rs | 5 ++++-
 massa-execution-worker/src/tests/mod.rs | 2 +-
 massa-execution-worker/src/worker.rs    | 2 +-
 massa-ledger/src/bootstrap.rs           | 2 +-
 massa-ledger/src/ledger.rs              | 1 +
 massa-ledger/src/ledger_entry.rs        | 6 +++---
 6 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs
index 8f4e80b0ce3..398364895d7 100644
--- a/massa-execution-worker/src/execution.rs
+++ b/massa-execution-worker/src/execution.rs
@@ -191,6 +191,7 @@ impl ExecutionState {
     /// carried by the returned SpeculativeLedger is not held.
     /// TODO optimization: do not do this anymore but allow the speculative ledger to lazily query any subentry
     /// by scanning through history from end to beginning
+    /// https://github.com/massalabs/massa/issues/2343
     pub fn get_accumulated_active_changes_at_slot(&self, slot: Slot) -> LedgerChanges {
         // check that the slot is within the reach of history
         if slot <= self.final_cursor {
@@ -417,7 +418,7 @@ impl ExecutionState {
     }

     /// Gets a full ledger entry both at the latest final and active executed slots
-    /// TODO: this can be heavily optimized, see comments
+    /// TODO: this can be heavily optimized, see comments and https://github.com/massalabs/massa/issues/2343
     ///
     /// # returns
     /// (final_entry, active_entry)
@@ -431,6 +432,7 @@ impl ExecutionState {
         // get cumulative active changes and apply them
         // TODO there is a lot of overhead here: we only need to compute the changes for one entry and no need to clone it
         // also we should proceed backwards through history for performance
+        // https://github.com/massalabs/massa/issues/2343
         let active_change = self
             .get_accumulated_active_changes_at_slot(self.active_cursor)
             .get(addr)
@@ -483,6 +485,7 @@ impl ExecutionState {
             .chain(
                 // TODO note that active history is made of consecutive slots,
                 // so this algo does not need to scan all history items as iteration bounds can be derived a priori
+                // https://github.com/massalabs/massa/issues/2335
                 self.active_history
                     .iter()
                     .filter(|item| item.slot >= start && item.slot < end)
diff --git a/massa-execution-worker/src/tests/mod.rs b/massa-execution-worker/src/tests/mod.rs
index 909935ad7a0..0ffb9de2a68 100644
--- a/massa-execution-worker/src/tests/mod.rs
+++ b/massa-execution-worker/src/tests/mod.rs
@@ -1,3 +1,3 @@
 // Copyright (c) 2022 MASSA LABS

-//TODO mod scenarios_mandatories;
+//TODO mod scenarios_mandatories; https://github.com/massalabs/massa/pull/2296
diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs
index 7d4eec59058..5e1f452793a 100644
--- a/massa-execution-worker/src/worker.rs
+++ b/massa-execution-worker/src/worker.rs
@@ -307,7 +307,7 @@ impl ExecutionThread {

         // choose the execution target
         let exec_target = match self.active_slots.get(&slot) {
-            Some(b) => b.clone(), //TODO get rid of that clone
+            Some(b) => b.clone(), //TODO get rid of that clone on storage refactorig https://github.com/massalabs/massa/issues/2178
             None => return false,
         };

diff --git a/massa-ledger/src/bootstrap.rs b/massa-ledger/src/bootstrap.rs
index e5e92a8ce41..f56980c4b0c 100644
--- a/massa-ledger/src/bootstrap.rs
+++ b/massa-ledger/src/bootstrap.rs
@@ -58,7 +58,7 @@ impl DeserializeCompact for FinalLedgerBootstrapState {

         // ledger size
         let (ledger_size, delta) = u64::from_varint_bytes(&buffer[cursor..])?;
-        // TODO cap the ledger size
+        // TODO cap the ledger size https://github.com/massalabs/massa/issues/1200
         cursor += delta;

         // final ledger
diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs
index 43394c4d6f7..450a0a227d4 100644
--- a/massa-ledger/src/ledger.rs
+++ b/massa-ledger/src/ledger.rs
@@ -144,6 +144,7 @@ impl FinalLedger {
     /// A clone of the whole LedgerEntry, or None if not found.
     ///
     /// TODO: in the future, never manipulate full ledger entries because their datastore can be huge
+    /// https://github.com/massalabs/massa/issues/2342
     pub fn get_full_entry(&self, addr: &Address) -> Option<LedgerEntry> {
         self.sorted_ledger.get(addr).cloned()
     }
diff --git a/massa-ledger/src/ledger_entry.rs b/massa-ledger/src/ledger_entry.rs
index ac2a5299a69..14abb937641 100644
--- a/massa-ledger/src/ledger_entry.rs
+++ b/massa-ledger/src/ledger_entry.rs
@@ -110,7 +110,7 @@ impl DeserializeCompact for LedgerEntry {
         let bytecode_len: usize = bytecode_len.try_into().map_err(|_| {
             ModelsError::SerializeError("could not convert bytecode size to usize".into())
         })?;
-        //TODO cap bytecode length
+        //TODO cap bytecode length https://github.com/massalabs/massa/issues/1200
         cursor += delta;

         // bytecode
@@ -128,7 +128,7 @@ impl DeserializeCompact for LedgerEntry {
         let datastore_len: usize = datastore_len.try_into().map_err(|_| {
             ModelsError::SerializeError("could not convert datastore size to usize".into())
         })?;
-        //TODO cap datastore length
+        //TODO cap datastore length https://github.com/massalabs/massa/issues/1200
         cursor += delta;

         // datastore entries
@@ -145,7 +145,7 @@ impl DeserializeCompact for LedgerEntry {
                     "could not convert datastore entry value size to usize".into(),
                 )
             })?;
-            //TODO cap value length
+            //TODO cap value length https://github.com/massalabs/massa/issues/1200
             cursor += delta;

             // value

From d71d32ad2bec2c3d9825720a8ddfdf3e8935d08f Mon Sep 17 00:00:00 2001
From: damip
Date: Fri, 25 Feb 2022 15:53:21 +0100
Subject: [PATCH 70/73] more accurate error type

---
 massa-execution-worker/src/worker.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs
index 5e1f452793a..15e12b44c06 100644
--- a/massa-execution-worker/src/worker.rs
+++ b/massa-execution-worker/src/worker.rs
@@ -451,7 +451,7 @@ impl ExecutionThread {
         // The loop needs to quit

         // Cancel pending readonly requests
-        let cancel_err = ExecutionError::RuntimeError(
+        let cancel_err = ExecutionError::ChannelError(
             "readonly execution cancelled because the execution worker is closing".into(),
         );
         cancel_input.readonly_requests.cancel(cancel_err.clone());

From 04b769962b243d2a21735b8a879f95de6cd81c0e Mon Sep 17 00:00:00 2001
From: Damir Vodenicarevic
Date: Tue, 1 Mar 2022 11:54:24 +0100
Subject: [PATCH 71/73] add execution sink

---
 massa-consensus-worker/src/tests/tools.rs | 32 ++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)

diff --git a/massa-consensus-worker/src/tests/tools.rs b/massa-consensus-worker/src/tests/tools.rs
index e6d964af740..dbbb0d9e674 100644
--- a/massa-consensus-worker/src/tests/tools.rs
+++ b/massa-consensus-worker/src/tests/tools.rs
@@ -23,8 +23,12 @@ use massa_signature::{
     derive_public_key, generate_random_private_key, sign, PrivateKey, PublicKey, Signature,
 };
 use massa_time::MassaTime;
-use std::str::FromStr;
 use std::{collections::HashSet, future::Future};
+use std::{
+    str::FromStr,
+    sync::{Arc, Mutex},
+    time::Duration,
+};

 use tracing::info;

@@ -639,7 +643,14 @@ pub async fn consensus_pool_test(
         MockProtocolController::new();
     let (pool_controller, pool_command_sender) = MockPoolController::new();
     // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded
-    let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver();
+    let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver();
+    let stop_sinks = Arc::new(Mutex::new(false));
+    let stop_sinks_clone = stop_sinks.clone();
+    let execution_sink = std::thread::spawn(move || {
+        while *stop_sinks_clone.lock().unwrap() == true {
+            let _ = execution_rx.recv_timeout(Duration::from_millis(500));
+        }
+    });

     // launch consensus controller
     let (consensus_command_sender, consensus_event_receiver, consensus_manager) =
@@ -681,6 +692,10 @@ pub async fn consensus_pool_test(
         .await
         .unwrap();
     pool_sink.stop().await;
+
+    // stop sinks
+    *stop_sinks.lock().unwrap() = true;
+    execution_sink.join().unwrap();
 }

 /// Runs a consensus test, without passing a mock pool controller to it.
@@ -700,7 +715,14 @@ where
         MockProtocolController::new();
     let (pool_controller, pool_command_sender) = MockPoolController::new();
     // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded
-    let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver();
+    let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver();
+    let stop_sinks = Arc::new(Mutex::new(false));
+    let stop_sinks_clone = stop_sinks.clone();
+    let execution_sink = std::thread::spawn(move || {
+        while *stop_sinks_clone.lock().unwrap() == true {
+            let _ = execution_rx.recv_timeout(Duration::from_millis(500));
+        }
+    });
     let pool_sink = PoolCommandSink::new(pool_controller).await;

     // launch consensus controller
@@ -736,6 +758,10 @@ where
         .await
         .unwrap();
     pool_sink.stop().await;
+
+    // stop sinks
+    *stop_sinks.lock().unwrap() = true;
+    execution_sink.join().unwrap();
 }

 pub fn get_cliques(graph: &BlockGraphExport, hash: BlockId) -> HashSet<usize> {

From f89a6eb134349be91b94db7df4bcee7672f9fa1f Mon Sep 17 00:00:00 2001
From: Damir Vodenicarevic
Date: Tue, 1 Mar 2022 20:26:23 +0100
Subject: [PATCH 72/73] update tests

---
 massa-consensus-worker/src/tests/tools.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/massa-consensus-worker/src/tests/tools.rs b/massa-consensus-worker/src/tests/tools.rs
index dbbb0d9e674..4ae4d9c1854 100644
--- a/massa-consensus-worker/src/tests/tools.rs
+++ b/massa-consensus-worker/src/tests/tools.rs
@@ -647,7 +647,7 @@ pub async fn consensus_pool_test(
     let stop_sinks = Arc::new(Mutex::new(false));
     let stop_sinks_clone = stop_sinks.clone();
     let execution_sink = std::thread::spawn(move || {
-        while *stop_sinks_clone.lock().unwrap() == true {
+        while *stop_sinks_clone.lock().unwrap() == false {
             let _ = execution_rx.recv_timeout(Duration::from_millis(500));
         }
     });
@@ -719,7 +719,7 @@ where
     let stop_sinks = Arc::new(Mutex::new(false));
     let stop_sinks_clone = stop_sinks.clone();
     let execution_sink = std::thread::spawn(move || {
-        while *stop_sinks_clone.lock().unwrap() == true {
+        while *stop_sinks_clone.lock().unwrap() == false {
             let _ = execution_rx.recv_timeout(Duration::from_millis(500));
         }
     });

From 68c6eb246b1cfe80c2b89b0319f7e14c8992a5a1 Mon Sep 17 00:00:00 2001
From: Damir Vodenicarevic
Date: Tue, 1 Mar 2022 20:27:29 +0100
Subject: [PATCH 73/73] clippy

---
 massa-consensus-worker/src/tests/tools.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/massa-consensus-worker/src/tests/tools.rs b/massa-consensus-worker/src/tests/tools.rs
index 4ae4d9c1854..f64d41f9431 100644
--- a/massa-consensus-worker/src/tests/tools.rs
+++ b/massa-consensus-worker/src/tests/tools.rs
@@ -647,7 +647,7 @@ pub async fn consensus_pool_test(
     let stop_sinks = Arc::new(Mutex::new(false));
     let stop_sinks_clone = stop_sinks.clone();
     let execution_sink = std::thread::spawn(move || {
-        while *stop_sinks_clone.lock().unwrap() == false {
+        while !*stop_sinks_clone.lock().unwrap() {
             let _ = execution_rx.recv_timeout(Duration::from_millis(500));
         }
     });
@@ -719,7 +719,7 @@ where
     let stop_sinks = Arc::new(Mutex::new(false));
     let stop_sinks_clone = stop_sinks.clone();
     let execution_sink = std::thread::spawn(move || {
-        while *stop_sinks_clone.lock().unwrap() == false {
+        while !*stop_sinks_clone.lock().unwrap() {
             let _ = execution_rx.recv_timeout(Duration::from_millis(500));
         }
     });