diff --git a/.changelog/unreleased/bug-fixes/1182-dont-persist-genesis-on-init-chain.md b/.changelog/unreleased/bug-fixes/1182-dont-persist-genesis-on-init-chain.md new file mode 100644 index 0000000000..179f39c7ca --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1182-dont-persist-genesis-on-init-chain.md @@ -0,0 +1,4 @@ +- Fixed the init-chain handler to stop committing state to the DB + as it may be re-applied when the node is shut-down before the + first block is committed, leading to an invalid genesis state. + ([#1182](https://github.com/anoma/namada/pull/1182)) \ No newline at end of file diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 2ac9d482aa..336c4d9415 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -402,7 +402,6 @@ where let new_epoch = self .wl_storage - .storage .update_epoch(height, header_time) .expect("Must be able to update epoch"); @@ -837,7 +836,7 @@ mod test_finalize_block { min_duration: DurationSecs(0), }; namada::ledger::parameters::update_epoch_parameter( - &mut shell.wl_storage.storage, + &mut shell.wl_storage, &epoch_duration, ) .unwrap(); @@ -878,7 +877,7 @@ mod test_finalize_block { add_proposal(1, ProposalVote::Nay); // Commit the genesis state - shell.wl_storage.commit_genesis().unwrap(); + shell.wl_storage.commit_block().unwrap(); shell.commit(); // Collect all storage key-vals into a sorted map diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index fec7864306..1198007bee 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -25,6 +25,8 @@ where /// Create a new genesis for the chain with specified id. This includes /// 1. A set of initial users and tokens /// 2. Setting up the validity predicates for both users and tokens + /// + /// INVARIANT: This method must not commit the state changes to DB. pub fn init_chain( &mut self, init: request::InitChain, @@ -138,12 +140,15 @@ where #[cfg(not(feature = "mainnet"))] wrapper_tx_fees, }; - parameters.init_storage(&mut self.wl_storage.storage); + parameters + .init_storage(&mut self.wl_storage) + .expect("Initializing chain parameters must not fail"); // Initialize governance parameters genesis .gov_params - .init_storage(&mut self.wl_storage.storage); + .init_storage(&mut self.wl_storage) + .expect("Initializing governance parameters must not fail"); // Depends on parameters being initialized self.wl_storage @@ -342,7 +347,7 @@ where .map(|validator| validator.pos_data), current_epoch, ); - ibc::init_genesis_storage(&mut self.wl_storage.storage); + ibc::init_genesis_storage(&mut self.wl_storage); // Set the initial validator set for validator in genesis.validators { @@ -360,10 +365,6 @@ where response.validators.push(abci_validator); } - self.wl_storage - .commit_genesis() - .expect("Must be able to commit genesis state"); - Ok(response) } } @@ -391,3 +392,59 @@ where } } } + +#[cfg(test)] +mod test { + use std::collections::BTreeMap; + use std::str::FromStr; + + use namada::ledger::storage::DBIter; + use namada::types::chain::ChainId; + use namada::types::storage; + + use crate::facade::tendermint_proto::abci::RequestInitChain; + use crate::facade::tendermint_proto::google::protobuf::Timestamp; + use crate::node::ledger::shell::test_utils::TestShell; + + /// Test that the init-chain handler never commits changes directly to the + /// DB. 
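    /// The test below snapshots every key-value pair in the DB (collected
    /// under the empty prefix into a sorted map) before and after calling
    /// `init_chain` and asserts that the two snapshots are identical, so any
    /// future code path that commits state during chain initialization will
    /// make it fail.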
+ #[test] + fn test_init_chain_doesnt_commit_db() { + let (mut shell, _receiver) = TestShell::new(); + + // Collect all storage key-vals into a sorted map + let store_block_state = |shell: &TestShell| -> BTreeMap<_, _> { + let prefix: storage::Key = FromStr::from_str("").unwrap(); + shell + .wl_storage + .storage + .db + .iter_prefix(&prefix) + .map(|(key, val, _gas)| (key, val)) + .collect() + }; + + // Store the full state in sorted map + let initial_storage_state: std::collections::BTreeMap> = + store_block_state(&shell); + + shell.init_chain(RequestInitChain { + time: Some(Timestamp { + seconds: 0, + nanos: 0, + }), + chain_id: ChainId::default().to_string(), + ..Default::default() + }); + + // Store the full state again + let storage_state: std::collections::BTreeMap> = + store_block_state(&shell); + + // The storage state must be unchanged + itertools::assert_equal( + initial_storage_state.iter(), + storage_state.iter(), + ); + } +} diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 6b4b05b5ad..ef7fe504a1 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -709,9 +709,9 @@ where tx: &namada::types::transaction::WrapperTx, ) -> bool { if let Some(solution) = &tx.pow_solution { - if let (Some(faucet_address), _gas) = + if let Some(faucet_address) = namada::ledger::parameters::read_faucet_account_parameter( - &self.wl_storage.storage, + &self.wl_storage, ) .expect("Must be able to read faucet account parameter") { @@ -727,11 +727,10 @@ where #[cfg(not(feature = "mainnet"))] /// Get fixed amount of fees for wrapper tx fn get_wrapper_tx_fees(&self) -> token::Amount { - let (fees, _gas) = - namada::ledger::parameters::read_wrapper_tx_fees_parameter( - &self.wl_storage.storage, - ) - .expect("Must be able to read wrapper tx fees parameter"); + let fees = namada::ledger::parameters::read_wrapper_tx_fees_parameter( + &self.wl_storage, + ) + .expect("Must be able to read wrapper tx fees parameter"); fees.unwrap_or(token::Amount::whole(MIN_FEE)) } @@ -743,9 +742,9 @@ where tx: &namada::types::transaction::WrapperTx, ) -> bool { if let Some(solution) = &tx.pow_solution { - if let (Some(faucet_address), _gas) = + if let Some(faucet_address) = namada::ledger::parameters::read_faucet_account_parameter( - &self.wl_storage.storage, + &self.wl_storage, ) .expect("Must be able to read faucet account parameter") { diff --git a/apps/src/lib/node/ledger/storage/mod.rs b/apps/src/lib/node/ledger/storage/mod.rs index a5d0fed81d..d477ff2de3 100644 --- a/apps/src/lib/node/ledger/storage/mod.rs +++ b/apps/src/lib/node/ledger/storage/mod.rs @@ -389,7 +389,7 @@ mod tests { itertools::assert_equal(iter, expected.clone()); // Commit genesis state - storage.commit_genesis().unwrap(); + storage.commit_block().unwrap(); // Again, try to iterate over their prefix let iter = storage_api::iter_prefix(&storage, &prefix) @@ -440,7 +440,7 @@ mod tests { itertools::assert_equal(iter, expected.clone()); // Commit genesis state - storage.commit_genesis().unwrap(); + storage.commit_block().unwrap(); // And check again let iter = storage_api::iter_prefix(&storage, &prefix) diff --git a/core/src/ledger/governance/parameters.rs b/core/src/ledger/governance/parameters.rs index 71dca8c91b..9ae820d96c 100644 --- a/core/src/ledger/governance/parameters.rs +++ b/core/src/ledger/governance/parameters.rs @@ -3,8 +3,7 @@ use std::fmt::Display; use borsh::{BorshDeserialize, BorshSerialize}; use super::storage as gov_storage; -use 
crate::ledger::storage::types::encode; -use crate::ledger::storage::{self, Storage}; +use crate::ledger::storage_api::{self, StorageRead, StorageWrite}; use crate::types::token::Amount; #[derive( @@ -66,10 +65,9 @@ impl Default for GovParams { impl GovParams { /// Initialize governance parameters into storage - pub fn init_storage(&self, storage: &mut Storage) + pub fn init_storage(&self, storage: &mut S) -> storage_api::Result<()> where - DB: storage::DB + for<'iter> storage::DBIter<'iter>, - H: storage::StorageHasher, + S: StorageRead + StorageWrite, { let Self { min_proposal_fund, @@ -82,49 +80,31 @@ impl GovParams { let min_proposal_fund_key = gov_storage::get_min_proposal_fund_key(); let amount = Amount::whole(*min_proposal_fund); - storage - .write(&min_proposal_fund_key, encode(&amount)) - .unwrap(); + storage.write(&min_proposal_fund_key, amount)?; let max_proposal_code_size_key = gov_storage::get_max_proposal_code_size_key(); - storage - .write(&max_proposal_code_size_key, encode(max_proposal_code_size)) - .unwrap(); + storage.write(&max_proposal_code_size_key, max_proposal_code_size)?; let min_proposal_period_key = gov_storage::get_min_proposal_period_key(); - storage - .write(&min_proposal_period_key, encode(min_proposal_period)) - .unwrap(); + storage.write(&min_proposal_period_key, min_proposal_period)?; let max_proposal_period_key = gov_storage::get_max_proposal_period_key(); - storage - .write(&max_proposal_period_key, encode(max_proposal_period)) - .unwrap(); + storage.write(&max_proposal_period_key, max_proposal_period)?; let max_proposal_content_size_key = gov_storage::get_max_proposal_content_key(); storage - .write( - &max_proposal_content_size_key, - encode(max_proposal_content_size), - ) - .expect("Should be able to write to storage"); + .write(&max_proposal_content_size_key, max_proposal_content_size)?; let min_proposal_grace_epoch_key = gov_storage::get_min_proposal_grace_epoch_key(); storage - .write( - &min_proposal_grace_epoch_key, - encode(min_proposal_grace_epochs), - ) - .expect("Should be able to write to storage"); + .write(&min_proposal_grace_epoch_key, min_proposal_grace_epochs)?; let counter_key = gov_storage::get_counter_key(); - storage - .write(&counter_key, encode(&u64::MIN)) - .expect("Should be able to write to storage"); + storage.write(&counter_key, u64::MIN) } } diff --git a/core/src/ledger/parameters/mod.rs b/core/src/ledger/parameters/mod.rs index 7cb8174f0b..442a614d68 100644 --- a/core/src/ledger/parameters/mod.rs +++ b/core/src/ledger/parameters/mod.rs @@ -5,12 +5,11 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use rust_decimal::Decimal; use thiserror::Error; -use super::storage::types::{decode, encode}; -use super::storage::{types, Storage}; +use super::storage::types; +use super::storage_api::{self, ResultExt, StorageRead, StorageWrite}; use crate::ledger::storage::{self as ledger_storage}; use crate::types::address::{Address, InternalAddress}; use crate::types::chain::ProposalBytes; -use crate::types::storage::Key; use crate::types::time::DurationSecs; use crate::types::token; @@ -103,10 +102,9 @@ pub enum WriteError { impl Parameters { /// Initialize parameters in storage in the genesis block. 
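The signature change below is the core of the refactor in this file: parameter initialization becomes generic over any storage that implements the read/write traits, so at genesis it can target the `WlStorage` write log instead of writing straight into the DB, and errors are propagated as `storage_api::Result` instead of being unwrapped at every call site. A minimal sketch of that pattern, using a toy `KvWrite` trait, `WriteLog`, and `Params` type as stand-ins for the real namada items (not their actual definitions):

use std::collections::BTreeMap;

/// Simplified stand-in for namada's `StorageWrite` trait.
pub trait KvWrite {
    fn write(&mut self, key: &str, value: Vec<u8>) -> Result<(), String>;
}

/// An in-memory write log: nothing reaches a DB until it is explicitly
/// committed elsewhere.
#[derive(Default)]
pub struct WriteLog(pub BTreeMap<String, Vec<u8>>);

impl KvWrite for WriteLog {
    fn write(&mut self, key: &str, value: Vec<u8>) -> Result<(), String> {
        self.0.insert(key.to_owned(), value);
        Ok(())
    }
}

/// Hypothetical parameters struct, used only for this illustration.
pub struct Params {
    pub epochs_per_year: u64,
}

impl Params {
    /// Generic over any writer, mirroring the shape of the refactored
    /// `init_storage`; errors bubble up with `?` instead of `expect`.
    pub fn init_storage<S: KvWrite>(&self, storage: &mut S) -> Result<(), String> {
        storage.write(
            "parameters/epochs_per_year",
            self.epochs_per_year.to_be_bytes().to_vec(),
        )?;
        Ok(())
    }
}

fn main() {
    let mut log = WriteLog::default();
    Params { epochs_per_year: 365 }
        .init_storage(&mut log)
        .expect("init must succeed");
    // The value sits in the write log only; nothing was committed.
    assert_eq!(log.0.len(), 1);
}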
- pub fn init_storage(&self, storage: &mut Storage) + pub fn init_storage(&self, storage: &mut S) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let Self { epoch_duration, @@ -128,518 +126,359 @@ impl Parameters { // write max proposal bytes parameter let max_proposal_bytes_key = storage::get_max_proposal_bytes_key(); - let max_proposal_bytes_value = encode(&max_proposal_bytes); - storage - .write(&max_proposal_bytes_key, max_proposal_bytes_value) - .expect( - "Max proposal bytes parameter must be initialized in the \ - genesis block", - ); + storage.write(&max_proposal_bytes_key, max_proposal_bytes)?; // write epoch parameters let epoch_key = storage::get_epoch_duration_storage_key(); - let epoch_value = encode(epoch_duration); - storage.write(&epoch_key, epoch_value).expect( - "Epoch parameters must be initialized in the genesis block", - ); + storage.write(&epoch_key, epoch_duration)?; // write vp whitelist parameter let vp_whitelist_key = storage::get_vp_whitelist_storage_key(); - let vp_whitelist_value = encode( - &vp_whitelist - .iter() - .map(|id| id.to_lowercase()) - .collect::>(), - ); - storage.write(&vp_whitelist_key, vp_whitelist_value).expect( - "Vp whitelist parameter must be initialized in the genesis block", - ); + let vp_whitelist = vp_whitelist + .iter() + .map(|id| id.to_lowercase()) + .collect::>(); + storage.write(&vp_whitelist_key, vp_whitelist)?; // write tx whitelist parameter let tx_whitelist_key = storage::get_tx_whitelist_storage_key(); - let tx_whitelist_value = encode( - &tx_whitelist - .iter() - .map(|id| id.to_lowercase()) - .collect::>(), - ); - storage.write(&tx_whitelist_key, tx_whitelist_value).expect( - "Tx whitelist parameter must be initialized in the genesis block", - ); + let tx_whitelist = tx_whitelist + .iter() + .map(|id| id.to_lowercase()) + .collect::>(); + storage.write(&tx_whitelist_key, tx_whitelist)?; - // write tx whitelist parameter + // write max expected time per block let max_expected_time_per_block_key = storage::get_max_expected_time_per_block_key(); - let max_expected_time_per_block_value = - encode(&max_expected_time_per_block); - storage - .write( - &max_expected_time_per_block_key, - max_expected_time_per_block_value, - ) - .expect( - "Max expected time per block parameter must be initialized in \ - the genesis block", - ); + storage.write( + &max_expected_time_per_block_key, + max_expected_time_per_block, + )?; // write implicit vp parameter let implicit_vp_key = storage::get_implicit_vp_key(); - storage.write(&implicit_vp_key, implicit_vp).expect( - "Implicit VP parameter must be initialized in the genesis block", - ); + // Using `fn write_bytes` here, because implicit_vp doesn't need to be + // encoded, it's bytes already. 
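        // (The generic `write` used elsewhere in this function Borsh-encodes
        // its value before storing it; `write_bytes` stores the given bytes
        // verbatim, which is what we want for the implicit VP code, since it
        // is already a serialized byte vector.)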
+ storage.write_bytes(&implicit_vp_key, implicit_vp)?; let epochs_per_year_key = storage::get_epochs_per_year_key(); - let epochs_per_year_value = encode(epochs_per_year); - storage - .write(&epochs_per_year_key, epochs_per_year_value) - .expect( - "Epochs per year parameter must be initialized in the genesis \ - block", - ); + storage.write(&epochs_per_year_key, epochs_per_year)?; let pos_gain_p_key = storage::get_pos_gain_p_key(); - let pos_gain_p_value = encode(pos_gain_p); - storage.write(&pos_gain_p_key, pos_gain_p_value).expect( - "PoS P-gain parameter must be initialized in the genesis block", - ); + storage.write(&pos_gain_p_key, pos_gain_p)?; let pos_gain_d_key = storage::get_pos_gain_d_key(); - let pos_gain_d_value = encode(pos_gain_d); - storage.write(&pos_gain_d_key, pos_gain_d_value).expect( - "PoS D-gain parameter must be initialized in the genesis block", - ); + storage.write(&pos_gain_d_key, pos_gain_d)?; let staked_ratio_key = storage::get_staked_ratio_key(); - let staked_ratio_val = encode(staked_ratio); - storage.write(&staked_ratio_key, staked_ratio_val).expect( - "PoS staked ratio parameter must be initialized in the genesis \ - block", - ); + storage.write(&staked_ratio_key, staked_ratio)?; let pos_inflation_key = storage::get_pos_inflation_amount_key(); - let pos_inflation_val = encode(pos_inflation_amount); - storage.write(&pos_inflation_key, pos_inflation_val).expect( - "PoS inflation rate parameter must be initialized in the genesis \ - block", - ); + storage.write(&pos_inflation_key, pos_inflation_amount)?; #[cfg(not(feature = "mainnet"))] if let Some(faucet_account) = faucet_account { let faucet_account_key = storage::get_faucet_account_key(); - let faucet_account_val = encode(faucet_account); - storage - .write(&faucet_account_key, faucet_account_val) - .expect( - "Faucet account parameter must be initialized in the \ - genesis block, if any", - ); + storage.write(&faucet_account_key, faucet_account)?; } #[cfg(not(feature = "mainnet"))] { let wrapper_tx_fees_key = storage::get_wrapper_tx_fees_key(); - let wrapper_tx_fees_val = - encode(&wrapper_tx_fees.unwrap_or(token::Amount::whole(100))); - storage - .write(&wrapper_tx_fees_key, wrapper_tx_fees_val) - .expect( - "Wrapper tx fees must be initialized in the genesis block", - ); + let wrapper_tx_fees = + wrapper_tx_fees.unwrap_or(token::Amount::whole(100)); + storage.write(&wrapper_tx_fees_key, wrapper_tx_fees)?; } + Ok(()) } } + /// Update the max_expected_time_per_block parameter in storage. Returns the /// parameters and gas cost. -pub fn update_max_expected_time_per_block_parameter( - storage: &mut Storage, +pub fn update_max_expected_time_per_block_parameter( + storage: &mut S, value: &DurationSecs, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_max_expected_time_per_block_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the vp whitelist parameter in storage. Returns the parameters and gas /// cost. 
-pub fn update_vp_whitelist_parameter( - storage: &mut Storage, +pub fn update_vp_whitelist_parameter( + storage: &mut S, value: Vec, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_vp_whitelist_storage_key(); - update( - storage, - &value + storage.write( + &key, + value .iter() .map(|id| id.to_lowercase()) .collect::>(), - key, ) } /// Update the tx whitelist parameter in storage. Returns the parameters and gas /// cost. -pub fn update_tx_whitelist_parameter( - storage: &mut Storage, +pub fn update_tx_whitelist_parameter( + storage: &mut S, value: Vec, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_tx_whitelist_storage_key(); - update( - storage, - &value + storage.write( + &key, + value .iter() .map(|id| id.to_lowercase()) .collect::>(), - key, ) } /// Update the epoch parameter in storage. Returns the parameters and gas /// cost. -pub fn update_epoch_parameter( - storage: &mut Storage, +pub fn update_epoch_parameter( + storage: &mut S, value: &EpochDuration, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_epoch_duration_storage_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the epochs_per_year parameter in storage. Returns the parameters and /// gas cost. -pub fn update_epochs_per_year_parameter( - storage: &mut Storage, +pub fn update_epochs_per_year_parameter( + storage: &mut S, value: &EpochDuration, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_epochs_per_year_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the PoS P-gain parameter in storage. Returns the parameters and gas /// cost. -pub fn update_pos_gain_p_parameter( - storage: &mut Storage, +pub fn update_pos_gain_p_parameter( + storage: &mut S, value: &EpochDuration, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_pos_gain_p_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the PoS D-gain parameter in storage. Returns the parameters and gas /// cost. -pub fn update_pos_gain_d_parameter( - storage: &mut Storage, +pub fn update_pos_gain_d_parameter( + storage: &mut S, value: &EpochDuration, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_pos_gain_d_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the PoS staked ratio parameter in storage. Returns the parameters and /// gas cost. 
-pub fn update_staked_ratio_parameter( - storage: &mut Storage, +pub fn update_staked_ratio_parameter( + storage: &mut S, value: &EpochDuration, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_staked_ratio_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the PoS inflation rate parameter in storage. Returns the parameters /// and gas cost. -pub fn update_pos_inflation_amount_parameter( - storage: &mut Storage, +pub fn update_pos_inflation_amount_parameter( + storage: &mut S, value: &EpochDuration, -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_pos_inflation_amount_key(); - update(storage, value, key) + storage.write(&key, value) } /// Update the implicit VP parameter in storage. Return the gas cost. -pub fn update_implicit_vp( - storage: &mut Storage, +pub fn update_implicit_vp( + storage: &mut S, implicit_vp: &[u8], -) -> std::result::Result +) -> storage_api::Result<()> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead + StorageWrite, { let key = storage::get_implicit_vp_key(); - // Not using `fn update` here, because implicit_vp doesn't need to be + // Using `fn write_bytes` here, because implicit_vp doesn't need to be // encoded, it's bytes already. - let (gas, _size_diff) = storage - .write(&key, implicit_vp) - .map_err(WriteError::StorageError)?; - Ok(gas) -} - -/// Update the parameters in storage. Returns the parameters and gas -/// cost. -pub fn update( - storage: &mut Storage, - value: &T, - key: Key, -) -> std::result::Result -where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, - T: BorshSerialize, -{ - let serialized_value = value - .try_to_vec() - .map_err(|e| WriteError::SerializeError(e.to_string()))?; - let (gas, _size_diff) = storage - .write(&key, serialized_value) - .map_err(WriteError::StorageError)?; - Ok(gas) + storage.write_bytes(&key, implicit_vp) } /// Read the the epoch duration parameter from store -pub fn read_epoch_duration_parameter( - storage: &Storage, -) -> std::result::Result<(EpochDuration, u64), ReadError> +pub fn read_epoch_duration_parameter( + storage: &S, +) -> storage_api::Result where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead, { // read epoch let epoch_key = storage::get_epoch_duration_storage_key(); - let (value, gas) = - storage.read(&epoch_key).map_err(ReadError::StorageError)?; - let epoch_duration: EpochDuration = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; - - Ok((epoch_duration, gas)) + let epoch_duration = storage.read(&epoch_key)?; + epoch_duration + .ok_or(ReadError::ParametersMissing) + .into_storage_result() } #[cfg(not(feature = "mainnet"))] /// Read the faucet account's address, if any -pub fn read_faucet_account_parameter( - storage: &Storage, -) -> std::result::Result<(Option
, u64), ReadError> +pub fn read_faucet_account_parameter( + storage: &S, +) -> storage_api::Result> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead, { let faucet_account_key = storage::get_faucet_account_key(); - let (value, gas_faucet_account) = storage - .read(&faucet_account_key) - .map_err(ReadError::StorageError)?; - let address: Option
= value - .map(|value| decode(value).map_err(ReadError::StorageTypeError)) - .transpose()?; - Ok((address, gas_faucet_account)) + storage.read(&faucet_account_key) } #[cfg(not(feature = "mainnet"))] /// Read the wrapper tx fees amount, if any -pub fn read_wrapper_tx_fees_parameter( - storage: &Storage, -) -> std::result::Result<(Option, u64), ReadError> +pub fn read_wrapper_tx_fees_parameter( + storage: &S, +) -> storage_api::Result> where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead, { let wrapper_tx_fees_key = storage::get_wrapper_tx_fees_key(); - let (value, gas_wrapper_tx_fees) = storage - .read(&wrapper_tx_fees_key) - .map_err(ReadError::StorageError)?; - let fee: Option = value - .map(|value| decode(value).map_err(ReadError::StorageTypeError)) - .transpose()?; - Ok((fee, gas_wrapper_tx_fees)) + storage.read(&wrapper_tx_fees_key) } // Read the all the parameters from storage. Returns the parameters and gas /// cost. -pub fn read( - storage: &Storage, -) -> std::result::Result<(Parameters, u64), ReadError> +pub fn read(storage: &S) -> storage_api::Result where - DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: ledger_storage::StorageHasher, + S: StorageRead, { // read max proposal bytes - let (max_proposal_bytes, gas_proposal_bytes) = { + let max_proposal_bytes: ProposalBytes = { let key = storage::get_max_proposal_bytes_key(); - let (value, gas) = - storage.read(&key).map_err(ReadError::StorageError)?; - let value: ProposalBytes = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; - (value, gas) + let value = storage.read(&key)?; + value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()? }; // read epoch duration - let (epoch_duration, gas_epoch) = read_epoch_duration_parameter(storage) - .expect("Couldn't read epoch duration parameters"); + let epoch_duration = read_epoch_duration_parameter(storage)?; // read vp whitelist let vp_whitelist_key = storage::get_vp_whitelist_storage_key(); - let (value, gas_vp) = storage - .read(&vp_whitelist_key) - .map_err(ReadError::StorageError)?; - let vp_whitelist: Vec = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&vp_whitelist_key)?; + let vp_whitelist: Vec = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read tx whitelist let tx_whitelist_key = storage::get_tx_whitelist_storage_key(); - let (value, gas_tx) = storage - .read(&tx_whitelist_key) - .map_err(ReadError::StorageError)?; - let tx_whitelist: Vec = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&tx_whitelist_key)?; + let tx_whitelist: Vec = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; let max_expected_time_per_block_key = storage::get_max_expected_time_per_block_key(); - let (value, gas_time) = storage - .read(&max_expected_time_per_block_key) - .map_err(ReadError::StorageError)?; - let max_expected_time_per_block: DurationSecs = - decode(value.ok_or(ReadError::ParametersMissing)?) 
- .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&max_expected_time_per_block_key)?; + let max_expected_time_per_block: DurationSecs = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; let implicit_vp_key = storage::get_implicit_vp_key(); - let (value, gas_implicit_vp) = storage - .read(&implicit_vp_key) - .map_err(ReadError::StorageError)?; - let implicit_vp = value.ok_or(ReadError::ParametersMissing)?; + let value = storage.read_bytes(&implicit_vp_key)?; + let implicit_vp = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read epochs per year let epochs_per_year_key = storage::get_epochs_per_year_key(); - let (value, gas_epy) = storage - .read(&epochs_per_year_key) - .map_err(ReadError::StorageError)?; - let epochs_per_year: u64 = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&epochs_per_year_key)?; + let epochs_per_year: u64 = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read PoS gain P let pos_gain_p_key = storage::get_pos_gain_p_key(); - let (value, gas_gain_p) = storage - .read(&pos_gain_p_key) - .map_err(ReadError::StorageError)?; - let pos_gain_p: Decimal = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&pos_gain_p_key)?; + let pos_gain_p: Decimal = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read PoS gain D let pos_gain_d_key = storage::get_pos_gain_d_key(); - let (value, gas_gain_d) = storage - .read(&pos_gain_d_key) - .map_err(ReadError::StorageError)?; - let pos_gain_d: Decimal = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&pos_gain_d_key)?; + let pos_gain_d: Decimal = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read staked ratio let staked_ratio_key = storage::get_staked_ratio_key(); - let (value, gas_staked) = storage - .read(&staked_ratio_key) - .map_err(ReadError::StorageError)?; - let staked_ratio: Decimal = - decode(value.ok_or(ReadError::ParametersMissing)?) - .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&staked_ratio_key)?; + let staked_ratio: Decimal = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read PoS inflation rate let pos_inflation_key = storage::get_pos_inflation_amount_key(); - let (value, gas_reward) = storage - .read(&pos_inflation_key) - .map_err(ReadError::StorageError)?; - let pos_inflation_amount: u64 = - decode(value.ok_or(ReadError::ParametersMissing)?) 
- .map_err(ReadError::StorageTypeError)?; + let value = storage.read(&pos_inflation_key)?; + let pos_inflation_amount: u64 = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; // read faucet account #[cfg(not(feature = "mainnet"))] - let (faucet_account, gas_faucet_account) = - read_faucet_account_parameter(storage)?; - #[cfg(feature = "mainnet")] - let gas_faucet_account = 0; + let faucet_account = read_faucet_account_parameter(storage)?; // read faucet account #[cfg(not(feature = "mainnet"))] - let (wrapper_tx_fees, gas_wrapper_tx_fees) = - read_wrapper_tx_fees_parameter(storage)?; - #[cfg(feature = "mainnet")] - let gas_wrapper_tx_fees = 0; - - let total_gas_cost = [ - gas_epoch, - gas_tx, - gas_vp, - gas_time, - gas_implicit_vp, - gas_epy, - gas_gain_p, - gas_gain_d, - gas_staked, - gas_reward, - gas_proposal_bytes, - gas_faucet_account, - gas_wrapper_tx_fees, - ] - .into_iter() - .fold(0u64, |accum, gas| { - accum - .checked_add(gas) - .expect("u64 overflow occurred while doing gas arithmetic") - }); - - Ok(( - Parameters { - epoch_duration, - max_expected_time_per_block, - max_proposal_bytes, - vp_whitelist, - tx_whitelist, - implicit_vp, - epochs_per_year, - pos_gain_p, - pos_gain_d, - staked_ratio, - pos_inflation_amount, - #[cfg(not(feature = "mainnet"))] - faucet_account, - #[cfg(not(feature = "mainnet"))] - wrapper_tx_fees, - }, - total_gas_cost, - )) + let wrapper_tx_fees = read_wrapper_tx_fees_parameter(storage)?; + + Ok(Parameters { + epoch_duration, + max_expected_time_per_block, + max_proposal_bytes, + vp_whitelist, + tx_whitelist, + implicit_vp, + epochs_per_year, + pos_gain_p, + pos_gain_d, + staked_ratio, + pos_inflation_amount, + #[cfg(not(feature = "mainnet"))] + faucet_account, + #[cfg(not(feature = "mainnet"))] + wrapper_tx_fees, + }) } diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index e2ac4da235..0ddbc600c5 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -710,40 +710,6 @@ where } } - /// Initialize a new epoch when the current epoch is finished. Returns - /// `true` on a new epoch. 
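For reference while reading the removal below (the logic moves unchanged onto `WlStorage` in `wl_storage.rs` later in this diff), the epoch-rollover rule can be condensed to the following sketch; the plain integer types are simplified stand-ins for `BlockHeight`, `DateTimeUtc`, and `EpochDuration`:

/// Simplified epoch-tracking state; the real fields live on `Storage`.
struct EpochState {
    epoch: u64,
    next_epoch_min_start_height: u64,
    /// Unix seconds, standing in for `DateTimeUtc`.
    next_epoch_min_start_time: i64,
}

/// Returns `true` when a new epoch begins, mirroring `update_epoch`: both
/// the minimum-height and minimum-duration conditions must hold.
fn maybe_bump_epoch(
    state: &mut EpochState,
    height: u64,
    time: i64,
    min_num_of_blocks: u64,
    min_duration_secs: i64,
) -> bool {
    let new_epoch = height >= state.next_epoch_min_start_height
        && time >= state.next_epoch_min_start_time;
    if new_epoch {
        state.epoch += 1;
        state.next_epoch_min_start_height = height + min_num_of_blocks;
        state.next_epoch_min_start_time = time + min_duration_secs;
    }
    new_epoch
}

fn main() {
    let mut state = EpochState {
        epoch: 0,
        next_epoch_min_start_height: 10,
        next_epoch_min_start_time: 60,
    };
    // Height reached but not the minimum duration: no new epoch yet.
    assert!(!maybe_bump_epoch(&mut state, 10, 59, 10, 60));
    // Both conditions hold: the epoch advances.
    assert!(maybe_bump_epoch(&mut state, 10, 60, 10, 60));
    assert_eq!(state.epoch, 1);
}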
- pub fn update_epoch( - &mut self, - height: BlockHeight, - time: DateTimeUtc, - ) -> Result { - let (parameters, _gas) = - parameters::read(self).expect("Couldn't read protocol parameters"); - - // Check if the current epoch is over - let new_epoch = height >= self.next_epoch_min_start_height - && time >= self.next_epoch_min_start_time; - if new_epoch { - // Begin a new epoch - self.block.epoch = self.block.epoch.next(); - let EpochDuration { - min_num_of_blocks, - min_duration, - } = parameters.epoch_duration; - self.next_epoch_min_start_height = height + min_num_of_blocks; - self.next_epoch_min_start_time = time + min_duration; - // TODO put this into PoS parameters and pass it to tendermint - // `consensus_params` on `InitChain` and `EndBlock` - let evidence_max_age_num_blocks: u64 = 100000; - self.block - .pred_epochs - .new_epoch(height, evidence_max_age_num_blocks); - tracing::info!("Began a new epoch {}", self.block.epoch); - } - self.update_epoch_in_merkle_tree()?; - Ok(new_epoch) - } - /// Get the current conversions pub fn get_conversion_state(&self) -> &ConversionState { &self.conversion_state @@ -954,11 +920,15 @@ mod tests { min_blocks_delta, min_duration_delta, max_time_per_block_delta) in arb_and_epoch_duration_start_and_block()) { - let mut storage = TestStorage { - next_epoch_min_start_height: - start_height + epoch_duration.min_num_of_blocks, - next_epoch_min_start_time: - start_time + epoch_duration.min_duration, + let mut wl_storage = + TestWlStorage { + storage: TestStorage { + next_epoch_min_start_height: + start_height + epoch_duration.min_num_of_blocks, + next_epoch_min_start_time: + start_time + epoch_duration.min_duration, + ..Default::default() + }, ..Default::default() }; let mut parameters = Parameters { @@ -978,13 +948,13 @@ mod tests { #[cfg(not(feature = "mainnet"))] wrapper_tx_fees: None, }; - parameters.init_storage(&mut storage); + parameters.init_storage(&mut wl_storage).unwrap(); - let epoch_before = storage.last_epoch; - assert_eq!(epoch_before, storage.block.epoch); + let epoch_before = wl_storage.storage.last_epoch; + assert_eq!(epoch_before, wl_storage.storage.block.epoch); // Try to apply the epoch update - storage.update_epoch(block_height, block_time).unwrap(); + wl_storage.update_epoch(block_height, block_time).unwrap(); // Test for 1. 
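            // (Case 1: with the arbitrary `block_height`/`block_time` from the
            // strategy, the epoch must advance, and the next minimum start
            // height/time must move forward, exactly when both the minimum
            // number of blocks and the minimum duration since the epoch start
            // have elapsed; otherwise everything must be left unchanged.)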
if block_height.0 - start_height.0 @@ -995,28 +965,28 @@ mod tests { epoch_duration.min_duration, ) { - assert_eq!(storage.block.epoch, epoch_before.next()); - assert_eq!(storage.next_epoch_min_start_height, + assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); + assert_eq!(wl_storage.storage.next_epoch_min_start_height, block_height + epoch_duration.min_num_of_blocks); - assert_eq!(storage.next_epoch_min_start_time, + assert_eq!(wl_storage.storage.next_epoch_min_start_time, block_time + epoch_duration.min_duration); assert_eq!( - storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + wl_storage.storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), Some(epoch_before)); assert_eq!( - storage.block.pred_epochs.get_epoch(block_height), + wl_storage.storage.block.pred_epochs.get_epoch(block_height), Some(epoch_before.next())); } else { - assert_eq!(storage.block.epoch, epoch_before); + assert_eq!(wl_storage.storage.block.epoch, epoch_before); assert_eq!( - storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + wl_storage.storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), Some(epoch_before)); assert_eq!( - storage.block.pred_epochs.get_epoch(block_height), + wl_storage.storage.block.pred_epochs.get_epoch(block_height), Some(epoch_before)); } // Last epoch should only change when the block is committed - assert_eq!(storage.last_epoch, epoch_before); + assert_eq!(wl_storage.storage.last_epoch, epoch_before); // Update the epoch duration parameters parameters.epoch_duration.min_num_of_blocks = @@ -1026,33 +996,33 @@ mod tests { Duration::seconds(min_duration + min_duration_delta).into(); parameters.max_expected_time_per_block = Duration::seconds(max_expected_time_per_block + max_time_per_block_delta).into(); - parameters::update_max_expected_time_per_block_parameter(&mut storage, ¶meters.max_expected_time_per_block).unwrap(); - parameters::update_epoch_parameter(&mut storage, ¶meters.epoch_duration).unwrap(); + parameters::update_max_expected_time_per_block_parameter(&mut wl_storage, ¶meters.max_expected_time_per_block).unwrap(); + parameters::update_epoch_parameter(&mut wl_storage, ¶meters.epoch_duration).unwrap(); // Test for 2. 
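            // (Case 2: after the epoch-duration parameters are updated above,
            // no epoch change may happen until both the stored minimum start
            // height and minimum start time are reached; once they are, the
            // epoch advances and the next minimum start height/time are
            // derived from the *new* parameter values.)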
- let epoch_before = storage.block.epoch; - let height_of_update = storage.next_epoch_min_start_height.0 ; - let time_of_update = storage.next_epoch_min_start_time; + let epoch_before = wl_storage.storage.block.epoch; + let height_of_update = wl_storage.storage.next_epoch_min_start_height.0 ; + let time_of_update = wl_storage.storage.next_epoch_min_start_time; let height_before_update = BlockHeight(height_of_update - 1); let height_of_update = BlockHeight(height_of_update); let time_before_update = time_of_update - Duration::seconds(1); // No update should happen before both epoch duration conditions are // satisfied - storage.update_epoch(height_before_update, time_before_update).unwrap(); - assert_eq!(storage.block.epoch, epoch_before); - storage.update_epoch(height_of_update, time_before_update).unwrap(); - assert_eq!(storage.block.epoch, epoch_before); - storage.update_epoch(height_before_update, time_of_update).unwrap(); - assert_eq!(storage.block.epoch, epoch_before); + wl_storage.update_epoch(height_before_update, time_before_update).unwrap(); + assert_eq!(wl_storage.storage.block.epoch, epoch_before); + wl_storage.update_epoch(height_of_update, time_before_update).unwrap(); + assert_eq!(wl_storage.storage.block.epoch, epoch_before); + wl_storage.update_epoch(height_before_update, time_of_update).unwrap(); + assert_eq!(wl_storage.storage.block.epoch, epoch_before); // Update should happen at this or after this height and time - storage.update_epoch(height_of_update, time_of_update).unwrap(); - assert_eq!(storage.block.epoch, epoch_before.next()); + wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); // The next epoch's minimum duration should change - assert_eq!(storage.next_epoch_min_start_height, + assert_eq!(wl_storage.storage.next_epoch_min_start_height, height_of_update + parameters.epoch_duration.min_num_of_blocks); - assert_eq!(storage.next_epoch_min_start_time, + assert_eq!(wl_storage.storage.next_epoch_min_start_time, time_of_update + parameters.epoch_duration.min_duration); } } diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index 8c89d3e6c4..067fc1b37b 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -2,12 +2,14 @@ use std::iter::Peekable; +use crate::ledger::parameters::EpochDuration; use crate::ledger::storage::write_log::{self, WriteLog}; use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; use crate::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; -use crate::ledger::{gas, storage_api}; +use crate::ledger::{gas, parameters, storage_api}; use crate::types::address::Address; -use crate::types::storage; +use crate::types::storage::{self, BlockHeight}; +use crate::types::time::DateTimeUtc; /// Storage with write log that allows to implement prefix iterator that works /// with changes not yet committed to the DB. @@ -33,17 +35,6 @@ where Self { write_log, storage } } - /// Commit the genesis state to DB. This should only be used before any - /// blocks are produced. 
- pub fn commit_genesis(&mut self) -> storage_api::Result<()> { - // Because the `impl StorageWrite for WlStorage` writes into block-level - // write log, we just commit the `block_write_log`, but without - // committing an actual block in storage - self.write_log - .commit_block(&mut self.storage) - .into_storage_result() - } - /// Commit the current transaction's write log to the block when it's /// accepted by all the triggered validity predicates. Starts a new /// transaction write log. @@ -65,6 +56,42 @@ where .into_storage_result()?; self.storage.commit_block().into_storage_result() } + + /// Initialize a new epoch when the current epoch is finished. Returns + /// `true` on a new epoch. + pub fn update_epoch( + &mut self, + height: BlockHeight, + time: DateTimeUtc, + ) -> crate::ledger::storage::Result { + let parameters = + parameters::read(self).expect("Couldn't read protocol parameters"); + + // Check if the current epoch is over + let new_epoch = height >= self.storage.next_epoch_min_start_height + && time >= self.storage.next_epoch_min_start_time; + if new_epoch { + // Begin a new epoch + self.storage.block.epoch = self.storage.block.epoch.next(); + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.storage.next_epoch_min_start_height = + height + min_num_of_blocks; + self.storage.next_epoch_min_start_time = time + min_duration; + // TODO put this into PoS parameters and pass it to tendermint + // `consensus_params` on `InitChain` and `EndBlock` + let evidence_max_age_num_blocks: u64 = 100000; + self.storage + .block + .pred_epochs + .new_epoch(height, evidence_max_age_num_blocks); + tracing::info!("Began a new epoch {}", self.storage.block.epoch); + } + self.storage.update_epoch_in_merkle_tree()?; + Ok(new_epoch) + } } /// Prefix iterator for [`WlStorage`]. diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs index 964f2283b5..7ff9914032 100644 --- a/proof_of_stake/src/tests.rs +++ b/proof_of_stake/src/tests.rs @@ -192,7 +192,7 @@ fn test_bonds_aux(params: PosParams, validators: Vec) { current_epoch, ) .unwrap(); - s.commit_genesis().unwrap(); + s.commit_block().unwrap(); // Advance to epoch 1 current_epoch = advance_epoch(&mut s, ¶ms); @@ -624,7 +624,7 @@ fn test_become_validator_aux( current_epoch, ) .unwrap(); - s.commit_genesis().unwrap(); + s.commit_block().unwrap(); // Advance to epoch 1 current_epoch = advance_epoch(&mut s, ¶ms); diff --git a/shared/src/ledger/ibc/mod.rs b/shared/src/ledger/ibc/mod.rs index 6cf1d6c9f1..101eb4b8af 100644 --- a/shared/src/ledger/ibc/mod.rs +++ b/shared/src/ledger/ibc/mod.rs @@ -7,11 +7,13 @@ use namada_core::ledger::ibc::storage::{ capability_index_key, channel_counter_key, client_counter_key, connection_counter_key, }; +use namada_core::ledger::storage::WlStorage; +use namada_core::ledger::storage_api::StorageWrite; -use crate::ledger::storage::{self as ledger_storage, Storage, StorageHasher}; +use crate::ledger::storage::{self as ledger_storage, StorageHasher}; /// Initialize storage in the genesis block. 
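The IBC genesis counters written below start at zero and are kept as raw big-endian `u64` bytes, which is why the calls switch to `write_bytes` (raw bytes) rather than the encoding `write`. A tiny self-contained illustration of that byte layout, in plain Rust and independent of the namada API:

use std::convert::TryInto;

fn main() {
    // Each counter is persisted as its 8-byte big-endian representation.
    let value: Vec<u8> = 0_u64.to_be_bytes().to_vec();
    assert_eq!(value, vec![0u8; 8]);

    // Reading a counter back reverses the encoding.
    let decoded = u64::from_be_bytes(value.as_slice().try_into().unwrap());
    assert_eq!(decoded, 0);
}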
-pub fn init_genesis_storage(storage: &mut Storage) +pub fn init_genesis_storage(storage: &mut WlStorage) where DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, H: StorageHasher, @@ -23,27 +25,27 @@ where let key = client_counter_key(); let value = 0_u64.to_be_bytes().to_vec(); storage - .write(&key, value) + .write_bytes(&key, value) .expect("Unable to write the initial client counter"); // the connection counter let key = connection_counter_key(); let value = 0_u64.to_be_bytes().to_vec(); storage - .write(&key, value) + .write_bytes(&key, value) .expect("Unable to write the initial connection counter"); // the channel counter let key = channel_counter_key(); let value = 0_u64.to_be_bytes().to_vec(); storage - .write(&key, value) + .write_bytes(&key, value) .expect("Unable to write the initial channel counter"); // the capability index let key = capability_index_key(); let value = 0_u64.to_be_bytes().to_vec(); storage - .write(&key, value) + .write_bytes(&key, value) .expect("Unable to write the initial capability index"); } diff --git a/shared/src/ledger/ibc/vp/channel.rs b/shared/src/ledger/ibc/vp/channel.rs index e85a221212..d4ffc12dfa 100644 --- a/shared/src/ledger/ibc/vp/channel.rs +++ b/shared/src/ledger/ibc/vp/channel.rs @@ -954,13 +954,8 @@ where } fn max_expected_time_per_block(&self) -> Duration { - match parameters::read(self.ctx.storage) { - Ok((parameters, gas)) => { - match self.ctx.gas_meter.borrow_mut().add(gas) { - Ok(_) => parameters.max_expected_time_per_block.into(), - Err(_) => Duration::default(), - } - } + match parameters::read(&self.ctx.pre()) { + Ok(parameters) => parameters.max_expected_time_per_block.into(), Err(_) => Duration::default(), } } diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index 108942b548..5703d460e8 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ b/shared/src/ledger/ibc/vp/mod.rs @@ -355,6 +355,7 @@ mod tests { use crate::ibc::tx_msg::Msg; use crate::ibc::Height; use crate::ibc_proto::cosmos::base::v1beta1::Coin; + use namada_core::ledger::storage::testing::TestWlStorage; use prost::Message; use crate::tendermint::time::Time as TmTime; use crate::tendermint_proto::Protobuf; @@ -393,17 +394,18 @@ mod tests { ClientId::from_str("test_client").expect("Creating a client ID failed") } - fn insert_init_states() -> (TestStorage, WriteLog) { - let mut storage = TestStorage::default(); - let mut write_log = WriteLog::default(); + fn insert_init_states() -> TestWlStorage { + let mut wl_storage = TestWlStorage::default(); // initialize the storage - super::super::init_genesis_storage(&mut storage); + super::super::init_genesis_storage(&mut wl_storage); // set a dummy header - storage + wl_storage + .storage .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - storage + wl_storage + .storage .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); @@ -411,7 +413,8 @@ mod tests { let client_id = get_client_id(); let client_type_key = client_type_key(&client_id); let client_type = ClientType::Mock.as_str().as_bytes().to_vec(); - write_log + wl_storage + .write_log .write(&client_type_key, client_type) .expect("write failed"); // insert a mock client state @@ -423,33 +426,37 @@ mod tests { }; let client_state = MockClientState::new(header).wrap_any(); let bytes = client_state.encode_vec().expect("encoding failed"); - write_log + wl_storage + .write_log .write(&client_state_key, bytes) .expect("write failed"); // insert a mock consensus state let consensus_key = 
consensus_state_key(&client_id, height); let consensus_state = MockConsensusState::new(header).wrap_any(); let bytes = consensus_state.encode_vec().expect("encoding failed"); - write_log + wl_storage + .write_log .write(&consensus_key, bytes) .expect("write failed"); // insert update time and height let client_update_time_key = client_update_timestamp_key(&client_id); let bytes = TmTime::now().encode_vec().expect("encoding failed"); - write_log + wl_storage + .write_log .write(&client_update_time_key, bytes) .expect("write failed"); let client_update_height_key = client_update_height_key(&client_id); let host_height = Height::new(10, 100); - write_log + wl_storage + .write_log .write( &client_update_height_key, host_height.encode_vec().expect("encoding failed"), ) .expect("write failed"); - write_log.commit_tx(); + wl_storage.write_log.commit_tx(); - (storage, write_log) + wl_storage } fn get_connection_id() -> ConnectionId { @@ -676,8 +683,8 @@ mod tests { #[test] fn test_update_client() { - let (mut storage, mut write_log) = insert_init_states(); - write_log.commit_block(&mut storage).expect("commit failed"); + let mut wl_storage = insert_init_states(); + wl_storage.commit_block().expect("commit failed"); // update the client let client_id = get_client_id(); @@ -694,24 +701,30 @@ mod tests { }; let client_state = MockClientState::new(header).wrap_any(); let bytes = client_state.encode_vec().expect("encoding failed"); - write_log + wl_storage + .write_log .write(&client_state_key, bytes) .expect("write failed"); let consensus_key = consensus_state_key(&client_id, height); let consensus_state = MockConsensusState::new(header).wrap_any(); let bytes = consensus_state.encode_vec().expect("encoding failed"); - write_log + wl_storage + .write_log .write(&consensus_key, bytes) .expect("write failed"); let event = make_update_client_event(&client_id, &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); // update time and height for this updating let key = client_update_timestamp_key(&client_id); - write_log + wl_storage + .write_log .write(&key, TmTime::now().encode_vec().expect("encoding failed")) .expect("write failed"); let key = client_update_height_key(&client_id); - write_log + wl_storage + .write_log .write( &key, Height::new(10, 101).encode_vec().expect("encoding failed"), @@ -733,8 +746,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -756,8 +769,8 @@ mod tests { #[test] fn test_init_connection() { - let (mut storage, mut write_log) = insert_init_states(); - write_log.commit_block(&mut storage).expect("commit failed"); + let mut wl_storage = insert_init_states(); + wl_storage.commit_block().expect("commit failed"); // prepare a message let msg = MsgConnectionOpenInit { @@ -773,9 +786,14 @@ mod tests { let conn_key = connection_key(&conn_id); let conn = init_connection(&msg); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); let event = make_open_init_connection_event(&conn_id, &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -792,8 +810,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx 
= Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -870,8 +888,11 @@ mod tests { #[test] fn test_try_connection() { - let (mut storage, mut write_log) = insert_init_states(); - write_log.commit_block(&mut storage).expect("commit failed"); + let mut wl_storage = insert_init_states(); + wl_storage + .write_log + .commit_block(&mut wl_storage.storage) + .expect("commit failed"); // prepare data let height = Height::new(0, 1); @@ -911,9 +932,14 @@ mod tests { let conn_key = connection_key(&conn_id); let conn = try_connection(&msg); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); let event = make_open_try_connection_event(&conn_id, &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -930,8 +956,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -953,18 +979,27 @@ mod tests { #[test] fn test_ack_connection() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert an Init connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Init); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage + .write_log + .commit_block(&mut wl_storage.storage) + .expect("commit failed"); // update the connection to Open let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // prepare data let height = Height::new(0, 1); @@ -1002,7 +1037,9 @@ mod tests { signer: Signer::new("account0"), }; let event = make_open_ack_connection_event(&msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let mut tx_data = vec![]; @@ -1018,8 +1055,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1040,18 +1077,24 @@ mod tests { #[test] fn test_confirm_connection() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert a TryOpen connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::TryOpen); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); // update the connection to Open let conn = get_connection(ConnState::Open); let 
bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // prepare data let height = Height::new(0, 1); @@ -1077,7 +1120,9 @@ mod tests { signer: Signer::new("account0"), }; let event = make_open_confirm_connection_event(&msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let mut tx_data = vec![]; @@ -1093,8 +1138,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1115,13 +1160,16 @@ mod tests { #[test] fn test_init_channel() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert an opened connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + wl_storage.commit_block().expect("commit failed"); // prepare data let channel = get_channel(ChanState::Init, Order::Ordered); @@ -1132,12 +1180,17 @@ mod tests { }; // insert an Init channel - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); let event = make_open_init_channel_event(&get_channel_id(), &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1154,8 +1207,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1176,13 +1229,16 @@ mod tests { #[test] fn test_try_channel() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert an opend connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); + wl_storage.commit_block().expect("commit failed"); // prepare data let height = Height::new(0, 1); @@ -1212,12 +1268,17 @@ mod tests { }; // insert a TryOpen channel - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); let event = make_open_try_channel_event(&get_channel_id(), &msg); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); let tx_index = 
TxIndex::default(); let tx_code = vec![]; @@ -1234,8 +1295,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1256,20 +1317,26 @@ mod tests { #[test] fn test_ack_channel() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert an opend connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // insert an Init channel - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let channel = get_channel(ChanState::Init, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); // prepare data let height = Height::new(0, 1); @@ -1302,10 +1369,15 @@ mod tests { // update the channel to Open let channel = get_channel(ChanState::Open, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); let event = make_open_ack_channel_event(&msg, &channel).expect("no connection"); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1322,8 +1394,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1344,20 +1416,26 @@ mod tests { #[test] fn test_confirm_channel() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert an opend connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // insert a TryOpen channel - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let channel = get_channel(ChanState::TryOpen, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); // prepare data let height = Height::new(0, 1); @@ -1386,11 +1464,16 @@ mod tests { // update the channel to Open let channel = get_channel(ChanState::Open, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, 
bytes).expect("write failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); let event = make_open_confirm_channel_event(&msg, &channel) .expect("no connection"); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1407,8 +1490,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1429,9 +1512,9 @@ mod tests { #[test] fn test_validate_port() { - let (storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert a port - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1447,8 +1530,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1469,10 +1552,10 @@ mod tests { #[test] fn test_validate_capability() { - let (storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert a port let index = 0; - set_port(&mut write_log, index); + set_port(&mut wl_storage.write_log, index); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1489,8 +1572,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1512,20 +1595,26 @@ mod tests { #[test] fn test_validate_seq_send() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert an opened connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // insert an opened channel - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let channel = get_channel(ChanState::Open, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); // prepare a message let timeout_timestamp = @@ -1545,15 +1634,16 @@ mod tests { // get and increment the nextSequenceSend let seq_key = next_sequence_send_key(&get_port_channel_id()); - let sequence = get_next_seq(&storage, &seq_key); - increment_seq(&mut write_log, &seq_key, sequence); + let sequence = get_next_seq(&wl_storage.storage, &seq_key); + increment_seq(&mut wl_storage.write_log, &seq_key, sequence); // make a packet let counterparty = get_channel_counterparty(); let packet = packet_from_message(&msg, sequence, &counterparty); // insert a commitment let commitment = actions::commitment(&packet); let key = commitment_key(&get_port_id(), &get_channel_id(), sequence); - write_log + wl_storage + .write_log .write(&key, commitment.into_vec()) .expect("write failed"); @@ -1572,8 +1662,8 @@ mod 
tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1594,25 +1684,31 @@ mod tests { #[test] fn test_validate_seq_recv() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert an opened connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // insert an opened channel - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let channel = get_channel(ChanState::Open, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); // get and increment the nextSequenceRecv let seq_key = next_sequence_recv_key(&get_port_channel_id()); - let sequence = get_next_seq(&storage, &seq_key); - increment_seq(&mut write_log, &seq_key, sequence); + let sequence = get_next_seq(&wl_storage.storage, &seq_key); + increment_seq(&mut wl_storage.write_log, &seq_key, sequence); // make a packet and data let counterparty = get_channel_counterparty(); let timeout_timestamp = @@ -1639,12 +1735,13 @@ mod tests { // insert a receipt and an ack let key = receipt_key(&get_port_id(), &get_channel_id(), sequence); - write_log + wl_storage + .write_log .write(&key, PacketReceipt::default().as_bytes().to_vec()) .expect("write failed"); let key = ack_key(&get_port_id(), &get_channel_id(), sequence); let ack = PacketAck::result_success().encode_to_vec(); - write_log.write(&key, ack).expect("write failed"); + wl_storage.write_log.write(&key, ack).expect("write failed"); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1661,8 +1758,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1683,10 +1780,10 @@ mod tests { #[test] fn test_validate_seq_ack() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // get the nextSequenceAck let seq_key = next_sequence_ack_key(&get_port_channel_id()); - let sequence = get_next_seq(&storage, &seq_key); + let sequence = get_next_seq(&wl_storage.storage, &seq_key); // make a packet let counterparty = get_channel_counterparty(); let timeout_timestamp = @@ -1705,22 +1802,29 @@ mod tests { let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // insert an opened channel - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let channel = get_channel(ChanState::Open, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - 
write_log.write(&channel_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); // insert a commitment let commitment = actions::commitment(&packet); let commitment_key = commitment_key(&get_port_id(), &get_channel_id(), sequence); - write_log + wl_storage + .write_log .write(&commitment_key, commitment.into_vec()) .expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); // prepare data let ack = PacketAck::result_success().encode_to_vec(); @@ -1736,9 +1840,12 @@ mod tests { }; // increment the nextSequenceAck - increment_seq(&mut write_log, &seq_key, sequence); + increment_seq(&mut wl_storage.write_log, &seq_key, sequence); // delete the commitment - write_log.delete(&commitment_key).expect("delete failed"); + wl_storage + .write_log + .delete(&commitment_key) + .expect("delete failed"); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1755,8 +1862,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1777,20 +1884,26 @@ mod tests { #[test] fn test_validate_commitment() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert an opened connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // insert an opened channel - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let channel = get_channel(ChanState::Open, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage.commit_block().expect("commit failed"); // prepare a message let timeout_timestamp = @@ -1810,7 +1923,7 @@ mod tests { // make a packet let seq_key = next_sequence_send_key(&get_port_channel_id()); - let sequence = get_next_seq(&storage, &seq_key); + let sequence = get_next_seq(&wl_storage.storage, &seq_key); let counterparty = get_channel_counterparty(); let packet = packet_from_message(&msg, sequence, &counterparty); // insert a commitment @@ -1820,11 +1933,14 @@ mod tests { &packet.source_channel, sequence, ); - write_log + wl_storage + .write_log .write(&commitment_key, commitment.into_vec()) .expect("write failed"); let event = make_send_packet_event(packet); - write_log.set_ibc_event(event.try_into().unwrap()); + wl_storage + .write_log + .set_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1841,8 +1957,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1863,20 +1979,29 @@ mod tests { #[test] fn test_validate_receipt() { - let (mut storage, mut write_log) = insert_init_states(); + let mut wl_storage = 
insert_init_states(); // insert an opened connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec().expect("encoding failed"); - write_log.write(&conn_key, bytes).expect("write failed"); + wl_storage + .write_log + .write(&conn_key, bytes) + .expect("write failed"); // insert an opened channel - set_port(&mut write_log, 0); + set_port(&mut wl_storage.write_log, 0); let channel_key = channel_key(&get_port_channel_id()); let channel = get_channel(ChanState::Open, Order::Ordered); let bytes = channel.encode_vec().expect("encoding failed"); - write_log.write(&channel_key, bytes).expect("write failed"); - write_log.commit_tx(); - write_log.commit_block(&mut storage).expect("commit failed"); + wl_storage + .write_log + .write(&channel_key, bytes) + .expect("write failed"); + wl_storage.write_log.commit_tx(); + wl_storage + .write_log + .commit_block(&mut wl_storage.storage) + .expect("commit failed"); // make a packet and data let counterparty = get_channel_counterparty(); @@ -1908,7 +2033,8 @@ mod tests { &msg.packet.destination_channel, msg.packet.sequence, ); - write_log + wl_storage + .write_log .write(&receipt_key, PacketReceipt::default().as_bytes().to_vec()) .expect("write failed"); let ack_key = ack_key( @@ -1917,7 +2043,10 @@ mod tests { msg.packet.sequence, ); let ack = PacketAck::result_success().encode_to_vec(); - write_log.write(&ack_key, ack).expect("write failed"); + wl_storage + .write_log + .write(&ack_key, ack) + .expect("write failed"); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1933,8 +2062,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, @@ -1956,18 +2085,22 @@ mod tests { #[test] fn test_validate_ack() { - let (storage, mut write_log) = insert_init_states(); + let mut wl_storage = insert_init_states(); // insert a receipt and an ack let receipt_key = receipt_key(&get_port_id(), &get_channel_id(), Sequence::from(1)); - write_log + wl_storage + .write_log .write(&receipt_key, PacketReceipt::default().as_bytes().to_vec()) .expect("write failed"); let ack_key = ack_key(&get_port_id(), &get_channel_id(), Sequence::from(1)); let ack = PacketAck::result_success().encode_to_vec(); - write_log.write(&ack_key, ack).expect("write failed"); + wl_storage + .write_log + .write(&ack_key, ack) + .expect("write failed"); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1982,8 +2115,8 @@ mod tests { let verifiers = BTreeSet::new(); let ctx = Ctx::new( &ADDRESS, - &storage, - &write_log, + &wl_storage.storage, + &wl_storage.write_log, &tx, &tx_index, gas_meter, diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 8151cdaaca..bc3fabc5a3 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -101,6 +101,11 @@ fn run_ledger_ibc() -> Result<()> { ledger_b.exp_string("Namada ledger node started")?; ledger_a.exp_string("This node is a validator")?; ledger_b.exp_string("This node is a validator")?; + + // Wait for a first block + ledger_a.exp_string("Committed block hash")?; + ledger_b.exp_string("Committed block hash")?; + let _bg_ledger_a = ledger_a.background(); let _bg_ledger_b = ledger_b.background(); diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 9437103546..7649b379f2 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -220,11 +220,13 @@ fn 
run_ledger_load_state_and_reset() -> Result<()> { ledger.exp_string("No state could be found")?; // Wait to commit a block ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; + let bg_ledger = ledger.background(); // Wait for a new epoch let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); epoch_sleep(&test, &validator_one_rpc, 30)?; // 2. Shut it down + let mut ledger = bg_ledger.foreground(); ledger.send_control('c')?; // Wait for the node to stop running to finish writing the state and tx // queue @@ -287,7 +289,8 @@ fn ledger_txs_and_queries() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let vp_user = wasm_abs_path(VP_USER_WASM); @@ -526,7 +529,8 @@ fn masp_txs_and_queries() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); @@ -792,7 +796,8 @@ fn masp_pinned_txs() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); @@ -955,7 +960,8 @@ fn masp_incentives() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); @@ -1653,9 +1659,9 @@ fn invalid_transactions() -> Result<()> { // 1. 
Run the ledger node let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; - // Wait to commit a block - ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; + + // Wait for a first block + ledger.exp_string("Committed block hash")?; let bg_ledger = ledger.background(); @@ -1810,7 +1816,8 @@ fn pos_bonds() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -2014,7 +2021,8 @@ fn pos_init_validator() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -2180,10 +2188,8 @@ fn ledger_many_txs_in_a_block() -> Result<()> { let mut ledger = run_as!(*test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; - - // Wait to commit a block - ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let bg_ledger = ledger.background(); let validator_one_rpc = Arc::new(get_actor_rpc(&test, &Who::Validator(0))); @@ -2296,7 +2302,8 @@ fn proposal_submission() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -2647,7 +2654,8 @@ fn proposal_offline() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(20))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -3095,6 +3103,11 @@ fn test_genesis_validators() -> Result<()> { non_validator.exp_string("Namada ledger node started")?; non_validator.exp_string("This node is not a validator")?; + // Wait for a first block + validator_0.exp_string("Committed block hash")?; + validator_1.exp_string("Committed block hash")?; + non_validator.exp_string("Committed block hash")?; + let bg_validator_0 = validator_0.background(); let bg_validator_1 = validator_1.background(); let _bg_non_validator = non_validator.background(); @@ -3325,7 +3338,8 @@ fn implicit_account_reveal_pk() -> Result<()> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + // Wait for a first block + ledger.exp_string("Committed block hash")?; let _bg_ledger = ledger.background(); let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); diff --git a/tests/src/vm_host_env/ibc.rs b/tests/src/vm_host_env/ibc.rs index 1e60480186..7755627c6b 100644 --- a/tests/src/vm_host_env/ibc.rs +++ b/tests/src/vm_host_env/ibc.rs @@ -183,7 +183,7 @@ pub fn validate_token_vp_from_tx<'a>( /// Initialize the test storage. 
Requires initialized [`tx_host_env::ENV`]. pub fn init_storage() -> (Address, Address) { tx_host_env::with(|env| { - init_genesis_storage(&mut env.wl_storage.storage); + init_genesis_storage(&mut env.wl_storage); // block header to check timeout timestamp env.wl_storage .storage diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 6c3ccd55ae..41f07aada5 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -99,21 +99,24 @@ impl TestTxEnv { vp_whitelist: Option<Vec<String>>, tx_whitelist: Option<Vec<String>>, ) { - let _ = parameters::update_epoch_parameter( - &mut self.wl_storage.storage, + parameters::update_epoch_parameter( + &mut self.wl_storage, &epoch_duration.unwrap_or(EpochDuration { min_num_of_blocks: 1, min_duration: DurationSecs(5), }), - ); - let _ = parameters::update_tx_whitelist_parameter( - &mut self.wl_storage.storage, + ) + .unwrap(); + parameters::update_tx_whitelist_parameter( + &mut self.wl_storage, tx_whitelist.unwrap_or_default(), - ); - let _ = parameters::update_vp_whitelist_parameter( - &mut self.wl_storage.storage, + ) + .unwrap(); + parameters::update_vp_whitelist_parameter( + &mut self.wl_storage, vp_whitelist.unwrap_or_default(), - ); + ) + .unwrap(); } /// Fake accounts' existence by initializing their VP storage. @@ -145,7 +148,7 @@ impl TestTxEnv { /// Commit the genesis state. Typically, you'll want to call this after /// setting up the initial state, before running a transaction. pub fn commit_genesis(&mut self) { - self.wl_storage.commit_genesis().unwrap(); + self.wl_storage.commit_block().unwrap(); } pub fn commit_tx_and_block(&mut self) {